Record fields (name: type, reported min to max):

func: string, lengths 0 to 484k
target: int64, 0 to 1
cwe: list, lengths 0 to 4
project: string, 799 distinct values
commit_id: string, length 40
hash: float64, 1,215,700,430,453,689,100,000,000B to 340,281,914,521,452,260,000,000,000,000B
size: int64, 1 to 24k
message: string, lengths 0 to 13.3k
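If the records are available locally (for example as JSON Lines, one object per row with the fields listed above), a few lines of Python are enough to load and summarize them. This is only a minimal sketch under stated assumptions: the file name vuln_functions.jsonl is a placeholder, the JSONL layout is assumed rather than documented here, and target == 1 is read as "function labeled vulnerable" based only on the field names and the sample rows below.

```python
import json
from collections import Counter

# Placeholder path: the dataset's actual distribution format and name are not
# stated in this section; one JSON object per line with the fields above is assumed.
PATH = "vuln_functions.jsonl"

def load_records(path):
    """Yield one record (dict) per non-empty line of a JSON Lines file."""
    with open(path, encoding="utf-8") as fh:
        for line in fh:
            line = line.strip()
            if line:
                yield json.loads(line)

def summarize(records):
    """Count records, count those labeled vulnerable, and tally their CWE ids."""
    total = 0
    vulnerable = 0
    cwe_counts = Counter()
    for rec in records:
        total += 1
        if rec["target"] == 1:          # assumption: 1 marks the vulnerable version
            vulnerable += 1
            cwe_counts.update(rec["cwe"])  # "cwe" is a possibly empty list of CWE ids
    return total, vulnerable, cwe_counts

if __name__ == "__main__":
    total, vulnerable, cwe_counts = summarize(load_records(PATH))
    print(f"{vulnerable}/{total} records labeled vulnerable")
    for cwe, n in cwe_counts.most_common(10):
        print(f"  {cwe}: {n}")
```

The same loop can be reused to slice by project or commit_id before inspecting individual func entries. Sample records follow: each shows the func field verbatim, then the remaining fields on one labeled line, then the commit message.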
static int read_pkcs12_kdf_params(ASN1_TYPE pbes2_asn, struct pbkdf2_params *params) { int result; memset(params, 0, sizeof(params)); /* read the salt */ params->salt_size = sizeof(params->salt); result = asn1_read_value(pbes2_asn, "salt", params->salt, &params->salt_size); if (result != ASN1_SUCCESS) { gnutls_assert(); result = _gnutls_asn2err(result); goto error; } _gnutls_hard_log("salt.size: %d\n", params->salt_size); /* read the iteration count */ result = _gnutls_x509_read_uint(pbes2_asn, "iterations", &params->iter_count); if (result != ASN1_SUCCESS) { gnutls_assert(); goto error; } _gnutls_hard_log("iterationCount: %d\n", params->iter_count); params->key_size = 0; return 0; error: return result; }
target: 0 | cwe: [] | project: gnutls | commit_id: 112d537da5f3500f14316db26d18c37d678a5e0e | hash: 290,408,903,043,983,600,000,000,000,000,000,000,000 | size: 38 | message:
some changes for 64bit machines.
void gg_free_session(struct gg_session *sess) { struct gg_dcc7 *dcc; struct gg_chat_list *chat; gg_debug_session(sess, GG_DEBUG_FUNCTION, "** gg_free_session(%p);\n", sess); if (sess == NULL) return; /* XXX dopisać zwalnianie i zamykanie wszystkiego, co mogło zostać */ free(sess->resolver_result); free(sess->connect_host); free(sess->password); free(sess->initial_descr); free(sess->client_version); free(sess->header_buf); free(sess->recv_buf); #ifdef GG_CONFIG_HAVE_GNUTLS if (sess->ssl != NULL) { gg_session_gnutls_t *tmp; tmp = (gg_session_gnutls_t*) sess->ssl; gnutls_deinit(tmp->session); gnutls_certificate_free_credentials(tmp->xcred); gnutls_global_deinit(); free(sess->ssl); } #endif #ifdef GG_CONFIG_HAVE_OPENSSL if (sess->ssl) SSL_free(sess->ssl); if (sess->ssl_ctx) SSL_CTX_free(sess->ssl_ctx); #endif sess->resolver_cleanup(&sess->resolver, 1); if (sess->fd != -1) close(sess->fd); while (sess->images) gg_image_queue_remove(sess, sess->images, 1); free(sess->send_buf); for (dcc = sess->dcc7_list; dcc; dcc = dcc->next) dcc->sess = NULL; chat = sess->chat_list; while (chat != NULL) { struct gg_chat_list *next = chat->next; free(chat->participants); free(chat); chat = next; } free(sess); }
target: 0 | cwe: [ "CWE-310" ] | project: libgadu | commit_id: 23644f1fb8219031b3cac93289a588b05f90226b | hash: 211,291,224,733,660,200,000,000,000,000,000,000,000 | size: 63 | message:
Poprawka ograniczania długości opisu. [Polish: "Fix for limiting the description length."]
void mg_mgr_wakeup(struct mg_connection *c) { LOG(LL_INFO, ("skt: %p", c->pfn_data)); send((SOCKET) (size_t) c->pfn_data, "\x01", 1, MSG_NONBLOCKING); }
target: 0 | cwe: [ "CWE-552" ] | project: mongoose | commit_id: c65c8fdaaa257e0487ab0aaae9e8f6b439335945 | hash: 271,146,950,325,656,040,000,000,000,000,000,000,000 | size: 4 | message:
Protect against the directory traversal in mg_upload()
IsoPresentation_createConnectPdu(IsoPresentation* self, IsoConnectionParameters parameters, BufferChain buffer, BufferChain payload) { self->acseContextId = 1; self->mmsContextId = 3; self->callingPresentationSelector = parameters->localPSelector; self->calledPresentationSelector = parameters->remotePSelector; createConnectPdu(self, buffer, payload); }
target: 0 | cwe: [ "CWE-703", "CWE-835" ] | project: libiec61850 | commit_id: cfa94cbf10302bedc779703f874ee2e8387a0721 | hash: 140,796,700,680,993,980,000,000,000,000,000,000,000 | size: 9 | message:
- fixed - Bug in presentation layer parser can cause infinite loop (LIB61850-302)
int save_in_field(Field *field, bool no_conversions) { Timestamp_or_zero_datetime_native native(m_value, decimals); return native.save_in_field(field, decimals); }
target: 0 | cwe: [ "CWE-617" ] | project: server | commit_id: 807945f2eb5fa22e6f233cc17b85a2e141efe2c8 | hash: 56,897,607,028,915,015,000,000,000,000,000,000,000 | size: 5 | message:
MDEV-26402: A SEGV in Item_field::used_tables/update_depend_map_for_order... When doing condition pushdown from HAVING into WHERE, Item_equal::create_pushable_equalities() calls item->set_extraction_flag(IMMUTABLE_FL) for constant items. Then, Item::cleanup_excluding_immutables_processor() checks for this flag to see if it should call item->cleanup() or leave the item as-is. The failure happens when a constant item has a non-constant one inside it, like: (tbl.col=0 AND impossible_cond) item->walk(cleanup_excluding_immutables_processor) works in a bottom-up way so it 1. will call Item_func_eq(tbl.col=0)->cleanup() 2. will not call Item_cond_and->cleanup (as the AND is constant) This creates an item tree where a fixed Item has an un-fixed Item inside it which eventually causes an assertion failure. Fixed by introducing this rule: instead of just calling item->set_extraction_flag(IMMUTABLE_FL); we call Item::walk() to set the flag for all sub-items of the item.
Http::FilterMetadataStatus Context::onResponseMetadata() { if (!wasm_->onResponseMetadata_) { return Http::FilterMetadataStatus::Continue; } if (wasm_->onResponseMetadata_(this, id_).u64_ == 0) { return Http::FilterMetadataStatus::Continue; } return Http::FilterMetadataStatus::Continue; // This is currently the only return code. }
target: 1 | cwe: [ "CWE-476" ] | project: envoy | commit_id: 8788a3cf255b647fd14e6b5e2585abaaedb28153 | hash: 135,015,741,758,650,450,000,000,000,000,000,000,000 | size: 9 | message:
1.4 - Do not call into the VM unless the VM Context has been created. (#24) * Ensure that the in VM Context is created before onDone is called. Signed-off-by: John Plevyak <[email protected]> * Update as per offline discussion. Signed-off-by: John Plevyak <[email protected]> * Set in_vm_context_created_ in onNetworkNewConnection. Signed-off-by: John Plevyak <[email protected]> * Add guards to other network calls. Signed-off-by: John Plevyak <[email protected]> * Fix common/wasm tests. Signed-off-by: John Plevyak <[email protected]> * Patch tests. Signed-off-by: John Plevyak <[email protected]> * Remove unecessary file from cherry-pick. Signed-off-by: John Plevyak <[email protected]>
sub_font_params(gs_memory_t *mem, const ref *op, gs_matrix *pmat, gs_matrix *pomat, ref *pfname) { ref *pmatrix, *pfontname, *pfontstyle, *porigfont, *pfontinfo; if (dict_find_string(op, "FontMatrix", &pmatrix) <= 0 || read_matrix(mem, pmatrix, pmat) < 0 ) return_error(gs_error_invalidfont); if (dict_find_string(op, "OrigFont", &porigfont) <= 0) porigfont = NULL; if (porigfont != NULL && !r_has_type(porigfont, t_dictionary)) return_error(gs_error_typecheck); if (pomat!= NULL) { if (porigfont == NULL || dict_find_string(porigfont, "FontMatrix", &pmatrix) <= 0 || read_matrix(mem, pmatrix, pomat) < 0 ) memset(pomat, 0, sizeof(*pomat)); } /* Use the FontInfo/OrigFontName key preferrentially (created by MS PSCRIPT driver) */ if ((dict_find_string((porigfont != NULL ? porigfont : op), "FontInfo", &pfontinfo) > 0) && r_has_type(pfontinfo, t_dictionary) && (dict_find_string(pfontinfo, "OrigFontName", &pfontname) > 0) && (r_has_type(pfontname, t_name) || r_has_type(pfontname, t_string))) { if ((dict_find_string(pfontinfo, "OrigFontStyle", &pfontstyle) > 0) && (r_has_type(pfontname, t_name) || r_has_type(pfontname, t_string)) && r_size(pfontstyle) > 0) { const byte *tmpStr1 = pfontname->value.const_bytes; const byte *tmpStr2 = pfontstyle->value.const_bytes; int fssize1 = r_size(pfontname), fssize2 = r_size(pfontstyle), fssize = fssize1 + fssize2 + 1; byte *sfname = gs_alloc_string(mem, fssize, "sub_font_params"); if (sfname == NULL) return_error(gs_error_VMerror); memcpy(sfname, tmpStr1, fssize1); sfname[fssize1]=',' ; memcpy(sfname + fssize1 + 1, tmpStr2, fssize2); make_string(pfname, a_readonly, fssize, sfname); } else get_font_name(mem, pfname, pfontname); } else if (dict_find_string((porigfont != NULL ? porigfont : op), ".Alias", &pfontname) > 0) { /* If we emulate the font, we want the requested name rather than a substitute. */ get_font_name(mem, pfname, pfontname); } else if (dict_find_string((porigfont != NULL ? porigfont : op), "FontName", &pfontname) > 0) { get_font_name(mem, pfname, pfontname); } else make_empty_string(pfname, a_readonly); return 0; }
target: 0 | cwe: [ "CWE-704" ] | project: ghostpdl | commit_id: 548bb434e81dadcc9f71adf891a3ef5bea8e2b4e | hash: 297,925,919,679,966,350,000,000,000,000,000,000,000 | size: 48 | message:
PS interpreter - add some type checking These were 'probably' safe anyway, since they mostly treat the objects as integers without checking, which at least can't result in a crash. Nevertheless, we ought to check. The return from comparedictkeys could be wrong if one of the keys had a value which was not an array, it could incorrectly decide the two were in fact the same.
onig_end(void) { THREAD_ATOMIC_START; #ifdef ONIG_DEBUG_STATISTICS onig_print_statistics(stderr); #endif #ifdef USE_SHARED_CCLASS_TABLE onig_free_shared_cclass_table(); #endif #ifdef USE_PARSE_TREE_NODE_RECYCLE onig_free_node_list(); #endif onig_inited = 0; THREAD_ATOMIC_END; THREAD_SYSTEM_END; return 0; }
target: 0 | cwe: [ "CWE-125" ] | project: php-src | commit_id: 28362ed4fae6969b5a8878591a5a06eadf114e03 | hash: 42,764,466,797,900,420,000,000,000,000,000,000,000 | size: 22 | message:
Fix bug #77371 (heap buffer overflow in mb regex functions - compile_string_node)
GF_Err trgr_on_child_box(GF_Box *s, GF_Box *a, Bool is_rem) { GF_TrackGroupBox *ptr = (GF_TrackGroupBox *)s; BOX_FIELD_LIST_ASSIGN(groups) return gf_list_add(ptr->groups, a); }
target: 0 | cwe: [ "CWE-787" ] | project: gpac | commit_id: 77510778516803b7f7402d7423c6d6bef50254c3 | hash: 82,417,372,005,479,940,000,000,000,000,000,000,000 | size: 7 | message:
fixed #2255
static int handle_ip_over_ddp(struct sk_buff *skb) { struct net_device *dev = __dev_get_by_name(&init_net, "ipddp0"); struct net_device_stats *stats; /* This needs to be able to handle ipddp"N" devices */ if (!dev) return -ENODEV; skb->protocol = htons(ETH_P_IP); skb_pull(skb, 13); skb->dev = dev; skb_reset_transport_header(skb); stats = netdev_priv(dev); stats->rx_packets++; stats->rx_bytes += skb->len + 13; netif_rx(skb); /* Send the SKB up to a higher place. */ return 0; }
target: 0 | cwe: [ "CWE-200" ] | project: linux-2.6 | commit_id: 3d392475c873c10c10d6d96b94d092a34ebd4791 | hash: 79,106,646,792,186,980,000,000,000,000,000,000,000 | size: 20 | message:
appletalk: fix atalk_getname() leak atalk_getname() can leak 8 bytes of kernel memory to user Signed-off-by: Eric Dumazet <[email protected]> Signed-off-by: David S. Miller <[email protected]>
dirserv_set_cached_consensus_networkstatus(const char *networkstatus, const char *flavor_name, const digests_t *digests, time_t published) { cached_dir_t *new_networkstatus; cached_dir_t *old_networkstatus; if (!cached_consensuses) cached_consensuses = strmap_new(); new_networkstatus = new_cached_dir(tor_strdup(networkstatus), published); memcpy(&new_networkstatus->digests, digests, sizeof(digests_t)); old_networkstatus = strmap_set(cached_consensuses, flavor_name, new_networkstatus); if (old_networkstatus) cached_dir_decref(old_networkstatus); }
target: 0 | cwe: [ "CWE-264" ] | project: tor | commit_id: 00fffbc1a15e2696a89c721d0c94dc333ff419ef | hash: 273,360,839,687,311,260,000,000,000,000,000,000,000 | size: 17 | message:
Don't give the Guard flag to relays without the CVE-2011-2768 fix
RZ_API bool rz_analysis_function_rebase_vars(RzAnalysis *a, RzAnalysisFunction *fcn) { rz_return_val_if_fail(a && fcn, false); RzListIter *it; RzAnalysisVar *var; RzList *var_list = rz_analysis_var_all_list(a, fcn); rz_return_val_if_fail(var_list, false); rz_list_foreach (var_list, it, var) { // Resync delta in case the registers list changed if (var->isarg && var->kind == 'r') { RzRegItem *reg = rz_reg_get(a->reg, var->regname, -1); if (reg) { if (var->delta != reg->index) { var->delta = reg->index; } } } } rz_list_free(var_list); return true; }
target: 0 | cwe: [ "CWE-703" ] | project: rizin | commit_id: 6ce71d8aa3dafe3cdb52d5d72ae8f4b95916f939 | hash: 239,281,048,539,882,270,000,000,000,000,000,000,000 | size: 22 | message:
Initialize retctx,ctx before freeing the inner elements In rz_core_analysis_type_match retctx structure was initialized on the stack only after a "goto out_function", where a field of that structure was freed. When the goto path is taken, the field is not properly initialized and it cause cause a crash of Rizin or have other effects. Fixes: CVE-2021-4022
static int __f2fs_commit_super(struct buffer_head *bh, struct f2fs_super_block *super) { lock_buffer(bh); if (super) memcpy(bh->b_data + F2FS_SUPER_OFFSET, super, sizeof(*super)); set_buffer_uptodate(bh); set_buffer_dirty(bh); unlock_buffer(bh); /* it's rare case, we can do fua all the time */ return __sync_dirty_buffer(bh, REQ_PREFLUSH | REQ_FUA); }
target: 0 | cwe: [ "CWE-284" ] | project: linux | commit_id: b9dd46188edc2f0d1f37328637860bb65a771124 | hash: 328,152,648,146,841,560,000,000,000,000,000,000,000 | size: 13 | message:
f2fs: sanity check segment count F2FS uses 4 bytes to represent block address. As a result, supported size of disk is 16 TB and it equals to 16 * 1024 * 1024 / 2 segments. Signed-off-by: Jin Qian <[email protected]> Signed-off-by: Jaegeuk Kim <[email protected]>
static int sit_tunnel_rcv(struct sk_buff *skb, u8 ipproto) { const struct iphdr *iph; struct ip_tunnel *tunnel; int sifindex; sifindex = netif_is_l3_master(skb->dev) ? IPCB(skb)->iif : 0; iph = ip_hdr(skb); tunnel = ipip6_tunnel_lookup(dev_net(skb->dev), skb->dev, iph->saddr, iph->daddr, sifindex); if (tunnel) { const struct tnl_ptk_info *tpi; if (tunnel->parms.iph.protocol != ipproto && tunnel->parms.iph.protocol != 0) goto drop; if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) goto drop; #if IS_ENABLED(CONFIG_MPLS) if (ipproto == IPPROTO_MPLS) tpi = &mplsip_tpi; else #endif tpi = &ipip_tpi; if (iptunnel_pull_header(skb, 0, tpi->proto, false)) goto drop; return ip_tunnel_rcv(tunnel, skb, tpi, NULL, log_ecn_error); } return 1; drop: kfree_skb(skb); return 0; }
target: 0 | cwe: [ "CWE-703", "CWE-772", "CWE-401" ] | project: linux | commit_id: 07f12b26e21ab359261bf75cfcb424fdc7daeb6d | hash: 237,011,745,504,340,500,000,000,000,000,000,000,000 | size: 37 | message:
net: sit: fix memory leak in sit_init_net() If register_netdev() is failed to register sitn->fb_tunnel_dev, it will go to err_reg_dev and forget to free netdev(sitn->fb_tunnel_dev). BUG: memory leak unreferenced object 0xffff888378daad00 (size 512): comm "syz-executor.1", pid 4006, jiffies 4295121142 (age 16.115s) hex dump (first 32 bytes): 00 e6 ed c0 83 88 ff ff 00 00 00 00 00 00 00 00 ................ 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................ backtrace: [<00000000d6dcb63e>] kvmalloc include/linux/mm.h:577 [inline] [<00000000d6dcb63e>] kvzalloc include/linux/mm.h:585 [inline] [<00000000d6dcb63e>] netif_alloc_netdev_queues net/core/dev.c:8380 [inline] [<00000000d6dcb63e>] alloc_netdev_mqs+0x600/0xcc0 net/core/dev.c:8970 [<00000000867e172f>] sit_init_net+0x295/0xa40 net/ipv6/sit.c:1848 [<00000000871019fa>] ops_init+0xad/0x3e0 net/core/net_namespace.c:129 [<00000000319507f6>] setup_net+0x2ba/0x690 net/core/net_namespace.c:314 [<0000000087db4f96>] copy_net_ns+0x1dc/0x330 net/core/net_namespace.c:437 [<0000000057efc651>] create_new_namespaces+0x382/0x730 kernel/nsproxy.c:107 [<00000000676f83de>] copy_namespaces+0x2ed/0x3d0 kernel/nsproxy.c:165 [<0000000030b74bac>] copy_process.part.27+0x231e/0x6db0 kernel/fork.c:1919 [<00000000fff78746>] copy_process kernel/fork.c:1713 [inline] [<00000000fff78746>] _do_fork+0x1bc/0xe90 kernel/fork.c:2224 [<000000001c2e0d1c>] do_syscall_64+0xc8/0x580 arch/x86/entry/common.c:290 [<00000000ec48bd44>] entry_SYSCALL_64_after_hwframe+0x49/0xbe [<0000000039acff8a>] 0xffffffffffffffff Signed-off-by: Mao Wenan <[email protected]> Signed-off-by: David S. Miller <[email protected]>
static void nf_conntrack_standalone_fini_sysctl(struct net *net) { }
target: 0 | cwe: [ "CWE-203" ] | project: linux | commit_id: 2671fa4dc0109d3fb581bc3078fdf17b5d9080f6 | hash: 56,466,389,168,611,110,000,000,000,000,000,000,000 | size: 3 | message:
netfilter: conntrack: Make global sysctls readonly in non-init netns These sysctls point to global variables: - NF_SYSCTL_CT_MAX (&nf_conntrack_max) - NF_SYSCTL_CT_EXPECT_MAX (&nf_ct_expect_max) - NF_SYSCTL_CT_BUCKETS (&nf_conntrack_htable_size_user) Because their data pointers are not updated to point to per-netns structures, they must be marked read-only in a non-init_net ns. Otherwise, changes in any net namespace are reflected in (leaked into) all other net namespaces. This problem has existed since the introduction of net namespaces. The current logic marks them read-only only if the net namespace is owned by an unprivileged user (other than init_user_ns). Commit d0febd81ae77 ("netfilter: conntrack: re-visit sysctls in unprivileged namespaces") "exposes all sysctls even if the namespace is unpriviliged." Since we need to mark them readonly in any case, we can forego the unprivileged user check altogether. Fixes: d0febd81ae77 ("netfilter: conntrack: re-visit sysctls in unprivileged namespaces") Signed-off-by: Jonathon Reinhart <[email protected]> Signed-off-by: David S. Miller <[email protected]>
void LibarchivePlugin::slotRestoreWorkingDir() { if (m_oldWorkingDir.isEmpty()) { return; } if (!QDir::setCurrent(m_oldWorkingDir)) { qCWarning(ARK) << "Failed to restore old working directory:" << m_oldWorkingDir; } else { m_oldWorkingDir.clear(); } }
target: 0 | cwe: [ "CWE-59", "CWE-61" ] | project: ark | commit_id: 8bf8c5ef07b0ac5e914d752681e470dea403a5bd | hash: 151,324,329,464,999,530,000,000,000,000,000,000,000 | size: 12 | message:
Pass the ARCHIVE_EXTRACT_SECURE_SYMLINKS flag to libarchive There are archive types which allow to first create a symlink and then later on dereference it. If the symlink points outside of the archive, this results in writing outside of the destination directory. With the ARCHIVE_EXTRACT_SECURE_SYMLINKS option set, libarchive avoids this situation by verifying that none of the target path components are symlinks before writing. Remove the commented out code in the method, which would actually misbehave if enabled again. Signed-off-by: Fabian Vogt <[email protected]>
static void dp8393x_set_next_tick(dp8393xState *s) { uint32_t ticks; int64_t delay; if (s->regs[SONIC_CR] & SONIC_CR_STP) { timer_del(s->watchdog); return; } ticks = dp8393x_wt(s); s->wt_last_update = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); delay = NANOSECONDS_PER_SECOND * ticks / 5000000; timer_mod(s->watchdog, s->wt_last_update + delay); }
target: 0 | cwe: [] | project: qemu | commit_id: 915976bd98a9286efe6f2e573cb4f1360603adf9 | hash: 294,413,602,691,955,580,000,000,000,000,000,000,000 | size: 15 | message:
hw/net/dp8393x: fix integer underflow in dp8393x_do_transmit_packets() An integer underflow could occur during packet transmission due to 'tx_len' not being updated if SONIC_TFC register is set to zero. Check for negative 'tx_len' when removing existing FCS. RHBZ: https://bugzilla.redhat.com/show_bug.cgi?id=1899722 Signed-off-by: Mauro Matteo Cascella <[email protected]> Reported-by: Gaoning Pan <[email protected]> Acked-by: Jason Wang <[email protected]> Message-id: [email protected] Signed-off-by: Peter Maydell <[email protected]>
static int find_port_owner(struct usb_device *hdev, unsigned port1, struct usb_dev_state ***ppowner) { struct usb_hub *hub = usb_hub_to_struct_hub(hdev); if (hdev->state == USB_STATE_NOTATTACHED) return -ENODEV; if (port1 == 0 || port1 > hdev->maxchild) return -EINVAL; /* Devices not managed by the hub driver * will always have maxchild equal to 0. */ *ppowner = &(hub->ports[port1 - 1]->port_owner); return 0; }
target: 0 | cwe: [ "CWE-703" ] | project: linux | commit_id: e50293ef9775c5f1cf3fcc093037dd6a8c5684ea | hash: 155,548,411,841,992,150,000,000,000,000,000,000,000 | size: 16 | message:
USB: fix invalid memory access in hub_activate() Commit 8520f38099cc ("USB: change hub initialization sleeps to delayed_work") changed the hub_activate() routine to make part of it run in a workqueue. However, the commit failed to take a reference to the usb_hub structure or to lock the hub interface while doing so. As a result, if a hub is plugged in and quickly unplugged before the work routine can run, the routine will try to access memory that has been deallocated. Or, if the hub is unplugged while the routine is running, the memory may be deallocated while it is in active use. This patch fixes the problem by taking a reference to the usb_hub at the start of hub_activate() and releasing it at the end (when the work is finished), and by locking the hub interface while the work routine is running. It also adds a check at the start of the routine to see if the hub has already been disconnected, in which nothing should be done. Signed-off-by: Alan Stern <[email protected]> Reported-by: Alexandru Cornea <[email protected]> Tested-by: Alexandru Cornea <[email protected]> Fixes: 8520f38099cc ("USB: change hub initialization sleeps to delayed_work") CC: <[email protected]> Signed-off-by: Greg Kroah-Hartman <[email protected]>
file_ascmagic_with_encoding(struct magic_set *ms, const unsigned char *buf, size_t nbytes, unichar *ubuf, size_t ulen, const char *code, const char *type, int text) { unsigned char *utf8_buf = NULL, *utf8_end; size_t mlen, i; int rv = -1; int mime = ms->flags & MAGIC_MIME; const char *subtype = NULL; const char *subtype_mime = NULL; int has_escapes = 0; int has_backspace = 0; int seen_cr = 0; int n_crlf = 0; int n_lf = 0; int n_cr = 0; int n_nel = 0; int executable = 0; size_t last_line_end = (size_t)-1; int has_long_lines = 0; if (ms->flags & MAGIC_APPLE) return 0; nbytes = trim_nuls(buf, nbytes); /* If we have fewer than 2 bytes, give up. */ if (nbytes <= 1) { rv = 0; goto done; } if (ulen > 0 && (ms->flags & MAGIC_NO_CHECK_SOFT) == 0) { /* Convert ubuf to UTF-8 and try text soft magic */ /* malloc size is a conservative overestimate; could be improved, or at least realloced after conversion. */ mlen = ulen * 6; if ((utf8_buf = CAST(unsigned char *, malloc(mlen))) == NULL) { file_oomem(ms, mlen); goto done; } if ((utf8_end = encode_utf8(utf8_buf, mlen, ubuf, ulen)) == NULL) goto done; if ((rv = file_softmagic(ms, utf8_buf, (size_t)(utf8_end - utf8_buf), TEXTTEST, text)) == 0) rv = -1; } /* Now try to discover other details about the file. */ for (i = 0; i < ulen; i++) { if (ubuf[i] == '\n') { if (seen_cr) n_crlf++; else n_lf++; last_line_end = i; } else if (seen_cr) n_cr++; seen_cr = (ubuf[i] == '\r'); if (seen_cr) last_line_end = i; if (ubuf[i] == 0x85) { /* X3.64/ECMA-43 "next line" character */ n_nel++; last_line_end = i; } /* If this line is _longer_ than MAXLINELEN, remember it. */ if (i > last_line_end + MAXLINELEN) has_long_lines = 1; if (ubuf[i] == '\033') has_escapes = 1; if (ubuf[i] == '\b') has_backspace = 1; } /* Beware, if the data has been truncated, the final CR could have been followed by a LF. If we have HOWMANY bytes, it indicates that the data might have been truncated, probably even before this function was called. */ if (seen_cr && nbytes < HOWMANY) n_cr++; if (strcmp(type, "binary") == 0) { rv = 0; goto done; } if (mime) { if (!file_printedlen(ms) && (mime & MAGIC_MIME_TYPE) != 0) { if (subtype_mime) { if (file_printf(ms, "%s", subtype_mime) == -1) goto done; } else { if (file_printf(ms, "text/plain") == -1) goto done; } } } else { if (file_printedlen(ms)) { switch (file_replace(ms, " text$", ", ")) { case 0: switch (file_replace(ms, " text executable$", ", ")) { case 0: if (file_printf(ms, ", ") == -1) goto done; break; case -1: goto done; default: executable = 1; break; } break; case -1: goto done; default: break; } } if (file_printf(ms, "%s", code) == -1) goto done; if (subtype) { if (file_printf(ms, " %s", subtype) == -1) goto done; } if (file_printf(ms, " %s", type) == -1) goto done; if (executable) if (file_printf(ms, " executable") == -1) goto done; if (has_long_lines) if (file_printf(ms, ", with very long lines") == -1) goto done; /* * Only report line terminators if we find one other than LF, * or if we find none at all. 
*/ if ((n_crlf == 0 && n_cr == 0 && n_nel == 0 && n_lf == 0) || (n_crlf != 0 || n_cr != 0 || n_nel != 0)) { if (file_printf(ms, ", with") == -1) goto done; if (n_crlf == 0 && n_cr == 0 && n_nel == 0 && n_lf == 0) { if (file_printf(ms, " no") == -1) goto done; } else { if (n_crlf) { if (file_printf(ms, " CRLF") == -1) goto done; if (n_cr || n_lf || n_nel) if (file_printf(ms, ",") == -1) goto done; } if (n_cr) { if (file_printf(ms, " CR") == -1) goto done; if (n_lf || n_nel) if (file_printf(ms, ",") == -1) goto done; } if (n_lf) { if (file_printf(ms, " LF") == -1) goto done; if (n_nel) if (file_printf(ms, ",") == -1) goto done; } if (n_nel) if (file_printf(ms, " NEL") == -1) goto done; } if (file_printf(ms, " line terminators") == -1) goto done; } if (has_escapes) if (file_printf(ms, ", with escape sequences") == -1) goto done; if (has_backspace) if (file_printf(ms, ", with overstriking") == -1) goto done; } rv = 1; done: free(utf8_buf); return rv; }
target: 1 | cwe: [ "CWE-755" ] | project: file | commit_id: cc9e74dfeca5265ad725acc926ef0b8d2a18ee70 | hash: 101,208,015,526,519,740,000,000,000,000,000,000,000 | size: 203 | message:
count indirect recursion as recursion.
Pl_RunLength::flush_encode() { if (this->length == 128) { QTC::TC("libtests", "Pl_RunLength flush full buffer", (this->state == st_copying ? 0 : this->state == st_run ? 1 : -1)); } if (this->length == 0) { QTC::TC("libtests", "Pl_RunLength flush empty buffer"); } if (this->state == st_run) { if ((this->length < 2) || (this->length > 128)) { throw std::logic_error( "Pl_RunLength: invalid length in flush_encode for run"); } unsigned char ch = static_cast<unsigned char>(257 - this->length); this->getNext()->write(&ch, 1); this->getNext()->write(&this->buf[0], 1); } else if (this->length > 0) { unsigned char ch = static_cast<unsigned char>(this->length - 1); this->getNext()->write(&ch, 1); this->getNext()->write(this->buf, this->length); } this->state = st_top; this->length = 0; }
target: 0 | cwe: [ "CWE-787" ] | project: qpdf | commit_id: d71f05ca07eb5c7cfa4d6d23e5c1f2a800f52e8e | hash: 222,418,699,778,207,670,000,000,000,000,000,000,000 | size: 33 | message:
Fix sign and conversion warnings (major) This makes all integer type conversions that have potential data loss explicit with calls that do range checks and raise an exception. After this commit, qpdf builds with no warnings when -Wsign-conversion -Wconversion is used with gcc or clang or when -W3 -Wd4800 is used with MSVC. This significantly reduces the likelihood of potential crashes from bogus integer values. There are some parts of the code that take int when they should take size_t or an offset. Such places would make qpdf not support files with more than 2^31 of something that usually wouldn't be so large. In the event that such a file shows up and is valid, at least qpdf would raise an error in the right spot so the issue could be legitimately addressed rather than failing in some weird way because of a silent overflow condition.
/** \param pos Index of the image to remove. **/
target: 0 | cwe: [ "CWE-770" ] | project: cimg | commit_id: 619cb58dd90b4e03ac68286c70ed98acbefd1c90 | hash: 234,134,943,271,888,800,000,000,000,000,000,000,000 | size: 3 | message:
CImg<>::load_bmp() and CImg<>::load_pandore(): Check that dimensions encoded in file does not exceed file size.
void HeaderUtility::stripPortFromHost(RequestHeaderMap& headers, uint32_t listener_port) { if (headers.getMethodValue() == Http::Headers::get().MethodValues.Connect) { // According to RFC 2817 Connect method should have port part in host header. // In this case we won't strip it even if configured to do so. return; } const absl::string_view original_host = headers.getHostValue(); const absl::string_view::size_type port_start = original_host.rfind(':'); if (port_start == absl::string_view::npos) { return; } // According to RFC3986 v6 address is always enclosed in "[]". section 3.2.2. const auto v6_end_index = original_host.rfind("]"); if (v6_end_index == absl::string_view::npos || v6_end_index < port_start) { if ((port_start + 1) > original_host.size()) { return; } const absl::string_view port_str = original_host.substr(port_start + 1); uint32_t port = 0; if (!absl::SimpleAtoi(port_str, &port)) { return; } if (port != listener_port) { // We would strip ports only if they are the same, as local port of the listener. return; } const absl::string_view host = original_host.substr(0, port_start); headers.setHost(host); } }
target: 0 | cwe: [] | project: envoy | commit_id: 2c60632d41555ec8b3d9ef5246242be637a2db0f | hash: 298,591,457,090,125,960,000,000,000,000,000,000,000 | size: 31 | message:
http: header map security fixes for duplicate headers (#197) Previously header matching did not match on all headers for non-inline headers. This patch changes the default behavior to always logically match on all headers. Multiple individual headers will be logically concatenated with ',' similar to what is done with inline headers. This makes the behavior effectively consistent. This behavior can be temporary reverted by setting the runtime value "envoy.reloadable_features.header_match_on_all_headers" to "false". Targeted fixes have been additionally performed on the following extensions which make them consider all duplicate headers by default as a comma concatenated list: 1) Any extension using CEL matching on headers. 2) The header to metadata filter. 3) The JWT filter. 4) The Lua filter. Like primary header matching used in routing, RBAC, etc. this behavior can be disabled by setting the runtime value "envoy.reloadable_features.header_match_on_all_headers" to false. Finally, the setCopy() header map API previously only set the first header in the case of duplicate non-inline headers. setCopy() now behaves similiarly to the other set*() APIs and replaces all found headers with a single value. This may have had security implications in the extauth filter which uses this API. This behavior can be disabled by setting the runtime value "envoy.reloadable_features.http_set_copy_replace_all_headers" to false. Fixes https://github.com/envoyproxy/envoy-setec/issues/188 Signed-off-by: Matt Klein <[email protected]>
write_back_data(struct vhost_crypto_data_req *vc_req) { struct vhost_crypto_writeback_data *wb_data = vc_req->wb, *wb_last; while (wb_data) { rte_memcpy(wb_data->dst, wb_data->src, wb_data->len); wb_last = wb_data; wb_data = wb_data->next; rte_mempool_put(vc_req->wb_pool, wb_last); } }
target: 0 | cwe: [ "CWE-125" ] | project: dpdk | commit_id: acd4c92fa693bbea695f2bb42bb93fb8567c3ca5 | hash: 26,795,894,830,054,537,000,000,000,000,000,000,000 | size: 11 | message:
vhost/crypto: validate keys lengths transform_cipher_param() and transform_chain_param() handle the payload data for the VHOST_USER_CRYPTO_CREATE_SESS message. These payloads have to be validated, since it could come from untrusted sources. Two buffers and their lengths are defined in this payload, one the the auth key and one for the cipher key. But above functions do not validate the key length inputs, which could lead to read out of bounds, as buffers have static sizes of 64 bytes for the cipher key and 512 bytes for the auth key. This patch adds necessary checks on the key length field before being used. CVE-2020-10724 Fixes: e80a98708166 ("vhost/crypto: add session message handler") Cc: [email protected] Reported-by: Ilja Van Sprundel <[email protected]> Signed-off-by: Maxime Coquelin <[email protected]> Reviewed-by: Xiaolong Ye <[email protected]> Reviewed-by: Ilja Van Sprundel <[email protected]>
void _LUTevalFloat(register const cmsFloat32Number In[], register cmsFloat32Number Out[], const void* D) { cmsPipeline* lut = (cmsPipeline*) D; cmsStage *mpe; cmsFloat32Number Storage[2][MAX_STAGE_CHANNELS]; int Phase = 0, NextPhase; memmove(&Storage[Phase][0], In, lut ->InputChannels * sizeof(cmsFloat32Number)); for (mpe = lut ->Elements; mpe != NULL; mpe = mpe ->Next) { NextPhase = Phase ^ 1; mpe ->EvalPtr(&Storage[Phase][0], &Storage[NextPhase][0], mpe); Phase = NextPhase; } memmove(Out, &Storage[Phase][0], lut ->OutputChannels * sizeof(cmsFloat32Number)); }
target: 0 | cwe: [] | project: Little-CMS | commit_id: b0d5ffd4ad91cf8683ee106f13742db3dc66599a | hash: 76,494,378,522,410,545,000,000,000,000,000,000,000 | size: 20 | message:
Memory Squeezing: LCMS2: CLUTElemDup Check for allocation failures and tidy up if found.
cipop(mrb_state *mrb) { struct mrb_context *c = mrb->c; struct REnv *env = mrb_vm_ci_env(c->ci); c->ci--; if (env) mrb_env_unshare(mrb, env); return c->ci; }
target: 0 | cwe: [ "CWE-122", "CWE-787" ] | project: mruby | commit_id: 47068ae07a5fa3aa9a1879cdfe98a9ce0f339299 | hash: 335,906,451,780,953,970,000,000,000,000,000,000,000 | size: 9 | message:
vm.c: packed arguments length may be zero for `send` method.
lou_logFile (const char *fileName) { if (fileName == NULL || fileName[0] == 0) return; if (initialLogFileName[0] == 0) strcpy (initialLogFileName, fileName); logFile = fopen (fileName, "wb"); if (logFile == NULL && initialLogFileName[0] != 0) logFile = fopen (initialLogFileName, "wb"); if (logFile == NULL) { fprintf (stderr, "Cannot open log file %s\n", fileName); logFile = stderr; } }
target: 0 | cwe: [] | project: liblouis | commit_id: dc97ef791a4fae9da11592c79f9f79e010596e0c | hash: 334,233,090,505,439,030,000,000,000,000,000,000,000 | size: 15 | message:
Merge branch 'table_resolver'
static bool get_sequence_numbers(struct torture_context *torture, struct torture_domain_sequence **seqs) { struct winbindd_request req; struct winbindd_response rep; const char *extra_data; char line[256]; uint32_t count = 0; struct torture_domain_sequence *s = NULL; ZERO_STRUCT(req); ZERO_STRUCT(rep); DO_STRUCT_REQ_REP(WINBINDD_SHOW_SEQUENCE, &req, &rep); extra_data = (char *)rep.extra_data.data; torture_assert(torture, extra_data, "NULL sequence list"); while (next_token(&extra_data, line, "\n", sizeof(line))) { char *p, *lp; uint32_t seq; s = talloc_realloc(torture, s, struct torture_domain_sequence, count + 2); ZERO_STRUCT(s[count+1]); lp = line; p = strchr(lp, ' '); torture_assert(torture, p, "invalid line format"); *p = 0; s[count].netbios_name = talloc_strdup(s, lp); lp = p+1; torture_assert(torture, strncmp(lp, ": ", 2) == 0, "invalid line format"); lp += 2; if (strcmp(lp, "DISCONNECTED") == 0) { seq = (uint32_t)-1; } else { seq = (uint32_t)strtol(lp, &p, 10); torture_assert(torture, (*p == '\0'), "invalid line format"); torture_assert(torture, (seq != (uint32_t)-1), "sequence number -1 encountered"); } s[count].seq = seq; count++; } SAFE_FREE(rep.extra_data.data); torture_assert(torture, count >= 2, "The list of domain sequence " "numbers should contain 2 entries"); *seqs = s; return true; }
target: 0 | cwe: [ "CWE-476" ] | project: samba | commit_id: 0b259a48a70bde4dfd482e0720e593ae5a9c414a | hash: 310,727,517,982,306,240,000,000,000,000,000,000,000 | size: 57 | message:
CVE-2020-14323 torture4: Add a simple test for invalid lookup_sids winbind call We can't add this test before the fix, add it to knownfail and have the fix remove the knownfail entry again. As this crashes winbind, many tests after this one will fail. Reported by Bas Alberts of the GitHub Security Lab Team as GHSL-2020-134 Bug: https://bugzilla.samba.org/show_bug.cgi?id=14436 Signed-off-by: Volker Lendecke <[email protected]>
QPDF::read_xref(qpdf_offset_t xref_offset) { std::map<int, int> free_table; while (xref_offset) { char buf[7]; memset(buf, 0, sizeof(buf)); this->file->seek(xref_offset, SEEK_SET); this->file->read(buf, sizeof(buf) - 1); // The PDF spec says xref must be followed by a line // terminator, but files exist in the wild where it is // terminated by arbitrary whitespace. PCRE xref_re("^xref\\s+"); PCRE::Match m = xref_re.match(buf); if (m) { QTC::TC("qpdf", "QPDF xref space", ((buf[4] == '\n') ? 0 : (buf[4] == '\r') ? 1 : (buf[4] == ' ') ? 2 : 9999)); xref_offset = read_xrefTable(xref_offset + m.getMatch(0).length()); } else { xref_offset = read_xrefStream(xref_offset); } } if (! this->trailer.isInitialized()) { throw QPDFExc(qpdf_e_damaged_pdf, this->file->getName(), "", 0, "unable to find trailer while reading xref"); } int size = this->trailer.getKey("/Size").getIntValue(); int max_obj = 0; if (! xref_table.empty()) { max_obj = (*(xref_table.rbegin())).first.getObj(); } if (! this->deleted_objects.empty()) { max_obj = std::max(max_obj, *(this->deleted_objects.rbegin())); } if (size != max_obj + 1) { QTC::TC("qpdf", "QPDF xref size mismatch"); warn(QPDFExc(qpdf_e_damaged_pdf, this->file->getName(), "", 0, std::string("reported number of objects (") + QUtil::int_to_string(size) + ") inconsistent with actual number of objects (" + QUtil::int_to_string(max_obj + 1) + ")")); } // We no longer need the deleted_objects table, so go ahead and // clear it out to make sure we never depend on its being set. this->deleted_objects.clear(); }
target: 0 | cwe: [ "CWE-399", "CWE-835" ] | project: qpdf | commit_id: 701b518d5c56a1449825a3a37a716c58e05e1c3e | hash: 192,044,764,596,265,060,000,000,000,000,000,000,000 | size: 57 | message:
Detect recursion loops resolving objects (fixes #51) During parsing of an object, sometimes parts of the object have to be resolved. An example is stream lengths. If such an object directly or indirectly points to the object being parsed, it can cause an infinite loop. Guard against all cases of re-entrant resolution of objects.
void ConnectDialog::on_qaUrl_triggered() { ServerItem *si = static_cast<ServerItem *>(qtwServers->currentItem()); if (! si || si->qsUrl.isEmpty()) return; QDesktopServices::openUrl(QUrl(si->qsUrl)); }
target: 1 | cwe: [ "CWE-59", "CWE-61" ] | project: mumble | commit_id: e59ee87abe249f345908c7d568f6879d16bfd648 | hash: 176,582,098,161,279,340,000,000,000,000,000,000,000 | size: 7 | message:
FIX(client): Only allow "http"/"https" for URLs in ConnectDialog Our public server list registration script doesn't have an URL scheme whitelist for the website field. Turns out a malicious server can register itself with a dangerous URL in an attempt to attack a user's machine. User interaction is required, as the URL has to be opened by right-clicking on the server entry and clicking on "Open Webpage". This commit introduces a client-side whitelist, which only allows "http" and "https" schemes. We will also implement it in our public list. In future we should probably add a warning QMessageBox informing the user that there's no guarantee the URL is safe (regardless of the scheme). Thanks a lot to https://positive.security for reporting the RCE vulnerability to us privately.
TfLiteStatus Subgraph::SetCustomAllocationForTensor( int tensor_index, const TfLiteCustomAllocation& allocation) { TfLiteTensor* tensor = &context_.tensors[tensor_index]; TF_LITE_ENSURE(context(), (tensor->allocation_type == kTfLiteArenaRw || tensor->allocation_type == kTfLiteArenaRwPersistent || tensor->allocation_type == kTfLiteCustom)); TF_LITE_ENSURE_STATUS( ValidateCustomAllocationForTensor(context(), tensor, allocation)); // If tensor already has a custom alloc, just reassign. const auto alloc_it = std::find_if( custom_allocations_.begin(), custom_allocations_.end(), [tensor_index]( const std::pair<int, TfLiteCustomAllocation>& existing_alloc) { return existing_alloc.first == tensor_index; }); if (alloc_it == custom_allocations_.end()) { custom_allocations_.emplace_back(tensor_index, allocation); } else { alloc_it->second = allocation; } tensor->allocation_type = kTfLiteCustom; tensor->data.data = allocation.data; return kTfLiteOk; }
target: 0 | cwe: [ "CWE-20", "CWE-787" ] | project: tensorflow | commit_id: d58c96946b2880991d63d1dacacb32f0a4dfa453 | hash: 139,570,569,906,095,980,000,000,000,000,000,000,000 | size: 28 | message:
[tflite] Ensure inputs and outputs don't overlap. If a model uses the same tensor for both an input and an output then this can result in data loss and memory corruption. This should not happen. PiperOrigin-RevId: 332522916 Change-Id: If0905b142415a9dfceaf2d181872f2a8fb88f48a
void FromUniLookup::reset() { for (unsigned i = 0; i != 256*4; ++i) data[i].key = npos; overflow_end = overflow; }
target: 0 | cwe: [ "CWE-125" ] | project: aspell | commit_id: de29341638833ba7717bd6b5e6850998454b044b | hash: 297,368,168,130,439,520,000,000,000,000,000,000,000 | size: 6 | message:
Don't allow null-terminated UCS-2/4 strings using the original API. Detect if the encoding is UCS-2/4 and the length is -1 in affected API functions and refuse to convert the string. If the string ends up being converted somehow, abort with an error message in DecodeDirect and ConvDirect. To convert a null terminated string in Decode/ConvDirect, a negative number corresponding to the width of the underlying character type for the encoding is expected; for example, if the encoding is "ucs-2" then a the size is expected to be -2. Also fix a 1-3 byte over-read in DecodeDirect when reading UCS-2/4 strings when a size is provided (found by OSS-Fuzz). Also fix a bug in DecodeDirect that caused DocumentChecker to return the wrong offsets when working with UCS-2/4 strings.
*/ void *netdev_lower_get_next_private_rcu(struct net_device *dev, struct list_head **iter) { struct netdev_adjacent *lower; WARN_ON_ONCE(!rcu_read_lock_held()); lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list); if (&lower->list == &dev->adj_list.lower) return NULL; *iter = &lower->list; return lower->private;
target: 0 | cwe: [ "CWE-400", "CWE-703" ] | project: linux | commit_id: fac8e0f579695a3ecbc4d3cac369139d7f819971 | hash: 8,835,191,328,447,511,000,000,000,000,000,000,000 | size: 16 | message:
tunnels: Don't apply GRO to multiple layers of encapsulation. When drivers express support for TSO of encapsulated packets, they only mean that they can do it for one layer of encapsulation. Supporting additional levels would mean updating, at a minimum, more IP length fields and they are unaware of this. No encapsulation device expresses support for handling offloaded encapsulated packets, so we won't generate these types of frames in the transmit path. However, GRO doesn't have a check for multiple levels of encapsulation and will attempt to build them. UDP tunnel GRO actually does prevent this situation but it only handles multiple UDP tunnels stacked on top of each other. This generalizes that solution to prevent any kind of tunnel stacking that would cause problems. Fixes: bf5a755f ("net-gre-gro: Add GRE support to the GRO stack") Signed-off-by: Jesse Gross <[email protected]> Signed-off-by: David S. Miller <[email protected]>
void MACH0_(opts_set_default)(struct MACH0_(opts_t) *options, RBinFile *bf) { r_return_if_fail (options && bf && bf->rbin); options->header_at = 0; options->symbols_off = 0; options->verbose = bf->rbin->verbose; }
target: 0 | cwe: [ "CWE-125", "CWE-787" ] | project: radare2 | commit_id: 0052500c1ed5bf8263b26b9fd7773dbdc6f170c4 | hash: 109,813,710,971,792,760,000,000,000,000,000,000,000 | size: 6 | message:
Fix heap OOB read in macho.iterate_chained_fixups ##crash * Reported by peacock-doris via huntr.dev * Reproducer 'tests_65305' mrmacete: * Return early if segs_count is 0 * Initialize segs_count also for reconstructed fixups Co-authored-by: pancake <[email protected]> Co-authored-by: Francesco Tamagni <[email protected]>
int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base) { int ret = -ENOSYS; #ifdef CONFIG_ACPI_HOTPLUG_IOAPIC int ioapic_id; u64 addr; struct ioapic_domain_cfg cfg = { .type = IOAPIC_DOMAIN_DYNAMIC, .ops = &mp_ioapic_irqdomain_ops, }; ioapic_id = acpi_get_ioapic_id(handle, gsi_base, &addr); if (ioapic_id < 0) { unsigned long long uid; acpi_status status; status = acpi_evaluate_integer(handle, METHOD_NAME__UID, NULL, &uid); if (ACPI_FAILURE(status)) { acpi_handle_warn(handle, "failed to get IOAPIC ID.\n"); return -EINVAL; } ioapic_id = (int)uid; } mutex_lock(&acpi_ioapic_lock); ret = mp_register_ioapic(ioapic_id, phys_addr, gsi_base, &cfg); mutex_unlock(&acpi_ioapic_lock); #endif return ret; }
target: 0 | cwe: [ "CWE-120" ] | project: linux | commit_id: dad5ab0db8deac535d03e3fe3d8f2892173fa6a4 | hash: 188,836,611,536,838,440,000,000,000,000,000,000,000 | size: 32 | message:
x86/acpi: Prevent out of bound access caused by broken ACPI tables The bus_irq argument of mp_override_legacy_irq() is used as the index into the isa_irq_to_gsi[] array. The bus_irq argument originates from ACPI_MADT_TYPE_IO_APIC and ACPI_MADT_TYPE_INTERRUPT items in the ACPI tables, but is nowhere sanity checked. That allows broken or malicious ACPI tables to overwrite memory, which might cause malfunction, panic or arbitrary code execution. Add a sanity check and emit a warning when that triggers. [ tglx: Added warning and rewrote changelog ] Signed-off-by: Seunghun Han <[email protected]> Signed-off-by: Thomas Gleixner <[email protected]> Cc: [email protected] Cc: "Rafael J. Wysocki" <[email protected]> Cc: [email protected] Signed-off-by: Ingo Molnar <[email protected]>
read_connections (NMAGConfSettings *settings) { NMAGConfSettingsPrivate *priv = NMA_GCONF_SETTINGS_GET_PRIVATE (settings); GSList *dir_list; GSList *iter; dir_list = nm_gconf_get_all_connections (priv->client); if (!dir_list) return; for (iter = dir_list; iter; iter = iter->next) { char *dir = (char *) iter->data; NMAGConfConnection *connection; connection = nma_gconf_connection_new (priv->client, dir); if (connection) add_connection_real (settings, connection); g_free (dir); } g_slist_free (dir_list); priv->connections = g_slist_reverse (priv->connections); }
target: 0 | cwe: [ "CWE-200" ] | project: network-manager-applet | commit_id: 8627880e07c8345f69ed639325280c7f62a8f894 | hash: 222,083,008,163,358,250,000,000,000,000,000,000,000 | size: 23 | message:
editor: prevent any registration of objects on the system bus D-Bus access-control is name-based; so requests for a specific name are allowed/denied based on the rules in /etc/dbus-1/system.d. But apparently apps still get a non-named service on the bus, and if we register *any* object even though we don't have a named service, dbus and dbus-glib will happily proxy signals. Since the connection editor shouldn't ever expose anything having to do with connections on any bus, make sure that's the case.
static int fit_config_verify_sig(const void *fit, int conf_noffset, const void *sig_blob, int sig_offset) { int noffset; char *err_msg = "No 'signature' subnode found"; int verified = 0; int ret; /* Process all hash subnodes of the component conf node */ fdt_for_each_subnode(noffset, fit, conf_noffset) { const char *name = fit_get_name(fit, noffset, NULL); if (!strncmp(name, FIT_SIG_NODENAME, strlen(FIT_SIG_NODENAME))) { ret = fit_config_check_sig(fit, noffset, sig_offset, conf_noffset, &err_msg); if (ret) { puts("- "); } else { puts("+ "); verified = 1; break; } } } if (noffset == -FDT_ERR_TRUNCATED || noffset == -FDT_ERR_BADSTRUCTURE) { err_msg = "Corrupted or truncated tree"; goto error; } if (verified) return 0; error: printf(" error!\n%s for '%s' hash node in '%s' config node\n", err_msg, fit_get_name(fit, noffset, NULL), fit_get_name(fit, conf_noffset, NULL)); return -EPERM; }
target: 0 | cwe: [] | project: u-boot | commit_id: 79af75f7776fc20b0d7eb6afe1e27c00fdb4b9b4 | hash: 39,951,614,118,071,404,000,000,000,000,000,000,000 | size: 40 | message:
fit: Don't allow verification of images with @ nodes When searching for a node called 'fred', any unit address appended to the name is ignored by libfdt, meaning that 'fred' can match 'fred@1'. This means that we cannot be sure that the node originally intended is the one that is used. Disallow use of nodes with unit addresses. Update the forge test also, since it uses @ addresses. CVE-2021-27138 Signed-off-by: Simon Glass <[email protected]> Reported-by: Bruce Monroe <[email protected]> Reported-by: Arie Haenel <[email protected]> Reported-by: Julien Lenoir <[email protected]>
static void textview_show_header(TextView *textview, GPtrArray *headers) { GtkTextView *text = GTK_TEXT_VIEW(textview->text); GtkTextBuffer *buffer = gtk_text_view_get_buffer(text); GtkTextIter iter; Header *header; gint i; cm_return_if_fail(headers != NULL); for (i = 0; i < headers->len; i++) { header = g_ptr_array_index(headers, i); cm_return_if_fail(header->name != NULL); gtk_text_buffer_get_end_iter (buffer, &iter); if(prefs_common.trans_hdr == TRUE) { gchar *hdr = g_strndup(header->name, strlen(header->name) - 1); gchar *trans_hdr = gettext(hdr); gtk_text_buffer_insert_with_tags_by_name(buffer, &iter, trans_hdr, -1, "header_title", "header", NULL); gtk_text_buffer_insert_with_tags_by_name(buffer, &iter, ":", 1, "header_title", "header", NULL); g_free(hdr); } else { gtk_text_buffer_insert_with_tags_by_name(buffer, &iter, header->name, -1, "header_title", "header", NULL); } if (header->name[strlen(header->name) - 1] != ' ') gtk_text_buffer_insert_with_tags_by_name (buffer, &iter, " ", 1, "header_title", "header", NULL); if (procheader_headername_equal(header->name, "Subject") || procheader_headername_equal(header->name, "From") || procheader_headername_equal(header->name, "To") || procheader_headername_equal(header->name, "Cc") || procheader_headername_equal(header->name, "Bcc") || procheader_headername_equal(header->name, "Reply-To") || procheader_headername_equal(header->name, "Sender") || procheader_headername_equal(header->name, "Resent-From") || procheader_headername_equal(header->name, "Resent-To")) unfold_line(header->body); if (procheader_headername_equal(header->name, "Date") && prefs_common.msgview_date_format) { gchar hbody[80]; procheader_date_parse(hbody, header->body, sizeof(hbody)); gtk_text_buffer_get_end_iter (buffer, &iter); gtk_text_buffer_insert_with_tags_by_name (buffer, &iter, hbody, -1, "header", NULL); } else if ((procheader_headername_equal(header->name, "X-Mailer") || procheader_headername_equal(header->name, "X-Newsreader")) && (strstr(header->body, "Claws Mail") != NULL || strstr(header->body, "Sylpheed-Claws") != NULL)) { gtk_text_buffer_get_end_iter (buffer, &iter); gtk_text_buffer_insert_with_tags_by_name (buffer, &iter, header->body, -1, "header", "emphasis", NULL); } else { gboolean hdr = procheader_headername_equal(header->name, "From") || procheader_headername_equal(header->name, "To") || procheader_headername_equal(header->name, "Cc") || procheader_headername_equal(header->name, "Bcc") || procheader_headername_equal(header->name, "Reply-To") || procheader_headername_equal(header->name, "Sender") || procheader_headername_equal(header->name, "Resent-From") || procheader_headername_equal(header->name, "Resent-To"); textview_make_clickable_parts(textview, "header", "hlink", header->body, hdr); } gtk_text_buffer_get_end_iter (buffer, &iter); gtk_text_buffer_insert_with_tags_by_name(buffer, &iter, "\n", 1, "header", NULL); } textview_show_avatar(textview); if (prefs_common.save_xface) textview_save_contact_pic(textview); textview_show_contact_pic(textview); }
target: 0 | cwe: [ "CWE-601" ] | project: claws | commit_id: ac286a71ed78429e16c612161251b9ea90ccd431 | hash: 122,751,106,331,925,100,000,000,000,000,000,000,000 | size: 86 | message:
harden link checker before accepting click
const Section* Binary::get_section(const std::string& segname, const std::string& secname) const { if (const SegmentCommand* seg = get_segment(segname)) { if (const Section* sec = seg->get_section(secname)) { return sec; } } return nullptr; }
target: 0 | cwe: [ "CWE-703" ] | project: LIEF | commit_id: 7acf0bc4224081d4f425fcc8b2e361b95291d878 | hash: 230,943,336,540,526,500,000,000,000,000,000,000,000 | size: 8 | message:
Resolve #764
void PDFDoc::checkHeader() { char hdrBuf[headerSearchSize+1]; char *p; char *tokptr; int i; int bytesRead; pdfMajorVersion = 0; pdfMinorVersion = 0; // read up to headerSearchSize bytes from the beginning of the document for (i = 0; i < headerSearchSize; ++i) { const int c = str->getChar(); if (c == EOF) break; hdrBuf[i] = c; } bytesRead = i; hdrBuf[bytesRead] = '\0'; // find the start of the PDF header if it exists and parse the version bool headerFound = false; for (i = 0; i < bytesRead - 5; ++i) { if (!strncmp(&hdrBuf[i], "%PDF-", 5)) { headerFound = true; break; } } if (!headerFound) { error(errSyntaxWarning, -1, "May not be a PDF file (continuing anyway)"); return; } str->moveStart(i); if (!(p = strtok_r(&hdrBuf[i+5], " \t\n\r", &tokptr))) { error(errSyntaxWarning, -1, "May not be a PDF file (continuing anyway)"); return; } sscanf(p, "%d.%d", &pdfMajorVersion, &pdfMinorVersion); // We don't do the version check. Don't add it back in. }
target: 0 | cwe: [ "CWE-20" ] | project: poppler | commit_id: 9fd5ec0e6e5f763b190f2a55ceb5427cfe851d5f | hash: 184,202,077,949,125,200,000,000,000,000,000,000,000 | size: 40 | message:
PDFDoc::setup: Fix return value At that point xref can have gone wrong since extractPDFSubtype() can have caused a reconstruct that broke stuff so instead of unconditionally returning true, return xref->isOk() Fixes #706
static int xenbus_write_watch(unsigned msg_type, struct xenbus_file_priv *u) { struct watch_adapter *watch, *tmp_watch; char *path, *token; int err, rc; LIST_HEAD(staging_q); path = u->u.buffer + sizeof(u->u.msg); token = memchr(path, 0, u->u.msg.len); if (token == NULL) { rc = -EILSEQ; goto out; } token++; if (memchr(token, 0, u->u.msg.len - (token - path)) == NULL) { rc = -EILSEQ; goto out; } if (msg_type == XS_WATCH) { watch = alloc_watch_adapter(path, token); if (watch == NULL) { rc = -ENOMEM; goto out; } watch->watch.callback = watch_fired; watch->dev_data = u; err = register_xenbus_watch(&watch->watch); if (err) { free_watch_adapter(watch); rc = err; goto out; } list_add(&watch->list, &u->watches); } else { list_for_each_entry_safe(watch, tmp_watch, &u->watches, list) { if (!strcmp(watch->token, token) && !strcmp(watch->watch.node, path)) { unregister_xenbus_watch(&watch->watch); list_del(&watch->list); free_watch_adapter(watch); break; } } } /* Success. Synthesize a reply to say all is OK. */ { struct { struct xsd_sockmsg hdr; char body[3]; } __packed reply = { { .type = msg_type, .len = sizeof(reply.body) }, "OK" }; mutex_lock(&u->reply_mutex); rc = queue_reply(&u->read_buffers, &reply, sizeof(reply)); wake_up(&u->read_waitq); mutex_unlock(&u->reply_mutex); } out: return rc; }
target: 0 | cwe: [] | project: linux | commit_id: 0beef634b86a1350c31da5fcc2992f0d7c8a622b | hash: 195,417,593,966,957,840,000,000,000,000,000,000,000 | size: 70 | message:
xenbus: don't BUG() on user mode induced condition Inability to locate a user mode specified transaction ID should not lead to a kernel crash. For other than XS_TRANSACTION_START also don't issue anything to xenbus if the specified ID doesn't match that of any active transaction. Signed-off-by: Jan Beulich <[email protected]> Cc: <[email protected]> Signed-off-by: David Vrabel <[email protected]>
static enum TIFFReadDirEntryErr TIFFReadDirEntryArrayWithLimit( TIFF* tif, TIFFDirEntry* direntry, uint32* count, uint32 desttypesize, void** value, uint64 maxcount) { int typesize; uint32 datasize; void* data; uint64 target_count64; typesize=TIFFDataWidth(direntry->tdir_type); target_count64 = (direntry->tdir_count > maxcount) ? maxcount : direntry->tdir_count; if ((target_count64==0)||(typesize==0)) { *value=0; return(TIFFReadDirEntryErrOk); } (void) desttypesize; /* * As a sanity check, make sure we have no more than a 2GB tag array * in either the current data type or the dest data type. This also * avoids problems with overflow of tmsize_t on 32bit systems. */ if ((uint64)(2147483647/typesize)<target_count64) return(TIFFReadDirEntryErrSizesan); if ((uint64)(2147483647/desttypesize)<target_count64) return(TIFFReadDirEntryErrSizesan); *count=(uint32)target_count64; datasize=(*count)*typesize; assert((tmsize_t)datasize>0); data=_TIFFCheckMalloc(tif, *count, typesize, "ReadDirEntryArray"); if (data==0) return(TIFFReadDirEntryErrAlloc); if (!(tif->tif_flags&TIFF_BIGTIFF)) { if (datasize<=4) _TIFFmemcpy(data,&direntry->tdir_offset,datasize); else { enum TIFFReadDirEntryErr err; uint32 offset = direntry->tdir_offset.toff_long; if (tif->tif_flags&TIFF_SWAB) TIFFSwabLong(&offset); err=TIFFReadDirEntryData(tif,(uint64)offset,(tmsize_t)datasize,data); if (err!=TIFFReadDirEntryErrOk) { _TIFFfree(data); return(err); } } } else { if (datasize<=8) _TIFFmemcpy(data,&direntry->tdir_offset,datasize); else { enum TIFFReadDirEntryErr err; uint64 offset = direntry->tdir_offset.toff_long8; if (tif->tif_flags&TIFF_SWAB) TIFFSwabLong8(&offset); err=TIFFReadDirEntryData(tif,offset,(tmsize_t)datasize,data); if (err!=TIFFReadDirEntryErrOk) { _TIFFfree(data); return(err); } } } *value=data; return(TIFFReadDirEntryErrOk); }
target: 1 | cwe: [ "CWE-770" ] | project: libtiff | commit_id: dc02f9050311a90b3c0655147cee09bfa7081cfc | hash: 77,347,637,914,203,965,000,000,000,000,000,000,000 | size: 75 | message:
* libtiff/tif_read.c: add protection against excessive memory allocation attempts in TIFFReadDirEntryArray() on short files. Effective for mmap'ed case. And non-mmap'ed case, but restricted to 64bit builds. Fixes http://bugzilla.maptools.org/show_bug.cgi?id=2675
parse_command_modifiers( exarg_T *eap, char **errormsg, cmdmod_T *cmod, int skip_only) { char_u *orig_cmd = eap->cmd; char_u *cmd_start = NULL; int use_plus_cmd = FALSE; int starts_with_colon = FALSE; int vim9script = in_vim9script(); int has_visual_range = FALSE; CLEAR_POINTER(cmod); cmod->cmod_flags = sticky_cmdmod_flags; if (STRNCMP(eap->cmd, "'<,'>", 5) == 0) { // The automatically inserted Visual area range is skipped, so that // typing ":cmdmod cmd" in Visual mode works without having to move the // range to after the modififiers. The command will be // "'<,'>cmdmod cmd", parse "cmdmod cmd" and then put back "'<,'>" // before "cmd" below. eap->cmd += 5; cmd_start = eap->cmd; has_visual_range = TRUE; } // Repeat until no more command modifiers are found. for (;;) { char_u *p; while (*eap->cmd == ' ' || *eap->cmd == '\t' || *eap->cmd == ':') { if (*eap->cmd == ':') starts_with_colon = TRUE; ++eap->cmd; } // in ex mode, an empty command (after modifiers) works like :+ if (*eap->cmd == NUL && exmode_active && (getline_equal(eap->getline, eap->cookie, getexmodeline) || getline_equal(eap->getline, eap->cookie, getexline)) && curwin->w_cursor.lnum < curbuf->b_ml.ml_line_count) { use_plus_cmd = TRUE; if (!skip_only) ex_pressedreturn = TRUE; break; // no modifiers following } // ignore comment and empty lines if (comment_start(eap->cmd, starts_with_colon)) { // a comment ends at a NL if (eap->nextcmd == NULL) { eap->nextcmd = vim_strchr(eap->cmd, '\n'); if (eap->nextcmd != NULL) ++eap->nextcmd; } if (vim9script && has_cmdmod(cmod, FALSE)) *errormsg = _(e_command_modifier_without_command); return FAIL; } if (*eap->cmd == NUL) { if (!skip_only) { ex_pressedreturn = TRUE; if (vim9script && has_cmdmod(cmod, FALSE)) *errormsg = _(e_command_modifier_without_command); } return FAIL; } p = skip_range(eap->cmd, TRUE, NULL); // In Vim9 script a variable can shadow a command modifier: // verbose = 123 // verbose += 123 // silent! verbose = func() // verbose.member = 2 // verbose[expr] = 2 // But not: // verbose [a, b] = list if (vim9script) { char_u *s, *n; for (s = eap->cmd; ASCII_ISALPHA(*s); ++s) ; n = skipwhite(s); if (*n == '.' || *n == '=' || (*n != NUL && n[1] == '=') || *s == '[') break; } switch (*p) { // When adding an entry, also modify cmd_exists(). 
case 'a': if (!checkforcmd_noparen(&eap->cmd, "aboveleft", 3)) break; cmod->cmod_split |= WSP_ABOVE; continue; case 'b': if (checkforcmd_noparen(&eap->cmd, "belowright", 3)) { cmod->cmod_split |= WSP_BELOW; continue; } if (checkforcmd_opt(&eap->cmd, "browse", 3, TRUE)) { #ifdef FEAT_BROWSE_CMD cmod->cmod_flags |= CMOD_BROWSE; #endif continue; } if (!checkforcmd_noparen(&eap->cmd, "botright", 2)) break; cmod->cmod_split |= WSP_BOT; continue; case 'c': if (!checkforcmd_opt(&eap->cmd, "confirm", 4, TRUE)) break; #if defined(FEAT_GUI_DIALOG) || defined(FEAT_CON_DIALOG) cmod->cmod_flags |= CMOD_CONFIRM; #endif continue; case 'k': if (checkforcmd_noparen(&eap->cmd, "keepmarks", 3)) { cmod->cmod_flags |= CMOD_KEEPMARKS; continue; } if (checkforcmd_noparen(&eap->cmd, "keepalt", 5)) { cmod->cmod_flags |= CMOD_KEEPALT; continue; } if (checkforcmd_noparen(&eap->cmd, "keeppatterns", 5)) { cmod->cmod_flags |= CMOD_KEEPPATTERNS; continue; } if (!checkforcmd_noparen(&eap->cmd, "keepjumps", 5)) break; cmod->cmod_flags |= CMOD_KEEPJUMPS; continue; case 'f': // only accept ":filter {pat} cmd" { char_u *reg_pat; char_u *nulp = NULL; int c = 0; if (!checkforcmd_noparen(&p, "filter", 4) || *p == NUL || (ends_excmd(*p) #ifdef FEAT_EVAL // in ":filter #pat# cmd" # does not // start a comment && (!vim9script || VIM_ISWHITE(p[1])) #endif )) break; if (*p == '!') { cmod->cmod_filter_force = TRUE; p = skipwhite(p + 1); if (*p == NUL || ends_excmd(*p)) break; } #ifdef FEAT_EVAL // Avoid that "filter(arg)" is recognized. if (vim9script && !VIM_ISWHITE(p[-1])) break; #endif if (skip_only) p = skip_vimgrep_pat(p, NULL, NULL); else // NOTE: This puts a NUL after the pattern. p = skip_vimgrep_pat_ext(p, &reg_pat, NULL, &nulp, &c); if (p == NULL || *p == NUL) break; if (!skip_only) { cmod->cmod_filter_regmatch.regprog = vim_regcomp(reg_pat, RE_MAGIC); if (cmod->cmod_filter_regmatch.regprog == NULL) break; // restore the character overwritten by NUL if (nulp != NULL) *nulp = c; } eap->cmd = p; continue; } // ":hide" and ":hide | cmd" are not modifiers case 'h': if (p != eap->cmd || !checkforcmd_noparen(&p, "hide", 3) || *p == NUL || ends_excmd(*p)) break; eap->cmd = p; cmod->cmod_flags |= CMOD_HIDE; continue; case 'l': if (checkforcmd_noparen(&eap->cmd, "lockmarks", 3)) { cmod->cmod_flags |= CMOD_LOCKMARKS; continue; } if (checkforcmd_noparen(&eap->cmd, "legacy", 3)) { if (ends_excmd2(p, eap->cmd)) { *errormsg = _(e_legacy_must_be_followed_by_command); return FAIL; } cmod->cmod_flags |= CMOD_LEGACY; continue; } if (!checkforcmd_noparen(&eap->cmd, "leftabove", 5)) break; cmod->cmod_split |= WSP_ABOVE; continue; case 'n': if (checkforcmd_noparen(&eap->cmd, "noautocmd", 3)) { cmod->cmod_flags |= CMOD_NOAUTOCMD; continue; } if (!checkforcmd_noparen(&eap->cmd, "noswapfile", 3)) break; cmod->cmod_flags |= CMOD_NOSWAPFILE; continue; case 'r': if (!checkforcmd_noparen(&eap->cmd, "rightbelow", 6)) break; cmod->cmod_split |= WSP_BELOW; continue; case 's': if (checkforcmd_noparen(&eap->cmd, "sandbox", 3)) { cmod->cmod_flags |= CMOD_SANDBOX; continue; } if (!checkforcmd_noparen(&eap->cmd, "silent", 3)) break; cmod->cmod_flags |= CMOD_SILENT; if (*eap->cmd == '!' 
&& !VIM_ISWHITE(eap->cmd[-1])) { // ":silent!", but not "silent !cmd" eap->cmd = skipwhite(eap->cmd + 1); cmod->cmod_flags |= CMOD_ERRSILENT; } continue; case 't': if (checkforcmd_noparen(&p, "tab", 3)) { if (!skip_only) { long tabnr = get_address(eap, &eap->cmd, ADDR_TABS, eap->skip, skip_only, FALSE, 1); if (tabnr == MAXLNUM) cmod->cmod_tab = tabpage_index(curtab) + 1; else { if (tabnr < 0 || tabnr > LAST_TAB_NR) { *errormsg = _(e_invalid_range); return FAIL; } cmod->cmod_tab = tabnr + 1; } } eap->cmd = p; continue; } if (!checkforcmd_noparen(&eap->cmd, "topleft", 2)) break; cmod->cmod_split |= WSP_TOP; continue; case 'u': if (!checkforcmd_noparen(&eap->cmd, "unsilent", 3)) break; cmod->cmod_flags |= CMOD_UNSILENT; continue; case 'v': if (checkforcmd_noparen(&eap->cmd, "vertical", 4)) { cmod->cmod_split |= WSP_VERT; continue; } if (checkforcmd_noparen(&eap->cmd, "vim9cmd", 4)) { if (ends_excmd2(p, eap->cmd)) { *errormsg = _(e_vim9cmd_must_be_followed_by_command); return FAIL; } cmod->cmod_flags |= CMOD_VIM9CMD; continue; } if (!checkforcmd_noparen(&p, "verbose", 4)) break; if (vim_isdigit(*eap->cmd)) { // zero means not set, one is verbose == 0, etc. cmod->cmod_verbose = atoi((char *)eap->cmd) + 1; } else cmod->cmod_verbose = 2; // default: verbose == 1 eap->cmd = p; continue; } break; } if (has_visual_range) { if (eap->cmd > cmd_start) { // Move the '<,'> range to after the modifiers and insert a colon. // Since the modifiers have been parsed put the colon on top of the // space: "'<,'>mod cmd" -> "mod:'<,'>cmd // Put eap->cmd after the colon. if (use_plus_cmd) { size_t len = STRLEN(cmd_start); // Special case: empty command uses "+": // "'<,'>mods" -> "mods'<,'>+ mch_memmove(orig_cmd, cmd_start, len); STRCPY(orig_cmd + len, "'<,'>+"); } else { mch_memmove(cmd_start - 5, cmd_start, eap->cmd - cmd_start); eap->cmd -= 5; mch_memmove(eap->cmd - 1, ":'<,'>", 6); } } else // No modifiers, move the pointer back. // Special case: change empty command to "+". if (use_plus_cmd) eap->cmd = (char_u *)"'<,'>+"; else eap->cmd = orig_cmd; } else if (use_plus_cmd) eap->cmd = (char_u *)"+"; return OK; }
1
[ "CWE-787" ]
vim
c6fdb15d423df22e1776844811d082322475e48a
23,783,941,848,142,442,000,000,000,000,000,000,000
362
patch 9.0.0025: accessing beyond allocated memory with the cmdline window Problem: Accessing beyond allocated memory when using the cmdline window in Ex mode. Solution: Use "*" instead of "'<,'>" for Visual mode.
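The CWE-787 record above ends with mch_memmove() calls that write five or six bytes in front of a pointer into the command buffer. Vim's actual fix avoids needing that headroom at all, but the generic guard below illustrates the rule that was being broken: prove the space in front of the cursor exists before moving bytes into it. Names here are illustrative only.

#include <string.h>

/* Insert `prefix` immediately before `cursor`, which points into `buf`
 * (cursor >= buf). Fails instead of writing before the start of `buf`. */
static int insert_prefix_before(char *buf, char *cursor, const char *prefix)
{
    size_t need = strlen(prefix);
    if ((size_t)(cursor - buf) < need)   /* not enough headroom in front of cursor */
        return -1;
    memmove(cursor - need, prefix, need);
    return 0;
}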
xmlParseAttValueComplex(xmlParserCtxtPtr ctxt, int *attlen, int normalize) { xmlChar limit = 0; xmlChar *buf = NULL; xmlChar *rep = NULL; size_t len = 0; size_t buf_size = 0; int c, l, in_space = 0; xmlChar *current = NULL; xmlEntityPtr ent; if (NXT(0) == '"') { ctxt->instate = XML_PARSER_ATTRIBUTE_VALUE; limit = '"'; NEXT; } else if (NXT(0) == '\'') { limit = '\''; ctxt->instate = XML_PARSER_ATTRIBUTE_VALUE; NEXT; } else { xmlFatalErr(ctxt, XML_ERR_ATTRIBUTE_NOT_STARTED, NULL); return(NULL); } /* * allocate a translation buffer. */ buf_size = XML_PARSER_BUFFER_SIZE; buf = (xmlChar *) xmlMallocAtomic(buf_size); if (buf == NULL) goto mem_error; /* * OK loop until we reach one of the ending char or a size limit. */ c = CUR_CHAR(l); while (((NXT(0) != limit) && /* checked */ (IS_CHAR(c)) && (c != '<')) && (ctxt->instate != XML_PARSER_EOF)) { /* * Impose a reasonable limit on attribute size, unless XML_PARSE_HUGE * special option is given */ if ((len > XML_MAX_TEXT_LENGTH) && ((ctxt->options & XML_PARSE_HUGE) == 0)) { xmlFatalErrMsg(ctxt, XML_ERR_ATTRIBUTE_NOT_FINISHED, "AttValue length too long\n"); goto mem_error; } if (c == '&') { in_space = 0; if (NXT(1) == '#') { int val = xmlParseCharRef(ctxt); if (val == '&') { if (ctxt->replaceEntities) { if (len + 10 > buf_size) { growBuffer(buf, 10); } buf[len++] = '&'; } else { /* * The reparsing will be done in xmlStringGetNodeList() * called by the attribute() function in SAX.c */ if (len + 10 > buf_size) { growBuffer(buf, 10); } buf[len++] = '&'; buf[len++] = '#'; buf[len++] = '3'; buf[len++] = '8'; buf[len++] = ';'; } } else if (val != 0) { if (len + 10 > buf_size) { growBuffer(buf, 10); } len += xmlCopyChar(0, &buf[len], val); } } else { ent = xmlParseEntityRef(ctxt); ctxt->nbentities++; if (ent != NULL) ctxt->nbentities += ent->owner; if ((ent != NULL) && (ent->etype == XML_INTERNAL_PREDEFINED_ENTITY)) { if (len + 10 > buf_size) { growBuffer(buf, 10); } if ((ctxt->replaceEntities == 0) && (ent->content[0] == '&')) { buf[len++] = '&'; buf[len++] = '#'; buf[len++] = '3'; buf[len++] = '8'; buf[len++] = ';'; } else { buf[len++] = ent->content[0]; } } else if ((ent != NULL) && (ctxt->replaceEntities != 0)) { if (ent->etype != XML_INTERNAL_PREDEFINED_ENTITY) { ++ctxt->depth; rep = xmlStringDecodeEntities(ctxt, ent->content, XML_SUBSTITUTE_REF, 0, 0, 0); --ctxt->depth; if (rep != NULL) { current = rep; while (*current != 0) { /* non input consuming */ if ((*current == 0xD) || (*current == 0xA) || (*current == 0x9)) { buf[len++] = 0x20; current++; } else buf[len++] = *current++; if (len + 10 > buf_size) { growBuffer(buf, 10); } } xmlFree(rep); rep = NULL; } } else { if (len + 10 > buf_size) { growBuffer(buf, 10); } if (ent->content != NULL) buf[len++] = ent->content[0]; } } else if (ent != NULL) { int i = xmlStrlen(ent->name); const xmlChar *cur = ent->name; /* * This may look absurd but is needed to detect * entities problems */ if ((ent->etype != XML_INTERNAL_PREDEFINED_ENTITY) && (ent->content != NULL) && (ent->checked == 0)) { unsigned long oldnbent = ctxt->nbentities, diff; ++ctxt->depth; rep = xmlStringDecodeEntities(ctxt, ent->content, XML_SUBSTITUTE_REF, 0, 0, 0); --ctxt->depth; diff = ctxt->nbentities - oldnbent + 1; if (diff > INT_MAX / 2) diff = INT_MAX / 2; ent->checked = diff * 2; if (rep != NULL) { if (xmlStrchr(rep, '<')) ent->checked |= 1; xmlFree(rep); rep = NULL; } else { ent->content[0] = 0; } } /* * Just output the reference */ buf[len++] = '&'; while (len + i + 10 > buf_size) { growBuffer(buf, i + 10); } for (;i > 0;i--) 
buf[len++] = *cur++; buf[len++] = ';'; } } } else { if ((c == 0x20) || (c == 0xD) || (c == 0xA) || (c == 0x9)) { if ((len != 0) || (!normalize)) { if ((!normalize) || (!in_space)) { COPY_BUF(l,buf,len,0x20); while (len + 10 > buf_size) { growBuffer(buf, 10); } } in_space = 1; } } else { in_space = 0; COPY_BUF(l,buf,len,c); if (len + 10 > buf_size) { growBuffer(buf, 10); } } NEXTL(l); } GROW; c = CUR_CHAR(l); } if (ctxt->instate == XML_PARSER_EOF) goto error; if ((in_space) && (normalize)) { while ((len > 0) && (buf[len - 1] == 0x20)) len--; } buf[len] = 0; if (RAW == '<') { xmlFatalErr(ctxt, XML_ERR_LT_IN_ATTRIBUTE, NULL); } else if (RAW != limit) { if ((c != 0) && (!IS_CHAR(c))) { xmlFatalErrMsg(ctxt, XML_ERR_INVALID_CHAR, "invalid character in attribute value\n"); } else { xmlFatalErrMsg(ctxt, XML_ERR_ATTRIBUTE_NOT_FINISHED, "AttValue: ' expected\n"); } } else NEXT; /* * There we potentially risk an overflow, don't allow attribute value of * length more than INT_MAX it is a very reasonable assumption ! */ if (len >= INT_MAX) { xmlFatalErrMsg(ctxt, XML_ERR_ATTRIBUTE_NOT_FINISHED, "AttValue length too long\n"); goto mem_error; } if (attlen != NULL) *attlen = (int) len; return(buf); mem_error: xmlErrMemory(ctxt, NULL); error: if (buf != NULL) xmlFree(buf); if (rep != NULL) xmlFree(rep); return(NULL); }
0
[ "CWE-776" ]
libxml2
8598060bacada41a0eb09d95c97744ff4e428f8e
250,106,343,235,863,670,000,000,000,000,000,000,000
237
Patch for security issue CVE-2021-3541 This is relapted to parameter entities expansion and following the line of the billion laugh attack. Somehow in that path the counting of parameters was missed and the normal algorithm based on entities "density" was useless.
static void test_globmatch(void) { ASSERT(mg_globmatch("", 0, "", 0) == 1); ASSERT(mg_globmatch("*", 1, "a", 1) == 1); ASSERT(mg_globmatch("*", 1, "ab", 2) == 1); ASSERT(mg_globmatch("", 0, "a", 1) == 0); ASSERT(mg_globmatch("/", 1, "/foo", 4) == 0); ASSERT(mg_globmatch("/*/foo", 6, "/x/bar", 6) == 0); ASSERT(mg_globmatch("/*/foo", 6, "/x/foo", 6) == 1); ASSERT(mg_globmatch("/*/foo", 6, "/x/foox", 7) == 0); ASSERT(mg_globmatch("/*/foo*", 7, "/x/foox", 7) == 1); ASSERT(mg_globmatch("/*", 2, "/abc", 4) == 1); ASSERT(mg_globmatch("/*", 2, "/ab/", 4) == 0); ASSERT(mg_globmatch("/*", 2, "/", 1) == 1); ASSERT(mg_globmatch("/x/*", 4, "/x/2", 4) == 1); ASSERT(mg_globmatch("/x/*", 4, "/x/2/foo", 8) == 0); ASSERT(mg_globmatch("/x/*/*", 6, "/x/2/foo", 8) == 1); ASSERT(mg_globmatch("#", 1, "///", 3) == 1); ASSERT(mg_globmatch("/api/*", 6, "/api/foo", 8) == 1); ASSERT(mg_globmatch("/api/*", 6, "/api/log/static", 15) == 0); ASSERT(mg_globmatch("/api/#", 6, "/api/log/static", 15) == 1); ASSERT(mg_globmatch("#.shtml", 7, "/ssi/index.shtml", 16) == 1); ASSERT(mg_globmatch("#.c", 3, ".c", 2) == 1); ASSERT(mg_globmatch("abc", 3, "ab", 2) == 0); ASSERT(mg_globmatch("#.c", 3, "a.c", 3) == 1); ASSERT(mg_globmatch("#.c", 3, "..c", 3) == 1); ASSERT(mg_globmatch("#.c", 3, "/.c", 3) == 1); ASSERT(mg_globmatch("#.c", 3, "//a.c", 5) == 1); ASSERT(mg_globmatch("#.c", 3, "x/a.c", 5) == 1); ASSERT(mg_globmatch("#.c", 3, "./a.c", 5) == 1); ASSERT(mg_globmatch("#.shtml", 7, "./ssi/index.shtml", 17) == 1); ASSERT(mg_globmatch("#aa#bb#", 7, "caabba", 6) == 1); ASSERT(mg_globmatch("#aa#bb#", 7, "caabxa", 6) == 0); }
0
[ "CWE-552" ]
mongoose
c65c8fdaaa257e0487ab0aaae9e8f6b439335945
219,979,588,020,554,100,000,000,000,000,000,000,000
33
Protect against the directory traversal in mg_upload()
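The mongoose test record above exercises mg_globmatch(), and its commit message mentions hardening mg_upload() against directory traversal. The helper below is a generic, self-contained version of the usual first check, rejecting any ".." path component; it is not mongoose's implementation.

#include <string.h>

/* Return 0 if the path contains a ".." component, 1 otherwise. */
static int path_is_safe(const char *path)
{
    const char *p = path;
    for (;;) {
        const char *seg_end = strchr(p, '/');
        size_t len = seg_end ? (size_t)(seg_end - p) : strlen(p);
        if (len == 2 && p[0] == '.' && p[1] == '.')
            return 0;                    /* traversal component found */
        if (seg_end == NULL)
            return 1;
        p = seg_end + 1;
    }
}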
int compat_dccp_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen) { if (level != SOL_DCCP) return inet_csk_compat_setsockopt(sk, level, optname, optval, optlen); return do_dccp_setsockopt(sk, level, optname, optval, optlen); }
0
[ "CWE-416" ]
linux
69c64866ce072dea1d1e59a0d61e0f66c0dffb76
93,881,830,899,957,160,000,000,000,000,000,000,000
8
dccp: CVE-2017-8824: use-after-free in DCCP code Whenever the sock object is in DCCP_CLOSED state, dccp_disconnect() must free dccps_hc_tx_ccid and dccps_hc_rx_ccid and set to NULL. Signed-off-by: Mohamed Ghannam <[email protected]> Reviewed-by: Eric Dumazet <[email protected]> Signed-off-by: David S. Miller <[email protected]>
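The commit message above describes the classic shape of CVE-2017-8824: per-connection state freed on disconnect but left reachable through stale pointers. A tiny, self-contained illustration of the free-and-clear discipline it calls for follows; struct conn and its fields are stand-ins, not the kernel's dccp structures.

#include <stdlib.h>

struct conn {
    void *tx_state;   /* stands in for the TX congestion-control state */
    void *rx_state;   /* stands in for the RX congestion-control state */
};

/* Free both pieces of state and clear the pointers so no later code path
 * can dereference freed memory (the use-after-free the CVE describes). */
static void conn_disconnect(struct conn *c)
{
    free(c->tx_state);
    c->tx_state = NULL;
    free(c->rx_state);
    c->rx_state = NULL;
}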
static void init_alloc_chunk_ctl(struct btrfs_fs_devices *fs_devices, struct alloc_chunk_ctl *ctl) { int index = btrfs_bg_flags_to_raid_index(ctl->type); ctl->sub_stripes = btrfs_raid_array[index].sub_stripes; ctl->dev_stripes = btrfs_raid_array[index].dev_stripes; ctl->devs_max = btrfs_raid_array[index].devs_max; if (!ctl->devs_max) ctl->devs_max = BTRFS_MAX_DEVS(fs_devices->fs_info); ctl->devs_min = btrfs_raid_array[index].devs_min; ctl->devs_increment = btrfs_raid_array[index].devs_increment; ctl->ncopies = btrfs_raid_array[index].ncopies; ctl->nparity = btrfs_raid_array[index].nparity; ctl->ndevs = 0; switch (fs_devices->chunk_alloc_policy) { case BTRFS_CHUNK_ALLOC_REGULAR: init_alloc_chunk_ctl_policy_regular(fs_devices, ctl); break; case BTRFS_CHUNK_ALLOC_ZONED: init_alloc_chunk_ctl_policy_zoned(fs_devices, ctl); break; default: BUG(); } }
0
[ "CWE-476", "CWE-703" ]
linux
e4571b8c5e9ffa1e85c0c671995bd4dcc5c75091
224,589,316,809,313,400,000,000,000,000,000,000,000
27
btrfs: fix NULL pointer dereference when deleting device by invalid id [BUG] It's easy to trigger NULL pointer dereference, just by removing a non-existing device id: # mkfs.btrfs -f -m single -d single /dev/test/scratch1 \ /dev/test/scratch2 # mount /dev/test/scratch1 /mnt/btrfs # btrfs device remove 3 /mnt/btrfs Then we have the following kernel NULL pointer dereference: BUG: kernel NULL pointer dereference, address: 0000000000000000 #PF: supervisor read access in kernel mode #PF: error_code(0x0000) - not-present page PGD 0 P4D 0 Oops: 0000 [#1] PREEMPT SMP NOPTI CPU: 9 PID: 649 Comm: btrfs Not tainted 5.14.0-rc3-custom+ #35 Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 0.0.0 02/06/2015 RIP: 0010:btrfs_rm_device+0x4de/0x6b0 [btrfs] btrfs_ioctl+0x18bb/0x3190 [btrfs] ? lock_is_held_type+0xa5/0x120 ? find_held_lock.constprop.0+0x2b/0x80 ? do_user_addr_fault+0x201/0x6a0 ? lock_release+0xd2/0x2d0 ? __x64_sys_ioctl+0x83/0xb0 __x64_sys_ioctl+0x83/0xb0 do_syscall_64+0x3b/0x90 entry_SYSCALL_64_after_hwframe+0x44/0xae [CAUSE] Commit a27a94c2b0c7 ("btrfs: Make btrfs_find_device_by_devspec return btrfs_device directly") moves the "missing" device path check into btrfs_rm_device(). But btrfs_rm_device() itself can have case where it only receives @devid, with NULL as @device_path. In that case, calling strcmp() on NULL will trigger the NULL pointer dereference. Before that commit, we handle the "missing" case inside btrfs_find_device_by_devspec(), which will not check @device_path at all if @devid is provided, thus no way to trigger the bug. [FIX] Before calling strcmp(), also make sure @device_path is not NULL. Fixes: a27a94c2b0c7 ("btrfs: Make btrfs_find_device_by_devspec return btrfs_device directly") CC: [email protected] # 5.4+ Reported-by: butt3rflyh4ck <[email protected]> Reviewed-by: Anand Jain <[email protected]> Signed-off-by: Qu Wenruo <[email protected]> Reviewed-by: David Sterba <[email protected]> Signed-off-by: David Sterba <[email protected]>
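The oops quoted in the commit message comes from strcmp() being handed a NULL device path when the device was named by id alone, and the fix it describes is a plain NULL guard before the comparison. The standalone sketch below shows that guard with an illustrative sentinel constant; it is not btrfs's code.

#include <string.h>

#define DEV_PATH_MISSING "missing"   /* illustrative sentinel value */

/* Safe even when the caller supplied no path (device selected by id). */
static int is_missing_device_path(const char *device_path)
{
    return device_path != NULL && strcmp(device_path, DEV_PATH_MISSING) == 0;
}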
AP_DECLARE(void) ap_setup_make_content_type(apr_pool_t *pool) { int i; for (i = 0; needcset[i]; i++) { continue; } needcset_patterns = (const apr_strmatch_pattern **) apr_palloc(pool, (i + 1) * sizeof(apr_strmatch_pattern *)); for (i = 0; needcset[i]; i++) { needcset_patterns[i] = apr_strmatch_precompile(pool, needcset[i], 0); } needcset_patterns[i] = NULL; charset_pattern = apr_strmatch_precompile(pool, "charset=", 0); }
0
[ "CWE-703" ]
httpd
be0f5335e3e73eb63253b050fdc23f252f5c8ae3
69,159,300,577,330,150,000,000,000,000,000,000,000
14
*) SECURITY: CVE-2015-0253 (cve.mitre.org) core: Fix a crash introduced in with ErrorDocument 400 pointing to a local URL-path with the INCLUDES filter active, introduced in 2.4.11. PR 57531. [Yann Ylavic] Submitted By: ylavic Committed By: covener git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/trunk@1664205 13f79535-47bb-0310-9956-ffa450edef68
static void ssl_calc_finished_tls( ssl_context *ssl, unsigned char *buf, int from ) { int len = 12; const char *sender; md5_context md5; sha1_context sha1; unsigned char padbuf[36]; ssl_session *session = ssl->session_negotiate; if( !session ) session = ssl->session; SSL_DEBUG_MSG( 2, ( "=> calc finished tls" ) ); memcpy( &md5 , &ssl->handshake->fin_md5 , sizeof(md5_context) ); memcpy( &sha1, &ssl->handshake->fin_sha1, sizeof(sha1_context) ); /* * TLSv1: * hash = PRF( master, finished_label, * MD5( handshake ) + SHA1( handshake ) )[0..11] */ SSL_DEBUG_BUF( 4, "finished md5 state", (unsigned char *) md5.state, sizeof( md5.state ) ); SSL_DEBUG_BUF( 4, "finished sha1 state", (unsigned char *) sha1.state, sizeof( sha1.state ) ); sender = ( from == SSL_IS_CLIENT ) ? "client finished" : "server finished"; md5_finish( &md5, padbuf ); sha1_finish( &sha1, padbuf + 16 ); ssl->handshake->tls_prf( session->master, 48, (char *) sender, padbuf, 36, buf, len ); SSL_DEBUG_BUF( 3, "calc finished result", buf, len ); memset( &md5, 0, sizeof( md5_context ) ); memset( &sha1, 0, sizeof( sha1_context ) ); memset( padbuf, 0, sizeof( padbuf ) ); SSL_DEBUG_MSG( 2, ( "<= calc finished" ) ); }
0
[ "CWE-20" ]
polarssl
1922a4e6aade7b1d685af19d4d9339ddb5c02859
254,647,194,657,044,000,000,000,000,000,000,000,000
49
ssl_parse_certificate() now calls x509parse_crt_der() directly
iasecc_se_get_info(struct sc_card *card, struct iasecc_se_info *se) { struct sc_context *ctx = card->ctx; struct sc_apdu apdu; unsigned char rbuf[0x100]; unsigned char sbuf_iasecc[10] = { 0x4D, 0x08, IASECC_SDO_TEMPLATE_TAG, 0x06, IASECC_SDO_TAG_HEADER, IASECC_SDO_CLASS_SE | IASECC_OBJECT_REF_LOCAL, se->reference & 0x3F, 0x02, IASECC_SDO_CLASS_SE, 0x80 }; int rv; LOG_FUNC_CALLED(ctx); if (se->reference > IASECC_SE_REF_MAX) LOG_FUNC_RETURN(ctx, SC_ERROR_INVALID_ARGUMENTS); rv = iasecc_se_get_info_from_cache(card, se); if (rv == SC_ERROR_OBJECT_NOT_FOUND) { sc_log(ctx, "No SE#%X info in cache, try to use 'GET DATA'", se->reference); sc_format_apdu(card, &apdu, SC_APDU_CASE_4_SHORT, 0xCB, 0x3F, 0xFF); apdu.data = sbuf_iasecc; apdu.datalen = sizeof(sbuf_iasecc); apdu.lc = apdu.datalen; apdu.resp = rbuf; apdu.resplen = sizeof(rbuf); apdu.le = sizeof(rbuf); rv = sc_transmit_apdu(card, &apdu); LOG_TEST_RET(ctx, rv, "APDU transmit failed"); rv = sc_check_sw(card, apdu.sw1, apdu.sw2); LOG_TEST_RET(ctx, rv, "get SE data error"); rv = iasecc_se_parse(card, apdu.resp, apdu.resplen, se); LOG_TEST_RET(ctx, rv, "cannot parse SE data"); rv = iasecc_se_cache_info(card, se); LOG_TEST_RET(ctx, rv, "failed to put SE data into cache"); } LOG_FUNC_RETURN(ctx, rv); }
0
[ "CWE-125" ]
OpenSC
8fe377e93b4b56060e5bbfb6f3142ceaeca744fa
245,087,861,528,526,200,000,000,000,000,000,000,000
44
fixed out of bounds reads Thanks to Eric Sesterhenn from X41 D-SEC GmbH for reporting and suggesting security fixes.
static bool geneve_dst_addr_equal(struct ip_tunnel_info *a, struct ip_tunnel_info *b) { if (ip_tunnel_info_af(a) == AF_INET) return a->key.u.ipv4.dst == b->key.u.ipv4.dst; else return ipv6_addr_equal(&a->key.u.ipv6.dst, &b->key.u.ipv6.dst); }
0
[]
net
6c8991f41546c3c472503dff1ea9daaddf9331c2
320,368,679,204,652,460,000,000,000,000,000,000,000
8
net: ipv6_stub: use ip6_dst_lookup_flow instead of ip6_dst_lookup ipv6_stub uses the ip6_dst_lookup function to allow other modules to perform IPv6 lookups. However, this function skips the XFRM layer entirely. All users of ipv6_stub->ip6_dst_lookup use ip_route_output_flow (via the ip_route_output_key and ip_route_output helpers) for their IPv4 lookups, which calls xfrm_lookup_route(). This patch fixes this inconsistent behavior by switching the stub to ip6_dst_lookup_flow, which also calls xfrm_lookup_route(). This requires some changes in all the callers, as these two functions take different arguments and have different return types. Fixes: 5f81bd2e5d80 ("ipv6: export a stub for IPv6 symbols used by vxlan") Reported-by: Xiumei Mu <[email protected]> Signed-off-by: Sabrina Dubroca <[email protected]> Signed-off-by: David S. Miller <[email protected]>
cifs_push_locks(struct cifsFileInfo *cfile) { struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb); struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode); struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); int rc = 0; /* we are going to update can_cache_brlcks here - need a write access */ down_write(&cinode->lock_sem); if (!cinode->can_cache_brlcks) { up_write(&cinode->lock_sem); return rc; } if (cap_unix(tcon->ses) && (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) && ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) rc = cifs_push_posix_locks(cfile); else rc = tcon->ses->server->ops->push_mand_locks(cfile); cinode->can_cache_brlcks = false; up_write(&cinode->lock_sem); return rc; }
0
[ "CWE-119", "CWE-787" ]
linux
5d81de8e8667da7135d3a32a964087c0faf5483f
8,297,073,801,184,753,000,000,000,000,000,000,000
25
cifs: ensure that uncached writes handle unmapped areas correctly It's possible for userland to pass down an iovec via writev() that has a bogus user pointer in it. If that happens and we're doing an uncached write, then we can end up getting less bytes than we expect from the call to iov_iter_copy_from_user. This is CVE-2014-0069 cifs_iovec_write isn't set up to handle that situation however. It'll blindly keep chugging through the page array and not filling those pages with anything useful. Worse yet, we'll later end up with a negative number in wdata->tailsz, which will confuse the sending routines and cause an oops at the very least. Fix this by having the copy phase of cifs_iovec_write stop copying data in this situation and send the last write as a short one. At the same time, we want to avoid sending a zero-length write to the server, so break out of the loop and set rc to -EFAULT if that happens. This also allows us to handle the case where no address in the iovec is valid. [Note: Marking this for stable on v3.4+ kernels, but kernels as old as v2.6.38 may have a similar problem and may need similar fix] Cc: <[email protected]> # v3.4+ Reviewed-by: Pavel Shilovsky <[email protected]> Reported-by: Al Viro <[email protected]> Signed-off-by: Jeff Layton <[email protected]> Signed-off-by: Steve French <[email protected]>
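The cifs commit message spells out two rules for uncached writes over user-supplied buffers: if a copy comes up short, send only the bytes actually obtained, and never turn that into a zero-length write; fail instead. The toy sketch below models a partially readable source with a `valid` byte count; it is an illustration of the rule, not the cifs code path.

#include <stddef.h>
#include <string.h>

/* Toy stand-in for a copy that can stop early: only the first `valid`
 * bytes of `src` are readable. Returns the number of bytes copied. */
static size_t copy_chunk(char *dst, const char *src, size_t len, size_t valid)
{
    size_t n = len < valid ? len : valid;
    memcpy(dst, src, n);
    return n;
}

/* Gather bytes for one write: short copies produce a short write,
 * and a completely failed copy produces an error, never a 0-byte send. */
static long gather_for_write(char *dst, const char *src, size_t want, size_t valid)
{
    size_t got = copy_chunk(dst, src, want, valid);
    if (got == 0)
        return -1;        /* nothing usable: report the fault */
    return (long)got;     /* possibly-short count of real data */
}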
static inline void mark_dirty(struct vmcb *vmcb, int bit) { vmcb->control.clean &= ~(1 << bit); }
0
[]
kvm
854e8bb1aa06c578c2c9145fa6bfe3680ef63b23
100,461,781,528,999,640,000,000,000,000,000,000,000
4
KVM: x86: Check non-canonical addresses upon WRMSR Upon WRMSR, the CPU should inject #GP if a non-canonical value (address) is written to certain MSRs. The behavior is "almost" identical for AMD and Intel (ignoring MSRs that are not implemented in either architecture since they would anyhow #GP). However, IA32_SYSENTER_ESP and IA32_SYSENTER_EIP cause #GP if non-canonical address is written on Intel but not on AMD (which ignores the top 32-bits). Accordingly, this patch injects a #GP on the MSRs which behave identically on Intel and AMD. To eliminate the differences between the architecutres, the value which is written to IA32_SYSENTER_ESP and IA32_SYSENTER_EIP is turned to canonical value before writing instead of injecting a #GP. Some references from Intel and AMD manuals: According to Intel SDM description of WRMSR instruction #GP is expected on WRMSR "If the source register contains a non-canonical address and ECX specifies one of the following MSRs: IA32_DS_AREA, IA32_FS_BASE, IA32_GS_BASE, IA32_KERNEL_GS_BASE, IA32_LSTAR, IA32_SYSENTER_EIP, IA32_SYSENTER_ESP." According to AMD manual instruction manual: LSTAR/CSTAR (SYSCALL): "The WRMSR instruction loads the target RIP into the LSTAR and CSTAR registers. If an RIP written by WRMSR is not in canonical form, a general-protection exception (#GP) occurs." IA32_GS_BASE and IA32_FS_BASE (WRFSBASE/WRGSBASE): "The address written to the base field must be in canonical form or a #GP fault will occur." IA32_KERNEL_GS_BASE (SWAPGS): "The address stored in the KernelGSbase MSR must be in canonical form." This patch fixes CVE-2014-3610. Cc: [email protected] Signed-off-by: Nadav Amit <[email protected]> Signed-off-by: Paolo Bonzini <[email protected]>
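The KVM commit message hinges on whether a value written to certain MSRs is a canonical x86-64 address, meaning bits 63..47 are all copies of bit 47 under 48-bit virtual addressing. A self-contained version of that test follows; it assumes the 48-bit address width spelled out in the message rather than querying the CPU.

#include <stdbool.h>
#include <stdint.h>

/* Canonical for 48-bit virtual addresses: bits 63..47 are all 0 or all 1. */
static bool is_canonical_48(uint64_t addr)
{
    uint64_t upper = addr >> 47;            /* the 17 top bits */
    return upper == 0 || upper == 0x1ffff;  /* all clear or all set */
}

An emulator following the commit message would inject #GP when this returns false for the listed MSRs, and canonicalize the SYSENTER values instead.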
path_poly(PG_FUNCTION_ARGS) { PATH *path = PG_GETARG_PATH_P(0); POLYGON *poly; int size; int i; /* This is not very consistent --- other similar cases return NULL ... */ if (!path->closed) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("open path cannot be converted to polygon"))); size = offsetof(POLYGON, p[0]) +sizeof(poly->p[0]) * path->npts; poly = (POLYGON *) palloc(size); SET_VARSIZE(poly, size); poly->npts = path->npts; for (i = 0; i < path->npts; i++) { poly->p[i].x = path->p[i].x; poly->p[i].y = path->p[i].y; } make_bound_box(poly); PG_RETURN_POLYGON_P(poly); }
1
[ "CWE-703", "CWE-189" ]
postgres
31400a673325147e1205326008e32135a78b4d8a
95,887,822,875,494,020,000,000,000,000,000,000,000
29
Predict integer overflow to avoid buffer overruns. Several functions, mostly type input functions, calculated an allocation size such that the calculation wrapped to a small positive value when arguments implied a sufficiently-large requirement. Writes past the end of the inadvertent small allocation followed shortly thereafter. Coverity identified the path_in() vulnerability; code inspection led to the rest. In passing, add check_stack_depth() to prevent stack overflow in related functions. Back-patch to 8.4 (all supported versions). The non-comment hstore changes touch code that did not exist in 8.4, so that part stops at 9.0. Noah Misch and Heikki Linnakangas, reviewed by Tom Lane. Security: CVE-2014-0064
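path_poly() above computes offsetof(POLYGON, p[0]) + sizeof(poly->p[0]) * path->npts, the exact shape of calculation the commit message says can wrap to a small positive value and cause a buffer overrun. A standalone, overflow-checked version of that size computation is sketched below with a generic point type; it is not PostgreSQL's palloc-based code.

#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>

struct point { double x, y; };

/* header + n * sizeof(struct point), refusing any count that would wrap. */
static void *alloc_points(size_t header, int npts)
{
    if (npts < 0)
        return NULL;
    size_t n = (size_t)npts;
    if (n > (SIZE_MAX - header) / sizeof(struct point))
        return NULL;                 /* size would overflow size_t */
    return malloc(header + n * sizeof(struct point));
}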
*/ static int xmlXPathCompOpEval(xmlXPathParserContextPtr ctxt, xmlXPathStepOpPtr op) { int total = 0; int equal, ret; xmlXPathCompExprPtr comp; xmlXPathObjectPtr arg1, arg2; xmlNodePtr bak; xmlDocPtr bakd; int pp; int cs; CHECK_ERROR0; comp = ctxt->comp; switch (op->op) { case XPATH_OP_END: return (0); case XPATH_OP_AND: bakd = ctxt->context->doc; bak = ctxt->context->node; pp = ctxt->context->proximityPosition; cs = ctxt->context->contextSize; total += xmlXPathCompOpEval(ctxt, &comp->steps[op->ch1]); CHECK_ERROR0; xmlXPathBooleanFunction(ctxt, 1); if ((ctxt->value == NULL) || (ctxt->value->boolval == 0)) return (total); arg2 = valuePop(ctxt); ctxt->context->doc = bakd; ctxt->context->node = bak; ctxt->context->proximityPosition = pp; ctxt->context->contextSize = cs; total += xmlXPathCompOpEval(ctxt, &comp->steps[op->ch2]); if (ctxt->error) { xmlXPathFreeObject(arg2); return(0); } xmlXPathBooleanFunction(ctxt, 1); arg1 = valuePop(ctxt); arg1->boolval &= arg2->boolval; valuePush(ctxt, arg1); xmlXPathReleaseObject(ctxt->context, arg2); return (total); case XPATH_OP_OR: bakd = ctxt->context->doc; bak = ctxt->context->node; pp = ctxt->context->proximityPosition; cs = ctxt->context->contextSize; total += xmlXPathCompOpEval(ctxt, &comp->steps[op->ch1]); CHECK_ERROR0; xmlXPathBooleanFunction(ctxt, 1); if ((ctxt->value == NULL) || (ctxt->value->boolval == 1)) return (total); arg2 = valuePop(ctxt); ctxt->context->doc = bakd; ctxt->context->node = bak; ctxt->context->proximityPosition = pp; ctxt->context->contextSize = cs; total += xmlXPathCompOpEval(ctxt, &comp->steps[op->ch2]); if (ctxt->error) { xmlXPathFreeObject(arg2); return(0); } xmlXPathBooleanFunction(ctxt, 1); arg1 = valuePop(ctxt); arg1->boolval |= arg2->boolval; valuePush(ctxt, arg1); xmlXPathReleaseObject(ctxt->context, arg2); return (total); case XPATH_OP_EQUAL: bakd = ctxt->context->doc; bak = ctxt->context->node; pp = ctxt->context->proximityPosition; cs = ctxt->context->contextSize; total += xmlXPathCompOpEval(ctxt, &comp->steps[op->ch1]); CHECK_ERROR0; ctxt->context->doc = bakd; ctxt->context->node = bak; ctxt->context->proximityPosition = pp; ctxt->context->contextSize = cs; total += xmlXPathCompOpEval(ctxt, &comp->steps[op->ch2]); CHECK_ERROR0; if (op->value) equal = xmlXPathEqualValues(ctxt); else equal = xmlXPathNotEqualValues(ctxt); valuePush(ctxt, xmlXPathCacheNewBoolean(ctxt->context, equal)); return (total); case XPATH_OP_CMP: bakd = ctxt->context->doc; bak = ctxt->context->node; pp = ctxt->context->proximityPosition; cs = ctxt->context->contextSize; total += xmlXPathCompOpEval(ctxt, &comp->steps[op->ch1]); CHECK_ERROR0; ctxt->context->doc = bakd; ctxt->context->node = bak; ctxt->context->proximityPosition = pp; ctxt->context->contextSize = cs; total += xmlXPathCompOpEval(ctxt, &comp->steps[op->ch2]); CHECK_ERROR0; ret = xmlXPathCompareValues(ctxt, op->value, op->value2); valuePush(ctxt, xmlXPathCacheNewBoolean(ctxt->context, ret)); return (total); case XPATH_OP_PLUS: bakd = ctxt->context->doc; bak = ctxt->context->node; pp = ctxt->context->proximityPosition; cs = ctxt->context->contextSize; total += xmlXPathCompOpEval(ctxt, &comp->steps[op->ch1]); CHECK_ERROR0; if (op->ch2 != -1) { ctxt->context->doc = bakd; ctxt->context->node = bak; ctxt->context->proximityPosition = pp; ctxt->context->contextSize = cs; total += xmlXPathCompOpEval(ctxt, &comp->steps[op->ch2]); } CHECK_ERROR0; if (op->value == 0) xmlXPathSubValues(ctxt); else if (op->value == 1) xmlXPathAddValues(ctxt); else if (op->value == 2) 
xmlXPathValueFlipSign(ctxt); else if (op->value == 3) { CAST_TO_NUMBER; CHECK_TYPE0(XPATH_NUMBER); } return (total); case XPATH_OP_MULT: bakd = ctxt->context->doc; bak = ctxt->context->node; pp = ctxt->context->proximityPosition; cs = ctxt->context->contextSize; total += xmlXPathCompOpEval(ctxt, &comp->steps[op->ch1]); CHECK_ERROR0; ctxt->context->doc = bakd; ctxt->context->node = bak; ctxt->context->proximityPosition = pp; ctxt->context->contextSize = cs; total += xmlXPathCompOpEval(ctxt, &comp->steps[op->ch2]); CHECK_ERROR0; if (op->value == 0) xmlXPathMultValues(ctxt); else if (op->value == 1) xmlXPathDivValues(ctxt); else if (op->value == 2) xmlXPathModValues(ctxt); return (total); case XPATH_OP_UNION: bakd = ctxt->context->doc; bak = ctxt->context->node; pp = ctxt->context->proximityPosition; cs = ctxt->context->contextSize; total += xmlXPathCompOpEval(ctxt, &comp->steps[op->ch1]); CHECK_ERROR0; ctxt->context->doc = bakd; ctxt->context->node = bak; ctxt->context->proximityPosition = pp; ctxt->context->contextSize = cs; total += xmlXPathCompOpEval(ctxt, &comp->steps[op->ch2]); CHECK_ERROR0; CHECK_TYPE0(XPATH_NODESET); arg2 = valuePop(ctxt); CHECK_TYPE0(XPATH_NODESET); arg1 = valuePop(ctxt); if ((arg1->nodesetval == NULL) || ((arg2->nodesetval != NULL) && (arg2->nodesetval->nodeNr != 0))) { arg1->nodesetval = xmlXPathNodeSetMerge(arg1->nodesetval, arg2->nodesetval); } valuePush(ctxt, arg1); xmlXPathReleaseObject(ctxt->context, arg2); return (total); case XPATH_OP_ROOT: xmlXPathRoot(ctxt); return (total); case XPATH_OP_NODE: if (op->ch1 != -1) total += xmlXPathCompOpEval(ctxt, &comp->steps[op->ch1]); CHECK_ERROR0; if (op->ch2 != -1) total += xmlXPathCompOpEval(ctxt, &comp->steps[op->ch2]); CHECK_ERROR0; valuePush(ctxt, xmlXPathCacheNewNodeSet(ctxt->context, ctxt->context->node)); return (total); case XPATH_OP_RESET: if (op->ch1 != -1) total += xmlXPathCompOpEval(ctxt, &comp->steps[op->ch1]); CHECK_ERROR0; if (op->ch2 != -1) total += xmlXPathCompOpEval(ctxt, &comp->steps[op->ch2]); CHECK_ERROR0; ctxt->context->node = NULL; return (total); case XPATH_OP_COLLECT:{ if (op->ch1 == -1) return (total); total += xmlXPathCompOpEval(ctxt, &comp->steps[op->ch1]); CHECK_ERROR0; total += xmlXPathNodeCollectAndTest(ctxt, op, NULL, NULL, 0); return (total); } case XPATH_OP_VALUE: valuePush(ctxt, xmlXPathCacheObjectCopy(ctxt->context, (xmlXPathObjectPtr) op->value4)); return (total); case XPATH_OP_VARIABLE:{ xmlXPathObjectPtr val; if (op->ch1 != -1) total += xmlXPathCompOpEval(ctxt, &comp->steps[op->ch1]); if (op->value5 == NULL) { val = xmlXPathVariableLookup(ctxt->context, op->value4); if (val == NULL) { ctxt->error = XPATH_UNDEF_VARIABLE_ERROR; return(0); } valuePush(ctxt, val); } else { const xmlChar *URI; URI = xmlXPathNsLookup(ctxt->context, op->value5); if (URI == NULL) { xmlGenericError(xmlGenericErrorContext, "xmlXPathCompOpEval: variable %s bound to undefined prefix %s\n", (char *) op->value4, (char *)op->value5); ctxt->error = XPATH_UNDEF_PREFIX_ERROR; return (total); } val = xmlXPathVariableLookupNS(ctxt->context, op->value4, URI); if (val == NULL) { ctxt->error = XPATH_UNDEF_VARIABLE_ERROR; return(0); } valuePush(ctxt, val); } return (total); } case XPATH_OP_FUNCTION:{ xmlXPathFunction func; const xmlChar *oldFunc, *oldFuncURI; int i; int frame; frame = xmlXPathSetFrame(ctxt); if (op->ch1 != -1) { total += xmlXPathCompOpEval(ctxt, &comp->steps[op->ch1]); if (ctxt->error != XPATH_EXPRESSION_OK) { xmlXPathPopFrame(ctxt, frame); return (total); } } if (ctxt->valueNr < ctxt->valueFrame + 
op->value) { xmlGenericError(xmlGenericErrorContext, "xmlXPathCompOpEval: parameter error\n"); ctxt->error = XPATH_INVALID_OPERAND; xmlXPathPopFrame(ctxt, frame); return (total); } for (i = 0; i < op->value; i++) { if (ctxt->valueTab[(ctxt->valueNr - 1) - i] == NULL) { xmlGenericError(xmlGenericErrorContext, "xmlXPathCompOpEval: parameter error\n"); ctxt->error = XPATH_INVALID_OPERAND; xmlXPathPopFrame(ctxt, frame); return (total); } } if (op->cache != NULL) XML_CAST_FPTR(func) = op->cache; else { const xmlChar *URI = NULL; if (op->value5 == NULL) func = xmlXPathFunctionLookup(ctxt->context, op->value4); else { URI = xmlXPathNsLookup(ctxt->context, op->value5); if (URI == NULL) { xmlGenericError(xmlGenericErrorContext, "xmlXPathCompOpEval: function %s bound to undefined prefix %s\n", (char *)op->value4, (char *)op->value5); xmlXPathPopFrame(ctxt, frame); ctxt->error = XPATH_UNDEF_PREFIX_ERROR; return (total); } func = xmlXPathFunctionLookupNS(ctxt->context, op->value4, URI); } if (func == NULL) { xmlGenericError(xmlGenericErrorContext, "xmlXPathCompOpEval: function %s not found\n", (char *)op->value4); XP_ERROR0(XPATH_UNKNOWN_FUNC_ERROR); } op->cache = XML_CAST_FPTR(func); op->cacheURI = (void *) URI; } oldFunc = ctxt->context->function; oldFuncURI = ctxt->context->functionURI; ctxt->context->function = op->value4; ctxt->context->functionURI = op->cacheURI; func(ctxt, op->value); ctxt->context->function = oldFunc; ctxt->context->functionURI = oldFuncURI; xmlXPathPopFrame(ctxt, frame); return (total); } case XPATH_OP_ARG: bakd = ctxt->context->doc; bak = ctxt->context->node; pp = ctxt->context->proximityPosition; cs = ctxt->context->contextSize; if (op->ch1 != -1) total += xmlXPathCompOpEval(ctxt, &comp->steps[op->ch1]); ctxt->context->contextSize = cs; ctxt->context->proximityPosition = pp; ctxt->context->node = bak; ctxt->context->doc = bakd; CHECK_ERROR0; if (op->ch2 != -1) { total += xmlXPathCompOpEval(ctxt, &comp->steps[op->ch2]); ctxt->context->doc = bakd; ctxt->context->node = bak; CHECK_ERROR0; } return (total); case XPATH_OP_PREDICATE: case XPATH_OP_FILTER:{ xmlXPathObjectPtr res; xmlXPathObjectPtr obj, tmp; xmlNodeSetPtr newset = NULL; xmlNodeSetPtr oldset; xmlNodePtr oldnode; xmlDocPtr oldDoc; int i; /* * Optimization for ()[1] selection i.e. the first elem */ if ((op->ch1 != -1) && (op->ch2 != -1) && #ifdef XP_OPTIMIZED_FILTER_FIRST /* * FILTER TODO: Can we assume that the inner processing * will result in an ordered list if we have an * XPATH_OP_FILTER? * What about an additional field or flag on * xmlXPathObject like @sorted ? This way we wouln'd need * to assume anything, so it would be more robust and * easier to optimize. */ ((comp->steps[op->ch1].op == XPATH_OP_SORT) || /* 18 */ (comp->steps[op->ch1].op == XPATH_OP_FILTER)) && /* 17 */ #else (comp->steps[op->ch1].op == XPATH_OP_SORT) && #endif (comp->steps[op->ch2].op == XPATH_OP_VALUE)) { /* 12 */ xmlXPathObjectPtr val; val = comp->steps[op->ch2].value4; if ((val != NULL) && (val->type == XPATH_NUMBER) && (val->floatval == 1.0)) { xmlNodePtr first = NULL; total += xmlXPathCompOpEvalFirst(ctxt, &comp->steps[op->ch1], &first); CHECK_ERROR0; /* * The nodeset should be in document order, * Keep only the first value */ if ((ctxt->value != NULL) && (ctxt->value->type == XPATH_NODESET) && (ctxt->value->nodesetval != NULL) && (ctxt->value->nodesetval->nodeNr > 1)) ctxt->value->nodesetval->nodeNr = 1; return (total); } } /* * Optimization for ()[last()] selection i.e. 
the last elem */ if ((op->ch1 != -1) && (op->ch2 != -1) && (comp->steps[op->ch1].op == XPATH_OP_SORT) && (comp->steps[op->ch2].op == XPATH_OP_SORT)) { int f = comp->steps[op->ch2].ch1; if ((f != -1) && (comp->steps[f].op == XPATH_OP_FUNCTION) && (comp->steps[f].value5 == NULL) && (comp->steps[f].value == 0) && (comp->steps[f].value4 != NULL) && (xmlStrEqual (comp->steps[f].value4, BAD_CAST "last"))) { xmlNodePtr last = NULL; total += xmlXPathCompOpEvalLast(ctxt, &comp->steps[op->ch1], &last); CHECK_ERROR0; /* * The nodeset should be in document order, * Keep only the last value */ if ((ctxt->value != NULL) && (ctxt->value->type == XPATH_NODESET) && (ctxt->value->nodesetval != NULL) && (ctxt->value->nodesetval->nodeTab != NULL) && (ctxt->value->nodesetval->nodeNr > 1)) { ctxt->value->nodesetval->nodeTab[0] = ctxt->value->nodesetval->nodeTab[ctxt-> value-> nodesetval-> nodeNr - 1]; ctxt->value->nodesetval->nodeNr = 1; } return (total); } } /* * Process inner predicates first. * Example "index[parent::book][1]": * ... * PREDICATE <-- we are here "[1]" * PREDICATE <-- process "[parent::book]" first * SORT * COLLECT 'parent' 'name' 'node' book * NODE * ELEM Object is a number : 1 */ if (op->ch1 != -1) total += xmlXPathCompOpEval(ctxt, &comp->steps[op->ch1]); CHECK_ERROR0; if (op->ch2 == -1) return (total); if (ctxt->value == NULL) return (total); oldnode = ctxt->context->node; #ifdef LIBXML_XPTR_ENABLED /* * Hum are we filtering the result of an XPointer expression */ if (ctxt->value->type == XPATH_LOCATIONSET) { xmlLocationSetPtr newlocset = NULL; xmlLocationSetPtr oldlocset; /* * Extract the old locset, and then evaluate the result of the * expression for all the element in the locset. use it to grow * up a new locset. */ CHECK_TYPE0(XPATH_LOCATIONSET); obj = valuePop(ctxt); oldlocset = obj->user; ctxt->context->node = NULL; if ((oldlocset == NULL) || (oldlocset->locNr == 0)) { ctxt->context->contextSize = 0; ctxt->context->proximityPosition = 0; if (op->ch2 != -1) total += xmlXPathCompOpEval(ctxt, &comp->steps[op->ch2]); res = valuePop(ctxt); if (res != NULL) { xmlXPathReleaseObject(ctxt->context, res); } valuePush(ctxt, obj); CHECK_ERROR0; return (total); } newlocset = xmlXPtrLocationSetCreate(NULL); for (i = 0; i < oldlocset->locNr; i++) { /* * Run the evaluation with a node list made of a * single item in the nodelocset. */ ctxt->context->node = oldlocset->locTab[i]->user; ctxt->context->contextSize = oldlocset->locNr; ctxt->context->proximityPosition = i + 1; tmp = xmlXPathCacheNewNodeSet(ctxt->context, ctxt->context->node); valuePush(ctxt, tmp); if (op->ch2 != -1) total += xmlXPathCompOpEval(ctxt, &comp->steps[op->ch2]); if (ctxt->error != XPATH_EXPRESSION_OK) { xmlXPathFreeObject(obj); return(0); } /* * The result of the evaluation need to be tested to * decided whether the filter succeeded or not */ res = valuePop(ctxt); if (xmlXPathEvaluatePredicateResult(ctxt, res)) { xmlXPtrLocationSetAdd(newlocset, xmlXPathObjectCopy (oldlocset->locTab[i])); } /* * Cleanup */ if (res != NULL) { xmlXPathReleaseObject(ctxt->context, res); } if (ctxt->value == tmp) { res = valuePop(ctxt); xmlXPathReleaseObject(ctxt->context, res); } ctxt->context->node = NULL; } /* * The result is used as the new evaluation locset. 
*/ xmlXPathReleaseObject(ctxt->context, obj); ctxt->context->node = NULL; ctxt->context->contextSize = -1; ctxt->context->proximityPosition = -1; valuePush(ctxt, xmlXPtrWrapLocationSet(newlocset)); ctxt->context->node = oldnode; return (total); } #endif /* LIBXML_XPTR_ENABLED */ /* * Extract the old set, and then evaluate the result of the * expression for all the element in the set. use it to grow * up a new set. */ CHECK_TYPE0(XPATH_NODESET); obj = valuePop(ctxt); oldset = obj->nodesetval; oldnode = ctxt->context->node; oldDoc = ctxt->context->doc; ctxt->context->node = NULL; if ((oldset == NULL) || (oldset->nodeNr == 0)) { ctxt->context->contextSize = 0; ctxt->context->proximityPosition = 0; /* if (op->ch2 != -1) total += xmlXPathCompOpEval(ctxt, &comp->steps[op->ch2]); CHECK_ERROR0; res = valuePop(ctxt); if (res != NULL) xmlXPathFreeObject(res); */ valuePush(ctxt, obj); ctxt->context->node = oldnode; CHECK_ERROR0; } else { tmp = NULL; /* * Initialize the new set. * Also set the xpath document in case things like * key() evaluation are attempted on the predicate */ newset = xmlXPathNodeSetCreate(NULL); /* * SPEC XPath 1.0: * "For each node in the node-set to be filtered, the * PredicateExpr is evaluated with that node as the * context node, with the number of nodes in the * node-set as the context size, and with the proximity * position of the node in the node-set with respect to * the axis as the context position;" * @oldset is the node-set" to be filtered. * * SPEC XPath 1.0: * "only predicates change the context position and * context size (see [2.4 Predicates])." * Example: * node-set context pos * nA 1 * nB 2 * nC 3 * After applying predicate [position() > 1] : * node-set context pos * nB 1 * nC 2 * * removed the first node in the node-set, then * the context position of the */ for (i = 0; i < oldset->nodeNr; i++) { /* * Run the evaluation with a node list made of * a single item in the nodeset. */ ctxt->context->node = oldset->nodeTab[i]; if ((oldset->nodeTab[i]->type != XML_NAMESPACE_DECL) && (oldset->nodeTab[i]->doc != NULL)) ctxt->context->doc = oldset->nodeTab[i]->doc; if (tmp == NULL) { tmp = xmlXPathCacheNewNodeSet(ctxt->context, ctxt->context->node); } else { if (xmlXPathNodeSetAddUnique(tmp->nodesetval, ctxt->context->node) < 0) { ctxt->error = XPATH_MEMORY_ERROR; } } valuePush(ctxt, tmp); ctxt->context->contextSize = oldset->nodeNr; ctxt->context->proximityPosition = i + 1; /* * Evaluate the predicate against the context node. * Can/should we optimize position() predicates * here (e.g. "[1]")? */ if (op->ch2 != -1) total += xmlXPathCompOpEval(ctxt, &comp->steps[op->ch2]); if (ctxt->error != XPATH_EXPRESSION_OK) { xmlXPathFreeNodeSet(newset); xmlXPathFreeObject(obj); return(0); } /* * The result of the evaluation needs to be tested to * decide whether the filter succeeded or not */ /* * OPTIMIZE TODO: Can we use * xmlXPathNodeSetAdd*Unique()* instead? */ res = valuePop(ctxt); if (xmlXPathEvaluatePredicateResult(ctxt, res)) { if (xmlXPathNodeSetAdd(newset, oldset->nodeTab[i]) < 0) ctxt->error = XPATH_MEMORY_ERROR; } /* * Cleanup */ if (res != NULL) { xmlXPathReleaseObject(ctxt->context, res); } if (ctxt->value == tmp) { valuePop(ctxt); xmlXPathNodeSetClear(tmp->nodesetval, 1); /* * Don't free the temporary nodeset * in order to avoid massive recreation inside this * loop. */ } else tmp = NULL; ctxt->context->node = NULL; } if (tmp != NULL) xmlXPathReleaseObject(ctxt->context, tmp); /* * The result is used as the new evaluation set. 
*/ xmlXPathReleaseObject(ctxt->context, obj); ctxt->context->node = NULL; ctxt->context->contextSize = -1; ctxt->context->proximityPosition = -1; /* may want to move this past the '}' later */ ctxt->context->doc = oldDoc; valuePush(ctxt, xmlXPathCacheWrapNodeSet(ctxt->context, newset)); } ctxt->context->node = oldnode; return (total); } case XPATH_OP_SORT: if (op->ch1 != -1) total += xmlXPathCompOpEval(ctxt, &comp->steps[op->ch1]); CHECK_ERROR0; if ((ctxt->value != NULL) && (ctxt->value->type == XPATH_NODESET) && (ctxt->value->nodesetval != NULL) && (ctxt->value->nodesetval->nodeNr > 1)) { xmlXPathNodeSetSort(ctxt->value->nodesetval); } return (total); #ifdef LIBXML_XPTR_ENABLED case XPATH_OP_RANGETO:{ xmlXPathObjectPtr range; xmlXPathObjectPtr res, obj; xmlXPathObjectPtr tmp; xmlLocationSetPtr newlocset = NULL; xmlLocationSetPtr oldlocset; xmlNodeSetPtr oldset; int i, j; if (op->ch1 != -1) total += xmlXPathCompOpEval(ctxt, &comp->steps[op->ch1]); if (op->ch2 == -1) return (total); if (ctxt->value->type == XPATH_LOCATIONSET) { /* * Extract the old locset, and then evaluate the result of the * expression for all the element in the locset. use it to grow * up a new locset. */ CHECK_TYPE0(XPATH_LOCATIONSET); obj = valuePop(ctxt); oldlocset = obj->user; if ((oldlocset == NULL) || (oldlocset->locNr == 0)) { ctxt->context->node = NULL; ctxt->context->contextSize = 0; ctxt->context->proximityPosition = 0; total += xmlXPathCompOpEval(ctxt,&comp->steps[op->ch2]); res = valuePop(ctxt); if (res != NULL) { xmlXPathReleaseObject(ctxt->context, res); } valuePush(ctxt, obj); CHECK_ERROR0; return (total); } newlocset = xmlXPtrLocationSetCreate(NULL); for (i = 0; i < oldlocset->locNr; i++) { /* * Run the evaluation with a node list made of a * single item in the nodelocset. */ ctxt->context->node = oldlocset->locTab[i]->user; ctxt->context->contextSize = oldlocset->locNr; ctxt->context->proximityPosition = i + 1; tmp = xmlXPathCacheNewNodeSet(ctxt->context, ctxt->context->node); valuePush(ctxt, tmp); if (op->ch2 != -1) total += xmlXPathCompOpEval(ctxt, &comp->steps[op->ch2]); if (ctxt->error != XPATH_EXPRESSION_OK) { xmlXPathFreeObject(obj); return(0); } res = valuePop(ctxt); if (res->type == XPATH_LOCATIONSET) { xmlLocationSetPtr rloc = (xmlLocationSetPtr)res->user; for (j=0; j<rloc->locNr; j++) { range = xmlXPtrNewRange( oldlocset->locTab[i]->user, oldlocset->locTab[i]->index, rloc->locTab[j]->user2, rloc->locTab[j]->index2); if (range != NULL) { xmlXPtrLocationSetAdd(newlocset, range); } } } else { range = xmlXPtrNewRangeNodeObject( (xmlNodePtr)oldlocset->locTab[i]->user, res); if (range != NULL) { xmlXPtrLocationSetAdd(newlocset,range); } } /* * Cleanup */ if (res != NULL) { xmlXPathReleaseObject(ctxt->context, res); } if (ctxt->value == tmp) { res = valuePop(ctxt); xmlXPathReleaseObject(ctxt->context, res); } ctxt->context->node = NULL; } } else { /* Not a location set */ CHECK_TYPE0(XPATH_NODESET); obj = valuePop(ctxt); oldset = obj->nodesetval; ctxt->context->node = NULL; newlocset = xmlXPtrLocationSetCreate(NULL); if (oldset != NULL) { for (i = 0; i < oldset->nodeNr; i++) { /* * Run the evaluation with a node list made of a single item * in the nodeset. */ ctxt->context->node = oldset->nodeTab[i]; /* * OPTIMIZE TODO: Avoid recreation for every iteration. 
*/ tmp = xmlXPathCacheNewNodeSet(ctxt->context, ctxt->context->node); valuePush(ctxt, tmp); if (op->ch2 != -1) total += xmlXPathCompOpEval(ctxt, &comp->steps[op->ch2]); if (ctxt->error != XPATH_EXPRESSION_OK) { xmlXPathFreeObject(obj); return(0); } res = valuePop(ctxt); range = xmlXPtrNewRangeNodeObject(oldset->nodeTab[i], res); if (range != NULL) { xmlXPtrLocationSetAdd(newlocset, range); } /* * Cleanup */ if (res != NULL) { xmlXPathReleaseObject(ctxt->context, res); } if (ctxt->value == tmp) { res = valuePop(ctxt); xmlXPathReleaseObject(ctxt->context, res); } ctxt->context->node = NULL; } } } /* * The result is used as the new evaluation set. */ xmlXPathReleaseObject(ctxt->context, obj); ctxt->context->node = NULL; ctxt->context->contextSize = -1; ctxt->context->proximityPosition = -1; valuePush(ctxt, xmlXPtrWrapLocationSet(newlocset)); return (total); } #endif /* LIBXML_XPTR_ENABLED */ } xmlGenericError(xmlGenericErrorContext, "XPath: unknown precompiled operation %d\n", op->op); ctxt->error = XPATH_INVALID_OPERAND;
0
[]
libxml2
03c6723043775122313f107695066e5744189a08
209,809,883,968,201,400,000,000,000,000,000,000,000
883
Handling of XPath function arguments in error case The XPath engine tries to guarantee that every XPath function can pop 'nargs' non-NULL values off the stack. libxslt, for example, relies on this assumption. But the check isn't thorough enough if there are errors during the evaluation of arguments. This can lead to segfaults: https://mail.gnome.org/archives/xslt/2013-December/msg00005.html This commit makes the handling of function arguments more robust. * Bail out early when evaluation of XPath function arguments fails. * Make sure that there are 'nargs' arguments in the current call frame.
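The libxml2 commit message is about guaranteeing that an XPath function can pop nargs non-NULL values, which is exactly the ctxt->valueNr < ctxt->valueFrame + op->value test and per-argument NULL checks visible in the function above. The small model below restates that invariant over a generic value stack with a frame marker; the struct is an assumption for illustration.

#include <stddef.h>

struct eval_stack {
    void **values;  /* evaluation stack */
    int top;        /* number of values currently on the stack */
    int frame;      /* index where the current call frame began */
};

/* True only if the current frame holds at least `nargs` non-NULL values. */
static int frame_has_args(const struct eval_stack *s, int nargs)
{
    if (s->top - s->frame < nargs)
        return 0;                              /* too few arguments pushed */
    for (int i = 0; i < nargs; i++)
        if (s->values[s->top - 1 - i] == NULL)
            return 0;                          /* an argument evaluation failed */
    return 1;
}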
struct inode *__ext4_new_inode(handle_t *handle, struct inode *dir, umode_t mode, const struct qstr *qstr, __u32 goal, uid_t *owner, __u32 i_flags, int handle_type, unsigned int line_no, int nblocks) { struct super_block *sb; struct buffer_head *inode_bitmap_bh = NULL; struct buffer_head *group_desc_bh; ext4_group_t ngroups, group = 0; unsigned long ino = 0; struct inode *inode; struct ext4_group_desc *gdp = NULL; struct ext4_inode_info *ei; struct ext4_sb_info *sbi; int ret2, err; struct inode *ret; ext4_group_t i; ext4_group_t flex_group; struct ext4_group_info *grp; int encrypt = 0; /* Cannot create files in a deleted directory */ if (!dir || !dir->i_nlink) return ERR_PTR(-EPERM); sb = dir->i_sb; sbi = EXT4_SB(sb); if (unlikely(ext4_forced_shutdown(sbi))) return ERR_PTR(-EIO); if ((ext4_encrypted_inode(dir) || DUMMY_ENCRYPTION_ENABLED(sbi)) && (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) && !(i_flags & EXT4_EA_INODE_FL)) { err = fscrypt_get_encryption_info(dir); if (err) return ERR_PTR(err); if (!fscrypt_has_encryption_key(dir)) return ERR_PTR(-ENOKEY); encrypt = 1; } if (!handle && sbi->s_journal && !(i_flags & EXT4_EA_INODE_FL)) { #ifdef CONFIG_EXT4_FS_POSIX_ACL struct posix_acl *p = get_acl(dir, ACL_TYPE_DEFAULT); if (IS_ERR(p)) return ERR_CAST(p); if (p) { int acl_size = p->a_count * sizeof(ext4_acl_entry); nblocks += (S_ISDIR(mode) ? 2 : 1) * __ext4_xattr_set_credits(sb, NULL /* inode */, NULL /* block_bh */, acl_size, true /* is_create */); posix_acl_release(p); } #endif #ifdef CONFIG_SECURITY { int num_security_xattrs = 1; #ifdef CONFIG_INTEGRITY num_security_xattrs++; #endif /* * We assume that security xattrs are never * more than 1k. In practice they are under * 128 bytes. */ nblocks += num_security_xattrs * __ext4_xattr_set_credits(sb, NULL /* inode */, NULL /* block_bh */, 1024, true /* is_create */); } #endif if (encrypt) nblocks += __ext4_xattr_set_credits(sb, NULL /* inode */, NULL /* block_bh */, FSCRYPT_SET_CONTEXT_MAX_SIZE, true /* is_create */); } ngroups = ext4_get_groups_count(sb); trace_ext4_request_inode(dir, mode); inode = new_inode(sb); if (!inode) return ERR_PTR(-ENOMEM); ei = EXT4_I(inode); /* * Initialize owners and quota early so that we don't have to account * for quota initialization worst case in standard inode creating * transaction */ if (owner) { inode->i_mode = mode; i_uid_write(inode, owner[0]); i_gid_write(inode, owner[1]); } else if (test_opt(sb, GRPID)) { inode->i_mode = mode; inode->i_uid = current_fsuid(); inode->i_gid = dir->i_gid; } else inode_init_owner(inode, dir, mode); if (ext4_has_feature_project(sb) && ext4_test_inode_flag(dir, EXT4_INODE_PROJINHERIT)) ei->i_projid = EXT4_I(dir)->i_projid; else ei->i_projid = make_kprojid(&init_user_ns, EXT4_DEF_PROJID); err = dquot_initialize(inode); if (err) goto out; if (!goal) goal = sbi->s_inode_goal; if (goal && goal <= le32_to_cpu(sbi->s_es->s_inodes_count)) { group = (goal - 1) / EXT4_INODES_PER_GROUP(sb); ino = (goal - 1) % EXT4_INODES_PER_GROUP(sb); ret2 = 0; goto got_group; } if (S_ISDIR(mode)) ret2 = find_group_orlov(sb, dir, &group, mode, qstr); else ret2 = find_group_other(sb, dir, &group, mode); got_group: EXT4_I(dir)->i_last_alloc_group = group; err = -ENOSPC; if (ret2 == -1) goto out; /* * Normally we will only go through one pass of this loop, * unless we get unlucky and it turns out the group we selected * had its last inode grabbed by someone else. 
*/ for (i = 0; i < ngroups; i++, ino = 0) { err = -EIO; gdp = ext4_get_group_desc(sb, group, &group_desc_bh); if (!gdp) goto out; /* * Check free inodes count before loading bitmap. */ if (ext4_free_inodes_count(sb, gdp) == 0) goto next_group; grp = ext4_get_group_info(sb, group); /* Skip groups with already-known suspicious inode tables */ if (EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) goto next_group; brelse(inode_bitmap_bh); inode_bitmap_bh = ext4_read_inode_bitmap(sb, group); /* Skip groups with suspicious inode tables */ if (EXT4_MB_GRP_IBITMAP_CORRUPT(grp) || IS_ERR(inode_bitmap_bh)) { inode_bitmap_bh = NULL; goto next_group; } repeat_in_this_group: ret2 = find_inode_bit(sb, group, inode_bitmap_bh, &ino); if (!ret2) goto next_group; if (group == 0 && (ino + 1) < EXT4_FIRST_INO(sb)) { ext4_error(sb, "reserved inode found cleared - " "inode=%lu", ino + 1); ext4_mark_group_bitmap_corrupted(sb, group, EXT4_GROUP_INFO_IBITMAP_CORRUPT); goto next_group; } if (!handle) { BUG_ON(nblocks <= 0); handle = __ext4_journal_start_sb(dir->i_sb, line_no, handle_type, nblocks, 0); if (IS_ERR(handle)) { err = PTR_ERR(handle); ext4_std_error(sb, err); goto out; } } BUFFER_TRACE(inode_bitmap_bh, "get_write_access"); err = ext4_journal_get_write_access(handle, inode_bitmap_bh); if (err) { ext4_std_error(sb, err); goto out; } ext4_lock_group(sb, group); ret2 = ext4_test_and_set_bit(ino, inode_bitmap_bh->b_data); if (ret2) { /* Someone already took the bit. Repeat the search * with lock held. */ ret2 = find_inode_bit(sb, group, inode_bitmap_bh, &ino); if (ret2) { ext4_set_bit(ino, inode_bitmap_bh->b_data); ret2 = 0; } else { ret2 = 1; /* we didn't grab the inode */ } } ext4_unlock_group(sb, group); ino++; /* the inode bitmap is zero-based */ if (!ret2) goto got; /* we grabbed the inode! 
*/ if (ino < EXT4_INODES_PER_GROUP(sb)) goto repeat_in_this_group; next_group: if (++group == ngroups) group = 0; } err = -ENOSPC; goto out; got: BUFFER_TRACE(inode_bitmap_bh, "call ext4_handle_dirty_metadata"); err = ext4_handle_dirty_metadata(handle, NULL, inode_bitmap_bh); if (err) { ext4_std_error(sb, err); goto out; } BUFFER_TRACE(group_desc_bh, "get_write_access"); err = ext4_journal_get_write_access(handle, group_desc_bh); if (err) { ext4_std_error(sb, err); goto out; } /* We may have to initialize the block bitmap if it isn't already */ if (ext4_has_group_desc_csum(sb) && gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) { struct buffer_head *block_bitmap_bh; block_bitmap_bh = ext4_read_block_bitmap(sb, group); if (IS_ERR(block_bitmap_bh)) { err = PTR_ERR(block_bitmap_bh); goto out; } BUFFER_TRACE(block_bitmap_bh, "get block bitmap access"); err = ext4_journal_get_write_access(handle, block_bitmap_bh); if (err) { brelse(block_bitmap_bh); ext4_std_error(sb, err); goto out; } BUFFER_TRACE(block_bitmap_bh, "dirty block bitmap"); err = ext4_handle_dirty_metadata(handle, NULL, block_bitmap_bh); /* recheck and clear flag under lock if we still need to */ ext4_lock_group(sb, group); if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) { gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT); ext4_free_group_clusters_set(sb, gdp, ext4_free_clusters_after_init(sb, group, gdp)); ext4_block_bitmap_csum_set(sb, group, gdp, block_bitmap_bh); ext4_group_desc_csum_set(sb, group, gdp); } ext4_unlock_group(sb, group); brelse(block_bitmap_bh); if (err) { ext4_std_error(sb, err); goto out; } } /* Update the relevant bg descriptor fields */ if (ext4_has_group_desc_csum(sb)) { int free; struct ext4_group_info *grp = ext4_get_group_info(sb, group); down_read(&grp->alloc_sem); /* protect vs itable lazyinit */ ext4_lock_group(sb, group); /* while we modify the bg desc */ free = EXT4_INODES_PER_GROUP(sb) - ext4_itable_unused_count(sb, gdp); if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) { gdp->bg_flags &= cpu_to_le16(~EXT4_BG_INODE_UNINIT); free = 0; } /* * Check the relative inode number against the last used * relative inode number in this group. 
if it is greater * we need to update the bg_itable_unused count */ if (ino > free) ext4_itable_unused_set(sb, gdp, (EXT4_INODES_PER_GROUP(sb) - ino)); up_read(&grp->alloc_sem); } else { ext4_lock_group(sb, group); } ext4_free_inodes_set(sb, gdp, ext4_free_inodes_count(sb, gdp) - 1); if (S_ISDIR(mode)) { ext4_used_dirs_set(sb, gdp, ext4_used_dirs_count(sb, gdp) + 1); if (sbi->s_log_groups_per_flex) { ext4_group_t f = ext4_flex_group(sbi, group); atomic_inc(&sbi->s_flex_groups[f].used_dirs); } } if (ext4_has_group_desc_csum(sb)) { ext4_inode_bitmap_csum_set(sb, group, gdp, inode_bitmap_bh, EXT4_INODES_PER_GROUP(sb) / 8); ext4_group_desc_csum_set(sb, group, gdp); } ext4_unlock_group(sb, group); BUFFER_TRACE(group_desc_bh, "call ext4_handle_dirty_metadata"); err = ext4_handle_dirty_metadata(handle, NULL, group_desc_bh); if (err) { ext4_std_error(sb, err); goto out; } percpu_counter_dec(&sbi->s_freeinodes_counter); if (S_ISDIR(mode)) percpu_counter_inc(&sbi->s_dirs_counter); if (sbi->s_log_groups_per_flex) { flex_group = ext4_flex_group(sbi, group); atomic_dec(&sbi->s_flex_groups[flex_group].free_inodes); } inode->i_ino = ino + group * EXT4_INODES_PER_GROUP(sb); /* This is the optimal IO size (for stat), not the fs block size */ inode->i_blocks = 0; inode->i_mtime = inode->i_atime = inode->i_ctime = ei->i_crtime = current_time(inode); memset(ei->i_data, 0, sizeof(ei->i_data)); ei->i_dir_start_lookup = 0; ei->i_disksize = 0; /* Don't inherit extent flag from directory, amongst others. */ ei->i_flags = ext4_mask_flags(mode, EXT4_I(dir)->i_flags & EXT4_FL_INHERITED); ei->i_flags |= i_flags; ei->i_file_acl = 0; ei->i_dtime = 0; ei->i_block_group = group; ei->i_last_alloc_group = ~0; ext4_set_inode_flags(inode); if (IS_DIRSYNC(inode)) ext4_handle_sync(handle); if (insert_inode_locked(inode) < 0) { /* * Likely a bitmap corruption causing inode to be allocated * twice. */ err = -EIO; ext4_error(sb, "failed to insert inode %lu: doubly allocated?", inode->i_ino); ext4_mark_group_bitmap_corrupted(sb, group, EXT4_GROUP_INFO_IBITMAP_CORRUPT); goto out; } inode->i_generation = prandom_u32(); /* Precompute checksum seed for inode metadata */ if (ext4_has_metadata_csum(sb)) { __u32 csum; __le32 inum = cpu_to_le32(inode->i_ino); __le32 gen = cpu_to_le32(inode->i_generation); csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&inum, sizeof(inum)); ei->i_csum_seed = ext4_chksum(sbi, csum, (__u8 *)&gen, sizeof(gen)); } ext4_clear_state_flags(ei); /* Only relevant on 32-bit archs */ ext4_set_inode_state(inode, EXT4_STATE_NEW); ei->i_extra_isize = sbi->s_want_extra_isize; ei->i_inline_off = 0; if (ext4_has_feature_inline_data(sb)) ext4_set_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA); ret = inode; err = dquot_alloc_inode(inode); if (err) goto fail_drop; /* * Since the encryption xattr will always be unique, create it first so * that it's less likely to end up in an external xattr block and * prevent its deduplication. 
*/ if (encrypt) { err = fscrypt_inherit_context(dir, inode, handle, true); if (err) goto fail_free_drop; } if (!(ei->i_flags & EXT4_EA_INODE_FL)) { err = ext4_init_acl(handle, inode, dir); if (err) goto fail_free_drop; err = ext4_init_security(handle, inode, dir, qstr); if (err) goto fail_free_drop; } if (ext4_has_feature_extents(sb)) { /* set extent flag only for directory, file and normal symlink*/ if (S_ISDIR(mode) || S_ISREG(mode) || S_ISLNK(mode)) { ext4_set_inode_flag(inode, EXT4_INODE_EXTENTS); ext4_ext_tree_init(handle, inode); } } if (ext4_handle_valid(handle)) { ei->i_sync_tid = handle->h_transaction->t_tid; ei->i_datasync_tid = handle->h_transaction->t_tid; } err = ext4_mark_inode_dirty(handle, inode); if (err) { ext4_std_error(sb, err); goto fail_free_drop; } ext4_debug("allocating inode %lu\n", inode->i_ino); trace_ext4_allocate_inode(inode, dir, mode); brelse(inode_bitmap_bh); return ret; fail_free_drop: dquot_free_inode(inode); fail_drop: clear_nlink(inode); unlock_new_inode(inode); out: dquot_drop(inode); inode->i_flags |= S_NOQUOTA; iput(inode); brelse(inode_bitmap_bh); return ERR_PTR(err); }
1
[ "CWE-416" ]
linux
8844618d8aa7a9973e7b527d038a2a589665002c
97,327,577,510,042,030,000,000,000,000,000,000,000
458
ext4: only look at the bg_flags field if it is valid The bg_flags field in the block group descripts is only valid if the uninit_bg or metadata_csum feature is enabled. We were not consistently looking at this field; fix this. Also block group #0 must never have uninitialized allocation bitmaps, or need to be zeroed, since that's where the root inode, and other special inodes are set up. Check for these conditions and mark the file system as corrupted if they are detected. This addresses CVE-2018-10876. https://bugzilla.kernel.org/show_bug.cgi?id=199403 Signed-off-by: Theodore Ts'o <[email protected]> Cc: [email protected]
static int ZEND_FASTCALL ZEND_EXT_FCALL_BEGIN_SPEC_HANDLER(ZEND_OPCODE_HANDLER_ARGS) { if (!EG(no_extensions)) { zend_llist_apply_with_argument(&zend_extensions, (llist_apply_with_arg_func_t) zend_extension_fcall_begin_handler, EX(op_array) TSRMLS_CC); } ZEND_VM_NEXT_OPCODE(); }
0
[]
php-src
ce96fd6b0761d98353761bf78d5bfb55291179fd
215,956,014,888,436,600,000,000,000,000,000,000,000
7
- fix #39863, do not accept paths with NULL in them. See http://news.php.net/php.internals/50191, trunk will have the patch later (adding a macro and/or changing (some) APIs. Patch by Rasmus
static inline void RemoveFreeBlock(void *block,const size_t i) { register void *next, *previous; next=NextBlockInList(block); previous=PreviousBlockInList(block); if (previous == (void *) NULL) memory_pool.blocks[i]=next; else NextBlockInList(previous)=next; if (next != (void *) NULL) PreviousBlockInList(next)=previous; }
0
[ "CWE-190", "CWE-189", "CWE-703" ]
ImageMagick
0f6fc2d5bf8f500820c3dbcf0d23ee14f2d9f734
328,835,683,160,947,300,000,000,000,000,000,000,000
15
static enum test_return test_binary_incrq(void) { return test_binary_incr_impl("test_binary_incrq", PROTOCOL_BINARY_CMD_INCREMENTQ); }
0
[ "CWE-20" ]
memcached
75cc83685e103bc8ba380a57468c8f04413033f9
860,468,099,658,604,900,000,000,000,000,000,000
4
Issue 102: Piping null to the server will crash it
Bool gf_filter_end_of_session(GF_Filter *filter) { if (!filter) return GF_TRUE; return filter->session->in_final_flush; }
0
[ "CWE-787" ]
gpac
da37ec8582266983d0ec4b7550ec907401ec441e
21,927,642,182,966,540,000,000,000,000,000,000,000
5
fixed crashes for very long path - cf #1908
static void __exit cleanup_trusted(void) { trusted_shash_release(); unregister_key_type(&key_type_trusted); }
0
[ "CWE-284", "CWE-264", "CWE-269" ]
linux
096fe9eaea40a17e125569f9e657e34cdb6d73bd
207,192,478,826,790,350,000,000,000,000,000,000,000
5
KEYS: Fix handling of stored error in a negatively instantiated user key If a user key gets negatively instantiated, an error code is cached in the payload area. A negatively instantiated key may be then be positively instantiated by updating it with valid data. However, the ->update key type method must be aware that the error code may be there. The following may be used to trigger the bug in the user key type: keyctl request2 user user "" @u keyctl add user user "a" @u which manifests itself as: BUG: unable to handle kernel paging request at 00000000ffffff8a IP: [<ffffffff810a376f>] __call_rcu.constprop.76+0x1f/0x280 kernel/rcu/tree.c:3046 PGD 7cc30067 PUD 0 Oops: 0002 [#1] SMP Modules linked in: CPU: 3 PID: 2644 Comm: a.out Not tainted 4.3.0+ #49 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Bochs 01/01/2011 task: ffff88003ddea700 ti: ffff88003dd88000 task.ti: ffff88003dd88000 RIP: 0010:[<ffffffff810a376f>] [<ffffffff810a376f>] __call_rcu.constprop.76+0x1f/0x280 [<ffffffff810a376f>] __call_rcu.constprop.76+0x1f/0x280 kernel/rcu/tree.c:3046 RSP: 0018:ffff88003dd8bdb0 EFLAGS: 00010246 RAX: 00000000ffffff82 RBX: 0000000000000000 RCX: 0000000000000001 RDX: ffffffff81e3fe40 RSI: 0000000000000000 RDI: 00000000ffffff82 RBP: ffff88003dd8bde0 R08: ffff88007d2d2da0 R09: 0000000000000000 R10: 0000000000000000 R11: ffff88003e8073c0 R12: 00000000ffffff82 R13: ffff88003dd8be68 R14: ffff88007d027600 R15: ffff88003ddea700 FS: 0000000000b92880(0063) GS:ffff88007fd00000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 000000008005003b CR2: 00000000ffffff8a CR3: 000000007cc5f000 CR4: 00000000000006e0 Stack: ffff88003dd8bdf0 ffffffff81160a8a 0000000000000000 00000000ffffff82 ffff88003dd8be68 ffff88007d027600 ffff88003dd8bdf0 ffffffff810a39e5 ffff88003dd8be20 ffffffff812a31ab ffff88007d027600 ffff88007d027620 Call Trace: [<ffffffff810a39e5>] kfree_call_rcu+0x15/0x20 kernel/rcu/tree.c:3136 [<ffffffff812a31ab>] user_update+0x8b/0xb0 security/keys/user_defined.c:129 [< inline >] __key_update security/keys/key.c:730 [<ffffffff8129e5c1>] key_create_or_update+0x291/0x440 security/keys/key.c:908 [< inline >] SYSC_add_key security/keys/keyctl.c:125 [<ffffffff8129fc21>] SyS_add_key+0x101/0x1e0 security/keys/keyctl.c:60 [<ffffffff8185f617>] entry_SYSCALL_64_fastpath+0x12/0x6a arch/x86/entry/entry_64.S:185 Note the error code (-ENOKEY) in EDX. A similar bug can be tripped by: keyctl request2 trusted user "" @u keyctl add trusted user "a" @u This should also affect encrypted keys - but that has to be correctly parameterised or it will fail with EINVAL before getting to the bit that will crashes. Reported-by: Dmitry Vyukov <[email protected]> Signed-off-by: David Howells <[email protected]> Acked-by: Mimi Zohar <[email protected]> Signed-off-by: James Morris <[email protected]>
static inline bool is_debug(u32 intr_info) { return is_exception_n(intr_info, DB_VECTOR); }
0
[ "CWE-284" ]
linux
727ba748e110b4de50d142edca9d6a9b7e6111d8
269,286,925,733,383,100,000,000,000,000,000,000,000
4
kvm: nVMX: Enforce cpl=0 for VMX instructions VMX instructions executed inside a L1 VM will always trigger a VM exit even when executed with cpl 3. This means we must perform the privilege check in software. Fixes: 70f3aac964ae("kvm: nVMX: Remove superfluous VMX instruction fault checks") Cc: [email protected] Signed-off-by: Felix Wilhelm <[email protected]> Signed-off-by: Paolo Bonzini <[email protected]>
inline void uncacheable(uint8 cause) { safe_to_cache_query= 0; if (current_select) // initialisation SP variables has no SELECT { /* There are no sense to mark select_lex and union fields of LEX, but we should merk all subselects as uncacheable from current till most upper */ SELECT_LEX *sl; SELECT_LEX_UNIT *un; for (sl= current_select, un= sl->master_unit(); un && un != &unit; sl= sl->outer_select(), un= (sl ? sl->master_unit() : NULL)) { sl->uncacheable|= cause; un->uncacheable|= cause; } if (sl) sl->uncacheable|= cause; } if (first_select_lex()) first_select_lex()->uncacheable|= cause; }
0
[ "CWE-703" ]
server
39feab3cd31b5414aa9b428eaba915c251ac34a2
15,119,619,045,406,032,000,000,000,000,000,000,000
26
MDEV-26412 Server crash in Item_field::fix_outer_field for INSERT SELECT IF an INSERT/REPLACE SELECT statement contained an ON expression in the top level select and this expression used a subquery with a column reference that could not be resolved then an attempt to resolve this reference as an outer reference caused a crash of the server. This happened because the outer context field in the Name_resolution_context structure was not set to NULL for such references. Rather it pointed to the first element in the select_stack. Note that starting from 10.4 we cannot use the SELECT_LEX::outer_select() method when parsing a SELECT construct. Approved by Oleksandr Byelkin <[email protected]>
frag6_print(netdissect_options *ndo, register const u_char *bp, register const u_char *bp2) { register const struct ip6_frag *dp; register const struct ip6_hdr *ip6; dp = (const struct ip6_frag *)bp; ip6 = (const struct ip6_hdr *)bp2; ND_TCHECK(dp->ip6f_offlg); if (ndo->ndo_vflag) { ND_PRINT((ndo, "frag (0x%08x:%d|%ld)", EXTRACT_32BITS(&dp->ip6f_ident), EXTRACT_16BITS(&dp->ip6f_offlg) & IP6F_OFF_MASK, sizeof(struct ip6_hdr) + EXTRACT_16BITS(&ip6->ip6_plen) - (long)(bp - bp2) - sizeof(struct ip6_frag))); } else { ND_PRINT((ndo, "frag (%d|%ld)", EXTRACT_16BITS(&dp->ip6f_offlg) & IP6F_OFF_MASK, sizeof(struct ip6_hdr) + EXTRACT_16BITS(&ip6->ip6_plen) - (long)(bp - bp2) - sizeof(struct ip6_frag))); } /* it is meaningless to decode non-first fragment */ if ((EXTRACT_16BITS(&dp->ip6f_offlg) & IP6F_OFF_MASK) != 0) return -1; else { ND_PRINT((ndo, " ")); return sizeof(struct ip6_frag); } trunc: ND_PRINT((ndo, "[|frag]")); return -1; }
1
[ "CWE-125", "CWE-787" ]
tcpdump
2d669862df7cd17f539129049f6fb70d17174125
282,174,911,102,190,380,000,000,000,000,000,000,000
35
CVE-2017-13031/Check for the presence of the entire IPv6 fragment header. This fixes a buffer over-read discovered by Bhargava Shastry, SecT/TU Berlin. Add a test using the capture file supplied by the reporter(s), modified so the capture file won't be rejected as an invalid capture. Clean up some whitespace in tests/TESTLIST while we're at it.
static void seek_floppy(void) { int track; blind_seek = 0; debug_dcl(drive_params[current_drive].flags, "calling disk change from %s\n", __func__); if (!test_bit(FD_DISK_NEWCHANGE_BIT, &drive_state[current_drive].flags) && disk_change(current_drive) && (raw_cmd->flags & FD_RAW_NEED_DISK)) { /* the media changed flag should be cleared after the seek. * If it isn't, this means that there is really no disk in * the drive. */ set_bit(FD_DISK_CHANGED_BIT, &drive_state[current_drive].flags); cont->done(0); cont->redo(); return; } if (drive_state[current_drive].track <= NEED_1_RECAL) { recalibrate_floppy(); return; } else if (test_bit(FD_DISK_NEWCHANGE_BIT, &drive_state[current_drive].flags) && (raw_cmd->flags & FD_RAW_NEED_DISK) && (drive_state[current_drive].track <= NO_TRACK || drive_state[current_drive].track == raw_cmd->track)) { /* we seek to clear the media-changed condition. Does anybody * know a more elegant way, which works on all drives? */ if (raw_cmd->track) track = raw_cmd->track - 1; else { if (drive_params[current_drive].flags & FD_SILENT_DCL_CLEAR) { set_dor(current_fdc, ~(0x10 << UNIT(current_drive)), 0); blind_seek = 1; raw_cmd->flags |= FD_RAW_NEED_SEEK; } track = 1; } } else { check_wp(current_fdc, current_drive); if (raw_cmd->track != drive_state[current_drive].track && (raw_cmd->flags & FD_RAW_NEED_SEEK)) track = raw_cmd->track; else { setup_rw_floppy(); return; } } do_floppy = seek_interrupt; output_byte(current_fdc, FD_SEEK); output_byte(current_fdc, UNIT(current_drive)); if (output_byte(current_fdc, track) < 0) { reset_fdc(); return; } debugt(__func__, ""); }
0
[ "CWE-416" ]
linux
233087ca063686964a53c829d547c7571e3f67bf
240,273,844,731,055,140,000,000,000,000,000,000,000
59
floppy: disable FDRAWCMD by default Minh Yuan reported a concurrency use-after-free issue in the floppy code between raw_cmd_ioctl and seek_interrupt. [ It turns out this has been around, and that others have reported the KASAN splats over the years, but Minh Yuan had a reproducer for it and so gets primary credit for reporting it for this fix - Linus ] The problem is, this driver tends to break very easily and nowadays, nobody is expected to use FDRAWCMD anyway since it was used to manipulate non-standard formats. The risk of breaking the driver is higher than the risk presented by this race, and accessing the device requires privileges anyway. Let's just add a config option to completely disable this ioctl and leave it disabled by default. Distros shouldn't use it, and only those running on antique hardware might need to enable it. Link: https://lore.kernel.org/all/[email protected]/ Link: https://lore.kernel.org/lkml/CAKcFiNC=MfYVW-Jt9A3=FPJpTwCD2PL_ULNCpsCVE5s8ZeBQgQ@mail.gmail.com Link: https://lore.kernel.org/all/CAEAjamu1FRhz6StCe_55XY5s389ZP_xmCF69k987En+1z53=eg@mail.gmail.com Reported-by: Minh Yuan <[email protected]> Reported-by: [email protected] Reported-by: cruise k <[email protected]> Reported-by: Kyungtae Kim <[email protected]> Suggested-by: Linus Torvalds <[email protected]> Tested-by: Denis Efremov <[email protected]> Signed-off-by: Willy Tarreau <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb) { __u8 status = *((__u8 *) skb->data); __u8 param; void *sent; BT_DBG("%s status 0x%2.2x", hdev->name, status); sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE); if (!sent) return; param = *((__u8 *) sent); hci_dev_lock(hdev); if (status) { hdev->discov_timeout = 0; goto done; } if (param & SCAN_INQUIRY) set_bit(HCI_ISCAN, &hdev->flags); else clear_bit(HCI_ISCAN, &hdev->flags); if (param & SCAN_PAGE) set_bit(HCI_PSCAN, &hdev->flags); else clear_bit(HCI_PSCAN, &hdev->flags); done: hci_dev_unlock(hdev); }
0
[ "CWE-290" ]
linux
3ca44c16b0dcc764b641ee4ac226909f5c421aa3
67,300,382,797,711,750,000,000,000,000,000,000,000
34
Bluetooth: Consolidate encryption handling in hci_encrypt_cfm This makes hci_encrypt_cfm calls hci_connect_cfm in case the connection state is BT_CONFIG so callers don't have to check the state. Signed-off-by: Luiz Augusto von Dentz <[email protected]> Signed-off-by: Marcel Holtmann <[email protected]>
static enum test_return cache_redzone_test(void) { #ifndef HAVE_UMEM_H cache_t *cache = cache_create("test", sizeof(uint32_t), sizeof(char*), NULL, NULL); /* Ignore SIGABORT */ struct sigaction old_action; struct sigaction action = { .sa_handler = SIG_IGN, .sa_flags = 0}; sigemptyset(&action.sa_mask); sigaction(SIGABRT, &action, &old_action); /* check memory debug.. */ char *p = cache_alloc(cache); char old = *(p - 1); *(p - 1) = 0; cache_free(cache, p); assert(cache_error == -1); *(p - 1) = old; p[sizeof(uint32_t)] = 0; cache_free(cache, p); assert(cache_error == 1); /* restore signal handler */ sigaction(SIGABRT, &old_action, NULL); cache_destroy(cache); return TEST_PASS; #else return TEST_SKIP; #endif }
0
[ "CWE-20" ]
memcached
75cc83685e103bc8ba380a57468c8f04413033f9
292,894,347,518,059,400,000,000,000,000,000,000,000
34
Issue 102: Piping null to the server will crash it
f_getcmdpos(typval_T *argvars UNUSED, typval_T *rettv) { cmdline_info_T *p = get_ccline_ptr(); rettv->vval.v_number = p != NULL ? p->cmdpos + 1 : 0; }
0
[ "CWE-416" ]
vim
1c3dd8ddcba63c1af5112e567215b3cec2de11d0
176,120,263,822,929,380,000,000,000,000,000,000,000
6
patch 9.0.0490: using freed memory with cmdwin and BufEnter autocmd Problem: Using freed memory with cmdwin and BufEnter autocmd. Solution: Make sure pointer to b_p_iminsert is still valid.
bool AES_GCM_DecryptContext::Decrypt( const void *pEncryptedDataAndTag, size_t cbEncryptedDataAndTag, const void *pIV, void *pPlaintextData, uint32 *pcbPlaintextData, const void *pAdditionalAuthenticationData, size_t cbAuthenticationData ) { unsigned long long pcbPlaintextData_longlong; const int nDecryptResult = crypto_aead_aes256gcm_decrypt_afternm( static_cast<unsigned char*>( pPlaintextData ), &pcbPlaintextData_longlong, nullptr, static_cast<const unsigned char*>( pEncryptedDataAndTag ), cbEncryptedDataAndTag, static_cast<const unsigned char*>( pAdditionalAuthenticationData ), cbAuthenticationData, static_cast<const unsigned char*>( pIV ), static_cast<const crypto_aead_aes256gcm_state*>( m_ctx ) ); *pcbPlaintextData = pcbPlaintextData_longlong; return nDecryptResult == 0; }
1
[ "CWE-787" ]
GameNetworkingSockets
bea84e2844b647532a9b7fbc3a6a8989d66e49e3
232,978,415,129,254,770,000,000,000,000,000,000,000
20
Check if output buffer is too small. It really seems like libsodium (whose entire purpose is to make crypto idiot-proof) making me mess with these details is a flaw in the API design. Also, correct Hungarian.
g_file_enumerate_children (GFile *file, const char *attributes, GFileQueryInfoFlags flags, GCancellable *cancellable, GError **error) { GFileIface *iface; g_return_val_if_fail (G_IS_FILE (file), NULL); if (g_cancellable_set_error_if_cancelled (cancellable, error)) return NULL; iface = G_FILE_GET_IFACE (file); if (iface->enumerate_children == NULL) { g_set_error_literal (error, G_IO_ERROR, G_IO_ERROR_NOT_SUPPORTED, _("Operation not supported")); return NULL; } return (* iface->enumerate_children) (file, attributes, flags, cancellable, error); }
0
[ "CWE-362" ]
glib
d8f8f4d637ce43f8699ba94c9b7648beda0ca174
96,276,474,280,543,000,000,000,000,000,000,000,000
26
gfile: Limit access to files when copying file_copy_fallback creates new files with default permissions and set the correct permissions after the operation is finished. This might cause that the files can be accessible by more users during the operation than expected. Use G_FILE_CREATE_PRIVATE for the new files to limit access to those files.
static void event_isupport(IRC_SERVER_REC *server, const char *data) { char **item, *sptr, *eptr; char **isupport; gpointer key, value; g_return_if_fail(server != NULL); server->isupport_sent = TRUE; sptr = strchr(data, ' '); if (sptr == NULL) return; sptr++; isupport = g_strsplit(sptr, " ", -1); for(item = isupport; *item != NULL; item++) { int removed = FALSE; if (**item == '\0') continue; if (**item == ':') break; sptr = strchr(*item, '='); if (sptr != NULL) { *sptr = '\0'; sptr++; } eptr = *item; if(*eptr == '-') { removed = TRUE; eptr++; } key = value = NULL; if (!g_hash_table_lookup_extended(server->isupport, eptr, &key, &value) && removed) continue; g_hash_table_remove(server->isupport, eptr); if (!removed) { g_hash_table_insert(server->isupport, g_strdup(eptr), g_strdup(sptr != NULL ? sptr : "")); } g_free(key); g_free(value); } g_strfreev(isupport); irc_server_init_isupport(server); }
0
[ "CWE-416" ]
irssi
43e44d553d44e313003cee87e6ea5e24d68b84a1
314,586,589,281,376,050,000,000,000,000,000,000,000
56
Merge branch 'security' into 'master' Security Closes GL#12, GL#13, GL#14, GL#15, GL#16 See merge request irssi/irssi!23
static OPJ_BOOL opj_tcd_dwt_decode ( opj_tcd_t *p_tcd ) { OPJ_UINT32 compno; opj_tcd_tile_t * l_tile = p_tcd->tcd_image->tiles; opj_tcd_tilecomp_t * l_tile_comp = l_tile->comps; opj_tccp_t * l_tccp = p_tcd->tcp->tccps; opj_image_comp_t * l_img_comp = p_tcd->image->comps; for (compno = 0; compno < l_tile->numcomps; compno++) { /* if (tcd->cp->reduce != 0) { tcd->image->comps[compno].resno_decoded = tile->comps[compno].numresolutions - tcd->cp->reduce - 1; if (tcd->image->comps[compno].resno_decoded < 0) { return false; } } numres2decode = tcd->image->comps[compno].resno_decoded + 1; if(numres2decode > 0){ */ if (l_tccp->qmfbid == 1) { if (! opj_dwt_decode(l_tile_comp, l_img_comp->resno_decoded+1)) { return OPJ_FALSE; } } else { if (! opj_dwt_decode_real(l_tile_comp, l_img_comp->resno_decoded+1)) { return OPJ_FALSE; } } ++l_tile_comp; ++l_img_comp; ++l_tccp; } return OPJ_TRUE; }
0
[ "CWE-369" ]
openjpeg
8f9cc62b3f9a1da9712329ddcedb9750d585505c
112,811,212,982,747,140,000,000,000,000,000,000,000
40
Fix division by zero Fix uclouvain/openjpeg#733
RegexMatchExpression::RegexMatchExpression() : LeafMatchExpression(REGEX) {}
0
[]
mongo
b0ef26c639112b50648a02d969298650fbd402a4
282,126,080,999,414,870,000,000,000,000,000,000,000
1
SERVER-51083 Reject invalid UTF-8 from $regex match expressions
void testUriUserInfoHostPort5() { // No user info, no port UriParserStateA stateA; UriUriA uriA; stateA.uri = &uriA; // 0 4 0 3 0 9 const char * const input = "http" "://" "localhost"; TEST_ASSERT(0 == uriParseUriA(&stateA, input)); TEST_ASSERT(uriA.userInfo.first == NULL); TEST_ASSERT(uriA.userInfo.afterLast == NULL); TEST_ASSERT(uriA.hostText.first == input + 4 + 3); TEST_ASSERT(uriA.hostText.afterLast == input + 4 + 3 + 9); TEST_ASSERT(uriA.portText.first == NULL); TEST_ASSERT(uriA.portText.afterLast == NULL); uriFreeUriMembersA(&uriA); }
0
[ "CWE-787" ]
uriparser
864f5d4c127def386dd5cc926ad96934b297f04e
123,425,791,503,303,380,000,000,000,000,000,000,000
17
UriQuery.c: Fix out-of-bounds-write in ComposeQuery and ...Ex Reported by Google Autofuzz team
_lou_getCharFromDots(widechar d) { CharOrDots *cdPtr = getCharOrDots(d, 1, gTable); if (cdPtr) return cdPtr->found; return ' '; }
0
[ "CWE-787" ]
liblouis
fb2bfce4ed49ac4656a8f7e5b5526e4838da1dde
205,669,445,992,356,600,000,000,000,000,000,000,000
5
Fix yet another buffer overflow in the braille table parser Reported by Henri Salo Fixes #592
hb_ot_layout_set_scale (hb_ot_layout_t *layout, hb_16dot16_t x_scale, hb_16dot16_t y_scale) { layout->gpos_info.x_scale = x_scale; layout->gpos_info.y_scale = y_scale; }
0
[]
pango
336bb3201096bdd0494d29926dd44e8cca8bed26
972,544,803,709,066,700,000,000,000,000,000,000
6
[HB] Remove all references to the old code!
static inline bool sig_handler_ignored(void __user *handler, int sig) { /* Is it explicitly or implicitly ignored? */ return handler == SIG_IGN || (handler == SIG_DFL && sig_kernel_ignore(sig)); }
0
[ "CWE-190" ]
linux
d1e7fd6462ca9fc76650fbe6ca800e35b24267da
3,940,218,698,048,722,500,000,000,000,000,000,000
6
signal: Extend exec_id to 64bits Replace the 32bit exec_id with a 64bit exec_id to make it impossible to wrap the exec_id counter. With care an attacker can cause exec_id wrap and send arbitrary signals to a newly exec'd parent. This bypasses the signal sending checks if the parent changes their credentials during exec. The severity of this problem can been seen that in my limited testing of a 32bit exec_id it can take as little as 19s to exec 65536 times. Which means that it can take as little as 14 days to wrap a 32bit exec_id. Adam Zabrocki has succeeded wrapping the self_exe_id in 7 days. Even my slower timing is in the uptime of a typical server. Which means self_exec_id is simply a speed bump today, and if exec gets noticably faster self_exec_id won't even be a speed bump. Extending self_exec_id to 64bits introduces a problem on 32bit architectures where reading self_exec_id is no longer atomic and can take two read instructions. Which means that is is possible to hit a window where the read value of exec_id does not match the written value. So with very lucky timing after this change this still remains expoiltable. I have updated the update of exec_id on exec to use WRITE_ONCE and the read of exec_id in do_notify_parent to use READ_ONCE to make it clear that there is no locking between these two locations. Link: https://lore.kernel.org/kernel-hardening/[email protected] Fixes: 2.3.23pre2 Cc: [email protected] Signed-off-by: "Eric W. Biederman" <[email protected]>
load_attachment_idle (EAttachment *attachment) { e_attachment_load_async ( attachment, (GAsyncReadyCallback) attachment_loaded, NULL); return FALSE; }
0
[ "CWE-347" ]
evolution
9c55a311325f5905d8b8403b96607e46cf343f21
195,989,273,418,907,070,000,000,000,000,000,000,000
8
I#120 - Show security bar above message headers Closes https://gitlab.gnome.org/GNOME/evolution/issues/120
ex_colorscheme(exarg_T *eap) { if (*eap->arg == NUL) { #ifdef FEAT_EVAL char_u *expr = vim_strsave((char_u *)"g:colors_name"); char_u *p = NULL; if (expr != NULL) { ++emsg_off; p = eval_to_string(expr, FALSE); --emsg_off; vim_free(expr); } if (p != NULL) { msg((char *)p); vim_free(p); } else msg("default"); #else msg(_("unknown")); #endif } else if (load_colors(eap->arg) == FAIL) semsg(_(e_cannot_find_color_scheme_str), eap->arg); #ifdef FEAT_VTP else if (has_vtp_working()) { // background color change requires clear + redraw update_screen(CLEAR); redrawcmd(); } #endif }
0
[ "CWE-125" ]
vim
d3a117814d6acbf0dca3eff1a7626843b9b3734a
244,139,622,693,052,230,000,000,000,000,000,000,000
38
patch 8.2.4009: reading one byte beyond the end of the line Problem: Reading one byte beyond the end of the line. Solution: Check for NUL byte first.
static void clientGone(rfbClientPtr cl) { rfbShutdownServer(cl->screen, TRUE); }
0
[ "CWE-665" ]
libvncserver
8b06f835e259652b0ff026898014fc7297ade858
69,651,912,629,396,180,000,000,000,000,000,000,000
4
When connecting to a repeater, only send initialised string Closes #253
static void tcp_ecn_rcv_synack(struct tcp_sock *tp, const struct tcphdr *th) { if ((tp->ecn_flags & TCP_ECN_OK) && (!th->ece || th->cwr)) tp->ecn_flags &= ~TCP_ECN_OK; }
0
[ "CWE-703", "CWE-189" ]
linux
8b8a321ff72c785ed5e8b4cf6eda20b35d427390
312,768,167,334,439,620,000,000,000,000,000,000,000
5
tcp: fix zero cwnd in tcp_cwnd_reduction Patch 3759824da87b ("tcp: PRR uses CRB mode by default and SS mode conditionally") introduced a bug that cwnd may become 0 when both inflight and sndcnt are 0 (cwnd = inflight + sndcnt). This may lead to a div-by-zero if the connection starts another cwnd reduction phase by setting tp->prior_cwnd to the current cwnd (0) in tcp_init_cwnd_reduction(). To prevent this we skip PRR operation when nothing is acked or sacked. Then cwnd must be positive in all cases as long as ssthresh is positive: 1) The proportional reduction mode inflight > ssthresh > 0 2) The reduction bound mode a) inflight == ssthresh > 0 b) inflight < ssthresh sndcnt > 0 since newly_acked_sacked > 0 and inflight < ssthresh Therefore in all cases inflight and sndcnt can not both be 0. We check invalid tp->prior_cwnd to avoid potential div0 bugs. In reality this bug is triggered only with a sequence of less common events. For example, the connection is terminating an ECN-triggered cwnd reduction with an inflight 0, then it receives reordered/old ACKs or DSACKs from prior transmission (which acks nothing). Or the connection is in fast recovery stage that marks everything lost, but fails to retransmit due to local issues, then receives data packets from other end which acks nothing. Fixes: 3759824da87b ("tcp: PRR uses CRB mode by default and SS mode conditionally") Reported-by: Oleksandr Natalenko <[email protected]> Signed-off-by: Yuchung Cheng <[email protected]> Signed-off-by: Neal Cardwell <[email protected]> Signed-off-by: Eric Dumazet <[email protected]> Signed-off-by: David S. Miller <[email protected]>
static int ucma_set_option_level(struct ucma_context *ctx, int level, int optname, void *optval, size_t optlen) { int ret; switch (level) { case RDMA_OPTION_ID: ret = ucma_set_option_id(ctx, optname, optval, optlen); break; case RDMA_OPTION_IB: ret = ucma_set_option_ib(ctx, optname, optval, optlen); break; default: ret = -ENOSYS; } return ret; }
0
[ "CWE-416", "CWE-703" ]
linux
cb2595c1393b4a5211534e6f0a0fbad369e21ad8
247,911,687,830,889,800,000,000,000,000,000,000,000
18
infiniband: fix a possible use-after-free bug ucma_process_join() will free the new allocated "mc" struct, if there is any error after that, especially the copy_to_user(). But in parallel, ucma_leave_multicast() could find this "mc" through idr_find() before ucma_process_join() frees it, since it is already published. So "mc" could be used in ucma_leave_multicast() after it is been allocated and freed in ucma_process_join(), since we don't refcnt it. Fix this by separating "publish" from ID allocation, so that we can get an ID first and publish it later after copy_to_user(). Fixes: c8f6a362bf3e ("RDMA/cma: Add multicast communication support") Reported-by: Noam Rathaus <[email protected]> Signed-off-by: Cong Wang <[email protected]> Signed-off-by: Jason Gunthorpe <[email protected]>
inline int decimal_int_part() const { return my_decimal_int_part(decimal_precision(), decimals); }
0
[]
mysql-server
f7316aa0c9a3909fc7498e7b95d5d3af044a7e21
46,813,756,635,945,220,000,000,000,000,000,000,000
2
Bug#26361149 MYSQL SERVER CRASHES AT: COL IN(IFNULL(CONST, COL), NAME_CONST('NAME', NULL)) Backport of Bug#19143243 fix. NAME_CONST item can return NULL_ITEM type in case of incorrect arguments. NULL_ITEM has special processing in Item_func_in function. In Item_func_in::fix_length_and_dec an array of possible comparators is created. Since NAME_CONST function has NULL_ITEM type, corresponding array element is empty. Then NAME_CONST is wrapped to ITEM_CACHE. ITEM_CACHE can not return proper type(NULL_ITEM) in Item_func_in::val_int(), so the NULL_ITEM is attempted compared with an empty comparator. The fix is to disable the caching of Item_name_const item.
PHP_FUNCTION(xml_parser_create) { php_xml_parser_create_impl(INTERNAL_FUNCTION_PARAM_PASSTHRU, 0); }
0
[ "CWE-119" ]
php-src
1248079be837808da4c97364fb3b4c96c8015fbf
99,289,811,124,453,260,000,000,000,000,000,000,000
4
Fix bug #72099: xml_parse_into_struct segmentation fault
R_API ut64 r_bin_java_annotation_default_attr_calc_size(RBinJavaAttrInfo *attr) { ut64 size = 0; if (attr) { // attr = r_bin_java_default_attr_new (buffer, sz, buf_offset); size += 6; // attr->info.annotation_default_attr.default_value = r_bin_java_element_value_new (buffer+offset, sz-offset, buf_offset+offset); size += r_bin_java_element_value_calc_size (attr->info.annotation_default_attr.default_value); } return size; }
0
[ "CWE-119", "CWE-788" ]
radare2
6c4428f018d385fc80a33ecddcb37becea685dd5
180,642,271,518,039,200,000,000,000,000,000,000,000
10
Improve boundary checks to fix oobread segfaults ##crash * Reported by Cen Zhang via huntr.dev * Reproducer: bins/fuzzed/javaoob-havoc.class
static void write_uid(bytearray_t * bplist, uint64_t val) { uint64_t size = get_needed_bytes(val); uint8_t *buff = NULL; //do not write 3bytes int node if (size == 3) size++; #ifdef __BIG_ENDIAN__ val = val << ((sizeof(uint64_t) - size) * 8); #endif buff = (uint8_t *) malloc(sizeof(uint8_t) + size); buff[0] = BPLIST_UID | Log2(size); memcpy(buff + 1, &val, size); byte_convert(buff + 1, size); byte_array_append(bplist, buff, sizeof(uint8_t) + size); free(buff); }
0
[ "CWE-770" ]
libplist
26061aac4ec75e7a4469a9aab9a424716223e5c4
306,124,649,943,244,700,000,000,000,000,000,000,000
19
bplist: Check for invalid offset_size in bplist trailer
dns_msg_ansadd(struct dns_msg* msg, struct regional* region, struct ub_packed_rrset_key* rrset, time_t now) { if(!(msg->rep->rrsets[msg->rep->rrset_count++] = packed_rrset_copy_region(rrset, region, now))) return 0; msg->rep->an_numrrsets++; return 1; }
0
[ "CWE-400" ]
unbound
ba0f382eee814e56900a535778d13206b86b6d49
229,373,699,287,490,000,000,000,000,000,000,000,000
9
- CVE-2020-12662 Unbound can be tricked into amplifying an incoming query into a large number of queries directed to a target. - CVE-2020-12663 Malformed answers from upstream name servers can be used to make Unbound unresponsive.
void onBelowWriteBufferLowWatermark() override {}
0
[ "CWE-400" ]
envoy
dfddb529e914d794ac552e906b13d71233609bf7
83,574,999,456,888,530,000,000,000,000,000,000,000
1
listener: Add configurable accepted connection limits (#153) Add support for per-listener limits on accepted connections. Signed-off-by: Tony Allen <[email protected]>
TEST_F(HttpConnectionManagerImplTest, TestAccessLogWithInvalidRequest) { setup(false, ""); std::shared_ptr<MockStreamDecoderFilter> filter(new NiceMock<MockStreamDecoderFilter>()); std::shared_ptr<AccessLog::MockInstance> handler(new NiceMock<AccessLog::MockInstance>()); EXPECT_CALL(filter_factory_, createFilterChain(_)) .WillOnce(Invoke([&](FilterChainFactoryCallbacks& callbacks) -> void { callbacks.addStreamDecoderFilter(filter); callbacks.addAccessLogHandler(handler); })); EXPECT_CALL(*handler, log(_, _, _, _)) .WillOnce(Invoke([](const HeaderMap*, const HeaderMap*, const HeaderMap*, const StreamInfo::StreamInfo& stream_info) { EXPECT_TRUE(stream_info.responseCode()); EXPECT_EQ(stream_info.responseCode().value(), uint32_t(400)); EXPECT_EQ("missing_host_header", stream_info.responseCodeDetails().value()); EXPECT_NE(nullptr, stream_info.downstreamLocalAddress()); EXPECT_NE(nullptr, stream_info.downstreamRemoteAddress()); EXPECT_NE(nullptr, stream_info.downstreamDirectRemoteAddress()); EXPECT_EQ(nullptr, stream_info.routeEntry()); })); StreamDecoder* decoder = nullptr; NiceMock<MockStreamEncoder> encoder; EXPECT_CALL(*codec_, dispatch(_)).WillRepeatedly(Invoke([&](Buffer::Instance& data) -> void { decoder = &conn_manager_->newStream(encoder); // These request headers are missing the necessary ":host" HeaderMapPtr headers{new TestHeaderMapImpl{{":method", "GET"}, {":path", "/"}}}; decoder->decodeHeaders(std::move(headers), true); data.drain(0); })); Buffer::OwnedImpl fake_input; conn_manager_->onData(fake_input, false); }
0
[ "CWE-400", "CWE-703" ]
envoy
afc39bea36fd436e54262f150c009e8d72db5014
53,592,762,883,088,410,000,000,000,000,000,000,000
38
Track byteSize of HeaderMap internally. Introduces a cached byte size updated internally in HeaderMap. The value is stored as an optional, and is cleared whenever a non-const pointer or reference to a HeaderEntry is accessed. The cached value can be set with refreshByteSize() which performs an iteration over the HeaderMap to sum the size of each key and value in the HeaderMap. Signed-off-by: Asra Ali <[email protected]>
char *ap_response_code_string(request_rec *r, int error_index) { core_dir_config *dirconf; core_request_config *reqconf = ap_get_core_module_config(r->request_config); const char *err; const char *response; ap_expr_info_t *expr; /* check for string registered via ap_custom_response() first */ if (reqconf->response_code_strings != NULL && reqconf->response_code_strings[error_index] != NULL) { return reqconf->response_code_strings[error_index]; } /* check for string specified via ErrorDocument */ dirconf = ap_get_core_module_config(r->per_dir_config); if (!dirconf->response_code_exprs) { return NULL; } expr = apr_hash_get(dirconf->response_code_exprs, &error_index, sizeof(error_index)); if (!expr) { return NULL; } /* special token to indicate revert back to default */ if ((char *) expr == &errordocument_default) { return NULL; } err = NULL; response = ap_expr_str_exec(r, expr, &err); if (err) { ap_log_rerror( APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(02841) "core: ErrorDocument: can't " "evaluate require expression: %s", err); return NULL; } /* alas, duplication required as we return not-const */ return apr_pstrdup(r->pool, response); }
0
[ "CWE-416", "CWE-284" ]
httpd
4cc27823899e070268b906ca677ee838d07cf67a
298,863,895,830,819,660,000,000,000,000,000,000,000
44
core: Disallow Methods' registration at run time (.htaccess), they may be used only if registered at init time (httpd.conf). Calling ap_method_register() in children processes is not the right scope since it won't be shared for all requests. git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/trunk@1807655 13f79535-47bb-0310-9956-ffa450edef68
struct sc_card_driver * sc_get_tcos_driver(void) { struct sc_card_driver *iso_drv = sc_get_iso7816_driver(); if (iso_ops == NULL) iso_ops = iso_drv->ops; tcos_ops = *iso_drv->ops; tcos_ops.match_card = tcos_match_card; tcos_ops.init = tcos_init; tcos_ops.finish = tcos_finish; tcos_ops.create_file = tcos_create_file; tcos_ops.set_security_env = tcos_set_security_env; tcos_ops.select_file = tcos_select_file; tcos_ops.list_files = tcos_list_files; tcos_ops.delete_file = tcos_delete_file; tcos_ops.compute_signature = tcos_compute_signature; tcos_ops.decipher = tcos_decipher; tcos_ops.restore_security_env = tcos_restore_security_env; tcos_ops.card_ctl = tcos_card_ctl; return &tcos_drv; }
0
[ "CWE-787" ]
OpenSC
9d294de90d1cc66956389856e60b6944b27b4817
80,805,254,200,391,235,000,000,000,000,000,000,000
22
prevent out of bounds write fixes https://oss-fuzz.com/testcase-detail/5226571123392512
static int handle_invept(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); u32 vmx_instruction_info, types; unsigned long type; gva_t gva; struct x86_exception e; struct { u64 eptp, gpa; } operand; if (!(vmx->nested.nested_vmx_secondary_ctls_high & SECONDARY_EXEC_ENABLE_EPT) || !(vmx->nested.nested_vmx_ept_caps & VMX_EPT_INVEPT_BIT)) { kvm_queue_exception(vcpu, UD_VECTOR); return 1; } if (!nested_vmx_check_permission(vcpu)) return 1; if (!kvm_read_cr0_bits(vcpu, X86_CR0_PE)) { kvm_queue_exception(vcpu, UD_VECTOR); return 1; } vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf); types = (vmx->nested.nested_vmx_ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6; if (!(types & (1UL << type))) { nested_vmx_failValid(vcpu, VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); skip_emulated_instruction(vcpu); return 1; } /* According to the Intel VMX instruction reference, the memory * operand is read even if it isn't needed (e.g., for type==global) */ if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION), vmx_instruction_info, false, &gva)) return 1; if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &operand, sizeof(operand), &e)) { kvm_inject_page_fault(vcpu, &e); return 1; } switch (type) { case VMX_EPT_EXTENT_GLOBAL: kvm_mmu_sync_roots(vcpu); kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); nested_vmx_succeed(vcpu); break; default: /* Trap single context invalidation invept calls */ BUG_ON(1); break; } skip_emulated_instruction(vcpu); return 1; }
0
[ "CWE-284", "CWE-264" ]
linux
3ce424e45411cf5a13105e0386b6ecf6eeb4f66f
267,702,689,412,883,260,000,000,000,000,000,000,000
65
kvm:vmx: more complete state update on APICv on/off The function to update APICv on/off state (in particular, to deactivate it when enabling Hyper-V SynIC) is incomplete: it doesn't adjust APICv-related fields among secondary processor-based VM-execution controls. As a result, Windows 2012 guests get stuck when SynIC-based auto-EOI interrupt intersected with e.g. an IPI in the guest. In addition, the MSR intercept bitmap isn't updated every time "virtualize x2APIC mode" is toggled. This path can only be triggered by a malicious guest, because Windows didn't use x2APIC but rather their own synthetic APIC access MSRs; however a guest running in a SynIC-enabled VM could switch to x2APIC and thus obtain direct access to host APIC MSRs (CVE-2016-4440). The patch fixes those omissions. Signed-off-by: Roman Kagan <[email protected]> Reported-by: Steve Rutherford <[email protected]> Reported-by: Yang Zhang <[email protected]> Signed-off-by: Paolo Bonzini <[email protected]>
QPDF::setIgnoreXRefStreams(bool val) { this->m->ignore_xref_streams = val; }
0
[ "CWE-125" ]
qpdf
1868a10f8b06631362618bfc85ca8646da4b4b71
324,175,150,955,399,800,000,000,000,000,000,000,000
4
Replace all atoi calls with QUtil::string_to_int The latter catches underflow/overflow.
int migrate_huge_page_move_mapping(struct address_space *mapping, struct page *newpage, struct page *page) { int expected_count; void **pslot; spin_lock_irq(&mapping->tree_lock); pslot = radix_tree_lookup_slot(&mapping->page_tree, page_index(page)); expected_count = 2 + page_has_private(page); if (page_count(page) != expected_count || radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) { spin_unlock_irq(&mapping->tree_lock); return -EAGAIN; } if (!page_ref_freeze(page, expected_count)) { spin_unlock_irq(&mapping->tree_lock); return -EAGAIN; } newpage->index = page->index; newpage->mapping = page->mapping; get_page(newpage); radix_tree_replace_slot(&mapping->page_tree, pslot, newpage); page_ref_unfreeze(page, expected_count - 1); spin_unlock_irq(&mapping->tree_lock); return MIGRATEPAGE_SUCCESS; }
0
[ "CWE-200" ]
linux
197e7e521384a23b9e585178f3f11c9fa08274b9
192,371,833,542,533,200,000,000,000,000,000,000,000
36
Sanitize 'move_pages()' permission checks The 'move_paghes()' system call was introduced long long ago with the same permission checks as for sending a signal (except using CAP_SYS_NICE instead of CAP_SYS_KILL for the overriding capability). That turns out to not be a great choice - while the system call really only moves physical page allocations around (and you need other capabilities to do a lot of it), you can check the return value to map out some the virtual address choices and defeat ASLR of a binary that still shares your uid. So change the access checks to the more common 'ptrace_may_access()' model instead. This tightens the access checks for the uid, and also effectively changes the CAP_SYS_NICE check to CAP_SYS_PTRACE, but it's unlikely that anybody really _uses_ this legacy system call any more (we hav ebetter NUMA placement models these days), so I expect nobody to notice. Famous last words. Reported-by: Otto Ebeling <[email protected]> Acked-by: Eric W. Biederman <[email protected]> Cc: Willy Tarreau <[email protected]> Cc: [email protected] Signed-off-by: Linus Torvalds <[email protected]>
void close_thread_table(THD *thd, TABLE **table_ptr) { TABLE *table= *table_ptr; DBUG_ENTER("close_thread_table"); DBUG_PRINT("tcache", ("table: '%s'.'%s' %p", table->s->db.str, table->s->table_name.str, table)); DBUG_ASSERT(!table->file->keyread_enabled()); DBUG_ASSERT(!table->file || table->file->inited == handler::NONE); /* The metadata lock must be released after giving back the table to the table cache. */ DBUG_ASSERT(thd->mdl_context.is_lock_owner(MDL_key::TABLE, table->s->db.str, table->s->table_name.str, MDL_SHARED)); table->vcol_cleanup_expr(thd); table->mdl_ticket= NULL; if (table->file) { table->file->update_global_table_stats(); table->file->update_global_index_stats(); } /* This look is needed to allow THD::notify_shared_lock() to traverse the thd->open_tables list without having to worry that some of the tables are removed from under it */ mysql_mutex_lock(&thd->LOCK_thd_data); *table_ptr=table->next; mysql_mutex_unlock(&thd->LOCK_thd_data); if (! table->needs_reopen()) { /* Avoid having MERGE tables with attached children in table cache. */ table->file->extra(HA_EXTRA_DETACH_CHILDREN); /* Free memory and reset for next loop. */ free_field_buffers_larger_than(table, MAX_TDC_BLOB_SIZE); table->file->ha_reset(); } /* Do this *before* entering the TABLE_SHARE::tdc.LOCK_table_share critical section. */ MYSQL_UNBIND_TABLE(table->file); tc_release_table(table); DBUG_VOID_RETURN; }
0
[ "CWE-416", "CWE-703" ]
server
08c7ab404f69d9c4ca6ca7a9cf7eec74c804f917
13,297,081,275,927,098,000,000,000,000,000,000,000
55
MDEV-24176 Server crashes after insert in the table with virtual column generated using date_format() and if() vcol_info->expr is allocated on expr_arena at parsing stage. Since expr item is allocated on expr_arena all its containee items must be allocated on expr_arena too. Otherwise fix_session_expr() will encounter prematurely freed item. When table is reopened from cache vcol_info contains stale expression. We refresh expression via TABLE::vcol_fix_exprs() but first we must prepare a proper context (Vcol_expr_context) which meets some requirements: 1. As noted above expr update must be done on expr_arena as there may be new items created. It was a bug in fix_session_expr_for_read() and was just not reproduced because of no second refix. Now refix is done for more cases so it does reproduce. Tests affected: vcol.binlog 2. Also name resolution context must be narrowed to the single table. Tested by: vcol.update main.default vcol.vcol_syntax gcol.gcol_bugfixes 3. sql_mode must be clean and not fail expr update. sql_mode such as MODE_NO_BACKSLASH_ESCAPES, MODE_NO_ZERO_IN_DATE, etc must not affect vcol expression update. If the table was created successfully any further evaluation must not fail. Tests affected: main.func_like Reviewed by: Sergei Golubchik <[email protected]>
GF_Err clef_box_read(GF_Box *s, GF_BitStream *bs) { GF_ApertureBox *ptr = (GF_ApertureBox *)s; ISOM_DECREASE_SIZE(ptr, 8); ptr->width = gf_bs_read_u32(bs); ptr->height = gf_bs_read_u32(bs); return GF_OK; }
0
[ "CWE-476" ]
gpac
6170024568f4dda310e98ef7508477b425c58d09
261,518,793,607,123,900,000,000,000,000,000,000,000
8
fixed potential crash - cf #1263
static bool check_underflow(const struct arpt_entry *e) { const struct xt_entry_target *t; unsigned int verdict; if (!unconditional(&e->arp)) return false; t = arpt_get_target_c(e); if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0) return false; verdict = ((struct xt_standard_target *)t)->verdict; verdict = -verdict - 1; return verdict == NF_DROP || verdict == NF_ACCEPT; }
1
[ "CWE-119", "CWE-787" ]
linux
54d83fc74aa9ec72794373cb47432c5f7fb1a309
34,864,711,861,777,957,000,000,000,000,000,000,000
14
netfilter: x_tables: fix unconditional helper Ben Hawkes says: In the mark_source_chains function (net/ipv4/netfilter/ip_tables.c) it is possible for a user-supplied ipt_entry structure to have a large next_offset field. This field is not bounds checked prior to writing a counter value at the supplied offset. Problem is that mark_source_chains should not have been called -- the rule doesn't have a next entry, so its supposed to return an absolute verdict of either ACCEPT or DROP. However, the function conditional() doesn't work as the name implies. It only checks that the rule is using wildcard address matching. However, an unconditional rule must also not be using any matches (no -m args). The underflow validator only checked the addresses, therefore passing the 'unconditional absolute verdict' test, while mark_source_chains also tested for presence of matches, and thus proceeeded to the next (not-existent) rule. Unify this so that all the callers have same idea of 'unconditional rule'. Reported-by: Ben Hawkes <[email protected]> Signed-off-by: Florian Westphal <[email protected]> Signed-off-by: Pablo Neira Ayuso <[email protected]>
p11_kit_iter_load_attributes (P11KitIter *iter, CK_ATTRIBUTE *template, CK_ULONG count) { CK_ATTRIBUTE *original = NULL; CK_ULONG i; CK_RV rv; return_val_if_fail (iter != NULL, CKR_GENERAL_ERROR); return_val_if_fail (iter->iterating, CKR_GENERAL_ERROR); return_val_if_fail (iter->module != NULL, CKR_GENERAL_ERROR); return_val_if_fail (iter->session != 0, CKR_GENERAL_ERROR); return_val_if_fail (iter->object != 0, CKR_GENERAL_ERROR); if (count == 0) return CKR_OK; original = memdup (template, count * sizeof (CK_ATTRIBUTE)); return_val_if_fail (original != NULL, CKR_HOST_MEMORY); for (i = 0; i < count; i++) template[i].pValue = NULL; rv = (iter->module->C_GetAttributeValue) (iter->session, iter->object, template, count); switch (rv) { case CKR_OK: case CKR_ATTRIBUTE_TYPE_INVALID: case CKR_ATTRIBUTE_SENSITIVE: case CKR_BUFFER_TOO_SMALL: break; default: free (original); return rv; } for (i = 0; i < count; i++) { if (template[i].ulValueLen == (CK_ULONG)-1 || template[i].ulValueLen == 0) { free (original[i].pValue); } else if (original[i].pValue != NULL && template[i].ulValueLen == original[i].ulValueLen) { template[i].pValue = original[i].pValue; } else { template[i].pValue = realloc (original[i].pValue, template[i].ulValueLen); return_val_if_fail (template[i].pValue != NULL, CKR_HOST_MEMORY); } } free (original); rv = (iter->module->C_GetAttributeValue) (iter->session, iter->object, template, count); switch (rv) { case CKR_OK: case CKR_ATTRIBUTE_TYPE_INVALID: case CKR_ATTRIBUTE_SENSITIVE: rv = CKR_OK; break; default: return_val_if_fail (rv != CKR_BUFFER_TOO_SMALL, rv); return rv; } for (i = 0; i < count; i++) { if (template[i].ulValueLen == (CK_ULONG)-1 || template[i].ulValueLen == 0) { free (template[i].pValue); template[i].pValue = NULL; } } return rv; }
0
[ "CWE-190" ]
p11-kit
5307a1d21a50cacd06f471a873a018d23ba4b963
178,096,546,640,448,000,000,000,000,000,000,000,000
76
Check for arithmetic overflows before allocating
long inc_rlimit_ucounts(struct ucounts *ucounts, enum ucount_type type, long v) { struct ucounts *iter; long max = LONG_MAX; long ret = 0; for (iter = ucounts; iter; iter = iter->ns->ucounts) { long new = atomic_long_add_return(v, &iter->ucount[type]); if (new < 0 || new > max) ret = LONG_MAX; else if (iter == ucounts) ret = new; max = READ_ONCE(iter->ns->ucount_max[type]); } return ret; }
0
[ "CWE-416" ]
linux
f9d87929d451d3e649699d0f1d74f71f77ad38f5
294,322,207,646,513,870,000,000,000,000,000,000,000
16
ucount: Make get_ucount a safe get_user replacement When the ucount code was refactored to create get_ucount it was missed that some of the contexts in which a rlimit is kept elevated can be the only reference to the user/ucount in the system. Ordinary ucount references exist in places that also have a reference to the user namspace, but in POSIX message queues, the SysV shm code, and the SIGPENDING code there is no independent user namespace reference. Inspection of the the user_namespace show no instance of circular references between struct ucounts and the user_namespace. So hold a reference from struct ucount to i's user_namespace to resolve this problem. Link: https://lore.kernel.org/lkml/[email protected]/ Reported-by: Qian Cai <[email protected]> Reported-by: Mathias Krause <[email protected]> Tested-by: Mathias Krause <[email protected]> Reviewed-by: Mathias Krause <[email protected]> Reviewed-by: Alexey Gladkov <[email protected]> Fixes: d64696905554 ("Reimplement RLIMIT_SIGPENDING on top of ucounts") Fixes: 6e52a9f0532f ("Reimplement RLIMIT_MSGQUEUE on top of ucounts") Fixes: d7c9e99aee48 ("Reimplement RLIMIT_MEMLOCK on top of ucounts") Cc: [email protected] Signed-off-by: "Eric W. Biederman" <[email protected]>
static void nfs4_update_session(struct nfs4_session *session, struct nfs41_create_session_res *res) { nfs4_copy_sessionid(&session->sess_id, &res->sessionid); /* Mark client id and session as being confirmed */ session->clp->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R; set_bit(NFS4_SESSION_ESTABLISHED, &session->session_state); session->flags = res->flags; memcpy(&session->fc_attrs, &res->fc_attrs, sizeof(session->fc_attrs)); if (res->flags & SESSION4_BACK_CHAN) memcpy(&session->bc_attrs, &res->bc_attrs, sizeof(session->bc_attrs)); }
0
[ "CWE-787" ]
linux
b4487b93545214a9db8cbf32e86411677b0cca21
74,970,151,861,589,890,000,000,000,000,000,000,000
13
nfs: Fix getxattr kernel panic and memory overflow Move the buffer size check to decode_attr_security_label() before memcpy() Only call memcpy() if the buffer is large enough Fixes: aa9c2669626c ("NFS: Client implementation of Labeled-NFS") Signed-off-by: Jeffrey Mitchell <[email protected]> [Trond: clean up duplicate test of label->len != 0] Signed-off-by: Trond Myklebust <[email protected]>
static void aer_log_del_err(PCIEAERLog *aer_log, PCIEAERErr *err) { assert(aer_log->log_num); *err = aer_log->log[0]; aer_log->log_num--; memmove(&aer_log->log[0], &aer_log->log[1], aer_log->log_num * sizeof *err); }
0
[ "CWE-119" ]
qemu
5f691ff91d323b6f97c6600405a7f9dc115a0ad1
200,803,550,510,377,970,000,000,000,000,000,000,000
8
hw/pci/pcie_aer.c: fix buffer overruns on invalid state load 4) CVE-2013-4529 hw/pci/pcie_aer.c pcie aer log can overrun the buffer if log_num is too large There are two issues in this file: 1. log_max from remote can be larger than on local then buffer will overrun with data coming from state file. 2. log_num can be larger then we get data corruption again with an overflow but not adversary controlled. Fix both issues. Reported-by: Anthony Liguori <[email protected]> Reported-by: Michael S. Tsirkin <[email protected]> Signed-off-by: Michael S. Tsirkin <[email protected]> Reviewed-by: Dr. David Alan Gilbert <[email protected]> Signed-off-by: Juan Quintela <[email protected]>