func (string, lengths 0 to 484k) | target (int64, values 0 to 1) | cwe (list, lengths 0 to 4) | project (string, 799 classes) | commit_id (string, length 40) | hash (float64) | size (int64, 1 to 24k) | message (string, lengths 0 to 13.3k) |
---|---|---|---|---|---|---|---|
static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
int err;
int skb_len;
unsigned long flags;
struct sk_buff_head *list = &sk->sk_receive_queue;
struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
(unsigned int)sk->sk_rcvbuf && rx_flow_is_on(cf_sk)) {
net_dbg_ratelimited("sending flow OFF (queue len = %d %d)\n",
atomic_read(&cf_sk->sk.sk_rmem_alloc),
sk_rcvbuf_lowwater(cf_sk));
set_rx_flow_off(cf_sk);
caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
}
err = sk_filter(sk, skb);
if (err)
return err;
if (!sk_rmem_schedule(sk, skb, skb->truesize) && rx_flow_is_on(cf_sk)) {
set_rx_flow_off(cf_sk);
net_dbg_ratelimited("sending flow OFF due to rmem_schedule\n");
caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
}
skb->dev = NULL;
skb_set_owner_r(skb, sk);
/* Cache the SKB length before we tack it onto the receive
* queue. Once it is added it no longer belongs to us and
* may be freed by other threads of control pulling packets
* from the queue.
*/
skb_len = skb->len;
spin_lock_irqsave(&list->lock, flags);
if (!sock_flag(sk, SOCK_DEAD))
__skb_queue_tail(list, skb);
spin_unlock_irqrestore(&list->lock, flags);
if (!sock_flag(sk, SOCK_DEAD))
sk->sk_data_ready(sk, skb_len);
else
kfree_skb(skb);
return 0;
} | 0 | [
"CWE-20",
"CWE-269"
]
| linux | f3d3342602f8bcbf37d7c46641cb9bca7618eb1c | 64,165,043,842,099,470,000,000,000,000,000,000,000 | 44 | net: rework recvmsg handler msg_name and msg_namelen logic
This patch now always passes msg->msg_namelen as 0. recvmsg handlers must
set msg_namelen to the proper size <= sizeof(struct sockaddr_storage)
to return msg_name to the user.
This prevents numerous uninitialized memory leaks we had in the
recvmsg handlers and makes it harder for new code to accidentally leak
uninitialized memory.
Optimize for the case recvfrom is called with NULL as address. We don't
need to copy the address at all, so set it to NULL before invoking the
recvmsg handler. We can do so, because all the recvmsg handlers must
cope with the case a plain read() is called on them. read() also sets
msg_name to NULL.
Also document these changes in include/linux/net.h as suggested by David
Miller.
Changes since RFC:
Set msg->msg_name = NULL if user specified a NULL in msg_name but had a
non-null msg_namelen in verify_iovec/verify_compat_iovec. This doesn't
affect sendto as it would bail out earlier while trying to copy-in the
address. It also more naturally reflects the logic by the callers of
verify_iovec.
With this change in place I could remove "
if (!uaddr || msg_sys->msg_namelen == 0)
msg->msg_name = NULL
".
This change does not alter the user visible error logic as we ignore
msg_namelen as long as msg_name is NULL.
Also remove two unnecessary curly brackets in ___sys_recvmsg and change
comments to netdev style.
Cc: David Miller <[email protected]>
Suggested-by: Eric Dumazet <[email protected]>
Signed-off-by: Hannes Frederic Sowa <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
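To make the contract in the commit message above concrete, here is a minimal sketch in plain C of how a recvmsg-style handler is expected to behave: leave msg_namelen at 0 unless it actually writes an address, and never report more than it wrote. The helper name fill_peer_name is hypothetical, not a kernel API.

#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>

/* Sketch of the msg_name/msg_namelen contract described above. */
static void fill_peer_name(struct msghdr *msg, const struct sockaddr_in *peer)
{
	/* The caller is assumed to have set msg->msg_namelen = 0 up front. */
	if (msg->msg_name == NULL)
		return;	/* plain read()/recv(): no address requested */

	/* Report a length only for bytes actually written, and never more
	 * than sizeof(struct sockaddr_storage). */
	memcpy(msg->msg_name, peer, sizeof(*peer));
	msg->msg_namelen = sizeof(*peer);
}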
AVCPBProperties *ff_add_cpb_side_data(AVCodecContext *avctx)
{
AVPacketSideData *tmp;
AVCPBProperties *props;
size_t size;
props = av_cpb_properties_alloc(&size);
if (!props)
return NULL;
tmp = av_realloc_array(avctx->coded_side_data, avctx->nb_coded_side_data + 1, sizeof(*tmp));
if (!tmp) {
av_freep(&props);
return NULL;
}
avctx->coded_side_data = tmp;
avctx->nb_coded_side_data++;
avctx->coded_side_data[avctx->nb_coded_side_data - 1].type = AV_PKT_DATA_CPB_PROPERTIES;
avctx->coded_side_data[avctx->nb_coded_side_data - 1].data = (uint8_t*)props;
avctx->coded_side_data[avctx->nb_coded_side_data - 1].size = size;
return props;
} | 0 | [
"CWE-787"
]
| FFmpeg | 2080bc33717955a0e4268e738acf8c1eeddbf8cb | 18,647,583,423,308,394,000,000,000,000,000,000,000 | 25 | avcodec/utils: correct align value for interplay
Fixes out of array access
Fixes: 452/fuzz-1-ffmpeg_VIDEO_AV_CODEC_ID_INTERPLAY_VIDEO_fuzzer
Found-by: continuous fuzzing process https://github.com/google/oss-fuzz/tree/master/targets/ffmpeg
Signed-off-by: Michael Niedermayer <[email protected]> |
TEST_F(AllowMissingInAndListTest, BadJwt) {
// Bad JWT should fail.
EXPECT_CALL(mock_cb_, onComplete(Status::JwtVerificationFail));
auto headers = Http::TestRequestHeaderMapImpl{{kExampleHeader, NonExistKidToken}};
context_ = Verifier::createContext(headers, parent_span_, &mock_cb_);
verifier_->verify(context_);
EXPECT_THAT(headers, JwtOutputFailedOrIgnore(kExampleHeader));
} | 0 | [
"CWE-303",
"CWE-703"
]
| envoy | ea39e3cba652bcc4b11bb0d5c62b017e584d2e5a | 189,742,069,718,990,300,000,000,000,000,000,000,000 | 8 | jwt_authn: fix a bug where JWT with wrong issuer is allowed in allow_missing case (#15194)
[jwt] When allow_missing is used inside RequiresAny, the requests with JWT with wrong issuer are accepted. This is a bug, allow_missing should only allow requests without any JWT. This change fixed the above issue by preserving JwtUnknownIssuer in allow_missing case.
Signed-off-by: Wayne Zhang <[email protected]> |
void mem_nop (void *opaque, void *ptr){
} | 0 | [
"CWE-399",
"CWE-190"
]
| lepton | 6a5ceefac1162783fffd9506a3de39c85c725761 | 262,592,781,592,874,350,000,000,000,000,000,000,000 | 3 | fix #111 |
static void received_conflicting_request(REQUEST *request,
const RADCLIENT *client)
{
radlog(L_ERR, "Received conflicting packet from "
"client %s port %d - ID: %d due to unfinished request %u. Giving up on old request.",
client->shortname,
request->packet->src_port, request->packet->id,
request->number);
/*
* Nuke it from the request hash, so we can receive new
* packets.
*/
remove_from_request_hash(request);
switch (request->child_state) {
#ifdef HAVE_PTHREAD_H
/*
* It's queued or running. Tell it to stop, and
* wait for it to do so.
*/
case REQUEST_QUEUED:
case REQUEST_RUNNING:
request->master_state = REQUEST_STOP_PROCESSING;
request->delay += request->delay >> 1;
tv_add(&request->when, request->delay);
INSERT_EVENT(wait_for_child_to_die, request);
return;
#endif
/*
* Catch race conditions. It may have switched
* from running to done while this code is being
* executed.
*/
case REQUEST_REJECT_DELAY:
case REQUEST_CLEANUP_DELAY:
case REQUEST_DONE:
break;
/*
* It's in some other state, and therefore also
* in the event queue. At some point, the
* child will notice, and we can then delete it.
*/
case REQUEST_PROXIED:
default:
rad_assert(request->ev != NULL);
break;
}
} | 0 | [
"CWE-399"
]
| freeradius-server | ff94dd35673bba1476594299d31ce8293b8bd223 | 336,663,497,261,288,200,000,000,000,000,000,000,000 | 53 | Do not delete "old" requests until they are free.
If the request is in the queue for 30+ seconds, do NOT delete it.
Instead, mark it as "STOP PROCESSING", and do "wait_for_child_to_die",
which waits for a child thread to pick it up, and acknowledge that it's
done. Once it's marked done, we can finally clean it up.
This may be the underlying issue behind bug #35 |
autocmd_supported(char_u *name)
{
char_u *p;
return (event_name2nr(name, &p) != NUM_EVENTS);
} | 0 | [
"CWE-200",
"CWE-668"
]
| vim | 5a73e0ca54c77e067c3b12ea6f35e3e8681e8cf8 | 168,432,293,518,501,370,000,000,000,000,000,000,000 | 6 | patch 8.0.1263: others can read the swap file if a user is careless
Problem: Others can read the swap file if a user is careless with his
primary group.
Solution: If the group permission allows for reading but the world
permissions doesn't, make sure the group is right. |
ex_popup(exarg_T *eap)
{
# if defined(FEAT_GUI_MSWIN) || defined(FEAT_GUI_GTK)
if (gui.in_use)
gui_make_popup(eap->arg, eap->forceit);
# ifdef FEAT_TERM_POPUP_MENU
else
# endif
# endif
# ifdef FEAT_TERM_POPUP_MENU
pum_make_popup(eap->arg, eap->forceit);
# endif
} | 0 | [
"CWE-78"
]
| vim | 8c62a08faf89663e5633dc5036cd8695c80f1075 | 180,892,469,990,934,100,000,000,000,000,000,000,000 | 13 | patch 8.1.0881: can execute shell commands in rvim through interfaces
Problem: Can execute shell commands in rvim through interfaces.
Solution: Disable using interfaces in restricted mode. Allow for writing
file with writefile(), histadd() and a few others. |
tor_tls_handshake(tor_tls_t *tls)
{
int r;
tor_assert(tls);
tor_assert(tls->ssl);
tor_assert(tls->state == TOR_TLS_ST_HANDSHAKE);
check_no_tls_errors();
if (tls->isServer) {
r = SSL_accept(tls->ssl);
} else {
r = SSL_connect(tls->ssl);
}
/* We need to call this here and not earlier, since OpenSSL has a penchant
* for clearing its flags when you say accept or connect. */
tor_tls_unblock_renegotiation(tls);
r = tor_tls_get_error(tls,r,0, "handshaking", LOG_INFO);
if (ERR_peek_error() != 0) {
tls_log_errors(tls, tls->isServer ? LOG_INFO : LOG_WARN,
"handshaking");
return TOR_TLS_ERROR_MISC;
}
if (r == TOR_TLS_DONE) {
tls->state = TOR_TLS_ST_OPEN;
if (tls->isServer) {
SSL_set_info_callback(tls->ssl, NULL);
SSL_set_verify(tls->ssl, SSL_VERIFY_PEER, always_accept_verify_cb);
/* There doesn't seem to be a clear OpenSSL API to clear mode flags. */
tls->ssl->mode &= ~SSL_MODE_NO_AUTO_CHAIN;
#ifdef V2_HANDSHAKE_SERVER
if (tor_tls_client_is_using_v2_ciphers(tls->ssl, ADDR(tls))) {
/* This check is redundant, but back when we did it in the callback,
* we might have not been able to look up the tor_tls_t if the code
* was buggy. Fixing that. */
if (!tls->wasV2Handshake) {
log_warn(LD_BUG, "For some reason, wasV2Handshake didn't"
" get set. Fixing that.");
}
tls->wasV2Handshake = 1;
log_debug(LD_NET, "Completed V2 TLS handshake with client; waiting "
"for renegotiation.");
} else {
tls->wasV2Handshake = 0;
}
#endif
} else {
#ifdef V2_HANDSHAKE_CLIENT
/* If we got no ID cert, we're a v2 handshake. */
X509 *cert = SSL_get_peer_certificate(tls->ssl);
STACK_OF(X509) *chain = SSL_get_peer_cert_chain(tls->ssl);
int n_certs = sk_X509_num(chain);
if (n_certs > 1 || (n_certs == 1 && cert != sk_X509_value(chain, 0)))
tls->wasV2Handshake = 0;
else {
log_debug(LD_NET, "Server sent back a single certificate; looks like "
"a v2 handshake on %p.", tls);
tls->wasV2Handshake = 1;
}
if (cert)
X509_free(cert);
#endif
if (SSL_set_cipher_list(tls->ssl, SERVER_CIPHER_LIST) == 0) {
tls_log_errors(NULL, LOG_WARN, "re-setting ciphers");
r = TOR_TLS_ERROR_MISC;
}
}
}
return r;
} | 0 | [
"CWE-264"
]
| tor | 638fdedcf16cf7d6f7c586d36f7ef335c1c9714f | 99,842,456,592,635,580,000,000,000,000,000,000,000 | 68 | Don't send a certificate chain on outgoing TLS connections from non-relays |
CImg<T> get_closing(const CImg<t>& kernel, const unsigned int boundary_conditions=1,
const bool is_real=false) const {
const int sx = kernel.width(), sy = kernel.height(), sz = kernel.depth();
if (is_empty() || (sx<=1 && sy<=1 && sz<=1)) return *this;
const int sx1 = (int)(sx - 1)/2, sy1 = (int)(sy - 1)/2, sz1 = (int)(sz - 1)/2;
CImg<T> res;
if (_depth>1) { // 3D
get_resize(width() + sx + 1,height() + sy + 1,depth() + sz + 1,spectrum(),0,boundary_conditions,0.5,0.5,0.5).
dilate(kernel,1,is_real).erode(kernel,1,is_real).
crop(sx1 + 1,sy1 + 1,sz1 + 1,sx1 + width(),sy1 + height(),sz1 + depth()).move_to(res);
} else if (_height>1) { // 2D
get_resize(width() + sx + 1,height() + sy + 1,1,spectrum(),0,boundary_conditions,0.5,0.5).
dilate(kernel,1,is_real).erode(kernel,1,is_real).
crop(sx1 + 1,sy1 + 1,sx1 + width(),sy1 + height()).move_to(res);
} else if (_width>1) { // 1D
get_resize(width() + sx + 1,1,1,spectrum(),0,boundary_conditions,0.5).
dilate(kernel,1,is_real).erode(kernel,1,is_real).
crop(sx1 + 1,sx1 + width()).move_to(res);
}
return res;
} | 0 | [
"CWE-770"
]
| cimg | 619cb58dd90b4e03ac68286c70ed98acbefd1c90 | 47,455,790,610,429,830,000,000,000,000,000,000,000 | 21 | CImg<>::load_bmp() and CImg<>::load_pandore(): Check that dimensions encoded in file does not exceed file size. |
int nci_core_rsp_packet(struct nci_dev *ndev, __u16 opcode,
struct sk_buff *skb)
{
return nci_op_rsp_packet(ndev, opcode, skb, ndev->ops->core_ops,
ndev->ops->n_core_ops);
} | 0 | []
| linux | 48b71a9e66c2eab60564b1b1c85f4928ed04e406 | 175,332,217,068,514,330,000,000,000,000,000,000,000 | 6 | NFC: add NCI_UNREG flag to eliminate the race
There are two sites that calls queue_work() after the
destroy_workqueue() and lead to possible UAF.
The first site is nci_send_cmd(), which can happen after the
nci_close_device as below
nfcmrvl_nci_unregister_dev | nfc_genl_dev_up
nci_close_device |
flush_workqueue |
del_timer_sync |
nci_unregister_device | nfc_get_device
destroy_workqueue | nfc_dev_up
nfc_unregister_device | nci_dev_up
device_del | nci_open_device
| __nci_request
| nci_send_cmd
| queue_work !!!
Another site is nci_cmd_timer, awaked by the nci_cmd_work from the
nci_send_cmd.
... | ...
nci_unregister_device | queue_work
destroy_workqueue |
nfc_unregister_device | ...
device_del | nci_cmd_work
| mod_timer
| ...
| nci_cmd_timer
| queue_work !!!
For the above two UAF, the root cause is that the nfc_dev_up can race
between the nci_unregister_device routine. Therefore, this patch
introduce NCI_UNREG flag to easily eliminate the possible race. In
addition, the mutex_lock in nci_close_device can act as a barrier.
Signed-off-by: Lin Ma <[email protected]>
Fixes: 6a2968aaf50c ("NFC: basic NCI protocol implementation")
Reviewed-by: Jakub Kicinski <[email protected]>
Reviewed-by: Krzysztof Kozlowski <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Jakub Kicinski <[email protected]> |
asmlinkage long sys_mknodat(int dfd, const char __user *filename, int mode,
unsigned dev)
{
int error = 0;
char * tmp;
struct dentry * dentry;
struct nameidata nd;
if (S_ISDIR(mode))
return -EPERM;
tmp = getname(filename);
if (IS_ERR(tmp))
return PTR_ERR(tmp);
error = do_path_lookup(dfd, tmp, LOOKUP_PARENT, &nd);
if (error)
goto out;
dentry = lookup_create(&nd, 0);
if (IS_ERR(dentry)) {
error = PTR_ERR(dentry);
goto out_unlock;
}
if (!IS_POSIXACL(nd.path.dentry->d_inode))
mode &= ~current->fs->umask;
error = may_mknod(mode);
if (error)
goto out_dput;
error = mnt_want_write(nd.path.mnt);
if (error)
goto out_dput;
switch (mode & S_IFMT) {
case 0: case S_IFREG:
error = vfs_create(nd.path.dentry->d_inode,dentry,mode,&nd);
break;
case S_IFCHR: case S_IFBLK:
error = vfs_mknod(nd.path.dentry->d_inode,dentry,mode,
new_decode_dev(dev));
break;
case S_IFIFO: case S_IFSOCK:
error = vfs_mknod(nd.path.dentry->d_inode,dentry,mode,0);
break;
}
mnt_drop_write(nd.path.mnt);
out_dput:
dput(dentry);
out_unlock:
mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
path_put(&nd.path);
out:
putname(tmp);
return error;
} | 0 | [
"CWE-120"
]
| linux-2.6 | d70b67c8bc72ee23b55381bd6a884f4796692f77 | 60,226,493,380,353,860,000,000,000,000,000,000,000 | 53 | [patch] vfs: fix lookup on deleted directory
Lookup can install a child dentry for a deleted directory. This keeps
the directory dentry alive, and the inode pinned in the cache and on
disk, even after all external references have gone away.
This isn't a big problem normally, since memory pressure or umount
will clear out the directory dentry and its children, releasing the
inode. But for UBIFS this causes problems because its orphan area can
overflow.
Fix this by returning ENOENT for all lookups on a S_DEAD directory
before creating a child dentry.
Thanks to Zoltan Sogor for noticing this while testing UBIFS, and
Artem for the excellent analysis of the problem and testing.
Reported-by: Artem Bityutskiy <[email protected]>
Tested-by: Artem Bityutskiy <[email protected]>
Signed-off-by: Miklos Szeredi <[email protected]>
Signed-off-by: Al Viro <[email protected]> |
onig_recompile(regex_t* reg, const UChar* pattern, const UChar* pattern_end,
OnigOptionType option, OnigEncoding enc, OnigSyntaxType* syntax,
OnigErrorInfo* einfo)
{
int r;
regex_t *new_reg;
r = onig_new(&new_reg, pattern, pattern_end, option, enc, syntax, einfo);
if (r) return r;
if (ONIG_STATE(reg) == ONIG_STATE_NORMAL) {
onig_transfer(reg, new_reg);
}
else {
onig_chain_link_add(reg, new_reg);
}
return 0;
} | 0 | [
"CWE-125"
]
| php-src | c6e34d91b88638966662caac62c4d0e90538e317 | 251,973,288,637,259,400,000,000,000,000,000,000,000 | 17 | Fix bug #77371 (heap buffer overflow in mb regex functions - compile_string_node) |
static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
{
int ret;
/* Record our major and minor device numbers. */
if (!single_major) {
ret = register_blkdev(0, rbd_dev->name);
if (ret < 0)
goto err_out_unlock;
rbd_dev->major = ret;
rbd_dev->minor = 0;
} else {
rbd_dev->major = rbd_major;
rbd_dev->minor = rbd_dev_id_to_minor(rbd_dev->dev_id);
}
/* Set up the blkdev mapping. */
ret = rbd_init_disk(rbd_dev);
if (ret)
goto err_out_blkdev;
set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
set_disk_ro(rbd_dev->disk, rbd_is_ro(rbd_dev));
ret = dev_set_name(&rbd_dev->dev, "%d", rbd_dev->dev_id);
if (ret)
goto err_out_disk;
set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
up_write(&rbd_dev->header_rwsem);
return 0;
err_out_disk:
rbd_free_disk(rbd_dev);
err_out_blkdev:
if (!single_major)
unregister_blkdev(rbd_dev->major, rbd_dev->name);
err_out_unlock:
up_write(&rbd_dev->header_rwsem);
return ret;
} | 0 | [
"CWE-863"
]
| linux | f44d04e696feaf13d192d942c4f14ad2e117065a | 37,824,980,996,482,115,000,000,000,000,000,000,000 | 44 | rbd: require global CAP_SYS_ADMIN for mapping and unmapping
It turns out that currently we rely only on sysfs attribute
permissions:
$ ll /sys/bus/rbd/{add*,remove*}
--w------- 1 root root 4096 Sep 3 20:37 /sys/bus/rbd/add
--w------- 1 root root 4096 Sep 3 20:37 /sys/bus/rbd/add_single_major
--w------- 1 root root 4096 Sep 3 20:37 /sys/bus/rbd/remove
--w------- 1 root root 4096 Sep 3 20:38 /sys/bus/rbd/remove_single_major
This means that images can be mapped and unmapped (i.e. block devices
can be created and deleted) by a UID 0 process even after it drops all
privileges or by any process with CAP_DAC_OVERRIDE in its user namespace
as long as UID 0 is mapped into that user namespace.
Be consistent with other virtual block devices (loop, nbd, dm, md, etc)
and require CAP_SYS_ADMIN in the initial user namespace for mapping and
unmapping, and also for dumping the configuration string and refreshing
the image header.
Cc: [email protected]
Signed-off-by: Ilya Dryomov <[email protected]>
Reviewed-by: Jeff Layton <[email protected]> |
static void clear_file_array(wfClipboard* clipboard)
{
size_t i;
if (!clipboard)
return;
/* clear file_names array */
if (clipboard->file_names)
{
for (i = 0; i < clipboard->nFiles; i++)
{
free(clipboard->file_names[i]);
clipboard->file_names[i] = NULL;
}
free(clipboard->file_names);
clipboard->file_names = NULL;
}
/* clear fileDescriptor array */
if (clipboard->fileDescriptor)
{
for (i = 0; i < clipboard->nFiles; i++)
{
free(clipboard->fileDescriptor[i]);
clipboard->fileDescriptor[i] = NULL;
}
free(clipboard->fileDescriptor);
clipboard->fileDescriptor = NULL;
}
clipboard->file_array_size = 0;
clipboard->nFiles = 0;
} | 0 | [
"CWE-20"
]
| FreeRDP | 0d79670a28c0ab049af08613621aa0c267f977e9 | 300,854,443,581,004,630,000,000,000,000,000,000,000 | 36 | Fixed missing input checks for file contents request
reported by Valentino Ricotta (Thalium) |
g_vfs_backend_dav_init (GVfsBackendDav *backend)
{
g_vfs_backend_set_user_visible (G_VFS_BACKEND (backend), TRUE);
} | 0 | []
| gvfs | f81ff2108ab3b6e370f20dcadd8708d23f499184 | 62,677,835,795,503,320,000,000,000,000,000,000,000 | 4 | dav: don't unescape the uri twice
path_equal tries to unescape path before comparing. Unfortunately
this function is used also for already unescaped paths. Therefore
unescaping can fail. This commit reverts changes which was done in
commit 50af53d and unescape just uris, which aren't unescaped yet.
https://bugzilla.gnome.org/show_bug.cgi?id=743298 |
etherproto_string(netdissect_options *ndo, u_short port)
{
register char *cp;
register struct hnamemem *tp;
register uint32_t i = port;
char buf[sizeof("0000")];
for (tp = &eprototable[i & (HASHNAMESIZE-1)]; tp->nxt; tp = tp->nxt)
if (tp->addr == i)
return (tp->name);
tp->addr = i;
tp->nxt = newhnamemem(ndo);
cp = buf;
NTOHS(port);
*cp++ = hex[port >> 12 & 0xf];
*cp++ = hex[port >> 8 & 0xf];
*cp++ = hex[port >> 4 & 0xf];
*cp++ = hex[port & 0xf];
*cp++ = '\0';
tp->name = strdup(buf);
if (tp->name == NULL)
(*ndo->ndo_error)(ndo, "etherproto_string: strdup(buf)");
return (tp->name);
} | 0 | [
"CWE-125",
"CWE-787"
]
| tcpdump | 730fc35968c5433b9e2a829779057f4f9495dc51 | 16,559,376,879,543,010,000,000,000,000,000,000,000 | 26 | CVE-2017-12894/In lookup_bytestring(), take the length of the byte string into account.
Otherwise, if, in our search of the hash table, we come across a byte
string that's shorter than the string we're looking for, we'll search
past the end of the string in the hash table.
This fixes a buffer over-read discovered by Forcepoint's security
researchers Otto Airamo & Antti Levomäki.
Add a test using the capture file supplied by the reporter(s). |
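The fix described above amounts to making the lookup length-aware. A minimal sketch of that idea, using hypothetical names (bs_entry, bs_lookup) rather than tcpdump's actual structures:

#include <stddef.h>
#include <string.h>

struct bs_entry {
	const unsigned char *bytes;
	size_t len;
	struct bs_entry *next;
};

/* Compare the stored length before the contents, so a shorter stored
 * entry can never make memcmp() read past its end. */
static struct bs_entry *bs_lookup(struct bs_entry *bucket,
				  const unsigned char *key, size_t keylen)
{
	for (; bucket != NULL; bucket = bucket->next)
		if (bucket->len == keylen &&
		    memcmp(bucket->bytes, key, keylen) == 0)
			return bucket;
	return NULL;
}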
current_element (GMarkupParseContext *context)
{
return context->tag_stack->data;
} | 0 | [
"CWE-476"
]
| glib | fccef3cc822af74699cca84cd202719ae61ca3b9 | 284,652,611,455,865,900,000,000,000,000,000,000,000 | 4 | gmarkup: Fix crash in error handling path for closing elements
If something which looks like a closing tag is left unfinished, but
isn’t paired to an opening tag in the document, the error handling code
would do a null pointer dereference. Avoid that, at the cost of
introducing a new translatable error message.
Includes a test case, courtesy of pdknsk.
Signed-off-by: Philip Withnall <[email protected]>
https://gitlab.gnome.org/GNOME/glib/issues/1461 |
static void iscsi_nop_timed_event(void *opaque)
{
IscsiLun *iscsilun = opaque;
qemu_mutex_lock(&iscsilun->mutex);
if (iscsi_get_nops_in_flight(iscsilun->iscsi) >= MAX_NOP_FAILURES) {
error_report("iSCSI: NOP timeout. Reconnecting...");
iscsilun->request_timed_out = true;
} else if (iscsi_nop_out_async(iscsilun->iscsi, NULL, NULL, 0, NULL) != 0) {
error_report("iSCSI: failed to sent NOP-Out. Disabling NOP messages.");
goto out;
}
timer_mod(iscsilun->nop_timer, qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + NOP_INTERVAL);
iscsi_set_events(iscsilun);
out:
qemu_mutex_unlock(&iscsilun->mutex);
} | 0 | [
"CWE-125"
]
| qemu | ff0507c239a246fd7215b31c5658fc6a3ee1e4c5 | 46,327,071,991,184,790,000,000,000,000,000,000,000 | 19 | block/iscsi:fix heap-buffer-overflow in iscsi_aio_ioctl_cb
There is an overflow, the source 'datain.data[2]' is 100 bytes,
but the 'ss' is 252 bytes.This may cause a security issue because
we can access a lot of unrelated memory data.
The len for sbp copy data should take the minimum of mx_sb_len and
sb_len_wr, not the maximum.
If we use iscsi device for VM backend storage, ASAN show stack:
READ of size 252 at 0xfffd149dcfc4 thread T0
#0 0xaaad433d0d34 in __asan_memcpy (aarch64-softmmu/qemu-system-aarch64+0x2cb0d34)
#1 0xaaad45f9d6d0 in iscsi_aio_ioctl_cb /qemu/block/iscsi.c:996:9
#2 0xfffd1af0e2dc (/usr/lib64/iscsi/libiscsi.so.8+0xe2dc)
#3 0xfffd1af0d174 (/usr/lib64/iscsi/libiscsi.so.8+0xd174)
#4 0xfffd1af19fac (/usr/lib64/iscsi/libiscsi.so.8+0x19fac)
#5 0xaaad45f9acc8 in iscsi_process_read /qemu/block/iscsi.c:403:5
#6 0xaaad4623733c in aio_dispatch_handler /qemu/util/aio-posix.c:467:9
#7 0xaaad4622f350 in aio_dispatch_handlers /qemu/util/aio-posix.c:510:20
#8 0xaaad4622f350 in aio_dispatch /qemu/util/aio-posix.c:520
#9 0xaaad46215944 in aio_ctx_dispatch /qemu/util/async.c:298:5
#10 0xfffd1bed12f4 in g_main_context_dispatch (/lib64/libglib-2.0.so.0+0x512f4)
#11 0xaaad46227de0 in glib_pollfds_poll /qemu/util/main-loop.c:219:9
#12 0xaaad46227de0 in os_host_main_loop_wait /qemu/util/main-loop.c:242
#13 0xaaad46227de0 in main_loop_wait /qemu/util/main-loop.c:518
#14 0xaaad43d9d60c in qemu_main_loop /qemu/softmmu/vl.c:1662:9
#15 0xaaad4607a5b0 in main /qemu/softmmu/main.c:49:5
#16 0xfffd1a460b9c in __libc_start_main (/lib64/libc.so.6+0x20b9c)
#17 0xaaad43320740 in _start (aarch64-softmmu/qemu-system-aarch64+0x2c00740)
0xfffd149dcfc4 is located 0 bytes to the right of 100-byte region [0xfffd149dcf60,0xfffd149dcfc4)
allocated by thread T0 here:
#0 0xaaad433d1e70 in __interceptor_malloc (aarch64-softmmu/qemu-system-aarch64+0x2cb1e70)
#1 0xfffd1af0e254 (/usr/lib64/iscsi/libiscsi.so.8+0xe254)
#2 0xfffd1af0d174 (/usr/lib64/iscsi/libiscsi.so.8+0xd174)
#3 0xfffd1af19fac (/usr/lib64/iscsi/libiscsi.so.8+0x19fac)
#4 0xaaad45f9acc8 in iscsi_process_read /qemu/block/iscsi.c:403:5
#5 0xaaad4623733c in aio_dispatch_handler /qemu/util/aio-posix.c:467:9
#6 0xaaad4622f350 in aio_dispatch_handlers /qemu/util/aio-posix.c:510:20
#7 0xaaad4622f350 in aio_dispatch /qemu/util/aio-posix.c:520
#8 0xaaad46215944 in aio_ctx_dispatch /qemu/util/async.c:298:5
#9 0xfffd1bed12f4 in g_main_context_dispatch (/lib64/libglib-2.0.so.0+0x512f4)
#10 0xaaad46227de0 in glib_pollfds_poll /qemu/util/main-loop.c:219:9
#11 0xaaad46227de0 in os_host_main_loop_wait /qemu/util/main-loop.c:242
#12 0xaaad46227de0 in main_loop_wait /qemu/util/main-loop.c:518
#13 0xaaad43d9d60c in qemu_main_loop /qemu/softmmu/vl.c:1662:9
#14 0xaaad4607a5b0 in main /qemu/softmmu/main.c:49:5
#15 0xfffd1a460b9c in __libc_start_main (/lib64/libc.so.6+0x20b9c)
#16 0xaaad43320740 in _start (aarch64-softmmu/qemu-system-aarch64+0x2c00740)
Reported-by: Euler Robot <[email protected]>
Signed-off-by: Chen Qun <[email protected]>
Reviewed-by: Stefan Hajnoczi <[email protected]>
Message-id: [email protected]
Reviewed-by: Daniel P. Berrangé <[email protected]>
Signed-off-by: Peter Maydell <[email protected]> |
static UPNP_INLINE int search_extension(
/*! [in] . */
const char *extension,
/*! [out] . */
const char **con_type,
/*! [out] . */
const char **con_subtype)
{
int top, mid, bot;
int cmp;
top = 0;
bot = NUM_MEDIA_TYPES - 1;
while (top <= bot) {
mid = (top + bot) / 2;
cmp = strcasecmp(extension, gMediaTypeList[mid].file_ext);
if (cmp > 0) {
/* look below mid. */
top = mid + 1;
} else if (cmp < 0) {
/* look above mid. */
bot = mid - 1;
} else {
/* cmp == 0 */
*con_type = gMediaTypeList[mid].content_type;
*con_subtype = gMediaTypeList[mid].content_subtype;
return 0;
}
}
return -1;
} | 0 | [
"CWE-284"
]
| pupnp-code | be0a01bdb83395d9f3a5ea09c1308a4f1a972cbd | 18,974,022,495,885,660,000,000,000,000,000,000,000 | 33 | Don't allow unhandled POSTs to write to the filesystem by default
If there's no registered handler for a POST request, the default behaviour
is to write it to the filesystem. Several million deployed devices appear
to have this behaviour, making it possible to (at least) store arbitrary
data on them. Add a configure option that enables this behaviour, and change
the default to just drop POSTs that aren't directly handled. |
static void peak_usb_unlink_all_urbs(struct peak_usb_device *dev)
{
int i;
/* free all Rx (submitted) urbs */
usb_kill_anchored_urbs(&dev->rx_submitted);
/* free unsubmitted Tx urbs first */
for (i = 0; i < PCAN_USB_MAX_TX_URBS; i++) {
struct urb *urb = dev->tx_contexts[i].urb;
if (!urb ||
dev->tx_contexts[i].echo_index != PCAN_USB_MAX_TX_URBS) {
/*
* this urb is already released or always submitted,
* let usb core free by itself
*/
continue;
}
usb_free_urb(urb);
dev->tx_contexts[i].urb = NULL;
}
/* then free all submitted Tx urbs */
usb_kill_anchored_urbs(&dev->tx_submitted);
atomic_set(&dev->active_tx_urbs, 0);
} | 0 | [
"CWE-909"
]
| linux | f7a1337f0d29b98733c8824e165fca3371d7d4fd | 264,073,745,294,126,200,000,000,000,000,000,000,000 | 28 | can: peak_usb: fix slab info leak
Fix a small slab info leak due to a failure to clear the command buffer
at allocation.
The first 16 bytes of the command buffer are always sent to the device
in pcan_usb_send_cmd() even though only the first two may have been
initialised in case no argument payload is provided (e.g. when waiting
for a response).
Fixes: bb4785551f64 ("can: usb: PEAK-System Technik USB adapters driver core")
Cc: stable <[email protected]> # 3.4
Reported-by: [email protected]
Signed-off-by: Johan Hovold <[email protected]>
Signed-off-by: Marc Kleine-Budde <[email protected]> |
PHP_FUNCTION(system)
{
php_exec_ex(INTERNAL_FUNCTION_PARAM_PASSTHRU, 1);
} | 0 | [
"CWE-703",
"CWE-189"
]
| php-src | 2871c70efaaaa0f102557a17c727fd4d5204dd4b | 186,239,027,435,069,530,000,000,000,000,000,000,000 | 4 | Patch for Heap Buffer Overflow in EscapeShell
Proposed patch for bug #71270 |
void *MACH0_(mach0_free)(struct MACH0_(obj_t) *mo) {
if (!mo) {
return NULL;
}
size_t i;
if (mo->symbols) {
for (i = 0; !mo->symbols[i].last; i++) {
free (mo->symbols[i].name);
}
free (mo->symbols);
}
free (mo->segs);
free (mo->sects);
free (mo->symtab);
free (mo->symstr);
free (mo->indirectsyms);
free (mo->imports_by_ord);
ht_pp_free (mo->imports_by_name);
free (mo->dyld_info);
free (mo->toc);
free (mo->modtab);
free (mo->libs);
free (mo->func_start);
free (mo->signature);
free (mo->intrp);
free (mo->compiler);
if (mo->chained_starts) {
for (i = 0; i < mo->nsegs; i++) {
if (mo->chained_starts[i]) {
free (mo->chained_starts[i]->page_start);
free (mo->chained_starts[i]);
}
}
free (mo->chained_starts);
}
r_buf_free (mo->b);
free (mo);
return NULL;
} | 1 | [
"CWE-125",
"CWE-787"
]
| radare2 | 0052500c1ed5bf8263b26b9fd7773dbdc6f170c4 | 30,073,576,392,675,150,000,000,000,000,000,000,000 | 40 | Fix heap OOB read in macho.iterate_chained_fixups ##crash
* Reported by peacock-doris via huntr.dev
* Reproducer 'tests_65305'
mrmacete:
* Return early if segs_count is 0
* Initialize segs_count also for reconstructed fixups
Co-authored-by: pancake <[email protected]>
Co-authored-by: Francesco Tamagni <[email protected]> |
Returns TRUE if the mode outputs blocks of bytes */
PHP_FUNCTION(mcrypt_module_is_block_mode)
{
MCRYPT_GET_MODE_DIR_ARGS(modes_dir)
if (mcrypt_module_is_block_mode(module, dir) == 1) {
RETURN_TRUE;
} else {
RETURN_FALSE;
} | 1 | [
"CWE-190"
]
| php-src | 6c5211a0cef0cc2854eaa387e0eb036e012904d0 | 110,027,340,702,908,620,000,000,000,000,000,000,000 | 10 | Fix bug #72455: Heap Overflow due to integer overflows |
static NO_INLINE JsVar *jspGetNamedFieldInParents(JsVar *object, const char* name, bool returnName) {
// Now look in prototypes
JsVar * child = jspeiFindChildFromStringInParents(object, name);
/* Check for builtins via separate function
* This way we save on RAM for built-ins because everything comes out of program code */
if (!child) {
child = jswFindBuiltInFunction(object, name);
}
/* We didn't get here if we found a child in the object itself, so
* if we're here then we probably have the wrong name - so for example
* with `a.b = c;` could end up setting `a.prototype.b` (bug #360)
*
* Also we might have got a built-in, which wouldn't have a name on it
* anyway - so in both cases, strip the name if it is there, and create
* a new name.
*/
if (child && returnName) {
// Get rid of existing name
child = jsvSkipNameAndUnLock(child);
// create a new name
JsVar *nameVar = jsvNewFromString(name);
JsVar *newChild = jsvCreateNewChild(object, nameVar, child);
jsvUnLock2(nameVar, child);
child = newChild;
}
// If not found and is the prototype, create it
if (!child) {
if (jsvIsFunction(object) && strcmp(name, JSPARSE_PROTOTYPE_VAR)==0) {
// prototype is supposed to be an object
JsVar *proto = jsvNewObject();
// make sure it has a 'constructor' variable that points to the object it was part of
jsvObjectSetChild(proto, JSPARSE_CONSTRUCTOR_VAR, object);
child = jsvAddNamedChild(object, proto, JSPARSE_PROTOTYPE_VAR);
jspEnsureIsPrototype(object, child);
jsvUnLock(proto);
} else if (strcmp(name, JSPARSE_INHERITS_VAR)==0) {
const char *objName = jswGetBasicObjectName(object);
if (objName) {
child = jspNewPrototype(objName);
}
}
}
return child;
} | 0 | [
"CWE-119",
"CWE-125"
]
| Espruino | c36d30529118aa049797db43f111ddad468aad29 | 193,928,201,638,872,770,000,000,000,000,000,000,000 | 48 | Fix stack overflow if void void void... is repeated many times (fix #1434) |
void wakeup_subsequent_commits(int wakeup_error_arg)
{
/*
Do the check inline, so only the wakeup case takes the cost of a function
call for every commmit.
Note that the check is done without locking. It is the responsibility of
the user of the wakeup facility to ensure that no waiters can register
themselves after the last call to wakeup_subsequent_commits().
This avoids having to take another lock for every commit, which would be
pointless anyway - even if we check under lock, there is nothing to
prevent a waiter from arriving just after releasing the lock.
*/
if (subsequent_commits_list)
wakeup_subsequent_commits2(wakeup_error_arg);
} | 0 | [
"CWE-416"
]
| server | 4681b6f2d8c82b4ec5cf115e83698251963d80d5 | 31,370,300,723,421,900,000,000,000,000,000,000,000 | 17 | MDEV-26281 ASAN use-after-poison when complex conversion is involved in blob
the bug was that in_vector array in Item_func_in was allocated in the
statement arena, not in the table->expr_arena.
revert part of the 5acd391e8b2d. Instead, change the arena correctly
in fix_all_session_vcol_exprs().
Remove TABLE_ARENA, that was introduced in 5acd391e8b2d to force
item tree changes to be rolled back (because they were allocated in the
wrong arena and didn't persist. now they do) |
int db_action_valid(uint32_t action)
{
if (sys_chk_seccomp_action(action) == 1)
return 0;
return -EINVAL;
} | 0 | []
| libseccomp | c5bf78de480b32b324e0f511c88ce533ed280b37 | 126,712,140,201,997,750,000,000,000,000,000,000,000 | 6 | db: fix 64-bit argument comparisons
Our approach to doing 64-bit comparisons using 32-bit operators was
just plain wrong, leading to a number of potential problems with
filters that used the LT, GT, LE, or GE operators. This patch fixes
this problem and a few other related issues that came to light in
the course of fixing the core problem.
A special thanks to Jann Horn for bringing this problem to our
attention.
Signed-off-by: Paul Moore <[email protected]> |
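The commit message above notes that emulating 64-bit LT/GT/LE/GE with 32-bit operators is easy to get wrong. As a point of reference, a correct unsigned 64-bit "greater than" built from 32-bit halves looks like the sketch below; this is plain illustrative C, not the BPF instruction sequence the real filter emits.

#include <stdbool.h>
#include <stdint.h>

#define HI32(x) ((uint32_t)((x) >> 32))
#define LO32(x) ((uint32_t)((x) & 0xffffffffU))

/* Compare the high words first; only when they are equal does the
 * result depend on an unsigned comparison of the low words. */
static bool u64_gt_via_u32(uint64_t a, uint64_t b)
{
	if (HI32(a) != HI32(b))
		return HI32(a) > HI32(b);
	return LO32(a) > LO32(b);
}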
int do_pipe_flags(int *fd, int flags)
{
struct file *files[2];
int error = __do_pipe_flags(fd, files, flags);
if (!error) {
fd_install(fd[0], files[0]);
fd_install(fd[1], files[1]);
}
return error;
} | 0 | [
"CWE-17"
]
| linux | f0d1bec9d58d4c038d0ac958c9af82be6eb18045 | 174,358,333,191,221,160,000,000,000,000,000,000,000 | 10 | new helper: copy_page_from_iter()
parallel to copy_page_to_iter(). pipe_write() switched to it (and became
->write_iter()).
Signed-off-by: Al Viro <[email protected]> |
static void fuse_aio_complete_req(struct fuse_mount *fm, struct fuse_args *args,
int err)
{
struct fuse_io_args *ia = container_of(args, typeof(*ia), ap.args);
struct fuse_io_priv *io = ia->io;
ssize_t pos = -1;
fuse_release_user_pages(&ia->ap, io->should_dirty);
if (err) {
/* Nothing */
} else if (io->write) {
if (ia->write.out.size > ia->write.in.size) {
err = -EIO;
} else if (ia->write.in.size != ia->write.out.size) {
pos = ia->write.in.offset - io->offset +
ia->write.out.size;
}
} else {
u32 outsize = args->out_args[0].size;
if (ia->read.in.size != outsize)
pos = ia->read.in.offset - io->offset + outsize;
}
fuse_aio_complete(io, err, pos);
fuse_io_free(ia);
} | 0 | [
"CWE-459"
]
| linux | 5d069dbe8aaf2a197142558b6fb2978189ba3454 | 100,064,648,311,382,040,000,000,000,000,000,000,000 | 28 | fuse: fix bad inode
Jan Kara's analysis of the syzbot report (edited):
The reproducer opens a directory on FUSE filesystem, it then attaches
dnotify mark to the open directory. After that a fuse_do_getattr() call
finds that attributes returned by the server are inconsistent, and calls
make_bad_inode() which, among other things does:
inode->i_mode = S_IFREG;
This then confuses dnotify which doesn't tear down its structures
properly and eventually crashes.
Avoid calling make_bad_inode() on a live inode: switch to a private flag on
the fuse inode. Also add the test to ops which the bad_inode_ops would
have caught.
This bug goes back to the initial merge of fuse in 2.6.14...
Reported-by: [email protected]
Signed-off-by: Miklos Szeredi <[email protected]>
Tested-by: Jan Kara <[email protected]>
Cc: <[email protected]> |
_outAppendPath(StringInfo str, const AppendPath *node)
{
WRITE_NODE_TYPE("APPENDPATH");
_outPathInfo(str, (const Path *) node);
WRITE_NODE_FIELD(subpaths);
} | 0 | [
"CWE-362"
]
| postgres | 5f173040e324f6c2eebb90d86cf1b0cdb5890f0a | 252,223,228,322,849,840,000,000,000,000,000,000,000 | 8 | Avoid repeated name lookups during table and index DDL.
If the name lookups come to different conclusions due to concurrent
activity, we might perform some parts of the DDL on a different table
than other parts. At least in the case of CREATE INDEX, this can be
used to cause the permissions checks to be performed against a
different table than the index creation, allowing for a privilege
escalation attack.
This changes the calling convention for DefineIndex, CreateTrigger,
transformIndexStmt, transformAlterTableStmt, CheckIndexCompatible
(in 9.2 and newer), and AlterTable (in 9.1 and older). In addition,
CheckRelationOwnership is removed in 9.2 and newer and the calling
convention is changed in older branches. A field has also been added
to the Constraint node (FkConstraint in 8.4). Third-party code calling
these functions or using the Constraint node will require updating.
Report by Andres Freund. Patch by Robert Haas and Andres Freund,
reviewed by Tom Lane.
Security: CVE-2014-0062 |
static void lsr_read_attribute_name_ex(GF_LASeRCodec *lsr, GF_Node *n, Bool skippable)
{
u32 val = 1;
if (skippable) {
GF_LSR_READ_INT(lsr, val, 1, "hasAttributeName");
if (!val) return;
}
GF_LSR_READ_INT(lsr, val, 1, "choice");
if (val) {
lsr_read_vluimsbf5(lsr, "item[i]");
lsr_read_vluimsbf5(lsr, "item[i]");
return;
} else {
GF_FieldInfo info;
lsr->last_error = gf_node_get_attribute_by_tag(n, TAG_SVG_ATT_attributeName, GF_TRUE, GF_FALSE, &info);
GF_LSR_READ_INT(lsr, val, 8, "attributeType");
/*translate type to attribute tag*/
((SMIL_AttributeName*)info.far_ptr)->type = gf_lsr_anim_type_to_attribute(val);
}
} | 0 | [
"CWE-190"
]
| gpac | faa75edde3dfeba1e2cf6ffa48e45a50f1042096 | 160,640,057,164,796,120,000,000,000,000,000,000,000 | 22 | fixed #2213 |
ex_ni(exarg_T *eap)
{
if (!eap->skip)
eap->errmsg =
_(e_sorry_command_is_not_available_in_this_version);
} | 0 | [
"CWE-125"
]
| vim | d3a117814d6acbf0dca3eff1a7626843b9b3734a | 269,348,405,131,002,470,000,000,000,000,000,000,000 | 6 | patch 8.2.4009: reading one byte beyond the end of the line
Problem: Reading one byte beyond the end of the line.
Solution: Check for NUL byte first. |
inline double uppercase(const double x) {
return (double)((x<'a'||x>'z')?x:x - 'a' + 'A');
} | 0 | [
"CWE-770"
]
| cimg | 619cb58dd90b4e03ac68286c70ed98acbefd1c90 | 80,748,411,805,866,950,000,000,000,000,000,000,000 | 3 | CImg<>::load_bmp() and CImg<>::load_pandore(): Check that dimensions encoded in file does not exceed file size. |
DEATH_TEST(LteOp, InvalidEooOperand, "Invariant failure _rhs") {
BSONObj operand;
LTEMatchExpression lte("", operand.firstElement());
} | 0 | []
| mongo | 64095239f41e9f3841d8be9088347db56d35c891 | 132,062,153,087,611,060,000,000,000,000,000,000,000 | 4 | SERVER-51083 Reject invalid UTF-8 from $regex match expressions |
ExecCheckXactReadOnly(PlannedStmt *plannedstmt)
{
ListCell *l;
/* Fail if write permissions are requested on any non-temp table */
foreach(l, plannedstmt->rtable)
{
RangeTblEntry *rte = (RangeTblEntry *) lfirst(l);
if (rte->rtekind != RTE_RELATION)
continue;
if ((rte->requiredPerms & (~ACL_SELECT)) == 0)
continue;
if (isTempNamespace(get_rel_namespace(rte->relid)))
continue;
PreventCommandIfReadOnly(CreateCommandTag((Node *) plannedstmt));
}
} | 0 | [
"CWE-209"
]
| postgres | 804b6b6db4dcfc590a468e7be390738f9f7755fb | 89,795,051,947,489,700,000,000,000,000,000,000,000 | 21 | Fix column-privilege leak in error-message paths
While building error messages to return to the user,
BuildIndexValueDescription, ExecBuildSlotValueDescription and
ri_ReportViolation would happily include the entire key or entire row in
the result returned to the user, even if the user didn't have access to
view all of the columns being included.
Instead, include only those columns which the user is providing or which
the user has select rights on. If the user does not have any rights
to view the table or any of the columns involved then no detail is
provided and a NULL value is returned from BuildIndexValueDescription
and ExecBuildSlotValueDescription. Note that, for key cases, the user
must have access to all of the columns for the key to be shown; a
partial key will not be returned.
Further, in master only, do not return any data for cases where row
security is enabled on the relation and row security should be applied
for the user. This required a bit of refactoring and moving of things
around related to RLS- note the addition of utils/misc/rls.c.
Back-patch all the way, as column-level privileges are now in all
supported versions.
This has been assigned CVE-2014-8161, but since the issue and the patch
have already been publicized on pgsql-hackers, there's no point in trying
to hide this commit. |
void hkeysCommand(client *c) {
genericHgetallCommand(c,OBJ_HASH_KEY);
} | 0 | [
"CWE-190"
]
| redis | f6a40570fa63d5afdd596c78083d754081d80ae3 | 55,681,704,239,703,420,000,000,000,000,000,000,000 | 3 | Fix ziplist and listpack overflows and truncations (CVE-2021-32627, CVE-2021-32628)
- fix possible heap corruption in ziplist and listpack resulting by trying to
allocate more than the maximum size of 4GB.
- prevent ziplist (hash and zset) from reaching size of above 1GB, will be
converted to HT encoding, that's not a useful size.
- prevent listpack (stream) from reaching size of above 1GB.
- XADD will start a new listpack if the new record may cause the previous
listpack to grow over 1GB.
- XADD will respond with an error if a single stream record is over 1GB
- List type (ziplist in quicklist) was truncating strings that were over 4GB,
now it'll respond with an error. |
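The caps listed above are enforced by checking the prospective size before allocating or appending, with the addition done in a 64-bit type so the 32-bit length fields used by these encodings cannot wrap. A small illustrative sketch; SAFETY_LIMIT and grow_is_safe are hypothetical names, not Redis internals.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define SAFETY_LIMIT (1024ULL * 1024 * 1024)	/* 1 GB cap, as in the message above */

/* Sum the 32-bit sizes in 64 bits and compare against the cap before
 * any allocation or encoding change happens. */
static bool grow_is_safe(uint32_t cur_size, uint32_t add_size)
{
	uint64_t new_size = (uint64_t)cur_size + (uint64_t)add_size;
	return new_size <= SAFETY_LIMIT;
}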
static void acpi_table_drop_item(struct config_group *group,
struct config_item *cfg)
{
struct acpi_table *table = container_of(cfg, struct acpi_table, cfg);
ACPI_INFO(("Host-directed Dynamic ACPI Table Unload"));
acpi_unload_table(table->index);
} | 0 | [
"CWE-862"
]
| linux | 75b0cea7bf307f362057cc778efe89af4c615354 | 121,431,614,579,852,170,000,000,000,000,000,000,000 | 8 | ACPI: configfs: Disallow loading ACPI tables when locked down
Like other vectors already patched, this one here allows the root
user to load ACPI tables, which enables arbitrary physical address
writes, which in turn makes it possible to disable lockdown.
Prevents this by checking the lockdown status before allowing a new
ACPI table to be installed. The link in the trailer shows a PoC of
how this might be used.
Link: https://git.zx2c4.com/american-unsigned-language/tree/american-unsigned-language-2.sh
Cc: 5.4+ <[email protected]> # 5.4+
Signed-off-by: Jason A. Donenfeld <[email protected]>
Signed-off-by: Rafael J. Wysocki <[email protected]> |
static int decode_opaque_fixed(struct xdr_stream *xdr, void *buf, size_t len)
{
ssize_t ret = xdr_stream_decode_opaque_fixed(xdr, buf, len);
if (unlikely(ret < 0))
return -EIO;
return 0;
} | 0 | [
"CWE-787"
]
| linux | b4487b93545214a9db8cbf32e86411677b0cca21 | 19,807,079,248,785,338,000,000,000,000,000,000,000 | 7 | nfs: Fix getxattr kernel panic and memory overflow
Move the buffer size check to decode_attr_security_label() before memcpy()
Only call memcpy() if the buffer is large enough
Fixes: aa9c2669626c ("NFS: Client implementation of Labeled-NFS")
Signed-off-by: Jeffrey Mitchell <[email protected]>
[Trond: clean up duplicate test of label->len != 0]
Signed-off-by: Trond Myklebust <[email protected]> |
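The fix above is the classic validate-before-memcpy pattern: check that the received length fits the caller's buffer, and only then copy. A small illustrative sketch with a hypothetical helper name (copy_label is not the kernel function):

#include <errno.h>
#include <string.h>

/* Copy an attribute value only after verifying it fits the destination. */
static int copy_label(void *dst, size_t dst_len, const void *src, size_t src_len)
{
	if (src_len > dst_len)
		return -ERANGE;	/* caller reports the required size instead */
	memcpy(dst, src, src_len);
	return (int)src_len;
}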
u8 gf_bs_align(GF_BitStream *bs)
{
u8 res = 8 - bs->nbBits;
if ( (bs->bsmode == GF_BITSTREAM_READ) || (bs->bsmode == GF_BITSTREAM_FILE_READ)) {
if (res > 0) {
gf_bs_read_int(bs, res);
}
return res;
}
if (bs->nbBits > 0) {
gf_bs_write_int (bs, 0, res);
return res;
}
return 0;
} | 0 | [
"CWE-617",
"CWE-703"
]
| gpac | 9ea93a2ec8f555ceed1ee27294cf94822f14f10f | 112,508,342,584,195,670,000,000,000,000,000,000,000 | 15 | fixed #2165 |
static int store_info(__G) /* return 0 if skipping, 1 if OK */
__GDEF
{
#ifdef USE_BZIP2
# define UNKN_BZ2 (G.crec.compression_method!=BZIPPED)
#else
# define UNKN_BZ2 TRUE /* bzip2 unknown */
#endif
#ifdef USE_LZMA
# define UNKN_LZMA (G.crec.compression_method!=LZMAED)
#else
# define UNKN_LZMA TRUE /* LZMA unknown */
#endif
#ifdef USE_WAVP
# define UNKN_WAVP (G.crec.compression_method!=WAVPACKED)
#else
# define UNKN_WAVP TRUE /* WavPack unknown */
#endif
#ifdef USE_PPMD
# define UNKN_PPMD (G.crec.compression_method!=PPMDED)
#else
# define UNKN_PPMD TRUE /* PPMd unknown */
#endif
#ifdef SFX
# ifdef USE_DEFLATE64
# define UNKN_COMPR \
(G.crec.compression_method!=STORED && G.crec.compression_method<DEFLATED \
&& G.crec.compression_method>ENHDEFLATED \
&& UNKN_BZ2 && UNKN_LZMA && UNKN_WAVP && UNKN_PPMD)
# else
# define UNKN_COMPR \
(G.crec.compression_method!=STORED && G.crec.compression_method!=DEFLATED\
&& UNKN_BZ2 && UNKN_LZMA && UNKN_WAVP && UNKN_PPMD)
# endif
#else
# ifdef COPYRIGHT_CLEAN /* no reduced files */
# define UNKN_RED (G.crec.compression_method >= REDUCED1 && \
G.crec.compression_method <= REDUCED4)
# else
# define UNKN_RED FALSE /* reducing not unknown */
# endif
# ifdef LZW_CLEAN /* no shrunk files */
# define UNKN_SHR (G.crec.compression_method == SHRUNK)
# else
# define UNKN_SHR FALSE /* unshrinking not unknown */
# endif
# ifdef USE_DEFLATE64
# define UNKN_COMPR (UNKN_RED || UNKN_SHR || \
G.crec.compression_method==TOKENIZED || \
(G.crec.compression_method>ENHDEFLATED && UNKN_BZ2 && UNKN_LZMA \
&& UNKN_WAVP && UNKN_PPMD))
# else
# define UNKN_COMPR (UNKN_RED || UNKN_SHR || \
G.crec.compression_method==TOKENIZED || \
(G.crec.compression_method>DEFLATED && UNKN_BZ2 && UNKN_LZMA \
&& UNKN_WAVP && UNKN_PPMD))
# endif
#endif
#if (defined(USE_BZIP2) && (UNZIP_VERSION < UNZIP_BZ2VERS))
int unzvers_support = (UNKN_BZ2 ? UNZIP_VERSION : UNZIP_BZ2VERS);
# define UNZVERS_SUPPORT unzvers_support
#else
# define UNZVERS_SUPPORT UNZIP_VERSION
#endif
/*---------------------------------------------------------------------------
Check central directory info for version/compatibility requirements.
---------------------------------------------------------------------------*/
G.pInfo->encrypted = G.crec.general_purpose_bit_flag & 1; /* bit field */
G.pInfo->ExtLocHdr = (G.crec.general_purpose_bit_flag & 8) == 8; /* bit */
G.pInfo->textfile = G.crec.internal_file_attributes & 1; /* bit field */
G.pInfo->crc = G.crec.crc32;
G.pInfo->compr_size = G.crec.csize;
G.pInfo->uncompr_size = G.crec.ucsize;
switch (uO.aflag) {
case 0:
G.pInfo->textmode = FALSE; /* bit field */
break;
case 1:
G.pInfo->textmode = G.pInfo->textfile; /* auto-convert mode */
break;
default: /* case 2: */
G.pInfo->textmode = TRUE;
break;
}
if (G.crec.version_needed_to_extract[1] == VMS_) {
if (G.crec.version_needed_to_extract[0] > VMS_UNZIP_VERSION) {
if (!((uO.tflag && uO.qflag) || (!uO.tflag && !QCOND2)))
Info(slide, 0x401, ((char *)slide, LoadFarString(VersionMsg),
FnFilter1(G.filename), "VMS",
G.crec.version_needed_to_extract[0] / 10,
G.crec.version_needed_to_extract[0] % 10,
VMS_UNZIP_VERSION / 10, VMS_UNZIP_VERSION % 10));
return 0;
}
#ifndef VMS /* won't be able to use extra field, but still have data */
else if (!uO.tflag && !IS_OVERWRT_ALL) { /* if -o, extract anyway */
Info(slide, 0x481, ((char *)slide, LoadFarString(VMSFormatQuery),
FnFilter1(G.filename)));
fgets(G.answerbuf, sizeof(G.answerbuf), stdin);
if ((*G.answerbuf != 'y') && (*G.answerbuf != 'Y'))
return 0;
}
#endif /* !VMS */
/* usual file type: don't need VMS to extract */
} else if (G.crec.version_needed_to_extract[0] > UNZVERS_SUPPORT) {
if (!((uO.tflag && uO.qflag) || (!uO.tflag && !QCOND2)))
Info(slide, 0x401, ((char *)slide, LoadFarString(VersionMsg),
FnFilter1(G.filename), "PK",
G.crec.version_needed_to_extract[0] / 10,
G.crec.version_needed_to_extract[0] % 10,
UNZVERS_SUPPORT / 10, UNZVERS_SUPPORT % 10));
return 0;
}
if (UNKN_COMPR) {
if (!((uO.tflag && uO.qflag) || (!uO.tflag && !QCOND2))) {
#ifndef SFX
unsigned cmpridx;
if ((cmpridx = find_compr_idx(G.crec.compression_method))
< NUM_METHODS)
Info(slide, 0x401, ((char *)slide, LoadFarString(ComprMsgName),
FnFilter1(G.filename),
LoadFarStringSmall(ComprNames[cmpridx])));
else
#endif
Info(slide, 0x401, ((char *)slide, LoadFarString(ComprMsgNum),
FnFilter1(G.filename),
G.crec.compression_method));
}
return 0;
}
#if (!CRYPT)
if (G.pInfo->encrypted) {
if (!((uO.tflag && uO.qflag) || (!uO.tflag && !QCOND2)))
Info(slide, 0x401, ((char *)slide, LoadFarString(SkipEncrypted),
FnFilter1(G.filename)));
return 0;
}
#endif /* !CRYPT */
#ifndef SFX
/* store a copy of the central header filename for later comparison */
if ((G.pInfo->cfilname = zfmalloc(strlen(G.filename) + 1)) == NULL) {
Info(slide, 0x401, ((char *)slide, LoadFarString(WarnNoMemCFName),
FnFilter1(G.filename)));
} else
zfstrcpy(G.pInfo->cfilname, G.filename);
#endif /* !SFX */
/* map whatever file attributes we have into the local format */
mapattr(__G); /* GRR: worry about return value later */
G.pInfo->diskstart = G.crec.disk_number_start;
G.pInfo->offset = (zoff_t)G.crec.relative_offset_local_header;
return 1;
} /* end function store_info() */ | 0 | [
"CWE-400"
]
| unzip | 47b3ceae397d21bf822bc2ac73052a4b1daf8e1c | 27,099,508,475,081,447,000,000,000,000,000,000,000 | 167 | Detect and reject a zip bomb using overlapped entries.
This detects an invalid zip file that has at least one entry that
overlaps with another entry or with the central directory to the
end of the file. A Fifield zip bomb uses overlapped local entries
to vastly increase the potential inflation ratio. Such an invalid
zip file is rejected.
See https://www.bamsoftware.com/hacks/zipbomb/ for David Fifield's
analysis, construction, and examples of such zip bombs.
The detection maintains a list of covered spans of the zip files
so far, where the central directory to the end of the file and any
bytes preceding the first entry at zip file offset zero are
considered covered initially. Then as each entry is decompressed
or tested, it is considered covered. When a new entry is about to
be processed, its initial offset is checked to see if it is
contained by a covered span. If so, the zip file is rejected as
invalid.
This commit depends on a preceding commit: "Fix bug in
undefer_input() that misplaced the input state." |
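The covered-span bookkeeping described above boils down to an interval-membership test before each entry is processed. A minimal sketch of that check, with hypothetical names (struct span, offset_is_covered) rather than unzip's real data structures:

#include <stdbool.h>
#include <stdint.h>

struct span {
	uint64_t start, end;	/* half-open range [start, end) of covered bytes */
	struct span *next;
};

/* Reject an entry whose local-header offset lies inside any span already
 * covered by an extracted/tested entry or by the central directory. */
static bool offset_is_covered(const struct span *covered, uint64_t offset)
{
	for (; covered != NULL; covered = covered->next)
		if (offset >= covered->start && offset < covered->end)
			return true;
	return false;
}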
int BSONObj::woCompare(const BSONObj &r, const BSONObj &idxKey,
bool considerFieldName) const {
if ( isEmpty() )
return r.isEmpty() ? 0 : -1;
if ( r.isEmpty() )
return 1;
bool ordered = !idxKey.isEmpty();
BSONObjIterator i(*this);
BSONObjIterator j(r);
BSONObjIterator k(idxKey);
while ( 1 ) {
// so far, equal...
BSONElement l = i.next();
BSONElement r = j.next();
BSONElement o;
if ( ordered )
o = k.next();
if ( l.eoo() )
return r.eoo() ? 0 : -1;
if ( r.eoo() )
return 1;
int x;
/*
if( ordered && o.type() == String && strcmp(o.valuestr(), "ascii-proto") == 0 &&
l.type() == String && r.type() == String ) {
// note: no negative support yet, as this is just sort of a POC
x = _stricmp(l.valuestr(), r.valuestr());
}
else*/ {
x = l.woCompare( r, considerFieldName );
if ( ordered && o.number() < 0 )
x = -x;
}
if ( x != 0 )
return x;
}
return -1;
} | 0 | [
"CWE-20"
]
| mongo | f9817a6cf64bdba8e1e1cef30a798110df746b58 | 203,046,928,191,858,100,000,000,000,000,000,000,000 | 42 | SERVER-7769 - turn objcheck on by default and use new fast bson validate |
static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb)
{
int ret;
if (unlikely(__this_cpu_read(xmit_recursion) > XMIT_RECURSION_LIMIT)) {
net_crit_ratelimited("bpf: recursion limit reached on datapath, buggy bpf program?\n");
kfree_skb(skb);
return -ENETDOWN;
}
skb->dev = dev;
__this_cpu_inc(xmit_recursion);
ret = dev_queue_xmit(skb);
__this_cpu_dec(xmit_recursion);
return ret;
} | 0 | [
"CWE-120"
]
| linux | 050fad7c4534c13c8eb1d9c2ba66012e014773cb | 322,024,590,088,745,620,000,000,000,000,000,000,000 | 18 | bpf: fix truncated jump targets on heavy expansions
Recently during testing, I ran into the following panic:
[ 207.892422] Internal error: Accessing user space memory outside uaccess.h routines: 96000004 [#1] SMP
[ 207.901637] Modules linked in: binfmt_misc [...]
[ 207.966530] CPU: 45 PID: 2256 Comm: test_verifier Tainted: G W 4.17.0-rc3+ #7
[ 207.974956] Hardware name: FOXCONN R2-1221R-A4/C2U4N_MB, BIOS G31FB18A 03/31/2017
[ 207.982428] pstate: 60400005 (nZCv daif +PAN -UAO)
[ 207.987214] pc : bpf_skb_load_helper_8_no_cache+0x34/0xc0
[ 207.992603] lr : 0xffff000000bdb754
[ 207.996080] sp : ffff000013703ca0
[ 207.999384] x29: ffff000013703ca0 x28: 0000000000000001
[ 208.004688] x27: 0000000000000001 x26: 0000000000000000
[ 208.009992] x25: ffff000013703ce0 x24: ffff800fb4afcb00
[ 208.015295] x23: ffff00007d2f5038 x22: ffff00007d2f5000
[ 208.020599] x21: fffffffffeff2a6f x20: 000000000000000a
[ 208.025903] x19: ffff000009578000 x18: 0000000000000a03
[ 208.031206] x17: 0000000000000000 x16: 0000000000000000
[ 208.036510] x15: 0000ffff9de83000 x14: 0000000000000000
[ 208.041813] x13: 0000000000000000 x12: 0000000000000000
[ 208.047116] x11: 0000000000000001 x10: ffff0000089e7f18
[ 208.052419] x9 : fffffffffeff2a6f x8 : 0000000000000000
[ 208.057723] x7 : 000000000000000a x6 : 00280c6160000000
[ 208.063026] x5 : 0000000000000018 x4 : 0000000000007db6
[ 208.068329] x3 : 000000000008647a x2 : 19868179b1484500
[ 208.073632] x1 : 0000000000000000 x0 : ffff000009578c08
[ 208.078938] Process test_verifier (pid: 2256, stack limit = 0x0000000049ca7974)
[ 208.086235] Call trace:
[ 208.088672] bpf_skb_load_helper_8_no_cache+0x34/0xc0
[ 208.093713] 0xffff000000bdb754
[ 208.096845] bpf_test_run+0x78/0xf8
[ 208.100324] bpf_prog_test_run_skb+0x148/0x230
[ 208.104758] sys_bpf+0x314/0x1198
[ 208.108064] el0_svc_naked+0x30/0x34
[ 208.111632] Code: 91302260 f9400001 f9001fa1 d2800001 (29500680)
[ 208.117717] ---[ end trace 263cb8a59b5bf29f ]---
The program itself which caused this had a long jump over the whole
instruction sequence where all of the inner instructions required
heavy expansions into multiple BPF instructions. Additionally, I also
had BPF hardening enabled which requires once more rewrites of all
constant values in order to blind them. Each time we rewrite insns,
bpf_adj_branches() would need to potentially adjust branch targets
which cross the patchlet boundary to accommodate for the additional
delta. Eventually that lead to the case where the target offset could
not fit into insn->off's upper 0x7fff limit anymore where then offset
wraps around becoming negative (in s16 universe), or vice versa
depending on the jump direction.
Therefore it becomes necessary to detect and reject any such occasions
in a generic way for native eBPF and cBPF to eBPF migrations. For
the latter we can simply check bounds in the bpf_convert_filter()'s
BPF_EMIT_JMP helper macro and bail out once we surpass limits. The
bpf_patch_insn_single() for native eBPF (and cBPF to eBPF in case
of subsequent hardening) is a bit more complex in that we need to
detect such truncations before hitting the bpf_prog_realloc(). Thus
the latter is split into an extra pass to probe problematic offsets
on the original program in order to fail early. With that in place
and carefully tested I no longer hit the panic and the rewrites are
rejected properly. The above example panic I've seen on bpf-next,
though the issue itself is generic in that a guard against this issue
in bpf seems more appropriate in this case.
Signed-off-by: Daniel Borkmann <[email protected]>
Acked-by: Martin KaFai Lau <[email protected]>
Signed-off-by: Alexei Starovoitov <[email protected]> |
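The rejection described above reduces to checking that a branch target, after adding the patch delta, still fits the 16-bit signed off field. A hedged sketch of that bounds check, illustrative only and not the kernel's exact code:

#include <limits.h>
#include <stdbool.h>

/* A jump crossing a patched region grows by 'delta' instructions; the
 * rewrite must be refused if the adjusted offset no longer fits in s16. */
static bool jump_offset_fits(int old_off, int delta)
{
	long new_off = (long)old_off + delta;
	return new_off >= SHRT_MIN && new_off <= SHRT_MAX;
}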
int mbedtls_asn1_write_null( unsigned char **p, unsigned char *start )
{
int ret;
size_t len = 0;
// Write NULL
//
MBEDTLS_ASN1_CHK_ADD( len, mbedtls_asn1_write_len( p, start, 0) );
MBEDTLS_ASN1_CHK_ADD( len, mbedtls_asn1_write_tag( p, start, MBEDTLS_ASN1_NULL ) );
return( (int) len );
} | 0 | []
| mbedtls | 97b5209bc01ab8b3b519fdb46cefc04739433124 | 314,269,869,203,861,300,000,000,000,000,000,000,000 | 12 | Fix potential double free in cert writing code
In case an entry with the given OID already exists in the list passed to
mbedtls_asn1_store_named_data() and there is not enough memory to allocate
room for the new value, the existing entry will be freed but the preceding
entry in the list will sill hold a pointer to it. (And the following entries
in the list are no longer reachable.) This results in memory leak or a double
free.
The issue is we want to leave the list in a consistent state on allocation
failure. (We could add a warning that the list is left in inconsistent state
when the function returns NULL, but behaviour changes that require more care
from the user are undesirable, especially in a stable branch.)
The chosen solution is a bit inefficient in that there is a time where both
blocks are allocated, but at least it's safe and this should trump efficiency
here: this code is only used for generating certificates, which is unlikely to
be done on very constrained devices, or to be in the critical loop of
anything. Also, the sizes involved should be fairly small anyway.
fixes #367 |
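The safe ordering described above reduces to a simple rule: allocate the replacement buffer first and free the old one only after the allocation has succeeded, so a failure leaves the entry and the surrounding list untouched. A minimal sketch of that pattern, independent of the mbed TLS structures (struct entry and entry_set_value are made up for illustration):
/* Allocate-before-free: the old value is only released once the new
 * allocation has succeeded, so a failure leaves the entry intact. */
#include <stdlib.h>
#include <string.h>
struct entry {
    unsigned char *val;
    size_t len;
};
static int entry_set_value(struct entry *e, const unsigned char *buf, size_t len)
{
    unsigned char *copy = malloc(len ? len : 1);
    if (copy == NULL)
        return -1;              /* entry and list untouched on failure */
    memcpy(copy, buf, len);
    free(e->val);               /* only now drop the old buffer */
    e->val = copy;
    e->len = len;
    return 0;
}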
static void checkActiveVdbeCnt(sqlite3 *db){
Vdbe *p;
int cnt = 0;
int nWrite = 0;
int nRead = 0;
p = db->pVdbe;
while( p ){
if( sqlite3_stmt_busy((sqlite3_stmt*)p) ){
cnt++;
if( p->readOnly==0 ) nWrite++;
if( p->bIsReader ) nRead++;
}
p = p->pNext;
}
assert( cnt==db->nVdbeActive );
assert( nWrite==db->nVdbeWrite );
assert( nRead==db->nVdbeRead );
} | 0 | [
"CWE-755"
]
| sqlite | 8654186b0236d556aa85528c2573ee0b6ab71be3 | 123,325,776,858,035,500,000,000,000,000,000,000,000 | 18 | When an error occurs while rewriting the parser tree for window functions
in the sqlite3WindowRewrite() routine, make sure that pParse->nErr is set,
and make sure that this shuts down any subsequent code generation that might
depend on the transformations that were implemented. This fixes a problem
discovered by the Yongheng and Rui fuzzer.
FossilOrigin-Name: e2bddcd4c55ba3cbe0130332679ff4b048630d0ced9a8899982edb5a3569ba7f |
QByteArray Helper::getPartitionTable(const QString &devicePath)
{
processExec(QStringLiteral("/sbin/sfdisk -d %1").arg(devicePath));
return lastProcessStandardOutput();
} | 0 | [
"CWE-59",
"CWE-61"
]
| deepin-clone | e079f3e2712b4f8c28e3e63e71ba1a1f90fce1ab | 241,490,479,484,186,570,000,000,000,000,000,000,000 | 6 | fix: Do not use the "/tmp" directory
https://github.com/linuxdeepin/deepin-clone/issues/16
https://bugzilla.opensuse.org/show_bug.cgi?id=1130388 |
ICON_IMAGE load_icon_default(ICON_TYPE type) {
WORD idi;
ICON_IMAGE img;
switch(type) {
case ICON_ACTIVE:
idi=IDI_STUNNEL_ACTIVE;
break;
case ICON_ERROR:
idi=IDI_STUNNEL_ERROR;
break;
case ICON_IDLE:
idi=IDI_STUNNEL_IDLE;
break;
default:
return NULL;
}
img=LoadImage(ghInst, MAKEINTRESOURCE(idi), IMAGE_ICON,
GetSystemMetrics(SM_CXSMICON), GetSystemMetrics(SM_CYSMICON), 0);
return DuplicateIcon(NULL, img);
} | 0 | [
"CWE-295"
]
| stunnel | ebad9ddc4efb2635f37174c9d800d06206f1edf9 | 332,036,305,931,702,800,000,000,000,000,000,000,000 | 21 | stunnel-5.57 |
aiff_close (SF_PRIVATE *psf)
{ AIFF_PRIVATE *paiff = psf->container_data ;
if (paiff != NULL && paiff->markstr != NULL)
{ free (paiff->markstr) ;
paiff->markstr = NULL ;
} ;
if (psf->file.mode == SFM_WRITE || psf->file.mode == SFM_RDWR)
{ aiff_write_tailer (psf) ;
aiff_write_header (psf, SF_TRUE) ;
} ;
return 0 ;
} /* aiff_close */ | 0 | [
"CWE-119",
"CWE-787"
]
| libsndfile | f833c53cb596e9e1792949f762e0b33661822748 | 57,425,460,069,719,730,000,000,000,000,000,000,000 | 15 | src/aiff.c: Fix a buffer read overflow
Secunia Advisory SA76717.
Found by: Laurent Delosieres, Secunia Research at Flexera Software |
static ssize_t push_ucs2(void *dest, const char *src, size_t dest_len, int flags)
{
size_t len=0;
size_t src_len = strlen(src);
size_t size = 0;
bool ret;
if (flags & STR_UPPER) {
char *tmpbuf = strupper_talloc(NULL, src);
ssize_t retval;
if (tmpbuf == NULL) {
return -1;
}
retval = push_ucs2(dest, tmpbuf, dest_len, flags & ~STR_UPPER);
talloc_free(tmpbuf);
return retval;
}
if (flags & STR_TERMINATE)
src_len++;
if (ucs2_align(NULL, dest, flags)) {
*(char *)dest = 0;
dest = (void *)((char *)dest + 1);
if (dest_len) dest_len--;
len++;
}
/* ucs2 is always a multiple of 2 bytes */
dest_len &= ~1;
ret = convert_string(CH_UNIX, CH_UTF16, src, src_len, dest, dest_len, &size);
if (ret == false) {
return 0;
}
len += size;
return (ssize_t)len;
} | 0 | [
"CWE-200"
]
| samba | ba5dbda6d0174a59d221c45cca52ecd232820d48 | 39,802,936,344,420,930,000,000,000,000,000,000,000 | 40 | CVE-2015-5330: Fix handling of unicode near string endings
Until now next_codepoint_ext() and next_codepoint_handle_ext() were
using strnlen(str, 5) to determine how much string they should try to
decode. This ended up looking past the end of the string when it was not
null terminated and the final character looked like a multi-byte encoding.
The fix is to let the caller say how long the string can be.
Bug: https://bugzilla.samba.org/show_bug.cgi?id=11599
Signed-off-by: Douglas Bagnall <[email protected]>
Pair-programmed-with: Andrew Bartlett <[email protected]>
Reviewed-by: Ralph Boehme <[email protected]> |
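The essence of the fix is that a multi-byte decoder must never read further than the caller says the buffer extends, rather than probing a fixed five bytes from the current position. The sketch below is not the Samba code; it uses a plain UTF-8 length rule purely to illustrate the bounded lookahead.
/* Illustrative: decode the next UTF-8 sequence without reading past
 * "remaining" bytes. Returns the sequence length, or 0 on truncation. */
#include <stddef.h>
#include <stdint.h>
static size_t next_codepoint_len(const uint8_t *p, size_t remaining)
{
    size_t need;
    if (remaining == 0)
        return 0;
    if (p[0] < 0x80)                need = 1;
    else if ((p[0] & 0xE0) == 0xC0) need = 2;
    else if ((p[0] & 0xF0) == 0xE0) need = 3;
    else if ((p[0] & 0xF8) == 0xF0) need = 4;
    else                            return 0;   /* invalid lead byte */
    return (need <= remaining) ? need : 0;      /* never read past the end */
}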
xmlParseStringPEReference(xmlParserCtxtPtr ctxt, const xmlChar **str) {
const xmlChar *ptr;
xmlChar cur;
xmlChar *name;
xmlEntityPtr entity = NULL;
if ((str == NULL) || (*str == NULL)) return(NULL);
ptr = *str;
cur = *ptr;
if (cur != '%')
return(NULL);
ptr++;
name = xmlParseStringName(ctxt, &ptr);
if (name == NULL) {
xmlFatalErrMsg(ctxt, XML_ERR_NAME_REQUIRED,
"xmlParseStringPEReference: no name\n");
*str = ptr;
return(NULL);
}
cur = *ptr;
if (cur != ';') {
xmlFatalErr(ctxt, XML_ERR_ENTITYREF_SEMICOL_MISSING, NULL);
xmlFree(name);
*str = ptr;
return(NULL);
}
ptr++;
/*
* Increase the number of entity references parsed
*/
ctxt->nbentities++;
/*
* Request the entity from SAX
*/
if ((ctxt->sax != NULL) &&
(ctxt->sax->getParameterEntity != NULL))
entity = ctxt->sax->getParameterEntity(ctxt->userData, name);
if (ctxt->instate == XML_PARSER_EOF) {
xmlFree(name);
*str = ptr;
return(NULL);
}
if (entity == NULL) {
/*
* [ WFC: Entity Declared ]
* In a document without any DTD, a document with only an
* internal DTD subset which contains no parameter entity
* references, or a document with "standalone='yes'", ...
* ... The declaration of a parameter entity must precede
* any reference to it...
*/
if ((ctxt->standalone == 1) ||
((ctxt->hasExternalSubset == 0) && (ctxt->hasPErefs == 0))) {
xmlFatalErrMsgStr(ctxt, XML_ERR_UNDECLARED_ENTITY,
"PEReference: %%%s; not found\n", name);
} else {
/*
* [ VC: Entity Declared ]
* In a document with an external subset or external
* parameter entities with "standalone='no'", ...
* ... The declaration of a parameter entity must
* precede any reference to it...
*/
xmlWarningMsg(ctxt, XML_WAR_UNDECLARED_ENTITY,
"PEReference: %%%s; not found\n",
name, NULL);
ctxt->valid = 0;
}
xmlParserEntityCheck(ctxt, 0, NULL, 0);
} else {
/*
* Internal checking in case the entity quest barfed
*/
if ((entity->etype != XML_INTERNAL_PARAMETER_ENTITY) &&
(entity->etype != XML_EXTERNAL_PARAMETER_ENTITY)) {
xmlWarningMsg(ctxt, XML_WAR_UNDECLARED_ENTITY,
"%%%s; is not a parameter entity\n",
name, NULL);
}
}
ctxt->hasPErefs = 1;
xmlFree(name);
*str = ptr;
return(entity);
} | 0 | []
| libxml2 | 0e1a49c8907645d2e155f0d89d4d9895ac5112b5 | 195,011,568,371,825,700,000,000,000,000,000,000,000 | 87 | Fix infinite loop in xmlStringLenDecodeEntities
When ctxt->instate == XML_PARSER_EOF, xmlParseStringEntityRef
returns NULL, which causes an infinite loop in xmlStringLenDecodeEntities.
Found with libFuzzer.
Signed-off-by: Zhipeng Xie <[email protected]> |
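The loop became infinite because the inner call started returning NULL at EOF without consuming any input while the caller kept iterating. A generic guard is to stop as soon as an iteration makes no progress or the parser reports EOF; a minimal sketch in which decode_one is a hypothetical stand-in for the inner parsing step:
/* Sketch of a progress guard for a decode loop. */
#include <stddef.h>
struct parser { int at_eof; };
/* hypothetical inner step: returns bytes consumed, 0 at EOF or on error */
static size_t decode_one(struct parser *p, const char *in, size_t len)
{
    (void)in;
    if (p->at_eof || len == 0)
        return 0;
    return 1;                   /* trivial stand-in: consume one byte */
}
static int decode_all(struct parser *p, const char *in, size_t len)
{
    size_t pos = 0;
    while (pos < len) {
        size_t used = decode_one(p, in + pos, len - pos);
        if (used == 0 || p->at_eof)
            return -1;          /* no progress or EOF: stop instead of spinning */
        pos += used;
    }
    return 0;
}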
GF_Err gf_isom_set_rvc_config(GF_ISOFile *movie, u32 track, u32 sampleDescriptionIndex, u16 rvc_predefined, char *mime, u8 *data, u32 size)
{
GF_MPEGVisualSampleEntryBox *entry;
GF_Err e;
GF_TrackBox *trak;
e = CanAccessMovie(movie, GF_ISOM_OPEN_WRITE);
if (e) return e;
trak = gf_isom_get_track_from_file(movie, track);
if (!trak) return GF_BAD_PARAM;
entry = (GF_MPEGVisualSampleEntryBox *) gf_list_get(trak->Media->information->sampleTable->SampleDescription->child_boxes, sampleDescriptionIndex-1);
if (!entry ) return GF_BAD_PARAM;
if (entry->internal_type != GF_ISOM_SAMPLE_ENTRY_VIDEO) return GF_BAD_PARAM;
GF_RVCConfigurationBox *rvcc = (GF_RVCConfigurationBox *) gf_isom_box_find_child(entry->child_boxes, GF_ISOM_BOX_TYPE_RVCC);
if (rvcc && rvcc->rvc_meta_idx) {
gf_isom_remove_meta_item(movie, GF_FALSE, track, rvcc->rvc_meta_idx);
rvcc->rvc_meta_idx = 0;
}
if (!rvcc) {
rvcc = (GF_RVCConfigurationBox *) gf_isom_box_new_parent(&entry->child_boxes, GF_ISOM_BOX_TYPE_RVCC);
if (!rvcc) return GF_OUT_OF_MEM;
}
rvcc->predefined_rvc_config = rvc_predefined;
if (!rvc_predefined) {
u32 it_id=0;
e = gf_isom_set_meta_type(movie, GF_FALSE, track, GF_META_TYPE_RVCI);
if (e) return e;
gf_isom_modify_alternate_brand(movie, GF_ISOM_BRAND_ISO2, GF_TRUE);
e = gf_isom_add_meta_item_memory(movie, GF_FALSE, track, "rvcconfig.xml", &it_id, GF_META_ITEM_TYPE_MIME, mime, NULL, NULL, data, size, NULL);
if (e) return e;
rvcc->rvc_meta_idx = gf_isom_get_meta_item_count(movie, GF_FALSE, track);
}
return GF_OK;
} | 0 | [
"CWE-476"
]
| gpac | ebfa346eff05049718f7b80041093b4c5581c24e | 189,731,183,654,362,100,000,000,000,000,000,000,000 | 39 | fixed #1706 |
bool CModules::OnChanActionMessage(CActionMessage& Message) {
MODHALTCHK(OnChanActionMessage(Message));
} | 0 | [
"CWE-20",
"CWE-264"
]
| znc | 8de9e376ce531fe7f3c8b0aa4876d15b479b7311 | 271,491,738,914,435,400,000,000,000,000,000,000,000 | 3 | Fix remote code execution and privilege escalation vulnerability.
To trigger this, you need to have a user account already.
Thanks for Jeriko One <[email protected]> for finding and reporting this.
CVE-2019-12816 |
set_sub_anchor(regex_t* reg, OptAncInfo* anc)
{
reg->sub_anchor |= anc->left_anchor & ANCHOR_BEGIN_LINE;
reg->sub_anchor |= anc->right_anchor & ANCHOR_END_LINE;
} | 0 | [
"CWE-125"
]
| php-src | c6e34d91b88638966662caac62c4d0e90538e317 | 17,311,980,826,470,595,000,000,000,000,000,000,000 | 5 | Fix bug #77371 (heap buffer overflow in mb regex functions - compile_string_node) |
//! Implicitely cast an image into a \c T* \const.
operator const T*() const {
return _data; | 0 | [
"CWE-125"
]
| CImg | 10af1e8c1ad2a58a0a3342a856bae63e8f257abb | 152,989,931,458,347,710,000,000,000,000,000,000,000 | 3 | Fix other issues in 'CImg<T>::load_bmp()'. |
int sysctl_numa_balancing(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
struct ctl_table t;
int err;
int state = static_branch_likely(&sched_numa_balancing);
if (write && !capable(CAP_SYS_ADMIN))
return -EPERM;
t = *table;
t.data = &state;
err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
if (err < 0)
return err;
if (write)
set_numabalancing_state(state);
return err;
} | 0 | [
"CWE-119"
]
| linux | 29d6455178a09e1dc340380c582b13356227e8df | 18,022,924,335,296,220,000,000,000,000,000,000,000 | 19 | sched: panic on corrupted stack end
Until now, hitting this BUG_ON caused a recursive oops (because oops
handling involves do_exit(), which calls into the scheduler, which in
turn raises an oops), which caused stuff below the stack to be
overwritten until a panic happened (e.g. via an oops in interrupt
context, caused by the overwritten CPU index in the thread_info).
Just panic directly.
Signed-off-by: Jann Horn <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
static void lsr_read_script_type(GF_LASeRCodec *lsr, GF_Node *n)
{
u32 flag;
GF_LSR_READ_INT(lsr, flag, 1, "hasType");
if (flag) {
GF_FieldInfo info;
lsr->last_error = gf_node_get_attribute_by_tag(n, TAG_XLINK_ATT_type, GF_TRUE, 0, &info);
GF_LSR_READ_INT(lsr, flag, 1, "choice");
if (flag) {
GF_LSR_READ_INT(lsr, flag, 1, "script");
switch (flag) {
case 0:
*(SVG_String*)info.far_ptr = gf_strdup("application/ecmascript");
break;
case 1:
*(SVG_String*)info.far_ptr = gf_strdup("application/jar-archive");
break;
default:
break;
}
} else {
lsr_read_byte_align_string(lsr, info.far_ptr, "type");
}
}
} | 0 | [
"CWE-190"
]
| gpac | faa75edde3dfeba1e2cf6ffa48e45a50f1042096 | 206,203,484,853,486,360,000,000,000,000,000,000,000 | 25 | fixed #2213 |
static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
{
struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
if (vcpu->preempted)
vcpu->preempted = false;
kvm_arch_sched_in(vcpu, cpu);
kvm_arch_vcpu_load(vcpu, cpu);
} | 0 | [
"CWE-416",
"CWE-362"
]
| linux | cfa39381173d5f969daf43582c95ad679189cbc9 | 313,437,566,901,441,500,000,000,000,000,000,000,000 | 11 | kvm: fix kvm_ioctl_create_device() reference counting (CVE-2019-6974)
kvm_ioctl_create_device() does the following:
1. creates a device that holds a reference to the VM object (with a borrowed
reference, the VM's refcount has not been bumped yet)
2. initializes the device
3. transfers the reference to the device to the caller's file descriptor table
4. calls kvm_get_kvm() to turn the borrowed reference to the VM into a real
reference
The ownership transfer in step 3 must not happen before the reference to the VM
becomes a proper, non-borrowed reference, which only happens in step 4.
After step 3, an attacker can close the file descriptor and drop the borrowed
reference, which can cause the refcount of the kvm object to drop to zero.
This means that we need to grab a reference for the device before
anon_inode_getfd(), otherwise the VM can disappear from under us.
Fixes: 852b6d57dc7f ("kvm: add device control API")
Cc: [email protected]
Signed-off-by: Jann Horn <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]> |
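The ordering bug is generic: once a file descriptor is installed, user space can close it at any moment and drop the reference it carries, so any reference the kernel still relies on must be taken before the install. The sketch below shows only the ordering; obj, publish_fd and the refcount helpers are made-up stand-ins, with publish_fd playing the role of anon_inode_getfd.
/* Sketch of take-the-reference-before-publishing ordering. */
#include <stdatomic.h>
struct obj { atomic_int refcount; };
static void obj_get(struct obj *o) { atomic_fetch_add(&o->refcount, 1); }
static void obj_put(struct obj *o) { atomic_fetch_sub(&o->refcount, 1); }
/* hypothetical: after this returns, another thread may already drop refs */
static int publish_fd(struct obj *o) { (void)o; return 3; }
static int create_device(struct obj *vm)
{
    int fd;
    obj_get(vm);             /* take the extra reference FIRST */
    fd = publish_fd(vm);     /* only now hand ownership to user space */
    if (fd < 0)
        obj_put(vm);         /* roll back on failure */
    return fd;
}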
static int network_config_set_interface (const oconfig_item_t *ci, /* {{{ */
int *interface)
{
if ((ci->values_num != 1)
|| (ci->values[0].type != OCONFIG_TYPE_STRING))
{
WARNING ("network plugin: The `Interface' config option needs exactly "
"one string argument.");
return (-1);
}
if (interface == NULL)
return (-1);
*interface = if_nametoindex (ci->values[0].value.string);
return (0);
} /* }}} int network_config_set_interface */ | 0 | [
"CWE-119",
"CWE-787"
]
| collectd | b589096f907052b3a4da2b9ccc9b0e2e888dfc18 | 200,211,502,639,899,240,000,000,000,000,000,000,000 | 18 | network plugin: Fix heap overflow in parse_packet().
Emilien Gaspar has identified a heap overflow in parse_packet(), the
function used by the network plugin to parse incoming network packets.
This is a vulnerability in collectd, though the scope is not clear at
this point. At the very least specially crafted network packets can be
used to crash the daemon. We can't rule out a potential remote code
execution though.
Fixes: CVE-2016-6254 |
static void vrend_draw_bind_ssbo_shader(struct vrend_sub_context *sub_ctx,
int shader_type)
{
uint32_t mask;
struct vrend_ssbo *ssbo;
struct vrend_resource *res;
int i;
if (!has_feature(feat_ssbo))
return;
if (!sub_ctx->prog->ssbo_used_mask[shader_type])
return;
if (!sub_ctx->ssbo_used_mask[shader_type])
return;
mask = sub_ctx->ssbo_used_mask[shader_type];
while (mask) {
i = u_bit_scan(&mask);
ssbo = &sub_ctx->ssbo[shader_type][i];
res = (struct vrend_resource *)ssbo->res;
glBindBufferRange(GL_SHADER_STORAGE_BUFFER, i, res->id,
ssbo->buffer_offset, ssbo->buffer_size);
}
} | 0 | [
"CWE-787"
]
| virglrenderer | 95e581fd181b213c2ed7cdc63f2abc03eaaa77ec | 213,551,553,154,760,500,000,000,000,000,000,000,000 | 27 | vrend: Add test to resource OOB write and fix it
v2: Also check that no depth != 1 has been send when none is due
Closes: #250
Signed-off-by: Gert Wollny <[email protected]>
Reviewed-by: Chia-I Wu <[email protected]> |
static const SSL_METHOD *ssl2_get_server_method(int ver)
{
if (ver == SSL2_VERSION)
return (SSLv2_server_method());
else
return (NULL);
} | 0 | [
"CWE-20"
]
| openssl | 86f8fb0e344d62454f8daf3e15236b2b59210756 | 3,350,828,396,290,116,400,000,000,000,000,000,000 | 7 | Fix reachable assert in SSLv2 servers.
This assert is reachable for servers that support SSLv2 and export ciphers.
Therefore, such servers can be DoSed by sending a specially crafted
SSLv2 CLIENT-MASTER-KEY.
Also fix s2_srvr.c to error out early if the key lengths are malformed.
These lengths are sent unencrypted, so this does not introduce an oracle.
CVE-2015-0293
This issue was discovered by Sean Burford (Google) and Emilia Käsper of
the OpenSSL development team.
Reviewed-by: Richard Levitte <[email protected]>
Reviewed-by: Tim Hudson <[email protected]> |
grub_ext2_uuid (grub_device_t device, char **uuid)
{
struct grub_ext2_data *data;
grub_disk_t disk = device->disk;
grub_dl_ref (my_mod);
data = grub_ext2_mount (disk);
if (data)
{
*uuid = grub_xasprintf ("%04x%04x-%04x-%04x-%04x-%04x%04x%04x",
grub_be_to_cpu16 (data->sblock.uuid[0]),
grub_be_to_cpu16 (data->sblock.uuid[1]),
grub_be_to_cpu16 (data->sblock.uuid[2]),
grub_be_to_cpu16 (data->sblock.uuid[3]),
grub_be_to_cpu16 (data->sblock.uuid[4]),
grub_be_to_cpu16 (data->sblock.uuid[5]),
grub_be_to_cpu16 (data->sblock.uuid[6]),
grub_be_to_cpu16 (data->sblock.uuid[7]));
}
else
*uuid = NULL;
grub_dl_unref (my_mod);
grub_free (data);
return grub_errno;
} | 0 | [
"CWE-119"
]
| grub | ac8cac1dac50daaf1c390d701cca3b55e16ee768 | 289,121,079,706,935,740,000,000,000,000,000,000,000 | 29 | * grub-core/fs/ext2.c: Remove variable length arrays. |
static uint32_t avifCodecConfigurationBoxGetDepth(const avifCodecConfigurationBox * av1C)
{
if (av1C->twelveBit) {
return 12;
} else if (av1C->highBitdepth) {
return 10;
}
return 8;
} | 0 | [
"CWE-703",
"CWE-787"
]
| libavif | 0a8e7244d494ae98e9756355dfbfb6697ded2ff9 | 309,059,903,069,102,160,000,000,000,000,000,000,000 | 9 | Set max image size to 16384 * 16384
Fix https://crbug.com/oss-fuzz/24728 and
https://crbug.com/oss-fuzz/24734. |
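Capping decoded image dimensions is a standard guard against oversized allocations and width*height overflow. A minimal sketch of such a check; the constant matches the limit mentioned above, but the helper itself is illustrative rather than the libavif code.
/* Sketch of a dimension sanity check before computing a pixel buffer size. */
#include <stdint.h>
#define MAX_DIM 16384u
static int image_size_ok(uint32_t width, uint32_t height, uint32_t bytes_pp,
                         uint64_t *out_bytes)
{
    if (width == 0 || height == 0 || width > MAX_DIM || height > MAX_DIM)
        return 0;
    /* 16384 * 16384 * bytes_pp fits comfortably in 64 bits */
    *out_bytes = (uint64_t)width * height * bytes_pp;
    return 1;
}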
ProcXkbGetMap(ClientPtr client)
{
DeviceIntPtr dev;
xkbGetMapReply rep;
XkbDescRec *xkb;
int n, status;
REQUEST(xkbGetMapReq);
REQUEST_SIZE_MATCH(xkbGetMapReq);
if (!(client->xkbClientFlags & _XkbClientInitialized))
return BadAccess;
CHK_KBD_DEVICE(dev, stuff->deviceSpec, client, DixGetAttrAccess);
CHK_MASK_OVERLAP(0x01, stuff->full, stuff->partial);
CHK_MASK_LEGAL(0x02, stuff->full, XkbAllMapComponentsMask);
CHK_MASK_LEGAL(0x03, stuff->partial, XkbAllMapComponentsMask);
xkb = dev->key->xkbInfo->desc;
rep = (xkbGetMapReply) {
.type = X_Reply,
.deviceID = dev->id,
.sequenceNumber = client->sequence,
.length = (SIZEOF(xkbGetMapReply) - SIZEOF(xGenericReply)) >> 2,
.present = stuff->partial | stuff->full,
.minKeyCode = xkb->min_key_code,
.maxKeyCode = xkb->max_key_code
};
if (stuff->full & XkbKeyTypesMask) {
rep.firstType = 0;
rep.nTypes = xkb->map->num_types;
}
else if (stuff->partial & XkbKeyTypesMask) {
if (((unsigned) stuff->firstType + stuff->nTypes) > xkb->map->num_types) {
client->errorValue = _XkbErrCode4(0x04, xkb->map->num_types,
stuff->firstType, stuff->nTypes);
return BadValue;
}
rep.firstType = stuff->firstType;
rep.nTypes = stuff->nTypes;
}
else
rep.nTypes = 0;
rep.totalTypes = xkb->map->num_types;
n = XkbNumKeys(xkb);
if (stuff->full & XkbKeySymsMask) {
rep.firstKeySym = xkb->min_key_code;
rep.nKeySyms = n;
}
else if (stuff->partial & XkbKeySymsMask) {
CHK_KEY_RANGE(0x05, stuff->firstKeySym, stuff->nKeySyms, xkb);
rep.firstKeySym = stuff->firstKeySym;
rep.nKeySyms = stuff->nKeySyms;
}
else
rep.nKeySyms = 0;
rep.totalSyms = 0;
if (stuff->full & XkbKeyActionsMask) {
rep.firstKeyAct = xkb->min_key_code;
rep.nKeyActs = n;
}
else if (stuff->partial & XkbKeyActionsMask) {
CHK_KEY_RANGE(0x07, stuff->firstKeyAct, stuff->nKeyActs, xkb);
rep.firstKeyAct = stuff->firstKeyAct;
rep.nKeyActs = stuff->nKeyActs;
}
else
rep.nKeyActs = 0;
rep.totalActs = 0;
if (stuff->full & XkbKeyBehaviorsMask) {
rep.firstKeyBehavior = xkb->min_key_code;
rep.nKeyBehaviors = n;
}
else if (stuff->partial & XkbKeyBehaviorsMask) {
CHK_KEY_RANGE(0x09, stuff->firstKeyBehavior, stuff->nKeyBehaviors, xkb);
rep.firstKeyBehavior = stuff->firstKeyBehavior;
rep.nKeyBehaviors = stuff->nKeyBehaviors;
}
else
rep.nKeyBehaviors = 0;
rep.totalKeyBehaviors = 0;
if (stuff->full & XkbVirtualModsMask)
rep.virtualMods = ~0;
else if (stuff->partial & XkbVirtualModsMask)
rep.virtualMods = stuff->virtualMods;
if (stuff->full & XkbExplicitComponentsMask) {
rep.firstKeyExplicit = xkb->min_key_code;
rep.nKeyExplicit = n;
}
else if (stuff->partial & XkbExplicitComponentsMask) {
CHK_KEY_RANGE(0x0B, stuff->firstKeyExplicit, stuff->nKeyExplicit, xkb);
rep.firstKeyExplicit = stuff->firstKeyExplicit;
rep.nKeyExplicit = stuff->nKeyExplicit;
}
else
rep.nKeyExplicit = 0;
rep.totalKeyExplicit = 0;
if (stuff->full & XkbModifierMapMask) {
rep.firstModMapKey = xkb->min_key_code;
rep.nModMapKeys = n;
}
else if (stuff->partial & XkbModifierMapMask) {
CHK_KEY_RANGE(0x0D, stuff->firstModMapKey, stuff->nModMapKeys, xkb);
rep.firstModMapKey = stuff->firstModMapKey;
rep.nModMapKeys = stuff->nModMapKeys;
}
else
rep.nModMapKeys = 0;
rep.totalModMapKeys = 0;
if (stuff->full & XkbVirtualModMapMask) {
rep.firstVModMapKey = xkb->min_key_code;
rep.nVModMapKeys = n;
}
else if (stuff->partial & XkbVirtualModMapMask) {
CHK_KEY_RANGE(0x0F, stuff->firstVModMapKey, stuff->nVModMapKeys, xkb);
rep.firstVModMapKey = stuff->firstVModMapKey;
rep.nVModMapKeys = stuff->nVModMapKeys;
}
else
rep.nVModMapKeys = 0;
rep.totalVModMapKeys = 0;
if ((status = XkbComputeGetMapReplySize(xkb, &rep)) != Success)
return status;
return XkbSendMap(client, xkb, &rep);
} | 0 | [
"CWE-119"
]
| xserver | f7cd1276bbd4fe3a9700096dec33b52b8440788d | 282,263,690,664,224,800,000,000,000,000,000,000,000 | 134 | Correct bounds checking in XkbSetNames()
CVE-2020-14345 / ZDI 11428
This vulnerability was discovered by:
Jan-Niklas Sohn working with Trend Micro Zero Day Initiative
Signed-off-by: Matthieu Herrb <[email protected]> |
void TNEFFree(TNEFStruct *TNEF) {
Attachment *p, *store;
FREEVARLENGTH(TNEF->from);
FREEVARLENGTH(TNEF->subject);
FREEVARLENGTH(TNEF->body);
FREEVARLENGTH(TNEF->CodePage);
FREEVARLENGTH(TNEF->OriginalMessageClass);
FREEVARLENGTH(TNEF->Owner);
FREEVARLENGTH(TNEF->SentFor);
FREEVARLENGTH(TNEF->Delegate);
FREEVARLENGTH(TNEF->AidOwner);
TNEFFreeMapiProps(&(TNEF->MapiProperties));
p = TNEF->starting_attach.next;
while (p != NULL) {
TNEFFreeAttachment(p);
store = p->next;
free(p);
p = store;
}
} | 0 | [
"CWE-399",
"CWE-125"
]
| ytnef | 3cb0f914d6427073f262e1b2b5fd973e3043cdf7 | 143,164,908,279,744,390,000,000,000,000,000,000,000 | 22 | BugFix - Potential OOB with Fields of Size 0
Thanks to @hannob for contributing a malformed TNEF stream with
a Version field of size 0. Now such files will return an error
indicating invalid data. |
void CLASS cubic_spline (const int *x_, const int *y_, const int len)
{
float **A, *b, *c, *d, *x, *y;
int i, j;
A = (float **) calloc (((2*len + 4)*sizeof **A + sizeof *A), 2*len);
if (!A) return;
A[0] = (float *) (A + 2*len);
for (i = 1; i < 2*len; i++)
A[i] = A[0] + 2*len*i;
y = len + (x = i + (d = i + (c = i + (b = A[0] + i*i))));
for (i = 0; i < len; i++) {
x[i] = x_[i] / 65535.0;
y[i] = y_[i] / 65535.0;
}
for (i = len-1; i > 0; i--) {
b[i] = (y[i] - y[i-1]) / (x[i] - x[i-1]);
d[i-1] = x[i] - x[i-1];
}
for (i = 1; i < len-1; i++) {
A[i][i] = 2 * (d[i-1] + d[i]);
if (i > 1) {
A[i][i-1] = d[i-1];
A[i-1][i] = d[i-1];
}
A[i][len-1] = 6 * (b[i+1] - b[i]);
}
for(i = 1; i < len-2; i++) {
float v = A[i+1][i] / A[i][i];
for(j = 1; j <= len-1; j++)
A[i+1][j] -= v * A[i][j];
}
for(i = len-2; i > 0; i--) {
float acc = 0;
for(j = i; j <= len-2; j++)
acc += A[i][j]*c[j];
c[i] = (A[i][len-1] - acc) / A[i][i];
}
for (i = 0; i < 0x10000; i++) {
float x_out = (float)(i / 65535.0);
float y_out = 0;
for (j = 0; j < len-1; j++) {
if (x[j] <= x_out && x_out <= x[j+1]) {
float v = x_out - x[j];
y_out = y[j] +
((y[j+1] - y[j]) / d[j] - (2 * d[j] * c[j] + c[j+1] * d[j])/6) * v
+ (c[j] * 0.5) * v*v + ((c[j+1] - c[j]) / (6 * d[j])) * v*v*v;
}
}
curve[i] = y_out < 0.0 ? 0 : (y_out >= 1.0 ? 65535 :
(ushort)(y_out * 65535.0 + 0.5));
}
free (A);
} | 0 | [
"CWE-129"
]
| LibRaw | 89d065424f09b788f443734d44857289489ca9e2 | 142,633,790,411,277,860,000,000,000,000,000,000,000 | 54 | fixed two more problems found by fuzzer |
ipv4_get_nw_frag(const struct ip_header *nh)
{
uint8_t nw_frag = 0;
if (OVS_UNLIKELY(IP_IS_FRAGMENT(nh->ip_frag_off))) {
nw_frag = FLOW_NW_FRAG_ANY;
if (nh->ip_frag_off & htons(IP_FRAG_OFF_MASK)) {
nw_frag |= FLOW_NW_FRAG_LATER;
}
}
return nw_frag;
} | 0 | [
"CWE-400"
]
| ovs | 79349cbab0b2a755140eedb91833ad2760520a83 | 235,823,394,482,959,700,000,000,000,000,000,000,000 | 13 | flow: Support extra padding length.
Although not required, padding can be optionally added until
the packet length is MTU bytes. A packet with extra padding
currently fails sanity checks.
Vulnerability: CVE-2020-35498
Fixes: fa8d9001a624 ("miniflow_extract: Properly handle small IP packets.")
Reported-by: Joakim Hindersson <[email protected]>
Acked-by: Ilya Maximets <[email protected]>
Signed-off-by: Flavio Leitner <[email protected]>
Signed-off-by: Ilya Maximets <[email protected]> |
static void h2_update_poll(struct conn_stream *cs)
{
struct h2s *h2s = cs->ctx;
if (!h2s)
return;
/* we may unblock a blocked read */
if (cs->flags & CS_FL_DATA_RD_ENA) {
/* the stream indicates it's willing to read */
h2s->h2c->flags &= ~H2_CF_DEM_SFULL;
if (h2s->h2c->dsi == h2s->id) {
conn_xprt_want_recv(cs->conn);
conn_xprt_want_send(cs->conn);
}
}
/* Note: the stream and stream-int code doesn't allow us to perform a
* synchronous send() here unfortunately, because this code is called
* as si_update() from the process_stream() context. This means that
* we have to queue the current cs and defer its processing after the
* connection's cs list is processed anyway.
*/
if (cs->flags & CS_FL_DATA_WR_ENA) {
if (LIST_ISEMPTY(&h2s->list)) {
if (LIST_ISEMPTY(&h2s->h2c->send_list) &&
!h2s->h2c->mbuf->o && // not yet subscribed
!(cs->conn->flags & CO_FL_SOCK_WR_SH))
conn_xprt_want_send(cs->conn);
LIST_ADDQ(&h2s->h2c->send_list, &h2s->list);
}
}
else if (!LIST_ISEMPTY(&h2s->list)) {
LIST_DEL(&h2s->list);
LIST_INIT(&h2s->list);
h2s->flags &= ~(H2_SF_BLK_MBUSY | H2_SF_BLK_MROOM | H2_SF_BLK_MFCTL);
}
/* this can happen from within si_chk_snd() */
if (h2s->h2c->mbuf->o && !(cs->conn->flags & CO_FL_XPRT_WR_ENA))
conn_xprt_want_send(cs->conn);
} | 0 | [
"CWE-119"
]
| haproxy | 3f0e1ec70173593f4c2b3681b26c04a4ed5fc588 | 298,428,058,059,562,070,000,000,000,000,000,000,000 | 44 | BUG/CRITICAL: h2: fix incorrect frame length check
The incoming H2 frame length was checked against the max_frame_size
setting instead of being checked against the bufsize. The max_frame_size
only applies to outgoing traffic and not to incoming one, so if a large
enough frame size is advertised in the SETTINGS frame, a wrapped frame
will be defragmented into a temporary allocated buffer where the second
fragment my overflow the heap by up to 16 kB.
It is very unlikely that this can be exploited for code execution given
that buffers are very short lived and their address not realistically
predictable in production, but an immediate crash is absolutely certain.
This fix must be backported to 1.8.
Many thanks to Jordan Zebor from F5 Networks for reporting this issue
in a responsible way. |
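The core of the fix is picking the right bound: incoming frame lengths must be checked against the size of the local reassembly buffer, not against the max_frame_size we advertise, which only constrains what we send. A hedged sketch of that check; struct conn and the 9-byte header constant are simplifications, not the HAProxy code.
/* Sketch: validate an incoming frame length against the local buffer,
 * not against the outgoing max_frame_size setting. */
#include <stddef.h>
#include <stdint.h>
struct conn {
    size_t   bufsize;         /* capacity of our reassembly buffer */
    uint32_t max_frame_size;  /* what we allow ourselves to SEND    */
};
static int incoming_frame_len_ok(const struct conn *c, uint32_t flen)
{
    const size_t hdr = 9;     /* HTTP/2 frame header size */
    if (c->bufsize <= hdr)
        return 0;
    /* wrong bound: flen <= c->max_frame_size (the peer controls that) */
    return flen <= c->bufsize - hdr;
}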
on_unregister_handler(TCMUService1HandlerManager1 *interface,
GDBusMethodInvocation *invocation,
gchar *subtype,
gpointer user_data)
{
struct tcmur_handler *handler = find_handler_by_subtype(subtype);
struct dbus_info *info = handler ? handler->opaque : NULL;
if (!handler) {
g_dbus_method_invocation_return_value(invocation,
g_variant_new("(bs)", FALSE,
"unknown subtype"));
return TRUE;
}
else if (handler->_is_dbus_handler != 1) {
g_dbus_method_invocation_return_value(invocation,
g_variant_new("(bs)", FALSE,
"cannot unregister internal handler"));
return TRUE;
}
dbus_unexport_handler(handler);
tcmur_unregister_dbus_handler(handler);
g_bus_unwatch_name(info->watcher_id);
g_free(info);
g_free(handler);
g_dbus_method_invocation_return_value(invocation,
g_variant_new("(bs)", TRUE, "succeeded"));
return TRUE;
} | 0 | [
"CWE-476"
]
| tcmu-runner | bb80e9c7a798f035768260ebdadffb6eb0786178 | 83,951,388,748,812,940,000,000,000,000,000,000,000 | 31 | only allow dynamic UnregisterHandler for external handlers, thereby fixing DoS
Trying to unregister an internal handler ended up in a SEGFAULT, because
the tcmur_handler->opaque was NULL. Way to reproduce:
dbus-send --system --print-reply --dest=org.kernel.TCMUService1 /org/kernel/TCMUService1/HandlerManager1 org.kernel.TCMUService1.HandlerManager1.UnregisterHandler string:qcow
we use a newly introduced boolean in struct tcmur_handler for keeping
track of external handlers. As suggested by mikechristie adjusting the
public data structure is acceptable. |
int xt_check_match(struct xt_mtchk_param *par,
unsigned int size, u16 proto, bool inv_proto)
{
int ret;
if (XT_ALIGN(par->match->matchsize) != size &&
par->match->matchsize != -1) {
/*
* ebt_among is exempt from centralized matchsize checking
* because it uses a dynamic-size data set.
*/
pr_err_ratelimited("%s_tables: %s.%u match: invalid size %u (kernel) != (user) %u\n",
xt_prefix[par->family], par->match->name,
par->match->revision,
XT_ALIGN(par->match->matchsize), size);
return -EINVAL;
}
if (par->match->table != NULL &&
strcmp(par->match->table, par->table) != 0) {
pr_info_ratelimited("%s_tables: %s match: only valid in %s table, not %s\n",
xt_prefix[par->family], par->match->name,
par->match->table, par->table);
return -EINVAL;
}
if (par->match->hooks && (par->hook_mask & ~par->match->hooks) != 0) {
char used[64], allow[64];
pr_info_ratelimited("%s_tables: %s match: used from hooks %s, but only valid from %s\n",
xt_prefix[par->family], par->match->name,
textify_hooks(used, sizeof(used),
par->hook_mask, par->family),
textify_hooks(allow, sizeof(allow),
par->match->hooks,
par->family));
return -EINVAL;
}
if (par->match->proto && (par->match->proto != proto || inv_proto)) {
pr_info_ratelimited("%s_tables: %s match: only valid for protocol %u\n",
xt_prefix[par->family], par->match->name,
par->match->proto);
return -EINVAL;
}
if (par->match->checkentry != NULL) {
ret = par->match->checkentry(par);
if (ret < 0)
return ret;
else if (ret > 0)
/* Flag up potential errors. */
return -EIO;
}
return 0;
} | 0 | []
| linux | 175e476b8cdf2a4de7432583b49c871345e4f8a1 | 221,393,546,266,801,350,000,000,000,000,000,000,000 | 52 | netfilter: x_tables: Use correct memory barriers.
When a new table value was assigned, it was followed by a write memory
barrier. This ensured that all writes before this point would complete
before any writes after this point. However, to determine whether the
rules are unused, the sequence counter is read. To ensure that all
writes have been done before these reads, a full memory barrier is
needed, not just a write memory barrier. The same argument applies when
incrementing the counter, before the rules are read.
Changing to using smp_mb() instead of smp_wmb() fixes the kernel panic
reported in cc00bcaa5899 (which is still present), while still
maintaining the same speed of replacing tables.
The smb_mb() barriers potentially slow the packet path, however testing
has shown no measurable change in performance on a 4-core MIPS64
platform.
Fixes: 7f5c6d4f665b ("netfilter: get rid of atomic ops in fast path")
Signed-off-by: Mark Tomlinson <[email protected]>
Signed-off-by: Pablo Neira Ayuso <[email protected]> |
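The distinction the patch relies on is that a write barrier only orders stores against stores, while ordering a store before a later load needs a full barrier. The classic store-buffering litmus test below (plain C11 atomics and pthreads, not kernel code) shows why: with only release/write ordering both threads may read 0, whereas with full fences that outcome is forbidden.
/* Store-buffering litmus test: why a full (store-load) fence is needed. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
static atomic_int x, y;
static int r1, r2;
static void *thread_a(void *arg)
{
    (void)arg;
    atomic_store_explicit(&x, 1, memory_order_relaxed);
    /* a write-only barrier here would still allow r1 == r2 == 0 */
    atomic_thread_fence(memory_order_seq_cst);
    r1 = atomic_load_explicit(&y, memory_order_relaxed);
    return NULL;
}
static void *thread_b(void *arg)
{
    (void)arg;
    atomic_store_explicit(&y, 1, memory_order_relaxed);
    atomic_thread_fence(memory_order_seq_cst);
    r2 = atomic_load_explicit(&x, memory_order_relaxed);
    return NULL;
}
int main(void)
{
    pthread_t a, b;
    pthread_create(&a, NULL, thread_a, NULL);
    pthread_create(&b, NULL, thread_b, NULL);
    pthread_join(a, NULL);
    pthread_join(b, NULL);
    printf("r1=%d r2=%d\n", r1, r2);  /* with full fences, 0/0 cannot happen */
    return 0;
}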
static inline void audit_proctitle_free(struct audit_context *context)
{
kfree(context->proctitle.value);
context->proctitle.value = NULL;
context->proctitle.len = 0;
} | 0 | [
"CWE-362"
]
| linux | 43761473c254b45883a64441dd0bc85a42f3645c | 236,235,481,529,170,550,000,000,000,000,000,000,000 | 6 | audit: fix a double fetch in audit_log_single_execve_arg()
There is a double fetch problem in audit_log_single_execve_arg()
where we first check the execve(2) arguments for any "bad" characters
which would require hex encoding and then re-fetch the arguments for
logging in the audit record[1]. Of course this leaves a window of
opportunity for an unsavory application to munge with the data.
This patch reworks things by only fetching the argument data once[2]
into a buffer where it is scanned and logged into the audit
records(s). In addition to fixing the double fetch, this patch
improves on the original code in a few other ways: better handling
of large arguments which require encoding, stricter record length
checking, and some performance improvements (completely unverified,
but we got rid of some strlen() calls, that's got to be a good
thing).
As part of the development of this patch, I've also created a basic
regression test for the audit-testsuite, the test can be tracked on
GitHub at the following link:
* https://github.com/linux-audit/audit-testsuite/issues/25
[1] If you pay careful attention, there is actually a triple fetch
problem due to a strnlen_user() call at the top of the function.
[2] This is a tiny white lie, we do make a call to strnlen_user()
prior to fetching the argument data. I don't like it, but due to the
way the audit record is structured we really have no choice unless we
copy the entire argument at once (which would require a rather
wasteful allocation). The good news is that with this patch the
kernel no longer relies on this strnlen_user() value for anything
beyond recording it in the log, we also update it with a trustworthy
value whenever possible.
Reported-by: Pengfei Wang <[email protected]>
Cc: <[email protected]>
Signed-off-by: Paul Moore <[email protected]> |
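The generic cure for a double fetch is to copy the untrusted bytes exactly once into a buffer the caller controls and to do both the scan and the logging from that single copy. A simplified userspace sketch of the pattern: memcpy stands in for copy_from_user, and the '?' substitution merely marks where the real code would switch to hex encoding.
/* Sketch of single-fetch handling: validate and consume the same copy. */
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
static int log_arg_once(const char *user_ptr, size_t len)
{
    char *copy = malloc(len + 1);
    size_t i;
    if (!copy)
        return -1;
    memcpy(copy, user_ptr, len);    /* stands in for copy_from_user(): ONE fetch */
    copy[len] = '\0';
    for (i = 0; i < len; i++)       /* decide on encoding from the copy ... */
        if (!isprint((unsigned char)copy[i]))
            copy[i] = '?';
    printf("arg=%s\n", copy);       /* ... and log the very same bytes */
    free(copy);
    return 0;
}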
static void init_explored_state(struct bpf_verifier_env *env, int idx)
{
env->insn_aux_data[idx].prune_point = true;
} | 0 | [
"CWE-119",
"CWE-681",
"CWE-787"
]
| linux | 5b9fbeb75b6a98955f628e205ac26689bcb1383e | 64,003,262,637,647,470,000,000,000,000,000,000,000 | 4 | bpf: Fix scalar32_min_max_or bounds tracking
Simon reported an issue with the current scalar32_min_max_or() implementation.
That is, compared to the other 32 bit subreg tracking functions, the code in
scalar32_min_max_or() stands out that it's using the 64 bit registers instead
of 32 bit ones. This leads to bounds tracking issues, for example:
[...]
8: R0=map_value(id=0,off=0,ks=4,vs=48,imm=0) R10=fp0 fp-8=mmmmmmmm
8: (79) r1 = *(u64 *)(r0 +0)
R0=map_value(id=0,off=0,ks=4,vs=48,imm=0) R10=fp0 fp-8=mmmmmmmm
9: R0=map_value(id=0,off=0,ks=4,vs=48,imm=0) R1_w=inv(id=0) R10=fp0 fp-8=mmmmmmmm
9: (b7) r0 = 1
10: R0_w=inv1 R1_w=inv(id=0) R10=fp0 fp-8=mmmmmmmm
10: (18) r2 = 0x600000002
12: R0_w=inv1 R1_w=inv(id=0) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
12: (ad) if r1 < r2 goto pc+1
R0_w=inv1 R1_w=inv(id=0,umin_value=25769803778) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
13: R0_w=inv1 R1_w=inv(id=0,umin_value=25769803778) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
13: (95) exit
14: R0_w=inv1 R1_w=inv(id=0,umax_value=25769803777,var_off=(0x0; 0x7ffffffff)) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
14: (25) if r1 > 0x0 goto pc+1
R0_w=inv1 R1_w=inv(id=0,umax_value=0,var_off=(0x0; 0x7fffffff),u32_max_value=2147483647) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
15: R0_w=inv1 R1_w=inv(id=0,umax_value=0,var_off=(0x0; 0x7fffffff),u32_max_value=2147483647) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
15: (95) exit
16: R0_w=inv1 R1_w=inv(id=0,umin_value=1,umax_value=25769803777,var_off=(0x0; 0x77fffffff),u32_max_value=2147483647) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
16: (47) r1 |= 0
17: R0_w=inv1 R1_w=inv(id=0,umin_value=1,umax_value=32212254719,var_off=(0x1; 0x700000000),s32_max_value=1,u32_max_value=1) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
[...]
The bound tests on the map value force the upper unsigned bound to be 25769803777
in 64 bit (0b11000000000000000000000000000000001) and then lower one to be 1. By
using OR they are truncated and thus result in the range [1,1] for the 32 bit reg
tracker. This is incorrect given the only thing we know is that the value must be
positive and thus 2147483647 (0b1111111111111111111111111111111) at max for the
subregs. Fix it by using the {u,s}32_{min,max}_value vars instead. This also makes
sense, for example, for the case where we update dst_reg->s32_{min,max}_value in
the else branch we need to use the newly computed dst_reg->u32_{min,max}_value as
we know that these are positive. Previously, in the else branch the 64 bit values
of umin_value=1 and umax_value=32212254719 were used and latter got truncated to
be 1 as upper bound there. After the fix the subreg range is now correct:
[...]
8: R0=map_value(id=0,off=0,ks=4,vs=48,imm=0) R10=fp0 fp-8=mmmmmmmm
8: (79) r1 = *(u64 *)(r0 +0)
R0=map_value(id=0,off=0,ks=4,vs=48,imm=0) R10=fp0 fp-8=mmmmmmmm
9: R0=map_value(id=0,off=0,ks=4,vs=48,imm=0) R1_w=inv(id=0) R10=fp0 fp-8=mmmmmmmm
9: (b7) r0 = 1
10: R0_w=inv1 R1_w=inv(id=0) R10=fp0 fp-8=mmmmmmmm
10: (18) r2 = 0x600000002
12: R0_w=inv1 R1_w=inv(id=0) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
12: (ad) if r1 < r2 goto pc+1
R0_w=inv1 R1_w=inv(id=0,umin_value=25769803778) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
13: R0_w=inv1 R1_w=inv(id=0,umin_value=25769803778) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
13: (95) exit
14: R0_w=inv1 R1_w=inv(id=0,umax_value=25769803777,var_off=(0x0; 0x7ffffffff)) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
14: (25) if r1 > 0x0 goto pc+1
R0_w=inv1 R1_w=inv(id=0,umax_value=0,var_off=(0x0; 0x7fffffff),u32_max_value=2147483647) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
15: R0_w=inv1 R1_w=inv(id=0,umax_value=0,var_off=(0x0; 0x7fffffff),u32_max_value=2147483647) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
15: (95) exit
16: R0_w=inv1 R1_w=inv(id=0,umin_value=1,umax_value=25769803777,var_off=(0x0; 0x77fffffff),u32_max_value=2147483647) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
16: (47) r1 |= 0
17: R0_w=inv1 R1_w=inv(id=0,umin_value=1,umax_value=32212254719,var_off=(0x0; 0x77fffffff),u32_max_value=2147483647) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
[...]
Fixes: 3f50f132d840 ("bpf: Verifier, do explicit ALU32 bounds tracking")
Reported-by: Simon Scannell <[email protected]>
Signed-off-by: Daniel Borkmann <[email protected]>
Reviewed-by: John Fastabend <[email protected]>
Acked-by: Alexei Starovoitov <[email protected]> |
input_buffer& ClientHello::set(input_buffer& in)
{
return in >> *this;
} | 0 | []
| mysql-server | b9768521bdeb1a8069c7b871f4536792b65fd79b | 277,510,441,297,779,700,000,000,000,000,000,000,000 | 4 | Updated yassl to yassl-2.3.8
(cherry picked from commit 7f9941eab55ed672bfcccd382dafbdbcfdc75aaa) |
void set_crop_bottom(uint64_t crop_bottom) { crop_bottom_ = crop_bottom; } | 0 | [
"CWE-20"
]
| libvpx | f00890eecdf8365ea125ac16769a83aa6b68792d | 314,082,685,892,627,970,000,000,000,000,000,000,000 | 1 | update libwebm to libwebm-1.0.0.27-352-g6ab9fcf
https://chromium.googlesource.com/webm/libwebm/+log/af81f26..6ab9fcf
Change-Id: I9d56e1fbaba9b96404b4fbabefddc1a85b79c25d |
GC_API GC_ATTR_MALLOC char * GC_CALL GC_strndup(const char *str, size_t size)
{
char *copy;
size_t len = strlen(str); /* str is expected to be non-NULL */
if (len > size)
len = size;
copy = GC_malloc_atomic(len + 1);
if (copy == NULL) {
# ifndef MSWINCE
errno = ENOMEM;
# endif
return NULL;
}
BCOPY(str, copy, len);
copy[len] = '\0';
return copy;
} | 0 | [
"CWE-119"
]
| bdwgc | 7292c02fac2066d39dd1bcc37d1a7054fd1e32ee | 265,124,909,395,030,500,000,000,000,000,000,000,000 | 17 | Fix malloc routines to prevent size value wrap-around
See issue #135 on Github.
* allchblk.c (GC_allochblk, GC_allochblk_nth): Use
OBJ_SZ_TO_BLOCKS_CHECKED instead of OBJ_SZ_TO_BLOCKS.
* malloc.c (GC_alloc_large): Likewise.
* alloc.c (GC_expand_hp_inner): Type of "bytes" local variable changed
from word to size_t; cast ROUNDUP_PAGESIZE argument to size_t; prevent
overflow when computing GC_heapsize+bytes > GC_max_heapsize.
* dbg_mlc.c (GC_debug_malloc, GC_debug_malloc_ignore_off_page,
GC_debug_malloc_atomic_ignore_off_page, GC_debug_generic_malloc,
GC_debug_generic_malloc_inner,
GC_debug_generic_malloc_inner_ignore_off_page,
GC_debug_malloc_stubborn, GC_debug_malloc_atomic,
GC_debug_malloc_uncollectable, GC_debug_malloc_atomic_uncollectable):
Use SIZET_SAT_ADD (instead of "+" operator) to add extra bytes to lb
value.
* fnlz_mlc.c (GC_finalized_malloc): Likewise.
* gcj_mlc.c (GC_debug_gcj_malloc): Likewise.
* include/private/gc_priv.h (ROUNDUP_GRANULE_SIZE, ROUNDED_UP_GRANULES,
ADD_SLOP, ROUNDUP_PAGESIZE): Likewise.
* include/private/gcconfig.h (GET_MEM): Likewise.
* mallocx.c (GC_malloc_many, GC_memalign): Likewise.
* os_dep.c (GC_wince_get_mem, GC_win32_get_mem): Likewise.
* typd_mlc.c (GC_malloc_explicitly_typed,
GC_malloc_explicitly_typed_ignore_off_page,
GC_calloc_explicitly_typed): Likewise.
* headers.c (GC_scratch_alloc): Change type of bytes_to_get from word
to size_t (because ROUNDUP_PAGESIZE_IF_MMAP result type changed).
* include/private/gc_priv.h: Include limits.h (unless SIZE_MAX already
defined).
* include/private/gc_priv.h (GC_SIZE_MAX, GC_SQRT_SIZE_MAX): Move from
malloc.c file.
* include/private/gc_priv.h (SIZET_SAT_ADD): New macro (defined before
include gcconfig.h).
* include/private/gc_priv.h (EXTRA_BYTES, GC_page_size): Change type
to size_t.
* os_dep.c (GC_page_size): Likewise.
* include/private/gc_priv.h (ROUNDUP_GRANULE_SIZE, ROUNDED_UP_GRANULES,
ADD_SLOP, ROUNDUP_PAGESIZE): Add comment about the argument.
* include/private/gcconfig.h (GET_MEM): Likewise.
* include/private/gc_priv.h (ROUNDUP_GRANULE_SIZE, ROUNDED_UP_GRANULES,
ADD_SLOP, OBJ_SZ_TO_BLOCKS, ROUNDUP_PAGESIZE,
ROUNDUP_PAGESIZE_IF_MMAP): Rename argument to "lb".
* include/private/gc_priv.h (OBJ_SZ_TO_BLOCKS_CHECKED): New macro.
* include/private/gcconfig.h (GC_win32_get_mem, GC_wince_get_mem,
GC_unix_get_mem): Change argument type from word to int.
* os_dep.c (GC_unix_mmap_get_mem, GC_unix_get_mem,
GC_unix_sbrk_get_mem, GC_wince_get_mem, GC_win32_get_mem): Likewise.
* malloc.c (GC_alloc_large_and_clear): Call OBJ_SZ_TO_BLOCKS only
if no value wrap around is guaranteed.
* malloc.c (GC_generic_malloc): Do not check for lb_rounded < lb case
(because ROUNDED_UP_GRANULES and GRANULES_TO_BYTES guarantees no value
wrap around).
* mallocx.c (GC_generic_malloc_ignore_off_page): Likewise.
* misc.c (GC_init_size_map): Change "i" local variable type from int
to size_t.
* os_dep.c (GC_write_fault_handler, catch_exception_raise): Likewise.
* misc.c (GC_envfile_init): Cast len to size_t when passed to
ROUNDUP_PAGESIZE_IF_MMAP.
* os_dep.c (GC_setpagesize): Cast GC_sysinfo.dwPageSize and
GETPAGESIZE() to size_t (when setting GC_page_size).
* os_dep.c (GC_unix_mmap_get_mem, GC_unmap_start, GC_remove_protection):
Expand ROUNDUP_PAGESIZE macro but without value wrap-around checking
(the argument is of word type).
* os_dep.c (GC_unix_mmap_get_mem): Replace -GC_page_size with
~GC_page_size+1 (because GC_page_size is unsigned); remove redundant
cast to size_t.
* os_dep.c (GC_unix_sbrk_get_mem): Add explicit cast of GC_page_size
to SBRK_ARG_T.
* os_dep.c (GC_wince_get_mem): Change type of res_bytes local variable
to size_t.
* typd_mlc.c: Do not include limits.h.
* typd_mlc.c (GC_SIZE_MAX, GC_SQRT_SIZE_MAX): Remove (as defined in
gc_priv.h now). |
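The SIZET_SAT_ADD idea is saturating addition: if adding the extra bytes to the requested size would wrap around size_t, clamp to SIZE_MAX so the allocation fails instead of silently shrinking. A minimal sketch; SAT_ADD mirrors the macro's shape but is not the bdwgc code verbatim.
/* Sketch of a saturating size_t addition used to guard allocation sizes. */
#include <stdint.h>
#include <stdlib.h>
#define SAT_ADD(a, b) ((a) < SIZE_MAX - (b) ? (a) + (b) : SIZE_MAX)
static void *alloc_with_extra(size_t lb, size_t extra)
{
    size_t total = SAT_ADD(lb, extra);
    if (total == SIZE_MAX)
        return NULL;            /* wrap-around would have occurred */
    return malloc(total);
}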
static void pegasus_set_multicast(struct net_device *net)
{
pegasus_t *pegasus = netdev_priv(net);
if (net->flags & IFF_PROMISC) {
pegasus->eth_regs[EthCtrl2] |= RX_PROMISCUOUS;
netif_info(pegasus, link, net, "Promiscuous mode enabled\n");
} else if (!netdev_mc_empty(net) || (net->flags & IFF_ALLMULTI)) {
pegasus->eth_regs[EthCtrl0] |= RX_MULTICAST;
pegasus->eth_regs[EthCtrl2] &= ~RX_PROMISCUOUS;
netif_dbg(pegasus, link, net, "set allmulti\n");
} else {
pegasus->eth_regs[EthCtrl0] &= ~RX_MULTICAST;
pegasus->eth_regs[EthCtrl2] &= ~RX_PROMISCUOUS;
}
update_eth_regs_async(pegasus);
} | 0 | [
"CWE-119",
"CWE-284"
]
| linux | 5593523f968bc86d42a035c6df47d5e0979b5ace | 222,525,969,249,657,670,000,000,000,000,000,000,000 | 17 | pegasus: Use heap buffers for all register access
Allocating USB buffers on the stack is not portable, and no longer
works on x86_64 (with VMAP_STACK enabled as per default).
Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
References: https://bugs.debian.org/852556
Reported-by: Lisandro Damián Nicanor Pérez Meyer <[email protected]>
Tested-by: Lisandro Damián Nicanor Pérez Meyer <[email protected]>
Signed-off-by: Ben Hutchings <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
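The rule behind the fix is that buffers handed to USB/DMA primitives must live on the heap, not on the caller's stack (which may be vmalloc'ed with VMAP_STACK and not DMA-able). A simplified sketch of the shape of the change, with do_transfer as a made-up stand-in for the USB control transfer:
/* Sketch: allocate a transfer buffer on the heap instead of the stack. */
#include <stdlib.h>
#include <string.h>
/* hypothetical transfer primitive that requires a DMA-able buffer */
static int do_transfer(void *buf, size_t len) { (void)buf; (void)len; return 0; }
static int read_register(unsigned idx, unsigned char *out, size_t len)
{
    unsigned char *buf = malloc(len);   /* NOT "unsigned char buf[len]" */
    int ret;
    (void)idx;
    if (!buf)
        return -1;
    ret = do_transfer(buf, len);
    if (ret == 0)
        memcpy(out, buf, len);
    free(buf);
    return ret;
}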
static int vhost_vdpa_open(struct inode *inode, struct file *filep)
{
struct vhost_vdpa *v;
struct vhost_dev *dev;
struct vhost_virtqueue **vqs;
int nvqs, i, r, opened;
v = container_of(inode->i_cdev, struct vhost_vdpa, cdev);
opened = atomic_cmpxchg(&v->opened, 0, 1);
if (opened)
return -EBUSY;
nvqs = v->nvqs;
r = vhost_vdpa_reset(v);
if (r)
goto err;
vqs = kmalloc_array(nvqs, sizeof(*vqs), GFP_KERNEL);
if (!vqs) {
r = -ENOMEM;
goto err;
}
dev = &v->vdev;
for (i = 0; i < nvqs; i++) {
vqs[i] = &v->vqs[i];
vqs[i]->handle_kick = handle_vq_kick;
}
vhost_dev_init(dev, vqs, nvqs, 0, 0, 0, false,
vhost_vdpa_process_iotlb_msg);
dev->iotlb = vhost_iotlb_alloc(0, 0);
if (!dev->iotlb) {
r = -ENOMEM;
goto err_init_iotlb;
}
r = vhost_vdpa_alloc_domain(v);
if (r)
goto err_init_iotlb;
vhost_vdpa_set_iova_range(v);
filep->private_data = v;
return 0;
err_init_iotlb:
vhost_dev_cleanup(&v->vdev);
kfree(vqs);
err:
atomic_dec(&v->opened);
return r;
} | 0 | [
"CWE-190"
]
| linux | 870aaff92e959e29d40f9cfdb5ed06ba2fc2dae0 | 27,955,646,572,542,975,000,000,000,000,000,000,000 | 55 | vdpa: clean up get_config_size ret value handling
The return type of get_config_size is size_t so it makes
sense to change the type of the variable holding its result.
That said, this already got taken care of (differently, and arguably
not as well) by commit 3ed21c1451a1 ("vdpa: check that offsets are
within bounds").
The added 'c->off > size' test in that commit will be done as an
unsigned comparison on 32-bit (safe due to not being signed).
On a 64-bit platform, it will be done as a signed comparison, but in
that case the comparison will be done in 64-bit, and 'c->off' being an
u32 it will be valid thanks to the extended range (ie both values will
be positive in 64 bits).
So this was a real bug, but it was already addressed and marked for stable.
Signed-off-by: Laura Abbott <[email protected]>
Reported-by: Luo Likang <[email protected]>
Signed-off-by: Michael S. Tsirkin <[email protected]> |
String read(int64_t len) {
StringBuffer sb(len);
auto buf = sb.appendCursor(len);
auto n = zip_fread(m_zipFile, buf, len);
if (n > 0) {
sb.resize(n);
return sb.detach();
}
return empty_string();
} | 0 | [
"CWE-22"
]
| hhvm | 65c95a01541dd2fbc9c978ac53bed235b5376686 | 279,630,217,765,357,100,000,000,000,000,000,000,000 | 10 | ZipArchive::extractTo bug 70350
Summary:Don't allow upward directory traversal when extracting zip archive files.
Files in zip files with `..` or starting at main root `/` should be normalized
to something where the file being extracted winds up within the directory or
a subdirectory where the actual extraction is taking place.
http://git.php.net/?p=php-src.git;a=commit;h=f9c2bf73adb2ede0a486b0db466c264f2b27e0bb
Reviewed By: FBNeal
Differential Revision: D2798452
fb-gh-sync-id: 844549c93e011d1e991bb322bf85822246b04e30
shipit-source-id: 844549c93e011d1e991bb322bf85822246b04e30 |
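A straightforward way to block upward traversal is to validate each archive entry name before joining it with the destination directory: reject absolute names and any ".." path component. The sketch below illustrates that check; the actual HHVM/PHP fix normalizes such names rather than rejecting them outright.
/* Sketch: reject zip entry names that could climb out of the extraction dir. */
#include <string.h>
static int entry_name_is_safe(const char *entry)
{
    const char *p = entry;
    if (entry[0] == '/')
        return 0;                       /* absolute path */
    while (*p) {
        size_t seg = strcspn(p, "/");
        if (seg == 2 && p[0] == '.' && p[1] == '.')
            return 0;                   /* ".." component */
        p += seg;
        while (*p == '/')
            p++;
    }
    return 1;
}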
static int __init mcheck_debugfs_init(void)
{
struct dentry *dmce, *ffake_panic;
dmce = mce_get_debugfs_dir();
if (!dmce)
return -ENOMEM;
ffake_panic = debugfs_create_file("fake_panic", 0444, dmce, NULL,
&fake_panic_fops);
if (!ffake_panic)
return -ENOMEM;
return 0;
} | 0 | [
"CWE-362"
]
| linux | b3b7c4795ccab5be71f080774c45bbbcc75c2aaf | 288,441,367,029,188,370,000,000,000,000,000,000,000 | 14 | x86/MCE: Serialize sysfs changes
The check_interval file in
/sys/devices/system/machinecheck/machinecheck<cpu number>
directory is a global timer value for MCE polling. If it is changed by one
CPU, mce_restart() broadcasts the event to other CPUs to delete and restart
the MCE polling timer and __mcheck_cpu_init_timer() reinitializes the
mce_timer variable.
If more than one CPU writes a specific value to the check_interval file
concurrently, mce_timer is not protected from such concurrent accesses and
all kinds of explosions happen. Since only root can write to those sysfs
variables, the issue is not a big deal security-wise.
However, concurrent writes to these configuration variables is void of
reason so the proper thing to do is to serialize the access with a mutex.
Boris:
- Make store_int_with_restart() use device_store_ulong() to filter out
negative intervals
- Limit min interval to 1 second
- Correct locking
- Massage commit message
Signed-off-by: Seunghun Han <[email protected]>
Signed-off-by: Borislav Petkov <[email protected]>
Signed-off-by: Thomas Gleixner <[email protected]>
Cc: Greg Kroah-Hartman <[email protected]>
Cc: Tony Luck <[email protected]>
Cc: linux-edac <[email protected]>
Cc: [email protected]
Link: http://lkml.kernel.org/r/[email protected] |
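Serializing the concurrent writers comes down to a mutex around the compare-update-restart sequence. A simplified userspace sketch of the pattern, with a pthread mutex standing in for the kernel mutex and restart_timer left as a hypothetical hook corresponding to mce_restart():
/* Sketch: serialize updates of a shared polling interval. */
#include <pthread.h>
static pthread_mutex_t interval_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long check_interval = 300;   /* seconds */
static int store_check_interval(unsigned long new_val)
{
    if (new_val < 1)                     /* clamp to a sane minimum */
        return -1;
    pthread_mutex_lock(&interval_lock);
    if (new_val != check_interval) {
        check_interval = new_val;
        /* restart_timer();  hypothetical, plays the role of mce_restart() */
    }
    pthread_mutex_unlock(&interval_lock);
    return 0;
}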
int iter_lookup_parent_glue_from_cache(struct module_env* env,
struct delegpt* dp, struct regional* region, struct query_info* qinfo)
{
struct ub_packed_rrset_key* akey;
struct delegpt_ns* ns;
size_t num = delegpt_count_targets(dp);
for(ns = dp->nslist; ns; ns = ns->next) {
/* get cached parentside A */
akey = rrset_cache_lookup(env->rrset_cache, ns->name,
ns->namelen, LDNS_RR_TYPE_A, qinfo->qclass,
PACKED_RRSET_PARENT_SIDE, *env->now, 0);
if(akey) {
log_rrset_key(VERB_ALGO, "found parent-side", akey);
ns->done_pside4 = 1;
/* a negative-cache-element has no addresses it adds */
if(!delegpt_add_rrset_A(dp, region, akey, 1, NULL))
log_err("malloc failure in lookup_parent_glue");
lock_rw_unlock(&akey->entry.lock);
}
/* get cached parentside AAAA */
akey = rrset_cache_lookup(env->rrset_cache, ns->name,
ns->namelen, LDNS_RR_TYPE_AAAA, qinfo->qclass,
PACKED_RRSET_PARENT_SIDE, *env->now, 0);
if(akey) {
log_rrset_key(VERB_ALGO, "found parent-side", akey);
ns->done_pside6 = 1;
/* a negative-cache-element has no addresses it adds */
if(!delegpt_add_rrset_AAAA(dp, region, akey, 1, NULL))
log_err("malloc failure in lookup_parent_glue");
lock_rw_unlock(&akey->entry.lock);
}
}
/* see if new (but lame) addresses have become available */
return delegpt_count_targets(dp) != num;
} | 0 | [
"CWE-400"
]
| unbound | ba0f382eee814e56900a535778d13206b86b6d49 | 229,954,073,889,453,740,000,000,000,000,000,000,000 | 35 | - CVE-2020-12662 Unbound can be tricked into amplifying an incoming
query into a large number of queries directed to a target.
- CVE-2020-12663 Malformed answers from upstream name servers can be
used to make Unbound unresponsive. |
static void __net_exit icmpv6_sk_exit(struct net *net)
{
int i;
for_each_possible_cpu(i) {
inet_ctl_sock_destroy(net->ipv6.icmp_sk[i]);
}
kfree(net->ipv6.icmp_sk);
} | 0 | [
"CWE-20",
"CWE-200"
]
| linux | 79dc7e3f1cd323be4c81aa1a94faa1b3ed987fb2 | 47,236,720,615,420,530,000,000,000,000,000,000,000 | 9 | net: handle no dst on skb in icmp6_send
Andrey reported the following while fuzzing the kernel with syzkaller:
kasan: CONFIG_KASAN_INLINE enabled
kasan: GPF could be caused by NULL-ptr deref or user memory access
general protection fault: 0000 [#1] SMP KASAN
Modules linked in:
CPU: 0 PID: 3859 Comm: a.out Not tainted 4.9.0-rc6+ #429
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Bochs 01/01/2011
task: ffff8800666d4200 task.stack: ffff880067348000
RIP: 0010:[<ffffffff833617ec>] [<ffffffff833617ec>]
icmp6_send+0x5fc/0x1e30 net/ipv6/icmp.c:451
RSP: 0018:ffff88006734f2c0 EFLAGS: 00010206
RAX: ffff8800666d4200 RBX: 0000000000000000 RCX: 0000000000000000
RDX: 0000000000000000 RSI: dffffc0000000000 RDI: 0000000000000018
RBP: ffff88006734f630 R08: ffff880064138418 R09: 0000000000000003
R10: dffffc0000000000 R11: 0000000000000005 R12: 0000000000000000
R13: ffffffff84e7e200 R14: ffff880064138484 R15: ffff8800641383c0
FS: 00007fb3887a07c0(0000) GS:ffff88006cc00000(0000) knlGS:0000000000000000
CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
CR2: 0000000020000000 CR3: 000000006b040000 CR4: 00000000000006f0
Stack:
ffff8800666d4200 ffff8800666d49f8 ffff8800666d4200 ffffffff84c02460
ffff8800666d4a1a 1ffff1000ccdaa2f ffff88006734f498 0000000000000046
ffff88006734f440 ffffffff832f4269 ffff880064ba7456 0000000000000000
Call Trace:
[<ffffffff83364ddc>] icmpv6_param_prob+0x2c/0x40 net/ipv6/icmp.c:557
[< inline >] ip6_tlvopt_unknown net/ipv6/exthdrs.c:88
[<ffffffff83394405>] ip6_parse_tlv+0x555/0x670 net/ipv6/exthdrs.c:157
[<ffffffff8339a759>] ipv6_parse_hopopts+0x199/0x460 net/ipv6/exthdrs.c:663
[<ffffffff832ee773>] ipv6_rcv+0xfa3/0x1dc0 net/ipv6/ip6_input.c:191
...
icmp6_send / icmpv6_send is invoked for both rx and tx paths. In both
cases the dst->dev should be preferred for determining the L3 domain
if the dst has been set on the skb. Fallback to the skb->dev if it has
not. This covers the case reported here where icmp6_send is invoked on
Rx before the route lookup.
Fixes: 5d41ce29e ("net: icmp6_send should use dst dev to determine L3 domain")
Reported-by: Andrey Konovalov <[email protected]>
Signed-off-by: David Ahern <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
isis_print_extd_ip_reach(netdissect_options *ndo,
const uint8_t *tptr, const char *ident, uint16_t afi)
{
char ident_buffer[20];
uint8_t prefix[sizeof(struct in6_addr)]; /* shared copy buffer for IPv4 and IPv6 prefixes */
u_int metric, status_byte, bit_length, byte_length, sublen, processed, subtlvtype, subtlvlen;
if (!ND_TTEST2(*tptr, 4))
return (0);
metric = EXTRACT_32BITS(tptr);
processed=4;
tptr+=4;
if (afi == AF_INET) {
if (!ND_TTEST2(*tptr, 1)) /* fetch status byte */
return (0);
status_byte=*(tptr++);
bit_length = status_byte&0x3f;
if (bit_length > 32) {
ND_PRINT((ndo, "%sIPv4 prefix: bad bit length %u",
ident,
bit_length));
return (0);
}
processed++;
} else if (afi == AF_INET6) {
if (!ND_TTEST2(*tptr, 1)) /* fetch status & prefix_len byte */
return (0);
status_byte=*(tptr++);
bit_length=*(tptr++);
if (bit_length > 128) {
ND_PRINT((ndo, "%sIPv6 prefix: bad bit length %u",
ident,
bit_length));
return (0);
}
processed+=2;
} else
return (0); /* somebody is fooling us */
byte_length = (bit_length + 7) / 8; /* prefix has variable length encoding */
if (!ND_TTEST2(*tptr, byte_length))
return (0);
memset(prefix, 0, sizeof prefix); /* clear the copy buffer */
memcpy(prefix,tptr,byte_length); /* copy as much as is stored in the TLV */
tptr+=byte_length;
processed+=byte_length;
if (afi == AF_INET)
ND_PRINT((ndo, "%sIPv4 prefix: %15s/%u",
ident,
ipaddr_string(ndo, prefix),
bit_length));
else if (afi == AF_INET6)
ND_PRINT((ndo, "%sIPv6 prefix: %s/%u",
ident,
ip6addr_string(ndo, prefix),
bit_length));
ND_PRINT((ndo, ", Distribution: %s, Metric: %u",
ISIS_MASK_TLV_EXTD_IP_UPDOWN(status_byte) ? "down" : "up",
metric));
if (afi == AF_INET && ISIS_MASK_TLV_EXTD_IP_SUBTLV(status_byte))
ND_PRINT((ndo, ", sub-TLVs present"));
else if (afi == AF_INET6)
ND_PRINT((ndo, ", %s%s",
ISIS_MASK_TLV_EXTD_IP6_IE(status_byte) ? "External" : "Internal",
ISIS_MASK_TLV_EXTD_IP6_SUBTLV(status_byte) ? ", sub-TLVs present" : ""));
if ((afi == AF_INET && ISIS_MASK_TLV_EXTD_IP_SUBTLV(status_byte))
|| (afi == AF_INET6 && ISIS_MASK_TLV_EXTD_IP6_SUBTLV(status_byte))
) {
/* assume that one prefix can hold more
than one subTLV - therefore the first byte must reflect
the aggregate bytecount of the subTLVs for this prefix
*/
if (!ND_TTEST2(*tptr, 1))
return (0);
sublen=*(tptr++);
processed+=sublen+1;
ND_PRINT((ndo, " (%u)", sublen)); /* print out subTLV length */
while (sublen>0) {
if (!ND_TTEST2(*tptr,2))
return (0);
subtlvtype=*(tptr++);
subtlvlen=*(tptr++);
/* prepend the indent string */
snprintf(ident_buffer, sizeof(ident_buffer), "%s ",ident);
if (!isis_print_ip_reach_subtlv(ndo, tptr, subtlvtype, subtlvlen, ident_buffer))
return(0);
tptr+=subtlvlen;
sublen-=(subtlvlen+2);
}
}
return (processed);
} | 1 | [
"CWE-125",
"CWE-787"
]
| tcpdump | 979dcefd7b259e9e233f77fe1c5312793bfd948f | 274,578,969,162,284,000,000,000,000,000,000,000,000 | 99 | CVE-2017-12998/IS-IS: Check for 2 bytes if we're going to fetch 2 bytes.
Probably a copy-and-pasteo.
This fixes a buffer over-read discovered by Forcepoint's security
researchers Otto Airamo & Antti Levomäki.
Add a test using the capture file supplied by the reporter(s). |
static int link_set_handler(sd_netlink *rtnl, sd_netlink_message *m, void *userdata) {
_cleanup_link_unref_ Link *link = userdata;
int r;
log_link_debug(link, "Set link");
r = sd_netlink_message_get_errno(m);
if (r < 0 && r != -EEXIST) {
log_link_error_errno(link, r, "Could not join netdev: %m");
link_enter_failed(link);
return 1;
}
return 0;
} | 0 | [
"CWE-120"
]
| systemd | f5a8c43f39937d97c9ed75e3fe8621945b42b0db | 272,109,263,921,427,200,000,000,000,000,000,000,000 | 15 | networkd: IPv6 router discovery - follow IPv6AcceptRouterAdvertisemnt=
The previous behavior:
When DHCPv6 was enabled, router discover was performed first, and then DHCPv6 was
enabled only if the relevant flags were passed in the Router Advertisement message.
Moreover, router discovery was performed even if AcceptRouterAdvertisements=false,
and even if router advertisements were accepted (by the kernel), the flags
indicating that DHCPv6 should be performed were ignored.
New behavior:
If RouterAdvertisements are accepted, and either no routers are found, or an
advertisement is received indicating DHCPv6 should be performed, the DHCPv6
client is started. Moreover, the DHCP option now truly enables the DHCPv6
client regardless of router discovery (though it will probably not be
very useful to get a lease without any routes, this seems the more consistent
approach).
The recommended default setting should be to set DHCP=ipv4 and to leave
IPv6AcceptRouterAdvertisements unset. |
static int fits_nan_64 (unsigned char *v)
{register unsigned long k;
k = (v[0] << 24) | (v[1] << 16) | (v[2] << 8) | v[3];
k &= 0x7fffffff; /* Dont care about the sign bit */
/* See NOST Definition of the Flexible Image Transport System (FITS), */
/* Appendix F, IEEE special formats. */
return ( ((k >= 0x7f7fffff) && (k <= 0x7fffffff))
|| ((k >= 0x00000001) && (k <= 0x00800000)));
} | 0 | [
"CWE-476"
]
| gimp | ace45631595e8781a1420842582d67160097163c | 112,005,550,647,552,300,000,000,000,000,000,000,000 | 12 | Bug 676804 - file handling DoS for fit file format
Apply patch from [email protected] which fixes a buffer overflow on
broken/malicious fits files. |
static void usbredir_cleanup_device_queues(USBRedirDevice *dev)
{
int i;
packet_id_queue_empty(&dev->cancelled);
packet_id_queue_empty(&dev->already_in_flight);
for (i = 0; i < MAX_ENDPOINTS; i++) {
usbredir_free_bufpq(dev, I2EP(i));
}
} | 0 | [
"CWE-770"
]
| qemu | 7ec54f9eb62b5d177e30eb8b1cad795a5f8d8986 | 221,090,850,162,245,070,000,000,000,000,000,000,000 | 10 | usb/redir: avoid dynamic stack allocation (CVE-2021-3527)
Use autofree heap allocation instead.
Fixes: 4f4321c11ff ("usb: use iovecs in USBPacket")
Reviewed-by: Philippe Mathieu-Daudé <[email protected]>
Signed-off-by: Gerd Hoffmann <[email protected]>
Tested-by: Philippe Mathieu-Daudé <[email protected]>
Message-Id: <[email protected]> |
static bool dump_fd_info(const char *dest_filename, char *source_filename, int source_base_ofs, uid_t uid, gid_t gid)
{
FILE *fp = fopen(dest_filename, "w");
if (!fp)
return false;
unsigned fd = 0;
while (fd <= 99999) /* paranoia check */
{
sprintf(source_filename + source_base_ofs, "fd/%u", fd);
char *name = malloc_readlink(source_filename);
if (!name)
break;
fprintf(fp, "%u:%s\n", fd, name);
free(name);
sprintf(source_filename + source_base_ofs, "fdinfo/%u", fd);
fd++;
FILE *in = fopen(source_filename, "r");
if (!in)
continue;
char buf[128];
while (fgets(buf, sizeof(buf)-1, in))
{
/* in case the line is not terminated, terminate it */
char *eol = strchrnul(buf, '\n');
eol[0] = '\n';
eol[1] = '\0';
fputs(buf, fp);
}
fclose(in);
}
const int dest_fd = fileno(fp);
if (fchown(dest_fd, uid, gid) < 0)
{
perror_msg("Can't change '%s' ownership to %lu:%lu", dest_filename, (long)uid, (long)gid);
fclose(fp);
unlink(dest_filename);
return false;
}
fclose(fp);
return true;
} | 1 | [
"CWE-59"
]
| abrt | d6e2f6f128cef4c21cb80941ae674c9842681aa7 | 61,577,906,513,352,070,000,000,000,000,000,000,000 | 45 | ccpp: open file for dump_fd_info with O_EXCL
To avoid possible races.
Related: #1211835
Signed-off-by: Jakub Filak <[email protected]> |
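Opening the destination with O_EXCL, as the commit title says, makes the create fail if the path already exists (for example as an attacker-planted symlink), closing the race. A sketch of that pattern with a hypothetical helper name:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static FILE *fopen_create_excl(const char *path)
{
    int fd = open(path, O_WRONLY | O_CREAT | O_EXCL, 0600);
    if (fd < 0)
        return NULL;          /* refuse pre-existing files instead of following them */

    FILE *fp = fdopen(fd, "w");
    if (!fp)
        close(fd);
    return fp;
}
/* end of illustrative sketch */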
int hfsplus_set_posix_acl(struct inode *inode, struct posix_acl *acl,
int type)
{
int err;
char *xattr_name;
size_t size = 0;
char *value = NULL;
hfs_dbg(ACL_MOD, "[%s]: ino %lu\n", __func__, inode->i_ino);
switch (type) {
case ACL_TYPE_ACCESS:
xattr_name = XATTR_NAME_POSIX_ACL_ACCESS;
if (acl) {
err = posix_acl_equiv_mode(acl, &inode->i_mode);
if (err < 0)
return err;
}
err = 0;
break;
case ACL_TYPE_DEFAULT:
xattr_name = XATTR_NAME_POSIX_ACL_DEFAULT;
if (!S_ISDIR(inode->i_mode))
return acl ? -EACCES : 0;
break;
default:
return -EINVAL;
}
if (acl) {
size = posix_acl_xattr_size(acl->a_count);
if (unlikely(size > HFSPLUS_MAX_INLINE_DATA_SIZE))
return -ENOMEM;
value = (char *)hfsplus_alloc_attr_entry();
if (unlikely(!value))
return -ENOMEM;
err = posix_acl_to_xattr(&init_user_ns, acl, value, size);
if (unlikely(err < 0))
goto end_set_acl;
}
err = __hfsplus_setxattr(inode, xattr_name, value, size, 0);
end_set_acl:
hfsplus_destroy_attr_entry((hfsplus_attr_entry *)value);
if (!err)
set_cached_acl(inode, type, acl);
return err;
} | 1 | [
"CWE-862",
"CWE-285"
]
| linux | 073931017b49d9458aa351605b43a7e34598caef | 232,009,027,515,793,380,000,000,000,000,000,000,000 | 53 | posix_acl: Clear SGID bit when setting file permissions
When file permissions are modified via chmod(2) and the user is not in
the owning group or capable of CAP_FSETID, the setgid bit is cleared in
inode_change_ok(). Setting a POSIX ACL via setxattr(2) sets the file
permissions as well as the new ACL, but doesn't clear the setgid bit in
a similar way; this allows to bypass the check in chmod(2). Fix that.
References: CVE-2016-7097
Reviewed-by: Christoph Hellwig <[email protected]>
Reviewed-by: Jeff Layton <[email protected]>
Signed-off-by: Jan Kara <[email protected]>
Signed-off-by: Andreas Gruenbacher <[email protected]> |
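The rule being enforced — drop the setgid bit when permissions are changed on behalf of a caller who is neither in the owning group nor CAP_FSETID-privileged — can be restated outside the kernel as follows. Types and parameters are illustrative, not the actual patch:

#include <stdbool.h>
#include <sys/stat.h>
#include <sys/types.h>

static mode_t mode_after_acl_update(mode_t new_mode,
                                    bool caller_in_owning_group,
                                    bool caller_has_fsetid)
{
    if (!caller_in_owning_group && !caller_has_fsetid)
        new_mode &= ~S_ISGID;     /* same check chmod(2) already applies */
    return new_mode;
}
/* end of illustrative sketch */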
static int http_auth(const char *creds, struct transaction_t *txn)
{
struct auth_challenge_t *chal = &txn->auth_chal;
static int status = SASL_OK;
int slen;
const char *clientin = NULL, *realm = NULL, *user, **authzid;
unsigned int clientinlen = 0;
struct auth_scheme_t *scheme;
static char base64[BASE64_BUF_SIZE+1];
const void *canon_user;
/* Split credentials into auth scheme and response */
slen = strcspn(creds, " \0");
if ((clientin = strchr(creds, ' '))) clientinlen = strlen(++clientin);
syslog(LOG_DEBUG,
"http_auth: status=%d scheme='%s' creds='%.*s%s'",
status, chal->scheme ? chal->scheme->name : "",
slen, creds, clientin ? " <response>" : "");
/* Free userid & authstate previously allocated for auth'd user */
if (httpd_userid) {
free(httpd_userid);
httpd_userid = NULL;
}
if (httpd_authstate) {
auth_freestate(httpd_authstate);
httpd_authstate = NULL;
}
chal->param = NULL;
if (chal->scheme) {
/* Use current scheme, if possible */
scheme = chal->scheme;
if (strncasecmp(scheme->name, creds, slen)) {
/* Changing auth scheme -> reset state */
syslog(LOG_DEBUG, "http_auth: changing scheme");
reset_saslconn(&httpd_saslconn);
chal->scheme = NULL;
status = SASL_OK;
}
}
if (!chal->scheme) {
/* Find the client-specified auth scheme */
syslog(LOG_DEBUG, "http_auth: find client scheme");
for (scheme = auth_schemes; scheme->name; scheme++) {
if (slen && !strncasecmp(scheme->name, creds, slen)) {
/* Found a supported scheme, see if its available */
if (!(avail_auth_schemes & (1 << scheme->idx))) scheme = NULL;
break;
}
}
if (!scheme || !scheme->name) {
/* Didn't find a matching scheme that is available */
syslog(LOG_DEBUG, "Unknown auth scheme '%.*s'", slen, creds);
return SASL_NOMECH;
}
/* We found it! */
syslog(LOG_DEBUG, "http_auth: found matching scheme: %s", scheme->name);
chal->scheme = scheme;
status = SASL_OK;
}
/* Base64 decode any client response, if necesary */
if (clientin && (scheme->flags & AUTH_BASE64)) {
int r = sasl_decode64(clientin, clientinlen,
base64, BASE64_BUF_SIZE, &clientinlen);
if (r != SASL_OK) {
syslog(LOG_ERR, "Base64 decode failed: %s",
sasl_errstring(r, NULL, NULL));
return r;
}
clientin = base64;
}
/* Get realm - based on namespace of URL */
switch (txn->req_tgt.namespace) {
case URL_NS_DEFAULT:
case URL_NS_PRINCIPAL:
realm = config_getstring(IMAPOPT_DAV_REALM);
break;
case URL_NS_CALENDAR:
realm = config_getstring(IMAPOPT_CALDAV_REALM);
break;
case URL_NS_ADDRESSBOOK:
realm = config_getstring(IMAPOPT_CARDDAV_REALM);
break;
case URL_NS_RSS:
realm = config_getstring(IMAPOPT_RSS_REALM);
break;
}
if (!realm) realm = config_servername;
#ifdef SASL_HTTP_REQUEST
/* Setup SASL HTTP request, if necessary */
if (scheme->flags & AUTH_NEED_REQUEST) {
sasl_http_request_t sasl_http_req;
sasl_http_req.method = txn->req_line.meth;
sasl_http_req.uri = txn->req_line.uri;
sasl_http_req.entity = NULL;
sasl_http_req.elen = 0;
sasl_http_req.non_persist = txn->flags.conn & CONN_CLOSE;
sasl_setprop(httpd_saslconn, SASL_HTTP_REQUEST, &sasl_http_req);
}
#endif /* SASL_HTTP_REQUEST */
if (scheme->idx == AUTH_BASIC) {
/* Basic (plaintext) authentication */
char *pass;
if (!clientin) {
/* Create initial challenge (base64 buffer is static) */
snprintf(base64, BASE64_BUF_SIZE, "realm=\"%s\"", realm);
chal->param = base64;
chal->scheme = NULL; /* make sure we don't reset the SASL ctx */
return status;
}
/* Split credentials into <user> ':' <pass>.
* We are working with base64 buffer, so we can modify it.
*/
user = base64;
pass = strchr(base64, ':');
if (!pass) {
syslog(LOG_ERR, "Basic auth: Missing password");
return SASL_BADPARAM;
}
*pass++ = '\0';
/* Verify the password */
status = sasl_checkpass(httpd_saslconn, user, strlen(user),
pass, strlen(pass));
memset(pass, 0, strlen(pass)); /* erase plaintext password */
if (status) {
syslog(LOG_NOTICE, "badlogin: %s Basic %s %s",
httpd_clienthost, user, sasl_errdetail(httpd_saslconn));
/* Don't allow user probing */
if (status == SASL_NOUSER) status = SASL_BADAUTH;
return status;
}
/* Successful authentication - fall through */
}
else {
/* SASL-based authentication (Digest, Negotiate, NTLM) */
const char *serverout = NULL;
unsigned int serveroutlen = 0;
if (status == SASL_CONTINUE) {
/* Continue current authentication exchange */
syslog(LOG_DEBUG, "http_auth: continue %s", scheme->saslmech);
status = sasl_server_step(httpd_saslconn, clientin, clientinlen,
&serverout, &serveroutlen);
}
else {
/* Start new authentication exchange */
syslog(LOG_DEBUG, "http_auth: start %s", scheme->saslmech);
status = sasl_server_start(httpd_saslconn, scheme->saslmech,
clientin, clientinlen,
&serverout, &serveroutlen);
}
/* Failure - probably bad client response */
if ((status != SASL_OK) && (status != SASL_CONTINUE)) {
syslog(LOG_ERR, "SASL failed: %s",
sasl_errstring(status, NULL, NULL));
return status;
}
/* Base64 encode any server challenge, if necesary */
if (serverout && (scheme->flags & AUTH_BASE64)) {
int r = sasl_encode64(serverout, serveroutlen,
base64, BASE64_BUF_SIZE, NULL);
if (r != SASL_OK) {
syslog(LOG_ERR, "Base64 encode failed: %s",
sasl_errstring(r, NULL, NULL));
return r;
}
serverout = base64;
}
chal->param = serverout;
if (status == SASL_CONTINUE) {
/* Need another step to complete authentication */
return status;
}
/* Successful authentication
*
* HTTP doesn't support security layers,
* so don't attach SASL context to prot layer.
*/
}
/* Get the userid from SASL - already canonicalized */
status = sasl_getprop(httpd_saslconn, SASL_USERNAME, &canon_user);
if (status != SASL_OK) {
syslog(LOG_ERR, "weird SASL error %d getting SASL_USERNAME", status);
return status;
}
user = (const char *) canon_user;
if (saslprops.authid) free(saslprops.authid);
saslprops.authid = xstrdup(user);
authzid = spool_getheader(txn->req_hdrs, "Authorize-As");
if (authzid && *authzid[0]) {
/* Trying to proxy as another user */
user = authzid[0];
status = proxy_authz(&user, txn);
if (status) return status;
}
httpd_userid = xstrdup(user);
auth_success(txn);
return status;
} | 0 | [
"CWE-787"
]
| cyrus-imapd | a5779db8163b99463e25e7c476f9cbba438b65f3 | 206,474,142,010,252,440,000,000,000,000,000,000,000 | 229 | HTTP: don't overrun buffer when parsing strings with sscanf() |
void get_partition_set(const TABLE *table, uchar *buf, const uint index,
const key_range *key_spec, part_id_range *part_spec)
{
partition_info *part_info= table->part_info;
uint num_parts= part_info->get_tot_partitions();
uint i, part_id;
uint sub_part= num_parts;
uint32 part_part= num_parts;
KEY *key_info= NULL;
bool found_part_field= FALSE;
DBUG_ENTER("get_partition_set");
part_spec->start_part= 0;
part_spec->end_part= num_parts - 1;
if ((index < MAX_KEY) &&
key_spec && key_spec->flag == (uint)HA_READ_KEY_EXACT &&
part_info->some_fields_in_PF.is_set(index))
{
key_info= table->key_info+index;
/*
The index can potentially provide at least one PF-field (field in the
partition function). Thus it is interesting to continue our probe.
*/
if (key_spec->length == key_info->key_length)
{
/*
The entire key is set so we can check whether we can immediately
derive either the complete PF or if we can derive either
the top PF or the subpartitioning PF. This can be established by
checking precalculated bits on each index.
*/
if (part_info->all_fields_in_PF.is_set(index))
{
/*
We can derive the exact partition to use, no more than this one
is needed.
*/
get_full_part_id_from_key(table,buf,key_info,key_spec,part_spec);
/*
Check if range can be adjusted by looking in read_partitions
*/
prune_partition_set(table, part_spec);
DBUG_VOID_RETURN;
}
else if (part_info->is_sub_partitioned())
{
if (part_info->all_fields_in_SPF.is_set(index))
{
if (get_sub_part_id_from_key(table, buf, key_info, key_spec, &sub_part))
{
part_spec->start_part= num_parts;
DBUG_VOID_RETURN;
}
}
else if (part_info->all_fields_in_PPF.is_set(index))
{
if (get_part_id_from_key(table,buf,key_info,
key_spec,(uint32*)&part_part))
{
/*
The value of the RANGE or LIST partitioning was outside of
allowed values. Thus it is certain that the result of this
scan will be empty.
*/
part_spec->start_part= num_parts;
DBUG_VOID_RETURN;
}
}
}
}
else
{
/*
Set an indicator on all partition fields that are bound.
If at least one PF-field was bound it pays off to check whether
the PF or PPF or SPF has been bound.
(PF = Partition Function, SPF = Subpartition Function and
PPF = Partition Function part of subpartitioning)
*/
if ((found_part_field= set_PF_fields_in_key(key_info,
key_spec->length)))
{
if (check_part_func_bound(part_info->full_part_field_array))
{
/*
We were able to bind all fields in the partition function even
by using only a part of the key. Calculate the partition to use.
*/
get_full_part_id_from_key(table,buf,key_info,key_spec,part_spec);
clear_indicator_in_key_fields(key_info);
/*
Check if range can be adjusted by looking in read_partitions
*/
prune_partition_set(table, part_spec);
DBUG_VOID_RETURN;
}
else if (part_info->is_sub_partitioned())
{
if (check_part_func_bound(part_info->subpart_field_array))
{
if (get_sub_part_id_from_key(table, buf, key_info, key_spec, &sub_part))
{
part_spec->start_part= num_parts;
clear_indicator_in_key_fields(key_info);
DBUG_VOID_RETURN;
}
}
else if (check_part_func_bound(part_info->part_field_array))
{
if (get_part_id_from_key(table,buf,key_info,key_spec,&part_part))
{
part_spec->start_part= num_parts;
clear_indicator_in_key_fields(key_info);
DBUG_VOID_RETURN;
}
}
}
}
}
}
{
/*
The next step is to analyse the table condition to see whether any
information about which partitions to scan can be derived from there.
Currently not implemented.
*/
}
/*
If we come here we have found a range of sorts we have either discovered
nothing or we have discovered a range of partitions with possible holes
in it. We need a bitvector to further the work here.
*/
if (!(part_part == num_parts && sub_part == num_parts))
{
/*
We can only arrive here if we are using subpartitioning.
*/
if (part_part != num_parts)
{
/*
We know the top partition and need to scan all underlying
subpartitions. This is a range without holes.
*/
DBUG_ASSERT(sub_part == num_parts);
part_spec->start_part= part_part * part_info->num_subparts;
part_spec->end_part= part_spec->start_part+part_info->num_subparts - 1;
}
else
{
DBUG_ASSERT(sub_part != num_parts);
part_spec->start_part= sub_part;
part_spec->end_part=sub_part+
(part_info->num_subparts*(part_info->num_parts-1));
for (i= 0, part_id= sub_part; i < part_info->num_parts;
i++, part_id+= part_info->num_subparts)
; //Set bit part_id in bit array
}
}
if (found_part_field)
clear_indicator_in_key_fields(key_info);
/*
Check if range can be adjusted by looking in read_partitions
*/
prune_partition_set(table, part_spec);
DBUG_VOID_RETURN;
} | 0 | [
"CWE-416"
]
| server | c02ebf3510850ba78a106be9974c94c3b97d8585 | 11,648,549,450,209,740,000,000,000,000,000,000,000 | 166 | MDEV-24176 Preparations
1. moved fix_vcol_exprs() call to open_table()
mysql_alter_table() doesn't do lock_tables() so it cannot win from
fix_vcol_exprs() from there. Tests affected: main.default_session
2. Vanilla cleanups and comments. |
flatpak_proxy_finalize (GObject *object)
{
FlatpakProxy *proxy = FLATPAK_PROXY (object);
if (g_socket_service_is_active (G_SOCKET_SERVICE (proxy)))
unlink (proxy->socket_path);
g_assert (proxy->clients == NULL);
g_hash_table_destroy (proxy->policy);
g_hash_table_destroy (proxy->wildcard_policy);
g_hash_table_destroy (proxy->filters);
g_free (proxy->socket_path);
g_free (proxy->dbus_address);
G_OBJECT_CLASS (flatpak_proxy_parent_class)->finalize (object);
} | 0 | [
"CWE-284",
"CWE-436"
]
| flatpak | 52346bf187b5a7f1c0fe9075b328b7ad6abe78f6 | 96,330,900,509,348,540,000,000,000,000,000,000,000 | 18 | Fix vulnerability in dbus proxy
During the authentication all client data is directly forwarded
to the dbus daemon as is, until we detect the BEGIN command after
which we start filtering the binary dbus protocol.
Unfortunately the detection of the BEGIN command in the proxy
did not exactly match the detection in the dbus daemon. A BEGIN
followed by a space or tab was considered ok in the daemon but
not by the proxy. This could be exploited to send arbitrary
dbus messages to the host, which can be used to break out of
the sandbox.
This was noticed by Gabriel Campana of The Google Security Team.
This fix makes the detection of the authentication phase end
match the dbus code. In addition we duplicate the authentication
line validation from dbus, which includes ensuring all data is
ASCII, and limiting the size of a line to 16k. In fact, we add
some extra stringent checks, disallowing ASCII control chars and
requiring that auth lines start with a capital letter. |
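A sketch of the stricter authentication-line checks the message lists — bounded length (16k), ASCII only, no control characters, leading capital letter. This illustrates those rules; it is not the flatpak proxy code:

#include <stddef.h>

#define MAX_AUTH_LINE 16384

static int auth_line_is_valid(const char *line, size_t len)
{
    if (len == 0 || len > MAX_AUTH_LINE)
        return 0;
    if (line[0] < 'A' || line[0] > 'Z')
        return 0;
    for (size_t i = 0; i < len; i++) {
        unsigned char c = (unsigned char)line[i];
        if (c < 0x20 || c >= 0x7f)   /* control character or non-ASCII */
            return 0;
    }
    return 1;
}
/* end of illustrative sketch */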
vm_run_eval (ecma_compiled_code_t *bytecode_data_p, /**< byte-code data */
uint32_t parse_opts) /**< ecma_parse_opts_t option bits */
{
ecma_value_t this_binding;
ecma_object_t *lex_env_p;
/* ECMA-262 v5, 10.4.2 */
if (parse_opts & ECMA_PARSE_DIRECT_EVAL)
{
this_binding = ecma_copy_value (JERRY_CONTEXT (vm_top_context_p)->this_binding);
lex_env_p = JERRY_CONTEXT (vm_top_context_p)->lex_env_p;
#if JERRY_DEBUGGER
uint32_t chain_index = parse_opts >> ECMA_PARSE_CHAIN_INDEX_SHIFT;
parse_opts &= (1 << ECMA_PARSE_CHAIN_INDEX_SHIFT) - 1;
while (chain_index != 0)
{
if (JERRY_UNLIKELY (lex_env_p->u2.outer_reference_cp == JMEM_CP_NULL))
{
ecma_bytecode_deref (bytecode_data_p);
ecma_free_value (this_binding);
return ecma_raise_range_error (ECMA_ERR_MSG ("Invalid scope chain index for eval"));
}
lex_env_p = ECMA_GET_NON_NULL_POINTER (ecma_object_t, lex_env_p->u2.outer_reference_cp);
if ((ecma_get_lex_env_type (lex_env_p) == ECMA_LEXICAL_ENVIRONMENT_THIS_OBJECT_BOUND)
|| (ecma_get_lex_env_type (lex_env_p) == ECMA_LEXICAL_ENVIRONMENT_DECLARATIVE))
{
chain_index--;
}
}
#endif /* JERRY_DEBUGGER */
}
else
{
#if JERRY_BUILTIN_REALMS
ecma_object_t *global_obj_p = (ecma_object_t *) ecma_op_function_get_realm (bytecode_data_p);
this_binding = ((ecma_global_object_t *) global_obj_p)->this_binding;
ecma_ref_object (ecma_get_object_from_value (this_binding));
#else /* !JERRY_BUILTIN_REALMS */
ecma_object_t *global_obj_p = ecma_builtin_get_global ();
ecma_ref_object (global_obj_p);
this_binding = ecma_make_object_value (global_obj_p);
#endif /* JERRY_BUILTIN_REALMS */
lex_env_p = ecma_get_global_scope (global_obj_p);
}
ecma_ref_object (lex_env_p);
if ((bytecode_data_p->status_flags & CBC_CODE_FLAGS_STRICT_MODE) != 0)
{
ecma_object_t *strict_lex_env_p = ecma_create_decl_lex_env (lex_env_p);
ecma_deref_object (lex_env_p);
lex_env_p = strict_lex_env_p;
}
if ((bytecode_data_p->status_flags & CBC_CODE_FLAGS_LEXICAL_BLOCK_NEEDED) != 0)
{
ecma_object_t *lex_block_p = ecma_create_decl_lex_env (lex_env_p);
lex_block_p->type_flags_refs |= ECMA_OBJECT_FLAG_BLOCK;
ecma_deref_object (lex_env_p);
lex_env_p = lex_block_p;
}
vm_frame_ctx_shared_t shared;
shared.bytecode_header_p = bytecode_data_p;
shared.function_object_p = NULL;
shared.status_flags = (parse_opts & ECMA_PARSE_DIRECT_EVAL) ? VM_FRAME_CTX_SHARED_DIRECT_EVAL : 0;
ecma_value_t completion_value = vm_run (&shared, this_binding, lex_env_p);
ecma_deref_object (lex_env_p);
ecma_free_value (this_binding);
#if JERRY_SNAPSHOT_EXEC
if (!(bytecode_data_p->status_flags & CBC_CODE_FLAGS_STATIC_FUNCTION))
{
ecma_bytecode_deref (bytecode_data_p);
}
#else /* !JERRY_SNAPSHOT_EXEC */
ecma_bytecode_deref (bytecode_data_p);
#endif /* JERRY_SNAPSHOT_EXEC */
return completion_value;
} /* vm_run_eval */ | 0 | [
"CWE-416"
]
| jerryscript | 3ad76f932c8d2e3b9ba2d95e64848698ec7d7290 | 203,001,349,473,081,800,000,000,000,000,000,000,000 | 89 | Fix for-in collection cleanup on abrupt 'has' result (#4807)
This patch fixes #4747
JerryScript-DCO-1.0-Signed-off-by: Robert Fancsik [email protected] |
int mnt_table_parse_stream(struct libmnt_table *tb, FILE *f, const char *filename)
{
int rc = -1;
int flags = 0;
pid_t tid = -1;
struct libmnt_parser pa = { .line = 0 };
assert(tb);
assert(f);
assert(filename);
DBG(TAB, ul_debugobj(tb, "%s: start parsing [entries=%d, filter=%s]",
filename, mnt_table_get_nents(tb),
tb->fltrcb ? "yes" : "not"));
pa.filename = filename;
pa.f = f;
/* necessary for /proc/mounts only, the /proc/self/mountinfo
* parser sets the flag properly
*/
if (tb->fmt == MNT_FMT_SWAPS)
flags = MNT_FS_SWAP;
else if (filename && strcmp(filename, _PATH_PROC_MOUNTS) == 0)
flags = MNT_FS_KERNEL;
do {
struct libmnt_fs *fs;
if (feof(f)) {
DBG(TAB, ul_debugobj(tb, "end-of-file"));
break;
}
fs = mnt_new_fs();
if (!fs)
goto err;
/* parse */
rc = mnt_table_parse_next(&pa, tb, fs);
if (rc == 0 && tb->fltrcb && tb->fltrcb(fs, tb->fltrcb_data))
rc = 1; /* filtered out by callback... */
/* add to the table */
if (rc == 0) {
rc = mnt_table_add_fs(tb, fs);
fs->flags |= flags;
if (rc == 0 && tb->fmt == MNT_FMT_MOUNTINFO) {
rc = kernel_fs_postparse(tb, fs, &tid, filename);
if (rc)
mnt_table_remove_fs(tb, fs);
}
}
/* remove reference (or deallocate on error) */
mnt_unref_fs(fs);
/* recoverable error */
if (rc > 0) {
DBG(TAB, ul_debugobj(tb, "recoverable error (continue)"));
continue;
}
/* fatal errors */
if (rc < 0 && !feof(f)) {
DBG(TAB, ul_debugobj(tb, "fatal error"));
goto err;
}
} while (1);
DBG(TAB, ul_debugobj(tb, "%s: stop parsing (%d entries)",
filename, mnt_table_get_nents(tb)));
parser_cleanup(&pa);
return 0;
err:
DBG(TAB, ul_debugobj(tb, "%s: parse error (rc=%d)", filename, rc));
parser_cleanup(&pa);
return rc;
} | 0 | [
"CWE-552",
"CWE-703"
]
| util-linux | 166e87368ae88bf31112a30e078cceae637f4cdb | 284,528,531,260,829,260,000,000,000,000,000,000,000 | 80 | libmount: remove support for deleted mount table entries
The "(deleted)" suffix has been originally used by kernel for deleted
mountpoints. Since kernel commit 9d4d65748a5ca26ea8650e50ba521295549bf4e3
(Dec 2014) kernel does not use this suffix for mount stuff in /proc at
all. Let's remove this support from libmount too.
Signed-off-by: Karel Zak <[email protected]> |
TEST_F(HttpConnectionManagerConfigTest, AlwaysSetRequestIdInResponseConfigured) {
const std::string yaml_string = R"EOF(
stat_prefix: ingress_http
always_set_request_id_in_response: true
route_config:
name: local_route
http_filters:
- name: envoy.filters.http.router
)EOF";
HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_,
date_provider_, route_config_provider_manager_,
scoped_routes_config_provider_manager_, http_tracer_manager_,
filter_config_provider_manager_);
EXPECT_TRUE(config.alwaysSetRequestIdInResponse());
} | 0 | [
"CWE-22"
]
| envoy | 5333b928d8bcffa26ab19bf018369a835f697585 | 122,527,274,442,349,430,000,000,000,000,000,000,000 | 16 | Implement handling of escaped slash characters in URL path
Fixes: CVE-2021-29492
Signed-off-by: Yan Avlasov <[email protected]> |
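The issue being handled is a path that hides a slash behind percent-encoding (%2F/%2f), which route or prefix matching performed before unescaping would not see. A minimal detector for that case; what to do once detected (reject, redirect, or forward) is policy and not shown here:

#include <stdbool.h>
#include <string.h>

static bool path_has_escaped_slash(const char *path)
{
    for (const char *p = path; (p = strchr(p, '%')) != NULL; p++) {
        if (p[1] == '2' && (p[2] == 'F' || p[2] == 'f'))
            return true;
    }
    return false;
}
/* end of illustrative sketch */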
gst_rmdemux_src_query (GstPad * pad, GstObject * parent, GstQuery * query)
{
gboolean res = FALSE;
GstRMDemux *rmdemux;
rmdemux = GST_RMDEMUX (parent);
switch (GST_QUERY_TYPE (query)) {
case GST_QUERY_POSITION:
GST_DEBUG_OBJECT (rmdemux, "Position query: no idea from demuxer!");
break;
case GST_QUERY_DURATION:{
GstFormat fmt;
gst_query_parse_duration (query, &fmt, NULL);
if (fmt == GST_FORMAT_TIME) {
GST_OBJECT_LOCK (rmdemux);
if (G_LIKELY (rmdemux->running)) {
gst_query_set_duration (query, GST_FORMAT_TIME, rmdemux->duration);
GST_DEBUG_OBJECT (rmdemux, "duration set to %" GST_TIME_FORMAT,
GST_TIME_ARGS (rmdemux->duration));
res = TRUE;
}
GST_OBJECT_UNLOCK (rmdemux);
}
break;
}
case GST_QUERY_SEEKING:{
GstFormat fmt;
gst_query_parse_seeking (query, &fmt, NULL, NULL, NULL);
if (fmt == GST_FORMAT_TIME) {
GST_OBJECT_LOCK (rmdemux);
if (G_LIKELY (rmdemux->running)) {
gst_query_set_seeking (query, GST_FORMAT_TIME, rmdemux->seekable,
0, rmdemux->duration);
res = TRUE;
}
GST_OBJECT_UNLOCK (rmdemux);
}
break;
}
case GST_QUERY_SEGMENT:
{
GstFormat format;
gint64 start, stop;
format = rmdemux->segment.format;
start =
gst_segment_to_stream_time (&rmdemux->segment, format,
rmdemux->segment.start);
if ((stop = rmdemux->segment.stop) == -1)
stop = rmdemux->segment.duration;
else
stop = gst_segment_to_stream_time (&rmdemux->segment, format, stop);
gst_query_set_segment (query, rmdemux->segment.rate, format, start, stop);
res = TRUE;
break;
}
default:
res = gst_pad_query_default (pad, parent, query);
break;
}
return res;
} | 0 | []
| gst-plugins-ugly | 9726aaf78e6643a5955864f444852423de58de29 | 208,375,563,563,791,540,000,000,000,000,000,000,000 | 68 | rmdemux: Make sure we have enough data available when parsing audio/video packets
Otherwise there will be out-of-bounds reads and potential crashes.
Thanks to Natalie Silvanovich for reporting.
Fixes https://gitlab.freedesktop.org/gstreamer/gst-plugins-ugly/-/issues/37
Part-of: <https://gitlab.freedesktop.org/gstreamer/gst-plugins-ugly/-/merge_requests/75> |
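The guard the commit adds amounts to: never parse a packet header unless the buffer really contains that many bytes. Generic shape of such a check, with a made-up header size rather than the RealMedia layout:

#include <stddef.h>
#include <stdint.h>

#define PACKET_HEADER_SIZE 12   /* illustrative constant */

static int parse_packet(const uint8_t *data, size_t avail)
{
    if (avail < PACKET_HEADER_SIZE)
        return -1;              /* would otherwise read out of bounds */
    /* ... safe to read the header fields from data here ... */
    (void)data;
    return 0;
}
/* end of illustrative sketch */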
Error ImageLoaderTGA::decode_tga_rle(const uint8_t *p_compressed_buffer, size_t p_pixel_size, uint8_t *p_uncompressed_buffer, size_t p_output_size) {
Error error;
Vector<uint8_t> pixels;
error = pixels.resize(p_pixel_size);
if (error != OK) {
return error;
}
uint8_t *pixels_w = pixels.ptrw();
size_t compressed_pos = 0;
size_t output_pos = 0;
size_t c = 0;
size_t count = 0;
while (output_pos < p_output_size) {
c = p_compressed_buffer[compressed_pos];
compressed_pos += 1;
count = (c & 0x7f) + 1;
if (output_pos + count * p_pixel_size > output_pos) {
return ERR_PARSE_ERROR;
}
if (c & 0x80) {
for (size_t i = 0; i < p_pixel_size; i++) {
pixels_w[i] = p_compressed_buffer[compressed_pos];
compressed_pos += 1;
}
for (size_t i = 0; i < count; i++) {
for (size_t j = 0; j < p_pixel_size; j++) {
p_uncompressed_buffer[output_pos + j] = pixels_w[j];
}
output_pos += p_pixel_size;
}
} else {
count *= p_pixel_size;
for (size_t i = 0; i < count; i++) {
p_uncompressed_buffer[output_pos] = p_compressed_buffer[compressed_pos];
compressed_pos += 1;
output_pos += 1;
}
}
}
return OK;
} | 0 | [
"CWE-20",
"CWE-787"
]
| godot | 403e4fd08b0b212e96f53d926e6273e0745eaa5a | 75,754,029,754,463,840,000,000,000,000,000,000,000 | 47 | Fix a crash in the TGA loader with malformed input |
bool Binary::has_rpath() const {
return has_command<RPathCommand>();
} | 0 | [
"CWE-703"
]
| LIEF | 7acf0bc4224081d4f425fcc8b2e361b95291d878 | 216,075,041,678,868,960,000,000,000,000,000,000,000 | 3 | Resolve #764 |
static struct page *shmem_alloc_page(gfp_t gfp,
struct shmem_inode_info *info, pgoff_t index)
{
struct vm_area_struct pvma;
struct page *page;
/* Create a pseudo vma that just contains the policy */
pvma.vm_start = 0;
/* Bias interleave by inode number to distribute better across nodes */
pvma.vm_pgoff = index + info->vfs_inode.i_ino;
pvma.vm_ops = NULL;
pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, index);
page = alloc_page_vma(gfp, &pvma, 0);
/* Drop reference taken by mpol_shared_policy_lookup() */
mpol_cond_put(pvma.vm_policy);
return page;
} | 0 | [
"CWE-399"
]
| linux | 5f00110f7273f9ff04ac69a5f85bb535a4fd0987 | 46,170,995,347,013,900,000,000,000,000,000,000,000 | 20 | tmpfs: fix use-after-free of mempolicy object
The tmpfs remount logic preserves filesystem mempolicy if the mpol=M
option is not specified in the remount request. A new policy can be
specified if mpol=M is given.
Before this patch remounting an mpol bound tmpfs without specifying
mpol= mount option in the remount request would set the filesystem's
mempolicy object to a freed mempolicy object.
To reproduce the problem boot a DEBUG_PAGEALLOC kernel and run:
# mkdir /tmp/x
# mount -t tmpfs -o size=100M,mpol=interleave nodev /tmp/x
# grep /tmp/x /proc/mounts
nodev /tmp/x tmpfs rw,relatime,size=102400k,mpol=interleave:0-3 0 0
# mount -o remount,size=200M nodev /tmp/x
# grep /tmp/x /proc/mounts
nodev /tmp/x tmpfs rw,relatime,size=204800k,mpol=??? 0 0
# note ? garbage in mpol=... output above
# dd if=/dev/zero of=/tmp/x/f count=1
# panic here
Panic:
BUG: unable to handle kernel NULL pointer dereference at (null)
IP: [< (null)>] (null)
[...]
Oops: 0010 [#1] SMP DEBUG_PAGEALLOC
Call Trace:
mpol_shared_policy_init+0xa5/0x160
shmem_get_inode+0x209/0x270
shmem_mknod+0x3e/0xf0
shmem_create+0x18/0x20
vfs_create+0xb5/0x130
do_last+0x9a1/0xea0
path_openat+0xb3/0x4d0
do_filp_open+0x42/0xa0
do_sys_open+0xfe/0x1e0
compat_sys_open+0x1b/0x20
cstar_dispatch+0x7/0x1f
Non-debug kernels will not crash immediately because referencing the
dangling mpol will not cause a fault. Instead the filesystem will
reference a freed mempolicy object, which will cause unpredictable
behavior.
The problem boils down to a dropped mpol reference below if
shmem_parse_options() does not allocate a new mpol:
config = *sbinfo
shmem_parse_options(data, &config, true)
mpol_put(sbinfo->mpol)
sbinfo->mpol = config.mpol /* BUG: saves unreferenced mpol */
This patch avoids the crash by not releasing the mempolicy if
shmem_parse_options() doesn't create a new mpol.
How far back does this issue go? I see it in both 2.6.36 and 3.3. I did
not look back further.
Signed-off-by: Greg Thelen <[email protected]>
Acked-by: Hugh Dickins <[email protected]>
Cc: <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
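The fix described above keeps the filesystem's existing mempolicy unless the remount request actually parsed a new one. Restated with made-up types; the kernel code differs:

#include <stdlib.h>

struct policy  { int refcount; };
struct sb_info { struct policy *mpol; };
struct options { struct policy *mpol; };

static void policy_put(struct policy *p)
{
    if (p && --p->refcount == 0)
        free(p);
}

static void apply_remount_policy(struct sb_info *sb, const struct options *opts)
{
    if (opts->mpol) {             /* mpol=... was given in the remount */
        policy_put(sb->mpol);     /* drop the old object */
        sb->mpol = opts->mpol;
    }
    /* else: keep sb->mpol untouched -- the pre-patch code released it here
     * and then kept using the freed object */
}
/* end of illustrative sketch */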
static int FIPS_dsa_test(int bad)
{
DSA *dsa = NULL;
unsigned char dgst[] = "etaonrishdlc";
int r = 0;
DSA_SIG *sig = NULL;
ERR_clear_error();
dsa = FIPS_dsa_new();
if (!dsa)
goto end;
if (!DSA_generate_parameters_ex(dsa, 1024,NULL,0,NULL,NULL,NULL))
goto end;
if (!DSA_generate_key(dsa))
goto end;
if (bad)
BN_add_word(dsa->pub_key, 1);
sig = FIPS_dsa_sign(dsa, dgst, sizeof(dgst) -1, EVP_sha256());
if (!sig)
goto end;
r = FIPS_dsa_verify(dsa, dgst, sizeof(dgst) -1, EVP_sha256(), sig);
end:
if (sig)
FIPS_dsa_sig_free(sig);
if (dsa)
FIPS_dsa_free(dsa);
if (r != 1)
return 0;
return 1;
} | 0 | []
| openssl | 200f249b8c3b6439e0200d01caadc24806f1a983 | 137,808,573,976,681,300,000,000,000,000,000,000,000 | 32 | Remove Dual EC DRBG from FIPS module. |
_WriteKey(TScreen *screen, const Char *in)
{
Char line[16];
unsigned count = 0;
size_t length = strlen((const char *) in);
if (screen->control_eight_bits) {
line[count++] = ANSI_CSI;
} else {
line[count++] = ANSI_ESC;
line[count++] = '[';
}
while (length--)
line[count++] = *in++;
line[count++] = '~';
tty_vwrite(screen->respond, line, count);
} | 0 | [
"CWE-399"
]
| xterm-snapshots | 82ba55b8f994ab30ff561a347b82ea340ba7075c | 86,562,933,983,889,920,000,000,000,000,000,000,000 | 17 | snapshot of project "xterm", label xterm-365d |
Debug(val, a, b, c, d, e, f, g)
int val;
char *a, *b, *c, *d, *e, *f, *g;
{
} | 0 | [
"CWE-787"
]
| cracklib | 33d7fa4585247cd2247a1ffa032ad245836c6edb | 229,745,503,591,803,540,000,000,000,000,000,000,000 | 5 | Fix a buffer overflow processing long words
A buffer overflow processing long words has been discovered. This commit
applies the patch from
https://build.opensuse.org/package/view_file/Base:System/cracklib/0004-overflow-processing-long-words.patch
by Howard Guo.
See https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=835386 and
http://www.openwall.com/lists/oss-security/2016/08/23/8 |
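The defensive pattern for the overflow described here is to bound (or truncate) a word before copying it into a fixed-size buffer. A generic sketch, not the cracklib patch itself:

#include <stddef.h>
#include <string.h>

static void copy_word_bounded(char *dst, size_t dst_size, const char *word)
{
    size_t n;

    if (dst_size == 0)
        return;
    n = strlen(word);
    if (n >= dst_size)
        n = dst_size - 1;         /* truncate instead of overflowing */
    memcpy(dst, word, n);
    dst[n] = '\0';
}
/* end of illustrative sketch */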
void CLASS parse_phase_one (int base)
{
unsigned entries, tag, len, data, save, i, j, c;
float romm_cam[3][3];
char *cp;
memset (&ph1, 0, sizeof ph1);
fseek (ifp, base, SEEK_SET);
order = get4() & 0xffff;
if (get4() >> 8 != 0x526177) return; /* "Raw" */
fseek (ifp, get4()+base, SEEK_SET);
entries = get4();
get4();
while (entries--) {
tag = get4();
fseek (ifp, 4, SEEK_CUR);
len = get4();
data = get4();
save = ftell(ifp);
fseek (ifp, base+data, SEEK_SET);
switch (tag) {
case 0x100: flip = "0653"[data & 3]-'0'; break;
case 0x106:
for (i=0; i < 3; i++)
for (j=0; j < 3; j++)
romm_cam[i][j] = getreal(11);
romm_coeff (romm_cam);
break;
case 0x107:
FORC3 cam_mul[c] = getreal(11);
break;
case 0x108: raw_width = data; break;
case 0x109: raw_height = data; break;
case 0x10a: left_margin = data; break;
case 0x10b: top_margin = data; break;
case 0x10c: width = data; break;
case 0x10d: height = data; break;
case 0x10e: ph1.format = data; break;
case 0x10f: data_offset = data+base; break;
case 0x110: meta_offset = data+base;
meta_length = len; break;
case 0x112: ph1.key_off = save - 4; break;
case 0x210: ph1.tag_210 = int_to_float(data); break;
case 0x21a: ph1.tag_21a = data; break;
case 0x21c: strip_offset = data+base; break;
case 0x21d: ph1.black = data; break;
case 0x222: ph1.split_col = data - left_margin; break;
case 0x223: ph1.black_off = data+base; break;
case 0x301:
model[63] = 0;
fread (model, 1, 63, ifp);
if ((cp = strstr(model," camera"))) *cp = 0;
}
fseek (ifp, save, SEEK_SET);
}
load_raw = ph1.format < 3 ?
&CLASS phase_one_load_raw : &CLASS phase_one_load_raw_c;
maximum = 0xffff;
strcpy (make, "Phase One");
if (model[0]) return;
switch (raw_height) {
case 2060: strcpy (model,"LightPhase"); break;
case 2682: strcpy (model,"H 10"); break;
case 4128: strcpy (model,"H 20"); break;
case 5488: strcpy (model,"H 25"); break;
}
} | 0 | [
"CWE-189"
]
| rawstudio | 983bda1f0fa5fa86884381208274198a620f006e | 209,753,007,110,954,570,000,000,000,000,000,000,000 | 67 | Avoid overflow in ljpeg_start(). |
void Compute(OpKernelContext* ctx) override {
const Tensor& shape = ctx->input(0);
OP_REQUIRES(
ctx, TensorShapeUtils::IsVector(shape.shape()),
errors::InvalidArgument("shape must be a vector of int32, got shape ",
shape.shape().DebugString()));
auto dims = shape.flat<int32>();
TensorShape out_shape;
OP_REQUIRES_OK(ctx, TensorShapeUtils::MakeShape(
reinterpret_cast<const int32*>(dims.data()),
dims.size(), &out_shape));
Tensor* out = nullptr;
OP_REQUIRES_OK(ctx, ctx->allocate_output(0, out_shape, &out));
if (init_) {
functor::SetZeroFunctor<Device, T>()(ctx->eigen_device<Device>(),
out->flat<T>());
}
} | 0 | [
"CWE-369"
]
| tensorflow | e86605c0a336c088b638da02135ea6f9f6753618 | 142,317,067,181,359,350,000,000,000,000,000,000,000 | 19 | Fix FPE in inplace update ops.
PiperOrigin-RevId: 388303197
Change-Id: Ib48309b6213ffe53eba81004b00e889d653e4b83 |
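An FPE in ops like these is typically an integer division or modulo by a runtime-supplied dimension that turns out to be zero; the cure is to validate before dividing. Generic guard, not the TensorFlow kernel code:

#include <stdint.h>

static int wrap_row_index(int64_t i, int64_t num_rows, int64_t *out)
{
    if (num_rows == 0)
        return -1;                /* reject instead of computing i % 0 */
    *out = i % num_rows;
    return 0;
}
/* end of illustrative sketch */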
void Curl_ssl_sessionid_lock(struct Curl_easy *data)
{
if(SSLSESSION_SHARED(data))
Curl_share_lock(data, CURL_LOCK_DATA_SSL_SESSION, CURL_LOCK_ACCESS_SINGLE);
} | 0 | [
"CWE-290"
]
| curl | b09c8ee15771c614c4bf3ddac893cdb12187c844 | 117,689,240,478,269,560,000,000,000,000,000,000,000 | 5 | vtls: add 'isproxy' argument to Curl_ssl_get/addsessionid()
To make sure we set and extract the correct session.
Reported-by: Mingtao Yang
Bug: https://curl.se/docs/CVE-2021-22890.html
CVE-2021-22890 |
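The point of the extra argument is that a TLS session resumed from the cache must have been established with the same peer role: a session negotiated with a proxy must never match a lookup for an origin server, and vice versa. Illustrative cache-match predicate, not libcurl's actual structures:

#include <stdbool.h>
#include <string.h>

struct cached_session {
    char host[256];
    int  port;
    bool is_proxy;
};

static bool session_matches(const struct cached_session *s,
                            const char *host, int port, bool is_proxy)
{
    return s->is_proxy == is_proxy
        && s->port == port
        && strcmp(s->host, host) == 0;
}
/* end of illustrative sketch */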