Column summary: func: string (lengths 0 to 484k); target: int64 (0 to 1); cwe: list (lengths 0 to 4); project: string (799 classes); commit_id: string (length 40); hash: float64 (1,215,700,430,453,689,100,000,000 to 340,281,914,521,452,260,000,000,000,000); size: int64 (1 to 24k); message: string (lengths 0 to 13.3k)

| func | target | cwe | project | commit_id | hash | size | message |
---|---|---|---|---|---|---|---|
static void ext4_mb_seq_groups_stop(struct seq_file *seq, void *v)
{
}
| 0 | ["CWE-416"] | linux | 8844618d8aa7a9973e7b527d038a2a589665002c | 186,596,954,439,355,220,000,000,000,000,000,000,000 | 3 |
ext4: only look at the bg_flags field if it is valid
The bg_flags field in the block group descriptors is only valid if the
uninit_bg or metadata_csum feature is enabled. We were not
consistently looking at this field; fix this.
Also block group #0 must never have uninitialized allocation bitmaps,
or need to be zeroed, since that's where the root inode, and other
special inodes are set up. Check for these conditions and mark the
file system as corrupted if they are detected.
This addresses CVE-2018-10876.
https://bugzilla.kernel.org/show_bug.cgi?id=199403
Signed-off-by: Theodore Ts'o <[email protected]>
Cc: [email protected]
|
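A minimal sketch of the pattern described in the entry above, with made-up names (sb_features and BG_BLOCK_UNINIT are stand-ins, not ext4's): reads of bg_flags are gated on the features that define the field.

#include <stdbool.h>

/* Sketch only: bg_flags is meaningful only when uninit_bg or metadata_csum
 * is enabled; otherwise treat the group as fully initialized. */
struct sb_features { bool has_uninit_bg; bool has_metadata_csum; };
#define BG_BLOCK_UNINIT 0x1 /* hypothetical flag bit for this sketch */

static bool bg_block_uninit(const struct sb_features *sb, unsigned int bg_flags)
{
    if (!sb->has_uninit_bg && !sb->has_metadata_csum)
        return false; /* field undefined here: never trust it */
    return (bg_flags & BG_BLOCK_UNINIT) != 0;
}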
struct luks2_reencrypt *crypt_get_luks2_reencrypt(struct crypt_device *cd)
{
return cd->u.luks2.rh;
}
| 0 | ["CWE-345"] | cryptsetup | 0113ac2d889c5322659ad0596d4cfc6da53e356c | 120,392,472,953,714,120,000,000,000,000,000,000,000 | 4 |
Fix CVE-2021-4122 - LUKS2 reencryption crash recovery attack
Fix possible attacks against data confidentiality through LUKS2 online
reencryption extension crash recovery.
An attacker can modify on-disk metadata to simulate decryption in
progress with crashed (unfinished) reencryption step and persistently
decrypt part of the LUKS device.
This attack requires repeated physical access to the LUKS device but
no knowledge of user passphrases.
The decryption step is performed after a valid user activates
the device with a correct passphrase and modified metadata.
There are no visible warnings for the user that such recovery happened
(except using the luksDump command). The attack can also be reversed
afterward (simulating crashed encryption from a plaintext) with
possible modification of revealed plaintext.
The problem was caused by reusing a mechanism designed for actual
reencryption operation without reassessing the security impact for new
encryption and decryption operations. While the reencryption requires
calculating and verifying both key digests, no digest was needed to
initiate decryption recovery if the destination is plaintext (no
encryption key). Also, some metadata (like encryption cipher) is not
protected, and an attacker could change it. Note that LUKS2 protects
visible metadata only when a random change occurs. It does not protect
against intentional modification but such modification must not cause
a violation of data confidentiality.
The fix introduces additional digest protection of reencryption
metadata. The digest is calculated from known keys and critical
reencryption metadata. Now an attacker cannot create correct metadata
digest without knowledge of a passphrase for used keyslots.
For more details, see LUKS2 On-Disk Format Specification version 1.1.0.
|
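A conceptual sketch of the digest protection the entry above describes, assuming OpenSSL's HMAC as the primitive; this is not cryptsetup's actual construction (see the LUKS2 On-Disk Format Specification version 1.1.0 for the real definition), it only shows how metadata can be bound to key material an attacker lacks.

#include <stddef.h>
#include <openssl/evp.h>
#include <openssl/hmac.h>

/* Conceptual only: a keyed digest over critical reencryption metadata means
 * forged metadata fails verification unless the attacker knows the keys. */
static int reencrypt_metadata_digest(const unsigned char *key, int key_len,
                                     const unsigned char *metadata, size_t md_len,
                                     unsigned char out[EVP_MAX_MD_SIZE],
                                     unsigned int *out_len)
{
    return HMAC(EVP_sha256(), key, key_len, metadata, md_len, out, out_len)
           ? 0 : -1;
}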
int seccomp_memory_deny_write_execute(void) {
uint32_t arch;
int r;
SECCOMP_FOREACH_LOCAL_ARCH(arch) {
_cleanup_(seccomp_releasep) scmp_filter_ctx seccomp = NULL;
int filter_syscall = 0, block_syscall = 0, shmat_syscall = 0;
log_debug("Operating on architecture: %s", seccomp_arch_to_string(arch));
switch (arch) {
case SCMP_ARCH_X86:
filter_syscall = SCMP_SYS(mmap2);
block_syscall = SCMP_SYS(mmap);
break;
case SCMP_ARCH_PPC64:
case SCMP_ARCH_PPC64LE:
filter_syscall = SCMP_SYS(mmap);
/* Note that shmat() isn't available, and the call is multiplexed through ipc().
* We ignore that here, which means there's still a way to get writable/executable
* memory, if an IPC key is mapped like this. That's a pity, but no total loss. */
break;
case SCMP_ARCH_ARM:
filter_syscall = SCMP_SYS(mmap2); /* arm has only mmap2 */
shmat_syscall = SCMP_SYS(shmat);
break;
case SCMP_ARCH_X86_64:
case SCMP_ARCH_X32:
case SCMP_ARCH_AARCH64:
filter_syscall = SCMP_SYS(mmap); /* amd64, x32, and arm64 have only mmap */
shmat_syscall = SCMP_SYS(shmat);
break;
/* Please add more definitions here, if you port systemd to other architectures! */
#if !defined(__i386__) && !defined(__x86_64__) && !defined(__powerpc64__) && !defined(__arm__) && !defined(__aarch64__)
#warning "Consider adding the right mmap() syscall definitions here!"
#endif
}
/* Can't filter mmap() on this arch, then skip it */
if (filter_syscall == 0)
continue;
r = seccomp_init_for_arch(&seccomp, arch, SCMP_ACT_ALLOW);
if (r < 0)
return r;
r = add_seccomp_syscall_filter(seccomp, arch, filter_syscall,
1,
SCMP_A2(SCMP_CMP_MASKED_EQ, PROT_EXEC|PROT_WRITE, PROT_EXEC|PROT_WRITE));
if (r < 0)
continue;
if (block_syscall != 0) {
r = add_seccomp_syscall_filter(seccomp, arch, block_syscall, 0, (const struct scmp_arg_cmp){} );
if (r < 0)
continue;
}
r = add_seccomp_syscall_filter(seccomp, arch, SCMP_SYS(mprotect),
1,
SCMP_A2(SCMP_CMP_MASKED_EQ, PROT_EXEC, PROT_EXEC));
if (r < 0)
continue;
if (shmat_syscall != 0) {
r = add_seccomp_syscall_filter(seccomp, arch, SCMP_SYS(shmat),
1,
SCMP_A2(SCMP_CMP_MASKED_EQ, SHM_EXEC, SHM_EXEC));
if (r < 0)
continue;
}
r = seccomp_load(seccomp);
if (IN_SET(r, -EPERM, -EACCES))
return r;
if (r < 0)
log_debug_errno(r, "Failed to install MemoryDenyWriteExecute= rule for architecture %s, skipping: %m", seccomp_arch_to_string(arch));
}
return 0;
}
| 1 | [] | systemd | b835eeb4ec1dd122b6feff2b70881265c529fcdd | 255,310,001,386,085,200,000,000,000,000,000,000,000 | 90 |
shared/seccomp: disallow pkey_mprotect the same as mprotect for W^X mappings (#7295)
MemoryDenyWriteExecute policy could be bypassed by using pkey_mprotect
instead of mprotect to create an executable writable mapping.
The impact is mitigated by the fact that the man page says "Note that this
feature is fully available on x86-64, and partially on x86", so hopefully
people do not rely on it as a sole security measure.
Found by Karin Hossen and Thomas Imbert from Sogeti ESEC R&D.
https://bugs.launchpad.net/bugs/1725348
|
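A hedged sketch of the added rule through the plain libseccomp API (requires a libseccomp recent enough to know pkey_mprotect; systemd's patch goes through its own helpers shown in the function above, so this standalone version is illustrative only):

#include <errno.h>
#include <seccomp.h>
#include <sys/mman.h>

/* Illustrative: reject pkey_mprotect() that would add PROT_EXEC, mirroring
 * the existing mprotect rule and closing the W^X bypass described above. */
static int deny_exec_pkey_mprotect(void)
{
    scmp_filter_ctx ctx = seccomp_init(SCMP_ACT_ALLOW);
    int r;

    if (ctx == NULL)
        return -ENOMEM;
    r = seccomp_rule_add(ctx, SCMP_ACT_ERRNO(EPERM), SCMP_SYS(pkey_mprotect), 1,
                         SCMP_A2(SCMP_CMP_MASKED_EQ, PROT_EXEC, PROT_EXEC));
    if (r == 0)
        r = seccomp_load(ctx);
    seccomp_release(ctx);
    return r;
}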
static void annotation_get_usercounters(annotate_state_t *state,
struct annotate_entry_list *entry)
{
struct buf value = BUF_INITIALIZER;
struct mboxname_counters counters;
char *mboxname = NULL;
assert(state);
assert(state->userid);
mboxname = mboxname_user_mbox(state->userid, NULL);
int r = mboxname_read_counters(mboxname, &counters);
if (!r) buf_printf(&value, "%u %llu %llu %llu %llu %llu %llu %llu %llu %llu %llu %llu %u",
counters.version, counters.highestmodseq,
counters.mailmodseq, counters.caldavmodseq,
counters.carddavmodseq, counters.notesmodseq,
counters.mailfoldersmodseq, counters.caldavfoldersmodseq,
counters.carddavfoldersmodseq, counters.notesfoldersmodseq,
counters.quotamodseq, counters.raclmodseq,
counters.uidvalidity);
output_entryatt(state, entry->name, state->userid, &value);
free(mboxname);
buf_free(&value);
}
| 0 | ["CWE-732"] | cyrus-imapd | 621f9e41465b521399f691c241181300fab55995 | 3,451,976,481,329,161,600,000,000,000,000,000,000 | 26 |
annotate: don't allow everyone to write shared server entries
|
void pwc_isoc_cleanup(struct pwc_device *pdev)
{
int i;
PWC_DEBUG_OPEN(">> pwc_isoc_cleanup()\n");
if (pdev == NULL)
return;
if (pdev->iso_init == 0)
return;
/* Unlinking ISOC buffers one by one */
for (i = 0; i < MAX_ISO_BUFS; i++) {
struct urb *urb;
urb = pdev->sbuf[i].urb;
if (urb != 0) {
if (pdev->iso_init) {
PWC_DEBUG_MEMORY("Unlinking URB %p\n", urb);
usb_kill_urb(urb);
}
PWC_DEBUG_MEMORY("Freeing URB\n");
usb_free_urb(urb);
pdev->sbuf[i].urb = NULL;
}
}
/* Stop camera, but only if we are sure the camera is still there (unplug
is signalled by EPIPE)
*/
if (pdev->error_status && pdev->error_status != EPIPE) {
PWC_DEBUG_OPEN("Setting alternate interface 0.\n");
usb_set_interface(pdev->udev, 0, 0);
}
pdev->iso_init = 0;
PWC_DEBUG_OPEN("<< pwc_isoc_cleanup()\n");
}
| 0 | ["CWE-399"] | linux-2.6 | 85237f202d46d55c1bffe0c5b1aa3ddc0f1dce4d | 219,630,847,756,723,730,000,000,000,000,000,000,000 | 37 |
USB: fix DoS in pwc USB video driver
the pwc driver has a disconnect method that waits for user space to
close the device. This opens up an opportunity for a DoS attack,
blocking the USB subsystem and making khubd's task busy wait in
kernel space. This patch shifts freeing resources to close if an opened
device is disconnected.
Signed-off-by: Oliver Neukum <[email protected]>
CC: stable <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
|
static int nr_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
struct sock *sk = sock->sk;
struct nr_sock *nr = nr_sk(sk);
struct full_sockaddr_ax25 *addr = (struct full_sockaddr_ax25 *)uaddr;
struct net_device *dev;
ax25_uid_assoc *user;
ax25_address *source;
lock_sock(sk);
if (!sock_flag(sk, SOCK_ZAPPED)) {
release_sock(sk);
return -EINVAL;
}
if (addr_len < sizeof(struct sockaddr_ax25) || addr_len > sizeof(struct full_sockaddr_ax25)) {
release_sock(sk);
return -EINVAL;
}
if (addr_len < (addr->fsa_ax25.sax25_ndigis * sizeof(ax25_address) + sizeof(struct sockaddr_ax25))) {
release_sock(sk);
return -EINVAL;
}
if (addr->fsa_ax25.sax25_family != AF_NETROM) {
release_sock(sk);
return -EINVAL;
}
if ((dev = nr_dev_get(&addr->fsa_ax25.sax25_call)) == NULL) {
release_sock(sk);
return -EADDRNOTAVAIL;
}
/*
* Only the super user can set an arbitrary user callsign.
*/
if (addr->fsa_ax25.sax25_ndigis == 1) {
if (!capable(CAP_NET_BIND_SERVICE)) {
dev_put(dev);
release_sock(sk);
return -EPERM;
}
nr->user_addr = addr->fsa_digipeater[0];
nr->source_addr = addr->fsa_ax25.sax25_call;
} else {
source = &addr->fsa_ax25.sax25_call;
user = ax25_findbyuid(current_euid());
if (user) {
nr->user_addr = user->call;
ax25_uid_put(user);
} else {
if (ax25_uid_policy && !capable(CAP_NET_BIND_SERVICE)) {
release_sock(sk);
dev_put(dev);
return -EPERM;
}
nr->user_addr = *source;
}
nr->source_addr = *source;
}
nr->device = dev;
nr_insert_socket(sk);
sock_reset_flag(sk, SOCK_ZAPPED);
dev_put(dev);
release_sock(sk);
return 0;
}
| 0 | ["CWE-20", "CWE-269"] | linux | f3d3342602f8bcbf37d7c46641cb9bca7618eb1c | 48,523,010,238,621,480,000,000,000,000,000,000,000 | 70 |
net: rework recvmsg handler msg_name and msg_namelen logic
This patch now always passes msg->msg_namelen as 0. recvmsg handlers must
set msg_namelen to the proper size <= sizeof(struct sockaddr_storage)
to return msg_name to the user.
This prevents numerous uninitialized memory leaks we had in the
recvmsg handlers and makes it harder for new code to accidentally leak
uninitialized memory.
Optimize for the case recvfrom is called with NULL as address. We don't
need to copy the address at all, so set it to NULL before invoking the
recvmsg handler. We can do so, because all the recvmsg handlers must
cope with the case a plain read() is called on them. read() also sets
msg_name to NULL.
Also document these changes in include/linux/net.h as suggested by David
Miller.
Changes since RFC:
Set msg->msg_name = NULL if user specified a NULL in msg_name but had a
non-null msg_namelen in verify_iovec/verify_compat_iovec. This doesn't
affect sendto as it would bail out earlier while trying to copy-in the
address. It also more naturally reflects the logic by the callers of
verify_iovec.
With this change in place I could remove "
if (!uaddr || msg_sys->msg_namelen == 0)
msg->msg_name = NULL
".
This change does not alter the user visible error logic as we ignore
msg_namelen as long as msg_name is NULL.
Also remove two unnecessary curly brackets in ___sys_recvmsg and change
comments to netdev style.
Cc: David Miller <[email protected]>
Suggested-by: Eric Dumazet <[email protected]>
Signed-off-by: Hannes Frederic Sowa <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
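The handler contract this commit establishes can be modeled in a few lines (plain-C stand-ins, not the kernel's struct msghdr): msg_namelen arrives as 0, and an address is reported only by filling msg_name and setting msg_namelen to what was actually written.

#include <string.h>
#include <sys/socket.h>

/* Model of the recvmsg handler contract: never report a length for memory
 * that was not written, which is what leaked uninitialized bytes before. */
struct model_msghdr { void *msg_name; socklen_t msg_namelen; };

static void model_fill_name(struct model_msghdr *msg,
                            const struct sockaddr_storage *src, socklen_t len)
{
    if (msg->msg_name == NULL) /* caller wants no address, as with read() */
        return;                /* msg_namelen stays 0 */
    memcpy(msg->msg_name, src, len);
    msg->msg_namelen = len;    /* <= sizeof(struct sockaddr_storage) */
}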
dao_output(rpl_parent_t *parent, uint8_t lifetime)
{
/* Destination Advertisement Object */
uip_ipaddr_t prefix;
if(get_global_addr(&prefix) == 0) {
LOG_ERR("No global address set for this node - suppressing DAO\n");
return;
}
if(parent == NULL || parent->dag == NULL || parent->dag->instance == NULL) {
return;
}
RPL_LOLLIPOP_INCREMENT(dao_sequence);
#if RPL_WITH_DAO_ACK
/* set up the state since this will be the first transmission of DAO */
/* retransmissions will call directly to dao_output_target_seq */
/* keep track of my own sending of DAO for handling ack and loss of ack */
if(lifetime != RPL_ZERO_LIFETIME) {
rpl_instance_t *instance;
instance = parent->dag->instance;
instance->my_dao_seqno = dao_sequence;
instance->my_dao_transmissions = 1;
ctimer_set(&instance->dao_retransmit_timer, RPL_DAO_RETRANSMISSION_TIMEOUT,
handle_dao_retransmission, parent);
}
#else
/* We know that we have tried to register so now we are assuming
that we have a down-link - unless this is a zero lifetime one */
parent->dag->instance->has_downward_route = lifetime != RPL_ZERO_LIFETIME;
#endif /* RPL_WITH_DAO_ACK */
/* Sending a DAO with own prefix as target */
dao_output_target(parent, &prefix, lifetime);
}
| 0 | ["CWE-703"] | contiki-ng | 587ae59956e00316fd44fd7072ac3a6a07b4b20f | 118,248,192,695,743,650,000,000,000,000,000,000,000 | 37 |
Check RPL Target prefix length and buffer boundary.
|
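A minimal sketch of the kind of check the one-line message above refers to; names and layout are assumptions, not Contiki-NG's actual code. The prefix length is validated against both the IPv6 address size and the remaining option buffer before any copy.

#include <stdint.h>
#include <string.h>

/* Sketch: reject prefix lengths over 128 bits and copies that would run
 * past the received option buffer. */
static int read_target_prefix(const uint8_t *buf, size_t buf_len,
                              uint8_t prefix_len_bits, uint8_t out[16])
{
    size_t prefix_bytes = ((size_t)prefix_len_bits + 7) / 8;

    if (prefix_len_bits > 128)  /* longer than an IPv6 address */
        return -1;
    if (prefix_bytes > buf_len) /* would read past the buffer boundary */
        return -1;
    memset(out, 0, 16);
    memcpy(out, buf, prefix_bytes);
    return 0;
}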
int X509_verify_cert(X509_STORE_CTX *ctx)
{
SSL_DANE *dane = ctx->dane;
int ret;
if (ctx->cert == NULL) {
X509err(X509_F_X509_VERIFY_CERT, X509_R_NO_CERT_SET_FOR_US_TO_VERIFY);
ctx->error = X509_V_ERR_INVALID_CALL;
return -1;
}
if (ctx->chain != NULL) {
/*
* This X509_STORE_CTX has already been used to verify a cert. We
* cannot do another one.
*/
X509err(X509_F_X509_VERIFY_CERT, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED);
ctx->error = X509_V_ERR_INVALID_CALL;
return -1;
}
if (!X509_up_ref(ctx->cert)) {
X509err(X509_F_X509_VERIFY_CERT, ERR_R_INTERNAL_ERROR);
ctx->error = X509_V_ERR_UNSPECIFIED;
return -1;
}
/*
* first we make sure the chain we are going to build is present and that
* the first entry is in place
*/
if ((ctx->chain = sk_X509_new_null()) == NULL
|| !sk_X509_push(ctx->chain, ctx->cert)) {
X509_free(ctx->cert);
X509err(X509_F_X509_VERIFY_CERT, ERR_R_MALLOC_FAILURE);
ctx->error = X509_V_ERR_OUT_OF_MEM;
return -1;
}
ctx->num_untrusted = 1;
/* If the peer's public key is too weak, we can stop early. */
if (!check_key_level(ctx, ctx->cert) &&
!verify_cb_cert(ctx, ctx->cert, 0, X509_V_ERR_EE_KEY_TOO_SMALL))
return 0;
if (DANETLS_ENABLED(dane))
ret = dane_verify(ctx);
else
ret = verify_chain(ctx);
/*
* Safety-net. If we are returning an error, we must also set ctx->error,
* so that the chain is not considered verified should the error be ignored
* (e.g. TLS with SSL_VERIFY_NONE).
*/
if (ret <= 0 && ctx->error == X509_V_OK)
ctx->error = X509_V_ERR_UNSPECIFIED;
return ret;
}
| 0 | ["CWE-295"] | openssl | 2a40b7bc7b94dd7de897a74571e7024f0cf0d63b | 111,869,874,876,589,990,000,000,000,000,000,000,000 | 60 |
check_chain_extensions: Do not override error return value by check_curve
The X509_V_FLAG_X509_STRICT flag enables additional security checks of the
certificates present in a certificate chain. It is not set by default.
Starting from OpenSSL version 1.1.1h a check to disallow certificates with
explicitly encoded elliptic curve parameters in the chain was added to the
strict checks.
An error in the implementation of this check meant that the result of a
previous check to confirm that certificates in the chain are valid CA
certificates was overwritten. This effectively bypasses the check
that non-CA certificates must not be able to issue other certificates.
If a "purpose" has been configured then a subsequent check that the
certificate is consistent with that purpose also checks that it is a
valid CA. Therefore where a purpose is set the certificate chain will
still be rejected even when the strict flag has been used. A purpose is
set by default in libssl client and server certificate verification
routines, but it can be overridden by an application.
Affected applications explicitly set the X509_V_FLAG_X509_STRICT
verification flag and either do not set a purpose for the certificate
verification or, in the case of TLS client or server applications,
override the default purpose to make it not set.
CVE-2021-3450
Reviewed-by: Matt Caswell <[email protected]>
Reviewed-by: Paul Dale <[email protected]>
|
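A toy model of the bug class described above, with hypothetical names (not OpenSSL's code): the strict-mode check assigned into the same status variable that already carried the CA-validity result, masking its failure; the fix is to combine results instead of overwriting.

/* Model only: combine independent check results rather than letting the
 * later check clobber the earlier one. */
static int model_check_chain(int ca_ok, int curve_ok)
{
    int ok = ca_ok;      /* earlier check: is this a valid CA? */
    ok = ok && curve_ok; /* fold in the strict curve check */
    return ok;           /* nonzero only if both passed */
}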
session_compare (const char *a,
const char *b)
{
if (a == NULL) {
return 1;
} else if (b == NULL) {
return -1;
}
return strcmp (a, b);
}
| 0 | [] | gdm | c25ef9245be4e0be2126ef3d075df4401949b570 | 38,394,754,564,028,750,000,000,000,000,000,000,000 | 11 |
Store the face and dmrc files in a cache. Refer to bug #565151.
|
static int n_do_ssl_write(SSL *s, const unsigned char *buf, unsigned int len)
{
unsigned int j,k,olen,p,bs;
int mac_size;
register unsigned char *pp;
olen=len;
/* first check if there is data from an encryption waiting to
* be sent - it must be sent because the other end is waiting.
* This will happen with non-blocking IO. We print it and then
* return.
*/
if (s->s2->wpend_len != 0) return(write_pending(s,buf,len));
/* set mac_size to mac size */
if (s->s2->clear_text)
mac_size=0;
else
{
mac_size=EVP_MD_CTX_size(s->write_hash);
if (mac_size < 0)
return -1;
}
/* lets set the pad p */
if (s->s2->clear_text)
{
if (len > SSL2_MAX_RECORD_LENGTH_2_BYTE_HEADER)
len=SSL2_MAX_RECORD_LENGTH_2_BYTE_HEADER;
p=0;
s->s2->three_byte_header=0;
/* len=len; */
}
else
{
bs=EVP_CIPHER_CTX_block_size(s->enc_read_ctx);
j=len+mac_size;
/* Two-byte headers allow for a larger record length than
* three-byte headers, but we can't use them if we need
* padding or if we have to set the escape bit. */
if ((j > SSL2_MAX_RECORD_LENGTH_3_BYTE_HEADER) &&
(!s->s2->escape))
{
if (j > SSL2_MAX_RECORD_LENGTH_2_BYTE_HEADER)
j=SSL2_MAX_RECORD_LENGTH_2_BYTE_HEADER;
/* set k to the max number of bytes with 2
* byte header */
k=j-(j%bs);
/* how many data bytes? */
len=k-mac_size;
s->s2->three_byte_header=0;
p=0;
}
else if ((bs <= 1) && (!s->s2->escape))
{
/* j <= SSL2_MAX_RECORD_LENGTH_3_BYTE_HEADER, thus
* j < SSL2_MAX_RECORD_LENGTH_2_BYTE_HEADER */
s->s2->three_byte_header=0;
p=0;
}
else /* we may have to use a 3 byte header */
{
/* If s->s2->escape is not set, then
* j <= SSL2_MAX_RECORD_LENGTH_3_BYTE_HEADER, and thus
* j < SSL2_MAX_RECORD_LENGTH_2_BYTE_HEADER. */
p=(j%bs);
p=(p == 0)?0:(bs-p);
if (s->s2->escape)
{
s->s2->three_byte_header=1;
if (j > SSL2_MAX_RECORD_LENGTH_3_BYTE_HEADER)
j=SSL2_MAX_RECORD_LENGTH_3_BYTE_HEADER;
}
else
s->s2->three_byte_header=(p == 0)?0:1;
}
}
/* Now
* j <= SSL2_MAX_RECORD_LENGTH_2_BYTE_HEADER
* holds, and if s->s2->three_byte_header is set, then even
* j <= SSL2_MAX_RECORD_LENGTH_3_BYTE_HEADER.
*/
/* mac_size is the number of MAC bytes
* len is the number of data bytes we are going to send
* p is the number of padding bytes
* (if it is a two-byte header, then p == 0) */
s->s2->wlength=len;
s->s2->padding=p;
s->s2->mac_data= &(s->s2->wbuf[3]);
s->s2->wact_data= &(s->s2->wbuf[3+mac_size]);
/* we copy the data into s->s2->wbuf */
memcpy(s->s2->wact_data,buf,len);
if (p)
memset(&(s->s2->wact_data[len]),0,p); /* arbitrary padding */
if (!s->s2->clear_text)
{
s->s2->wact_data_length=len+p;
ssl2_mac(s,s->s2->mac_data,1);
s->s2->wlength+=p+mac_size;
ssl2_enc(s,1);
}
/* package up the header */
s->s2->wpend_len=s->s2->wlength;
if (s->s2->three_byte_header) /* 3 byte header */
{
pp=s->s2->mac_data;
pp-=3;
pp[0]=(s->s2->wlength>>8)&(THREE_BYTE_MASK>>8);
if (s->s2->escape) pp[0]|=SEC_ESC_BIT;
pp[1]=s->s2->wlength&0xff;
pp[2]=s->s2->padding;
s->s2->wpend_len+=3;
}
else
{
pp=s->s2->mac_data;
pp-=2;
pp[0]=((s->s2->wlength>>8)&(TWO_BYTE_MASK>>8))|TWO_BYTE_BIT;
pp[1]=s->s2->wlength&0xff;
s->s2->wpend_len+=2;
}
s->s2->write_ptr=pp;
INC32(s->s2->write_sequence); /* expect next number */
/* lets try to actually write the data */
s->s2->wpend_tot=olen;
s->s2->wpend_buf=buf;
s->s2->wpend_ret=len;
s->s2->wpend_off=0;
return(write_pending(s,buf,olen));
}
| 0 | ["CWE-310"] | openssl | 9c00a950604aca819cee977f1dcb4b45f2af3aa6 | 84,763,875,963,062,980,000,000,000,000,000,000,000 | 140 |
Add and use a constant-time memcmp.
This change adds CRYPTO_memcmp, which compares two vectors of bytes in
an amount of time that's independent of their contents. It also changes
several MAC compares in the code to use this over the standard memcmp,
which may leak information about the size of a matching prefix.
(cherry picked from commit 2ee798880a246d648ecddadc5b91367bee4a5d98)
Conflicts:
crypto/crypto.h
ssl/t1_lib.c
|
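A minimal sketch of the constant-time idea; CRYPTO_memcmp's actual implementation may differ, but the property is the same: the loop touches every byte and no branch depends on the data, so timing does not reveal the length of a matching prefix.

#include <stddef.h>

/* Returns 0 iff the two buffers are equal; runtime is independent of where
 * the first mismatch occurs, unlike a memcmp that returns early. */
static int ct_memcmp(const void *a, const void *b, size_t len)
{
    const unsigned char *pa = a, *pb = b;
    unsigned char diff = 0;
    size_t i;

    for (i = 0; i < len; i++)
        diff |= (unsigned char)(pa[i] ^ pb[i]); /* accumulate, never branch */
    return diff != 0;
}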
qf_jump_open_window(
qf_info_T *qi,
qfline_T *qf_ptr,
int newwin,
int *opened_window)
{
qf_list_T *qfl = qf_get_curlist(qi);
int old_changedtick = qfl->qf_changedtick;
int old_qf_curlist = qi->qf_curlist;
qfltype_T qfl_type = qfl->qfl_type;
// For ":helpgrep" find a help window or open one.
if (qf_ptr->qf_type == 1 && (!bt_help(curwin->w_buffer)
|| cmdmod.cmod_tab != 0))
if (jump_to_help_window(qi, newwin, opened_window) == FAIL)
return FAIL;
if (old_qf_curlist != qi->qf_curlist
|| old_changedtick != qfl->qf_changedtick
|| !is_qf_entry_present(qfl, qf_ptr))
{
if (qfl_type == QFLT_QUICKFIX)
emsg(_(e_current_quickfix_list_was_changed));
else
emsg(_(e_current_location_list_was_changed));
return FAIL;
}
// If currently in the quickfix window, find another window to show the
// file in.
if (bt_quickfix(curbuf) && !*opened_window)
{
// If there is no file specified, we don't know where to go.
// But do advance, otherwise ":cn" gets stuck.
if (qf_ptr->qf_fnum == 0)
return NOTDONE;
if (qf_jump_to_usable_window(qf_ptr->qf_fnum, newwin,
opened_window) == FAIL)
return FAIL;
}
if (old_qf_curlist != qi->qf_curlist
|| old_changedtick != qfl->qf_changedtick
|| !is_qf_entry_present(qfl, qf_ptr))
{
if (qfl_type == QFLT_QUICKFIX)
emsg(_(e_current_quickfix_list_was_changed));
else
emsg(_(e_current_location_list_was_changed));
return FAIL;
}
return OK;
}
| 1 | ["CWE-416"] | vim | 6d24a51b94beb1991cddce221f90b455e2d50db7 | 255,911,756,916,737,830,000,000,000,000,000,000,000 | 53 |
patch 9.0.0286: using freed memory when location list changed in autocmd
Problem: Using freed memory when location list changed in autocmd.
Solution: Return QF_ABORT and handle it. (Yegappan Lakshmanan,
closes #10993)
|
static void gvt_cache_init(struct intel_vgpu *vgpu)
{
vgpu->vdev.gfn_cache = RB_ROOT;
vgpu->vdev.dma_addr_cache = RB_ROOT;
vgpu->vdev.nr_cache_entries = 0;
mutex_init(&vgpu->vdev.cache_lock);
}
| 0 | ["CWE-20"] | linux | 51b00d8509dc69c98740da2ad07308b630d3eb7d | 218,073,921,412,096,480,000,000,000,000,000,000,000 | 7 |
drm/i915/gvt: Fix mmap range check
This is to fix missed mmap range check on vGPU bar2 region
and only allow to map vGPU allocated GMADDR range, which means
user space should support sparse mmap to get proper offset for
mmap vGPU aperture. And this takes care of actual pgoff in mmap
request as original code always does from beginning of vGPU
aperture.
Fixes: 659643f7d814 ("drm/i915/gvt/kvmgt: add vfio/mdev support to KVMGT")
Cc: "Monroy, Rodrigo Axel" <[email protected]>
Cc: "Orrala Contreras, Alfredo" <[email protected]>
Cc: [email protected] # v4.10+
Reviewed-by: Hang Yuan <[email protected]>
Signed-off-by: Zhenyu Wang <[email protected]>
|
dissect_tcpopt_recbound(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, void* data _U_)
{
return dissect_tcpopt_default_option(tvb, pinfo, tree, proto_tcp_option_scpsrec, ett_tcp_opt_recbound);
}
| 0 | ["CWE-354"] | wireshark | 7f3fe6164a68b76d9988c4253b24d43f498f1753 | 39,690,680,345,563,397,000,000,000,000,000,000,000 | 4 |
TCP: do not use an unknown status when the checksum is 0xffff
Otherwise it triggers an assert when adding the column as the field is
defined as BASE_NONE and not BASE_DEC or BASE_HEX. Thus an unknown value
(not in the proto_checksum_vals[] array) cannot be represented.
Mark the checksum as bad even if we process the packet.
Closes #16816
Conflicts:
epan/dissectors/packet-tcp.c
|
static ssize_t clear_refs_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
struct task_struct *task;
char buffer[PROC_NUMBUF];
struct mm_struct *mm;
struct vm_area_struct *vma;
long type;
memset(buffer, 0, sizeof(buffer));
if (count > sizeof(buffer) - 1)
count = sizeof(buffer) - 1;
if (copy_from_user(buffer, buf, count))
return -EFAULT;
if (strict_strtol(strstrip(buffer), 10, &type))
return -EINVAL;
if (type < CLEAR_REFS_ALL || type > CLEAR_REFS_MAPPED)
return -EINVAL;
task = get_proc_task(file->f_path.dentry->d_inode);
if (!task)
return -ESRCH;
mm = get_task_mm(task);
if (mm) {
struct mm_walk clear_refs_walk = {
.pmd_entry = clear_refs_pte_range,
.mm = mm,
};
down_read(&mm->mmap_sem);
for (vma = mm->mmap; vma; vma = vma->vm_next) {
clear_refs_walk.private = vma;
if (is_vm_hugetlb_page(vma))
continue;
/*
* Writing 1 to /proc/pid/clear_refs affects all pages.
*
* Writing 2 to /proc/pid/clear_refs only affects
* Anonymous pages.
*
* Writing 3 to /proc/pid/clear_refs only affects file
* mapped pages.
*/
if (type == CLEAR_REFS_ANON && vma->vm_file)
continue;
if (type == CLEAR_REFS_MAPPED && !vma->vm_file)
continue;
walk_page_range(vma->vm_start, vma->vm_end,
&clear_refs_walk);
}
flush_tlb_mm(mm);
up_read(&mm->mmap_sem);
mmput(mm);
}
put_task_struct(task);
return count;
}
| 0 | ["CWE-20", "CWE-476", "CWE-416"] | linux | 76597cd31470fa130784c78fadb4dab2e624a723 | 311,260,813,800,560,920,000,000,000,000,000,000,000 | 56 |
proc: fix oops on invalid /proc/<pid>/maps access
When m_start returns an error, the seq_file logic will still call m_stop
with that error entry, so we'd better make sure that we check it before
using it as a vma.
Introduced by commit ec6fd8a4355c ("report errors in /proc/*/*map*
sanely"), which replaced NULL with various ERR_PTR() cases.
(On ia64, you happen to get an unaligned fault instead of a page fault,
since the address used is generally some random error code like -EPERM)
Reported-by: Anca Emanuel <[email protected]>
Reported-by: Tony Luck <[email protected]>
Cc: Al Viro <[email protected]>
Cc: Américo Wang <[email protected]>
Cc: Stephen Wilson <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
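The fix described above hinges on the kernel's ERR_PTR convention; below is a small user-space model of it (stand-in names, not the kernel's m_stop) showing the check that prevents the oops.

/* User-space model of ERR_PTR/IS_ERR: errors are encoded as pointers in
 * the top page of the address space, so m_stop must test for them before
 * treating the cursor as a vma. */
#define MAX_ERRNO 4095

static inline void *ERR_PTR(long err) { return (void *)err; }
static inline int IS_ERR(const void *p)
{
    return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

static void model_m_stop(void *v)
{
    if (v == NULL || IS_ERR(v))
        return; /* error cursor from m_start: nothing to clean up */
    /* ... normal cleanup that dereferences the vma in v ... */
}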
static inline bool f2fs_hw_is_readonly(struct f2fs_sb_info *sbi)
{
int i;
if (!f2fs_is_multi_device(sbi))
return bdev_read_only(sbi->sb->s_bdev);
for (i = 0; i < sbi->s_ndevs; i++)
if (bdev_read_only(FDEV(i).bdev))
return true;
return false;
}
| 0 | ["CWE-476"] | linux | 4969c06a0d83c9c3dc50b8efcdc8eeedfce896f6 | 154,693,394,620,678,030,000,000,000,000,000,000,000 | 12 |
f2fs: support swap file w/ DIO
Signed-off-by: Jaegeuk Kim <[email protected]>
|
archive_read_format_lha_read_header(struct archive_read *a,
struct archive_entry *entry)
{
struct archive_string linkname;
struct archive_string pathname;
struct lha *lha;
const unsigned char *p;
const char *signature;
int err;
lha_crc16_init();
a->archive.archive_format = ARCHIVE_FORMAT_LHA;
if (a->archive.archive_format_name == NULL)
a->archive.archive_format_name = "lha";
lha = (struct lha *)(a->format->data);
lha->decompress_init = 0;
lha->end_of_entry = 0;
lha->end_of_entry_cleanup = 0;
lha->entry_unconsumed = 0;
if ((p = __archive_read_ahead(a, H_SIZE, NULL)) == NULL) {
/*
* LHa archiver added 0 to the tail of its archive file as
* the mark of the end of the archive.
*/
signature = __archive_read_ahead(a, sizeof(signature[0]), NULL);
if (signature == NULL || signature[0] == 0)
return (ARCHIVE_EOF);
return (truncated_error(a));
}
signature = (const char *)p;
if (lha->found_first_header == 0 &&
signature[0] == 'M' && signature[1] == 'Z') {
/* This is an executable? Must be self-extracting... */
err = lha_skip_sfx(a);
if (err < ARCHIVE_WARN)
return (err);
if ((p = __archive_read_ahead(a, sizeof(*p), NULL)) == NULL)
return (truncated_error(a));
signature = (const char *)p;
}
/* signature[0] == 0 means the end of an LHa archive file. */
if (signature[0] == 0)
return (ARCHIVE_EOF);
/*
* Check the header format and method type.
*/
if (lha_check_header_format(p) != 0) {
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"Bad LHa file");
return (ARCHIVE_FATAL);
}
/* We've found the first header. */
lha->found_first_header = 1;
/* Set a default value and common data */
lha->header_size = 0;
lha->level = p[H_LEVEL_OFFSET];
lha->method[0] = p[H_METHOD_OFFSET+1];
lha->method[1] = p[H_METHOD_OFFSET+2];
lha->method[2] = p[H_METHOD_OFFSET+3];
if (memcmp(lha->method, "lhd", 3) == 0)
lha->directory = 1;
else
lha->directory = 0;
if (memcmp(lha->method, "lh0", 3) == 0 ||
memcmp(lha->method, "lz4", 3) == 0)
lha->entry_is_compressed = 0;
else
lha->entry_is_compressed = 1;
lha->compsize = 0;
lha->origsize = 0;
lha->setflag = 0;
lha->birthtime = 0;
lha->birthtime_tv_nsec = 0;
lha->mtime = 0;
lha->mtime_tv_nsec = 0;
lha->atime = 0;
lha->atime_tv_nsec = 0;
lha->mode = (lha->directory)? 0777 : 0666;
lha->uid = 0;
lha->gid = 0;
archive_string_empty(&lha->dirname);
archive_string_empty(&lha->filename);
lha->dos_attr = 0;
if (lha->opt_sconv != NULL)
lha->sconv = lha->opt_sconv;
else
lha->sconv = NULL;
switch (p[H_LEVEL_OFFSET]) {
case 0:
err = lha_read_file_header_0(a, lha);
break;
case 1:
err = lha_read_file_header_1(a, lha);
break;
case 2:
err = lha_read_file_header_2(a, lha);
break;
case 3:
err = lha_read_file_header_3(a, lha);
break;
default:
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"Unsupported LHa header level %d", p[H_LEVEL_OFFSET]);
err = ARCHIVE_FATAL;
break;
}
if (err < ARCHIVE_WARN)
return (err);
if (!lha->directory && archive_strlen(&lha->filename) == 0)
/* The filename has not been set */
return (truncated_error(a));
/*
* Make a pathname from a dirname and a filename.
*/
archive_string_concat(&lha->dirname, &lha->filename);
archive_string_init(&pathname);
archive_string_init(&linkname);
archive_string_copy(&pathname, &lha->dirname);
if ((lha->mode & AE_IFMT) == AE_IFLNK) {
/*
* Extract the symlink-name if it's included in the pathname.
*/
if (!lha_parse_linkname(&linkname, &pathname)) {
/* We couldn't get the symlink-name. */
archive_set_error(&a->archive,
ARCHIVE_ERRNO_FILE_FORMAT,
"Unknown symlink-name");
archive_string_free(&pathname);
archive_string_free(&linkname);
return (ARCHIVE_FAILED);
}
} else {
/*
* Make sure a file-type is set.
* The mode has been overridden if it is in the extended data.
*/
lha->mode = (lha->mode & ~AE_IFMT) |
((lha->directory)? AE_IFDIR: AE_IFREG);
}
if ((lha->setflag & UNIX_MODE_IS_SET) == 0 &&
(lha->dos_attr & 1) != 0)
lha->mode &= ~(0222);/* read only. */
/*
* Set basic file parameters.
*/
if (archive_entry_copy_pathname_l(entry, pathname.s,
pathname.length, lha->sconv) != 0) {
if (errno == ENOMEM) {
archive_set_error(&a->archive, ENOMEM,
"Can't allocate memory for Pathname");
return (ARCHIVE_FATAL);
}
archive_set_error(&a->archive,
ARCHIVE_ERRNO_FILE_FORMAT,
"Pathname cannot be converted "
"from %s to current locale.",
archive_string_conversion_charset_name(lha->sconv));
err = ARCHIVE_WARN;
}
archive_string_free(&pathname);
if (archive_strlen(&linkname) > 0) {
if (archive_entry_copy_symlink_l(entry, linkname.s,
linkname.length, lha->sconv) != 0) {
if (errno == ENOMEM) {
archive_set_error(&a->archive, ENOMEM,
"Can't allocate memory for Linkname");
return (ARCHIVE_FATAL);
}
archive_set_error(&a->archive,
ARCHIVE_ERRNO_FILE_FORMAT,
"Linkname cannot be converted "
"from %s to current locale.",
archive_string_conversion_charset_name(lha->sconv));
err = ARCHIVE_WARN;
}
} else
archive_entry_set_symlink(entry, NULL);
archive_string_free(&linkname);
/*
* When a header level is 0, there is a possibility that
* a pathname and a symlink has '\' character, a directory
* separator in DOS/Windows. So we should convert it to '/'.
*/
if (p[H_LEVEL_OFFSET] == 0)
lha_replace_path_separator(lha, entry);
archive_entry_set_mode(entry, lha->mode);
archive_entry_set_uid(entry, lha->uid);
archive_entry_set_gid(entry, lha->gid);
if (archive_strlen(&lha->uname) > 0)
archive_entry_set_uname(entry, lha->uname.s);
if (archive_strlen(&lha->gname) > 0)
archive_entry_set_gname(entry, lha->gname.s);
if (lha->setflag & BIRTHTIME_IS_SET) {
archive_entry_set_birthtime(entry, lha->birthtime,
lha->birthtime_tv_nsec);
archive_entry_set_ctime(entry, lha->birthtime,
lha->birthtime_tv_nsec);
} else {
archive_entry_unset_birthtime(entry);
archive_entry_unset_ctime(entry);
}
archive_entry_set_mtime(entry, lha->mtime, lha->mtime_tv_nsec);
if (lha->setflag & ATIME_IS_SET)
archive_entry_set_atime(entry, lha->atime,
lha->atime_tv_nsec);
else
archive_entry_unset_atime(entry);
if (lha->directory || archive_entry_symlink(entry) != NULL)
archive_entry_unset_size(entry);
else
archive_entry_set_size(entry, lha->origsize);
/*
* Prepare variables used to read a file content.
*/
lha->entry_bytes_remaining = lha->compsize;
if (lha->entry_bytes_remaining < 0) {
archive_set_error(&a->archive,
ARCHIVE_ERRNO_FILE_FORMAT,
"Invalid LHa entry size");
return (ARCHIVE_FATAL);
}
lha->entry_offset = 0;
lha->entry_crc_calculated = 0;
/*
* This file does not have a content.
*/
if (lha->directory || lha->compsize == 0)
lha->end_of_entry = 1;
sprintf(lha->format_name, "lha -%c%c%c-",
lha->method[0], lha->method[1], lha->method[2]);
a->archive.archive_format_name = lha->format_name;
return (err);
}
| 0 | ["CWE-125"] | libarchive | 2c8c83b9731ff822fad6cc8c670ea5519c366a14 | 123,484,598,477,192,500,000,000,000,000,000,000,000 | 252 |
Reject LHA archive entries with negative size.
|
has_textchanged(void)
{
return (first_autopat[(int)EVENT_TEXTCHANGED] != NULL);
}
| 0 | ["CWE-200", "CWE-668"] | vim | 5a73e0ca54c77e067c3b12ea6f35e3e8681e8cf8 | 250,150,281,802,980,920,000,000,000,000,000,000,000 | 4 |
patch 8.0.1263: others can read the swap file if a user is careless
Problem: Others can read the swap file if a user is careless with his
primary group.
Solution: If the group permission allows for reading but the world
permissions don't, make sure the group is right.
|
static int rsa_pub_cmp(const EVP_PKEY *a, const EVP_PKEY *b)
{
if (BN_cmp(b->pkey.rsa->n, a->pkey.rsa->n) != 0
|| BN_cmp(b->pkey.rsa->e, a->pkey.rsa->e) != 0)
return 0;
return 1;
}
| 0 | [] | openssl | 4b22cce3812052fe64fc3f6d58d8cc884e3cb834 | 41,077,031,559,574,347,000,000,000,000,000,000,000 | 7 |
Reject invalid PSS parameters.
Fix a bug where invalid PSS parameters are not rejected resulting in a
NULL pointer exception. This can be triggered during certificate
verification so could be a DoS attack against a client or a server
enabling client authentication.
Thanks to Brian Carpenter for reporting this issue.
CVE-2015-0208
Reviewed-by: Tim Hudson <[email protected]>
|
static void free_unused_bufs(struct virtnet_info *vi)
{
void *buf;
int i;
for (i = 0; i < vi->max_queue_pairs; i++) {
struct virtqueue *vq = vi->sq[i].vq;
while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
dev_kfree_skb(buf);
}
for (i = 0; i < vi->max_queue_pairs; i++) {
struct virtqueue *vq = vi->rq[i].vq;
while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
if (vi->mergeable_rx_bufs) {
unsigned long ctx = (unsigned long)buf;
void *base = mergeable_ctx_to_buf_address(ctx);
put_page(virt_to_head_page(base));
} else if (vi->big_packets) {
give_pages(&vi->rq[i], buf);
} else {
dev_kfree_skb(buf);
}
}
}
}
| 0 | ["CWE-119", "CWE-787"] | linux | 48900cb6af4282fa0fb6ff4d72a81aa3dadb5c39 | 220,293,611,243,270,270,000,000,000,000,000,000,000 | 27 |
virtio-net: drop NETIF_F_FRAGLIST
virtio declares support for NETIF_F_FRAGLIST, but assumes
that there are at most MAX_SKB_FRAGS + 2 fragments which isn't
always true with a fraglist.
A longer fraglist in the skb will make the call to skb_to_sgvec overflow
the sg array, leading to memory corruption.
Drop NETIF_F_FRAGLIST so we only get what we can handle.
Cc: Michael S. Tsirkin <[email protected]>
Signed-off-by: Jason Wang <[email protected]>
Acked-by: Michael S. Tsirkin <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
gdk_pixbuf_new (GdkColorspace colorspace,
gboolean has_alpha,
int bits_per_sample,
int width,
int height)
{
guchar *buf;
int channels;
int rowstride;
g_return_val_if_fail (colorspace == GDK_COLORSPACE_RGB, NULL);
g_return_val_if_fail (bits_per_sample == 8, NULL);
g_return_val_if_fail (width > 0, NULL);
g_return_val_if_fail (height > 0, NULL);
channels = has_alpha ? 4 : 3;
rowstride = width * channels;
if (rowstride / channels != width || rowstride + 3 < 0) /* overflow */
return NULL;
/* Always align rows to 32-bit boundaries */
rowstride = (rowstride + 3) & ~3;
buf = g_try_malloc_n (height, rowstride);
if (!buf)
return NULL;
return gdk_pixbuf_new_from_data (buf, colorspace, has_alpha, bits_per_sample,
width, height, rowstride,
free_buffer, NULL);
}
| 0 | [] | gdk-pixbuf | deb78d971c4bcb9e3ccbb71e7925bc6baa707188 | 118,855,402,426,635,300,000,000,000,000,000,000,000 | 31 |
Use g_try_malloc_n where it makes sense
This lets us avoid some manual overflow checks.
|
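What g_try_malloc_n buys relative to the manual checks, modeled in plain C (illustrative, not GLib's implementation): the multiplication is overflow-checked before allocating, so a wrapped-around (too small) buffer can never be returned.

#include <stdint.h>
#include <stdlib.h>

/* Model of g_try_malloc_n semantics: NULL on overflow of the product,
 * otherwise an ordinary allocation of n_blocks * block_size bytes. */
static void *try_malloc_n(size_t n_blocks, size_t block_size)
{
    if (block_size != 0 && n_blocks > SIZE_MAX / block_size)
        return NULL; /* product would overflow size_t */
    return malloc(n_blocks * block_size);
}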
size_t ZSTD_writeLastEmptyBlock(void* dst, size_t dstCapacity)
{
if (dstCapacity < ZSTD_blockHeaderSize) return ERROR(dstSize_tooSmall);
{ U32 const cBlockHeader24 = 1 /*lastBlock*/ + (((U32)bt_raw)<<1); /* 0 size */
MEM_writeLE24(dst, cBlockHeader24);
return ZSTD_blockHeaderSize;
}
}
| 0 | ["CWE-362"] | zstd | 3e5cdf1b6a85843e991d7d10f6a2567c15580da0 | 276,628,136,173,880,820,000,000,000,000,000,000,000 | 8 |
fixed T36302429
|
static void hidpp10_wheel_populate_input(struct hidpp_device *hidpp,
struct input_dev *input_dev)
{
__set_bit(EV_REL, input_dev->evbit);
__set_bit(REL_WHEEL, input_dev->relbit);
__set_bit(REL_WHEEL_HI_RES, input_dev->relbit);
__set_bit(REL_HWHEEL, input_dev->relbit);
__set_bit(REL_HWHEEL_HI_RES, input_dev->relbit);
}
| 0 | ["CWE-787"] | linux | d9d4b1e46d9543a82c23f6df03f4ad697dab361b | 4,260,490,040,569,808,000,000,000,000,000,000,000 | 9 |
HID: Fix assumption that devices have inputs
The syzbot fuzzer found a slab-out-of-bounds write bug in the hid-gaff
driver. The problem is caused by the driver's assumption that the
device must have an input report. While this will be true for all
normal HID input devices, a suitably malicious device can violate the
assumption.
The same assumption is present in over a dozen other HID drivers.
This patch fixes them by checking that the list of hid_inputs for the
hid_device is nonempty before allowing it to be used.
Reported-and-tested-by: [email protected]
Signed-off-by: Alan Stern <[email protected]>
CC: <[email protected]>
Signed-off-by: Benjamin Tissoires <[email protected]>
|
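A stand-in model of the guard the commit above describes (simplified types, not the kernel's hid_device): drivers must confirm the device enumerated at least one input before dereferencing the first entry.

#include <stddef.h>

/* Model: a malicious device may present no input report at all, so the
 * probe path must treat an empty input list as an error rather than
 * dereference it. */
struct model_hid_input { int dummy; };
struct model_hid_device { struct model_hid_input *inputs; };

static int model_probe(struct model_hid_device *hdev)
{
    if (hdev->inputs == NULL)
        return -1; /* no inputs: refuse to bind instead of crashing */
    /* ... safe to use hdev->inputs here ... */
    return 0;
}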
static int i2r_NAME_CONSTRAINTS(const X509V3_EXT_METHOD *method, void *a,
BIO *bp, int ind)
{
NAME_CONSTRAINTS *ncons = a;
do_i2r_name_constraints(method, ncons->permittedSubtrees,
bp, ind, "Permitted");
do_i2r_name_constraints(method, ncons->excludedSubtrees,
bp, ind, "Excluded");
return 1;
}
| 0 | ["CWE-125"] | openssl | 8393de42498f8be75cf0353f5c9f906a43a748d2 | 68,425,548,242,232,960,000,000,000,000,000,000,000 | 10 |
Fix the name constraints code to not assume NUL terminated strings
ASN.1 strings may not be NUL terminated. Don't assume they are.
CVE-2021-3712
Reviewed-by: Viktor Dukhovni <[email protected]>
Reviewed-by: Paul Dale <[email protected]>
|
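A small sketch of the underlying rule (simplified type, not OpenSSL's ASN1_STRING): an ASN.1 string carries an explicit length and need not be NUL terminated, so comparisons must be length-bounded rather than strcmp/strlen over the string data.

#include <string.h>

/* Simplified model: compare over the stored length so we never read past
 * the end of a non-NUL-terminated ASN.1 string. */
struct asn1_str { int length; const unsigned char *data; };

static int asn1_str_equals(const struct asn1_str *s, const char *label)
{
    size_t n = strlen(label);

    return s->length >= 0 && (size_t)s->length == n
        && memcmp(s->data, label, n) == 0;
}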
SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
int __user *, upeer_addrlen, int, flags)
{
struct socket *sock, *newsock;
struct file *newfile;
int err, len, newfd, fput_needed;
struct sockaddr_storage address;
if (flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
return -EINVAL;
if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
sock = sockfd_lookup_light(fd, &err, &fput_needed);
if (!sock)
goto out;
err = -ENFILE;
newsock = sock_alloc();
if (!newsock)
goto out_put;
newsock->type = sock->type;
newsock->ops = sock->ops;
/*
* We don't need try_module_get here, as the listening socket (sock)
* has the protocol module (sock->ops->owner) held.
*/
__module_get(newsock->ops->owner);
newfd = get_unused_fd_flags(flags);
if (unlikely(newfd < 0)) {
err = newfd;
sock_release(newsock);
goto out_put;
}
newfile = sock_alloc_file(newsock, flags, sock->sk->sk_prot_creator->name);
if (IS_ERR(newfile)) {
err = PTR_ERR(newfile);
put_unused_fd(newfd);
sock_release(newsock);
goto out_put;
}
err = security_socket_accept(sock, newsock);
if (err)
goto out_fd;
err = sock->ops->accept(sock, newsock, sock->file->f_flags, false);
if (err < 0)
goto out_fd;
if (upeer_sockaddr) {
if (newsock->ops->getname(newsock, (struct sockaddr *)&address,
&len, 2) < 0) {
err = -ECONNABORTED;
goto out_fd;
}
err = move_addr_to_user(&address,
len, upeer_sockaddr, upeer_addrlen);
if (err < 0)
goto out_fd;
}
/* File flags are not inherited via accept() unlike another OSes. */
fd_install(newfd, newfile);
err = newfd;
out_put:
fput_light(sock->file, fput_needed);
out:
return err;
out_fd:
fput(newfile);
put_unused_fd(newfd);
goto out_put;
}
| 0 | ["CWE-703", "CWE-125"] | linux | 8605330aac5a5785630aec8f64378a54891937cc | 33,368,737,183,887,850,000,000,000,000,000,000,000 | 80 |
tcp: fix SCM_TIMESTAMPING_OPT_STATS for normal skbs
__sock_recv_timestamp can be called for both normal skbs (for
receive timestamps) and for skbs on the error queue (for transmit
timestamps).
Commit 1c885808e456
(tcp: SOF_TIMESTAMPING_OPT_STATS option for SO_TIMESTAMPING)
assumes any skb passed to __sock_recv_timestamp are from
the error queue, containing OPT_STATS in the content of the skb.
This results in accessing invalid memory or generating junk
data.
To fix this, set skb->pkt_type to PACKET_OUTGOING for packets
on the error queue. This is safe because on the receive path
on local sockets skb->pkt_type is never set to PACKET_OUTGOING.
With that, copy OPT_STATS from a packet, only if its pkt_type
is PACKET_OUTGOING.
Fixes: 1c885808e456 ("tcp: SOF_TIMESTAMPING_OPT_STATS option for SO_TIMESTAMPING")
Reported-by: JongHwan Kim <[email protected]>
Signed-off-by: Soheil Hassas Yeganeh <[email protected]>
Signed-off-by: Eric Dumazet <[email protected]>
Signed-off-by: Willem de Bruijn <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
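A simplified model of the described fix (stand-in types; PACKET_OUTGOING's value matches <linux/if_packet.h>, the rest is illustrative): OPT_STATS content is trusted only for skbs that the error-queue path marked as outgoing, since the local receive path never uses that mark.

/* Model: pkt_type reliably distinguishes error-queue (transmit timestamp)
 * skbs from normal receive skbs, so OPT_STATS is copied only for the former. */
#define PACKET_OUTGOING 4

struct model_skb { int pkt_type; const void *opt_stats; };

static const void *opt_stats_if_errqueue(const struct model_skb *skb)
{
    if (skb->pkt_type != PACKET_OUTGOING)
        return NULL; /* normal receive skb: no OPT_STATS to copy */
    return skb->opt_stats;
}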
void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps,
bool apicv)
{
/*
* Note that as a general rule, the high half of the MSRs (bits in
* the control fields which may be 1) should be initialized by the
* intersection of the underlying hardware's MSR (i.e., features which
* can be supported) and the list of features we want to expose -
* because they are known to be properly supported in our code.
* Also, usually, the low half of the MSRs (bits which must be 1) can
* be set to 0, meaning that L1 may turn off any of these bits. The
* reason is that if one of these bits is necessary, it will appear
* in vmcs01 and prepare_vmcs02, when it bitwise-or's the control
* fields of vmcs01 and vmcs02, will turn these bits off - and
* nested_vmx_exit_reflected() will not pass related exits to L1.
* These rules have exceptions below.
*/
/* pin-based controls */
rdmsr(MSR_IA32_VMX_PINBASED_CTLS,
msrs->pinbased_ctls_low,
msrs->pinbased_ctls_high);
msrs->pinbased_ctls_low |=
PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
msrs->pinbased_ctls_high &=
PIN_BASED_EXT_INTR_MASK |
PIN_BASED_NMI_EXITING |
PIN_BASED_VIRTUAL_NMIS |
(apicv ? PIN_BASED_POSTED_INTR : 0);
msrs->pinbased_ctls_high |=
PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
PIN_BASED_VMX_PREEMPTION_TIMER;
/* exit controls */
rdmsr(MSR_IA32_VMX_EXIT_CTLS,
msrs->exit_ctls_low,
msrs->exit_ctls_high);
msrs->exit_ctls_low =
VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR;
msrs->exit_ctls_high &=
#ifdef CONFIG_X86_64
VM_EXIT_HOST_ADDR_SPACE_SIZE |
#endif
VM_EXIT_LOAD_IA32_PAT | VM_EXIT_SAVE_IA32_PAT;
msrs->exit_ctls_high |=
VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR |
VM_EXIT_LOAD_IA32_EFER | VM_EXIT_SAVE_IA32_EFER |
VM_EXIT_SAVE_VMX_PREEMPTION_TIMER | VM_EXIT_ACK_INTR_ON_EXIT;
/* We support free control of debug control saving. */
msrs->exit_ctls_low &= ~VM_EXIT_SAVE_DEBUG_CONTROLS;
/* entry controls */
rdmsr(MSR_IA32_VMX_ENTRY_CTLS,
msrs->entry_ctls_low,
msrs->entry_ctls_high);
msrs->entry_ctls_low =
VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR;
msrs->entry_ctls_high &=
#ifdef CONFIG_X86_64
VM_ENTRY_IA32E_MODE |
#endif
VM_ENTRY_LOAD_IA32_PAT;
msrs->entry_ctls_high |=
(VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | VM_ENTRY_LOAD_IA32_EFER);
/* We support free control of debug control loading. */
msrs->entry_ctls_low &= ~VM_ENTRY_LOAD_DEBUG_CONTROLS;
/* cpu-based controls */
rdmsr(MSR_IA32_VMX_PROCBASED_CTLS,
msrs->procbased_ctls_low,
msrs->procbased_ctls_high);
msrs->procbased_ctls_low =
CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
msrs->procbased_ctls_high &=
CPU_BASED_VIRTUAL_INTR_PENDING |
CPU_BASED_VIRTUAL_NMI_PENDING | CPU_BASED_USE_TSC_OFFSETING |
CPU_BASED_HLT_EXITING | CPU_BASED_INVLPG_EXITING |
CPU_BASED_MWAIT_EXITING | CPU_BASED_CR3_LOAD_EXITING |
CPU_BASED_CR3_STORE_EXITING |
#ifdef CONFIG_X86_64
CPU_BASED_CR8_LOAD_EXITING | CPU_BASED_CR8_STORE_EXITING |
#endif
CPU_BASED_MOV_DR_EXITING | CPU_BASED_UNCOND_IO_EXITING |
CPU_BASED_USE_IO_BITMAPS | CPU_BASED_MONITOR_TRAP_FLAG |
CPU_BASED_MONITOR_EXITING | CPU_BASED_RDPMC_EXITING |
CPU_BASED_RDTSC_EXITING | CPU_BASED_PAUSE_EXITING |
CPU_BASED_TPR_SHADOW | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
/*
* We can allow some features even when not supported by the
* hardware. For example, L1 can specify an MSR bitmap - and we
* can use it to avoid exits to L1 - even when L0 runs L2
* without MSR bitmaps.
*/
msrs->procbased_ctls_high |=
CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
CPU_BASED_USE_MSR_BITMAPS;
/* We support free control of CR3 access interception. */
msrs->procbased_ctls_low &=
~(CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING);
/*
* secondary cpu-based controls. Do not include those that
* depend on CPUID bits, they are added later by vmx_cpuid_update.
*/
if (msrs->procbased_ctls_high & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)
rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
msrs->secondary_ctls_low,
msrs->secondary_ctls_high);
msrs->secondary_ctls_low = 0;
msrs->secondary_ctls_high &=
SECONDARY_EXEC_DESC |
SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
SECONDARY_EXEC_APIC_REGISTER_VIRT |
SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
SECONDARY_EXEC_WBINVD_EXITING;
/*
* We can emulate "VMCS shadowing," even if the hardware
* doesn't support it.
*/
msrs->secondary_ctls_high |=
SECONDARY_EXEC_SHADOW_VMCS;
if (enable_ept) {
/* nested EPT: emulate EPT also to L1 */
msrs->secondary_ctls_high |=
SECONDARY_EXEC_ENABLE_EPT;
msrs->ept_caps = VMX_EPT_PAGE_WALK_4_BIT |
VMX_EPTP_WB_BIT | VMX_EPT_INVEPT_BIT;
if (cpu_has_vmx_ept_execute_only())
msrs->ept_caps |=
VMX_EPT_EXECUTE_ONLY_BIT;
msrs->ept_caps &= ept_caps;
msrs->ept_caps |= VMX_EPT_EXTENT_GLOBAL_BIT |
VMX_EPT_EXTENT_CONTEXT_BIT | VMX_EPT_2MB_PAGE_BIT |
VMX_EPT_1GB_PAGE_BIT;
if (enable_ept_ad_bits) {
msrs->secondary_ctls_high |=
SECONDARY_EXEC_ENABLE_PML;
msrs->ept_caps |= VMX_EPT_AD_BIT;
}
}
if (cpu_has_vmx_vmfunc()) {
msrs->secondary_ctls_high |=
SECONDARY_EXEC_ENABLE_VMFUNC;
/*
* Advertise EPTP switching unconditionally
* since we emulate it
*/
if (enable_ept)
msrs->vmfunc_controls =
VMX_VMFUNC_EPTP_SWITCHING;
}
/*
* Old versions of KVM use the single-context version without
* checking for support, so declare that it is supported even
* though it is treated as global context. The alternative is
* not failing the single-context invvpid, and it is worse.
*/
if (enable_vpid) {
msrs->secondary_ctls_high |=
SECONDARY_EXEC_ENABLE_VPID;
msrs->vpid_caps = VMX_VPID_INVVPID_BIT |
VMX_VPID_EXTENT_SUPPORTED_MASK;
}
if (enable_unrestricted_guest)
msrs->secondary_ctls_high |=
SECONDARY_EXEC_UNRESTRICTED_GUEST;
if (flexpriority_enabled)
msrs->secondary_ctls_high |=
SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
/* miscellaneous data */
rdmsr(MSR_IA32_VMX_MISC,
msrs->misc_low,
msrs->misc_high);
msrs->misc_low &= VMX_MISC_SAVE_EFER_LMA;
msrs->misc_low |=
MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS |
VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE |
VMX_MISC_ACTIVITY_HLT;
msrs->misc_high = 0;
/*
* This MSR reports some information about VMX support. We
* should return information about the VMX we emulate for the
* guest, and the VMCS structure we give it - not about the
* VMX support of the underlying hardware.
*/
msrs->basic =
VMCS12_REVISION |
VMX_BASIC_TRUE_CTLS |
((u64)VMCS12_SIZE << VMX_BASIC_VMCS_SIZE_SHIFT) |
(VMX_BASIC_MEM_TYPE_WB << VMX_BASIC_MEM_TYPE_SHIFT);
if (cpu_has_vmx_basic_inout())
msrs->basic |= VMX_BASIC_INOUT;
/*
* These MSRs specify bits which the guest must keep fixed on
* while L1 is in VMXON mode (in L1's root mode, or running an L2).
* We picked the standard core2 setting.
*/
#define VMXON_CR0_ALWAYSON (X86_CR0_PE | X86_CR0_PG | X86_CR0_NE)
#define VMXON_CR4_ALWAYSON X86_CR4_VMXE
msrs->cr0_fixed0 = VMXON_CR0_ALWAYSON;
msrs->cr4_fixed0 = VMXON_CR4_ALWAYSON;
/* These MSRs specify bits which the guest must keep fixed off. */
rdmsrl(MSR_IA32_VMX_CR0_FIXED1, msrs->cr0_fixed1);
rdmsrl(MSR_IA32_VMX_CR4_FIXED1, msrs->cr4_fixed1);
/* highest index: VMX_PREEMPTION_TIMER_VALUE */
msrs->vmcs_enum = VMCS12_MAX_FIELD_INDEX << 1;
}
| 0 | ["CWE-863"] | kvm | acff78477b9b4f26ecdf65733a4ed77fe837e9dc | 262,459,101,746,060,400,000,000,000,000,000,000,000 | 224 |
KVM: x86: nVMX: close leak of L0's x2APIC MSRs (CVE-2019-3887)
The nested_vmx_prepare_msr_bitmap() function doesn't directly guard the
x2APIC MSR intercepts with the "virtualize x2APIC mode" MSR. As a
result, we discovered the potential for a buggy or malicious L1 to get
access to L0's x2APIC MSRs, via an L2, as follows.
1. L1 executes WRMSR(IA32_SPEC_CTRL, 1). This causes the spec_ctrl
variable, in nested_vmx_prepare_msr_bitmap() to become true.
2. L1 disables "virtualize x2APIC mode" in VMCS12.
3. L1 enables "APIC-register virtualization" in VMCS12.
Now, KVM will set VMCS02's x2APIC MSR intercepts from VMCS12, and then
set "virtualize x2APIC mode" to 0 in VMCS02. Oops.
This patch closes the leak by explicitly guarding VMCS02's x2APIC MSR
intercepts with VMCS12's "virtualize x2APIC mode" control.
The scenario outlined above and fix prescribed here, were verified with
a related patch in kvm-unit-tests titled "Add leak scenario to
virt_x2apic_mode_test".
Note, it looks like this issue may have been introduced inadvertently
during a merge---see 15303ba5d1cd.
Signed-off-by: Marc Orr <[email protected]>
Reviewed-by: Jim Mattson <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
|
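The shape of the guard the commit adds, in a simplified stand-in (names are not KVM's): L1's x2APIC MSR intercept choices are honored only when vmcs12 actually enables "virtualize x2APIC mode".

#include <stdbool.h>

/* Simplified model: without this guard, L1 could disable x2APIC
 * virtualization yet still have its intercept choices applied, reaching
 * L0's x2APIC MSRs through L2. */
struct vmcs12_model { bool virtualize_x2apic_mode; };

static void update_x2apic_intercepts(const struct vmcs12_model *vmcs12,
                                     unsigned long *l2_msr_bitmap)
{
    if (!vmcs12->virtualize_x2apic_mode)
        return; /* keep L0's protective intercepts in place */
    /* ... only here merge L1's x2APIC MSR intercepts into l2_msr_bitmap ... */
    (void)l2_msr_bitmap;
}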
TEST(RegexMatchExpression, MatchesElementExtendedOn) {
BSONObj match = BSON("x"
<< "ab");
BSONObj notMatch = BSON("x"
<< "a b");
RegexMatchExpression regex;
ASSERT(regex.init("", "a b", "x").isOK());
ASSERT(regex.matchesSingleElement(match.firstElement()));
ASSERT(!regex.matchesSingleElement(notMatch.firstElement()));
}
| 0 | [] | mongo | b0ef26c639112b50648a02d969298650fbd402a4 | 87,660,346,461,930,570,000,000,000,000,000,000,000 | 10 |
SERVER-51083 Reject invalid UTF-8 from $regex match expressions
|
grammar_rule_check_and_complete (symbol_list *r)
{
/* Type check.
If there is an action, then there is nothing we can do: the user
is allowed to shoot herself in the foot.
Don't worry about the default action if $$ is untyped, since $$'s
value can't be used. */
if (!r->action_props.code && r->content.sym->content->type_name)
{
symbol *first_rhs = r->next->content.sym;
/* If $$ is being set in default way, report if any type mismatch. */
if (first_rhs)
{
char const *lhs_type = r->content.sym->content->type_name;
char const *rhs_type =
first_rhs->content->type_name ? first_rhs->content->type_name : "";
if (!UNIQSTR_EQ (lhs_type, rhs_type))
complain (&r->rhs_loc, Wother,
_("type clash on default action: <%s> != <%s>"),
lhs_type, rhs_type);
else
{
/* Install the default action only for C++. */
const bool is_cxx =
STREQ (language->language, "c++")
|| (skeleton && (STREQ (skeleton, "glr.cc")
|| STREQ (skeleton, "lalr1.cc")));
if (is_cxx)
{
code_props_rule_action_init (&r->action_props, "{ $$ = $1; }",
r->rhs_loc, r,
/* name */ NULL,
/* type */ NULL,
/* is_predicate */ false);
code_props_translate_code (&r->action_props);
}
}
}
/* Warn if there is no default for $$ but we need one. */
else
complain (&r->rhs_loc, Wother,
_("empty rule for typed nonterminal, and no action"));
}
/* Check that symbol values that should be used are in fact used. */
{
int n = 0;
for (symbol_list const *l = r; l && l->content.sym; l = l->next, ++n)
{
bool midrule_warning = false;
if (!l->action_props.is_value_used
&& symbol_should_be_used (l, &midrule_warning)
/* The default action, $$ = $1, 'uses' both. */
&& (r->action_props.code || (n != 0 && n != 1)))
{
warnings warn_flag = midrule_warning ? Wmidrule_values : Wother;
if (n)
complain (&l->sym_loc, warn_flag, _("unused value: $%d"), n);
else
complain (&l->rhs_loc, warn_flag, _("unset value: $$"));
}
}
}
/* Check that %empty => empty rule. */
if (r->percent_empty_loc.start.file
&& r->next && r->next->content.sym)
{
complain (&r->percent_empty_loc, complaint,
_("%%empty on non-empty rule"));
fixits_register (&r->percent_empty_loc, "");
}
/* Check that empty rule => %empty. */
if (!(r->next && r->next->content.sym)
&& !r->midrule_parent_rule
&& !r->percent_empty_loc.start.file
&& warning_is_enabled (Wempty_rule))
{
complain (&r->rhs_loc, Wempty_rule, _("empty rule without %%empty"));
if (feature_flag & feature_caret)
location_caret_suggestion (r->rhs_loc, "%empty", stderr);
location loc = r->rhs_loc;
loc.end = loc.start;
fixits_register (&loc, " %empty ");
}
/* See comments in grammar_current_rule_prec_set for how POSIX
mandates this complaint. It's only for identifiers, so skip
it for char literals and strings, which are always tokens. */
if (r->ruleprec
&& r->ruleprec->tag[0] != '\'' && r->ruleprec->tag[0] != '"'
&& r->ruleprec->content->status != declared
&& !r->ruleprec->content->prec)
complain (&r->rhs_loc, Wother,
_("token for %%prec is not defined: %s"), r->ruleprec->tag);
/* Check that the (main) action was not typed. */
if (r->action_props.type)
complain (&r->rhs_loc, Wother,
_("only midrule actions can be typed: %s"), r->action_props.type);
}
| 0 | [] | bison | b7aab2dbad43aaf14eebe78d54aafa245a000988 | 264,754,735,170,550,000,000,000,000,000,000,000,000 | 104 |
fix: crash when redefining the EOF token
Reported by Agency for Defense Development.
https://lists.gnu.org/r/bug-bison/2020-08/msg00008.html
On an empty grammar such as
%token FOO
BAR
FOO 0
%%
input: %empty
we crash because when we find FOO 0, we decrement ntokens (since FOO
was discovered to be EOF, which is already known to be a token, so we
increment ntokens for it, and need to cancel this). This "works well"
when EOF is properly defined in one go, but here it is first defined
and later only assign token code 0. In the meanwhile BAR was given
the token number that we just decremented.
To fix this, assign symbol numbers after parsing, not during parsing,
so that we also saw all the explicit token codes. To maintain the
current numbers (I'd like to keep no difference in the output, not
just equivalence), we need to make sure the symbols are numbered in
the same order: that of appearance in the source file. So we need the
locations to be correct, which was almost the case, except for nterms
that appeared several times as LHS (i.e., several times as "foo:
..."). Fixing the use of location_of_lhs sufficed (it appears it was
intended for this use, but its implementation was unfinished: it was
always set to "false" only).
* src/symtab.c (symbol_location_as_lhs_set): Update location_of_lhs.
(symbol_code_set): Remove broken hack that decremented ntokens.
(symbol_class_set, dummy_symbol_get): Don't set number, ntokens and
nnterms.
(symbol_check_defined): Do it.
(symbols): Don't count nsyms here.
Actually, don't count nsyms at all: let it be done in...
* src/reader.c (check_and_convert_grammar): here. Define nsyms from
ntokens and nnterms after parsing.
* tests/input.at (EOF redeclared): New.
* examples/c/bistromathic/bistromathic.test: Adjust the traces: in
"%nterm <double> exp %% input: ...", exp used to be numbered before
input.
|
bool Config::configToVector(const ProtobufRepeatedRule& proto_rules,
HeaderToMetadataRules& vector) {
if (proto_rules.empty()) {
ENVOY_LOG(debug, "no rules provided");
return false;
}
for (const auto& entry : proto_rules) {
vector.emplace_back(entry);
}
return true;
}
| 0 | [] | envoy | 2c60632d41555ec8b3d9ef5246242be637a2db0f | 286,628,396,791,368,400,000,000,000,000,000,000,000 | 13 |
http: header map security fixes for duplicate headers (#197)
Previously header matching did not match on all headers for
non-inline headers. This patch changes the default behavior to
always logically match on all headers. Multiple individual
headers will be logically concatenated with ',' similar to what
is done with inline headers. This makes the behavior effectively
consistent. This behavior can be temporarily reverted by setting
the runtime value "envoy.reloadable_features.header_match_on_all_headers"
to "false".
Targeted fixes have been additionally performed on the following
extensions which make them consider all duplicate headers by default as
a comma concatenated list:
1) Any extension using CEL matching on headers.
2) The header to metadata filter.
3) The JWT filter.
4) The Lua filter.
Like primary header matching used in routing, RBAC, etc. this behavior
can be disabled by setting the runtime value
"envoy.reloadable_features.header_match_on_all_headers" to false.
Finally, the setCopy() header map API previously only set the first
header in the case of duplicate non-inline headers. setCopy() now
behaves similarly to the other set*() APIs and replaces all found
headers with a single value. This may have had security implications
in the extauth filter which uses this API. This behavior can be disabled
by setting the runtime value
"envoy.reloadable_features.http_set_copy_replace_all_headers" to false.
Fixes https://github.com/envoyproxy/envoy-setec/issues/188
Signed-off-by: Matt Klein <[email protected]>
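As a rough illustration of the comma-concatenation semantics described above, here is a minimal user-space C sketch; the struct and helper names are invented for this example and are not Envoy's actual HeaderMap API:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Illustrative header entry; Envoy's real HeaderMap is more involved. */
struct header { const char *name; const char *value; };

/* Join every value stored under `name` with "," so matching logic sees
 * one logical value, mirroring the behavior described in the commit. */
static char *get_all_joined(const struct header *h, size_t n, const char *name) {
    size_t len = 0, count = 0;
    for (size_t i = 0; i < n; i++)
        if (strcmp(h[i].name, name) == 0) { len += strlen(h[i].value) + 1; count++; }
    if (count == 0) return NULL;
    char *out = malloc(len), *p = out;
    if (!out) return NULL;
    for (size_t i = 0; i < n; i++) {
        if (strcmp(h[i].name, name) != 0) continue;
        if (p != out) *p++ = ',';          /* separator between duplicates */
        size_t l = strlen(h[i].value);
        memcpy(p, h[i].value, l); p += l;
    }
    *p = '\0';
    return out;
}

int main(void) {
    struct header hs[] = { {"x-tag", "a"}, {"host", "h"}, {"x-tag", "b"} };
    char *v = get_all_joined(hs, 3, "x-tag");
    printf("%s\n", v);                     /* prints "a,b" */
    free(v);
    return 0;
}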
|
static noinline int btrfs_mksubvol(struct path *parent,
char *name, int namelen,
struct btrfs_root *snap_src,
u64 *async_transid, bool readonly,
struct btrfs_qgroup_inherit *inherit)
{
struct inode *dir = d_inode(parent->dentry);
struct dentry *dentry;
int error;
error = mutex_lock_killable_nested(&dir->i_mutex, I_MUTEX_PARENT);
if (error == -EINTR)
return error;
dentry = lookup_one_len(name, parent->dentry, namelen);
error = PTR_ERR(dentry);
if (IS_ERR(dentry))
goto out_unlock;
error = -EEXIST;
if (d_really_is_positive(dentry))
goto out_dput;
error = btrfs_may_create(dir, dentry);
if (error)
goto out_dput;
/*
* even if this name doesn't exist, we may get hash collisions.
* check for them now when we can safely fail
*/
error = btrfs_check_dir_item_collision(BTRFS_I(dir)->root,
dir->i_ino, name,
namelen);
if (error)
goto out_dput;
down_read(&BTRFS_I(dir)->root->fs_info->subvol_sem);
if (btrfs_root_refs(&BTRFS_I(dir)->root->root_item) == 0)
goto out_up_read;
if (snap_src) {
error = create_snapshot(snap_src, dir, dentry, name, namelen,
async_transid, readonly, inherit);
} else {
error = create_subvol(dir, dentry, name, namelen,
async_transid, inherit);
}
if (!error)
fsnotify_mkdir(dir, dentry);
out_up_read:
up_read(&BTRFS_I(dir)->root->fs_info->subvol_sem);
out_dput:
dput(dentry);
out_unlock:
mutex_unlock(&dir->i_mutex);
return error;
}
| 0 |
[
"CWE-200"
] |
linux
|
8039d87d9e473aeb740d4fdbd59b9d2f89b2ced9
| 246,384,599,328,427,800,000,000,000,000,000,000,000 | 59 |
Btrfs: fix file corruption and data loss after cloning inline extents
Currently the clone ioctl allows to clone an inline extent from one file
to another that already has other (non-inlined) extents. This is a problem
because btrfs is not designed to deal with files having inline and regular
extents, if a file has an inline extent then it must be the only extent
in the file and must start at file offset 0. Having a file with an inline
extent followed by regular extents results in EIO errors when doing reads
or writes against the first 4K of the file.
Also, the clone ioctl allows one to lose data if the source file consists
of a single inline extent, with a size of N bytes, and the destination
file consists of a single inline extent with a size of M bytes, where we
have M > N. In this case the clone operation removes the inline extent
from the destination file and then copies the inline extent from the
source file into the destination file - we lose the M - N bytes from the
destination file, a read operation will get the value 0x00 for any bytes
in the range [N, M] (the destination inode's i_size remained as M,
that's why we can read past N bytes).
So fix this by not allowing such destructive operations to happen and
return errno EOPNOTSUPP to user space.
Currently the fstest btrfs/035 tests the data loss case but it totally
ignores this - i.e. expects the operation to succeed and does not check
that we got data loss.
The following test case for fstests exercises all these cases that result
in file corruption and data loss:
seq=`basename $0`
seqres=$RESULT_DIR/$seq
echo "QA output created by $seq"
tmp=/tmp/$$
status=1 # failure is the default!
trap "_cleanup; exit \$status" 0 1 2 3 15
_cleanup()
{
rm -f $tmp.*
}
# get standard environment, filters and checks
. ./common/rc
. ./common/filter
# real QA test starts here
_need_to_be_root
_supported_fs btrfs
_supported_os Linux
_require_scratch
_require_cloner
_require_btrfs_fs_feature "no_holes"
_require_btrfs_mkfs_feature "no-holes"
rm -f $seqres.full
test_cloning_inline_extents()
{
local mkfs_opts=$1
local mount_opts=$2
_scratch_mkfs $mkfs_opts >>$seqres.full 2>&1
_scratch_mount $mount_opts
# File bar, the source for all the following clone operations, consists
# of a single inline extent (50 bytes).
$XFS_IO_PROG -f -c "pwrite -S 0xbb 0 50" $SCRATCH_MNT/bar \
| _filter_xfs_io
# Test cloning into a file with an extent (non-inlined) where the
# destination offset overlaps that extent. It should not be possible to
# clone the inline extent from file bar into this file.
$XFS_IO_PROG -f -c "pwrite -S 0xaa 0K 16K" $SCRATCH_MNT/foo \
| _filter_xfs_io
$CLONER_PROG -s 0 -d 0 -l 0 $SCRATCH_MNT/bar $SCRATCH_MNT/foo
# Doing IO against any range in the first 4K of the file should work.
# Due to a past clone ioctl bug which allowed cloning the inline extent,
# these operations resulted in EIO errors.
echo "File foo data after clone operation:"
# All bytes should have the value 0xaa (clone operation failed and did
# not modify our file).
od -t x1 $SCRATCH_MNT/foo
$XFS_IO_PROG -c "pwrite -S 0xcc 0 100" $SCRATCH_MNT/foo | _filter_xfs_io
# Test cloning the inline extent against a file which has a hole in its
# first 4K followed by a non-inlined extent. It should not be possible
# as well to clone the inline extent from file bar into this file.
$XFS_IO_PROG -f -c "pwrite -S 0xdd 4K 12K" $SCRATCH_MNT/foo2 \
| _filter_xfs_io
$CLONER_PROG -s 0 -d 0 -l 0 $SCRATCH_MNT/bar $SCRATCH_MNT/foo2
# Doing IO against any range in the first 4K of the file should work.
# Due to a past clone ioctl bug which allowed cloning the inline extent,
# these operations resulted in EIO errors.
echo "File foo2 data after clone operation:"
# All bytes should have the value 0x00 (clone operation failed and did
# not modify our file).
od -t x1 $SCRATCH_MNT/foo2
$XFS_IO_PROG -c "pwrite -S 0xee 0 90" $SCRATCH_MNT/foo2 | _filter_xfs_io
# Test cloning the inline extent against a file which has a size of zero
# but has a prealloc extent. It should not be possible as well to clone
# the inline extent from file bar into this file.
$XFS_IO_PROG -f -c "falloc -k 0 1M" $SCRATCH_MNT/foo3 | _filter_xfs_io
$CLONER_PROG -s 0 -d 0 -l 0 $SCRATCH_MNT/bar $SCRATCH_MNT/foo3
# Doing IO against any range in the first 4K of the file should work.
# Due to a past clone ioctl bug which allowed cloning the inline extent,
# these operations resulted in EIO errors.
echo "First 50 bytes of foo3 after clone operation:"
# Should not be able to read any bytes, file has 0 bytes i_size (the
# clone operation failed and did not modify our file).
od -t x1 $SCRATCH_MNT/foo3
$XFS_IO_PROG -c "pwrite -S 0xff 0 90" $SCRATCH_MNT/foo3 | _filter_xfs_io
# Test cloning the inline extent against a file which consists of a
# single inline extent that has a size not greater than the size of
# bar's inline extent (40 < 50).
# It should be possible to do the extent cloning from bar to this file.
$XFS_IO_PROG -f -c "pwrite -S 0x01 0 40" $SCRATCH_MNT/foo4 \
| _filter_xfs_io
$CLONER_PROG -s 0 -d 0 -l 0 $SCRATCH_MNT/bar $SCRATCH_MNT/foo4
# Doing IO against any range in the first 4K of the file should work.
echo "File foo4 data after clone operation:"
# Must match file bar's content.
od -t x1 $SCRATCH_MNT/foo4
$XFS_IO_PROG -c "pwrite -S 0x02 0 90" $SCRATCH_MNT/foo4 | _filter_xfs_io
# Test cloning the inline extent against a file which consists of a
# single inline extent that has a size greater than the size of bar's
# inline extent (60 > 50).
# It should not be possible to clone the inline extent from file bar
# into this file.
$XFS_IO_PROG -f -c "pwrite -S 0x03 0 60" $SCRATCH_MNT/foo5 \
| _filter_xfs_io
$CLONER_PROG -s 0 -d 0 -l 0 $SCRATCH_MNT/bar $SCRATCH_MNT/foo5
# Reading the file should not fail.
echo "File foo5 data after clone operation:"
# Must have a size of 60 bytes, with all bytes having a value of 0x03
# (the clone operation failed and did not modify our file).
od -t x1 $SCRATCH_MNT/foo5
# Test cloning the inline extent against a file which has no extents but
# has a size greater than bar's inline extent (16K > 50).
# It should not be possible to clone the inline extent from file bar
# into this file.
$XFS_IO_PROG -f -c "truncate 16K" $SCRATCH_MNT/foo6 | _filter_xfs_io
$CLONER_PROG -s 0 -d 0 -l 0 $SCRATCH_MNT/bar $SCRATCH_MNT/foo6
# Reading the file should not fail.
echo "File foo6 data after clone operation:"
# Must have a size of 16K, with all bytes having a value of 0x00 (the
# clone operation failed and did not modify our file).
od -t x1 $SCRATCH_MNT/foo6
# Test cloning the inline extent against a file which has no extents but
# has a size not greater than bar's inline extent (30 < 50).
# It should be possible to clone the inline extent from file bar into
# this file.
$XFS_IO_PROG -f -c "truncate 30" $SCRATCH_MNT/foo7 | _filter_xfs_io
$CLONER_PROG -s 0 -d 0 -l 0 $SCRATCH_MNT/bar $SCRATCH_MNT/foo7
# Reading the file should not fail.
echo "File foo7 data after clone operation:"
# Must have a size of 50 bytes, with all bytes having a value of 0xbb.
od -t x1 $SCRATCH_MNT/foo7
# Test cloning the inline extent against a file which has a size not
# greater than the size of bar's inline extent (20 < 50) but has
# a prealloc extent that goes beyond the file's size. It should not be
# possible to clone the inline extent from bar into this file.
$XFS_IO_PROG -f -c "falloc -k 0 1M" \
-c "pwrite -S 0x88 0 20" \
$SCRATCH_MNT/foo8 | _filter_xfs_io
$CLONER_PROG -s 0 -d 0 -l 0 $SCRATCH_MNT/bar $SCRATCH_MNT/foo8
echo "File foo8 data after clone operation:"
# Must have a size of 20 bytes, with all bytes having a value of 0x88
# (the clone operation did not modify our file).
od -t x1 $SCRATCH_MNT/foo8
_scratch_unmount
}
echo -e "\nTesting without compression and without the no-holes feature...\n"
test_cloning_inline_extents
echo -e "\nTesting with compression and without the no-holes feature...\n"
test_cloning_inline_extents "" "-o compress"
echo -e "\nTesting without compression and with the no-holes feature...\n"
test_cloning_inline_extents "-O no-holes" ""
echo -e "\nTesting with compression and with the no-holes feature...\n"
test_cloning_inline_extents "-O no-holes" "-o compress"
status=0
exit
Cc: [email protected]
Signed-off-by: Filipe Manana <[email protected]>
|
static inline int UT64_SUB(ut64 *r, ut64 a, ut64 b) {
if (b > a) {
return 0;
}
if (r) {
*r = a - b;
}
return 1;
}
| 0 |
[
"CWE-476"
] |
radare2
|
1ea23bd6040441a21fbcfba69dce9a01af03f989
| 276,382,102,303,087,700,000,000,000,000,000,000,000 | 9 |
Fix #6816 - null deref in r_read_*
|
Expression_cache_tracker* Item_cache_wrapper::init_tracker(MEM_ROOT *mem_root)
{
if (expr_cache)
{
Expression_cache_tracker* tracker=
new(mem_root) Expression_cache_tracker(expr_cache);
if (tracker)
((Expression_cache_tmptable *)expr_cache)->set_tracker(tracker);
return tracker;
}
return NULL;
}
| 0 |
[
"CWE-416"
] |
server
|
c02ebf3510850ba78a106be9974c94c3b97d8585
| 185,755,018,288,223,000,000,000,000,000,000,000,000 | 12 |
MDEV-24176 Preparations
1. moved fix_vcol_exprs() call to open_table()
mysql_alter_table() doesn't do lock_tables(), so it cannot benefit from
fix_vcol_exprs() there. Tests affected: main.default_session
2. Vanilla cleanups and comments.
|
generate_hash_secret_salt(XML_Parser parser)
{
#if defined(__UINTPTR_TYPE__)
# define PARSER_CAST(p) (__UINTPTR_TYPE__)(p)
#elif defined(_WIN64) && defined(_MSC_VER)
# define PARSER_CAST(p) (unsigned __int64)(p)
#else
# define PARSER_CAST(p) (p)
#endif
#ifdef __CloudABI__
unsigned long entropy;
(void)parser;
(void)gather_time_entropy;
arc4random_buf(&entropy, sizeof(entropy));
return entropy;
#else
/* Process ID is 0 bits entropy if attacker has local access
* XML_Parser address is few bits of entropy if attacker has local access */
const unsigned long entropy =
gather_time_entropy() ^ getpid() ^ (unsigned long)PARSER_CAST(parser);
/* Factors are 2^31-1 and 2^61-1 (Mersenne primes M31 and M61) */
if (sizeof(unsigned long) == 4) {
return entropy * 2147483647;
} else {
return entropy * (unsigned long)2305843009213693951;
}
#endif
}
| 0 |
[
"CWE-190"
] |
libexpat
|
d4f735b88d9932bd5039df2335eefdd0723dbe20
| 14,824,378,549,915,008,000,000,000,000,000,000,000 | 30 |
Detect integer overflow (CVE-2016-9063)
Needs XML_CONTEXT_BYTES to be _undefined_ to trigger,
default is defined and set to 1024.
Previously patched downstream, e.g.
https://sources.debian.net/src/expat/2.2.0-2/debian/patches/CVE-2016-9063.patch/
https://bug1274777.bmoattachments.org/attachment.cgi?id=8755538
This version avoids undefined behavior from _signed_ integer overflow.
Signed-off-by: Pascal Cuoq <[email protected]>
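The core of such a fix is checking the arithmetic before it can wrap. A minimal C sketch of that pattern, using unsigned math to stay clear of signed-overflow undefined behavior (illustrative only, not expat's actual buffer code):

#include <limits.h>
#include <stdio.h>

/* Check that growing a buffer size cannot overflow *before* doing the
 * arithmetic; the real check lives in expat's buffer management. */
static int safe_grow(unsigned int have, unsigned int extra, unsigned int *out) {
    if (extra > UINT_MAX - have)   /* would wrap around */
        return 0;
    *out = have + extra;
    return 1;
}

int main(void) {
    unsigned int sz;
    printf("%d\n", safe_grow(100u, 28u, &sz));           /* 1: ok */
    printf("%d\n", safe_grow(UINT_MAX - 4u, 8u, &sz));   /* 0: overflow caught */
    return 0;
}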
|
__unregister_chrdev_region(unsigned major, unsigned baseminor, int minorct)
{
struct char_device_struct *cd = NULL, **cp;
int i = major_to_index(major);
mutex_lock(&chrdevs_lock);
for (cp = &chrdevs[i]; *cp; cp = &(*cp)->next)
if ((*cp)->major == major &&
(*cp)->baseminor == baseminor &&
(*cp)->minorct == minorct)
break;
if (*cp) {
cd = *cp;
*cp = cd->next;
}
mutex_unlock(&chrdevs_lock);
return cd;
}
| 0 |
[
"CWE-362"
] |
linux
|
68faa679b8be1a74e6663c21c3a9d25d32f1c079
| 297,890,835,305,001,340,000,000,000,000,000,000,000 | 18 |
chardev: Avoid potential use-after-free in 'chrdev_open()'
'chrdev_open()' calls 'cdev_get()' to obtain a reference to the
'struct cdev *' stashed in the 'i_cdev' field of the target inode
structure. If the pointer is NULL, then it is initialised lazily by
looking up the kobject in the 'cdev_map' and so the whole procedure is
protected by the 'cdev_lock' spinlock to serialise initialisation of
the shared pointer.
Unfortunately, it is possible for the initialising thread to fail *after*
installing the new pointer, for example if the subsequent '->open()' call
on the file fails. In this case, 'cdev_put()' is called, the reference
count on the kobject is dropped and, if nobody else has taken a reference,
the release function is called which finally clears 'inode->i_cdev' from
'cdev_purge()' before potentially freeing the object. The problem here
is that a racing thread can happily take the 'cdev_lock' and see the
non-NULL pointer in the inode, which can result in a refcount increment
from zero and a warning:
| ------------[ cut here ]------------
| refcount_t: addition on 0; use-after-free.
| WARNING: CPU: 2 PID: 6385 at lib/refcount.c:25 refcount_warn_saturate+0x6d/0xf0
| Modules linked in:
| CPU: 2 PID: 6385 Comm: repro Not tainted 5.5.0-rc2+ #22
| Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.12.0-1 04/01/2014
| RIP: 0010:refcount_warn_saturate+0x6d/0xf0
| Code: 05 55 9a 15 01 01 e8 9d aa c8 ff 0f 0b c3 80 3d 45 9a 15 01 00 75 ce 48 c7 c7 00 9c 62 b3 c6 08
| RSP: 0018:ffffb524c1b9bc70 EFLAGS: 00010282
| RAX: 0000000000000000 RBX: ffff9e9da1f71390 RCX: 0000000000000000
| RDX: ffff9e9dbbd27618 RSI: ffff9e9dbbd18798 RDI: ffff9e9dbbd18798
| RBP: 0000000000000000 R08: 000000000000095f R09: 0000000000000039
| R10: 0000000000000000 R11: ffffb524c1b9bb20 R12: ffff9e9da1e8c700
| R13: ffffffffb25ee8b0 R14: 0000000000000000 R15: ffff9e9da1e8c700
| FS: 00007f3b87d26700(0000) GS:ffff9e9dbbd00000(0000) knlGS:0000000000000000
| CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
| CR2: 00007fc16909c000 CR3: 000000012df9c000 CR4: 00000000000006e0
| DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
| DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
| Call Trace:
| kobject_get+0x5c/0x60
| cdev_get+0x2b/0x60
| chrdev_open+0x55/0x220
| ? cdev_put.part.3+0x20/0x20
| do_dentry_open+0x13a/0x390
| path_openat+0x2c8/0x1470
| do_filp_open+0x93/0x100
| ? selinux_file_ioctl+0x17f/0x220
| do_sys_open+0x186/0x220
| do_syscall_64+0x48/0x150
| entry_SYSCALL_64_after_hwframe+0x44/0xa9
| RIP: 0033:0x7f3b87efcd0e
| Code: 89 54 24 08 e8 a3 f4 ff ff 8b 74 24 0c 48 8b 3c 24 41 89 c0 44 8b 54 24 08 b8 01 01 00 00 89 f4
| RSP: 002b:00007f3b87d259f0 EFLAGS: 00000293 ORIG_RAX: 0000000000000101
| RAX: ffffffffffffffda RBX: 0000000000000000 RCX: 00007f3b87efcd0e
| RDX: 0000000000000000 RSI: 00007f3b87d25a80 RDI: 00000000ffffff9c
| RBP: 00007f3b87d25e90 R08: 0000000000000000 R09: 0000000000000000
| R10: 0000000000000000 R11: 0000000000000293 R12: 00007ffe188f504e
| R13: 00007ffe188f504f R14: 00007f3b87d26700 R15: 0000000000000000
| ---[ end trace 24f53ca58db8180a ]---
Since 'cdev_get()' can already fail to obtain a reference, simply move
it over to use 'kobject_get_unless_zero()' instead of 'kobject_get()',
which will cause the racing thread to return -ENXIO if the initialising
thread fails unexpectedly.
Cc: Hillf Danton <[email protected]>
Cc: Andrew Morton <[email protected]>
Cc: Al Viro <[email protected]>
Reported-by: [email protected]
Signed-off-by: Will Deacon <[email protected]>
Cc: stable <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Greg Kroah-Hartman <[email protected]>
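A stripped-down user-space analogue of the kobject_get_unless_zero() idea — refusing to take a reference once the count has hit zero — looks roughly like this (plain ints stand in for the kernel's atomics):

#include <stdio.h>

/* A get() that refuses to resurrect an object whose refcount already
 * dropped to zero, so a racing opener gets a failure (-ENXIO in the
 * kernel) instead of a use-after-free. */
struct obj { int refcount; };

static int get_unless_zero(struct obj *o) {
    if (o->refcount == 0)
        return 0;            /* object is being torn down: fail the open */
    o->refcount++;
    return 1;
}

int main(void) {
    struct obj o = { .refcount = 1 };
    printf("%d\n", get_unless_zero(&o)); /* 1: reference taken */
    o.refcount = 0;                      /* release path ran */
    printf("%d\n", get_unless_zero(&o)); /* 0: resurrection refused */
    return 0;
}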
|
DwaCompressor::uncompress
(const char *inPtr,
int inSize,
int minY,
const char *&outPtr)
{
return uncompress (inPtr,
inSize,
IMATH_NAMESPACE::Box2i (IMATH_NAMESPACE::V2i (_min[0], minY),
IMATH_NAMESPACE::V2i (_max[0], minY + numScanLines() - 1)),
outPtr);
}
| 0 |
[
"CWE-125"
] |
openexr
|
e79d2296496a50826a15c667bf92bdc5a05518b4
| 69,704,761,893,915,770,000,000,000,000,000,000,000 | 12 |
fix memory leaks and invalid memory accesses
Signed-off-by: Peter Hillman <[email protected]>
|
static struct dentry *vfat_mount(struct file_system_type *fs_type,
int flags, const char *dev_name,
void *data)
{
return mount_bdev(fs_type, flags, dev_name, data, vfat_fill_super);
}
| 0 |
[
"CWE-119",
"CWE-787"
] |
linux
|
0720a06a7518c9d0c0125bd5d1f3b6264c55c3dd
| 266,205,202,775,817,570,000,000,000,000,000,000,000 | 6 |
NLS: improve UTF8 -> UTF16 string conversion routine
The utf8s_to_utf16s conversion routine needs to be improved. Unlike
its utf16s_to_utf8s sibling, it doesn't accept arguments specifying
the maximum length of the output buffer or the endianness of its
16-bit output.
This patch (as1501) adds the two missing arguments, and adjusts the
only two places in the kernel where the function is called. A
follow-on patch will add a third caller that does utilize the new
capabilities.
The two conversion routines are still annoyingly inconsistent in the
way they handle invalid byte combinations. But that's a subject for a
different patch.
Signed-off-by: Alan Stern <[email protected]>
CC: Clemens Ladisch <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
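A heavily simplified sketch of the amended interface, handling only the ASCII subset and assuming a little-endian host for the byte-swap branch (the real routine decodes full UTF-8):

#include <stdio.h>
#include <stdint.h>

enum utf16_endian { UTF16_HOST, UTF16_LITTLE, UTF16_BIG };

/* The caller now passes the output capacity and desired endianness.
 * On the assumed little-endian host, UTF16_LITTLE needs no swap. */
static int ascii_to_utf16(const uint8_t *in, int inlen,
                          enum utf16_endian endian,
                          uint16_t *out, int maxout) {
    int n = 0;
    for (int i = 0; i < inlen && n < maxout; i++) {   /* honor maxout */
        uint16_t u = in[i];
        if (endian == UTF16_BIG)
            u = (uint16_t)((u << 8) | (u >> 8));      /* byte-swap for BE */
        out[n++] = u;
    }
    return n;                                         /* units written */
}

int main(void) {
    uint16_t buf[4];
    int n = ascii_to_utf16((const uint8_t *)"hello", 5, UTF16_HOST, buf, 4);
    printf("%d\n", n);    /* 4: truncated at the buffer limit, no overflow */
    return 0;
}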
|
ex_viminfo(
exarg_T *eap)
{
char_u *save_viminfo;
save_viminfo = p_viminfo;
if (*p_viminfo == NUL)
p_viminfo = (char_u *)"'100";
if (eap->cmdidx == CMD_rviminfo)
{
if (read_viminfo(eap->arg, VIF_WANT_INFO | VIF_WANT_MARKS
| (eap->forceit ? VIF_FORCEIT : 0)) == FAIL)
emsg(_("E195: Cannot open viminfo file for reading"));
}
else
write_viminfo(eap->arg, eap->forceit);
p_viminfo = save_viminfo;
}
| 0 |
[
"CWE-78"
] |
vim
|
8c62a08faf89663e5633dc5036cd8695c80f1075
| 203,263,088,870,891,570,000,000,000,000,000,000,000 | 18 |
patch 8.1.0881: can execute shell commands in rvim through interfaces
Problem: Can execute shell commands in rvim through interfaces.
Solution: Disable using interfaces in restricted mode. Allow for writing
file with writefile(), histadd() and a few others.
|
sug_filltable(
spellinfo_T *spin,
wordnode_T *node,
int startwordnr,
garray_T *gap) // place to store line of numbers
{
wordnode_T *p, *np;
int wordnr = startwordnr;
int nr;
int prev_nr;
FOR_ALL_NODE_SIBLINGS(node, p)
{
if (p->wn_byte == NUL)
{
gap->ga_len = 0;
prev_nr = 0;
for (np = p; np != NULL && np->wn_byte == NUL; np = np->wn_sibling)
{
if (ga_grow(gap, 10) == FAIL)
return -1;
nr = (np->wn_flags << 16) + (np->wn_region & 0xffff);
// Compute the offset from the previous nr and store the
// offset in a way that it takes a minimum number of bytes.
// It's a bit like utf-8, but without the need to mark
// following bytes.
nr -= prev_nr;
prev_nr += nr;
gap->ga_len += offset2bytes(nr,
(char_u *)gap->ga_data + gap->ga_len);
}
// add the NUL byte
((char_u *)gap->ga_data)[gap->ga_len++] = NUL;
if (ml_append_buf(spin->si_spellbuf, (linenr_T)wordnr,
gap->ga_data, gap->ga_len, TRUE) == FAIL)
return -1;
++wordnr;
// Remove extra NUL entries, we no longer need them. We don't
// bother freeing the nodes, they won't be reused anyway.
while (p->wn_sibling != NULL && p->wn_sibling->wn_byte == NUL)
p->wn_sibling = p->wn_sibling->wn_sibling;
// Clear the flags on the remaining NUL node, so that compression
// works a lot better.
p->wn_flags = 0;
p->wn_region = 0;
}
else
{
wordnr = sug_filltable(spin, p->wn_child, wordnr, gap);
if (wordnr == -1)
return -1;
}
}
return wordnr;
}
| 0 |
[
"CWE-787"
] |
vim
|
7c824682d2028432ee082703ef0ab399867a089b
| 70,270,303,678,435,000,000,000,000,000,000,000,000 | 60 |
patch 8.2.4919: can add invalid bytes with :spellgood
Problem: Can add invalid bytes with :spellgood.
Solution: Check for a valid word string.
|
dp_packet_reset_cutlen(struct dp_packet *b)
{
b->cutlen = 0;
}
| 0 |
[
"CWE-400"
] |
ovs
|
79349cbab0b2a755140eedb91833ad2760520a83
| 96,251,568,556,883,700,000,000,000,000,000,000,000 | 4 |
flow: Support extra padding length.
Although not required, padding can be optionally added until
the packet length is MTU bytes. A packet with extra padding
currently fails sanity checks.
Vulnerability: CVE-2020-35498
Fixes: fa8d9001a624 ("miniflow_extract: Properly handle small IP packets.")
Reported-by: Joakim Hindersson <[email protected]>
Acked-by: Ilya Maximets <[email protected]>
Signed-off-by: Flavio Leitner <[email protected]>
Signed-off-by: Ilya Maximets <[email protected]>
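The essence of the relaxed check is that the L2 payload only needs to be at least as long as the IP header claims, not exactly equal. A tiny illustrative sketch (not OVS's actual miniflow code):

#include <stdio.h>

/* A frame may carry trailing pad bytes up to the MTU, so the L2 length
 * only needs to be *at least* the IP total length. */
static int ip_len_ok(unsigned int l2_payload_len, unsigned int ip_tot_len) {
    return l2_payload_len >= ip_tot_len;   /* extra padding is fine */
}

int main(void) {
    printf("%d\n", ip_len_ok(46, 28));   /* 1: small packet padded out */
    printf("%d\n", ip_len_ok(20, 28));   /* 0: truly truncated packet */
    return 0;
}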
|
static pfunc scan(struct jv_parser* p, char ch, jv* out) {
p->column++;
if (ch == '\n') {
p->line++;
p->column = 0;
}
if (ch == '\036' /* ASCII RS; see draft-ietf-json-sequence-07 */) {
if (check_truncation(p)) {
if (check_literal(p) == 0 && is_top_num(p))
return "Potentially truncated top-level numeric value";
return "Truncated value";
}
TRY(check_literal(p));
if (p->st == JV_PARSER_NORMAL && check_done(p, out))
return OK;
// shouldn't happen?
assert(!jv_is_valid(*out));
parser_reset(p);
jv_free(*out);
*out = jv_invalid();
return OK;
}
presult answer = 0;
p->last_ch_was_ws = 0;
if (p->st == JV_PARSER_NORMAL) {
chclass cls = classify(ch);
if (cls == WHITESPACE)
p->last_ch_was_ws = 1;
if (cls != LITERAL) {
TRY(check_literal(p));
if (check_done(p, out)) answer = OK;
}
switch (cls) {
case LITERAL:
tokenadd(p, ch);
break;
case WHITESPACE:
break;
case QUOTE:
p->st = JV_PARSER_STRING;
break;
case STRUCTURE:
TRY(token(p, ch));
break;
case INVALID:
return "Invalid character";
}
if (check_done(p, out)) answer = OK;
} else {
if (ch == '"' && p->st == JV_PARSER_STRING) {
TRY(found_string(p));
p->st = JV_PARSER_NORMAL;
if (check_done(p, out)) answer = OK;
} else {
tokenadd(p, ch);
if (ch == '\\' && p->st == JV_PARSER_STRING) {
p->st = JV_PARSER_STRING_ESCAPE;
} else {
p->st = JV_PARSER_STRING;
}
}
}
return answer;
}
| 0 |
[
"CWE-119",
"CWE-787"
] |
jq
|
8eb1367ca44e772963e704a700ef72ae2e12babd
| 327,460,366,569,573,560,000,000,000,000,000,000,000 | 64 |
Heap buffer overflow in tokenadd() (fix #105)
This was an off-by-one: the NUL terminator byte was not allocated on
resize. This was triggered by JSON-encoded numbers longer than 256
bytes.
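A minimal sketch of the corrected growth logic — always reserving one extra byte for the NUL on resize; the names here are illustrative, not jq's actual parser internals:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct tok { char *buf; int len; int alloc; };

static void tokenadd(struct tok *t, char c) {
    if (t->len + 1 >= t->alloc) {             /* +1 reserves the NUL slot */
        t->alloc = t->alloc ? t->alloc * 2 : 16;
        t->buf = realloc(t->buf, t->alloc);
        if (!t->buf) abort();
    }
    t->buf[t->len++] = c;
    t->buf[t->len] = '\0';                    /* always terminated */
}

int main(void) {
    struct tok t = {0};
    for (int i = 0; i < 300; i++)             /* longer than 256 bytes */
        tokenadd(&t, '9');
    printf("%zu\n", strlen(t.buf));           /* 300 */
    free(t.buf);
    return 0;
}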
|
void ERR_remove_state(unsigned long)
{
GetErrors().Remove();
}
| 0 |
[
"CWE-254"
] |
mysql-server
|
e7061f7e5a96c66cb2e0bf46bec7f6ff35801a69
| 73,336,924,090,804,540,000,000,000,000,000,000,000 | 4 |
Bug #22738607: YASSL FUNCTION X509_NAME_GET_INDEX_BY_NID IS NOT WORKING AS EXPECTED.
|
spell_add_word(
char_u *word,
int len,
int what, // SPELL_ADD_ values
int idx, // "zG" and "zW": zero, otherwise index in
// 'spellfile'
int undo) // TRUE for "zug", "zuG", "zuw" and "zuW"
{
FILE *fd = NULL;
buf_T *buf = NULL;
int new_spf = FALSE;
char_u *fname;
char_u *fnamebuf = NULL;
char_u line[MAXWLEN * 2];
long fpos, fpos_next = 0;
int i;
char_u *spf;
if (enc_utf8 && !utf_valid_string(word, NULL))
{
emsg(_(e_illegal_character_in_word));
return;
}
if (idx == 0) // use internal wordlist
{
if (int_wordlist == NULL)
{
int_wordlist = vim_tempname('s', FALSE);
if (int_wordlist == NULL)
return;
}
fname = int_wordlist;
}
else
{
// If 'spellfile' isn't set figure out a good default value.
if (*curwin->w_s->b_p_spf == NUL)
{
init_spellfile();
new_spf = TRUE;
}
if (*curwin->w_s->b_p_spf == NUL)
{
semsg(_(e_option_str_is_not_set), "spellfile");
return;
}
fnamebuf = alloc(MAXPATHL);
if (fnamebuf == NULL)
return;
for (spf = curwin->w_s->b_p_spf, i = 1; *spf != NUL; ++i)
{
copy_option_part(&spf, fnamebuf, MAXPATHL, ",");
if (i == idx)
break;
if (*spf == NUL)
{
semsg(_(e_spellfile_does_not_have_nr_entries), idx);
vim_free(fnamebuf);
return;
}
}
// Check that the user isn't editing the .add file somewhere.
buf = buflist_findname_exp(fnamebuf);
if (buf != NULL && buf->b_ml.ml_mfp == NULL)
buf = NULL;
if (buf != NULL && bufIsChanged(buf))
{
emsg(_(e_file_is_loaded_in_another_buffer));
vim_free(fnamebuf);
return;
}
fname = fnamebuf;
}
if (what == SPELL_ADD_BAD || undo)
{
// When the word appears as good word we need to remove that one,
// since its flags sort before the one with WF_BANNED.
fd = mch_fopen((char *)fname, "r");
if (fd != NULL)
{
while (!vim_fgets(line, MAXWLEN * 2, fd))
{
fpos = fpos_next;
fpos_next = ftell(fd);
if (fpos_next < 0)
break; // should never happen
if (STRNCMP(word, line, len) == 0
&& (line[len] == '/' || line[len] < ' '))
{
// Found duplicate word. Remove it by writing a '#' at
// the start of the line. Mixing reading and writing
// doesn't work for all systems, close the file first.
fclose(fd);
fd = mch_fopen((char *)fname, "r+");
if (fd == NULL)
break;
if (fseek(fd, fpos, SEEK_SET) == 0)
{
fputc('#', fd);
if (undo)
{
home_replace(NULL, fname, NameBuff, MAXPATHL, TRUE);
smsg(_("Word '%.*s' removed from %s"),
len, word, NameBuff);
}
}
if (fseek(fd, fpos_next, SEEK_SET) != 0)
{
PERROR(_("Seek error in spellfile"));
break;
}
}
}
if (fd != NULL)
fclose(fd);
}
}
if (!undo)
{
fd = mch_fopen((char *)fname, "a");
if (fd == NULL && new_spf)
{
char_u *p;
// We just initialized the 'spellfile' option and can't open the
// file. We may need to create the "spell" directory first. We
// already checked the runtime directory is writable in
// init_spellfile().
if (!dir_of_file_exists(fname) && (p = gettail_sep(fname)) != fname)
{
int c = *p;
// The directory doesn't exist. Try creating it and opening
// the file again.
*p = NUL;
vim_mkdir(fname, 0755);
*p = c;
fd = mch_fopen((char *)fname, "a");
}
}
if (fd == NULL)
semsg(_(e_cant_open_file_str), fname);
else
{
if (what == SPELL_ADD_BAD)
fprintf(fd, "%.*s/!\n", len, word);
else if (what == SPELL_ADD_RARE)
fprintf(fd, "%.*s/?\n", len, word);
else
fprintf(fd, "%.*s\n", len, word);
fclose(fd);
home_replace(NULL, fname, NameBuff, MAXPATHL, TRUE);
smsg(_("Word '%.*s' added to %s"), len, word, NameBuff);
}
}
if (fd != NULL)
{
// Update the .add.spl file.
mkspell(1, &fname, FALSE, TRUE, TRUE);
// If the .add file is edited somewhere, reload it.
if (buf != NULL)
buf_reload(buf, buf->b_orig_mode, FALSE);
redraw_all_later(SOME_VALID);
}
vim_free(fnamebuf);
}
| 1 |
[
"CWE-125"
] |
vim
|
5e59ea54c0c37c2f84770f068d95280069828774
| 293,998,189,576,153,700,000,000,000,000,000,000,000 | 178 |
patch 9.0.0021: invalid memory access when adding word to spell word list
Problem: Invalid memory access when adding word with a control character to
the internal spell word list.
Solution: Disallow adding a word with control characters or a trailing
slash.
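A small sketch of the added validation, rejecting control characters and a trailing slash (illustrative only, not Vim's actual check):

#include <stdio.h>

/* A word may not contain control characters and may not end in a
 * slash, otherwise the .add file gets corrupted entries. */
static int spell_word_ok(const char *word) {
    size_t i = 0;
    for (; word[i]; i++)
        if ((unsigned char)word[i] < 0x20)
            return 0;                        /* control character */
    return !(i > 0 && word[i - 1] == '/');   /* trailing slash */
}

int main(void) {
    printf("%d\n", spell_word_ok("hello"));    /* 1 */
    printf("%d\n", spell_word_ok("bad\x01"));  /* 0 */
    printf("%d\n", spell_word_ok("oops/"));    /* 0 */
    return 0;
}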
|
CImg<T>& RGBtoYCbCr() {
if (_spectrum!=3)
throw CImgInstanceException(_cimg_instance
"RGBtoYCbCr(): Instance is not a RGB image.",
cimg_instance);
T *p1 = data(0,0,0,0), *p2 = data(0,0,0,1), *p3 = data(0,0,0,2);
const longT whd = (longT)width()*height()*depth();
cimg_pragma_openmp(parallel for cimg_openmp_if_size(whd,512))
for (longT N = 0; N<whd; ++N) {
const Tfloat
R = (Tfloat)p1[N],
G = (Tfloat)p2[N],
B = (Tfloat)p3[N],
Y = (66*R + 129*G + 25*B + 128)/256 + 16,
Cb = (-38*R - 74*G + 112*B + 128)/256 + 128,
Cr = (112*R - 94*G - 18*B + 128)/256 + 128;
p1[N] = (T)cimg::cut(Y,0,255),
p2[N] = (T)cimg::cut(Cb,0,255),
p3[N] = (T)cimg::cut(Cr,0,255);
}
return *this;
}
| 0 |
[
"CWE-770"
] |
cimg
|
619cb58dd90b4e03ac68286c70ed98acbefd1c90
| 298,323,245,500,953,040,000,000,000,000,000,000,000 | 23 |
CImg<>::load_bmp() and CImg<>::load_pandore(): Check that dimensions encoded in the file do not exceed the file size.
|
bool eq_cmp_as_binary() { return MY_TEST(flags & BINARY_FLAG); }
| 0 |
[
"CWE-416",
"CWE-703"
] |
server
|
08c7ab404f69d9c4ca6ca7a9cf7eec74c804f917
| 188,980,176,995,540,400,000,000,000,000,000,000,000 | 1 |
MDEV-24176 Server crashes after insert in the table with virtual
column generated using date_format() and if()
vcol_info->expr is allocated on expr_arena at parsing stage. Since
expr item is allocated on expr_arena, all the items it contains must be
allocated on expr_arena too. Otherwise fix_session_expr() will
encounter prematurely freed item.
When table is reopened from cache vcol_info contains stale
expression. We refresh expression via TABLE::vcol_fix_exprs() but
first we must prepare a proper context (Vcol_expr_context) which meets
some requirements:
1. As noted above expr update must be done on expr_arena as there may
be new items created. It was a bug in fix_session_expr_for_read() and
was just not reproduced because of no second refix. Now refix is done
for more cases so it does reproduce. Tests affected: vcol.binlog
2. Also name resolution context must be narrowed to the single table.
Tested by: vcol.update main.default vcol.vcol_syntax gcol.gcol_bugfixes
3. sql_mode must be clean and not fail expr update.
sql_mode such as MODE_NO_BACKSLASH_ESCAPES, MODE_NO_ZERO_IN_DATE, etc
must not affect vcol expression update. If the table was created
successfully any further evaluation must not fail. Tests affected:
main.func_like
Reviewed by: Sergei Golubchik <[email protected]>
|
ctnetlink_change_conntrack(struct nf_conn *ct,
const struct nlattr * const cda[])
{
int err;
/* only allow NAT changes and master assignation for new conntracks */
if (cda[CTA_NAT_SRC] || cda[CTA_NAT_DST] || cda[CTA_TUPLE_MASTER])
return -EOPNOTSUPP;
if (cda[CTA_HELP]) {
err = ctnetlink_change_helper(ct, cda);
if (err < 0)
return err;
}
if (cda[CTA_TIMEOUT]) {
err = ctnetlink_change_timeout(ct, cda);
if (err < 0)
return err;
}
if (cda[CTA_STATUS]) {
err = ctnetlink_change_status(ct, cda);
if (err < 0)
return err;
}
if (cda[CTA_PROTOINFO]) {
err = ctnetlink_change_protoinfo(ct, cda);
if (err < 0)
return err;
}
#if defined(CONFIG_NF_CONNTRACK_MARK)
if (cda[CTA_MARK])
ctnetlink_change_mark(ct, cda);
#endif
if (cda[CTA_SEQ_ADJ_ORIG] || cda[CTA_SEQ_ADJ_REPLY]) {
err = ctnetlink_change_seq_adj(ct, cda);
if (err < 0)
return err;
}
if (cda[CTA_SYNPROXY]) {
err = ctnetlink_change_synproxy(ct, cda);
if (err < 0)
return err;
}
if (cda[CTA_LABELS]) {
err = ctnetlink_attach_labels(ct, cda);
if (err < 0)
return err;
}
return 0;
}
| 0 |
[
"CWE-120"
] |
linux
|
1cc5ef91d2ff94d2bf2de3b3585423e8a1051cb6
| 74,936,425,995,661,300,000,000,000,000,000,000,000 | 58 |
netfilter: ctnetlink: add a range check for l3/l4 protonum
The indexes to the nf_nat_l[34]protos arrays come from userspace. So
check the tuple's family, e.g. l3num, when creating the conntrack in
order to prevent an OOB memory access during setup. Here is an example
kernel panic on 4.14.180 when userspace passes in an index greater than
NFPROTO_NUMPROTO.
Internal error: Oops - BUG: 0 [#1] PREEMPT SMP
Modules linked in:...
Process poc (pid: 5614, stack limit = 0x00000000a3933121)
CPU: 4 PID: 5614 Comm: poc Tainted: G S W O 4.14.180-g051355490483
Hardware name: Qualcomm Technologies, Inc. SM8150 V2 PM8150 Google Inc. MSM
task: 000000002a3dfffe task.stack: 00000000a3933121
pc : __cfi_check_fail+0x1c/0x24
lr : __cfi_check_fail+0x1c/0x24
...
Call trace:
__cfi_check_fail+0x1c/0x24
name_to_dev_t+0x0/0x468
nfnetlink_parse_nat_setup+0x234/0x258
ctnetlink_parse_nat_setup+0x4c/0x228
ctnetlink_new_conntrack+0x590/0xc40
nfnetlink_rcv_msg+0x31c/0x4d4
netlink_rcv_skb+0x100/0x184
nfnetlink_rcv+0xf4/0x180
netlink_unicast+0x360/0x770
netlink_sendmsg+0x5a0/0x6a4
___sys_sendmsg+0x314/0x46c
SyS_sendmsg+0xb4/0x108
el0_svc_naked+0x34/0x38
This crash is not happening since 5.4+, however, ctnetlink still
allows for creating entries with unsupported layer 3 protocol number.
Fixes: c1d10adb4a521 ("[NETFILTER]: Add ctnetlink port for nf_conntrack")
Signed-off-by: Will McVicker <[email protected]>
[[email protected]: rebased original patch on top of nf.git]
Signed-off-by: Pablo Neira Ayuso <[email protected]>
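The shape of the fix is a bounds check on the attacker-controlled index before it is used to index a fixed-size table. A minimal illustrative sketch (the constant and names are stand-ins for the kernel's):

#include <stdio.h>

#define NFPROTO_NUMPROTO 13   /* illustrative bound, mirroring the kernel */

static const char *l3protos[NFPROTO_NUMPROTO]; /* stand-in for nf_nat_l3protos */

/* Reject a userspace-supplied index before it indexes the table. */
static int lookup_l3proto(unsigned int l3num, const char **out) {
    if (l3num >= NFPROTO_NUMPROTO)
        return -1;             /* -EINVAL in the kernel */
    *out = l3protos[l3num];
    return 0;
}

int main(void) {
    const char *p;
    printf("%d\n", lookup_l3proto(2, &p));    /* 0: in range */
    printf("%d\n", lookup_l3proto(99, &p));   /* -1: OOB index rejected */
    return 0;
}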
|
static int __from_ib_access_flags(int iflags)
{
int qflags = 0;
if (iflags & IB_ACCESS_LOCAL_WRITE)
qflags |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
if (iflags & IB_ACCESS_REMOTE_READ)
qflags |= BNXT_QPLIB_ACCESS_REMOTE_READ;
if (iflags & IB_ACCESS_REMOTE_WRITE)
qflags |= BNXT_QPLIB_ACCESS_REMOTE_WRITE;
if (iflags & IB_ACCESS_REMOTE_ATOMIC)
qflags |= BNXT_QPLIB_ACCESS_REMOTE_ATOMIC;
if (iflags & IB_ACCESS_MW_BIND)
qflags |= BNXT_QPLIB_ACCESS_MW_BIND;
if (iflags & IB_ZERO_BASED)
qflags |= BNXT_QPLIB_ACCESS_ZERO_BASED;
if (iflags & IB_ACCESS_ON_DEMAND)
qflags |= BNXT_QPLIB_ACCESS_ON_DEMAND;
return qflags;
};
| 0 |
[
"CWE-400",
"CWE-401"
] |
linux
|
4a9d46a9fe14401f21df69cea97c62396d5fb053
| 321,848,223,666,795,200,000,000,000,000,000,000,000 | 20 |
RDMA: Fix goto target to release the allocated memory
In bnxt_re_create_srq(), when ib_copy_to_udata() fails, the allocated memory
should be released by goto fail.
Fixes: 37cb11acf1f7 ("RDMA/bnxt_re: Add SRQ support for Broadcom adapters")
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Navid Emamdoost <[email protected]>
Reviewed-by: Jason Gunthorpe <[email protected]>
Signed-off-by: Jason Gunthorpe <[email protected]>
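The pattern being restored is goto-based cleanup: a late failure must jump to a label that frees everything allocated so far. A small stand-alone sketch with invented names (copy_to_user_stub simulates the failing ib_copy_to_udata()):

#include <stdio.h>
#include <stdlib.h>

static int copy_to_user_stub(void *dst, const void *src, unsigned long n) {
    (void)dst; (void)src; (void)n;
    return -1;                      /* simulate the failing copy */
}

static int create_srq_like(void) {
    int rc;
    char reply[32] = "resp";
    void *srq = malloc(64);         /* resource allocated early */
    if (!srq)
        return -12;                 /* -ENOMEM */
    rc = copy_to_user_stub(NULL, reply, sizeof(reply));
    if (rc)
        goto fail;                  /* the fix: don't return and leak srq */
    free(srq);
    return 0;
fail:
    free(srq);                      /* release on the error path */
    return rc;
}

int main(void) {
    printf("rc=%d\n", create_srq_like());  /* rc=-1, srq freed either way */
    return 0;
}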
|
static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
{
int error;
int len;
struct inode *inode;
struct page *page = NULL;
char *kaddr;
struct shmem_inode_info *info;
len = strlen(symname) + 1;
if (len > PAGE_CACHE_SIZE)
return -ENAMETOOLONG;
inode = shmem_get_inode(dir->i_sb, S_IFLNK|S_IRWXUGO, 0);
if (!inode)
return -ENOSPC;
error = security_inode_init_security(inode, dir, NULL, NULL,
NULL);
if (error) {
if (error != -EOPNOTSUPP) {
iput(inode);
return error;
}
error = 0;
}
info = SHMEM_I(inode);
inode->i_size = len-1;
if (len <= (char *)inode - (char *)info) {
/* do it inline */
memcpy(info, symname, len);
inode->i_op = &shmem_symlink_inline_operations;
} else {
error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL);
if (error) {
iput(inode);
return error;
}
unlock_page(page);
inode->i_mapping->a_ops = &shmem_aops;
inode->i_op = &shmem_symlink_inode_operations;
kaddr = kmap_atomic(page, KM_USER0);
memcpy(kaddr, symname, len);
kunmap_atomic(kaddr, KM_USER0);
set_page_dirty(page);
page_cache_release(page);
}
if (dir->i_mode & S_ISGID)
inode->i_gid = dir->i_gid;
dir->i_size += BOGO_DIRENT_SIZE;
dir->i_ctime = dir->i_mtime = CURRENT_TIME;
d_instantiate(dentry, inode);
dget(dentry);
return 0;
}
| 0 |
[
"CWE-400"
] |
linux-2.6
|
14fcc23fdc78e9d32372553ccf21758a9bd56fa1
| 247,672,961,839,612,870,000,000,000,000,000,000,000 | 56 |
tmpfs: fix kernel BUG in shmem_delete_inode
SuSE's insserve initscript ordering program hits kernel BUG at mm/shmem.c:814
on 2.6.26. It's using posix_fadvise on directories, and the shmem_readpage
method added in 2.6.23 is letting POSIX_FADV_WILLNEED allocate useless pages
to a tmpfs directory, incrementing i_blocks count but never decrementing it.
Fix this by assigning shmem_aops (pointing to readpage and writepage and
set_page_dirty) only when it's needed, on a regular file or a long symlink.
Many thanks to Kel for outstanding bugreport and steps to reproduce it.
Reported-by: Kel Modderman <[email protected]>
Tested-by: Kel Modderman <[email protected]>
Signed-off-by: Hugh Dickins <[email protected]>
Cc: <[email protected]> [2.6.25.x, 2.6.26.x]
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
{
pgtable_page_dtor(pte);
__free_page(pte);
}
| 0 |
[] |
linux
|
1d18c47c735e8adfe531fc41fae31e98f86b68fe
| 336,586,395,853,796,470,000,000,000,000,000,000,000 | 5 |
arm64: MMU fault handling and page table management
This patch adds support for the handling of the MMU faults (exception
entry code introduced by a previous patch) and page table management.
The user translation table is pointed to by TTBR0 and the kernel one
(swapper_pg_dir) by TTBR1. There is no translation information shared or
address space overlapping between user and kernel page tables.
Signed-off-by: Will Deacon <[email protected]>
Signed-off-by: Catalin Marinas <[email protected]>
Acked-by: Tony Lindgren <[email protected]>
Acked-by: Nicolas Pitre <[email protected]>
Acked-by: Olof Johansson <[email protected]>
Acked-by: Santosh Shilimkar <[email protected]>
Acked-by: Arnd Bergmann <[email protected]>
|
static int cmd_anal(void *data, const char *input) {
const char *r;
RCore *core = (RCore *)data;
ut32 tbs = core->blocksize;
switch (input[0]) {
case 'p': // "ap"
{
const ut8 *prelude = (const ut8*)"\xe9\x2d"; //:fffff000";
const int prelude_sz = 2;
const int bufsz = 4096;
ut8 *buf = calloc (1, bufsz);
ut64 off = core->offset;
if (input[1] == ' ') {
off = r_num_math (core->num, input+1);
r_io_read_at (core->io, off - bufsz + prelude_sz, buf, bufsz);
} else {
r_io_read_at (core->io, off - bufsz + prelude_sz, buf, bufsz);
}
//const char *prelude = "\x2d\xe9\xf0\x47"; //:fffff000";
r_mem_reverse (buf, bufsz);
//r_print_hexdump (NULL, off, buf, bufsz, 16, -16);
const ut8 *pos = r_mem_mem (buf, bufsz, prelude, prelude_sz);
if (pos) {
int delta = (size_t)(pos - buf);
eprintf ("POS = %d\n", delta);
eprintf ("HIT = 0x%"PFMT64x"\n", off - delta);
r_cons_printf ("0x%08"PFMT64x"\n", off - delta);
} else {
eprintf ("Cannot find prelude\n");
}
free (buf);
}
break;
case '8':
{
ut8 *buf = malloc (strlen (input) + 1);
if (buf) {
int len = r_hex_str2bin (input + 1, buf);
if (len > 0) {
core_anal_bytes (core, buf, len, 0, input[1]);
}
free (buf);
}
}
break;
case 'b':
if (input[1] == 'b') { // "abb"
core_anal_bbs (core, input + 2);
} else if (input[1] == 'r') { // "abr"
core_anal_bbs_range (core, input + 2);
} else if (input[1] == 't') {
cmd_anal_abt (core, input+2);
} else if (input[1] == 'j') { // "abj"
anal_fcn_list_bb (core, input + 1, false);
} else if (input[1] == ' ' || !input[1]) {
// find block
ut64 addr = core->offset;
if (input[1]) {
addr = r_num_math (core->num, input + 1);
}
r_core_cmdf (core, "afbi @ 0x%"PFMT64x, addr);
} else {
r_core_cmd_help (core, help_msg_ab);
}
break;
case 'L': return r_core_cmd0 (core, "e asm.arch=??"); break;
case 'i': cmd_anal_info (core, input + 1); break; // "ai"
case 'r': cmd_anal_reg (core, input + 1); break; // "ar"
case 'e': cmd_anal_esil (core, input + 1); break; // "ae"
case 'o': cmd_anal_opcode (core, input + 1); break; // "ao"
case 'O': cmd_anal_bytes (core, input + 1); break; // "aO"
case 'F': // "aF"
r_core_anal_fcn (core, core->offset, UT64_MAX, R_ANAL_REF_TYPE_NULL, 1);
break;
case 'f': // "af"
{
int res = cmd_anal_fcn (core, input);
run_pending_anal (core);
if (!res) {
return false;
}
}
break;
case 'n': // 'an'
{
const char *name = NULL;
bool use_json = false;
if (input[1] == 'j') {
use_json = true;
input++;
}
if (input[1] == ' ') {
name = input + 1;
while (name[0] == ' ') {
name++;
}
char *end = strchr (name, ' ');
if (end) {
*end = '\0';
}
if (*name == '\0') {
name = NULL;
}
}
cmd_an (core, use_json, name);
}
break;
case 'g': // "ag"
cmd_anal_graph (core, input + 1);
break;
case 's': // "as"
cmd_anal_syscall (core, input + 1);
break;
case 'v': // "av"
cmd_anal_virtual_functions (core, input + 1);
break;
case 'x': // "ax"
if (!cmd_anal_refs (core, input + 1)) {
return false;
}
break;
case 'a': // "aa"
if (!cmd_anal_all (core, input + 1)) {
return false;
}
break;
case 'c': // "ac"
{
RList *hooks;
RListIter *iter;
RAnalCycleHook *hook;
char *instr_tmp = NULL;
int ccl = input[1]? r_num_math (core->num, &input[2]): 0; //get cycles to look for
int cr = r_config_get_i (core->config, "asm.cmt.right");
int fun = r_config_get_i (core->config, "asm.functions");
int li = r_config_get_i (core->config, "asm.lines");
int xr = r_config_get_i (core->config, "asm.xrefs");
r_config_set_i (core->config, "asm.cmt.right", true);
r_config_set_i (core->config, "asm.functions", false);
r_config_set_i (core->config, "asm.lines", false);
r_config_set_i (core->config, "asm.xrefs", false);
hooks = r_core_anal_cycles (core, ccl); //analyse
r_cons_clear_line (1);
r_list_foreach (hooks, iter, hook) {
instr_tmp = r_core_disassemble_instr (core, hook->addr, 1);
r_cons_printf ("After %4i cycles:\t%s", (ccl - hook->cycles), instr_tmp);
r_cons_flush ();
free (instr_tmp);
}
r_list_free (hooks);
r_config_set_i (core->config, "asm.cmt.right", cr); //reset settings
r_config_set_i (core->config, "asm.functions", fun);
r_config_set_i (core->config, "asm.lines", li);
r_config_set_i (core->config, "asm.xrefs", xr);
}
break;
case 'd': // "ad"
switch (input[1]) {
case 'f': // "adf"
if (input[2] == 'g') {
anal_fcn_data_gaps (core, input + 1);
} else {
anal_fcn_data (core, input + 1);
}
break;
case 't': // "adt"
cmd_anal_trampoline (core, input + 2);
break;
case ' ': { // "ad"
const int default_depth = 1;
const char *p;
int a, b;
a = r_num_math (core->num, input + 2);
p = strchr (input + 2, ' ');
b = p? r_num_math (core->num, p + 1): default_depth;
if (a < 1) {
a = 1;
}
if (b < 1) {
b = 1;
}
r_core_anal_data (core, core->offset, a, b, 0);
} break;
case 'k': // "adk"
r = r_anal_data_kind (core->anal,
core->offset, core->block, core->blocksize);
r_cons_println (r);
break;
case '\0': // "ad"
r_core_anal_data (core, core->offset, 2 + (core->blocksize / 4), 1, 0);
break;
case '4': // "ad4"
r_core_anal_data (core, core->offset, 2 + (core->blocksize / 4), 1, 4);
break;
case '8': // "ad8"
r_core_anal_data (core, core->offset, 2 + (core->blocksize / 4), 1, 8);
break;
default:
r_core_cmd_help (core, help_msg_ad);
break;
}
break;
case 'h': // "ah"
cmd_anal_hint (core, input + 1);
break;
case '!': // "a!"
if (core->anal && core->anal->cur && core->anal->cur->cmd_ext) {
return core->anal->cur->cmd_ext (core->anal, input + 1);
} else {
r_cons_printf ("No plugins for this analysis plugin\n");
}
break;
default:
r_core_cmd_help (core, help_msg_a);
#if 0
r_cons_printf ("Examples:\n"
" f ts @ `S*~text:0[3]`; f t @ section..text\n"
" f ds @ `S*~data:0[3]`; f d @ section..data\n"
" .ad t t+ts @ d:ds\n",
NULL);
#endif
break;
}
if (tbs != core->blocksize) {
r_core_block_size (core, tbs);
}
if (r_cons_is_breaked ()) {
r_cons_clear_line (1);
}
return 0;
}
| 0 |
[
"CWE-125",
"CWE-787"
] |
radare2
|
a1bc65c3db593530775823d6d7506a457ed95267
| 115,701,693,676,089,770,000,000,000,000,000,000,000 | 238 |
Fix #12375 - Crash in bd+ao (#12382)
|
QPDF::resolve(int objid, int generation)
{
// Check object cache before checking xref table. This allows us
// to insert things into the object cache that don't actually
// exist in the file.
QPDFObjGen og(objid, generation);
if (this->resolving.count(og))
{
// This can happen if an object references itself directly or
// indirectly in some key that has to be resolved during
// object parsing, such as stream length.
QTC::TC("qpdf", "QPDF recursion loop in resolve");
warn(QPDFExc(qpdf_e_damaged_pdf, this->file->getName(),
"", this->file->getLastOffset(),
"loop detected resolving object " +
QUtil::int_to_string(objid) + " " +
QUtil::int_to_string(generation)));
return new QPDF_Null;
}
ResolveRecorder rr(this, og);
if (! this->obj_cache.count(og))
{
if (! this->xref_table.count(og))
{
// PDF spec says unknown objects resolve to the null object.
return new QPDF_Null;
}
QPDFXRefEntry const& entry = this->xref_table[og];
switch (entry.getType())
{
case 1:
{
qpdf_offset_t offset = entry.getOffset();
// Object stored in cache by readObjectAtOffset
int aobjid;
int ageneration;
QPDFObjectHandle oh =
readObjectAtOffset(true, offset, "", objid, generation,
aobjid, ageneration);
}
break;
case 2:
resolveObjectsInStream(entry.getObjStreamNumber());
break;
default:
throw QPDFExc(qpdf_e_damaged_pdf, this->file->getName(), "", 0,
"object " +
QUtil::int_to_string(objid) + "/" +
QUtil::int_to_string(generation) +
" has unexpected xref entry type");
}
}
return this->obj_cache[og].object;
}
| 0 |
[
"CWE-399",
"CWE-835"
] |
qpdf
|
701b518d5c56a1449825a3a37a716c58e05e1c3e
| 59,050,538,673,624,260,000,000,000,000,000,000,000 | 59 |
Detect recursion loops resolving objects (fixes #51)
During parsing of an object, sometimes parts of the object have to be
resolved. An example is stream lengths. If such an object directly or
indirectly points to the object being parsed, it can cause an infinite
loop. Guard against all cases of re-entrant resolution of objects.
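A compact sketch of that guard — tracking which object ids are currently being resolved and bailing out with a null object when one recurs; the fixed array stands in for qpdf's resolving set:

#include <stdio.h>

#define MAX_ACTIVE 64
static int active[MAX_ACTIVE];
static int nactive;

static int resolving(int objid) {
    for (int i = 0; i < nactive; i++)
        if (active[i] == objid) return 1;
    return 0;
}

static void resolve(int objid, int refers_to) {
    if (resolving(objid)) {
        printf("loop detected resolving object %d\n", objid);
        return;                         /* treat as null, as in the fix */
    }
    active[nactive++] = objid;          /* mark: resolution in progress */
    if (refers_to)
        resolve(refers_to, objid);      /* simulate a mutual reference */
    nactive--;                          /* done: unmark */
}

int main(void) {
    resolve(7, 9);    /* 7 -> 9 -> 7 triggers the loop warning once */
    return 0;
}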
|
brcmf_notify_sched_scan_results(struct brcmf_if *ifp,
const struct brcmf_event_msg *e, void *data)
{
struct brcmf_cfg80211_info *cfg = ifp->drvr->config;
struct brcmf_pno_net_info_le *netinfo, *netinfo_start;
struct cfg80211_scan_request *request = NULL;
struct wiphy *wiphy = cfg_to_wiphy(cfg);
int i, err = 0;
struct brcmf_pno_scanresults_le *pfn_result;
u32 bucket_map;
u32 result_count;
u32 status;
u32 datalen;
brcmf_dbg(SCAN, "Enter\n");
if (e->datalen < (sizeof(*pfn_result) + sizeof(*netinfo))) {
brcmf_dbg(SCAN, "Event data to small. Ignore\n");
return 0;
}
if (e->event_code == BRCMF_E_PFN_NET_LOST) {
brcmf_dbg(SCAN, "PFN NET LOST event. Do Nothing\n");
return 0;
}
pfn_result = (struct brcmf_pno_scanresults_le *)data;
result_count = le32_to_cpu(pfn_result->count);
status = le32_to_cpu(pfn_result->status);
/* PFN event is limited to fit 512 bytes so we may get
* multiple NET_FOUND events. For now place a warning here.
*/
WARN_ON(status != BRCMF_PNO_SCAN_COMPLETE);
brcmf_dbg(SCAN, "PFN NET FOUND event. count: %d\n", result_count);
if (!result_count) {
brcmf_err("FALSE PNO Event. (pfn_count == 0)\n");
goto out_err;
}
netinfo_start = brcmf_get_netinfo_array(pfn_result);
datalen = e->datalen - ((void *)netinfo_start - (void *)pfn_result);
if (datalen < result_count * sizeof(*netinfo)) {
brcmf_err("insufficient event data\n");
goto out_err;
}
request = brcmf_alloc_internal_escan_request(wiphy,
result_count);
if (!request) {
err = -ENOMEM;
goto out_err;
}
bucket_map = 0;
for (i = 0; i < result_count; i++) {
netinfo = &netinfo_start[i];
if (netinfo->SSID_len > IEEE80211_MAX_SSID_LEN)
netinfo->SSID_len = IEEE80211_MAX_SSID_LEN;
brcmf_dbg(SCAN, "SSID:%.32s Channel:%d\n",
netinfo->SSID, netinfo->channel);
bucket_map |= brcmf_pno_get_bucket_map(cfg->pno, netinfo);
err = brcmf_internal_escan_add_info(request,
netinfo->SSID,
netinfo->SSID_len,
netinfo->channel);
if (err)
goto out_err;
}
if (!bucket_map)
goto free_req;
err = brcmf_start_internal_escan(ifp, bucket_map, request);
if (!err)
goto free_req;
out_err:
cfg80211_sched_scan_stopped(wiphy, 0);
free_req:
kfree(request);
return err;
}
| 0 |
[
"CWE-119",
"CWE-787"
] |
linux
|
8f44c9a41386729fea410e688959ddaa9d51be7c
| 236,459,693,886,694,800,000,000,000,000,000,000,000 | 84 |
brcmfmac: fix possible buffer overflow in brcmf_cfg80211_mgmt_tx()
The lower level nl80211 code in cfg80211 ensures that "len" is between
25 and NL80211_ATTR_FRAME (2304). We subtract DOT11_MGMT_HDR_LEN (24) from
"len", so that's a max of 2280. However, the action_frame->data[] buffer is
only BRCMF_FIL_ACTION_FRAME_SIZE (1800) bytes long so this memcpy() can
overflow.
memcpy(action_frame->data, &buf[DOT11_MGMT_HDR_LEN],
le16_to_cpu(action_frame->len));
Cc: [email protected] # 3.9.x
Fixes: 18e2f61db3b70 ("brcmfmac: P2P action frame tx.")
Reported-by: "freenerguo(郭大兴)" <[email protected]>
Signed-off-by: Arend van Spriel <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
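The fix boils down to validating the length against the destination buffer before the memcpy(). A stand-alone sketch of that check (the constant mirrors the one named above; the function is invented for illustration):

#include <stdio.h>
#include <string.h>

#define BRCMF_FIL_ACTION_FRAME_SIZE 1800   /* destination capacity */

/* Validate the frame length against the destination buffer instead of
 * trusting the larger bound enforced by nl80211. */
static int copy_action_frame(unsigned char *data, const unsigned char *buf,
                             size_t len) {
    if (len > BRCMF_FIL_ACTION_FRAME_SIZE)
        return -1;                          /* reject oversized frames */
    memcpy(data, buf, len);
    return 0;
}

int main(void) {
    static unsigned char dst[BRCMF_FIL_ACTION_FRAME_SIZE];
    static unsigned char src[2280];         /* 2304 - 24, the old max */
    printf("%d\n", copy_action_frame(dst, src, sizeof(src)));  /* -1 */
    printf("%d\n", copy_action_frame(dst, src, 1500));         /* 0 */
    return 0;
}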
|
static bool __core_anal_fcn(RCore *core, ut64 at, ut64 from, int reftype, int depth) {
if (depth < 0) {
// printf ("Too deep for 0x%08"PFMT64x"\n", at);
// r_sys_backtrace ();
return false;
}
int has_next = r_config_get_i (core->config, "anal.hasnext");
RAnalHint *hint = NULL;
int i, nexti = 0;
ut64 *next = NULL;
int fcnlen;
RAnalFunction *fcn = r_anal_function_new (core->anal);
r_warn_if_fail (fcn);
const char *fcnpfx = r_config_get (core->config, "anal.fcnprefix");
if (!fcnpfx) {
fcnpfx = "fcn";
}
const char *cc = r_anal_cc_default (core->anal);
if (!cc) {
if (r_anal_cc_once (core->anal)) {
eprintf ("Warning: set your favourite calling convention in `e anal.cc=?`\n");
}
cc = "reg";
}
fcn->cc = r_str_constpool_get (&core->anal->constpool, cc);
r_warn_if_fail (fcn->cc);
hint = r_anal_hint_get (core->anal, at);
if (hint && hint->bits == 16) {
// expand 16bit for function
fcn->bits = 16;
} else {
fcn->bits = core->anal->bits;
}
fcn->addr = at;
fcn->name = get_function_name (core, at);
if (!fcn->name) {
fcn->name = r_str_newf ("%s.%08"PFMT64x, fcnpfx, at);
}
r_anal_function_invalidate_read_ahead_cache ();
do {
RFlagItem *f;
ut64 delta = r_anal_function_linear_size (fcn);
if (!r_io_is_valid_offset (core->io, at + delta, !core->anal->opt.noncode)) {
goto error;
}
if (r_cons_is_breaked ()) {
break;
}
fcnlen = r_anal_function(core->anal, fcn, at + delta, core->anal->opt.bb_max_size, reftype);
if (core->anal->opt.searchstringrefs) {
r_anal_set_stringrefs (core, fcn);
}
if (fcnlen == 0) {
if (core->anal->verbose) {
eprintf ("Analyzed function size is 0 at 0x%08"PFMT64x"\n", at + delta);
}
goto error;
}
if (fcnlen < 0) {
switch (fcnlen) {
case R_ANAL_RET_ERROR:
case R_ANAL_RET_NEW:
case R_ANAL_RET_DUP:
case R_ANAL_RET_END:
break;
default:
eprintf ("Oops. Negative fcnsize at 0x%08"PFMT64x" (%d)\n", at, fcnlen);
continue;
}
}
f = r_core_flag_get_by_spaces (core->flags, fcn->addr);
set_fcn_name_from_flag (fcn, f, fcnpfx);
if (fcnlen == R_ANAL_RET_ERROR ||
(fcnlen == R_ANAL_RET_END && !r_anal_function_realsize (fcn))) { /* Error analyzing function */
if (core->anal->opt.followbrokenfcnsrefs) {
r_anal_analyze_fcn_refs (core, fcn, depth);
}
goto error;
} else if (fcnlen == R_ANAL_RET_END) { /* Function analysis complete */
f = r_core_flag_get_by_spaces (core->flags, fcn->addr);
if (f && f->name && strncmp (f->name, "sect", 4)) { /* Check if it's already flagged */
char *new_name = strdup (f->name);
if (is_entry_flag (f)) {
RListIter *iter;
RBinSymbol *sym;
const RList *syms = r_bin_get_symbols (core->bin);
ut64 baddr = r_config_get_i (core->config, "bin.baddr");
r_list_foreach (syms, iter, sym) {
if (sym->type && (sym->paddr + baddr) == fcn->addr && !strcmp (sym->type, R_BIN_TYPE_FUNC_STR)) {
free (new_name);
new_name = r_str_newf ("sym.%s", sym->name);
break;
}
}
}
free (fcn->name);
fcn->name = new_name;
} else {
R_FREE (fcn->name);
const char *fcnpfx = r_anal_functiontype_tostring (fcn->type);
if (!fcnpfx || !*fcnpfx || !strcmp (fcnpfx, "fcn")) {
fcnpfx = r_config_get (core->config, "anal.fcnprefix");
}
fcn->name = r_str_newf ("%s.%08"PFMT64x, fcnpfx, fcn->addr);
autoname_imp_trampoline (core, fcn);
/* Add flag */
r_flag_space_push (core->flags, R_FLAGS_FS_FUNCTIONS);
r_flag_set (core->flags, fcn->name, fcn->addr, r_anal_function_linear_size (fcn));
r_flag_space_pop (core->flags);
}
/* New function: Add initial xref */
if (from != UT64_MAX) {
r_anal_xrefs_set (core->anal, from, fcn->addr, reftype);
}
// XXX: this is wrong. See CID 1134565
r_anal_add_function (core->anal, fcn);
if (has_next) {
ut64 addr = r_anal_function_max_addr (fcn);
RIOMap *map = r_io_map_get_at (core->io, addr);
// only get next if found on an executable section
if (!map || (map && map->perm & R_PERM_X)) {
for (i = 0; i < nexti; i++) {
if (next[i] == addr) {
break;
}
}
if (i == nexti) {
ut64 at = r_anal_function_max_addr (fcn);
while (true) {
ut64 size;
RAnalMetaItem *mi = r_meta_get_at (core->anal, at, R_META_TYPE_ANY, &size);
if (!mi) {
break;
}
at += size;
}
// TODO: ensure next address is function after padding (nop or trap or wat)
// XXX noisy for test cases because we want to clear the stderr
r_cons_clear_line (1);
loganal (fcn->addr, at, 10000 - depth);
next = next_append (next, &nexti, at);
}
}
}
if (!r_anal_analyze_fcn_refs (core, fcn, depth)) {
goto error;
}
}
} while (fcnlen != R_ANAL_RET_END);
r_list_free (core->anal->leaddrs);
core->anal->leaddrs = NULL;
if (has_next) {
for (i = 0; i < nexti; i++) {
if (!next[i] || r_anal_get_fcn_in (core->anal, next[i], 0)) {
continue;
}
r_core_anal_fcn (core, next[i], from, 0, depth - 1);
}
free (next);
}
if (core->anal->cur && core->anal->cur->arch && !strcmp (core->anal->cur->arch, "x86")) {
r_anal_function_check_bp_use (fcn);
if (fcn && !fcn->bp_frame) {
r_anal_function_delete_vars_by_kind (fcn, R_ANAL_VAR_KIND_BPV);
}
}
r_anal_hint_free (hint);
return true;
error:
r_list_free (core->anal->leaddrs);
core->anal->leaddrs = NULL;
// ugly hack to free fcn
if (fcn) {
if (!r_anal_function_realsize (fcn) || fcn->addr == UT64_MAX) {
r_anal_function_free (fcn);
fcn = NULL;
} else {
// TODO: mark this function as not properly analyzed
if (!fcn->name) {
// XXX dupped code.
fcn->name = r_str_newf (
"%s.%08" PFMT64x,
r_anal_functiontype_tostring (fcn->type),
at);
/* Add flag */
r_flag_space_push (core->flags, R_FLAGS_FS_FUNCTIONS);
r_flag_set (core->flags, fcn->name, at, r_anal_function_linear_size (fcn));
r_flag_space_pop (core->flags);
}
r_anal_add_function (core->anal, fcn);
}
if (fcn && has_next) {
ut64 newaddr = r_anal_function_max_addr (fcn);
RIOMap *map = r_io_map_get_at (core->io, newaddr);
if (!map || (map && (map->perm & R_PERM_X))) {
next = next_append (next, &nexti, newaddr);
for (i = 0; i < nexti; i++) {
if (!next[i]) {
continue;
}
r_core_anal_fcn (core, next[i], next[i], 0, depth - 1);
}
free (next);
}
}
}
if (fcn && core->anal->cur && core->anal->cur->arch && !strcmp (core->anal->cur->arch, "x86")) {
r_anal_function_check_bp_use (fcn);
if (!fcn->bp_frame) {
r_anal_function_delete_vars_by_kind (fcn, R_ANAL_VAR_KIND_BPV);
}
}
r_anal_hint_free (hint);
return false;
}
| 0 |
[
"CWE-416"
] |
radare2
|
10517e3ff0e609697eb8cde60ec8dc999ee5ea24
| 40,525,509,794,962,473,000,000,000,000,000,000,000 | 219 |
aaef on arm/thumb switches causes uaf ##crash
* Reported by peacock-doris via huntr.dev
* Reproducer: poc_uaf_r_reg_get
|
static int fts3IncrmergePush(
Fts3Table *p, /* Fts3 table handle */
IncrmergeWriter *pWriter, /* Writer object */
const char *zTerm, /* Term to write to internal node */
int nTerm /* Bytes at zTerm */
){
sqlite3_int64 iPtr = pWriter->aNodeWriter[0].iBlock;
int iLayer;
assert( nTerm>0 );
for(iLayer=1; ALWAYS(iLayer<FTS_MAX_APPENDABLE_HEIGHT); iLayer++){
sqlite3_int64 iNextPtr = 0;
NodeWriter *pNode = &pWriter->aNodeWriter[iLayer];
int rc = SQLITE_OK;
int nPrefix;
int nSuffix;
int nSpace;
/* Figure out how much space the key will consume if it is written to
** the current node of layer iLayer. Due to the prefix compression,
** the space required changes depending on which node the key is to
** be added to. */
nPrefix = fts3PrefixCompress(pNode->key.a, pNode->key.n, zTerm, nTerm);
nSuffix = nTerm - nPrefix;
if( NEVER(nSuffix<=0) ) return FTS_CORRUPT_VTAB;
nSpace = sqlite3Fts3VarintLen(nPrefix);
nSpace += sqlite3Fts3VarintLen(nSuffix) + nSuffix;
if( pNode->key.n==0 || (pNode->block.n + nSpace)<=p->nNodeSize ){
/* If the current node of layer iLayer contains zero keys, or if adding
** the key to it will not cause it to grow to larger than nNodeSize
** bytes in size, write the key here. */
Blob *pBlk = &pNode->block;
if( pBlk->n==0 ){
blobGrowBuffer(pBlk, p->nNodeSize, &rc);
if( rc==SQLITE_OK ){
pBlk->a[0] = (char)iLayer;
pBlk->n = 1 + sqlite3Fts3PutVarint(&pBlk->a[1], iPtr);
}
}
blobGrowBuffer(pBlk, pBlk->n + nSpace, &rc);
blobGrowBuffer(&pNode->key, nTerm, &rc);
if( rc==SQLITE_OK ){
if( pNode->key.n ){
pBlk->n += sqlite3Fts3PutVarint(&pBlk->a[pBlk->n], nPrefix);
}
pBlk->n += sqlite3Fts3PutVarint(&pBlk->a[pBlk->n], nSuffix);
memcpy(&pBlk->a[pBlk->n], &zTerm[nPrefix], nSuffix);
pBlk->n += nSuffix;
memcpy(pNode->key.a, zTerm, nTerm);
pNode->key.n = nTerm;
}
}else{
/* Otherwise, flush the current node of layer iLayer to disk.
** Then allocate a new, empty sibling node. The key will be written
** into the parent of this node. */
rc = fts3WriteSegment(p, pNode->iBlock, pNode->block.a, pNode->block.n);
assert( pNode->block.nAlloc>=p->nNodeSize );
pNode->block.a[0] = (char)iLayer;
pNode->block.n = 1 + sqlite3Fts3PutVarint(&pNode->block.a[1], iPtr+1);
iNextPtr = pNode->iBlock;
pNode->iBlock++;
pNode->key.n = 0;
}
if( rc!=SQLITE_OK || iNextPtr==0 ) return rc;
iPtr = iNextPtr;
}
assert( 0 );
return 0;
}
| 1 |
[
"CWE-125"
] |
sqlite
|
fd6bf04bba8035836e8f92771d277a4e868c6299
| 77,547,830,713,582,680,000,000,000,000,000,000,000 | 77 |
Remove a reachable NEVER() in FTS3.
FossilOrigin-Name: 8bd75bf636f72f32d66c6c38e1918f27daf2f13290f00a001f41d50838bbda47
|
fr_window_go_forward (FrWindow *window)
{
g_return_if_fail (window != NULL);
if (window->priv->history == NULL)
return;
if (window->priv->history_current == NULL)
return;
if (window->priv->history_current->prev == NULL)
return;
window->priv->history_current = window->priv->history_current->prev;
fr_window_go_to_location (window, window->priv->history_current->data, FALSE);
}
| 0 |
[
"CWE-22"
] |
file-roller
|
b147281293a8307808475e102a14857055f81631
| 262,506,065,629,632,700,000,000,000,000,000,000,000 | 14 |
libarchive: sanitize filenames before extracting
|
static const char *am_cookie_params(request_rec *r)
{
int secure_cookie;
int http_only_cookie;
const char *cookie_domain = ap_get_server_name(r);
const char *cookie_path = "/";
const char *cookie_samesite = "";
am_dir_cfg_rec *cfg = am_get_dir_cfg(r);
if (cfg->cookie_domain) {
cookie_domain = cfg->cookie_domain;
}
if (cfg->cookie_path) {
cookie_path = cfg->cookie_path;
}
if (cfg->cookie_samesite == am_samesite_lax) {
cookie_samesite = "; SameSite=Lax";
} else if (cfg->cookie_samesite == am_samesite_strict) {
cookie_samesite = "; SameSite=Strict";
}
secure_cookie = cfg->secure;
http_only_cookie = cfg->http_only;
return apr_psprintf(r->pool,
"Version=1; Path=%s; Domain=%s%s%s%s;",
cookie_path, cookie_domain,
http_only_cookie ? "; HttpOnly" : "",
secure_cookie ? "; secure" : "",
cookie_samesite);
}
| 0 |
[
"CWE-79"
] |
mod_auth_mellon
|
7af21c53da7bb1de024274ee6da30bc22316a079
| 8,730,633,030,550,020,000,000,000,000,000,000,000 | 33 |
Fix Cross-Site Session Transfer vulnerability
mod_auth_mellon did not verify that the site the session was created
for was the same site as the site the user accessed. This allows an
attacker with access to one web site on a server to use the same
session to get access to a different site running on the same server.
This patch fixes this vulnerability by storing the cookie parameters
used when creating the session in the session, and verifying those
parameters when the session is loaded.
Thanks to François Kooman for reporting this vulnerability.
This vulnerability has been assigned CVE-2017-6807.
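A minimal sketch of the verification step described above, assuming the string produced by am_cookie_params() was stored in the session when it was created (the helper name and storage mechanism here are hypothetical):

/* Hypothetical sketch: reject a session whose stored cookie parameters
 * do not match the parameters computed for the current request. */
static int am_session_params_match(request_rec *r, const char *stored_params)
{
    const char *current = am_cookie_params(r);

    /* stored_params is assumed to have been saved at session creation */
    return (stored_params != NULL && strcmp(stored_params, current) == 0);
}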
|
SetColor(f, b)
int f, b;
{
int of, ob;
static unsigned char sftrans[8] = {0,4,2,6,1,5,3,7};
if (!display)
return;
of = rend_getfg(&D_rend);
ob = rend_getbg(&D_rend);
#ifdef COLORS16
/* intense default not invented yet */
if (f == 0x100)
f = 0;
if (b == 0x100)
b = 0;
#endif
debug2("SetColor %d %d", coli2e(of), coli2e(ob));
debug2(" -> %d %d\n", coli2e(f), coli2e(b));
debug2("(%d %d", of, ob);
debug2(" -> %d %d)\n", f, b);
if (!D_CAX && D_hascolor && ((f == 0 && f != of) || (b == 0 && b != ob)))
{
if (D_OP)
AddCStr(D_OP);
else
{
int oattr;
oattr = D_rend.attr;
AddCStr(D_ME ? D_ME : "\033[m");
#ifdef FONT
if (D_ME && !D_CG0)
{
/* D_ME may also reset the alternate charset */
D_rend.font = 0;
# ifdef ENCODINGS
D_realfont = 0;
# endif
}
#endif
D_atyp = 0;
D_rend.attr = 0;
SetAttr(oattr);
}
of = ob = 0;
}
rend_setfg(&D_rend, f);
rend_setbg(&D_rend, b);
#ifdef COLORS16
D_col16change = 0;
#endif
if (!D_hascolor)
return;
f = f ? coli2e(f) : -1;
b = b ? coli2e(b) : -1;
of = of ? coli2e(of) : -1;
ob = ob ? coli2e(ob) : -1;
#ifdef COLORS256
if (f != of && f > 15 && D_CCO != 256)
f = D_CCO == 88 && D_CAF ? color256to88(f) : color256to16(f);
if (f != of && f > 15 && D_CAF)
{
AddCStr2(D_CAF, f);
of = f;
}
if (b != ob && b > 15 && D_CCO != 256)
b = D_CCO == 88 && D_CAB ? color256to88(b) : color256to16(b);
if (b != ob && b > 15 && D_CAB)
{
AddCStr2(D_CAB, b);
ob = b;
}
#endif
if (f != of && f != (of | 8))
{
if (f == -1)
AddCStr("\033[39m"); /* works because AX is set */
else if (D_CAF)
AddCStr2(D_CAF, f & 7);
else if (D_CSF)
AddCStr2(D_CSF, sftrans[f & 7]);
}
if (b != ob && b != (ob | 8))
{
if (b == -1)
AddCStr("\033[49m"); /* works because AX is set */
else if (D_CAB)
AddCStr2(D_CAB, b & 7);
else if (D_CSB)
AddCStr2(D_CSB, sftrans[b & 7]);
}
#ifdef COLORS16
if (f != of && D_CXT && (f & 8) != 0 && f != -1)
{
# ifdef TERMINFO
AddCStr2("\033[9%p1%dm", f & 7);
# else
AddCStr2("\033[9%dm", f & 7);
# endif
}
if (b != ob && D_CXT && (b & 8) != 0 && b != -1)
{
# ifdef TERMINFO
AddCStr2("\033[10%p1%dm", b & 7);
# else
AddCStr2("\033[10%dm", b & 7);
# endif
}
#endif
}
| 0 |
[] |
screen
|
c5db181b6e017cfccb8d7842ce140e59294d9f62
| 257,538,707,679,611,820,000,000,000,000,000,000,000 | 113 |
ansi: add support for xterm OSC 11
It allows for getting and setting the background color. Notably, Vim uses
OSC 11 to learn whether it's running on a light or dark colored terminal
and choose a color scheme accordingly.
Tested with gnome-terminal and xterm. When called with "?" argument the
current background color is returned:
$ echo -ne "\e]11;?\e\\"
$ 11;rgb:2323/2727/2929
Signed-off-by: Lubomir Rintel <[email protected]>
(cherry picked from commit 7059bff20a28778f9d3acf81cad07b1388d02309)
Signed-off-by: Amadeusz Sławiński <[email protected]>
|
static MagickBooleanType ReadPSDChannelPixels(Image *image,const size_t channels,
const size_t row,const ssize_t type,const unsigned char *pixels,
ExceptionInfo *exception)
{
Quantum
pixel;
register const unsigned char
*p;
register IndexPacket
*indexes;
register PixelPacket
*q;
register ssize_t
x;
size_t
packet_size;
unsigned short
nibble;
p=pixels;
q=GetAuthenticPixels(image,0,row,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
return MagickFalse;
indexes=GetAuthenticIndexQueue(image);
packet_size=GetPSDPacketSize(image);
for (x=0; x < (ssize_t) image->columns; x++)
{
if (packet_size == 1)
pixel=ScaleCharToQuantum(*p++);
else
{
p=PushShortPixel(MSBEndian,p,&nibble);
pixel=ScaleShortToQuantum(nibble);
}
if (image->depth > 1)
{
SetPSDPixel(image,channels,type,packet_size,pixel,q++,indexes,x);
}
else
{
ssize_t
bit,
number_bits;
number_bits=image->columns-x;
if (number_bits > 8)
number_bits=8;
for (bit=0; bit < number_bits; bit++)
{
SetPSDPixel(image,channels,type,packet_size,(((unsigned char) pixel)
& (0x01 << (7-bit))) != 0 ? 0 : QuantumRange,q++,indexes,x++);
}
if (x != (ssize_t) image->columns)
x--;
continue;
}
}
return(SyncAuthenticPixels(image,exception));
}
| 0 |
[
"CWE-125"
] |
ImageMagick6
|
4745eb1047617330141e9abfd5ae01236a71ae12
| 29,038,381,176,087,898,000,000,000,000,000,000,000 | 65 |
https://github.com/ImageMagick/ImageMagick/issues/1249
|
launch_location_free (LaunchLocation *location)
{
nautilus_file_unref (location->file);
g_free (location->uri);
g_free (location);
}
| 0 |
[
"CWE-20"
] |
nautilus
|
1630f53481f445ada0a455e9979236d31a8d3bb0
| 158,375,972,584,365,000,000,000,000,000,000,000,000 | 6 |
mime-actions: use file metadata for trusting desktop files
Currently we only trust desktop files that have the executable bit
set, and don't replace the displayed icon or the displayed name until
it's trusted, which prevents for running random programs by a malicious
desktop file.
However, the executable permission is preserved if the desktop file
comes from a compressed file.
To prevent this, add a metadata::trusted metadata to the file once the
user acknowledges the file as trusted. This adds metadata to the file,
which cannot be added unless it has access to the computer.
Also remove the SHEBANG "trusted" content we were putting inside the
desktop file, since that doesn't add more security since it can come
with the file itself.
https://bugzilla.gnome.org/show_bug.cgi?id=777991
|
iobuf_open_fd_or_name (gnupg_fd_t fd, const char *fname, const char *mode)
{
iobuf_t a;
if (fd == GNUPG_INVALID_FD)
a = iobuf_open (fname);
else
a = iobuf_fdopen_nc (FD2INT(fd), mode);
return a;
}
| 0 |
[
"CWE-20"
] |
gnupg
|
2183683bd633818dd031b090b5530951de76f392
| 312,487,857,577,471,060,000,000,000,000,000,000,000 | 10 |
Use inline functions to convert buffer data to scalars.
* common/host2net.h (buf16_to_ulong, buf16_to_uint): New.
(buf16_to_ushort, buf16_to_u16): New.
(buf32_to_size_t, buf32_to_ulong, buf32_to_uint, buf32_to_u32): New.
--
Commit 91b826a38880fd8a989318585eb502582636ddd8 was not enough to
avoid all sign extension on shift problems. Hanno Böck found a case
with an invalid read due to this problem. To fix that once and for
all almost all uses of "<< 24" and "<< 8" are changed by this patch to
use an inline function from host2net.h.
Signed-off-by: Werner Koch <[email protected]>
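The helpers are simple enough to sketch; this mirrors the shape the commit describes, widening each byte to unsigned before shifting so no sign extension can occur (a sketch, not the exact host2net.h text):

static inline unsigned int
buf32_to_uint (const void *buffer)
{
  const unsigned char *p = buffer;

  return (((unsigned int)p[0] << 24) | ((unsigned int)p[1] << 16)
          | ((unsigned int)p[2] << 8) | (unsigned int)p[3]);
}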
|
char *uwsgi_split4(char *buf, size_t len, char sep, char **part1, size_t * part1_len, char **part2, size_t * part2_len, char **part3, size_t * part3_len, char **part4, size_t * part4_len) {
size_t i;
int status = 0;
*part1 = NULL;
*part2 = NULL;
*part3 = NULL;
*part4 = NULL;
for (i = 0; i < len; i++) {
if (buf[i] == sep) {
// get part1
if (status == 0) {
*part1 = buf;
*part1_len = i;
status = 1;
}
// get part2
else if (status == 1) {
*part2 = *part1 + *part1_len + 1;
*part2_len = (buf + i) - *part2;
status = 2;
}
// get part3
else if (status == 2) {
*part3 = *part2 + *part2_len + 1;
*part3_len = (buf + i) - *part3;
break;
}
}
}
if (*part1 && *part2 && *part3) {
if (*part3 + *part3_len + 1 > buf + len) {
return NULL;
}
*part4 = *part3 + *part3_len + 1;
*part4_len = (buf + len) - *part4;
return buf + len;
}
return NULL;
}
| 0 |
[
"CWE-119",
"CWE-703",
"CWE-787"
] |
uwsgi
|
cb4636f7c0af2e97a4eef7a3cdcbd85a71247bfe
| 67,504,975,168,717,270,000,000,000,000,000,000,000 | 44 |
improve uwsgi_expand_path() to sanitize input, avoiding stack corruption and potential security issue
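A hedged sketch of the kind of input check the message describes for uwsgi_expand_path(), which is not shown above (names and the PATH_MAX limit are assumptions, not the actual patch):

/* Sketch: reject oversized input instead of copying it onto the stack. */
char *uwsgi_expand_path_checked(char *dir, int dir_len, char *dst) {
	char src[PATH_MAX + 1];
	if (dir_len <= 0 || dir_len > PATH_MAX)
		return NULL;
	memcpy(src, dir, dir_len);
	src[dir_len] = 0;
	return realpath(src, dst);
}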
|
v9fs_stat2inode(struct p9_wstat *stat, struct inode *inode,
struct super_block *sb)
{
umode_t mode;
char ext[32];
char tag_name[14];
unsigned int i_nlink;
struct v9fs_session_info *v9ses = sb->s_fs_info;
struct v9fs_inode *v9inode = V9FS_I(inode);
set_nlink(inode, 1);
inode->i_atime.tv_sec = stat->atime;
inode->i_mtime.tv_sec = stat->mtime;
inode->i_ctime.tv_sec = stat->mtime;
inode->i_uid = v9ses->dfltuid;
inode->i_gid = v9ses->dfltgid;
if (v9fs_proto_dotu(v9ses)) {
inode->i_uid = stat->n_uid;
inode->i_gid = stat->n_gid;
}
if ((S_ISREG(inode->i_mode)) || (S_ISDIR(inode->i_mode))) {
if (v9fs_proto_dotu(v9ses) && (stat->extension[0] != '\0')) {
/*
* Hardlink support got added later to
* the .u extension. So there can be
* servers out there that don't support
* this even with .u extension. So check
* for non NULL stat->extension
*/
strlcpy(ext, stat->extension, sizeof(ext));
/* HARDLINKCOUNT %u */
sscanf(ext, "%13s %u", tag_name, &i_nlink);
if (!strncmp(tag_name, "HARDLINKCOUNT", 13))
set_nlink(inode, i_nlink);
}
}
mode = p9mode2perm(v9ses, stat);
mode |= inode->i_mode & ~S_IALLUGO;
inode->i_mode = mode;
i_size_write(inode, stat->length);
/* not real number of blocks, but 512 byte ones ... */
inode->i_blocks = (i_size_read(inode) + 512 - 1) >> 9;
v9inode->cache_validity &= ~V9FS_INO_INVALID_ATTR;
}
| 1 |
[
"CWE-835"
] |
linux
|
5e3cc1ee1405a7eb3487ed24f786dec01b4cbe1f
| 248,080,678,994,298,100,000,000,000,000,000,000,000 | 48 |
9p: use inode->i_lock to protect i_size_write() under 32-bit
Use inode->i_lock to protect i_size_write(), else i_size_read() in
generic_fillattr() may loop infinitely in read_seqcount_begin() when
multiple processes invoke v9fs_vfs_getattr() or v9fs_vfs_getattr_dotl()
simultaneously under 32-bit SMP environment, and a soft lockup will be
triggered as show below:
watchdog: BUG: soft lockup - CPU#5 stuck for 22s! [stat:2217]
Modules linked in:
CPU: 5 PID: 2217 Comm: stat Not tainted 5.0.0-rc1-00005-g7f702faf5a9e #4
Hardware name: Generic DT based system
PC is at generic_fillattr+0x104/0x108
LR is at 0xec497f00
pc : [<802b8898>] lr : [<ec497f00>] psr: 200c0013
sp : ec497e20 ip : ed608030 fp : ec497e3c
r10: 00000000 r9 : ec497f00 r8 : ed608030
r7 : ec497ebc r6 : ec497f00 r5 : ee5c1550 r4 : ee005780
r3 : 0000052d r2 : 00000000 r1 : ec497f00 r0 : ed608030
Flags: nzCv IRQs on FIQs on Mode SVC_32 ISA ARM Segment none
Control: 10c5387d Table: ac48006a DAC: 00000051
CPU: 5 PID: 2217 Comm: stat Not tainted 5.0.0-rc1-00005-g7f702faf5a9e #4
Hardware name: Generic DT based system
Backtrace:
[<8010d974>] (dump_backtrace) from [<8010dc88>] (show_stack+0x20/0x24)
[<8010dc68>] (show_stack) from [<80a1d194>] (dump_stack+0xb0/0xdc)
[<80a1d0e4>] (dump_stack) from [<80109f34>] (show_regs+0x1c/0x20)
[<80109f18>] (show_regs) from [<801d0a80>] (watchdog_timer_fn+0x280/0x2f8)
[<801d0800>] (watchdog_timer_fn) from [<80198658>] (__hrtimer_run_queues+0x18c/0x380)
[<801984cc>] (__hrtimer_run_queues) from [<80198e60>] (hrtimer_run_queues+0xb8/0xf0)
[<80198da8>] (hrtimer_run_queues) from [<801973e8>] (run_local_timers+0x28/0x64)
[<801973c0>] (run_local_timers) from [<80197460>] (update_process_times+0x3c/0x6c)
[<80197424>] (update_process_times) from [<801ab2b8>] (tick_nohz_handler+0xe0/0x1bc)
[<801ab1d8>] (tick_nohz_handler) from [<80843050>] (arch_timer_handler_virt+0x38/0x48)
[<80843018>] (arch_timer_handler_virt) from [<80180a64>] (handle_percpu_devid_irq+0x8c/0x240)
[<801809d8>] (handle_percpu_devid_irq) from [<8017ac20>] (generic_handle_irq+0x34/0x44)
[<8017abec>] (generic_handle_irq) from [<8017b344>] (__handle_domain_irq+0x6c/0xc4)
[<8017b2d8>] (__handle_domain_irq) from [<801022e0>] (gic_handle_irq+0x4c/0x88)
[<80102294>] (gic_handle_irq) from [<80101a30>] (__irq_svc+0x70/0x98)
[<802b8794>] (generic_fillattr) from [<8056b284>] (v9fs_vfs_getattr_dotl+0x74/0xa4)
[<8056b210>] (v9fs_vfs_getattr_dotl) from [<802b8904>] (vfs_getattr_nosec+0x68/0x7c)
[<802b889c>] (vfs_getattr_nosec) from [<802b895c>] (vfs_getattr+0x44/0x48)
[<802b8918>] (vfs_getattr) from [<802b8a74>] (vfs_statx+0x9c/0xec)
[<802b89d8>] (vfs_statx) from [<802b9428>] (sys_lstat64+0x48/0x78)
[<802b93e0>] (sys_lstat64) from [<80101000>] (ret_fast_syscall+0x0/0x28)
[[email protected]: updated comment to not refer to a function
in another subsystem]
Link: http://lkml.kernel.org/r/[email protected]
Cc: [email protected]
Fixes: 7549ae3e81cc ("9p: Use the i_size_[read, write]() macros instead of using inode->i_size directly.")
Reported-by: Xing Gaopeng <[email protected]>
Signed-off-by: Hou Tao <[email protected]>
Signed-off-by: Dominique Martinet <[email protected]>
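The fix amounts to serializing writers of i_size; a minimal sketch of the locked update in v9fs_stat2inode():

/* Sketch: i_size_write() requires callers to serialize on 32-bit SMP,
 * otherwise a concurrent i_size_read() can spin in read_seqcount_begin(). */
spin_lock(&inode->i_lock);
i_size_write(inode, stat->length);
spin_unlock(&inode->i_lock);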
|
treedelta (struct tree const *tree,
unsigned int depth,
unsigned char delta[])
{
if (!tree)
return;
treedelta(tree->llink, depth, delta);
treedelta(tree->rlink, depth, delta);
if (depth < delta[tree->label])
delta[tree->label] = depth;
}
| 0 |
[
"CWE-119"
] |
grep
|
83a95bd8c8561875b948cadd417c653dbe7ef2e2
| 72,167,231,890,382,350,000,000,000,000,000,000,000 | 11 |
grep -F: fix a heap buffer (read) overrun
grep's read buffer is often filled to its full size, except when
reading the final buffer of a file. In that case, the number of
bytes read may be far less than the size of the buffer. However, for
certain unusual pattern/text combinations, grep -F would mistakenly
examine bytes in that uninitialized region of memory when searching
for a match. With carefully chosen inputs, one can cause grep -F to
read beyond the end of that buffer altogether. This problem arose via
commit v2.18-90-g73893ff with the introduction of a more efficient
heuristic using what is now the memchr_kwset function. The use of
that function in bmexec_trans could leave TP much larger than EP,
and the subsequent call to bm_delta2_search would mistakenly access
beyond end of the main input read buffer.
* src/kwset.c (bmexec_trans): When TP reaches or exceeds EP,
do not call bm_delta2_search.
* tests/kwset-abuse: New file.
* tests/Makefile.am (TESTS): Add it.
* THANKS.in: Update.
* NEWS (Bug fixes): Mention it.
Prior to this patch, this command would trigger a UMR:
printf %0360db 0 | valgrind src/grep -F $(printf %019dXb 0)
Use of uninitialised value of size 8
at 0x4142BE: bmexec_trans (kwset.c:657)
by 0x4143CA: bmexec (kwset.c:678)
by 0x414973: kwsexec (kwset.c:848)
by 0x414DC4: Fexecute (kwsearch.c:128)
by 0x404E2E: grepbuf (grep.c:1238)
by 0x4054BF: grep (grep.c:1417)
by 0x405CEB: grepdesc (grep.c:1645)
by 0x405EC1: grep_command_line_arg (grep.c:1692)
by 0x4077D4: main (grep.c:2570)
See the accompanying test for how to trigger the heap buffer overrun.
Thanks to Nima Aghdaii for testing and finding numerous
ways to break early iterations of this patch.
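The guard itself is tiny; a sketch of its shape in bmexec_trans(), per the analysis above (the surrounding loop and exact variable declarations are elided):

/* Sketch: once TP has reached or passed EP there is no room left for a
 * delta2 search, so calling bm_delta2_search would read past the buffer. */
if (tp >= ep)
  break;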
|
Field_bit::do_last_null_byte() const
{
/*
Code elsewhere is assuming that bytes are 8 bits, so I'm using
that value instead of the correct one: CHAR_BIT.
REFACTOR SUGGESTION (Matz): Change to use the correct number of
bits. On systems with CHAR_BIT > 8 (not very common), the storage
will lose the extra bits.
*/
DBUG_PRINT("test", ("bit_ofs: %d, bit_len: %d bit_ptr: %p",
bit_ofs, bit_len, bit_ptr));
uchar *result;
if (bit_len == 0)
result= null_ptr;
else if (bit_ofs + bit_len > 8)
result= bit_ptr + 1;
else
result= bit_ptr;
if (result)
return (size_t) (result - table->record[0]) + 1;
return LAST_NULL_BYTE_UNDEF;
}
| 0 |
[
"CWE-416",
"CWE-703"
] |
server
|
08c7ab404f69d9c4ca6ca7a9cf7eec74c804f917
| 27,413,198,186,909,210,000,000,000,000,000,000,000 | 24 |
MDEV-24176 Server crashes after insert in the table with virtual
column generated using date_format() and if()
vcol_info->expr is allocated on expr_arena at parsing stage. Since
expr item is allocated on expr_arena all its containee items must be
allocated on expr_arena too. Otherwise fix_session_expr() will
encounter prematurely freed item.
When table is reopened from cache vcol_info contains stale
expression. We refresh expression via TABLE::vcol_fix_exprs() but
first we must prepare a proper context (Vcol_expr_context) which meets
some requirements:
1. As noted above expr update must be done on expr_arena as there may
be new items created. It was a bug in fix_session_expr_for_read() and
was just not reproduced because of no second refix. Now refix is done
for more cases so it does reproduce. Tests affected: vcol.binlog
2. Also name resolution context must be narrowed to the single table.
Tested by: vcol.update main.default vcol.vcol_syntax gcol.gcol_bugfixes
3. sql_mode must be clean and not fail expr update.
sql_mode such as MODE_NO_BACKSLASH_ESCAPES, MODE_NO_ZERO_IN_DATE, etc
must not affect vcol expression update. If the table was created
successfully any further evaluation must not fail. Tests affected:
main.func_like
Reviewed by: Sergei Golubchik <[email protected]>
|
bool endsOn(const string &domain, const string &suffix)
{
if( suffix.empty() || ciEqual(domain, suffix) )
return true;
if(domain.size()<=suffix.size())
return false;
string::size_type dpos=domain.size()-suffix.size()-1, spos=0;
if(domain[dpos++]!='.')
return false;
for(; dpos < domain.size(); ++dpos, ++spos)
if(dns_tolower(domain[dpos]) != dns_tolower(suffix[spos]))
return false;
return true;
}
| 0 |
[
"CWE-399"
] |
pdns
|
881b5b03a590198d03008e4200dd00cc537712f3
| 59,351,835,903,429,900,000,000,000,000,000,000,000 | 19 |
Reject qname's wirelength > 255, `chopOff()` handle dot inside labels
|
_copyCreateUserMappingStmt(const CreateUserMappingStmt *from)
{
CreateUserMappingStmt *newnode = makeNode(CreateUserMappingStmt);
COPY_STRING_FIELD(username);
COPY_STRING_FIELD(servername);
COPY_NODE_FIELD(options);
return newnode;
}
| 0 |
[
"CWE-362"
] |
postgres
|
5f173040e324f6c2eebb90d86cf1b0cdb5890f0a
| 327,244,990,626,633,050,000,000,000,000,000,000,000 | 10 |
Avoid repeated name lookups during table and index DDL.
If the name lookups come to different conclusions due to concurrent
activity, we might perform some parts of the DDL on a different table
than other parts. At least in the case of CREATE INDEX, this can be
used to cause the permissions checks to be performed against a
different table than the index creation, allowing for a privilege
escalation attack.
This changes the calling convention for DefineIndex, CreateTrigger,
transformIndexStmt, transformAlterTableStmt, CheckIndexCompatible
(in 9.2 and newer), and AlterTable (in 9.1 and older). In addition,
CheckRelationOwnership is removed in 9.2 and newer and the calling
convention is changed in older branches. A field has also been added
to the Constraint node (FkConstraint in 8.4). Third-party code calling
these functions or using the Constraint node will require updating.
Report by Andres Freund. Patch by Robert Haas and Andres Freund,
reviewed by Tom Lane.
Security: CVE-2014-0062
|
e_ews_connection_resolve_names (EEwsConnection *cnc,
gint pri,
const gchar *resolve_name,
EwsContactsSearchScope scope,
GSList *parent_folder_ids,
gboolean fetch_contact_data,
GCancellable *cancellable,
GAsyncReadyCallback callback,
gpointer user_data)
{
ESoapMessage *msg;
GSimpleAsyncResult *simple;
EwsAsyncData *async_data;
g_return_if_fail (cnc != NULL);
msg = e_ews_message_new_with_header (
cnc->priv->settings,
cnc->priv->uri,
cnc->priv->impersonate_user,
"ResolveNames",
NULL,
NULL,
cnc->priv->version,
E_EWS_EXCHANGE_2007_SP1,
FALSE,
TRUE);
e_soap_message_add_attribute (msg, "SearchScope", get_search_scope_str (scope), NULL, NULL);
if (fetch_contact_data)
e_soap_message_add_attribute (msg, "ReturnFullContactData", "true", NULL, NULL);
else
e_soap_message_add_attribute (msg, "ReturnFullContactData", "false", NULL, NULL);
if (parent_folder_ids) {
e_soap_message_start_element (msg, "ParentFolderIds", "messages", NULL);
ews_append_folder_ids_to_msg (msg, cnc->priv->email, parent_folder_ids);
e_soap_message_end_element (msg);
}
e_ews_message_write_string_parameter (msg, "UnresolvedEntry", "messages", resolve_name);
e_ews_message_write_footer (msg);
simple = g_simple_async_result_new (
G_OBJECT (cnc), callback, user_data,
e_ews_connection_resolve_names);
async_data = g_new0 (EwsAsyncData, 1);
g_simple_async_result_set_op_res_gpointer (
simple, async_data, (GDestroyNotify) async_data_free);
e_ews_connection_queue_request (
cnc, msg, resolve_names_response_cb,
pri, cancellable, simple);
g_object_unref (simple);
}
| 0 |
[
"CWE-295"
] |
evolution-ews
|
915226eca9454b8b3e5adb6f2fff9698451778de
| 293,643,003,320,970,240,000,000,000,000,000,000,000 | 59 |
I#27 - SSL Certificates are not validated
This depends on https://gitlab.gnome.org/GNOME/evolution-data-server/commit/6672b8236139bd6ef41ecb915f4c72e2a052dba5 too.
Closes https://gitlab.gnome.org/GNOME/evolution-ews/issues/27
|
int aac_do_ioctl(struct aac_dev * dev, int cmd, void __user *arg)
{
int status;
/*
* HBA gets first crack
*/
status = aac_dev_ioctl(dev, cmd, arg);
if (status != -ENOTTY)
return status;
switch (cmd) {
case FSACTL_MINIPORT_REV_CHECK:
status = check_revision(dev, arg);
break;
case FSACTL_SEND_LARGE_FIB:
case FSACTL_SENDFIB:
status = ioctl_send_fib(dev, arg);
break;
case FSACTL_OPEN_GET_ADAPTER_FIB:
status = open_getadapter_fib(dev, arg);
break;
case FSACTL_GET_NEXT_ADAPTER_FIB:
status = next_getadapter_fib(dev, arg);
break;
case FSACTL_CLOSE_GET_ADAPTER_FIB:
status = close_getadapter_fib(dev, arg);
break;
case FSACTL_SEND_RAW_SRB:
status = aac_send_raw_srb(dev,arg);
break;
case FSACTL_GET_PCI_INFO:
status = aac_get_pci_info(dev,arg);
break;
default:
status = -ENOTTY;
break;
}
return status;
}
| 0 |
[
"CWE-20",
"CWE-399"
] |
linux
|
b4789b8e6be3151a955ade74872822f30e8cd914
| 110,063,843,078,764,070,000,000,000,000,000,000,000 | 41 |
aacraid: prevent invalid pointer dereference
It appears that driver runs into a problem here if fibsize is too small
because we allocate user_srbcmd with fibsize size only but later we
access it until user_srbcmd->sg.count to copy it over to srbcmd.
It is not correct to test (fibsize < sizeof(*user_srbcmd)) because this
structure already includes one sg element and this is not needed for
commands without data. So, we would recommend to add the following
(instead of test for fibsize == 0).
Signed-off-by: Mahesh Rajashekhara <[email protected]>
Reported-by: Nico Golde <[email protected]>
Reported-by: Fabian Yamaguchi <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
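A hedged sketch of the recommended check (the struct names follow the driver's user SRB types as described in the message; treat them as assumptions):

/* Sketch: user_srbcmd must be large enough to reach sg.count, but the
 * embedded sg element is not required for commands without data. */
if (fibsize < (sizeof(struct user_aac_srb) - sizeof(struct user_sgentry))) {
	rcode = -EINVAL;
	goto cleanup;
}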
|
static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
__u32 __user *uaddr = (__u32 __user *)(unsigned long)reg->addr;
struct kvm_regs *regs = vcpu_gp_regs(vcpu);
int nr_regs = sizeof(*regs) / sizeof(__u32);
__uint128_t tmp;
void *valp = &tmp;
u64 off;
int err = 0;
/* Our ID is an index into the kvm_regs struct. */
off = core_reg_offset_from_id(reg->id);
if (off >= nr_regs ||
(off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
return -ENOENT;
if (KVM_REG_SIZE(reg->id) > sizeof(tmp))
return -EINVAL;
if (copy_from_user(valp, uaddr, KVM_REG_SIZE(reg->id))) {
err = -EFAULT;
goto out;
}
if (off == KVM_REG_ARM_CORE_REG(regs.pstate)) {
u32 mode = (*(u32 *)valp) & PSR_AA32_MODE_MASK;
switch (mode) {
case PSR_AA32_MODE_USR:
case PSR_AA32_MODE_FIQ:
case PSR_AA32_MODE_IRQ:
case PSR_AA32_MODE_SVC:
case PSR_AA32_MODE_ABT:
case PSR_AA32_MODE_UND:
case PSR_MODE_EL0t:
case PSR_MODE_EL1t:
case PSR_MODE_EL1h:
break;
default:
err = -EINVAL;
goto out;
}
}
memcpy((u32 *)regs + off, valp, KVM_REG_SIZE(reg->id));
out:
return err;
}
| 1 |
[
"CWE-20"
] |
linux
|
d26c25a9d19b5976b319af528886f89cf455692d
| 284,292,064,506,048,200,000,000,000,000,000,000,000 | 47 |
arm64: KVM: Tighten guest core register access from userspace
We currently allow userspace to access the core register file
in about any possible way, including straddling multiple
registers and doing unaligned accesses.
This is not the expected use of the ABI, and nobody is actually
using it that way. Let's tighten it by explicitly checking
the size and alignment for each field of the register file.
Cc: <[email protected]>
Fixes: 2f4a07c5f9fe ("arm64: KVM: guest one-reg interface")
Reviewed-by: Christoffer Dall <[email protected]>
Reviewed-by: Mark Rutland <[email protected]>
Signed-off-by: Dave Martin <[email protected]>
[maz: rewrote Dave's initial patch to be more easily backported]
Signed-off-by: Marc Zyngier <[email protected]>
Signed-off-by: Will Deacon <[email protected]>
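A hedged sketch of the tightened rule: the offset must name the start of a real field and the access size must match that field exactly (the size-lookup helper is hypothetical):

/* Sketch: reject straddling and unaligned core-register accesses. */
static bool core_reg_access_ok(u64 off, u64 size)
{
	/* hypothetical helper: size of the field starting at 'off',
	 * or 0 if 'off' is not the start of any field */
	u64 field = core_reg_size_from_offset(off);

	return field != 0 && field == size;
}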
|
static inline int padr_bcast(PCNetState *s, const uint8_t *buf, int size)
{
static const uint8_t BCAST[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
struct qemu_ether_header *hdr = (void *)buf;
int result = !CSR_DRCVBC(s) && !memcmp(hdr->ether_dhost, BCAST, 6);
#ifdef PCNET_DEBUG_MATCH
printf("padr_bcast result=%d\n", result);
#endif
return result;
}
| 0 |
[] |
qemu
|
837f21aacf5a714c23ddaadbbc5212f9b661e3f7
| 264,607,038,712,513,950,000,000,000,000,000,000,000 | 10 |
net: pcnet: add check to validate receive data size(CVE-2015-7504)
In loopback mode, pcnet_receive routine appends CRC code to the
receive buffer. If the data size given is same as the buffer size,
the appended CRC code overwrites 4 bytes after s->buffer. Added a
check to avoid that.
Reported by: Qinghao Tang <[email protected]>
Cc: [email protected]
Reviewed-by: Michael S. Tsirkin <[email protected]>
Signed-off-by: Prasad J Pandit <[email protected]>
Signed-off-by: Jason Wang <[email protected]>
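A hedged sketch of the added guard in pcnet_receive() (the field name follows the message; the exact surrounding context is elided):

/* Sketch: appending the 4-byte CRC must never run past s->buffer. */
if (size > sizeof(s->buffer) - 4)
    return;    /* drop the frame instead of overwriting memory */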
|
inline void wdec14(unsigned short l, unsigned short h, unsigned short &a,
unsigned short &b) {
short ls = static_cast<short>(l);
short hs = static_cast<short>(h);
int hi = hs;
int ai = ls + (hi & 1) + (hi >> 1);
short as = static_cast<short>(ai);
short bs = static_cast<short>(ai - hi);
a = static_cast<unsigned short>(as);
b = static_cast<unsigned short>(bs);
}
| 0 |
[
"CWE-20",
"CWE-190"
] |
tinyexr
|
a685e3332f61cd4e59324bf3f669d36973d64270
| 86,909,799,735,425,150,000,000,000,000,000,000,000 | 14 |
Make line_no with a too-large value (2**20) invalid. Fixes #124
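A hedged sketch of the range check (the exact bound handling and error path in tinyexr are assumptions):

/* Sketch: reject absurd scanline numbers before they are used as offsets. */
if (line_no < 0 || line_no >= (1 << 20))
  return false;   /* treat as invalid/corrupt input */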
|
dns_transmit_io (struct dns_transmit *d, iopause_fd *x, struct taia *deadline)
{
x->fd = d->s1 - 1;
switch (d->tcpstate)
{
case 0:
case 3:
case 4:
case 5:
x->events = IOPAUSE_READ;
break;
case 1:
case 2:
x->events = IOPAUSE_WRITE;
}
if (taia_less (&d->deadline, deadline))
*deadline = d->deadline;
}
| 1 |
[
"CWE-362"
] |
ndjbdns
|
177b5522e9b3d25778001c8cebfddd4d2973fcfd
| 95,786,177,002,988,540,000,000,000,000,000,000,000 | 21 |
Merge identical outgoing requests - patch 2.
This patch fixes dnscache to combine *same* client queries into one
single outgoing request, thus securing the server from possible cache
poisoning attacks. The merge operation takes place in the
dns_transmit layer, rather than between query and dns_transmit layers,
as done in the previous patch.
This fixes one of the cache poisoning vulnerabilities reported by
Mr Mark Johnson -> https://bugzilla.redhat.com/show_bug.cgi?id=838965.
Nonetheless the original patch for this issue was created by
Mr Jeff King -> http://marc.info/?l=djbdns&m=123859517723684&w=3#2
Sincere thanks to Mr Mark for reporting this issue and Mr Jeff for
creating the patch and releasing it under GPLv2.
|
level_store(struct mddev *mddev, const char *buf, size_t len)
{
char clevel[16];
ssize_t rv;
size_t slen = len;
struct md_personality *pers, *oldpers;
long level;
void *priv, *oldpriv;
struct md_rdev *rdev;
if (slen == 0 || slen >= sizeof(clevel))
return -EINVAL;
rv = mddev_lock(mddev);
if (rv)
return rv;
if (mddev->pers == NULL) {
strncpy(mddev->clevel, buf, slen);
if (mddev->clevel[slen-1] == '\n')
slen--;
mddev->clevel[slen] = 0;
mddev->level = LEVEL_NONE;
rv = len;
goto out_unlock;
}
rv = -EROFS;
if (mddev->ro)
goto out_unlock;
/* request to change the personality. Need to ensure:
* - array is not engaged in resync/recovery/reshape
* - old personality can be suspended
* - new personality will access other array.
*/
rv = -EBUSY;
if (mddev->sync_thread ||
test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
mddev->reshape_position != MaxSector ||
mddev->sysfs_active)
goto out_unlock;
rv = -EINVAL;
if (!mddev->pers->quiesce) {
printk(KERN_WARNING "md: %s: %s does not support online personality change\n",
mdname(mddev), mddev->pers->name);
goto out_unlock;
}
/* Now find the new personality */
strncpy(clevel, buf, slen);
if (clevel[slen-1] == '\n')
slen--;
clevel[slen] = 0;
if (kstrtol(clevel, 10, &level))
level = LEVEL_NONE;
if (request_module("md-%s", clevel) != 0)
request_module("md-level-%s", clevel);
spin_lock(&pers_lock);
pers = find_pers(level, clevel);
if (!pers || !try_module_get(pers->owner)) {
spin_unlock(&pers_lock);
printk(KERN_WARNING "md: personality %s not loaded\n", clevel);
rv = -EINVAL;
goto out_unlock;
}
spin_unlock(&pers_lock);
if (pers == mddev->pers) {
/* Nothing to do! */
module_put(pers->owner);
rv = len;
goto out_unlock;
}
if (!pers->takeover) {
module_put(pers->owner);
printk(KERN_WARNING "md: %s: %s does not support personality takeover\n",
mdname(mddev), clevel);
rv = -EINVAL;
goto out_unlock;
}
rdev_for_each(rdev, mddev)
rdev->new_raid_disk = rdev->raid_disk;
/* ->takeover must set new_* and/or delta_disks
* if it succeeds, and may set them when it fails.
*/
priv = pers->takeover(mddev);
if (IS_ERR(priv)) {
mddev->new_level = mddev->level;
mddev->new_layout = mddev->layout;
mddev->new_chunk_sectors = mddev->chunk_sectors;
mddev->raid_disks -= mddev->delta_disks;
mddev->delta_disks = 0;
mddev->reshape_backwards = 0;
module_put(pers->owner);
printk(KERN_WARNING "md: %s: %s would not accept array\n",
mdname(mddev), clevel);
rv = PTR_ERR(priv);
goto out_unlock;
}
/* Looks like we have a winner */
mddev_suspend(mddev);
mddev_detach(mddev);
spin_lock(&mddev->lock);
oldpers = mddev->pers;
oldpriv = mddev->private;
mddev->pers = pers;
mddev->private = priv;
strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
mddev->level = mddev->new_level;
mddev->layout = mddev->new_layout;
mddev->chunk_sectors = mddev->new_chunk_sectors;
mddev->delta_disks = 0;
mddev->reshape_backwards = 0;
mddev->degraded = 0;
spin_unlock(&mddev->lock);
if (oldpers->sync_request == NULL &&
mddev->external) {
/* We are converting from a no-redundancy array
* to a redundancy array and metadata is managed
* externally so we need to be sure that writes
* won't block due to a need to transition
* clean->dirty
* until external management is started.
*/
mddev->in_sync = 0;
mddev->safemode_delay = 0;
mddev->safemode = 0;
}
oldpers->free(mddev, oldpriv);
if (oldpers->sync_request == NULL &&
pers->sync_request != NULL) {
/* need to add the md_redundancy_group */
if (sysfs_create_group(&mddev->kobj, &md_redundancy_group))
printk(KERN_WARNING
"md: cannot register extra attributes for %s\n",
mdname(mddev));
mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, "sync_action");
}
if (oldpers->sync_request != NULL &&
pers->sync_request == NULL) {
/* need to remove the md_redundancy_group */
if (mddev->to_remove == NULL)
mddev->to_remove = &md_redundancy_group;
}
rdev_for_each(rdev, mddev) {
if (rdev->raid_disk < 0)
continue;
if (rdev->new_raid_disk >= mddev->raid_disks)
rdev->new_raid_disk = -1;
if (rdev->new_raid_disk == rdev->raid_disk)
continue;
sysfs_unlink_rdev(mddev, rdev);
}
rdev_for_each(rdev, mddev) {
if (rdev->raid_disk < 0)
continue;
if (rdev->new_raid_disk == rdev->raid_disk)
continue;
rdev->raid_disk = rdev->new_raid_disk;
if (rdev->raid_disk < 0)
clear_bit(In_sync, &rdev->flags);
else {
if (sysfs_link_rdev(mddev, rdev))
printk(KERN_WARNING "md: cannot register rd%d"
" for %s after level change\n",
rdev->raid_disk, mdname(mddev));
}
}
if (pers->sync_request == NULL) {
/* this is now an array without redundancy, so
* it must always be in_sync
*/
mddev->in_sync = 1;
del_timer_sync(&mddev->safemode_timer);
}
blk_set_stacking_limits(&mddev->queue->limits);
pers->run(mddev);
set_bit(MD_CHANGE_DEVS, &mddev->flags);
mddev_resume(mddev);
if (!mddev->thread)
md_update_sb(mddev, 1);
sysfs_notify(&mddev->kobj, NULL, "level");
md_new_event(mddev);
rv = len;
out_unlock:
mddev_unlock(mddev);
return rv;
}
| 0 |
[
"CWE-200"
] |
linux
|
b6878d9e03043695dbf3fa1caa6dfc09db225b16
| 277,351,813,718,734,580,000,000,000,000,000,000,000 | 200 |
md: use kzalloc() when bitmap is disabled
In drivers/md/md.c get_bitmap_file() uses kmalloc() for creating a
mdu_bitmap_file_t called "file".
5769 file = kmalloc(sizeof(*file), GFP_NOIO);
5770 if (!file)
5771 return -ENOMEM;
This structure is copied to user space at the end of the function.
5786 if (err == 0 &&
5787 copy_to_user(arg, file, sizeof(*file)))
5788 err = -EFAULT
But if bitmap is disabled only the first byte of "file" is initialized
with zero, so it's possible to read some bytes (up to 4095) of kernel
space memory from user space. This is an information leak.
5775 /* bitmap disabled, zero the first byte and copy out */
5776 if (!mddev->bitmap_info.file)
5777 file->pathname[0] = '\0';
Signed-off-by: Benjamin Randazzo <[email protected]>
Signed-off-by: NeilBrown <[email protected]>
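The fix is a one-line allocator change; a sketch per the message:

/* Sketch: zero the whole structure so no stale kernel memory can reach
 * user space on the bitmap-disabled path. */
file = kzalloc(sizeof(*file), GFP_NOIO);
if (!file)
	return -ENOMEM;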
|
std::string to_string() const { return std::string((char*)ptr, len); }
| 0 |
[
"CWE-200"
] |
incubator-doris
|
246ac4e37aa4da6836b7850cb990f02d1c3725a3
| 209,951,107,055,225,800,000,000,000,000,000,000,000 | 1 |
[fix] fix a bug where the encryption function with an IV may return a wrong result (#8277)
|
smil_type_find (GstTypeFind * tf, gpointer unused)
{
if (xml_check_first_element (tf, "smil", 4, FALSE)) {
gst_type_find_suggest (tf, GST_TYPE_FIND_MAXIMUM, SMIL_CAPS);
}
}
| 0 |
[
"CWE-125"
] |
gst-plugins-base
|
2fdccfd64fc609e44e9c4b8eed5bfdc0ab9c9095
| 24,326,528,116,604,030,000,000,000,000,000,000,000 | 6 |
typefind: bounds check windows ico detection
Fixes out of bounds read
https://bugzilla.gnome.org/show_bug.cgi?id=774902
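A hedged sketch of a bounds-checked peek in a typefind helper (gst_type_find_peek is the real API; the 6-byte size is illustrative for an ICO header):

/* Sketch: only inspect bytes the typefind buffer actually contains. */
static void
ico_type_find_checked (GstTypeFind * tf)
{
  const guint8 *data = gst_type_find_peek (tf, 0, 6);

  if (data == NULL)
    return;                     /* fewer than 6 bytes available */
  /* ... inspect data[0..5] only ... */
}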
|
uint32_t BinaryProtocolWriter::serializedSizeBinary(
folly::StringPiece str) const {
return serializedSizeBinary(folly::ByteRange(str));
}
| 0 |
[
"CWE-703",
"CWE-770"
] |
fbthrift
|
c9a903e5902834e95bbd4ab0e9fa53ba0189f351
| 178,099,794,931,345,000,000,000,000,000,000,000,000 | 4 |
Better handling of truncated data when reading strings
Summary:
Currently we read the string size and blindly pre-allocate it. This allows a malicious attacker to send a message of only a few bytes and cause the server to allocate a huge amount of memory (>1GB).
This diff changes the logic to check if we have enough data in the buffer before allocating the string.
This is a second part of a fix for CVE-2019-3553.
Reviewed By: vitaut
Differential Revision: D14393393
fbshipit-source-id: e2046d2f5b087d3abc9a9d2c6c107cf088673057
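The principle is easy to sketch in C terms: compare the declared size against the bytes actually buffered before allocating (names are hypothetical):

/* Sketch: never pre-allocate more than the data that actually arrived. */
static char *read_string_checked(const uint8_t *buf, size_t avail,
                                 uint32_t declared_len)
{
    char *out;

    if ((size_t)declared_len > avail)
        return NULL;            /* truncated or malicious frame: fail early */
    out = malloc(declared_len + 1);
    if (out) {
        memcpy(out, buf, declared_len);
        out[declared_len] = '\0';
    }
    return out;
}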
|
static void
typebuilder_setup_events (MonoClass *klass, MonoError *error)
{
MonoReflectionTypeBuilder *tb = mono_class_get_ref_info (klass);
MonoReflectionEventBuilder *eb;
MonoImage *image = klass->image;
MonoEvent *events;
int i;
mono_error_init (error);
if (!klass->ext)
klass->ext = image_g_new0 (image, MonoClassExt, 1);
klass->ext->event.count = tb->events ? mono_array_length (tb->events) : 0;
klass->ext->event.first = 0;
events = image_g_new0 (image, MonoEvent, klass->ext->event.count);
klass->ext->events = events;
for (i = 0; i < klass->ext->event.count; ++i) {
eb = mono_array_get (tb->events, MonoReflectionEventBuilder*, i);
events [i].parent = klass;
events [i].attrs = eb->attrs;
events [i].name = mono_string_to_utf8_image (image, eb->name, error);
if (!mono_error_ok (error))
return;
if (eb->add_method)
events [i].add = eb->add_method->mhandle;
if (eb->remove_method)
events [i].remove = eb->remove_method->mhandle;
if (eb->raise_method)
events [i].raise = eb->raise_method->mhandle;
#ifndef MONO_SMALL_CONFIG
if (eb->other_methods) {
int j;
events [i].other = image_g_new0 (image, MonoMethod*, mono_array_length (eb->other_methods) + 1);
for (j = 0; j < mono_array_length (eb->other_methods); ++j) {
MonoReflectionMethodBuilder *mb =
mono_array_get (eb->other_methods,
MonoReflectionMethodBuilder*, j);
events [i].other [j] = mb->mhandle;
}
}
#endif
mono_save_custom_attrs (klass->image, &events [i], eb->cattrs);
	}
}
| 0 |
[
"CWE-20"
] |
mono
|
65292a69c837b8a5f7a392d34db63de592153358
| 8,186,733,306,013,975,000,000,000,000,000,000,000 | 47 |
Handle invalid instantiation of generic methods.
* verify.c: Add new function to internal verifier API to check
method instantiations.
* reflection.c (mono_reflection_bind_generic_method_parameters):
Check the instantiation before returning it.
Fixes #655847
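A hedged sketch of the added check in mono_reflection_bind_generic_method_parameters (the verifier entry point is named per the description above; treat it as an assumption, error path elided):

/* Sketch: validate the instantiation before handing it back. */
if (!mono_verifier_is_method_valid_generic_instantiation (inflated))
	return NULL;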
|
read_StreamsInfo(struct archive_read *a, struct _7z_stream_info *si)
{
struct _7zip *zip = (struct _7zip *)a->format->data;
const unsigned char *p;
unsigned i;
memset(si, 0, sizeof(*si));
if ((p = header_bytes(a, 1)) == NULL)
return (-1);
if (*p == kPackInfo) {
uint64_t packPos;
if (read_PackInfo(a, &(si->pi)) < 0)
return (-1);
if (si->pi.positions == NULL || si->pi.sizes == NULL)
return (-1);
/*
* Calculate packed stream positions.
*/
packPos = si->pi.pos;
for (i = 0; i < si->pi.numPackStreams; i++) {
si->pi.positions[i] = packPos;
packPos += si->pi.sizes[i];
if (packPos > zip->header_offset)
return (-1);
}
if ((p = header_bytes(a, 1)) == NULL)
return (-1);
}
if (*p == kUnPackInfo) {
uint32_t packIndex;
struct _7z_folder *f;
if (read_CodersInfo(a, &(si->ci)) < 0)
return (-1);
/*
* Calculate packed stream indexes.
*/
packIndex = 0;
f = si->ci.folders;
for (i = 0; i < si->ci.numFolders; i++) {
f[i].packIndex = packIndex;
packIndex += (uint32_t)f[i].numPackedStreams;
if (packIndex > si->pi.numPackStreams)
return (-1);
}
if ((p = header_bytes(a, 1)) == NULL)
return (-1);
}
if (*p == kSubStreamsInfo) {
if (read_SubStreamsInfo(a, &(si->ss),
si->ci.folders, (size_t)si->ci.numFolders) < 0)
return (-1);
if ((p = header_bytes(a, 1)) == NULL)
return (-1);
}
/*
* Must be kEnd.
*/
if (*p != kEnd)
return (-1);
return (0);
}
| 0 |
[
"CWE-190",
"CWE-125"
] |
libarchive
|
e79ef306afe332faf22e9b442a2c6b59cb175573
| 156,562,091,784,280,310,000,000,000,000,000,000,000 | 68 |
Issue #718: Fix TALOS-CAN-152
If a 7-Zip archive declares a rediculously large number of substreams,
it can overflow an internal counter, leading a subsequent memory
allocation to be too small for the substream data.
Thanks to the Open Source and Threat Intelligence project at Cisco
for reporting this issue.
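A hedged sketch of the overflow guard on the substream counter (field names are illustrative of the reader's style, not the literal patch):

/* Sketch: detect wrap-around before the sum feeds an allocation size. */
if (numUnpackStreams > UINT32_MAX - unpack_streams)
	return (-1);	/* declared substream count would overflow */
unpack_streams += numUnpackStreams;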
|
static int copy_argv(char *argv[])
{
int i;
for (i = 0; argv[i]; i++) {
if (!(argv[i] = strdup(argv[i]))) {
rprintf (FERROR, "out of memory at %s(%d)\n",
__FILE__, __LINE__);
return RERR_MALLOC;
}
}
return 0;
}
| 0 |
[
"CWE-59"
] |
rsync
|
962f8b90045ab331fc04c9e65f80f1a53e68243b
| 270,586,781,790,933,240,000,000,000,000,000,000,000 | 14 |
Complain if an inc-recursive path is not right for its dir.
This ensures that a malicious sender can't use a just-sent
symlink as a transfer path.
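A hedged sketch of the complaint (identifiers are illustrative; rprintf, FERROR, RERR_PROTOCOL and exit_cleanup follow rsync's conventions):

/* Sketch: the announced path must stay under its inc-recursive dir. */
if (strncmp(fname, dir, dlen) != 0 || fname[dlen] != '/') {
	rprintf(FERROR, "path %s is not in its expected dir %s\n", fname, dir);
	exit_cleanup(RERR_PROTOCOL);
}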
|
static gboolean avdtp_open_cmd(struct avdtp *session, uint8_t transaction,
struct seid_req *req, unsigned int size)
{
struct avdtp_local_sep *sep;
struct avdtp_stream *stream;
uint8_t err;
if (size < sizeof(struct seid_req)) {
error("Too short abort request");
return FALSE;
}
sep = find_local_sep_by_seid(session, req->acp_seid);
if (!sep) {
err = AVDTP_BAD_ACP_SEID;
goto failed;
}
if (sep->state != AVDTP_STATE_CONFIGURED) {
err = AVDTP_BAD_STATE;
goto failed;
}
stream = sep->stream;
/* Check if the stream is pending and there is an IO set already */
if (stream == session->pending_open && session->pending_open_io) {
handle_transport_connect(session, session->pending_open_io,
stream->imtu, stream->omtu);
return avdtp_send(session, transaction, AVDTP_MSG_TYPE_ACCEPT,
AVDTP_OPEN, NULL, 0);
}
if (sep->ind && sep->ind->open && !session->pending_open) {
if (!sep->ind->open(session, sep, stream, &err,
sep->user_data))
goto failed;
}
avdtp_check_collision(session, AVDTP_OPEN, stream);
if (!avdtp_send(session, transaction, AVDTP_MSG_TYPE_ACCEPT,
AVDTP_OPEN, NULL, 0))
return FALSE;
if (!session->pending_open)
stream_set_pending_open(stream, NULL);
return TRUE;
failed:
return avdtp_send(session, transaction, AVDTP_MSG_TYPE_REJECT,
AVDTP_OPEN, &err, sizeof(err));
}
| 0 |
[
"CWE-703"
] |
bluez
|
7a80d2096f1b7125085e21448112aa02f49f5e9a
| 329,264,614,990,206,400,000,000,000,000,000,000,000 | 54 |
avdtp: Fix accepting invalid/malformed capabilities
Check if capabilities are valid before attempting to copy them.
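A hedged sketch of the validation described: walk the capability TLVs and bail out if any declared length runs past the buffer (names are illustrative):

/* Sketch: validate each capability header before copying its payload. */
static gboolean caps_valid(const uint8_t *caps, size_t size)
{
	size_t off = 0;

	while (off + 2 <= size) {	/* category byte + length byte */
		uint8_t len = caps[off + 1];

		if (off + 2 + len > size)
			return FALSE;	/* declared length overruns buffer */
		off += 2 + len;
	}

	return off == size;
}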
|
bool Predicant_to_list_comparator::add_value_skip_null(const char *funcname,
Item_args *args,
uint value_index,
bool *nulls_found)
{
/*
Skip explicit NULL constant items.
Using real_item() to correctly detect references to explicit NULLs
in HAVING clause, e.g. in this example "b" is skipped:
SELECT a,NULL AS b FROM t1 GROUP BY a HAVING 'A' IN (b,'A');
*/
if (args->arguments()[value_index]->real_item()->type() == Item::NULL_ITEM)
{
*nulls_found= true;
return false;
}
return add_value(funcname, args, value_index);
}
| 0 |
[
"CWE-617"
] |
server
|
807945f2eb5fa22e6f233cc17b85a2e141efe2c8
| 215,848,812,413,307,830,000,000,000,000,000,000,000 | 18 |
MDEV-26402: A SEGV in Item_field::used_tables/update_depend_map_for_order...
When doing condition pushdown from HAVING into WHERE,
Item_equal::create_pushable_equalities() calls
item->set_extraction_flag(IMMUTABLE_FL) for constant items.
Then, Item::cleanup_excluding_immutables_processor() checks for this flag
to see if it should call item->cleanup() or leave the item as-is.
The failure happens when a constant item has a non-constant one inside it,
like:
(tbl.col=0 AND impossible_cond)
item->walk(cleanup_excluding_immutables_processor) works in a bottom-up
way so it
1. will call Item_func_eq(tbl.col=0)->cleanup()
2. will not call Item_cond_and->cleanup (as the AND is constant)
This creates an item tree where a fixed Item has an un-fixed Item inside
it which eventually causes an assertion failure.
Fixed by introducing this rule: instead of just calling
item->set_extraction_flag(IMMUTABLE_FL);
we call Item::walk() to set the flag for all sub-items of the item.
|
static void tcp_sum_lost(struct tcp_sock *tp, struct sk_buff *skb)
{
__u8 sacked = TCP_SKB_CB(skb)->sacked;
if (!(sacked & TCPCB_LOST) ||
((sacked & TCPCB_LOST) && (sacked & TCPCB_SACKED_RETRANS)))
tp->lost += tcp_skb_pcount(skb);
}
| 0 |
[
"CWE-190"
] |
net
|
3b4929f65b0d8249f19a50245cd88ed1a2f78cff
| 229,871,978,914,959,180,000,000,000,000,000,000,000 | 8 |
tcp: limit payload size of sacked skbs
Jonathan Looney reported that TCP can trigger the following crash
in tcp_shifted_skb() :
BUG_ON(tcp_skb_pcount(skb) < pcount);
This can happen if the remote peer has advertized the smallest
MSS that linux TCP accepts : 48
An skb can hold 17 fragments, and each fragment can hold 32KB
on x86, or 64KB on PowerPC.
This means that the 16bit witdh of TCP_SKB_CB(skb)->tcp_gso_segs
can overflow.
Note that tcp_sendmsg() builds skbs with less than 64KB
of payload, so this problem needs SACK to be enabled.
SACK blocks allow TCP to coalesce multiple skbs in the retransmit
queue, thus filling the 17 fragments to maximal capacity.
CVE-2019-11477 -- u16 overflow of TCP_SKB_CB(skb)->tcp_gso_segs
Fixes: 832d11c5cd07 ("tcp: Try to restore large SKBs while SACK processing")
Signed-off-by: Eric Dumazet <[email protected]>
Reported-by: Jonathan Looney <[email protected]>
Acked-by: Neal Cardwell <[email protected]>
Reviewed-by: Tyler Hicks <[email protected]>
Cc: Yuchung Cheng <[email protected]>
Cc: Bruce Curtis <[email protected]>
Cc: Jonathan Lemon <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
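A hedged sketch of the invariant the fix enforces before coalescing in the retransmit queue (placement and helper context are elided; this is not the literal patch):

/* Sketch: the merged segment count must still fit the u16
 * tcp_gso_segs field, or tcp_shifted_skb()'s BUG_ON can trigger. */
if (tcp_skb_pcount(to) + tcp_skb_pcount(from) > 0xFFFF)
	return 0;	/* refuse to shift; keep the skbs separate */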
|
DEFUN (clear_ip_bgp_instance_all_ipv4_soft_in,
clear_ip_bgp_instance_all_ipv4_soft_in_cmd,
"clear ip bgp view WORD * ipv4 (unicast|multicast) soft in",
CLEAR_STR
IP_STR
BGP_STR
"BGP view\n"
"view name\n"
"Clear all peers\n"
"Address family\n"
"Address Family modifier\n"
"Address Family modifier\n"
"Soft reconfig\n"
"Soft reconfig inbound update\n")
{
if (strncmp (argv[1], "m", 1) == 0)
return bgp_clear_vty (vty, argv[0], AFI_IP, SAFI_MULTICAST, clear_all,
BGP_CLEAR_SOFT_IN, NULL);
return bgp_clear_vty (vty, argv[0], AFI_IP, SAFI_UNICAST, clear_all,
BGP_CLEAR_SOFT_IN, NULL);
}
| 0 |
[
"CWE-125"
] |
frr
|
6d58272b4cf96f0daa846210dd2104877900f921
| 127,540,659,159,071,070,000,000,000,000,000,000,000 | 22 |
[bgpd] cleanup, compact and consolidate capability parsing code
2007-07-26 Paul Jakma <[email protected]>
* (general) Clean up and compact capability parsing slightly.
Consolidate validation of length and logging of generic TLV, and
memcpy of capability data, thus removing such from cap specifc
code (not always present or correct).
* bgp_open.h: Add structures for the generic capability TLV header
and for the data formats of the various specific capabilities we
support. Hence remove the badly named, or else misdefined, struct
capability.
* bgp_open.c: (bgp_capability_vty_out) Use struct capability_mp_data.
Do the length checks *before* memcpy()'ing based on that length
(stored capability - should have been validated anyway on input,
but..).
(bgp_afi_safi_valid_indices) new function to validate (afi,safi)
which is about to be used as index into arrays, consolidates
several instances of same, at least one of which appeared to be
incomplete..
(bgp_capability_mp) Much condensed.
(bgp_capability_orf_entry) New, process one ORF entry
(bgp_capability_orf) Condensed. Fixed to process all ORF entries.
(bgp_capability_restart) Condensed, and fixed to use a
cap-specific type, rather than abusing capability_mp.
(struct message capcode_str) added to aid generic logging.
(size_t cap_minsizes[]) added to aid generic validation of
capability length field.
(bgp_capability_parse) Generic logging and validation of TLV
consolidated here. Code compacted as much as possible.
* bgp_packet.c: (bgp_open_receive) Capability parsers now use
streams, so no more need here to manually fudge the input stream
getp.
(bgp_capability_msg_parse) use struct capability_mp_data. Validate
lengths /before/ memcpy. Use bgp_afi_safi_valid_indices.
(bgp_capability_receive) Exported for use by test harness.
* bgp_vty.c: (bgp_show_summary) fix conversion warning
(bgp_show_peer) ditto
* bgp_debug.h: Fix storage 'extern' after type 'const'.
* lib/log.c: (mes_lookup) warning about code not being in
same-number array slot should be debug, not warning. E.g. BGP
has several discontigious number spaces, allocating from
different parts of a space is not uncommon (e.g. IANA
assigned versus vendor-assigned code points in some number
space).
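Of the helpers listed, bgp_afi_safi_valid_indices is easy to sketch from its description — validate (afi, safi) before either value indexes an array (the bounds are illustrative):

/* Sketch: reject out-of-range (afi,safi) pairs before array indexing. */
static int
bgp_afi_safi_valid_indices (afi_t afi, safi_t *safi)
{
  switch (afi)
    {
    case AFI_IP:
    case AFI_IP6:
      if (*safi > 0 && *safi < SAFI_MAX)
        return 1;
      break;
    default:
      break;
    }
  return 0;
}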
|
static int target_revfn(u8 af, const char *name, u8 revision, int *bestp)
{
const struct xt_target *t;
int have_rev = 0;
mutex_lock(&xt[af].mutex);
list_for_each_entry(t, &xt[af].target, list) {
if (strcmp(t->name, name) == 0) {
if (t->revision > *bestp)
*bestp = t->revision;
if (t->revision == revision)
have_rev = 1;
}
}
mutex_unlock(&xt[af].mutex);
if (af != NFPROTO_UNSPEC && !have_rev)
return target_revfn(NFPROTO_UNSPEC, name, revision, bestp);
return have_rev;
}
| 0 |
[] |
linux
|
175e476b8cdf2a4de7432583b49c871345e4f8a1
| 141,665,444,027,804,840,000,000,000,000,000,000,000 | 21 |
netfilter: x_tables: Use correct memory barriers.
When a new table value was assigned, it was followed by a write memory
barrier. This ensured that all writes before this point would complete
before any writes after this point. However, to determine whether the
rules are unused, the sequence counter is read. To ensure that all
writes have been done before these reads, a full memory barrier is
needed, not just a write memory barrier. The same argument applies when
incrementing the counter, before the rules are read.
Changing to using smp_mb() instead of smp_wmb() fixes the kernel panic
reported in cc00bcaa5899 (which is still present), while still
maintaining the same speed of replacing tables.
The smb_mb() barriers potentially slow the packet path, however testing
has shown no measurable change in performance on a 4-core MIPS64
platform.
Fixes: 7f5c6d4f665b ("netfilter: get rid of atomic ops in fast path")
Signed-off-by: Mark Tomlinson <[email protected]>
Signed-off-by: Pablo Neira Ayuso <[email protected]>
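The change is mechanical; a sketch of the publish side (field names are illustrative of x_tables' private-table swap):

/* Sketch: a full barrier orders the table-pointer write against the
 * later reads of the per-cpu sequence counters; smp_wmb() only ordered
 * writes against writes, which was insufficient. */
table->private = newinfo;
smp_mb();	/* was: smp_wmb() */
/* ... now safe to read seqcounts and wait out old-table readers ... */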
|
static inline void decode_eat32bits(Encoder *encoder)
{
decode_eatbits(encoder, 16);
decode_eatbits(encoder, 16);
}
| 0 |
[] |
spice-common
|
762e0abae36033ccde658fd52d3235887b60862d
| 144,969,034,427,598,620,000,000,000,000,000,000,000 | 5 |
quic: Check we have some data to start decoding quic image
All paths already pass some data to quic_decode_begin, but check it
there anyway; the test is not expensive.
Checking for a non-zero word count is enough: all other words will
potentially be read by calling more_io_words, but we need at least one
word up front to avoid a potential initial buffer overflow or
dereferencing an invalid pointer.
Signed-off-by: Frediano Ziglio <[email protected]>
Acked-by: Uri Lublin <[email protected]>
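A hedged sketch of the inexpensive test (parameter names are illustrative):

/* Sketch: refuse to start decoding with an empty input so the first
 * word read cannot dereference an invalid pointer. */
if (io_words == NULL || num_io_words == 0)
	return FALSE;	/* nothing to decode */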
|
vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg,
int main_fd)
{
struct virtio_net *dev = *pdev;
struct VhostUserMemory *memory = &msg->payload.memory;
struct rte_vhost_mem_region *reg;
void *mmap_addr;
uint64_t mmap_size;
uint64_t mmap_offset;
uint64_t alignment;
uint32_t i;
int populate;
int fd;
if (memory->nregions > VHOST_MEMORY_MAX_NREGIONS) {
RTE_LOG(ERR, VHOST_CONFIG,
"too many memory regions (%u)\n", memory->nregions);
return RTE_VHOST_MSG_RESULT_ERR;
}
if (dev->mem && !vhost_memory_changed(memory, dev->mem)) {
RTE_LOG(INFO, VHOST_CONFIG,
"(%d) memory regions not changed\n", dev->vid);
for (i = 0; i < memory->nregions; i++)
close(msg->fds[i]);
return RTE_VHOST_MSG_RESULT_OK;
}
if (dev->mem) {
free_mem_region(dev);
rte_free(dev->mem);
dev->mem = NULL;
}
/* Flush IOTLB cache as previous HVAs are now invalid */
if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
for (i = 0; i < dev->nr_vring; i++)
vhost_user_iotlb_flush_all(dev->virtqueue[i]);
dev->nr_guest_pages = 0;
if (!dev->guest_pages) {
dev->max_guest_pages = 8;
dev->guest_pages = malloc(dev->max_guest_pages *
sizeof(struct guest_page));
if (dev->guest_pages == NULL) {
RTE_LOG(ERR, VHOST_CONFIG,
"(%d) failed to allocate memory "
"for dev->guest_pages\n",
dev->vid);
return RTE_VHOST_MSG_RESULT_ERR;
}
}
dev->mem = rte_zmalloc("vhost-mem-table", sizeof(struct rte_vhost_memory) +
sizeof(struct rte_vhost_mem_region) * memory->nregions, 0);
if (dev->mem == NULL) {
RTE_LOG(ERR, VHOST_CONFIG,
"(%d) failed to allocate memory for dev->mem\n",
dev->vid);
return RTE_VHOST_MSG_RESULT_ERR;
}
dev->mem->nregions = memory->nregions;
for (i = 0; i < memory->nregions; i++) {
fd = msg->fds[i];
reg = &dev->mem->regions[i];
reg->guest_phys_addr = memory->regions[i].guest_phys_addr;
reg->guest_user_addr = memory->regions[i].userspace_addr;
reg->size = memory->regions[i].memory_size;
reg->fd = fd;
mmap_offset = memory->regions[i].mmap_offset;
/* Check for memory_size + mmap_offset overflow */
if (mmap_offset >= -reg->size) {
RTE_LOG(ERR, VHOST_CONFIG,
"mmap_offset (%#"PRIx64") and memory_size "
"(%#"PRIx64") overflow\n",
mmap_offset, reg->size);
goto err_mmap;
}
mmap_size = reg->size + mmap_offset;
/* mmap() without flag of MAP_ANONYMOUS, should be called
* with length argument aligned with hugepagesz at older
* longterm version Linux, like 2.6.32 and 3.2.72, or
* mmap() will fail with EINVAL.
*
* to avoid failure, make sure in caller to keep length
* aligned.
*/
alignment = get_blk_size(fd);
if (alignment == (uint64_t)-1) {
RTE_LOG(ERR, VHOST_CONFIG,
"couldn't get hugepage size through fstat\n");
goto err_mmap;
}
mmap_size = RTE_ALIGN_CEIL(mmap_size, alignment);
populate = (dev->dequeue_zero_copy) ? MAP_POPULATE : 0;
mmap_addr = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
MAP_SHARED | populate, fd, 0);
if (mmap_addr == MAP_FAILED) {
RTE_LOG(ERR, VHOST_CONFIG,
"mmap region %u failed.\n", i);
goto err_mmap;
}
reg->mmap_addr = mmap_addr;
reg->mmap_size = mmap_size;
reg->host_user_addr = (uint64_t)(uintptr_t)mmap_addr +
mmap_offset;
if (dev->dequeue_zero_copy)
if (add_guest_pages(dev, reg, alignment) < 0) {
RTE_LOG(ERR, VHOST_CONFIG,
"adding guest pages to region %u failed.\n",
i);
goto err_mmap;
}
RTE_LOG(INFO, VHOST_CONFIG,
"guest memory region %u, size: 0x%" PRIx64 "\n"
"\t guest physical addr: 0x%" PRIx64 "\n"
"\t guest virtual addr: 0x%" PRIx64 "\n"
"\t host virtual addr: 0x%" PRIx64 "\n"
"\t mmap addr : 0x%" PRIx64 "\n"
"\t mmap size : 0x%" PRIx64 "\n"
"\t mmap align: 0x%" PRIx64 "\n"
"\t mmap off : 0x%" PRIx64 "\n",
i, reg->size,
reg->guest_phys_addr,
reg->guest_user_addr,
reg->host_user_addr,
(uint64_t)(uintptr_t)mmap_addr,
mmap_size,
alignment,
mmap_offset);
if (dev->postcopy_listening) {
/*
* We haven't a better way right now than sharing
* DPDK's virtual address with Qemu, so that Qemu can
* retrieve the region offset when handling userfaults.
*/
memory->regions[i].userspace_addr =
reg->host_user_addr;
}
}
if (dev->postcopy_listening) {
/* Send the addresses back to qemu */
msg->fd_num = 0;
send_vhost_reply(main_fd, msg);
/* Wait for qemu to acknolwedge it's got the addresses
* we've got to wait before we're allowed to generate faults.
*/
VhostUserMsg ack_msg;
if (read_vhost_message(main_fd, &ack_msg) <= 0) {
RTE_LOG(ERR, VHOST_CONFIG,
"Failed to read qemu ack on postcopy set-mem-table\n");
goto err_mmap;
}
if (ack_msg.request.master != VHOST_USER_SET_MEM_TABLE) {
RTE_LOG(ERR, VHOST_CONFIG,
"Bad qemu ack on postcopy set-mem-table (%d)\n",
ack_msg.request.master);
goto err_mmap;
}
/* Now userfault register and we can use the memory */
for (i = 0; i < memory->nregions; i++) {
#ifdef RTE_LIBRTE_VHOST_POSTCOPY
reg = &dev->mem->regions[i];
struct uffdio_register reg_struct;
/*
* Let's register all the mmap'ed area to ensure
* alignment on page boundary.
*/
reg_struct.range.start =
(uint64_t)(uintptr_t)reg->mmap_addr;
reg_struct.range.len = reg->mmap_size;
reg_struct.mode = UFFDIO_REGISTER_MODE_MISSING;
if (ioctl(dev->postcopy_ufd, UFFDIO_REGISTER,
					&reg_struct)) {
RTE_LOG(ERR, VHOST_CONFIG,
"Failed to register ufd for region %d: (ufd = %d) %s\n",
i, dev->postcopy_ufd,
strerror(errno));
goto err_mmap;
}
RTE_LOG(INFO, VHOST_CONFIG,
"\t userfaultfd registered for range : %llx - %llx\n",
reg_struct.range.start,
reg_struct.range.start +
reg_struct.range.len - 1);
#else
goto err_mmap;
#endif
}
}
for (i = 0; i < dev->nr_vring; i++) {
struct vhost_virtqueue *vq = dev->virtqueue[i];
if (vq->desc || vq->avail || vq->used) {
/*
* If the memory table got updated, the ring addresses
* need to be translated again as virtual addresses have
* changed.
*/
vring_invalidate(dev, vq);
dev = translate_ring_addresses(dev, i);
if (!dev) {
dev = *pdev;
goto err_mmap;
}
*pdev = dev;
}
}
dump_guest_pages(dev);
return RTE_VHOST_MSG_RESULT_OK;
err_mmap:
free_mem_region(dev);
rte_free(dev->mem);
dev->mem = NULL;
return RTE_VHOST_MSG_RESULT_ERR;
}
| 0 |
[
"CWE-190"
] |
dpdk
|
d87f1a1cb7b666550bb53e39c1d85d9f7b861e6f
| 264,094,632,889,409,600,000,000,000,000,000,000,000 | 240 |
vhost: support inflight info sharing
This patch introduces two new messages VHOST_USER_GET_INFLIGHT_FD
and VHOST_USER_SET_INFLIGHT_FD to support transferring a shared
buffer between qemu and backend.
Signed-off-by: Lin Li <[email protected]>
Signed-off-by: Xun Ni <[email protected]>
Signed-off-by: Yu Zhang <[email protected]>
Signed-off-by: Jin Yu <[email protected]>
Reviewed-by: Maxime Coquelin <[email protected]>
|
//! Convert pixel values from CMY to RGB color spaces.
CImg<T>& CMYtoRGB() {
if (_spectrum!=3)
throw CImgInstanceException(_cimg_instance
"CMYtoRGB(): Instance is not a CMY image.",
cimg_instance);
T *p1 = data(0,0,0,0), *p2 = data(0,0,0,1), *p3 = data(0,0,0,2);
const longT whd = (longT)width()*height()*depth();
cimg_pragma_openmp(parallel for cimg_openmp_if_size(whd,2048))
for (longT N = 0; N<whd; ++N) {
const Tfloat
C = (Tfloat)p1[N],
M = (Tfloat)p2[N],
Y = (Tfloat)p3[N],
R = 255 - C,
G = 255 - M,
B = 255 - Y;
p1[N] = (T)cimg::cut(R,0,255),
p2[N] = (T)cimg::cut(G,0,255),
p3[N] = (T)cimg::cut(B,0,255);
}
return *this;
}
| 0 |
[
"CWE-119",
"CWE-787"
] |
CImg
|
ac8003393569aba51048c9d67e1491559877b1d1
| 195,290,014,253,513,500,000,000,000,000,000,000,000 | 23 |
.
|
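For reference, the per-channel mapping used by CMYtoRGB() above reduces to an affine flip plus a clamp; a scalar restatement in plain C (the 255 ceiling assumes the usual 8-bit value range):

static unsigned char cmy_channel_to_rgb(float v)
{
    float r = 255.0f - v;          /* R = 255 - C, and likewise for G, B */
    if (r < 0.0f)   r = 0.0f;      /* same clamp as cimg::cut(..., 0, 255) */
    if (r > 255.0f) r = 255.0f;
    return (unsigned char)r;
}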
static int ipv6_generate_eui64(u8 *eui, struct net_device *dev)
{
switch (dev->type) {
case ARPHRD_ETHER:
case ARPHRD_FDDI:
return addrconf_ifid_eui48(eui, dev);
case ARPHRD_ARCNET:
return addrconf_ifid_arcnet(eui, dev);
case ARPHRD_INFINIBAND:
return addrconf_ifid_infiniband(eui, dev);
case ARPHRD_SIT:
return addrconf_ifid_sit(eui, dev);
case ARPHRD_IPGRE:
return addrconf_ifid_gre(eui, dev);
case ARPHRD_6LOWPAN:
case ARPHRD_IEEE802154:
return addrconf_ifid_eui64(eui, dev);
case ARPHRD_IEEE1394:
return addrconf_ifid_ieee1394(eui, dev);
case ARPHRD_TUNNEL6:
return addrconf_ifid_ip6tnl(eui, dev);
}
return -1;
}
| 0 |
[
"CWE-20"
] |
linux
|
77751427a1ff25b27d47a4c36b12c3c8667855ac
| 51,466,054,138,752,660,000,000,000,000,000,000,000 | 24 |
ipv6: addrconf: validate new MTU before applying it
Currently we don't check if the new MTU is valid or not and this allows
one to configure a smaller than minimum allowed by RFCs or even bigger
than interface own MTU, which is a problem as it may lead to packet
drops.
If you have a daemon like NetworkManager running, this may be exploited
by remote attackers by forging RA packets with an invalid MTU, possibly
leading to a DoS. (NetworkManager currently only validates for values
too small, but not for too big ones.)
The fix is just to make sure the new value is valid. That is, between
IPV6_MIN_MTU and interface's MTU.
Note that similar check is already performed at
ndisc_router_discovery(), for when kernel itself parses the RA.
Signed-off-by: Marcelo Ricardo Leitner <[email protected]>
Signed-off-by: Sabrina Dubroca <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
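The check the commit describes is small; here it is sketched in isolation, with dev_mtu standing in for the interface's own MTU (IPV6_MIN_MTU is the RFC-mandated 1280):

#define IPV6_MIN_MTU 1280

/* accept a router-advertised MTU only inside [IPV6_MIN_MTU, dev_mtu] */
static int ra_mtu_valid(unsigned int mtu, unsigned int dev_mtu)
{
    return mtu >= IPV6_MIN_MTU && mtu <= dev_mtu;
}

As the message notes, ndisc_router_discovery() already applies the equivalent bound when the kernel itself parses the RA; the fix applies the same rule on the addrconf side.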
atol8(const char *p, unsigned char_cnt)
{
int64_t l;
int digit;
l = 0;
while (char_cnt-- > 0) {
if (*p >= '0' && *p <= '7')
digit = *p - '0';
else
return (l);
p++;
l <<= 3;
l |= digit;
}
return (l);
}
| 0 |
[] |
libarchive
|
fd7e0c02e272913a0a8b6d492c7260dfca0b1408
| 295,430,605,210,836,170,000,000,000,000,000,000,000 | 17 |
Reject cpio symlinks that exceed 1MB
|
sps_range_extension::sps_range_extension()
{
transform_skip_rotation_enabled_flag = 0;
transform_skip_context_enabled_flag = 0;
implicit_rdpcm_enabled_flag = 0;
explicit_rdpcm_enabled_flag = 0;
extended_precision_processing_flag = 0;
intra_smoothing_disabled_flag = 0;
high_precision_offsets_enabled_flag = 0;
persistent_rice_adaptation_enabled_flag = 0;
cabac_bypass_alignment_enabled_flag = 0;
}
| 0 |
[
"CWE-787"
] |
libde265
|
8e89fe0e175d2870c39486fdd09250b230ec10b8
| 165,708,133,671,819,040,000,000,000,000,000,000,000 | 12 |
error on out-of-range cpb_cnt_minus1 (oss-fuzz issue 27590)
|
sd_show_thin_provisioning(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct scsi_disk *sdkp = to_scsi_disk(dev);
return snprintf(buf, 20, "%u\n", sdkp->lbpme);
}
| 0 |
[
"CWE-284",
"CWE-264"
] |
linux
|
0bfc96cb77224736dfa35c3c555d37b3646ef35e
| 323,223,461,842,314,120,000,000,000,000,000,000,000 | 7 |
block: fail SCSI passthrough ioctls on partition devices
Linux allows executing the SG_IO ioctl on a partition or LVM volume, and
will pass the command to the underlying block device. This is
well-known, but it is also a large security problem when (via Unix
permissions, ACLs, SELinux or a combination thereof) a program or user
needs to be granted access only to part of the disk.
This patch lets partitions forward a small set of harmless ioctls;
others are logged with printk so that we can see which ioctls are
actually sent. In my tests only CDROM_GET_CAPABILITY actually occurred.
Of course it was being sent to a (partition on a) hard disk, so it would
have failed with ENOTTY and the patch isn't changing anything in
practice. Still, I'm treating it specially to avoid spamming the logs.
In principle, this restriction should include programs running with
CAP_SYS_RAWIO. If for example I let a program access /dev/sda2 and
/dev/sdb, it still should not be able to read/write outside the
boundaries of /dev/sda2 independent of the capabilities. However, for
now programs with CAP_SYS_RAWIO will still be allowed to send the
ioctls. Their actions will still be logged.
This patch does not affect the non-libata IDE driver. That driver
however already tests for bd != bd->bd_contains before issuing some
ioctl; it could be restricted further to forbid these ioctls even for
programs running with CAP_SYS_ADMIN/CAP_SYS_RAWIO.
Cc: [email protected]
Cc: Jens Axboe <[email protected]>
Cc: James Bottomley <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
[ Make it also print the command name when warning - Linus ]
Signed-off-by: Linus Torvalds <[email protected]>
|
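The whitelist idea reads naturally as a switch. A sketch under the assumption that only query-style ioctls are safe to forward — the real patch's list is somewhat larger, and CAP_SYS_RAWIO still bypasses it, as the message notes:

#include <linux/cdrom.h>     /* CDROM_GET_CAPABILITY */
#include <scsi/scsi_ioctl.h> /* SCSI_IOCTL_GET_IDLUN, SCSI_IOCTL_GET_BUS_NUMBER */
#include <stdio.h>

static int partition_ioctl_allowed(unsigned int cmd)
{
    switch (cmd) {
    case SCSI_IOCTL_GET_IDLUN:
    case SCSI_IOCTL_GET_BUS_NUMBER:
    case CDROM_GET_CAPABILITY:
        return 1;   /* harmless queries: no data reaches the medium */
    default:
        return 0;   /* could touch blocks outside the partition */
    }
}

int main(void)
{
    printf("CDROM_GET_CAPABILITY on a partition: %s\n",
           partition_ioctl_allowed(CDROM_GET_CAPABILITY) ? "forward" : "reject");
    return 0;
}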
void OSD::osdmap_subscribe(version_t epoch, bool force_request)
{
Mutex::Locker l(osdmap_subscribe_lock);
if (latest_subscribed_epoch >= epoch && !force_request)
return;
latest_subscribed_epoch = MAX(epoch, latest_subscribed_epoch);
if (monc->sub_want_increment("osdmap", epoch, CEPH_SUBSCRIBE_ONETIME) ||
force_request) {
monc->renew_subs();
}
}
| 0 |
[
"CWE-287",
"CWE-284"
] |
ceph
|
5ead97120e07054d80623dada90a5cc764c28468
| 314,008,552,631,437,380,000,000,000,000,000,000,000 | 13 |
auth/cephx: add authorizer challenge
Allow the accepting side of a connection to reject an initial authorizer
with a random challenge. The connecting side then has to respond with an
updated authorizer proving they are able to decrypt the service's challenge
and that the new authorizer was produced for this specific connection
instance.
The accepting side requires this challenge and response unconditionally
if the client side advertises they have the feature bit. Servers wishing
to require this improved level of authentication simply have to require
the appropriate feature.
Signed-off-by: Sage Weil <[email protected]>
(cherry picked from commit f80b848d3f830eb6dba50123e04385173fa4540b)
# Conflicts:
# src/auth/Auth.h
# src/auth/cephx/CephxProtocol.cc
# src/auth/cephx/CephxProtocol.h
# src/auth/none/AuthNoneProtocol.h
# src/msg/Dispatcher.h
# src/msg/async/AsyncConnection.cc
- const_iterator
- ::decode vs decode
- AsyncConnection ctor arg noise
- get_random_bytes(), not cct->random()
|
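The shape of the exchange, reduced to a toy: the server sends a random challenge, and the client must return a value derivable only with the shared key. The FNV-1a digest below is a deliberate placeholder for the real cephx encryption, used only to keep the sketch self-contained and runnable:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

/* placeholder keyed digest -- NOT cephx, just FNV-1a over key||challenge */
static uint64_t toy_digest(const uint8_t *key, size_t klen, uint64_t chal)
{
    uint64_t h = 1469598103934665603ULL;       /* FNV offset basis */
    for (size_t i = 0; i < klen; i++) { h ^= key[i]; h *= 1099511628211ULL; }
    for (int i = 0; i < 8; i++) {
        h ^= (chal >> (8 * i)) & 0xff;
        h *= 1099511628211ULL;
    }
    return h;
}

int main(void)
{
    const uint8_t key[] = "shared-secret";     /* known to client and server */
    srand((unsigned)time(NULL));
    uint64_t challenge = ((uint64_t)rand() << 32) ^ (uint64_t)rand();

    uint64_t answer = toy_digest(key, sizeof(key) - 1, challenge);  /* client */
    uint64_t expect = toy_digest(key, sizeof(key) - 1, challenge);  /* server */
    printf("authorizer %s\n", answer == expect ? "accepted" : "rejected");
    return 0;
}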
static void dump_info(FILE *dumpfile, int format, char *prefix, char *msg, ...)
{
if (format == DUMP_TEXT)
{
va_list ap;
va_start(ap, msg);
fprintf(dumpfile, "%s ", prefix);
vfprintf(dumpfile, msg, ap);
fprintf(dumpfile, "\n");
va_end(ap);
}
}
| 0 |
[
"CWE-125"
] |
libtiff
|
21d39de1002a5e69caa0574b2cc05d795d6fbfad
| 150,681,933,939,048,020,000,000,000,000,000,000,000 | 12 |
* tools/tiffcrop.c: fix multiple uint32 overflows in
writeBufferToSeparateStrips(), writeBufferToContigTiles() and
writeBufferToSeparateTiles() that could cause heap buffer overflows.
Reported by Henri Salo from Nixu Corporation.
Fixes http://bugzilla.maptools.org/show_bug.cgi?id=2592
|
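The general shape of such a guard: do the width x height x samples arithmetic in 64 bits and refuse any product that does not fit back into the uint32 the callers use. A sketch, with names chosen for illustration:

#include <stdint.h>
#include <stdio.h>

static int safe_buffer_size(uint32_t w, uint32_t h, uint32_t spp, uint32_t *out)
{
    uint64_t n = (uint64_t)w * h;          /* cannot overflow uint64_t */
    if (spp == 0 || n > UINT32_MAX / spp)
        return 0;                          /* product would wrap uint32 */
    *out = (uint32_t)(n * spp);
    return 1;
}

int main(void)
{
    uint32_t size;
    /* 70000 x 70000 x 3 overflows 32 bits, so this must be rejected */
    printf("%s\n", safe_buffer_size(70000, 70000, 3, &size) ? "ok" : "rejected");
    return 0;
}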
i40e_set_cld_element(struct i40e_cloud_filter *filter,
struct i40e_aqc_cloud_filters_element_data *cld)
{
int i, j;
u32 ipa;
memset(cld, 0, sizeof(*cld));
ether_addr_copy(cld->outer_mac, filter->dst_mac);
ether_addr_copy(cld->inner_mac, filter->src_mac);
if (filter->n_proto != ETH_P_IP && filter->n_proto != ETH_P_IPV6)
return;
if (filter->n_proto == ETH_P_IPV6) {
#define IPV6_MAX_INDEX (ARRAY_SIZE(filter->dst_ipv6) - 1)
for (i = 0, j = 0; i < ARRAY_SIZE(filter->dst_ipv6);
i++, j += 2) {
ipa = be32_to_cpu(filter->dst_ipv6[IPV6_MAX_INDEX - i]);
ipa = cpu_to_le32(ipa);
memcpy(&cld->ipaddr.raw_v6.data[j], &ipa, sizeof(ipa));
}
} else {
ipa = be32_to_cpu(filter->dst_ipv4);
memcpy(&cld->ipaddr.v4.data, &ipa, sizeof(ipa));
}
cld->inner_vlan = cpu_to_le16(ntohs(filter->vlan_id));
/* tenant_id is not supported by FW now; once the support is enabled,
* fill the cld->tenant_id with cpu_to_le32(filter->tenant_id)
*/
if (filter->tenant_id)
return;
}
| 0 |
[
"CWE-400",
"CWE-401"
] |
linux
|
27d461333459d282ffa4a2bdb6b215a59d493a8f
| 155,558,663,792,098,580,000,000,000,000,000,000,000 | 34 |
i40e: prevent memory leak in i40e_setup_macvlans
In i40e_setup_macvlans if i40e_setup_channel fails the allocated memory
for ch should be released.
Signed-off-by: Navid Emamdoost <[email protected]>
Tested-by: Andrew Bowers <[email protected]>
Signed-off-by: Jeff Kirsher <[email protected]>
|
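The leak pattern and its fix, reduced to a sketch with hypothetical names standing in for i40e_setup_macvlans()/i40e_setup_channel():

#include <stdlib.h>

struct channel { int id; };

static int setup_channel(struct channel *ch) { (void)ch; return -1; } /* stub: fails */

static int setup_macvlans(void)
{
    struct channel *ch = calloc(1, sizeof(*ch));
    if (!ch)
        return -1;
    if (setup_channel(ch) != 0) {
        free(ch);              /* the cleanup the patch adds on the error path */
        return -1;
    }
    return 0;                  /* on success, ch is owned elsewhere */
}

int main(void) { return setup_macvlans() == 0 ? 0 : 1; }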
void CLASS leaf_hdr_load_raw()
{
ushort *pixel;
unsigned tile=0, r, c, row, col;
pixel = (ushort *) calloc (raw_width, sizeof *pixel);
merror (pixel, "leaf_hdr_load_raw()");
FORC(tiff_samples)
for (r=0; r < raw_height; r++) {
if (r % tile_length == 0) {
fseek (ifp, data_offset + 4*tile++, SEEK_SET);
fseek (ifp, get4() + 2*left_margin, SEEK_SET);
}
if (filters && c != shot_select) continue;
read_shorts (pixel, raw_width);
if ((row = r - top_margin) >= height) continue;
for (col=0; col < width; col++)
if (filters) BAYER(row,col) = pixel[col];
else image[row*width+col][c] = pixel[col];
}
free (pixel);
if (!filters) {
maximum = 0xffff;
raw_color = 1;
}
}
| 0 |
[
"CWE-189"
] |
rawstudio
|
983bda1f0fa5fa86884381208274198a620f006e
| 195,879,555,334,484,900,000,000,000,000,000,000,000 | 26 |
Avoid overflow in ljpeg_start().
|
CIFSSMBRead(const int xid, struct cifs_io_parms *io_parms, unsigned int *nbytes,
char **buf, int *pbuf_type)
{
int rc = -EACCES;
READ_REQ *pSMB = NULL;
READ_RSP *pSMBr = NULL;
char *pReadData = NULL;
int wct;
int resp_buf_type = 0;
struct kvec iov[1];
__u32 pid = io_parms->pid;
__u16 netfid = io_parms->netfid;
__u64 offset = io_parms->offset;
struct cifs_tcon *tcon = io_parms->tcon;
unsigned int count = io_parms->length;
cFYI(1, "Reading %d bytes on fid %d", count, netfid);
if (tcon->ses->capabilities & CAP_LARGE_FILES)
wct = 12;
else {
wct = 10; /* old style read */
if ((offset >> 32) > 0) {
/* can not handle this big offset for old */
return -EIO;
}
}
*nbytes = 0;
rc = small_smb_init(SMB_COM_READ_ANDX, wct, tcon, (void **) &pSMB);
if (rc)
return rc;
pSMB->hdr.Pid = cpu_to_le16((__u16)pid);
pSMB->hdr.PidHigh = cpu_to_le16((__u16)(pid >> 16));
/* tcon and ses pointer are checked in smb_init */
if (tcon->ses->server == NULL)
return -ECONNABORTED;
pSMB->AndXCommand = 0xFF; /* none */
pSMB->Fid = netfid;
pSMB->OffsetLow = cpu_to_le32(offset & 0xFFFFFFFF);
if (wct == 12)
pSMB->OffsetHigh = cpu_to_le32(offset >> 32);
pSMB->Remaining = 0;
pSMB->MaxCount = cpu_to_le16(count & 0xFFFF);
pSMB->MaxCountHigh = cpu_to_le32(count >> 16);
if (wct == 12)
pSMB->ByteCount = 0; /* no need to do le conversion since 0 */
else {
/* old style read */
struct smb_com_readx_req *pSMBW =
(struct smb_com_readx_req *)pSMB;
pSMBW->ByteCount = 0;
}
iov[0].iov_base = (char *)pSMB;
iov[0].iov_len = be32_to_cpu(pSMB->hdr.smb_buf_length) + 4;
rc = SendReceive2(xid, tcon->ses, iov, 1 /* num iovecs */,
&resp_buf_type, CIFS_LOG_ERROR);
cifs_stats_inc(&tcon->num_reads);
pSMBr = (READ_RSP *)iov[0].iov_base;
if (rc) {
cERROR(1, "Send error in read = %d", rc);
} else {
int data_length = le16_to_cpu(pSMBr->DataLengthHigh);
data_length = data_length << 16;
data_length += le16_to_cpu(pSMBr->DataLength);
*nbytes = data_length;
/* check that DataLength would not go beyond end of SMB */
if ((data_length > CIFSMaxBufSize)
|| (data_length > count)) {
cFYI(1, "bad length %d for count %d",
data_length, count);
rc = -EIO;
*nbytes = 0;
} else {
pReadData = (char *) (&pSMBr->hdr.Protocol) +
le16_to_cpu(pSMBr->DataOffset);
/* if (rc = copy_to_user(buf, pReadData, data_length)) {
cERROR(1, "Faulting on read rc = %d",rc);
rc = -EFAULT;
}*/ /* can not use copy_to_user when using page cache*/
if (*buf)
memcpy(*buf, pReadData, data_length);
}
}
/* cifs_small_buf_release(pSMB); */ /* Freed earlier now in SendReceive2 */
if (*buf) {
if (resp_buf_type == CIFS_SMALL_BUFFER)
cifs_small_buf_release(iov[0].iov_base);
else if (resp_buf_type == CIFS_LARGE_BUFFER)
cifs_buf_release(iov[0].iov_base);
} else if (resp_buf_type != CIFS_NO_BUFFER) {
/* return buffer to caller to free */
*buf = iov[0].iov_base;
if (resp_buf_type == CIFS_SMALL_BUFFER)
*pbuf_type = CIFS_SMALL_BUFFER;
else if (resp_buf_type == CIFS_LARGE_BUFFER)
*pbuf_type = CIFS_LARGE_BUFFER;
} /* else no valid buffer on return - leave as null */
/* Note: On -EAGAIN error only caller can retry on handle based calls
since file handle passed in no longer valid */
return rc;
}
| 0 |
[
"CWE-362",
"CWE-119",
"CWE-189"
] |
linux
|
9438fabb73eb48055b58b89fc51e0bc4db22fabd
| 315,289,731,991,358,000,000,000,000,000,000,000,000 | 109 |
cifs: fix possible memory corruption in CIFSFindNext
The name_len variable in CIFSFindNext is a signed int that gets set to
the resume_name_len in the cifs_search_info. The resume_name_len however
is unsigned and for some infolevels is populated directly from a 32 bit
value sent by the server.
If the server sends a very large value for this, then that value could
look negative when converted to a signed int. That would make that
value pass the PATH_MAX check later in CIFSFindNext. The name_len would
then be used as a length value for a memcpy. It would then be treated
as unsigned again, and the memcpy scribbles over a ton of memory.
Fix this by making the name_len an unsigned value in CIFSFindNext.
Cc: <[email protected]>
Reported-by: Darren Lavender <[email protected]>
Signed-off-by: Jeff Layton <[email protected]>
Signed-off-by: Steve French <[email protected]>
|
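The trap is worth seeing in a few lines of C: a large unsigned 32-bit length looks negative once stored in a signed int, sails through a signed "< PATH_MAX" comparison, then widens back to a huge size_t at the memcpy. The values here are illustrative:

#include <stdint.h>
#include <stdio.h>

#define PATH_MAX_DEMO 4096

int main(void)
{
    uint32_t from_server = 0xFFFFFF00u;   /* attacker-chosen resume_name_len */
    int name_len = (int)from_server;      /* reinterpreted: now negative */

    if (name_len < PATH_MAX_DEMO)
        printf("bound check passed with name_len = %d\n", name_len);

    size_t copy_len = (size_t)name_len;   /* widened back: enormous */
    printf("memcpy would be asked for %zu bytes\n", copy_len);
    return 0;
}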
atol8(const char *p, size_t char_cnt)
{
int64_t l;
int digit;
l = 0;
while (char_cnt-- > 0) {
if (*p >= '0' && *p <= '7')
digit = *p - '0';
else
break;
p++;
l <<= 3;
l |= digit;
}
return (l);
}
| 1 |
[
"CWE-125"
] |
libarchive
|
fa7438a0ff4033e4741c807394a9af6207940d71
| 58,332,935,140,371,250,000,000,000,000,000,000,000 | 17 |
Do something sensible for empty strings to make fuzzers happy.
|
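A quick exercise of the boundaries this version handles (a local copy of the function, renamed for the demo): a zero-length field yields 0 without touching the buffer, and parsing stops at the first non-octal byte.

#include <stdint.h>
#include <stdio.h>

static int64_t atol8_demo(const char *p, size_t char_cnt)
{
    int64_t l = 0;
    while (char_cnt-- > 0) {
        if (*p < '0' || *p > '7')
            break;                     /* stop at first non-octal byte */
        l = (l << 3) | (int64_t)(*p++ - '0');
    }
    return l;
}

int main(void)
{
    printf("%lld\n", (long long)atol8_demo("", 0));     /* 0: empty field   */
    printf("%lld\n", (long long)atol8_demo("0755", 4)); /* 493 (0755 octal) */
    printf("%lld\n", (long long)atol8_demo("12x9", 4)); /* 10: stops at 'x' */
    return 0;
}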
ofputil_tid_command(const struct ofputil_flow_mod *fm,
enum ofputil_protocol protocol)
{
return htons(protocol & OFPUTIL_P_TID
? (fm->command & 0xff) | (fm->table_id << 8)
: fm->command);
}
| 0 |
[
"CWE-772"
] |
ovs
|
77ad4225d125030420d897c873e4734ac708c66b
| 312,728,027,551,327,830,000,000,000,000,000,000,000 | 7 |
ofp-util: Fix memory leaks on error cases in ofputil_decode_group_mod().
Found by libFuzzer.
Reported-by: Bhargava Shastry <[email protected]>
Signed-off-by: Ben Pfaff <[email protected]>
Acked-by: Justin Pettit <[email protected]>
|
static bool cmd_write_pio(IDEState *s, uint8_t cmd)
{
bool lba48 = (cmd == WIN_WRITE_EXT);
if (!s->blk) {
ide_abort_command(s);
return true;
}
ide_cmd_lba48_transform(s, lba48);
s->req_nb_sectors = 1;
s->status = SEEK_STAT | READY_STAT;
ide_transfer_start(s, s->io_buffer, 512, ide_sector_write);
s->media_changed = 1;
return false;
}
| 0 |
[
"CWE-399"
] |
qemu
|
3251bdcf1c67427d964517053c3d185b46e618e8
| 43,016,777,885,711,770,000,000,000,000,000,000,000 | 19 |
ide: Correct handling of malformed/short PRDTs
This impacts both BMDMA and AHCI HBA interfaces for IDE.
Currently, we confuse the difference between a PRDT having
"0 bytes" and a PRDT having "0 complete sectors."
When we receive an incomplete sector, inconsistent error checking
leads to an infinite loop wherein the call succeeds, but it
didn't give us enough bytes -- leading us to re-call the
DMA chain over and over again. This leads to, in the BMDMA case,
leaked memory for short PRDTs, and infinite loops and resource
usage in the AHCI case.
The .prepare_buf() callback is reworked to return the number of
bytes that it successfully prepared. 0 is a valid, non-error
answer that means the table was empty and described no bytes.
-1 indicates an error.
Our current implementation uses the io_buffer in IDEState to
ultimately describe the size of a prepared scatter-gather list.
Even though the AHCI PRDT/SGList can be as large as 256GiB, the
AHCI command header limits transactions to just 4GiB. ATA8-ACS3,
however, defines the largest transaction to be an LBA48 command
that transfers 65,536 sectors. With a 512 byte sector size, this
is just 32MiB.
Since our current state structures use the int type to describe
the size of the buffer, and this state is migrated as int32, we
are limited to describing 2GiB buffer sizes unless we change the
migration protocol.
For this reason, this patch begins to unify the assertions in the
IDE pathways that the scatter-gather list provided by either the
AHCI PRDT or the PCI BMDMA PRDs can only describe, at a maximum,
2GiB. This should be resilient enough unless we need a sector
size that exceeds 32KiB.
Further, the likelihood of any guest operating system actually
attempting to transfer this much data in a single operation is
very slim.
To this end, the IDEState variables have been updated to more
explicitly clarify our maximum supported size. Callers to the
prepare_buf callback have been reworked to understand the new
return code, and all versions of the prepare_buf callback have
been adjusted accordingly.
Lastly, the ahci_populate_sglist helper, relied upon by the
AHCI implementation of .prepare_buf() as well as the PCI
implementation of the callback have had overflow assertions
added to help make clear the reasonings behind the various
type changes.
[Added %d -> %"PRId64" fix John sent because off_pos changed from int to
int64_t.
--Stefan]
Signed-off-by: John Snow <[email protected]>
Reviewed-by: Paolo Bonzini <[email protected]>
Message-id: [email protected]
Signed-off-by: Stefan Hajnoczi <[email protected]>
|
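The caller-side contract the message spells out, sketched with an illustrative stub in place of the real .prepare_buf() callback: negative aborts, zero ends the command, and only a positive byte count continues the transfer — so a short PRDT can no longer re-enter the DMA chain forever.

#include <stdint.h>
#include <stdio.h>

/* stand-in for the real callback; a genuine one parses the PRDT */
static int32_t prepare_buf(void *opaque) { (void)opaque; return 512; }

static int32_t start_dma(void *opaque)
{
    int32_t n = prepare_buf(opaque);
    if (n < 0)
        return -1;      /* malformed table: fail the command */
    if (n == 0)
        return 0;       /* empty table: nothing to transfer, stop */
    /* ... issue a transfer of exactly n bytes here ... */
    return n;
}

int main(void) { printf("prepared %d bytes\n", (int)start_dma(NULL)); return 0; }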
static int tcp_send_mss(struct sock *sk, int *size_goal, int flags)
{
int mss_now;
mss_now = tcp_current_mss(sk);
*size_goal = tcp_xmit_size_goal(sk, mss_now, !(flags & MSG_OOB));
return mss_now;
}
| 0 |
[
"CWE-399",
"CWE-835"
] |
linux
|
ccf7abb93af09ad0868ae9033d1ca8108bdaec82
| 43,680,843,748,710,620,000,000,000,000,000,000,000 | 9 |
tcp: avoid infinite loop in tcp_splice_read()
Splicing from TCP socket is vulnerable when a packet with URG flag is
received and stored into receive queue.
__tcp_splice_read() returns 0, and sk_wait_data() immediately
returns since there is the problematic skb in queue.
This is a nice way to burn cpu (aka infinite loop) and trigger
soft lockups.
Again, this gem was found by syzkaller tool.
Fixes: 9c55e01c0cc8 ("[TCP]: Splice receive support.")
Signed-off-by: Eric Dumazet <[email protected]>
Reported-by: Dmitry Vyukov <[email protected]>
Cc: Willy Tarreau <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
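The loop shape the fix targets, as a sketch with stubbed predicates: when the inner read returns 0 while data is still queued (the URG case), the loop must break rather than sleep and retry, or it spins forever.

#include <stdbool.h>
#include <stdio.h>

static size_t splice_some(void)    { return 0; }    /* stub: no progress */
static bool   queue_nonempty(void) { return true; } /* stub: URG skb queued */

static size_t splice_loop(size_t want)
{
    size_t done = 0;
    while (done < want) {
        size_t n = splice_some();
        if (n == 0 && queue_nonempty())
            break;      /* unspliceable bytes queued: stop, don't spin */
        if (n == 0)
            break;      /* no data at all; a real loop would sleep here */
        done += n;
    }
    return done;
}

int main(void) { printf("spliced %zu bytes\n", splice_loop(4096)); return 0; }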
static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
struct bpf_reg_state *false_reg,
u64 val, u32 val32,
u8 opcode, bool is_jmp32)
{
opcode = flip_opcode(opcode);
/* This uses zero as "not present in table"; luckily the zero opcode,
* BPF_JA, can't get here.
*/
if (opcode)
reg_set_min_max(true_reg, false_reg, val, val32, opcode, is_jmp32);
}
| 0 |
[] |
linux
|
9b00f1b78809309163dda2d044d9e94a3c0248a3
| 126,802,335,254,101,960,000,000,000,000,000,000,000 | 12 |
bpf: Fix truncation handling for mod32 dst reg wrt zero
Recently noticed that when mod32 with a known src reg of 0 is performed,
then the dst register is 32-bit truncated in verifier:
0: R1=ctx(id=0,off=0,imm=0) R10=fp0
0: (b7) r0 = 0
1: R0_w=inv0 R1=ctx(id=0,off=0,imm=0) R10=fp0
1: (b7) r1 = -1
2: R0_w=inv0 R1_w=inv-1 R10=fp0
2: (b4) w2 = -1
3: R0_w=inv0 R1_w=inv-1 R2_w=inv4294967295 R10=fp0
3: (9c) w1 %= w0
4: R0_w=inv0 R1_w=inv(id=0,umax_value=4294967295,var_off=(0x0; 0xffffffff)) R2_w=inv4294967295 R10=fp0
4: (b7) r0 = 1
5: R0_w=inv1 R1_w=inv(id=0,umax_value=4294967295,var_off=(0x0; 0xffffffff)) R2_w=inv4294967295 R10=fp0
5: (1d) if r1 == r2 goto pc+1
R0_w=inv1 R1_w=inv(id=0,umax_value=4294967295,var_off=(0x0; 0xffffffff)) R2_w=inv4294967295 R10=fp0
6: R0_w=inv1 R1_w=inv(id=0,umax_value=4294967295,var_off=(0x0; 0xffffffff)) R2_w=inv4294967295 R10=fp0
6: (b7) r0 = 2
7: R0_w=inv2 R1_w=inv(id=0,umax_value=4294967295,var_off=(0x0; 0xffffffff)) R2_w=inv4294967295 R10=fp0
7: (95) exit
7: R0=inv1 R1=inv(id=0,umin_value=4294967295,umax_value=4294967295,var_off=(0x0; 0xffffffff)) R2=inv4294967295 R10=fp0
7: (95) exit
However, as a runtime result, we get 2 instead of 1, meaning the dst
register does not contain (u32)-1 in this case. The reason is fairly
straight forward given the 0 test leaves the dst register as-is:
# ./bpftool p d x i 23
0: (b7) r0 = 0
1: (b7) r1 = -1
2: (b4) w2 = -1
3: (16) if w0 == 0x0 goto pc+1
4: (9c) w1 %= w0
5: (b7) r0 = 1
6: (1d) if r1 == r2 goto pc+1
7: (b7) r0 = 2
8: (95) exit
This was originally not an issue given the dst register was marked as
completely unknown (aka 64 bit unknown). However, after 468f6eafa6c4
("bpf: fix 32-bit ALU op verification") the verifier casts the register
output to 32 bit, and hence it becomes 32 bit unknown. Note that for
the case where the src register is unknown, the dst register is marked
64 bit unknown. After the fix, the register is truncated by the runtime
and the test passes:
# ./bpftool p d x i 23
0: (b7) r0 = 0
1: (b7) r1 = -1
2: (b4) w2 = -1
3: (16) if w0 == 0x0 goto pc+2
4: (9c) w1 %= w0
5: (05) goto pc+1
6: (bc) w1 = w1
7: (b7) r0 = 1
8: (1d) if r1 == r2 goto pc+1
9: (b7) r0 = 2
10: (95) exit
Semantics also match with {R,W}x mod{64,32} 0 -> {R,W}x. Invalid div
has always been {R,W}x div{64,32} 0 -> 0. Rewrites are as follows:
mod32: mod64:
(16) if w0 == 0x0 goto pc+2 (15) if r0 == 0x0 goto pc+1
(9c) w1 %= w0 (9f) r1 %= r0
(05) goto pc+1
(bc) w1 = w1
Fixes: 468f6eafa6c4 ("bpf: fix 32-bit ALU op verification")
Signed-off-by: Daniel Borkmann <[email protected]>
Reviewed-by: John Fastabend <[email protected]>
Acked-by: Alexei Starovoitov <[email protected]>
|
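A C model of the semantics those rewrites enforce: modulo by zero leaves the destination as-is (while division by zero yields 0), and the extra "w1 = w1" copy is what truncates the destination to 32 bits in the mod32 case.

#include <stdint.h>
#include <stdio.h>

static uint32_t bpf_mod32(uint32_t dst, uint32_t src)
{
    /* taking dst as uint32_t models the "w1 = w1" truncating copy */
    return src ? dst % src : dst;
}

static uint64_t bpf_mod64(uint64_t dst, uint64_t src)
{
    return src ? dst % src : dst;
}

int main(void)
{
    uint64_t r1 = (uint64_t)-1;                      /* 64-bit all-ones */
    printf("mod32: %u\n", (unsigned)bpf_mod32((uint32_t)r1, 0)); /* 4294967295 */
    printf("mod64: %llu\n", (unsigned long long)bpf_mod64(r1, 0));
    return 0;
}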
void vQueueUnregisterQueue( QueueHandle_t xQueue )
{
UBaseType_t ux;
/* See if the handle of the queue being unregistered is actually in the
* registry. */
for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ )
{
if( xQueueRegistry[ ux ].xHandle == xQueue )
{
/* Set the name to NULL to show that this slot is free again. */
xQueueRegistry[ ux ].pcQueueName = NULL;
/* Set the handle to NULL to ensure the same queue handle cannot
* appear in the registry twice if it is added, removed, then
* added again. */
xQueueRegistry[ ux ].xHandle = ( QueueHandle_t ) 0;
break;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
} /*lint !e818 xQueue could not be pointer to const because it is a typedef. */
| 0 |
[
"CWE-200",
"CWE-190"
] |
FreeRTOS-Kernel
|
47338393f1f79558f6144213409f09f81d7c4837
| 224,563,768,946,885,640,000,000,000,000,000,000,000 | 25 |
add assert for addition overflow on queue creation (#225)
|
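The guard itself is one comparison; a sketch with configASSERT mapped onto the host assert and hypothetical size names (the head struct plus the item storage must not wrap when added):

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define configASSERT(x) assert(x)   /* stand-in for the FreeRTOS macro */

static size_t queue_alloc_size(size_t head_size, size_t storage_size)
{
    /* unsigned a + b wraps exactly when the sum is smaller than an addend */
    configASSERT(head_size + storage_size >= storage_size);
    return head_size + storage_size;
}

int main(void)
{
    printf("%zu bytes\n", queue_alloc_size(80, 10 * sizeof(int)));
    return 0;
}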