| func (string, 0–484k chars) | target (int64, 0–1) | cwe (list of 0–4 CWE ids) | project (string, 799 classes) | commit_id (string, 40 chars) | hash (float64, ≈1.2e24–3.4e29) | size (int64, 1–24k) | message (string, 0–13.3k chars) |
---|---|---|---|---|---|---|---|
xfs_reclaim_inode(
struct xfs_inode *ip,
struct xfs_perag *pag,
int sync_mode)
{
struct xfs_buf *bp = NULL;
xfs_ino_t ino = ip->i_ino; /* for radix_tree_delete */
int error;
restart:
error = 0;
xfs_ilock(ip, XFS_ILOCK_EXCL);
if (!xfs_iflock_nowait(ip)) {
if (!(sync_mode & SYNC_WAIT))
goto out;
xfs_iflock(ip);
}
if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
xfs_iunpin_wait(ip);
/* xfs_iflush_abort() drops the flush lock */
xfs_iflush_abort(ip, false);
goto reclaim;
}
if (xfs_ipincount(ip)) {
if (!(sync_mode & SYNC_WAIT))
goto out_ifunlock;
xfs_iunpin_wait(ip);
}
if (xfs_iflags_test(ip, XFS_ISTALE) || xfs_inode_clean(ip)) {
xfs_ifunlock(ip);
goto reclaim;
}
/*
* Never flush out dirty data during non-blocking reclaim, as it would
* just contend with AIL pushing trying to do the same job.
*/
if (!(sync_mode & SYNC_WAIT))
goto out_ifunlock;
/*
* Now we have an inode that needs flushing.
*
* Note that xfs_iflush will never block on the inode buffer lock, as
* xfs_ifree_cluster() can lock the inode buffer before it locks the
* ip->i_lock, and we are doing the exact opposite here. As a result,
* doing a blocking xfs_imap_to_bp() to get the cluster buffer would
* result in an ABBA deadlock with xfs_ifree_cluster().
*
* As xfs_ifree_cluster() must gather all inodes that are active in the
* cache to mark them stale, if we hit this case we don't actually want
* to do IO here - we want the inode marked stale so we can simply
* reclaim it. Hence if we get an EAGAIN error here, just unlock the
* inode, back off and try again. Hopefully the next pass through will
* see the stale flag set on the inode.
*/
error = xfs_iflush(ip, &bp);
if (error == -EAGAIN) {
xfs_iunlock(ip, XFS_ILOCK_EXCL);
/* backoff longer than in xfs_ifree_cluster */
delay(2);
goto restart;
}
if (!error) {
error = xfs_bwrite(bp);
xfs_buf_relse(bp);
}
reclaim:
ASSERT(!xfs_isiflocked(ip));
/*
* Because we use RCU freeing we need to ensure the inode always appears
* to be reclaimed with an invalid inode number when in the free state.
* We do this as early as possible under the ILOCK so that
* xfs_iflush_cluster() and xfs_ifree_cluster() can be guaranteed to
* detect races with us here. By doing this, we guarantee that once
* xfs_iflush_cluster() or xfs_ifree_cluster() has locked XFS_ILOCK that
* it will see either a valid inode that will serialise correctly, or it
* will see an invalid inode that it can skip.
*/
spin_lock(&ip->i_flags_lock);
ip->i_flags = XFS_IRECLAIM;
ip->i_ino = 0;
spin_unlock(&ip->i_flags_lock);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
XFS_STATS_INC(ip->i_mount, xs_ig_reclaims);
/*
* Remove the inode from the per-AG radix tree.
*
* Because radix_tree_delete won't complain even if the item was never
* added to the tree, assert that it's been there before to catch
* problems with the inode life time early on.
*/
spin_lock(&pag->pag_ici_lock);
if (!radix_tree_delete(&pag->pag_ici_root,
XFS_INO_TO_AGINO(ip->i_mount, ino)))
ASSERT(0);
xfs_perag_clear_reclaim_tag(pag);
spin_unlock(&pag->pag_ici_lock);
/*
* Here we do an (almost) spurious inode lock in order to coordinate
* with inode cache radix tree lookups. This is because the lookup
* can reference the inodes in the cache without taking references.
*
* We make that OK here by ensuring that we wait until the inode is
* unlocked after the lookup before we go ahead and free it.
*/
xfs_ilock(ip, XFS_ILOCK_EXCL);
xfs_qm_dqdetach(ip);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
__xfs_inode_free(ip);
return error;
out_ifunlock:
xfs_ifunlock(ip);
out:
xfs_iflags_clear(ip, XFS_IRECLAIM);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
/*
* We could return -EAGAIN here to make reclaim rescan the inode tree in
* a short while. However, this just burns CPU time scanning the tree
* waiting for IO to complete and the reclaim work never goes back to
* the idle state. Instead, return 0 to let the next scheduled
* background reclaim attempt to reclaim the inode again.
*/
return 0;
} | 0 | [
"CWE-476"
]
| linux | afca6c5b2595fc44383919fba740c194b0b76aff | 54,728,378,510,442,260,000,000,000,000,000,000,000 | 134 | xfs: validate cached inodes are free when allocated
A recent fuzzed filesystem image caused random dcache corruption
when the reproducer was run. This often showed up as panics in
lookup_slow() on a null inode->i_ops pointer when doing pathwalks.
BUG: unable to handle kernel NULL pointer dereference at 0000000000000000
....
Call Trace:
lookup_slow+0x44/0x60
walk_component+0x3dd/0x9f0
link_path_walk+0x4a7/0x830
path_lookupat+0xc1/0x470
filename_lookup+0x129/0x270
user_path_at_empty+0x36/0x40
path_listxattr+0x98/0x110
SyS_listxattr+0x13/0x20
do_syscall_64+0xf5/0x280
entry_SYSCALL_64_after_hwframe+0x42/0xb7
but had many different failure modes including deadlocks trying to
lock the inode that was just allocated or KASAN reports of
use-after-free violations.
The cause of the problem was a corrupt INOBT on a v4 fs where the
root inode was marked as free in the inobt record. Hence when we
allocated an inode, it chose the root inode to allocate, found it in
the cache and re-initialised it.
We recently fixed a similar inode allocation issue caused by inobt
record corruption problem in xfs_iget_cache_miss() in commit
ee457001ed6c ("xfs: catch inode allocation state mismatch
corruption"). This change adds similar checks to the cache-hit path
to catch it, and turns the reproducer into a corruption shutdown
situation.
Reported-by: Wen Xu <[email protected]>
Signed-Off-By: Dave Chinner <[email protected]>
Reviewed-by: Christoph Hellwig <[email protected]>
Reviewed-by: Carlos Maiolino <[email protected]>
Reviewed-by: Darrick J. Wong <[email protected]>
[darrick: fix typos in comment]
Signed-off-by: Darrick J. Wong <[email protected]> |
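A minimal sketch of the kind of cache-hit check the commit describes, in kernel style; the helper name and exact error handling here are assumptions, not the verbatim patch:

```c
/*
 * Hedged sketch: on an inode-cache hit during allocation
 * (XFS_IGET_CREATE), the cached inode must actually look free;
 * a live i_mode means the inobt record lied and the fs is corrupt.
 */
static int xfs_iget_check_free_state_sketch(struct xfs_inode *ip, int flags)
{
	if ((flags & XFS_IGET_CREATE) && VFS_I(ip)->i_mode != 0)
		return -EFSCORRUPTED;	/* asked to allocate an in-use inode */
	return 0;
}
```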
static inline struct ist htx_sl_p1(const struct htx_sl *sl)
{
return ist2(HTX_SL_P1_PTR(sl), HTX_SL_P1_LEN(sl));
} | 0 | [
"CWE-190"
]
| haproxy | 3b69886f7dcc3cfb3d166309018e6cfec9ce2c95 | 315,217,473,613,922,200,000,000,000,000,000,000,000 | 4 | BUG/MAJOR: htx: fix missing header name length check in htx_add_header/trailer
Ori Hollander of JFrog Security reported that htx_add_header() and
htx_add_trailer() were missing a length check on the header name. While
this does not allow to overwrite any memory area, it results in bits of
the header name length to slip into the header value length and may
result in forging certain header names on the input. The sad thing here
is that a FIXME comment was present suggesting to add the required length
checks :-(
The injected headers are visible to the HTTP internals and to the config
rules, so haproxy will generally stay synchronized with the server. But
there is one exception which is the content-length header field, because
it is already deduplicated on the input, but before being indexed. As
such, injecting a content-length header after the deduplication stage
may be abused to present a different, shorter one on the other side and
help build a request smuggling attack, or even maybe a response splitting
attack. CVE-2021-40346 was assigned to this problem.
As a mitigation measure, it is sufficient to verify that no more than
one such header is present in any message, which is normally the case
thanks to the duplicate checks:
http-request deny if { req.hdr_cnt(content-length) gt 1 }
http-response deny if { res.hdr_cnt(content-length) gt 1 }
This must be backported to all HTX-enabled versions, hence as far as 2.0.
In 2.3 and earlier, the functions are in src/htx.c instead.
Many thanks to Ori for his work and his responsible report! |
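A hedged sketch of the missing check; the exact HTX field widths (8-bit name length, 20-bit value length) are assumptions here, not taken from the haproxy patch:

```c
#include <stddef.h>

/* Hedged sketch: both lengths must fit the packed HTX "info" word
 * before a header block is encoded. Field widths are assumed. */
#define HTX_MAX_NAME_LEN   ((1U << 8) - 1)    /* assumed 8-bit name length   */
#define HTX_MAX_VALUE_LEN  ((1U << 20) - 1)   /* assumed 20-bit value length */

static int htx_header_lengths_ok(size_t name_len, size_t value_len)
{
	return name_len <= HTX_MAX_NAME_LEN && value_len <= HTX_MAX_VALUE_LEN;
}
```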
proto_tree_add_bitmask_list_value(proto_tree *tree, tvbuff_t *tvb, const guint offset,
const int len, int * const *fields, const guint64 value)
{
if (tree) {
proto_item_add_bitmask_tree(NULL, tvb, offset, len, -1, fields,
BMT_NO_APPEND, FALSE, TRUE, tree, value);
}
} | 0 | [
"CWE-401"
]
| wireshark | a9fc769d7bb4b491efb61c699d57c9f35269d871 | 329,374,163,863,924,100,000,000,000,000,000,000,000 | 8 | epan: Fix a memory leak.
Make sure _proto_tree_add_bits_ret_val allocates a bits array using the
packet scope, otherwise we leak memory. Fixes #17032. |
bool hex2bin(unsigned char *p, const char *hexstr, size_t len)
{
int nibble1, nibble2;
unsigned char idx;
bool ret = false;
while (*hexstr && len) {
if (unlikely(!hexstr[1])) {
applog(LOG_ERR, "hex2bin str truncated");
return ret;
}
idx = *hexstr++;
nibble1 = hex2bin_tbl[idx];
idx = *hexstr++;
nibble2 = hex2bin_tbl[idx];
if (unlikely((nibble1 < 0) || (nibble2 < 0))) {
applog(LOG_ERR, "hex2bin scan failed");
return ret;
}
*p++ = (((unsigned char)nibble1) << 4) | ((unsigned char)nibble2);
--len;
}
if (likely(len == 0 && *hexstr == 0))
ret = true;
return ret;
} | 0 | [
"CWE-20",
"CWE-703"
]
| sgminer | 910c36089940e81fb85c65b8e63dcd2fac71470c | 287,977,293,857,907,000,000,000,000,000,000,000,000 | 30 | stratum: parse_notify(): Don't die on malformed bbversion/prev_hash/nbit/ntime.
Might have introduced a memory leak, don't have time to check. :(
Should the other hex2bin()'s be checked?
Thanks to Mick Ayzenberg <mick.dejavusecurity.com> for finding this. |
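A minimal usage sketch for the hex2bin() above, assuming sgminer's hex2bin_tbl[] and applog() are linked in; the helper name is illustrative:

```c
/* Reject a malformed stratum "prev_hash" field instead of dying:
 * hex2bin() fails cleanly on truncated or non-hex input. */
static bool parse_prev_hash(const char *hexstr, unsigned char out[32])
{
	return hex2bin(out, hexstr, 32);
}
```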
static ssize_t sched_mc_power_savings_store(struct sys_device *dev,
const char *buf, size_t count)
{
return sched_power_savings_store(buf, count, 0);
} | 0 | []
| linux-2.6 | 8f1bc385cfbab474db6c27b5af1e439614f3025c | 50,782,383,441,238,960,000,000,000,000,000,000,000 | 5 | sched: fair: weight calculations
In order to level the hierarchy, we need to calculate load based on the
root view. That is, each task's load is in the same unit.
       A
      / \
     B   1
    / \
   2   3
To compute 1's load we do:
      weight(1)
    --------------
     rq_weight(A)
To compute 2's load we do:
      weight(2)       weight(B)
    -------------- * --------------
     rq_weight(B)     rq_weight(A)
This yields load fractions in comparable units.
The consequence is that it changes virtual time. We used to have:
             time_{i}
vtime_{i} = -----------
             weight_{i}
vtime = \Sum vtime_{i} = time / rq_weight.
But with the new way of load calculation we get that vtime equals time.
Signed-off-by: Peter Zijlstra <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]> |
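A standalone sketch working the example above with assumed weights (every entity weighted 1024, so each runqueue weight is 2048); the numbers are illustrative, not from the patch:

```c
#include <stdio.h>

int main(void)
{
	double w = 1024.0, rq_A = 2048.0, rq_B = 2048.0;

	/* root-view load fractions per the formulas above */
	double load1 = w / rq_A;                /* 1's load = 0.50 */
	double load2 = (w / rq_B) * (w / rq_A); /* 2's load = 0.25 */

	printf("load(1)=%.2f load(2)=%.2f\n", load1, load2);
	return 0;
}
```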
bool dwgReader18::readFileHeader() {
if (! fileBuf->setPosition(0x80))
return false;
// genMagicNumber(); DBG("\n"); DBG("\n");
DRW_DBG("Encrypted Header Data=\n");
duint8 byteStr[0x6C];
int size =0x6C;
for (int i=0, j=0; i< 0x6C;i++) {
duint8 ch = fileBuf->getRawChar8();
DRW_DBGH(ch);
if (j == 15) {
DRW_DBG("\n");
j = 0;
} else {
DRW_DBG(", ");
j++;
}
byteStr[i] = DRW_magicNum18[i] ^ ch;
}
DRW_DBG("\n");
// size =0x6C;
DRW_DBG("Decrypted Header Data=\n");
for (int i=0, j = 0; i< size;i++) {
DRW_DBGH( static_cast<unsigned char>(byteStr[i]));
if (j == 15) {
DRW_DBG("\n");
j = 0;
} else {
DRW_DBG(", ");
j++;
}
}
dwgBuffer buff(byteStr, 0x6C, &decoder);
std::string name = reinterpret_cast<char*>(byteStr);
DRW_DBG("\nFile ID string (AcFssFcAJMB)= "); DRW_DBG(name.c_str());
//ID string + NULL = 12
buff.setPosition(12);
DRW_DBG("\n0x00 long= "); DRW_DBGH(buff.getRawLong32());
DRW_DBG("\n0x6c long= "); DRW_DBGH(buff.getRawLong32());
DRW_DBG("\n0x04 long= "); DRW_DBGH(buff.getRawLong32());
DRW_DBG("\nRoot tree node gap= "); DRW_DBGH(buff.getRawLong32());
DRW_DBG("\nLowermost left tree node gap= "); DRW_DBGH(buff.getRawLong32());
DRW_DBG("\nLowermost right tree node gap= "); DRW_DBGH(buff.getRawLong32());
DRW_DBG("\nUnknown long (1)= "); DRW_DBGH(buff.getRawLong32());
DRW_DBG("\nLast section page Id= "); DRW_DBGH(buff.getRawLong32());
DRW_DBG("\nLast section page end address 64b= "); DRW_DBGH(buff.getRawLong64());
DRW_DBG("\nStart of second header data address 64b= "); DRW_DBGH(buff.getRawLong64());
DRW_DBG("\nGap amount= "); DRW_DBGH(buff.getRawLong32());
DRW_DBG("\nSection page amount= "); DRW_DBGH(buff.getRawLong32());
DRW_DBG("\n0x20 long= "); DRW_DBGH(buff.getRawLong32());
DRW_DBG("\n0x80 long= "); DRW_DBGH(buff.getRawLong32());
DRW_DBG("\n0x40 long= "); DRW_DBGH(buff.getRawLong32());
dint32 secPageMapId = buff.getRawLong32();
DRW_DBG("\nSection Page Map Id= "); DRW_DBGH(secPageMapId);
duint64 secPageMapAddr = buff.getRawLong64()+0x100;
DRW_DBG("\nSection Page Map address 64b= "); DRW_DBGH(secPageMapAddr);
DRW_DBG("\nSection Page Map address 64b dec= "); DRW_DBG(secPageMapAddr);
duint32 secMapId = buff.getRawLong32();
DRW_DBG("\nSection Map Id= "); DRW_DBGH(secMapId);
DRW_DBG("\nSection page array size= "); DRW_DBGH(buff.getRawLong32());
DRW_DBG("\nGap array size= "); DRW_DBGH(buff.getRawLong32());
//TODO: verify CRC
DRW_DBG("\nCRC32= "); DRW_DBGH(buff.getRawLong32());
for (duint8 i = 0x68; i < 0x6c; ++i)
byteStr[i] = '\0';
// byteStr[i] = '\0';
duint32 crcCalc = buff.crc32(0x00,0,0x6C);
DRW_DBG("\nCRC32 calculated= "); DRW_DBGH(crcCalc);
DRW_DBG("\nEnd Encrypted Data. Reads 0x14 bytes, equal to magic number:\n");
for (int i=0, j=0; i< 0x14;i++) {
DRW_DBG("magic num: "); DRW_DBGH( static_cast<unsigned char>(DRW_magicNumEnd18[i]));
DRW_DBG(",read "); DRW_DBGH( static_cast<unsigned char>(fileBuf->getRawChar8()));
if (j == 3) {
DRW_DBG("\n");
j = 0;
} else {
DRW_DBG(", ");
j++;
}
}
// At this point are parsed the first 256 bytes
DRW_DBG("\nJump to Section Page Map address: "); DRW_DBGH(secPageMapAddr);
if (! fileBuf->setPosition(secPageMapAddr))
return false;
duint32 pageType = fileBuf->getRawLong32();
DRW_DBG("\nSection page type= "); DRW_DBGH(pageType);
duint32 decompSize = fileBuf->getRawLong32();
DRW_DBG("\nDecompressed size= "); DRW_DBG(decompSize); DRW_DBG(", "); DRW_DBGH(decompSize);
if (pageType != 0x41630e3b){
//bad page type, ends
DRW_DBG("Warning, bad page type, was expected 0x41630e3b instead of"); DRW_DBGH(pageType); DRW_DBG("\n");
return false;
}
std::vector<duint8> tmpDecompSec(decompSize);
parseSysPage(tmpDecompSec.data(), decompSize);
//parses "Section page map" decompressed data
dwgBuffer buff2(tmpDecompSec.data(), decompSize, &decoder);
duint32 address = 0x100;
//stores temporarily info of all pages:
std::unordered_map<duint64, dwgPageInfo >sectionPageMapTmp;
for (unsigned int i = 0; i < decompSize;) {
dint32 id = buff2.getRawLong32();//RLZ bad can be +/-
duint32 size = buff2.getRawLong32();
i += 8;
DRW_DBG("Page num= "); DRW_DBG(id); DRW_DBG(" size= "); DRW_DBGH(size);
DRW_DBG(" address= "); DRW_DBGH(address); DRW_DBG("\n");
//TODO num can be negative indicating gap
// duint64 ind = id > 0 ? id : -id;
if (id < 0){
DRW_DBG("Parent= "); DRW_DBG(buff2.getRawLong32());
DRW_DBG("\nLeft= "); DRW_DBG(buff2.getRawLong32());
DRW_DBG(", Right= "); DRW_DBG(buff2.getRawLong32());
DRW_DBG(", 0x00= ");DRW_DBGH(buff2.getRawLong32()); DRW_DBG("\n");
i += 16;
}
sectionPageMapTmp[id] = dwgPageInfo(id, address, size);
address += size;
}
DRW_DBG("\n*** dwgReader18: Processing Data Section Map ***\n");
dwgPageInfo sectionMap = sectionPageMapTmp[secMapId];
if (!fileBuf->setPosition(sectionMap.address))
return false;
pageType = fileBuf->getRawLong32();
DRW_DBG("\nSection page type= "); DRW_DBGH(pageType);
decompSize = fileBuf->getRawLong32();
DRW_DBG("\nDecompressed size= "); DRW_DBG(decompSize); DRW_DBG(", "); DRW_DBGH(decompSize);
if (pageType != 0x4163003b){
//bad page type, ends
DRW_DBG("Warning, bad page type, was expected 0x4163003b instead of"); DRW_DBGH(pageType); DRW_DBG("\n");
return false;
}
tmpDecompSec.resize(decompSize);
parseSysPage(tmpDecompSec.data(), decompSize);
//reads sections:
DRW_DBG("\n*** dwgReader18: reads sections:");
dwgBuffer buff3(tmpDecompSec.data(), decompSize, &decoder);
duint32 numDescriptions = buff3.getRawLong32();
DRW_DBG("\nnumDescriptions (sections)= "); DRW_DBG(numDescriptions);
DRW_DBG("\n0x02 long= "); DRW_DBGH(buff3.getRawLong32());
DRW_DBG("\n0x00007400 long= "); DRW_DBGH(buff3.getRawLong32());
DRW_DBG("\n0x00 long= "); DRW_DBGH(buff3.getRawLong32());
DRW_DBG("\nunknown long (numDescriptions?)= "); DRW_DBG(buff3.getRawLong32()); DRW_DBG("\n");
for (unsigned int i = 0; i < numDescriptions; i++) {
dwgSectionInfo secInfo;
secInfo.size = buff3.getRawLong64();
DRW_DBG("\nSize of section= "); DRW_DBGH(secInfo.size);
secInfo.pageCount = buff3.getRawLong32();
DRW_DBG("\nPage count= "); DRW_DBGH(secInfo.pageCount);
secInfo.maxSize = buff3.getRawLong32();
DRW_DBG("\nMax Decompressed Size= "); DRW_DBGH(secInfo.maxSize);
DRW_DBG("\nunknown long= "); DRW_DBGH(buff3.getRawLong32());
secInfo.compressed = buff3.getRawLong32();
DRW_DBG("\nis Compressed? 1:no, 2:yes= "); DRW_DBGH(secInfo.compressed);
secInfo.Id = buff3.getRawLong32();
DRW_DBG("\nSection Id= "); DRW_DBGH(secInfo.Id);
secInfo.encrypted = buff3.getRawLong32();
//encrypted (doc: 0 no, 1 yes, 2 unkn) on read: objects 0 and encrypted yes
DRW_DBG("\nEncrypted= "); DRW_DBGH(secInfo.encrypted);
duint8 nameCStr[64];
buff3.getBytes(nameCStr, 64);
secInfo.name = reinterpret_cast<char*>(nameCStr);
DRW_DBG("\nSection std::Name= "); DRW_DBG( secInfo.name.c_str() ); DRW_DBG("\n");
for (unsigned int i = 0; i < secInfo.pageCount; i++){
duint32 pn = buff3.getRawLong32();
dwgPageInfo pi = sectionPageMapTmp[pn]; //get a copy
DRW_DBG(" reading pag num = "); DRW_DBGH(pn);
pi.dataSize = buff3.getRawLong32();
pi.startOffset = buff3.getRawLong64();
secInfo.pages[pn]= pi;//complete copy in secInfo
DRW_DBG("\n Page number= "); DRW_DBGH(secInfo.pages[pn].Id);
DRW_DBG("\n size in file= "); DRW_DBGH(secInfo.pages[pn].size);
DRW_DBG("\n address in file= "); DRW_DBGH(secInfo.pages[pn].address);
DRW_DBG("\n Data size= "); DRW_DBGH(secInfo.pages[pn].dataSize);
DRW_DBG("\n Start offset= "); DRW_DBGH(secInfo.pages[pn].startOffset); DRW_DBG("\n");
}
//do not save empty section
if (!secInfo.name.empty()) {
DRW_DBG("Saved section Name= "); DRW_DBG( secInfo.name.c_str() ); DRW_DBG("\n");
sections[secEnum::getEnum(secInfo.name)] = secInfo;
}
}
if (! fileBuf->isGood())
return false;
DRW_DBG("\ndwgReader18::readFileHeader END\n\n");
return true;
} | 1 | [
"CWE-191"
]
| libdxfrw | ba3fa95648bef948e008dfbdd31a4d21badd71f0 | 236,308,246,492,077,400,000,000,000,000,000,000,000 | 198 | fixed out-of-bounds write vulnerability CVE-2021-21898
as reported in TALOS-2021-1349 / CVE-2021-21898,
dwgCompressor::decompress18() could be abused with a malformed DWG file
to force out-of-bounds write and possibly lead to malicious code
execution. |
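A hedged sketch of the bounds check the fix implies for the decompressor: clamp every copy against the end of the output buffer before writing. Names are illustrative, not the libdxfrw patch:

```c
#include <stdbool.h>
#include <stddef.h>
#include <string.h>

static bool copy_checked(unsigned char *dst, const unsigned char *dst_end,
                         const unsigned char *src, size_t len)
{
	if (len > (size_t)(dst_end - dst))
		return false;	/* would write past the buffer: bail out */
	memcpy(dst, src, len);
	return true;
}
```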
int smb_vfs_call_open(struct vfs_handle_struct *handle,
struct smb_filename *smb_fname, struct files_struct *fsp,
int flags, mode_t mode)
{
VFS_FIND(open);
return handle->fns->open_fn(handle, smb_fname, fsp, flags, mode);
} | 0 | [
"CWE-264"
]
| samba | 4278ef25f64d5fdbf432ff1534e275416ec9561e | 63,698,433,513,733,420,000,000,000,000,000,000,000 | 7 | CVE-2015-5252: s3: smbd: Fix symlink verification (file access outside the share).
Ensure matching component ends in '/' or '\0'.
BUG: https://bugzilla.samba.org/show_bug.cgi?id=11395
Signed-off-by: Jeremy Allison <[email protected]>
Reviewed-by: Volker Lendecke <[email protected]> |
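A hedged sketch of the check described in the summary line: after a prefix match, the next character of the resolved path must be '/' or '\0', so "/share/foo" cannot be matched by "/share/foobar". Illustrative, not the verbatim samba patch:

```c
#include <stdbool.h>
#include <string.h>

static bool component_matches(const char *resolved, const char *prefix)
{
	size_t len = strlen(prefix);

	if (strncmp(resolved, prefix, len) != 0)
		return false;
	/* matching component must end in '/' or '\0' */
	return resolved[len] == '/' || resolved[len] == '\0';
}
```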
void dump_mm(const struct mm_struct *mm)
{
pr_emerg("mm %px mmap %px seqnum %d task_size %lu\n"
#ifdef CONFIG_MMU
"get_unmapped_area %px\n"
#endif
"mmap_base %lu mmap_legacy_base %lu highest_vm_end %lu\n"
"pgd %px mm_users %d mm_count %d pgtables_bytes %lu map_count %d\n"
"hiwater_rss %lx hiwater_vm %lx total_vm %lx locked_vm %lx\n"
"pinned_vm %lx data_vm %lx exec_vm %lx stack_vm %lx\n"
"start_code %lx end_code %lx start_data %lx end_data %lx\n"
"start_brk %lx brk %lx start_stack %lx\n"
"arg_start %lx arg_end %lx env_start %lx env_end %lx\n"
"binfmt %px flags %lx core_state %px\n"
#ifdef CONFIG_AIO
"ioctx_table %px\n"
#endif
#ifdef CONFIG_MEMCG
"owner %px "
#endif
"exe_file %px\n"
#ifdef CONFIG_MMU_NOTIFIER
"mmu_notifier_mm %px\n"
#endif
#ifdef CONFIG_NUMA_BALANCING
"numa_next_scan %lu numa_scan_offset %lu numa_scan_seq %d\n"
#endif
"tlb_flush_pending %d\n"
"def_flags: %#lx(%pGv)\n",
mm, mm->mmap, mm->vmacache_seqnum, mm->task_size,
#ifdef CONFIG_MMU
mm->get_unmapped_area,
#endif
mm->mmap_base, mm->mmap_legacy_base, mm->highest_vm_end,
mm->pgd, atomic_read(&mm->mm_users),
atomic_read(&mm->mm_count),
mm_pgtables_bytes(mm),
mm->map_count,
mm->hiwater_rss, mm->hiwater_vm, mm->total_vm, mm->locked_vm,
mm->pinned_vm, mm->data_vm, mm->exec_vm, mm->stack_vm,
mm->start_code, mm->end_code, mm->start_data, mm->end_data,
mm->start_brk, mm->brk, mm->start_stack,
mm->arg_start, mm->arg_end, mm->env_start, mm->env_end,
mm->binfmt, mm->flags, mm->core_state,
#ifdef CONFIG_AIO
mm->ioctx_table,
#endif
#ifdef CONFIG_MEMCG
mm->owner,
#endif
mm->exe_file,
#ifdef CONFIG_MMU_NOTIFIER
mm->mmu_notifier_mm,
#endif
#ifdef CONFIG_NUMA_BALANCING
mm->numa_next_scan, mm->numa_scan_offset, mm->numa_scan_seq,
#endif
atomic_read(&mm->tlb_flush_pending),
mm->def_flags, &mm->def_flags
);
} | 1 | [
"CWE-416"
]
| linux | 7a9cdebdcc17e426fb5287e4a82db1dfe86339b2 | 50,375,831,465,742,455,000,000,000,000,000,000,000 | 62 | mm: get rid of vmacache_flush_all() entirely
Jann Horn points out that the vmacache_flush_all() function is not only
potentially expensive, it's buggy too. It also happens to be entirely
unnecessary, because the sequence number overflow case can be avoided by
simply making the sequence number be 64-bit. That doesn't even grow the
data structures in question, because the other adjacent fields are
already 64-bit.
So simplify the whole thing by just making the sequence number overflow
case go away entirely, which gets rid of all the complications and makes
the code faster too. Win-win.
[ Oleg Nesterov points out that the VMACACHE_FULL_FLUSHES statistics
also just goes away entirely with this ]
Reported-by: Jann Horn <[email protected]>
Suggested-by: Will Deacon <[email protected]>
Acked-by: Davidlohr Bueso <[email protected]>
Cc: Oleg Nesterov <[email protected]>
Cc: [email protected]
Signed-off-by: Linus Torvalds <[email protected]> |
QPDF::stopOnError(std::string const& message)
{
// Throw a generic exception when we lack context for something
// more specific. New code should not use this. This method exists
// to improve somewhat from calling assert in very old code.
throw QPDFExc(qpdf_e_damaged_pdf, this->m->file->getName(),
"", this->m->file->getLastOffset(), message);
} | 0 | [
"CWE-787"
]
| qpdf | d71f05ca07eb5c7cfa4d6d23e5c1f2a800f52e8e | 254,275,295,936,279,700,000,000,000,000,000,000,000 | 8 | Fix sign and conversion warnings (major)
This makes all integer type conversions that have potential data loss
explicit with calls that do range checks and raise an exception. After
this commit, qpdf builds with no warnings when -Wsign-conversion
-Wconversion is used with gcc or clang or when -W3 -Wd4800 is used
with MSVC. This significantly reduces the likelihood of potential
crashes from bogus integer values.
There are some parts of the code that take int when they should take
size_t or an offset. Such places would make qpdf not support files
with more than 2^31 of something that usually wouldn't be so large. In
the event that such a file shows up and is valid, at least qpdf would
raise an error in the right spot so the issue could be legitimately
addressed rather than failing in some weird way because of a silent
overflow condition. |
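A hedged sketch (in C, not qpdf's C++ helpers) of the range-checked narrowing the commit describes: refuse the conversion instead of silently truncating:

```c
#include <limits.h>
#include <stddef.h>

static int size_to_int_checked(size_t v, int *out)
{
	if (v > (size_t)INT_MAX)
		return -1;	/* out of range: report, don't wrap */
	*out = (int)v;
	return 0;
}
```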
void InstanceKlass::release_C_heap_structures() {
// Can't release the constant pool here because the constant pool can be
// deallocated separately from the InstanceKlass for default methods and
// redefine classes.
// Deallocate oop map cache
if (_oop_map_cache != NULL) {
delete _oop_map_cache;
_oop_map_cache = NULL;
}
// Deallocate JNI identifiers for jfieldIDs
JNIid::deallocate(jni_ids());
set_jni_ids(NULL);
jmethodID* jmeths = methods_jmethod_ids_acquire();
if (jmeths != (jmethodID*)NULL) {
release_set_methods_jmethod_ids(NULL);
FreeHeap(jmeths);
}
// Release dependencies.
// It is desirable to use DC::remove_all_dependents() here, but, unfortunately,
// it is not safe (see JDK-8143408). The problem is that the klass dependency
// context can contain live dependencies, since there's a race between nmethod &
// klass unloading. If the klass is dead when nmethod unloading happens, relevant
// dependencies aren't removed from the context associated with the class (see
// nmethod::flush_dependencies). It ends up during klass unloading as seemingly
// live dependencies pointing to unloaded nmethods and causes a crash in
// DC::remove_all_dependents() when it touches unloaded nmethod.
dependencies().wipe();
#if INCLUDE_JVMTI
// Deallocate breakpoint records
if (breakpoints() != 0x0) {
methods_do(clear_all_breakpoints);
assert(breakpoints() == 0x0, "should have cleared breakpoints");
}
// deallocate the cached class file
if (_cached_class_file != NULL && !MetaspaceShared::is_in_shared_metaspace(_cached_class_file)) {
os::free(_cached_class_file);
_cached_class_file = NULL;
}
#endif
// Decrement symbol reference counts associated with the unloaded class.
if (_name != NULL) _name->decrement_refcount();
// unreference array name derived from this class name (arrays of an unloaded
// class can't be referenced anymore).
if (_array_name != NULL) _array_name->decrement_refcount();
if (_source_debug_extension != NULL) FREE_C_HEAP_ARRAY(char, _source_debug_extension);
} | 0 | []
| jdk11u-dev | 41825fa33d605f8501164f9296572e4378e8183b | 135,348,286,473,905,580,000,000,000,000,000,000,000 | 53 | 8270386: Better verification of scan methods
Reviewed-by: mbaesken
Backport-of: ac329cef45979bd0159ecd1347e36f7129bb2ce4 |
udev_device_should_be_ignored(struct udev_device *udev_device)
{
const char *value;
value = udev_device_get_property_value(udev_device,
"LIBINPUT_IGNORE_DEVICE");
return value && !streq(value, "0");
} | 0 | [
"CWE-134"
]
| libinput | a423d7d3269dc32a87384f79e29bb5ac021c83d1 | 126,041,995,474,837,490,000,000,000,000,000,000,000 | 9 | evdev: strip the device name of format directives
This fixes a format string vulnerabilty.
evdev_log_message() composes a format string consisting of a fixed
prefix (including the rendered device name) and the passed-in format
buffer. This format string is then passed with the arguments to the
actual log handler, which usually and eventually ends up being printf.
If the device name contains a printf-style format directive, these ended
up in the format string and thus get interpreted correctly, e.g. for a
device "Foo%sBar" the log message vs printf invocation ends up being:
evdev_log_message(device, "some message %s", "some argument");
printf("event9 - Foo%sBar: some message %s", "some argument");
This can enable an attacker to execute malicious code with the
privileges of the process using libinput.
To exploit this, an attacker needs to be able to create a kernel device
with a malicious name, e.g. through /dev/uinput or a Bluetooth device.
To fix this, convert any potential format directives in the device name
by duplicating percentages.
Pre-rendering the device to avoid the issue altogether would be nicer
but the current log level hooks do not easily allow for this. The device
name is the only user-controlled part of the format string.
A second potential issue is the sysname of the device which is also
sanitized.
This issue was found by Albin Eldstål-Ahrens and Benjamin Svensson from
Assured AB, and independently by Lukas Lamster.
Fixes #752
Signed-off-by: Peter Hutterer <[email protected]> |
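A hedged sketch of the mitigation described above: duplicate every '%' in the device name so it can no longer act as a format directive. Illustrative; not the verbatim libinput patch:

```c
#include <stdlib.h>

static char *strdup_sanitize_percent(const char *name)
{
	size_t i, j, len = 0;
	char *out;

	for (i = 0; name[i]; i++)
		len += (name[i] == '%') ? 2 : 1;

	out = malloc(len + 1);
	if (!out)
		return NULL;

	for (i = 0, j = 0; name[i]; i++) {
		out[j++] = name[i];
		if (name[i] == '%')
			out[j++] = '%';	/* "Foo%sBar" becomes "Foo%%sBar" */
	}
	out[j] = '\0';
	return out;
}
```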
static ssize_t ucma_query_route(struct ucma_file *file,
const char __user *inbuf,
int in_len, int out_len)
{
struct rdma_ucm_query cmd;
struct rdma_ucm_query_route_resp resp;
struct ucma_context *ctx;
struct sockaddr *addr;
int ret = 0;
if (out_len < sizeof(resp))
return -ENOSPC;
if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
return -EFAULT;
ctx = ucma_get_ctx(file, cmd.id);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
memset(&resp, 0, sizeof resp);
addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr;
memcpy(&resp.src_addr, addr, addr->sa_family == AF_INET ?
sizeof(struct sockaddr_in) :
sizeof(struct sockaddr_in6));
addr = (struct sockaddr *) &ctx->cm_id->route.addr.dst_addr;
memcpy(&resp.dst_addr, addr, addr->sa_family == AF_INET ?
sizeof(struct sockaddr_in) :
sizeof(struct sockaddr_in6));
if (!ctx->cm_id->device)
goto out;
resp.node_guid = (__force __u64) ctx->cm_id->device->node_guid;
resp.port_num = ctx->cm_id->port_num;
if (rdma_cap_ib_sa(ctx->cm_id->device, ctx->cm_id->port_num))
ucma_copy_ib_route(&resp, &ctx->cm_id->route);
else if (rdma_protocol_roce(ctx->cm_id->device, ctx->cm_id->port_num))
ucma_copy_iboe_route(&resp, &ctx->cm_id->route);
else if (rdma_protocol_iwarp(ctx->cm_id->device, ctx->cm_id->port_num))
ucma_copy_iw_route(&resp, &ctx->cm_id->route);
out:
if (copy_to_user(u64_to_user_ptr(cmd.response),
&resp, sizeof(resp)))
ret = -EFAULT;
ucma_put_ctx(ctx);
return ret;
} | 0 | [
"CWE-416",
"CWE-703"
]
| linux | cb2595c1393b4a5211534e6f0a0fbad369e21ad8 | 47,267,952,962,882,080,000,000,000,000,000,000,000 | 50 | infiniband: fix a possible use-after-free bug
ucma_process_join() will free the newly allocated "mc" struct,
if there is any error after that, especially the copy_to_user().
But in parallel, ucma_leave_multicast() could find this "mc"
through idr_find() before ucma_process_join() frees it, since it
is already published.
So "mc" could be used in ucma_leave_multicast() after it is been
allocated and freed in ucma_process_join(), since we don't refcnt
it.
Fix this by separating "publish" from ID allocation, so that we
can get an ID first and publish it later after copy_to_user().
Fixes: c8f6a362bf3e ("RDMA/cma: Add multicast communication support")
Reported-by: Noam Rathaus <[email protected]>
Signed-off-by: Cong Wang <[email protected]>
Signed-off-by: Jason Gunthorpe <[email protected]> |
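A hedged kernel-style sketch of the "allocate first, publish later" pattern the fix describes, using the idr API; the surrounding names (mut, multicast_idr, mc) are assumed from the driver context:

```c
/* Reserve an id without publishing the object ... */
mutex_lock(&mut);
id = idr_alloc(&multicast_idr, NULL, 0, 0, GFP_KERNEL);
mutex_unlock(&mut);
if (id < 0)
	return id;

/* ... do the fallible work (fill in mc, copy_to_user) ... */

/* ... and only then make it visible to idr_find() lookups. */
mutex_lock(&mut);
idr_replace(&multicast_idr, mc, id);
mutex_unlock(&mut);
```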
tiffUnmapCallback(thandle_t handle,
tdata_t data,
toff_t length)
{
return;
} | 0 | [
"CWE-125"
]
| leptonica | 5ba34b1fe741d69d43a6c8cf767756997eadd87c | 126,941,321,016,077,730,000,000,000,000,000,000,000 | 6 | Issue 23654 in oss-fuzz: Heap-buffer-overflow in pixReadFromTiffStream
* Increase scanline buffer for reading gray+alpha and converting to RGBA |
static float min() { return -FLT_MAX; } | 0 | [
"CWE-125"
]
| CImg | 10af1e8c1ad2a58a0a3342a856bae63e8f257abb | 128,011,495,143,092,530,000,000,000,000,000,000,000 | 1 | Fix other issues in 'CImg<T>::load_bmp()'. |
dirserv_orconn_tls_done(const tor_addr_t *addr,
uint16_t or_port,
const char *digest_rcvd)
{
node_t *node = NULL;
tor_addr_port_t orport;
routerinfo_t *ri = NULL;
time_t now = time(NULL);
tor_assert(addr);
tor_assert(digest_rcvd);
node = node_get_mutable_by_id(digest_rcvd);
if (node == NULL || node->ri == NULL)
return;
ri = node->ri;
tor_addr_copy(&orport.addr, addr);
orport.port = or_port;
if (router_has_orport(ri, &orport)) {
/* Found the right router. */
if (!authdir_mode_bridge(get_options()) ||
ri->purpose == ROUTER_PURPOSE_BRIDGE) {
char addrstr[TOR_ADDR_BUF_LEN];
/* This is a bridge or we're not a bridge authoritative --
mark it as reachable. */
log_info(LD_DIRSERV, "Found router %s to be reachable at %s:%d. Yay.",
router_describe(ri),
tor_addr_to_str(addrstr, addr, sizeof(addrstr), 1),
ri->or_port);
if (tor_addr_family(addr) == AF_INET) {
rep_hist_note_router_reachable(digest_rcvd, addr, or_port, now);
node->last_reachable = now;
} else if (tor_addr_family(addr) == AF_INET6) {
/* No rephist for IPv6. */
node->last_reachable6 = now;
}
}
}
} | 0 | []
| tor | 02e05bd74dbec614397b696cfcda6525562a4675 | 41,842,341,768,113,833,000,000,000,000,000,000,000 | 39 | When examining descriptors as a dirserver, reject ones with bad versions
This is an extra fix for bug 21278: it ensures that these
descriptors and platforms will never be listed in a legit consensus. |
static u32 esp6_get_mtu(struct xfrm_state *x, int mtu)
{
struct esp_data *esp = x->data;
u32 blksize = ALIGN(crypto_aead_blocksize(esp->aead), 4);
u32 align = max_t(u32, blksize, esp->padlen);
u32 rem;
mtu -= x->props.header_len + crypto_aead_authsize(esp->aead);
rem = mtu & (align - 1);
mtu &= ~(align - 1);
if (x->props.mode != XFRM_MODE_TUNNEL) {
u32 padsize = ((blksize - 1) & 7) + 1;
mtu -= blksize - padsize;
mtu += min_t(u32, blksize - padsize, rem);
}
return mtu - 2;
} | 0 | [
"CWE-16"
]
| linux-2.6 | 920fc941a9617f95ccb283037fe6f8a38d95bb69 | 191,976,841,145,147,470,000,000,000,000,000,000,000 | 19 | [ESP]: Ensure IV is in linear part of the skb to avoid BUG() due to OOB access
ESP does not account for the IV size when calling pskb_may_pull() to
ensure everything it accesses directly is within the linear part of a
potential fragment. This results in a BUG() being triggered when the
both the IPv4 and IPv6 ESP stack is fed with an skb where the first
fragment ends between the end of the esp header and the end of the IV.
This bug was found by Dirk Nehring <[email protected]> .
Signed-off-by: Thomas Graf <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
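A hedged sketch of the fix's idea: include the IV size in the pull, so later direct accesses stay within the linear part of the skb:

```c
/* assumed context: ESP header parsing; aead provides the IV size */
if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) +
			crypto_aead_ivsize(aead)))
	goto out;
```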
connection_handle_read_post_cq_compact (chunkqueue * const cq)
{
/* combine first mem chunk with next non-empty mem chunk
* (loop if next chunk is empty) */
chunk *c = cq->first;
if (NULL == c) return 0;
const uint32_t mlen = buffer_string_length(c->mem) - (size_t)c->offset;
while ((c = c->next)) {
const uint32_t blen = buffer_string_length(c->mem) - (size_t)c->offset;
if (0 == blen) continue;
chunkqueue_compact_mem(cq, mlen + blen);
return 1;
}
return 0;
} | 0 | [
"CWE-703"
]
| lighttpd1.4 | b03b86f47b0d5a553137f081fadc482b4af1372d | 254,687,059,693,772,220,000,000,000,000,000,000,000 | 15 | [core] fix merging large headers across mult reads (fixes #3059)
(thx mitd)
x-ref:
"Connections stuck in Close_Wait causing 100% cpu usage"
https://redmine.lighttpd.net/issues/3059 |
xrdp_mm_sync_load(long param1, long param2)
{
long rv;
char* libname;
libname = (char*)param1;
rv = g_load_library(libname);
return rv;
} | 0 | []
| xrdp | d8f9e8310dac362bb9578763d1024178f94f4ecc | 158,427,788,883,246,730,000,000,000,000,000,000,000 | 9 | move temp files from /tmp to /tmp/.xrdp |
static int s390_fpregs_get(struct task_struct *target,
const struct user_regset *regset, unsigned int pos,
unsigned int count, void *kbuf, void __user *ubuf)
{
if (target == current) {
save_fp_ctl(&target->thread.fp_regs.fpc);
save_fp_regs(target->thread.fp_regs.fprs);
}
return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&target->thread.fp_regs, 0, -1);
} | 0 | [
"CWE-264",
"CWE-269"
]
| linux | dab6cf55f81a6e16b8147aed9a843e1691dcd318 | 17,129,788,145,931,416,000,000,000,000,000,000,000 | 12 | s390/ptrace: fix PSW mask check
The PSW mask check of the PTRACE_POKEUSR_AREA command is incorrect.
The PSW_MASK_USER define contains the PSW_MASK_ASC bits, the ptrace
interface accepts all combinations for the address-space-control
bits. To protect the kernel space the PSW mask check in ptrace needs
to reject the address-space-control bit combination for home space.
Fixes CVE-2014-3534
Cc: [email protected]
Signed-off-by: Martin Schwidefsky <[email protected]> |
static int process_cpu_clock_getres(const clockid_t which_clock,
struct timespec *tp)
{
return posix_cpu_clock_getres(PROCESS_CLOCK, tp);
} | 0 | [
"CWE-189"
]
| linux | f8bd2258e2d520dff28c855658bd24bdafb5102d | 104,427,142,132,252,700,000,000,000,000,000,000,000 | 5 | remove div_long_long_rem
x86 is the only arch right now which provides an optimized version of
div_long_long_rem, and it has the downside that one has to be very careful that
the divide doesn't overflow.
The API is a little awkward, as the arguments for the unsigned divide are
signed. The signed version also doesn't handle a negative divisor and
produces worse code on 64bit archs.
There is little incentive to keep this API alive, so this converts the few
users to the new API.
Signed-off-by: Roman Zippel <[email protected]>
Cc: Ralf Baechle <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: john stultz <[email protected]>
Cc: Christoph Lameter <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
void reds_on_client_semi_seamless_migrate_complete(RedsState *reds, RedClient *client)
{
MainChannelClient *mcc;
spice_debug("%p", client);
mcc = client->get_main();
// TODO: not doing net test. consider doing it on client_migrate_info
mcc->push_init(reds->qxl_instances.size(), reds->mouse_mode,
reds->is_client_mouse_allowed,
reds_get_mm_time() - MM_TIME_DELTA,
reds_qxl_ram_size(reds));
reds_link_mig_target_channels(reds, client);
mcc->migrate_dst_complete();
} | 0 | []
| spice | ca5bbc5692e052159bce1a75f55dc60b36078749 | 227,235,587,260,770,500,000,000,000,000,000,000,000 | 15 | With OpenSSL 1.1: Disable client-initiated renegotiation.
Fixes issue #49
Fixes BZ#1904459
Signed-off-by: Julien Ropé <[email protected]>
Reported-by: BlackKD
Acked-by: Frediano Ziglio <[email protected]> |
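With OpenSSL 1.1.0h and later, refusing client-initiated renegotiation is a context option; whether the spice patch uses exactly this call is an assumption here:

```c
/* hedged sketch: reject client-initiated TLS renegotiation */
SSL_CTX_set_options(ssl_ctx, SSL_OP_NO_RENEGOTIATION);
```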
int fuse_do_open(struct fuse_conn *fc, u64 nodeid, struct file *file,
bool isdir)
{
struct fuse_open_out outarg;
struct fuse_file *ff;
int err;
int opcode = isdir ? FUSE_OPENDIR : FUSE_OPEN;
ff = fuse_file_alloc(fc);
if (!ff)
return -ENOMEM;
err = fuse_send_open(fc, nodeid, file, opcode, &outarg);
if (err) {
fuse_file_free(ff);
return err;
}
if (isdir)
outarg.open_flags &= ~FOPEN_DIRECT_IO;
ff->fh = outarg.fh;
ff->nodeid = nodeid;
ff->open_flags = outarg.open_flags;
file->private_data = fuse_file_get(ff);
return 0;
} | 0 | []
| linux-2.6 | 0bd87182d3ab18a32a8e9175d3f68754c58e3432 | 234,791,511,539,770,720,000,000,000,000,000,000,000 | 28 | fuse: fix kunmap in fuse_ioctl_copy_user
Looks like another victim of the confusing kmap() vs kmap_atomic() API
differences.
Reported-by: Todor Gyumyushev <[email protected]>
Signed-off-by: Jens Axboe <[email protected]>
Signed-off-by: Miklos Szeredi <[email protected]>
Cc: Tejun Heo <[email protected]>
Cc: [email protected] |
static int sctp_setsockopt_delayed_ack(struct sock *sk,
char __user *optval, unsigned int optlen)
{
struct sctp_sack_info params;
struct sctp_transport *trans = NULL;
struct sctp_association *asoc = NULL;
struct sctp_sock *sp = sctp_sk(sk);
if (optlen == sizeof(struct sctp_sack_info)) {
if (copy_from_user(¶ms, optval, optlen))
return -EFAULT;
if (params.sack_delay == 0 && params.sack_freq == 0)
return 0;
} else if (optlen == sizeof(struct sctp_assoc_value)) {
pr_warn_ratelimited(DEPRECATED
"%s (pid %d) "
"Use of struct sctp_assoc_value in delayed_ack socket option.\n"
"Use struct sctp_sack_info instead\n",
current->comm, task_pid_nr(current));
if (copy_from_user(¶ms, optval, optlen))
return -EFAULT;
if (params.sack_delay == 0)
params.sack_freq = 1;
else
params.sack_freq = 0;
} else
return -EINVAL;
/* Validate value parameter. */
if (params.sack_delay > 500)
return -EINVAL;
/* Get association, if sack_assoc_id != 0 and the socket is a one
* to many style socket, and an association was not found, then
* the id was invalid.
*/
asoc = sctp_id2assoc(sk, params.sack_assoc_id);
if (!asoc && params.sack_assoc_id && sctp_style(sk, UDP))
return -EINVAL;
if (params.sack_delay) {
if (asoc) {
asoc->sackdelay =
msecs_to_jiffies(params.sack_delay);
asoc->param_flags =
sctp_spp_sackdelay_enable(asoc->param_flags);
} else {
sp->sackdelay = params.sack_delay;
sp->param_flags =
sctp_spp_sackdelay_enable(sp->param_flags);
}
}
if (params.sack_freq == 1) {
if (asoc) {
asoc->param_flags =
sctp_spp_sackdelay_disable(asoc->param_flags);
} else {
sp->param_flags =
sctp_spp_sackdelay_disable(sp->param_flags);
}
} else if (params.sack_freq > 1) {
if (asoc) {
asoc->sackfreq = params.sack_freq;
asoc->param_flags =
sctp_spp_sackdelay_enable(asoc->param_flags);
} else {
sp->sackfreq = params.sack_freq;
sp->param_flags =
sctp_spp_sackdelay_enable(sp->param_flags);
}
}
/* If change is for association, also apply to each transport. */
if (asoc) {
list_for_each_entry(trans, &asoc->peer.transport_addr_list,
transports) {
if (params.sack_delay) {
trans->sackdelay =
msecs_to_jiffies(params.sack_delay);
trans->param_flags =
sctp_spp_sackdelay_enable(trans->param_flags);
}
if (params.sack_freq == 1) {
trans->param_flags =
sctp_spp_sackdelay_disable(trans->param_flags);
} else if (params.sack_freq > 1) {
trans->sackfreq = params.sack_freq;
trans->param_flags =
sctp_spp_sackdelay_enable(trans->param_flags);
}
}
}
return 0;
} | 0 | [
"CWE-617",
"CWE-362"
]
| linux | 2dcab598484185dea7ec22219c76dcdd59e3cb90 | 156,234,082,536,652,700,000,000,000,000,000,000,000 | 98 | sctp: avoid BUG_ON on sctp_wait_for_sndbuf
Alexander Popov reported that an application may trigger a BUG_ON in
sctp_wait_for_sndbuf if the socket tx buffer is full, a thread is
waiting on it to queue more data and meanwhile another thread peels off
the association being used by the first thread.
This patch replaces the BUG_ON call with a proper error handling. It
will return -EPIPE to the original sendmsg call, similarly to what would
have been done if the association wasn't found in the first place.
Acked-by: Alexander Popov <[email protected]>
Signed-off-by: Marcelo Ricardo Leitner <[email protected]>
Reviewed-by: Xin Long <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
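A hedged sketch of the shape of the fix inside the sndbuf wait loop: a dead (peeled-off) association becomes an error, not a BUG_ON. Illustrative, not the exact patch:

```c
for (;;) {
	if (asoc->base.dead) {
		err = -EPIPE;	/* association vanished underneath us */
		goto out;
	}
	/* ... otherwise sleep and re-check sndbuf space ... */
}
```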
xfs_test_remount_options(
struct super_block *sb,
char *options)
{
int error = 0;
struct xfs_mount *tmp_mp;
tmp_mp = kmem_zalloc(sizeof(*tmp_mp), KM_MAYFAIL);
if (!tmp_mp)
return -ENOMEM;
tmp_mp->m_super = sb;
error = xfs_parseargs(tmp_mp, options);
xfs_free_fsname(tmp_mp);
kmem_free(tmp_mp);
return error;
} | 0 | [
"CWE-416"
]
| linux | c9fbd7bbc23dbdd73364be4d045e5d3612cf6e82 | 222,074,395,044,159,040,000,000,000,000,000,000,000 | 18 | xfs: clear sb->s_fs_info on mount failure
We recently had an oops reported on a 4.14 kernel in
xfs_reclaim_inodes_count() where sb->s_fs_info pointed to garbage
and so the m_perag_tree lookup walked into lala land.
Essentially, the machine was under memory pressure when the mount
was being run, xfs_fs_fill_super() failed after allocating the
xfs_mount and attaching it to sb->s_fs_info. It then cleaned up and
freed the xfs_mount, but the sb->s_fs_info field still pointed to
the freed memory. Hence when the superblock shrinker then ran
it fell off the bad pointer.
With the superblock shrinker problem fixed at the VFS level, this
stale s_fs_info pointer is still a problem - we use it
unconditionally in ->put_super when the superblock is being torn
down, and hence we can still trip over it after a ->fill_super
call failure. Hence we need to clear s_fs_info if
xfs-fs_fill_super() fails, and we need to check if it's valid in
the places it can potentially be dereferenced after a ->fill_super
failure.
Signed-Off-By: Dave Chinner <[email protected]>
Reviewed-by: Darrick J. Wong <[email protected]>
Signed-off-by: Darrick J. Wong <[email protected]> |
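A hedged sketch of the failure-path hygiene the commit describes: clear the back-pointer before freeing the mount so nothing can dereference stale memory. The label and free calls are illustrative:

```c
out_free:
	sb->s_fs_info = NULL;	/* must happen before the xfs_mount is freed */
	xfs_free_fsname(mp);
	kfree(mp);
	return error;
```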
static int ip6_forward_proxy_check(struct sk_buff *skb)
{
struct ipv6hdr *hdr = ipv6_hdr(skb);
u8 nexthdr = hdr->nexthdr;
int offset;
if (ipv6_ext_hdr(nexthdr)) {
offset = ipv6_skip_exthdr(skb, sizeof(*hdr), &nexthdr);
if (offset < 0)
return 0;
} else
offset = sizeof(struct ipv6hdr);
if (nexthdr == IPPROTO_ICMPV6) {
struct icmp6hdr *icmp6;
if (!pskb_may_pull(skb, (skb_network_header(skb) +
offset + 1 - skb->data)))
return 0;
icmp6 = (struct icmp6hdr *)(skb_network_header(skb) + offset);
switch (icmp6->icmp6_type) {
case NDISC_ROUTER_SOLICITATION:
case NDISC_ROUTER_ADVERTISEMENT:
case NDISC_NEIGHBOUR_SOLICITATION:
case NDISC_NEIGHBOUR_ADVERTISEMENT:
case NDISC_REDIRECT:
/* For reaction involving unicast neighbor discovery
* message destined to the proxied address, pass it to
* input function.
*/
return 1;
default:
break;
}
}
/*
* The proxying router can't forward traffic sent to a link-local
* address, so signal the sender and discard the packet. This
* behavior is clarified by the MIPv6 specification.
*/
if (ipv6_addr_type(&hdr->daddr) & IPV6_ADDR_LINKLOCAL) {
dst_link_failure(skb);
return -1;
}
return 0;
} | 0 | [
"CWE-703"
]
| linux | 87c48fa3b4630905f98268dde838ee43626a060c | 195,043,655,762,327,400,000,000,000,000,000,000,000 | 50 | ipv6: make fragment identifications less predictable
IPv6 fragment identification generation is way beyond what we use for
IPv4: it uses a single generator. It's not scalable and allows DOS
attacks.
Now that inetpeer is IPv6 aware, we can use it to provide a more secure and
scalable frag ident generator (per destination, instead of system wide)
This patch :
1) defines a new secure_ipv6_id() helper
2) extends inet_getid() to provide 32bit results
3) extends ipv6_select_ident() with a new dest parameter
Reported-by: Fernando Gont <[email protected]>
Signed-off-by: Eric Dumazet <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
mbed_connect_step3(struct Curl_easy *data, struct connectdata *conn,
int sockindex)
{
CURLcode retcode = CURLE_OK;
struct ssl_connect_data *connssl = &conn->ssl[sockindex];
struct ssl_backend_data *backend = connssl->backend;
DEBUGASSERT(ssl_connect_3 == connssl->connecting_state);
if(SSL_SET_OPTION(primary.sessionid)) {
int ret;
mbedtls_ssl_session *our_ssl_sessionid;
void *old_ssl_sessionid = NULL;
bool isproxy = SSL_IS_PROXY() ? TRUE : FALSE;
our_ssl_sessionid = malloc(sizeof(mbedtls_ssl_session));
if(!our_ssl_sessionid)
return CURLE_OUT_OF_MEMORY;
mbedtls_ssl_session_init(our_ssl_sessionid);
ret = mbedtls_ssl_get_session(&backend->ssl, our_ssl_sessionid);
if(ret) {
if(ret != MBEDTLS_ERR_SSL_ALLOC_FAILED)
mbedtls_ssl_session_free(our_ssl_sessionid);
free(our_ssl_sessionid);
failf(data, "mbedtls_ssl_get_session returned -0x%x", -ret);
return CURLE_SSL_CONNECT_ERROR;
}
/* If there's already a matching session in the cache, delete it */
Curl_ssl_sessionid_lock(data);
if(!Curl_ssl_getsessionid(data, conn, isproxy, &old_ssl_sessionid, NULL,
sockindex))
Curl_ssl_delsessionid(data, old_ssl_sessionid);
retcode = Curl_ssl_addsessionid(data, conn, isproxy, our_ssl_sessionid,
0, sockindex);
Curl_ssl_sessionid_unlock(data);
if(retcode) {
mbedtls_ssl_session_free(our_ssl_sessionid);
free(our_ssl_sessionid);
failf(data, "failed to store ssl session");
return retcode;
}
}
connssl->connecting_state = ssl_connect_done;
return CURLE_OK;
} | 0 | [
"CWE-290"
]
| curl | b09c8ee15771c614c4bf3ddac893cdb12187c844 | 222,877,430,547,505,030,000,000,000,000,000,000,000 | 51 | vtls: add 'isproxy' argument to Curl_ssl_get/addsessionid()
To make sure we set and extract the correct session.
Reported-by: Mingtao Yang
Bug: https://curl.se/docs/CVE-2021-22890.html
CVE-2021-22890 |
static void ext4_mb_mark_free_simple(struct super_block *sb,
void *buddy, ext4_grpblk_t first, ext4_grpblk_t len,
struct ext4_group_info *grp)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
ext4_grpblk_t min;
ext4_grpblk_t max;
ext4_grpblk_t chunk;
unsigned int border;
BUG_ON(len > EXT4_CLUSTERS_PER_GROUP(sb));
border = 2 << sb->s_blocksize_bits;
while (len > 0) {
/* find how many blocks can be covered since this position */
max = ffs(first | border) - 1;
/* find how many blocks of power 2 we need to mark */
min = fls(len) - 1;
if (max < min)
min = max;
chunk = 1 << min;
/* mark multiblock chunks only */
grp->bb_counters[min]++;
if (min > 0)
mb_clear_bit(first >> min,
buddy + sbi->s_mb_offsets[min]);
len -= chunk;
first += chunk;
}
} | 0 | [
"CWE-416"
]
| linux | 8844618d8aa7a9973e7b527d038a2a589665002c | 106,834,230,626,175,670,000,000,000,000,000,000,000 | 35 | ext4: only look at the bg_flags field if it is valid
The bg_flags field in the block group descriptors is only valid if the
uninit_bg or metadata_csum feature is enabled. We were not
consistently looking at this field; fix this.
Also block group #0 must never have uninitialized allocation bitmaps,
or need to be zeroed, since that's where the root inode, and other
special inodes are set up. Check for these conditions and mark the
file system as corrupted if they are detected.
This addresses CVE-2018-10876.
https://bugzilla.kernel.org/show_bug.cgi?id=199403
Signed-off-by: Theodore Ts'o <[email protected]>
Cc: [email protected] |
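A hedged sketch of the two rules the commit states; helper and flag names follow ext4 conventions, but the exact placement is an assumption:

```c
/* bg_flags is only meaningful with uninit_bg/metadata_csum */
if (!ext4_has_group_desc_csum(sb))
	return 0;

/* group 0 holds the root inode: it must never be uninitialized */
if (block_group == 0 &&
    (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT |
				 EXT4_BG_INODE_UNINIT)))
	return -EFSCORRUPTED;
```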
bool WireFormat::ParseAndMergeMessageSetField(uint32_t field_number,
const FieldDescriptor* field,
Message* message,
io::CodedInputStream* input) {
const Reflection* message_reflection = message->GetReflection();
if (field == NULL) {
// We store unknown MessageSet extensions as groups.
return SkipMessageSetField(
input, field_number, message_reflection->MutableUnknownFields(message));
} else if (field->is_repeated() ||
field->type() != FieldDescriptor::TYPE_MESSAGE) {
// This shouldn't happen as we only allow optional message extensions to
// MessageSet.
GOOGLE_LOG(ERROR) << "Extensions of MessageSets must be optional messages.";
return false;
} else {
Message* sub_message = message_reflection->MutableMessage(
message, field, input->GetExtensionFactory());
return WireFormatLite::ReadMessage(input, sub_message);
}
} | 0 | [
"CWE-703"
]
| protobuf | d1635e1496f51e0d5653d856211e8821bc47adc4 | 32,564,256,162,248,420,000,000,000,000,000,000,000 | 21 | Apply patch |
bool IsNumpyHalf(PyObject* obj) {
return PyIsInstance(obj, &PyHalfArrType_Type);
} | 0 | [
"CWE-20",
"CWE-476"
]
| tensorflow | 5ac1b9e24ff6afc465756edf845d2e9660bd34bf | 135,182,860,121,414,700,000,000,000,000,000,000,000 | 3 | Fix segfault when attempting to convert string to float16.
To make sure this gets fixed, add test for converting string to any numeric type.
PiperOrigin-RevId: 286650886
Change-Id: I81f770ec2bbd33a863e8057ce198c679912fa8e0 |
xfs_attr_shortform_getvalue(xfs_da_args_t *args)
{
xfs_attr_shortform_t *sf;
xfs_attr_sf_entry_t *sfe;
int i;
ASSERT(args->dp->i_afp->if_flags == XFS_IFINLINE);
sf = (xfs_attr_shortform_t *)args->dp->i_afp->if_u1.if_data;
sfe = &sf->list[0];
for (i = 0; i < sf->hdr.count;
sfe = XFS_ATTR_SF_NEXTENTRY(sfe), i++) {
if (sfe->namelen != args->namelen)
continue;
if (memcmp(args->name, sfe->nameval, args->namelen) != 0)
continue;
if (!xfs_attr_namesp_match(args->flags, sfe->flags))
continue;
if (args->flags & ATTR_KERNOVAL) {
args->valuelen = sfe->valuelen;
return -EEXIST;
}
if (args->valuelen < sfe->valuelen) {
args->valuelen = sfe->valuelen;
return -ERANGE;
}
args->valuelen = sfe->valuelen;
memcpy(args->value, &sfe->nameval[args->namelen],
args->valuelen);
return -EEXIST;
}
return -ENOATTR;
} | 0 | [
"CWE-476"
]
| linux | bb3d48dcf86a97dc25fe9fc2c11938e19cb4399a | 250,652,225,664,202,960,000,000,000,000,000,000,000 | 32 | xfs: don't call xfs_da_shrink_inode with NULL bp
xfs_attr3_leaf_create may have errored out before instantiating a buffer,
for example if the blkno is out of range. In that case there is no work
to do to remove it, and in fact xfs_da_shrink_inode will lead to an oops
if we try.
This also seems to fix a flaw where the original error from
xfs_attr3_leaf_create gets overwritten in the cleanup case, and it
removes a pointless assignment to bp which isn't used after this.
Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=199969
Reported-by: Xu, Wen <[email protected]>
Tested-by: Xu, Wen <[email protected]>
Signed-off-by: Eric Sandeen <[email protected]>
Reviewed-by: Darrick J. Wong <[email protected]>
Signed-off-by: Darrick J. Wong <[email protected]> |
rsRetVal msgQueryInterface(void) { return RS_RET_NOT_IMPLEMENTED; } | 0 | [
"CWE-772"
]
| rsyslog | 8083bd1433449fd2b1b79bf759f782e0f64c0cd2 | 176,676,777,351,193,400,000,000,000,000,000,000,000 | 1 | backporting abort condition fix from 5.7.7 |
void mce_setup(struct mce *m)
{
memset(m, 0, sizeof(struct mce));
m->cpu = m->extcpu = smp_processor_id();
/* We hope get_seconds stays lockless */
m->time = get_seconds();
m->cpuvendor = boot_cpu_data.x86_vendor;
m->cpuid = cpuid_eax(1);
m->socketid = cpu_data(m->extcpu).phys_proc_id;
m->apicid = cpu_data(m->extcpu).initial_apicid;
rdmsrl(MSR_IA32_MCG_CAP, m->mcgcap);
if (this_cpu_has(X86_FEATURE_INTEL_PPIN))
rdmsrl(MSR_PPIN, m->ppin);
m->microcode = boot_cpu_data.microcode;
} | 0 | [
"CWE-362"
]
| linux | b3b7c4795ccab5be71f080774c45bbbcc75c2aaf | 162,137,238,635,175,970,000,000,000,000,000,000,000 | 17 | x86/MCE: Serialize sysfs changes
The check_interval file in
/sys/devices/system/machinecheck/machinecheck<cpu number>
directory is a global timer value for MCE polling. If it is changed by one
CPU, mce_restart() broadcasts the event to other CPUs to delete and restart
the MCE polling timer and __mcheck_cpu_init_timer() reinitializes the
mce_timer variable.
If more than one CPU writes a specific value to the check_interval file
concurrently, mce_timer is not protected from such concurrent accesses and
all kinds of explosions happen. Since only root can write to those sysfs
variables, the issue is not a big deal security-wise.
However, concurrent writes to these configuration variables is void of
reason so the proper thing to do is to serialize the access with a mutex.
Boris:
- Make store_int_with_restart() use device_store_ulong() to filter out
negative intervals
- Limit min interval to 1 second
- Correct locking
- Massage commit message
Signed-off-by: Seunghun Han <[email protected]>
Signed-off-by: Borislav Petkov <[email protected]>
Signed-off-by: Thomas Gleixner <[email protected]>
Cc: Greg Kroah-Hartman <[email protected]>
Cc: Tony Luck <[email protected]>
Cc: linux-edac <[email protected]>
Cc: [email protected]
Link: http://lkml.kernel.org/r/[email protected] |
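A hedged sketch of the serialization the commit describes: one mutex around the sysfs store path so concurrent writers cannot race the timer re-initialization. Close in shape to the described fix, but not claimed verbatim:

```c
static DEFINE_MUTEX(mce_sysfs_mutex);

static ssize_t store_int_with_restart(struct device *s,
				      struct device_attribute *attr,
				      const char *buf, size_t size)
{
	ssize_t ret;

	mutex_lock(&mce_sysfs_mutex);
	ret = device_store_ulong(s, attr, buf, size);
	mce_restart();	/* re-init the polling timer under the lock */
	mutex_unlock(&mce_sysfs_mutex);

	return ret;
}
```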
nfs4_open_revalidate(struct inode *dir, struct dentry *dentry, int openflags, struct nameidata *nd)
{
struct path path = {
.mnt = nd->path.mnt,
.dentry = dentry,
};
struct rpc_cred *cred;
struct nfs4_state *state;
fmode_t fmode = openflags & (FMODE_READ | FMODE_WRITE);
cred = rpc_lookup_cred();
if (IS_ERR(cred))
return PTR_ERR(cred);
state = nfs4_do_open(dir, &path, fmode, openflags, NULL, cred);
put_rpccred(cred);
if (IS_ERR(state)) {
switch (PTR_ERR(state)) {
case -EPERM:
case -EACCES:
case -EDQUOT:
case -ENOSPC:
case -EROFS:
lookup_instantiate_filp(nd, (struct dentry *)state, NULL);
return 1;
default:
goto out_drop;
}
}
if (state->inode == dentry->d_inode) {
nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
nfs4_intent_set_file(nd, &path, state, fmode);
return 1;
}
nfs4_close_sync(&path, state, fmode);
out_drop:
d_drop(dentry);
return 0;
} | 0 | [
"CWE-703"
]
| linux | dc0b027dfadfcb8a5504f7d8052754bf8d501ab9 | 194,467,884,052,949,470,000,000,000,000,000,000,000 | 38 | NFSv4: Convert the open and close ops to use fmode
Signed-off-by: Trond Myklebust <[email protected]> |
static inline bool is_slave_direction(enum dma_transfer_direction direction)
{
return (direction == DMA_MEM_TO_DEV) || (direction == DMA_DEV_TO_MEM);
} | 0 | []
| linux | 7bced397510ab569d31de4c70b39e13355046387 | 13,638,227,979,862,110,000,000,000,000,000,000,000 | 4 | net_dma: simple removal
Per commit "77873803363c net_dma: mark broken" net_dma is no longer used
and there is no plan to fix it.
This is the mechanical removal of bits in CONFIG_NET_DMA ifdef guards.
Reverting the remainder of the net_dma induced changes is deferred to
subsequent patches.
Marked for stable due to Roman's report of a memory leak in
dma_pin_iovec_pages():
https://lkml.org/lkml/2014/9/3/177
Cc: Dave Jiang <[email protected]>
Cc: Vinod Koul <[email protected]>
Cc: David Whipple <[email protected]>
Cc: Alexander Duyck <[email protected]>
Cc: <[email protected]>
Reported-by: Roman Gushchin <[email protected]>
Acked-by: David S. Miller <[email protected]>
Signed-off-by: Dan Williams <[email protected]> |
static OPJ_BOOL bmp_read_rle8_data(FILE* IN, OPJ_UINT8* pData, OPJ_UINT32 stride, OPJ_UINT32 width, OPJ_UINT32 height)
{
OPJ_UINT32 x, y;
OPJ_UINT8 *pix;
const OPJ_UINT8 *beyond;
beyond = pData + stride * height;
pix = pData;
x = y = 0U;
while (y < height)
{
int c = getc(IN);
if (c) {
int j;
OPJ_UINT8 c1 = (OPJ_UINT8)getc(IN);
for (j = 0; (j < c) && (x < width) && ((OPJ_SIZE_T)pix < (OPJ_SIZE_T)beyond); j++, x++, pix++) {
*pix = c1;
}
}
else {
c = getc(IN);
if (c == 0x00) { /* EOL */
x = 0;
++y;
pix = pData + y * stride + x;
}
else if (c == 0x01) { /* EOP */
break;
}
else if (c == 0x02) { /* MOVE by dxdy */
c = getc(IN);
x += (OPJ_UINT32)c;
c = getc(IN);
y += (OPJ_UINT32)c;
pix = pData + y * stride + x;
}
else /* 03 .. 255 */
{
int j;
for (j = 0; (j < c) && (x < width) && ((OPJ_SIZE_T)pix < (OPJ_SIZE_T)beyond); j++, x++, pix++)
{
OPJ_UINT8 c1 = (OPJ_UINT8)getc(IN);
*pix = c1;
}
if ((OPJ_UINT32)c & 1U) { /* skip padding byte */
getc(IN);
}
}
}
}/* while() */
return OPJ_TRUE;
} | 0 | [
"CWE-703",
"CWE-125"
]
| openjpeg | 15f081c89650dccee4aa4ae66f614c3fdb268767 | 329,800,992,912,237,940,000,000,000,000,000,000,000 | 55 | Fix Out-Of-Bounds Read in sycc42x_to_rgb function (#745)
42x Images with an odd x0/y0 lead to subsampled component starting at the
2nd column/line.
That is offset = comp->dx * comp->x0 - image->x0 = 1
Fix #726 |
void exec_status_dump(const ExecStatus *s, FILE *f, const char *prefix) {
char buf[FORMAT_TIMESTAMP_MAX];
assert(s);
assert(f);
if (s->pid <= 0)
return;
prefix = strempty(prefix);
fprintf(f,
"%sPID: "PID_FMT"\n",
prefix, s->pid);
if (dual_timestamp_is_set(&s->start_timestamp))
fprintf(f,
"%sStart Timestamp: %s\n",
prefix, format_timestamp(buf, sizeof(buf), s->start_timestamp.realtime));
if (dual_timestamp_is_set(&s->exit_timestamp))
fprintf(f,
"%sExit Timestamp: %s\n"
"%sExit Code: %s\n"
"%sExit Status: %i\n",
prefix, format_timestamp(buf, sizeof(buf), s->exit_timestamp.realtime),
prefix, sigchld_code_to_string(s->code),
prefix, s->status);
} | 0 | [
"CWE-269"
]
| systemd | f69567cbe26d09eac9d387c0be0fc32c65a83ada | 165,974,494,224,937,630,000,000,000,000,000,000,000 | 29 | core: expose SUID/SGID restriction as new unit setting RestrictSUIDSGID= |
static void drop_sysctl_table(struct ctl_table_header *header)
{
struct ctl_dir *parent = header->parent;
if (--header->nreg)
return;
put_links(header);
start_unregistering(header);
if (!--header->count)
kfree_rcu(header, rcu);
if (parent)
drop_sysctl_table(&parent->header);
} | 1 | [
"CWE-476"
]
| linux | 23da9588037ecdd4901db76a5b79a42b529c4ec3 | 159,461,115,907,145,170,000,000,000,000,000,000,000 | 15 | fs/proc/proc_sysctl.c: fix NULL pointer dereference in put_links
Syzkaller reports:
kasan: GPF could be caused by NULL-ptr deref or user memory access
general protection fault: 0000 [#1] SMP KASAN PTI
CPU: 1 PID: 5373 Comm: syz-executor.0 Not tainted 5.0.0-rc8+ #3
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.10.2-1ubuntu1 04/01/2014
RIP: 0010:put_links+0x101/0x440 fs/proc/proc_sysctl.c:1599
Code: 00 0f 85 3a 03 00 00 48 8b 43 38 48 89 44 24 20 48 83 c0 38 48 89 c2 48 89 44 24 28 48 b8 00 00 00 00 00 fc ff df 48 c1 ea 03 <80> 3c 02 00 0f 85 fe 02 00 00 48 8b 74 24 20 48 c7 c7 60 2a 9d 91
RSP: 0018:ffff8881d828f238 EFLAGS: 00010202
RAX: dffffc0000000000 RBX: ffff8881e01b1140 RCX: ffffffff8ee98267
RDX: 0000000000000007 RSI: ffffc90001479000 RDI: ffff8881e01b1178
RBP: dffffc0000000000 R08: ffffed103ee27259 R09: ffffed103ee27259
R10: 0000000000000001 R11: ffffed103ee27258 R12: fffffffffffffff4
R13: 0000000000000006 R14: ffff8881f59838c0 R15: dffffc0000000000
FS: 00007f072254f700(0000) GS:ffff8881f7100000(0000) knlGS:0000000000000000
CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
CR2: 00007fff8b286668 CR3: 00000001f0542002 CR4: 00000000007606e0
DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
PKRU: 55555554
Call Trace:
drop_sysctl_table+0x152/0x9f0 fs/proc/proc_sysctl.c:1629
get_subdir fs/proc/proc_sysctl.c:1022 [inline]
__register_sysctl_table+0xd65/0x1090 fs/proc/proc_sysctl.c:1335
br_netfilter_init+0xbc/0x1000 [br_netfilter]
do_one_initcall+0xfa/0x5ca init/main.c:887
do_init_module+0x204/0x5f6 kernel/module.c:3460
load_module+0x66b2/0x8570 kernel/module.c:3808
__do_sys_finit_module+0x238/0x2a0 kernel/module.c:3902
do_syscall_64+0x147/0x600 arch/x86/entry/common.c:290
entry_SYSCALL_64_after_hwframe+0x49/0xbe
RIP: 0033:0x462e99
Code: f7 d8 64 89 02 b8 ff ff ff ff c3 66 0f 1f 44 00 00 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 c7 c1 bc ff ff ff f7 d8 64 89 01 48
RSP: 002b:00007f072254ec58 EFLAGS: 00000246 ORIG_RAX: 0000000000000139
RAX: ffffffffffffffda RBX: 000000000073bf00 RCX: 0000000000462e99
RDX: 0000000000000000 RSI: 0000000020000280 RDI: 0000000000000003
RBP: 00007f072254ec70 R08: 0000000000000000 R09: 0000000000000000
R10: 0000000000000000 R11: 0000000000000246 R12: 00007f072254f6bc
R13: 00000000004bcefa R14: 00000000006f6fb0 R15: 0000000000000004
Modules linked in: br_netfilter(+) dvb_usb_dibusb_mc_common dib3000mc dibx000_common dvb_usb_dibusb_common dvb_usb_dw2102 dvb_usb classmate_laptop palmas_regulator cn videobuf2_v4l2 v4l2_common snd_soc_bd28623 mptbase snd_usb_usx2y snd_usbmidi_lib snd_rawmidi wmi libnvdimm lockd sunrpc grace rc_kworld_pc150u rc_core rtc_da9063 sha1_ssse3 i2c_cros_ec_tunnel adxl34x_spi adxl34x nfnetlink lib80211 i5500_temp dvb_as102 dvb_core videobuf2_common videodev media videobuf2_vmalloc videobuf2_memops udc_core lnbp22 leds_lp3952 hid_roccat_ryos s1d13xxxfb mtd vport_geneve openvswitch nf_conncount nf_nat_ipv6 nsh geneve udp_tunnel ip6_udp_tunnel snd_soc_mt6351 sis_agp phylink snd_soc_adau1761_spi snd_soc_adau1761 snd_soc_adau17x1 snd_soc_core snd_pcm_dmaengine ac97_bus snd_compress snd_soc_adau_utils snd_soc_sigmadsp_regmap snd_soc_sigmadsp raid_class hid_roccat_konepure hid_roccat_common hid_roccat c2port_duramar2150 core mdio_bcm_unimac iptable_security iptable_raw iptable_mangle
iptable_nat nf_nat_ipv4 nf_nat nf_conntrack nf_defrag_ipv6 nf_defrag_ipv4 iptable_filter bpfilter ip6_vti ip_vti ip_gre ipip sit tunnel4 ip_tunnel hsr veth netdevsim devlink vxcan batman_adv cfg80211 rfkill chnl_net caif nlmon dummy team bonding vcan bridge stp llc ip6_gre gre ip6_tunnel tunnel6 tun crct10dif_pclmul crc32_pclmul crc32c_intel ghash_clmulni_intel joydev mousedev ide_pci_generic piix aesni_intel aes_x86_64 ide_core crypto_simd atkbd cryptd glue_helper serio_raw ata_generic pata_acpi i2c_piix4 floppy sch_fq_codel ip_tables x_tables ipv6 [last unloaded: lm73]
Dumping ftrace buffer:
(ftrace buffer empty)
---[ end trace 770020de38961fd0 ]---
A new dir entry can be created in get_subdir with its 'header->parent'
set to NULL. Only after insert_header succeeds is it set to 'dir';
otherwise 'header->parent' stays NULL and drop_sysctl_table is called.
However, in the error-handling path of get_subdir, drop_sysctl_table is
also called on 'new->header' regardless of the value of its parent
pointer. Then put_links is called, which triggers a NULL-ptr deref when
accessing a member of header->parent.
In fact we have multiple error paths that call drop_sysctl_table() there:
upon failure of insert_links() we also call drop_sysctl_table(), and even
in the successful case of __register_sysctl_table() we still always call
drop_sysctl_table(). This patch fixes it.
Link: http://lkml.kernel.org/r/[email protected]
Fixes: 0e47c99d7fe25 ("sysctl: Replace root_list with links between sysctl_table_sets")
Signed-off-by: YueHaibing <[email protected]>
Reported-by: Hulk Robot <[email protected]>
Acked-by: Luis Chamberlain <[email protected]>
Cc: Kees Cook <[email protected]>
Cc: Alexey Dobriyan <[email protected]>
Cc: Alexei Starovoitov <[email protected]>
Cc: Daniel Borkmann <[email protected]>
Cc: Al Viro <[email protected]>
Cc: Eric W. Biederman <[email protected]>
Cc: <[email protected]> [3.4+]
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
sym_swapcase(VALUE sym)
{
return rb_str_intern(rb_str_swapcase(rb_id2str(SYM2ID(sym))));
} | 0 | [
"CWE-119"
]
| ruby | 1c2ef610358af33f9ded3086aa2d70aac03dcac5 | 225,381,412,667,699,070,000,000,000,000,000,000,000 | 4 | * string.c (rb_str_justify): CVE-2009-4124.
Fixes a bug reported by
Emmanouel Kellinis <Emmanouel.Kellinis AT kpmg.co.uk>, KPMG London;
Patch by nobu.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@26038 b2dd03c8-39d4-4d8f-98ff-823fe69b080e |
static void burl_normalize_qs20_to_plus (buffer *b, int qs)
{
const char * const s = b->ptr;
const int used = qs < 0 ? 0 : (int)buffer_string_length(b);
int i;
if (qs < 0) return;
for (i = qs+1; i < used; ++i) {
if (s[i] == '%' && s[i+1] == '2' && s[i+2] == '0') break;
}
if (i != used) burl_normalize_qs20_to_plus_fix(b, i);
} | 0 | [
"CWE-190"
]
| lighttpd1.4 | 32120d5b8b3203fc21ccb9eafb0eaf824bb59354 | 264,348,630,205,717,570,000,000,000,000,000,000,000 | 11 | [core] fix abort in http-parseopts (fixes #2945)
fix abort in server.http-parseopts with url-path-2f-decode enabled
(thx stze)
x-ref:
"Security - SIGABRT during GET request handling with url-path-2f-decode enabled"
https://redmine.lighttpd.net/issues/2945 |
mysql_refresh(MYSQL *mysql,uint options)
{
uchar bits[1];
DBUG_ENTER("mysql_refresh");
bits[0]= (uchar) options;
DBUG_RETURN(simple_command(mysql, COM_REFRESH, bits, 1, 0));
} | 0 | []
| mysql-server | 3d8134d2c9b74bc8883ffe2ef59c168361223837 | 72,883,765,491,853,150,000,000,000,000,000,000,000 | 7 | Bug#25988681: USE-AFTER-FREE IN MYSQL_STMT_CLOSE()
Description: If mysql_stmt_close() encountered an error,
it recorded the error in the prepared statement
but then freed the memory assigned to that
statement. If mysql_stmt_error() was then used
to get the error information, it resulted
in a use-after-free.
In all cases where mysql_stmt_close() can
fail, the error will have been set by
cli_advanced_command in the MYSQL structure.
Solution: Don't copy error from MYSQL using set_stmt_errmsg.
There is no automated way to test the fix since
it is in mysql_stmt_close() which does not expect
any reply from server.
Reviewed-By: Georgi Kodinov <[email protected]>
Reviewed-By: Ramil Kalimullin <[email protected]> |
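A minimal sketch of the use-after-free shape described above and one way out (illustrative C, not the client library's code; the struct and function names are hypothetical). The error travels via caller-owned storage, so freeing the statement cannot invalidate what the caller later reads:

#include <stdio.h>
#include <stdlib.h>

struct stmt { char errmsg[64]; };

/* Fixed shape: report the error through a buffer the caller owns;
 * st->errmsg must never be consulted after free(st). */
static int stmt_close(struct stmt *st, int server_err,
                      char *err_out, size_t err_out_len)
{
    if (server_err)
        snprintf(err_out, err_out_len, "close failed: %d", server_err);
    free(st);                  /* st is dead from here on */
    return server_err;
}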
void jpc_tagtree_setvalue(jpc_tagtree_t *tree, jpc_tagtreenode_t *leaf,
int value)
{
jpc_tagtreenode_t *node;
/* Avoid compiler warnings about unused parameters. */
tree = 0;
assert(value >= 0);
node = leaf;
while (node && node->value_ > value) {
node->value_ = value;
node = node->parent_;
}
} | 0 | [
"CWE-189"
]
| jasper | 3c55b399c36ef46befcb21e4ebc4799367f89684 | 171,921,017,682,294,400,000,000,000,000,000,000,000 | 16 | At many places in the code, jas_malloc or jas_recalloc was being
invoked with the size argument being computed in a manner that would not
allow integer overflow to be detected. Now, these places in the code
have been modified to use special-purpose memory allocation functions
(e.g., jas_alloc2, jas_alloc3, jas_realloc2) that check for overflow.
This should fix many security problems. |
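A minimal sketch of the overflow-checked allocation idea (checked_alloc2 is a hypothetical stand-in; the real jas_alloc2 may differ in detail). The point is to reject n * size before it can wrap:

#include <stdlib.h>
#include <stdint.h>

/* Refuse the request instead of letting n * size wrap around. */
static void *checked_alloc2(size_t n, size_t size)
{
    if (size != 0 && n > SIZE_MAX / size)
        return NULL;            /* n * size would overflow size_t */
    return malloc(n * size);
}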
void license_generate_randoms(rdpLicense* license)
{
#ifdef LICENSE_NULL_CLIENT_RANDOM
ZeroMemory(license->ClientRandom, CLIENT_RANDOM_LENGTH); /* ClientRandom */
#else
winpr_RAND(license->ClientRandom, CLIENT_RANDOM_LENGTH); /* ClientRandom */
#endif
#ifdef LICENSE_NULL_PREMASTER_SECRET
ZeroMemory(license->PremasterSecret, PREMASTER_SECRET_LENGTH); /* PremasterSecret */
#else
winpr_RAND(license->PremasterSecret, PREMASTER_SECRET_LENGTH); /* PremasterSecret */
#endif
} | 0 | [
"CWE-125"
]
| FreeRDP | 6ade7b4cbfd71c54b3d724e8f2d6ac76a58e879a | 45,863,649,747,680,450,000,000,000,000,000,000,000 | 14 | Fixed OOB Read in license_read_new_or_upgrade_license_packet
CVE-2020-11099 thanks to @antonio-morales for finding this. |
static void hw_scan_work(struct work_struct *work)
{
struct mac80211_hwsim_data *hwsim =
container_of(work, struct mac80211_hwsim_data, hw_scan.work);
struct cfg80211_scan_request *req = hwsim->hw_scan_request;
int dwell, i;
mutex_lock(&hwsim->mutex);
if (hwsim->scan_chan_idx >= req->n_channels) {
struct cfg80211_scan_info info = {
.aborted = false,
};
wiphy_dbg(hwsim->hw->wiphy, "hw scan complete\n");
ieee80211_scan_completed(hwsim->hw, &info);
hwsim->hw_scan_request = NULL;
hwsim->hw_scan_vif = NULL;
hwsim->tmp_chan = NULL;
mutex_unlock(&hwsim->mutex);
return;
}
wiphy_dbg(hwsim->hw->wiphy, "hw scan %d MHz\n",
req->channels[hwsim->scan_chan_idx]->center_freq);
hwsim->tmp_chan = req->channels[hwsim->scan_chan_idx];
if (hwsim->tmp_chan->flags & (IEEE80211_CHAN_NO_IR |
IEEE80211_CHAN_RADAR) ||
!req->n_ssids) {
dwell = 120;
} else {
dwell = 30;
/* send probes */
for (i = 0; i < req->n_ssids; i++) {
struct sk_buff *probe;
struct ieee80211_mgmt *mgmt;
probe = ieee80211_probereq_get(hwsim->hw,
hwsim->scan_addr,
req->ssids[i].ssid,
req->ssids[i].ssid_len,
req->ie_len);
if (!probe)
continue;
mgmt = (struct ieee80211_mgmt *) probe->data;
memcpy(mgmt->da, req->bssid, ETH_ALEN);
memcpy(mgmt->bssid, req->bssid, ETH_ALEN);
if (req->ie_len)
skb_put_data(probe, req->ie, req->ie_len);
local_bh_disable();
mac80211_hwsim_tx_frame(hwsim->hw, probe,
hwsim->tmp_chan);
local_bh_enable();
}
}
ieee80211_queue_delayed_work(hwsim->hw, &hwsim->hw_scan,
msecs_to_jiffies(dwell));
hwsim->survey_data[hwsim->scan_chan_idx].channel = hwsim->tmp_chan;
hwsim->survey_data[hwsim->scan_chan_idx].start = jiffies;
hwsim->survey_data[hwsim->scan_chan_idx].end =
jiffies + msecs_to_jiffies(dwell);
hwsim->scan_chan_idx++;
mutex_unlock(&hwsim->mutex);
} | 0 | [
"CWE-703",
"CWE-772"
]
| linux | 0ddcff49b672239dda94d70d0fcf50317a9f4b51 | 112,801,821,166,363,440,000,000,000,000,000,000,000 | 67 | mac80211_hwsim: fix possible memory leak in hwsim_new_radio_nl()
'hwname' is malloced in hwsim_new_radio_nl() and should be freed
before leaving through the error handling paths; otherwise it will
cause a memory leak.
Fixes: ff4dd73dd2b4 ("mac80211_hwsim: check HWSIM_ATTR_RADIO_NAME length")
Signed-off-by: Wei Yongjun <[email protected]>
Reviewed-by: Ben Hutchings <[email protected]>
Signed-off-by: Johannes Berg <[email protected]> |
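A minimal sketch of the goto-unwind idiom such leak fixes rely on (illustrative, not the driver code). Every exit after the allocation must route through the label that frees it:

#include <stdlib.h>

static int new_radio(int fail_later)
{
    char *hwname = malloc(64);
    if (!hwname)
        return -1;
    if (fail_later)
        goto err_free;   /* the buggy version returned here, leaking hwname */
    /* ... register the radio; ownership of hwname is not handed off ... */
    free(hwname);
    return 0;
err_free:
    free(hwname);
    return -1;
}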
static int ext4_li_info_new(void)
{
struct ext4_lazy_init *eli = NULL;
eli = kzalloc(sizeof(*eli), GFP_KERNEL);
if (!eli)
return -ENOMEM;
eli->li_task = NULL;
INIT_LIST_HEAD(&eli->li_request_list);
mutex_init(&eli->li_list_mtx);
init_waitqueue_head(&eli->li_wait_daemon);
init_waitqueue_head(&eli->li_wait_task);
init_timer(&eli->li_timer);
eli->li_state |= EXT4_LAZYINIT_QUIT;
ext4_li_info = eli;
return 0;
} | 0 | [
"CWE-703"
]
| linux | 0449641130f5652b344ef6fa39fa019d7e94660a | 249,407,198,707,257,670,000,000,000,000,000,000,000 | 21 | ext4: init timer earlier to avoid a kernel panic in __save_error_info
During mount, when we fail to open the journal inode or root inode,
__save_error_info will call mod_timer. But s_err_report isn't
initialized yet at that point, and the kernel oopses. The details can
be found at https://bugzilla.kernel.org/show_bug.cgi?id=32082.
The best way would be to check whether the timer s_err_report is
initialized or not, but include/linux/timer.h offers no good function
for checking the status of a timer, so this patch just moves the
initialization of s_err_report earlier so that we can avoid the
kernel panic. The corresponding del_timer is also added in the
error path.
Reported-by: Sami Liedes <[email protected]>
Signed-off-by: Tao Ma <[email protected]>
Signed-off-by: "Theodore Ts'o" <[email protected]> |
Header::name()
{
return typedAttribute <StringAttribute> ("name").value();
} | 0 | [
"CWE-125"
]
| openexr | e79d2296496a50826a15c667bf92bdc5a05518b4 | 123,844,897,810,877,120,000,000,000,000,000,000,000 | 4 | fix memory leaks and invalid memory accesses
Signed-off-by: Peter Hillman <[email protected]> |
JVM_CurrentLoadedClass(JNIEnv *env)
{
jobject result;
Trc_SC_CurrentLoadedClass_Entry(env);
result = (*env)->CallStaticObjectMethod(env, jlClass, currentLoadedClassMID);
/* CMVC 95169: ensure that the result is a well defined error value if an exception occurred */
if ((*env)->ExceptionCheck(env)) {
result = NULL;
}
Trc_SC_CurrentLoadedClass_Exit(env, result);
return result;
} | 0 | [
"CWE-119"
]
| openj9 | 0971f22d88f42cf7332364ad7430e9bd8681c970 | 328,509,426,012,360,700,000,000,000,000,000,000,000 | 17 | Clean up jio_snprintf and jio_vfprintf
Fixes https://bugs.eclipse.org/bugs/show_bug.cgi?id=543659
Signed-off-by: Peter Bain <[email protected]> |
void createSyncMockAuthsAndVerifier(const StatusMap& statuses) {
for (const auto& it : statuses) {
auto mock_auth = std::make_unique<MockAuthenticator>();
EXPECT_CALL(*mock_auth, doVerify(_, _, _, _, _))
.WillOnce(Invoke([issuer = it.first, status = it.second](
Http::HeaderMap&, Tracing::Span&, std::vector<JwtLocationConstPtr>*,
SetPayloadCallback set_payload_cb, AuthenticatorCallback callback) {
if (status == Status::Ok) {
ProtobufWkt::Struct empty_struct;
set_payload_cb(issuer, empty_struct);
}
callback(status);
}));
EXPECT_CALL(*mock_auth, onDestroy());
mock_auths_[it.first] = std::move(mock_auth);
}
createVerifier();
} | 0 | [
"CWE-303",
"CWE-703"
]
| envoy | ea39e3cba652bcc4b11bb0d5c62b017e584d2e5a | 206,307,015,757,815,600,000,000,000,000,000,000,000 | 18 | jwt_authn: fix a bug where JWT with wrong issuer is allowed in allow_missing case (#15194)
[jwt] When allow_missing is used inside RequiresAny, requests with a JWT from the wrong issuer are accepted. This is a bug: allow_missing should only allow requests without any JWT. This change fixes the issue by preserving JwtUnknownIssuer in the allow_missing case.
Signed-off-by: Wayne Zhang <[email protected]> |
static unsigned int bsg_poll(struct file *file, poll_table *wait)
{
struct bsg_device *bd = file->private_data;
unsigned int mask = 0;
poll_wait(file, &bd->wq_done, wait);
poll_wait(file, &bd->wq_free, wait);
spin_lock_irq(&bd->lock);
if (!list_empty(&bd->done_list))
mask |= POLLIN | POLLRDNORM;
if (bd->queued_cmds >= bd->max_queue)
mask |= POLLOUT;
spin_unlock_irq(&bd->lock);
return mask;
} | 0 | [
"CWE-399"
]
| linux-2.6 | f2f1fa78a155524b849edf359e42a3001ea652c0 | 157,881,654,187,639,960,000,000,000,000,000,000,000 | 17 | Enforce a minimum SG_IO timeout
There's no point in having too short SG_IO timeouts, since if the
command does end up timing out, we'll end up through the reset sequence
that is several seconds long in order to abort the command that timed
out.
As a result, shorter timeouts than a few seconds simply do not make
sense, as the recovery would be longer than the timeout itself.
Add a BLK_MIN_SG_TIMEOUT to match the existing BLK_DEFAULT_SG_TIMEOUT.
Suggested-by: Alan Cox <[email protected]>
Acked-by: Tejun Heo <[email protected]>
Acked-by: Jens Axboe <[email protected]>
Cc: Jeff Garzik <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
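The clamp itself is one line; a hedged sketch follows (the real BLK_MIN_SG_TIMEOUT lives in the block layer headers, and the value below is only a placeholder):

/* Placeholder value; the kernel defines BLK_MIN_SG_TIMEOUT in jiffies. */
#define BLK_MIN_SG_TIMEOUT_MS 7000

static unsigned int clamp_sg_timeout(unsigned int requested_ms)
{
    /* A timeout shorter than the multi-second reset sequence is useless,
     * so enforce a floor rather than honoring tiny values. */
    return requested_ms < BLK_MIN_SG_TIMEOUT_MS
         ? BLK_MIN_SG_TIMEOUT_MS : requested_ms;
}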
vmxnet3_is_registered_vlan(VMXNET3State *s, const void *data)
{
uint16_t vlan_tag = eth_get_pkt_tci(data) & VLAN_VID_MASK;
if (IS_SPECIAL_VLAN_ID(vlan_tag)) {
return true;
}
return VMXNET3_VFTABLE_ENTRY_IS_SET(s->vlan_table, vlan_tag);
} | 0 | [
"CWE-20"
]
| qemu | a7278b36fcab9af469563bd7b9dadebe2ae25e48 | 237,059,480,734,064,980,000,000,000,000,000,000,000 | 9 | net/vmxnet3: Refine l2 header validation
Validation of l2 header length assumed minimal packet size as
eth_header + 2 * vlan_header regardless of the actual protocol.
This caused crash for valid non-IP packets shorter than 22 bytes, as
'tx_pkt->packet_type' hasn't been assigned for such packets, and
'vmxnet3_on_tx_done_update_stats()' expects it to be properly set.
Refine header length validation in 'vmxnet_tx_pkt_parse_headers'.
Check its return value during packet processing flow.
As a side effect, in case IPv4 and IPv6 header validation failure,
corrupt packets will be dropped.
Signed-off-by: Dana Rubin <[email protected]>
Signed-off-by: Shmulik Ladkani <[email protected]>
Signed-off-by: Jason Wang <[email protected]> |
OkToChangeTag(TIFF* tif, uint32 tag)
{
const TIFFField* fip = TIFFFindField(tif, tag, TIFF_ANY);
if (!fip) { /* unknown tag */
TIFFErrorExt(tif->tif_clientdata, "TIFFSetField", "%s: Unknown %stag %u",
tif->tif_name, isPseudoTag(tag) ? "pseudo-" : "", tag);
return (0);
}
if (tag != TIFFTAG_IMAGELENGTH && (tif->tif_flags & TIFF_BEENWRITING) &&
!fip->field_oktochange) {
/*
* Consult info table to see if tag can be changed
* after we've started writing. We only allow changes
* to those tags that don't/shouldn't affect the
* compression and/or format of the data.
*/
TIFFErrorExt(tif->tif_clientdata, "TIFFSetField",
"%s: Cannot modify tag \"%s\" while writing",
tif->tif_name, fip->field_name);
return (0);
}
return (1);
} | 0 | [
"CWE-20"
]
| libtiff | 3144e57770c1e4d26520d8abee750f8ac8b75490 | 117,750,931,534,017,970,000,000,000,000,000,000,000 | 23 | * libtiff/tif_dir.c, tif_dirread.c, tif_dirwrite.c: implement various clampings
of double to other data types to avoid undefined behaviour if the output range
isn't big enough to hold the input value.
Fixes http://bugzilla.maptools.org/show_bug.cgi?id=2643
http://bugzilla.maptools.org/show_bug.cgi?id=2642
http://bugzilla.maptools.org/show_bug.cgi?id=2646
http://bugzilla.maptools.org/show_bug.cgi?id=2647 |
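A minimal sketch of the clamping idea (illustrative, not libtiff's helper). Converting an out-of-range double is undefined behaviour in C, so the range, and NaN, must be checked first:

#include <stdint.h>

static uint32_t clamp_double_to_u32(double v)
{
    if (v != v)                    /* NaN: pick a defined result */
        return 0;
    if (v <= 0.0)
        return 0;
    if (v >= 4294967295.0)         /* UINT32_MAX is exactly representable */
        return UINT32_MAX;
    return (uint32_t)v;            /* in range, conversion is defined */
}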
gst_flxdec_chain (GstPad * pad, GstObject * parent, GstBuffer * buf)
{
GstCaps *caps;
guint avail;
GstFlowReturn res = GST_FLOW_OK;
GstFlxDec *flxdec;
FlxHeader *flxh;
g_return_val_if_fail (buf != NULL, GST_FLOW_ERROR);
flxdec = (GstFlxDec *) parent;
g_return_val_if_fail (flxdec != NULL, GST_FLOW_ERROR);
gst_adapter_push (flxdec->adapter, buf);
avail = gst_adapter_available (flxdec->adapter);
if (flxdec->state == GST_FLXDEC_READ_HEADER) {
if (avail >= FlxHeaderSize) {
const guint8 *data = gst_adapter_map (flxdec->adapter, FlxHeaderSize);
GstCaps *templ;
memcpy ((gchar *) & flxdec->hdr, data, FlxHeaderSize);
FLX_HDR_FIX_ENDIANNESS (&(flxdec->hdr));
gst_adapter_unmap (flxdec->adapter);
gst_adapter_flush (flxdec->adapter, FlxHeaderSize);
flxh = &flxdec->hdr;
/* check header */
if (flxh->type != FLX_MAGICHDR_FLI &&
flxh->type != FLX_MAGICHDR_FLC && flxh->type != FLX_MAGICHDR_FLX)
goto wrong_type;
GST_LOG ("size : %d", flxh->size);
GST_LOG ("frames : %d", flxh->frames);
GST_LOG ("width : %d", flxh->width);
GST_LOG ("height : %d", flxh->height);
GST_LOG ("depth : %d", flxh->depth);
GST_LOG ("speed : %d", flxh->speed);
flxdec->next_time = 0;
if (flxh->type == FLX_MAGICHDR_FLI) {
flxdec->frame_time = JIFFIE * flxh->speed;
} else if (flxh->speed == 0) {
flxdec->frame_time = GST_SECOND / 70;
} else {
flxdec->frame_time = flxh->speed * GST_MSECOND;
}
flxdec->duration = flxh->frames * flxdec->frame_time;
GST_LOG ("duration : %" GST_TIME_FORMAT,
GST_TIME_ARGS (flxdec->duration));
templ = gst_pad_get_pad_template_caps (flxdec->srcpad);
caps = gst_caps_copy (templ);
gst_caps_unref (templ);
gst_caps_set_simple (caps,
"width", G_TYPE_INT, flxh->width,
"height", G_TYPE_INT, flxh->height,
"framerate", GST_TYPE_FRACTION, (gint) GST_MSECOND,
(gint) flxdec->frame_time / 1000, NULL);
gst_pad_set_caps (flxdec->srcpad, caps);
gst_caps_unref (caps);
if (flxh->depth <= 8)
flxdec->converter =
flx_colorspace_converter_new (flxh->width, flxh->height);
if (flxh->type == FLX_MAGICHDR_FLC || flxh->type == FLX_MAGICHDR_FLX) {
GST_LOG ("(FLC) aspect_dx : %d", flxh->aspect_dx);
GST_LOG ("(FLC) aspect_dy : %d", flxh->aspect_dy);
GST_LOG ("(FLC) oframe1 : 0x%08x", flxh->oframe1);
GST_LOG ("(FLC) oframe2 : 0x%08x", flxh->oframe2);
}
flxdec->size = ((guint) flxh->width * (guint) flxh->height);
/* create delta and output frame */
flxdec->frame_data = g_malloc (flxdec->size);
flxdec->delta_data = g_malloc (flxdec->size);
flxdec->state = GST_FLXDEC_PLAYING;
}
} else if (flxdec->state == GST_FLXDEC_PLAYING) {
GstBuffer *out;
/* while we have enough data in the adapter */
while (avail >= FlxFrameChunkSize && res == GST_FLOW_OK) {
FlxFrameChunk flxfh;
guchar *chunk;
const guint8 *data;
GstMapInfo map;
chunk = NULL;
data = gst_adapter_map (flxdec->adapter, FlxFrameChunkSize);
memcpy (&flxfh, data, FlxFrameChunkSize);
FLX_FRAME_CHUNK_FIX_ENDIANNESS (&flxfh);
gst_adapter_unmap (flxdec->adapter);
switch (flxfh.id) {
case FLX_FRAME_TYPE:
/* check if we have the complete frame */
if (avail < flxfh.size)
goto need_more_data;
/* flush header */
gst_adapter_flush (flxdec->adapter, FlxFrameChunkSize);
chunk = gst_adapter_take (flxdec->adapter,
flxfh.size - FlxFrameChunkSize);
FLX_FRAME_TYPE_FIX_ENDIANNESS ((FlxFrameType *) chunk);
if (((FlxFrameType *) chunk)->chunks == 0)
break;
/* create 32 bits output frame */
// res = gst_pad_alloc_buffer_and_set_caps (flxdec->srcpad,
// GST_BUFFER_OFFSET_NONE,
// flxdec->size * 4, GST_PAD_CAPS (flxdec->srcpad), &out);
// if (res != GST_FLOW_OK)
// break;
out = gst_buffer_new_and_alloc (flxdec->size * 4);
/* decode chunks */
if (!flx_decode_chunks (flxdec,
((FlxFrameType *) chunk)->chunks,
chunk + FlxFrameTypeSize, flxdec->frame_data)) {
GST_ELEMENT_ERROR (flxdec, STREAM, DECODE,
("%s", "Could not decode chunk"), NULL);
return GST_FLOW_ERROR;
}
/* save copy of the current frame for possible delta. */
memcpy (flxdec->delta_data, flxdec->frame_data, flxdec->size);
gst_buffer_map (out, &map, GST_MAP_WRITE);
/* convert current frame. */
flx_colorspace_convert (flxdec->converter, flxdec->frame_data,
map.data);
gst_buffer_unmap (out, &map);
GST_BUFFER_TIMESTAMP (out) = flxdec->next_time;
flxdec->next_time += flxdec->frame_time;
res = gst_pad_push (flxdec->srcpad, out);
break;
default:
/* check if we have the complete frame */
if (avail < flxfh.size)
goto need_more_data;
gst_adapter_flush (flxdec->adapter, flxfh.size);
break;
}
g_free (chunk);
avail = gst_adapter_available (flxdec->adapter);
}
}
need_more_data:
return res;
/* ERRORS */
wrong_type:
{
GST_ELEMENT_ERROR (flxdec, STREAM, WRONG_TYPE, (NULL),
("not a flx file (type %x)", flxh->type));
return GST_FLOW_ERROR;
}
} | 0 | [
"CWE-125"
]
| gst-plugins-good | b31c504645a814c59d91d49e4fe218acaf93f4ca | 198,753,825,564,039,900,000,000,000,000,000,000,000 | 173 | flxdec: Don't unref() parent in the chain function
We don't own the reference here, it is owned by the caller and given to
us for the scope of this function. Leftover mistake from 0.10 porting.
https://bugzilla.gnome.org/show_bug.cgi?id=774897 |
unsigned long recover_probed_instruction(kprobe_opcode_t *buf, unsigned long addr)
{
unsigned long __addr;
__addr = __recover_optprobed_insn(buf, addr);
if (__addr != addr)
return __addr;
return __recover_probed_insn(buf, addr);
} | 0 | [
"CWE-264"
]
| linux | 548acf19234dbda5a52d5a8e7e205af46e9da840 | 67,996,347,398,795,390,000,000,000,000,000,000,000 | 10 | x86/mm: Expand the exception table logic to allow new handling options
Huge amounts of help from Andy Lutomirski and Borislav Petkov to
produce this. Andy provided the inspiration to add classes to the
exception table with a clever bit-squeezing trick, Boris pointed
out how much cleaner it would all be if we just had a new field.
Linus Torvalds blessed the expansion with:
' I'd rather not be clever in order to save just a tiny amount of space
in the exception table, which isn't really critical for anybody. '
The third field is another relative function pointer, this one to a
handler that executes the actions.
We start out with three handlers:
1: Legacy - just jumps the to fixup IP
2: Fault - provide the trap number in %ax to the fixup code
3: Cleaned up legacy for the uaccess error hack
Signed-off-by: Tony Luck <[email protected]>
Reviewed-by: Borislav Petkov <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Link: http://lkml.kernel.org/r/f6af78fcbd348cf4939875cfda9c19689b5e50b8.1455732970.git.tony.luck@intel.com
Signed-off-by: Ingo Molnar <[email protected]> |
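A minimal sketch of the expanded entry layout described above (the field names are assumptions). Each field stores a PC-relative offset, target address minus the field's own address, so the table stays position independent while gaining a per-entry handler:

#include <stdint.h>

struct extable_entry {
    int32_t insn;     /* faulting instruction IP, relative */
    int32_t fixup;    /* fixup IP, relative */
    int32_t handler;  /* action routine, relative (the new third field) */
};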
fr_window_view_last_output (FrWindow *window,
const char *title)
{
GtkWidget *dialog;
GtkWidget *vbox;
GtkWidget *text_view;
GtkWidget *scrolled;
GtkTextBuffer *text_buffer;
GtkTextIter iter;
GList *scan;
if (title == NULL)
title = _("Last Output");
dialog = gtk_dialog_new_with_buttons (title,
GTK_WINDOW (window),
GTK_DIALOG_DESTROY_WITH_PARENT,
GTK_STOCK_CLOSE, GTK_RESPONSE_CLOSE,
NULL);
gtk_dialog_set_default_response (GTK_DIALOG (dialog), GTK_RESPONSE_CLOSE);
gtk_window_set_resizable (GTK_WINDOW (dialog), TRUE);
gtk_widget_set_size_request (dialog, 500, 300);
/* Add text */
scrolled = gtk_scrolled_window_new (NULL, NULL);
gtk_scrolled_window_set_policy (GTK_SCROLLED_WINDOW (scrolled),
GTK_POLICY_AUTOMATIC,
GTK_POLICY_AUTOMATIC);
gtk_scrolled_window_set_shadow_type (GTK_SCROLLED_WINDOW (scrolled),
GTK_SHADOW_ETCHED_IN);
text_buffer = gtk_text_buffer_new (NULL);
gtk_text_buffer_create_tag (text_buffer, "monospace",
"family", "monospace", NULL);
text_view = gtk_text_view_new_with_buffer (text_buffer);
g_object_unref (text_buffer);
gtk_text_view_set_editable (GTK_TEXT_VIEW (text_view), FALSE);
gtk_text_view_set_cursor_visible (GTK_TEXT_VIEW (text_view), FALSE);
/**/
vbox = gtk_box_new (GTK_ORIENTATION_VERTICAL, 6);
gtk_container_set_border_width (GTK_CONTAINER (vbox), 5);
gtk_container_add (GTK_CONTAINER (scrolled), text_view);
gtk_box_pack_start (GTK_BOX (vbox), scrolled,
TRUE, TRUE, 0);
gtk_widget_show_all (vbox);
gtk_box_pack_start (GTK_BOX (gtk_dialog_get_content_area (GTK_DIALOG (dialog))),
vbox,
TRUE, TRUE, 0);
/* signals */
g_signal_connect (G_OBJECT (dialog),
"response",
G_CALLBACK (gtk_widget_destroy),
NULL);
g_signal_connect (G_OBJECT (dialog),
"unrealize",
G_CALLBACK (last_output_window__unrealize_cb),
NULL);
/**/
gtk_text_buffer_get_iter_at_offset (text_buffer, &iter, 0);
if (FR_IS_COMMAND (window->archive))
scan = fr_command_get_last_output (FR_COMMAND (window->archive));
else
scan = NULL;
for (; scan; scan = scan->next) {
char *line = scan->data;
char *utf8_line;
gsize bytes_written;
utf8_line = g_locale_to_utf8 (line, -1, NULL, &bytes_written, NULL);
gtk_text_buffer_insert_with_tags_by_name (text_buffer,
&iter,
utf8_line,
bytes_written,
"monospace", NULL);
g_free (utf8_line);
gtk_text_buffer_insert (text_buffer, &iter, "\n", 1);
}
/**/
pref_util_restore_window_geometry (GTK_WINDOW (dialog), LAST_OUTPUT_SCHEMA_NAME);
} | 0 | [
"CWE-22"
]
| file-roller | b147281293a8307808475e102a14857055f81631 | 194,120,148,935,190,130,000,000,000,000,000,000,000 | 93 | libarchive: sanitize filenames before extracting |
void (*SSL_CTX_get_info_callback(SSL_CTX *ctx)) (const SSL *ssl, int type,
int val) {
return ctx->info_callback;
} | 0 | [
"CWE-362"
]
| openssl | 939b4960276b040fc0ed52232238fcc9e2e9ec21 | 144,455,963,420,671,600,000,000,000,000,000,000,000 | 4 | Fix race condition in NewSessionTicket
If a NewSessionTicket is received by a multi-threaded client when
attempting to reuse a previous ticket then a race condition can occur
potentially leading to a double free of the ticket data.
CVE-2015-1791
This also fixes RT#3808 where a session ID is changed for a session already
in the client session cache. Since the session ID is the key to the cache
this breaks the cache access.
Parts of this patch were inspired by this Akamai change:
https://github.com/akamai/openssl/commit/c0bf69a791239ceec64509f9f19fcafb2461b0d3
Reviewed-by: Rich Salz <[email protected]>
(cherry picked from commit 27c76b9b8010b536687318739c6f631ce4194688)
Conflicts:
ssl/ssl.h
ssl/ssl_err.c |
static long __video_do_ioctl(struct file *file,
unsigned int cmd, void *arg)
{
struct video_device *vfd = video_devdata(file);
struct mutex *req_queue_lock = NULL;
struct mutex *lock; /* ioctl serialization mutex */
const struct v4l2_ioctl_ops *ops = vfd->ioctl_ops;
bool write_only = false;
struct v4l2_ioctl_info default_info;
const struct v4l2_ioctl_info *info;
void *fh = file->private_data;
struct v4l2_fh *vfh = NULL;
int dev_debug = vfd->dev_debug;
long ret = -ENOTTY;
if (ops == NULL) {
pr_warn("%s: has no ioctl_ops.\n",
video_device_node_name(vfd));
return ret;
}
if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags))
vfh = file->private_data;
/*
* We need to serialize streamon/off with queueing new requests.
* These ioctls may trigger the cancellation of a streaming
* operation, and that should not be mixed with queueing a new
* request at the same time.
*/
if (v4l2_device_supports_requests(vfd->v4l2_dev) &&
(cmd == VIDIOC_STREAMON || cmd == VIDIOC_STREAMOFF)) {
req_queue_lock = &vfd->v4l2_dev->mdev->req_queue_mutex;
if (mutex_lock_interruptible(req_queue_lock))
return -ERESTARTSYS;
}
lock = v4l2_ioctl_get_lock(vfd, vfh, cmd, arg);
if (lock && mutex_lock_interruptible(lock)) {
if (req_queue_lock)
mutex_unlock(req_queue_lock);
return -ERESTARTSYS;
}
if (!video_is_registered(vfd)) {
ret = -ENODEV;
goto unlock;
}
if (v4l2_is_known_ioctl(cmd)) {
info = &v4l2_ioctls[_IOC_NR(cmd)];
if (!test_bit(_IOC_NR(cmd), vfd->valid_ioctls) &&
!((info->flags & INFO_FL_CTRL) && vfh && vfh->ctrl_handler))
goto done;
if (vfh && (info->flags & INFO_FL_PRIO)) {
ret = v4l2_prio_check(vfd->prio, vfh->prio);
if (ret)
goto done;
}
} else {
default_info.ioctl = cmd;
default_info.flags = 0;
default_info.debug = v4l_print_default;
info = &default_info;
}
write_only = _IOC_DIR(cmd) == _IOC_WRITE;
if (info != &default_info) {
ret = info->func(ops, file, fh, arg);
} else if (!ops->vidioc_default) {
ret = -ENOTTY;
} else {
ret = ops->vidioc_default(file, fh,
vfh ? v4l2_prio_check(vfd->prio, vfh->prio) >= 0 : 0,
cmd, arg);
}
done:
if (dev_debug & (V4L2_DEV_DEBUG_IOCTL | V4L2_DEV_DEBUG_IOCTL_ARG)) {
if (!(dev_debug & V4L2_DEV_DEBUG_STREAMING) &&
(cmd == VIDIOC_QBUF || cmd == VIDIOC_DQBUF))
goto unlock;
v4l_printk_ioctl(video_device_node_name(vfd), cmd);
if (ret < 0)
pr_cont(": error %ld", ret);
if (!(dev_debug & V4L2_DEV_DEBUG_IOCTL_ARG))
pr_cont("\n");
else if (_IOC_DIR(cmd) == _IOC_NONE)
info->debug(arg, write_only);
else {
pr_cont(": ");
info->debug(arg, write_only);
}
}
unlock:
if (lock)
mutex_unlock(lock);
if (req_queue_lock)
mutex_unlock(req_queue_lock);
return ret;
} | 0 | [
"CWE-401"
]
| linux | fb18802a338b36f675a388fc03d2aa504a0d0899 | 163,122,289,821,129,460,000,000,000,000,000,000,000 | 107 | media: v4l: ioctl: Fix memory leak in video_usercopy
When an IOCTL with an argument size larger than 128 that also used array
arguments was handled, two memory allocations were made but, alas, only
the latter of them was released. This happened because there was only
a single local variable to hold such a temporary allocation.
Fix this by adding separate variables to hold the pointers to the
temporary allocations.
Reported-by: Arnd Bergmann <[email protected]>
Reported-by: [email protected]
Fixes: d14e6d76ebf7 ("[media] v4l: Add multi-planar ioctl handling code")
Cc: [email protected]
Signed-off-by: Sakari Ailus <[email protected]>
Acked-by: Arnd Bergmann <[email protected]>
Acked-by: Hans Verkuil <[email protected]>
Reviewed-by: Laurent Pinchart <[email protected]>
Signed-off-by: Mauro Carvalho Chehab <[email protected]> |
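A minimal sketch of the leak shape and the fix (illustrative, not the v4l code). With one pointer reused for two allocations, only the second can ever be freed; separate variables make both freeable:

#include <stdlib.h>

static int handle_ioctl(size_t arg_size, size_t array_size)
{
    void *arg_buf   = malloc(arg_size);    /* large-argument copy */
    void *array_buf = malloc(array_size);  /* array-argument copy */
    if (!arg_buf || !array_buf) {
        free(array_buf);                   /* free(NULL) is a no-op */
        free(arg_buf);
        return -1;
    }
    /* ... process the ioctl using both buffers ... */
    free(array_buf);
    free(arg_buf);      /* the single-variable version lost this pointer */
    return 0;
}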
xsltApplyStylesheet(xsltStylesheetPtr style, xmlDocPtr doc,
const char **params)
{
return (xsltApplyStylesheetInternal(style, doc, params, NULL, NULL, NULL));
} | 0 | []
| libxslt | 937ba2a3eb42d288f53c8adc211bd1122869f0bf | 323,920,614,848,006,900,000,000,000,000,000,000,000 | 5 | Fix default template processing on namespace nodes |
virDomainHostdevMatchCapsNet(virDomainHostdevDefPtr a,
virDomainHostdevDefPtr b)
{
return STREQ_NULLABLE(a->source.caps.u.net.ifname,
b->source.caps.u.net.ifname);
} | 0 | [
"CWE-212"
]
| libvirt | a5b064bf4b17a9884d7d361733737fb614ad8979 | 293,496,471,156,916,780,000,000,000,000,000,000,000 | 6 | conf: Don't format http cookies unless VIR_DOMAIN_DEF_FORMAT_SECURE is used
Starting with 3b076391befc3fe72deb0c244ac6c2b4c100b410
(v6.1.0-122-g3b076391be) we support http cookies. Since they may contain
somewhat sensitive information we should not format them into the XML
unless VIR_DOMAIN_DEF_FORMAT_SECURE is asserted.
Reported-by: Han Han <[email protected]>
Signed-off-by: Peter Krempa <[email protected]>
Reviewed-by: Erik Skultety <[email protected]> |
static int selinux_inode_getsecurity(const struct inode *inode, const char *name, void **buffer, bool alloc)
{
u32 size;
int error;
char *context = NULL;
struct inode_security_struct *isec = inode->i_security;
if (strcmp(name, XATTR_SELINUX_SUFFIX))
return -EOPNOTSUPP;
/*
* If the caller has CAP_MAC_ADMIN, then get the raw context
* value even if it is not defined by current policy; otherwise,
* use the in-core value under current policy.
* Use the non-auditing forms of the permission checks since
* getxattr may be called by unprivileged processes commonly
* and lack of permission just means that we fall back to the
* in-core context value, not a denial.
*/
error = selinux_capable(current_cred(), &init_user_ns, CAP_MAC_ADMIN,
SECURITY_CAP_NOAUDIT);
if (!error)
error = security_sid_to_context_force(isec->sid, &context,
&size);
else
error = security_sid_to_context(isec->sid, &context, &size);
if (error)
return error;
error = size;
if (alloc) {
*buffer = context;
goto out_nofree;
}
kfree(context);
out_nofree:
return error;
} | 0 | [
"CWE-264"
]
| linux | 259e5e6c75a910f3b5e656151dc602f53f9d7548 | 287,551,253,559,099,500,000,000,000,000,000,000,000 | 37 | Add PR_{GET,SET}_NO_NEW_PRIVS to prevent execve from granting privs
With this change, calling
prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)
disables privilege granting operations at execve-time. For example, a
process will not be able to execute a setuid binary to change their uid
or gid if this bit is set. The same is true for file capabilities.
Additionally, LSM_UNSAFE_NO_NEW_PRIVS is defined to ensure that
LSMs respect the requested behavior.
To determine if the NO_NEW_PRIVS bit is set, a task may call
prctl(PR_GET_NO_NEW_PRIVS, 0, 0, 0, 0);
It returns 1 if set and 0 if it is not set. If any of the arguments are
non-zero, it will return -1 and set errno to -EINVAL.
(PR_SET_NO_NEW_PRIVS behaves similarly.)
This functionality is desired for the proposed seccomp filter patch
series. By using PR_SET_NO_NEW_PRIVS, it allows a task to modify the
system call behavior for itself and its child tasks without being
able to impact the behavior of a more privileged task.
Another potential use is making certain privileged operations
unprivileged. For example, chroot may be considered "safe" if it cannot
affect privileged tasks.
Note, this patch causes execve to fail when PR_SET_NO_NEW_PRIVS is
set and AppArmor is in use. It is fixed in a subsequent patch.
Signed-off-by: Andy Lutomirski <[email protected]>
Signed-off-by: Will Drewry <[email protected]>
Acked-by: Eric Paris <[email protected]>
Acked-by: Kees Cook <[email protected]>
v18: updated change desc
v17: using new define values as per 3.4
Signed-off-by: James Morris <[email protected]> |
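The prctl calls quoted above can be exercised directly; a minimal userspace example, with fallback #defines only for older headers:

#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_SET_NO_NEW_PRIVS
#define PR_SET_NO_NEW_PRIVS 38
#endif
#ifndef PR_GET_NO_NEW_PRIVS
#define PR_GET_NO_NEW_PRIVS 39
#endif

int main(void)
{
    if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0) != 0) {
        perror("PR_SET_NO_NEW_PRIVS");
        return 1;
    }
    /* Prints 1: this process and its children can no longer gain
     * privileges via setuid/fscaps binaries at execve time. */
    printf("no_new_privs = %d\n", prctl(PR_GET_NO_NEW_PRIVS, 0, 0, 0, 0));
    return 0;
}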
static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
struct tg3 *tp = netdev_priv(dev);
int err, reset_phy = 0;
if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
return -EINVAL;
if (!netif_running(dev)) {
/* We'll just catch it later when the
* device is up'd.
*/
tg3_set_mtu(dev, tp, new_mtu);
return 0;
}
tg3_phy_stop(tp);
tg3_netif_stop(tp);
tg3_full_lock(tp, 1);
tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
tg3_set_mtu(dev, tp, new_mtu);
/* Reset PHY, otherwise the read DMA engine will be in a mode that
* breaks all requests to 256 bytes.
*/
if (tg3_asic_rev(tp) == ASIC_REV_57766)
reset_phy = 1;
err = tg3_restart_hw(tp, reset_phy);
if (!err)
tg3_netif_start(tp);
tg3_full_unlock(tp);
if (!err)
tg3_phy_start(tp);
return err;
} | 0 | [
"CWE-476",
"CWE-119"
]
| linux | 715230a44310a8cf66fbfb5a46f9a62a9b2de424 | 281,929,275,052,837,440,000,000,000,000,000,000,000 | 44 | tg3: fix length overflow in VPD firmware parsing
Commit 184b89044fb6e2a74611dafa69b1dce0d98612c6 ("tg3: Use VPD fw version
when present") introduced VPD parsing that contained a potential length
overflow.
Limit the hardware's reported firmware string length (max 255 bytes) to
stay inside the driver's firmware string length (32 bytes). On overflow,
truncate the formatted firmware string instead of potentially overwriting
portions of the tg3 struct.
http://cansecwest.com/slides/2013/PrivateCore%20CSW%202013.pdf
Signed-off-by: Kees Cook <[email protected]>
Reported-by: Oded Horovitz <[email protected]>
Reported-by: Brad Spengler <[email protected]>
Cc: [email protected]
Cc: Matt Carlson <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
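A minimal sketch of the bounded copy the fix describes (TG3_VER_SIZE and the helper name are assumptions, not the driver's actual code). The device-reported string is truncated instead of overrunning the fixed field:

#include <string.h>

#define TG3_VER_SIZE 32   /* assumed driver-side buffer size per the message */

/* Copy at most TG3_VER_SIZE-1 bytes and NUL-terminate, truncating
 * hardware strings that may claim up to 255 bytes. */
static void copy_fw_version(char *dst, const char *src, size_t srclen)
{
    size_t n = srclen < TG3_VER_SIZE - 1 ? srclen : TG3_VER_SIZE - 1;
    memcpy(dst, src, n);
    dst[n] = '\0';
}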
static void warn_if_datadir_altered(THD *thd,
const partition_element *part_elem)
{
DBUG_ASSERT(part_elem);
if (part_elem->engine_type &&
part_elem->engine_type->db_type != DB_TYPE_INNODB)
return;
if (part_elem->data_file_name)
{
push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
WARN_INNODB_PARTITION_OPTION_IGNORED,
ER(WARN_INNODB_PARTITION_OPTION_IGNORED),
"DATA DIRECTORY");
}
if (part_elem->index_file_name)
{
push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
WARN_INNODB_PARTITION_OPTION_IGNORED,
ER(WARN_INNODB_PARTITION_OPTION_IGNORED),
"INDEX DIRECTORY");
}
} | 0 | [
"CWE-416"
]
| server | c02ebf3510850ba78a106be9974c94c3b97d8585 | 121,974,390,538,551,400,000,000,000,000,000,000,000 | 24 | MDEV-24176 Preparations
1. moved fix_vcol_exprs() call to open_table()
mysql_alter_table() doesn't do lock_tables() so it cannot win from
fix_vcol_exprs() from there. Tests affected: main.default_session
2. Vanilla cleanups and comments. |
int snd_timer_new(struct snd_card *card, char *id, struct snd_timer_id *tid,
struct snd_timer **rtimer)
{
struct snd_timer *timer;
int err;
static struct snd_device_ops ops = {
.dev_free = snd_timer_dev_free,
.dev_register = snd_timer_dev_register,
.dev_disconnect = snd_timer_dev_disconnect,
};
if (snd_BUG_ON(!tid))
return -EINVAL;
if (tid->dev_class == SNDRV_TIMER_CLASS_CARD ||
tid->dev_class == SNDRV_TIMER_CLASS_PCM) {
if (WARN_ON(!card))
return -EINVAL;
}
if (rtimer)
*rtimer = NULL;
timer = kzalloc(sizeof(*timer), GFP_KERNEL);
if (!timer)
return -ENOMEM;
timer->tmr_class = tid->dev_class;
timer->card = card;
timer->tmr_device = tid->device;
timer->tmr_subdevice = tid->subdevice;
if (id)
strlcpy(timer->id, id, sizeof(timer->id));
timer->sticks = 1;
INIT_LIST_HEAD(&timer->device_list);
INIT_LIST_HEAD(&timer->open_list_head);
INIT_LIST_HEAD(&timer->active_list_head);
INIT_LIST_HEAD(&timer->ack_list_head);
INIT_LIST_HEAD(&timer->sack_list_head);
spin_lock_init(&timer->lock);
tasklet_init(&timer->task_queue, snd_timer_tasklet,
(unsigned long)timer);
timer->max_instances = 1000; /* default limit per timer */
if (card != NULL) {
timer->module = card->module;
err = snd_device_new(card, SNDRV_DEV_TIMER, timer, &ops);
if (err < 0) {
snd_timer_free(timer);
return err;
}
}
if (rtimer)
*rtimer = timer;
return 0;
} | 0 | [
"CWE-416",
"CWE-703"
]
| linux | e7af6307a8a54f0b873960b32b6a644f2d0fbd97 | 108,964,070,418,821,620,000,000,000,000,000,000,000 | 51 | ALSA: timer: Fix incorrectly assigned timer instance
The clean up commit 41672c0c24a6 ("ALSA: timer: Simplify error path in
snd_timer_open()") unified the error handling code paths with the
standard goto, but it introduced a subtle bug: the timer instance is
stored in snd_timer_open() incorrectly even if it returns an error.
This may eventually lead to a UAF, as spotted by a fuzzer.
The culprit is the snd_timer_open() code checks the
SNDRV_TIMER_IFLG_EXCLUSIVE flag with the common variable timeri.
This variable is supposed to be the newly created instance, but we
(ab-)used it for a temporary check before the actual creation of a
timer instance. After that point, there is another check for the max
number of instances, and it bails out if over the threshold. Before
the refactoring above, it worked fine because the code returned
directly from that point. After the refactoring, however, it jumps to
the unified error path that stores the timeri variable in return --
even if it returns an error. Unfortunately this stored value is kept
in the caller side (snd_timer_user_tselect()) in tu->timeri. This
causes inconsistency later, as if the timer was successfully
assigned.
In this patch, we fix it by not re-using timeri variable but a
temporary variable for testing the exclusive connection, so timeri
remains NULL at that point.
Fixes: 41672c0c24a6 ("ALSA: timer: Simplify error path in snd_timer_open()")
Reported-and-tested-by: Tristan Madani <[email protected]>
Cc: <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Takashi Iwai <[email protected]> |
int u_init_compressed_inmemory_website_config(struct _u_compressed_inmemory_website_config * config) {
int ret = U_OK;
pthread_mutexattr_t mutexattr;
if (config != NULL) {
config->files_path = NULL;
config->url_prefix = NULL;
config->redirect_on_404 = NULL;
config->allow_gzip = 1;
config->allow_deflate = 1;
config->mime_types_compressed = NULL;
config->mime_types_compressed_size = 0;
config->allow_cache_compressed = 1;
if ((ret = u_map_init(&(config->mime_types))) != U_OK) {
y_log_message(Y_LOG_LEVEL_ERROR, "u_init_compressed_inmemory_website_config - Error u_map_init mime_types");
} else if ((ret = u_map_init(&(config->map_header))) != U_OK) {
y_log_message(Y_LOG_LEVEL_ERROR, "u_init_compressed_inmemory_website_config - Error u_map_init map_header");
} else if ((ret = u_map_init(&(config->gzip_files))) != U_OK) {
y_log_message(Y_LOG_LEVEL_ERROR, "u_init_compressed_inmemory_website_config - Error u_map_init gzip_files");
} else if ((ret = u_map_init(&(config->deflate_files))) != U_OK) {
y_log_message(Y_LOG_LEVEL_ERROR, "u_init_compressed_inmemory_website_config - Error u_map_init deflate_files");
} else {
pthread_mutexattr_init (&mutexattr);
pthread_mutexattr_settype(&mutexattr, PTHREAD_MUTEX_RECURSIVE);
if (pthread_mutex_init(&(config->lock), &mutexattr) != 0) {
y_log_message(Y_LOG_LEVEL_ERROR, "u_init_compressed_inmemory_website_config - Error pthread_mutex_init");
ret = U_ERROR;
}
}
}
return ret;
} | 0 | [
"CWE-269",
"CWE-22"
]
| glewlwyd | e3f7245c33897bf9b3a75acfcdb8b7b93974bf11 | 302,206,093,060,883,360,000,000,000,000,000,000,000 | 32 | Fix file access check for directory traversal, and fix call for callback_static_file_uncompressed if header not set |
hfs_make_badblockfile(HFS_INFO * hfs, TSK_FS_FILE * fs_file)
{
TSK_FS_ATTR *fs_attr;
unsigned char dummy1, dummy2;
uint64_t dummy3;
uint8_t result;
if (tsk_verbose)
tsk_fprintf(stderr,
"hfs_make_badblockfile: Making virtual badblock file\n");
if (hfs_make_specialbase(fs_file)) {
error_returned(" - hfs_make_badblockfile");
return 1;
}
fs_file->meta->addr = HFS_BAD_BLOCK_FILE_ID;
strncpy(fs_file->meta->name2->name, HFS_BAD_BLOCK_FILE_NAME,
TSK_FS_META_NAME_LIST_NSIZE);
fs_file->meta->size = 0;
if ((fs_attr =
tsk_fs_attrlist_getnew(fs_file->meta->attr,
TSK_FS_ATTR_NONRES)) == NULL) {
error_returned(" - hfs_make_badblockfile");
return 1;
}
// add the run to the file.
if (tsk_fs_attr_set_run(fs_file, fs_attr, NULL, NULL,
TSK_FS_ATTR_TYPE_DEFAULT, HFS_FS_ATTR_ID_DATA,
fs_file->meta->size, fs_file->meta->size, fs_file->meta->size,
0, 0)) {
error_returned(" - hfs_make_badblockfile");
return 1;
}
// see if file has additional runs
if (hfs_ext_find_extent_record_attr(hfs, HFS_BAD_BLOCK_FILE_ID,
fs_attr, TRUE)) {
error_returned(" - hfs_make_badblockfile");
fs_file->meta->attr_state = TSK_FS_META_ATTR_ERROR;
return 1;
}
/* @@@ We have a chicken and egg problem here... The current design of
* fs_attr_set() requires the size to be set, but we don't know the size
* until we look into the extents file (which adds to an attribute...).
* This does not seem to be the best design... need a way to test this. */
fs_file->meta->size = fs_attr->nrd.initsize;
fs_attr->size = fs_file->meta->size;
fs_attr->nrd.allocsize = fs_file->meta->size;
result = hfs_load_extended_attrs(fs_file, &dummy1, &dummy2, &dummy3);
if (result != 0) {
if (tsk_verbose)
tsk_fprintf(stderr,
"WARNING: Extended attributes failed to load for the BadBlocks file.\n");
tsk_error_reset();
}
fs_file->meta->attr_state = TSK_FS_META_ATTR_STUDIED;
return 0;
} | 0 | [
"CWE-190",
"CWE-284"
]
| sleuthkit | 114cd3d0aac8bd1aeaf4b33840feb0163d342d5b | 274,357,770,929,082,470,000,000,000,000,000,000,000 | 65 | hfs: fix keylen check in hfs_cat_traverse()
If key->key_len is 65535, calculating "uint16_t keylen" would
cause an overflow:
uint16_t keylen;
...
keylen = 2 + tsk_getu16(hfs->fs_info.endian, key->key_len)
so the code bypasses the sanity check "if (keylen > nodesize)"
which results in crash later:
./toolfs/fstools/fls -b 512 -f hfs <image>
=================================================================
==16==ERROR: AddressSanitizer: SEGV on unknown address 0x6210000256a4 (pc 0x00000054812b bp 0x7ffca548a8f0 sp 0x7ffca548a480 T0)
==16==The signal is caused by a READ memory access.
#0 0x54812a in hfs_dir_open_meta_cb /fuzzing/sleuthkit/tsk/fs/hfs_dent.c:237:20
#1 0x51a96c in hfs_cat_traverse /fuzzing/sleuthkit/tsk/fs/hfs.c:1082:21
#2 0x547785 in hfs_dir_open_meta /fuzzing/sleuthkit/tsk/fs/hfs_dent.c:480:9
#3 0x50f57d in tsk_fs_dir_open_meta /fuzzing/sleuthkit/tsk/fs/fs_dir.c:290:14
#4 0x54af17 in tsk_fs_path2inum /fuzzing/sleuthkit/tsk/fs/ifind_lib.c:237:23
#5 0x522266 in hfs_open /fuzzing/sleuthkit/tsk/fs/hfs.c:6579:9
#6 0x508e89 in main /fuzzing/sleuthkit/tools/fstools/fls.cpp:267:19
#7 0x7f9daf67c2b0 in __libc_start_main (/lib/x86_64-linux-gnu/libc.so.6+0x202b0)
#8 0x41d679 in _start (/fuzzing/sleuthkit/tools/fstools/fls+0x41d679)
Make 'keylen' int type to prevent the overflow and fix that.
Now, I get proper error message instead of crash:
./toolfs/fstools/fls -b 512 -f hfs <image>
General file system error (hfs_cat_traverse: length of key 3 in leaf node 1 too large (65537 vs 4096)) |
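A standalone illustration of the promotion/truncation at the heart of this fix (not the sleuthkit code itself). The sum 2 + 65535 is computed as an int, then truncated when stored back into a uint16_t:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint16_t key_len = 65535;          /* attacker-controlled on-disk value */
    uint16_t bad  = 2 + key_len;       /* int result 65537 truncated to 1  */
    int      good = 2 + key_len;       /* stays 65537, so the check fires  */
    unsigned nodesize = 4096;

    printf("uint16_t keylen: %u (> nodesize? %d)\n", bad,  bad  > nodesize);
    printf("int      keylen: %d (> nodesize? %d)\n", good, good > (int)nodesize);
    return 0;
}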
nsim_bpf_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn)
{
struct nsim_bpf_bound_prog *state;
int ret = 0;
state = env->prog->aux->offload->dev_priv;
if (state->nsim_dev->bpf_bind_verifier_delay && !insn_idx)
msleep(state->nsim_dev->bpf_bind_verifier_delay);
if (insn_idx == env->prog->len - 1) {
pr_vlog(env, "Hello from netdevsim!\n");
if (!state->nsim_dev->bpf_bind_verifier_accept)
ret = -EOPNOTSUPP;
}
return ret;
} | 0 | []
| net | 481221775d53d6215a6e5e9ce1cce6d2b4ab9a46 | 116,010,802,853,077,320,000,000,000,000,000,000,000 | 18 | netdevsim: Zero-initialize memory for new map's value in function nsim_bpf_map_alloc
Zero-initialize the memory for a new map's value in nsim_bpf_map_alloc,
since leaving it uninitialized may cause a kernel information leak, as follows:
1. nsim_bpf_map_alloc calls nsim_map_alloc_elem to allocate elements for
a new map.
2. nsim_map_alloc_elem uses kmalloc to allocate map's value, but doesn't
zero it.
3. A user application can use IOCTL BPF_MAP_LOOKUP_ELEM to get specific
element's information in the map.
4. The kernel function map_lookup_elem will call bpf_map_copy_value to get
the information allocated at step-2, then use copy_to_user to copy to the
user buffer.
This can only leak information for an array map.
Fixes: 395cacb5f1a0 ("netdevsim: bpf: support fake map offload")
Suggested-by: Jakub Kicinski <[email protected]>
Acked-by: Jakub Kicinski <[email protected]>
Signed-off-by: Haimin Zhang <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Jakub Kicinski <[email protected]> |
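A userspace analogue of the fix, as a hedged sketch (calloc standing in for kzalloc). Values later copied out to readers must start zeroed, or stale heap bytes leak:

#include <stdlib.h>

/* Allocate a map value buffer that cannot leak old heap contents
 * when a lookup later copies it back to the caller. */
static void *map_value_alloc(size_t value_size)
{
    return calloc(1, value_size);   /* zeroed, like kzalloc in the fix */
}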
dict_spot_params(const ref * pdict, gs_spot_halftone * psp,
ref * psproc, ref * ptproc, gs_memory_t *mem)
{
int code;
check_dict_read(*pdict);
if ((code = dict_float_param(pdict, "Frequency", 0.0,
&psp->screen.frequency)) != 0 ||
(code = dict_float_param(pdict, "Angle", 0.0,
&psp->screen.angle)) != 0 ||
(code = dict_proc_param(pdict, "SpotFunction", psproc, false)) != 0 ||
(code = dict_bool_param(pdict, "AccurateScreens",
gs_currentaccuratescreens(mem),
&psp->accurate_screens)) < 0 ||
(code = dict_proc_param(pdict, "TransferFunction", ptproc, false)) < 0
)
return (code < 0 ? code : gs_error_undefined);
psp->transfer = (code > 0 ? (gs_mapping_proc) 0 : gs_mapped_transfer);
psp->transfer_closure.proc = 0;
psp->transfer_closure.data = 0;
return 0;
} | 0 | []
| ghostpdl | f5c7555c30393e64ec1f5ab0dfae5b55b3b3fc78 | 319,117,518,936,298,320,000,000,000,000,000,000,000 | 22 | Bug 697203: check for sufficient params in .sethalftone5
and param types |
bgp_apply_next_hop(struct bgp_parse_state *s, rta *a, ip_addr gw, ip_addr ll)
{
struct bgp_proto *p = s->proto;
struct bgp_channel *c = s->channel;
if (c->cf->gw_mode == GW_DIRECT)
{
neighbor *nbr = NULL;
/* GW_DIRECT -> single_hop -> p->neigh != NULL */
if (ipa_nonzero(gw))
nbr = neigh_find(&p->p, gw, NULL, 0);
else if (ipa_nonzero(ll))
nbr = neigh_find(&p->p, ll, p->neigh->iface, 0);
if (!nbr || (nbr->scope == SCOPE_HOST))
WITHDRAW(BAD_NEXT_HOP);
a->dest = RTD_UNICAST;
a->nh.gw = nbr->addr;
a->nh.iface = nbr->iface;
}
else /* GW_RECURSIVE */
{
if (ipa_zero(gw))
WITHDRAW(BAD_NEXT_HOP);
rtable *tab = ipa_is_ip4(gw) ? c->igp_table_ip4 : c->igp_table_ip6;
s->hostentry = rt_get_hostentry(tab, gw, ll, c->c.table);
if (!s->mpls)
rta_apply_hostentry(a, s->hostentry, NULL);
/* With MPLS, hostentry is applied later in bgp_apply_mpls_labels() */
}
} | 0 | [
"CWE-787"
]
| bird | 8388f5a7e14108a1458fea35bfbb5a453e2c563c | 182,309,439,935,956,880,000,000,000,000,000,000,000 | 36 | BGP: Fix bugs in handling of shutdown messages
There is an improper check for valid message size, which may lead to
a stack overflow and buffer contents leaking into the log when a large
message is received.
Thanks to Daniel McCarney for bugreport and analysis. |
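A minimal sketch of a properly bounded shutdown-message read (illustrative, not BIRD's actual parser). The claimed length is validated against both the packet remainder and the destination buffer before copying:

#include <string.h>

/* Returns the message length, or -1 when the claimed length cannot fit. */
static int read_shutdown_msg(const unsigned char *pkt, size_t pkt_len,
                             char *out, size_t out_len)
{
    if (pkt_len < 1 || out_len == 0)
        return -1;
    size_t msg_len = pkt[0];               /* 1-byte length prefix */
    if (msg_len > pkt_len - 1 || msg_len >= out_len)
        return -1;                         /* reject, don't overflow */
    memcpy(out, pkt + 1, msg_len);
    out[msg_len] = '\0';
    return (int)msg_len;
}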
int manager_startup(Manager *m, FILE *serialization, FDSet *fds) {
int r, q;
assert(m);
dual_timestamp_get(&m->generators_start_timestamp);
manager_run_generators(m);
dual_timestamp_get(&m->generators_finish_timestamp);
r = lookup_paths_init(
&m->lookup_paths, m->running_as, true,
m->generator_unit_path,
m->generator_unit_path_early,
m->generator_unit_path_late);
if (r < 0)
return r;
manager_build_unit_path_cache(m);
/* If we will deserialize make sure that during enumeration
* this is already known, so we increase the counter here
* already */
if (serialization)
m->n_reloading ++;
/* First, enumerate what we can from all config files */
dual_timestamp_get(&m->units_load_start_timestamp);
r = manager_enumerate(m);
dual_timestamp_get(&m->units_load_finish_timestamp);
/* Second, deserialize if there is something to deserialize */
if (serialization) {
q = manager_deserialize(m, serialization, fds);
if (q < 0)
r = q;
}
/* Any fds left? Find some unit which wants them. This is
* useful to allow container managers to pass some file
* descriptors to us pre-initialized. This enables
* socket-based activation of entire containers. */
if (fdset_size(fds) > 0) {
q = manager_distribute_fds(m, fds);
if (q < 0)
r = q;
}
/* We might have deserialized the notify fd, but if we didn't
* then let's create the bus now */
manager_setup_notify(m);
/* We might have deserialized the kdbus control fd, but if we
* didn't, then let's create the bus now. */
manager_setup_kdbus(m);
manager_connect_bus(m, !!serialization);
/* Third, fire things up! */
q = manager_coldplug(m);
if (q < 0)
r = q;
if (serialization) {
assert(m->n_reloading > 0);
m->n_reloading --;
/* Let's wait for the UnitNew/JobNew messages being
* sent, before we notify that the reload is
* finished */
m->send_reloading_done = true;
}
return r;
} | 0 | []
| systemd | 5ba6985b6c8ef85a8bcfeb1b65239c863436e75b | 291,229,911,396,152,000,000,000,000,000,000,000,000 | 73 | core: allow PIDs to be watched by two units at the same time
In some cases it is interesting to map a PID to two units at the same
time. For example, when a user logs in via a getty, which is reexeced to
/sbin/login that binary will be explicitly referenced as main pid of the
getty service, as well as implicitly referenced as part of the session
scope. |
static double mp_self_pow(_cimg_math_parser& mp) {
double &val = _mp_arg(1);
return val = std::pow(val,_mp_arg(2));
} | 0 | [
"CWE-125"
]
| CImg | 10af1e8c1ad2a58a0a3342a856bae63e8f257abb | 164,173,910,564,354,210,000,000,000,000,000,000,000 | 4 | Fix other issues in 'CImg<T>::load_bmp()'. |
apr_off_t h2_beam_get_buffered(h2_bucket_beam *beam)
{
apr_bucket *b;
apr_off_t l = 0;
h2_beam_lock bl;
if (beam && enter_yellow(beam, &bl) == APR_SUCCESS) {
for (b = H2_BLIST_FIRST(&beam->send_list);
b != H2_BLIST_SENTINEL(&beam->send_list);
b = APR_BUCKET_NEXT(b)) {
/* should all have determinate length */
l += b->length;
}
leave_yellow(beam, &bl);
}
return l;
} | 0 | [
"CWE-400"
]
| mod_h2 | 83a2e3866918ce6567a683eb4c660688d047ee81 | 171,565,282,080,717,900,000,000,000,000,000,000,000 | 17 | * fixes a race condition where aborting streams triggers an unnecessary timeout. |
gst_h264_parser_insert_sei_internal (GstH264NalParser * nalparser,
guint8 nal_prefix_size, gboolean packetized, GstBuffer * au,
GstMemory * sei)
{
GstH264NalUnit nalu;
GstMapInfo info;
GstH264ParserResult pres;
guint offset = 0;
GstBuffer *new_buffer = NULL;
if (!gst_buffer_map (au, &info, GST_MAP_READ)) {
GST_ERROR ("Cannot map au buffer");
return NULL;
}
/* Find the offset of the first slice */
do {
if (packetized) {
pres = gst_h264_parser_identify_nalu_avc (nalparser,
info.data, offset, info.size, nal_prefix_size, &nalu);
} else {
pres = gst_h264_parser_identify_nalu (nalparser,
info.data, offset, info.size, &nalu);
}
if (pres != GST_H264_PARSER_OK && pres != GST_H264_PARSER_NO_NAL_END) {
GST_DEBUG ("Failed to identify nal unit, ret: %d", pres);
gst_buffer_unmap (au, &info);
return NULL;
}
if ((nalu.type >= GST_H264_NAL_SLICE && nalu.type <= GST_H264_NAL_SLICE_IDR)
|| (nalu.type >= GST_H264_NAL_SLICE_AUX
&& nalu.type <= GST_H264_NAL_SLICE_DEPTH)) {
GST_DEBUG ("Found slice nal type %d at offset %d",
nalu.type, nalu.sc_offset);
break;
}
offset = nalu.offset + nalu.size;
} while (pres == GST_H264_PARSER_OK);
gst_buffer_unmap (au, &info);
/* found the best position now, create new buffer */
new_buffer = gst_buffer_new ();
/* copy all metadata */
if (!gst_buffer_copy_into (new_buffer, au, GST_BUFFER_COPY_METADATA, 0, -1)) {
GST_ERROR ("Failed to copy metadata into new buffer");
gst_clear_buffer (&new_buffer);
goto out;
}
/* copy non-slice nal */
if (nalu.sc_offset > 0) {
if (!gst_buffer_copy_into (new_buffer, au,
GST_BUFFER_COPY_MEMORY, 0, nalu.sc_offset)) {
GST_ERROR ("Failed to copy buffer");
gst_clear_buffer (&new_buffer);
goto out;
}
}
/* insert sei */
gst_buffer_append_memory (new_buffer, gst_memory_ref (sei));
/* copy the rest */
if (!gst_buffer_copy_into (new_buffer, au,
GST_BUFFER_COPY_MEMORY, nalu.sc_offset, -1)) {
GST_ERROR ("Failed to copy buffer");
gst_clear_buffer (&new_buffer);
goto out;
}
out:
return new_buffer;
} | 0 | [
"CWE-787"
]
| gst-plugins-bad | 11353b3f6e2f047cc37483d21e6a37ae558896bc | 1,103,480,087,984,626,500,000,000,000,000,000,000 | 78 | codecparsers: h264parser: guard against ref_pic_markings overflow
Part-of: <https://gitlab.freedesktop.org/gstreamer/gst-plugins-bad/-/merge_requests/1703> |
static void coroutine_fn v9fs_link(void *opaque)
{
V9fsPDU *pdu = opaque;
int32_t dfid, oldfid;
V9fsFidState *dfidp, *oldfidp;
V9fsString name;
size_t offset = 7;
int err = 0;
v9fs_string_init(&name);
err = pdu_unmarshal(pdu, offset, "dds", &dfid, &oldfid, &name);
if (err < 0) {
goto out_nofid;
}
trace_v9fs_link(pdu->tag, pdu->id, dfid, oldfid, name.data);
if (name_is_illegal(name.data)) {
err = -ENOENT;
goto out_nofid;
}
if (!strcmp(".", name.data) || !strcmp("..", name.data)) {
err = -EEXIST;
goto out_nofid;
}
dfidp = get_fid(pdu, dfid);
if (dfidp == NULL) {
err = -ENOENT;
goto out_nofid;
}
oldfidp = get_fid(pdu, oldfid);
if (oldfidp == NULL) {
err = -ENOENT;
goto out;
}
err = v9fs_co_link(pdu, oldfidp, dfidp, &name);
if (!err) {
err = offset;
}
put_fid(pdu, oldfidp);
out:
put_fid(pdu, dfidp);
out_nofid:
v9fs_string_free(&name);
pdu_complete(pdu, err);
} | 0 | [
"CWE-362"
]
| qemu | 89fbea8737e8f7b954745a1ffc4238d377055305 | 69,722,811,981,000,430,000,000,000,000,000,000,000 | 48 | 9pfs: Fully restart unreclaim loop (CVE-2021-20181)
Depending on the client activity, the server can be asked to open a huge
number of file descriptors and eventually hit RLIMIT_NOFILE. This is
currently mitigated using a reclaim logic : the server closes the file
descriptors of idle fids, based on the assumption that it will be able
to re-open them later. This assumption doesn't hold of course if the
client requests the file to be unlinked. In this case, we loop on the
entire fid list and mark all related fids as unreclaimable (the reclaim
logic will just ignore them) and, of course, we open or re-open their
file descriptors if needed since we're about to unlink the file.
This is the purpose of v9fs_mark_fids_unreclaim(). Since the actual
opening of a file can cause the coroutine to yield, another client
request could possibly add a new fid that we may want to mark as
non-reclaimable as well. The loop is thus restarted if the re-open
request was actually transmitted to the backend. This is achieved
by keeping a reference on the first fid (head) before traversing
the list.
This is wrong in several ways:
- a potential clunk request from the client could tear the first
fid down and cause the reference to be stale. This leads to a
use-after-free error that can be detected with ASAN, using a
custom 9p client
- fids are added at the head of the list: restarting from the
previous head will always miss fids added by some other
potential request
All these problems could be avoided if fids were being added at the
end of the list. This can be achieved with a QSIMPLEQ, but this is
probably too much change for a bug fix. For now let's keep it
simple and just restart the loop from the current head.
Fixes: CVE-2021-20181
Buglink: https://bugs.launchpad.net/qemu/+bug/1911666
Reported-by: Zero Day Initiative <[email protected]>
Reviewed-by: Christian Schoenebeck <[email protected]>
Reviewed-by: Stefano Stabellini <[email protected]>
Message-Id: <[email protected]>
Signed-off-by: Greg Kurz <[email protected]> |
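A minimal illustrative sketch of the "restart from the current head" pattern this message describes, not the actual QEMU code; fid_list_head() and reopen_fid() are invented stand-ins for the real fid handling:

struct fid {
    int needs_reopen;
    struct fid *next;
};

extern struct fid *fid_list_head(void);   /* hypothetical accessor */
extern void reopen_fid(struct fid *f);    /* hypothetical; may yield */

static void mark_all_unreclaimable(void)
{
    struct fid *f;
again:
    for (f = fid_list_head(); f != NULL; f = f->next) {
        if (f->needs_reopen) {
            reopen_fid(f);
            /* we may have yielded: the list (and any saved head) can be
             * stale, so restart from the *current* head, never from a
             * pointer captured before the yield */
            goto again;
        }
    }
}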
gnutls_x509_crt_get_dn_oid(gnutls_x509_crt_t cert,
int indx, void *oid, size_t * oid_size)
{
if (cert == NULL) {
gnutls_assert();
return GNUTLS_E_INVALID_REQUEST;
}
return _gnutls_x509_get_dn_oid(cert->cert,
"tbsCertificate.subject.rdnSequence",
indx, oid, oid_size);
} | 0 | [
"CWE-295"
]
| gnutls | 6e76e9b9fa845b76b0b9a45f05f4b54a052578ff | 41,667,073,276,978,853,000,000,000,000,000,000,000 | 12 | on certificate import check whether the two signature algorithms match |
static __cold bool io_poll_remove_all(struct io_ring_ctx *ctx,
struct task_struct *tsk, bool cancel_all)
{
struct hlist_node *tmp;
struct io_kiocb *req;
bool found = false;
int i;
spin_lock(&ctx->completion_lock);
for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
struct hlist_head *list;
list = &ctx->cancel_hash[i];
hlist_for_each_entry_safe(req, tmp, list, hash_node) {
if (io_match_task_safe(req, tsk, cancel_all)) {
hlist_del_init(&req->hash_node);
io_poll_cancel_req(req);
found = true;
}
}
}
spin_unlock(&ctx->completion_lock);
return found; | 0 | [
"CWE-416"
]
| linux | e677edbcabee849bfdd43f1602bccbecf736a646 | 249,694,473,016,181,930,000,000,000,000,000,000,000 | 24 | io_uring: fix race between timeout flush and removal
io_flush_timeouts() assumes the timeout isn't in the process of triggering
or being removed/canceled, so it unconditionally removes it from the
timeout list and attempts to cancel it.
Leave it on the list and let the normal timeout cancelation take care
of it.
Cc: [email protected] # 5.5+
Signed-off-by: Jens Axboe <[email protected]> |
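A rough sketch of the idea, assuming a simple singly linked list; the field and function names are illustrative, not the kernel's:

struct timeout {
    struct timeout *next;
    int in_progress;   /* being fired or removed elsewhere */
};

extern void cancel_timeout(struct timeout *t);   /* hypothetical */

static void flush_timeouts(struct timeout **list)
{
    struct timeout **pp = list;
    while (*pp) {
        struct timeout *t = *pp;
        if (t->in_progress) {
            /* leave it on the list; the normal cancelation path owns it */
            pp = &t->next;
            continue;
        }
        *pp = t->next;       /* detach only when nobody else is touching it */
        cancel_timeout(t);
    }
}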
static JSValue js_bs_put_u64(JSContext *ctx, JSValueConst this_val, int argc, JSValueConst *argv)
{
return js_bs_put_val(ctx, this_val, argc, argv, 9);
} | 0 | [
"CWE-787"
]
| gpac | ea1eca00fd92fa17f0e25ac25652622924a9a6a0 | 193,735,626,946,340,300,000,000,000,000,000,000,000 | 4 | fixed #2138 |
static bool mtrr_lookup_fixed_start(struct mtrr_iter *iter)
{
int seg, index;
if (!fixed_mtrr_is_enabled(iter->mtrr_state))
return false;
seg = fixed_mtrr_addr_to_seg(iter->start);
if (seg < 0)
return false;
iter->fixed = true;
index = fixed_mtrr_addr_seg_to_range_index(iter->start, seg);
iter->index = index;
iter->seg = seg;
return true;
} | 0 | [
"CWE-284"
]
| linux | 9842df62004f366b9fed2423e24df10542ee0dc5 | 288,858,155,934,315,500,000,000,000,000,000,000,000 | 17 | KVM: MTRR: remove MSR 0x2f8
MSR 0x2f8 accessed the 124th Variable Range MTRR ever since MTRR support
was introduced by 9ba075a664df ("KVM: MTRR support").
0x2f8 became harmful when 910a6aae4e2e ("KVM: MTRR: exactly define the
size of variable MTRRs") shrank the array of VR MTRRs from 256 to 8,
which made access to index 124 out of bounds. The surrounding code only
WARNs in this situation, thus the guest gained a limited read/write
access to struct kvm_arch_vcpu.
0x2f8 is not a valid VR MTRR MSR, because KVM has/advertises only 16 VR
MTRR MSRs, 0x200-0x20f. Every VR MTRR is set up using two MSRs, 0x2f8
was treated as a PHYSBASE and 0x2f9 would be its PHYSMASK, but 0x2f9 was
not implemented in KVM, therefore 0x2f8 could never do anything useful
and getting rid of it is safe.
This fixes CVE-2016-3713.
Fixes: 910a6aae4e2e ("KVM: MTRR: exactly define the size of variable MTRRs")
Cc: [email protected]
Reported-by: David Matlack <[email protected]>
Signed-off-by: Andy Honig <[email protected]>
Signed-off-by: Radim Krčmář <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]> |
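A small sketch of the underlying bug class (eight variable-range MTRRs, two MSRs each, starting at 0x200; constants and the helper are illustrative): an index derived from a guest-controlled MSR number must be bounds-checked before it is used to address the array.

#define MTRR_BASE_MSR 0x200u
#define NR_VAR_MTRRS  8u

static int var_mtrr_index(unsigned int msr)
{
    unsigned int idx;

    if (msr < MTRR_BASE_MSR)
        return -1;
    idx = (msr - MTRR_BASE_MSR) / 2;
    if (idx >= NR_VAR_MTRRS)
        return -1;            /* rejects 0x2f8 (index 124) and the like */
    return (int)idx;
}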
static int xfrm_dump_sa_done(struct netlink_callback *cb)
{
struct xfrm_state_walk *walk = (struct xfrm_state_walk *) &cb->args[1];
struct sock *sk = cb->skb->sk;
struct net *net = sock_net(sk);
if (cb->args[0])
xfrm_state_walk_done(walk, net);
return 0;
} | 0 | [
"CWE-284"
]
| linux | 677e806da4d916052585301785d847c3b3e6186a | 104,616,008,331,985,440,000,000,000,000,000,000,000 | 10 | xfrm_user: validate XFRM_MSG_NEWAE XFRMA_REPLAY_ESN_VAL replay_window
When a new xfrm state is created during an XFRM_MSG_NEWSA call we
validate the user supplied replay_esn to ensure that the size is valid
and to ensure that the replay_window size is within the allocated
buffer. However later it is possible to update this replay_esn via a
XFRM_MSG_NEWAE call. There we again validate the size of the supplied
buffer matches the existing state and if so inject the contents. We do
not at this point check that the replay_window is within the allocated
memory. This leads to out-of-bounds reads and writes triggered by
netlink packets. This leads to memory corruption and the potential for
privilege escalation.
We already attempt to validate the incoming replay information in
xfrm_new_ae() via xfrm_replay_verify_len(). This confirms that the user
is not trying to change the size of the replay state buffer which
includes the replay_esn. It does not, however, check that the replay_window
remains within that buffer. Add validation of the contained
replay_window.
CVE-2017-7184
Signed-off-by: Andy Whitcroft <[email protected]>
Acked-by: Steffen Klassert <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
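A simplified sketch of the added validation; the field names mirror the kernel's xfrm_replay_state_esn, but the struct here is illustrative. The window, measured in bits, must fit inside the bitmap that was actually allocated.

struct replay_state_esn {
    unsigned int bmp_len;         /* bitmap length in 32-bit words */
    unsigned int replay_window;   /* window size in bits */
    unsigned int bmp[];           /* flexible-array bitmap */
};

static int replay_verify_window(const struct replay_state_esn *r)
{
    if (r->replay_window > r->bmp_len * 32)
        return -1;    /* would index past the allocated bitmap */
    return 0;
}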
void tr_variantListReserve(tr_variant* list, size_t count)
{
TR_ASSERT(tr_variantIsList(list));
containerReserve(list, count);
} | 0 | [
"CWE-416",
"CWE-284"
]
| transmission | 2123adf8e5e1c2b48791f9d22fc8c747e974180e | 117,678,927,276,968,270,000,000,000,000,000,000,000 | 6 | CVE-2018-10756: Fix heap-use-after-free in tr_variantWalk
In libtransmission/variant.c, function tr_variantWalk, when the variant
stack is reallocated, a pointer to the previously allocated memory
region is kept. This address is later accessed (heap use-after-free)
while walking back down the stack, causing the application to crash.
The application can be any application which uses libtransmission, such
as transmission-daemon, transmission-gtk, transmission-show, etc.
Reported-by: Tom Richards <[email protected]> |
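A self-contained sketch of the bug class, not Transmission's code: after realloc() every pointer into the old block is stale, so positions must be kept as indices or re-derived from the new base.

#include <stdlib.h>

struct stack {
    int *items;
    size_t len, cap;
};

static int stack_push(struct stack *s, int v)
{
    if (s->len == s->cap) {
        size_t ncap = s->cap ? s->cap * 2 : 8;
        int *n = realloc(s->items, ncap * sizeof *n);
        if (!n)
            return -1;
        s->items = n;        /* the old pointer must never be used again */
        s->cap = ncap;
    }
    s->items[s->len++] = v;  /* always index off the current base */
    return 0;
}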
static bool extract_if_dead(struct connectdata *conn,
struct Curl_easy *data)
{
size_t pipeLen = conn->send_pipe.size + conn->recv_pipe.size;
if(!pipeLen && !CONN_INUSE(conn)) {
/* The check for a dead socket makes sense only if there are no
handles in pipeline and the connection isn't already marked in
use */
bool dead;
conn->data = data;
if(conn->handler->connection_check) {
/* The protocol has a special method for checking the state of the
connection. Use it to check if the connection is dead. */
unsigned int state;
state = conn->handler->connection_check(conn, CONNCHECK_ISDEAD);
dead = (state & CONNRESULT_DEAD);
}
else {
/* Use the general method for determining the death of a connection */
dead = SocketIsDead(conn->sock[FIRSTSOCKET]);
}
if(dead) {
infof(data, "Connection %ld seems to be dead!\n", conn->connection_id);
Curl_conncache_remove_conn(conn, FALSE);
conn->data = NULL; /* detach */
return TRUE;
}
}
return FALSE;
} | 0 | [
"CWE-416"
]
| curl | 81d135d67155c5295b1033679c606165d4e28f3f | 54,279,711,811,014,650,000,000,000,000,000,000,000 | 33 | Curl_close: clear data->multi_easy on free to avoid use-after-free
Regression from b46cfbc068 (7.59.0)
CVE-2018-16840
Reported-by: Brian Carpenter (Geeknik Labs)
Bug: https://curl.haxx.se/docs/CVE-2018-16840.html |
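A tiny sketch of the "clear on free" pattern the fix applies; the struct and function names are invented, and only the field name multi_easy is borrowed from the message:

struct multi;                                /* opaque */
extern void free_multi(struct multi *m);     /* hypothetical destructor */

struct easy {
    struct multi *multi_easy;
};

static void close_easy(struct easy *data)
{
    free_multi(data->multi_easy);
    data->multi_easy = NULL;    /* later code now sees NULL, not freed memory */
}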
static int sse8_c(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h)
{
int s, i;
uint32_t *sq = ff_squareTbl + 256;
s = 0;
for (i = 0; i < h; i++) {
s += sq[pix1[0] - pix2[0]];
s += sq[pix1[1] - pix2[1]];
s += sq[pix1[2] - pix2[2]];
s += sq[pix1[3] - pix2[3]];
s += sq[pix1[4] - pix2[4]];
s += sq[pix1[5] - pix2[5]];
s += sq[pix1[6] - pix2[6]];
s += sq[pix1[7] - pix2[7]];
pix1 += line_size;
pix2 += line_size;
}
return s;
} | 0 | [
"CWE-703",
"CWE-189"
]
| FFmpeg | 454a11a1c9c686c78aa97954306fb63453299760 | 246,974,679,230,614,270,000,000,000,000,000,000,000 | 20 | avcodec/dsputil: fix signedness in sizeof() comparisons
Signed-off-by: Michael Niedermayer <[email protected]> |
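A runnable sketch of the signedness pitfall named in the title: sizeof() yields an unsigned size_t, so comparing it against a possibly negative int silently converts the int to a huge unsigned value.

#include <stdio.h>
#include <stddef.h>

int main(void)
{
    int n = -1;
    if (n < (int)sizeof(int))        /* fixed form: compare as signed */
        puts("negative value handled correctly");
    if ((size_t)n > sizeof(int))     /* broken form: -1 wraps to SIZE_MAX */
        puts("-1 compared as a huge unsigned value");
    return 0;
}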
static char *ask_new_shell(char *question, char *oldshell)
{
int len;
char *ans = NULL;
size_t dummy = 0;
if (!oldshell)
oldshell = "";
printf("%s [%s]:", question, oldshell);
putchar(' ');
if (getline(&ans, &dummy, stdin) < 0)
return NULL;
/* remove the newline at the end of ans. */
ltrim_whitespace((unsigned char *) ans);
len = rtrim_whitespace((unsigned char *) ans);
if (len == 0)
return NULL;
return ans;
} | 0 | []
| util-linux | 39a81981ac4b8a1f521db550afc117ccab9548cb | 126,504,633,856,378,580,000,000,000,000,000,000,000 | 21 | chsh, chfn: remove readline support [CVE-2022-0563]
The readline library uses INPUTRC= environment variable to get a path
to the library config file. When the library cannot parse the
specified file, it prints an error message containing data from the
file.
Unfortunately, the library does not use secure_getenv() (or a similar
concept) to avoid vulnerabilities that could occur in set-user-ID or
set-group-ID programs.
Reported-by: Rory Mackie <[email protected]>
Signed-off-by: Karel Zak <[email protected]> |
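A minimal sketch of the mitigation the message alludes to: glibc's secure_getenv() returns NULL when the process is set-user-ID or set-group-ID, so attacker-supplied variables such as INPUTRC are ignored. The fallback path is illustrative.

#define _GNU_SOURCE
#include <stdlib.h>

const char *get_inputrc(void)
{
    const char *p = secure_getenv("INPUTRC");
    return p ? p : "/etc/inputrc";   /* trusted default when running privileged */
}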
static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
{
/* The second termination condition only applies for REPE
* and REPNE. Test if the repeat string operation prefix is
* REPE/REPZ or REPNE/REPNZ and if it's the case it tests the
* corresponding termination condition according to:
* - if REPE/REPZ and ZF = 0 then done
* - if REPNE/REPNZ and ZF = 1 then done
*/
if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
(ctxt->b == 0xae) || (ctxt->b == 0xaf))
&& (((ctxt->rep_prefix == REPE_PREFIX) &&
((ctxt->eflags & EFLG_ZF) == 0))
|| ((ctxt->rep_prefix == REPNE_PREFIX) &&
((ctxt->eflags & EFLG_ZF) == EFLG_ZF))))
return true;
return false;
} | 0 | []
| kvm | e28ba7bb020f07193bc000453c8775e9d2c0dda7 | 96,293,351,979,635,000,000,000,000,000,000,000,000 | 19 | KVM: x86: fix missing checks in syscall emulation
On hosts without this patch, 32bit guests will crash (and 64bit guests
may behave in a wrong way) for example by simply executing the following
nasm demo application:
[bits 32]
global _start
SECTION .text
_start: syscall
(I tested it with winxp and linux - both always crashed)
Disassembly of section .text:
00000000 <_start>:
0: 0f 05 syscall
The reason seems a missing "invalid opcode"-trap (int6) for the
syscall opcode "0f05", which is not available on Intel CPUs
within non-longmodes, as also on some AMD CPUs within legacy-mode.
(depending on CPU vendor, MSR_EFER and cpuid)
Because the previously mentioned OSes may not engage the corresponding
syscall target registers (STAR, LSTAR, CSTAR), they remain
NULL and (non-trapping) syscalls lead to multiple
faults and finally crashes.
Depending on the architecture (AMD or Intel) pretended by
guests, various checks according to vendor's documentation
are implemented to overcome the current issue and behave
like the CPUs physical counterparts.
[mtosatti: cleanup/beautify code]
Signed-off-by: Stephan Baerwolf <[email protected]>
Signed-off-by: Marcelo Tosatti <[email protected]> |
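A rough sketch of the missing checks described above; the helper name and error handling are invented, while the conditions follow the vendor documentation cited in the message:

#define EFER_SCE 0x1ULL                 /* SYSCALL-enable bit in IA32_EFER */

extern int vendor_is_intel(void);       /* hypothetical helper */

/* returns 0 to emulate, -1 to inject #UD instead */
static int syscall_allowed(unsigned long long efer, int long_mode)
{
    if (!(efer & EFER_SCE))
        return -1;
    if (vendor_is_intel() && !long_mode)
        return -1;                      /* Intel: SYSCALL is 64-bit mode only */
    return 0;
}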
std::shared_ptr<Ope> get_core_operator() const { return holder_->ope_; } | 0 | [
"CWE-125"
]
| cpp-peglib | b3b29ce8f3acf3a32733d930105a17d7b0ba347e | 289,951,112,431,601,850,000,000,000,000,000,000,000 | 1 | Fix #122 |
njs_vm_value_string_alloc(njs_vm_t *vm, njs_value_t *value, uint32_t size)
{
return njs_string_alloc(vm, value, size, 0);
} | 0 | [
"CWE-416"
]
| njs | 6a07c2156a07ef307b6dcf3c2ca8571a5f1af7a6 | 299,845,881,370,491,100,000,000,000,000,000,000,000 | 4 | Fixed recursive async function calls.
Previously, the PromiseCapability record was stored (function->context)
directly in the function object during a function invocation. This is
not correct, because the PromiseCapability record should be linked to
the current execution context. As a result, function->context is
overwritten with consecutive recursive calls which results in
use-after-free.
This closes issue #451 on GitHub. |
static void server_stats(ADD_STAT add_stats, conn *c) {
pid_t pid = getpid();
rel_time_t now = current_time;
struct thread_stats thread_stats;
threadlocal_stats_aggregate(&thread_stats);
struct slab_stats slab_stats;
slab_stats_aggregate(&thread_stats, &slab_stats);
#ifdef EXTSTORE
struct extstore_stats st;
#endif
#ifndef WIN32
struct rusage usage;
getrusage(RUSAGE_SELF, &usage);
#endif /* !WIN32 */
STATS_LOCK();
APPEND_STAT("pid", "%lu", (long)pid);
APPEND_STAT("uptime", "%u", now - ITEM_UPDATE_INTERVAL);
APPEND_STAT("time", "%ld", now + (long)process_started);
APPEND_STAT("version", "%s", VERSION);
APPEND_STAT("libevent", "%s", event_get_version());
APPEND_STAT("pointer_size", "%d", (int)(8 * sizeof(void *)));
#ifndef WIN32
append_stat("rusage_user", add_stats, c, "%ld.%06ld",
(long)usage.ru_utime.tv_sec,
(long)usage.ru_utime.tv_usec);
append_stat("rusage_system", add_stats, c, "%ld.%06ld",
(long)usage.ru_stime.tv_sec,
(long)usage.ru_stime.tv_usec);
#endif /* !WIN32 */
APPEND_STAT("max_connections", "%d", settings.maxconns);
APPEND_STAT("curr_connections", "%llu", (unsigned long long)stats_state.curr_conns - 1);
APPEND_STAT("total_connections", "%llu", (unsigned long long)stats.total_conns);
if (settings.maxconns_fast) {
APPEND_STAT("rejected_connections", "%llu", (unsigned long long)stats.rejected_conns);
}
APPEND_STAT("connection_structures", "%u", stats_state.conn_structs);
APPEND_STAT("response_obj_bytes", "%llu", (unsigned long long)thread_stats.response_obj_bytes);
APPEND_STAT("response_obj_total", "%llu", (unsigned long long)thread_stats.response_obj_total);
APPEND_STAT("response_obj_free", "%llu", (unsigned long long)thread_stats.response_obj_free);
APPEND_STAT("response_obj_oom", "%llu", (unsigned long long)thread_stats.response_obj_oom);
APPEND_STAT("read_buf_bytes", "%llu", (unsigned long long)thread_stats.read_buf_bytes);
APPEND_STAT("read_buf_bytes_free", "%llu", (unsigned long long)thread_stats.read_buf_bytes_free);
APPEND_STAT("read_buf_oom", "%llu", (unsigned long long)thread_stats.read_buf_oom);
APPEND_STAT("reserved_fds", "%u", stats_state.reserved_fds);
APPEND_STAT("cmd_get", "%llu", (unsigned long long)thread_stats.get_cmds);
APPEND_STAT("cmd_set", "%llu", (unsigned long long)slab_stats.set_cmds);
APPEND_STAT("cmd_flush", "%llu", (unsigned long long)thread_stats.flush_cmds);
APPEND_STAT("cmd_touch", "%llu", (unsigned long long)thread_stats.touch_cmds);
APPEND_STAT("cmd_meta", "%llu", (unsigned long long)thread_stats.meta_cmds);
APPEND_STAT("get_hits", "%llu", (unsigned long long)slab_stats.get_hits);
APPEND_STAT("get_misses", "%llu", (unsigned long long)thread_stats.get_misses);
APPEND_STAT("get_expired", "%llu", (unsigned long long)thread_stats.get_expired);
APPEND_STAT("get_flushed", "%llu", (unsigned long long)thread_stats.get_flushed);
#ifdef EXTSTORE
if (c->thread->storage) {
APPEND_STAT("get_extstore", "%llu", (unsigned long long)thread_stats.get_extstore);
APPEND_STAT("get_aborted_extstore", "%llu", (unsigned long long)thread_stats.get_aborted_extstore);
APPEND_STAT("get_oom_extstore", "%llu", (unsigned long long)thread_stats.get_oom_extstore);
APPEND_STAT("recache_from_extstore", "%llu", (unsigned long long)thread_stats.recache_from_extstore);
APPEND_STAT("miss_from_extstore", "%llu", (unsigned long long)thread_stats.miss_from_extstore);
APPEND_STAT("badcrc_from_extstore", "%llu", (unsigned long long)thread_stats.badcrc_from_extstore);
}
#endif
APPEND_STAT("delete_misses", "%llu", (unsigned long long)thread_stats.delete_misses);
APPEND_STAT("delete_hits", "%llu", (unsigned long long)slab_stats.delete_hits);
APPEND_STAT("incr_misses", "%llu", (unsigned long long)thread_stats.incr_misses);
APPEND_STAT("incr_hits", "%llu", (unsigned long long)slab_stats.incr_hits);
APPEND_STAT("decr_misses", "%llu", (unsigned long long)thread_stats.decr_misses);
APPEND_STAT("decr_hits", "%llu", (unsigned long long)slab_stats.decr_hits);
APPEND_STAT("cas_misses", "%llu", (unsigned long long)thread_stats.cas_misses);
APPEND_STAT("cas_hits", "%llu", (unsigned long long)slab_stats.cas_hits);
APPEND_STAT("cas_badval", "%llu", (unsigned long long)slab_stats.cas_badval);
APPEND_STAT("touch_hits", "%llu", (unsigned long long)slab_stats.touch_hits);
APPEND_STAT("touch_misses", "%llu", (unsigned long long)thread_stats.touch_misses);
APPEND_STAT("auth_cmds", "%llu", (unsigned long long)thread_stats.auth_cmds);
APPEND_STAT("auth_errors", "%llu", (unsigned long long)thread_stats.auth_errors);
if (settings.idle_timeout) {
APPEND_STAT("idle_kicks", "%llu", (unsigned long long)thread_stats.idle_kicks);
}
APPEND_STAT("bytes_read", "%llu", (unsigned long long)thread_stats.bytes_read);
APPEND_STAT("bytes_written", "%llu", (unsigned long long)thread_stats.bytes_written);
APPEND_STAT("limit_maxbytes", "%llu", (unsigned long long)settings.maxbytes);
APPEND_STAT("accepting_conns", "%u", stats_state.accepting_conns);
APPEND_STAT("listen_disabled_num", "%llu", (unsigned long long)stats.listen_disabled_num);
APPEND_STAT("time_in_listen_disabled_us", "%llu", stats.time_in_listen_disabled_us);
APPEND_STAT("threads", "%d", settings.num_threads);
APPEND_STAT("conn_yields", "%llu", (unsigned long long)thread_stats.conn_yields);
APPEND_STAT("hash_power_level", "%u", stats_state.hash_power_level);
APPEND_STAT("hash_bytes", "%llu", (unsigned long long)stats_state.hash_bytes);
APPEND_STAT("hash_is_expanding", "%u", stats_state.hash_is_expanding);
if (settings.slab_reassign) {
APPEND_STAT("slab_reassign_rescues", "%llu", stats.slab_reassign_rescues);
APPEND_STAT("slab_reassign_chunk_rescues", "%llu", stats.slab_reassign_chunk_rescues);
APPEND_STAT("slab_reassign_evictions_nomem", "%llu", stats.slab_reassign_evictions_nomem);
APPEND_STAT("slab_reassign_inline_reclaim", "%llu", stats.slab_reassign_inline_reclaim);
APPEND_STAT("slab_reassign_busy_items", "%llu", stats.slab_reassign_busy_items);
APPEND_STAT("slab_reassign_busy_deletes", "%llu", stats.slab_reassign_busy_deletes);
APPEND_STAT("slab_reassign_running", "%u", stats_state.slab_reassign_running);
APPEND_STAT("slabs_moved", "%llu", stats.slabs_moved);
}
if (settings.lru_crawler) {
APPEND_STAT("lru_crawler_running", "%u", stats_state.lru_crawler_running);
APPEND_STAT("lru_crawler_starts", "%u", stats.lru_crawler_starts);
}
if (settings.lru_maintainer_thread) {
APPEND_STAT("lru_maintainer_juggles", "%llu", (unsigned long long)stats.lru_maintainer_juggles);
}
APPEND_STAT("malloc_fails", "%llu",
(unsigned long long)stats.malloc_fails);
APPEND_STAT("log_worker_dropped", "%llu", (unsigned long long)stats.log_worker_dropped);
APPEND_STAT("log_worker_written", "%llu", (unsigned long long)stats.log_worker_written);
APPEND_STAT("log_watcher_skipped", "%llu", (unsigned long long)stats.log_watcher_skipped);
APPEND_STAT("log_watcher_sent", "%llu", (unsigned long long)stats.log_watcher_sent);
STATS_UNLOCK();
#ifdef EXTSTORE
if (c->thread->storage) {
STATS_LOCK();
APPEND_STAT("extstore_compact_lost", "%llu", (unsigned long long)stats.extstore_compact_lost);
APPEND_STAT("extstore_compact_rescues", "%llu", (unsigned long long)stats.extstore_compact_rescues);
APPEND_STAT("extstore_compact_skipped", "%llu", (unsigned long long)stats.extstore_compact_skipped);
STATS_UNLOCK();
extstore_get_stats(c->thread->storage, &st);
APPEND_STAT("extstore_page_allocs", "%llu", (unsigned long long)st.page_allocs);
APPEND_STAT("extstore_page_evictions", "%llu", (unsigned long long)st.page_evictions);
APPEND_STAT("extstore_page_reclaims", "%llu", (unsigned long long)st.page_reclaims);
APPEND_STAT("extstore_pages_free", "%llu", (unsigned long long)st.pages_free);
APPEND_STAT("extstore_pages_used", "%llu", (unsigned long long)st.pages_used);
APPEND_STAT("extstore_objects_evicted", "%llu", (unsigned long long)st.objects_evicted);
APPEND_STAT("extstore_objects_read", "%llu", (unsigned long long)st.objects_read);
APPEND_STAT("extstore_objects_written", "%llu", (unsigned long long)st.objects_written);
APPEND_STAT("extstore_objects_used", "%llu", (unsigned long long)st.objects_used);
APPEND_STAT("extstore_bytes_evicted", "%llu", (unsigned long long)st.bytes_evicted);
APPEND_STAT("extstore_bytes_written", "%llu", (unsigned long long)st.bytes_written);
APPEND_STAT("extstore_bytes_read", "%llu", (unsigned long long)st.bytes_read);
APPEND_STAT("extstore_bytes_used", "%llu", (unsigned long long)st.bytes_used);
APPEND_STAT("extstore_bytes_fragmented", "%llu", (unsigned long long)st.bytes_fragmented);
APPEND_STAT("extstore_limit_maxbytes", "%llu", (unsigned long long)(st.page_count * st.page_size));
APPEND_STAT("extstore_io_queue", "%llu", (unsigned long long)(st.io_queue));
}
#endif
#ifdef TLS
if (settings.ssl_enabled) {
APPEND_STAT("ssl_handshake_errors", "%llu", (unsigned long long)stats.ssl_handshake_errors);
APPEND_STAT("time_since_server_cert_refresh", "%u", now - settings.ssl_last_cert_refresh_time);
}
#endif
} | 0 | []
| memcached | f249724cedcab6605ca8a0769ac4b356a8124f63 | 295,084,132,285,811,000,000,000,000,000,000,000,000 | 152 | crash fix: errstr wasn't initialized in metaget
if meta_flag_preparse bailed out early it would try to read
uninitialized memory. |
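A minimal sketch of the fix class (names are illustrative): initialize the error buffer before any code path can read it, so an early bail-out never prints stack garbage.

static void process_meta_command(void)
{
    char errstr[64] = "";   /* zero-initialized up front */
    /* ... even a path that bails out early may now read errstr safely ... */
    (void)errstr;           /* keeps this sketch warning-free */
}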
add_font_path_args (FlatpakBwrap *bwrap)
{
g_autoptr(GString) xml_snippet = g_string_new ("");
gchar *path_build_tmp = NULL;
g_autoptr(GFile) user_font1 = NULL;
g_autoptr(GFile) user_font2 = NULL;
g_autoptr(GFile) user_font_cache = NULL;
g_auto(GStrv) system_cache_dirs = NULL;
gboolean found_cache = FALSE;
int i;
g_string_append (xml_snippet,
"<?xml version=\"1.0\"?>\n"
"<!DOCTYPE fontconfig SYSTEM \"fonts.dtd\">\n"
"<fontconfig>\n");
if (g_file_test (SYSTEM_FONTS_DIR, G_FILE_TEST_EXISTS))
{
flatpak_bwrap_add_args (bwrap,
"--ro-bind", SYSTEM_FONTS_DIR, "/run/host/fonts",
NULL);
g_string_append_printf (xml_snippet,
"\t<remap-dir as-path=\"%s\">/run/host/fonts</remap-dir>\n",
SYSTEM_FONTS_DIR);
}
if (g_file_test ("/usr/local/share/fonts", G_FILE_TEST_EXISTS))
{
flatpak_bwrap_add_args (bwrap,
"--ro-bind", "/usr/local/share/fonts", "/run/host/local-fonts",
NULL);
g_string_append_printf (xml_snippet,
"\t<remap-dir as-path=\"%s\">/run/host/local-fonts</remap-dir>\n",
"/usr/local/share/fonts");
}
system_cache_dirs = g_strsplit (SYSTEM_FONT_CACHE_DIRS, ":", 0);
for (i = 0; system_cache_dirs[i] != NULL; i++)
{
if (g_file_test (system_cache_dirs[i], G_FILE_TEST_EXISTS))
{
flatpak_bwrap_add_args (bwrap,
"--ro-bind", system_cache_dirs[i], "/run/host/fonts-cache",
NULL);
found_cache = TRUE;
break;
}
}
if (!found_cache)
{
/* We ensure these directories are never writable, or fontconfig
will use them to write the default cache */
flatpak_bwrap_add_args (bwrap,
"--tmpfs", "/run/host/fonts-cache",
"--remount-ro", "/run/host/fonts-cache",
NULL);
}
path_build_tmp = g_build_filename (g_get_user_data_dir (), "fonts", NULL);
user_font1 = g_file_new_for_path (path_build_tmp);
g_clear_pointer (&path_build_tmp, g_free);
path_build_tmp = g_build_filename (g_get_home_dir (), ".fonts", NULL);
user_font2 = g_file_new_for_path (path_build_tmp);
g_clear_pointer (&path_build_tmp, g_free);
if (g_file_query_exists (user_font1, NULL))
{
flatpak_bwrap_add_args (bwrap,
"--ro-bind", flatpak_file_get_path_cached (user_font1), "/run/host/user-fonts",
NULL);
g_string_append_printf (xml_snippet,
"\t<remap-dir as-path=\"%s\">/run/host/user-fonts</remap-dir>\n",
flatpak_file_get_path_cached (user_font1));
}
else if (g_file_query_exists (user_font2, NULL))
{
flatpak_bwrap_add_args (bwrap,
"--ro-bind", flatpak_file_get_path_cached (user_font2), "/run/host/user-fonts",
NULL);
g_string_append_printf (xml_snippet,
"\t<remap-dir as-path=\"%s\">/run/host/user-fonts</remap-dir>\n",
flatpak_file_get_path_cached (user_font2));
}
path_build_tmp = g_build_filename (g_get_user_cache_dir (), "fontconfig", NULL);
user_font_cache = g_file_new_for_path (path_build_tmp);
g_clear_pointer (&path_build_tmp, g_free);
if (g_file_query_exists (user_font_cache, NULL))
{
flatpak_bwrap_add_args (bwrap,
"--ro-bind", flatpak_file_get_path_cached (user_font_cache), "/run/host/user-fonts-cache",
NULL);
}
else
{
/* We ensure these directories are never writable, or fontconfig
will use them to write the default cache */
flatpak_bwrap_add_args (bwrap,
"--tmpfs", "/run/host/user-fonts-cache",
"--remount-ro", "/run/host/user-fonts-cache",
NULL);
}
g_string_append (xml_snippet,
"</fontconfig>\n");
if (!flatpak_bwrap_add_args_data (bwrap, "font-dirs.xml", xml_snippet->str, xml_snippet->len, "/run/host/font-dirs.xml", NULL))
g_warning ("Unable to add fontconfig data snippet");
} | 0 | [
"CWE-94",
"CWE-74"
]
| flatpak | 6d1773d2a54dde9b099043f07a2094a4f1c2f486 | 246,646,040,156,255,770,000,000,000,000,000,000,000 | 113 | run: Convert all environment variables into bwrap arguments
This avoids some of them being filtered out by a setuid bwrap. It also
means that if they came from an untrusted source, they cannot be used
to inject arbitrary code into a non-setuid bwrap via mechanisms like
LD_PRELOAD.
Because they get bundled into a memfd or temporary file, they do not
actually appear in argv, ensuring that they remain inaccessible to
processes running under a different uid (which is important if their
values are tokens or other secrets).
Signed-off-by: Simon McVittie <[email protected]>
Part-of: https://github.com/flatpak/flatpak/security/advisories/GHSA-4ppf-fxf6-vxg2 |
int ssl3_write_bytes(SSL *s, int type, const void *buf_, int len)
{
const unsigned char *buf = buf_;
int tot;
unsigned int n, nw;
#if !defined(OPENSSL_NO_MULTIBLOCK) && EVP_CIPH_FLAG_TLS1_1_MULTIBLOCK
unsigned int max_send_fragment;
#endif
SSL3_BUFFER *wb = &(s->s3->wbuf);
int i;
s->rwstate = SSL_NOTHING;
OPENSSL_assert(s->s3->wnum <= INT_MAX);
tot = s->s3->wnum;
s->s3->wnum = 0;
if (SSL_in_init(s) && !s->in_handshake) {
i = s->handshake_func(s);
if (i < 0)
return (i);
if (i == 0) {
SSLerr(SSL_F_SSL3_WRITE_BYTES, SSL_R_SSL_HANDSHAKE_FAILURE);
return -1;
}
}
/*
* ensure that if we end up with a smaller value of data to write out
* than the original len from a write which didn't complete for
* non-blocking I/O and also somehow ended up avoiding the check for
* this in ssl3_write_pending/SSL_R_BAD_WRITE_RETRY as it must never be
* possible to end up with (len-tot) as a large number that will then
* promptly send beyond the end of the user's buffer ... so we trap and
* report the error in a way the user will notice
*/
if ((len < tot) || ((wb->left != 0) && (len < (tot + s->s3->wpend_tot)))) {
SSLerr(SSL_F_SSL3_WRITE_BYTES, SSL_R_BAD_LENGTH);
return (-1);
}
/*
* first check if there is a SSL3_BUFFER still being written out. This
* will happen with non blocking IO
*/
if (wb->left != 0) {
i = ssl3_write_pending(s, type, &buf[tot], s->s3->wpend_tot);
if (i <= 0) {
/* XXX should we ssl3_release_write_buffer if i<0? */
s->s3->wnum = tot;
return i;
}
tot += i; /* this might be last fragment */
}
#if !defined(OPENSSL_NO_MULTIBLOCK) && EVP_CIPH_FLAG_TLS1_1_MULTIBLOCK
/*
* Depending on platform multi-block can deliver several *times*
* better performance. Downside is that it has to allocate
* jumbo buffer to accommodate up to 8 records, but the
* compromise is considered worthy.
*/
if (type == SSL3_RT_APPLICATION_DATA &&
len >= 4 * (int)(max_send_fragment = s->max_send_fragment) &&
s->compress == NULL && s->msg_callback == NULL &&
SSL_USE_EXPLICIT_IV(s) &&
s->enc_write_ctx != NULL &&
EVP_CIPHER_flags(s->enc_write_ctx->cipher) &
EVP_CIPH_FLAG_TLS1_1_MULTIBLOCK) {
unsigned char aad[13];
EVP_CTRL_TLS1_1_MULTIBLOCK_PARAM mb_param;
int packlen;
/* minimize address aliasing conflicts */
if ((max_send_fragment & 0xfff) == 0)
max_send_fragment -= 512;
if (tot == 0 || wb->buf == NULL) { /* allocate jumbo buffer */
ssl3_release_write_buffer(s);
packlen = EVP_CIPHER_CTX_ctrl(s->enc_write_ctx,
EVP_CTRL_TLS1_1_MULTIBLOCK_MAX_BUFSIZE,
max_send_fragment, NULL);
if (len >= 8 * (int)max_send_fragment)
packlen *= 8;
else
packlen *= 4;
wb->buf = OPENSSL_malloc(packlen);
if (!wb->buf) {
SSLerr(SSL_F_SSL3_WRITE_BYTES, ERR_R_MALLOC_FAILURE);
return -1;
}
wb->len = packlen;
} else if (tot == len) { /* done? */
OPENSSL_free(wb->buf); /* free jumbo buffer */
wb->buf = NULL;
return tot;
}
n = (len - tot);
for (;;) {
if (n < 4 * max_send_fragment) {
OPENSSL_free(wb->buf); /* free jumbo buffer */
wb->buf = NULL;
break;
}
if (s->s3->alert_dispatch) {
i = s->method->ssl_dispatch_alert(s);
if (i <= 0) {
s->s3->wnum = tot;
return i;
}
}
if (n >= 8 * max_send_fragment)
nw = max_send_fragment * (mb_param.interleave = 8);
else
nw = max_send_fragment * (mb_param.interleave = 4);
memcpy(aad, s->s3->write_sequence, 8);
aad[8] = type;
aad[9] = (unsigned char)(s->version >> 8);
aad[10] = (unsigned char)(s->version);
aad[11] = 0;
aad[12] = 0;
mb_param.out = NULL;
mb_param.inp = aad;
mb_param.len = nw;
packlen = EVP_CIPHER_CTX_ctrl(s->enc_write_ctx,
EVP_CTRL_TLS1_1_MULTIBLOCK_AAD,
sizeof(mb_param), &mb_param);
if (packlen <= 0 || packlen > (int)wb->len) { /* never happens */
OPENSSL_free(wb->buf); /* free jumbo buffer */
wb->buf = NULL;
break;
}
mb_param.out = wb->buf;
mb_param.inp = &buf[tot];
mb_param.len = nw;
if (EVP_CIPHER_CTX_ctrl(s->enc_write_ctx,
EVP_CTRL_TLS1_1_MULTIBLOCK_ENCRYPT,
sizeof(mb_param), &mb_param) <= 0)
return -1;
s->s3->write_sequence[7] += mb_param.interleave;
if (s->s3->write_sequence[7] < mb_param.interleave) {
int j = 6;
while (j >= 0 && (++s->s3->write_sequence[j--]) == 0) ;
}
wb->offset = 0;
wb->left = packlen;
s->s3->wpend_tot = nw;
s->s3->wpend_buf = &buf[tot];
s->s3->wpend_type = type;
s->s3->wpend_ret = nw;
i = ssl3_write_pending(s, type, &buf[tot], nw);
if (i <= 0) {
if (i < 0 && (!s->wbio || !BIO_should_retry(s->wbio))) {
OPENSSL_free(wb->buf);
wb->buf = NULL;
}
s->s3->wnum = tot;
return i;
}
if (i == (int)n) {
OPENSSL_free(wb->buf); /* free jumbo buffer */
wb->buf = NULL;
return tot + i;
}
n -= i;
tot += i;
}
} else
#endif
if (tot == len) { /* done? */
if (s->mode & SSL_MODE_RELEASE_BUFFERS && !SSL_IS_DTLS(s))
ssl3_release_write_buffer(s);
return tot;
}
n = (len - tot);
for (;;) {
if (n > s->max_send_fragment)
nw = s->max_send_fragment;
else
nw = n;
i = do_ssl3_write(s, type, &(buf[tot]), nw, 0);
if (i <= 0) {
/* XXX should we ssl3_release_write_buffer if i<0? */
s->s3->wnum = tot;
return i;
}
if ((i == (int)n) ||
(type == SSL3_RT_APPLICATION_DATA &&
(s->mode & SSL_MODE_ENABLE_PARTIAL_WRITE))) {
/*
* next chunk of data should get another prepended empty fragment
* in ciphersuites with known-IV weakness:
*/
s->s3->empty_fragment_done = 0;
if ((i == (int)n) && s->mode & SSL_MODE_RELEASE_BUFFERS &&
!SSL_IS_DTLS(s))
ssl3_release_write_buffer(s);
return tot + i;
}
n -= i;
tot += i;
}
} | 0 | [
"CWE-200",
"CWE-203"
]
| openssl | e9bbefbf0f24c57645e7ad6a5a71ae649d18ac8e | 73,473,330,674,849,810,000,000,000,000,000,000,000 | 223 | Go into the error state if a fatal alert is sent or received
If an application calls SSL_shutdown after a fatal alert has occurred and
then behaves differently based on error codes from that function, then the
application may be vulnerable to a padding oracle.
CVE-2019-1559
Reviewed-by: Richard Levitte <[email protected]> |
ReadImage (FILE *fp,
tga_info *info,
const gchar *filename)
{
static gint32 image_ID;
gint32 layer_ID;
GimpPixelRgn pixel_rgn;
GimpDrawable *drawable;
guchar *data, *buffer, *row;
GimpImageType dtype = 0;
GimpImageBaseType itype = 0;
gint i, y;
gint max_tileheight, tileheight;
guint cmap_bytes = 0;
guchar *tga_cmap = NULL;
guchar *gimp_cmap = NULL;
guchar *convert_cmap = NULL;
switch (info->imageType)
{
case TGA_TYPE_MAPPED:
cmap_bytes = (info->colorMapSize + 7 ) / 8;
tga_cmap = g_new (guchar, info->colorMapLength * cmap_bytes);
if (info->colorMapSize > 24)
{
/* indexed + full alpha => promoted to RGBA */
itype = GIMP_RGB;
dtype = GIMP_RGBA_IMAGE;
convert_cmap = g_new (guchar, info->colorMapLength * 4);
}
else if (info->colorMapIndex + info->colorMapLength > 256)
{
/* more than 256 colormap entries => promoted to RGB */
itype = GIMP_RGB;
dtype = GIMP_RGB_IMAGE;
convert_cmap = g_new (guchar, info->colorMapLength * 3);
}
else if (info->alphaBits > 0)
{
/* if alpha exists here, promote to RGB */
itype = GIMP_RGB;
dtype = GIMP_RGBA_IMAGE;
convert_cmap = g_new (guchar, info->colorMapLength * 4);
}
else
{
itype = GIMP_INDEXED;
dtype = GIMP_INDEXED_IMAGE;
gimp_cmap = g_new (guchar, info->colorMapLength * 3);
}
break;
case TGA_TYPE_GRAY:
itype = GIMP_GRAY;
if (info->alphaBits)
dtype = GIMP_GRAYA_IMAGE;
else
dtype = GIMP_GRAY_IMAGE;
break;
case TGA_TYPE_COLOR:
itype = GIMP_RGB;
if (info->alphaBits)
dtype = GIMP_RGBA_IMAGE;
else
dtype = GIMP_RGB_IMAGE;
break;
}
/* Handle colormap */
if (info->imageType == TGA_TYPE_MAPPED)
{
if (cmap_bytes <= 4 &&
fread (tga_cmap, info->colorMapLength * cmap_bytes, 1, fp) == 1)
{
if (convert_cmap)
{
if (info->colorMapSize == 32)
bgr2rgb (convert_cmap, tga_cmap,
info->colorMapLength, cmap_bytes, 1);
else if (info->colorMapSize == 24)
bgr2rgb (convert_cmap, tga_cmap,
info->colorMapLength, cmap_bytes, 0);
else if (info->colorMapSize == 16 || info->colorMapSize == 15)
upsample (convert_cmap, tga_cmap,
info->colorMapLength, cmap_bytes, info->alphaBits);
else
{
g_message ("Unsupported colormap depth: %u",
info->colorMapSize);
return -1;
}
}
else
{
if (info->colorMapSize == 24)
bgr2rgb (gimp_cmap, tga_cmap,
info->colorMapLength, cmap_bytes, 0);
else if (info->colorMapSize == 16 || info->colorMapSize == 15)
upsample (gimp_cmap, tga_cmap,
info->colorMapLength, cmap_bytes, info->alphaBits);
else
{
g_message ("Unsupported colormap depth: %u",
info->colorMapSize);
return -1;
}
}
}
else
{
g_message ("File '%s' is truncated or corrupted",
gimp_filename_to_utf8 (filename));
return -1;
}
}
image_ID = gimp_image_new (info->width, info->height, itype);
gimp_image_set_filename (image_ID, filename);
if (gimp_cmap)
gimp_image_set_colormap (image_ID, gimp_cmap, info->colorMapLength);
layer_ID = gimp_layer_new (image_ID,
_("Background"),
info->width, info->height,
dtype, 100,
GIMP_NORMAL_MODE);
gimp_image_insert_layer (image_ID, layer_ID, -1, 0);
drawable = gimp_drawable_get (layer_ID);
/* Prepare the pixel region. */
gimp_pixel_rgn_init (&pixel_rgn, drawable, 0, 0,
info->width, info->height, TRUE, FALSE);
/* Allocate the data. */
max_tileheight = gimp_tile_height ();
data = g_new (guchar, info->width * max_tileheight * drawable->bpp);
buffer = g_new (guchar, info->width * info->bytes);
if (info->flipVert)
{
for (i = 0; i < info->height; i += tileheight)
{
tileheight = i ? max_tileheight : (info->height % max_tileheight);
if (tileheight == 0)
tileheight = max_tileheight;
for (y = 1; y <= tileheight; ++y)
{
row = data + (info->width * drawable->bpp * (tileheight - y));
read_line (fp, row, buffer, info, drawable, convert_cmap);
}
gimp_progress_update ((gdouble) (i + tileheight) /
(gdouble) info->height);
gimp_pixel_rgn_set_rect (&pixel_rgn, data, 0,
info->height - i - tileheight,
info->width, tileheight);
}
}
else
{
for (i = 0; i < info->height; i += max_tileheight)
{
tileheight = MIN (max_tileheight, info->height - i);
for (y = 0; y < tileheight; ++y)
{
row = data + (info->width * drawable->bpp * y);
read_line (fp, row, buffer, info, drawable, convert_cmap);
}
gimp_progress_update ((gdouble) (i + tileheight) /
(gdouble) info->height);
gimp_pixel_rgn_set_rect (&pixel_rgn, data, 0, i,
info->width, tileheight);
}
}
gimp_progress_update (1.0);
g_free (data);
g_free (buffer);
g_free (convert_cmap);
g_free (gimp_cmap);
g_free (tga_cmap);
gimp_drawable_flush (drawable);
gimp_drawable_detach (drawable);
return image_ID;
} /*read_image*/ | 0 | [
"CWE-125"
]
| GIMP | 22e2571c25425f225abdb11a566cc281fca6f366 | 324,149,756,710,289,100,000,000,000,000,000,000,000 | 202 | plug-ins: TGA 16-bit RGB (without alpha bit) is also valid.
According to some spec on the web, 16-bit RGB is also valid. In this
case, the last bit is simply ignored (at least that's how it is
implemented right now).
(cherry picked from commit 8ea316667c8a3296bce2832b3986b58d0fdfc077) |
static int atusb_get_and_show_revision(struct atusb *atusb)
{
struct usb_device *usb_dev = atusb->usb_dev;
char *hw_name;
unsigned char *buffer;
int ret;
buffer = kmalloc(3, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
/* Get a couple of the ATMega Firmware values */
ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0),
ATUSB_ID, ATUSB_REQ_FROM_DEV, 0, 0,
buffer, 3, 1000);
if (ret >= 0) {
atusb->fw_ver_maj = buffer[0];
atusb->fw_ver_min = buffer[1];
atusb->fw_hw_type = buffer[2];
switch (atusb->fw_hw_type) {
case ATUSB_HW_TYPE_100813:
case ATUSB_HW_TYPE_101216:
case ATUSB_HW_TYPE_110131:
hw_name = "ATUSB";
atusb->data = &atusb_chip_data;
break;
case ATUSB_HW_TYPE_RZUSB:
hw_name = "RZUSB";
atusb->data = &atusb_chip_data;
break;
case ATUSB_HW_TYPE_HULUSB:
hw_name = "HULUSB";
atusb->data = &hulusb_chip_data;
break;
default:
hw_name = "UNKNOWN";
atusb->err = -ENOTSUPP;
ret = -ENOTSUPP;
break;
}
dev_info(&usb_dev->dev,
"Firmware: major: %u, minor: %u, hardware type: %s (%d)\n",
atusb->fw_ver_maj, atusb->fw_ver_min, hw_name,
atusb->fw_hw_type);
}
if (atusb->fw_ver_maj == 0 && atusb->fw_ver_min < 2) {
dev_info(&usb_dev->dev,
"Firmware version (%u.%u) predates our first public release.",
atusb->fw_ver_maj, atusb->fw_ver_min);
dev_info(&usb_dev->dev, "Please update to version 0.2 or newer");
}
kfree(buffer);
return ret;
} | 0 | [
"CWE-416"
]
| linux | 7fd25e6fc035f4b04b75bca6d7e8daa069603a76 | 283,309,853,548,557,220,000,000,000,000,000,000,000 | 57 | ieee802154: atusb: fix use-after-free at disconnect
The disconnect callback was accessing the hardware-descriptor private
data after having freed it.
Fixes: 7490b008d123 ("ieee802154: add support for atusb transceiver")
Cc: stable <[email protected]> # 4.2
Cc: Alexander Aring <[email protected]>
Reported-by: [email protected]
Signed-off-by: Johan Hovold <[email protected]>
Signed-off-by: Stefan Schmidt <[email protected]> |
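A small sketch of the ordering rule behind the fix (hw_free() and log_disconnect() are invented): copy out anything needed from the private data before it is freed, never after.

struct priv {
    int dev_id;
};

extern void hw_free(struct priv *p);   /* hypothetical: frees p */
extern void log_disconnect(int id);

static void disconnect(struct priv *p)
{
    int id = p->dev_id;   /* read what we need first */
    hw_free(p);           /* from here on, p is gone */
    log_disconnect(id);   /* safe: uses the saved copy */
}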
TEST_F(RouterTest, TimeoutBudgetHistogramStatOnlyGlobal) {
NiceMock<Http::MockRequestEncoder> encoder;
Http::ResponseDecoder* response_decoder = nullptr;
EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_, newStream(_, _))
.WillOnce(Invoke(
[&](Http::ResponseDecoder& decoder,
Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {
response_decoder = &decoder;
callbacks.onPoolReady(encoder, cm_.thread_local_cluster_.conn_pool_.host_,
upstream_stream_info_, Http::Protocol::Http10);
return nullptr;
}));
expectPerTryTimerCreate();
Http::TestRequestHeaderMapImpl headers{{"x-envoy-upstream-rq-timeout-ms", "200"}};
HttpTestUtility::addDefaultHeaders(headers);
router_.decodeHeaders(headers, false);
Buffer::OwnedImpl data;
router_.decodeData(data, true);
EXPECT_EQ(1U,
callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
// Global timeout budget used.
EXPECT_CALL(
cm_.thread_local_cluster_.cluster_.info_->timeout_budget_stats_store_,
deliverHistogramToSinks(
Property(&Stats::Metric::name, "upstream_rq_timeout_budget_percent_used"), 40ull));
// Per-try budget used is zero out of an infinite timeout.
EXPECT_CALL(
cm_.thread_local_cluster_.cluster_.info_->timeout_budget_stats_store_,
deliverHistogramToSinks(
Property(&Stats::Metric::name, "upstream_rq_timeout_budget_per_try_percent_used"), 0ull));
Http::ResponseHeaderMapPtr response_headers(
new Http::TestResponseHeaderMapImpl{{":status", "200"}});
response_decoder->decodeHeaders(std::move(response_headers), false);
test_time_.advanceTimeWait(std::chrono::milliseconds(80));
response_decoder->decodeData(data, true);
} | 0 | [
"CWE-703"
]
| envoy | 18871dbfb168d3512a10c78dd267ff7c03f564c6 | 224,580,825,111,665,400,000,000,000,000,000,000,000 | 39 | [1.18] CVE-2022-21655
Crash with direct_response
Signed-off-by: Otto van der Schaaf <[email protected]> |
nm_utils_dnsmasq_status_to_string(int status, char *dest, gsize size)
{
const char *msg;
nm_utils_to_string_buffer_init(&dest, &size);
if (status == 0)
msg = "Success";
else if (status == 1)
msg = "Configuration problem";
else if (status == 2)
msg = "Network access problem (address in use, permissions)";
else if (status == 3)
msg = "Filesystem problem (missing file/directory, permissions)";
else if (status == 4)
msg = "Memory allocation failure";
else if (status == 5)
msg = "Other problem";
else if (status >= 11) {
g_snprintf(dest, size, "Lease script failed with error %d", status - 10);
return dest;
} else
msg = "Unknown problem";
g_snprintf(dest, size, "%s (%d)", msg, status);
return dest;
} | 0 | [
"CWE-20"
]
| NetworkManager | 420784e342da4883f6debdfe10cde68507b10d27 | 82,027,437,504,517,100,000,000,000,000,000,000,000 | 27 | core: fix crash in nm_wildcard_match_check()
It's not entirely clear how to treat %NULL.
Clearly "match.interface-name=eth0" should not
match with an interface %NULL. But what about
"match.interface-name=!eth0"? It's now implemented
that negative matches still succeed against %NULL.
What about "match.interface-name=*"? That probably
should also match with %NULL. So we treat %NULL really
like "".
Against commit 11cd443448bc ('iwd: Don't call IWD methods when device
unmanaged'), we got this backtrace:
#0 0x00007f1c164069f1 in __strnlen_avx2 () at ../sysdeps/x86_64/multiarch/strlen-avx2.S:62
#1 0x00007f1c1637ac9e in __fnmatch (pattern=<optimized out>, string=<optimized out>, string@entry=0x0, flags=flags@entry=0) at fnmatch.c:379
p = 0x0
res = <optimized out>
orig_pattern = <optimized out>
n = <optimized out>
wpattern = 0x7fff8d860730 L"pci-0000:03:00.0"
ps = {__count = 0, __value = {__wch = 0, __wchb = "\000\000\000"}}
wpattern_malloc = 0x0
wstring_malloc = 0x0
wstring = <optimized out>
alloca_used = 80
__PRETTY_FUNCTION__ = "__fnmatch"
#2 0x0000564484a978bf in nm_wildcard_match_check (str=0x0, patterns=<optimized out>, num_patterns=<optimized out>) at src/core/nm-core-utils.c:1959
is_inverted = 0
is_mandatory = 0
match = <optimized out>
p = 0x564486c43fa0 "pci-0000:03:00.0"
has_optional = 0
has_any_optional = 0
i = <optimized out>
#3 0x0000564484bf4797 in check_connection_compatible (self=<optimized out>, connection=<optimized out>, error=0x0) at src/core/devices/nm-device.c:7499
patterns = <optimized out>
device_driver = 0x564486c76bd0 "veth"
num_patterns = 1
priv = 0x564486cbe0b0
__func__ = "check_connection_compatible"
device_iface = <optimized out>
local = 0x564486c99a60
conn_iface = 0x0
klass = <optimized out>
s_match = 0x564486c63df0 [NMSettingMatch]
#4 0x0000564484c38491 in check_connection_compatible (device=0x564486cbe590 [NMDeviceVeth], connection=0x564486c6b160, error=0x0) at src/core/devices/nm-device-ethernet.c:348
self = 0x564486cbe590 [NMDeviceVeth]
s_wired = <optimized out>
Fixes: 3ced486f4162 ('libnm/match: extend syntax for match patterns with '|', '&', '!' and '\\'')
https://bugzilla.redhat.com/show_bug.cgi?id=1942741 |
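A minimal sketch of the chosen semantics, assuming plain fnmatch(): treat a NULL string as "" so the matcher never passes NULL to fnmatch() and negative patterns behave consistently.

#include <fnmatch.h>

static int wildcard_match(const char *pattern, const char *str)
{
    return fnmatch(pattern, str ? str : "", 0) == 0;
}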
static js_Ast *newexp(js_State *J)
{
js_Ast *a, *b;
if (jsP_accept(J, TK_NEW)) {
a = memberexp(J);
if (jsP_accept(J, '(')) {
b = arguments(J);
jsP_expect(J, ')');
return EXP2(NEW, a, b);
}
return EXP1(NEW, a);
}
if (jsP_accept(J, TK_FUNCTION))
return funexp(J);
return primary(J);
} | 0 | [
"CWE-674"
]
| mujs | 4d45a96e57fbabf00a7378b337d0ddcace6f38c1 | 171,594,737,259,083,900,000,000,000,000,000,000,000 | 19 | Guard binary expressions from too much recursion. |
apr_byte_t oidc_get_provider_from_session(request_rec *r, oidc_cfg *c,
oidc_session_t *session, oidc_provider_t **provider) {
oidc_debug(r, "enter");
/* get the issuer value from the session state */
const char *issuer = oidc_session_get_issuer(r, session);
if (issuer == NULL) {
oidc_warn(r, "empty or invalid session: no issuer found");
return FALSE;
}
/* get the provider info associated with the issuer value */
oidc_provider_t *p = oidc_get_provider_for_issuer(r, c, issuer, FALSE);
if (p == NULL) {
oidc_error(r, "session corrupted: no provider found for issuer: %s",
issuer);
return FALSE;
}
*provider = p;
return TRUE;
} | 0 | [
"CWE-79"
]
| mod_auth_openidc | 55ea0a085290cd2c8cdfdd960a230cbc38ba8b56 | 46,202,633,556,846,170,000,000,000,000,000,000,000 | 24 | Add a function to escape Javascript characters |
int dns_packet_extract(DnsPacket *p) {
_cleanup_(dns_question_unrefp) DnsQuestion *question = NULL;
_cleanup_(dns_answer_unrefp) DnsAnswer *answer = NULL;
_cleanup_(rewind_dns_packet) DnsPacketRewinder rewinder = {};
unsigned n, i;
int r;
if (p->extracted)
return 0;
INIT_REWINDER(rewinder, p);
dns_packet_rewind(p, DNS_PACKET_HEADER_SIZE);
n = DNS_PACKET_QDCOUNT(p);
if (n > 0) {
question = dns_question_new(n);
if (!question)
return -ENOMEM;
for (i = 0; i < n; i++) {
_cleanup_(dns_resource_key_unrefp) DnsResourceKey *key = NULL;
bool cache_flush;
r = dns_packet_read_key(p, &key, &cache_flush, NULL);
if (r < 0)
return r;
if (cache_flush)
return -EBADMSG;
if (!dns_type_is_valid_query(key->type))
return -EBADMSG;
r = dns_question_add(question, key);
if (r < 0)
return r;
}
}
n = DNS_PACKET_RRCOUNT(p);
if (n > 0) {
_cleanup_(dns_resource_record_unrefp) DnsResourceRecord *previous = NULL;
bool bad_opt = false;
answer = dns_answer_new(n);
if (!answer)
return -ENOMEM;
for (i = 0; i < n; i++) {
_cleanup_(dns_resource_record_unrefp) DnsResourceRecord *rr = NULL;
bool cache_flush = false;
r = dns_packet_read_rr(p, &rr, &cache_flush, NULL);
if (r < 0)
return r;
/* Try to reduce memory usage a bit */
if (previous)
dns_resource_key_reduce(&rr->key, &previous->key);
if (rr->key->type == DNS_TYPE_OPT) {
bool has_rfc6975;
if (p->opt || bad_opt) {
/* Multiple OPT RRs? if so, let's ignore all, because there's something wrong
* with the server, and if one is valid we wouldn't know which one. */
log_debug("Multiple OPT RRs detected, ignoring all.");
bad_opt = true;
continue;
}
if (!dns_name_is_root(dns_resource_key_name(rr->key))) {
/* If the OPT RR is not owned by the root domain, then it is bad, let's ignore
* it. */
log_debug("OPT RR is not owned by root domain, ignoring.");
bad_opt = true;
continue;
}
if (i < DNS_PACKET_ANCOUNT(p) + DNS_PACKET_NSCOUNT(p)) {
/* OPT RR is in the wrong section? Some Belkin routers do this. This is a hint
* the EDNS implementation is borked, like the Belkin one is, hence ignore
* it. */
log_debug("OPT RR in wrong section, ignoring.");
bad_opt = true;
continue;
}
if (!opt_is_good(rr, &has_rfc6975)) {
log_debug("Malformed OPT RR, ignoring.");
bad_opt = true;
continue;
}
if (DNS_PACKET_QR(p)) {
/* Additional checks for responses */
if (!DNS_RESOURCE_RECORD_OPT_VERSION_SUPPORTED(rr)) {
/* If this is a reply and we don't know the EDNS version then something
* is weird... */
log_debug("EDNS version newer that our request, bad server.");
return -EBADMSG;
}
if (has_rfc6975) {
/* If the OPT RR contains RFC6975 algorithm data, then this is indication that
* the server just copied the OPT it got from us (which contained that data)
* back into the reply. If so, then it doesn't properly support EDNS, as
* RFC6975 makes it very clear that the algorithm data should only be contained
* in questions, never in replies. Crappy Belkin routers copy the OPT data for
* example, hence let's detect this so that we downgrade early. */
log_debug("OPT RR contained RFC6975 data, ignoring.");
bad_opt = true;
continue;
}
}
p->opt = dns_resource_record_ref(rr);
} else {
/* According to RFC 4795, section 2.9. only the RRs from the Answer section shall be
* cached. Hence mark only those RRs as cacheable by default, but not the ones from the
* Additional or Authority sections. */
r = dns_answer_add(answer, rr, p->ifindex,
(i < DNS_PACKET_ANCOUNT(p) ? DNS_ANSWER_CACHEABLE : 0) |
(p->protocol == DNS_PROTOCOL_MDNS && !cache_flush ? DNS_ANSWER_SHARED_OWNER : 0));
if (r < 0)
return r;
}
/* Remember this RR, so that we can potentially merge its ->key object with the next RR. Note
* that we only do this if we actually decided to keep the RR around. */
dns_resource_record_unref(previous);
previous = dns_resource_record_ref(rr);
}
if (bad_opt)
p->opt = dns_resource_record_unref(p->opt);
}
p->question = question;
question = NULL;
p->answer = answer;
answer = NULL;
p->extracted = true;
/* no CANCEL, always rewind */
return 0;
} | 0 | [
"CWE-20",
"CWE-476"
]
| systemd | a924f43f30f9c4acaf70618dd2a055f8b0f166be | 174,000,044,265,771,900,000,000,000,000,000,000,000 | 152 | resolved: bugfix of null pointer p->question dereferencing (#6020)
See https://bugs.launchpad.net/ubuntu/+source/systemd/+bug/1621396 |
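A minimal sketch of the guard the fix class adds; the types and the helper are illustrative. A reply whose question section is empty leaves the pointer NULL, so consumers must check it before dereferencing.

struct question;
struct key;

extern int question_matches(const struct question *q, const struct key *k);

struct packet {
    struct question *question;   /* NULL when QDCOUNT was 0 */
};

static int packet_matches(const struct packet *p, const struct key *k)
{
    if (!p->question)
        return 0;                /* nothing to compare against */
    return question_matches(p->question, k);
}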
ptp_unpack_uint32_t_array(PTPParams *params, unsigned char* data, unsigned int offset, unsigned int datalen, uint32_t **array)
{
uint32_t n, i=0;
if (!data)
return 0;
if (offset >= datalen)
return 0;
if (offset + sizeof(uint32_t) > datalen)
return 0;
*array = NULL;
n=dtoh32a(&data[offset]);
if (n >= UINT_MAX/sizeof(uint32_t))
return 0;
if (!n)
return 0;
if (offset + sizeof(uint32_t)*(n+1) > datalen) {
ptp_debug (params, "array runs over datalen buffer end (%d vs %d)", offset + sizeof(uint32_t)*(n+1), datalen);
return 0;
}
*array = malloc (n*sizeof(uint32_t));
if (!*array)
return 0;
for (i=0;i<n;i++)
(*array)[i]=dtoh32a(&data[offset+(sizeof(uint32_t)*(i+1))]);
return n;
} | 0 | [
"CWE-190"
]
| libgphoto2 | 203df81b9d97e820411e1eb94ae08139af73bbd0 | 91,956,132,027,864,650,000,000,000,000,000,000,000 | 32 | check for an integer overflow in ptp_unpack_OPL |
static void page_cache_pipe_buf_release(struct pipe_inode_info *pipe,
struct pipe_buffer *buf)
{
put_page(buf->page);
buf->flags &= ~PIPE_BUF_FLAG_LRU;
} | 0 | [
"CWE-416"
]
| linux | 15fab63e1e57be9fdb5eec1bbc5916e9825e9acb | 12,709,243,667,426,930,000,000,000,000,000,000,000 | 6 | fs: prevent page refcount overflow in pipe_buf_get
Change pipe_buf_get() to return a bool indicating whether it succeeded
in raising the refcount of the page (if the thing in the pipe is a page).
This removes another mechanism for overflowing the page refcount. All
callers converted to handle a failure.
Reported-by: Jann Horn <[email protected]>
Signed-off-by: Matthew Wilcox <[email protected]>
Cc: [email protected]
Signed-off-by: Linus Torvalds <[email protected]> |
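A sketch of the interface change described above; the limit macro is illustrative. The get operation reports failure instead of letting the refcount overflow, and every caller must handle that failure.

#include <stdbool.h>

#define REFCOUNT_MAX 0x7fffffffu   /* illustrative saturation limit */

static bool page_ref_try_get(unsigned int *refcount)
{
    if (*refcount >= REFCOUNT_MAX)
        return false;              /* caller must not use the page */
    (*refcount)++;
    return true;
}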
int X509_sign_ctx(X509 *x, EVP_MD_CTX *ctx)
{
x->cert_info->enc.modified = 1;
return ASN1_item_sign_ctx(ASN1_ITEM_rptr(X509_CINF),
x->cert_info->signature,
x->sig_alg, x->signature, x->cert_info, ctx);
} | 0 | [
"CWE-310"
]
| openssl | 684400ce192dac51df3d3e92b61830a6ef90be3e | 312,154,560,771,135,500,000,000,000,000,000,000,000 | 7 | Fix various certificate fingerprint issues.
By using non-DER or invalid encodings outside the signed portion of a
certificate the fingerprint can be changed without breaking the signature.
Although no details of the signed portion of the certificate can be changed
this can cause problems with some applications: e.g. those using the
certificate fingerprint for blacklists.
1. Reject signatures with non-zero unused bits.
If the BIT STRING containing the signature has non-zero unused bits, reject
the signature. All current signature algorithms require zero unused bits.
2. Check certificate algorithm consistency.
Check the AlgorithmIdentifier inside TBS matches the one in the
certificate signature. NB: this will result in signature failure
errors for some broken certificates.
3. Check DSA/ECDSA signatures use DER.
Reencode DSA/ECDSA signatures and compare with the original received
signature. Return an error if there is a mismatch.
This will reject various cases including garbage after signature
(thanks to Antti Karjalainen and Tuomo Untinen from the Codenomicon CROSS
program for discovering this case) and use of BER or invalid ASN.1 INTEGERs
(negative or with leading zeroes).
CVE-2014-8275
Reviewed-by: Emilia Käsper <[email protected]> |
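A sketch of check (1) from the list above: in a DER-encoded BIT STRING the first content octet is the count of unused trailing bits, and all current signature algorithms require it to be zero. The helper is illustrative, not OpenSSL's API.

static int sig_bitstring_ok(const unsigned char *content, size_t len)
{
    /* content/len are the BIT STRING's content octets */
    return len > 0 && content[0] == 0;
}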
static int ZEND_FASTCALL ZEND_FETCH_OBJ_FUNC_ARG_SPEC_UNUSED_CONST_HANDLER(ZEND_OPCODE_HANDLER_ARGS)
{
zend_op *opline = EX(opline);
if (ARG_SHOULD_BE_SENT_BY_REF(EX(fbc), opline->extended_value)) {
/* Behave like FETCH_OBJ_W */
zend_free_op free_op1;
zval *property = &opline->op2.u.constant;
zval **container = _get_obj_zval_ptr_ptr_unused(TSRMLS_C);
if (0) {
MAKE_REAL_ZVAL_PTR(property);
}
if (IS_UNUSED == IS_VAR && !container) {
zend_error_noreturn(E_ERROR, "Cannot use string offset as an object");
}
zend_fetch_property_address(&EX_T(opline->result.u.var), container, property, BP_VAR_W TSRMLS_CC);
if (0) {
zval_ptr_dtor(&property);
} else {
}
if (IS_UNUSED == IS_VAR && 0 &&
READY_TO_DESTROY(free_op1.var)) {
AI_USE_PTR(EX_T(opline->result.u.var).var);
if (!PZVAL_IS_REF(*EX_T(opline->result.u.var).var.ptr_ptr) &&
Z_REFCOUNT_PP(EX_T(opline->result.u.var).var.ptr_ptr) > 2) {
SEPARATE_ZVAL(EX_T(opline->result.u.var).var.ptr_ptr);
}
}
ZEND_VM_NEXT_OPCODE();
} else {
return zend_fetch_property_address_read_helper_SPEC_UNUSED_CONST(BP_VAR_R, ZEND_OPCODE_HANDLER_ARGS_PASSTHRU);
}
} | 0 | []
| php-src | ce96fd6b0761d98353761bf78d5bfb55291179fd | 58,148,279,711,879,880,000,000,000,000,000,000,000 | 36 | - fix #39863, do not accept paths with NULL in them. See http://news.php.net/php.internals/50191, trunk will have the patch later (adding a macro and/or changing (some) APIs). Patch by Rasmus |
static int handle_set_cr0(struct kvm_vcpu *vcpu, unsigned long val)
{
if (is_guest_mode(vcpu)) {
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
unsigned long orig_val = val;
/*
* We get here when L2 changed cr0 in a way that did not change
* any of L1's shadowed bits (see nested_vmx_exit_handled_cr),
* but did change L0 shadowed bits. So we first calculate the
* effective cr0 value that L1 would like to write into the
* hardware. It consists of the L2-owned bits from the new
* value combined with the L1-owned bits from L1's guest_cr0.
*/
val = (val & ~vmcs12->cr0_guest_host_mask) |
(vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask);
if (!nested_cr0_valid(vmcs12, val))
return 1;
if (kvm_set_cr0(vcpu, val))
return 1;
vmcs_writel(CR0_READ_SHADOW, orig_val);
return 0;
} else {
if (to_vmx(vcpu)->nested.vmxon &&
((val & VMXON_CR0_ALWAYSON) != VMXON_CR0_ALWAYSON))
return 1;
return kvm_set_cr0(vcpu, val);
}
} | 0 | []
| kvm | a642fc305053cc1c6e47e4f4df327895747ab485 | 221,992,151,894,748,860,000,000,000,000,000,000,000 | 31 | kvm: vmx: handle invvpid vm exit gracefully
On systems with invvpid instruction support (corresponding bit in
IA32_VMX_EPT_VPID_CAP MSR is set) guest invocation of invvpid
causes vm exit, which is currently not handled and results in
propagation of unknown exit to userspace.
Fix this by installing an invvpid vm exit handler.
This is CVE-2014-3646.
Cc: [email protected]
Signed-off-by: Petr Matousek <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]> |
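The masked combination in handle_set_cr0() above is easier to see with concrete numbers. A standalone sketch with invented values (the CR0 bit choices are for illustration only):

#include <stdio.h>

int main(void)
{
    unsigned long cr0_guest_host_mask = 0x1UL;  /* L1 owns just this bit */
    unsigned long guest_cr0           = 0x21UL; /* L1's shadowed value   */
    unsigned long val                 = 0x20UL; /* L2's attempted write  */

    /* L2-owned bits come from the new value, L1-owned bits from L1's
     * guest_cr0 - the same expression used in handle_set_cr0(). */
    unsigned long effective = (val & ~cr0_guest_host_mask) |
                              (guest_cr0 & cr0_guest_host_mask);

    printf("effective cr0 = %#lx\n", effective);  /* prints 0x21 */
    return 0;
}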
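The fix the message describes follows KVM's usual stub-handler pattern: register a handler for the exit reason so the exit is consumed in the kernel instead of being forwarded to userspace as unknown. A sketch in that style; injecting #UD is my assumption about the handler body, since nested VPID is not exposed to the guest:

/* Sketch of an INVVPID exit handler (illustrative, not a verbatim copy
 * of the patch): the instruction is invalid from the guest's point of
 * view, so reflect it back as an undefined-opcode exception. */
static int handle_invvpid(struct kvm_vcpu *vcpu)
{
	kvm_queue_exception(vcpu, UD_VECTOR);
	return 1;	/* exit handled, resume the guest */
}

/* ...wired into the exit dispatch table alongside the other handlers:
 *	[EXIT_REASON_INVVPID] = handle_invvpid,
 */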
yang_fill_extcomplex_flags(struct lys_ext_instance_complex *ext, char *parent_name, char *node_name,
                           LY_STMT stmt, uint16_t value, uint16_t mask)
{
    uint16_t *data;
    struct lyext_substmt *info;

    /* locate the flag storage for this substatement in the complex extension instance */
    data = lys_ext_complex_get_substmt(stmt, ext, &info);
    if (!data) {
        LOGVAL(ext->module->ctx, LYE_INCHILDSTMT, LY_VLOG_NONE, NULL, node_name, parent_name);
        return EXIT_FAILURE;
    }
    /* a single-instance substatement must not already have a value under its mask */
    if (info->cardinality < LY_STMT_CARD_SOME && (*data & mask)) {
        LOGVAL(ext->module->ctx, LYE_TOOMANY, LY_VLOG_NONE, NULL, node_name, parent_name);
        return EXIT_FAILURE;
    }
    *data |= value;
    return EXIT_SUCCESS;
} | 0 | [
"CWE-415"
]
| libyang | d9feacc4a590d35dbc1af21caf9080008b4450ed | 302,089,872,588,620,500,000,000,000,000,000,000,000 | 19 | yang parser BUGFIX double free
Fixes #742 |
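The cardinality test in yang_fill_extcomplex_flags() is a reusable set-once pattern: a bit field records which substatements have been seen, and a masked test rejects a second occurrence of a single-instance statement. A standalone sketch with hypothetical flag values:

#include <stdint.h>
#include <stdio.h>

#define CFG_MASK  0x0003u   /* hypothetical: bits reserved for 'config' */
#define CFG_TRUE  0x0001u
#define CFG_FALSE 0x0002u

/* Set a once-only flag; fail if any bit under its mask is already set. */
static int set_once(uint16_t *flags, uint16_t value, uint16_t mask)
{
    if (*flags & mask) {
        fprintf(stderr, "duplicate statement\n");
        return 1;
    }
    *flags |= value;
    return 0;
}

int main(void)
{
    uint16_t flags = 0;
    set_once(&flags, CFG_TRUE, CFG_MASK);          /* accepted          */
    return set_once(&flags, CFG_FALSE, CFG_MASK);  /* rejected: exits 1 */
}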
static inline int copy_user_to_xregs(struct xregs_state __user *buf, u64 mask)
{
	struct xregs_state *xstate = ((__force struct xregs_state *)buf);
	u32 lmask = mask;		/* low 32 bits of the feature mask (EAX)  */
	u32 hmask = mask >> 32;		/* high 32 bits of the feature mask (EDX) */
	int err;

	stac();				/* permit supervisor access to user memory (SMAP) */
	XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);
	clac();				/* restore SMAP protection */

	return err;
} | 0 | [
"CWE-119",
"CWE-732",
"CWE-787"
]
| linux | 59c4bd853abcea95eccc167a7d7fd5f1a5f47b98 | 115,214,848,502,669,600,000,000,000,000,000,000,000 | 13 | x86/fpu: Don't cache access to fpu_fpregs_owner_ctx
The state/owner of the FPU is saved to fpu_fpregs_owner_ctx by pointing
to the context that is currently loaded. It never changed during the
lifetime of a task - it remained stable/constant.
After deferred FPU registers loading until return to userland was
implemented, the content of fpu_fpregs_owner_ctx may change during
preemption and must not be cached.
This went unnoticed for some time, but has now surfaced because gcc 9
caches that load in copy_fpstate_to_sigframe() and reuses it in the
retry loop:
copy_fpstate_to_sigframe()
load fpu_fpregs_owner_ctx and save on stack
fpregs_lock()
copy_fpregs_to_sigframe() /* failed */
fpregs_unlock()
*** PREEMPTION, another uses FPU, changes fpu_fpregs_owner_ctx ***
fault_in_pages_writeable() /* succeed, retry */
fpregs_lock()
__fpregs_load_activate()
fpregs_state_valid() /* uses fpu_fpregs_owner_ctx from stack */
copy_fpregs_to_sigframe() /* succeeds, random FPU content */
This is a comparison of the assembly produced by gcc 9, without vs with this
patch:
| # arch/x86/kernel/fpu/signal.c:173: if (!access_ok(buf, size))
| cmpq %rdx, %rax # tmp183, _4
| jb .L190 #,
|-# arch/x86/include/asm/fpu/internal.h:512: return fpu == this_cpu_read_stable(fpu_fpregs_owner_ctx) && cpu == fpu->last_cpu;
|-#APP
|-# 512 "arch/x86/include/asm/fpu/internal.h" 1
|- movq %gs:fpu_fpregs_owner_ctx,%rax #, pfo_ret__
|-# 0 "" 2
|-#NO_APP
|- movq %rax, -88(%rbp) # pfo_ret__, %sfp
…
|-# arch/x86/include/asm/fpu/internal.h:512: return fpu == this_cpu_read_stable(fpu_fpregs_owner_ctx) && cpu == fpu->last_cpu;
|- movq -88(%rbp), %rcx # %sfp, pfo_ret__
|- cmpq %rcx, -64(%rbp) # pfo_ret__, %sfp
|+# arch/x86/include/asm/fpu/internal.h:512: return fpu == this_cpu_read(fpu_fpregs_owner_ctx) && cpu == fpu->last_cpu;
|+#APP
|+# 512 "arch/x86/include/asm/fpu/internal.h" 1
|+ movq %gs:fpu_fpregs_owner_ctx(%rip),%rax # fpu_fpregs_owner_ctx, pfo_ret__
|+# 0 "" 2
|+# arch/x86/include/asm/fpu/internal.h:512: return fpu == this_cpu_read(fpu_fpregs_owner_ctx) && cpu == fpu->last_cpu;
|+#NO_APP
|+ cmpq %rax, -64(%rbp) # pfo_ret__, %sfp
Use this_cpu_read() instead this_cpu_read_stable() to avoid caching of
fpu_fpregs_owner_ctx during preemption points.
The Fixes: tag points to the commit where deferred FPU loading was
added. Since this commit, the compiler is no longer allowed to move the
load of fpu_fpregs_owner_ctx somewhere else / outside of the locked
section. A task preemption will change its value and stale content will
be observed.
[ bp: Massage. ]
Debugged-by: Austin Clements <[email protected]>
Debugged-by: David Chase <[email protected]>
Debugged-by: Ian Lance Taylor <[email protected]>
Fixes: 5f409e20b7945 ("x86/fpu: Defer FPU state load until return to userspace")
Signed-off-by: Sebastian Andrzej Siewior <[email protected]>
Signed-off-by: Borislav Petkov <[email protected]>
Reviewed-by: Rik van Riel <[email protected]>
Tested-by: Borislav Petkov <[email protected]>
Cc: Aubrey Li <[email protected]>
Cc: Austin Clements <[email protected]>
Cc: Barret Rhoden <[email protected]>
Cc: Dave Hansen <[email protected]>
Cc: David Chase <[email protected]>
Cc: "H. Peter Anvin" <[email protected]>
Cc: [email protected]
Cc: Ingo Molnar <[email protected]>
Cc: Josh Bleecher Snyder <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: x86-ml <[email protected]>
Link: https://lkml.kernel.org/r/[email protected]
Link: https://bugzilla.kernel.org/show_bug.cgi?id=205663 |
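The bug class described above is generic: a value that can change at any preemption point must be reloaded after that point, and an over-strong "stable" annotation licenses the compiler to reuse a stale copy. A userspace-flavored C sketch of the distinction (names are hypothetical, not kernel API):

struct fpu;
extern struct fpu *fpu_owner;   /* stand-in for fpu_fpregs_owner_ctx */

/* Like this_cpu_read(): force a fresh load on every call. */
static inline struct fpu *read_owner(void)
{
    return *(struct fpu * volatile *)&fpu_owner;
}

/* The check must re-read the owner each time it runs: between two
 * calls, preemption may have handed the FPU to another task, which is
 * exactly what the cached this_cpu_read_stable() load missed. */
static inline int fpregs_state_valid(const struct fpu *fpu)
{
    return fpu == read_owner();
}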