CVE ID | CVE Page | CWE ID | codeLink | commit_id | commit_message | func_after | func_before | lang | project | vul
---|---|---|---|---|---|---|---|---|---|---|
CVE-2013-1828
|
https://www.cvedetails.com/cve/CVE-2013-1828/
|
CWE-20
|
https://github.com/torvalds/linux/commit/726bc6b092da4c093eb74d13c07184b18c1af0f1
|
726bc6b092da4c093eb74d13c07184b18c1af0f1
|
net/sctp: Validate parameter size for SCTP_GET_ASSOC_STATS
Building sctp may fail with:
In function ‘copy_from_user’,
inlined from ‘sctp_getsockopt_assoc_stats’ at
net/sctp/socket.c:5656:20:
arch/x86/include/asm/uaccess_32.h:211:26: error: call to
‘copy_from_user_overflow’ declared with attribute error: copy_from_user()
buffer size is not provably correct
if built with W=1 due to a missing parameter size validation
before the call to copy_from_user.
Signed-off-by: Guenter Roeck <[email protected]>
Acked-by: Vlad Yasevich <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
SCTP_STATIC int sctp_setsockopt_connectx(struct sock* sk,
struct sockaddr __user *addrs,
int addrs_size)
{
sctp_assoc_t assoc_id = 0;
int err = 0;
err = __sctp_setsockopt_connectx(sk, addrs, addrs_size, &assoc_id);
if (err)
return err;
else
return assoc_id;
}
|
SCTP_STATIC int sctp_setsockopt_connectx(struct sock* sk,
struct sockaddr __user *addrs,
int addrs_size)
{
sctp_assoc_t assoc_id = 0;
int err = 0;
err = __sctp_setsockopt_connectx(sk, addrs, addrs_size, &assoc_id);
if (err)
return err;
else
return assoc_id;
}
|
C
|
linux
| 0 |
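The commit referenced above (726bc6b0) adds a parameter-size validation before copy_from_user in sctp_getsockopt_assoc_stats; the func columns in this row show an unrelated, unchanged function. A minimal userspace sketch of the validate-then-clamp pattern the commit message describes, with illustrative struct and function names (not the actual kernel code):

```c
#include <errno.h>
#include <stdint.h>
#include <string.h>

/* Illustrative stand-in for the kernel-side stats struct. */
struct assoc_stats {
    uint32_t assoc_id;
    uint64_t counters[8];
};

/* Reject buffers smaller than the mandatory leading field, and clamp the
 * copy length to the destination size so memcpy (standing in for
 * copy_from_user) is provably in bounds. */
static int get_assoc_stats(struct assoc_stats *out,
                           const void *user_buf, size_t user_len)
{
    struct assoc_stats sas;

    memset(&sas, 0, sizeof(sas));
    if (user_len < sizeof(sas.assoc_id))
        return -EINVAL;                 /* too small to be meaningful */
    if (user_len > sizeof(sas))
        user_len = sizeof(sas);         /* allow the struct to grow */
    memcpy(&sas, user_buf, user_len);
    *out = sas;
    return 0;
}
```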
null | null | null |
https://github.com/chromium/chromium/commit/87c15175997b0103166020d79fe9048dcf4025f4
|
87c15175997b0103166020d79fe9048dcf4025f4
|
Add support for horizontal mouse wheel messages in Windows Desktop Aura.
This is simply a matter of recognizing the WM_MOUSEHWHEEL message as a valid mouse wheel message.
Tested this on web pages with horizontal scrollbars and it works well.
BUG=332797
[email protected], sky
Review URL: https://codereview.chromium.org/140653006
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@245651 0039d316-1c4b-4281-b951-d872f2087c98
|
EventType EventTypeFromNative(const base::NativeEvent& native_event) {
switch (native_event.message) {
case WM_KEYDOWN:
case WM_SYSKEYDOWN:
case WM_CHAR:
return ET_KEY_PRESSED;
case WM_KEYUP:
case WM_SYSKEYUP:
return ET_KEY_RELEASED;
case WM_LBUTTONDBLCLK:
case WM_LBUTTONDOWN:
case WM_MBUTTONDBLCLK:
case WM_MBUTTONDOWN:
case WM_NCLBUTTONDBLCLK:
case WM_NCLBUTTONDOWN:
case WM_NCMBUTTONDBLCLK:
case WM_NCMBUTTONDOWN:
case WM_NCRBUTTONDBLCLK:
case WM_NCRBUTTONDOWN:
case WM_NCXBUTTONDBLCLK:
case WM_NCXBUTTONDOWN:
case WM_RBUTTONDBLCLK:
case WM_RBUTTONDOWN:
case WM_XBUTTONDBLCLK:
case WM_XBUTTONDOWN:
return ET_MOUSE_PRESSED;
case WM_LBUTTONUP:
case WM_MBUTTONUP:
case WM_NCLBUTTONUP:
case WM_NCMBUTTONUP:
case WM_NCRBUTTONUP:
case WM_NCXBUTTONUP:
case WM_RBUTTONUP:
case WM_XBUTTONUP:
return ET_MOUSE_RELEASED;
case WM_MOUSEMOVE:
return IsButtonDown(native_event) ? ET_MOUSE_DRAGGED : ET_MOUSE_MOVED;
case WM_NCMOUSEMOVE:
return ET_MOUSE_MOVED;
case WM_MOUSEWHEEL:
case WM_MOUSEHWHEEL:
return ET_MOUSEWHEEL;
case WM_MOUSELEAVE:
case WM_NCMOUSELEAVE:
return ET_MOUSE_EXITED;
case WM_VSCROLL:
case WM_HSCROLL:
return ET_SCROLL;
default:
break;
}
return ET_UNKNOWN;
}
|
EventType EventTypeFromNative(const base::NativeEvent& native_event) {
switch (native_event.message) {
case WM_KEYDOWN:
case WM_SYSKEYDOWN:
case WM_CHAR:
return ET_KEY_PRESSED;
case WM_KEYUP:
case WM_SYSKEYUP:
return ET_KEY_RELEASED;
case WM_LBUTTONDBLCLK:
case WM_LBUTTONDOWN:
case WM_MBUTTONDBLCLK:
case WM_MBUTTONDOWN:
case WM_NCLBUTTONDBLCLK:
case WM_NCLBUTTONDOWN:
case WM_NCMBUTTONDBLCLK:
case WM_NCMBUTTONDOWN:
case WM_NCRBUTTONDBLCLK:
case WM_NCRBUTTONDOWN:
case WM_NCXBUTTONDBLCLK:
case WM_NCXBUTTONDOWN:
case WM_RBUTTONDBLCLK:
case WM_RBUTTONDOWN:
case WM_XBUTTONDBLCLK:
case WM_XBUTTONDOWN:
return ET_MOUSE_PRESSED;
case WM_LBUTTONUP:
case WM_MBUTTONUP:
case WM_NCLBUTTONUP:
case WM_NCMBUTTONUP:
case WM_NCRBUTTONUP:
case WM_NCXBUTTONUP:
case WM_RBUTTONUP:
case WM_XBUTTONUP:
return ET_MOUSE_RELEASED;
case WM_MOUSEMOVE:
return IsButtonDown(native_event) ? ET_MOUSE_DRAGGED : ET_MOUSE_MOVED;
case WM_NCMOUSEMOVE:
return ET_MOUSE_MOVED;
case WM_MOUSEWHEEL:
return ET_MOUSEWHEEL;
case WM_MOUSELEAVE:
case WM_NCMOUSELEAVE:
return ET_MOUSE_EXITED;
case WM_VSCROLL:
case WM_HSCROLL:
return ET_SCROLL;
default:
break;
}
return ET_UNKNOWN;
}
|
C
|
Chrome
| 1 |
CVE-2018-9503
|
https://www.cvedetails.com/cve/CVE-2018-9503/
|
CWE-125
|
https://android.googlesource.com/platform/system/bt/+/92a7bf8c44a236607c146240f3c0adc1ae01fedf
|
92a7bf8c44a236607c146240f3c0adc1ae01fedf
|
Check remaining frame length in rfc_process_mx_message
Bug: 111936792
Bug: 80432928
Test: manual
Change-Id: Ie2c09f3d598fb230ce060c9043f5a88c241cdd79
(cherry picked from commit 0471355c8b035aaa2ce07a33eecad60ad49c5ad0)
|
void rfc_send_dm(tRFC_MCB* p_mcb, uint8_t dlci, bool pf) {
uint8_t* p_data;
uint8_t cr = RFCOMM_CR(p_mcb->is_initiator, false);
BT_HDR* p_buf = (BT_HDR*)osi_malloc(RFCOMM_CMD_BUF_SIZE);
p_buf->offset = L2CAP_MIN_OFFSET;
p_data = (uint8_t*)(p_buf + 1) + L2CAP_MIN_OFFSET;
/* DM frame, response, PF = 1, dlci */
*p_data++ = RFCOMM_EA | cr | (dlci << RFCOMM_SHIFT_DLCI);
*p_data++ = RFCOMM_DM | ((pf) ? RFCOMM_PF : 0);
*p_data++ = RFCOMM_EA | 0;
*p_data = RFCOMM_DM_FCS((uint8_t*)(p_buf + 1) + L2CAP_MIN_OFFSET, cr, dlci);
p_buf->len = 4;
rfc_check_send_cmd(p_mcb, p_buf);
}
|
void rfc_send_dm(tRFC_MCB* p_mcb, uint8_t dlci, bool pf) {
uint8_t* p_data;
uint8_t cr = RFCOMM_CR(p_mcb->is_initiator, false);
BT_HDR* p_buf = (BT_HDR*)osi_malloc(RFCOMM_CMD_BUF_SIZE);
p_buf->offset = L2CAP_MIN_OFFSET;
p_data = (uint8_t*)(p_buf + 1) + L2CAP_MIN_OFFSET;
/* DM frame, response, PF = 1, dlci */
*p_data++ = RFCOMM_EA | cr | (dlci << RFCOMM_SHIFT_DLCI);
*p_data++ = RFCOMM_DM | ((pf) ? RFCOMM_PF : 0);
*p_data++ = RFCOMM_EA | 0;
*p_data = RFCOMM_DM_FCS((uint8_t*)(p_buf + 1) + L2CAP_MIN_OFFSET, cr, dlci);
p_buf->len = 4;
rfc_check_send_cmd(p_mcb, p_buf);
}
|
C
|
Android
| 0 |
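The fix named in the commit message above (checking remaining frame length in rfc_process_mx_message) is not the function shown in this row. A hedged sketch of the pattern, assuming a simple type/length/value layout using RFCOMM's EA-bit length octet; the names are illustrative:

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Parse one type/length/value field, but only after verifying that the
 * advertised value length actually fits in the bytes that remain. */
static bool parse_mx_field(const uint8_t *p, size_t remaining,
                           uint8_t *type, uint8_t *len,
                           const uint8_t **value)
{
    if (remaining < 2)              /* need type octet + length octet */
        return false;
    *type = p[0];
    *len = p[1] >> 1;               /* length octet: (length << 1) | EA */
    if ((size_t)*len > remaining - 2)
        return false;               /* value would run past the frame */
    *value = p + 2;
    return true;
}
```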
CVE-2011-4112
|
https://www.cvedetails.com/cve/CVE-2011-4112/
|
CWE-264
|
https://github.com/torvalds/linux/commit/550fd08c2cebad61c548def135f67aba284c6162
|
550fd08c2cebad61c548def135f67aba284c6162
|
net: Audit drivers to identify those needing IFF_TX_SKB_SHARING cleared
After the last patch, we are left in a state in which only drivers calling
ether_setup have IFF_TX_SKB_SHARING set (we assume that drivers touching real
hardware call ether_setup for their net_devices and don't hold any state in
their skbs). There are a handful of drivers that violate this assumption, of
course, and need to be fixed up. This patch identifies those drivers and marks
them as not being able to support the safe transmission of skbs by clearing the
IFF_TX_SKB_SHARING flag in priv_flags.
Signed-off-by: Neil Horman <[email protected]>
CC: Karsten Keil <[email protected]>
CC: "David S. Miller" <[email protected]>
CC: Jay Vosburgh <[email protected]>
CC: Andy Gospodarek <[email protected]>
CC: Patrick McHardy <[email protected]>
CC: Krzysztof Halasa <[email protected]>
CC: "John W. Linville" <[email protected]>
CC: Greg Kroah-Hartman <[email protected]>
CC: Marcel Holtmann <[email protected]>
CC: Johannes Berg <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
ar6000_regDomain_event(struct ar6_softc *ar, u32 regCode)
{
A_PRINTF("AR6000 Reg Code = 0x%x\n", regCode);
ar->arRegCode = regCode;
}
|
ar6000_regDomain_event(struct ar6_softc *ar, u32 regCode)
{
A_PRINTF("AR6000 Reg Code = 0x%x\n", regCode);
ar->arRegCode = regCode;
}
|
C
|
linux
| 0 |
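As context for the audit described above, the pattern is simply to clear the flag after the ether_setup() defaults. A self-contained sketch with stand-in types (IFF_TX_SKB_SHARING really is a priv_flags bit in the kernel; the struct here is illustrative):

```c
#include <stdint.h>

#define IFF_TX_SKB_SHARING 0x10000u    /* bit value as in netdevice.h */

struct net_device_sketch {
    uint32_t priv_flags;
};

/* ether_setup() advertises that skbs handed to the driver may be shared;
 * drivers that stash state in their skbs must opt back out. */
static void ether_setup_sketch(struct net_device_sketch *dev)
{
    dev->priv_flags |= IFF_TX_SKB_SHARING;
}

static void stateful_driver_setup(struct net_device_sketch *dev)
{
    ether_setup_sketch(dev);
    dev->priv_flags &= ~IFF_TX_SKB_SHARING;  /* shared skbs unsafe here */
}
```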
CVE-2009-0397
|
https://www.cvedetails.com/cve/CVE-2009-0397/
|
CWE-119
|
https://cgit.freedesktop.org/gstreamer/gst-plugins-good/commit/?id=bdc20b9baf13564d9a061343416395f8f9a92b53
|
bdc20b9baf13564d9a061343416395f8f9a92b53
| null |
gst_qtdemux_activate_segment (GstQTDemux * qtdemux, QtDemuxStream * stream,
guint32 seg_idx, guint64 offset)
{
GstEvent *event;
QtDemuxSegment *segment;
guint32 index, kf_index;
guint64 seg_time;
guint64 start, stop, time;
gdouble rate;
GST_LOG_OBJECT (qtdemux, "activate segment %d, offset %" G_GUINT64_FORMAT,
seg_idx, offset);
/* update the current segment */
stream->segment_index = seg_idx;
/* get the segment */
segment = &stream->segments[seg_idx];
if (offset < segment->time) {
GST_WARNING_OBJECT (qtdemux, "offset < segment->time %" G_GUINT64_FORMAT,
segment->time);
return FALSE;
}
/* get time in this segment */
seg_time = offset - segment->time;
GST_LOG_OBJECT (qtdemux, "seg_time %" GST_TIME_FORMAT,
GST_TIME_ARGS (seg_time));
if (seg_time > segment->duration) {
GST_LOG_OBJECT (qtdemux, "seg_time > segment->duration %" GST_TIME_FORMAT,
GST_TIME_ARGS (segment->duration));
return FALSE;
}
/* qtdemux->segment.stop is in outside-time-realm, whereas
* segment->media_stop is in track-time-realm.
*
* In order to compare the two, we need to bring segment.stop
* into the track-time-realm */
if (qtdemux->segment.stop == -1)
stop = segment->media_stop;
else
stop =
MIN (segment->media_stop,
qtdemux->segment.stop - segment->time + segment->media_start);
if (qtdemux->segment.rate >= 0) {
start = MIN (segment->media_start + seg_time, stop);
time = offset;
} else {
start = segment->media_start;
stop = MIN (segment->media_start + seg_time, stop);
time = segment->time;
}
GST_DEBUG_OBJECT (qtdemux, "newsegment %d from %" GST_TIME_FORMAT
" to %" GST_TIME_FORMAT ", time %" GST_TIME_FORMAT, seg_idx,
GST_TIME_ARGS (start), GST_TIME_ARGS (stop), GST_TIME_ARGS (time));
/* combine global rate with that of the segment */
rate = segment->rate * qtdemux->segment.rate;
/* update the segment values used for clipping */
gst_segment_init (&stream->segment, GST_FORMAT_TIME);
gst_segment_set_newsegment (&stream->segment, FALSE, rate, GST_FORMAT_TIME,
start, stop, time);
/* now prepare and send the segment */
if (stream->pad) {
event = gst_event_new_new_segment (FALSE, rate, GST_FORMAT_TIME,
start, stop, time);
gst_pad_push_event (stream->pad, event);
/* assume we can send more data now */
stream->last_ret = GST_FLOW_OK;
}
/* and move to the keyframe before the indicated media time of the
* segment */
if (qtdemux->segment.rate >= 0) {
index = gst_qtdemux_find_index (qtdemux, stream, start);
stream->to_sample = stream->n_samples;
GST_DEBUG_OBJECT (qtdemux, "moving data pointer to %" GST_TIME_FORMAT
", index: %u, pts %" GST_TIME_FORMAT, GST_TIME_ARGS (start), index,
GST_TIME_ARGS (stream->samples[index].timestamp));
} else {
index = gst_qtdemux_find_index (qtdemux, stream, stop);
stream->to_sample = index;
GST_DEBUG_OBJECT (qtdemux, "moving data pointer to %" GST_TIME_FORMAT
", index: %u, pts %" GST_TIME_FORMAT, GST_TIME_ARGS (stop), index,
GST_TIME_ARGS (stream->samples[index].timestamp));
}
/* we're at the right spot */
if (index == stream->sample_index) {
GST_DEBUG_OBJECT (qtdemux, "we are at the right index");
return TRUE;
}
/* find keyframe of the target index */
kf_index = gst_qtdemux_find_keyframe (qtdemux, stream, index);
/* if we move forwards, we don't have to go back to the previous
* keyframe since we already sent that. We can also just jump to
* the keyframe right before the target index if there is one. */
if (index > stream->sample_index) {
/* moving forwards check if we move past a keyframe */
if (kf_index > stream->sample_index) {
GST_DEBUG_OBJECT (qtdemux, "moving forwards to keyframe at %u (pts %"
GST_TIME_FORMAT, kf_index,
GST_TIME_ARGS (stream->samples[kf_index].timestamp));
gst_qtdemux_move_stream (qtdemux, stream, kf_index);
} else {
GST_DEBUG_OBJECT (qtdemux, "moving forwards, keyframe at %u (pts %"
GST_TIME_FORMAT " already sent", kf_index,
GST_TIME_ARGS (stream->samples[kf_index].timestamp));
}
} else {
GST_DEBUG_OBJECT (qtdemux, "moving backwards to keyframe at %u (pts %"
GST_TIME_FORMAT, kf_index,
GST_TIME_ARGS (stream->samples[kf_index].timestamp));
gst_qtdemux_move_stream (qtdemux, stream, kf_index);
}
return TRUE;
}
|
gst_qtdemux_activate_segment (GstQTDemux * qtdemux, QtDemuxStream * stream,
guint32 seg_idx, guint64 offset)
{
GstEvent *event;
QtDemuxSegment *segment;
guint32 index, kf_index;
guint64 seg_time;
guint64 start, stop, time;
gdouble rate;
GST_LOG_OBJECT (qtdemux, "activate segment %d, offset %" G_GUINT64_FORMAT,
seg_idx, offset);
/* update the current segment */
stream->segment_index = seg_idx;
/* get the segment */
segment = &stream->segments[seg_idx];
if (offset < segment->time) {
GST_WARNING_OBJECT (qtdemux, "offset < segment->time %" G_GUINT64_FORMAT,
segment->time);
return FALSE;
}
/* get time in this segment */
seg_time = offset - segment->time;
GST_LOG_OBJECT (qtdemux, "seg_time %" GST_TIME_FORMAT,
GST_TIME_ARGS (seg_time));
if (seg_time > segment->duration) {
GST_LOG_OBJECT (qtdemux, "seg_time > segment->duration %" GST_TIME_FORMAT,
GST_TIME_ARGS (segment->duration));
return FALSE;
}
/* qtdemux->segment.stop is in outside-time-realm, whereas
* segment->media_stop is in track-time-realm.
*
* In order to compare the two, we need to bring segment.stop
* into the track-time-realm */
if (qtdemux->segment.stop == -1)
stop = segment->media_stop;
else
stop =
MIN (segment->media_stop,
qtdemux->segment.stop - segment->time + segment->media_start);
if (qtdemux->segment.rate >= 0) {
start = MIN (segment->media_start + seg_time, stop);
time = offset;
} else {
start = segment->media_start;
stop = MIN (segment->media_start + seg_time, stop);
time = segment->time;
}
GST_DEBUG_OBJECT (qtdemux, "newsegment %d from %" GST_TIME_FORMAT
" to %" GST_TIME_FORMAT ", time %" GST_TIME_FORMAT, seg_idx,
GST_TIME_ARGS (start), GST_TIME_ARGS (stop), GST_TIME_ARGS (time));
/* combine global rate with that of the segment */
rate = segment->rate * qtdemux->segment.rate;
/* update the segment values used for clipping */
gst_segment_init (&stream->segment, GST_FORMAT_TIME);
gst_segment_set_newsegment (&stream->segment, FALSE, rate, GST_FORMAT_TIME,
start, stop, time);
/* now prepare and send the segment */
if (stream->pad) {
event = gst_event_new_new_segment (FALSE, rate, GST_FORMAT_TIME,
start, stop, time);
gst_pad_push_event (stream->pad, event);
/* assume we can send more data now */
stream->last_ret = GST_FLOW_OK;
}
/* and move to the keyframe before the indicated media time of the
* segment */
if (qtdemux->segment.rate >= 0) {
index = gst_qtdemux_find_index (qtdemux, stream, start);
stream->to_sample = stream->n_samples;
GST_DEBUG_OBJECT (qtdemux, "moving data pointer to %" GST_TIME_FORMAT
", index: %u, pts %" GST_TIME_FORMAT, GST_TIME_ARGS (start), index,
GST_TIME_ARGS (stream->samples[index].timestamp));
} else {
index = gst_qtdemux_find_index (qtdemux, stream, stop);
stream->to_sample = index;
GST_DEBUG_OBJECT (qtdemux, "moving data pointer to %" GST_TIME_FORMAT
", index: %u, pts %" GST_TIME_FORMAT, GST_TIME_ARGS (stop), index,
GST_TIME_ARGS (stream->samples[index].timestamp));
}
/* we're at the right spot */
if (index == stream->sample_index) {
GST_DEBUG_OBJECT (qtdemux, "we are at the right index");
return TRUE;
}
/* find keyframe of the target index */
kf_index = gst_qtdemux_find_keyframe (qtdemux, stream, index);
/* if we move forwards, we don't have to go back to the previous
* keyframe since we already sent that. We can also just jump to
* the keyframe right before the target index if there is one. */
if (index > stream->sample_index) {
/* moving forwards check if we move past a keyframe */
if (kf_index > stream->sample_index) {
GST_DEBUG_OBJECT (qtdemux, "moving forwards to keyframe at %u (pts %"
GST_TIME_FORMAT, kf_index,
GST_TIME_ARGS (stream->samples[kf_index].timestamp));
gst_qtdemux_move_stream (qtdemux, stream, kf_index);
} else {
GST_DEBUG_OBJECT (qtdemux, "moving forwards, keyframe at %u (pts %"
GST_TIME_FORMAT " already sent", kf_index,
GST_TIME_ARGS (stream->samples[kf_index].timestamp));
}
} else {
GST_DEBUG_OBJECT (qtdemux, "moving backwards to keyframe at %u (pts %"
GST_TIME_FORMAT, kf_index,
GST_TIME_ARGS (stream->samples[kf_index].timestamp));
gst_qtdemux_move_stream (qtdemux, stream, kf_index);
}
return TRUE;
}
|
C
|
gstreamer
| 0 |
CVE-2013-2916
|
https://www.cvedetails.com/cve/CVE-2013-2916/
| null |
https://github.com/chromium/chromium/commit/47a054e9ad826421b789097d82b44c102ab6ac97
|
47a054e9ad826421b789097d82b44c102ab6ac97
|
Don't wait to notify client of spoof attempt if a modal dialog is created.
BUG=281256
TEST=See bug for repro steps.
Review URL: https://chromiumcodereview.appspot.com/23620020
git-svn-id: svn://svn.chromium.org/blink/trunk@157196 bbb929c8-8fbe-4397-9dbb-9b2b20218538
|
void FrameLoader::detachFromParent()
{
RefPtr<Frame> protect(m_frame);
closeURL();
history()->saveScrollPositionAndViewStateToItem(history()->currentItem());
detachChildren();
stopAllLoaders();
InspectorInstrumentation::frameDetachedFromParent(m_frame);
if (m_documentLoader)
m_documentLoader->detachFromFrame();
m_documentLoader = 0;
m_client->detachedFromParent();
m_progressTracker.clear();
if (Frame* parent = m_frame->tree()->parent()) {
parent->loader()->closeAndRemoveChild(m_frame);
parent->loader()->scheduleCheckCompleted();
} else {
m_frame->setView(0);
m_frame->willDetachPage();
m_frame->detachFromPage();
}
}
|
void FrameLoader::detachFromParent()
{
RefPtr<Frame> protect(m_frame);
closeURL();
history()->saveScrollPositionAndViewStateToItem(history()->currentItem());
detachChildren();
stopAllLoaders();
InspectorInstrumentation::frameDetachedFromParent(m_frame);
if (m_documentLoader)
m_documentLoader->detachFromFrame();
m_documentLoader = 0;
m_client->detachedFromParent();
m_progressTracker.clear();
if (Frame* parent = m_frame->tree()->parent()) {
parent->loader()->closeAndRemoveChild(m_frame);
parent->loader()->scheduleCheckCompleted();
} else {
m_frame->setView(0);
m_frame->willDetachPage();
m_frame->detachFromPage();
}
}
|
C
|
Chrome
| 0 |
CVE-2012-0045
|
https://www.cvedetails.com/cve/CVE-2012-0045/
| null |
https://github.com/torvalds/linux/commit/c2226fc9e87ba3da060e47333657cd6616652b84
|
c2226fc9e87ba3da060e47333657cd6616652b84
|
KVM: x86: fix missing checks in syscall emulation
On hosts without this patch, 32bit guests will crash (and 64bit guests
may behave in a wrong way) for example by simply executing following
nasm-demo-application:
[bits 32]
global _start
SECTION .text
_start: syscall
(I tested it with winxp and linux - both always crashed)
Disassembly of section .text:
00000000 <_start>:
0: 0f 05 syscall
The reason appears to be a missing "invalid opcode" trap (int6) for the
syscall opcode "0f05", which is not available on Intel CPUs
outside long mode, nor on some AMD CPUs in legacy mode
(depending on CPU vendor, MSR_EFER and cpuid).
Because the previously mentioned OSs may not set up the corresponding
syscall target registers (STAR, LSTAR, CSTAR), those remain
NULL, and (non-trapping) syscalls lead to multiple
faults and finally crashes.
Depending on the architecture (AMD or Intel) presented to
guests, various checks according to the vendors' documentation
are implemented to overcome the current issue and behave
like the CPUs' physical counterparts.
[mtosatti: cleanup/beautify code]
Signed-off-by: Stephan Baerwolf <[email protected]>
Signed-off-by: Marcelo Tosatti <[email protected]>
|
static int em_vmcall(struct x86_emulate_ctxt *ctxt)
{
int rc;
if (ctxt->modrm_mod != 3 || ctxt->modrm_rm != 1)
return X86EMUL_UNHANDLEABLE;
rc = ctxt->ops->fix_hypercall(ctxt);
if (rc != X86EMUL_CONTINUE)
return rc;
/* Let the processor re-execute the fixed hypercall */
ctxt->_eip = ctxt->eip;
/* Disable writeback. */
ctxt->dst.type = OP_NONE;
return X86EMUL_CONTINUE;
}
|
static int em_vmcall(struct x86_emulate_ctxt *ctxt)
{
int rc;
if (ctxt->modrm_mod != 3 || ctxt->modrm_rm != 1)
return X86EMUL_UNHANDLEABLE;
rc = ctxt->ops->fix_hypercall(ctxt);
if (rc != X86EMUL_CONTINUE)
return rc;
/* Let the processor re-execute the fixed hypercall */
ctxt->_eip = ctxt->eip;
/* Disable writeback. */
ctxt->dst.type = OP_NONE;
return X86EMUL_CONTINUE;
}
|
C
|
linux
| 0 |
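The checks the commit message above describes gate SYSCALL emulation on CPU mode and the vendor presented to the guest. A condensed sketch of that decision, using illustrative enums rather than the real emulator plumbing:

```c
#include <stdbool.h>

enum cpu_vendor { VENDOR_INTEL, VENDOR_AMD };

/* SYSCALL (0f 05) is valid in long mode on both vendors, but outside
 * long mode only AMD-style CPUs execute it; Intel raises #UD. EFER.SCE
 * must be set in either case. Returning false here models injecting the
 * "invalid opcode" trap instead of running the handler with NULL
 * STAR/LSTAR/CSTAR targets. */
static bool syscall_allowed(enum cpu_vendor vendor, bool long_mode,
                            bool efer_sce)
{
    if (!efer_sce)
        return false;
    if (long_mode)
        return true;
    return vendor == VENDOR_AMD;
}
```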
CVE-2016-7425
|
https://www.cvedetails.com/cve/CVE-2016-7425/
|
CWE-119
|
https://github.com/torvalds/linux/commit/7bc2b55a5c030685b399bb65b6baa9ccc3d1f167
|
7bc2b55a5c030685b399bb65b6baa9ccc3d1f167
|
scsi: arcmsr: Buffer overflow in arcmsr_iop_message_xfer()
We need to put an upper bound on "user_len" so the memcpy() doesn't
overflow.
Cc: <[email protected]>
Reported-by: Marco Grassi <[email protected]>
Signed-off-by: Dan Carpenter <[email protected]>
Reviewed-by: Tomas Henzl <[email protected]>
Signed-off-by: Martin K. Petersen <[email protected]>
|
static bool arcmsr_hbaC_get_config(struct AdapterControlBlock *pACB)
{
uint32_t intmask_org, Index, firmware_state = 0;
struct MessageUnit_C __iomem *reg = pACB->pmuC;
char *acb_firm_model = pACB->firm_model;
char *acb_firm_version = pACB->firm_version;
char __iomem *iop_firm_model = (char __iomem *)(&reg->msgcode_rwbuffer[15]); /*firm_model,15,60-67*/
char __iomem *iop_firm_version = (char __iomem *)(&reg->msgcode_rwbuffer[17]); /*firm_version,17,68-83*/
int count;
/* disable all outbound interrupt */
intmask_org = readl(&reg->host_int_mask); /* disable outbound message0 int */
writel(intmask_org|ARCMSR_HBCMU_ALL_INTMASKENABLE, &reg->host_int_mask);
/* wait firmware ready */
do {
firmware_state = readl(&reg->outbound_msgaddr1);
} while ((firmware_state & ARCMSR_HBCMU_MESSAGE_FIRMWARE_OK) == 0);
/* post "get config" instruction */
writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
/* wait message ready */
for (Index = 0; Index < 2000; Index++) {
if (readl(&reg->outbound_doorbell) & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR, &reg->outbound_doorbell_clear);/*clear interrupt*/
break;
}
udelay(10);
} /*max 1 seconds*/
if (Index >= 2000) {
printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \
miscellaneous data' timeout \n", pACB->host->host_no);
return false;
}
count = 8;
while (count) {
*acb_firm_model = readb(iop_firm_model);
acb_firm_model++;
iop_firm_model++;
count--;
}
count = 16;
while (count) {
*acb_firm_version = readb(iop_firm_version);
acb_firm_version++;
iop_firm_version++;
count--;
}
pr_notice("Areca RAID Controller%d: Model %s, F/W %s\n",
pACB->host->host_no,
pACB->firm_model,
pACB->firm_version);
pACB->firm_request_len = readl(&reg->msgcode_rwbuffer[1]); /*firm_request_len,1,04-07*/
pACB->firm_numbers_queue = readl(&reg->msgcode_rwbuffer[2]); /*firm_numbers_queue,2,08-11*/
pACB->firm_sdram_size = readl(&reg->msgcode_rwbuffer[3]); /*firm_sdram_size,3,12-15*/
pACB->firm_hd_channels = readl(&reg->msgcode_rwbuffer[4]); /*firm_ide_channels,4,16-19*/
pACB->firm_cfg_version = readl(&reg->msgcode_rwbuffer[25]); /*firm_cfg_version,25,100-103*/
/*all interrupt service will be enable at arcmsr_iop_init*/
return true;
}
|
static bool arcmsr_hbaC_get_config(struct AdapterControlBlock *pACB)
{
uint32_t intmask_org, Index, firmware_state = 0;
struct MessageUnit_C __iomem *reg = pACB->pmuC;
char *acb_firm_model = pACB->firm_model;
char *acb_firm_version = pACB->firm_version;
char __iomem *iop_firm_model = (char __iomem *)(&reg->msgcode_rwbuffer[15]); /*firm_model,15,60-67*/
char __iomem *iop_firm_version = (char __iomem *)(&reg->msgcode_rwbuffer[17]); /*firm_version,17,68-83*/
int count;
/* disable all outbound interrupt */
intmask_org = readl(&reg->host_int_mask); /* disable outbound message0 int */
writel(intmask_org|ARCMSR_HBCMU_ALL_INTMASKENABLE, &reg->host_int_mask);
/* wait firmware ready */
do {
firmware_state = readl(&reg->outbound_msgaddr1);
} while ((firmware_state & ARCMSR_HBCMU_MESSAGE_FIRMWARE_OK) == 0);
/* post "get config" instruction */
writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
/* wait message ready */
for (Index = 0; Index < 2000; Index++) {
if (readl(&reg->outbound_doorbell) & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR, &reg->outbound_doorbell_clear);/*clear interrupt*/
break;
}
udelay(10);
} /*max 1 seconds*/
if (Index >= 2000) {
printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \
miscellaneous data' timeout \n", pACB->host->host_no);
return false;
}
count = 8;
while (count) {
*acb_firm_model = readb(iop_firm_model);
acb_firm_model++;
iop_firm_model++;
count--;
}
count = 16;
while (count) {
*acb_firm_version = readb(iop_firm_version);
acb_firm_version++;
iop_firm_version++;
count--;
}
pr_notice("Areca RAID Controller%d: Model %s, F/W %s\n",
pACB->host->host_no,
pACB->firm_model,
pACB->firm_version);
pACB->firm_request_len = readl(&reg->msgcode_rwbuffer[1]); /*firm_request_len,1,04-07*/
pACB->firm_numbers_queue = readl(&reg->msgcode_rwbuffer[2]); /*firm_numbers_queue,2,08-11*/
pACB->firm_sdram_size = readl(&reg->msgcode_rwbuffer[3]); /*firm_sdram_size,3,12-15*/
pACB->firm_hd_channels = readl(&reg->msgcode_rwbuffer[4]); /*firm_ide_channels,4,16-19*/
pACB->firm_cfg_version = readl(&reg->msgcode_rwbuffer[25]); /*firm_cfg_version,25,100-103*/
/*all interrupt service will be enable at arcmsr_iop_init*/
return true;
}
|
C
|
linux
| 0 |
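The overflow the commit message above describes is in arcmsr_iop_message_xfer(), not in the unchanged function shown here. The essence of the fix is an upper bound on the user-controlled length before the memcpy; a minimal sketch with an illustrative buffer size:

```c
#include <stddef.h>
#include <string.h>

#define MSG_BUF_CAPACITY 1032   /* illustrative destination capacity */

/* Refuse lengths larger than the destination instead of letting
 * memcpy run past the end of the buffer. */
static int copy_iop_message(char *dst, const char *src, size_t user_len)
{
    if (user_len > MSG_BUF_CAPACITY)
        return -1;
    memcpy(dst, src, user_len);
    return 0;
}
```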
CVE-2019-10664
|
https://www.cvedetails.com/cve/CVE-2019-10664/
|
CWE-89
|
https://github.com/domoticz/domoticz/commit/ee70db46f81afa582c96b887b73bcd2a86feda00
|
ee70db46f81afa582c96b887b73bcd2a86feda00
|
Fixed possible SQL Injection Vulnerability (Thanks to Fabio Carretto!)
|
void CWebServer::Cmd_ChangePlanDeviceOrder(WebEmSession & session, const request& req, Json::Value &root)
{
std::string planid = request::findValue(&req, "planid");
std::string idx = request::findValue(&req, "idx");
std::string sway = request::findValue(&req, "way");
if (
(planid.empty()) ||
(idx.empty()) ||
(sway.empty())
)
return;
bool bGoUp = (sway == "0");
std::string aOrder, oID, oOrder;
std::vector<std::vector<std::string> > result;
result = m_sql.safe_query("SELECT [Order] FROM DeviceToPlansMap WHERE ((ID=='%q') AND (PlanID=='%q'))",
idx.c_str(), planid.c_str());
if (result.empty())
return;
aOrder = result[0][0];
if (!bGoUp)
{
result = m_sql.safe_query("SELECT ID, [Order] FROM DeviceToPlansMap WHERE (([Order]>'%q') AND (PlanID=='%q')) ORDER BY [Order] ASC",
aOrder.c_str(), planid.c_str());
if (result.empty())
return;
oID = result[0][0];
oOrder = result[0][1];
}
else
{
result = m_sql.safe_query("SELECT ID, [Order] FROM DeviceToPlansMap WHERE (([Order]<'%q') AND (PlanID=='%q')) ORDER BY [Order] DESC",
aOrder.c_str(), planid.c_str());
if (result.empty())
return;
oID = result[0][0];
oOrder = result[0][1];
}
root["status"] = "OK";
root["title"] = "ChangePlanOrder";
m_sql.safe_query("UPDATE DeviceToPlansMap SET [Order] = '%q' WHERE (ID='%q')",
oOrder.c_str(), idx.c_str());
m_sql.safe_query("UPDATE DeviceToPlansMap SET [Order] = '%q' WHERE (ID='%q')",
aOrder.c_str(), oID.c_str());
}
|
void CWebServer::Cmd_ChangePlanDeviceOrder(WebEmSession & session, const request& req, Json::Value &root)
{
std::string planid = request::findValue(&req, "planid");
std::string idx = request::findValue(&req, "idx");
std::string sway = request::findValue(&req, "way");
if (
(planid.empty()) ||
(idx.empty()) ||
(sway.empty())
)
return;
bool bGoUp = (sway == "0");
std::string aOrder, oID, oOrder;
std::vector<std::vector<std::string> > result;
result = m_sql.safe_query("SELECT [Order] FROM DeviceToPlansMap WHERE ((ID=='%q') AND (PlanID=='%q'))",
idx.c_str(), planid.c_str());
if (result.empty())
return;
aOrder = result[0][0];
if (!bGoUp)
{
result = m_sql.safe_query("SELECT ID, [Order] FROM DeviceToPlansMap WHERE (([Order]>'%q') AND (PlanID=='%q')) ORDER BY [Order] ASC",
aOrder.c_str(), planid.c_str());
if (result.empty())
return;
oID = result[0][0];
oOrder = result[0][1];
}
else
{
result = m_sql.safe_query("SELECT ID, [Order] FROM DeviceToPlansMap WHERE (([Order]<'%q') AND (PlanID=='%q')) ORDER BY [Order] DESC",
aOrder.c_str(), planid.c_str());
if (result.empty())
return;
oID = result[0][0];
oOrder = result[0][1];
}
root["status"] = "OK";
root["title"] = "ChangePlanOrder";
m_sql.safe_query("UPDATE DeviceToPlansMap SET [Order] = '%q' WHERE (ID='%q')",
oOrder.c_str(), idx.c_str());
m_sql.safe_query("UPDATE DeviceToPlansMap SET [Order] = '%q' WHERE (ID='%q')",
aOrder.c_str(), oID.c_str());
}
|
C
|
domoticz
| 0 |
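The one-line commit message above does not show the fix, and the handler in this row is unchanged. One common hardening for handlers like Cmd_ChangePlanDeviceOrder, whose planid/idx parameters should be numeric row IDs, is to validate them before they reach any query string; a hedged sketch (an illustration, not the actual domoticz patch):

```c
#include <ctype.h>
#include <stdbool.h>
#include <stddef.h>

/* Accept only non-empty, purely decimal identifiers. */
static bool is_numeric_id(const char *s)
{
    if (s == NULL || *s == '\0')
        return false;
    for (; *s; ++s) {
        if (!isdigit((unsigned char)*s))
            return false;
    }
    return true;
}
```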
CVE-2015-1265
|
https://www.cvedetails.com/cve/CVE-2015-1265/
| null |
https://github.com/chromium/chromium/commit/ce1a9c0e454b08c96ca73788a1b4dccb405ce027
|
ce1a9c0e454b08c96ca73788a1b4dccb405ce027
|
Move ImageLoader timer to frame-specific TaskRunnerTimer.
Move ImageLoader timer m_derefElementTimer to frame-specific TaskRunnerTimer.
This associates it with the frame's Networking timer task queue.
BUG=624694
Review-Url: https://codereview.chromium.org/2642103002
Cr-Commit-Position: refs/heads/master@{#444927}
|
inline void ImageLoader::enqueueImageLoadingMicroTask(
UpdateFromElementBehavior updateBehavior,
ReferrerPolicy referrerPolicy) {
std::unique_ptr<Task> task =
Task::create(this, updateBehavior, referrerPolicy);
m_pendingTask = task->createWeakPtr();
Microtask::enqueueMicrotask(
WTF::bind(&Task::run, WTF::passed(std::move(task))));
m_loadDelayCounter =
IncrementLoadEventDelayCount::create(m_element->document());
}
|
inline void ImageLoader::enqueueImageLoadingMicroTask(
UpdateFromElementBehavior updateBehavior,
ReferrerPolicy referrerPolicy) {
std::unique_ptr<Task> task =
Task::create(this, updateBehavior, referrerPolicy);
m_pendingTask = task->createWeakPtr();
Microtask::enqueueMicrotask(
WTF::bind(&Task::run, WTF::passed(std::move(task))));
m_loadDelayCounter =
IncrementLoadEventDelayCount::create(m_element->document());
}
|
C
|
Chrome
| 0 |
CVE-2015-7513
|
https://www.cvedetails.com/cve/CVE-2015-7513/
| null |
https://github.com/torvalds/linux/commit/0185604c2d82c560dab2f2933a18f797e74ab5a8
|
0185604c2d82c560dab2f2933a18f797e74ab5a8
|
KVM: x86: Reload pit counters for all channels when restoring state
Currently if userspace restores the pit counters with a count of 0
on channels 1 or 2 and the guest attempts to read the count on those
channels, then KVM will perform a mod of 0 and crash. This will ensure
that 0 values are converted to 65536 as per the spec.
This is CVE-2015-7513.
Signed-off-by: Andy Honig <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
|
int x86_emulate_instruction(struct kvm_vcpu *vcpu,
unsigned long cr2,
int emulation_type,
void *insn,
int insn_len)
{
int r;
struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
bool writeback = true;
bool write_fault_to_spt = vcpu->arch.write_fault_to_shadow_pgtable;
/*
* Clear write_fault_to_shadow_pgtable here to ensure it is
* never reused.
*/
vcpu->arch.write_fault_to_shadow_pgtable = false;
kvm_clear_exception_queue(vcpu);
if (!(emulation_type & EMULTYPE_NO_DECODE)) {
init_emulate_ctxt(vcpu);
/*
* We will reenter on the same instruction since
* we do not set complete_userspace_io. This does not
* handle watchpoints yet, those would be handled in
* the emulate_ops.
*/
if (kvm_vcpu_check_breakpoint(vcpu, &r))
return r;
ctxt->interruptibility = 0;
ctxt->have_exception = false;
ctxt->exception.vector = -1;
ctxt->perm_ok = false;
ctxt->ud = emulation_type & EMULTYPE_TRAP_UD;
r = x86_decode_insn(ctxt, insn, insn_len);
trace_kvm_emulate_insn_start(vcpu);
++vcpu->stat.insn_emulation;
if (r != EMULATION_OK) {
if (emulation_type & EMULTYPE_TRAP_UD)
return EMULATE_FAIL;
if (reexecute_instruction(vcpu, cr2, write_fault_to_spt,
emulation_type))
return EMULATE_DONE;
if (emulation_type & EMULTYPE_SKIP)
return EMULATE_FAIL;
return handle_emulation_failure(vcpu);
}
}
if (emulation_type & EMULTYPE_SKIP) {
kvm_rip_write(vcpu, ctxt->_eip);
if (ctxt->eflags & X86_EFLAGS_RF)
kvm_set_rflags(vcpu, ctxt->eflags & ~X86_EFLAGS_RF);
return EMULATE_DONE;
}
if (retry_instruction(ctxt, cr2, emulation_type))
return EMULATE_DONE;
/* this is needed for vmware backdoor interface to work since it
changes registers values during IO operation */
if (vcpu->arch.emulate_regs_need_sync_from_vcpu) {
vcpu->arch.emulate_regs_need_sync_from_vcpu = false;
emulator_invalidate_register_cache(ctxt);
}
restart:
r = x86_emulate_insn(ctxt);
if (r == EMULATION_INTERCEPTED)
return EMULATE_DONE;
if (r == EMULATION_FAILED) {
if (reexecute_instruction(vcpu, cr2, write_fault_to_spt,
emulation_type))
return EMULATE_DONE;
return handle_emulation_failure(vcpu);
}
if (ctxt->have_exception) {
r = EMULATE_DONE;
if (inject_emulated_exception(vcpu))
return r;
} else if (vcpu->arch.pio.count) {
if (!vcpu->arch.pio.in) {
/* FIXME: return into emulator if single-stepping. */
vcpu->arch.pio.count = 0;
} else {
writeback = false;
vcpu->arch.complete_userspace_io = complete_emulated_pio;
}
r = EMULATE_USER_EXIT;
} else if (vcpu->mmio_needed) {
if (!vcpu->mmio_is_write)
writeback = false;
r = EMULATE_USER_EXIT;
vcpu->arch.complete_userspace_io = complete_emulated_mmio;
} else if (r == EMULATION_RESTART)
goto restart;
else
r = EMULATE_DONE;
if (writeback) {
unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);
toggle_interruptibility(vcpu, ctxt->interruptibility);
vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
if (vcpu->arch.hflags != ctxt->emul_flags)
kvm_set_hflags(vcpu, ctxt->emul_flags);
kvm_rip_write(vcpu, ctxt->eip);
if (r == EMULATE_DONE)
kvm_vcpu_check_singlestep(vcpu, rflags, &r);
if (!ctxt->have_exception ||
exception_type(ctxt->exception.vector) == EXCPT_TRAP)
__kvm_set_rflags(vcpu, ctxt->eflags);
/*
* For STI, interrupts are shadowed; so KVM_REQ_EVENT will
* do nothing, and it will be requested again as soon as
* the shadow expires. But we still need to check here,
* because POPF has no interrupt shadow.
*/
if (unlikely((ctxt->eflags & ~rflags) & X86_EFLAGS_IF))
kvm_make_request(KVM_REQ_EVENT, vcpu);
} else
vcpu->arch.emulate_regs_need_sync_to_vcpu = true;
return r;
}
|
int x86_emulate_instruction(struct kvm_vcpu *vcpu,
unsigned long cr2,
int emulation_type,
void *insn,
int insn_len)
{
int r;
struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
bool writeback = true;
bool write_fault_to_spt = vcpu->arch.write_fault_to_shadow_pgtable;
/*
* Clear write_fault_to_shadow_pgtable here to ensure it is
* never reused.
*/
vcpu->arch.write_fault_to_shadow_pgtable = false;
kvm_clear_exception_queue(vcpu);
if (!(emulation_type & EMULTYPE_NO_DECODE)) {
init_emulate_ctxt(vcpu);
/*
* We will reenter on the same instruction since
* we do not set complete_userspace_io. This does not
* handle watchpoints yet, those would be handled in
* the emulate_ops.
*/
if (kvm_vcpu_check_breakpoint(vcpu, &r))
return r;
ctxt->interruptibility = 0;
ctxt->have_exception = false;
ctxt->exception.vector = -1;
ctxt->perm_ok = false;
ctxt->ud = emulation_type & EMULTYPE_TRAP_UD;
r = x86_decode_insn(ctxt, insn, insn_len);
trace_kvm_emulate_insn_start(vcpu);
++vcpu->stat.insn_emulation;
if (r != EMULATION_OK) {
if (emulation_type & EMULTYPE_TRAP_UD)
return EMULATE_FAIL;
if (reexecute_instruction(vcpu, cr2, write_fault_to_spt,
emulation_type))
return EMULATE_DONE;
if (emulation_type & EMULTYPE_SKIP)
return EMULATE_FAIL;
return handle_emulation_failure(vcpu);
}
}
if (emulation_type & EMULTYPE_SKIP) {
kvm_rip_write(vcpu, ctxt->_eip);
if (ctxt->eflags & X86_EFLAGS_RF)
kvm_set_rflags(vcpu, ctxt->eflags & ~X86_EFLAGS_RF);
return EMULATE_DONE;
}
if (retry_instruction(ctxt, cr2, emulation_type))
return EMULATE_DONE;
/* this is needed for vmware backdoor interface to work since it
changes registers values during IO operation */
if (vcpu->arch.emulate_regs_need_sync_from_vcpu) {
vcpu->arch.emulate_regs_need_sync_from_vcpu = false;
emulator_invalidate_register_cache(ctxt);
}
restart:
r = x86_emulate_insn(ctxt);
if (r == EMULATION_INTERCEPTED)
return EMULATE_DONE;
if (r == EMULATION_FAILED) {
if (reexecute_instruction(vcpu, cr2, write_fault_to_spt,
emulation_type))
return EMULATE_DONE;
return handle_emulation_failure(vcpu);
}
if (ctxt->have_exception) {
r = EMULATE_DONE;
if (inject_emulated_exception(vcpu))
return r;
} else if (vcpu->arch.pio.count) {
if (!vcpu->arch.pio.in) {
/* FIXME: return into emulator if single-stepping. */
vcpu->arch.pio.count = 0;
} else {
writeback = false;
vcpu->arch.complete_userspace_io = complete_emulated_pio;
}
r = EMULATE_USER_EXIT;
} else if (vcpu->mmio_needed) {
if (!vcpu->mmio_is_write)
writeback = false;
r = EMULATE_USER_EXIT;
vcpu->arch.complete_userspace_io = complete_emulated_mmio;
} else if (r == EMULATION_RESTART)
goto restart;
else
r = EMULATE_DONE;
if (writeback) {
unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);
toggle_interruptibility(vcpu, ctxt->interruptibility);
vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
if (vcpu->arch.hflags != ctxt->emul_flags)
kvm_set_hflags(vcpu, ctxt->emul_flags);
kvm_rip_write(vcpu, ctxt->eip);
if (r == EMULATE_DONE)
kvm_vcpu_check_singlestep(vcpu, rflags, &r);
if (!ctxt->have_exception ||
exception_type(ctxt->exception.vector) == EXCPT_TRAP)
__kvm_set_rflags(vcpu, ctxt->eflags);
/*
* For STI, interrupts are shadowed; so KVM_REQ_EVENT will
* do nothing, and it will be requested again as soon as
* the shadow expires. But we still need to check here,
* because POPF has no interrupt shadow.
*/
if (unlikely((ctxt->eflags & ~rflags) & X86_EFLAGS_IF))
kvm_make_request(KVM_REQ_EVENT, vcpu);
} else
vcpu->arch.emulate_regs_need_sync_to_vcpu = true;
return r;
}
|
C
|
linux
| 0 |
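The crash the commit message above describes comes from using a restored PIT count of 0 as a divisor. Per the 8254 spec a programmed count of 0 means 65536, so normalizing at restore time removes the mod-by-zero; a small sketch of that conversion:

```c
#include <stdint.h>

/* An 8254 counter programmed with 0 counts 65536 ticks. */
static uint32_t pit_effective_count(uint16_t programmed)
{
    return programmed ? programmed : 65536u;
}

/* The kind of expression that crashed when the raw 0 was used directly. */
static uint32_t pit_phase(uint64_t elapsed_ticks, uint16_t programmed)
{
    return (uint32_t)(elapsed_ticks % pit_effective_count(programmed));
}
```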
CVE-2016-2184
|
https://www.cvedetails.com/cve/CVE-2016-2184/
| null |
https://github.com/torvalds/linux/commit/0f886ca12765d20124bd06291c82951fd49a33be
|
0f886ca12765d20124bd06291c82951fd49a33be
|
ALSA: usb-audio: Fix NULL dereference in create_fixed_stream_quirk()
create_fixed_stream_quirk() may cause a NULL-pointer dereference by
accessing the non-existing endpoint when a USB device with a malformed
USB descriptor is used.
This patch avoids it simply by adding a sanity check of bNumEndpoints
before the accesses.
Bugzilla: https://bugzilla.suse.com/show_bug.cgi?id=971125
Cc: <[email protected]>
Signed-off-by: Takashi Iwai <[email protected]>
|
void snd_usb_set_interface_quirk(struct usb_device *dev)
{
struct snd_usb_audio *chip = dev_get_drvdata(&dev->dev);
if (!chip)
return;
/*
* "Playback Design" products need a 50ms delay after setting the
* USB interface.
*/
switch (USB_ID_VENDOR(chip->usb_id)) {
case 0x23ba: /* Playback Design */
case 0x0644: /* TEAC Corp. */
mdelay(50);
break;
}
}
|
void snd_usb_set_interface_quirk(struct usb_device *dev)
{
struct snd_usb_audio *chip = dev_get_drvdata(&dev->dev);
if (!chip)
return;
/*
* "Playback Design" products need a 50ms delay after setting the
* USB interface.
*/
switch (USB_ID_VENDOR(chip->usb_id)) {
case 0x23ba: /* Playback Design */
case 0x0644: /* TEAC Corp. */
mdelay(50);
break;
}
}
|
C
|
linux
| 0 |
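The NULL dereference described above comes from indexing an endpoint array that a malformed descriptor left empty; the fix is a bNumEndpoints check before the access. A sketch with stand-in types (the real code checks the altsetting's descriptor fields):

```c
#include <stddef.h>

struct altsetting_sketch {
    int num_endpoints;              /* stands in for desc.bNumEndpoints */
    const void *const *endpoint;    /* endpoint descriptor array */
};

/* Return the first endpoint only if the descriptor claims one exists. */
static const void *first_endpoint(const struct altsetting_sketch *alts)
{
    if (alts->num_endpoints < 1)
        return NULL;                /* malformed descriptor: bail out */
    return alts->endpoint[0];
}
```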
CVE-2016-5219
|
https://www.cvedetails.com/cve/CVE-2016-5219/
|
CWE-416
|
https://github.com/chromium/chromium/commit/a4150b688a754d3d10d2ca385155b1c95d77d6ae
|
a4150b688a754d3d10d2ca385155b1c95d77d6ae
|
Add GL_PROGRAM_COMPLETION_QUERY_CHROMIUM
This makes querying GL_COMPLETION_STATUS_KHR on programs much
cheaper by minimizing round-trips to the GPU thread.
Bug: 881152, 957001
Change-Id: Iadfa798af29225e752c710ca5c25f50b3dd3101a
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1586630
Commit-Queue: Kenneth Russell <[email protected]>
Reviewed-by: Kentaro Hara <[email protected]>
Reviewed-by: Geoff Lang <[email protected]>
Reviewed-by: Kenneth Russell <[email protected]>
Cr-Commit-Position: refs/heads/master@{#657568}
|
void GLES2DecoderImpl::ClearUnclearedAttachments(
GLenum target, Framebuffer* framebuffer) {
framebuffer->ClearUnclearedIntOr3DTexturesOrPartiallyClearedTextures(
this, texture_manager());
bool cleared_int_renderbuffers = false;
Framebuffer* draw_framebuffer = GetBoundDrawFramebuffer();
if (framebuffer->HasUnclearedIntRenderbufferAttachments()) {
if (target == GL_READ_FRAMEBUFFER && draw_framebuffer != framebuffer) {
api()->glBindFramebufferEXTFn(GL_DRAW_FRAMEBUFFER,
framebuffer->service_id());
}
state_.SetDeviceColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE);
state_.SetDeviceCapabilityState(GL_SCISSOR_TEST, false);
ClearDeviceWindowRectangles();
framebuffer->ClearUnclearedIntRenderbufferAttachments(
renderbuffer_manager());
cleared_int_renderbuffers = true;
}
GLbitfield clear_bits = 0;
bool reset_draw_buffers = false;
if (framebuffer->HasUnclearedColorAttachments()) {
api()->glClearColorFn(0.0f, 0.0f, 0.0f, 0.0f);
state_.SetDeviceColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE);
clear_bits |= GL_COLOR_BUFFER_BIT;
if (SupportsDrawBuffers()) {
reset_draw_buffers =
framebuffer->PrepareDrawBuffersForClearingUninitializedAttachments();
}
}
if (framebuffer->HasUnclearedAttachment(GL_STENCIL_ATTACHMENT)) {
api()->glClearStencilFn(0);
state_.SetDeviceStencilMaskSeparate(GL_FRONT, kDefaultStencilMask);
state_.SetDeviceStencilMaskSeparate(GL_BACK, kDefaultStencilMask);
clear_bits |= GL_STENCIL_BUFFER_BIT;
}
if (framebuffer->HasUnclearedAttachment(GL_DEPTH_ATTACHMENT)) {
api()->glClearDepthFn(1.0f);
state_.SetDeviceDepthMask(GL_TRUE);
clear_bits |= GL_DEPTH_BUFFER_BIT;
}
if (clear_bits) {
if (!cleared_int_renderbuffers &&
target == GL_READ_FRAMEBUFFER && draw_framebuffer != framebuffer) {
api()->glBindFramebufferEXTFn(GL_DRAW_FRAMEBUFFER,
framebuffer->service_id());
}
state_.SetDeviceCapabilityState(GL_SCISSOR_TEST, false);
ClearDeviceWindowRectangles();
if (workarounds().gl_clear_broken) {
ClearFramebufferForWorkaround(clear_bits);
} else {
api()->glClearFn(clear_bits);
}
}
if (cleared_int_renderbuffers || clear_bits) {
if (reset_draw_buffers)
framebuffer->RestoreDrawBuffers();
RestoreClearState();
if (target == GL_READ_FRAMEBUFFER && draw_framebuffer != framebuffer) {
GLuint service_id = draw_framebuffer ? draw_framebuffer->service_id() :
GetBackbufferServiceId();
api()->glBindFramebufferEXTFn(GL_DRAW_FRAMEBUFFER, service_id);
}
}
framebuffer_manager()->MarkAttachmentsAsCleared(
framebuffer, renderbuffer_manager(), texture_manager());
}
|
void GLES2DecoderImpl::ClearUnclearedAttachments(
GLenum target, Framebuffer* framebuffer) {
framebuffer->ClearUnclearedIntOr3DTexturesOrPartiallyClearedTextures(
this, texture_manager());
bool cleared_int_renderbuffers = false;
Framebuffer* draw_framebuffer = GetBoundDrawFramebuffer();
if (framebuffer->HasUnclearedIntRenderbufferAttachments()) {
if (target == GL_READ_FRAMEBUFFER && draw_framebuffer != framebuffer) {
api()->glBindFramebufferEXTFn(GL_DRAW_FRAMEBUFFER,
framebuffer->service_id());
}
state_.SetDeviceColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE);
state_.SetDeviceCapabilityState(GL_SCISSOR_TEST, false);
ClearDeviceWindowRectangles();
framebuffer->ClearUnclearedIntRenderbufferAttachments(
renderbuffer_manager());
cleared_int_renderbuffers = true;
}
GLbitfield clear_bits = 0;
bool reset_draw_buffers = false;
if (framebuffer->HasUnclearedColorAttachments()) {
api()->glClearColorFn(0.0f, 0.0f, 0.0f, 0.0f);
state_.SetDeviceColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE);
clear_bits |= GL_COLOR_BUFFER_BIT;
if (SupportsDrawBuffers()) {
reset_draw_buffers =
framebuffer->PrepareDrawBuffersForClearingUninitializedAttachments();
}
}
if (framebuffer->HasUnclearedAttachment(GL_STENCIL_ATTACHMENT)) {
api()->glClearStencilFn(0);
state_.SetDeviceStencilMaskSeparate(GL_FRONT, kDefaultStencilMask);
state_.SetDeviceStencilMaskSeparate(GL_BACK, kDefaultStencilMask);
clear_bits |= GL_STENCIL_BUFFER_BIT;
}
if (framebuffer->HasUnclearedAttachment(GL_DEPTH_ATTACHMENT)) {
api()->glClearDepthFn(1.0f);
state_.SetDeviceDepthMask(GL_TRUE);
clear_bits |= GL_DEPTH_BUFFER_BIT;
}
if (clear_bits) {
if (!cleared_int_renderbuffers &&
target == GL_READ_FRAMEBUFFER && draw_framebuffer != framebuffer) {
api()->glBindFramebufferEXTFn(GL_DRAW_FRAMEBUFFER,
framebuffer->service_id());
}
state_.SetDeviceCapabilityState(GL_SCISSOR_TEST, false);
ClearDeviceWindowRectangles();
if (workarounds().gl_clear_broken) {
ClearFramebufferForWorkaround(clear_bits);
} else {
api()->glClearFn(clear_bits);
}
}
if (cleared_int_renderbuffers || clear_bits) {
if (reset_draw_buffers)
framebuffer->RestoreDrawBuffers();
RestoreClearState();
if (target == GL_READ_FRAMEBUFFER && draw_framebuffer != framebuffer) {
GLuint service_id = draw_framebuffer ? draw_framebuffer->service_id() :
GetBackbufferServiceId();
api()->glBindFramebufferEXTFn(GL_DRAW_FRAMEBUFFER, service_id);
}
}
framebuffer_manager()->MarkAttachmentsAsCleared(
framebuffer, renderbuffer_manager(), texture_manager());
}
|
C
|
Chrome
| 0 |
CVE-2018-16077
|
https://www.cvedetails.com/cve/CVE-2018-16077/
|
CWE-285
|
https://github.com/chromium/chromium/commit/90f878780cce9c4b0475fcea14d91b8f510cce11
|
90f878780cce9c4b0475fcea14d91b8f510cce11
|
Prevent sandboxed documents from reusing the default window
Bug: 377995
Change-Id: Iff66c6d214dfd0cb7ea9c80f83afeedfff703541
Reviewed-on: https://chromium-review.googlesource.com/983558
Commit-Queue: Andy Paicu <[email protected]>
Reviewed-by: Daniel Cheng <[email protected]>
Cr-Commit-Position: refs/heads/master@{#567663}
|
void Document::PropagateStyleToViewport() {
DCHECK(InStyleRecalc());
DCHECK(documentElement());
HTMLElement* body = this->body();
const ComputedStyle* body_style =
body ? body->EnsureComputedStyle() : nullptr;
const ComputedStyle* document_element_style =
documentElement()->EnsureComputedStyle();
WritingMode root_writing_mode = document_element_style->GetWritingMode();
TextDirection root_direction = document_element_style->Direction();
if (body_style) {
root_writing_mode = body_style->GetWritingMode();
root_direction = body_style->Direction();
}
const ComputedStyle* background_style = document_element_style;
if (IsHTMLHtmlElement(documentElement()) && IsHTMLBodyElement(body) &&
!background_style->HasBackground())
background_style = body_style;
Color background_color =
background_style->VisitedDependentColor(GetCSSPropertyBackgroundColor());
FillLayer background_layers = background_style->BackgroundLayers();
for (auto* current_layer = &background_layers; current_layer;
current_layer = current_layer->Next()) {
current_layer->SetClip(EFillBox::kBorder);
if (current_layer->Attachment() == EFillAttachment::kScroll)
current_layer->SetAttachment(EFillAttachment::kLocal);
}
EImageRendering image_rendering = background_style->ImageRendering();
const ComputedStyle* overflow_style = nullptr;
if (Element* element = ViewportDefiningElement(document_element_style)) {
if (element == body) {
overflow_style = body_style;
} else {
DCHECK_EQ(element, documentElement());
overflow_style = document_element_style;
if (body_style && !body_style->IsOverflowVisible())
UseCounter::Count(*this, WebFeature::kBodyScrollsInAdditionToViewport);
}
}
EOverflowAnchor overflow_anchor = EOverflowAnchor::kAuto;
EOverflow overflow_x = EOverflow::kAuto;
EOverflow overflow_y = EOverflow::kAuto;
GapLength column_gap;
if (overflow_style) {
overflow_anchor = overflow_style->OverflowAnchor();
overflow_x = overflow_style->OverflowX();
overflow_y = overflow_style->OverflowY();
if (overflow_x == EOverflow::kVisible)
overflow_x = EOverflow::kAuto;
if (overflow_y == EOverflow::kVisible)
overflow_y = EOverflow::kAuto;
if (overflow_anchor == EOverflowAnchor::kVisible)
overflow_anchor = EOverflowAnchor::kAuto;
column_gap = overflow_style->ColumnGap();
}
ScrollSnapType snap_type = overflow_style->GetScrollSnapType();
ScrollBehavior scroll_behavior = document_element_style->GetScrollBehavior();
EOverscrollBehavior overscroll_behavior_x =
overflow_style->OverscrollBehaviorX();
EOverscrollBehavior overscroll_behavior_y =
overflow_style->OverscrollBehaviorY();
using OverscrollBehaviorType = cc::OverscrollBehavior::OverscrollBehaviorType;
if (RuntimeEnabledFeatures::CSSOverscrollBehaviorEnabled() &&
IsInMainFrame()) {
GetPage()->GetOverscrollController().SetOverscrollBehavior(
cc::OverscrollBehavior(
static_cast<OverscrollBehaviorType>(overscroll_behavior_x),
static_cast<OverscrollBehaviorType>(overscroll_behavior_y)));
}
Length scroll_padding_top = overflow_style->ScrollPaddingTop();
Length scroll_padding_right = overflow_style->ScrollPaddingRight();
Length scroll_padding_bottom = overflow_style->ScrollPaddingBottom();
Length scroll_padding_left = overflow_style->ScrollPaddingLeft();
const ComputedStyle& viewport_style = GetLayoutView()->StyleRef();
if (viewport_style.GetWritingMode() != root_writing_mode ||
viewport_style.Direction() != root_direction ||
viewport_style.VisitedDependentColor(GetCSSPropertyBackgroundColor()) !=
background_color ||
viewport_style.BackgroundLayers() != background_layers ||
viewport_style.ImageRendering() != image_rendering ||
viewport_style.OverflowAnchor() != overflow_anchor ||
viewport_style.OverflowX() != overflow_x ||
viewport_style.OverflowY() != overflow_y ||
viewport_style.ColumnGap() != column_gap ||
viewport_style.GetScrollSnapType() != snap_type ||
viewport_style.GetScrollBehavior() != scroll_behavior ||
viewport_style.OverscrollBehaviorX() != overscroll_behavior_x ||
viewport_style.OverscrollBehaviorY() != overscroll_behavior_y ||
viewport_style.ScrollPaddingTop() != scroll_padding_top ||
viewport_style.ScrollPaddingRight() != scroll_padding_right ||
viewport_style.ScrollPaddingBottom() != scroll_padding_bottom ||
viewport_style.ScrollPaddingLeft() != scroll_padding_left) {
scoped_refptr<ComputedStyle> new_style =
ComputedStyle::Clone(viewport_style);
new_style->SetWritingMode(root_writing_mode);
new_style->SetDirection(root_direction);
new_style->SetBackgroundColor(background_color);
new_style->AccessBackgroundLayers() = background_layers;
new_style->SetImageRendering(image_rendering);
new_style->SetOverflowAnchor(overflow_anchor);
new_style->SetOverflowX(overflow_x);
new_style->SetOverflowY(overflow_y);
new_style->SetColumnGap(column_gap);
new_style->SetScrollSnapType(snap_type);
new_style->SetScrollBehavior(scroll_behavior);
new_style->SetOverscrollBehaviorX(overscroll_behavior_x);
new_style->SetOverscrollBehaviorY(overscroll_behavior_y);
new_style->SetScrollPaddingTop(scroll_padding_top);
new_style->SetScrollPaddingRight(scroll_padding_right);
new_style->SetScrollPaddingBottom(scroll_padding_bottom);
new_style->SetScrollPaddingLeft(scroll_padding_left);
GetLayoutView()->SetStyle(new_style);
SetupFontBuilder(*new_style);
View()->RecalculateScrollbarOverlayColorTheme(
View()->DocumentBackgroundColor());
if (PaintLayerScrollableArea* scrollable_area =
GetLayoutView()->GetScrollableArea()) {
if (scrollable_area->HorizontalScrollbar() &&
scrollable_area->HorizontalScrollbar()->IsCustomScrollbar())
scrollable_area->HorizontalScrollbar()->StyleChanged();
if (scrollable_area->VerticalScrollbar() &&
scrollable_area->VerticalScrollbar()->IsCustomScrollbar())
scrollable_area->VerticalScrollbar()->StyleChanged();
}
}
}
|
void Document::PropagateStyleToViewport() {
DCHECK(InStyleRecalc());
DCHECK(documentElement());
HTMLElement* body = this->body();
const ComputedStyle* body_style =
body ? body->EnsureComputedStyle() : nullptr;
const ComputedStyle* document_element_style =
documentElement()->EnsureComputedStyle();
WritingMode root_writing_mode = document_element_style->GetWritingMode();
TextDirection root_direction = document_element_style->Direction();
if (body_style) {
root_writing_mode = body_style->GetWritingMode();
root_direction = body_style->Direction();
}
const ComputedStyle* background_style = document_element_style;
if (IsHTMLHtmlElement(documentElement()) && IsHTMLBodyElement(body) &&
!background_style->HasBackground())
background_style = body_style;
Color background_color =
background_style->VisitedDependentColor(GetCSSPropertyBackgroundColor());
FillLayer background_layers = background_style->BackgroundLayers();
for (auto* current_layer = &background_layers; current_layer;
current_layer = current_layer->Next()) {
current_layer->SetClip(EFillBox::kBorder);
if (current_layer->Attachment() == EFillAttachment::kScroll)
current_layer->SetAttachment(EFillAttachment::kLocal);
}
EImageRendering image_rendering = background_style->ImageRendering();
const ComputedStyle* overflow_style = nullptr;
if (Element* element = ViewportDefiningElement(document_element_style)) {
if (element == body) {
overflow_style = body_style;
} else {
DCHECK_EQ(element, documentElement());
overflow_style = document_element_style;
if (body_style && !body_style->IsOverflowVisible())
UseCounter::Count(*this, WebFeature::kBodyScrollsInAdditionToViewport);
}
}
EOverflowAnchor overflow_anchor = EOverflowAnchor::kAuto;
EOverflow overflow_x = EOverflow::kAuto;
EOverflow overflow_y = EOverflow::kAuto;
GapLength column_gap;
if (overflow_style) {
overflow_anchor = overflow_style->OverflowAnchor();
overflow_x = overflow_style->OverflowX();
overflow_y = overflow_style->OverflowY();
if (overflow_x == EOverflow::kVisible)
overflow_x = EOverflow::kAuto;
if (overflow_y == EOverflow::kVisible)
overflow_y = EOverflow::kAuto;
if (overflow_anchor == EOverflowAnchor::kVisible)
overflow_anchor = EOverflowAnchor::kAuto;
column_gap = overflow_style->ColumnGap();
}
ScrollSnapType snap_type = overflow_style->GetScrollSnapType();
ScrollBehavior scroll_behavior = document_element_style->GetScrollBehavior();
EOverscrollBehavior overscroll_behavior_x =
overflow_style->OverscrollBehaviorX();
EOverscrollBehavior overscroll_behavior_y =
overflow_style->OverscrollBehaviorY();
using OverscrollBehaviorType = cc::OverscrollBehavior::OverscrollBehaviorType;
if (RuntimeEnabledFeatures::CSSOverscrollBehaviorEnabled() &&
IsInMainFrame()) {
GetPage()->GetOverscrollController().SetOverscrollBehavior(
cc::OverscrollBehavior(
static_cast<OverscrollBehaviorType>(overscroll_behavior_x),
static_cast<OverscrollBehaviorType>(overscroll_behavior_y)));
}
Length scroll_padding_top = overflow_style->ScrollPaddingTop();
Length scroll_padding_right = overflow_style->ScrollPaddingRight();
Length scroll_padding_bottom = overflow_style->ScrollPaddingBottom();
Length scroll_padding_left = overflow_style->ScrollPaddingLeft();
const ComputedStyle& viewport_style = GetLayoutView()->StyleRef();
if (viewport_style.GetWritingMode() != root_writing_mode ||
viewport_style.Direction() != root_direction ||
viewport_style.VisitedDependentColor(GetCSSPropertyBackgroundColor()) !=
background_color ||
viewport_style.BackgroundLayers() != background_layers ||
viewport_style.ImageRendering() != image_rendering ||
viewport_style.OverflowAnchor() != overflow_anchor ||
viewport_style.OverflowX() != overflow_x ||
viewport_style.OverflowY() != overflow_y ||
viewport_style.ColumnGap() != column_gap ||
viewport_style.GetScrollSnapType() != snap_type ||
viewport_style.GetScrollBehavior() != scroll_behavior ||
viewport_style.OverscrollBehaviorX() != overscroll_behavior_x ||
viewport_style.OverscrollBehaviorY() != overscroll_behavior_y ||
viewport_style.ScrollPaddingTop() != scroll_padding_top ||
viewport_style.ScrollPaddingRight() != scroll_padding_right ||
viewport_style.ScrollPaddingBottom() != scroll_padding_bottom ||
viewport_style.ScrollPaddingLeft() != scroll_padding_left) {
scoped_refptr<ComputedStyle> new_style =
ComputedStyle::Clone(viewport_style);
new_style->SetWritingMode(root_writing_mode);
new_style->SetDirection(root_direction);
new_style->SetBackgroundColor(background_color);
new_style->AccessBackgroundLayers() = background_layers;
new_style->SetImageRendering(image_rendering);
new_style->SetOverflowAnchor(overflow_anchor);
new_style->SetOverflowX(overflow_x);
new_style->SetOverflowY(overflow_y);
new_style->SetColumnGap(column_gap);
new_style->SetScrollSnapType(snap_type);
new_style->SetScrollBehavior(scroll_behavior);
new_style->SetOverscrollBehaviorX(overscroll_behavior_x);
new_style->SetOverscrollBehaviorY(overscroll_behavior_y);
new_style->SetScrollPaddingTop(scroll_padding_top);
new_style->SetScrollPaddingRight(scroll_padding_right);
new_style->SetScrollPaddingBottom(scroll_padding_bottom);
new_style->SetScrollPaddingLeft(scroll_padding_left);
GetLayoutView()->SetStyle(new_style);
SetupFontBuilder(*new_style);
View()->RecalculateScrollbarOverlayColorTheme(
View()->DocumentBackgroundColor());
if (PaintLayerScrollableArea* scrollable_area =
GetLayoutView()->GetScrollableArea()) {
if (scrollable_area->HorizontalScrollbar() &&
scrollable_area->HorizontalScrollbar()->IsCustomScrollbar())
scrollable_area->HorizontalScrollbar()->StyleChanged();
if (scrollable_area->VerticalScrollbar() &&
scrollable_area->VerticalScrollbar()->IsCustomScrollbar())
scrollable_area->VerticalScrollbar()->StyleChanged();
}
}
}
|
C
|
Chrome
| 0 |
CVE-2017-11142
|
https://www.cvedetails.com/cve/CVE-2017-11142/
|
CWE-400
|
https://github.com/php/php-src/commit/a15bffd105ac28fd0dd9b596632dbf035238fda3
|
a15bffd105ac28fd0dd9b596632dbf035238fda3
|
Fix bug #73807
|
static zend_bool php_auto_globals_create_server(zend_string *name)
{
if (PG(variables_order) && (strchr(PG(variables_order),'S') || strchr(PG(variables_order),'s'))) {
php_register_server_variables();
if (PG(register_argc_argv)) {
if (SG(request_info).argc) {
zval *argc, *argv;
if ((argc = zend_hash_str_find_ind(&EG(symbol_table), "argc", sizeof("argc")-1)) != NULL &&
(argv = zend_hash_str_find_ind(&EG(symbol_table), "argv", sizeof("argv")-1)) != NULL) {
Z_ADDREF_P(argv);
zend_hash_str_update(Z_ARRVAL(PG(http_globals)[TRACK_VARS_SERVER]), "argv", sizeof("argv")-1, argv);
zend_hash_str_update(Z_ARRVAL(PG(http_globals)[TRACK_VARS_SERVER]), "argc", sizeof("argc")-1, argc);
}
} else {
php_build_argv(SG(request_info).query_string, &PG(http_globals)[TRACK_VARS_SERVER]);
}
}
} else {
zval_ptr_dtor(&PG(http_globals)[TRACK_VARS_SERVER]);
array_init(&PG(http_globals)[TRACK_VARS_SERVER]);
}
check_http_proxy(Z_ARRVAL(PG(http_globals)[TRACK_VARS_SERVER]));
zend_hash_update(&EG(symbol_table), name, &PG(http_globals)[TRACK_VARS_SERVER]);
Z_ADDREF(PG(http_globals)[TRACK_VARS_SERVER]);
return 0; /* don't rearm */
}
|
static zend_bool php_auto_globals_create_server(zend_string *name)
{
if (PG(variables_order) && (strchr(PG(variables_order),'S') || strchr(PG(variables_order),'s'))) {
php_register_server_variables();
if (PG(register_argc_argv)) {
if (SG(request_info).argc) {
zval *argc, *argv;
if ((argc = zend_hash_str_find_ind(&EG(symbol_table), "argc", sizeof("argc")-1)) != NULL &&
(argv = zend_hash_str_find_ind(&EG(symbol_table), "argv", sizeof("argv")-1)) != NULL) {
Z_ADDREF_P(argv);
zend_hash_str_update(Z_ARRVAL(PG(http_globals)[TRACK_VARS_SERVER]), "argv", sizeof("argv")-1, argv);
zend_hash_str_update(Z_ARRVAL(PG(http_globals)[TRACK_VARS_SERVER]), "argc", sizeof("argc")-1, argc);
}
} else {
php_build_argv(SG(request_info).query_string, &PG(http_globals)[TRACK_VARS_SERVER]);
}
}
} else {
zval_ptr_dtor(&PG(http_globals)[TRACK_VARS_SERVER]);
array_init(&PG(http_globals)[TRACK_VARS_SERVER]);
}
check_http_proxy(Z_ARRVAL(PG(http_globals)[TRACK_VARS_SERVER]));
zend_hash_update(&EG(symbol_table), name, &PG(http_globals)[TRACK_VARS_SERVER]);
Z_ADDREF(PG(http_globals)[TRACK_VARS_SERVER]);
return 0; /* don't rearm */
}
|
C
|
php-src
| 0 |
CVE-2018-20169
|
https://www.cvedetails.com/cve/CVE-2018-20169/
|
CWE-400
|
https://github.com/torvalds/linux/commit/704620afc70cf47abb9d6a1a57f3825d2bca49cf
|
704620afc70cf47abb9d6a1a57f3825d2bca49cf
|
USB: check usb_get_extra_descriptor for proper size
When reading an extra descriptor, we need to properly check the minimum
and maximum size allowed, to guard against invalid data sent by a
device.
Reported-by: Hui Peng <[email protected]>
Reported-by: Mathias Payer <[email protected]>
Co-developed-by: Linus Torvalds <[email protected]>
Signed-off-by: Hui Peng <[email protected]>
Signed-off-by: Mathias Payer <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
Cc: stable <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
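For illustration, a minimal sketch of the bounds check this message describes, assuming a descriptor buffer of size bytes read from the device; the variable names are hypothetical, not the verbatim patch:
struct usb_descriptor_header *hdr = buffer;
/* Reject descriptors whose reported bLength is smaller than the
 * header itself or larger than the data actually received. */
if (size < sizeof(*hdr) ||
    hdr->bLength < sizeof(*hdr) ||
    hdr->bLength > size)
	return -EPROTO; /* malformed descriptor from the device */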
|
static void __hwahc_op_wusbhc_stop(struct wusbhc *wusbhc, int delay)
{
struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
struct wahc *wa = &hwahc->wa;
u8 iface_no = wa->usb_iface->cur_altsetting->desc.bInterfaceNumber;
int ret;
ret = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
WUSB_REQ_CHAN_STOP,
USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
delay * 1000,
iface_no,
NULL, 0, USB_CTRL_SET_TIMEOUT);
if (ret == 0)
msleep(delay);
wa_nep_disarm(&hwahc->wa);
__wa_stop(&hwahc->wa);
}
|
static void __hwahc_op_wusbhc_stop(struct wusbhc *wusbhc, int delay)
{
struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
struct wahc *wa = &hwahc->wa;
u8 iface_no = wa->usb_iface->cur_altsetting->desc.bInterfaceNumber;
int ret;
ret = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
WUSB_REQ_CHAN_STOP,
USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
delay * 1000,
iface_no,
NULL, 0, USB_CTRL_SET_TIMEOUT);
if (ret == 0)
msleep(delay);
wa_nep_disarm(&hwahc->wa);
__wa_stop(&hwahc->wa);
}
|
C
|
linux
| 0 |
CVE-2019-5757
|
https://www.cvedetails.com/cve/CVE-2019-5757/
|
CWE-704
|
https://github.com/chromium/chromium/commit/032c3339bfb454c65ce38e7eafe49a54bac83073
|
032c3339bfb454c65ce38e7eafe49a54bac83073
|
Fix SVG crash for v0 distribution into foreignObject.
We require a parent element to be an SVG element for non-svg-root
elements in order to create a LayoutObject for them. However, we checked
the light tree parent element, not the flat tree one which is the parent
for the layout tree construction. Note that this is just an issue in
Shadow DOM v0 since v1 does not allow shadow roots on SVG elements.
Bug: 915469
Change-Id: Id81843abad08814fae747b5bc81c09666583f130
Reviewed-on: https://chromium-review.googlesource.com/c/1382494
Reviewed-by: Fredrik Söderquist <[email protected]>
Commit-Queue: Rune Lillesveen <[email protected]>
Cr-Commit-Position: refs/heads/master@{#617487}
|
MutableCSSPropertyValueSet* SVGElement::AnimatedSMILStyleProperties() const {
if (HasSVGRareData())
return SvgRareData()->AnimatedSMILStyleProperties();
return nullptr;
}
|
MutableCSSPropertyValueSet* SVGElement::AnimatedSMILStyleProperties() const {
if (HasSVGRareData())
return SvgRareData()->AnimatedSMILStyleProperties();
return nullptr;
}
|
C
|
Chrome
| 0 |
CVE-2016-2179
|
https://www.cvedetails.com/cve/CVE-2016-2179/
|
CWE-399
|
https://git.openssl.org/?p=openssl.git;a=commit;h=f5c7f5dfbaf0d2f7d946d0fe86f08e6bcb36ed0d
|
f5c7f5dfbaf0d2f7d946d0fe86f08e6bcb36ed0d
| null |
WORK_STATE ossl_statem_server_pre_work(SSL *s, WORK_STATE wst)
{
OSSL_STATEM *st = &s->statem;
switch (st->hand_state) {
case TLS_ST_SW_HELLO_REQ:
s->shutdown = 0;
if (SSL_IS_DTLS(s))
dtls1_clear_sent_buffer(s);
break;
case DTLS_ST_SW_HELLO_VERIFY_REQUEST:
s->shutdown = 0;
if (SSL_IS_DTLS(s)) {
dtls1_clear_sent_buffer(s);
/* We don't buffer this message so don't use the timer */
st->use_timer = 0;
}
break;
case TLS_ST_SW_SRVR_HELLO:
if (SSL_IS_DTLS(s)) {
/*
* Messages we write from now on should be bufferred and
* retransmitted if necessary, so we need to use the timer now
*/
st->use_timer = 1;
}
break;
case TLS_ST_SW_SRVR_DONE:
#ifndef OPENSSL_NO_SCTP
if (SSL_IS_DTLS(s) && BIO_dgram_is_sctp(SSL_get_wbio(s)))
return dtls_wait_for_dry(s);
#endif
return WORK_FINISHED_CONTINUE;
case TLS_ST_SW_SESSION_TICKET:
if (SSL_IS_DTLS(s)) {
/*
* We're into the last flight. We don't retransmit the last flight
* unless we need to, so we don't use the timer
*/
st->use_timer = 0;
}
break;
case TLS_ST_SW_CHANGE:
s->session->cipher = s->s3->tmp.new_cipher;
if (!s->method->ssl3_enc->setup_key_block(s)) {
ossl_statem_set_error(s);
return WORK_ERROR;
}
if (SSL_IS_DTLS(s)) {
/*
* We're into the last flight. We don't retransmit the last flight
* unless we need to, so we don't use the timer. This might have
* already been set to 0 if we sent a NewSessionTicket message,
* but we'll set it again here in case we didn't.
*/
st->use_timer = 0;
}
return WORK_FINISHED_CONTINUE;
case TLS_ST_OK:
return tls_finish_handshake(s, wst);
default:
/* No pre work to be done */
break;
}
return WORK_FINISHED_CONTINUE;
}
|
WORK_STATE ossl_statem_server_pre_work(SSL *s, WORK_STATE wst)
{
OSSL_STATEM *st = &s->statem;
switch (st->hand_state) {
case TLS_ST_SW_HELLO_REQ:
s->shutdown = 0;
if (SSL_IS_DTLS(s))
dtls1_clear_record_buffer(s);
break;
case DTLS_ST_SW_HELLO_VERIFY_REQUEST:
s->shutdown = 0;
if (SSL_IS_DTLS(s)) {
dtls1_clear_record_buffer(s);
/* We don't buffer this message so don't use the timer */
st->use_timer = 0;
}
break;
case TLS_ST_SW_SRVR_HELLO:
if (SSL_IS_DTLS(s)) {
/*
* Messages we write from now on should be bufferred and
* retransmitted if necessary, so we need to use the timer now
*/
st->use_timer = 1;
}
break;
case TLS_ST_SW_SRVR_DONE:
#ifndef OPENSSL_NO_SCTP
if (SSL_IS_DTLS(s) && BIO_dgram_is_sctp(SSL_get_wbio(s)))
return dtls_wait_for_dry(s);
#endif
return WORK_FINISHED_CONTINUE;
case TLS_ST_SW_SESSION_TICKET:
if (SSL_IS_DTLS(s)) {
/*
* We're into the last flight. We don't retransmit the last flight
* unless we need to, so we don't use the timer
*/
st->use_timer = 0;
}
break;
case TLS_ST_SW_CHANGE:
s->session->cipher = s->s3->tmp.new_cipher;
if (!s->method->ssl3_enc->setup_key_block(s)) {
ossl_statem_set_error(s);
return WORK_ERROR;
}
if (SSL_IS_DTLS(s)) {
/*
* We're into the last flight. We don't retransmit the last flight
* unless we need to, so we don't use the timer. This might have
* already been set to 0 if we sent a NewSessionTicket message,
* but we'll set it again here in case we didn't.
*/
st->use_timer = 0;
}
return WORK_FINISHED_CONTINUE;
case TLS_ST_OK:
return tls_finish_handshake(s, wst);
default:
/* No pre work to be done */
break;
}
return WORK_FINISHED_CONTINUE;
}
|
C
|
openssl
| 1 |
CVE-2018-16427
|
https://www.cvedetails.com/cve/CVE-2018-16427/
|
CWE-125
|
https://github.com/OpenSC/OpenSC/pull/1447/commits/8fe377e93b4b56060e5bbfb6f3142ceaeca744fa
|
8fe377e93b4b56060e5bbfb6f3142ceaeca744fa
|
fixed out of bounds reads
Thanks to Eric Sesterhenn from X41 D-SEC GmbH
for reporting and suggesting security fixes.
|
static void coolkey_free_private_data(coolkey_private_data_t *priv)
{
list_t *l = &priv->objects_list;
sc_cardctl_coolkey_object_t *o;
/* Clean up the allocated memory in the items */
list_iterator_start(l);
while (list_iterator_hasnext(l)) {
o = (sc_cardctl_coolkey_object_t *)list_iterator_next(l);
free(o->data);
o->data = NULL;
}
list_iterator_stop(l);
list_destroy(&priv->objects_list);
if (priv->token_name) {
free(priv->token_name);
}
free(priv);
return;
}
|
static void coolkey_free_private_data(coolkey_private_data_t *priv)
{
list_t *l = &priv->objects_list;
sc_cardctl_coolkey_object_t *o;
/* Clean up the allocated memory in the items */
list_iterator_start(l);
while (list_iterator_hasnext(l)) {
o = (sc_cardctl_coolkey_object_t *)list_iterator_next(l);
free(o->data);
o->data = NULL;
}
list_iterator_stop(l);
list_destroy(&priv->objects_list);
if (priv->token_name) {
free(priv->token_name);
}
free(priv);
return;
}
|
C
|
OpenSC
| 0 |
CVE-2018-13006
|
https://www.cvedetails.com/cve/CVE-2018-13006/
|
CWE-125
|
https://github.com/gpac/gpac/commit/bceb03fd2be95097a7b409ea59914f332fb6bc86
|
bceb03fd2be95097a7b409ea59914f332fb6bc86
|
fixed 2 possible heap overflows (inc. #1088)
|
void vmhd_del(GF_Box *s)
{
GF_VideoMediaHeaderBox *ptr = (GF_VideoMediaHeaderBox *)s;
if (ptr == NULL) return;
gf_free(ptr);
}
|
void vmhd_del(GF_Box *s)
{
GF_VideoMediaHeaderBox *ptr = (GF_VideoMediaHeaderBox *)s;
if (ptr == NULL) return;
gf_free(ptr);
}
|
C
|
gpac
| 0 |
CVE-2013-3301
|
https://www.cvedetails.com/cve/CVE-2013-3301/
| null |
https://github.com/torvalds/linux/commit/6a76f8c0ab19f215af2a3442870eeb5f0e81998d
|
6a76f8c0ab19f215af2a3442870eeb5f0e81998d
|
tracing: Fix possible NULL pointer dereferences
Currently set_ftrace_pid and set_graph_function files use seq_lseek
for their fops. However seq_open() is called only for FMODE_READ in
the fops->open() so that if an user tries to seek one of those file
when she open it for writing, it sees NULL seq_file and then panic.
It can be easily reproduced with following command:
$ cd /sys/kernel/debug/tracing
$ echo 1234 | sudo tee -a set_ftrace_pid
In this example, GNU coreutils' tee opens the file with fopen(..., "a")
and then the fopen() internally calls lseek().
Link: http://lkml.kernel.org/r/[email protected]
Cc: Frederic Weisbecker <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Namhyung Kim <[email protected]>
Cc: [email protected]
Signed-off-by: Namhyung Kim <[email protected]>
Signed-off-by: Steven Rostedt <[email protected]>
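A minimal sketch of the guarded seek handler this fix implies: only readers, for whom seq_open() actually ran, get seq_lseek(); write-only opens have no seq_file and are handled without touching it. Names are hypothetical:
static loff_t guarded_lseek(struct file *file, loff_t offset, int whence)
{
	if (file->f_mode & FMODE_READ)
		return seq_lseek(file, offset, whence); /* seq_file exists */
	file->f_pos = 0; /* writers have nothing to seek over */
	return file->f_pos;
}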
|
t_hash_next(struct seq_file *m, loff_t *pos)
{
struct ftrace_iterator *iter = m->private;
struct hlist_node *hnd = NULL;
struct hlist_head *hhd;
(*pos)++;
iter->pos = *pos;
if (iter->probe)
hnd = &iter->probe->node;
retry:
if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
return NULL;
hhd = &ftrace_func_hash[iter->hidx];
if (hlist_empty(hhd)) {
iter->hidx++;
hnd = NULL;
goto retry;
}
if (!hnd)
hnd = hhd->first;
else {
hnd = hnd->next;
if (!hnd) {
iter->hidx++;
goto retry;
}
}
if (WARN_ON_ONCE(!hnd))
return NULL;
iter->probe = hlist_entry(hnd, struct ftrace_func_probe, node);
return iter;
}
|
t_hash_next(struct seq_file *m, loff_t *pos)
{
struct ftrace_iterator *iter = m->private;
struct hlist_node *hnd = NULL;
struct hlist_head *hhd;
(*pos)++;
iter->pos = *pos;
if (iter->probe)
hnd = &iter->probe->node;
retry:
if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
return NULL;
hhd = &ftrace_func_hash[iter->hidx];
if (hlist_empty(hhd)) {
iter->hidx++;
hnd = NULL;
goto retry;
}
if (!hnd)
hnd = hhd->first;
else {
hnd = hnd->next;
if (!hnd) {
iter->hidx++;
goto retry;
}
}
if (WARN_ON_ONCE(!hnd))
return NULL;
iter->probe = hlist_entry(hnd, struct ftrace_func_probe, node);
return iter;
}
|
C
|
linux
| 0 |
CVE-2016-10066
|
https://www.cvedetails.com/cve/CVE-2016-10066/
|
CWE-119
|
https://github.com/ImageMagick/ImageMagick/commit/f6e9d0d9955e85bdd7540b251cd50d598dacc5e6
|
f6e9d0d9955e85bdd7540b251cd50d598dacc5e6
| null |
static MagickBooleanType WriteMTVImage(const ImageInfo *image_info,Image *image)
{
char
buffer[MaxTextExtent];
MagickBooleanType
status;
MagickOffsetType
scene;
register const PixelPacket
*p;
register ssize_t
x;
register unsigned char
*q;
ssize_t
y;
unsigned char
*pixels;
/*
Open output image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickSignature);
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
status=OpenBlob(image_info,image,WriteBinaryBlobMode,&image->exception);
if (status == MagickFalse)
return(status);
scene=0;
do
{
/*
Allocate memory for pixels.
*/
(void) TransformImageColorspace(image,sRGBColorspace);
pixels=(unsigned char *) AcquireQuantumMemory((size_t) image->columns,
3UL*sizeof(*pixels));
if (pixels == (unsigned char *) NULL)
ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
/*
Initialize raster file header.
*/
(void) FormatLocaleString(buffer,MaxTextExtent,"%.20g %.20g\n",(double)
image->columns,(double) image->rows);
(void) WriteBlobString(image,buffer);
for (y=0; y < (ssize_t) image->rows; y++)
{
p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception);
if (p == (const PixelPacket *) NULL)
break;
q=pixels;
for (x=0; x < (ssize_t) image->columns; x++)
{
*q++=ScaleQuantumToChar(GetPixelRed(p));
*q++=ScaleQuantumToChar(GetPixelGreen(p));
*q++=ScaleQuantumToChar(GetPixelBlue(p));
p++;
}
(void) WriteBlob(image,(size_t) (q-pixels),pixels);
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
}
pixels=(unsigned char *) RelinquishMagickMemory(pixels);
if (GetNextImageInList(image) == (Image *) NULL)
break;
image=SyncNextImageInList(image);
status=SetImageProgress(image,SaveImagesTag,scene,
GetImageListLength(image));
if (status == MagickFalse)
break;
scene++;
} while (image_info->adjoin != MagickFalse);
(void) CloseBlob(image);
return(MagickTrue);
}
|
static MagickBooleanType WriteMTVImage(const ImageInfo *image_info,Image *image)
{
char
buffer[MaxTextExtent];
MagickBooleanType
status;
MagickOffsetType
scene;
register const PixelPacket
*p;
register ssize_t
x;
register unsigned char
*q;
ssize_t
y;
unsigned char
*pixels;
/*
Open output image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickSignature);
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
status=OpenBlob(image_info,image,WriteBinaryBlobMode,&image->exception);
if (status == MagickFalse)
return(status);
scene=0;
do
{
/*
Allocate memory for pixels.
*/
(void) TransformImageColorspace(image,sRGBColorspace);
pixels=(unsigned char *) AcquireQuantumMemory((size_t) image->columns,
3UL*sizeof(*pixels));
if (pixels == (unsigned char *) NULL)
ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
/*
Initialize raster file header.
*/
(void) FormatLocaleString(buffer,MaxTextExtent,"%.20g %.20g\n",(double)
image->columns,(double) image->rows);
(void) WriteBlobString(image,buffer);
for (y=0; y < (ssize_t) image->rows; y++)
{
p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception);
if (p == (const PixelPacket *) NULL)
break;
q=pixels;
for (x=0; x < (ssize_t) image->columns; x++)
{
*q++=ScaleQuantumToChar(GetPixelRed(p));
*q++=ScaleQuantumToChar(GetPixelGreen(p));
*q++=ScaleQuantumToChar(GetPixelBlue(p));
p++;
}
(void) WriteBlob(image,(size_t) (q-pixels),pixels);
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
}
pixels=(unsigned char *) RelinquishMagickMemory(pixels);
if (GetNextImageInList(image) == (Image *) NULL)
break;
image=SyncNextImageInList(image);
status=SetImageProgress(image,SaveImagesTag,scene,
GetImageListLength(image));
if (status == MagickFalse)
break;
scene++;
} while (image_info->adjoin != MagickFalse);
(void) CloseBlob(image);
return(MagickTrue);
}
|
C
|
ImageMagick
| 0 |
CVE-2017-7616
|
https://www.cvedetails.com/cve/CVE-2017-7616/
|
CWE-388
|
https://github.com/torvalds/linux/commit/cf01fb9985e8deb25ccf0ea54d916b8871ae0e62
|
cf01fb9985e8deb25ccf0ea54d916b8871ae0e62
|
mm/mempolicy.c: fix error handling in set_mempolicy and mbind.
In the case that compat_get_bitmap fails we do not want to copy the
bitmap to the user as it will contain uninitialized stack data and leak
sensitive data.
Signed-off-by: Chris Salls <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
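A minimal sketch of the fix pattern: return on the first failure instead of OR-ing error codes, so the bitmap is never copied out while it still holds uninitialized stack data. The variable names follow the compat mempolicy helpers but are illustrative:
if (compat_get_bitmap(bm, nmask, nr_bits))
	return -EFAULT; /* bm still holds uninitialized stack data */
if (copy_to_user(nm, bm, alloc_size))
	return -EFAULT;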
|
static long do_get_mempolicy(int *policy, nodemask_t *nmask,
unsigned long addr, unsigned long flags)
{
int err;
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma = NULL;
struct mempolicy *pol = current->mempolicy;
if (flags &
~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
return -EINVAL;
if (flags & MPOL_F_MEMS_ALLOWED) {
if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
return -EINVAL;
*policy = 0; /* just so it's initialized */
task_lock(current);
*nmask = cpuset_current_mems_allowed;
task_unlock(current);
return 0;
}
if (flags & MPOL_F_ADDR) {
/*
* Do NOT fall back to task policy if the
* vma/shared policy at addr is NULL. We
* want to return MPOL_DEFAULT in this case.
*/
down_read(&mm->mmap_sem);
vma = find_vma_intersection(mm, addr, addr+1);
if (!vma) {
up_read(&mm->mmap_sem);
return -EFAULT;
}
if (vma->vm_ops && vma->vm_ops->get_policy)
pol = vma->vm_ops->get_policy(vma, addr);
else
pol = vma->vm_policy;
} else if (addr)
return -EINVAL;
if (!pol)
pol = &default_policy; /* indicates default behavior */
if (flags & MPOL_F_NODE) {
if (flags & MPOL_F_ADDR) {
err = lookup_node(addr);
if (err < 0)
goto out;
*policy = err;
} else if (pol == current->mempolicy &&
pol->mode == MPOL_INTERLEAVE) {
*policy = current->il_next;
} else {
err = -EINVAL;
goto out;
}
} else {
*policy = pol == &default_policy ? MPOL_DEFAULT :
pol->mode;
/*
* Internal mempolicy flags must be masked off before exposing
* the policy to userspace.
*/
*policy |= (pol->flags & MPOL_MODE_FLAGS);
}
if (vma) {
up_read(&current->mm->mmap_sem);
vma = NULL;
}
err = 0;
if (nmask) {
if (mpol_store_user_nodemask(pol)) {
*nmask = pol->w.user_nodemask;
} else {
task_lock(current);
get_policy_nodemask(pol, nmask);
task_unlock(current);
}
}
out:
mpol_cond_put(pol);
if (vma)
up_read(&current->mm->mmap_sem);
return err;
}
|
static long do_get_mempolicy(int *policy, nodemask_t *nmask,
unsigned long addr, unsigned long flags)
{
int err;
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma = NULL;
struct mempolicy *pol = current->mempolicy;
if (flags &
~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
return -EINVAL;
if (flags & MPOL_F_MEMS_ALLOWED) {
if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
return -EINVAL;
*policy = 0; /* just so it's initialized */
task_lock(current);
*nmask = cpuset_current_mems_allowed;
task_unlock(current);
return 0;
}
if (flags & MPOL_F_ADDR) {
/*
* Do NOT fall back to task policy if the
* vma/shared policy at addr is NULL. We
* want to return MPOL_DEFAULT in this case.
*/
down_read(&mm->mmap_sem);
vma = find_vma_intersection(mm, addr, addr+1);
if (!vma) {
up_read(&mm->mmap_sem);
return -EFAULT;
}
if (vma->vm_ops && vma->vm_ops->get_policy)
pol = vma->vm_ops->get_policy(vma, addr);
else
pol = vma->vm_policy;
} else if (addr)
return -EINVAL;
if (!pol)
pol = &default_policy; /* indicates default behavior */
if (flags & MPOL_F_NODE) {
if (flags & MPOL_F_ADDR) {
err = lookup_node(addr);
if (err < 0)
goto out;
*policy = err;
} else if (pol == current->mempolicy &&
pol->mode == MPOL_INTERLEAVE) {
*policy = current->il_next;
} else {
err = -EINVAL;
goto out;
}
} else {
*policy = pol == &default_policy ? MPOL_DEFAULT :
pol->mode;
/*
* Internal mempolicy flags must be masked off before exposing
* the policy to userspace.
*/
*policy |= (pol->flags & MPOL_MODE_FLAGS);
}
if (vma) {
up_read(&current->mm->mmap_sem);
vma = NULL;
}
err = 0;
if (nmask) {
if (mpol_store_user_nodemask(pol)) {
*nmask = pol->w.user_nodemask;
} else {
task_lock(current);
get_policy_nodemask(pol, nmask);
task_unlock(current);
}
}
out:
mpol_cond_put(pol);
if (vma)
up_read(&current->mm->mmap_sem);
return err;
}
|
C
|
linux
| 0 |
CVE-2016-10150
|
https://www.cvedetails.com/cve/CVE-2016-10150/
|
CWE-416
|
https://github.com/torvalds/linux/commit/a0f1d21c1ccb1da66629627a74059dd7f5ac9c61
|
a0f1d21c1ccb1da66629627a74059dd7f5ac9c61
|
KVM: use after free in kvm_ioctl_create_device()
We should move the ops->destroy(dev) after the list_del(&dev->vm_node)
so that we don't use "dev" after freeing it.
Fixes: a28ebea2adc4 ("KVM: Protect device ops->create and list_add with kvm->lock")
Signed-off-by: Dan Carpenter <[email protected]>
Reviewed-by: David Hildenbrand <[email protected]>
Signed-off-by: Radim Krčmář <[email protected]>
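A minimal sketch of the reordering the message describes, an outline rather than the verbatim patch:
mutex_lock(&kvm->lock);
list_del(&dev->vm_node); /* unlink while dev is still valid */
mutex_unlock(&kvm->lock);
ops->destroy(dev); /* may free dev; it must not be touched afterwards */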
|
int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data,
int offset, int len)
{
struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
return __kvm_read_guest_page(slot, gfn, data, offset, len);
}
|
int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data,
int offset, int len)
{
struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
return __kvm_read_guest_page(slot, gfn, data, offset, len);
}
|
C
|
linux
| 0 |
CVE-2013-2878
|
https://www.cvedetails.com/cve/CVE-2013-2878/
|
CWE-119
|
https://github.com/chromium/chromium/commit/09fbb829eab7ee25e90bb4e9c2f4973c6c62d0f3
|
09fbb829eab7ee25e90bb4e9c2f4973c6c62d0f3
|
Upgrade a TextIterator ASSERT to a RELEASE_ASSERT as a defensive measure.
BUG=156930,177197
[email protected]
Review URL: https://codereview.chromium.org/15057010
git-svn-id: svn://svn.chromium.org/blink/trunk@150123 bbb929c8-8fbe-4397-9dbb-9b2b20218538
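For reference, a sketch of the distinction the message relies on; the macro below only approximates Blink's RELEASE_ASSERT, which keeps checking in release builds where a plain ASSERT compiles away:
#include <stddef.h>
#include <stdlib.h>

#define RELEASE_ASSERT(cond) do { if (!(cond)) abort(); } while (0)

static void check_offset(size_t offset, size_t length)
{
	/* A debug-only assert would vanish in release builds; this check
	 * still aborts there, turning potential memory corruption into a
	 * controlled crash. */
	RELEASE_ASSERT(offset <= length);
}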
|
inline void SearchBuffer::reachedBreak()
{
m_atBreak = true;
}
|
inline void SearchBuffer::reachedBreak()
{
m_atBreak = true;
}
|
C
|
Chrome
| 0 |
CVE-2011-2840
|
https://www.cvedetails.com/cve/CVE-2011-2840/
|
CWE-20
|
https://github.com/chromium/chromium/commit/2db5a2048dfcacfe5ad4311c2b1e435c4c67febc
|
2db5a2048dfcacfe5ad4311c2b1e435c4c67febc
|
chromeos: fix bug where "aw snap" page replaces first tab if it was a NTP when closing window with > 1 tab.
BUG=chromium-os:12088
TEST=verify bug per bug report.
Review URL: http://codereview.chromium.org/6882058
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@83031 0039d316-1c4b-4281-b951-d872f2087c98
|
State* GetStateAt(int index) const {
DCHECK(index >= 0 && index < GetStateCount());
return states_.at(index);
}
|
State* GetStateAt(int index) const {
DCHECK(index >= 0 && index < GetStateCount());
return states_.at(index);
}
|
C
|
Chrome
| 0 |
CVE-2012-1179
|
https://www.cvedetails.com/cve/CVE-2012-1179/
|
CWE-264
|
https://github.com/torvalds/linux/commit/4a1d704194a441bf83c636004a479e01360ec850
|
4a1d704194a441bf83c636004a479e01360ec850
|
mm: thp: fix pmd_bad() triggering in code paths holding mmap_sem read mode
commit 1a5a9906d4e8d1976b701f889d8f35d54b928f25 upstream.
In some cases it may happen that pmd_none_or_clear_bad() is called with
the mmap_sem held in read mode. In those cases the huge page faults can
allocate hugepmds under pmd_none_or_clear_bad() and that can trigger a
false positive from pmd_bad() that will not like to see a pmd
materializing as trans huge.
It's not khugepaged causing the problem, khugepaged holds the mmap_sem
in write mode (and all those sites must hold the mmap_sem in read mode
to prevent pagetables to go away from under them, during code review it
seems vm86 mode on 32bit kernels requires that too unless it's
restricted to 1 thread per process or UP builds). The race is only with
the huge pagefaults that can convert a pmd_none() into a
pmd_trans_huge().
Effectively all these pmd_none_or_clear_bad() sites running with
mmap_sem in read mode are somewhat speculative with the page faults, and
the result is always undefined when they run simultaneously. This is
probably why it wasn't common to run into this. For example if the
madvise(MADV_DONTNEED) runs zap_page_range() shortly before the page
fault, the hugepage will not be zapped, if the page fault runs first it
will be zapped.
Altering pmd_bad() not to error out if it finds hugepmds won't be enough
to fix this, because zap_pmd_range would then proceed to call
zap_pte_range (which would be incorrect if the pmd become a
pmd_trans_huge()).
The simplest way to fix this is to read the pmd in the local stack
(regardless of what we read, no need of actual CPU barriers, only
compiler barrier needed), and be sure it is not changing under the code
that computes its value. Even if the real pmd is changing under the
value we hold on the stack, we don't care. If we actually end up in
zap_pte_range it means the pmd was not none already and it was not huge,
and it can't become huge from under us (khugepaged locking explained
above).
All we need is to enforce that there is no way anymore that in a code
path like below, pmd_trans_huge can be false, but pmd_none_or_clear_bad
can run into a hugepmd. The overhead of a barrier() is just a compiler
tweak and should not be measurable (I only added it for THP builds). I
don't exclude different compiler versions may have prevented the race
too by caching the value of *pmd on the stack (that hasn't been
verified, but it wouldn't be impossible considering
pmd_none_or_clear_bad, pmd_bad, pmd_trans_huge, pmd_none are all inlines
and there's no external function called in between pmd_trans_huge and
pmd_none_or_clear_bad).
if (pmd_trans_huge(*pmd)) {
if (next-addr != HPAGE_PMD_SIZE) {
VM_BUG_ON(!rwsem_is_locked(&tlb->mm->mmap_sem));
split_huge_page_pmd(vma->vm_mm, pmd);
} else if (zap_huge_pmd(tlb, vma, pmd, addr))
continue;
/* fall through */
}
if (pmd_none_or_clear_bad(pmd))
Because this race condition could be exercised without special
privileges this was reported in CVE-2012-1179.
The race was identified and fully explained by Ulrich who debugged it.
I'm quoting his accurate explanation below, for reference.
====== start quote =======
mapcount 0 page_mapcount 1
kernel BUG at mm/huge_memory.c:1384!
At some point prior to the panic, a "bad pmd ..." message similar to the
following is logged on the console:
mm/memory.c:145: bad pmd ffff8800376e1f98(80000000314000e7).
The "bad pmd ..." message is logged by pmd_clear_bad() before it clears
the page's PMD table entry.
143 void pmd_clear_bad(pmd_t *pmd)
144 {
-> 145 pmd_ERROR(*pmd);
146 pmd_clear(pmd);
147 }
After the PMD table entry has been cleared, there is an inconsistency
between the actual number of PMD table entries that are mapping the page
and the page's map count (_mapcount field in struct page). When the page
is subsequently reclaimed, __split_huge_page() detects this inconsistency.
1381 if (mapcount != page_mapcount(page))
1382 printk(KERN_ERR "mapcount %d page_mapcount %d\n",
1383 mapcount, page_mapcount(page));
-> 1384 BUG_ON(mapcount != page_mapcount(page));
The root cause of the problem is a race of two threads in a multithreaded
process. Thread B incurs a page fault on a virtual address that has never
been accessed (PMD entry is zero) while Thread A is executing an madvise()
system call on a virtual address within the same 2 MB (huge page) range.
virtual address space
.---------------------.
| |
| |
.-|---------------------|
| | |
| | |<-- B(fault)
| | |
2 MB | |/////////////////////|-.
huge < |/////////////////////| > A(range)
page | |/////////////////////|-'
| | |
| | |
'-|---------------------|
| |
| |
'---------------------'
- Thread A is executing an madvise(..., MADV_DONTNEED) system call
on the virtual address range "A(range)" shown in the picture.
sys_madvise
// Acquire the semaphore in shared mode.
down_read(&current->mm->mmap_sem)
...
madvise_vma
switch (behavior)
case MADV_DONTNEED:
madvise_dontneed
zap_page_range
unmap_vmas
unmap_page_range
zap_pud_range
zap_pmd_range
//
// Assume that this huge page has never been accessed.
// I.e. content of the PMD entry is zero (not mapped).
//
if (pmd_trans_huge(*pmd)) {
// We don't get here due to the above assumption.
}
//
// Assume that Thread B incurred a page fault and
.---------> // sneaks in here as shown below.
| //
| if (pmd_none_or_clear_bad(pmd))
| {
| if (unlikely(pmd_bad(*pmd)))
| pmd_clear_bad
| {
| pmd_ERROR
| // Log "bad pmd ..." message here.
| pmd_clear
| // Clear the page's PMD entry.
| // Thread B incremented the map count
| // in page_add_new_anon_rmap(), but
| // now the page is no longer mapped
| // by a PMD entry (-> inconsistency).
| }
| }
|
v
- Thread B is handling a page fault on virtual address "B(fault)" shown
in the picture.
...
do_page_fault
__do_page_fault
// Acquire the semaphore in shared mode.
down_read_trylock(&mm->mmap_sem)
...
handle_mm_fault
if (pmd_none(*pmd) && transparent_hugepage_enabled(vma))
// We get here due to the above assumption (PMD entry is zero).
do_huge_pmd_anonymous_page
alloc_hugepage_vma
// Allocate a new transparent huge page here.
...
__do_huge_pmd_anonymous_page
...
spin_lock(&mm->page_table_lock)
...
page_add_new_anon_rmap
// Here we increment the page's map count (starts at -1).
atomic_set(&page->_mapcount, 0)
set_pmd_at
// Here we set the page's PMD entry which will be cleared
// when Thread A calls pmd_clear_bad().
...
spin_unlock(&mm->page_table_lock)
The mmap_sem does not prevent the race because both threads are acquiring
it in shared mode (down_read). Thread B holds the page_table_lock while
the page's map count and PMD table entry are updated. However, Thread A
does not synchronize on that lock.
====== end quote =======
[[email protected]: checkpatch fixes]
Reported-by: Ulrich Obergfell <[email protected]>
Signed-off-by: Andrea Arcangeli <[email protected]>
Acked-by: Johannes Weiner <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: Hugh Dickins <[email protected]>
Cc: Dave Jones <[email protected]>
Acked-by: Larry Woodman <[email protected]>
Acked-by: Rik van Riel <[email protected]>
Cc: Mark Salter <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
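A sketch of the stabilized check the message arrives at, close to the upstream helper: the pmd is copied to the stack once, a compiler barrier pins the copy on THP builds, and only the copy is tested:
static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd)
{
	pmd_t pmdval = *pmd;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	barrier(); /* keep the compiler from re-reading *pmd */
#endif
	if (pmd_none(pmdval) || pmd_trans_huge(pmdval))
		return 1;
	if (unlikely(pmd_bad(pmdval))) {
		pmd_clear_bad(pmd);
		return 1;
	}
	return 0;
}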
|
static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
unsigned long long val)
{
int retry_count;
u64 memlimit, memswlimit, oldusage, curusage;
int children = mem_cgroup_count_children(memcg);
int ret = -EBUSY;
int enlarge = 0;
/* see mem_cgroup_resize_res_limit */
retry_count = children * MEM_CGROUP_RECLAIM_RETRIES;
oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
while (retry_count) {
if (signal_pending(current)) {
ret = -EINTR;
break;
}
/*
* Rather than hide all in some function, I do this in
* open coded manner. You see what this really does.
* We have to guarantee memcg->res.limit < memcg->memsw.limit.
*/
mutex_lock(&set_limit_mutex);
memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
if (memlimit > val) {
ret = -EINVAL;
mutex_unlock(&set_limit_mutex);
break;
}
memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
if (memswlimit < val)
enlarge = 1;
ret = res_counter_set_limit(&memcg->memsw, val);
if (!ret) {
if (memlimit == val)
memcg->memsw_is_minimum = true;
else
memcg->memsw_is_minimum = false;
}
mutex_unlock(&set_limit_mutex);
if (!ret)
break;
mem_cgroup_reclaim(memcg, GFP_KERNEL,
MEM_CGROUP_RECLAIM_NOSWAP |
MEM_CGROUP_RECLAIM_SHRINK);
curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
/* Usage is reduced ? */
if (curusage >= oldusage)
retry_count--;
else
oldusage = curusage;
}
if (!ret && enlarge)
memcg_oom_recover(memcg);
return ret;
}
|
static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
unsigned long long val)
{
int retry_count;
u64 memlimit, memswlimit, oldusage, curusage;
int children = mem_cgroup_count_children(memcg);
int ret = -EBUSY;
int enlarge = 0;
/* see mem_cgroup_resize_res_limit */
retry_count = children * MEM_CGROUP_RECLAIM_RETRIES;
oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
while (retry_count) {
if (signal_pending(current)) {
ret = -EINTR;
break;
}
/*
* Rather than hide all in some function, I do this in
* open coded manner. You see what this really does.
* We have to guarantee memcg->res.limit < memcg->memsw.limit.
*/
mutex_lock(&set_limit_mutex);
memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
if (memlimit > val) {
ret = -EINVAL;
mutex_unlock(&set_limit_mutex);
break;
}
memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
if (memswlimit < val)
enlarge = 1;
ret = res_counter_set_limit(&memcg->memsw, val);
if (!ret) {
if (memlimit == val)
memcg->memsw_is_minimum = true;
else
memcg->memsw_is_minimum = false;
}
mutex_unlock(&set_limit_mutex);
if (!ret)
break;
mem_cgroup_reclaim(memcg, GFP_KERNEL,
MEM_CGROUP_RECLAIM_NOSWAP |
MEM_CGROUP_RECLAIM_SHRINK);
curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
/* Usage is reduced ? */
if (curusage >= oldusage)
retry_count--;
else
oldusage = curusage;
}
if (!ret && enlarge)
memcg_oom_recover(memcg);
return ret;
}
|
C
|
linux
| 0 |
CVE-2013-7294
|
https://www.cvedetails.com/cve/CVE-2013-7294/
|
CWE-20
|
https://github.com/libreswan/libreswan/commit/2899351224fe2940aec37d7656e1e392c0fe07f0
|
2899351224fe2940aec37d7656e1e392c0fe07f0
|
SECURITY: Properly handle IKEv2 I1 notification packet without KE payload
|
stf_status ikev2parent_inI1outR1(struct msg_digest *md)
{
struct state *st = md->st;
lset_t policy = POLICY_IKEV2_ALLOW;
struct connection *c = find_host_connection(&md->iface->ip_addr,
md->iface->port,
&md->sender,
md->sender_port,
POLICY_IKEV2_ALLOW);
/* retrieve st->st_gi */
#if 0
if (c == NULL) {
/*
* make up a policy from the thing that was proposed, and see
* if we can find a connection with that policy.
*/
pb_stream pre_sa_pbs = sa_pd->pbs;
policy = preparse_isakmp_sa_body(&pre_sa_pbs);
c = find_host_connection(&md->iface->ip_addr, pluto_port,
(ip_address*)NULL, md->sender_port,
policy);
}
#endif
if (c == NULL) {
/* See if a wildcarded connection can be found.
* We cannot pick the right connection, so we're making a guess.
* All Road Warrior connections are fair game:
* we pick the first we come across (if any).
* If we don't find any, we pick the first opportunistic
* with the smallest subnet that includes the peer.
* There is, of course, no necessary relationship between
* an Initiator's address and that of its client,
* but Food Groups kind of assumes one.
*/
{
struct connection *d;
d = find_host_connection(&md->iface->ip_addr,
pluto_port,
(ip_address*)NULL,
md->sender_port, policy);
for (; d != NULL; d = d->hp_next) {
if (d->kind == CK_GROUP) {
/* ignore */
} else {
if (d->kind == CK_TEMPLATE &&
!(d->policy & POLICY_OPPO)) {
/* must be Road Warrior: we have a winner */
c = d;
break;
}
/* Opportunistic or Shunt: pick tightest match */
if (addrinsubnet(&md->sender,
&d->spd.that.client)
&&
(c == NULL ||
!subnetinsubnet(&c->spd.that.
client,
&d->spd.that.
client)))
c = d;
}
}
}
if (c == NULL) {
loglog(RC_LOG_SERIOUS, "initial parent SA message received on %s:%u"
" but no connection has been authorized%s%s",
ip_str(
&md->iface->ip_addr),
ntohs(portof(&md->iface->ip_addr)),
(policy != LEMPTY) ? " with policy=" : "",
(policy !=
LEMPTY) ? bitnamesof(sa_policy_bit_names,
policy) : "");
return STF_FAIL + v2N_NO_PROPOSAL_CHOSEN;
}
if (c->kind != CK_TEMPLATE) {
loglog(RC_LOG_SERIOUS, "initial parent SA message received on %s:%u"
" but \"%s\" forbids connection",
ip_str(
&md->iface->ip_addr), pluto_port,
c->name);
return STF_FAIL + v2N_NO_PROPOSAL_CHOSEN;
}
c = rw_instantiate(c, &md->sender, NULL, NULL);
} else {
/* we found a non-wildcard conn. double check if it needs instantiation anyway (eg vnet=) */
/* vnet=/vhost= should have set CK_TEMPLATE on connection loading */
if ((c->kind == CK_TEMPLATE) && c->spd.that.virt) {
DBG(DBG_CONTROL,
DBG_log(
"local endpoint has virt (vnet/vhost) set without wildcards - needs instantiation"));
c = rw_instantiate(c, &md->sender, NULL, NULL);
} else if ((c->kind == CK_TEMPLATE) &&
(c->policy & POLICY_IKEV2_ALLOW_NARROWING)) {
DBG(DBG_CONTROL,
DBG_log(
"local endpoint has narrowing=yes - needs instantiation"));
c = rw_instantiate(c, &md->sender, NULL, NULL);
}
}
DBG_log("found connection: %s\n", c ? c->name : "<none>");
if (!st) {
st = new_state();
/* set up new state */
memcpy(st->st_icookie, md->hdr.isa_icookie, COOKIE_SIZE);
/* initialize_new_state expects valid icookie/rcookie values, so create it now */
get_cookie(FALSE, st->st_rcookie, COOKIE_SIZE, &md->sender);
initialize_new_state(st, c, policy, 0, NULL_FD,
pcim_stranger_crypto);
st->st_ikev2 = TRUE;
change_state(st, STATE_PARENT_R1);
st->st_msgid_lastack = INVALID_MSGID;
st->st_msgid_nextuse = 0;
md->st = st;
md->from_state = STATE_IKEv2_BASE;
}
/* check,as a responder, are we under dos attack or not
* if yes go to 6 message exchange mode. it is a config option for now.
* TBD set force_busy dynamically
* Paul: Can we check for STF_TOOMUCHCRYPTO ?
*/
if (force_busy == TRUE) {
u_char dcookie[SHA1_DIGEST_SIZE];
chunk_t dc;
ikev2_get_dcookie( dcookie, st->st_ni, &md->sender,
st->st_icookie);
dc.ptr = dcookie;
dc.len = SHA1_DIGEST_SIZE;
/* check if I1 packet contian KE and a v2N payload with type COOKIE */
if ( md->chain[ISAKMP_NEXT_v2KE] &&
md->chain[ISAKMP_NEXT_v2N] &&
(md->chain[ISAKMP_NEXT_v2N]->payload.v2n.isan_type ==
v2N_COOKIE)) {
u_int8_t spisize;
const pb_stream *dc_pbs;
chunk_t blob;
DBG(DBG_CONTROLMORE,
DBG_log("received a DOS cookie in I1 verify it"));
/* we received dcookie we send earlier verify it */
spisize =
md->chain[ISAKMP_NEXT_v2N]->payload.v2n.
isan_spisize;
dc_pbs = &md->chain[ISAKMP_NEXT_v2N]->pbs;
blob.ptr = dc_pbs->cur + spisize;
blob.len = pbs_left(dc_pbs) - spisize;
DBG(DBG_CONTROLMORE,
DBG_dump_chunk("dcookie received in I1 Packet",
blob);
DBG_dump("dcookie computed", dcookie,
SHA1_DIGEST_SIZE));
if (memcmp(blob.ptr, dcookie, SHA1_DIGEST_SIZE) != 0) {
libreswan_log(
"mismatch in DOS v2N_COOKIE,send a new one");
SEND_NOTIFICATION_AA(v2N_COOKIE, &dc);
return STF_FAIL + v2N_INVALID_IKE_SPI;
}
DBG(DBG_CONTROLMORE,
DBG_log("dcookie received match with computed one"));
} else {
/* we are under DOS attack I1 contains no DOS COOKIE */
DBG(DBG_CONTROLMORE,
DBG_log(
"busy mode on. receieved I1 without a valid dcookie");
DBG_log("send a dcookie and forget this state"));
SEND_NOTIFICATION_AA(v2N_COOKIE, &dc);
return STF_FAIL;
}
} else {
DBG(DBG_CONTROLMORE,
DBG_log("will not send/process a dcookie"));
}
/*
* We have to agree to the DH group before we actually know who
* we are talking to. If we support the group, we use it.
*
* It is really too hard here to go through all the possible policies
* that might permit this group. If we think we are being DOS'ed
* then we should demand a cookie.
*/
{
struct ikev2_ke *ke;
char fromname[ADDRTOT_BUF];
addrtot(&md->sender, 0, fromname, ADDRTOT_BUF);
if (!md->chain[ISAKMP_NEXT_v2KE]) {
/* is this a notify? If so, log it */
if(md->chain[ISAKMP_NEXT_v2N]) {
libreswan_log("Received Notify(%d): %s",
md->chain[ISAKMP_NEXT_v2N]->payload.v2n.isan_type,
enum_name(&ikev2_notify_names,
md->chain[ISAKMP_NEXT_v2N]->payload.v2n.isan_type));
}
libreswan_log(
"rejecting I1 from %s:%u, no KE payload present",
fromname, md->sender_port);
return STF_FAIL + v2N_INVALID_KE_PAYLOAD;
}
ke = &md->chain[ISAKMP_NEXT_v2KE]->payload.v2ke;
st->st_oakley.group = lookup_group(ke->isak_group);
if (st->st_oakley.group == NULL) {
libreswan_log(
"rejecting I1 from %s:%u, invalid DH group=%u",
fromname, md->sender_port,
ke->isak_group);
return STF_FAIL + v2N_INVALID_KE_PAYLOAD;
}
}
/* now. we need to go calculate the nonce, and the KE */
{
struct ke_continuation *ke = alloc_thing(
struct ke_continuation,
"ikev2_inI1outR1 KE");
stf_status e;
ke->md = md;
set_suspended(st, ke->md);
if (!st->st_sec_in_use) {
pcrc_init(&ke->ke_pcrc);
ke->ke_pcrc.pcrc_func =
ikev2_parent_inI1outR1_continue;
e = build_ke(&ke->ke_pcrc, st, st->st_oakley.group,
pcim_stranger_crypto);
if (e != STF_SUSPEND && e != STF_INLINE) {
loglog(RC_CRYPTOFAILED, "system too busy");
delete_state(st);
}
} else {
e =
ikev2_parent_inI1outR1_tail((struct
pluto_crypto_req_cont
*)ke,
NULL);
}
reset_globals();
return e;
}
}
|
stf_status ikev2parent_inI1outR1(struct msg_digest *md)
{
struct state *st = md->st;
lset_t policy = POLICY_IKEV2_ALLOW;
struct connection *c = find_host_connection(&md->iface->ip_addr,
md->iface->port,
&md->sender,
md->sender_port,
POLICY_IKEV2_ALLOW);
/* retrieve st->st_gi */
#if 0
if (c == NULL) {
/*
* make up a policy from the thing that was proposed, and see
* if we can find a connection with that policy.
*/
pb_stream pre_sa_pbs = sa_pd->pbs;
policy = preparse_isakmp_sa_body(&pre_sa_pbs);
c = find_host_connection(&md->iface->ip_addr, pluto_port,
(ip_address*)NULL, md->sender_port,
policy);
}
#endif
if (c == NULL) {
/* See if a wildcarded connection can be found.
* We cannot pick the right connection, so we're making a guess.
* All Road Warrior connections are fair game:
* we pick the first we come across (if any).
* If we don't find any, we pick the first opportunistic
* with the smallest subnet that includes the peer.
* There is, of course, no necessary relationship between
* an Initiator's address and that of its client,
* but Food Groups kind of assumes one.
*/
{
struct connection *d;
d = find_host_connection(&md->iface->ip_addr,
pluto_port,
(ip_address*)NULL,
md->sender_port, policy);
for (; d != NULL; d = d->hp_next) {
if (d->kind == CK_GROUP) {
/* ignore */
} else {
if (d->kind == CK_TEMPLATE &&
!(d->policy & POLICY_OPPO)) {
/* must be Road Warrior: we have a winner */
c = d;
break;
}
/* Opportunistic or Shunt: pick tightest match */
if (addrinsubnet(&md->sender,
&d->spd.that.client)
&&
(c == NULL ||
!subnetinsubnet(&c->spd.that.
client,
&d->spd.that.
client)))
c = d;
}
}
}
if (c == NULL) {
loglog(RC_LOG_SERIOUS, "initial parent SA message received on %s:%u"
" but no connection has been authorized%s%s",
ip_str(
&md->iface->ip_addr),
ntohs(portof(&md->iface->ip_addr)),
(policy != LEMPTY) ? " with policy=" : "",
(policy !=
LEMPTY) ? bitnamesof(sa_policy_bit_names,
policy) : "");
return STF_FAIL + v2N_NO_PROPOSAL_CHOSEN;
}
if (c->kind != CK_TEMPLATE) {
loglog(RC_LOG_SERIOUS, "initial parent SA message received on %s:%u"
" but \"%s\" forbids connection",
ip_str(
&md->iface->ip_addr), pluto_port,
c->name);
return STF_FAIL + v2N_NO_PROPOSAL_CHOSEN;
}
c = rw_instantiate(c, &md->sender, NULL, NULL);
} else {
/* we found a non-wildcard conn. double check if it needs instantiation anyway (eg vnet=) */
/* vnet=/vhost= should have set CK_TEMPLATE on connection loading */
if ((c->kind == CK_TEMPLATE) && c->spd.that.virt) {
DBG(DBG_CONTROL,
DBG_log(
"local endpoint has virt (vnet/vhost) set without wildcards - needs instantiation"));
c = rw_instantiate(c, &md->sender, NULL, NULL);
} else if ((c->kind == CK_TEMPLATE) &&
(c->policy & POLICY_IKEV2_ALLOW_NARROWING)) {
DBG(DBG_CONTROL,
DBG_log(
"local endpoint has narrowing=yes - needs instantiation"));
c = rw_instantiate(c, &md->sender, NULL, NULL);
}
}
DBG_log("found connection: %s\n", c ? c->name : "<none>");
if (!st) {
st = new_state();
/* set up new state */
memcpy(st->st_icookie, md->hdr.isa_icookie, COOKIE_SIZE);
/* initialize_new_state expects valid icookie/rcookie values, so create it now */
get_cookie(FALSE, st->st_rcookie, COOKIE_SIZE, &md->sender);
initialize_new_state(st, c, policy, 0, NULL_FD,
pcim_stranger_crypto);
st->st_ikev2 = TRUE;
change_state(st, STATE_PARENT_R1);
st->st_msgid_lastack = INVALID_MSGID;
st->st_msgid_nextuse = 0;
md->st = st;
md->from_state = STATE_IKEv2_BASE;
}
/* check,as a responder, are we under dos attack or not
* if yes go to 6 message exchange mode. it is a config option for now.
* TBD set force_busy dynamically
* Paul: Can we check for STF_TOOMUCHCRYPTO ?
*/
if (force_busy == TRUE) {
u_char dcookie[SHA1_DIGEST_SIZE];
chunk_t dc;
ikev2_get_dcookie( dcookie, st->st_ni, &md->sender,
st->st_icookie);
dc.ptr = dcookie;
dc.len = SHA1_DIGEST_SIZE;
/* check if I1 packet contian KE and a v2N payload with type COOKIE */
if ( md->chain[ISAKMP_NEXT_v2KE] &&
md->chain[ISAKMP_NEXT_v2N] &&
(md->chain[ISAKMP_NEXT_v2N]->payload.v2n.isan_type ==
v2N_COOKIE)) {
u_int8_t spisize;
const pb_stream *dc_pbs;
chunk_t blob;
DBG(DBG_CONTROLMORE,
DBG_log("received a DOS cookie in I1 verify it"));
/* we received dcookie we send earlier verify it */
spisize =
md->chain[ISAKMP_NEXT_v2N]->payload.v2n.
isan_spisize;
dc_pbs = &md->chain[ISAKMP_NEXT_v2N]->pbs;
blob.ptr = dc_pbs->cur + spisize;
blob.len = pbs_left(dc_pbs) - spisize;
DBG(DBG_CONTROLMORE,
DBG_dump_chunk("dcookie received in I1 Packet",
blob);
DBG_dump("dcookie computed", dcookie,
SHA1_DIGEST_SIZE));
if (memcmp(blob.ptr, dcookie, SHA1_DIGEST_SIZE) != 0) {
libreswan_log(
"mismatch in DOS v2N_COOKIE,send a new one");
SEND_NOTIFICATION_AA(v2N_COOKIE, &dc);
return STF_FAIL + v2N_INVALID_IKE_SPI;
}
DBG(DBG_CONTROLMORE,
DBG_log("dcookie received match with computed one"));
} else {
/* we are under DOS attack I1 contains no DOS COOKIE */
DBG(DBG_CONTROLMORE,
DBG_log(
"busy mode on. receieved I1 without a valid dcookie");
DBG_log("send a dcookie and forget this state"));
SEND_NOTIFICATION_AA(v2N_COOKIE, &dc);
return STF_FAIL;
}
} else {
DBG(DBG_CONTROLMORE,
DBG_log("will not send/process a dcookie"));
}
/*
* We have to agree to the DH group before we actually know who
* we are talking to. If we support the group, we use it.
*
* It is really too hard here to go through all the possible policies
* that might permit this group. If we think we are being DOS'ed
* then we should demand a cookie.
*/
{
struct ikev2_ke *ke;
ke = &md->chain[ISAKMP_NEXT_v2KE]->payload.v2ke;
st->st_oakley.group = lookup_group(ke->isak_group);
if (st->st_oakley.group == NULL) {
char fromname[ADDRTOT_BUF];
addrtot(&md->sender, 0, fromname, ADDRTOT_BUF);
libreswan_log(
"rejecting I1 from %s:%u, invalid DH group=%u",
fromname, md->sender_port,
ke->isak_group);
return v2N_INVALID_KE_PAYLOAD;
}
}
/* now. we need to go calculate the nonce, and the KE */
{
struct ke_continuation *ke = alloc_thing(
struct ke_continuation,
"ikev2_inI1outR1 KE");
stf_status e;
ke->md = md;
set_suspended(st, ke->md);
if (!st->st_sec_in_use) {
pcrc_init(&ke->ke_pcrc);
ke->ke_pcrc.pcrc_func =
ikev2_parent_inI1outR1_continue;
e = build_ke(&ke->ke_pcrc, st, st->st_oakley.group,
pcim_stranger_crypto);
if (e != STF_SUSPEND && e != STF_INLINE) {
loglog(RC_CRYPTOFAILED, "system too busy");
delete_state(st);
}
} else {
e =
ikev2_parent_inI1outR1_tail((struct
pluto_crypto_req_cont
*)ke,
NULL);
}
reset_globals();
return e;
}
}
|
C
|
libreswan
| 1 |
CVE-2016-9557
|
https://www.cvedetails.com/cve/CVE-2016-9557/
|
CWE-190
|
https://github.com/mdadams/jasper/commit/d42b2388f7f8e0332c846675133acea151fc557a
|
d42b2388f7f8e0332c846675133acea151fc557a
|
The generation of the configuration file jas_config.h has been completely
reworked in order to avoid pollution of the global namespace.
Some problematic types like uchar, ulong, and friends have been replaced
with names with a jas_ prefix.
An option max_samples has been added to the BMP and JPEG decoders to
restrict the maximum size of image that they can decode. This change
was made as a (possibly temporary) fix to address security concerns.
A max_samples command-line option has also been added to imginfo.
Whether an image component (for jas_image_t) is stored in memory or on
disk is now based on the component size (rather than the image size).
Some debug log messages were added.
Some new integer overflow checks were added.
Some new safe integer add/multiply functions were added.
More pre-C99 cruft was removed. JasPer has numerous "hacks" to
handle pre-C99 compilers. JasPer now assumes C99 support. So, this
pre-C99 cruft is unnecessary and can be removed.
The regression jasper-doublefree-mem_close.jpg has been re-enabled.
Theoretically, it should work more predictably now.
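For illustration, a minimal sketch of a safe multiply of the kind mentioned above; the function name is hypothetical:
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static bool safe_size_mul(size_t a, size_t b, size_t *result)
{
	if (b != 0 && a > SIZE_MAX / b)
		return false; /* a * b would wrap around */
	*result = a * b;
	return true;
}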
|
static int jas_iccgetuint64(jas_stream_t *in, jas_iccuint64_t *val)
{
jas_ulonglong tmp;
if (jas_iccgetuint(in, 8, &tmp))
return -1;
*val = tmp;
return 0;
}
|
static int jas_iccgetuint64(jas_stream_t *in, jas_iccuint64_t *val)
{
ulonglong tmp;
if (jas_iccgetuint(in, 8, &tmp))
return -1;
*val = tmp;
return 0;
}
|
C
|
jasper
| 1 |
CVE-2019-5797
| null | null |
https://github.com/chromium/chromium/commit/ba169c14aa9cc2efd708a878ae21ff34f3898fe0
|
ba169c14aa9cc2efd708a878ae21ff34f3898fe0
|
Fixing BadMessageCallback usage by SessionStorage
TBR: [email protected]
Bug: 916523
Change-Id: I027cc818cfba917906844ad2ec0edd7fa4761bd1
Reviewed-on: https://chromium-review.googlesource.com/c/1401604
Commit-Queue: Daniel Murphy <[email protected]>
Reviewed-by: Marijn Kruisselbrink <[email protected]>
Reviewed-by: Ken Rockot <[email protected]>
Cr-Commit-Position: refs/heads/master@{#621772}
|
CacheStorageContextImpl* StoragePartitionImpl::GetCacheStorageContext() {
return cache_storage_context_.get();
}
|
CacheStorageContextImpl* StoragePartitionImpl::GetCacheStorageContext() {
return cache_storage_context_.get();
}
|
C
|
Chrome
| 0 |
CVE-2011-3209
|
https://www.cvedetails.com/cve/CVE-2011-3209/
|
CWE-189
|
https://github.com/torvalds/linux/commit/f8bd2258e2d520dff28c855658bd24bdafb5102d
|
f8bd2258e2d520dff28c855658bd24bdafb5102d
|
remove div_long_long_rem
x86 is the only arch right now, which provides an optimized for
div_long_long_rem and it has the downside that one has to be very careful that
the divide doesn't overflow.
The API is a little akward, as the arguments for the unsigned divide are
signed. The signed version also doesn't handle a negative divisor and
produces worse code on 64bit archs.
There is little incentive to keep this API alive, so this converts the few
users to the new API.
Signed-off-by: Roman Zippel <[email protected]>
Cc: Ralf Baechle <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: john stultz <[email protected]>
Cc: Christoph Lameter <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
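For reference, a sketch of the conversion this implies, assuming the linux/math64.h helpers that superseded div_long_long_rem:
#include <linux/math64.h>

static s64 split_div(s64 dividend, s32 divisor, s32 *rem)
{
	/* Signed 64-by-32 divide that also yields the remainder, without
	 * the overflow pitfalls of the old x86-only div_long_long_rem. */
	return div_s64_rem(dividend, divisor, rem);
}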
|
static inline void *get_freepointer(struct kmem_cache *s, void *object)
{
return *(void **)(object + s->offset);
}
|
static inline void *get_freepointer(struct kmem_cache *s, void *object)
{
return *(void **)(object + s->offset);
}
|
C
|
linux
| 0 |
CVE-2016-5199
|
https://www.cvedetails.com/cve/CVE-2016-5199/
|
CWE-119
|
https://github.com/chromium/chromium/commit/c995d4fe5e96f4d6d4a88b7867279b08e72d2579
|
c995d4fe5e96f4d6d4a88b7867279b08e72d2579
|
Move IsDataSaverEnabledByUser to be a static method and use it
This method now officially becomes the source of truth that
everything in the code base eventually calls into to determine whether
or not DataSaver is enabled.
Bug: 934399
Change-Id: Iae837b710ace8cc3101188f79d02cbc2d4f0fd93
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1537242
Reviewed-by: Joshua Pawlicki <[email protected]>
Reviewed-by: Tarun Bansal <[email protected]>
Commit-Queue: Robert Ogden <[email protected]>
Cr-Commit-Position: refs/heads/master@{#643948}
|
ChromeContentBrowserClient::GetServiceManifestOverlay(base::StringPiece name) {
if (name == content::mojom::kBrowserServiceName) {
return GetChromeContentBrowserOverlayManifest();
} else if (name == content::mojom::kGpuServiceName) {
return GetChromeContentGpuOverlayManifest();
} else if (name == content::mojom::kPackagedServicesServiceName) {
service_manager::Manifest overlay;
overlay.packaged_services = GetChromePackagedServiceManifests();
return overlay;
} else if (name == content::mojom::kRendererServiceName) {
return GetChromeContentRendererOverlayManifest();
} else if (name == content::mojom::kUtilityServiceName) {
return GetChromeContentUtilityOverlayManifest();
}
return base::nullopt;
}
|
ChromeContentBrowserClient::GetServiceManifestOverlay(base::StringPiece name) {
if (name == content::mojom::kBrowserServiceName) {
return GetChromeContentBrowserOverlayManifest();
} else if (name == content::mojom::kGpuServiceName) {
return GetChromeContentGpuOverlayManifest();
} else if (name == content::mojom::kPackagedServicesServiceName) {
service_manager::Manifest overlay;
overlay.packaged_services = GetChromePackagedServiceManifests();
return overlay;
} else if (name == content::mojom::kRendererServiceName) {
return GetChromeContentRendererOverlayManifest();
} else if (name == content::mojom::kUtilityServiceName) {
return GetChromeContentUtilityOverlayManifest();
}
return base::nullopt;
}
|
C
|
Chrome
| 0 |
CVE-2018-17462
|
https://www.cvedetails.com/cve/CVE-2018-17462/
|
CWE-20
|
https://github.com/chromium/chromium/commit/9d2ead1650a1c901754dd1a68705006a6934cffc
|
9d2ead1650a1c901754dd1a68705006a6934cffc
|
Refcount AppCacheGroup correctly.
Bug: 888926
Change-Id: Iab0d82d272e2f24a5e91180d64bc8e2aa8a8238d
Reviewed-on: https://chromium-review.googlesource.com/1246827
Reviewed-by: Marijn Kruisselbrink <[email protected]>
Reviewed-by: Joshua Bell <[email protected]>
Commit-Queue: Chris Palmer <[email protected]>
Cr-Commit-Position: refs/heads/master@{#594475}
|
void AppCacheGroup::HostDestructionImminent(AppCacheHost* host) {
queued_updates_.erase(host);
if (queued_updates_.empty() && !restart_update_task_.IsCancelled())
restart_update_task_.Cancel();
}
|
void AppCacheGroup::HostDestructionImminent(AppCacheHost* host) {
queued_updates_.erase(host);
if (queued_updates_.empty() && !restart_update_task_.IsCancelled())
restart_update_task_.Cancel();
}
|
C
|
Chrome
| 0 |
CVE-2013-4160
|
https://www.cvedetails.com/cve/CVE-2013-4160/
| null |
https://github.com/mm2/Little-CMS/commit/91c2db7f2559be504211b283bc3a2c631d6f06d9
|
91c2db7f2559be504211b283bc3a2c631d6f06d9
|
Non happy-path fixes
|
cmsBool FixWhiteMisalignment(cmsPipeline* Lut, cmsColorSpaceSignature EntryColorSpace, cmsColorSpaceSignature ExitColorSpace)
{
cmsUInt16Number *WhitePointIn, *WhitePointOut;
cmsUInt16Number WhiteIn[cmsMAXCHANNELS], WhiteOut[cmsMAXCHANNELS], ObtainedOut[cmsMAXCHANNELS];
cmsUInt32Number i, nOuts, nIns;
cmsStage *PreLin = NULL, *CLUT = NULL, *PostLin = NULL;
if (!_cmsEndPointsBySpace(EntryColorSpace,
&WhitePointIn, NULL, &nIns)) return FALSE;
if (!_cmsEndPointsBySpace(ExitColorSpace,
&WhitePointOut, NULL, &nOuts)) return FALSE;
if (Lut ->InputChannels != nIns) return FALSE;
if (Lut ->OutputChannels != nOuts) return FALSE;
cmsPipelineEval16(WhitePointIn, ObtainedOut, Lut);
if (WhitesAreEqual(nOuts, WhitePointOut, ObtainedOut)) return TRUE; // whites already match
if (!cmsPipelineCheckAndRetreiveStages(Lut, 3, cmsSigCurveSetElemType, cmsSigCLutElemType, cmsSigCurveSetElemType, &PreLin, &CLUT, &PostLin))
if (!cmsPipelineCheckAndRetreiveStages(Lut, 2, cmsSigCurveSetElemType, cmsSigCLutElemType, &PreLin, &CLUT))
if (!cmsPipelineCheckAndRetreiveStages(Lut, 2, cmsSigCLutElemType, cmsSigCurveSetElemType, &CLUT, &PostLin))
if (!cmsPipelineCheckAndRetreiveStages(Lut, 1, cmsSigCLutElemType, &CLUT))
return FALSE;
if (PreLin) {
cmsToneCurve** Curves = _cmsStageGetPtrToCurveSet(PreLin);
for (i=0; i < nIns; i++) {
WhiteIn[i] = cmsEvalToneCurve16(Curves[i], WhitePointIn[i]);
}
}
else {
for (i=0; i < nIns; i++)
WhiteIn[i] = WhitePointIn[i];
}
if (PostLin) {
cmsToneCurve** Curves = _cmsStageGetPtrToCurveSet(PostLin);
for (i=0; i < nOuts; i++) {
cmsToneCurve* InversePostLin = cmsReverseToneCurve(Curves[i]);
WhiteOut[i] = cmsEvalToneCurve16(InversePostLin, WhitePointOut[i]);
cmsFreeToneCurve(InversePostLin);
}
}
else {
for (i=0; i < nOuts; i++)
WhiteOut[i] = WhitePointOut[i];
}
PatchLUT(CLUT, WhiteIn, WhiteOut, nOuts, nIns);
return TRUE;
}
|
cmsBool FixWhiteMisalignment(cmsPipeline* Lut, cmsColorSpaceSignature EntryColorSpace, cmsColorSpaceSignature ExitColorSpace)
{
cmsUInt16Number *WhitePointIn, *WhitePointOut;
cmsUInt16Number WhiteIn[cmsMAXCHANNELS], WhiteOut[cmsMAXCHANNELS], ObtainedOut[cmsMAXCHANNELS];
cmsUInt32Number i, nOuts, nIns;
cmsStage *PreLin = NULL, *CLUT = NULL, *PostLin = NULL;
if (!_cmsEndPointsBySpace(EntryColorSpace,
&WhitePointIn, NULL, &nIns)) return FALSE;
if (!_cmsEndPointsBySpace(ExitColorSpace,
&WhitePointOut, NULL, &nOuts)) return FALSE;
if (Lut ->InputChannels != nIns) return FALSE;
if (Lut ->OutputChannels != nOuts) return FALSE;
cmsPipelineEval16(WhitePointIn, ObtainedOut, Lut);
if (WhitesAreEqual(nOuts, WhitePointOut, ObtainedOut)) return TRUE; // whites already match
if (!cmsPipelineCheckAndRetreiveStages(Lut, 3, cmsSigCurveSetElemType, cmsSigCLutElemType, cmsSigCurveSetElemType, &PreLin, &CLUT, &PostLin))
if (!cmsPipelineCheckAndRetreiveStages(Lut, 2, cmsSigCurveSetElemType, cmsSigCLutElemType, &PreLin, &CLUT))
if (!cmsPipelineCheckAndRetreiveStages(Lut, 2, cmsSigCLutElemType, cmsSigCurveSetElemType, &CLUT, &PostLin))
if (!cmsPipelineCheckAndRetreiveStages(Lut, 1, cmsSigCLutElemType, &CLUT))
return FALSE;
if (PreLin) {
cmsToneCurve** Curves = _cmsStageGetPtrToCurveSet(PreLin);
for (i=0; i < nIns; i++) {
WhiteIn[i] = cmsEvalToneCurve16(Curves[i], WhitePointIn[i]);
}
}
else {
for (i=0; i < nIns; i++)
WhiteIn[i] = WhitePointIn[i];
}
if (PostLin) {
cmsToneCurve** Curves = _cmsStageGetPtrToCurveSet(PostLin);
for (i=0; i < nOuts; i++) {
cmsToneCurve* InversePostLin = cmsReverseToneCurve(Curves[i]);
WhiteOut[i] = cmsEvalToneCurve16(InversePostLin, WhitePointOut[i]);
cmsFreeToneCurve(InversePostLin);
}
}
else {
for (i=0; i < nOuts; i++)
WhiteOut[i] = WhitePointOut[i];
}
PatchLUT(CLUT, WhiteIn, WhiteOut, nOuts, nIns);
return TRUE;
}
|
C
|
Little-CMS
| 0 |
CVE-2015-3412
|
https://www.cvedetails.com/cve/CVE-2015-3412/
|
CWE-254
|
https://git.php.net/?p=php-src.git;a=commit;h=4435b9142ff9813845d5c97ab29a5d637bedb257
|
4435b9142ff9813845d5c97ab29a5d637bedb257
| null |
PHP_FUNCTION(imagefilledarc)
{
zval *IM;
long cx, cy, w, h, ST, E, col, style;
gdImagePtr im;
int e, st;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rllllllll", &IM, &cx, &cy, &w, &h, &ST, &E, &col, &style) == FAILURE) {
return;
}
ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd);
e = E;
if (e < 0) {
e %= 360;
}
st = ST;
if (st < 0) {
st %= 360;
}
gdImageFilledArc(im, cx, cy, w, h, st, e, col, style);
RETURN_TRUE;
}
|
PHP_FUNCTION(imagefilledarc)
{
zval *IM;
long cx, cy, w, h, ST, E, col, style;
gdImagePtr im;
int e, st;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rllllllll", &IM, &cx, &cy, &w, &h, &ST, &E, &col, &style) == FAILURE) {
return;
}
ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd);
e = E;
if (e < 0) {
e %= 360;
}
st = ST;
if (st < 0) {
st %= 360;
}
gdImageFilledArc(im, cx, cy, w, h, st, e, col, style);
RETURN_TRUE;
}
|
C
|
php
| 0 |
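A note on the `e %= 360` / `st %= 360` guards in the function above: C's `%` operator uses truncated division, so the remainder keeps the sign of the dividend. The guard therefore maps a negative angle into (-360, 0], not into [0, 360), and only bounds the magnitude of the value passed to gdImageFilledArc. A minimal, self-contained check:

#include <stdio.h>

int main(void)
{
    int e = -450;
    if (e < 0) {
        e %= 360; /* truncated division: result keeps the dividend's sign */
    }
    printf("%d\n", e); /* prints -90, not 270 */
    return 0;
}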
CVE-2016-9557
|
https://www.cvedetails.com/cve/CVE-2016-9557/
|
CWE-190
|
https://github.com/mdadams/jasper/commit/d42b2388f7f8e0332c846675133acea151fc557a
|
d42b2388f7f8e0332c846675133acea151fc557a
|
The generation of the configuration file jas_config.h has been completely
reworked in order to avoid pollution of the global namespace.
Some problematic types like uchar, ulong, and friends have been replaced
with names with a jas_ prefix.
An option max_samples has been added to the BMP and JPEG decoders to
restrict the maximum size of image that they can decode. This change
was made as a (possibly temporary) fix to address security concerns.
A max_samples command-line option has also been added to imginfo.
Whether an image component (for jas_image_t) is stored in memory or on
disk is now based on the component size (rather than the image size).
Some debug log message were added.
Some new integer overflow checks were added.
Some new safe integer add/multiply functions were added.
More pre-C99 cruft was removed. JasPer has numerous "hacks" to
handle pre-C99 compilers. JasPer now assumes C99 support. So, this
pre-C99 cruft is unnecessary and can be removed.
The regression jasper-doublefree-mem_close.jpg has been re-enabled.
Theoretically, it should work more predictably now.
|
jas_image_t *jas_image_chclrspc(jas_image_t *image, jas_cmprof_t *outprof,
int intent)
{
jas_image_t *inimage;
int minhstep;
int minvstep;
int i;
int j;
int k;
int n;
int hstep;
int vstep;
int numinauxchans;
int numoutauxchans;
int numinclrchans;
int numoutclrchans;
int prec;
jas_image_t *outimage;
int cmpttype;
int numoutchans;
jas_cmprof_t *inprof;
jas_cmprof_t *tmpprof;
jas_image_cmptparm_t cmptparm;
int width;
int height;
jas_cmxform_t *xform;
jas_cmpixmap_t inpixmap;
jas_cmpixmap_t outpixmap;
jas_cmcmptfmt_t *incmptfmts;
jas_cmcmptfmt_t *outcmptfmts;
#if 0
jas_eprintf("IMAGE\n");
jas_image_dump(image, stderr);
#endif
outimage = 0;
xform = 0;
if (!(inimage = jas_image_copy(image)))
goto error;
image = 0;
if (!jas_image_ishomosamp(inimage)) {
minhstep = jas_image_cmpthstep(inimage, 0);
minvstep = jas_image_cmptvstep(inimage, 0);
for (i = 1; i < jas_image_numcmpts(inimage); ++i) {
hstep = jas_image_cmpthstep(inimage, i);
vstep = jas_image_cmptvstep(inimage, i);
if (hstep < minhstep) {
minhstep = hstep;
}
if (vstep < minvstep) {
minvstep = vstep;
}
}
n = jas_image_numcmpts(inimage);
for (i = 0; i < n; ++i) {
cmpttype = jas_image_cmpttype(inimage, i);
if (jas_image_sampcmpt(inimage, i, i + 1, 0, 0, minhstep, minvstep,
jas_image_cmptsgnd(inimage, i), jas_image_cmptprec(inimage, i))) {
goto error;
}
jas_image_setcmpttype(inimage, i + 1, cmpttype);
jas_image_delcmpt(inimage, i);
}
}
width = jas_image_cmptwidth(inimage, 0);
height = jas_image_cmptheight(inimage, 0);
hstep = jas_image_cmpthstep(inimage, 0);
vstep = jas_image_cmptvstep(inimage, 0);
if (!(inprof = jas_image_cmprof(inimage))) {
abort();
}
numinclrchans = jas_clrspc_numchans(jas_cmprof_clrspc(inprof));
numinauxchans = jas_image_numcmpts(inimage) - numinclrchans;
numoutclrchans = jas_clrspc_numchans(jas_cmprof_clrspc(outprof));
numoutauxchans = 0;
numoutchans = numoutclrchans + numoutauxchans;
prec = 8;
if (!(outimage = jas_image_create0())) {
goto error;
}
/* Create a component for each of the colorants. */
for (i = 0; i < numoutclrchans; ++i) {
cmptparm.tlx = 0;
cmptparm.tly = 0;
cmptparm.hstep = hstep;
cmptparm.vstep = vstep;
cmptparm.width = width;
cmptparm.height = height;
cmptparm.prec = prec;
cmptparm.sgnd = 0;
if (jas_image_addcmpt(outimage, -1, &cmptparm))
goto error;
jas_image_setcmpttype(outimage, i, JAS_IMAGE_CT_COLOR(i));
}
#if 0
/* Copy the auxiliary components without modification. */
for (i = 0; i < jas_image_numcmpts(inimage); ++i) {
if (!ISCOLOR(jas_image_cmpttype(inimage, i))) {
jas_image_copycmpt(outimage, -1, inimage, i);
/* XXX - need to specify laydown of component on ref. grid */
}
}
#endif
if (!(tmpprof = jas_cmprof_copy(outprof)))
goto error;
assert(!jas_image_cmprof(outimage));
jas_image_setcmprof(outimage, tmpprof);
tmpprof = 0;
jas_image_setclrspc(outimage, jas_cmprof_clrspc(outprof));
if (!(xform = jas_cmxform_create(inprof, outprof, 0, JAS_CMXFORM_OP_FWD,
intent, 0))) {
goto error;
}
inpixmap.numcmpts = numinclrchans;
if (!(incmptfmts = jas_alloc2(numinclrchans, sizeof(jas_cmcmptfmt_t)))) {
abort();
}
inpixmap.cmptfmts = incmptfmts;
for (i = 0; i < numinclrchans; ++i) {
j = jas_image_getcmptbytype(inimage, JAS_IMAGE_CT_COLOR(i));
assert(j >= 0);
if (!(incmptfmts[i].buf = jas_alloc2(width, sizeof(long)))) {
goto error;
}
incmptfmts[i].prec = jas_image_cmptprec(inimage, j);
incmptfmts[i].sgnd = jas_image_cmptsgnd(inimage, j);
incmptfmts[i].width = width;
incmptfmts[i].height = 1;
}
outpixmap.numcmpts = numoutclrchans;
if (!(outcmptfmts = jas_alloc2(numoutclrchans, sizeof(jas_cmcmptfmt_t)))) {
abort();
}
outpixmap.cmptfmts = outcmptfmts;
for (i = 0; i < numoutclrchans; ++i) {
j = jas_image_getcmptbytype(outimage, JAS_IMAGE_CT_COLOR(i));
assert(j >= 0);
if (!(outcmptfmts[i].buf = jas_alloc2(width, sizeof(long))))
goto error;
outcmptfmts[i].prec = jas_image_cmptprec(outimage, j);
outcmptfmts[i].sgnd = jas_image_cmptsgnd(outimage, j);
outcmptfmts[i].width = width;
outcmptfmts[i].height = 1;
}
for (i = 0; i < height; ++i) {
for (j = 0; j < numinclrchans; ++j) {
k = jas_image_getcmptbytype(inimage, JAS_IMAGE_CT_COLOR(j));
if (jas_image_readcmpt2(inimage, k, 0, i, width, 1,
incmptfmts[j].buf))
goto error;
}
jas_cmxform_apply(xform, &inpixmap, &outpixmap);
for (j = 0; j < numoutclrchans; ++j) {
k = jas_image_getcmptbytype(outimage, JAS_IMAGE_CT_COLOR(j));
if (jas_image_writecmpt2(outimage, k, 0, i, width, 1,
outcmptfmts[j].buf))
goto error;
}
}
for (i = 0; i < numoutclrchans; ++i) {
jas_free(outcmptfmts[i].buf);
}
jas_free(outcmptfmts);
for (i = 0; i < numinclrchans; ++i) {
jas_free(incmptfmts[i].buf);
}
jas_free(incmptfmts);
jas_cmxform_destroy(xform);
jas_image_destroy(inimage);
#if 0
jas_eprintf("INIMAGE\n");
jas_image_dump(inimage, stderr);
jas_eprintf("OUTIMAGE\n");
jas_image_dump(outimage, stderr);
#endif
return outimage;
error:
if (xform)
jas_cmxform_destroy(xform);
if (inimage)
jas_image_destroy(inimage);
if (outimage)
jas_image_destroy(outimage);
return 0;
}
|
jas_image_t *jas_image_chclrspc(jas_image_t *image, jas_cmprof_t *outprof,
int intent)
{
jas_image_t *inimage;
int minhstep;
int minvstep;
int i;
int j;
int k;
int n;
int hstep;
int vstep;
int numinauxchans;
int numoutauxchans;
int numinclrchans;
int numoutclrchans;
int prec;
jas_image_t *outimage;
int cmpttype;
int numoutchans;
jas_cmprof_t *inprof;
jas_cmprof_t *tmpprof;
jas_image_cmptparm_t cmptparm;
int width;
int height;
jas_cmxform_t *xform;
jas_cmpixmap_t inpixmap;
jas_cmpixmap_t outpixmap;
jas_cmcmptfmt_t *incmptfmts;
jas_cmcmptfmt_t *outcmptfmts;
#if 0
jas_eprintf("IMAGE\n");
jas_image_dump(image, stderr);
#endif
outimage = 0;
xform = 0;
if (!(inimage = jas_image_copy(image)))
goto error;
image = 0;
if (!jas_image_ishomosamp(inimage)) {
minhstep = jas_image_cmpthstep(inimage, 0);
minvstep = jas_image_cmptvstep(inimage, 0);
for (i = 1; i < jas_image_numcmpts(inimage); ++i) {
hstep = jas_image_cmpthstep(inimage, i);
vstep = jas_image_cmptvstep(inimage, i);
if (hstep < minhstep) {
minhstep = hstep;
}
if (vstep < minvstep) {
minvstep = vstep;
}
}
n = jas_image_numcmpts(inimage);
for (i = 0; i < n; ++i) {
cmpttype = jas_image_cmpttype(inimage, i);
if (jas_image_sampcmpt(inimage, i, i + 1, 0, 0, minhstep, minvstep,
jas_image_cmptsgnd(inimage, i), jas_image_cmptprec(inimage, i))) {
goto error;
}
jas_image_setcmpttype(inimage, i + 1, cmpttype);
jas_image_delcmpt(inimage, i);
}
}
width = jas_image_cmptwidth(inimage, 0);
height = jas_image_cmptheight(inimage, 0);
hstep = jas_image_cmpthstep(inimage, 0);
vstep = jas_image_cmptvstep(inimage, 0);
if (!(inprof = jas_image_cmprof(inimage))) {
abort();
}
numinclrchans = jas_clrspc_numchans(jas_cmprof_clrspc(inprof));
numinauxchans = jas_image_numcmpts(inimage) - numinclrchans;
numoutclrchans = jas_clrspc_numchans(jas_cmprof_clrspc(outprof));
numoutauxchans = 0;
numoutchans = numoutclrchans + numoutauxchans;
prec = 8;
if (!(outimage = jas_image_create0())) {
goto error;
}
/* Create a component for each of the colorants. */
for (i = 0; i < numoutclrchans; ++i) {
cmptparm.tlx = 0;
cmptparm.tly = 0;
cmptparm.hstep = hstep;
cmptparm.vstep = vstep;
cmptparm.width = width;
cmptparm.height = height;
cmptparm.prec = prec;
cmptparm.sgnd = 0;
if (jas_image_addcmpt(outimage, -1, &cmptparm))
goto error;
jas_image_setcmpttype(outimage, i, JAS_IMAGE_CT_COLOR(i));
}
#if 0
/* Copy the auxiliary components without modification. */
for (i = 0; i < jas_image_numcmpts(inimage); ++i) {
if (!ISCOLOR(jas_image_cmpttype(inimage, i))) {
jas_image_copycmpt(outimage, -1, inimage, i);
/* XXX - need to specify laydown of component on ref. grid */
}
}
#endif
if (!(tmpprof = jas_cmprof_copy(outprof)))
goto error;
assert(!jas_image_cmprof(outimage));
jas_image_setcmprof(outimage, tmpprof);
tmpprof = 0;
jas_image_setclrspc(outimage, jas_cmprof_clrspc(outprof));
if (!(xform = jas_cmxform_create(inprof, outprof, 0, JAS_CMXFORM_OP_FWD,
intent, 0))) {
goto error;
}
inpixmap.numcmpts = numinclrchans;
if (!(incmptfmts = jas_alloc2(numinclrchans, sizeof(jas_cmcmptfmt_t)))) {
abort();
}
inpixmap.cmptfmts = incmptfmts;
for (i = 0; i < numinclrchans; ++i) {
j = jas_image_getcmptbytype(inimage, JAS_IMAGE_CT_COLOR(i));
assert(j >= 0);
if (!(incmptfmts[i].buf = jas_alloc2(width, sizeof(long)))) {
goto error;
}
incmptfmts[i].prec = jas_image_cmptprec(inimage, j);
incmptfmts[i].sgnd = jas_image_cmptsgnd(inimage, j);
incmptfmts[i].width = width;
incmptfmts[i].height = 1;
}
outpixmap.numcmpts = numoutclrchans;
if (!(outcmptfmts = jas_alloc2(numoutclrchans, sizeof(jas_cmcmptfmt_t)))) {
abort();
}
outpixmap.cmptfmts = outcmptfmts;
for (i = 0; i < numoutclrchans; ++i) {
j = jas_image_getcmptbytype(outimage, JAS_IMAGE_CT_COLOR(i));
assert(j >= 0);
if (!(outcmptfmts[i].buf = jas_alloc2(width, sizeof(long))))
goto error;
outcmptfmts[i].prec = jas_image_cmptprec(outimage, j);
outcmptfmts[i].sgnd = jas_image_cmptsgnd(outimage, j);
outcmptfmts[i].width = width;
outcmptfmts[i].height = 1;
}
for (i = 0; i < height; ++i) {
for (j = 0; j < numinclrchans; ++j) {
k = jas_image_getcmptbytype(inimage, JAS_IMAGE_CT_COLOR(j));
if (jas_image_readcmpt2(inimage, k, 0, i, width, 1,
incmptfmts[j].buf))
goto error;
}
jas_cmxform_apply(xform, &inpixmap, &outpixmap);
for (j = 0; j < numoutclrchans; ++j) {
k = jas_image_getcmptbytype(outimage, JAS_IMAGE_CT_COLOR(j));
if (jas_image_writecmpt2(outimage, k, 0, i, width, 1,
outcmptfmts[j].buf))
goto error;
}
}
for (i = 0; i < numoutclrchans; ++i) {
jas_free(outcmptfmts[i].buf);
}
jas_free(outcmptfmts);
for (i = 0; i < numinclrchans; ++i) {
jas_free(incmptfmts[i].buf);
}
jas_free(incmptfmts);
jas_cmxform_destroy(xform);
jas_image_destroy(inimage);
#if 0
jas_eprintf("INIMAGE\n");
jas_image_dump(inimage, stderr);
jas_eprintf("OUTIMAGE\n");
jas_image_dump(outimage, stderr);
#endif
return outimage;
error:
if (xform)
jas_cmxform_destroy(xform);
if (inimage)
jas_image_destroy(inimage);
if (outimage)
jas_image_destroy(outimage);
return 0;
}
|
C
|
jasper
| 0 |
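The commit message above mentions newly added safe integer add/multiply functions. The record's function predates them, but the standard pattern such helpers follow is a pre-check against the type's maximum before the arithmetic; a minimal, self-contained sketch (the names are illustrative, not necessarily JasPer's actual API):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Overflow-checked size_t multiply: x * y overflows iff y != 0 and
 * x > SIZE_MAX / y, so test before multiplying. */
static bool safe_size_mul(size_t x, size_t y, size_t *result)
{
    if (y != 0 && x > SIZE_MAX / y)
        return false;
    *result = x * y;
    return true;
}

/* Overflow-checked size_t add. */
static bool safe_size_add(size_t x, size_t y, size_t *result)
{
    if (x > SIZE_MAX - y)
        return false;
    *result = x + y;
    return true;
}

A two-argument allocator wrapper such as the jas_alloc2(width, sizeof(long)) calls in the function above is a natural call site for this kind of check.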
CVE-2013-7271
|
https://www.cvedetails.com/cve/CVE-2013-7271/
|
CWE-20
|
https://github.com/torvalds/linux/commit/f3d3342602f8bcbf37d7c46641cb9bca7618eb1c
|
f3d3342602f8bcbf37d7c46641cb9bca7618eb1c
|
net: rework recvmsg handler msg_name and msg_namelen logic
This patch now always passes msg->msg_namelen as 0. recvmsg handlers must
set msg_namelen to the proper size <= sizeof(struct sockaddr_storage)
to return msg_name to the user.
This prevents numerous uninitialized memory leaks we had in the
recvmsg handlers and makes it harder for new code to accidentally leak
uninitialized memory.
Optimize for the case recvfrom is called with NULL as address. We don't
need to copy the address at all, so set it to NULL before invoking the
recvmsg handler. We can do so, because all the recvmsg handlers must
cope with the case a plain read() is called on them. read() also sets
msg_name to NULL.
Also document these changes in include/linux/net.h as suggested by David
Miller.
Changes since RFC:
Set msg->msg_name = NULL if user specified a NULL in msg_name but had a
non-null msg_namelen in verify_iovec/verify_compat_iovec. This doesn't
affect sendto as it would bail out earlier while trying to copy-in the
address. It also more naturally reflects the logic by the callers of
verify_iovec.
With this change in place I could remove "
if (!uaddr || msg_sys->msg_namelen == 0)
msg->msg_name = NULL
".
This change does not alter the user visible error logic as we ignore
msg_namelen as long as msg_name is NULL.
Also remove two unnecessary curly brackets in ___sys_recvmsg and change
comments to netdev style.
Cc: David Miller <[email protected]>
Suggested-by: Eric Dumazet <[email protected]>
Signed-off-by: Hannes Frederic Sowa <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
static int copy_msghdr_from_user(struct msghdr *kmsg,
struct msghdr __user *umsg)
{
if (copy_from_user(kmsg, umsg, sizeof(struct msghdr)))
return -EFAULT;
if (kmsg->msg_namelen > sizeof(struct sockaddr_storage))
return -EINVAL;
return 0;
}
|
static int copy_msghdr_from_user(struct msghdr *kmsg,
struct msghdr __user *umsg)
{
if (copy_from_user(kmsg, umsg, sizeof(struct msghdr)))
return -EFAULT;
if (kmsg->msg_namelen > sizeof(struct sockaddr_storage))
return -EINVAL;
return 0;
}
|
C
|
linux
| 0 |
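Under the reworked contract described in the commit message, a recvmsg handler receives msg->msg_namelen as 0 and must set it only after actually filling msg_name. A hedged sketch of a compliant handler for a kernel of that era (kernel headers assumed; my_proto_recvmsg is illustrative, and the UDP/IP header accessors stand in for whatever protocol the handler serves):

static int my_proto_recvmsg(struct kiocb *iocb, struct socket *sock,
                            struct msghdr *msg, size_t size, int flags)
{
    DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name);
    struct sk_buff *skb;
    int copied;
    int err = 0;

    skb = skb_recv_datagram(sock->sk, flags, flags & MSG_DONTWAIT, &err);
    if (!skb)
        return err;

    copied = min_t(int, skb->len, size);
    err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
    if (err)
        goto out_free;

    /* msg->msg_namelen arrived as 0; set it only together with msg_name */
    if (sin) {
        sin->sin_family = AF_INET;
        sin->sin_port = udp_hdr(skb)->source;      /* illustrative */
        sin->sin_addr.s_addr = ip_hdr(skb)->saddr; /* illustrative */
        memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
        msg->msg_namelen = sizeof(*sin);
    }
    err = copied;
out_free:
    skb_free_datagram(sock->sk, skb);
    return err;
}

A plain read() reaches the same handler with msg_name == NULL, which the sin check above handles without any special casing.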
CVE-2009-3605
|
https://www.cvedetails.com/cve/CVE-2009-3605/
|
CWE-189
|
https://cgit.freedesktop.org/poppler/poppler/commit/?id=7b2d314a61fd0e12f47c62996cb49ec0d1ba747a
|
7b2d314a61fd0e12f47c62996cb49ec0d1ba747a
| null |
void CairoOutputDev::startDoc(XRef *xrefA, Catalog *catalogA,
CairoFontEngine *parentFontEngine) {
xref = xrefA;
catalog = catalogA;
if (parentFontEngine) {
fontEngine = parentFontEngine;
} else {
if (fontEngine) {
delete fontEngine;
}
fontEngine = new CairoFontEngine(ft_lib);
fontEngine_owner = gTrue;
}
}
|
void CairoOutputDev::startDoc(XRef *xrefA, Catalog *catalogA,
CairoFontEngine *parentFontEngine) {
xref = xrefA;
catalog = catalogA;
if (parentFontEngine) {
fontEngine = parentFontEngine;
} else {
if (fontEngine) {
delete fontEngine;
}
fontEngine = new CairoFontEngine(ft_lib);
fontEngine_owner = gTrue;
}
}
|
CPP
|
poppler
| 0 |
CVE-2019-7308
|
https://www.cvedetails.com/cve/CVE-2019-7308/
|
CWE-189
|
https://github.com/torvalds/linux/commit/d3bd7413e0ca40b60cf60d4003246d067cafdeda
|
d3bd7413e0ca40b60cf60d4003246d067cafdeda
|
bpf: fix sanitation of alu op with pointer / scalar type from different paths
While 979d63d50c0c ("bpf: prevent out of bounds speculation on pointer
arithmetic") took care of rejecting alu op on pointer when e.g. pointer
came from two different map values with different map properties such as
value size, Jann reported that a case was not covered yet when a given
alu op is used in both "ptr_reg += reg" and "numeric_reg += reg" from
different branches where we would incorrectly try to sanitize based
on the pointer's limit. Catch this corner case and reject the program
instead.
Fixes: 979d63d50c0c ("bpf: prevent out of bounds speculation on pointer arithmetic")
Reported-by: Jann Horn <[email protected]>
Signed-off-by: Daniel Borkmann <[email protected]>
Acked-by: Alexei Starovoitov <[email protected]>
Signed-off-by: Alexei Starovoitov <[email protected]>
|
static bool may_access_direct_pkt_data(struct bpf_verifier_env *env,
const struct bpf_call_arg_meta *meta,
enum bpf_access_type t)
{
switch (env->prog->type) {
/* Program types only with direct read access go here! */
case BPF_PROG_TYPE_LWT_IN:
case BPF_PROG_TYPE_LWT_OUT:
case BPF_PROG_TYPE_LWT_SEG6LOCAL:
case BPF_PROG_TYPE_SK_REUSEPORT:
case BPF_PROG_TYPE_FLOW_DISSECTOR:
case BPF_PROG_TYPE_CGROUP_SKB:
if (t == BPF_WRITE)
return false;
/* fallthrough */
/* Program types with direct read + write access go here! */
case BPF_PROG_TYPE_SCHED_CLS:
case BPF_PROG_TYPE_SCHED_ACT:
case BPF_PROG_TYPE_XDP:
case BPF_PROG_TYPE_LWT_XMIT:
case BPF_PROG_TYPE_SK_SKB:
case BPF_PROG_TYPE_SK_MSG:
if (meta)
return meta->pkt_access;
env->seen_direct_write = true;
return true;
default:
return false;
}
}
|
static bool may_access_direct_pkt_data(struct bpf_verifier_env *env,
const struct bpf_call_arg_meta *meta,
enum bpf_access_type t)
{
switch (env->prog->type) {
/* Program types only with direct read access go here! */
case BPF_PROG_TYPE_LWT_IN:
case BPF_PROG_TYPE_LWT_OUT:
case BPF_PROG_TYPE_LWT_SEG6LOCAL:
case BPF_PROG_TYPE_SK_REUSEPORT:
case BPF_PROG_TYPE_FLOW_DISSECTOR:
case BPF_PROG_TYPE_CGROUP_SKB:
if (t == BPF_WRITE)
return false;
/* fallthrough */
/* Program types with direct read + write access go here! */
case BPF_PROG_TYPE_SCHED_CLS:
case BPF_PROG_TYPE_SCHED_ACT:
case BPF_PROG_TYPE_XDP:
case BPF_PROG_TYPE_LWT_XMIT:
case BPF_PROG_TYPE_SK_SKB:
case BPF_PROG_TYPE_SK_MSG:
if (meta)
return meta->pkt_access;
env->seen_direct_write = true;
return true;
default:
return false;
}
}
|
C
|
linux
| 0 |
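A minimal sketch of the corner-case rejection the commit message describes: remember, per instruction, how an alu op was sanitized on the first verifier path, and reject the program if another path needs a different sanitation. Kernel context is assumed (u32, -EACCES); the field and constant names are illustrative, not the exact verifier internals:

struct insn_aux {
    u32 alu_state;  /* how this insn was sanitized on a prior path   */
    u32 alu_limit;  /* masking limit derived from the pointer bounds */
};

static int update_alu_sanitation_state(struct insn_aux *aux,
                                       u32 alu_state, u32 alu_limit)
{
    /* The same instruction seen on two paths with different sanitation
     * requirements cannot share one runtime mask; reject the program
     * instead of emitting a mask that is wrong for one of the paths. */
    if (aux->alu_state &&
        (aux->alu_state != alu_state || aux->alu_limit != alu_limit))
        return -EACCES;

    aux->alu_state = alu_state;
    aux->alu_limit = alu_limit;
    return 0;
}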
CVE-2018-6061
|
https://www.cvedetails.com/cve/CVE-2018-6061/
|
CWE-362
|
https://github.com/chromium/chromium/commit/70340ce072cee8a0bdcddb5f312d32567b2269f6
|
70340ce072cee8a0bdcddb5f312d32567b2269f6
|
vaapi vda: Delete owned objects on worker thread in Cleanup()
This CL adds a SEQUENCE_CHECKER to Vaapi*Accelerator classes, and
posts the destruction of those objects to the appropriate thread on
Cleanup().
Also makes {H264,VP8,VP9}Picture RefCountedThreadSafe, see miu@
comment in
https://chromium-review.googlesource.com/c/chromium/src/+/794091#message-a64bed985cfaf8a19499a517bb110a7ce581dc0f
TEST=play back VP9/VP8/H264 w/ simplechrome on soraka, Release build
unstripped, let video play for a few seconds then navigate back; no
crashes. Unittests as before:
video_decode_accelerator_unittest --test_video_data=test-25fps.vp9:320:240:250:250:35:150:12
video_decode_accelerator_unittest --test_video_data=test-25fps.vp8:320:240:250:250:35:150:11
video_decode_accelerator_unittest --test_video_data=test-25fps.h264:320:240:250:258:35:150:1
Bug: 789160
Cq-Include-Trybots: master.tryserver.chromium.android:android_optional_gpu_tests_rel;master.tryserver.chromium.linux:linux_optional_gpu_tests_rel;master.tryserver.chromium.mac:mac_optional_gpu_tests_rel;master.tryserver.chromium.win:win_optional_gpu_tests_rel
Change-Id: I7d96aaf89c92bf46f00c8b8b36798e057a842ed2
Reviewed-on: https://chromium-review.googlesource.com/794091
Reviewed-by: Pawel Osciak <[email protected]>
Commit-Queue: Miguel Casas <[email protected]>
Cr-Commit-Position: refs/heads/master@{#523372}
|
bool IsFlushRequest() const { return shm_ == nullptr; }
|
bool IsFlushRequest() const { return shm_ == nullptr; }
|
C
|
Chrome
| 0 |
CVE-2013-0925
|
https://www.cvedetails.com/cve/CVE-2013-0925/
|
CWE-264
|
https://github.com/chromium/chromium/commit/f7ae1f7a918f1973dca241a7a23169906eaf4fe3
|
f7ae1f7a918f1973dca241a7a23169906eaf4fe3
|
Do not pass URLs in onUpdated events to extensions unless they have the
"tabs" permission.
BUG=168442
Review URL: https://chromiumcodereview.appspot.com/11824004
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@176406 0039d316-1c4b-4281-b951-d872f2087c98
|
void BrowserEventRouter::TabCreatedAt(WebContents* contents,
int index,
bool active) {
Profile* profile = Profile::FromBrowserContext(contents->GetBrowserContext());
scoped_ptr<ListValue> args(new ListValue());
scoped_ptr<Event> event(new Event(events::kOnTabCreated, args.Pass()));
event->restrict_to_profile = profile;
event->user_gesture = EventRouter::USER_GESTURE_NOT_ENABLED;
event->will_dispatch_callback =
base::Bind(&WillDispatchTabCreatedEvent, contents, active);
ExtensionSystem::Get(profile)->event_router()->BroadcastEvent(event.Pass());
RegisterForTabNotifications(contents);
}
|
void BrowserEventRouter::TabCreatedAt(WebContents* contents,
int index,
bool active) {
Profile* profile = Profile::FromBrowserContext(contents->GetBrowserContext());
scoped_ptr<ListValue> args(new ListValue());
scoped_ptr<Event> event(new Event(events::kOnTabCreated, args.Pass()));
event->restrict_to_profile = profile;
event->user_gesture = EventRouter::USER_GESTURE_NOT_ENABLED;
event->will_dispatch_callback =
base::Bind(&WillDispatchTabCreatedEvent, contents, active);
ExtensionSystem::Get(profile)->event_router()->BroadcastEvent(event.Pass());
RegisterForTabNotifications(contents);
}
|
C
|
Chrome
| 0 |
CVE-2015-1294
|
https://www.cvedetails.com/cve/CVE-2015-1294/
| null |
https://github.com/chromium/chromium/commit/3ff403eecdd23a39853a4ebca52023fbba6c5d00
|
3ff403eecdd23a39853a4ebca52023fbba6c5d00
|
Introduce RunLoop::Type::NESTABLE_TASKS_ALLOWED to replace MessageLoop::ScopedNestableTaskAllower.
(as well as MessageLoop::SetNestableTasksAllowed())
Surveying usage: the scoped object is always instantiated right before
RunLoop().Run(). The intent is really to allow nestable tasks in that
RunLoop so it's better to explicitly label that RunLoop as such and it
allows us to break the last dependency that forced some RunLoop users
to use MessageLoop APIs.
There's also the odd case of allowing nestable tasks for loops that are
reentrant from a native task (without going through RunLoop), these
are the minority but will have to be handled (after cleaning up the
majority of cases that are RunLoop induced).
As highlighted by robliao@ in https://chromium-review.googlesource.com/c/600517
(which was merged in this CL).
[email protected]
Bug: 750779
Change-Id: I43d122c93ec903cff3a6fe7b77ec461ea0656448
Reviewed-on: https://chromium-review.googlesource.com/594713
Commit-Queue: Gabriel Charette <[email protected]>
Reviewed-by: Robert Liao <[email protected]>
Reviewed-by: danakj <[email protected]>
Cr-Commit-Position: refs/heads/master@{#492263}
|
RunLoop::Delegate::Client* RunLoop::RegisterDelegateForCurrentThread(
Delegate* delegate) {
DCHECK(!delegate->bound_);
DCHECK_CALLED_ON_VALID_THREAD(delegate->bound_thread_checker_);
DCHECK(!tls_delegate.Get().Get());
tls_delegate.Get().Set(delegate);
delegate->bound_ = true;
return &delegate->client_interface_;
}
|
RunLoop::Delegate::Client* RunLoop::RegisterDelegateForCurrentThread(
Delegate* delegate) {
DCHECK(!delegate->bound_);
DCHECK_CALLED_ON_VALID_THREAD(delegate->bound_thread_checker_);
DCHECK(!tls_delegate.Get().Get());
tls_delegate.Get().Set(delegate);
delegate->bound_ = true;
return &delegate->client_interface_;
}
|
C
|
Chrome
| 0 |
CVE-2014-1700
|
https://www.cvedetails.com/cve/CVE-2014-1700/
|
CWE-399
|
https://github.com/chromium/chromium/commit/d926098e2e2be270c80a5ba25ab8a611b80b8556
|
d926098e2e2be270c80a5ba25ab8a611b80b8556
|
Connect WebUSB client interface to the devices app
This provides a basic WebUSB client interface in
content/renderer. Most of the interface is unimplemented,
but this CL hooks up navigator.usb.getDevices() to the
browser's Mojo devices app to enumerate available USB
devices.
BUG=492204
Review URL: https://codereview.chromium.org/1293253002
Cr-Commit-Position: refs/heads/master@{#344881}
|
void RenderFrameImpl::InstallCreateHook(
CreateRenderFrameImplFunction create_render_frame_impl) {
CHECK(!g_create_render_frame_impl);
g_create_render_frame_impl = create_render_frame_impl;
}
|
void RenderFrameImpl::InstallCreateHook(
CreateRenderFrameImplFunction create_render_frame_impl) {
CHECK(!g_create_render_frame_impl);
g_create_render_frame_impl = create_render_frame_impl;
}
|
C
|
Chrome
| 0 |
CVE-2012-5155
|
https://www.cvedetails.com/cve/CVE-2012-5155/
|
CWE-264
|
https://github.com/chromium/chromium/commit/0d7717faeaef5b72434632c95c78bee4883e2573
|
0d7717faeaef5b72434632c95c78bee4883e2573
|
Fix OS_MACOS typos. Should be OS_MACOSX.
BUG=163208
TEST=none
Review URL: https://codereview.chromium.org/12829005
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@189130 0039d316-1c4b-4281-b951-d872f2087c98
|
bool DownloadPathReservationTrackerTest::IsPathInUse(
const base::FilePath& path) {
return DownloadPathReservationTracker::IsPathInUseForTesting(path);
}
|
bool DownloadPathReservationTrackerTest::IsPathInUse(
const base::FilePath& path) {
return DownloadPathReservationTracker::IsPathInUseForTesting(path);
}
|
C
|
Chrome
| 0 |
null | null | null |
https://github.com/chromium/chromium/commit/790613cb3725005dda8f7fbfaa344a9e99a8f2a8
|
790613cb3725005dda8f7fbfaa344a9e99a8f2a8
|
Replaces the % character with \x when generating Windows shortcuts via
File->"Create application shortcuts." The \x is converted back to % in
handling the --app switch.
BUG=23693
TEST=None
Review URL: http://codereview.chromium.org/515028
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@35377 0039d316-1c4b-4281-b951-d872f2087c98
|
bool BrowserInit::InProcessStartup() {
return in_startup;
}
|
bool BrowserInit::InProcessStartup() {
return in_startup;
}
|
C
|
Chrome
| 0 |
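The commit message above describes escaping '%' in the shortcut URL as the two characters "\x" and converting it back when the --app switch is parsed. Chromium performs this on wide strings; the following plain-C version is illustrative only and shows the escaping half:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Escape every '%' as the two characters "\x"; caller frees the result. */
static char *escape_percent(const char *in)
{
    size_t n = strlen(in), extra = 0, i, j = 0;
    char *out;

    for (i = 0; i < n; i++)
        if (in[i] == '%')
            extra++;
    out = malloc(n + extra + 1);
    if (!out)
        return NULL;
    for (i = 0; i < n; i++) {
        if (in[i] == '%') {
            out[j++] = '\\';
            out[j++] = 'x';
        } else {
            out[j++] = in[i];
        }
    }
    out[j] = '\0';
    return out;
}

int main(void)
{
    char *s = escape_percent("https://example.com/a%20b");
    if (s) {
        puts(s);   /* https://example.com/a\x20b */
        free(s);
    }
    return 0;
}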
CVE-2017-5522
|
https://www.cvedetails.com/cve/CVE-2017-5522/
|
CWE-119
|
https://github.com/mapserver/mapserver/commit/e52a436c0e1c5e9f7ef13428dba83194a800f4df
|
e52a436c0e1c5e9f7ef13428dba83194a800f4df
|
security fix (patch by EvenR)
|
static int FLTGetTopBBOXInternal(FilterEncodingNode *psNode, FilterEncodingNode** ppsTopBBOX, int *pnCount)
{
if (psNode->pszValue && strcasecmp(psNode->pszValue, "BBOX") == 0) {
(*pnCount) ++;
if( *pnCount == 1 )
{
*ppsTopBBOX = psNode;
return TRUE;
}
*ppsTopBBOX = NULL;
return FALSE;
}
else if (psNode->pszValue && strcasecmp(psNode->pszValue, "AND") == 0) {
return FLTGetTopBBOXInternal(psNode->psLeftNode, ppsTopBBOX, pnCount) &&
FLTGetTopBBOXInternal(psNode->psRightNode, ppsTopBBOX, pnCount);
}
else
{
return TRUE;
}
}
|
static int FLTGetTopBBOXInternal(FilterEncodingNode *psNode, FilterEncodingNode** ppsTopBBOX, int *pnCount)
{
if (psNode->pszValue && strcasecmp(psNode->pszValue, "BBOX") == 0) {
(*pnCount) ++;
if( *pnCount == 1 )
{
*ppsTopBBOX = psNode;
return TRUE;
}
*ppsTopBBOX = NULL;
return FALSE;
}
else if (psNode->pszValue && strcasecmp(psNode->pszValue, "AND") == 0) {
return FLTGetTopBBOXInternal(psNode->psLeftNode, ppsTopBBOX, pnCount) &&
FLTGetTopBBOXInternal(psNode->psRightNode, ppsTopBBOX, pnCount);
}
else
{
return TRUE;
}
}
|
C
|
mapserver
| 0 |
CVE-2018-6063
|
https://www.cvedetails.com/cve/CVE-2018-6063/
|
CWE-787
|
https://github.com/chromium/chromium/commit/673ce95d481ea9368c4d4d43ac756ba1d6d9e608
|
673ce95d481ea9368c4d4d43ac756ba1d6d9e608
|
Correct mojo::WrapSharedMemoryHandle usage
Fixes some incorrect uses of mojo::WrapSharedMemoryHandle which
were assuming that the call actually has any control over the memory
protection applied to a handle when mapped.
Where fixing usage is infeasible for this CL, TODOs are added to
annotate follow-up work.
Also updates the API and documentation to (hopefully) improve clarity
and avoid similar mistakes from being made in the future.
BUG=792900
Cq-Include-Trybots: master.tryserver.chromium.android:android_optional_gpu_tests_rel;master.tryserver.chromium.linux:linux_optional_gpu_tests_rel;master.tryserver.chromium.mac:mac_optional_gpu_tests_rel;master.tryserver.chromium.win:win_optional_gpu_tests_rel
Change-Id: I0578aaa9ca3bfcb01aaf2451315d1ede95458477
Reviewed-on: https://chromium-review.googlesource.com/818282
Reviewed-by: Wei Li <[email protected]>
Reviewed-by: Lei Zhang <[email protected]>
Reviewed-by: John Abd-El-Malek <[email protected]>
Reviewed-by: Daniel Cheng <[email protected]>
Reviewed-by: Sadrul Chowdhury <[email protected]>
Reviewed-by: Yuzhu Shen <[email protected]>
Reviewed-by: Robert Sesek <[email protected]>
Commit-Queue: Ken Rockot <[email protected]>
Cr-Commit-Position: refs/heads/master@{#530268}
|
ScopedHandle WrapPlatformFile(base::PlatformFile platform_file) {
MojoPlatformHandle platform_handle;
platform_handle.struct_size = sizeof(MojoPlatformHandle);
platform_handle.type = kPlatformFileHandleType;
platform_handle.value = PlatformHandleValueFromPlatformFile(platform_file);
MojoHandle mojo_handle;
MojoResult result = MojoWrapPlatformHandle(&platform_handle, &mojo_handle);
CHECK_EQ(result, MOJO_RESULT_OK);
return ScopedHandle(Handle(mojo_handle));
}
|
ScopedHandle WrapPlatformFile(base::PlatformFile platform_file) {
MojoPlatformHandle platform_handle;
platform_handle.struct_size = sizeof(MojoPlatformHandle);
platform_handle.type = kPlatformFileHandleType;
platform_handle.value = PlatformHandleValueFromPlatformFile(platform_file);
MojoHandle mojo_handle;
MojoResult result = MojoWrapPlatformHandle(&platform_handle, &mojo_handle);
CHECK_EQ(result, MOJO_RESULT_OK);
return ScopedHandle(Handle(mojo_handle));
}
|
C
|
Chrome
| 0 |
CVE-2017-5019
|
https://www.cvedetails.com/cve/CVE-2017-5019/
|
CWE-416
|
https://github.com/chromium/chromium/commit/f03ea5a5c2ff26e239dfd23e263b15da2d9cee93
|
f03ea5a5c2ff26e239dfd23e263b15da2d9cee93
|
Convert FrameHostMsg_DidAddMessageToConsole to Mojo.
Note: Since this required changing the test
RenderViewImplTest.DispatchBeforeUnloadCanDetachFrame, I manually
re-introduced https://crbug.com/666714 locally (the bug the test was
added for), and reran the test to confirm that it still covers the bug.
Bug: 786836
Change-Id: I110668fa6f0f261fd2ac36bb91a8d8b31c99f4f1
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1526270
Commit-Queue: Lowell Manners <[email protected]>
Reviewed-by: Daniel Cheng <[email protected]>
Reviewed-by: Camille Lamy <[email protected]>
Cr-Commit-Position: refs/heads/master@{#653137}
|
void ServiceWorkerContextCore::OnErrorReported(
ServiceWorkerVersion* version,
const base::string16& error_message,
int line_number,
int column_number,
const GURL& source_url) {
observer_list_->Notify(
FROM_HERE, &ServiceWorkerContextCoreObserver::OnErrorReported,
version->version_id(),
ServiceWorkerContextCoreObserver::ErrorInfo(error_message, line_number,
column_number, source_url));
}
|
void ServiceWorkerContextCore::OnErrorReported(
ServiceWorkerVersion* version,
const base::string16& error_message,
int line_number,
int column_number,
const GURL& source_url) {
observer_list_->Notify(
FROM_HERE, &ServiceWorkerContextCoreObserver::OnErrorReported,
version->version_id(),
ServiceWorkerContextCoreObserver::ErrorInfo(error_message, line_number,
column_number, source_url));
}
|
C
|
Chrome
| 0 |
CVE-2015-9016
|
https://www.cvedetails.com/cve/CVE-2015-9016/
|
CWE-362
|
https://github.com/torvalds/linux/commit/0048b4837affd153897ed1222283492070027aa9
|
0048b4837affd153897ed1222283492070027aa9
|
blk-mq: fix race between timeout and freeing request
Inside timeout handler, blk_mq_tag_to_rq() is called
to retrieve the request from one tag. This way is obviously
wrong because the request can be freed at any time and some
fields of the request can't be trusted, so a kernel oops
might be triggered[1].
Currently wrt. blk_mq_tag_to_rq(), the only special case is
that the flush request can share same tag with the request
cloned from, and the two requests can't be active at the same
time, so this patch fixes the above issue by updating tags->rqs[tag]
with the active request (either flush rq or the request cloned
from) of the tag.
Also blk_mq_tag_to_rq() gets much simplified with this patch.
Given blk_mq_tag_to_rq() is mainly for drivers and the caller must
make sure the request can't be freed, so in bt_for_each() this
helper is replaced with tags->rqs[tag].
[1] kernel oops log
[ 439.696220] BUG: unable to handle kernel NULL pointer dereference at 0000000000000158
[ 439.697162] IP: [<ffffffff812d89ba>] blk_mq_tag_to_rq+0x21/0x6e
[ 439.700653] PGD 7ef765067 PUD 7ef764067 PMD 0
[ 439.700653] Oops: 0000 [#1] PREEMPT SMP DEBUG_PAGEALLOC
[ 439.700653] Dumping ftrace buffer:
[ 439.700653] (ftrace buffer empty)
[ 439.700653] Modules linked in: nbd ipv6 kvm_intel kvm serio_raw
[ 439.700653] CPU: 6 PID: 2779 Comm: stress-ng-sigfd Not tainted 4.2.0-rc5-next-20150805+ #265
[ 439.730500] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Bochs 01/01/2011
[ 439.730500] task: ffff880605308000 ti: ffff88060530c000 task.ti: ffff88060530c000
[ 439.730500] RIP: 0010:[<ffffffff812d89ba>] [<ffffffff812d89ba>] blk_mq_tag_to_rq+0x21/0x6e
[ 439.730500] RSP: 0018:ffff880819203da0 EFLAGS: 00010283
[ 439.730500] RAX: ffff880811b0e000 RBX: ffff8800bb465f00 RCX: 0000000000000002
[ 439.730500] RDX: 0000000000000000 RSI: 0000000000000202 RDI: 0000000000000000
[ 439.730500] RBP: ffff880819203db0 R08: 0000000000000002 R09: 0000000000000000
[ 439.730500] R10: 0000000000000000 R11: 0000000000000000 R12: 0000000000000202
[ 439.730500] R13: ffff880814104800 R14: 0000000000000002 R15: ffff880811a2ea00
[ 439.730500] FS: 00007f165b3f5740(0000) GS:ffff880819200000(0000) knlGS:0000000000000000
[ 439.730500] CS: 0010 DS: 0000 ES: 0000 CR0: 000000008005003b
[ 439.730500] CR2: 0000000000000158 CR3: 00000007ef766000 CR4: 00000000000006e0
[ 439.730500] Stack:
[ 439.730500] 0000000000000008 ffff8808114eed90 ffff880819203e00 ffffffff812dc104
[ 439.755663] ffff880819203e40 ffffffff812d9f5e 0000020000000000 ffff8808114eed80
[ 439.755663] Call Trace:
[ 439.755663] <IRQ>
[ 439.755663] [<ffffffff812dc104>] bt_for_each+0x6e/0xc8
[ 439.755663] [<ffffffff812d9f5e>] ? blk_mq_rq_timed_out+0x6a/0x6a
[ 439.755663] [<ffffffff812d9f5e>] ? blk_mq_rq_timed_out+0x6a/0x6a
[ 439.755663] [<ffffffff812dc1b3>] blk_mq_tag_busy_iter+0x55/0x5e
[ 439.755663] [<ffffffff812d88b4>] ? blk_mq_bio_to_request+0x38/0x38
[ 439.755663] [<ffffffff812d8911>] blk_mq_rq_timer+0x5d/0xd4
[ 439.755663] [<ffffffff810a3e10>] call_timer_fn+0xf7/0x284
[ 439.755663] [<ffffffff810a3d1e>] ? call_timer_fn+0x5/0x284
[ 439.755663] [<ffffffff812d88b4>] ? blk_mq_bio_to_request+0x38/0x38
[ 439.755663] [<ffffffff810a46d6>] run_timer_softirq+0x1ce/0x1f8
[ 439.755663] [<ffffffff8104c367>] __do_softirq+0x181/0x3a4
[ 439.755663] [<ffffffff8104c76e>] irq_exit+0x40/0x94
[ 439.755663] [<ffffffff81031482>] smp_apic_timer_interrupt+0x33/0x3e
[ 439.755663] [<ffffffff815559a4>] apic_timer_interrupt+0x84/0x90
[ 439.755663] <EOI>
[ 439.755663] [<ffffffff81554350>] ? _raw_spin_unlock_irq+0x32/0x4a
[ 439.755663] [<ffffffff8106a98b>] finish_task_switch+0xe0/0x163
[ 439.755663] [<ffffffff8106a94d>] ? finish_task_switch+0xa2/0x163
[ 439.755663] [<ffffffff81550066>] __schedule+0x469/0x6cd
[ 439.755663] [<ffffffff8155039b>] schedule+0x82/0x9a
[ 439.789267] [<ffffffff8119b28b>] signalfd_read+0x186/0x49a
[ 439.790911] [<ffffffff8106d86a>] ? wake_up_q+0x47/0x47
[ 439.790911] [<ffffffff811618c2>] __vfs_read+0x28/0x9f
[ 439.790911] [<ffffffff8117a289>] ? __fget_light+0x4d/0x74
[ 439.790911] [<ffffffff811620a7>] vfs_read+0x7a/0xc6
[ 439.790911] [<ffffffff8116292b>] SyS_read+0x49/0x7f
[ 439.790911] [<ffffffff81554c17>] entry_SYSCALL_64_fastpath+0x12/0x6f
[ 439.790911] Code: 48 89 e5 e8 a9 b8 e7 ff 5d c3 0f 1f 44 00 00 55 89
f2 48 89 e5 41 54 41 89 f4 53 48 8b 47 60 48 8b 1c d0 48 8b 7b 30 48 8b
53 38 <48> 8b 87 58 01 00 00 48 85 c0 75 09 48 8b 97 88 0c 00 00 eb 10
[ 439.790911] RIP [<ffffffff812d89ba>] blk_mq_tag_to_rq+0x21/0x6e
[ 439.790911] RSP <ffff880819203da0>
[ 439.790911] CR2: 0000000000000158
[ 439.790911] ---[ end trace d40af58949325661 ]---
Cc: <[email protected]>
Signed-off-by: Ming Lei <[email protected]>
Signed-off-by: Jens Axboe <[email protected]>
|
static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
struct request_queue *q)
{
q->tag_set = set;
mutex_lock(&set->tag_list_lock);
list_add_tail(&q->tag_set_list, &set->tag_list);
blk_mq_update_tag_set_depth(set);
mutex_unlock(&set->tag_list_lock);
}
|
static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
struct request_queue *q)
{
q->tag_set = set;
mutex_lock(&set->tag_list_lock);
list_add_tail(&q->tag_set_list, &set->tag_list);
blk_mq_update_tag_set_depth(set);
mutex_unlock(&set->tag_list_lock);
}
|
C
|
linux
| 0 |
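A sketch of the mechanism the commit message describes: keep tags->rqs[tag] pointing at whichever request (the flush clone or the request it was cloned from) is currently active, so blk_mq_tag_to_rq() reduces to an array lookup whose validity the caller must guarantee. The helper and field names follow the commit text; the details are illustrative, kernel context assumed:

/* Record rq as the active owner of its tag, e.g. when a flush request
 * borrows the tag of the request it was cloned from. */
static inline void blk_mq_tag_set_rq(struct blk_mq_hw_ctx *hctx,
                                     unsigned int tag, struct request *rq)
{
    hctx->tags->rqs[tag] = rq;
}

/* With the invariant above the lookup is trivial; the caller must make
 * sure the request cannot be freed while it is being inspected, which
 * is why the timeout path reads tags->rqs[tag] directly instead. */
struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
{
    return tags->rqs[tag];
}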
CVE-2018-1000127
|
https://www.cvedetails.com/cve/CVE-2018-1000127/
|
CWE-190
|
https://github.com/memcached/memcached/commit/a8c4a82787b8b6c256d61bd5c42fb7f92d1bae00
|
a8c4a82787b8b6c256d61bd5c42fb7f92d1bae00
|
Don't overflow item refcount on get
Counts as a miss if the refcount is too high. ASCII multigets are the only
time refcounts can be held for so long.
We do a dirty read of the refcount; the field is aligned. We avoid
adding an extra refcount branch for all calls of item_get for
performance reasons; the check might move in there once the logging
refactoring simplifies some of the branches.
|
enum store_item_type do_store_item(item *it, int comm, conn *c, const uint32_t hv) {
char *key = ITEM_key(it);
item *old_it = do_item_get(key, it->nkey, hv, c, DONT_UPDATE);
enum store_item_type stored = NOT_STORED;
item *new_it = NULL;
uint32_t flags;
if (old_it != NULL && comm == NREAD_ADD) {
/* add only adds a nonexistent item, but promote to head of LRU */
do_item_update(old_it);
} else if (!old_it && (comm == NREAD_REPLACE
|| comm == NREAD_APPEND || comm == NREAD_PREPEND))
{
/* replace only replaces an existing value; don't store */
} else if (comm == NREAD_CAS) {
/* validate cas operation */
if(old_it == NULL) {
stored = NOT_FOUND;
pthread_mutex_lock(&c->thread->stats.mutex);
c->thread->stats.cas_misses++;
pthread_mutex_unlock(&c->thread->stats.mutex);
}
else if (ITEM_get_cas(it) == ITEM_get_cas(old_it)) {
pthread_mutex_lock(&c->thread->stats.mutex);
c->thread->stats.slab_stats[ITEM_clsid(old_it)].cas_hits++;
pthread_mutex_unlock(&c->thread->stats.mutex);
item_replace(old_it, it, hv);
stored = STORED;
} else {
pthread_mutex_lock(&c->thread->stats.mutex);
c->thread->stats.slab_stats[ITEM_clsid(old_it)].cas_badval++;
pthread_mutex_unlock(&c->thread->stats.mutex);
if(settings.verbose > 1) {
fprintf(stderr, "CAS: failure: expected %llu, got %llu\n",
(unsigned long long)ITEM_get_cas(old_it),
(unsigned long long)ITEM_get_cas(it));
}
stored = EXISTS;
}
} else {
int failed_alloc = 0;
/*
* Append - combine new and old record into single one. Here it's
* atomic and thread-safe.
*/
if (comm == NREAD_APPEND || comm == NREAD_PREPEND) {
/*
* Validate CAS
*/
if (ITEM_get_cas(it) != 0) {
if (ITEM_get_cas(it) != ITEM_get_cas(old_it)) {
stored = EXISTS;
}
}
if (stored == NOT_STORED) {
/* we have it and old_it here - alloc memory to hold both */
/* flags was already lost - so recover them from ITEM_suffix(it) */
if (settings.inline_ascii_response) {
flags = (uint32_t) strtoul(ITEM_suffix(old_it), (char **) NULL, 10);
} else {
flags = *((uint32_t *)ITEM_suffix(old_it));
}
new_it = do_item_alloc(key, it->nkey, flags, old_it->exptime, it->nbytes + old_it->nbytes - 2 /* CRLF */);
/* copy data from it and old_it to new_it */
if (new_it == NULL || _store_item_copy_data(comm, old_it, new_it, it) == -1) {
failed_alloc = 1;
stored = NOT_STORED;
if (new_it != NULL)
item_remove(new_it);
} else {
it = new_it;
}
}
}
if (stored == NOT_STORED && failed_alloc == 0) {
if (old_it != NULL)
item_replace(old_it, it, hv);
else
do_item_link(it, hv);
c->cas = ITEM_get_cas(it);
stored = STORED;
}
}
if (old_it != NULL)
do_item_remove(old_it); /* release our reference */
if (new_it != NULL)
do_item_remove(new_it);
if (stored == STORED) {
c->cas = ITEM_get_cas(it);
}
LOGGER_LOG(c->thread->l, LOG_MUTATIONS, LOGGER_ITEM_STORE, NULL,
stored, comm, ITEM_key(it), it->nkey, it->exptime, ITEM_clsid(it));
return stored;
}
|
enum store_item_type do_store_item(item *it, int comm, conn *c, const uint32_t hv) {
char *key = ITEM_key(it);
item *old_it = do_item_get(key, it->nkey, hv, c, DONT_UPDATE);
enum store_item_type stored = NOT_STORED;
item *new_it = NULL;
uint32_t flags;
if (old_it != NULL && comm == NREAD_ADD) {
/* add only adds a nonexistent item, but promote to head of LRU */
do_item_update(old_it);
} else if (!old_it && (comm == NREAD_REPLACE
|| comm == NREAD_APPEND || comm == NREAD_PREPEND))
{
/* replace only replaces an existing value; don't store */
} else if (comm == NREAD_CAS) {
/* validate cas operation */
if(old_it == NULL) {
stored = NOT_FOUND;
pthread_mutex_lock(&c->thread->stats.mutex);
c->thread->stats.cas_misses++;
pthread_mutex_unlock(&c->thread->stats.mutex);
}
else if (ITEM_get_cas(it) == ITEM_get_cas(old_it)) {
pthread_mutex_lock(&c->thread->stats.mutex);
c->thread->stats.slab_stats[ITEM_clsid(old_it)].cas_hits++;
pthread_mutex_unlock(&c->thread->stats.mutex);
item_replace(old_it, it, hv);
stored = STORED;
} else {
pthread_mutex_lock(&c->thread->stats.mutex);
c->thread->stats.slab_stats[ITEM_clsid(old_it)].cas_badval++;
pthread_mutex_unlock(&c->thread->stats.mutex);
if(settings.verbose > 1) {
fprintf(stderr, "CAS: failure: expected %llu, got %llu\n",
(unsigned long long)ITEM_get_cas(old_it),
(unsigned long long)ITEM_get_cas(it));
}
stored = EXISTS;
}
} else {
int failed_alloc = 0;
/*
* Append - combine new and old record into single one. Here it's
* atomic and thread-safe.
*/
if (comm == NREAD_APPEND || comm == NREAD_PREPEND) {
/*
* Validate CAS
*/
if (ITEM_get_cas(it) != 0) {
if (ITEM_get_cas(it) != ITEM_get_cas(old_it)) {
stored = EXISTS;
}
}
if (stored == NOT_STORED) {
/* we have it and old_it here - alloc memory to hold both */
/* flags was already lost - so recover them from ITEM_suffix(it) */
if (settings.inline_ascii_response) {
flags = (uint32_t) strtoul(ITEM_suffix(old_it), (char **) NULL, 10);
} else {
flags = *((uint32_t *)ITEM_suffix(old_it));
}
new_it = do_item_alloc(key, it->nkey, flags, old_it->exptime, it->nbytes + old_it->nbytes - 2 /* CRLF */);
/* copy data from it and old_it to new_it */
if (new_it == NULL || _store_item_copy_data(comm, old_it, new_it, it) == -1) {
failed_alloc = 1;
stored = NOT_STORED;
if (new_it != NULL)
item_remove(new_it);
} else {
it = new_it;
}
}
}
if (stored == NOT_STORED && failed_alloc == 0) {
if (old_it != NULL)
item_replace(old_it, it, hv);
else
do_item_link(it, hv);
c->cas = ITEM_get_cas(it);
stored = STORED;
}
}
if (old_it != NULL)
do_item_remove(old_it); /* release our reference */
if (new_it != NULL)
do_item_remove(new_it);
if (stored == STORED) {
c->cas = ITEM_get_cas(it);
}
LOGGER_LOG(c->thread->l, LOG_MUTATIONS, LOGGER_ITEM_STORE, NULL,
stored, comm, ITEM_key(it), it->nkey, it->exptime, ITEM_clsid(it));
return stored;
}
|
C
|
memcached
| 0 |
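A self-contained sketch of the guard the commit message describes: a dirty (unlocked) read of the aligned refcount field on the get path, counting the item as a miss once the count is dangerously high. The limit, types, and function name are illustrative, not memcached's actual values:

#include <stdio.h>

#define REFCOUNT_LIMIT 0xFFF0u  /* hypothetical cap below the field's max */

struct item {
    unsigned short refcount;
    /* key, value, LRU links elided */
};

/* Returns the item with one reference taken, or NULL (a miss) if the
 * refcount is already too close to overflowing. The unlocked read is
 * acceptable because the field is aligned and a stale value only makes
 * the check conservative. */
static struct item *item_get_checked(struct item *it)
{
    if (it == NULL)
        return NULL;
    if (it->refcount >= REFCOUNT_LIMIT)
        return NULL;            /* count as a miss instead of overflowing */
    it->refcount++;             /* real code uses an atomic increment */
    return it;
}

int main(void)
{
    struct item it = { .refcount = 1 };
    if (item_get_checked(&it))
        printf("hit, refcount now %u\n", (unsigned)it.refcount);
    return 0;
}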
CVE-2016-10746
|
https://www.cvedetails.com/cve/CVE-2016-10746/
|
CWE-254
|
https://github.com/libvirt/libvirt/commit/506e9d6c2d4baaf580d489fff0690c0ff2ff588f
|
506e9d6c2d4baaf580d489fff0690c0ff2ff588f
|
virDomainGetTime: Deny on RO connections
We have a policy that if API may end up talking to a guest agent
it should require RW connection. We don't obey the rule in
virDomainGetTime().
Signed-off-by: Michal Privoznik <[email protected]>
|
virDomainBlockPeek(virDomainPtr dom,
const char *disk,
unsigned long long offset /* really 64 bits */,
size_t size,
void *buffer,
unsigned int flags)
{
virConnectPtr conn;
VIR_DOMAIN_DEBUG(dom, "disk=%s, offset=%lld, size=%zi, buffer=%p, flags=%x",
disk, offset, size, buffer, flags);
virResetLastError();
virCheckDomainReturn(dom, -1);
conn = dom->conn;
virCheckReadOnlyGoto(conn->flags, error);
virCheckNonEmptyStringArgGoto(disk, error);
/* Allow size == 0 as an access test. */
if (size > 0)
virCheckNonNullArgGoto(buffer, error);
if (conn->driver->domainBlockPeek) {
int ret;
ret = conn->driver->domainBlockPeek(dom, disk, offset, size,
buffer, flags);
if (ret < 0)
goto error;
return ret;
}
virReportUnsupportedError();
error:
virDispatchError(dom->conn);
return -1;
}
|
virDomainBlockPeek(virDomainPtr dom,
const char *disk,
unsigned long long offset /* really 64 bits */,
size_t size,
void *buffer,
unsigned int flags)
{
virConnectPtr conn;
VIR_DOMAIN_DEBUG(dom, "disk=%s, offset=%lld, size=%zi, buffer=%p, flags=%x",
disk, offset, size, buffer, flags);
virResetLastError();
virCheckDomainReturn(dom, -1);
conn = dom->conn;
virCheckReadOnlyGoto(conn->flags, error);
virCheckNonEmptyStringArgGoto(disk, error);
/* Allow size == 0 as an access test. */
if (size > 0)
virCheckNonNullArgGoto(buffer, error);
if (conn->driver->domainBlockPeek) {
int ret;
ret = conn->driver->domainBlockPeek(dom, disk, offset, size,
buffer, flags);
if (ret < 0)
goto error;
return ret;
}
virReportUnsupportedError();
error:
virDispatchError(dom->conn);
return -1;
}
|
C
|
libvirt
| 0 |
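The function shown in this record already follows the read-write policy; a sketch of how the same check lands in virDomainGetTime(), mirroring the virCheckReadOnlyGoto pattern visible in virDomainBlockPeek above (the driver-table details are assumed from that pattern, not copied from the actual patch):

int
virDomainGetTime(virDomainPtr dom,
                 long long *seconds,
                 unsigned int *nseconds,
                 unsigned int flags)
{
    virConnectPtr conn;

    VIR_DOMAIN_DEBUG(dom, "seconds=%p, nseconds=%p, flags=%x",
                     seconds, nseconds, flags);

    virResetLastError();
    virCheckDomainReturn(dom, -1);
    conn = dom->conn;

    virCheckReadOnlyGoto(conn->flags, error);  /* the check the commit adds */

    if (conn->driver->domainGetTime) {
        int ret = conn->driver->domainGetTime(dom, seconds, nseconds, flags);
        if (ret < 0)
            goto error;
        return ret;
    }

    virReportUnsupportedError();
 error:
    virDispatchError(dom->conn);
    return -1;
}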
CVE-2017-9059
|
https://www.cvedetails.com/cve/CVE-2017-9059/
|
CWE-404
|
https://github.com/torvalds/linux/commit/c70422f760c120480fee4de6c38804c72aa26bc1
|
c70422f760c120480fee4de6c38804c72aa26bc1
|
Merge tag 'nfsd-4.12' of git://linux-nfs.org/~bfields/linux
Pull nfsd updates from Bruce Fields:
"Another RDMA update from Chuck Lever, and a bunch of miscellaneous
bugfixes"
* tag 'nfsd-4.12' of git://linux-nfs.org/~bfields/linux: (26 commits)
nfsd: Fix up the "supattr_exclcreat" attributes
nfsd: encoders mustn't use unitialized values in error cases
nfsd: fix undefined behavior in nfsd4_layout_verify
lockd: fix lockd shutdown race
NFSv4: Fix callback server shutdown
SUNRPC: Refactor svc_set_num_threads()
NFSv4.x/callback: Create the callback service through svc_create_pooled
lockd: remove redundant check on block
svcrdma: Clean out old XDR encoders
svcrdma: Remove the req_map cache
svcrdma: Remove unused RDMA Write completion handler
svcrdma: Reduce size of sge array in struct svc_rdma_op_ctxt
svcrdma: Clean up RPC-over-RDMA backchannel reply processing
svcrdma: Report Write/Reply chunk overruns
svcrdma: Clean up RDMA_ERROR path
svcrdma: Use rdma_rw API in RPC reply path
svcrdma: Introduce local rdma_rw API helpers
svcrdma: Clean up svc_rdma_get_inv_rkey()
svcrdma: Add helper to save pages under I/O
svcrdma: Eliminate RPCRDMA_SQ_DEPTH_MULT
...
|
nfsd_print_client_locks(struct nfs4_client *clp)
{
u64 count = nfsd_foreach_client_lock(clp, 0, NULL, NULL);
nfsd_print_count(clp, count, "locked files");
return count;
}
|
nfsd_print_client_locks(struct nfs4_client *clp)
{
u64 count = nfsd_foreach_client_lock(clp, 0, NULL, NULL);
nfsd_print_count(clp, count, "locked files");
return count;
}
|
C
|
linux
| 0 |
null | null | null |
https://github.com/chromium/chromium/commit/19190765882e272a6a2162c89acdb29110f7e3cf
|
19190765882e272a6a2162c89acdb29110f7e3cf
|
Revert 102184 - [Sync] use base::Time in sync
Make EntryKernel/Entry/BaseNode use base::Time instead of int64s.
Add sync/util/time.h, with utility functions to manage the sync proto
time format.
Store times on disk in proto format instead of the local system.
This requires a database version bump (to 77).
Update SessionChangeProcessor/SessionModelAssociator
to use base::Time, too.
Remove hackish Now() function.
Remove ZeroFields() function, and instead zero-initialize in EntryKernel::EntryKernel() directly.
BUG=
TEST=
Review URL: http://codereview.chromium.org/7981006
[email protected]
Review URL: http://codereview.chromium.org/7977034
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@102186 0039d316-1c4b-4281-b951-d872f2087c98
|
void SyncNotifierRemoveObserver(
sync_notifier::SyncNotifierObserver* sync_notifier_observer) {
EXPECT_EQ(sync_notifier_observer_, sync_notifier_observer);
sync_notifier_observer_ = NULL;
}
|
void SyncNotifierRemoveObserver(
sync_notifier::SyncNotifierObserver* sync_notifier_observer) {
EXPECT_EQ(sync_notifier_observer_, sync_notifier_observer);
sync_notifier_observer_ = NULL;
}
|
C
|
Chrome
| 0 |
CVE-2018-17204
|
https://www.cvedetails.com/cve/CVE-2018-17204/
|
CWE-617
|
https://github.com/openvswitch/ovs/commit/4af6da3b275b764b1afe194df6499b33d2bf4cde
|
4af6da3b275b764b1afe194df6499b33d2bf4cde
|
ofp-group: Don't assert-fail decoding bad OF1.5 group mod type or command.
When decoding a group mod, the current code validates the group type and
command after the whole group mod has been decoded. The OF1.5 decoder,
however, tries to use the type and command earlier, when it might still be
invalid. This caused an assertion failure (via OVS_NOT_REACHED). This
commit fixes the problem.
ovs-vswitchd does not enable support for OpenFlow 1.5 by default.
Reported-at: https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=9249
Signed-off-by: Ben Pfaff <[email protected]>
Reviewed-by: Yifeng Sun <[email protected]>
|
ofputil_free_bundle_msgs(struct ofputil_bundle_msg *bms, size_t n_bms)
{
for (size_t i = 0; i < n_bms; i++) {
switch ((int)bms[i].type) {
case OFPTYPE_FLOW_MOD:
free(CONST_CAST(struct ofpact *, bms[i].fm.ofpacts));
break;
case OFPTYPE_GROUP_MOD:
ofputil_uninit_group_mod(&bms[i].gm);
break;
case OFPTYPE_PACKET_OUT:
free(bms[i].po.ofpacts);
free(CONST_CAST(void *, bms[i].po.packet));
break;
default:
break;
}
}
free(bms);
}
|
ofputil_free_bundle_msgs(struct ofputil_bundle_msg *bms, size_t n_bms)
{
for (size_t i = 0; i < n_bms; i++) {
switch ((int)bms[i].type) {
case OFPTYPE_FLOW_MOD:
free(CONST_CAST(struct ofpact *, bms[i].fm.ofpacts));
break;
case OFPTYPE_GROUP_MOD:
ofputil_uninit_group_mod(&bms[i].gm);
break;
case OFPTYPE_PACKET_OUT:
free(bms[i].po.ofpacts);
free(CONST_CAST(void *, bms[i].po.packet));
break;
default:
break;
}
}
free(bms);
}
|
C
|
ovs
| 0 |
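A sketch of the early validation the commit message calls for: check the raw group type and command as soon as they are pulled from the OF1.5 message, before anything switches on them. The enum and error names follow OVS conventions, but the helper itself is illustrative:

static enum ofperr
validate_group_mod_fields(uint8_t type, uint16_t command)
{
    switch (type) {
    case OFPGT11_ALL:
    case OFPGT11_SELECT:
    case OFPGT11_INDIRECT:
    case OFPGT11_FF:
        break;
    default:
        return OFPERR_OFPGMFC_BAD_TYPE;
    }

    switch (command) {
    case OFPGC11_ADD:
    case OFPGC11_MODIFY:
    case OFPGC11_DELETE:
    case OFPGC15_INSERT_BUCKET:
    case OFPGC15_REMOVE_BUCKET:
        break;
    default:
        return OFPERR_OFPGMFC_BAD_COMMAND;
    }

    return 0;
}

Running such a check in the OF1.5 pull path before gm->type or gm->command is consumed would turn the OVS_NOT_REACHED assertion failure into an ordinary OpenFlow error reply.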
CVE-2013-7271
|
https://www.cvedetails.com/cve/CVE-2013-7271/
|
CWE-20
|
https://github.com/torvalds/linux/commit/f3d3342602f8bcbf37d7c46641cb9bca7618eb1c
|
f3d3342602f8bcbf37d7c46641cb9bca7618eb1c
|
net: rework recvmsg handler msg_name and msg_namelen logic
This patch now always passes msg->msg_namelen as 0. recvmsg handlers must
set msg_namelen to the proper size <= sizeof(struct sockaddr_storage)
to return msg_name to the user.
This prevents numerous uninitialized memory leaks we had in the
recvmsg handlers and makes it harder for new code to accidentally leak
uninitialized memory.
Optimize for the case recvfrom is called with NULL as address. We don't
need to copy the address at all, so set it to NULL before invoking the
recvmsg handler. We can do so, because all the recvmsg handlers must
cope with the case a plain read() is called on them. read() also sets
msg_name to NULL.
Also document these changes in include/linux/net.h as suggested by David
Miller.
Changes since RFC:
Set msg->msg_name = NULL if user specified a NULL in msg_name but had a
non-null msg_namelen in verify_iovec/verify_compat_iovec. This doesn't
affect sendto as it would bail out earlier while trying to copy-in the
address. It also more naturally reflects the logic by the callers of
verify_iovec.
With this change in place I could remove "
if (!uaddr || msg_sys->msg_namelen == 0)
msg->msg_name = NULL
".
This change does not alter the user visible error logic as we ignore
msg_namelen as long as msg_name is NULL.
Also remove two unnecessary curly brackets in ___sys_recvmsg and change
comments to netdev style.
Cc: David Miller <[email protected]>
Suggested-by: Eric Dumazet <[email protected]>
Signed-off-by: Hannes Frederic Sowa <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
static int ipxitf_add_local_route(struct ipx_interface *intrfc)
{
return ipxrtr_add_route(intrfc->if_netnum, intrfc, NULL);
}
|
static int ipxitf_add_local_route(struct ipx_interface *intrfc)
{
return ipxrtr_add_route(intrfc->if_netnum, intrfc, NULL);
}
|
C
|
linux
| 0 |
CVE-2013-0921
|
https://www.cvedetails.com/cve/CVE-2013-0921/
|
CWE-264
|
https://github.com/chromium/chromium/commit/e9841fbdaf41b4a2baaa413f94d5c0197f9261f4
|
e9841fbdaf41b4a2baaa413f94d5c0197f9261f4
|
Ensure extensions and the Chrome Web Store are loaded in new BrowsingInstances.
BUG=174943
TEST=Can't post message to CWS. See bug for repro steps.
Review URL: https://chromiumcodereview.appspot.com/12301013
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@184208 0039d316-1c4b-4281-b951-d872f2087c98
|
bool ContentBrowserClient::AllowWorkerIndexedDB(
const GURL& url,
const string16& name,
ResourceContext* context,
const std::vector<std::pair<int, int> >& render_views) {
return true;
}
|
bool ContentBrowserClient::AllowWorkerIndexedDB(
const GURL& url,
const string16& name,
ResourceContext* context,
const std::vector<std::pair<int, int> >& render_views) {
return true;
}
|
C
|
Chrome
| 0 |
CVE-2017-11142
|
https://www.cvedetails.com/cve/CVE-2017-11142/
|
CWE-400
|
https://github.com/php/php-src/commit/0f8cf3b8497dc45c010c44ed9e96518e11e19fc3
|
0f8cf3b8497dc45c010c44ed9e96518e11e19fc3
|
Fix bug #73807
|
static zend_bool add_post_var(zval *arr, post_var_data_t *var, zend_bool eof TSRMLS_DC)
{
char *start, *ksep, *vsep, *val;
size_t klen, vlen;
/* FIXME: string-size_t */
unsigned int new_vlen;
if (var->ptr >= var->end) {
return 0;
}
start = var->ptr + var->already_scanned;
vsep = memchr(start, '&', var->end - start);
if (!vsep) {
if (!eof) {
var->already_scanned = var->end - var->ptr;
return 0;
} else {
vsep = var->end;
}
}
ksep = memchr(var->ptr, '=', vsep - var->ptr);
if (ksep) {
*ksep = '\0';
/* "foo=bar&" or "foo=&" */
klen = ksep - var->ptr;
vlen = vsep - ++ksep;
} else {
ksep = "";
/* "foo&" */
klen = vsep - var->ptr;
vlen = 0;
}
php_url_decode(var->ptr, klen);
val = estrndup(ksep, vlen);
if (vlen) {
vlen = php_url_decode(val, vlen);
}
if (sapi_module.input_filter(PARSE_POST, var->ptr, &val, vlen, &new_vlen TSRMLS_CC)) {
php_register_variable_safe(var->ptr, val, new_vlen, arr TSRMLS_CC);
}
efree(val);
var->ptr = vsep + (vsep != var->end);
var->already_scanned = 0;
return 1;
}
|
static zend_bool add_post_var(zval *arr, post_var_data_t *var, zend_bool eof TSRMLS_DC)
{
char *ksep, *vsep, *val;
size_t klen, vlen;
/* FIXME: string-size_t */
unsigned int new_vlen;
if (var->ptr >= var->end) {
return 0;
}
vsep = memchr(var->ptr, '&', var->end - var->ptr);
if (!vsep) {
if (!eof) {
return 0;
} else {
vsep = var->end;
}
}
ksep = memchr(var->ptr, '=', vsep - var->ptr);
if (ksep) {
*ksep = '\0';
/* "foo=bar&" or "foo=&" */
klen = ksep - var->ptr;
vlen = vsep - ++ksep;
} else {
ksep = "";
/* "foo&" */
klen = vsep - var->ptr;
vlen = 0;
}
php_url_decode(var->ptr, klen);
val = estrndup(ksep, vlen);
if (vlen) {
vlen = php_url_decode(val, vlen);
}
if (sapi_module.input_filter(PARSE_POST, var->ptr, &val, vlen, &new_vlen TSRMLS_CC)) {
php_register_variable_safe(var->ptr, val, new_vlen, arr TSRMLS_CC);
}
efree(val);
var->ptr = vsep + (vsep != var->end);
return 1;
}
|
C
|
php-src
| 1 |
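Unlike most records here, func_after and func_before differ (vul=1): the fix adds var->already_scanned so each call resumes the '&' search where the previous one stopped instead of rescanning the whole buffer, which was quadratic work on an unterminated POST body, hence the CWE-400 label. A self-contained sketch of the resumable-scan pattern (the names are illustrative, not PHP's):

#include <stddef.h>
#include <string.h>

struct scan_state {
    const char *ptr;        /* start of the unconsumed region            */
    const char *end;        /* end of the data buffered so far           */
    size_t already_scanned; /* prefix of [ptr, end) known to be '&'-free */
};

/* Find the next '&', remembering progress when none is found yet so the
 * next call (after more data arrives) skips the already-checked prefix. */
static const char *find_separator(struct scan_state *st)
{
    const char *start = st->ptr + st->already_scanned;
    const char *sep = memchr(start, '&', (size_t)(st->end - start));

    if (sep == NULL) {
        st->already_scanned = (size_t)(st->end - st->ptr);
        return NULL;
    }
    st->already_scanned = 0;  /* reset once a full variable is consumed */
    return sep;
}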
CVE-2012-2880
|
https://www.cvedetails.com/cve/CVE-2012-2880/
|
CWE-362
|
https://github.com/chromium/chromium/commit/fcd3a7a671ecf2d5f46ea34787d27507a914d2f5
|
fcd3a7a671ecf2d5f46ea34787d27507a914d2f5
|
[Sync] Cleanup all tab sync enabling logic now that its on by default.
BUG=none
TEST=
Review URL: https://chromiumcodereview.appspot.com/10443046
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@139462 0039d316-1c4b-4281-b951-d872f2087c98
|
sync_pb::SyncEnums::Action GetClientToServerResponseAction(
const browser_sync::ClientAction& action) {
switch (action) {
case browser_sync::UPGRADE_CLIENT:
return sync_pb::SyncEnums::UPGRADE_CLIENT;
case browser_sync::CLEAR_USER_DATA_AND_RESYNC:
return sync_pb::SyncEnums::CLEAR_USER_DATA_AND_RESYNC;
case browser_sync::ENABLE_SYNC_ON_ACCOUNT:
return sync_pb::SyncEnums::ENABLE_SYNC_ON_ACCOUNT;
case browser_sync::STOP_AND_RESTART_SYNC:
return sync_pb::SyncEnums::STOP_AND_RESTART_SYNC;
case browser_sync::DISABLE_SYNC_ON_CLIENT:
return sync_pb::SyncEnums::DISABLE_SYNC_ON_CLIENT;
case browser_sync::UNKNOWN_ACTION:
return sync_pb::SyncEnums::UNKNOWN_ACTION;
default:
NOTREACHED();
return sync_pb::SyncEnums::UNKNOWN_ACTION;
}
}
|
sync_pb::SyncEnums::Action GetClientToServerResponseAction(
const browser_sync::ClientAction& action) {
switch (action) {
case browser_sync::UPGRADE_CLIENT:
return sync_pb::SyncEnums::UPGRADE_CLIENT;
case browser_sync::CLEAR_USER_DATA_AND_RESYNC:
return sync_pb::SyncEnums::CLEAR_USER_DATA_AND_RESYNC;
case browser_sync::ENABLE_SYNC_ON_ACCOUNT:
return sync_pb::SyncEnums::ENABLE_SYNC_ON_ACCOUNT;
case browser_sync::STOP_AND_RESTART_SYNC:
return sync_pb::SyncEnums::STOP_AND_RESTART_SYNC;
case browser_sync::DISABLE_SYNC_ON_CLIENT:
return sync_pb::SyncEnums::DISABLE_SYNC_ON_CLIENT;
case browser_sync::UNKNOWN_ACTION:
return sync_pb::SyncEnums::UNKNOWN_ACTION;
default:
NOTREACHED();
return sync_pb::SyncEnums::UNKNOWN_ACTION;
}
}
|
C
|
Chrome
| 0 |
CVE-2017-6074
|
https://www.cvedetails.com/cve/CVE-2017-6074/
|
CWE-415
|
https://github.com/torvalds/linux/commit/5edabca9d4cff7f1f2b68f0bac55ef99d9798ba4
|
5edabca9d4cff7f1f2b68f0bac55ef99d9798ba4
|
dccp: fix freeing skb too early for IPV6_RECVPKTINFO
In the current DCCP implementation an skb for a DCCP_PKT_REQUEST packet
is forcibly freed via __kfree_skb in dccp_rcv_state_process if
dccp_v6_conn_request successfully returns.
However, if IPV6_RECVPKTINFO is set on a socket, the address of the skb
is saved to ireq->pktopts and the ref count for skb is incremented in
dccp_v6_conn_request, so skb is still in use. Nevertheless, it gets freed
in dccp_rcv_state_process.
Fix by calling consume_skb instead of doing goto discard and therefore
calling __kfree_skb.
Similar fixes for TCP:
fb7e2399ec17f1004c0e0ccfd17439f8759ede01 [TCP]: skb is unexpectedly freed.
0aea76d35c9651d55bbaf746e7914e5f9ae5a25d tcp: SYN packets are now
simply consumed
Signed-off-by: Andrey Konovalov <[email protected]>
Acked-by: Eric Dumazet <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
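The function pair below is identical and flagged vul = 0 — it is bystander context from the same file; per the message, the actual fix replaced the forced `__kfree_skb` with `consume_skb` in dccp_rcv_state_process. The hazard generalizes to any reference-counted buffer: a forced free that ignores the count invalidates a reference another component still holds, so the later release frees the object a second time. A self-contained C model of that failure mode (buf_get/buf_put/buf_force_free are illustrative stand-ins, not kernel API):

#include <stdlib.h>

struct buf {
    int users;  /* reference count */
    char *data;
};

static struct buf *buf_alloc(void)
{
    struct buf *b = malloc(sizeof(*b));
    b->users = 1;
    b->data = malloc(16);
    return b;
}

/* Take an extra reference, like skb_get() via ireq->pktopts above. */
static void buf_get(struct buf *b) { b->users++; }

/* Reference-respecting release: frees only when the last user drops. */
static void buf_put(struct buf *b)
{
    if (--b->users == 0) {
        free(b->data);
        free(b);
    }
}

/* Forced free that ignores b->users: the __kfree_skb-style hazard. */
static void buf_force_free(struct buf *b)
{
    free(b->data);
    free(b);
}

int main(void)
{
    struct buf *b = buf_alloc();
    buf_get(b); /* a second component keeps a reference */
    buf_put(b); /* safe: 2 -> 1, not freed yet */
    buf_put(b); /* safe: 1 -> 0, freed exactly once */
    /* Had the first release been buf_force_free(b), the second
     * buf_put(b) would free b again: the double free fixed above. */
    (void)buf_force_free;
    return 0;
}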
|
int dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
const struct dccp_hdr *dh, const unsigned int len)
{
if (dccp_check_seqno(sk, skb))
goto discard;
if (dccp_parse_options(sk, NULL, skb))
return 1;
dccp_handle_ackvec_processing(sk, skb);
dccp_deliver_input_to_ccids(sk, skb);
return __dccp_rcv_established(sk, skb, dh, len);
discard:
__kfree_skb(skb);
return 0;
}
|
int dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
const struct dccp_hdr *dh, const unsigned int len)
{
if (dccp_check_seqno(sk, skb))
goto discard;
if (dccp_parse_options(sk, NULL, skb))
return 1;
dccp_handle_ackvec_processing(sk, skb);
dccp_deliver_input_to_ccids(sk, skb);
return __dccp_rcv_established(sk, skb, dh, len);
discard:
__kfree_skb(skb);
return 0;
}
|
C
|
linux
| 0 |
CVE-2016-1674
|
https://www.cvedetails.com/cve/CVE-2016-1674/
| null |
https://github.com/chromium/chromium/commit/14ff9d0cded8ae8032ef027d1f33c6666a695019
|
14ff9d0cded8ae8032ef027d1f33c6666a695019
|
[Extensions] Add more bindings access checks
BUG=598165
Review URL: https://codereview.chromium.org/1854983002
Cr-Commit-Position: refs/heads/master@{#385282}
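The only change below is that RouteFunction now receives the API name ("notifications") alongside the exported function, so the binding layer can verify that the calling context may actually use that API before dispatching — the "more bindings access checks" of the title. The gate reduces to a dispatch table whose entries carry a required capability; a hedged C sketch with hypothetical names:

#include <stdio.h>
#include <string.h>

typedef void (*handler_fn)(void);

struct route {
    const char *name;         /* exported function name */
    const char *required_api; /* API the caller must be allowed to use */
    handler_fn handler;
};

static void get_image_sizes(void) { puts("image sizes returned"); }

static const struct route routes[] = {
    { "GetNotificationImageSizes", "notifications", get_image_sizes },
};

/* Stand-in for the context's feature-availability check. */
static int context_has_api(const char *api)
{
    return strcmp(api, "notifications") == 0; /* granted in this demo */
}

static void dispatch(const char *name)
{
    size_t i;
    for (i = 0; i < sizeof(routes) / sizeof(routes[0]); i++) {
        if (strcmp(routes[i].name, name) != 0)
            continue;
        if (!context_has_api(routes[i].required_api)) {
            puts("access denied"); /* the added check */
            return;
        }
        routes[i].handler();
        return;
    }
    puts("no such route");
}

int main(void)
{
    dispatch("GetNotificationImageSizes");
    return 0;
}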
|
NotificationsNativeHandler::NotificationsNativeHandler(ScriptContext* context)
: ObjectBackedNativeHandler(context) {
RouteFunction(
"GetNotificationImageSizes", "notifications",
base::Bind(&NotificationsNativeHandler::GetNotificationImageSizes,
base::Unretained(this)));
}
|
NotificationsNativeHandler::NotificationsNativeHandler(ScriptContext* context)
: ObjectBackedNativeHandler(context) {
RouteFunction(
"GetNotificationImageSizes",
base::Bind(&NotificationsNativeHandler::GetNotificationImageSizes,
base::Unretained(this)));
}
|
C
|
Chrome
| 1 |
CVE-2016-5219
|
https://www.cvedetails.com/cve/CVE-2016-5219/
|
CWE-416
|
https://github.com/chromium/chromium/commit/a4150b688a754d3d10d2ca385155b1c95d77d6ae
|
a4150b688a754d3d10d2ca385155b1c95d77d6ae
|
Add GL_PROGRAM_COMPLETION_QUERY_CHROMIUM
This makes querying GL_COMPLETION_STATUS_KHR on programs much
cheaper by minimizing round-trips to the GPU thread.
Bug: 881152, 957001
Change-Id: Iadfa798af29225e752c710ca5c25f50b3dd3101a
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1586630
Commit-Queue: Kenneth Russell <[email protected]>
Reviewed-by: Kentaro Hara <[email protected]>
Reviewed-by: Geoff Lang <[email protected]>
Reviewed-by: Kenneth Russell <[email protected]>
Cr-Commit-Position: refs/heads/master@{#657568}
|
void GLES2DecoderImpl::DoGetBufferParameteri64v(GLenum target,
GLenum pname,
GLint64* params,
GLsizei params_size) {
buffer_manager()->ValidateAndDoGetBufferParameteri64v(
&state_, error_state_.get(), target, pname, params);
}
|
void GLES2DecoderImpl::DoGetBufferParameteri64v(GLenum target,
GLenum pname,
GLint64* params,
GLsizei params_size) {
buffer_manager()->ValidateAndDoGetBufferParameteri64v(
&state_, error_state_.get(), target, pname, params);
}
|
C
|
Chrome
| 0 |
CVE-2017-5118
|
https://www.cvedetails.com/cve/CVE-2017-5118/
|
CWE-732
|
https://github.com/chromium/chromium/commit/0ab2412a104d2f235d7b9fe19d30ef605a410832
|
0ab2412a104d2f235d7b9fe19d30ef605a410832
|
Inherit CSP when we inherit the security origin
This prevents attacks that use main-window navigation to get out of the
existing CSP constraints, as demonstrated in the related bug.
Bug: 747847
Change-Id: I1e57b50da17f65d38088205b0a3c7c49ef2ae4d8
Reviewed-on: https://chromium-review.googlesource.com/592027
Reviewed-by: Mike West <[email protected]>
Commit-Queue: Andy Paicu <[email protected]>
Cr-Commit-Position: refs/heads/master@{#492333}
|
DEFINE_TRACE(DocumentLoader) {
visitor->Trace(frame_);
visitor->Trace(fetcher_);
visitor->Trace(main_resource_);
visitor->Trace(history_item_);
visitor->Trace(writer_);
visitor->Trace(subresource_filter_);
visitor->Trace(document_load_timing_);
visitor->Trace(application_cache_host_);
visitor->Trace(content_security_policy_);
RawResourceClient::Trace(visitor);
}
|
DEFINE_TRACE(DocumentLoader) {
visitor->Trace(frame_);
visitor->Trace(fetcher_);
visitor->Trace(main_resource_);
visitor->Trace(history_item_);
visitor->Trace(writer_);
visitor->Trace(subresource_filter_);
visitor->Trace(document_load_timing_);
visitor->Trace(application_cache_host_);
visitor->Trace(content_security_policy_);
RawResourceClient::Trace(visitor);
}
|
C
|
Chrome
| 0 |
CVE-2011-4621
|
https://www.cvedetails.com/cve/CVE-2011-4621/
| null |
https://github.com/torvalds/linux/commit/f26f9aff6aaf67e9a430d16c266f91b13a5bff64
|
f26f9aff6aaf67e9a430d16c266f91b13a5bff64
|
Sched: fix skip_clock_update optimization
idle_balance() drops/retakes rq->lock, leaving the previous task
vulnerable to set_tsk_need_resched(). Clear it after we return
from balancing instead, and in setup_thread_stack() as well, so
no successfully descheduled or never scheduled task has it set.
Need resched confused the skip_clock_update logic, which assumes
that the next call to update_rq_clock() will come nearly immediately
after being set. Make the optimization robust against the waking
a sleeper before it successfully deschedules case by checking that
the current task has not been dequeued before setting the flag,
since it is that useless clock update we're trying to save, and
clear unconditionally in schedule() proper instead of conditionally
in put_prev_task().
Signed-off-by: Mike Galbraith <[email protected]>
Reported-by: Bjoern B. Brandenburg <[email protected]>
Tested-by: Yong Zhang <[email protected]>
Signed-off-by: Peter Zijlstra <[email protected]>
Cc: [email protected]
LKML-Reference: <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
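A toy model makes the hazard above concrete: a per-runqueue flag promises "a clock update is imminent, skip this one", and if the promised reschedule never happens, the clock silently goes stale. Everything here is an illustrative simplification of the scheduler logic, not kernel code:

#include <stdio.h>

struct rq {
    unsigned long clock;
    int skip_clock_update; /* "the next update is redundant" */
};

static void update_rq_clock(struct rq *rq, unsigned long now)
{
    if (rq->skip_clock_update) {
        /* Valid only if a real update follows almost immediately. */
        rq->skip_clock_update = 0;
        return;
    }
    rq->clock = now;
}

int main(void)
{
    struct rq rq = { 100, 0 };

    /* A wakeup sets need_resched and arms the optimization, but the
     * previous task never actually deschedules... */
    rq.skip_clock_update = 1;

    /* ...so the next honest update is swallowed. */
    update_rq_clock(&rq, 200);
    printf("rq.clock = %lu (stale; expected 200)\n", rq.clock);

    /* The fix above: arm the flag only when the current task has
     * really been dequeued, and clear need_resched unconditionally
     * in schedule(), so a stale flag cannot linger. */
    return 0;
}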
|
int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
{
unsigned int cpu;
struct rq *rq;
if (!sched_feat(OWNER_SPIN))
return 0;
#ifdef CONFIG_DEBUG_PAGEALLOC
/*
* Need to access the cpu field knowing that
* DEBUG_PAGEALLOC could have unmapped it if
* the mutex owner just released it and exited.
*/
if (probe_kernel_address(&owner->cpu, cpu))
return 0;
#else
cpu = owner->cpu;
#endif
/*
* Even if the access succeeded (likely case),
* the cpu field may no longer be valid.
*/
if (cpu >= nr_cpumask_bits)
return 0;
/*
* We need to validate that we can do a
* get_cpu() and that we have the percpu area.
*/
if (!cpu_online(cpu))
return 0;
rq = cpu_rq(cpu);
for (;;) {
/*
* Owner changed, break to re-assess state.
*/
if (lock->owner != owner) {
/*
* If the lock has switched to a different owner,
* we likely have heavy contention. Return 0 to quit
* optimistic spinning and not contend further:
*/
if (lock->owner)
return 0;
break;
}
/*
* Is that owner really running on that cpu?
*/
if (task_thread_info(rq->curr) != owner || need_resched())
return 0;
cpu_relax();
}
return 1;
}
|
int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
{
unsigned int cpu;
struct rq *rq;
if (!sched_feat(OWNER_SPIN))
return 0;
#ifdef CONFIG_DEBUG_PAGEALLOC
/*
* Need to access the cpu field knowing that
* DEBUG_PAGEALLOC could have unmapped it if
* the mutex owner just released it and exited.
*/
if (probe_kernel_address(&owner->cpu, cpu))
return 0;
#else
cpu = owner->cpu;
#endif
/*
* Even if the access succeeded (likely case),
* the cpu field may no longer be valid.
*/
if (cpu >= nr_cpumask_bits)
return 0;
/*
* We need to validate that we can do a
* get_cpu() and that we have the percpu area.
*/
if (!cpu_online(cpu))
return 0;
rq = cpu_rq(cpu);
for (;;) {
/*
* Owner changed, break to re-assess state.
*/
if (lock->owner != owner) {
/*
* If the lock has switched to a different owner,
* we likely have heavy contention. Return 0 to quit
* optimistic spinning and not contend further:
*/
if (lock->owner)
return 0;
break;
}
/*
* Is that owner really running on that cpu?
*/
if (task_thread_info(rq->curr) != owner || need_resched())
return 0;
cpu_relax();
}
return 1;
}
|
C
|
linux
| 0 |
CVE-2017-9304
|
https://www.cvedetails.com/cve/CVE-2017-9304/
|
CWE-674
|
https://github.com/VirusTotal/yara/commit/925bcf3c3b0a28b5b78e25d9efda5c0bf27ae699
|
925bcf3c3b0a28b5b78e25d9efda5c0bf27ae699
|
Fix issue #674. Move regexp limits to limits.h.
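The patched parser below differs from the unpatched one chiefly by the incr_ast_levels() calls added to the recursive alternation, concatenation, and grouping rules, bounding how deep the regexp AST may grow — the CWE-674 problem being that a deeply nested pattern could otherwise drive unbounded recursion. The guard pattern itself is small; a self-contained C sketch with a hypothetical limit and names:

#include <stdio.h>

#define MAX_AST_LEVELS 2000 /* hypothetical cap, cf. the limits.h move */

static int ast_levels;

/* Parses nested parentheses, failing cleanly once nesting exceeds
 * the cap instead of recursing without bound. */
static int parse_group(const char **p)
{
    if (++ast_levels > MAX_AST_LEVELS)
        return -1; /* "regular expression too complex" */
    if (**p == '(') {
        (*p)++;
        if (parse_group(p) < 0)
            return -1;
        if (**p != ')')
            return -1; /* unbalanced */
        (*p)++;
    }
    return 0;
}

int main(void)
{
    const char *input = "((()))";
    ast_levels = 0; /* reset per expression */
    printf("result: %d\n", parse_group(&input)); /* 0: accepted */
    return 0;
}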
|
yyparse (void *yyscanner, RE_LEX_ENVIRONMENT *lex_env)
{
/* The lookahead symbol. */
int yychar;
/* The semantic value of the lookahead symbol. */
/* Default value used for initialization, for pacifying older GCCs
or non-GCC compilers. */
YY_INITIAL_VALUE (static YYSTYPE yyval_default;)
YYSTYPE yylval YY_INITIAL_VALUE (= yyval_default);
/* Number of syntax errors so far. */
int yynerrs;
int yystate;
/* Number of tokens to shift before error messages enabled. */
int yyerrstatus;
/* The stacks and their tools:
'yyss': related to states.
'yyvs': related to semantic values.
Refer to the stacks through separate pointers, to allow yyoverflow
to reallocate them elsewhere. */
/* The state stack. */
yytype_int16 yyssa[YYINITDEPTH];
yytype_int16 *yyss;
yytype_int16 *yyssp;
/* The semantic value stack. */
YYSTYPE yyvsa[YYINITDEPTH];
YYSTYPE *yyvs;
YYSTYPE *yyvsp;
YYSIZE_T yystacksize;
int yyn;
int yyresult;
/* Lookahead token as an internal (translated) token number. */
int yytoken = 0;
/* The variables used to return semantic value and location from the
action routines. */
YYSTYPE yyval;
#if YYERROR_VERBOSE
/* Buffer for error messages, and its allocated size. */
char yymsgbuf[128];
char *yymsg = yymsgbuf;
YYSIZE_T yymsg_alloc = sizeof yymsgbuf;
#endif
#define YYPOPSTACK(N) (yyvsp -= (N), yyssp -= (N))
/* The number of symbols on the RHS of the reduced rule.
Keep to zero when no symbol should be popped. */
int yylen = 0;
yyssp = yyss = yyssa;
yyvsp = yyvs = yyvsa;
yystacksize = YYINITDEPTH;
YYDPRINTF ((stderr, "Starting parse\n"));
yystate = 0;
yyerrstatus = 0;
yynerrs = 0;
yychar = YYEMPTY; /* Cause a token to be read. */
goto yysetstate;
/*------------------------------------------------------------.
| yynewstate -- Push a new state, which is found in yystate. |
`------------------------------------------------------------*/
yynewstate:
/* In all cases, when you get here, the value and location stacks
have just been pushed. So pushing a state here evens the stacks. */
yyssp++;
yysetstate:
*yyssp = yystate;
if (yyss + yystacksize - 1 <= yyssp)
{
/* Get the current used size of the three stacks, in elements. */
YYSIZE_T yysize = yyssp - yyss + 1;
#ifdef yyoverflow
{
/* Give user a chance to reallocate the stack. Use copies of
these so that the &'s don't force the real ones into
memory. */
YYSTYPE *yyvs1 = yyvs;
yytype_int16 *yyss1 = yyss;
/* Each stack pointer address is followed by the size of the
data in use in that stack, in bytes. This used to be a
conditional around just the two extra args, but that might
be undefined if yyoverflow is a macro. */
yyoverflow (YY_("memory exhausted"),
&yyss1, yysize * sizeof (*yyssp),
&yyvs1, yysize * sizeof (*yyvsp),
&yystacksize);
yyss = yyss1;
yyvs = yyvs1;
}
#else /* no yyoverflow */
# ifndef YYSTACK_RELOCATE
goto yyexhaustedlab;
# else
/* Extend the stack our own way. */
if (YYMAXDEPTH <= yystacksize)
goto yyexhaustedlab;
yystacksize *= 2;
if (YYMAXDEPTH < yystacksize)
yystacksize = YYMAXDEPTH;
{
yytype_int16 *yyss1 = yyss;
union yyalloc *yyptr =
(union yyalloc *) YYSTACK_ALLOC (YYSTACK_BYTES (yystacksize));
if (! yyptr)
goto yyexhaustedlab;
YYSTACK_RELOCATE (yyss_alloc, yyss);
YYSTACK_RELOCATE (yyvs_alloc, yyvs);
# undef YYSTACK_RELOCATE
if (yyss1 != yyssa)
YYSTACK_FREE (yyss1);
}
# endif
#endif /* no yyoverflow */
yyssp = yyss + yysize - 1;
yyvsp = yyvs + yysize - 1;
YYDPRINTF ((stderr, "Stack size increased to %lu\n",
(unsigned long int) yystacksize));
if (yyss + yystacksize - 1 <= yyssp)
YYABORT;
}
YYDPRINTF ((stderr, "Entering state %d\n", yystate));
if (yystate == YYFINAL)
YYACCEPT;
goto yybackup;
/*-----------.
| yybackup. |
`-----------*/
yybackup:
/* Do appropriate processing given the current state. Read a
lookahead token if we need one and don't already have one. */
/* First try to decide what to do without reference to lookahead token. */
yyn = yypact[yystate];
if (yypact_value_is_default (yyn))
goto yydefault;
/* Not known => get a lookahead token if don't already have one. */
/* YYCHAR is either YYEMPTY or YYEOF or a valid lookahead symbol. */
if (yychar == YYEMPTY)
{
YYDPRINTF ((stderr, "Reading a token: "));
yychar = yylex (&yylval, yyscanner, lex_env);
}
if (yychar <= YYEOF)
{
yychar = yytoken = YYEOF;
YYDPRINTF ((stderr, "Now at end of input.\n"));
}
else
{
yytoken = YYTRANSLATE (yychar);
YY_SYMBOL_PRINT ("Next token is", yytoken, &yylval, &yylloc);
}
/* If the proper action on seeing token YYTOKEN is to reduce or to
detect an error, take that action. */
yyn += yytoken;
if (yyn < 0 || YYLAST < yyn || yycheck[yyn] != yytoken)
goto yydefault;
yyn = yytable[yyn];
if (yyn <= 0)
{
if (yytable_value_is_error (yyn))
goto yyerrlab;
yyn = -yyn;
goto yyreduce;
}
/* Count tokens shifted since error; after three, turn off error
status. */
if (yyerrstatus)
yyerrstatus--;
/* Shift the lookahead token. */
YY_SYMBOL_PRINT ("Shifting", yytoken, &yylval, &yylloc);
/* Discard the shifted token. */
yychar = YYEMPTY;
yystate = yyn;
YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN
*++yyvsp = yylval;
YY_IGNORE_MAYBE_UNINITIALIZED_END
goto yynewstate;
/*-----------------------------------------------------------.
| yydefault -- do the default action for the current state. |
`-----------------------------------------------------------*/
yydefault:
yyn = yydefact[yystate];
if (yyn == 0)
goto yyerrlab;
goto yyreduce;
/*-----------------------------.
| yyreduce -- Do a reduction. |
`-----------------------------*/
yyreduce:
/* yyn is the number of a rule to reduce with. */
yylen = yyr2[yyn];
/* If YYLEN is nonzero, implement the default value of the action:
'$$ = $1'.
Otherwise, the following line sets YYVAL to garbage.
This behavior is undocumented and Bison
users should not rely upon it. Assigning to YYVAL
unconditionally makes the parser a bit smaller, and it avoids a
GCC warning that YYVAL may be used uninitialized. */
yyval = yyvsp[1-yylen];
YY_REDUCE_PRINT (yyn);
switch (yyn)
{
case 2:
#line 113 "re_grammar.y" /* yacc.c:1646 */
{
RE_AST* re_ast = yyget_extra(yyscanner);
re_ast->root_node = (yyvsp[0].re_node);
}
#line 1348 "re_grammar.c" /* yacc.c:1646 */
break;
case 4:
#line 122 "re_grammar.y" /* yacc.c:1646 */
{
(yyval.re_node) = (yyvsp[0].re_node);
}
#line 1356 "re_grammar.c" /* yacc.c:1646 */
break;
case 5:
#line 126 "re_grammar.y" /* yacc.c:1646 */
{
mark_as_not_fast_regexp();
incr_ast_levels();
(yyval.re_node) = yr_re_node_create(RE_NODE_ALT, (yyvsp[-2].re_node), (yyvsp[0].re_node));
DESTROY_NODE_IF((yyval.re_node) == NULL, (yyvsp[-2].re_node));
DESTROY_NODE_IF((yyval.re_node) == NULL, (yyvsp[0].re_node));
ERROR_IF((yyval.re_node) == NULL, ERROR_INSUFFICIENT_MEMORY);
}
#line 1372 "re_grammar.c" /* yacc.c:1646 */
break;
case 6:
#line 138 "re_grammar.y" /* yacc.c:1646 */
{
RE_NODE* node;
mark_as_not_fast_regexp();
incr_ast_levels();
node = yr_re_node_create(RE_NODE_EMPTY, NULL, NULL);
DESTROY_NODE_IF((yyval.re_node) == NULL, (yyvsp[-1].re_node));
ERROR_IF(node == NULL, ERROR_INSUFFICIENT_MEMORY);
(yyval.re_node) = yr_re_node_create(RE_NODE_ALT, (yyvsp[-1].re_node), node);
ERROR_IF((yyval.re_node) == NULL, ERROR_INSUFFICIENT_MEMORY);
}
#line 1392 "re_grammar.c" /* yacc.c:1646 */
break;
case 7:
#line 157 "re_grammar.y" /* yacc.c:1646 */
{
(yyval.re_node) = (yyvsp[0].re_node);
}
#line 1400 "re_grammar.c" /* yacc.c:1646 */
break;
case 8:
#line 161 "re_grammar.y" /* yacc.c:1646 */
{
incr_ast_levels();
(yyval.re_node) = yr_re_node_create(RE_NODE_CONCAT, (yyvsp[-1].re_node), (yyvsp[0].re_node));
DESTROY_NODE_IF((yyval.re_node) == NULL, (yyvsp[-1].re_node));
DESTROY_NODE_IF((yyval.re_node) == NULL, (yyvsp[0].re_node));
ERROR_IF((yyval.re_node) == NULL, ERROR_INSUFFICIENT_MEMORY);
}
#line 1414 "re_grammar.c" /* yacc.c:1646 */
break;
case 9:
#line 174 "re_grammar.y" /* yacc.c:1646 */
{
RE_AST* re_ast;
mark_as_not_fast_regexp();
re_ast = yyget_extra(yyscanner);
re_ast->flags |= RE_FLAGS_GREEDY;
(yyval.re_node) = yr_re_node_create(RE_NODE_STAR, (yyvsp[-1].re_node), NULL);
DESTROY_NODE_IF((yyval.re_node) == NULL, (yyvsp[-1].re_node));
ERROR_IF((yyval.re_node) == NULL, ERROR_INSUFFICIENT_MEMORY);
}
#line 1432 "re_grammar.c" /* yacc.c:1646 */
break;
case 10:
#line 188 "re_grammar.y" /* yacc.c:1646 */
{
RE_AST* re_ast;
mark_as_not_fast_regexp();
re_ast = yyget_extra(yyscanner);
re_ast->flags |= RE_FLAGS_UNGREEDY;
(yyval.re_node) = yr_re_node_create(RE_NODE_STAR, (yyvsp[-2].re_node), NULL);
DESTROY_NODE_IF((yyval.re_node) == NULL, (yyvsp[-2].re_node));
ERROR_IF((yyval.re_node) == NULL, ERROR_INSUFFICIENT_MEMORY);
(yyval.re_node)->greedy = FALSE;
}
#line 1452 "re_grammar.c" /* yacc.c:1646 */
break;
case 11:
#line 204 "re_grammar.y" /* yacc.c:1646 */
{
RE_AST* re_ast;
mark_as_not_fast_regexp();
re_ast = yyget_extra(yyscanner);
re_ast->flags |= RE_FLAGS_GREEDY;
(yyval.re_node) = yr_re_node_create(RE_NODE_PLUS, (yyvsp[-1].re_node), NULL);
DESTROY_NODE_IF((yyval.re_node) == NULL, (yyvsp[-1].re_node));
ERROR_IF((yyval.re_node) == NULL, ERROR_INSUFFICIENT_MEMORY);
}
#line 1470 "re_grammar.c" /* yacc.c:1646 */
break;
case 12:
#line 218 "re_grammar.y" /* yacc.c:1646 */
{
RE_AST* re_ast;
mark_as_not_fast_regexp();
re_ast = yyget_extra(yyscanner);
re_ast->flags |= RE_FLAGS_UNGREEDY;
(yyval.re_node) = yr_re_node_create(RE_NODE_PLUS, (yyvsp[-2].re_node), NULL);
DESTROY_NODE_IF((yyval.re_node) == NULL, (yyvsp[-2].re_node));
ERROR_IF((yyval.re_node) == NULL, ERROR_INSUFFICIENT_MEMORY);
(yyval.re_node)->greedy = FALSE;
}
#line 1490 "re_grammar.c" /* yacc.c:1646 */
break;
case 13:
#line 234 "re_grammar.y" /* yacc.c:1646 */
{
RE_AST* re_ast = yyget_extra(yyscanner);
re_ast->flags |= RE_FLAGS_GREEDY;
if ((yyvsp[-1].re_node)->type == RE_NODE_ANY)
{
(yyval.re_node) = yr_re_node_create(RE_NODE_RANGE_ANY, NULL, NULL);
DESTROY_NODE_IF(TRUE, (yyvsp[-1].re_node));
}
else
{
mark_as_not_fast_regexp();
(yyval.re_node) = yr_re_node_create(RE_NODE_RANGE, (yyvsp[-1].re_node), NULL);
DESTROY_NODE_IF((yyval.re_node) == NULL, (yyvsp[-1].re_node));
}
DESTROY_NODE_IF((yyval.re_node) == NULL, (yyvsp[-1].re_node));
ERROR_IF((yyval.re_node) == NULL, ERROR_INSUFFICIENT_MEMORY);
(yyval.re_node)->start = 0;
(yyval.re_node)->end = 1;
}
#line 1517 "re_grammar.c" /* yacc.c:1646 */
break;
case 14:
#line 257 "re_grammar.y" /* yacc.c:1646 */
{
RE_AST* re_ast = yyget_extra(yyscanner);
re_ast->flags |= RE_FLAGS_UNGREEDY;
if ((yyvsp[-2].re_node)->type == RE_NODE_ANY)
{
(yyval.re_node) = yr_re_node_create(RE_NODE_RANGE_ANY, NULL, NULL);
DESTROY_NODE_IF(TRUE, (yyvsp[-2].re_node));
}
else
{
mark_as_not_fast_regexp();
(yyval.re_node) = yr_re_node_create(RE_NODE_RANGE, (yyvsp[-2].re_node), NULL);
DESTROY_NODE_IF((yyval.re_node) == NULL, (yyvsp[-2].re_node));
}
DESTROY_NODE_IF((yyval.re_node) == NULL, (yyvsp[-2].re_node));
ERROR_IF((yyval.re_node) == NULL, ERROR_INSUFFICIENT_MEMORY);
(yyval.re_node)->start = 0;
(yyval.re_node)->end = 1;
(yyval.re_node)->greedy = FALSE;
}
#line 1545 "re_grammar.c" /* yacc.c:1646 */
break;
case 15:
#line 281 "re_grammar.y" /* yacc.c:1646 */
{
RE_AST* re_ast = yyget_extra(yyscanner);
re_ast->flags |= RE_FLAGS_GREEDY;
if ((yyvsp[-1].re_node)->type == RE_NODE_ANY)
{
(yyval.re_node) = yr_re_node_create(RE_NODE_RANGE_ANY, NULL, NULL);
DESTROY_NODE_IF(TRUE, (yyvsp[-1].re_node));
}
else
{
mark_as_not_fast_regexp();
(yyval.re_node) = yr_re_node_create(RE_NODE_RANGE, (yyvsp[-1].re_node), NULL);
DESTROY_NODE_IF((yyval.re_node) == NULL, (yyvsp[-1].re_node));
}
ERROR_IF((yyval.re_node) == NULL, ERROR_INSUFFICIENT_MEMORY);
(yyval.re_node)->start = (yyvsp[0].range) & 0xFFFF;;
(yyval.re_node)->end = (yyvsp[0].range) >> 16;;
}
#line 1571 "re_grammar.c" /* yacc.c:1646 */
break;
case 16:
#line 303 "re_grammar.y" /* yacc.c:1646 */
{
RE_AST* re_ast = yyget_extra(yyscanner);
re_ast->flags |= RE_FLAGS_UNGREEDY;
if ((yyvsp[-2].re_node)->type == RE_NODE_ANY)
{
(yyval.re_node) = yr_re_node_create(RE_NODE_RANGE_ANY, NULL, NULL);
DESTROY_NODE_IF(TRUE, (yyvsp[-2].re_node));
}
else
{
mark_as_not_fast_regexp();
(yyval.re_node) = yr_re_node_create(RE_NODE_RANGE, (yyvsp[-2].re_node), NULL);
DESTROY_NODE_IF((yyval.re_node) == NULL, (yyvsp[-2].re_node));
}
ERROR_IF((yyval.re_node) == NULL, ERROR_INSUFFICIENT_MEMORY);
(yyval.re_node)->start = (yyvsp[-1].range) & 0xFFFF;;
(yyval.re_node)->end = (yyvsp[-1].range) >> 16;;
(yyval.re_node)->greedy = FALSE;
}
#line 1598 "re_grammar.c" /* yacc.c:1646 */
break;
case 17:
#line 326 "re_grammar.y" /* yacc.c:1646 */
{
(yyval.re_node) = (yyvsp[0].re_node);
}
#line 1606 "re_grammar.c" /* yacc.c:1646 */
break;
case 18:
#line 330 "re_grammar.y" /* yacc.c:1646 */
{
(yyval.re_node) = yr_re_node_create(RE_NODE_WORD_BOUNDARY, NULL, NULL);
ERROR_IF((yyval.re_node) == NULL, ERROR_INSUFFICIENT_MEMORY);
}
#line 1616 "re_grammar.c" /* yacc.c:1646 */
break;
case 19:
#line 336 "re_grammar.y" /* yacc.c:1646 */
{
(yyval.re_node) = yr_re_node_create(RE_NODE_NON_WORD_BOUNDARY, NULL, NULL);
ERROR_IF((yyval.re_node) == NULL, ERROR_INSUFFICIENT_MEMORY);
}
#line 1626 "re_grammar.c" /* yacc.c:1646 */
break;
case 20:
#line 342 "re_grammar.y" /* yacc.c:1646 */
{
(yyval.re_node) = yr_re_node_create(RE_NODE_ANCHOR_START, NULL, NULL);
ERROR_IF((yyval.re_node) == NULL, ERROR_INSUFFICIENT_MEMORY);
}
#line 1636 "re_grammar.c" /* yacc.c:1646 */
break;
case 21:
#line 348 "re_grammar.y" /* yacc.c:1646 */
{
(yyval.re_node) = yr_re_node_create(RE_NODE_ANCHOR_END, NULL, NULL);
ERROR_IF((yyval.re_node) == NULL, ERROR_INSUFFICIENT_MEMORY);
}
#line 1646 "re_grammar.c" /* yacc.c:1646 */
break;
case 22:
#line 357 "re_grammar.y" /* yacc.c:1646 */
{
incr_ast_levels();
(yyval.re_node) = (yyvsp[-1].re_node);
}
#line 1656 "re_grammar.c" /* yacc.c:1646 */
break;
case 23:
#line 363 "re_grammar.y" /* yacc.c:1646 */
{
(yyval.re_node) = yr_re_node_create(RE_NODE_ANY, NULL, NULL);
ERROR_IF((yyval.re_node) == NULL, ERROR_INSUFFICIENT_MEMORY);
}
#line 1666 "re_grammar.c" /* yacc.c:1646 */
break;
case 24:
#line 369 "re_grammar.y" /* yacc.c:1646 */
{
(yyval.re_node) = yr_re_node_create(RE_NODE_LITERAL, NULL, NULL);
ERROR_IF((yyval.re_node) == NULL, ERROR_INSUFFICIENT_MEMORY);
(yyval.re_node)->value = (yyvsp[0].integer);
}
#line 1678 "re_grammar.c" /* yacc.c:1646 */
break;
case 25:
#line 377 "re_grammar.y" /* yacc.c:1646 */
{
(yyval.re_node) = yr_re_node_create(RE_NODE_WORD_CHAR, NULL, NULL);
ERROR_IF((yyval.re_node) == NULL, ERROR_INSUFFICIENT_MEMORY);
}
#line 1688 "re_grammar.c" /* yacc.c:1646 */
break;
case 26:
#line 383 "re_grammar.y" /* yacc.c:1646 */
{
(yyval.re_node) = yr_re_node_create(RE_NODE_NON_WORD_CHAR, NULL, NULL);
ERROR_IF((yyval.re_node) == NULL, ERROR_INSUFFICIENT_MEMORY);
}
#line 1698 "re_grammar.c" /* yacc.c:1646 */
break;
case 27:
#line 389 "re_grammar.y" /* yacc.c:1646 */
{
(yyval.re_node) = yr_re_node_create(RE_NODE_SPACE, NULL, NULL);
ERROR_IF((yyval.re_node) == NULL, ERROR_INSUFFICIENT_MEMORY);
}
#line 1708 "re_grammar.c" /* yacc.c:1646 */
break;
case 28:
#line 395 "re_grammar.y" /* yacc.c:1646 */
{
(yyval.re_node) = yr_re_node_create(RE_NODE_NON_SPACE, NULL, NULL);
ERROR_IF((yyval.re_node) == NULL, ERROR_INSUFFICIENT_MEMORY);
}
#line 1718 "re_grammar.c" /* yacc.c:1646 */
break;
case 29:
#line 401 "re_grammar.y" /* yacc.c:1646 */
{
(yyval.re_node) = yr_re_node_create(RE_NODE_DIGIT, NULL, NULL);
ERROR_IF((yyval.re_node) == NULL, ERROR_INSUFFICIENT_MEMORY);
}
#line 1728 "re_grammar.c" /* yacc.c:1646 */
break;
case 30:
#line 407 "re_grammar.y" /* yacc.c:1646 */
{
(yyval.re_node) = yr_re_node_create(RE_NODE_NON_DIGIT, NULL, NULL);
ERROR_IF((yyval.re_node) == NULL, ERROR_INSUFFICIENT_MEMORY);
}
#line 1738 "re_grammar.c" /* yacc.c:1646 */
break;
case 31:
#line 413 "re_grammar.y" /* yacc.c:1646 */
{
(yyval.re_node) = yr_re_node_create(RE_NODE_CLASS, NULL, NULL);
ERROR_IF((yyval.re_node) == NULL, ERROR_INSUFFICIENT_MEMORY);
(yyval.re_node)->class_vector = (yyvsp[0].class_vector);
}
#line 1750 "re_grammar.c" /* yacc.c:1646 */
break;
#line 1754 "re_grammar.c" /* yacc.c:1646 */
default: break;
}
/* User semantic actions sometimes alter yychar, and that requires
that yytoken be updated with the new translation. We take the
approach of translating immediately before every use of yytoken.
One alternative is translating here after every semantic action,
but that translation would be missed if the semantic action invokes
YYABORT, YYACCEPT, or YYERROR immediately after altering yychar or
if it invokes YYBACKUP. In the case of YYABORT or YYACCEPT, an
incorrect destructor might then be invoked immediately. In the
case of YYERROR or YYBACKUP, subsequent parser actions might lead
to an incorrect destructor call or verbose syntax error message
before the lookahead is translated. */
YY_SYMBOL_PRINT ("-> $$ =", yyr1[yyn], &yyval, &yyloc);
YYPOPSTACK (yylen);
yylen = 0;
YY_STACK_PRINT (yyss, yyssp);
*++yyvsp = yyval;
/* Now 'shift' the result of the reduction. Determine what state
that goes to, based on the state we popped back to and the rule
number reduced by. */
yyn = yyr1[yyn];
yystate = yypgoto[yyn - YYNTOKENS] + *yyssp;
if (0 <= yystate && yystate <= YYLAST && yycheck[yystate] == *yyssp)
yystate = yytable[yystate];
else
yystate = yydefgoto[yyn - YYNTOKENS];
goto yynewstate;
/*--------------------------------------.
| yyerrlab -- here on detecting error. |
`--------------------------------------*/
yyerrlab:
/* Make sure we have latest lookahead translation. See comments at
user semantic actions for why this is necessary. */
yytoken = yychar == YYEMPTY ? YYEMPTY : YYTRANSLATE (yychar);
/* If not already recovering from an error, report this error. */
if (!yyerrstatus)
{
++yynerrs;
#if ! YYERROR_VERBOSE
yyerror (yyscanner, lex_env, YY_("syntax error"));
#else
# define YYSYNTAX_ERROR yysyntax_error (&yymsg_alloc, &yymsg, \
yyssp, yytoken)
{
char const *yymsgp = YY_("syntax error");
int yysyntax_error_status;
yysyntax_error_status = YYSYNTAX_ERROR;
if (yysyntax_error_status == 0)
yymsgp = yymsg;
else if (yysyntax_error_status == 1)
{
if (yymsg != yymsgbuf)
YYSTACK_FREE (yymsg);
yymsg = (char *) YYSTACK_ALLOC (yymsg_alloc);
if (!yymsg)
{
yymsg = yymsgbuf;
yymsg_alloc = sizeof yymsgbuf;
yysyntax_error_status = 2;
}
else
{
yysyntax_error_status = YYSYNTAX_ERROR;
yymsgp = yymsg;
}
}
yyerror (yyscanner, lex_env, yymsgp);
if (yysyntax_error_status == 2)
goto yyexhaustedlab;
}
# undef YYSYNTAX_ERROR
#endif
}
if (yyerrstatus == 3)
{
/* If just tried and failed to reuse lookahead token after an
error, discard it. */
if (yychar <= YYEOF)
{
/* Return failure if at end of input. */
if (yychar == YYEOF)
YYABORT;
}
else
{
yydestruct ("Error: discarding",
yytoken, &yylval, yyscanner, lex_env);
yychar = YYEMPTY;
}
}
/* Else will try to reuse lookahead token after shifting the error
token. */
goto yyerrlab1;
/*---------------------------------------------------.
| yyerrorlab -- error raised explicitly by YYERROR. |
`---------------------------------------------------*/
yyerrorlab:
/* Pacify compilers like GCC when the user code never invokes
YYERROR and the label yyerrorlab therefore never appears in user
code. */
if (/*CONSTCOND*/ 0)
goto yyerrorlab;
/* Do not reclaim the symbols of the rule whose action triggered
this YYERROR. */
YYPOPSTACK (yylen);
yylen = 0;
YY_STACK_PRINT (yyss, yyssp);
yystate = *yyssp;
goto yyerrlab1;
/*-------------------------------------------------------------.
| yyerrlab1 -- common code for both syntax error and YYERROR. |
`-------------------------------------------------------------*/
yyerrlab1:
yyerrstatus = 3; /* Each real token shifted decrements this. */
for (;;)
{
yyn = yypact[yystate];
if (!yypact_value_is_default (yyn))
{
yyn += YYTERROR;
if (0 <= yyn && yyn <= YYLAST && yycheck[yyn] == YYTERROR)
{
yyn = yytable[yyn];
if (0 < yyn)
break;
}
}
/* Pop the current state because it cannot handle the error token. */
if (yyssp == yyss)
YYABORT;
yydestruct ("Error: popping",
yystos[yystate], yyvsp, yyscanner, lex_env);
YYPOPSTACK (1);
yystate = *yyssp;
YY_STACK_PRINT (yyss, yyssp);
}
YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN
*++yyvsp = yylval;
YY_IGNORE_MAYBE_UNINITIALIZED_END
/* Shift the error token. */
YY_SYMBOL_PRINT ("Shifting", yystos[yyn], yyvsp, yylsp);
yystate = yyn;
goto yynewstate;
/*-------------------------------------.
| yyacceptlab -- YYACCEPT comes here. |
`-------------------------------------*/
yyacceptlab:
yyresult = 0;
goto yyreturn;
/*-----------------------------------.
| yyabortlab -- YYABORT comes here. |
`-----------------------------------*/
yyabortlab:
yyresult = 1;
goto yyreturn;
#if !defined yyoverflow || YYERROR_VERBOSE
/*-------------------------------------------------.
| yyexhaustedlab -- memory exhaustion comes here. |
`-------------------------------------------------*/
yyexhaustedlab:
yyerror (yyscanner, lex_env, YY_("memory exhausted"));
yyresult = 2;
/* Fall through. */
#endif
yyreturn:
if (yychar != YYEMPTY)
{
/* Make sure we have latest lookahead translation. See comments at
user semantic actions for why this is necessary. */
yytoken = YYTRANSLATE (yychar);
yydestruct ("Cleanup: discarding lookahead",
yytoken, &yylval, yyscanner, lex_env);
}
/* Do not reclaim the symbols of the rule whose action triggered
this YYABORT or YYACCEPT. */
YYPOPSTACK (yylen);
YY_STACK_PRINT (yyss, yyssp);
while (yyssp != yyss)
{
yydestruct ("Cleanup: popping",
yystos[*yyssp], yyvsp, yyscanner, lex_env);
YYPOPSTACK (1);
}
#ifndef yyoverflow
if (yyss != yyssa)
YYSTACK_FREE (yyss);
#endif
#if YYERROR_VERBOSE
if (yymsg != yymsgbuf)
YYSTACK_FREE (yymsg);
#endif
return yyresult;
}
|
yyparse (void *yyscanner, RE_LEX_ENVIRONMENT *lex_env)
{
/* The lookahead symbol. */
int yychar;
/* The semantic value of the lookahead symbol. */
/* Default value used for initialization, for pacifying older GCCs
or non-GCC compilers. */
YY_INITIAL_VALUE (static YYSTYPE yyval_default;)
YYSTYPE yylval YY_INITIAL_VALUE (= yyval_default);
/* Number of syntax errors so far. */
int yynerrs;
int yystate;
/* Number of tokens to shift before error messages enabled. */
int yyerrstatus;
/* The stacks and their tools:
'yyss': related to states.
'yyvs': related to semantic values.
Refer to the stacks through separate pointers, to allow yyoverflow
to reallocate them elsewhere. */
/* The state stack. */
yytype_int16 yyssa[YYINITDEPTH];
yytype_int16 *yyss;
yytype_int16 *yyssp;
/* The semantic value stack. */
YYSTYPE yyvsa[YYINITDEPTH];
YYSTYPE *yyvs;
YYSTYPE *yyvsp;
YYSIZE_T yystacksize;
int yyn;
int yyresult;
/* Lookahead token as an internal (translated) token number. */
int yytoken = 0;
/* The variables used to return semantic value and location from the
action routines. */
YYSTYPE yyval;
#if YYERROR_VERBOSE
/* Buffer for error messages, and its allocated size. */
char yymsgbuf[128];
char *yymsg = yymsgbuf;
YYSIZE_T yymsg_alloc = sizeof yymsgbuf;
#endif
#define YYPOPSTACK(N) (yyvsp -= (N), yyssp -= (N))
/* The number of symbols on the RHS of the reduced rule.
Keep to zero when no symbol should be popped. */
int yylen = 0;
yyssp = yyss = yyssa;
yyvsp = yyvs = yyvsa;
yystacksize = YYINITDEPTH;
YYDPRINTF ((stderr, "Starting parse\n"));
yystate = 0;
yyerrstatus = 0;
yynerrs = 0;
yychar = YYEMPTY; /* Cause a token to be read. */
goto yysetstate;
/*------------------------------------------------------------.
| yynewstate -- Push a new state, which is found in yystate. |
`------------------------------------------------------------*/
yynewstate:
/* In all cases, when you get here, the value and location stacks
have just been pushed. So pushing a state here evens the stacks. */
yyssp++;
yysetstate:
*yyssp = yystate;
if (yyss + yystacksize - 1 <= yyssp)
{
/* Get the current used size of the three stacks, in elements. */
YYSIZE_T yysize = yyssp - yyss + 1;
#ifdef yyoverflow
{
/* Give user a chance to reallocate the stack. Use copies of
these so that the &'s don't force the real ones into
memory. */
YYSTYPE *yyvs1 = yyvs;
yytype_int16 *yyss1 = yyss;
/* Each stack pointer address is followed by the size of the
data in use in that stack, in bytes. This used to be a
conditional around just the two extra args, but that might
be undefined if yyoverflow is a macro. */
yyoverflow (YY_("memory exhausted"),
&yyss1, yysize * sizeof (*yyssp),
&yyvs1, yysize * sizeof (*yyvsp),
&yystacksize);
yyss = yyss1;
yyvs = yyvs1;
}
#else /* no yyoverflow */
# ifndef YYSTACK_RELOCATE
goto yyexhaustedlab;
# else
/* Extend the stack our own way. */
if (YYMAXDEPTH <= yystacksize)
goto yyexhaustedlab;
yystacksize *= 2;
if (YYMAXDEPTH < yystacksize)
yystacksize = YYMAXDEPTH;
{
yytype_int16 *yyss1 = yyss;
union yyalloc *yyptr =
(union yyalloc *) YYSTACK_ALLOC (YYSTACK_BYTES (yystacksize));
if (! yyptr)
goto yyexhaustedlab;
YYSTACK_RELOCATE (yyss_alloc, yyss);
YYSTACK_RELOCATE (yyvs_alloc, yyvs);
# undef YYSTACK_RELOCATE
if (yyss1 != yyssa)
YYSTACK_FREE (yyss1);
}
# endif
#endif /* no yyoverflow */
yyssp = yyss + yysize - 1;
yyvsp = yyvs + yysize - 1;
YYDPRINTF ((stderr, "Stack size increased to %lu\n",
(unsigned long int) yystacksize));
if (yyss + yystacksize - 1 <= yyssp)
YYABORT;
}
YYDPRINTF ((stderr, "Entering state %d\n", yystate));
if (yystate == YYFINAL)
YYACCEPT;
goto yybackup;
/*-----------.
| yybackup. |
`-----------*/
yybackup:
/* Do appropriate processing given the current state. Read a
lookahead token if we need one and don't already have one. */
/* First try to decide what to do without reference to lookahead token. */
yyn = yypact[yystate];
if (yypact_value_is_default (yyn))
goto yydefault;
/* Not known => get a lookahead token if don't already have one. */
/* YYCHAR is either YYEMPTY or YYEOF or a valid lookahead symbol. */
if (yychar == YYEMPTY)
{
YYDPRINTF ((stderr, "Reading a token: "));
yychar = yylex (&yylval, yyscanner, lex_env);
}
if (yychar <= YYEOF)
{
yychar = yytoken = YYEOF;
YYDPRINTF ((stderr, "Now at end of input.\n"));
}
else
{
yytoken = YYTRANSLATE (yychar);
YY_SYMBOL_PRINT ("Next token is", yytoken, &yylval, &yylloc);
}
/* If the proper action on seeing token YYTOKEN is to reduce or to
detect an error, take that action. */
yyn += yytoken;
if (yyn < 0 || YYLAST < yyn || yycheck[yyn] != yytoken)
goto yydefault;
yyn = yytable[yyn];
if (yyn <= 0)
{
if (yytable_value_is_error (yyn))
goto yyerrlab;
yyn = -yyn;
goto yyreduce;
}
/* Count tokens shifted since error; after three, turn off error
status. */
if (yyerrstatus)
yyerrstatus--;
/* Shift the lookahead token. */
YY_SYMBOL_PRINT ("Shifting", yytoken, &yylval, &yylloc);
/* Discard the shifted token. */
yychar = YYEMPTY;
yystate = yyn;
YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN
*++yyvsp = yylval;
YY_IGNORE_MAYBE_UNINITIALIZED_END
goto yynewstate;
/*-----------------------------------------------------------.
| yydefault -- do the default action for the current state. |
`-----------------------------------------------------------*/
yydefault:
yyn = yydefact[yystate];
if (yyn == 0)
goto yyerrlab;
goto yyreduce;
/*-----------------------------.
| yyreduce -- Do a reduction. |
`-----------------------------*/
yyreduce:
/* yyn is the number of a rule to reduce with. */
yylen = yyr2[yyn];
/* If YYLEN is nonzero, implement the default value of the action:
'$$ = $1'.
Otherwise, the following line sets YYVAL to garbage.
This behavior is undocumented and Bison
users should not rely upon it. Assigning to YYVAL
unconditionally makes the parser a bit smaller, and it avoids a
GCC warning that YYVAL may be used uninitialized. */
yyval = yyvsp[1-yylen];
YY_REDUCE_PRINT (yyn);
switch (yyn)
{
case 2:
#line 105 "re_grammar.y" /* yacc.c:1646 */
{
RE_AST* re_ast = yyget_extra(yyscanner);
re_ast->root_node = (yyvsp[0].re_node);
}
#line 1340 "re_grammar.c" /* yacc.c:1646 */
break;
case 4:
#line 114 "re_grammar.y" /* yacc.c:1646 */
{
(yyval.re_node) = (yyvsp[0].re_node);
}
#line 1348 "re_grammar.c" /* yacc.c:1646 */
break;
case 5:
#line 118 "re_grammar.y" /* yacc.c:1646 */
{
mark_as_not_fast_regexp();
(yyval.re_node) = yr_re_node_create(RE_NODE_ALT, (yyvsp[-2].re_node), (yyvsp[0].re_node));
DESTROY_NODE_IF((yyval.re_node) == NULL, (yyvsp[-2].re_node));
DESTROY_NODE_IF((yyval.re_node) == NULL, (yyvsp[0].re_node));
ERROR_IF((yyval.re_node) == NULL, ERROR_INSUFFICIENT_MEMORY);
}
#line 1363 "re_grammar.c" /* yacc.c:1646 */
break;
case 6:
#line 129 "re_grammar.y" /* yacc.c:1646 */
{
RE_NODE* node;
mark_as_not_fast_regexp();
node = yr_re_node_create(RE_NODE_EMPTY, NULL, NULL);
DESTROY_NODE_IF((yyval.re_node) == NULL, (yyvsp[-1].re_node));
ERROR_IF(node == NULL, ERROR_INSUFFICIENT_MEMORY);
(yyval.re_node) = yr_re_node_create(RE_NODE_ALT, (yyvsp[-1].re_node), node);
ERROR_IF((yyval.re_node) == NULL, ERROR_INSUFFICIENT_MEMORY);
}
#line 1382 "re_grammar.c" /* yacc.c:1646 */
break;
case 7:
#line 147 "re_grammar.y" /* yacc.c:1646 */
{
(yyval.re_node) = (yyvsp[0].re_node);
}
#line 1390 "re_grammar.c" /* yacc.c:1646 */
break;
case 8:
#line 151 "re_grammar.y" /* yacc.c:1646 */
{
(yyval.re_node) = yr_re_node_create(RE_NODE_CONCAT, (yyvsp[-1].re_node), (yyvsp[0].re_node));
DESTROY_NODE_IF((yyval.re_node) == NULL, (yyvsp[-1].re_node));
DESTROY_NODE_IF((yyval.re_node) == NULL, (yyvsp[0].re_node));
ERROR_IF((yyval.re_node) == NULL, ERROR_INSUFFICIENT_MEMORY);
}
#line 1402 "re_grammar.c" /* yacc.c:1646 */
break;
case 9:
#line 162 "re_grammar.y" /* yacc.c:1646 */
{
RE_AST* re_ast;
mark_as_not_fast_regexp();
re_ast = yyget_extra(yyscanner);
re_ast->flags |= RE_FLAGS_GREEDY;
(yyval.re_node) = yr_re_node_create(RE_NODE_STAR, (yyvsp[-1].re_node), NULL);
DESTROY_NODE_IF((yyval.re_node) == NULL, (yyvsp[-1].re_node));
ERROR_IF((yyval.re_node) == NULL, ERROR_INSUFFICIENT_MEMORY);
}
#line 1420 "re_grammar.c" /* yacc.c:1646 */
break;
case 10:
#line 176 "re_grammar.y" /* yacc.c:1646 */
{
RE_AST* re_ast;
mark_as_not_fast_regexp();
re_ast = yyget_extra(yyscanner);
re_ast->flags |= RE_FLAGS_UNGREEDY;
(yyval.re_node) = yr_re_node_create(RE_NODE_STAR, (yyvsp[-2].re_node), NULL);
DESTROY_NODE_IF((yyval.re_node) == NULL, (yyvsp[-2].re_node));
ERROR_IF((yyval.re_node) == NULL, ERROR_INSUFFICIENT_MEMORY);
(yyval.re_node)->greedy = FALSE;
}
#line 1440 "re_grammar.c" /* yacc.c:1646 */
break;
case 11:
#line 192 "re_grammar.y" /* yacc.c:1646 */
{
RE_AST* re_ast;
mark_as_not_fast_regexp();
re_ast = yyget_extra(yyscanner);
re_ast->flags |= RE_FLAGS_GREEDY;
(yyval.re_node) = yr_re_node_create(RE_NODE_PLUS, (yyvsp[-1].re_node), NULL);
DESTROY_NODE_IF((yyval.re_node) == NULL, (yyvsp[-1].re_node));
ERROR_IF((yyval.re_node) == NULL, ERROR_INSUFFICIENT_MEMORY);
}
#line 1458 "re_grammar.c" /* yacc.c:1646 */
break;
case 12:
#line 206 "re_grammar.y" /* yacc.c:1646 */
{
RE_AST* re_ast;
mark_as_not_fast_regexp();
re_ast = yyget_extra(yyscanner);
re_ast->flags |= RE_FLAGS_UNGREEDY;
(yyval.re_node) = yr_re_node_create(RE_NODE_PLUS, (yyvsp[-2].re_node), NULL);
DESTROY_NODE_IF((yyval.re_node) == NULL, (yyvsp[-2].re_node));
ERROR_IF((yyval.re_node) == NULL, ERROR_INSUFFICIENT_MEMORY);
(yyval.re_node)->greedy = FALSE;
}
#line 1478 "re_grammar.c" /* yacc.c:1646 */
break;
case 13:
#line 222 "re_grammar.y" /* yacc.c:1646 */
{
RE_AST* re_ast = yyget_extra(yyscanner);
re_ast->flags |= RE_FLAGS_GREEDY;
if ((yyvsp[-1].re_node)->type == RE_NODE_ANY)
{
(yyval.re_node) = yr_re_node_create(RE_NODE_RANGE_ANY, NULL, NULL);
DESTROY_NODE_IF(TRUE, (yyvsp[-1].re_node));
}
else
{
mark_as_not_fast_regexp();
(yyval.re_node) = yr_re_node_create(RE_NODE_RANGE, (yyvsp[-1].re_node), NULL);
DESTROY_NODE_IF((yyval.re_node) == NULL, (yyvsp[-1].re_node));
}
DESTROY_NODE_IF((yyval.re_node) == NULL, (yyvsp[-1].re_node));
ERROR_IF((yyval.re_node) == NULL, ERROR_INSUFFICIENT_MEMORY);
(yyval.re_node)->start = 0;
(yyval.re_node)->end = 1;
}
#line 1505 "re_grammar.c" /* yacc.c:1646 */
break;
case 14:
#line 245 "re_grammar.y" /* yacc.c:1646 */
{
RE_AST* re_ast = yyget_extra(yyscanner);
re_ast->flags |= RE_FLAGS_UNGREEDY;
if ((yyvsp[-2].re_node)->type == RE_NODE_ANY)
{
(yyval.re_node) = yr_re_node_create(RE_NODE_RANGE_ANY, NULL, NULL);
DESTROY_NODE_IF(TRUE, (yyvsp[-2].re_node));
}
else
{
mark_as_not_fast_regexp();
(yyval.re_node) = yr_re_node_create(RE_NODE_RANGE, (yyvsp[-2].re_node), NULL);
DESTROY_NODE_IF((yyval.re_node) == NULL, (yyvsp[-2].re_node));
}
DESTROY_NODE_IF((yyval.re_node) == NULL, (yyvsp[-2].re_node));
ERROR_IF((yyval.re_node) == NULL, ERROR_INSUFFICIENT_MEMORY);
(yyval.re_node)->start = 0;
(yyval.re_node)->end = 1;
(yyval.re_node)->greedy = FALSE;
}
#line 1533 "re_grammar.c" /* yacc.c:1646 */
break;
case 15:
#line 269 "re_grammar.y" /* yacc.c:1646 */
{
RE_AST* re_ast = yyget_extra(yyscanner);
re_ast->flags |= RE_FLAGS_GREEDY;
if ((yyvsp[-1].re_node)->type == RE_NODE_ANY)
{
(yyval.re_node) = yr_re_node_create(RE_NODE_RANGE_ANY, NULL, NULL);
DESTROY_NODE_IF(TRUE, (yyvsp[-1].re_node));
}
else
{
mark_as_not_fast_regexp();
(yyval.re_node) = yr_re_node_create(RE_NODE_RANGE, (yyvsp[-1].re_node), NULL);
DESTROY_NODE_IF((yyval.re_node) == NULL, (yyvsp[-1].re_node));
}
ERROR_IF((yyval.re_node) == NULL, ERROR_INSUFFICIENT_MEMORY);
(yyval.re_node)->start = (yyvsp[0].range) & 0xFFFF;;
(yyval.re_node)->end = (yyvsp[0].range) >> 16;;
}
#line 1559 "re_grammar.c" /* yacc.c:1646 */
break;
case 16:
#line 291 "re_grammar.y" /* yacc.c:1646 */
{
RE_AST* re_ast = yyget_extra(yyscanner);
re_ast->flags |= RE_FLAGS_UNGREEDY;
if ((yyvsp[-2].re_node)->type == RE_NODE_ANY)
{
(yyval.re_node) = yr_re_node_create(RE_NODE_RANGE_ANY, NULL, NULL);
DESTROY_NODE_IF(TRUE, (yyvsp[-2].re_node));
}
else
{
mark_as_not_fast_regexp();
(yyval.re_node) = yr_re_node_create(RE_NODE_RANGE, (yyvsp[-2].re_node), NULL);
DESTROY_NODE_IF((yyval.re_node) == NULL, (yyvsp[-2].re_node));
}
ERROR_IF((yyval.re_node) == NULL, ERROR_INSUFFICIENT_MEMORY);
(yyval.re_node)->start = (yyvsp[-1].range) & 0xFFFF;;
(yyval.re_node)->end = (yyvsp[-1].range) >> 16;;
(yyval.re_node)->greedy = FALSE;
}
#line 1586 "re_grammar.c" /* yacc.c:1646 */
break;
case 17:
#line 314 "re_grammar.y" /* yacc.c:1646 */
{
(yyval.re_node) = (yyvsp[0].re_node);
}
#line 1594 "re_grammar.c" /* yacc.c:1646 */
break;
case 18:
#line 318 "re_grammar.y" /* yacc.c:1646 */
{
(yyval.re_node) = yr_re_node_create(RE_NODE_WORD_BOUNDARY, NULL, NULL);
ERROR_IF((yyval.re_node) == NULL, ERROR_INSUFFICIENT_MEMORY);
}
#line 1604 "re_grammar.c" /* yacc.c:1646 */
break;
case 19:
#line 324 "re_grammar.y" /* yacc.c:1646 */
{
(yyval.re_node) = yr_re_node_create(RE_NODE_NON_WORD_BOUNDARY, NULL, NULL);
ERROR_IF((yyval.re_node) == NULL, ERROR_INSUFFICIENT_MEMORY);
}
#line 1614 "re_grammar.c" /* yacc.c:1646 */
break;
case 20:
#line 330 "re_grammar.y" /* yacc.c:1646 */
{
(yyval.re_node) = yr_re_node_create(RE_NODE_ANCHOR_START, NULL, NULL);
ERROR_IF((yyval.re_node) == NULL, ERROR_INSUFFICIENT_MEMORY);
}
#line 1624 "re_grammar.c" /* yacc.c:1646 */
break;
case 21:
#line 336 "re_grammar.y" /* yacc.c:1646 */
{
(yyval.re_node) = yr_re_node_create(RE_NODE_ANCHOR_END, NULL, NULL);
ERROR_IF((yyval.re_node) == NULL, ERROR_INSUFFICIENT_MEMORY);
}
#line 1634 "re_grammar.c" /* yacc.c:1646 */
break;
case 22:
#line 345 "re_grammar.y" /* yacc.c:1646 */
{
(yyval.re_node) = (yyvsp[-1].re_node);
}
#line 1642 "re_grammar.c" /* yacc.c:1646 */
break;
case 23:
#line 349 "re_grammar.y" /* yacc.c:1646 */
{
(yyval.re_node) = yr_re_node_create(RE_NODE_ANY, NULL, NULL);
ERROR_IF((yyval.re_node) == NULL, ERROR_INSUFFICIENT_MEMORY);
}
#line 1652 "re_grammar.c" /* yacc.c:1646 */
break;
case 24:
#line 355 "re_grammar.y" /* yacc.c:1646 */
{
(yyval.re_node) = yr_re_node_create(RE_NODE_LITERAL, NULL, NULL);
ERROR_IF((yyval.re_node) == NULL, ERROR_INSUFFICIENT_MEMORY);
(yyval.re_node)->value = (yyvsp[0].integer);
}
#line 1664 "re_grammar.c" /* yacc.c:1646 */
break;
case 25:
#line 363 "re_grammar.y" /* yacc.c:1646 */
{
(yyval.re_node) = yr_re_node_create(RE_NODE_WORD_CHAR, NULL, NULL);
ERROR_IF((yyval.re_node) == NULL, ERROR_INSUFFICIENT_MEMORY);
}
#line 1674 "re_grammar.c" /* yacc.c:1646 */
break;
case 26:
#line 369 "re_grammar.y" /* yacc.c:1646 */
{
(yyval.re_node) = yr_re_node_create(RE_NODE_NON_WORD_CHAR, NULL, NULL);
ERROR_IF((yyval.re_node) == NULL, ERROR_INSUFFICIENT_MEMORY);
}
#line 1684 "re_grammar.c" /* yacc.c:1646 */
break;
case 27:
#line 375 "re_grammar.y" /* yacc.c:1646 */
{
(yyval.re_node) = yr_re_node_create(RE_NODE_SPACE, NULL, NULL);
ERROR_IF((yyval.re_node) == NULL, ERROR_INSUFFICIENT_MEMORY);
}
#line 1694 "re_grammar.c" /* yacc.c:1646 */
break;
case 28:
#line 381 "re_grammar.y" /* yacc.c:1646 */
{
(yyval.re_node) = yr_re_node_create(RE_NODE_NON_SPACE, NULL, NULL);
ERROR_IF((yyval.re_node) == NULL, ERROR_INSUFFICIENT_MEMORY);
}
#line 1704 "re_grammar.c" /* yacc.c:1646 */
break;
case 29:
#line 387 "re_grammar.y" /* yacc.c:1646 */
{
(yyval.re_node) = yr_re_node_create(RE_NODE_DIGIT, NULL, NULL);
ERROR_IF((yyval.re_node) == NULL, ERROR_INSUFFICIENT_MEMORY);
}
#line 1714 "re_grammar.c" /* yacc.c:1646 */
break;
case 30:
#line 393 "re_grammar.y" /* yacc.c:1646 */
{
(yyval.re_node) = yr_re_node_create(RE_NODE_NON_DIGIT, NULL, NULL);
ERROR_IF((yyval.re_node) == NULL, ERROR_INSUFFICIENT_MEMORY);
}
#line 1724 "re_grammar.c" /* yacc.c:1646 */
break;
case 31:
#line 399 "re_grammar.y" /* yacc.c:1646 */
{
(yyval.re_node) = yr_re_node_create(RE_NODE_CLASS, NULL, NULL);
ERROR_IF((yyval.re_node) == NULL, ERROR_INSUFFICIENT_MEMORY);
(yyval.re_node)->class_vector = (yyvsp[0].class_vector);
}
#line 1736 "re_grammar.c" /* yacc.c:1646 */
break;
#line 1740 "re_grammar.c" /* yacc.c:1646 */
default: break;
}
/* User semantic actions sometimes alter yychar, and that requires
that yytoken be updated with the new translation. We take the
approach of translating immediately before every use of yytoken.
One alternative is translating here after every semantic action,
but that translation would be missed if the semantic action invokes
YYABORT, YYACCEPT, or YYERROR immediately after altering yychar or
if it invokes YYBACKUP. In the case of YYABORT or YYACCEPT, an
incorrect destructor might then be invoked immediately. In the
case of YYERROR or YYBACKUP, subsequent parser actions might lead
to an incorrect destructor call or verbose syntax error message
before the lookahead is translated. */
YY_SYMBOL_PRINT ("-> $$ =", yyr1[yyn], &yyval, &yyloc);
YYPOPSTACK (yylen);
yylen = 0;
YY_STACK_PRINT (yyss, yyssp);
*++yyvsp = yyval;
/* Now 'shift' the result of the reduction. Determine what state
that goes to, based on the state we popped back to and the rule
number reduced by. */
yyn = yyr1[yyn];
yystate = yypgoto[yyn - YYNTOKENS] + *yyssp;
if (0 <= yystate && yystate <= YYLAST && yycheck[yystate] == *yyssp)
yystate = yytable[yystate];
else
yystate = yydefgoto[yyn - YYNTOKENS];
goto yynewstate;
/*--------------------------------------.
| yyerrlab -- here on detecting error. |
`--------------------------------------*/
yyerrlab:
/* Make sure we have latest lookahead translation. See comments at
user semantic actions for why this is necessary. */
yytoken = yychar == YYEMPTY ? YYEMPTY : YYTRANSLATE (yychar);
/* If not already recovering from an error, report this error. */
if (!yyerrstatus)
{
++yynerrs;
#if ! YYERROR_VERBOSE
yyerror (yyscanner, lex_env, YY_("syntax error"));
#else
# define YYSYNTAX_ERROR yysyntax_error (&yymsg_alloc, &yymsg, \
yyssp, yytoken)
{
char const *yymsgp = YY_("syntax error");
int yysyntax_error_status;
yysyntax_error_status = YYSYNTAX_ERROR;
if (yysyntax_error_status == 0)
yymsgp = yymsg;
else if (yysyntax_error_status == 1)
{
if (yymsg != yymsgbuf)
YYSTACK_FREE (yymsg);
yymsg = (char *) YYSTACK_ALLOC (yymsg_alloc);
if (!yymsg)
{
yymsg = yymsgbuf;
yymsg_alloc = sizeof yymsgbuf;
yysyntax_error_status = 2;
}
else
{
yysyntax_error_status = YYSYNTAX_ERROR;
yymsgp = yymsg;
}
}
yyerror (yyscanner, lex_env, yymsgp);
if (yysyntax_error_status == 2)
goto yyexhaustedlab;
}
# undef YYSYNTAX_ERROR
#endif
}
if (yyerrstatus == 3)
{
/* If just tried and failed to reuse lookahead token after an
error, discard it. */
if (yychar <= YYEOF)
{
/* Return failure if at end of input. */
if (yychar == YYEOF)
YYABORT;
}
else
{
yydestruct ("Error: discarding",
yytoken, &yylval, yyscanner, lex_env);
yychar = YYEMPTY;
}
}
/* Else will try to reuse lookahead token after shifting the error
token. */
goto yyerrlab1;
/*---------------------------------------------------.
| yyerrorlab -- error raised explicitly by YYERROR. |
`---------------------------------------------------*/
yyerrorlab:
/* Pacify compilers like GCC when the user code never invokes
YYERROR and the label yyerrorlab therefore never appears in user
code. */
if (/*CONSTCOND*/ 0)
goto yyerrorlab;
/* Do not reclaim the symbols of the rule whose action triggered
this YYERROR. */
YYPOPSTACK (yylen);
yylen = 0;
YY_STACK_PRINT (yyss, yyssp);
yystate = *yyssp;
goto yyerrlab1;
/*-------------------------------------------------------------.
| yyerrlab1 -- common code for both syntax error and YYERROR. |
`-------------------------------------------------------------*/
yyerrlab1:
yyerrstatus = 3; /* Each real token shifted decrements this. */
for (;;)
{
yyn = yypact[yystate];
if (!yypact_value_is_default (yyn))
{
yyn += YYTERROR;
if (0 <= yyn && yyn <= YYLAST && yycheck[yyn] == YYTERROR)
{
yyn = yytable[yyn];
if (0 < yyn)
break;
}
}
/* Pop the current state because it cannot handle the error token. */
if (yyssp == yyss)
YYABORT;
yydestruct ("Error: popping",
yystos[yystate], yyvsp, yyscanner, lex_env);
YYPOPSTACK (1);
yystate = *yyssp;
YY_STACK_PRINT (yyss, yyssp);
}
YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN
*++yyvsp = yylval;
YY_IGNORE_MAYBE_UNINITIALIZED_END
/* Shift the error token. */
YY_SYMBOL_PRINT ("Shifting", yystos[yyn], yyvsp, yylsp);
yystate = yyn;
goto yynewstate;
/*-------------------------------------.
| yyacceptlab -- YYACCEPT comes here. |
`-------------------------------------*/
yyacceptlab:
yyresult = 0;
goto yyreturn;
/*-----------------------------------.
| yyabortlab -- YYABORT comes here. |
`-----------------------------------*/
yyabortlab:
yyresult = 1;
goto yyreturn;
#if !defined yyoverflow || YYERROR_VERBOSE
/*-------------------------------------------------.
| yyexhaustedlab -- memory exhaustion comes here. |
`-------------------------------------------------*/
yyexhaustedlab:
yyerror (yyscanner, lex_env, YY_("memory exhausted"));
yyresult = 2;
/* Fall through. */
#endif
yyreturn:
if (yychar != YYEMPTY)
{
/* Make sure we have latest lookahead translation. See comments at
user semantic actions for why this is necessary. */
yytoken = YYTRANSLATE (yychar);
yydestruct ("Cleanup: discarding lookahead",
yytoken, &yylval, yyscanner, lex_env);
}
/* Do not reclaim the symbols of the rule whose action triggered
this YYABORT or YYACCEPT. */
YYPOPSTACK (yylen);
YY_STACK_PRINT (yyss, yyssp);
while (yyssp != yyss)
{
yydestruct ("Cleanup: popping",
yystos[*yyssp], yyvsp, yyscanner, lex_env);
YYPOPSTACK (1);
}
#ifndef yyoverflow
if (yyss != yyssa)
YYSTACK_FREE (yyss);
#endif
#if YYERROR_VERBOSE
if (yymsg != yymsgbuf)
YYSTACK_FREE (yymsg);
#endif
return yyresult;
}
|
C
|
yara
| 1 |
CVE-2018-6096
|
https://www.cvedetails.com/cve/CVE-2018-6096/
| null |
https://github.com/chromium/chromium/commit/36f801fdbec07d116a6f4f07bb363f10897d6a51
|
36f801fdbec07d116a6f4f07bb363f10897d6a51
|
If a page calls |window.focus()|, kick it out of fullscreen.
BUG=776418, 800056
Change-Id: I1880fe600e4814c073f247c43b1c1ac80c8fc017
Reviewed-on: https://chromium-review.googlesource.com/852378
Reviewed-by: Nasko Oskov <[email protected]>
Reviewed-by: Philip Jägenstedt <[email protected]>
Commit-Queue: Avi Drissman <[email protected]>
Cr-Commit-Position: refs/heads/master@{#533790}
|
void RenderViewImpl::OnUpdateWebPreferences(const WebPreferences& prefs) {
webkit_preferences_ = prefs;
ApplyWebPreferencesInternal(webkit_preferences_, webview(), compositor_deps_);
}
|
void RenderViewImpl::OnUpdateWebPreferences(const WebPreferences& prefs) {
webkit_preferences_ = prefs;
ApplyWebPreferencesInternal(webkit_preferences_, webview(), compositor_deps_);
}
|
C
|
Chrome
| 0 |
CVE-2018-18344
|
https://www.cvedetails.com/cve/CVE-2018-18344/
|
CWE-20
|
https://github.com/chromium/chromium/commit/c71d8045ce0592cf3f4290744ab57b23c1d1b4c6
|
c71d8045ce0592cf3f4290744ab57b23c1d1b4c6
|
[DevTools] Do not allow Page.setDownloadBehavior for extensions
Bug: 866426
Change-Id: I71b672978e1a8ec779ede49da16b21198567d3a4
Reviewed-on: https://chromium-review.googlesource.com/c/1270007
Commit-Queue: Dmitry Gozman <[email protected]>
Reviewed-by: Devlin <[email protected]>
Cr-Commit-Position: refs/heads/master@{#598004}
|
void DebuggerFunction::FormatErrorMessage(const std::string& format) {
if (debuggee_.tab_id)
error_ = ErrorUtils::FormatErrorMessage(
format, debugger_api_constants::kTabTargetType,
base::IntToString(*debuggee_.tab_id));
else if (debuggee_.extension_id)
error_ = ErrorUtils::FormatErrorMessage(
format, debugger_api_constants::kBackgroundPageTargetType,
*debuggee_.extension_id);
else
error_ = ErrorUtils::FormatErrorMessage(
format, debugger_api_constants::kOpaqueTargetType,
*debuggee_.target_id);
}
|
void DebuggerFunction::FormatErrorMessage(const std::string& format) {
if (debuggee_.tab_id)
error_ = ErrorUtils::FormatErrorMessage(
format, debugger_api_constants::kTabTargetType,
base::IntToString(*debuggee_.tab_id));
else if (debuggee_.extension_id)
error_ = ErrorUtils::FormatErrorMessage(
format, debugger_api_constants::kBackgroundPageTargetType,
*debuggee_.extension_id);
else
error_ = ErrorUtils::FormatErrorMessage(
format, debugger_api_constants::kOpaqueTargetType,
*debuggee_.target_id);
}
|
C
|
Chrome
| 0 |
CVE-2017-8849
|
https://www.cvedetails.com/cve/CVE-2017-8849/
|
CWE-20
|
https://cgit.kde.org/smb4k.git/commit/?id=a90289b0962663bc1d247bbbd31b9e65b2ca000e
|
a90289b0962663bc1d247bbbd31b9e65b2ca000e
| null |
bool Smb4KGlobal::addWorkgroup(Smb4KWorkgroup *workgroup)
{
Q_ASSERT(workgroup);
bool added = false;
mutex.lock();
if (!findWorkgroup(workgroup->workgroupName()))
{
p->workgroupsList.append(workgroup);
added = true;
}
else
{
}
mutex.unlock();
return added;
}
|
bool Smb4KGlobal::addWorkgroup(Smb4KWorkgroup *workgroup)
{
Q_ASSERT(workgroup);
bool added = false;
mutex.lock();
if (!findWorkgroup(workgroup->workgroupName()))
{
p->workgroupsList.append(workgroup);
added = true;
}
else
{
}
mutex.unlock();
return added;
}
|
CPP
|
kde
| 0 |
CVE-2018-1065
|
https://www.cvedetails.com/cve/CVE-2018-1065/
|
CWE-476
|
https://github.com/torvalds/linux/commit/57ebd808a97d7c5b1e1afb937c2db22beba3c1f8
|
57ebd808a97d7c5b1e1afb937c2db22beba3c1f8
|
netfilter: add back stackpointer size checks
The rationale for removing the check is only correct for rulesets
generated by ip(6)tables.
In iptables, a jump can only occur to a user-defined chain, i.e.
because we size the stack based on number of user-defined chains we
cannot exceed stack size.
However, the underlying binary format has no such restriction,
and the validation step only ensures that the jump target is a
valid rule start point.
IOW, it's possible to build a rule blob that has no user-defined
chains but does contain a jump.
If this happens, no jump stack gets allocated, and the traversal
crashes when the jump tries to use the missing stack.
Fixes: 7814b6ec6d0d6 ("netfilter: xtables: don't save/restore jumpstack offset")
Reported-by: [email protected]
Signed-off-by: Florian Westphal <[email protected]>
Signed-off-by: Pablo Neira Ayuso <[email protected]>
|
static inline int check_entry_size_and_hooks(struct arpt_entry *e,
struct xt_table_info *newinfo,
const unsigned char *base,
const unsigned char *limit,
const unsigned int *hook_entries,
const unsigned int *underflows,
unsigned int valid_hooks)
{
unsigned int h;
int err;
if ((unsigned long)e % __alignof__(struct arpt_entry) != 0 ||
(unsigned char *)e + sizeof(struct arpt_entry) >= limit ||
(unsigned char *)e + e->next_offset > limit)
return -EINVAL;
if (e->next_offset
< sizeof(struct arpt_entry) + sizeof(struct xt_entry_target))
return -EINVAL;
if (!arp_checkentry(&e->arp))
return -EINVAL;
err = xt_check_entry_offsets(e, e->elems, e->target_offset,
e->next_offset);
if (err)
return err;
/* Check hooks & underflows */
for (h = 0; h < NF_ARP_NUMHOOKS; h++) {
if (!(valid_hooks & (1 << h)))
continue;
if ((unsigned char *)e - base == hook_entries[h])
newinfo->hook_entry[h] = hook_entries[h];
if ((unsigned char *)e - base == underflows[h]) {
if (!check_underflow(e))
return -EINVAL;
newinfo->underflow[h] = underflows[h];
}
}
/* Clear counters and comefrom */
e->counters = ((struct xt_counters) { 0, 0 });
e->comefrom = 0;
return 0;
}
|
static inline int check_entry_size_and_hooks(struct arpt_entry *e,
struct xt_table_info *newinfo,
const unsigned char *base,
const unsigned char *limit,
const unsigned int *hook_entries,
const unsigned int *underflows,
unsigned int valid_hooks)
{
unsigned int h;
int err;
if ((unsigned long)e % __alignof__(struct arpt_entry) != 0 ||
(unsigned char *)e + sizeof(struct arpt_entry) >= limit ||
(unsigned char *)e + e->next_offset > limit)
return -EINVAL;
if (e->next_offset
< sizeof(struct arpt_entry) + sizeof(struct xt_entry_target))
return -EINVAL;
if (!arp_checkentry(&e->arp))
return -EINVAL;
err = xt_check_entry_offsets(e, e->elems, e->target_offset,
e->next_offset);
if (err)
return err;
/* Check hooks & underflows */
for (h = 0; h < NF_ARP_NUMHOOKS; h++) {
if (!(valid_hooks & (1 << h)))
continue;
if ((unsigned char *)e - base == hook_entries[h])
newinfo->hook_entry[h] = hook_entries[h];
if ((unsigned char *)e - base == underflows[h]) {
if (!check_underflow(e))
return -EINVAL;
newinfo->underflow[h] = underflows[h];
}
}
/* Clear counters and comefrom */
e->counters = ((struct xt_counters) { 0, 0 });
e->comefrom = 0;
return 0;
}
|
C
|
linux
| 0 |
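The commit message pins the crash to a jump taken when no stack was ever sized for it. The sketch below is an illustrative model of the restored guard, not the exact kernel diff: bound-check the stack index before pushing a return point, and drop the packet instead of writing past the allocation.
#include <cstddef>
#include <cstdio>

// Illustrative model of a traversal jump stack; names are hypothetical.
struct JumpStack {
  const void** slots;
  size_t size;   // sized from the number of user-defined chains
  size_t idx;
};

// Returns false (caller should drop the packet) instead of writing
// past the allocation when the blob contains an unexpected jump.
bool push_return(JumpStack& js, const void* entry) {
  if (js.idx >= js.size)   // the check the commit adds back
    return false;
  js.slots[js.idx++] = entry;
  return true;
}

int main() {
  JumpStack js{nullptr, 0, 0};  // no user-defined chains -> size 0
  std::puts(push_return(js, &js) ? "pushed" : "dropped");  // "dropped"
}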
null | null | null |
https://github.com/chromium/chromium/commit/b9e2ecab97a8a7f3cce06951ab92a3eaef559206
|
b9e2ecab97a8a7f3cce06951ab92a3eaef559206
|
Do not discount a MANUAL_SUBFRAME load just because it involved
some redirects.
R=brettw
BUG=21353
TEST=none
Review URL: http://codereview.chromium.org/246073
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@27887 0039d316-1c4b-4281-b951-d872f2087c98
|
NavigationEntry* NavigationController::GetEntryWithPageID(
SiteInstance* instance, int32 page_id) const {
int index = GetEntryIndexWithPageID(instance, page_id);
return (index != -1) ? entries_[index].get() : NULL;
}
|
NavigationEntry* NavigationController::GetEntryWithPageID(
SiteInstance* instance, int32 page_id) const {
int index = GetEntryIndexWithPageID(instance, page_id);
return (index != -1) ? entries_[index].get() : NULL;
}
|
C
|
Chrome
| 0 |
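A reduced sketch of the classification point the commit message makes; the enum and function are hypothetical stand-ins, not the real NavigationController logic. The fix's point is that a redirect chain alone must not change how a user-initiated subframe load is classified.
#include <cstdio>

enum LoadType { MANUAL_SUBFRAME, AUTO_SUBFRAME };  // hypothetical

// Redirects are deliberately ignored: they must not demote a
// user-initiated subframe load to an automatic one.
LoadType ClassifySubframeLoad(bool user_initiated, int redirect_count) {
  (void)redirect_count;
  return user_initiated ? MANUAL_SUBFRAME : AUTO_SUBFRAME;
}

int main() {
  std::printf("%d\n", ClassifySubframeLoad(true, 3) == MANUAL_SUBFRAME);  // 1
}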
CVE-2016-10150
|
https://www.cvedetails.com/cve/CVE-2016-10150/
|
CWE-416
|
https://github.com/torvalds/linux/commit/a0f1d21c1ccb1da66629627a74059dd7f5ac9c61
|
a0f1d21c1ccb1da66629627a74059dd7f5ac9c61
|
KVM: use after free in kvm_ioctl_create_device()
We should move the ops->destroy(dev) after the list_del(&dev->vm_node)
so that we don't use "dev" after freeing it.
Fixes: a28ebea2adc4 ("KVM: Protect device ops->create and list_add with kvm->lock")
Signed-off-by: Dan Carpenter <[email protected]>
Reviewed-by: David Hildenbrand <[email protected]>
Signed-off-by: Radim Krčmář <[email protected]>
|
unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot,
gfn_t gfn)
{
return gfn_to_hva_many(slot, gfn, NULL);
}
|
unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot,
gfn_t gfn)
{
return gfn_to_hva_many(slot, gfn, NULL);
}
|
C
|
linux
| 0 |
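The commit message describes the bug as pure ordering: ops->destroy(dev) freed the device before list_del(&dev->vm_node) unlinked it, so the list removal touched freed memory. A simplified sketch of the corrected order under that description, with hypothetical types standing in for the KVM structures:
#include <list>
#include <mutex>

struct Device { /* ... */ };

std::mutex vm_lock;              // stands in for kvm->lock
std::list<Device*> vm_devices;   // stands in for the kvm device list

void destroy_device(Device* dev) {
  {
    std::lock_guard<std::mutex> g(vm_lock);
    vm_devices.remove(dev);  // unlink first, while dev is still valid
  }
  delete dev;                // free only after nothing references it
}

int main() {
  Device* d = new Device;
  vm_devices.push_back(d);
  destroy_device(d);
}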
CVE-2017-9994
|
https://www.cvedetails.com/cve/CVE-2017-9994/
|
CWE-119
|
https://github.com/FFmpeg/FFmpeg/commit/6b5d3fb26fb4be48e4966e4b1d97c2165538d4ef
|
6b5d3fb26fb4be48e4966e4b1d97c2165538d4ef
|
avcodec/webp: Always set pix_fmt
Fixes: out of array access
Fixes: 1434/clusterfuzz-testcase-minimized-6314998085189632
Fixes: 1435/clusterfuzz-testcase-minimized-6483783723253760
Found-by: continuous fuzzing process https://github.com/google/oss-fuzz/tree/master/targets/ffmpeg
Reviewed-by: "Ronald S. Bultje" <[email protected]>
Signed-off-by: Michael Niedermayer <[email protected]>
|
static av_cold int vp8_decode_init_thread_copy(AVCodecContext *avctx)
{
VP8Context *s = avctx->priv_data;
int ret;
s->avctx = avctx;
if ((ret = vp8_init_frames(s)) < 0) {
ff_vp8_decode_free(avctx);
return ret;
}
return 0;
}
|
static av_cold int vp8_decode_init_thread_copy(AVCodecContext *avctx)
{
VP8Context *s = avctx->priv_data;
int ret;
s->avctx = avctx;
if ((ret = vp8_init_frames(s)) < 0) {
ff_vp8_decode_free(avctx);
return ret;
}
return 0;
}
|
C
|
FFmpeg
| 0 |
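A hedged illustration of why "Always set pix_fmt" matters: downstream code indexes per-format plane layouts, so a decoder that leaves the format unset on some path hands later stages a bogus index. The snippet below is a generic model of the fix's shape, not FFmpeg's actual webp code; all names are assumptions.
#include <cstdio>

enum PixFmt { PIX_NONE = -1, PIX_YUV420 = 0, PIX_YUVA420 = 1 };

struct Ctx { PixFmt pix_fmt = PIX_NONE; bool has_alpha = false; };

// Model of the fix: choose the format unconditionally, on every decode
// path, instead of only when some earlier branch happened to run.
void set_format(Ctx& c) {
  c.pix_fmt = c.has_alpha ? PIX_YUVA420 : PIX_YUV420;
}

int main() {
  Ctx c;
  set_format(c);  // pix_fmt is never left at PIX_NONE
  std::printf("fmt=%d\n", c.pix_fmt);
}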
CVE-2017-6903
|
https://www.cvedetails.com/cve/CVE-2017-6903/
|
CWE-269
|
https://github.com/iortcw/iortcw/commit/11a83410153756ae350a82ed41b08d128ff7f998
|
11a83410153756ae350a82ed41b08d128ff7f998
|
All: Merge some file writing extension checks
|
char *Com_StringContains( char *str1, char *str2, int casesensitive ) {
int len, i, j;
len = strlen( str1 ) - strlen( str2 );
for ( i = 0; i <= len; i++, str1++ ) {
for ( j = 0; str2[j]; j++ ) {
if ( casesensitive ) {
if ( str1[j] != str2[j] ) {
break;
}
} else {
if ( toupper( str1[j] ) != toupper( str2[j] ) ) {
break;
}
}
}
if ( !str2[j] ) {
return str1;
}
}
return NULL;
}
|
char *Com_StringContains( char *str1, char *str2, int casesensitive ) {
int len, i, j;
len = strlen( str1 ) - strlen( str2 );
for ( i = 0; i <= len; i++, str1++ ) {
for ( j = 0; str2[j]; j++ ) {
if ( casesensitive ) {
if ( str1[j] != str2[j] ) {
break;
}
} else {
if ( toupper( str1[j] ) != toupper( str2[j] ) ) {
break;
}
}
}
if ( !str2[j] ) {
return str1;
}
}
return NULL;
}
|
C
|
OpenJK
| 0 |
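The one-line summary, "Merge some file writing extension checks", suggests consolidating scattered per-call-site tests into one helper. A hypothetical sketch of that consolidation, with an illustrative denylist that is not the project's exact set:
#include <cstring>
#include <cstdio>

// Hypothetical consolidated check: refuse to create files whose
// extension the engine would later execute or auto-load.
bool FS_AllowedWriteExtension(const char* name) {
  static const char* denied[] = {".dll", ".so", ".qvm", ".pk3"};
  const char* dot = std::strrchr(name, '.');
  if (!dot) return true;
  for (const char* ext : denied)
    if (std::strcmp(dot, ext) == 0) return false;
  return true;
}

int main() {
  std::printf("%d %d\n", FS_AllowedWriteExtension("demo.dm_84"),
              FS_AllowedWriteExtension("evil.dll"));  // 1 0
}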
CVE-2017-9949
|
https://www.cvedetails.com/cve/CVE-2017-9949/
|
CWE-787
|
https://github.com/radare/radare2/commit/796dd28aaa6b9fa76d99c42c4d5ff8b257cc2191
|
796dd28aaa6b9fa76d99c42c4d5ff8b257cc2191
|
Fix ext2 buffer overflow in r2_sbu_grub_memmove
|
static int _server_handle_qSupported(libgdbr_t *g) {
int ret;
char *buf;
if (!(buf = malloc (128))) {
return -1;
}
snprintf (buf, 127, "PacketSize=%x", (ut32) (g->read_max - 1));
if ((ret = handle_qSupported (g)) < 0) {
return -1;
}
ret = send_msg (g, buf);
free (buf);
return ret;
}
|
static int _server_handle_qSupported(libgdbr_t *g) {
int ret;
char *buf;
if (!(buf = malloc (128))) {
return -1;
}
snprintf (buf, 127, "PacketSize=%x", (ut32) (g->read_max - 1));
if ((ret = handle_qSupported (g)) < 0) {
return -1;
}
ret = send_msg (g, buf);
free (buf);
return ret;
}
|
C
|
radare2
| 0 |
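The commit title names a buffer overflow in a memmove shim. As a hedged sketch of the usual fix shape (names and sizes are hypothetical, not radare2's grub code): clamp the copy length to the destination's remaining capacity before moving bytes.
#include <cstring>
#include <cstdio>

// Illustrative bounded copy: the missing bound made explicit.
size_t bounded_memmove(void* dst, size_t dst_cap, const void* src, size_t n) {
  if (n > dst_cap) n = dst_cap;
  std::memmove(dst, src, n);
  return n;
}

int main() {
  char out[4];
  const char in[] = "0123456789";
  std::printf("copied=%zu\n", bounded_memmove(out, sizeof out, in, sizeof in));
}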
CVE-2019-13306
|
https://www.cvedetails.com/cve/CVE-2019-13306/
|
CWE-119
|
https://github.com/ImageMagick/ImageMagick6/commit/cb5ec7d98195aa74d5ed299b38eff2a68122f3fa
|
cb5ec7d98195aa74d5ed299b38eff2a68122f3fa
|
https://github.com/ImageMagick/ImageMagick/issues/1612
|
static MagickBooleanType WritePNMImage(const ImageInfo *image_info,Image *image)
{
char
buffer[MaxTextExtent],
format,
magick[MaxTextExtent];
const char
*value;
IndexPacket
index;
MagickBooleanType
status;
MagickOffsetType
scene;
QuantumAny
pixel;
QuantumInfo
*quantum_info;
QuantumType
quantum_type;
register unsigned char
*pixels,
*q;
size_t
extent,
imageListLength,
packet_size;
ssize_t
count,
y;
/*
Open output image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
status=OpenBlob(image_info,image,WriteBinaryBlobMode,&image->exception);
if (status == MagickFalse)
return(status);
scene=0;
imageListLength=GetImageListLength(image);
do
{
QuantumAny
max_value;
/*
Write PNM file header.
*/
max_value=GetQuantumRange(image->depth);
packet_size=3;
quantum_type=RGBQuantum;
(void) CopyMagickString(magick,image_info->magick,MaxTextExtent);
switch (magick[1])
{
case 'A':
case 'a':
{
format='7';
break;
}
case 'B':
case 'b':
{
format='4';
if (image_info->compression == NoCompression)
format='1';
break;
}
case 'F':
case 'f':
{
format='F';
if (SetImageGray(image,&image->exception) != MagickFalse)
format='f';
break;
}
case 'G':
case 'g':
{
format='5';
if (image_info->compression == NoCompression)
format='2';
break;
}
case 'N':
case 'n':
{
if ((image_info->type != TrueColorType) &&
(SetImageGray(image,&image->exception) != MagickFalse))
{
format='5';
if (image_info->compression == NoCompression)
format='2';
if (SetImageMonochrome(image,&image->exception) != MagickFalse)
{
format='4';
if (image_info->compression == NoCompression)
format='1';
}
break;
}
}
default:
{
format='6';
if (image_info->compression == NoCompression)
format='3';
break;
}
}
(void) FormatLocaleString(buffer,MaxTextExtent,"P%c\n",format);
(void) WriteBlobString(image,buffer);
value=GetImageProperty(image,"comment");
if (value != (const char *) NULL)
{
register const char
*p;
/*
Write comments to file.
*/
(void) WriteBlobByte(image,'#');
for (p=value; *p != '\0'; p++)
{
(void) WriteBlobByte(image,(unsigned char) *p);
if ((*p == '\n') || (*p == '\r'))
(void) WriteBlobByte(image,'#');
}
(void) WriteBlobByte(image,'\n');
}
if (format != '7')
{
(void) FormatLocaleString(buffer,MaxTextExtent,"%.20g %.20g\n",
(double) image->columns,(double) image->rows);
(void) WriteBlobString(image,buffer);
}
else
{
char
type[MaxTextExtent];
/*
PAM header.
*/
(void) FormatLocaleString(buffer,MaxTextExtent,
"WIDTH %.20g\nHEIGHT %.20g\n",(double) image->columns,(double)
image->rows);
(void) WriteBlobString(image,buffer);
quantum_type=GetQuantumType(image,&image->exception);
switch (quantum_type)
{
case CMYKQuantum:
case CMYKAQuantum:
{
packet_size=4;
(void) CopyMagickString(type,"CMYK",MaxTextExtent);
break;
}
case GrayQuantum:
case GrayAlphaQuantum:
{
packet_size=1;
(void) CopyMagickString(type,"GRAYSCALE",MaxTextExtent);
if (IdentifyImageMonochrome(image,&image->exception) != MagickFalse)
(void) CopyMagickString(type,"BLACKANDWHITE",MaxTextExtent);
break;
}
default:
{
quantum_type=RGBQuantum;
if (image->matte != MagickFalse)
quantum_type=RGBAQuantum;
packet_size=3;
(void) CopyMagickString(type,"RGB",MaxTextExtent);
break;
}
}
if (image->matte != MagickFalse)
{
packet_size++;
(void) ConcatenateMagickString(type,"_ALPHA",MaxTextExtent);
}
if (image->depth > 32)
image->depth=32;
(void) FormatLocaleString(buffer,MaxTextExtent,
"DEPTH %.20g\nMAXVAL %.20g\n",(double) packet_size,(double)
((MagickOffsetType) GetQuantumRange(image->depth)));
(void) WriteBlobString(image,buffer);
(void) FormatLocaleString(buffer,MaxTextExtent,"TUPLTYPE %s\nENDHDR\n",
type);
(void) WriteBlobString(image,buffer);
}
/*
Convert to PNM raster pixels.
*/
switch (format)
{
case '1':
{
unsigned char
pixels[2048];
/*
Convert image to a PBM image.
*/
(void) SetImageType(image,BilevelType);
q=pixels;
for (y=0; y < (ssize_t) image->rows; y++)
{
register const PixelPacket
*magick_restrict p;
register ssize_t
x;
p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception);
if (p == (const PixelPacket *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
*q++=(unsigned char) (GetPixelLuma(image,p) >= (QuantumRange/2.0) ?
'0' : '1');
*q++=' ';
if ((q-pixels+1) >= (ssize_t) sizeof(pixels))
{
*q++='\n';
(void) WriteBlob(image,q-pixels,pixels);
q=pixels;
}
p++;
}
*q++='\n';
(void) WriteBlob(image,q-pixels,pixels);
q=pixels;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
}
if (q != pixels)
{
*q++='\n';
(void) WriteBlob(image,q-pixels,pixels);
}
break;
}
case '2':
{
unsigned char
pixels[2048];
/*
Convert image to a PGM image.
*/
if (image->depth <= 8)
(void) WriteBlobString(image,"255\n");
else
if (image->depth <= 16)
(void) WriteBlobString(image,"65535\n");
else
(void) WriteBlobString(image,"4294967295\n");
q=pixels;
for (y=0; y < (ssize_t) image->rows; y++)
{
register const PixelPacket
*magick_restrict p;
register ssize_t
x;
p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception);
if (p == (const PixelPacket *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
index=ClampToQuantum(GetPixelLuma(image,p));
if (image->depth <= 8)
count=(ssize_t) FormatLocaleString(buffer,MaxTextExtent,"%u ",
ScaleQuantumToChar(index));
else
if (image->depth <= 16)
count=(ssize_t) FormatLocaleString(buffer,MaxTextExtent,"%u ",
ScaleQuantumToShort(index));
else
count=(ssize_t) FormatLocaleString(buffer,MaxTextExtent,"%u ",
ScaleQuantumToLong(index));
extent=(size_t) count;
(void) strncpy((char *) q,buffer,extent);
q+=extent;
if ((q-pixels+extent+2) >= sizeof(pixels))
{
*q++='\n';
(void) WriteBlob(image,q-pixels,pixels);
q=pixels;
}
p++;
}
*q++='\n';
(void) WriteBlob(image,q-pixels,pixels);
q=pixels;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
}
if (q != pixels)
{
*q++='\n';
(void) WriteBlob(image,q-pixels,pixels);
}
break;
}
case '3':
{
unsigned char
pixels[2048];
/*
Convert image to a PNM image.
*/
(void) TransformImageColorspace(image,sRGBColorspace);
if (image->depth <= 8)
(void) WriteBlobString(image,"255\n");
else
if (image->depth <= 16)
(void) WriteBlobString(image,"65535\n");
else
(void) WriteBlobString(image,"4294967295\n");
q=pixels;
for (y=0; y < (ssize_t) image->rows; y++)
{
register const PixelPacket
*magick_restrict p;
register ssize_t
x;
p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception);
if (p == (const PixelPacket *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
if (image->depth <= 8)
count=(ssize_t) FormatLocaleString(buffer,MaxTextExtent,
"%u %u %u ",ScaleQuantumToChar(GetPixelRed(p)),
ScaleQuantumToChar(GetPixelGreen(p)),
ScaleQuantumToChar(GetPixelBlue(p)));
else
if (image->depth <= 16)
count=(ssize_t) FormatLocaleString(buffer,MaxTextExtent,
"%u %u %u ",ScaleQuantumToShort(GetPixelRed(p)),
ScaleQuantumToShort(GetPixelGreen(p)),
ScaleQuantumToShort(GetPixelBlue(p)));
else
count=(ssize_t) FormatLocaleString(buffer,MaxTextExtent,
"%u %u %u ",ScaleQuantumToLong(GetPixelRed(p)),
ScaleQuantumToLong(GetPixelGreen(p)),
ScaleQuantumToLong(GetPixelBlue(p)));
extent=(size_t) count;
(void) strncpy((char *) q,buffer,extent);
q+=extent;
if ((q-pixels+extent+2) >= sizeof(pixels))
{
*q++='\n';
(void) WriteBlob(image,q-pixels,pixels);
q=pixels;
}
p++;
}
*q++='\n';
(void) WriteBlob(image,q-pixels,pixels);
q=pixels;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
}
if (q != pixels)
{
*q++='\n';
(void) WriteBlob(image,q-pixels,pixels);
}
break;
}
case '4':
{
/*
Convert image to a PBM image.
*/
(void) SetImageType(image,BilevelType);
image->depth=1;
quantum_info=AcquireQuantumInfo(image_info,image);
if (quantum_info == (QuantumInfo *) NULL)
ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
(void) SetQuantumEndian(image,quantum_info,MSBEndian);
quantum_info->min_is_white=MagickTrue;
pixels=GetQuantumPixels(quantum_info);
for (y=0; y < (ssize_t) image->rows; y++)
{
register const PixelPacket
*magick_restrict p;
p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception);
if (p == (const PixelPacket *) NULL)
break;
extent=ExportQuantumPixels(image,(const CacheView *) NULL,
quantum_info,GrayQuantum,pixels,&image->exception);
count=WriteBlob(image,extent,pixels);
if (count != (ssize_t) extent)
break;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
}
quantum_info=DestroyQuantumInfo(quantum_info);
break;
}
case '5':
{
/*
Convert image to a PGM image.
*/
if (image->depth > 32)
image->depth=32;
(void) FormatLocaleString(buffer,MaxTextExtent,"%.20g\n",(double)
((MagickOffsetType) GetQuantumRange(image->depth)));
(void) WriteBlobString(image,buffer);
quantum_info=AcquireQuantumInfo(image_info,image);
if (quantum_info == (QuantumInfo *) NULL)
ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
(void) SetQuantumEndian(image,quantum_info,MSBEndian);
quantum_info->min_is_white=MagickTrue;
pixels=GetQuantumPixels(quantum_info);
extent=GetQuantumExtent(image,quantum_info,GrayQuantum);
for (y=0; y < (ssize_t) image->rows; y++)
{
register const PixelPacket
*magick_restrict p;
register ssize_t
x;
p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception);
if (p == (const PixelPacket *) NULL)
break;
q=pixels;
switch (image->depth)
{
case 8:
case 16:
case 32:
{
extent=ExportQuantumPixels(image,(const CacheView *) NULL,
quantum_info,GrayQuantum,pixels,&image->exception);
break;
}
default:
{
if (image->depth <= 8)
{
for (x=0; x < (ssize_t) image->columns; x++)
{
if (IsGrayPixel(p) == MagickFalse)
pixel=ScaleQuantumToAny(ClampToQuantum(
GetPixelLuma(image,p)),max_value);
else
{
if (image->depth == 8)
pixel=ScaleQuantumToChar(GetPixelRed(p));
else
pixel=ScaleQuantumToAny(GetPixelRed(p),max_value);
}
q=PopCharPixel((unsigned char) pixel,q);
p++;
}
extent=(size_t) (q-pixels);
break;
}
if (image->depth <= 16)
{
for (x=0; x < (ssize_t) image->columns; x++)
{
if (IsGrayPixel(p) == MagickFalse)
pixel=ScaleQuantumToAny(ClampToQuantum(
GetPixelLuma(image,p)),max_value);
else
{
if (image->depth == 16)
pixel=ScaleQuantumToShort(GetPixelRed(p));
else
pixel=ScaleQuantumToAny(GetPixelRed(p),max_value);
}
q=PopShortPixel(MSBEndian,(unsigned short) pixel,q);
p++;
}
extent=(size_t) (q-pixels);
break;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
if (IsGrayPixel(p) == MagickFalse)
pixel=ScaleQuantumToAny(ClampToQuantum(
GetPixelLuma(image,p)),max_value);
else
{
if (image->depth == 32)
pixel=ScaleQuantumToLong(GetPixelRed(p));
else
pixel=ScaleQuantumToAny(GetPixelRed(p),max_value);
}
q=PopLongPixel(MSBEndian,(unsigned int) pixel,q);
p++;
}
extent=(size_t) (q-pixels);
break;
}
}
count=WriteBlob(image,extent,pixels);
if (count != (ssize_t) extent)
break;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
}
quantum_info=DestroyQuantumInfo(quantum_info);
break;
}
case '6':
{
/*
Convert image to a PNM image.
*/
(void) TransformImageColorspace(image,sRGBColorspace);
if (image->depth > 32)
image->depth=32;
(void) FormatLocaleString(buffer,MaxTextExtent,"%.20g\n",(double)
((MagickOffsetType) GetQuantumRange(image->depth)));
(void) WriteBlobString(image,buffer);
quantum_info=AcquireQuantumInfo(image_info,image);
if (quantum_info == (QuantumInfo *) NULL)
ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
(void) SetQuantumEndian(image,quantum_info,MSBEndian);
(void) SetQuantumEndian(image,quantum_info,MSBEndian);
pixels=GetQuantumPixels(quantum_info);
extent=GetQuantumExtent(image,quantum_info,quantum_type);
for (y=0; y < (ssize_t) image->rows; y++)
{
register const PixelPacket
*magick_restrict p;
register ssize_t
x;
p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception);
if (p == (const PixelPacket *) NULL)
break;
q=pixels;
switch (image->depth)
{
case 8:
case 16:
case 32:
{
extent=ExportQuantumPixels(image,(const CacheView *) NULL,
quantum_info,quantum_type,pixels,&image->exception);
break;
}
default:
{
if (image->depth <= 8)
{
for (x=0; x < (ssize_t) image->columns; x++)
{
pixel=ScaleQuantumToAny(GetPixelRed(p),max_value);
q=PopCharPixel((unsigned char) pixel,q);
pixel=ScaleQuantumToAny(GetPixelGreen(p),max_value);
q=PopCharPixel((unsigned char) pixel,q);
pixel=ScaleQuantumToAny(GetPixelBlue(p),max_value);
q=PopCharPixel((unsigned char) pixel,q);
p++;
}
extent=(size_t) (q-pixels);
break;
}
if (image->depth <= 16)
{
for (x=0; x < (ssize_t) image->columns; x++)
{
pixel=ScaleQuantumToAny(GetPixelRed(p),max_value);
q=PopShortPixel(MSBEndian,(unsigned short) pixel,q);
pixel=ScaleQuantumToAny(GetPixelGreen(p),max_value);
q=PopShortPixel(MSBEndian,(unsigned short) pixel,q);
pixel=ScaleQuantumToAny(GetPixelBlue(p),max_value);
q=PopShortPixel(MSBEndian,(unsigned short) pixel,q);
p++;
}
extent=(size_t) (q-pixels);
break;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
pixel=ScaleQuantumToAny(GetPixelRed(p),max_value);
q=PopLongPixel(MSBEndian,(unsigned short) pixel,q);
pixel=ScaleQuantumToAny(GetPixelGreen(p),max_value);
q=PopLongPixel(MSBEndian,(unsigned short) pixel,q);
pixel=ScaleQuantumToAny(GetPixelBlue(p),max_value);
q=PopLongPixel(MSBEndian,(unsigned short) pixel,q);
p++;
}
extent=(size_t) (q-pixels);
break;
}
}
count=WriteBlob(image,extent,pixels);
if (count != (ssize_t) extent)
break;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
}
quantum_info=DestroyQuantumInfo(quantum_info);
break;
}
case '7':
{
/*
Convert image to a PAM.
*/
if (image->depth > 32)
image->depth=32;
quantum_info=AcquireQuantumInfo(image_info,image);
if (quantum_info == (QuantumInfo *) NULL)
ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
(void) SetQuantumEndian(image,quantum_info,MSBEndian);
pixels=GetQuantumPixels(quantum_info);
for (y=0; y < (ssize_t) image->rows; y++)
{
register const IndexPacket
*magick_restrict indexes;
register const PixelPacket
*magick_restrict p;
register ssize_t
x;
p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception);
if (p == (const PixelPacket *) NULL)
break;
indexes=GetVirtualIndexQueue(image);
q=pixels;
switch (image->depth)
{
case 8:
case 16:
case 32:
{
extent=ExportQuantumPixels(image,(const CacheView *) NULL,
quantum_info,quantum_type,pixels,&image->exception);
break;
}
default:
{
switch (quantum_type)
{
case GrayQuantum:
case GrayAlphaQuantum:
{
if (image->depth <= 8)
{
for (x=0; x < (ssize_t) image->columns; x++)
{
pixel=ScaleQuantumToAny(ClampToQuantum(
GetPixelLuma(image,p)),max_value);
q=PopCharPixel((unsigned char) pixel,q);
if (image->matte != MagickFalse)
{
pixel=(unsigned char) ScaleQuantumToAny(
GetPixelOpacity(p),max_value);
q=PopCharPixel((unsigned char) pixel,q);
}
p++;
}
break;
}
if (image->depth <= 16)
{
for (x=0; x < (ssize_t) image->columns; x++)
{
pixel=ScaleQuantumToAny(ClampToQuantum(
GetPixelLuma(image,p)),max_value);
q=PopShortPixel(MSBEndian,(unsigned short) pixel,q);
if (image->matte != MagickFalse)
{
pixel=(unsigned char) ScaleQuantumToAny(
GetPixelOpacity(p),max_value);
q=PopShortPixel(MSBEndian,(unsigned short) pixel,q);
}
p++;
}
break;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
pixel=ScaleQuantumToAny(ClampToQuantum(
GetPixelLuma(image,p)),max_value);
q=PopLongPixel(MSBEndian,(unsigned int) pixel,q);
if (image->matte != MagickFalse)
{
pixel=(unsigned char) ScaleQuantumToAny(
GetPixelOpacity(p),max_value);
q=PopLongPixel(MSBEndian,(unsigned int) pixel,q);
}
p++;
}
break;
}
case CMYKQuantum:
case CMYKAQuantum:
{
if (image->depth <= 8)
{
for (x=0; x < (ssize_t) image->columns; x++)
{
pixel=ScaleQuantumToAny(GetPixelRed(p),max_value);
q=PopCharPixel((unsigned char) pixel,q);
pixel=ScaleQuantumToAny(GetPixelGreen(p),max_value);
q=PopCharPixel((unsigned char) pixel,q);
pixel=ScaleQuantumToAny(GetPixelBlue(p),max_value);
q=PopCharPixel((unsigned char) pixel,q);
pixel=ScaleQuantumToAny(GetPixelIndex(indexes+x),
max_value);
q=PopCharPixel((unsigned char) pixel,q);
if (image->matte != MagickFalse)
{
pixel=ScaleQuantumToAny((Quantum) (QuantumRange-
GetPixelOpacity(p)),max_value);
q=PopCharPixel((unsigned char) pixel,q);
}
p++;
}
break;
}
if (image->depth <= 16)
{
for (x=0; x < (ssize_t) image->columns; x++)
{
pixel=ScaleQuantumToAny(GetPixelRed(p),max_value);
q=PopShortPixel(MSBEndian,(unsigned short) pixel,q);
pixel=ScaleQuantumToAny(GetPixelGreen(p),max_value);
q=PopShortPixel(MSBEndian,(unsigned short) pixel,q);
pixel=ScaleQuantumToAny(GetPixelBlue(p),max_value);
q=PopShortPixel(MSBEndian,(unsigned short) pixel,q);
pixel=ScaleQuantumToAny(GetPixelIndex(indexes+x),
max_value);
q=PopShortPixel(MSBEndian,(unsigned short) pixel,q);
if (image->matte != MagickFalse)
{
pixel=ScaleQuantumToAny((Quantum) (QuantumRange-
GetPixelOpacity(p)),max_value);
q=PopShortPixel(MSBEndian,(unsigned short) pixel,q);
}
p++;
}
break;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
pixel=ScaleQuantumToAny(GetPixelRed(p),max_value);
q=PopLongPixel(MSBEndian,(unsigned int) pixel,q);
pixel=ScaleQuantumToAny(GetPixelGreen(p),max_value);
q=PopLongPixel(MSBEndian,(unsigned int) pixel,q);
pixel=ScaleQuantumToAny(GetPixelBlue(p),max_value);
q=PopLongPixel(MSBEndian,(unsigned int) pixel,q);
pixel=ScaleQuantumToAny(GetPixelIndex(indexes+x),max_value);
q=PopLongPixel(MSBEndian,(unsigned int) pixel,q);
if (image->matte != MagickFalse)
{
pixel=ScaleQuantumToAny((Quantum) (QuantumRange-
GetPixelOpacity(p)),max_value);
q=PopLongPixel(MSBEndian,(unsigned int) pixel,q);
}
p++;
}
break;
}
default:
{
if (image->depth <= 8)
{
for (x=0; x < (ssize_t) image->columns; x++)
{
pixel=ScaleQuantumToAny(GetPixelRed(p),max_value);
q=PopCharPixel((unsigned char) pixel,q);
pixel=ScaleQuantumToAny(GetPixelGreen(p),max_value);
q=PopCharPixel((unsigned char) pixel,q);
pixel=ScaleQuantumToAny(GetPixelBlue(p),max_value);
q=PopCharPixel((unsigned char) pixel,q);
if (image->matte != MagickFalse)
{
pixel=ScaleQuantumToAny((Quantum) (QuantumRange-
GetPixelOpacity(p)),max_value);
q=PopCharPixel((unsigned char) pixel,q);
}
p++;
}
break;
}
if (image->depth <= 16)
{
for (x=0; x < (ssize_t) image->columns; x++)
{
pixel=ScaleQuantumToAny(GetPixelRed(p),max_value);
q=PopShortPixel(MSBEndian,(unsigned short) pixel,q);
pixel=ScaleQuantumToAny(GetPixelGreen(p),max_value);
q=PopShortPixel(MSBEndian,(unsigned short) pixel,q);
pixel=ScaleQuantumToAny(GetPixelBlue(p),max_value);
q=PopShortPixel(MSBEndian,(unsigned short) pixel,q);
if (image->matte != MagickFalse)
{
pixel=ScaleQuantumToAny((Quantum) (QuantumRange-
GetPixelOpacity(p)),max_value);
q=PopShortPixel(MSBEndian,(unsigned short) pixel,q);
}
p++;
}
break;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
pixel=ScaleQuantumToAny(GetPixelRed(p),max_value);
q=PopLongPixel(MSBEndian,(unsigned int) pixel,q);
pixel=ScaleQuantumToAny(GetPixelGreen(p),max_value);
q=PopLongPixel(MSBEndian,(unsigned int) pixel,q);
pixel=ScaleQuantumToAny(GetPixelBlue(p),max_value);
q=PopLongPixel(MSBEndian,(unsigned int) pixel,q);
if (image->matte != MagickFalse)
{
pixel=ScaleQuantumToAny((Quantum) (QuantumRange-
GetPixelOpacity(p)),max_value);
q=PopLongPixel(MSBEndian,(unsigned int) pixel,q);
}
p++;
}
break;
}
}
extent=(size_t) (q-pixels);
break;
}
}
count=WriteBlob(image,extent,pixels);
if (count != (ssize_t) extent)
break;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
}
quantum_info=DestroyQuantumInfo(quantum_info);
break;
}
case 'F':
case 'f':
{
(void) WriteBlobString(image,image->endian == LSBEndian ? "-1.0\n" :
"1.0\n");
image->depth=32;
quantum_type=format == 'f' ? GrayQuantum : RGBQuantum;
quantum_info=AcquireQuantumInfo((const ImageInfo *) NULL,image);
if (quantum_info == (QuantumInfo *) NULL)
ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
status=SetQuantumFormat(image,quantum_info,FloatingPointQuantumFormat);
if (status == MagickFalse)
ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
pixels=GetQuantumPixels(quantum_info);
for (y=(ssize_t) image->rows-1; y >= 0; y--)
{
register const PixelPacket
*magick_restrict p;
p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception);
if (p == (const PixelPacket *) NULL)
break;
extent=ExportQuantumPixels(image,(const CacheView *) NULL,
quantum_info,quantum_type,pixels,&image->exception);
(void) WriteBlob(image,extent,pixels);
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
}
quantum_info=DestroyQuantumInfo(quantum_info);
break;
}
}
if (GetNextImageInList(image) == (Image *) NULL)
break;
image=SyncNextImageInList(image);
status=SetImageProgress(image,SaveImagesTag,scene++,imageListLength);
if (status == MagickFalse)
break;
} while (image_info->adjoin != MagickFalse);
(void) CloseBlob(image);
return(MagickTrue);
}
|
static MagickBooleanType WritePNMImage(const ImageInfo *image_info,Image *image)
{
char
buffer[MaxTextExtent],
format,
magick[MaxTextExtent];
const char
*value;
IndexPacket
index;
MagickBooleanType
status;
MagickOffsetType
scene;
QuantumAny
pixel;
QuantumInfo
*quantum_info;
QuantumType
quantum_type;
register unsigned char
*pixels,
*q;
size_t
extent,
imageListLength,
packet_size;
ssize_t
count,
y;
/*
Open output image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
status=OpenBlob(image_info,image,WriteBinaryBlobMode,&image->exception);
if (status == MagickFalse)
return(status);
scene=0;
imageListLength=GetImageListLength(image);
do
{
QuantumAny
max_value;
/*
Write PNM file header.
*/
max_value=GetQuantumRange(image->depth);
packet_size=3;
quantum_type=RGBQuantum;
(void) CopyMagickString(magick,image_info->magick,MaxTextExtent);
switch (magick[1])
{
case 'A':
case 'a':
{
format='7';
break;
}
case 'B':
case 'b':
{
format='4';
if (image_info->compression == NoCompression)
format='1';
break;
}
case 'F':
case 'f':
{
format='F';
if (SetImageGray(image,&image->exception) != MagickFalse)
format='f';
break;
}
case 'G':
case 'g':
{
format='5';
if (image_info->compression == NoCompression)
format='2';
break;
}
case 'N':
case 'n':
{
if ((image_info->type != TrueColorType) &&
(SetImageGray(image,&image->exception) != MagickFalse))
{
format='5';
if (image_info->compression == NoCompression)
format='2';
if (SetImageMonochrome(image,&image->exception) != MagickFalse)
{
format='4';
if (image_info->compression == NoCompression)
format='1';
}
break;
}
}
default:
{
format='6';
if (image_info->compression == NoCompression)
format='3';
break;
}
}
(void) FormatLocaleString(buffer,MaxTextExtent,"P%c\n",format);
(void) WriteBlobString(image,buffer);
value=GetImageProperty(image,"comment");
if (value != (const char *) NULL)
{
register const char
*p;
/*
Write comments to file.
*/
(void) WriteBlobByte(image,'#');
for (p=value; *p != '\0'; p++)
{
(void) WriteBlobByte(image,(unsigned char) *p);
if ((*p == '\n') || (*p == '\r'))
(void) WriteBlobByte(image,'#');
}
(void) WriteBlobByte(image,'\n');
}
if (format != '7')
{
(void) FormatLocaleString(buffer,MaxTextExtent,"%.20g %.20g\n",
(double) image->columns,(double) image->rows);
(void) WriteBlobString(image,buffer);
}
else
{
char
type[MaxTextExtent];
/*
PAM header.
*/
(void) FormatLocaleString(buffer,MaxTextExtent,
"WIDTH %.20g\nHEIGHT %.20g\n",(double) image->columns,(double)
image->rows);
(void) WriteBlobString(image,buffer);
quantum_type=GetQuantumType(image,&image->exception);
switch (quantum_type)
{
case CMYKQuantum:
case CMYKAQuantum:
{
packet_size=4;
(void) CopyMagickString(type,"CMYK",MaxTextExtent);
break;
}
case GrayQuantum:
case GrayAlphaQuantum:
{
packet_size=1;
(void) CopyMagickString(type,"GRAYSCALE",MaxTextExtent);
if (IdentifyImageMonochrome(image,&image->exception) != MagickFalse)
(void) CopyMagickString(type,"BLACKANDWHITE",MaxTextExtent);
break;
}
default:
{
quantum_type=RGBQuantum;
if (image->matte != MagickFalse)
quantum_type=RGBAQuantum;
packet_size=3;
(void) CopyMagickString(type,"RGB",MaxTextExtent);
break;
}
}
if (image->matte != MagickFalse)
{
packet_size++;
(void) ConcatenateMagickString(type,"_ALPHA",MaxTextExtent);
}
if (image->depth > 32)
image->depth=32;
(void) FormatLocaleString(buffer,MaxTextExtent,
"DEPTH %.20g\nMAXVAL %.20g\n",(double) packet_size,(double)
((MagickOffsetType) GetQuantumRange(image->depth)));
(void) WriteBlobString(image,buffer);
(void) FormatLocaleString(buffer,MaxTextExtent,"TUPLTYPE %s\nENDHDR\n",
type);
(void) WriteBlobString(image,buffer);
}
/*
Convert to PNM raster pixels.
*/
switch (format)
{
case '1':
{
unsigned char
pixels[2048];
/*
Convert image to a PBM image.
*/
(void) SetImageType(image,BilevelType);
q=pixels;
for (y=0; y < (ssize_t) image->rows; y++)
{
register const PixelPacket
*magick_restrict p;
register ssize_t
x;
p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception);
if (p == (const PixelPacket *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
*q++=(unsigned char) (GetPixelLuma(image,p) >= (QuantumRange/2.0) ?
'0' : '1');
*q++=' ';
if ((q-pixels+1) >= (ssize_t) sizeof(pixels))
{
*q++='\n';
(void) WriteBlob(image,q-pixels,pixels);
q=pixels;
}
p++;
}
*q++='\n';
(void) WriteBlob(image,q-pixels,pixels);
q=pixels;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
}
if (q != pixels)
{
*q++='\n';
(void) WriteBlob(image,q-pixels,pixels);
}
break;
}
case '2':
{
unsigned char
pixels[2048];
/*
Convert image to a PGM image.
*/
if (image->depth <= 8)
(void) WriteBlobString(image,"255\n");
else
if (image->depth <= 16)
(void) WriteBlobString(image,"65535\n");
else
(void) WriteBlobString(image,"4294967295\n");
q=pixels;
for (y=0; y < (ssize_t) image->rows; y++)
{
register const PixelPacket
*magick_restrict p;
register ssize_t
x;
p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception);
if (p == (const PixelPacket *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
index=ClampToQuantum(GetPixelLuma(image,p));
if (image->depth <= 8)
count=(ssize_t) FormatLocaleString(buffer,MaxTextExtent,"%u ",
ScaleQuantumToChar(index));
else
if (image->depth <= 16)
count=(ssize_t) FormatLocaleString(buffer,MaxTextExtent,"%u ",
ScaleQuantumToShort(index));
else
count=(ssize_t) FormatLocaleString(buffer,MaxTextExtent,"%u ",
ScaleQuantumToLong(index));
extent=(size_t) count;
(void) strncpy((char *) q,buffer,extent);
q+=extent;
if ((q-pixels+extent+1) >= sizeof(pixels))
{
*q++='\n';
(void) WriteBlob(image,q-pixels,pixels);
q=pixels;
}
p++;
}
*q++='\n';
(void) WriteBlob(image,q-pixels,pixels);
q=pixels;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
}
if (q != pixels)
{
*q++='\n';
(void) WriteBlob(image,q-pixels,pixels);
}
break;
}
case '3':
{
unsigned char
pixels[2048];
/*
Convert image to a PNM image.
*/
(void) TransformImageColorspace(image,sRGBColorspace);
if (image->depth <= 8)
(void) WriteBlobString(image,"255\n");
else
if (image->depth <= 16)
(void) WriteBlobString(image,"65535\n");
else
(void) WriteBlobString(image,"4294967295\n");
q=pixels;
for (y=0; y < (ssize_t) image->rows; y++)
{
register const PixelPacket
*magick_restrict p;
register ssize_t
x;
p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception);
if (p == (const PixelPacket *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
if (image->depth <= 8)
count=(ssize_t) FormatLocaleString(buffer,MaxTextExtent,
"%u %u %u ",ScaleQuantumToChar(GetPixelRed(p)),
ScaleQuantumToChar(GetPixelGreen(p)),
ScaleQuantumToChar(GetPixelBlue(p)));
else
if (image->depth <= 16)
count=(ssize_t) FormatLocaleString(buffer,MaxTextExtent,
"%u %u %u ",ScaleQuantumToShort(GetPixelRed(p)),
ScaleQuantumToShort(GetPixelGreen(p)),
ScaleQuantumToShort(GetPixelBlue(p)));
else
count=(ssize_t) FormatLocaleString(buffer,MaxTextExtent,
"%u %u %u ",ScaleQuantumToLong(GetPixelRed(p)),
ScaleQuantumToLong(GetPixelGreen(p)),
ScaleQuantumToLong(GetPixelBlue(p)));
extent=(size_t) count;
(void) strncpy((char *) q,buffer,extent);
q+=extent;
if ((q-pixels+extent+1) >= sizeof(pixels))
{
*q++='\n';
(void) WriteBlob(image,q-pixels,pixels);
q=pixels;
}
p++;
}
*q++='\n';
(void) WriteBlob(image,q-pixels,pixels);
q=pixels;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
}
if (q != pixels)
{
*q++='\n';
(void) WriteBlob(image,q-pixels,pixels);
}
break;
}
case '4':
{
/*
Convert image to a PBM image.
*/
(void) SetImageType(image,BilevelType);
image->depth=1;
quantum_info=AcquireQuantumInfo(image_info,image);
if (quantum_info == (QuantumInfo *) NULL)
ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
(void) SetQuantumEndian(image,quantum_info,MSBEndian);
quantum_info->min_is_white=MagickTrue;
pixels=GetQuantumPixels(quantum_info);
for (y=0; y < (ssize_t) image->rows; y++)
{
register const PixelPacket
*magick_restrict p;
p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception);
if (p == (const PixelPacket *) NULL)
break;
extent=ExportQuantumPixels(image,(const CacheView *) NULL,
quantum_info,GrayQuantum,pixels,&image->exception);
count=WriteBlob(image,extent,pixels);
if (count != (ssize_t) extent)
break;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
}
quantum_info=DestroyQuantumInfo(quantum_info);
break;
}
case '5':
{
/*
Convert image to a PGM image.
*/
if (image->depth > 32)
image->depth=32;
(void) FormatLocaleString(buffer,MaxTextExtent,"%.20g\n",(double)
((MagickOffsetType) GetQuantumRange(image->depth)));
(void) WriteBlobString(image,buffer);
quantum_info=AcquireQuantumInfo(image_info,image);
if (quantum_info == (QuantumInfo *) NULL)
ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
(void) SetQuantumEndian(image,quantum_info,MSBEndian);
quantum_info->min_is_white=MagickTrue;
pixels=GetQuantumPixels(quantum_info);
extent=GetQuantumExtent(image,quantum_info,GrayQuantum);
for (y=0; y < (ssize_t) image->rows; y++)
{
register const PixelPacket
*magick_restrict p;
register ssize_t
x;
p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception);
if (p == (const PixelPacket *) NULL)
break;
q=pixels;
switch (image->depth)
{
case 8:
case 16:
case 32:
{
extent=ExportQuantumPixels(image,(const CacheView *) NULL,
quantum_info,GrayQuantum,pixels,&image->exception);
break;
}
default:
{
if (image->depth <= 8)
{
for (x=0; x < (ssize_t) image->columns; x++)
{
if (IsGrayPixel(p) == MagickFalse)
pixel=ScaleQuantumToAny(ClampToQuantum(
GetPixelLuma(image,p)),max_value);
else
{
if (image->depth == 8)
pixel=ScaleQuantumToChar(GetPixelRed(p));
else
pixel=ScaleQuantumToAny(GetPixelRed(p),max_value);
}
q=PopCharPixel((unsigned char) pixel,q);
p++;
}
extent=(size_t) (q-pixels);
break;
}
if (image->depth <= 16)
{
for (x=0; x < (ssize_t) image->columns; x++)
{
if (IsGrayPixel(p) == MagickFalse)
pixel=ScaleQuantumToAny(ClampToQuantum(
GetPixelLuma(image,p)),max_value);
else
{
if (image->depth == 16)
pixel=ScaleQuantumToShort(GetPixelRed(p));
else
pixel=ScaleQuantumToAny(GetPixelRed(p),max_value);
}
q=PopShortPixel(MSBEndian,(unsigned short) pixel,q);
p++;
}
extent=(size_t) (q-pixels);
break;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
if (IsGrayPixel(p) == MagickFalse)
pixel=ScaleQuantumToAny(ClampToQuantum(
GetPixelLuma(image,p)),max_value);
else
{
if (image->depth == 32)
pixel=ScaleQuantumToLong(GetPixelRed(p));
else
pixel=ScaleQuantumToAny(GetPixelRed(p),max_value);
}
q=PopLongPixel(MSBEndian,(unsigned int) pixel,q);
p++;
}
extent=(size_t) (q-pixels);
break;
}
}
count=WriteBlob(image,extent,pixels);
if (count != (ssize_t) extent)
break;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
}
quantum_info=DestroyQuantumInfo(quantum_info);
break;
}
case '6':
{
/*
Convert image to a PNM image.
*/
(void) TransformImageColorspace(image,sRGBColorspace);
if (image->depth > 32)
image->depth=32;
(void) FormatLocaleString(buffer,MaxTextExtent,"%.20g\n",(double)
((MagickOffsetType) GetQuantumRange(image->depth)));
(void) WriteBlobString(image,buffer);
quantum_info=AcquireQuantumInfo(image_info,image);
if (quantum_info == (QuantumInfo *) NULL)
ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
(void) SetQuantumEndian(image,quantum_info,MSBEndian);
(void) SetQuantumEndian(image,quantum_info,MSBEndian);
pixels=GetQuantumPixels(quantum_info);
extent=GetQuantumExtent(image,quantum_info,quantum_type);
for (y=0; y < (ssize_t) image->rows; y++)
{
register const PixelPacket
*magick_restrict p;
register ssize_t
x;
p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception);
if (p == (const PixelPacket *) NULL)
break;
q=pixels;
switch (image->depth)
{
case 8:
case 16:
case 32:
{
extent=ExportQuantumPixels(image,(const CacheView *) NULL,
quantum_info,quantum_type,pixels,&image->exception);
break;
}
default:
{
if (image->depth <= 8)
{
for (x=0; x < (ssize_t) image->columns; x++)
{
pixel=ScaleQuantumToAny(GetPixelRed(p),max_value);
q=PopCharPixel((unsigned char) pixel,q);
pixel=ScaleQuantumToAny(GetPixelGreen(p),max_value);
q=PopCharPixel((unsigned char) pixel,q);
pixel=ScaleQuantumToAny(GetPixelBlue(p),max_value);
q=PopCharPixel((unsigned char) pixel,q);
p++;
}
extent=(size_t) (q-pixels);
break;
}
if (image->depth <= 16)
{
for (x=0; x < (ssize_t) image->columns; x++)
{
pixel=ScaleQuantumToAny(GetPixelRed(p),max_value);
q=PopShortPixel(MSBEndian,(unsigned short) pixel,q);
pixel=ScaleQuantumToAny(GetPixelGreen(p),max_value);
q=PopShortPixel(MSBEndian,(unsigned short) pixel,q);
pixel=ScaleQuantumToAny(GetPixelBlue(p),max_value);
q=PopShortPixel(MSBEndian,(unsigned short) pixel,q);
p++;
}
extent=(size_t) (q-pixels);
break;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
pixel=ScaleQuantumToAny(GetPixelRed(p),max_value);
q=PopLongPixel(MSBEndian,(unsigned short) pixel,q);
pixel=ScaleQuantumToAny(GetPixelGreen(p),max_value);
q=PopLongPixel(MSBEndian,(unsigned short) pixel,q);
pixel=ScaleQuantumToAny(GetPixelBlue(p),max_value);
q=PopLongPixel(MSBEndian,(unsigned short) pixel,q);
p++;
}
extent=(size_t) (q-pixels);
break;
}
}
count=WriteBlob(image,extent,pixels);
if (count != (ssize_t) extent)
break;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
}
quantum_info=DestroyQuantumInfo(quantum_info);
break;
}
case '7':
{
/*
Convert image to a PAM.
*/
if (image->depth > 32)
image->depth=32;
quantum_info=AcquireQuantumInfo(image_info,image);
if (quantum_info == (QuantumInfo *) NULL)
ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
(void) SetQuantumEndian(image,quantum_info,MSBEndian);
pixels=GetQuantumPixels(quantum_info);
for (y=0; y < (ssize_t) image->rows; y++)
{
register const IndexPacket
*magick_restrict indexes;
register const PixelPacket
*magick_restrict p;
register ssize_t
x;
p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception);
if (p == (const PixelPacket *) NULL)
break;
indexes=GetVirtualIndexQueue(image);
q=pixels;
switch (image->depth)
{
case 8:
case 16:
case 32:
{
extent=ExportQuantumPixels(image,(const CacheView *) NULL,
quantum_info,quantum_type,pixels,&image->exception);
break;
}
default:
{
switch (quantum_type)
{
case GrayQuantum:
case GrayAlphaQuantum:
{
if (image->depth <= 8)
{
for (x=0; x < (ssize_t) image->columns; x++)
{
pixel=ScaleQuantumToAny(ClampToQuantum(
GetPixelLuma(image,p)),max_value);
q=PopCharPixel((unsigned char) pixel,q);
if (image->matte != MagickFalse)
{
pixel=(unsigned char) ScaleQuantumToAny(
GetPixelOpacity(p),max_value);
q=PopCharPixel((unsigned char) pixel,q);
}
p++;
}
break;
}
if (image->depth <= 16)
{
for (x=0; x < (ssize_t) image->columns; x++)
{
pixel=ScaleQuantumToAny(ClampToQuantum(
GetPixelLuma(image,p)),max_value);
q=PopShortPixel(MSBEndian,(unsigned short) pixel,q);
if (image->matte != MagickFalse)
{
pixel=(unsigned char) ScaleQuantumToAny(
GetPixelOpacity(p),max_value);
q=PopShortPixel(MSBEndian,(unsigned short) pixel,q);
}
p++;
}
break;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
pixel=ScaleQuantumToAny(ClampToQuantum(
GetPixelLuma(image,p)),max_value);
q=PopLongPixel(MSBEndian,(unsigned int) pixel,q);
if (image->matte != MagickFalse)
{
pixel=(unsigned char) ScaleQuantumToAny(
GetPixelOpacity(p),max_value);
q=PopLongPixel(MSBEndian,(unsigned int) pixel,q);
}
p++;
}
break;
}
case CMYKQuantum:
case CMYKAQuantum:
{
if (image->depth <= 8)
{
for (x=0; x < (ssize_t) image->columns; x++)
{
pixel=ScaleQuantumToAny(GetPixelRed(p),max_value);
q=PopCharPixel((unsigned char) pixel,q);
pixel=ScaleQuantumToAny(GetPixelGreen(p),max_value);
q=PopCharPixel((unsigned char) pixel,q);
pixel=ScaleQuantumToAny(GetPixelBlue(p),max_value);
q=PopCharPixel((unsigned char) pixel,q);
pixel=ScaleQuantumToAny(GetPixelIndex(indexes+x),
max_value);
q=PopCharPixel((unsigned char) pixel,q);
if (image->matte != MagickFalse)
{
pixel=ScaleQuantumToAny((Quantum) (QuantumRange-
GetPixelOpacity(p)),max_value);
q=PopCharPixel((unsigned char) pixel,q);
}
p++;
}
break;
}
if (image->depth <= 16)
{
for (x=0; x < (ssize_t) image->columns; x++)
{
pixel=ScaleQuantumToAny(GetPixelRed(p),max_value);
q=PopShortPixel(MSBEndian,(unsigned short) pixel,q);
pixel=ScaleQuantumToAny(GetPixelGreen(p),max_value);
q=PopShortPixel(MSBEndian,(unsigned short) pixel,q);
pixel=ScaleQuantumToAny(GetPixelBlue(p),max_value);
q=PopShortPixel(MSBEndian,(unsigned short) pixel,q);
pixel=ScaleQuantumToAny(GetPixelIndex(indexes+x),
max_value);
q=PopShortPixel(MSBEndian,(unsigned short) pixel,q);
if (image->matte != MagickFalse)
{
pixel=ScaleQuantumToAny((Quantum) (QuantumRange-
GetPixelOpacity(p)),max_value);
q=PopShortPixel(MSBEndian,(unsigned short) pixel,q);
}
p++;
}
break;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
pixel=ScaleQuantumToAny(GetPixelRed(p),max_value);
q=PopLongPixel(MSBEndian,(unsigned int) pixel,q);
pixel=ScaleQuantumToAny(GetPixelGreen(p),max_value);
q=PopLongPixel(MSBEndian,(unsigned int) pixel,q);
pixel=ScaleQuantumToAny(GetPixelBlue(p),max_value);
q=PopLongPixel(MSBEndian,(unsigned int) pixel,q);
pixel=ScaleQuantumToAny(GetPixelIndex(indexes+x),max_value);
q=PopLongPixel(MSBEndian,(unsigned int) pixel,q);
if (image->matte != MagickFalse)
{
pixel=ScaleQuantumToAny((Quantum) (QuantumRange-
GetPixelOpacity(p)),max_value);
q=PopLongPixel(MSBEndian,(unsigned int) pixel,q);
}
p++;
}
break;
}
default:
{
if (image->depth <= 8)
{
for (x=0; x < (ssize_t) image->columns; x++)
{
pixel=ScaleQuantumToAny(GetPixelRed(p),max_value);
q=PopCharPixel((unsigned char) pixel,q);
pixel=ScaleQuantumToAny(GetPixelGreen(p),max_value);
q=PopCharPixel((unsigned char) pixel,q);
pixel=ScaleQuantumToAny(GetPixelBlue(p),max_value);
q=PopCharPixel((unsigned char) pixel,q);
if (image->matte != MagickFalse)
{
pixel=ScaleQuantumToAny((Quantum) (QuantumRange-
GetPixelOpacity(p)),max_value);
q=PopCharPixel((unsigned char) pixel,q);
}
p++;
}
break;
}
if (image->depth <= 16)
{
for (x=0; x < (ssize_t) image->columns; x++)
{
pixel=ScaleQuantumToAny(GetPixelRed(p),max_value);
q=PopShortPixel(MSBEndian,(unsigned short) pixel,q);
pixel=ScaleQuantumToAny(GetPixelGreen(p),max_value);
q=PopShortPixel(MSBEndian,(unsigned short) pixel,q);
pixel=ScaleQuantumToAny(GetPixelBlue(p),max_value);
q=PopShortPixel(MSBEndian,(unsigned short) pixel,q);
if (image->matte != MagickFalse)
{
pixel=ScaleQuantumToAny((Quantum) (QuantumRange-
GetPixelOpacity(p)),max_value);
q=PopShortPixel(MSBEndian,(unsigned short) pixel,q);
}
p++;
}
break;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
pixel=ScaleQuantumToAny(GetPixelRed(p),max_value);
q=PopLongPixel(MSBEndian,(unsigned int) pixel,q);
pixel=ScaleQuantumToAny(GetPixelGreen(p),max_value);
q=PopLongPixel(MSBEndian,(unsigned int) pixel,q);
pixel=ScaleQuantumToAny(GetPixelBlue(p),max_value);
q=PopLongPixel(MSBEndian,(unsigned int) pixel,q);
if (image->matte != MagickFalse)
{
pixel=ScaleQuantumToAny((Quantum) (QuantumRange-
GetPixelOpacity(p)),max_value);
q=PopLongPixel(MSBEndian,(unsigned int) pixel,q);
}
p++;
}
break;
}
}
extent=(size_t) (q-pixels);
break;
}
}
count=WriteBlob(image,extent,pixels);
if (count != (ssize_t) extent)
break;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
}
quantum_info=DestroyQuantumInfo(quantum_info);
break;
}
case 'F':
case 'f':
{
(void) WriteBlobString(image,image->endian == LSBEndian ? "-1.0\n" :
"1.0\n");
image->depth=32;
quantum_type=format == 'f' ? GrayQuantum : RGBQuantum;
quantum_info=AcquireQuantumInfo((const ImageInfo *) NULL,image);
if (quantum_info == (QuantumInfo *) NULL)
ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
status=SetQuantumFormat(image,quantum_info,FloatingPointQuantumFormat);
if (status == MagickFalse)
ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
pixels=GetQuantumPixels(quantum_info);
for (y=(ssize_t) image->rows-1; y >= 0; y--)
{
register const PixelPacket
*magick_restrict p;
p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception);
if (p == (const PixelPacket *) NULL)
break;
extent=ExportQuantumPixels(image,(const CacheView *) NULL,
quantum_info,quantum_type,pixels,&image->exception);
(void) WriteBlob(image,extent,pixels);
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
}
quantum_info=DestroyQuantumInfo(quantum_info);
break;
}
}
if (GetNextImageInList(image) == (Image *) NULL)
break;
image=SyncNextImageInList(image);
status=SetImageProgress(image,SaveImagesTag,scene++,imageListLength);
if (status == MagickFalse)
break;
} while (image_info->adjoin != MagickFalse);
(void) CloseBlob(image);
return(MagickTrue);
}
|
C
|
ImageMagick6
| 1 |
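The functional difference between the patched writer and the vulnerable one above is a single character in the plain-text P2/P3 flush tests: the fixed code checks (q-pixels+extent+2) >= sizeof(pixels) where the old code checked +1, leaving no headroom for the newline appended after the test. A reduced model of that off-by-one; the helper below is illustrative, not ImageMagick code:
#include <cstdio>

// Reduced model of the P2/P3 flush logic: after appending `extent`
// bytes the writer may still append '\n', so the check must reserve
// two bytes of headroom, not one (the off-by-one fixed upstream).
bool flush_needed(size_t used, size_t extent, size_t cap) {
  return used + extent + 2 >= cap;  // patched form
}

int main() {
  char buf[8];
  size_t used = 5, extent = 2;  // 5 bytes present, 2 more to write
  std::printf("%s\n", flush_needed(used, extent, sizeof buf)
                          ? "flush first" : "append");
}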
CVE-2011-4930
|
https://www.cvedetails.com/cve/CVE-2011-4930/
|
CWE-134
|
https://htcondor-git.cs.wisc.edu/?p=condor.git;a=commitdiff;h=5e5571d1a431eb3c61977b6dd6ec90186ef79867
|
5e5571d1a431eb3c61977b6dd6ec90186ef79867
| null |
GahpClient::globus_gram_client_job_signal(const char * job_contact,
globus_gram_protocol_job_signal_t signal,
const char * signal_arg,
int * job_status,
int * failure_code)
{
static const char* command = "GRAM_JOB_SIGNAL";
if (server->m_commands_supported->contains_anycase(command)==FALSE) {
return GAHPCLIENT_COMMAND_NOT_SUPPORTED;
}
if (!job_contact) job_contact=NULLSTRING;
if (!signal_arg) signal_arg=NULLSTRING;
std::string reqline;
char *esc1 = strdup( escapeGahpString(job_contact) );
char *esc2 = strdup( escapeGahpString(signal_arg) );
int x = sprintf(reqline,"%s %d %s",esc1,signal,esc2);
free( esc1 );
free( esc2 );
ASSERT( x > 0 );
const char *buf = reqline.c_str();
if ( !is_pending(command,buf) ) {
if ( m_mode == results_only ) {
return GAHPCLIENT_COMMAND_NOT_SUBMITTED;
}
now_pending(command,buf,normal_proxy);
}
Gahp_Args* result = get_pending_result(command,buf);
if ( result ) {
if (result->argc != 4) {
EXCEPT("Bad %s Result",command);
}
int rc = atoi(result->argv[1]);
*failure_code = atoi(result->argv[2]);
if ( rc == 0 ) {
*job_status = atoi(result->argv[3]);
}
delete result;
return rc;
}
if ( check_pending_timeout(command,buf) ) {
sprintf( error_string, "%s timed out", command );
return GAHPCLIENT_COMMAND_TIMED_OUT;
}
return GAHPCLIENT_COMMAND_PENDING;
}
|
GahpClient::globus_gram_client_job_signal(const char * job_contact,
globus_gram_protocol_job_signal_t signal,
const char * signal_arg,
int * job_status,
int * failure_code)
{
static const char* command = "GRAM_JOB_SIGNAL";
if (server->m_commands_supported->contains_anycase(command)==FALSE) {
return GAHPCLIENT_COMMAND_NOT_SUPPORTED;
}
if (!job_contact) job_contact=NULLSTRING;
if (!signal_arg) signal_arg=NULLSTRING;
std::string reqline;
char *esc1 = strdup( escapeGahpString(job_contact) );
char *esc2 = strdup( escapeGahpString(signal_arg) );
int x = sprintf(reqline,"%s %d %s",esc1,signal,esc2);
free( esc1 );
free( esc2 );
ASSERT( x > 0 );
const char *buf = reqline.c_str();
if ( !is_pending(command,buf) ) {
if ( m_mode == results_only ) {
return GAHPCLIENT_COMMAND_NOT_SUBMITTED;
}
now_pending(command,buf,normal_proxy);
}
Gahp_Args* result = get_pending_result(command,buf);
if ( result ) {
if (result->argc != 4) {
EXCEPT("Bad %s Result",command);
}
int rc = atoi(result->argv[1]);
*failure_code = atoi(result->argv[2]);
if ( rc == 0 ) {
*job_status = atoi(result->argv[3]);
}
delete result;
return rc;
}
if ( check_pending_timeout(command,buf) ) {
sprintf( error_string, "%s timed out", command );
return GAHPCLIENT_COMMAND_TIMED_OUT;
}
return GAHPCLIENT_COMMAND_PENDING;
}
|
CPP
|
htcondor
| 0 |
CVE-2015-0291
|
https://www.cvedetails.com/cve/CVE-2015-0291/
| null |
https://git.openssl.org/?p=openssl.git;a=commit;h=76343947ada960b6269090638f5391068daee88d
|
76343947ada960b6269090638f5391068daee88d
| null |
static int tls1_check_cert_param(SSL *s, X509 *x, int set_ee_md)
{
unsigned char comp_id, curve_id[2];
EVP_PKEY *pkey;
int rv;
pkey = X509_get_pubkey(x);
if (!pkey)
return 0;
/* If not EC nothing to do */
if (pkey->type != EVP_PKEY_EC) {
EVP_PKEY_free(pkey);
return 1;
}
rv = tls1_set_ec_id(curve_id, &comp_id, pkey->pkey.ec);
EVP_PKEY_free(pkey);
if (!rv)
return 0;
/*
* Can't check curve_id for client certs as we don't have a supported
* curves extension.
*/
rv = tls1_check_ec_key(s, s->server ? curve_id : NULL, &comp_id);
if (!rv)
return 0;
/*
* Special case for suite B. We *MUST* sign using SHA256+P-256 or
* SHA384+P-384, adjust digest if necessary.
*/
if (set_ee_md && tls1_suiteb(s)) {
int check_md;
size_t i;
CERT *c = s->cert;
if (curve_id[0])
return 0;
/* Check to see we have necessary signing algorithm */
if (curve_id[1] == TLSEXT_curve_P_256)
check_md = NID_ecdsa_with_SHA256;
else if (curve_id[1] == TLSEXT_curve_P_384)
check_md = NID_ecdsa_with_SHA384;
else
return 0; /* Should never happen */
for (i = 0; i < c->shared_sigalgslen; i++)
if (check_md == c->shared_sigalgs[i].signandhash_nid)
break;
if (i == c->shared_sigalgslen)
return 0;
if (set_ee_md == 2) {
if (check_md == NID_ecdsa_with_SHA256)
c->pkeys[SSL_PKEY_ECC].digest = EVP_sha256();
else
c->pkeys[SSL_PKEY_ECC].digest = EVP_sha384();
}
}
return rv;
}
|
static int tls1_check_cert_param(SSL *s, X509 *x, int set_ee_md)
{
unsigned char comp_id, curve_id[2];
EVP_PKEY *pkey;
int rv;
pkey = X509_get_pubkey(x);
if (!pkey)
return 0;
/* If not EC nothing to do */
if (pkey->type != EVP_PKEY_EC) {
EVP_PKEY_free(pkey);
return 1;
}
rv = tls1_set_ec_id(curve_id, &comp_id, pkey->pkey.ec);
EVP_PKEY_free(pkey);
if (!rv)
return 0;
/*
* Can't check curve_id for client certs as we don't have a supported
* curves extension.
*/
rv = tls1_check_ec_key(s, s->server ? curve_id : NULL, &comp_id);
if (!rv)
return 0;
/*
* Special case for suite B. We *MUST* sign using SHA256+P-256 or
* SHA384+P-384, adjust digest if necessary.
*/
if (set_ee_md && tls1_suiteb(s)) {
int check_md;
size_t i;
CERT *c = s->cert;
if (curve_id[0])
return 0;
/* Check to see we have necessary signing algorithm */
if (curve_id[1] == TLSEXT_curve_P_256)
check_md = NID_ecdsa_with_SHA256;
else if (curve_id[1] == TLSEXT_curve_P_384)
check_md = NID_ecdsa_with_SHA384;
else
return 0; /* Should never happen */
for (i = 0; i < c->shared_sigalgslen; i++)
if (check_md == c->shared_sigalgs[i].signandhash_nid)
break;
if (i == c->shared_sigalgslen)
return 0;
if (set_ee_md == 2) {
if (check_md == NID_ecdsa_with_SHA256)
c->pkeys[SSL_PKEY_ECC].digest = EVP_sha256();
else
c->pkeys[SSL_PKEY_ECC].digest = EVP_sha384();
}
}
return rv;
}
|
C
|
openssl
| 0 |
CVE-2012-5148
|
https://www.cvedetails.com/cve/CVE-2012-5148/
|
CWE-20
|
https://github.com/chromium/chromium/commit/e89cfcb9090e8c98129ae9160c513f504db74599
|
e89cfcb9090e8c98129ae9160c513f504db74599
|
Remove TabContents from TabStripModelObserver::TabDetachedAt.
BUG=107201
TEST=no visible change
Review URL: https://chromiumcodereview.appspot.com/11293205
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@167122 0039d316-1c4b-4281-b951-d872f2087c98
|
void Browser::TabDeactivated(TabContents* contents) {
fullscreen_controller_->OnTabDeactivated(contents);
search_delegate_->OnTabDeactivated(contents->web_contents());
window_->GetLocationBar()->SaveStateToContents(contents->web_contents());
}
|
void Browser::TabDeactivated(TabContents* contents) {
fullscreen_controller_->OnTabDeactivated(contents);
search_delegate_->OnTabDeactivated(contents->web_contents());
window_->GetLocationBar()->SaveStateToContents(contents->web_contents());
}
|
C
|
Chrome
| 0 |
CVE-2018-6031
|
https://www.cvedetails.com/cve/CVE-2018-6031/
|
CWE-416
|
https://github.com/chromium/chromium/commit/01c9a7e71ca435651723e8cbcab0b3ad4c5351e2
|
01c9a7e71ca435651723e8cbcab0b3ad4c5351e2
|
[pdf] Use a temporary list when unloading pages
When traversing the |deferred_page_unloads_| list and handling the
unloads it's possible for new pages to get added to the list which will
invalidate the iterator.
This CL swaps the list with an empty list and does the iteration on the
list copy. New items that are unloaded while handling the defers will be
unloaded at a later point.
Bug: 780450
Change-Id: Ic7ced1c82227109784fb536ce19a4dd51b9119ac
Reviewed-on: https://chromium-review.googlesource.com/758916
Commit-Queue: dsinclair <[email protected]>
Reviewed-by: Lei Zhang <[email protected]>
Cr-Commit-Position: refs/heads/master@{#515056}
|
PDFiumEngine::MouseDownState::MouseDownState(
const PDFiumPage::Area& area,
const PDFiumPage::LinkTarget& target)
: area_(area), target_(target) {}
|
PDFiumEngine::MouseDownState::MouseDownState(
const PDFiumPage::Area& area,
const PDFiumPage::LinkTarget& target)
: area_(area), target_(target) {}
|
C
|
Chrome
| 0 |
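The fix described above is the classic swap-then-iterate pattern: move the shared container into a local copy, reset the original, and walk the copy so callbacks can safely queue new work. A minimal self-contained C sketch of the same idea — the types and names here are hypothetical, not PDFium's:

#include <stdio.h>
#include <stddef.h>

struct page_list {
    int pages[64];      /* page indices queued for unload */
    size_t count;
};

static struct page_list deferred;    /* shared list; may grow during a flush */

static void defer_unload(int page)
{
    if (deferred.count < 64)
        deferred.pages[deferred.count++] = page;
}

static void unload_page(int page)
{
    printf("unloading %d\n", page);
    if (page % 2)                    /* simulate a handler queueing more work */
        defer_unload(page + 100);
}

static void flush_deferred_unloads(void)
{
    /* Swap the shared list with an empty one, then iterate the copy, so
     * pages queued inside unload_page() are handled on a later flush
     * instead of invalidating this traversal. */
    struct page_list work = deferred;
    deferred.count = 0;

    for (size_t i = 0; i < work.count; i++)
        unload_page(work.pages[i]);
}

int main(void)
{
    defer_unload(1);
    defer_unload(2);
    flush_deferred_unloads();        /* page 101 stays queued for next time */
    return 0;
}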
CVE-2011-4930
|
https://www.cvedetails.com/cve/CVE-2011-4930/
|
CWE-134
|
https://htcondor-git.cs.wisc.edu/?p=condor.git;a=commitdiff;h=5e5571d1a431eb3c61977b6dd6ec90186ef79867
|
5e5571d1a431eb3c61977b6dd6ec90186ef79867
| null |
char * SafeSock::serialize(char *buf)
{
char sinful_string[28];
char usernamebuf[128];
char *ptmp, *ptr = NULL;
ASSERT(buf);
memset(sinful_string, 0, 28);
memset(usernamebuf, 0, 128);
ptmp = Sock::serialize(buf);
ASSERT( ptmp );
int itmp;
sscanf(ptmp,"%d*",&itmp);
_special_state=safesock_state(itmp);
ptmp = strchr(ptmp, '*');
if(ptmp) ptmp++;
if (ptmp && (ptr = strchr(ptmp, '*')) != NULL) {
memcpy(sinful_string, ptmp, ptr - ptmp);
ptmp = ++ptr;
}
else if(ptmp) {
sscanf(ptmp,"%s",sinful_string);
}
_who.from_sinful(sinful_string);
return NULL;
}
|
char * SafeSock::serialize(char *buf)
{
char sinful_string[28];
char usernamebuf[128];
char *ptmp, *ptr = NULL;
ASSERT(buf);
memset(sinful_string, 0, 28);
memset(usernamebuf, 0, 128);
ptmp = Sock::serialize(buf);
ASSERT( ptmp );
int itmp;
sscanf(ptmp,"%d*",&itmp);
_special_state=safesock_state(itmp);
ptmp = strchr(ptmp, '*');
if(ptmp) ptmp++;
if (ptmp && (ptr = strchr(ptmp, '*')) != NULL) {
memcpy(sinful_string, ptmp, ptr - ptmp);
ptmp = ++ptr;
}
else if(ptmp) {
sscanf(ptmp,"%s",sinful_string);
}
_who.from_sinful(sinful_string);
return NULL;
}
|
CPP
|
htcondor
| 0 |
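The serialize() code above copies a '*'-delimited token into sinful_string[28] with an unchecked memcpy length and an unbounded sscanf("%s"); either can overrun the buffer on crafted input. A hedged C sketch of bounded versions of both patterns — parse_sinful and the sample inputs are invented for illustration:

#include <stdio.h>
#include <string.h>

static void parse_sinful(const char *src)
{
    char sinful[28];                         /* mirrors sinful_string[28] */
    const char *star = strchr(src, '*');

    if (star) {
        size_t n = (size_t)(star - src);
        if (n >= sizeof(sinful))
            n = sizeof(sinful) - 1;          /* clamp instead of overflowing */
        memcpy(sinful, src, n);
        sinful[n] = '\0';
    } else {
        sinful[0] = '\0';
        sscanf(src, "%27s", sinful);         /* width keeps %s in bounds */
    }
    printf("parsed: %s\n", sinful);
}

int main(void)
{
    parse_sinful("<127.0.0.1:9618>*rest");
    parse_sinful("<127.0.0.1:9618>");
    return 0;
}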
CVE-2017-6903
|
https://www.cvedetails.com/cve/CVE-2017-6903/
|
CWE-269
|
https://github.com/ioquake/ioq3/commit/376267d534476a875d8b9228149c4ee18b74a4fd
|
376267d534476a875d8b9228149c4ee18b74a4fd
|
Don't load .pk3s as .dlls, and don't load user config files from .pk3s.
|
void CL_ClearState (void) {
Com_Memset( &cl, 0, sizeof( cl ) );
}
|
void CL_ClearState (void) {
Com_Memset( &cl, 0, sizeof( cl ) );
}
|
C
|
OpenJK
| 0 |
CVE-2017-5118
|
https://www.cvedetails.com/cve/CVE-2017-5118/
|
CWE-732
|
https://github.com/chromium/chromium/commit/0ab2412a104d2f235d7b9fe19d30ef605a410832
|
0ab2412a104d2f235d7b9fe19d30ef605a410832
|
Inherit CSP when we inherit the security origin
This prevents attacks that use main window navigation to get out of the
existing csp constraints such as the related bug
Bug: 747847
Change-Id: I1e57b50da17f65d38088205b0a3c7c49ef2ae4d8
Reviewed-on: https://chromium-review.googlesource.com/592027
Reviewed-by: Mike West <[email protected]>
Commit-Queue: Andy Paicu <[email protected]>
Cr-Commit-Position: refs/heads/master@{#492333}
|
LayoutViewItem Document::GetLayoutViewItem() const {
return LayoutViewItem(layout_view_);
}
|
LayoutViewItem Document::GetLayoutViewItem() const {
return LayoutViewItem(layout_view_);
}
|
C
|
Chrome
| 0 |
CVE-2012-5148
|
https://www.cvedetails.com/cve/CVE-2012-5148/
|
CWE-20
|
https://github.com/chromium/chromium/commit/e89cfcb9090e8c98129ae9160c513f504db74599
|
e89cfcb9090e8c98129ae9160c513f504db74599
|
Remove TabContents from TabStripModelObserver::TabDetachedAt.
BUG=107201
TEST=no visible change
Review URL: https://chromiumcodereview.appspot.com/11293205
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@167122 0039d316-1c4b-4281-b951-d872f2087c98
|
bool BrowserWindowGtk::DrawFrameAsActive() const {
if (ui::ActiveWindowWatcherX::WMSupportsActivation())
return is_active_;
return true;
}
|
bool BrowserWindowGtk::DrawFrameAsActive() const {
if (ui::ActiveWindowWatcherX::WMSupportsActivation())
return is_active_;
return true;
}
|
C
|
Chrome
| 0 |
CVE-2017-7375
|
https://www.cvedetails.com/cve/CVE-2017-7375/
|
CWE-611
|
https://android.googlesource.com/platform/external/libxml2/+/308396a55280f69ad4112d4f9892f4cbeff042aa
|
308396a55280f69ad4112d4f9892f4cbeff042aa
|
DO NOT MERGE: Add validation for external entities
https://bugzilla.gnome.org/show_bug.cgi?id=780691
Bug: 36556310
Change-Id: I9450743e167c3c73af5e4071f3fc85e81d061648
(cherry picked from commit bef9af3d89d241bcb518c20cba6da2a2fd9ba049)
|
xmlParseAttValueInternal(xmlParserCtxtPtr ctxt, int *len, int *alloc,
int normalize)
{
xmlChar limit = 0;
const xmlChar *in = NULL, *start, *end, *last;
xmlChar *ret = NULL;
int line, col;
GROW;
in = (xmlChar *) CUR_PTR;
line = ctxt->input->line;
col = ctxt->input->col;
if (*in != '"' && *in != '\'') {
xmlFatalErr(ctxt, XML_ERR_ATTRIBUTE_NOT_STARTED, NULL);
return (NULL);
}
ctxt->instate = XML_PARSER_ATTRIBUTE_VALUE;
/*
* try to handle in this routine the most common case where no
* allocation of a new string is required and where content is
* pure ASCII.
*/
limit = *in++;
col++;
end = ctxt->input->end;
start = in;
if (in >= end) {
const xmlChar *oldbase = ctxt->input->base;
GROW;
if (oldbase != ctxt->input->base) {
long delta = ctxt->input->base - oldbase;
start = start + delta;
in = in + delta;
}
end = ctxt->input->end;
}
if (normalize) {
/*
* Skip any leading spaces
*/
while ((in < end) && (*in != limit) &&
((*in == 0x20) || (*in == 0x9) ||
(*in == 0xA) || (*in == 0xD))) {
if (*in == 0xA) {
line++; col = 1;
} else {
col++;
}
in++;
start = in;
if (in >= end) {
const xmlChar *oldbase = ctxt->input->base;
GROW;
if (ctxt->instate == XML_PARSER_EOF)
return(NULL);
if (oldbase != ctxt->input->base) {
long delta = ctxt->input->base - oldbase;
start = start + delta;
in = in + delta;
}
end = ctxt->input->end;
if (((in - start) > XML_MAX_TEXT_LENGTH) &&
((ctxt->options & XML_PARSE_HUGE) == 0)) {
xmlFatalErrMsg(ctxt, XML_ERR_ATTRIBUTE_NOT_FINISHED,
"AttValue length too long\n");
return(NULL);
}
}
}
while ((in < end) && (*in != limit) && (*in >= 0x20) &&
(*in <= 0x7f) && (*in != '&') && (*in != '<')) {
col++;
if ((*in++ == 0x20) && (*in == 0x20)) break;
if (in >= end) {
const xmlChar *oldbase = ctxt->input->base;
GROW;
if (ctxt->instate == XML_PARSER_EOF)
return(NULL);
if (oldbase != ctxt->input->base) {
long delta = ctxt->input->base - oldbase;
start = start + delta;
in = in + delta;
}
end = ctxt->input->end;
if (((in - start) > XML_MAX_TEXT_LENGTH) &&
((ctxt->options & XML_PARSE_HUGE) == 0)) {
xmlFatalErrMsg(ctxt, XML_ERR_ATTRIBUTE_NOT_FINISHED,
"AttValue length too long\n");
return(NULL);
}
}
}
last = in;
/*
* skip the trailing blanks
*/
while ((last[-1] == 0x20) && (last > start)) last--;
while ((in < end) && (*in != limit) &&
((*in == 0x20) || (*in == 0x9) ||
(*in == 0xA) || (*in == 0xD))) {
if (*in == 0xA) {
line++, col = 1;
} else {
col++;
}
in++;
if (in >= end) {
const xmlChar *oldbase = ctxt->input->base;
GROW;
if (ctxt->instate == XML_PARSER_EOF)
return(NULL);
if (oldbase != ctxt->input->base) {
long delta = ctxt->input->base - oldbase;
start = start + delta;
in = in + delta;
last = last + delta;
}
end = ctxt->input->end;
if (((in - start) > XML_MAX_TEXT_LENGTH) &&
((ctxt->options & XML_PARSE_HUGE) == 0)) {
xmlFatalErrMsg(ctxt, XML_ERR_ATTRIBUTE_NOT_FINISHED,
"AttValue length too long\n");
return(NULL);
}
}
}
if (((in - start) > XML_MAX_TEXT_LENGTH) &&
((ctxt->options & XML_PARSE_HUGE) == 0)) {
xmlFatalErrMsg(ctxt, XML_ERR_ATTRIBUTE_NOT_FINISHED,
"AttValue length too long\n");
return(NULL);
}
if (*in != limit) goto need_complex;
} else {
while ((in < end) && (*in != limit) && (*in >= 0x20) &&
(*in <= 0x7f) && (*in != '&') && (*in != '<')) {
in++;
col++;
if (in >= end) {
const xmlChar *oldbase = ctxt->input->base;
GROW;
if (ctxt->instate == XML_PARSER_EOF)
return(NULL);
if (oldbase != ctxt->input->base) {
long delta = ctxt->input->base - oldbase;
start = start + delta;
in = in + delta;
}
end = ctxt->input->end;
if (((in - start) > XML_MAX_TEXT_LENGTH) &&
((ctxt->options & XML_PARSE_HUGE) == 0)) {
xmlFatalErrMsg(ctxt, XML_ERR_ATTRIBUTE_NOT_FINISHED,
"AttValue length too long\n");
return(NULL);
}
}
}
last = in;
if (((in - start) > XML_MAX_TEXT_LENGTH) &&
((ctxt->options & XML_PARSE_HUGE) == 0)) {
xmlFatalErrMsg(ctxt, XML_ERR_ATTRIBUTE_NOT_FINISHED,
"AttValue length too long\n");
return(NULL);
}
if (*in != limit) goto need_complex;
}
in++;
col++;
if (len != NULL) {
*len = last - start;
ret = (xmlChar *) start;
} else {
if (alloc) *alloc = 1;
ret = xmlStrndup(start, last - start);
}
CUR_PTR = in;
ctxt->input->line = line;
ctxt->input->col = col;
if (alloc) *alloc = 0;
return ret;
need_complex:
if (alloc) *alloc = 1;
return xmlParseAttValueComplex(ctxt, len, normalize);
}
|
xmlParseAttValueInternal(xmlParserCtxtPtr ctxt, int *len, int *alloc,
int normalize)
{
xmlChar limit = 0;
const xmlChar *in = NULL, *start, *end, *last;
xmlChar *ret = NULL;
int line, col;
GROW;
in = (xmlChar *) CUR_PTR;
line = ctxt->input->line;
col = ctxt->input->col;
if (*in != '"' && *in != '\'') {
xmlFatalErr(ctxt, XML_ERR_ATTRIBUTE_NOT_STARTED, NULL);
return (NULL);
}
ctxt->instate = XML_PARSER_ATTRIBUTE_VALUE;
/*
* try to handle in this routine the most common case where no
* allocation of a new string is required and where content is
* pure ASCII.
*/
limit = *in++;
col++;
end = ctxt->input->end;
start = in;
if (in >= end) {
const xmlChar *oldbase = ctxt->input->base;
GROW;
if (oldbase != ctxt->input->base) {
long delta = ctxt->input->base - oldbase;
start = start + delta;
in = in + delta;
}
end = ctxt->input->end;
}
if (normalize) {
/*
* Skip any leading spaces
*/
while ((in < end) && (*in != limit) &&
((*in == 0x20) || (*in == 0x9) ||
(*in == 0xA) || (*in == 0xD))) {
if (*in == 0xA) {
line++; col = 1;
} else {
col++;
}
in++;
start = in;
if (in >= end) {
const xmlChar *oldbase = ctxt->input->base;
GROW;
if (ctxt->instate == XML_PARSER_EOF)
return(NULL);
if (oldbase != ctxt->input->base) {
long delta = ctxt->input->base - oldbase;
start = start + delta;
in = in + delta;
}
end = ctxt->input->end;
if (((in - start) > XML_MAX_TEXT_LENGTH) &&
((ctxt->options & XML_PARSE_HUGE) == 0)) {
xmlFatalErrMsg(ctxt, XML_ERR_ATTRIBUTE_NOT_FINISHED,
"AttValue length too long\n");
return(NULL);
}
}
}
while ((in < end) && (*in != limit) && (*in >= 0x20) &&
(*in <= 0x7f) && (*in != '&') && (*in != '<')) {
col++;
if ((*in++ == 0x20) && (*in == 0x20)) break;
if (in >= end) {
const xmlChar *oldbase = ctxt->input->base;
GROW;
if (ctxt->instate == XML_PARSER_EOF)
return(NULL);
if (oldbase != ctxt->input->base) {
long delta = ctxt->input->base - oldbase;
start = start + delta;
in = in + delta;
}
end = ctxt->input->end;
if (((in - start) > XML_MAX_TEXT_LENGTH) &&
((ctxt->options & XML_PARSE_HUGE) == 0)) {
xmlFatalErrMsg(ctxt, XML_ERR_ATTRIBUTE_NOT_FINISHED,
"AttValue length too long\n");
return(NULL);
}
}
}
last = in;
/*
* skip the trailing blanks
*/
while ((last[-1] == 0x20) && (last > start)) last--;
while ((in < end) && (*in != limit) &&
((*in == 0x20) || (*in == 0x9) ||
(*in == 0xA) || (*in == 0xD))) {
if (*in == 0xA) {
line++, col = 1;
} else {
col++;
}
in++;
if (in >= end) {
const xmlChar *oldbase = ctxt->input->base;
GROW;
if (ctxt->instate == XML_PARSER_EOF)
return(NULL);
if (oldbase != ctxt->input->base) {
long delta = ctxt->input->base - oldbase;
start = start + delta;
in = in + delta;
last = last + delta;
}
end = ctxt->input->end;
if (((in - start) > XML_MAX_TEXT_LENGTH) &&
((ctxt->options & XML_PARSE_HUGE) == 0)) {
xmlFatalErrMsg(ctxt, XML_ERR_ATTRIBUTE_NOT_FINISHED,
"AttValue length too long\n");
return(NULL);
}
}
}
if (((in - start) > XML_MAX_TEXT_LENGTH) &&
((ctxt->options & XML_PARSE_HUGE) == 0)) {
xmlFatalErrMsg(ctxt, XML_ERR_ATTRIBUTE_NOT_FINISHED,
"AttValue length too long\n");
return(NULL);
}
if (*in != limit) goto need_complex;
} else {
while ((in < end) && (*in != limit) && (*in >= 0x20) &&
(*in <= 0x7f) && (*in != '&') && (*in != '<')) {
in++;
col++;
if (in >= end) {
const xmlChar *oldbase = ctxt->input->base;
GROW;
if (ctxt->instate == XML_PARSER_EOF)
return(NULL);
if (oldbase != ctxt->input->base) {
long delta = ctxt->input->base - oldbase;
start = start + delta;
in = in + delta;
}
end = ctxt->input->end;
if (((in - start) > XML_MAX_TEXT_LENGTH) &&
((ctxt->options & XML_PARSE_HUGE) == 0)) {
xmlFatalErrMsg(ctxt, XML_ERR_ATTRIBUTE_NOT_FINISHED,
"AttValue length too long\n");
return(NULL);
}
}
}
last = in;
if (((in - start) > XML_MAX_TEXT_LENGTH) &&
((ctxt->options & XML_PARSE_HUGE) == 0)) {
xmlFatalErrMsg(ctxt, XML_ERR_ATTRIBUTE_NOT_FINISHED,
"AttValue length too long\n");
return(NULL);
}
if (*in != limit) goto need_complex;
}
in++;
col++;
if (len != NULL) {
*len = last - start;
ret = (xmlChar *) start;
} else {
if (alloc) *alloc = 1;
ret = xmlStrndup(start, last - start);
}
CUR_PTR = in;
ctxt->input->line = line;
ctxt->input->col = col;
if (alloc) *alloc = 0;
return ret;
need_complex:
if (alloc) *alloc = 1;
return xmlParseAttValueComplex(ctxt, len, normalize);
}
|
C
|
Android
| 0 |
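A detail worth noting in the scanner above: because GROW can reallocate the input buffer, the XML_MAX_TEXT_LENGTH guard is re-applied after every refill rather than once at the end. A condensed C version of that guard — the macro values and names are placeholders, not libxml2's:

#include <stdio.h>

#define MAX_TEXT_LENGTH (10 * 1024 * 1024)   /* stand-in for XML_MAX_TEXT_LENGTH */
#define OPT_HUGE 0x1                          /* stand-in for XML_PARSE_HUGE */

/* Returns 0 when the accumulated attribute value exceeds the cap and the
 * caller did not opt in to huge documents, matching the xmlFatalErrMsg path. */
static int check_span(const char *start, const char *in, int options)
{
    if ((in - start) > MAX_TEXT_LENGTH && (options & OPT_HUGE) == 0) {
        fprintf(stderr, "AttValue length too long\n");
        return 0;
    }
    return 1;
}

int main(void)
{
    char buf[8] = {0};
    return check_span(buf, buf + 4, 0) ? 0 : 1;
}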
CVE-2018-18386
|
https://www.cvedetails.com/cve/CVE-2018-18386/
|
CWE-704
|
https://github.com/torvalds/linux/commit/966031f340185eddd05affcf72b740549f056348
|
966031f340185eddd05affcf72b740549f056348
|
n_tty: fix EXTPROC vs ICANON interaction with TIOCINQ (aka FIONREAD)
We added support for EXTPROC back in 2010 in commit 26df6d13406d ("tty:
Add EXTPROC support for LINEMODE") and the intent was to allow it to
override some (all?) ICANON behavior. Quoting from that original commit
message:
There is a new bit in the termios local flag word, EXTPROC.
When this bit is set, several aspects of the terminal driver
are disabled. Input line editing, character echo, and mapping
of signals are all disabled. This allows the telnetd to turn
off these functions when in linemode, but still keep track of
what state the user wants the terminal to be in.
but the problem turns out that "several aspects of the terminal driver
are disabled" is a bit ambiguous, and you can really confuse the n_tty
layer by setting EXTPROC and then causing some of the ICANON invariants
to no longer be maintained.
This fixes at least one such case (TIOCINQ) becoming unhappy because of
the confusion over whether ICANON really means ICANON when EXTPROC is set.
This basically makes TIOCINQ match the case of read: if EXTPROC is set,
we ignore ICANON. Also, make sure to reset the ICANON state if EXTPROC
changes, not just if ICANON changes.
Fixes: 26df6d13406d ("tty: Add EXTPROC support for LINEMODE")
Reported-by: Tetsuo Handa <[email protected]>
Reported-by: syzkaller <[email protected]>
Cc: Jiri Slaby <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
|
static inline void put_tty_queue(unsigned char c, struct n_tty_data *ldata)
{
*read_buf_addr(ldata, ldata->read_head) = c;
ldata->read_head++;
}
|
static inline void put_tty_queue(unsigned char c, struct n_tty_data *ldata)
{
*read_buf_addr(ldata, ldata->read_head) = c;
ldata->read_head++;
}
|
C
|
linux
| 0 |
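The rule the commit above settles on: when EXTPROC is set, ICANON is ignored, so TIOCINQ must report the same byte count read() would consume. A hedged sketch of that predicate in C — the struct and names are invented, this is not n_tty.c:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct tty_flags { bool icanon; bool extproc; };

/* EXTPROC overrides ICANON, so the canonical (line-oriented) count is
 * only used when ICANON is set and EXTPROC is not. */
static size_t bytes_readable(struct tty_flags f,
                             size_t canon_bytes, size_t raw_bytes)
{
    bool canonical = f.icanon && !f.extproc;
    return canonical ? canon_bytes : raw_bytes;
}

int main(void)
{
    struct tty_flags f = { .icanon = true, .extproc = true };
    printf("%zu\n", bytes_readable(f, 0, 5));   /* prints 5, not 0 */
    return 0;
}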
CVE-2011-4621
|
https://www.cvedetails.com/cve/CVE-2011-4621/
| null |
https://github.com/torvalds/linux/commit/f26f9aff6aaf67e9a430d16c266f91b13a5bff64
|
f26f9aff6aaf67e9a430d16c266f91b13a5bff64
|
Sched: fix skip_clock_update optimization
idle_balance() drops/retakes rq->lock, leaving the previous task
vulnerable to set_tsk_need_resched(). Clear it after we return
from balancing instead, and in setup_thread_stack() as well, so
no successfully descheduled or never scheduled task has it set.
Need resched confused the skip_clock_update logic, which assumes
that the next call to update_rq_clock() will come nearly immediately
after being set. Make the optimization robust against the waking
a sleeper before it successfully deschedules case by checking that
the current task has not been dequeued before setting the flag,
since it is that useless clock update we're trying to save, and
clear unconditionally in schedule() proper instead of conditionally
in put_prev_task().
Signed-off-by: Mike Galbraith <[email protected]>
Reported-by: Bjoern B. Brandenburg <[email protected]>
Tested-by: Yong Zhang <[email protected]>
Signed-off-by: Peter Zijlstra <[email protected]>
Cc: [email protected]
LKML-Reference: <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
|
unsigned long wait_task_inactive(struct task_struct *p, long match_state)
{
unsigned long flags;
int running, on_rq;
unsigned long ncsw;
struct rq *rq;
for (;;) {
/*
* We do the initial early heuristics without holding
* any task-queue locks at all. We'll only try to get
* the runqueue lock when things look like they will
* work out!
*/
rq = task_rq(p);
/*
* If the task is actively running on another CPU
* still, just relax and busy-wait without holding
* any locks.
*
* NOTE! Since we don't hold any locks, it's not
* even sure that "rq" stays as the right runqueue!
* But we don't care, since "task_running()" will
* return false if the runqueue has changed and p
* is actually now running somewhere else!
*/
while (task_running(rq, p)) {
if (match_state && unlikely(p->state != match_state))
return 0;
cpu_relax();
}
/*
* Ok, time to look more closely! We need the rq
* lock now, to be *sure*. If we're wrong, we'll
* just go back and repeat.
*/
rq = task_rq_lock(p, &flags);
trace_sched_wait_task(p);
running = task_running(rq, p);
on_rq = p->se.on_rq;
ncsw = 0;
if (!match_state || p->state == match_state)
ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
task_rq_unlock(rq, &flags);
/*
* If it changed from the expected state, bail out now.
*/
if (unlikely(!ncsw))
break;
/*
* Was it really running after all now that we
* checked with the proper locks actually held?
*
* Oops. Go back and try again..
*/
if (unlikely(running)) {
cpu_relax();
continue;
}
/*
* It's not enough that it's not actively running,
* it must be off the runqueue _entirely_, and not
* preempted!
*
* So if it was still runnable (but just not actively
* running right now), it's preempted, and we should
* yield - it could be a while.
*/
if (unlikely(on_rq)) {
schedule_timeout_uninterruptible(1);
continue;
}
/*
* Ahh, all good. It wasn't running, and it wasn't
* runnable, which means that it will never become
* running in the future either. We're all done!
*/
break;
}
return ncsw;
}
|
unsigned long wait_task_inactive(struct task_struct *p, long match_state)
{
unsigned long flags;
int running, on_rq;
unsigned long ncsw;
struct rq *rq;
for (;;) {
/*
* We do the initial early heuristics without holding
* any task-queue locks at all. We'll only try to get
* the runqueue lock when things look like they will
* work out!
*/
rq = task_rq(p);
/*
* If the task is actively running on another CPU
* still, just relax and busy-wait without holding
* any locks.
*
* NOTE! Since we don't hold any locks, it's not
* even sure that "rq" stays as the right runqueue!
* But we don't care, since "task_running()" will
* return false if the runqueue has changed and p
* is actually now running somewhere else!
*/
while (task_running(rq, p)) {
if (match_state && unlikely(p->state != match_state))
return 0;
cpu_relax();
}
/*
* Ok, time to look more closely! We need the rq
* lock now, to be *sure*. If we're wrong, we'll
* just go back and repeat.
*/
rq = task_rq_lock(p, &flags);
trace_sched_wait_task(p);
running = task_running(rq, p);
on_rq = p->se.on_rq;
ncsw = 0;
if (!match_state || p->state == match_state)
ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
task_rq_unlock(rq, &flags);
/*
* If it changed from the expected state, bail out now.
*/
if (unlikely(!ncsw))
break;
/*
* Was it really running after all now that we
* checked with the proper locks actually held?
*
* Oops. Go back and try again..
*/
if (unlikely(running)) {
cpu_relax();
continue;
}
/*
* It's not enough that it's not actively running,
* it must be off the runqueue _entirely_, and not
* preempted!
*
* So if it was still runnable (but just not actively
* running right now), it's preempted, and we should
* yield - it could be a while.
*/
if (unlikely(on_rq)) {
schedule_timeout_uninterruptible(1);
continue;
}
/*
* Ahh, all good. It wasn't running, and it wasn't
* runnable, which means that it will never become
* running in the future either. We're all done!
*/
break;
}
return ncsw;
}
|
C
|
linux
| 0 |
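The commit above pairs two rules: only arm skip_clock_update when the task being rescheduled is still queued, and clear a task's need-resched flag unconditionally once schedule() runs, so a stale flag left after a dropped lock cannot suppress a needed clock update. A very rough C shape of those rules — this is a simplification for illustration, not the kernel's scheduler code:

#include <stdbool.h>

struct task { bool need_resched; bool queued; };
struct rq   { bool skip_clock_update; };

static void resched_task(struct rq *rq, struct task *t)
{
    t->need_resched = true;
    if (t->queued)                   /* the useless update we can save */
        rq->skip_clock_update = true;
}

static void schedule_begin(struct rq *rq, struct task *prev)
{
    prev->need_resched = false;      /* cleared unconditionally */
    rq->skip_clock_update = false;   /* consumed, never left stale */
}

int main(void)
{
    struct rq rq = { false };
    struct task t = { false, true };
    resched_task(&rq, &t);
    schedule_begin(&rq, &t);
    return rq.skip_clock_update ? 1 : 0;
}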
CVE-2016-6516
|
https://www.cvedetails.com/cve/CVE-2016-6516/
|
CWE-119
|
https://github.com/torvalds/linux/commit/10eec60ce79187686e052092e5383c99b4420a20
|
10eec60ce79187686e052092e5383c99b4420a20
|
vfs: ioctl: prevent double-fetch in dedupe ioctl
This prevents a double-fetch from user space that can lead to an
undersized allocation and heap overflow.
Fixes: 54dbc1517237 ("vfs: hoist the btrfs deduplication ioctl to the vfs")
Signed-off-by: Scott Bauer <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
static int ioctl_fsthaw(struct file *filp)
{
struct super_block *sb = file_inode(filp)->i_sb;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
/* Thaw */
if (sb->s_op->thaw_super)
return sb->s_op->thaw_super(sb);
return thaw_super(sb);
}
|
static int ioctl_fsthaw(struct file *filp)
{
struct super_block *sb = file_inode(filp)->i_sb;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
/* Thaw */
if (sb->s_op->thaw_super)
return sb->s_op->thaw_super(sb);
return thaw_super(sb);
}
|
C
|
linux
| 0 |
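The ioctl_fsthaw() shown is just surrounding context; the dedupe fix itself removes a double fetch: the handler read a user-supplied count, sized an allocation from it, then read the structure again, and a racing thread could grow the count between the two reads. A hedged C sketch of the bug shape and the repair — copy_in, struct req, and handle_ioctl are invented stand-ins:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct req { uint16_t count; };      /* user-controlled header */

/* Stand-in for copy_from_user(): user memory may change between calls,
 * which is exactly what makes a second fetch of count dangerous. */
static int copy_in(void *dst, const void *usrc, size_t n)
{
    memcpy(dst, usrc, n);
    return 0;
}

static void *handle_ioctl(const void *uarg, size_t item_size)
{
    struct req hdr;
    void *buf;
    size_t size;

    if (copy_in(&hdr, uarg, sizeof(hdr)))
        return NULL;

    size = sizeof(hdr) + (size_t)hdr.count * item_size;
    buf = malloc(size);
    if (!buf)
        return NULL;

    if (copy_in(buf, uarg, size)) {  /* re-reads the header... */
        free(buf);
        return NULL;
    }
    /* ...so pin the count to the value the allocation was sized for,
     * the same idea as the dedupe patch. */
    ((struct req *)buf)->count = hdr.count;
    return buf;
}

int main(void)
{
    struct { struct req r; char items[4]; } user = { { 2 }, "ab" };
    free(handle_ioctl(&user, 2));
    return 0;
}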
CVE-2013-4160
|
https://www.cvedetails.com/cve/CVE-2013-4160/
| null |
https://github.com/mm2/Little-CMS/commit/91c2db7f2559be504211b283bc3a2c631d6f06d9
|
91c2db7f2559be504211b283bc3a2c631d6f06d9
|
Non happy-path fixes
|
cmsBool CMSEXPORT cmsDictAddEntry(cmsHANDLE hDict, const wchar_t* Name, const wchar_t* Value, const cmsMLU *DisplayName, const cmsMLU *DisplayValue)
{
_cmsDICT* dict = (_cmsDICT*) hDict;
cmsDICTentry *entry;
_cmsAssert(dict != NULL);
_cmsAssert(Name != NULL);
entry = (cmsDICTentry*) _cmsMallocZero(dict ->ContextID, sizeof(cmsDICTentry));
if (entry == NULL) return FALSE;
entry ->DisplayName = cmsMLUdup(DisplayName);
entry ->DisplayValue = cmsMLUdup(DisplayValue);
entry ->Name = DupWcs(dict ->ContextID, Name);
entry ->Value = DupWcs(dict ->ContextID, Value);
entry ->Next = dict ->head;
dict ->head = entry;
return TRUE;
}
|
cmsBool CMSEXPORT cmsDictAddEntry(cmsHANDLE hDict, const wchar_t* Name, const wchar_t* Value, const cmsMLU *DisplayName, const cmsMLU *DisplayValue)
{
_cmsDICT* dict = (_cmsDICT*) hDict;
cmsDICTentry *entry;
_cmsAssert(dict != NULL);
_cmsAssert(Name != NULL);
entry = (cmsDICTentry*) _cmsMallocZero(dict ->ContextID, sizeof(cmsDICTentry));
if (entry == NULL) return FALSE;
entry ->DisplayName = cmsMLUdup(DisplayName);
entry ->DisplayValue = cmsMLUdup(DisplayValue);
entry ->Name = DupWcs(dict ->ContextID, Name);
entry ->Value = DupWcs(dict ->ContextID, Value);
entry ->Next = dict ->head;
dict ->head = entry;
return TRUE;
}
|
C
|
Little-CMS
| 0 |
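"Non happy-path fixes" here refers to allocation-failure handling: cmsDictAddEntry() stores the results of cmsMLUdup() and DupWcs() without checking them, so an OOM can link a half-initialized entry into the dictionary. A hedged, simplified C sketch of the checked pattern — dup_wcs, struct entry, and dict_add are invented, not the upstream patch:

#include <stdlib.h>
#include <string.h>
#include <wchar.h>

struct entry { wchar_t *name; wchar_t *value; struct entry *next; };

static wchar_t *dup_wcs(const wchar_t *s)
{
    if (!s) return NULL;
    size_t n = wcslen(s) + 1;
    wchar_t *d = malloc(n * sizeof(*d));
    if (d) memcpy(d, s, n * sizeof(*d));
    return d;
}

/* Check every duplication and unwind on failure instead of linking a
 * half-initialized entry into the list. */
static int dict_add(struct entry **head, const wchar_t *name, const wchar_t *value)
{
    struct entry *e = calloc(1, sizeof(*e));
    if (!e) return 0;
    e->name  = dup_wcs(name);
    e->value = dup_wcs(value);
    if (!e->name || (value && !e->value)) {
        free(e->name);
        free(e->value);
        free(e);
        return 0;
    }
    e->next = *head;
    *head = e;
    return 1;
}

int main(void)
{
    struct entry *head = NULL;
    return dict_add(&head, L"Name", L"Value") ? 0 : 1;
}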
CVE-2013-1767
|
https://www.cvedetails.com/cve/CVE-2013-1767/
|
CWE-399
|
https://github.com/torvalds/linux/commit/5f00110f7273f9ff04ac69a5f85bb535a4fd0987
|
5f00110f7273f9ff04ac69a5f85bb535a4fd0987
|
tmpfs: fix use-after-free of mempolicy object
The tmpfs remount logic preserves filesystem mempolicy if the mpol=M
option is not specified in the remount request. A new policy can be
specified if mpol=M is given.
Before this patch remounting an mpol bound tmpfs without specifying
mpol= mount option in the remount request would set the filesystem's
mempolicy object to a freed mempolicy object.
To reproduce the problem boot a DEBUG_PAGEALLOC kernel and run:
# mkdir /tmp/x
# mount -t tmpfs -o size=100M,mpol=interleave nodev /tmp/x
# grep /tmp/x /proc/mounts
nodev /tmp/x tmpfs rw,relatime,size=102400k,mpol=interleave:0-3 0 0
# mount -o remount,size=200M nodev /tmp/x
# grep /tmp/x /proc/mounts
nodev /tmp/x tmpfs rw,relatime,size=204800k,mpol=??? 0 0
# note ? garbage in mpol=... output above
# dd if=/dev/zero of=/tmp/x/f count=1
# panic here
Panic:
BUG: unable to handle kernel NULL pointer dereference at (null)
IP: [< (null)>] (null)
[...]
Oops: 0010 [#1] SMP DEBUG_PAGEALLOC
Call Trace:
mpol_shared_policy_init+0xa5/0x160
shmem_get_inode+0x209/0x270
shmem_mknod+0x3e/0xf0
shmem_create+0x18/0x20
vfs_create+0xb5/0x130
do_last+0x9a1/0xea0
path_openat+0xb3/0x4d0
do_filp_open+0x42/0xa0
do_sys_open+0xfe/0x1e0
compat_sys_open+0x1b/0x20
cstar_dispatch+0x7/0x1f
Non-debug kernels will not crash immediately because referencing the
dangling mpol will not cause a fault. Instead the filesystem will
reference a freed mempolicy object, which will cause unpredictable
behavior.
The problem boils down to a dropped mpol reference below if
shmem_parse_options() does not allocate a new mpol:
config = *sbinfo
shmem_parse_options(data, &config, true)
mpol_put(sbinfo->mpol)
sbinfo->mpol = config.mpol /* BUG: saves unreferenced mpol */
This patch avoids the crash by not releasing the mempolicy if
shmem_parse_options() doesn't create a new mpol.
How far back does this issue go? I see it in both 2.6.36 and 3.3. I did
not look back further.
Signed-off-by: Greg Thelen <[email protected]>
Acked-by: Hugh Dickins <[email protected]>
Cc: <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
static struct inode *shmem_get_inode(struct super_block *sb, const struct inode *dir,
umode_t mode, dev_t dev, unsigned long flags)
{
struct inode *inode;
struct shmem_inode_info *info;
struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
if (shmem_reserve_inode(sb))
return NULL;
inode = new_inode(sb);
if (inode) {
inode->i_ino = get_next_ino();
inode_init_owner(inode, dir, mode);
inode->i_blocks = 0;
inode->i_mapping->backing_dev_info = &shmem_backing_dev_info;
inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
inode->i_generation = get_seconds();
info = SHMEM_I(inode);
memset(info, 0, (char *)inode - (char *)info);
spin_lock_init(&info->lock);
info->flags = flags & VM_NORESERVE;
INIT_LIST_HEAD(&info->swaplist);
simple_xattrs_init(&info->xattrs);
cache_no_acl(inode);
switch (mode & S_IFMT) {
default:
inode->i_op = &shmem_special_inode_operations;
init_special_inode(inode, mode, dev);
break;
case S_IFREG:
inode->i_mapping->a_ops = &shmem_aops;
inode->i_op = &shmem_inode_operations;
inode->i_fop = &shmem_file_operations;
mpol_shared_policy_init(&info->policy,
shmem_get_sbmpol(sbinfo));
break;
case S_IFDIR:
inc_nlink(inode);
/* Some things misbehave if size == 0 on a directory */
inode->i_size = 2 * BOGO_DIRENT_SIZE;
inode->i_op = &shmem_dir_inode_operations;
inode->i_fop = &simple_dir_operations;
break;
case S_IFLNK:
/*
* Must not load anything in the rbtree,
* mpol_free_shared_policy will not be called.
*/
mpol_shared_policy_init(&info->policy, NULL);
break;
}
} else
shmem_free_inode(sb);
return inode;
}
|
static struct inode *shmem_get_inode(struct super_block *sb, const struct inode *dir,
umode_t mode, dev_t dev, unsigned long flags)
{
struct inode *inode;
struct shmem_inode_info *info;
struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
if (shmem_reserve_inode(sb))
return NULL;
inode = new_inode(sb);
if (inode) {
inode->i_ino = get_next_ino();
inode_init_owner(inode, dir, mode);
inode->i_blocks = 0;
inode->i_mapping->backing_dev_info = &shmem_backing_dev_info;
inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
inode->i_generation = get_seconds();
info = SHMEM_I(inode);
memset(info, 0, (char *)inode - (char *)info);
spin_lock_init(&info->lock);
info->flags = flags & VM_NORESERVE;
INIT_LIST_HEAD(&info->swaplist);
simple_xattrs_init(&info->xattrs);
cache_no_acl(inode);
switch (mode & S_IFMT) {
default:
inode->i_op = &shmem_special_inode_operations;
init_special_inode(inode, mode, dev);
break;
case S_IFREG:
inode->i_mapping->a_ops = &shmem_aops;
inode->i_op = &shmem_inode_operations;
inode->i_fop = &shmem_file_operations;
mpol_shared_policy_init(&info->policy,
shmem_get_sbmpol(sbinfo));
break;
case S_IFDIR:
inc_nlink(inode);
/* Some things misbehave if size == 0 on a directory */
inode->i_size = 2 * BOGO_DIRENT_SIZE;
inode->i_op = &shmem_dir_inode_operations;
inode->i_fop = &simple_dir_operations;
break;
case S_IFLNK:
/*
* Must not load anything in the rbtree,
* mpol_free_shared_policy will not be called.
*/
mpol_shared_policy_init(&info->policy, NULL);
break;
}
} else
shmem_free_inode(sb);
return inode;
}
|
C
|
linux
| 0 |
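The pseudo-code in the commit message reduces to one rule: during remount, only drop the old superblock mempolicy when parsing actually produced a new one; a size-only remount must leave the existing object alone. A hedged C sketch of that rule — mpol_put here is a toy refcount, not the kernel's:

#include <stdlib.h>

struct mpol { int refs; };
struct sb_info { struct mpol *mpol; };

static void mpol_put(struct mpol *m)
{
    if (m && --m->refs == 0)
        free(m);
}

/* parsed is NULL unless the remount string contained mpol=..., so the
 * old policy is only released when there is a real replacement. */
static void remount_mpol(struct sb_info *sbinfo, struct mpol *parsed)
{
    if (parsed) {
        mpol_put(sbinfo->mpol);
        sbinfo->mpol = parsed;       /* takes over the new reference */
    }
}

int main(void)
{
    struct sb_info sb = { NULL };
    struct mpol *m = calloc(1, sizeof(*m));
    if (!m) return 1;
    m->refs = 1;
    remount_mpol(&sb, m);            /* installs the parsed policy */
    remount_mpol(&sb, NULL);         /* size-only remount: policy survives */
    mpol_put(sb.mpol);
    return 0;
}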
CVE-2018-14395
|
https://www.cvedetails.com/cve/CVE-2018-14395/
|
CWE-369
|
https://github.com/FFmpeg/FFmpeg/commit/fa19fbcf712a6a6cc5a5cfdc3254a97b9bce6582
|
fa19fbcf712a6a6cc5a5cfdc3254a97b9bce6582
|
avformat/movenc: Write version 2 of audio atom if channels is not known
The version 1 needs the channel count and would divide by 0
Fixes: division by 0
Fixes: fpe_movenc.c_1108_1.ogg
Fixes: fpe_movenc.c_1108_2.ogg
Fixes: fpe_movenc.c_1108_3.wav
Found-by: #CHEN HONGXU# <[email protected]>
Signed-off-by: Michael Niedermayer <[email protected]>
|
static int get_samples_per_packet(MOVTrack *track)
{
int i, first_duration;
/* use 1 for raw PCM */
if (!track->audio_vbr)
return 1;
/* check to see if duration is constant for all clusters */
if (!track->entry)
return 0;
first_duration = get_cluster_duration(track, 0);
for (i = 1; i < track->entry; i++) {
if (get_cluster_duration(track, i) != first_duration)
return 0;
}
return first_duration;
}
|
static int get_samples_per_packet(MOVTrack *track)
{
int i, first_duration;
/* use 1 for raw PCM */
if (!track->audio_vbr)
return 1;
/* check to see if duration is constant for all clusters */
if (!track->entry)
return 0;
first_duration = get_cluster_duration(track, 0);
for (i = 1; i < track->entry; i++) {
if (get_cluster_duration(track, i) != first_duration)
return 0;
}
return first_duration;
}
|
C
|
FFmpeg
| 0 |
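get_samples_per_packet() above is only nearby context; the fix itself selects the atom version before any per-channel arithmetic, because the version-1 sound sample description divides by the channel count. A hedged C sketch of the guard — the version numbers are real, but the helper names and simplifications are not FFmpeg's:

#include <stdint.h>

/* Decide the version first, so channels == 0 can never reach a division. */
static int choose_audio_atom_version(int channels, int is_pcm)
{
    if (channels <= 0)
        return 2;                    /* version 2 needs no per-channel fields */
    return is_pcm ? 1 : 0;           /* simplified; real logic has more cases */
}

static uint32_t per_channel(uint32_t total, int channels)
{
    return channels > 0 ? total / (uint32_t)channels : 0;
}

int main(void)
{
    return (choose_audio_atom_version(0, 1) == 2 &&
            per_channel(8, 0) == 0) ? 0 : 1;
}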
CVE-2013-6626
|
https://www.cvedetails.com/cve/CVE-2013-6626/
| null |
https://github.com/chromium/chromium/commit/90fb08ed0146c9beacfd4dde98a20fc45419fff3
|
90fb08ed0146c9beacfd4dde98a20fc45419fff3
|
Cancel JavaScript dialogs when an interstitial appears.
BUG=295695
TEST=See bug for repro steps.
Review URL: https://chromiumcodereview.appspot.com/24360011
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@225026 0039d316-1c4b-4281-b951-d872f2087c98
|
WebContents* WebContents::Create(const WebContents::CreateParams& params) {
return WebContentsImpl::CreateWithOpener(
params, static_cast<WebContentsImpl*>(params.opener));
}
|
WebContents* WebContents::Create(const WebContents::CreateParams& params) {
return WebContentsImpl::CreateWithOpener(
params, static_cast<WebContentsImpl*>(params.opener));
}
|
C
|
Chrome
| 0 |
null | null | null |
https://github.com/chromium/chromium/commit/327585cb0eab0859518643a2d00917081f7e7645
|
327585cb0eab0859518643a2d00917081f7e7645
|
2010-10-26 Kenneth Russell <[email protected]>
Reviewed by Andreas Kling.
Valgrind failure in GraphicsContext3DInternal::reshape
https://bugs.webkit.org/show_bug.cgi?id=48284
* src/WebGraphicsContext3DDefaultImpl.cpp:
(WebKit::WebGraphicsContext3DDefaultImpl::WebGraphicsContext3DDefaultImpl):
git-svn-id: svn://svn.chromium.org/blink/trunk@70534 bbb929c8-8fbe-4397-9dbb-9b2b20218538
|
void WebGraphicsContext3DDefaultImpl::vertexAttribPointer(unsigned long indx, int size, int type, bool normalized,
unsigned long stride, unsigned long offset)
{
makeContextCurrent();
if (m_boundArrayBuffer <= 0) {
return;
}
if (indx < NumTrackedPointerStates) {
VertexAttribPointerState& state = m_vertexAttribPointerState[indx];
state.buffer = m_boundArrayBuffer;
state.indx = indx;
state.size = size;
state.type = type;
state.normalized = normalized;
state.stride = stride;
state.offset = offset;
}
glVertexAttribPointer(indx, size, type, normalized, stride,
reinterpret_cast<void*>(static_cast<intptr_t>(offset)));
}
|
void WebGraphicsContext3DDefaultImpl::vertexAttribPointer(unsigned long indx, int size, int type, bool normalized,
unsigned long stride, unsigned long offset)
{
makeContextCurrent();
if (m_boundArrayBuffer <= 0) {
return;
}
if (indx < NumTrackedPointerStates) {
VertexAttribPointerState& state = m_vertexAttribPointerState[indx];
state.buffer = m_boundArrayBuffer;
state.indx = indx;
state.size = size;
state.type = type;
state.normalized = normalized;
state.stride = stride;
state.offset = offset;
}
glVertexAttribPointer(indx, size, type, normalized, stride,
reinterpret_cast<void*>(static_cast<intptr_t>(offset)));
}
|
C
|
Chrome
| 0 |
CVE-2016-5219
|
https://www.cvedetails.com/cve/CVE-2016-5219/
|
CWE-416
|
https://github.com/chromium/chromium/commit/a4150b688a754d3d10d2ca385155b1c95d77d6ae
|
a4150b688a754d3d10d2ca385155b1c95d77d6ae
|
Add GL_PROGRAM_COMPLETION_QUERY_CHROMIUM
This makes the query of GL_COMPLETION_STATUS_KHR to programs much
cheaper by minimizing the round-trip to the GPU thread.
Bug: 881152, 957001
Change-Id: Iadfa798af29225e752c710ca5c25f50b3dd3101a
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1586630
Commit-Queue: Kenneth Russell <[email protected]>
Reviewed-by: Kentaro Hara <[email protected]>
Reviewed-by: Geoff Lang <[email protected]>
Reviewed-by: Kenneth Russell <[email protected]>
Cr-Commit-Position: refs/heads/master@{#657568}
|
error::Error GLES2DecoderPassthroughImpl::DoPopGroupMarkerEXT() {
api()->glPopGroupMarkerEXTFn();
return error::kNoError;
}
|
error::Error GLES2DecoderPassthroughImpl::DoPopGroupMarkerEXT() {
api()->glPopGroupMarkerEXTFn();
return error::kNoError;
}
|
C
|
Chrome
| 0 |
CVE-2018-6079
|
https://www.cvedetails.com/cve/CVE-2018-6079/
|
CWE-200
|
https://github.com/chromium/chromium/commit/d128139d53e9268e87921e82d89b3f2053cb83fd
|
d128139d53e9268e87921e82d89b3f2053cb83fd
|
Fix tabs sharing TEXTURE_2D_ARRAY/TEXTURE_3D data.
In linux and android, we are seeing an issue where texture data from one
tab overwrites the texture data of another tab. This is happening for apps
which are using webgl2 texture of type TEXTURE_2D_ARRAY/TEXTURE_3D.
Due to a bug in virtual context save/restore code for above texture formats,
the texture data is not properly restored while switching tabs. Hence
texture data from one tab overwrites other.
This CL has fix for that issue, an update for existing test expectations
and a new unit test for this bug.
Bug: 788448
Cq-Include-Trybots: master.tryserver.chromium.android:android_optional_gpu_tests_rel;master.tryserver.chromium.linux:linux_optional_gpu_tests_rel;master.tryserver.chromium.mac:mac_optional_gpu_tests_rel;master.tryserver.chromium.win:win_optional_gpu_tests_rel
Change-Id: Ie933984cdd2d1381f42eb4638f730c8245207a28
Reviewed-on: https://chromium-review.googlesource.com/930327
Reviewed-by: Zhenyao Mo <[email protected]>
Commit-Queue: vikas soni <[email protected]>
Cr-Commit-Position: refs/heads/master@{#539111}
|
void SetUpColoredUnitQuad(const GLfloat* color) {
GLuint program1 = SetupColoredVertexProgram();
GLuint position_loc1 = glGetAttribLocation(program1, "a_position");
GLuint color_loc1 = glGetAttribLocation(program1, "a_color");
GLTestHelper::SetupUnitQuad(position_loc1);
GLTestHelper::SetupColorsForUnitQuad(color_loc1, color, GL_STATIC_DRAW);
}
|
void SetUpColoredUnitQuad(const GLfloat* color) {
GLuint program1 = SetupColoredVertexProgram();
GLuint position_loc1 = glGetAttribLocation(program1, "a_position");
GLuint color_loc1 = glGetAttribLocation(program1, "a_color");
GLTestHelper::SetupUnitQuad(position_loc1);
GLTestHelper::SetupColorsForUnitQuad(color_loc1, color, GL_STATIC_DRAW);
}
|
C
|
Chrome
| 0 |
CVE-2011-2918
|
https://www.cvedetails.com/cve/CVE-2011-2918/
|
CWE-399
|
https://github.com/torvalds/linux/commit/a8b0ca17b80e92faab46ee7179ba9e99ccb61233
|
a8b0ca17b80e92faab46ee7179ba9e99ccb61233
|
perf: Remove the nmi parameter from the swevent and overflow interface
The nmi parameter indicated if we could do wakeups from the current
context, if not, we would set some state and self-IPI and let the
resulting interrupt do the wakeup.
For the various event classes:
- hardware: nmi=0; PMI is in fact an NMI or we run irq_work_run from
the PMI-tail (ARM etc.)
- tracepoint: nmi=0; since tracepoint could be from NMI context.
- software: nmi=[0,1]; some, like the schedule thing cannot
perform wakeups, and hence need 0.
As one can see, there is very little nmi=1 usage, and the down-side of
not using it is that on some platforms some software events can have a
jiffy delay in wakeup (when arch_irq_work_raise isn't implemented).
The up-side however is that we can remove the nmi parameter and save a
bunch of conditionals in fast paths.
Signed-off-by: Peter Zijlstra <[email protected]>
Cc: Michael Cree <[email protected]>
Cc: Will Deacon <[email protected]>
Cc: Deng-Cheng Zhu <[email protected]>
Cc: Anton Blanchard <[email protected]>
Cc: Eric B Munson <[email protected]>
Cc: Heiko Carstens <[email protected]>
Cc: Paul Mundt <[email protected]>
Cc: David S. Miller <[email protected]>
Cc: Frederic Weisbecker <[email protected]>
Cc: Jason Wessel <[email protected]>
Cc: Don Zickus <[email protected]>
Link: http://lkml.kernel.org/n/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
|
static const struct arm_pmu *__init armv6pmu_init(void)
{
return NULL;
}
|
static const struct arm_pmu *__init armv6pmu_init(void)
{
return NULL;
}
|
C
|
linux
| 0 |
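The armv6pmu_init() stub above is incidental; the substantive change is an interface narrowing. A hedged sketch of the before/after shapes — the types are opaque placeholders and the names only echo, not reproduce, the kernel API:

#include <stddef.h>

struct pt_regs;   /* opaque here */

/* Old shape: every call site threaded an nmi flag through. */
static void sw_event_old(unsigned id, unsigned long nr, int nmi,
                         struct pt_regs *regs, unsigned long addr)
{ (void)id; (void)nr; (void)nmi; (void)regs; (void)addr; }

/* New shape per the commit: no nmi parameter; contexts that cannot
 * wake up defer internally (irq_work), and fast paths lose a branch. */
static void sw_event_new(unsigned id, unsigned long nr,
                         struct pt_regs *regs, unsigned long addr)
{ (void)id; (void)nr; (void)regs; (void)addr; }

int main(void)
{
    sw_event_old(1, 1, 0, NULL, 0);
    sw_event_new(1, 1, NULL, 0);
    return 0;
}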
CVE-2016-2550
|
https://www.cvedetails.com/cve/CVE-2016-2550/
|
CWE-399
|
https://github.com/torvalds/linux/commit/415e3d3e90ce9e18727e8843ae343eda5a58fad6
|
415e3d3e90ce9e18727e8843ae343eda5a58fad6
|
unix: correctly track in-flight fds in sending process user_struct
The commit referenced in the Fixes tag incorrectly accounted the number
of in-flight fds over a unix domain socket to the original opener
of the file-descriptor. This allows another process to arbitrary
deplete the original file-openers resource limit for the maximum of
open files. Instead the sending processes and its struct cred should
be credited.
To do so, we add a reference counted struct user_struct pointer to the
scm_fp_list and use it to account for the number of inflight unix fds.
Fixes: 712f4aad406bb1 ("unix: properly account for FDs passed over unix sockets")
Reported-by: David Herrmann <[email protected]>
Cc: David Herrmann <[email protected]>
Cc: Willy Tarreau <[email protected]>
Cc: Linus Torvalds <[email protected]>
Suggested-by: Linus Torvalds <[email protected]>
Signed-off-by: Hannes Frederic Sowa <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
{
struct cmsghdr __user *cm
= (__force struct cmsghdr __user*)msg->msg_control;
int fdmax = 0;
int fdnum = scm->fp->count;
struct file **fp = scm->fp->fp;
int __user *cmfptr;
int err = 0, i;
if (MSG_CMSG_COMPAT & msg->msg_flags) {
scm_detach_fds_compat(msg, scm);
return;
}
if (msg->msg_controllen > sizeof(struct cmsghdr))
fdmax = ((msg->msg_controllen - sizeof(struct cmsghdr))
/ sizeof(int));
if (fdnum < fdmax)
fdmax = fdnum;
for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
i++, cmfptr++)
{
struct socket *sock;
int new_fd;
err = security_file_receive(fp[i]);
if (err)
break;
err = get_unused_fd_flags(MSG_CMSG_CLOEXEC & msg->msg_flags
? O_CLOEXEC : 0);
if (err < 0)
break;
new_fd = err;
err = put_user(new_fd, cmfptr);
if (err) {
put_unused_fd(new_fd);
break;
}
/* Bump the usage count and install the file. */
sock = sock_from_file(fp[i], &err);
if (sock) {
sock_update_netprioidx(&sock->sk->sk_cgrp_data);
sock_update_classid(&sock->sk->sk_cgrp_data);
}
fd_install(new_fd, get_file(fp[i]));
}
if (i > 0)
{
int cmlen = CMSG_LEN(i*sizeof(int));
err = put_user(SOL_SOCKET, &cm->cmsg_level);
if (!err)
err = put_user(SCM_RIGHTS, &cm->cmsg_type);
if (!err)
err = put_user(cmlen, &cm->cmsg_len);
if (!err) {
cmlen = CMSG_SPACE(i*sizeof(int));
if (msg->msg_controllen < cmlen)
cmlen = msg->msg_controllen;
msg->msg_control += cmlen;
msg->msg_controllen -= cmlen;
}
}
if (i < fdnum || (fdnum && fdmax <= 0))
msg->msg_flags |= MSG_CTRUNC;
/*
* All of the files that fit in the message have had their
* usage counts incremented, so we just free the list.
*/
__scm_destroy(scm);
}
|
void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
{
struct cmsghdr __user *cm
= (__force struct cmsghdr __user*)msg->msg_control;
int fdmax = 0;
int fdnum = scm->fp->count;
struct file **fp = scm->fp->fp;
int __user *cmfptr;
int err = 0, i;
if (MSG_CMSG_COMPAT & msg->msg_flags) {
scm_detach_fds_compat(msg, scm);
return;
}
if (msg->msg_controllen > sizeof(struct cmsghdr))
fdmax = ((msg->msg_controllen - sizeof(struct cmsghdr))
/ sizeof(int));
if (fdnum < fdmax)
fdmax = fdnum;
for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
i++, cmfptr++)
{
struct socket *sock;
int new_fd;
err = security_file_receive(fp[i]);
if (err)
break;
err = get_unused_fd_flags(MSG_CMSG_CLOEXEC & msg->msg_flags
? O_CLOEXEC : 0);
if (err < 0)
break;
new_fd = err;
err = put_user(new_fd, cmfptr);
if (err) {
put_unused_fd(new_fd);
break;
}
/* Bump the usage count and install the file. */
sock = sock_from_file(fp[i], &err);
if (sock) {
sock_update_netprioidx(&sock->sk->sk_cgrp_data);
sock_update_classid(&sock->sk->sk_cgrp_data);
}
fd_install(new_fd, get_file(fp[i]));
}
if (i > 0)
{
int cmlen = CMSG_LEN(i*sizeof(int));
err = put_user(SOL_SOCKET, &cm->cmsg_level);
if (!err)
err = put_user(SCM_RIGHTS, &cm->cmsg_type);
if (!err)
err = put_user(cmlen, &cm->cmsg_len);
if (!err) {
cmlen = CMSG_SPACE(i*sizeof(int));
if (msg->msg_controllen < cmlen)
cmlen = msg->msg_controllen;
msg->msg_control += cmlen;
msg->msg_controllen -= cmlen;
}
}
if (i < fdnum || (fdnum && fdmax <= 0))
msg->msg_flags |= MSG_CTRUNC;
/*
* All of the files that fit in the message have had their
* usage counts incremented, so we just free the list.
*/
__scm_destroy(scm);
}
|
C
|
linux
| 0 |
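The accounting change described above hangs a refcounted owner off the in-flight fd list so the sender, not the file's original opener, is charged. A hedged C sketch of that shape — struct user, charge, and destroy are invented analogues of user_struct and the scm_fp_list hooks:

#include <stdlib.h>

struct user { long inflight; int refs; };

struct fp_list {
    struct user *owner;   /* the field the fix adds: who gets charged */
    int count;
};

static struct user *get_user_ref(struct user *u) { u->refs++; return u; }
static void put_user_ref(struct user *u) { if (--u->refs == 0) free(u); }

/* Charge the sending user when fds go in flight... */
static void charge(struct fp_list *fpl, struct user *sender, int nfds)
{
    fpl->owner = get_user_ref(sender);
    fpl->owner->inflight += nfds;
    fpl->count = nfds;
}

/* ...and release the same account when the list is destroyed. */
static void destroy(struct fp_list *fpl)
{
    fpl->owner->inflight -= fpl->count;
    put_user_ref(fpl->owner);
}

int main(void)
{
    struct user *sender = calloc(1, sizeof(*sender));
    struct fp_list fpl = { NULL, 0 };
    if (!sender) return 1;
    sender->refs = 1;                 /* the sender's own reference */
    charge(&fpl, sender, 3);
    destroy(&fpl);
    put_user_ref(sender);             /* drops to zero and frees */
    return 0;
}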
CVE-2011-2486
|
https://www.cvedetails.com/cve/CVE-2011-2486/
|
CWE-264
|
https://github.com/davidben/nspluginwrapper/commit/7e4ab8e1189846041f955e6c83f72bc1624e7a98
|
7e4ab8e1189846041f955e6c83f72bc1624e7a98
|
Support all the new variables added
|
g_NPN_PostURL(NPP instance, const char *url, const char *target, uint32_t len, const char *buf, NPBool file)
{
if (!thread_check()) {
npw_printf("WARNING: NPN_PostURL not called from the main thread\n");
return NPERR_INVALID_INSTANCE_ERROR;
}
if (instance == NULL)
return NPERR_INVALID_INSTANCE_ERROR;
PluginInstance *plugin = PLUGIN_INSTANCE(instance);
if (plugin == NULL)
return NPERR_INVALID_INSTANCE_ERROR;
D(bugiI("NPN_PostURL instance=%p\n", instance));
npw_plugin_instance_ref(plugin);
NPError ret = invoke_NPN_PostURL(plugin, url, target, len, buf, file);
npw_plugin_instance_unref(plugin);
D(bugiD("NPN_PostURL return: %d [%s]\n", ret, string_of_NPError(ret)));
return ret;
}
|
g_NPN_PostURL(NPP instance, const char *url, const char *target, uint32_t len, const char *buf, NPBool file)
{
if (!thread_check()) {
npw_printf("WARNING: NPN_PostURL not called from the main thread\n");
return NPERR_INVALID_INSTANCE_ERROR;
}
if (instance == NULL)
return NPERR_INVALID_INSTANCE_ERROR;
PluginInstance *plugin = PLUGIN_INSTANCE(instance);
if (plugin == NULL)
return NPERR_INVALID_INSTANCE_ERROR;
D(bugiI("NPN_PostURL instance=%p\n", instance));
npw_plugin_instance_ref(plugin);
NPError ret = invoke_NPN_PostURL(plugin, url, target, len, buf, file);
npw_plugin_instance_unref(plugin);
D(bugiD("NPN_PostURL return: %d [%s]\n", ret, string_of_NPError(ret)));
return ret;
}
|
C
|
nspluginwrapper
| 0 |
null | null | null |
https://github.com/chromium/chromium/commit/d4cd2b2c0953ad7e9fa988c234eb9361be80fe81
|
d4cd2b2c0953ad7e9fa988c234eb9361be80fe81
|
DevTools: 'Overrides' UI overlay obstructs page and element inspector
BUG=302862
[email protected]
Review URL: https://codereview.chromium.org/40233006
git-svn-id: svn://svn.chromium.org/blink/trunk@160559 bbb929c8-8fbe-4397-9dbb-9b2b20218538
|
void InspectorPageAgent::applyEmulatedMedia(String* media)
{
String emulatedMedia = m_state->getString(PageAgentState::pageAgentEmulatedMedia);
if (!emulatedMedia.isEmpty())
*media = emulatedMedia;
}
|
void InspectorPageAgent::applyEmulatedMedia(String* media)
{
String emulatedMedia = m_state->getString(PageAgentState::pageAgentEmulatedMedia);
if (!emulatedMedia.isEmpty())
*media = emulatedMedia;
}
|
C
|
Chrome
| 0 |
CVE-2016-7097
|
https://www.cvedetails.com/cve/CVE-2016-7097/
|
CWE-285
|
https://github.com/torvalds/linux/commit/073931017b49d9458aa351605b43a7e34598caef
|
073931017b49d9458aa351605b43a7e34598caef
|
posix_acl: Clear SGID bit when setting file permissions
When file permissions are modified via chmod(2) and the user is not in
the owning group or capable of CAP_FSETID, the setgid bit is cleared in
inode_change_ok(). Setting a POSIX ACL via setxattr(2) sets the file
permissions as well as the new ACL, but doesn't clear the setgid bit in
a similar way; this allows to bypass the check in chmod(2). Fix that.
References: CVE-2016-7097
Reviewed-by: Christoph Hellwig <[email protected]>
Reviewed-by: Jeff Layton <[email protected]>
Signed-off-by: Jan Kara <[email protected]>
Signed-off-by: Andreas Gruenbacher <[email protected]>
|
posix_acl_clone(const struct posix_acl *acl, gfp_t flags)
{
struct posix_acl *clone = NULL;
if (acl) {
int size = sizeof(struct posix_acl) + acl->a_count *
sizeof(struct posix_acl_entry);
clone = kmemdup(acl, size, flags);
if (clone)
atomic_set(&clone->a_refcount, 1);
}
return clone;
}
|
posix_acl_clone(const struct posix_acl *acl, gfp_t flags)
{
struct posix_acl *clone = NULL;
if (acl) {
int size = sizeof(struct posix_acl) + acl->a_count *
sizeof(struct posix_acl_entry);
clone = kmemdup(acl, size, flags);
if (clone)
atomic_set(&clone->a_refcount, 1);
}
return clone;
}
|
C
|
linux
| 0 |
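The rule the commit enforces: when an ACL write changes file permission bits and the caller is neither in the owning group nor CAP_FSETID-capable, the setgid bit must be cleared exactly as chmod(2) would clear it. A hedged C sketch of that check — in_group and capable stand in for in_group_p() and the capability test, and this is not the kernel's posix_acl_update_mode():

#include <stdbool.h>
#include <sys/stat.h>

static mode_t acl_update_mode(mode_t mode, bool in_group, bool capable)
{
    if ((mode & S_ISGID) && !in_group && !capable)
        mode &= ~S_ISGID;   /* the same clearing chmod(2) performs */
    return mode;
}

int main(void)
{
    mode_t m = acl_update_mode(S_ISGID | 0770, false, false);
    return (m & S_ISGID) ? 1 : 0;     /* 0: bit was cleared */
}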
CVE-2013-0889
|
https://www.cvedetails.com/cve/CVE-2013-0889/
|
CWE-264
|
https://github.com/chromium/chromium/commit/1538367452b549d929aabb13d54c85ab99f65cd3
|
1538367452b549d929aabb13d54c85ab99f65cd3
|
For "Dangerous" file type, no user gesture will bypass the download warning.
BUG=170569
Review URL: https://codereview.chromium.org/12039015
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@178072 0039d316-1c4b-4281-b951-d872f2087c98
|
bool ChromeDownloadManagerDelegate::ShouldOpenWithWebIntents(
const DownloadItem* item) {
if (!web_intents::IsWebIntentsEnabledForProfile(profile_))
return false;
if ((item->GetWebContents() && !item->GetWebContents()->GetDelegate()) &&
!web_intents::GetBrowserForBackgroundWebIntentDelivery(profile_)) {
return false;
}
if (!item->GetForcedFilePath().empty())
return false;
if (item->GetTargetDisposition() == DownloadItem::TARGET_DISPOSITION_PROMPT)
return false;
#if !defined(OS_CHROMEOS)
std::string mime_type = item->GetMimeType();
const char kQuickOfficeExtensionId[] = "gbkeegbaiigmenfmjfclcdgdpimamgkj";
const char kQuickOfficeDevExtensionId[] = "ionpfmkccalenbmnddpbmocokhaknphg";
ExtensionServiceInterface* extension_service =
extensions::ExtensionSystem::Get(profile_)->extension_service();
bool use_quickoffice = false;
if (extension_service &&
extension_service->GetInstalledExtension(kQuickOfficeExtensionId) &&
extension_service->IsExtensionEnabled(kQuickOfficeExtensionId))
use_quickoffice = true;
if (extension_service &&
extension_service->GetInstalledExtension(kQuickOfficeDevExtensionId) &&
extension_service->IsExtensionEnabled(kQuickOfficeDevExtensionId))
use_quickoffice = true;
if (use_quickoffice) {
if (mime_type == "application/msword" ||
mime_type == "application/vnd.ms-powerpoint" ||
mime_type == "application/vnd.ms-excel" ||
mime_type == "application/vnd.openxmlformats-officedocument."
"wordprocessingml.document" ||
mime_type == "application/vnd.openxmlformats-officedocument."
"presentationml.presentation" ||
mime_type == "application/vnd.openxmlformats-officedocument."
"spreadsheetml.sheet") {
return true;
}
}
#endif // !defined(OS_CHROMEOS)
return false;
}
|
bool ChromeDownloadManagerDelegate::ShouldOpenWithWebIntents(
const DownloadItem* item) {
if (!web_intents::IsWebIntentsEnabledForProfile(profile_))
return false;
if ((item->GetWebContents() && !item->GetWebContents()->GetDelegate()) &&
!web_intents::GetBrowserForBackgroundWebIntentDelivery(profile_)) {
return false;
}
if (!item->GetForcedFilePath().empty())
return false;
if (item->GetTargetDisposition() == DownloadItem::TARGET_DISPOSITION_PROMPT)
return false;
#if !defined(OS_CHROMEOS)
std::string mime_type = item->GetMimeType();
const char kQuickOfficeExtensionId[] = "gbkeegbaiigmenfmjfclcdgdpimamgkj";
const char kQuickOfficeDevExtensionId[] = "ionpfmkccalenbmnddpbmocokhaknphg";
ExtensionServiceInterface* extension_service =
extensions::ExtensionSystem::Get(profile_)->extension_service();
bool use_quickoffice = false;
if (extension_service &&
extension_service->GetInstalledExtension(kQuickOfficeExtensionId) &&
extension_service->IsExtensionEnabled(kQuickOfficeExtensionId))
use_quickoffice = true;
if (extension_service &&
extension_service->GetInstalledExtension(kQuickOfficeDevExtensionId) &&
extension_service->IsExtensionEnabled(kQuickOfficeDevExtensionId))
use_quickoffice = true;
if (use_quickoffice) {
if (mime_type == "application/msword" ||
mime_type == "application/vnd.ms-powerpoint" ||
mime_type == "application/vnd.ms-excel" ||
mime_type == "application/vnd.openxmlformats-officedocument."
"wordprocessingml.document" ||
mime_type == "application/vnd.openxmlformats-officedocument."
"presentationml.presentation" ||
mime_type == "application/vnd.openxmlformats-officedocument."
"spreadsheetml.sheet") {
return true;
}
}
#endif // !defined(OS_CHROMEOS)
return false;
}
|
C
|
Chrome
| 0 |
CVE-2013-4160
|
https://www.cvedetails.com/cve/CVE-2013-4160/
| null |
https://github.com/mm2/Little-CMS/commit/91c2db7f2559be504211b283bc3a2c631d6f06d9
|
91c2db7f2559be504211b283bc3a2c631d6f06d9
|
Non happy-path fixes
|
Prelin8Data* PrelinOpt8alloc(cmsContext ContextID, const cmsInterpParams* p, cmsToneCurve* G[3])
{
int i;
cmsUInt16Number Input[3];
cmsS15Fixed16Number v1, v2, v3;
Prelin8Data* p8;
p8 = _cmsMallocZero(ContextID, sizeof(Prelin8Data));
if (p8 == NULL) return NULL;
for (i=0; i < 256; i++) {
if (G != NULL) {
Input[0] = cmsEvalToneCurve16(G[0], FROM_8_TO_16(i));
Input[1] = cmsEvalToneCurve16(G[1], FROM_8_TO_16(i));
Input[2] = cmsEvalToneCurve16(G[2], FROM_8_TO_16(i));
}
else {
Input[0] = FROM_8_TO_16(i);
Input[1] = FROM_8_TO_16(i);
Input[2] = FROM_8_TO_16(i);
}
v1 = _cmsToFixedDomain(Input[0] * p -> Domain[0]);
v2 = _cmsToFixedDomain(Input[1] * p -> Domain[1]);
v3 = _cmsToFixedDomain(Input[2] * p -> Domain[2]);
p8 ->X0[i] = (p->opta[2] * FIXED_TO_INT(v1));
p8 ->Y0[i] = (p->opta[1] * FIXED_TO_INT(v2));
p8 ->Z0[i] = (p->opta[0] * FIXED_TO_INT(v3));
p8 ->rx[i] = (cmsUInt16Number) FIXED_REST_TO_INT(v1);
p8 ->ry[i] = (cmsUInt16Number) FIXED_REST_TO_INT(v2);
p8 ->rz[i] = (cmsUInt16Number) FIXED_REST_TO_INT(v3);
}
p8 ->ContextID = ContextID;
p8 ->p = p;
return p8;
}
|
Prelin8Data* PrelinOpt8alloc(cmsContext ContextID, const cmsInterpParams* p, cmsToneCurve* G[3])
{
int i;
cmsUInt16Number Input[3];
cmsS15Fixed16Number v1, v2, v3;
Prelin8Data* p8;
p8 = _cmsMallocZero(ContextID, sizeof(Prelin8Data));
if (p8 == NULL) return NULL;
for (i=0; i < 256; i++) {
if (G != NULL) {
Input[0] = cmsEvalToneCurve16(G[0], FROM_8_TO_16(i));
Input[1] = cmsEvalToneCurve16(G[1], FROM_8_TO_16(i));
Input[2] = cmsEvalToneCurve16(G[2], FROM_8_TO_16(i));
}
else {
Input[0] = FROM_8_TO_16(i);
Input[1] = FROM_8_TO_16(i);
Input[2] = FROM_8_TO_16(i);
}
v1 = _cmsToFixedDomain(Input[0] * p -> Domain[0]);
v2 = _cmsToFixedDomain(Input[1] * p -> Domain[1]);
v3 = _cmsToFixedDomain(Input[2] * p -> Domain[2]);
p8 ->X0[i] = (p->opta[2] * FIXED_TO_INT(v1));
p8 ->Y0[i] = (p->opta[1] * FIXED_TO_INT(v2));
p8 ->Z0[i] = (p->opta[0] * FIXED_TO_INT(v3));
p8 ->rx[i] = (cmsUInt16Number) FIXED_REST_TO_INT(v1);
p8 ->ry[i] = (cmsUInt16Number) FIXED_REST_TO_INT(v2);
p8 ->rz[i] = (cmsUInt16Number) FIXED_REST_TO_INT(v3);
}
p8 ->ContextID = ContextID;
p8 ->p = p;
return p8;
}
|
C
|
Little-CMS
| 0 |