CVE ID
stringlengths 13
43
⌀ | CVE Page
stringlengths 45
48
⌀ | CWE ID
stringclasses 90
values | codeLink
stringlengths 46
139
| commit_id
stringlengths 6
81
| commit_message
stringlengths 3
13.3k
⌀ | func_after
stringlengths 14
241k
| func_before
stringlengths 14
241k
| lang
stringclasses 3
values | project
stringclasses 309
values | vul
int8 0
1
|
---|---|---|---|---|---|---|---|---|---|---|
CVE-2017-14976
|
https://www.cvedetails.com/cve/CVE-2017-14976/
|
CWE-125
|
https://cgit.freedesktop.org/poppler/poppler/commit/?id=da63c35549e8852a410946ab016a3f25ac701bdf
|
da63c35549e8852a410946ab016a3f25ac701bdf
| null |
void FoFiType1C::getIndexVal(Type1CIndex *idx, int i,
Type1CIndexVal *val, GBool *ok) {
int pos0, pos1;
if (i < 0 || i >= idx->len) {
*ok = gFalse;
return;
}
pos0 = idx->startPos + getUVarBE(idx->pos + 3 + i * idx->offSize,
idx->offSize, ok);
pos1 = idx->startPos + getUVarBE(idx->pos + 3 + (i + 1) * idx->offSize,
idx->offSize, ok);
if (pos0 < idx->startPos || pos0 > idx->endPos ||
pos1 <= idx->startPos || pos1 > idx->endPos ||
pos1 < pos0) {
*ok = gFalse;
}
val->pos = pos0;
val->len = pos1 - pos0;
}
|
void FoFiType1C::getIndexVal(Type1CIndex *idx, int i,
Type1CIndexVal *val, GBool *ok) {
int pos0, pos1;
if (i < 0 || i >= idx->len) {
*ok = gFalse;
return;
}
pos0 = idx->startPos + getUVarBE(idx->pos + 3 + i * idx->offSize,
idx->offSize, ok);
pos1 = idx->startPos + getUVarBE(idx->pos + 3 + (i + 1) * idx->offSize,
idx->offSize, ok);
if (pos0 < idx->startPos || pos0 > idx->endPos ||
pos1 <= idx->startPos || pos1 > idx->endPos ||
pos1 < pos0) {
*ok = gFalse;
}
val->pos = pos0;
val->len = pos1 - pos0;
}
|
CPP
|
poppler
| 0 |
CVE-2018-7485
|
https://www.cvedetails.com/cve/CVE-2018-7485/
|
CWE-119
|
https://github.com/lurcher/unixODBC/commit/45ef78e037f578b15fc58938a3a3251655e71d6f#diff-d52750c7ba4e594410438569d8e2963aL24
|
45ef78e037f578b15fc58938a3a3251655e71d6f#diff-d52750c7ba4e594410438569d8e2963aL24
|
New Pre Source
|
void UWriteHeaderNormal( SQLHSTMT hStmt, SQLTCHAR *szSepLine )
{
SQLINTEGER nCol = 0;
SQLSMALLINT nColumns = 0;
SQLULEN nMaxLength = 10;
SQLTCHAR szColumn[MAX_DATA_WIDTH+20];
SQLTCHAR szColumnName[MAX_DATA_WIDTH+1];
SQLTCHAR szHdrLine[32001];
szColumn[ 0 ] = 0;
szColumnName[ 0 ] = 0;
szHdrLine[ 0 ] = 0;
if ( SQLNumResultCols( hStmt, &nColumns ) != SQL_SUCCESS )
nColumns = -1;
for ( nCol = 1; nCol <= nColumns; nCol++ )
{
SQLColAttribute( hStmt, nCol, SQL_DESC_DISPLAY_SIZE, NULL, 0, NULL, (SQLLEN*)&nMaxLength );
SQLColAttribute( hStmt, nCol, SQL_DESC_LABEL, szColumnName, sizeof(szColumnName), NULL, NULL );
if ( nMaxLength > MAX_DATA_WIDTH ) nMaxLength = MAX_DATA_WIDTH;
uc_to_ascii( szColumnName );
/* SEP */
memset( szColumn, '\0', sizeof(szColumn) );
memset( szColumn, '-', max( nMaxLength, strlen((char*)szColumnName) ) + 1 );
strcat((char*) szSepLine, "+" );
strcat((char*) szSepLine,(char*) szColumn );
/* HDR */
sprintf((char*) szColumn, "| %-*s", (int)max( nMaxLength, strlen((char*)szColumnName) ), (char*)szColumnName );
strcat((char*) szHdrLine,(char*) szColumn );
}
strcat((char*) szSepLine, "+\n" );
strcat((char*) szHdrLine, "|\n" );
puts((char*) szSepLine );
puts((char*) szHdrLine );
puts((char*) szSepLine );
}
|
void UWriteHeaderNormal( SQLHSTMT hStmt, SQLTCHAR *szSepLine )
{
SQLINTEGER nCol = 0;
SQLSMALLINT nColumns = 0;
SQLULEN nMaxLength = 10;
SQLTCHAR szColumn[MAX_DATA_WIDTH+20];
SQLTCHAR szColumnName[MAX_DATA_WIDTH+1];
SQLTCHAR szHdrLine[32001];
szColumn[ 0 ] = 0;
szColumnName[ 0 ] = 0;
szHdrLine[ 0 ] = 0;
if ( SQLNumResultCols( hStmt, &nColumns ) != SQL_SUCCESS )
nColumns = -1;
for ( nCol = 1; nCol <= nColumns; nCol++ )
{
SQLColAttribute( hStmt, nCol, SQL_DESC_DISPLAY_SIZE, NULL, 0, NULL, (SQLLEN*)&nMaxLength );
SQLColAttribute( hStmt, nCol, SQL_DESC_LABEL, szColumnName, sizeof(szColumnName), NULL, NULL );
if ( nMaxLength > MAX_DATA_WIDTH ) nMaxLength = MAX_DATA_WIDTH;
uc_to_ascii( szColumnName );
/* SEP */
memset( szColumn, '\0', sizeof(szColumn) );
memset( szColumn, '-', max( nMaxLength, strlen((char*)szColumnName) ) + 1 );
strcat((char*) szSepLine, "+" );
strcat((char*) szSepLine,(char*) szColumn );
/* HDR */
sprintf((char*) szColumn, "| %-*s", (int)max( nMaxLength, strlen((char*)szColumnName) ), (char*)szColumnName );
strcat((char*) szHdrLine,(char*) szColumn );
}
strcat((char*) szSepLine, "+\n" );
strcat((char*) szHdrLine, "|\n" );
puts((char*) szSepLine );
puts((char*) szHdrLine );
puts((char*) szSepLine );
}
|
C
|
unixODBC
| 0 |
CVE-2018-16427
|
https://www.cvedetails.com/cve/CVE-2018-16427/
|
CWE-125
|
https://github.com/OpenSC/OpenSC/pull/1447/commits/8fe377e93b4b56060e5bbfb6f3142ceaeca744fa
|
8fe377e93b4b56060e5bbfb6f3142ceaeca744fa
|
fixed out of bounds reads
Thanks to Eric Sesterhenn from X41 D-SEC GmbH
for reporting and suggesting security fixes.
|
static int piv_card_ctl(sc_card_t *card, unsigned long cmd, void *ptr)
{
piv_private_data_t * priv = PIV_DATA(card);
u8 * opts; /* A or M, key_ref, alg_id */
LOG_FUNC_CALLED(card->ctx);
sc_log(card->ctx, "cmd=%ld ptr=%p", cmd, ptr);
if (priv == NULL) {
LOG_FUNC_RETURN(card->ctx, SC_ERROR_INTERNAL);
}
switch(cmd) {
case SC_CARDCTL_PIV_AUTHENTICATE:
opts = (u8 *)ptr;
switch (*opts) {
case 'A':
return piv_general_external_authenticate(card,
*(opts+1), *(opts+2));
break;
case 'M':
return piv_general_mutual_authenticate(card,
*(opts+1), *(opts+2));
break;
}
break;
case SC_CARDCTL_PIV_GENERATE_KEY:
return piv_generate_key(card,
(sc_cardctl_piv_genkey_info_t *) ptr);
break;
case SC_CARDCTL_GET_SERIALNR:
return piv_get_serial_nr_from_CHUI(card, (sc_serial_number_t *) ptr);
break;
case SC_CARDCTL_PIV_PIN_PREFERENCE:
return piv_get_pin_preference(card, ptr);
break;
case SC_CARDCTL_PIV_OBJECT_PRESENT:
return piv_is_object_present(card, ptr);
break;
}
LOG_FUNC_RETURN(card->ctx, SC_ERROR_NOT_SUPPORTED);
}
|
static int piv_card_ctl(sc_card_t *card, unsigned long cmd, void *ptr)
{
piv_private_data_t * priv = PIV_DATA(card);
u8 * opts; /* A or M, key_ref, alg_id */
LOG_FUNC_CALLED(card->ctx);
sc_log(card->ctx, "cmd=%ld ptr=%p", cmd, ptr);
if (priv == NULL) {
LOG_FUNC_RETURN(card->ctx, SC_ERROR_INTERNAL);
}
switch(cmd) {
case SC_CARDCTL_PIV_AUTHENTICATE:
opts = (u8 *)ptr;
switch (*opts) {
case 'A':
return piv_general_external_authenticate(card,
*(opts+1), *(opts+2));
break;
case 'M':
return piv_general_mutual_authenticate(card,
*(opts+1), *(opts+2));
break;
}
break;
case SC_CARDCTL_PIV_GENERATE_KEY:
return piv_generate_key(card,
(sc_cardctl_piv_genkey_info_t *) ptr);
break;
case SC_CARDCTL_GET_SERIALNR:
return piv_get_serial_nr_from_CHUI(card, (sc_serial_number_t *) ptr);
break;
case SC_CARDCTL_PIV_PIN_PREFERENCE:
return piv_get_pin_preference(card, ptr);
break;
case SC_CARDCTL_PIV_OBJECT_PRESENT:
return piv_is_object_present(card, ptr);
break;
}
LOG_FUNC_RETURN(card->ctx, SC_ERROR_NOT_SUPPORTED);
}
|
C
|
OpenSC
| 0 |
CVE-2013-1929
|
https://www.cvedetails.com/cve/CVE-2013-1929/
|
CWE-119
|
https://github.com/torvalds/linux/commit/715230a44310a8cf66fbfb5a46f9a62a9b2de424
|
715230a44310a8cf66fbfb5a46f9a62a9b2de424
|
tg3: fix length overflow in VPD firmware parsing
Commit 184b89044fb6e2a74611dafa69b1dce0d98612c6 ("tg3: Use VPD fw version
when present") introduced VPD parsing that contained a potential length
overflow.
Limit the hardware's reported firmware string length (max 255 bytes) to
stay inside the driver's firmware string length (32 bytes). On overflow,
truncate the formatted firmware string instead of potentially overwriting
portions of the tg3 struct.
http://cansecwest.com/slides/2013/PrivateCore%20CSW%202013.pdf
Signed-off-by: Kees Cook <[email protected]>
Reported-by: Oded Horovitz <[email protected]>
Reported-by: Brad Spengler <[email protected]>
Cc: [email protected]
Cc: Matt Carlson <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
u32 phy;
if (!tg3_flag(tp, 5705_PLUS) ||
(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
return;
if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
u32 ephy;
if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
tg3_writephy(tp, MII_TG3_FET_TEST,
ephy | MII_TG3_FET_SHADOW_EN);
if (!tg3_readphy(tp, reg, &phy)) {
if (enable)
phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
else
phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
tg3_writephy(tp, reg, phy);
}
tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
}
} else {
int ret;
ret = tg3_phy_auxctl_read(tp,
MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
if (!ret) {
if (enable)
phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
else
phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
tg3_phy_auxctl_write(tp,
MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
}
}
}
|
static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
u32 phy;
if (!tg3_flag(tp, 5705_PLUS) ||
(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
return;
if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
u32 ephy;
if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
tg3_writephy(tp, MII_TG3_FET_TEST,
ephy | MII_TG3_FET_SHADOW_EN);
if (!tg3_readphy(tp, reg, &phy)) {
if (enable)
phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
else
phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
tg3_writephy(tp, reg, phy);
}
tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
}
} else {
int ret;
ret = tg3_phy_auxctl_read(tp,
MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
if (!ret) {
if (enable)
phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
else
phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
tg3_phy_auxctl_write(tp,
MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
}
}
}
|
C
|
linux
| 0 |
null | null | null |
https://github.com/chromium/chromium/commit/eb7971fdb0c3b76bacfb77c1ecc76459ef481f17
|
eb7971fdb0c3b76bacfb77c1ecc76459ef481f17
|
Implement delegation to Metro file pickers.
[email protected],[email protected]
BUG=None
TEST=None
Review URL: https://chromiumcodereview.appspot.com/10310103
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@136624 0039d316-1c4b-4281-b951-d872f2087c98
|
bool SelectFileDialogImpl::RunOpenFileDialog(
const std::wstring& title,
const std::wstring& filter,
HWND owner,
FilePath* path) {
OPENFILENAME ofn;
ZeroMemory(&ofn, sizeof(ofn));
ofn.lStructSize = sizeof(ofn);
ofn.hwndOwner = owner;
wchar_t filename[MAX_PATH];
filename[0] = 0;
FilePath dir;
if (!path->empty()) {
bool is_dir;
base::PlatformFileInfo file_info;
if (file_util::GetFileInfo(*path, &file_info))
is_dir = file_info.is_directory;
else
is_dir = file_util::EndsWithSeparator(*path);
if (is_dir) {
ofn.lpstrInitialDir = path->value().c_str();
} else {
dir = path->DirName();
ofn.lpstrInitialDir = dir.value().c_str();
base::wcslcpy(filename, path->BaseName().value().c_str(),
arraysize(filename));
}
}
ofn.lpstrFile = filename;
ofn.nMaxFile = MAX_PATH;
ofn.Flags = OFN_FILEMUSTEXIST | OFN_NOCHANGEDIR;
if (!filter.empty())
ofn.lpstrFilter = filter.c_str();
bool success = CallGetOpenFileName(&ofn);
DisableOwner(owner);
if (success)
*path = FilePath(filename);
return success;
}
|
bool SelectFileDialogImpl::RunOpenFileDialog(
const std::wstring& title,
const std::wstring& filter,
HWND owner,
FilePath* path) {
OPENFILENAME ofn;
ZeroMemory(&ofn, sizeof(ofn));
ofn.lStructSize = sizeof(ofn);
ofn.hwndOwner = owner;
wchar_t filename[MAX_PATH];
filename[0] = 0;
FilePath dir;
if (!path->empty()) {
bool is_dir;
base::PlatformFileInfo file_info;
if (file_util::GetFileInfo(*path, &file_info))
is_dir = file_info.is_directory;
else
is_dir = file_util::EndsWithSeparator(*path);
if (is_dir) {
ofn.lpstrInitialDir = path->value().c_str();
} else {
dir = path->DirName();
ofn.lpstrInitialDir = dir.value().c_str();
base::wcslcpy(filename, path->BaseName().value().c_str(),
arraysize(filename));
}
}
ofn.lpstrFile = filename;
ofn.nMaxFile = MAX_PATH;
ofn.Flags = OFN_FILEMUSTEXIST | OFN_NOCHANGEDIR;
if (!filter.empty())
ofn.lpstrFilter = filter.c_str();
bool success = !!GetOpenFileName(&ofn);
DisableOwner(owner);
if (success)
*path = FilePath(filename);
return success;
}
|
C
|
Chrome
| 1 |
null | null | null |
https://github.com/chromium/chromium/commit/30b0f37300f8d671d29d91102ec7f475ed4cf7fe
|
30b0f37300f8d671d29d91102ec7f475ed4cf7fe
|
Use invalidation sets for :read-only and :read-write.
Gets rid of SubtreeStyleChange which relies on sibling tree recalcs.
[email protected],[email protected]
BUG=557440
Review URL: https://codereview.chromium.org/1454003002
Cr-Commit-Position: refs/heads/master@{#360298}
|
Node::InsertionNotificationRequest HTMLFormControlElement::insertedInto(ContainerNode* insertionPoint)
{
m_ancestorDisabledState = AncestorDisabledStateUnknown;
m_dataListAncestorState = Unknown;
setNeedsWillValidateCheck();
HTMLElement::insertedInto(insertionPoint);
FormAssociatedElement::insertedInto(insertionPoint);
fieldSetAncestorsSetNeedsValidityCheck(insertionPoint);
if (!formOwner() && insertionPoint->inDocument())
document().didAssociateFormControl(this);
return InsertionDone;
}
|
Node::InsertionNotificationRequest HTMLFormControlElement::insertedInto(ContainerNode* insertionPoint)
{
m_ancestorDisabledState = AncestorDisabledStateUnknown;
m_dataListAncestorState = Unknown;
setNeedsWillValidateCheck();
HTMLElement::insertedInto(insertionPoint);
FormAssociatedElement::insertedInto(insertionPoint);
fieldSetAncestorsSetNeedsValidityCheck(insertionPoint);
if (!formOwner() && insertionPoint->inDocument())
document().didAssociateFormControl(this);
return InsertionDone;
}
|
C
|
Chrome
| 0 |
CVE-2014-0237
|
https://www.cvedetails.com/cve/CVE-2014-0237/
|
CWE-399
|
https://github.com/file/file/commit/b8acc83781d5a24cc5101e525d15efe0482c280d
|
b8acc83781d5a24cc5101e525d15efe0482c280d
|
Remove loop that kept reading the same offset (Jan Kaluza)
|
cdf_unpack_summary_info(const cdf_stream_t *sst, const cdf_header_t *h,
cdf_summary_info_header_t *ssi, cdf_property_info_t **info, size_t *count)
{
size_t maxcount;
const cdf_summary_info_header_t *si =
CAST(const cdf_summary_info_header_t *, sst->sst_tab);
const cdf_section_declaration_t *sd =
CAST(const cdf_section_declaration_t *, (const void *)
((const char *)sst->sst_tab + CDF_SECTION_DECLARATION_OFFSET));
if (cdf_check_stream_offset(sst, h, si, sizeof(*si), __LINE__) == -1 ||
cdf_check_stream_offset(sst, h, sd, sizeof(*sd), __LINE__) == -1)
return -1;
ssi->si_byte_order = CDF_TOLE2(si->si_byte_order);
ssi->si_os_version = CDF_TOLE2(si->si_os_version);
ssi->si_os = CDF_TOLE2(si->si_os);
ssi->si_class = si->si_class;
cdf_swap_class(&ssi->si_class);
ssi->si_count = CDF_TOLE4(si->si_count);
*count = 0;
maxcount = 0;
*info = NULL;
if (cdf_read_property_info(sst, h, CDF_TOLE4(sd->sd_offset), info,
count, &maxcount) == -1)
return -1;
return 0;
}
|
cdf_unpack_summary_info(const cdf_stream_t *sst, const cdf_header_t *h,
cdf_summary_info_header_t *ssi, cdf_property_info_t **info, size_t *count)
{
size_t i, maxcount;
const cdf_summary_info_header_t *si =
CAST(const cdf_summary_info_header_t *, sst->sst_tab);
const cdf_section_declaration_t *sd =
CAST(const cdf_section_declaration_t *, (const void *)
((const char *)sst->sst_tab + CDF_SECTION_DECLARATION_OFFSET));
if (cdf_check_stream_offset(sst, h, si, sizeof(*si), __LINE__) == -1 ||
cdf_check_stream_offset(sst, h, sd, sizeof(*sd), __LINE__) == -1)
return -1;
ssi->si_byte_order = CDF_TOLE2(si->si_byte_order);
ssi->si_os_version = CDF_TOLE2(si->si_os_version);
ssi->si_os = CDF_TOLE2(si->si_os);
ssi->si_class = si->si_class;
cdf_swap_class(&ssi->si_class);
ssi->si_count = CDF_TOLE2(si->si_count);
*count = 0;
maxcount = 0;
*info = NULL;
for (i = 0; i < CDF_TOLE4(si->si_count); i++) {
if (i >= CDF_LOOP_LIMIT) {
DPRINTF(("Unpack summary info loop limit"));
errno = EFTYPE;
return -1;
}
if (cdf_read_property_info(sst, h, CDF_TOLE4(sd->sd_offset),
info, count, &maxcount) == -1) {
return -1;
}
}
return 0;
}
|
C
|
file
| 1 |
null | null | null |
https://github.com/chromium/chromium/commit/befb46ae3385fa13975521e9a2281e35805b339e
|
befb46ae3385fa13975521e9a2281e35805b339e
|
2009-10-23 Chris Evans <[email protected]>
Reviewed by Adam Barth.
Added test for bug 27239 (ignore Refresh for view source mode).
https://bugs.webkit.org/show_bug.cgi?id=27239
* http/tests/security/view-source-no-refresh.html: Added
* http/tests/security/view-source-no-refresh-expected.txt: Added
* http/tests/security/resources/view-source-no-refresh.php: Added
2009-10-23 Chris Evans <[email protected]>
Reviewed by Adam Barth.
Ignore the Refresh header if we're in view source mode.
https://bugs.webkit.org/show_bug.cgi?id=27239
Test: http/tests/security/view-source-no-refresh.html
* loader/FrameLoader.cpp: ignore Refresh in view-source mode.
git-svn-id: svn://svn.chromium.org/blink/trunk@50018 bbb929c8-8fbe-4397-9dbb-9b2b20218538
|
void FrameLoader::receivedFirstData()
{
begin(m_workingURL, false);
dispatchDidCommitLoad();
dispatchWindowObjectAvailable();
if (m_documentLoader) {
String ptitle = m_documentLoader->title();
if (!ptitle.isNull())
m_client->dispatchDidReceiveTitle(ptitle);
}
m_workingURL = KURL();
double delay;
String url;
if (!m_documentLoader)
return;
if (m_frame->inViewSourceMode())
return;
if (!parseHTTPRefresh(m_documentLoader->response().httpHeaderField("Refresh"), false, delay, url))
return;
if (url.isEmpty())
url = m_URL.string();
else
url = m_frame->document()->completeURL(url).string();
m_frame->redirectScheduler()->scheduleRedirect(delay, url);
}
|
void FrameLoader::receivedFirstData()
{
begin(m_workingURL, false);
dispatchDidCommitLoad();
dispatchWindowObjectAvailable();
if (m_documentLoader) {
String ptitle = m_documentLoader->title();
if (!ptitle.isNull())
m_client->dispatchDidReceiveTitle(ptitle);
}
m_workingURL = KURL();
double delay;
String url;
if (!m_documentLoader)
return;
if (!parseHTTPRefresh(m_documentLoader->response().httpHeaderField("Refresh"), false, delay, url))
return;
if (url.isEmpty())
url = m_URL.string();
else
url = m_frame->document()->completeURL(url).string();
m_frame->redirectScheduler()->scheduleRedirect(delay, url);
}
|
C
|
Chrome
| 1 |
CVE-2018-6121
|
https://www.cvedetails.com/cve/CVE-2018-6121/
|
CWE-20
|
https://github.com/chromium/chromium/commit/7614790c80996d32a28218f4d1605b0908e9ddf6
|
7614790c80996d32a28218f4d1605b0908e9ddf6
|
Apply ExtensionNavigationThrottle filesystem/blob checks to all frames.
BUG=836858
Change-Id: I34333a72501129fd40b5a9aa6378c9f35f1e7fc2
Reviewed-on: https://chromium-review.googlesource.com/1028511
Reviewed-by: Devlin <[email protected]>
Reviewed-by: Alex Moshchuk <[email protected]>
Reviewed-by: Nick Carter <[email protected]>
Commit-Queue: Charlie Reis <[email protected]>
Cr-Commit-Position: refs/heads/master@{#553867}
|
bool IsWebcamAvailableOnSystem(WebContents* web_contents) {
std::string result;
EXPECT_TRUE(content::ExecuteScriptAndExtractString(
web_contents, kHasVideoInputDeviceOnSystem, &result));
return result == kHasVideoInputDevice;
}
|
bool IsWebcamAvailableOnSystem(WebContents* web_contents) {
std::string result;
EXPECT_TRUE(content::ExecuteScriptAndExtractString(
web_contents, kHasVideoInputDeviceOnSystem, &result));
return result == kHasVideoInputDevice;
}
|
C
|
Chrome
| 0 |
CVE-2016-5219
|
https://www.cvedetails.com/cve/CVE-2016-5219/
|
CWE-416
|
https://github.com/chromium/chromium/commit/a4150b688a754d3d10d2ca385155b1c95d77d6ae
|
a4150b688a754d3d10d2ca385155b1c95d77d6ae
|
Add GL_PROGRAM_COMPLETION_QUERY_CHROMIUM
This makes the query of GL_COMPLETION_STATUS_KHR to programs much
cheaper by minimizing the round-trip to the GPU thread.
Bug: 881152, 957001
Change-Id: Iadfa798af29225e752c710ca5c25f50b3dd3101a
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1586630
Commit-Queue: Kenneth Russell <[email protected]>
Reviewed-by: Kentaro Hara <[email protected]>
Reviewed-by: Geoff Lang <[email protected]>
Reviewed-by: Kenneth Russell <[email protected]>
Cr-Commit-Position: refs/heads/master@{#657568}
|
void WebGLRenderingContextBase::DestroyContext() {
if (!GetDrawingBuffer())
return;
extensions_util_.reset();
base::RepeatingClosure null_closure;
base::RepeatingCallback<void(const char*, int32_t)> null_function;
GetDrawingBuffer()->ContextProvider()->SetLostContextCallback(
std::move(null_closure));
GetDrawingBuffer()->ContextProvider()->SetErrorMessageCallback(
std::move(null_function));
DCHECK(GetDrawingBuffer());
drawing_buffer_->BeginDestruction();
drawing_buffer_ = nullptr;
}
|
void WebGLRenderingContextBase::DestroyContext() {
if (!GetDrawingBuffer())
return;
extensions_util_.reset();
base::RepeatingClosure null_closure;
base::RepeatingCallback<void(const char*, int32_t)> null_function;
GetDrawingBuffer()->ContextProvider()->SetLostContextCallback(
std::move(null_closure));
GetDrawingBuffer()->ContextProvider()->SetErrorMessageCallback(
std::move(null_function));
DCHECK(GetDrawingBuffer());
drawing_buffer_->BeginDestruction();
drawing_buffer_ = nullptr;
}
|
C
|
Chrome
| 0 |
CVE-2019-13307
|
https://www.cvedetails.com/cve/CVE-2019-13307/
|
CWE-119
|
https://github.com/ImageMagick/ImageMagick6/commit/91e58d967a92250439ede038ccfb0913a81e59fe
|
91e58d967a92250439ede038ccfb0913a81e59fe
|
https://github.com/ImageMagick/ImageMagick/issues/1615
|
MagickExport ChannelPerceptualHash *GetImageChannelPerceptualHash(
const Image *image,ExceptionInfo *exception)
{
ChannelMoments
*moments;
ChannelPerceptualHash
*perceptual_hash;
Image
*hash_image;
MagickBooleanType
status;
register ssize_t
i;
ssize_t
channel;
/*
Blur then transform to sRGB colorspace.
*/
hash_image=BlurImage(image,0.0,1.0,exception);
if (hash_image == (Image *) NULL)
return((ChannelPerceptualHash *) NULL);
hash_image->depth=8;
status=TransformImageColorspace(hash_image,sRGBColorspace);
if (status == MagickFalse)
return((ChannelPerceptualHash *) NULL);
moments=GetImageChannelMoments(hash_image,exception);
hash_image=DestroyImage(hash_image);
if (moments == (ChannelMoments *) NULL)
return((ChannelPerceptualHash *) NULL);
perceptual_hash=(ChannelPerceptualHash *) AcquireQuantumMemory(
CompositeChannels+1UL,sizeof(*perceptual_hash));
if (perceptual_hash == (ChannelPerceptualHash *) NULL)
return((ChannelPerceptualHash *) NULL);
for (channel=0; channel <= CompositeChannels; channel++)
for (i=0; i < MaximumNumberOfImageMoments; i++)
perceptual_hash[channel].P[i]=(-MagickLog10(moments[channel].I[i]));
moments=(ChannelMoments *) RelinquishMagickMemory(moments);
/*
Blur then transform to HCLp colorspace.
*/
hash_image=BlurImage(image,0.0,1.0,exception);
if (hash_image == (Image *) NULL)
{
perceptual_hash=(ChannelPerceptualHash *) RelinquishMagickMemory(
perceptual_hash);
return((ChannelPerceptualHash *) NULL);
}
hash_image->depth=8;
status=TransformImageColorspace(hash_image,HCLpColorspace);
if (status == MagickFalse)
{
perceptual_hash=(ChannelPerceptualHash *) RelinquishMagickMemory(
perceptual_hash);
return((ChannelPerceptualHash *) NULL);
}
moments=GetImageChannelMoments(hash_image,exception);
hash_image=DestroyImage(hash_image);
if (moments == (ChannelMoments *) NULL)
{
perceptual_hash=(ChannelPerceptualHash *) RelinquishMagickMemory(
perceptual_hash);
return((ChannelPerceptualHash *) NULL);
}
for (channel=0; channel <= CompositeChannels; channel++)
for (i=0; i < MaximumNumberOfImageMoments; i++)
perceptual_hash[channel].Q[i]=(-MagickLog10(moments[channel].I[i]));
moments=(ChannelMoments *) RelinquishMagickMemory(moments);
return(perceptual_hash);
}
|
MagickExport ChannelPerceptualHash *GetImageChannelPerceptualHash(
const Image *image,ExceptionInfo *exception)
{
ChannelMoments
*moments;
ChannelPerceptualHash
*perceptual_hash;
Image
*hash_image;
MagickBooleanType
status;
register ssize_t
i;
ssize_t
channel;
/*
Blur then transform to sRGB colorspace.
*/
hash_image=BlurImage(image,0.0,1.0,exception);
if (hash_image == (Image *) NULL)
return((ChannelPerceptualHash *) NULL);
hash_image->depth=8;
status=TransformImageColorspace(hash_image,sRGBColorspace);
if (status == MagickFalse)
return((ChannelPerceptualHash *) NULL);
moments=GetImageChannelMoments(hash_image,exception);
hash_image=DestroyImage(hash_image);
if (moments == (ChannelMoments *) NULL)
return((ChannelPerceptualHash *) NULL);
perceptual_hash=(ChannelPerceptualHash *) AcquireQuantumMemory(
CompositeChannels+1UL,sizeof(*perceptual_hash));
if (perceptual_hash == (ChannelPerceptualHash *) NULL)
return((ChannelPerceptualHash *) NULL);
for (channel=0; channel <= CompositeChannels; channel++)
for (i=0; i < MaximumNumberOfImageMoments; i++)
perceptual_hash[channel].P[i]=(-MagickLog10(moments[channel].I[i]));
moments=(ChannelMoments *) RelinquishMagickMemory(moments);
/*
Blur then transform to HCLp colorspace.
*/
hash_image=BlurImage(image,0.0,1.0,exception);
if (hash_image == (Image *) NULL)
{
perceptual_hash=(ChannelPerceptualHash *) RelinquishMagickMemory(
perceptual_hash);
return((ChannelPerceptualHash *) NULL);
}
hash_image->depth=8;
status=TransformImageColorspace(hash_image,HCLpColorspace);
if (status == MagickFalse)
{
perceptual_hash=(ChannelPerceptualHash *) RelinquishMagickMemory(
perceptual_hash);
return((ChannelPerceptualHash *) NULL);
}
moments=GetImageChannelMoments(hash_image,exception);
hash_image=DestroyImage(hash_image);
if (moments == (ChannelMoments *) NULL)
{
perceptual_hash=(ChannelPerceptualHash *) RelinquishMagickMemory(
perceptual_hash);
return((ChannelPerceptualHash *) NULL);
}
for (channel=0; channel <= CompositeChannels; channel++)
for (i=0; i < MaximumNumberOfImageMoments; i++)
perceptual_hash[channel].Q[i]=(-MagickLog10(moments[channel].I[i]));
moments=(ChannelMoments *) RelinquishMagickMemory(moments);
return(perceptual_hash);
}
|
C
|
ImageMagick6
| 0 |
CVE-2011-1927
|
https://www.cvedetails.com/cve/CVE-2011-1927/
| null |
https://github.com/torvalds/linux/commit/64f3b9e203bd06855072e295557dca1485a2ecba
|
64f3b9e203bd06855072e295557dca1485a2ecba
|
net: ip_expire() must revalidate route
Commit 4a94445c9a5c (net: Use ip_route_input_noref() in input path)
added a bug in IP defragmentation handling, in case timeout is fired.
When a frame is defragmented, we use last skb dst field when building
final skb. Its dst is valid, since we are in rcu read section.
But if a timeout occurs, we take first queued fragment to build one ICMP
TIME EXCEEDED message. Problem is all queued skb have weak dst pointers,
since we escaped RCU critical section after their queueing. icmp_send()
might dereference a now freed (and possibly reused) part of memory.
Calling skb_dst_drop() and ip_route_input_noref() to revalidate route is
the only possible choice.
Reported-by: Denys Fedoryshchenko <[email protected]>
Signed-off-by: Eric Dumazet <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
static int __net_init ip4_frags_ns_ctl_register(struct net *net)
{
struct ctl_table *table;
struct ctl_table_header *hdr;
table = ip4_frags_ns_ctl_table;
if (!net_eq(net, &init_net)) {
table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
if (table == NULL)
goto err_alloc;
table[0].data = &net->ipv4.frags.high_thresh;
table[1].data = &net->ipv4.frags.low_thresh;
table[2].data = &net->ipv4.frags.timeout;
}
hdr = register_net_sysctl_table(net, net_ipv4_ctl_path, table);
if (hdr == NULL)
goto err_reg;
net->ipv4.frags_hdr = hdr;
return 0;
err_reg:
if (!net_eq(net, &init_net))
kfree(table);
err_alloc:
return -ENOMEM;
}
|
static int __net_init ip4_frags_ns_ctl_register(struct net *net)
{
struct ctl_table *table;
struct ctl_table_header *hdr;
table = ip4_frags_ns_ctl_table;
if (!net_eq(net, &init_net)) {
table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
if (table == NULL)
goto err_alloc;
table[0].data = &net->ipv4.frags.high_thresh;
table[1].data = &net->ipv4.frags.low_thresh;
table[2].data = &net->ipv4.frags.timeout;
}
hdr = register_net_sysctl_table(net, net_ipv4_ctl_path, table);
if (hdr == NULL)
goto err_reg;
net->ipv4.frags_hdr = hdr;
return 0;
err_reg:
if (!net_eq(net, &init_net))
kfree(table);
err_alloc:
return -ENOMEM;
}
|
C
|
linux
| 0 |
CVE-2016-5219
|
https://www.cvedetails.com/cve/CVE-2016-5219/
|
CWE-416
|
https://github.com/chromium/chromium/commit/a4150b688a754d3d10d2ca385155b1c95d77d6ae
|
a4150b688a754d3d10d2ca385155b1c95d77d6ae
|
Add GL_PROGRAM_COMPLETION_QUERY_CHROMIUM
This makes the query of GL_COMPLETION_STATUS_KHR to programs much
cheaper by minimizing the round-trip to the GPU thread.
Bug: 881152, 957001
Change-Id: Iadfa798af29225e752c710ca5c25f50b3dd3101a
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1586630
Commit-Queue: Kenneth Russell <[email protected]>
Reviewed-by: Kentaro Hara <[email protected]>
Reviewed-by: Geoff Lang <[email protected]>
Reviewed-by: Kenneth Russell <[email protected]>
Cr-Commit-Position: refs/heads/master@{#657568}
|
error::Error GLES2DecoderImpl::HandleGetActiveAttrib(
uint32_t immediate_data_size,
const volatile void* cmd_data) {
const volatile gles2::cmds::GetActiveAttrib& c =
*static_cast<const volatile gles2::cmds::GetActiveAttrib*>(cmd_data);
GLuint program_id = c.program;
GLuint index = c.index;
uint32_t name_bucket_id = c.name_bucket_id;
typedef cmds::GetActiveAttrib::Result Result;
Result* result = GetSharedMemoryAs<Result*>(
c.result_shm_id, c.result_shm_offset, sizeof(*result));
if (!result) {
return error::kOutOfBounds;
}
if (result->success != 0) {
return error::kInvalidArguments;
}
Program* program = GetProgramInfoNotShader(
program_id, "glGetActiveAttrib");
if (!program) {
return error::kNoError;
}
const Program::VertexAttrib* attrib_info =
program->GetAttribInfo(index);
if (!attrib_info) {
LOCAL_SET_GL_ERROR(
GL_INVALID_VALUE, "glGetActiveAttrib", "index out of range");
return error::kNoError;
}
result->success = 1; // true.
result->size = attrib_info->size;
result->type = attrib_info->type;
Bucket* bucket = CreateBucket(name_bucket_id);
bucket->SetFromString(attrib_info->name.c_str());
return error::kNoError;
}
|
error::Error GLES2DecoderImpl::HandleGetActiveAttrib(
uint32_t immediate_data_size,
const volatile void* cmd_data) {
const volatile gles2::cmds::GetActiveAttrib& c =
*static_cast<const volatile gles2::cmds::GetActiveAttrib*>(cmd_data);
GLuint program_id = c.program;
GLuint index = c.index;
uint32_t name_bucket_id = c.name_bucket_id;
typedef cmds::GetActiveAttrib::Result Result;
Result* result = GetSharedMemoryAs<Result*>(
c.result_shm_id, c.result_shm_offset, sizeof(*result));
if (!result) {
return error::kOutOfBounds;
}
if (result->success != 0) {
return error::kInvalidArguments;
}
Program* program = GetProgramInfoNotShader(
program_id, "glGetActiveAttrib");
if (!program) {
return error::kNoError;
}
const Program::VertexAttrib* attrib_info =
program->GetAttribInfo(index);
if (!attrib_info) {
LOCAL_SET_GL_ERROR(
GL_INVALID_VALUE, "glGetActiveAttrib", "index out of range");
return error::kNoError;
}
result->success = 1; // true.
result->size = attrib_info->size;
result->type = attrib_info->type;
Bucket* bucket = CreateBucket(name_bucket_id);
bucket->SetFromString(attrib_info->name.c_str());
return error::kNoError;
}
|
C
|
Chrome
| 0 |
CVE-2013-1819
|
https://www.cvedetails.com/cve/CVE-2013-1819/
|
CWE-20
|
https://github.com/torvalds/linux/commit/eb178619f930fa2ba2348de332a1ff1c66a31424
|
eb178619f930fa2ba2348de332a1ff1c66a31424
|
xfs: fix _xfs_buf_find oops on blocks beyond the filesystem end
When _xfs_buf_find is passed an out of range address, it will fail
to find a relevant struct xfs_perag and oops with a null
dereference. This can happen when trying to walk a filesystem with a
metadata inode that has a partially corrupted extent map (i.e. the
block number returned is corrupt, but is otherwise intact) and we
try to read from the corrupted block address.
In this case, just fail the lookup. If it is readahead being issued,
it will simply not be done, but if it is real read that fails we
will get an error being reported. Ideally this case should result
in an EFSCORRUPTED error being reported, but we cannot return an
error through xfs_buf_read() or xfs_buf_get() so this lookup failure
may result in ENOMEM or EIO errors being reported instead.
Signed-off-by: Dave Chinner <[email protected]>
Reviewed-by: Brian Foster <[email protected]>
Reviewed-by: Ben Myers <[email protected]>
Signed-off-by: Ben Myers <[email protected]>
|
xfs_buf_lru_del(
struct xfs_buf *bp)
{
struct xfs_buftarg *btp = bp->b_target;
if (list_empty(&bp->b_lru))
return;
spin_lock(&btp->bt_lru_lock);
if (!list_empty(&bp->b_lru)) {
list_del_init(&bp->b_lru);
btp->bt_lru_nr--;
}
spin_unlock(&btp->bt_lru_lock);
}
|
xfs_buf_lru_del(
struct xfs_buf *bp)
{
struct xfs_buftarg *btp = bp->b_target;
if (list_empty(&bp->b_lru))
return;
spin_lock(&btp->bt_lru_lock);
if (!list_empty(&bp->b_lru)) {
list_del_init(&bp->b_lru);
btp->bt_lru_nr--;
}
spin_unlock(&btp->bt_lru_lock);
}
|
C
|
linux
| 0 |
CVE-2010-2527
|
https://www.cvedetails.com/cve/CVE-2010-2527/
|
CWE-119
|
https://git.savannah.gnu.org/cgit/freetype/freetype2-demos.git/commit/?id=b995299b73ba4cd259f221f500d4e63095508bec
|
b995299b73ba4cd259f221f500d4e63095508bec
| null |
write_message( RenderState state )
{
ADisplay adisplay = (ADisplay)state->display.disp;
if ( state->message == NULL )
{
FontFace face = &state->faces[state->face_index];
int idx, total;
idx = face->index;
total = 1;
while ( total + state->face_index < state->num_faces &&
face[total].filepath == face[0].filepath )
total++;
total += idx;
state->message = state->message0;
if ( total > 1 )
sprintf( state->message0, "%.100s %d/%d @ %5.1fpt",
state->filename, idx + 1, total,
state->char_size );
else
sprintf( state->message0, "%.100s @ %5.1fpt",
state->filename,
state->char_size );
}
grWriteCellString( adisplay->bitmap, 0, DIM_Y - 10, state->message,
adisplay->fore_color );
state->message = NULL;
}
|
write_message( RenderState state )
{
ADisplay adisplay = (ADisplay)state->display.disp;
if ( state->message == NULL )
{
FontFace face = &state->faces[state->face_index];
int idx, total;
idx = face->index;
total = 1;
while ( total + state->face_index < state->num_faces &&
face[total].filepath == face[0].filepath )
total++;
total += idx;
state->message = state->message0;
if ( total > 1 )
sprintf( state->message0, "%s %d/%d @ %5.1fpt",
state->filename, idx + 1, total,
state->char_size );
else
sprintf( state->message0, "%s @ %5.1fpt",
state->filename,
state->char_size );
}
grWriteCellString( adisplay->bitmap, 0, DIM_Y - 10, state->message,
adisplay->fore_color );
state->message = NULL;
}
|
C
|
savannah
| 1 |
CVE-2017-15994
|
https://www.cvedetails.com/cve/CVE-2017-15994/
|
CWE-354
|
https://git.samba.org/?p=rsync.git;a=commit;h=c252546ceeb0925eb8a4061315e3ff0a8c55b48b
|
c252546ceeb0925eb8a4061315e3ff0a8c55b48b
| null |
int parse_csum_name(const char *name, int len)
{
if (len < 0 && name)
len = strlen(name);
if (!name || (len == 4 && strncasecmp(name, "auto", 4) == 0)) {
if (protocol_version >= 30)
return CSUM_MD5;
if (protocol_version >= 27)
return CSUM_MD4_OLD;
if (protocol_version >= 21)
return CSUM_MD4_BUSTED;
return CSUM_MD4_ARCHAIC;
}
if (len == 3 && strncasecmp(name, "md4", 3) == 0)
return CSUM_MD4;
if (len == 3 && strncasecmp(name, "md5", 3) == 0)
return CSUM_MD5;
if (len == 4 && strncasecmp(name, "none", 4) == 0)
return CSUM_NONE;
rprintf(FERROR, "unknown checksum name: %s\n", name);
exit_cleanup(RERR_UNSUPPORTED);
}
|
int parse_csum_name(const char *name, int len)
{
if (len < 0 && name)
len = strlen(name);
if (!name || (len == 4 && strncasecmp(name, "auto", 4) == 0)) {
if (protocol_version >= 30)
return CSUM_MD5;
if (protocol_version >= 27)
return CSUM_MD4_OLD;
if (protocol_version >= 21)
return CSUM_MD4_BUSTED;
return CSUM_MD4_ARCHAIC;
}
if (len == 3 && strncasecmp(name, "md4", 3) == 0)
return CSUM_MD4;
if (len == 3 && strncasecmp(name, "md5", 3) == 0)
return CSUM_MD5;
if (len == 4 && strncasecmp(name, "none", 4) == 0)
return CSUM_NONE;
rprintf(FERROR, "unknown checksum name: %s\n", name);
exit_cleanup(RERR_UNSUPPORTED);
}
|
C
|
samba
| 0 |
CVE-2017-6345
|
https://www.cvedetails.com/cve/CVE-2017-6345/
|
CWE-20
|
https://github.com/torvalds/linux/commit/8b74d439e1697110c5e5c600643e823eb1dd0762
|
8b74d439e1697110c5e5c600643e823eb1dd0762
|
net/llc: avoid BUG_ON() in skb_orphan()
It seems nobody used LLC since linux-3.12.
Fortunately fuzzers like syzkaller still know how to run this code,
otherwise it would be no fun.
Setting skb->sk without skb->destructor leads to all kinds of
bugs, we now prefer to be very strict about it.
Ideally here we would use skb_set_owner() but this helper does not exist yet,
only CAN seems to have a private helper for that.
Fixes: 376c7311bdb6 ("net: add a temporary sanity check in skb_orphan()")
Signed-off-by: Eric Dumazet <[email protected]>
Reported-by: Andrey Konovalov <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
static struct llc_conn_state_trans *llc_qualify_conn_ev(struct sock *sk,
struct sk_buff *skb)
{
struct llc_conn_state_trans **next_trans;
const llc_conn_ev_qfyr_t *next_qualifier;
struct llc_conn_state_ev *ev = llc_conn_ev(skb);
struct llc_sock *llc = llc_sk(sk);
struct llc_conn_state *curr_state =
&llc_conn_state_table[llc->state - 1];
/* search thru events for this state until
* list exhausted or until no more
*/
for (next_trans = curr_state->transitions +
llc_find_offset(llc->state - 1, ev->type);
(*next_trans)->ev; next_trans++) {
if (!((*next_trans)->ev)(sk, skb)) {
/* got POSSIBLE event match; the event may require
* qualification based on the values of a number of
* state flags; if all qualifications are met (i.e.,
* if all qualifying functions return success, or 0,
* then this is THE event we're looking for
*/
for (next_qualifier = (*next_trans)->ev_qualifiers;
next_qualifier && *next_qualifier &&
!(*next_qualifier)(sk, skb); next_qualifier++)
/* nothing */;
if (!next_qualifier || !*next_qualifier)
/* all qualifiers executed successfully; this is
* our transition; return it so we can perform
* the associated actions & change the state
*/
return *next_trans;
}
}
return NULL;
}
|
static struct llc_conn_state_trans *llc_qualify_conn_ev(struct sock *sk,
struct sk_buff *skb)
{
struct llc_conn_state_trans **next_trans;
const llc_conn_ev_qfyr_t *next_qualifier;
struct llc_conn_state_ev *ev = llc_conn_ev(skb);
struct llc_sock *llc = llc_sk(sk);
struct llc_conn_state *curr_state =
&llc_conn_state_table[llc->state - 1];
/* search thru events for this state until
* list exhausted or until no more
*/
for (next_trans = curr_state->transitions +
llc_find_offset(llc->state - 1, ev->type);
(*next_trans)->ev; next_trans++) {
if (!((*next_trans)->ev)(sk, skb)) {
/* got POSSIBLE event match; the event may require
* qualification based on the values of a number of
* state flags; if all qualifications are met (i.e.,
* if all qualifying functions return success, or 0,
* then this is THE event we're looking for
*/
for (next_qualifier = (*next_trans)->ev_qualifiers;
next_qualifier && *next_qualifier &&
!(*next_qualifier)(sk, skb); next_qualifier++)
/* nothing */;
if (!next_qualifier || !*next_qualifier)
/* all qualifiers executed successfully; this is
* our transition; return it so we can perform
* the associated actions & change the state
*/
return *next_trans;
}
}
return NULL;
}
|
C
|
linux
| 0 |
CVE-2018-9505
|
https://www.cvedetails.com/cve/CVE-2018-9505/
|
CWE-125
|
https://android.googlesource.com/platform/system/bt/+/5216e6120160b28d76e9ee4dff9995e772647511
|
5216e6120160b28d76e9ee4dff9995e772647511
|
Add packet length checks in mca_ccb_hdl_req
Bug: 110791536
Test: manual
Change-Id: Ica5d8037246682fdb190b2747a86ed8d44c2869a
(cherry picked from commit 4de7ccdd914b7a178df9180d15f675b257ea6e02)
|
void mca_ccb_cong(tMCA_CCB* p_ccb, tMCA_CCB_EVT* p_data) {
MCA_TRACE_DEBUG("mca_ccb_cong cong=%d/%d", p_ccb->cong, p_data->llcong);
p_ccb->cong = p_data->llcong;
if (!p_ccb->cong) {
/* if there's a held packet, send it now */
if (p_ccb->p_tx_req && !p_ccb->p_tx_req->hdr.layer_specific) {
p_data = (tMCA_CCB_EVT*)p_ccb->p_tx_req;
p_ccb->p_tx_req = NULL;
mca_ccb_snd_req(p_ccb, p_data);
}
}
}
|
void mca_ccb_cong(tMCA_CCB* p_ccb, tMCA_CCB_EVT* p_data) {
MCA_TRACE_DEBUG("mca_ccb_cong cong=%d/%d", p_ccb->cong, p_data->llcong);
p_ccb->cong = p_data->llcong;
if (!p_ccb->cong) {
/* if there's a held packet, send it now */
if (p_ccb->p_tx_req && !p_ccb->p_tx_req->hdr.layer_specific) {
p_data = (tMCA_CCB_EVT*)p_ccb->p_tx_req;
p_ccb->p_tx_req = NULL;
mca_ccb_snd_req(p_ccb, p_data);
}
}
}
|
C
|
Android
| 0 |
CVE-2016-2860
|
https://www.cvedetails.com/cve/CVE-2016-2860/
|
CWE-284
|
http://git.openafs.org/?p=openafs.git;a=commitdiff;h=396240cf070a806b91fea81131d034e1399af1e0
|
396240cf070a806b91fea81131d034e1399af1e0
| null |
SPR_ListElements(struct rx_call *call, afs_int32 aid, prlist *alist,
afs_int32 *over)
{
afs_int32 code;
afs_int32 cid = ANONYMOUSID;
code = listElements(call, aid, alist, over, &cid);
osi_auditU(call, PTS_LstEleEvent, code, AUD_ID, aid, AUD_END);
ViceLog(125, ("PTS_ListElements: code %d cid %d aid %d\n", code, cid, aid));
return code;
}
|
SPR_ListElements(struct rx_call *call, afs_int32 aid, prlist *alist,
afs_int32 *over)
{
afs_int32 code;
afs_int32 cid = ANONYMOUSID;
code = listElements(call, aid, alist, over, &cid);
osi_auditU(call, PTS_LstEleEvent, code, AUD_ID, aid, AUD_END);
ViceLog(125, ("PTS_ListElements: code %d cid %d aid %d\n", code, cid, aid));
return code;
}
|
C
|
openafs
| 0 |
CVE-2012-5110
|
https://www.cvedetails.com/cve/CVE-2012-5110/
|
CWE-125
|
https://github.com/chromium/chromium/commit/7fa8bd35982700cb2cb6ce22d05128c019a2b587
|
7fa8bd35982700cb2cb6ce22d05128c019a2b587
|
SelectElement should remove an option when null is assigned by indexed setter
Fix bug embedded in r151449
see
http://src.chromium.org/viewvc/blink?revision=151449&view=revision
[email protected], [email protected], [email protected]
BUG=262365
TEST=fast/forms/select/select-assign-null.html
Review URL: https://chromiumcodereview.appspot.com/19947008
git-svn-id: svn://svn.chromium.org/blink/trunk@154743 bbb929c8-8fbe-4397-9dbb-9b2b20218538
|
void HTMLSelectElement::restoreFormControlState(const FormControlState& state)
{
recalcListItems();
const Vector<HTMLElement*>& items = listItems();
size_t itemsSize = items.size();
if (!itemsSize)
return;
for (size_t i = 0; i < itemsSize; ++i) {
if (!items[i]->hasLocalName(optionTag))
continue;
toHTMLOptionElement(items[i])->setSelectedState(false);
}
if (!multiple()) {
size_t foundIndex = searchOptionsForValue(state[0], 0, itemsSize);
if (foundIndex != notFound)
toHTMLOptionElement(items[foundIndex])->setSelectedState(true);
} else {
size_t startIndex = 0;
for (size_t i = 0; i < state.valueSize(); ++i) {
const String& value = state[i];
size_t foundIndex = searchOptionsForValue(value, startIndex, itemsSize);
if (foundIndex == notFound)
foundIndex = searchOptionsForValue(value, 0, startIndex);
if (foundIndex == notFound)
continue;
toHTMLOptionElement(items[foundIndex])->setSelectedState(true);
startIndex = foundIndex + 1;
}
}
setOptionsChangedOnRenderer();
setNeedsValidityCheck();
}
|
void HTMLSelectElement::restoreFormControlState(const FormControlState& state)
{
recalcListItems();
const Vector<HTMLElement*>& items = listItems();
size_t itemsSize = items.size();
if (!itemsSize)
return;
for (size_t i = 0; i < itemsSize; ++i) {
if (!items[i]->hasLocalName(optionTag))
continue;
toHTMLOptionElement(items[i])->setSelectedState(false);
}
if (!multiple()) {
size_t foundIndex = searchOptionsForValue(state[0], 0, itemsSize);
if (foundIndex != notFound)
toHTMLOptionElement(items[foundIndex])->setSelectedState(true);
} else {
size_t startIndex = 0;
for (size_t i = 0; i < state.valueSize(); ++i) {
const String& value = state[i];
size_t foundIndex = searchOptionsForValue(value, startIndex, itemsSize);
if (foundIndex == notFound)
foundIndex = searchOptionsForValue(value, 0, startIndex);
if (foundIndex == notFound)
continue;
toHTMLOptionElement(items[foundIndex])->setSelectedState(true);
startIndex = foundIndex + 1;
}
}
setOptionsChangedOnRenderer();
setNeedsValidityCheck();
}
|
C
|
Chrome
| 0 |
CVE-2017-5019
|
https://www.cvedetails.com/cve/CVE-2017-5019/
|
CWE-416
|
https://github.com/chromium/chromium/commit/f03ea5a5c2ff26e239dfd23e263b15da2d9cee93
|
f03ea5a5c2ff26e239dfd23e263b15da2d9cee93
|
Convert FrameHostMsg_DidAddMessageToConsole to Mojo.
Note: Since this required changing the test
RenderViewImplTest.DispatchBeforeUnloadCanDetachFrame, I manually
re-introduced https://crbug.com/666714 locally (the bug the test was
added for), and reran the test to confirm that it still covers the bug.
Bug: 786836
Change-Id: I110668fa6f0f261fd2ac36bb91a8d8b31c99f4f1
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1526270
Commit-Queue: Lowell Manners <[email protected]>
Reviewed-by: Daniel Cheng <[email protected]>
Reviewed-by: Camille Lamy <[email protected]>
Cr-Commit-Position: refs/heads/master@{#653137}
|
FaviconURL::IconType ToFaviconType(blink::WebIconURL::Type type) {
switch (type) {
case blink::WebIconURL::kTypeFavicon:
return FaviconURL::IconType::kFavicon;
case blink::WebIconURL::kTypeTouch:
return FaviconURL::IconType::kTouchIcon;
case blink::WebIconURL::kTypeTouchPrecomposed:
return FaviconURL::IconType::kTouchPrecomposedIcon;
case blink::WebIconURL::kTypeInvalid:
return FaviconURL::IconType::kInvalid;
}
NOTREACHED();
return FaviconURL::IconType::kInvalid;
}
|
FaviconURL::IconType ToFaviconType(blink::WebIconURL::Type type) {
switch (type) {
case blink::WebIconURL::kTypeFavicon:
return FaviconURL::IconType::kFavicon;
case blink::WebIconURL::kTypeTouch:
return FaviconURL::IconType::kTouchIcon;
case blink::WebIconURL::kTypeTouchPrecomposed:
return FaviconURL::IconType::kTouchPrecomposedIcon;
case blink::WebIconURL::kTypeInvalid:
return FaviconURL::IconType::kInvalid;
}
NOTREACHED();
return FaviconURL::IconType::kInvalid;
}
|
C
|
Chrome
| 0 |
CVE-2016-3829
|
https://www.cvedetails.com/cve/CVE-2016-3829/
|
CWE-172
|
https://android.googlesource.com/platform/external/libavc/+/326fe991a4b7971e8aeaf4ac775491dd8abd85bb
|
326fe991a4b7971e8aeaf4ac775491dd8abd85bb
|
Decoder: Initialize first_pb_nal_in_pic for error slices
first_pb_nal_in_pic was uninitialized for error clips
Bug: 29023649
Change-Id: Ie4e0a94059c5f675bf619e31534846e2c2ca58ae
|
WORD32 ih264d_end_of_pic_dispbuf_mgr(dec_struct_t * ps_dec)
{
dec_slice_params_t *ps_cur_slice = ps_dec->ps_cur_slice;
UWORD8 u1_num_of_users = 0;
WORD32 ret;
H264_MUTEX_LOCK(&ps_dec->process_disp_mutex);
if(1)
{
{
ih264d_delete_nonref_nondisplay_pics(ps_dec->ps_dpb_mgr);
if(ps_cur_slice->u1_mmco_equalto5
|| (ps_cur_slice->u1_nal_unit_type == IDR_SLICE_NAL))
{
ps_dec->ps_cur_pic->i4_poc = 0;
if(ps_dec->u2_total_mbs_coded
== (ps_dec->ps_cur_sps->u2_max_mb_addr + 1))
ih264d_reset_ref_bufs(ps_dec->ps_dpb_mgr);
ih264d_release_display_bufs(ps_dec);
}
if(IVD_DECODE_FRAME_OUT != ps_dec->e_frm_out_mode)
{
ret = ih264d_assign_display_seq(ps_dec);
if(ret != OK)
return ret;
}
}
if(ps_cur_slice->u1_nal_ref_idc)
{
/* Mark pic buf as needed for reference */
ih264_buf_mgr_set_status((buf_mgr_t *)ps_dec->pv_pic_buf_mgr,
ps_dec->u1_pic_buf_id,
BUF_MGR_REF);
/* Mark mv buf as needed for reference */
ih264_buf_mgr_set_status((buf_mgr_t *)ps_dec->pv_mv_buf_mgr,
ps_dec->au1_pic_buf_id_mv_buf_id_map[ps_dec->u1_pic_buf_id],
BUF_MGR_REF);
ps_dec->au1_pic_buf_ref_flag[ps_dec->u1_pic_buf_id] = 1;
}
/* 420 consumer */
/* Increment the number of users by 1 for display based upon */
/*the SEEK KEY FRAME control sent to decoder */
if(((0 == ps_dec->u1_last_pic_not_decoded)
&& (0
== (ps_dec->ps_cur_pic->u4_pack_slc_typ
& ps_dec->u4_skip_frm_mask)))
|| (ps_cur_slice->u1_nal_unit_type == IDR_SLICE_NAL))
{
/* Mark pic buf as needed for display */
ih264_buf_mgr_set_status((buf_mgr_t *)ps_dec->pv_pic_buf_mgr,
ps_dec->u1_pic_buf_id,
BUF_MGR_IO);
}
if(!ps_cur_slice->u1_field_pic_flag
|| ((TOP_FIELD_ONLY | BOT_FIELD_ONLY)
!= ps_dec->u1_top_bottom_decoded))
{
pic_buffer_t *ps_cur_pic = ps_dec->ps_cur_pic;
ps_cur_pic->u2_disp_width = ps_dec->u2_disp_width;
ps_cur_pic->u2_disp_height = ps_dec->u2_disp_height >> 1;
ps_cur_pic->u2_crop_offset_y = ps_dec->u2_crop_offset_y;
ps_cur_pic->u2_crop_offset_uv = ps_dec->u2_crop_offset_uv;
ps_cur_pic->u1_pic_type = 0;
ret = ih264d_insert_pic_in_display_list(
ps_dec->ps_dpb_mgr,
ps_dec->u1_pic_buf_id,
ps_dec->i4_prev_max_display_seq
+ ps_dec->ps_cur_pic->i4_poc,
ps_dec->ps_cur_pic->i4_frame_num);
if(ret != OK)
return ret;
{
ivd_video_decode_op_t * ps_dec_output =
(ivd_video_decode_op_t *)ps_dec->pv_dec_out;
ps_dec_output->u4_frame_decoded_flag = 1;
}
if(ps_dec->au1_pic_buf_ref_flag[ps_dec->u1_pic_buf_id] == 0)
{
ih264_buf_mgr_release((buf_mgr_t *)ps_dec->pv_mv_buf_mgr,
ps_dec->au1_pic_buf_id_mv_buf_id_map[ps_dec->u1_pic_buf_id],
BUF_MGR_REF);
ps_dec->au1_pic_buf_ref_flag[ps_dec->u1_pic_buf_id] = 0;
}
}
else
{
H264_DEC_DEBUG_PRINT("pic not inserted display %d %d\n",
ps_cur_slice->u1_field_pic_flag,
ps_dec->u1_second_field);
}
if(!ps_cur_slice->u1_field_pic_flag
|| ((TOP_FIELD_ONLY | BOT_FIELD_ONLY)
== ps_dec->u1_top_bottom_decoded))
{
if(IVD_DECODE_FRAME_OUT == ps_dec->e_frm_out_mode)
{
ret = ih264d_assign_display_seq(ps_dec);
if(ret != OK)
return ret;
}
}
}
H264_MUTEX_UNLOCK(&ps_dec->process_disp_mutex);
return OK;
}
|
WORD32 ih264d_end_of_pic_dispbuf_mgr(dec_struct_t * ps_dec)
{
dec_slice_params_t *ps_cur_slice = ps_dec->ps_cur_slice;
UWORD8 u1_num_of_users = 0;
WORD32 ret;
H264_MUTEX_LOCK(&ps_dec->process_disp_mutex);
if(1)
{
{
ih264d_delete_nonref_nondisplay_pics(ps_dec->ps_dpb_mgr);
if(ps_cur_slice->u1_mmco_equalto5
|| (ps_cur_slice->u1_nal_unit_type == IDR_SLICE_NAL))
{
ps_dec->ps_cur_pic->i4_poc = 0;
if(ps_dec->u2_total_mbs_coded
== (ps_dec->ps_cur_sps->u2_max_mb_addr + 1))
ih264d_reset_ref_bufs(ps_dec->ps_dpb_mgr);
ih264d_release_display_bufs(ps_dec);
}
if(IVD_DECODE_FRAME_OUT != ps_dec->e_frm_out_mode)
{
ret = ih264d_assign_display_seq(ps_dec);
if(ret != OK)
return ret;
}
}
if(ps_cur_slice->u1_nal_ref_idc)
{
/* Mark pic buf as needed for reference */
ih264_buf_mgr_set_status((buf_mgr_t *)ps_dec->pv_pic_buf_mgr,
ps_dec->u1_pic_buf_id,
BUF_MGR_REF);
/* Mark mv buf as needed for reference */
ih264_buf_mgr_set_status((buf_mgr_t *)ps_dec->pv_mv_buf_mgr,
ps_dec->au1_pic_buf_id_mv_buf_id_map[ps_dec->u1_pic_buf_id],
BUF_MGR_REF);
ps_dec->au1_pic_buf_ref_flag[ps_dec->u1_pic_buf_id] = 1;
}
/* 420 consumer */
/* Increment the number of users by 1 for display based upon */
/*the SEEK KEY FRAME control sent to decoder */
if(((0 == ps_dec->u1_last_pic_not_decoded)
&& (0
== (ps_dec->ps_cur_pic->u4_pack_slc_typ
& ps_dec->u4_skip_frm_mask)))
|| (ps_cur_slice->u1_nal_unit_type == IDR_SLICE_NAL))
{
/* Mark pic buf as needed for display */
ih264_buf_mgr_set_status((buf_mgr_t *)ps_dec->pv_pic_buf_mgr,
ps_dec->u1_pic_buf_id,
BUF_MGR_IO);
}
if(!ps_cur_slice->u1_field_pic_flag
|| ((TOP_FIELD_ONLY | BOT_FIELD_ONLY)
!= ps_dec->u1_top_bottom_decoded))
{
pic_buffer_t *ps_cur_pic = ps_dec->ps_cur_pic;
ps_cur_pic->u2_disp_width = ps_dec->u2_disp_width;
ps_cur_pic->u2_disp_height = ps_dec->u2_disp_height >> 1;
ps_cur_pic->u2_crop_offset_y = ps_dec->u2_crop_offset_y;
ps_cur_pic->u2_crop_offset_uv = ps_dec->u2_crop_offset_uv;
ps_cur_pic->u1_pic_type = 0;
ret = ih264d_insert_pic_in_display_list(
ps_dec->ps_dpb_mgr,
ps_dec->u1_pic_buf_id,
ps_dec->i4_prev_max_display_seq
+ ps_dec->ps_cur_pic->i4_poc,
ps_dec->ps_cur_pic->i4_frame_num);
if(ret != OK)
return ret;
{
ivd_video_decode_op_t * ps_dec_output =
(ivd_video_decode_op_t *)ps_dec->pv_dec_out;
ps_dec_output->u4_frame_decoded_flag = 1;
}
if(ps_dec->au1_pic_buf_ref_flag[ps_dec->u1_pic_buf_id] == 0)
{
ih264_buf_mgr_release((buf_mgr_t *)ps_dec->pv_mv_buf_mgr,
ps_dec->au1_pic_buf_id_mv_buf_id_map[ps_dec->u1_pic_buf_id],
BUF_MGR_REF);
ps_dec->au1_pic_buf_ref_flag[ps_dec->u1_pic_buf_id] = 0;
}
}
else
{
H264_DEC_DEBUG_PRINT("pic not inserted display %d %d\n",
ps_cur_slice->u1_field_pic_flag,
ps_dec->u1_second_field);
}
if(!ps_cur_slice->u1_field_pic_flag
|| ((TOP_FIELD_ONLY | BOT_FIELD_ONLY)
== ps_dec->u1_top_bottom_decoded))
{
if(IVD_DECODE_FRAME_OUT == ps_dec->e_frm_out_mode)
{
ret = ih264d_assign_display_seq(ps_dec);
if(ret != OK)
return ret;
}
}
}
H264_MUTEX_UNLOCK(&ps_dec->process_disp_mutex);
return OK;
}
|
C
|
Android
| 0 |
CVE-2019-15903
|
https://www.cvedetails.com/cve/CVE-2019-15903/
|
CWE-611
|
https://github.com/libexpat/libexpat/commit/c20b758c332d9a13afbbb276d30db1d183a85d43
|
c20b758c332d9a13afbbb276d30db1d183a85d43
|
xmlparse.c: Deny internal entities closing the doctype
|
XML_GetBase(XML_Parser parser) {
if (parser == NULL)
return NULL;
return parser->m_curBase;
}
|
XML_GetBase(XML_Parser parser) {
if (parser == NULL)
return NULL;
return parser->m_curBase;
}
|
C
|
libexpat
| 0 |
CVE-2019-11810
|
https://www.cvedetails.com/cve/CVE-2019-11810/
|
CWE-476
|
https://github.com/torvalds/linux/commit/bcf3b67d16a4c8ffae0aa79de5853435e683945c
|
bcf3b67d16a4c8ffae0aa79de5853435e683945c
|
scsi: megaraid_sas: return error when create DMA pool failed
when create DMA pool for cmd frames failed, we should return -ENOMEM,
instead of 0.
In some case in:
megasas_init_adapter_fusion()
-->megasas_alloc_cmds()
-->megasas_create_frame_pool
create DMA pool failed,
--> megasas_free_cmds() [1]
-->megasas_alloc_cmds_fusion()
failed, then goto fail_alloc_cmds.
-->megasas_free_cmds() [2]
we will call megasas_free_cmds twice, [1] will kfree cmd_list,
[2] will use cmd_list.it will cause a problem:
Unable to handle kernel NULL pointer dereference at virtual address
00000000
pgd = ffffffc000f70000
[00000000] *pgd=0000001fbf893003, *pud=0000001fbf893003,
*pmd=0000001fbf894003, *pte=006000006d000707
Internal error: Oops: 96000005 [#1] SMP
Modules linked in:
CPU: 18 PID: 1 Comm: swapper/0 Not tainted
task: ffffffdfb9290000 ti: ffffffdfb923c000 task.ti: ffffffdfb923c000
PC is at megasas_free_cmds+0x30/0x70
LR is at megasas_free_cmds+0x24/0x70
...
Call trace:
[<ffffffc0005b779c>] megasas_free_cmds+0x30/0x70
[<ffffffc0005bca74>] megasas_init_adapter_fusion+0x2f4/0x4d8
[<ffffffc0005b926c>] megasas_init_fw+0x2dc/0x760
[<ffffffc0005b9ab0>] megasas_probe_one+0x3c0/0xcd8
[<ffffffc0004a5abc>] local_pci_probe+0x4c/0xb4
[<ffffffc0004a5c40>] pci_device_probe+0x11c/0x14c
[<ffffffc00053a5e4>] driver_probe_device+0x1ec/0x430
[<ffffffc00053a92c>] __driver_attach+0xa8/0xb0
[<ffffffc000538178>] bus_for_each_dev+0x74/0xc8
[<ffffffc000539e88>] driver_attach+0x28/0x34
[<ffffffc000539a18>] bus_add_driver+0x16c/0x248
[<ffffffc00053b234>] driver_register+0x6c/0x138
[<ffffffc0004a5350>] __pci_register_driver+0x5c/0x6c
[<ffffffc000ce3868>] megasas_init+0xc0/0x1a8
[<ffffffc000082a58>] do_one_initcall+0xe8/0x1ec
[<ffffffc000ca7be8>] kernel_init_freeable+0x1c8/0x284
[<ffffffc0008d90b8>] kernel_init+0x1c/0xe4
Signed-off-by: Jason Yan <[email protected]>
Acked-by: Sumit Saxena <[email protected]>
Signed-off-by: Martin K. Petersen <[email protected]>
|
megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
u8 alt_status)
{
int exception = 0;
struct megasas_header *hdr = &cmd->frame->hdr;
unsigned long flags;
struct fusion_context *fusion = instance->ctrl_context;
u32 opcode, status;
/* flag for the retry reset */
cmd->retry_for_fw_reset = 0;
if (cmd->scmd)
cmd->scmd->SCp.ptr = NULL;
switch (hdr->cmd) {
case MFI_CMD_INVALID:
/* Some older 1068 controller FW may keep a pended
MR_DCMD_CTRL_EVENT_GET_INFO left over from the main kernel
when booting the kdump kernel. Ignore this command to
prevent a kernel panic on shutdown of the kdump kernel. */
dev_warn(&instance->pdev->dev, "MFI_CMD_INVALID command "
"completed\n");
dev_warn(&instance->pdev->dev, "If you have a controller "
"other than PERC5, please upgrade your firmware\n");
break;
case MFI_CMD_PD_SCSI_IO:
case MFI_CMD_LD_SCSI_IO:
/*
* MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been
* issued either through an IO path or an IOCTL path. If it
* was via IOCTL, we will send it to internal completion.
*/
if (cmd->sync_cmd) {
cmd->sync_cmd = 0;
megasas_complete_int_cmd(instance, cmd);
break;
}
/* fall through */
case MFI_CMD_LD_READ:
case MFI_CMD_LD_WRITE:
if (alt_status) {
cmd->scmd->result = alt_status << 16;
exception = 1;
}
if (exception) {
atomic_dec(&instance->fw_outstanding);
scsi_dma_unmap(cmd->scmd);
cmd->scmd->scsi_done(cmd->scmd);
megasas_return_cmd(instance, cmd);
break;
}
switch (hdr->cmd_status) {
case MFI_STAT_OK:
cmd->scmd->result = DID_OK << 16;
break;
case MFI_STAT_SCSI_IO_FAILED:
case MFI_STAT_LD_INIT_IN_PROGRESS:
cmd->scmd->result =
(DID_ERROR << 16) | hdr->scsi_status;
break;
case MFI_STAT_SCSI_DONE_WITH_ERROR:
cmd->scmd->result = (DID_OK << 16) | hdr->scsi_status;
if (hdr->scsi_status == SAM_STAT_CHECK_CONDITION) {
memset(cmd->scmd->sense_buffer, 0,
SCSI_SENSE_BUFFERSIZE);
memcpy(cmd->scmd->sense_buffer, cmd->sense,
hdr->sense_len);
cmd->scmd->result |= DRIVER_SENSE << 24;
}
break;
case MFI_STAT_LD_OFFLINE:
case MFI_STAT_DEVICE_NOT_FOUND:
cmd->scmd->result = DID_BAD_TARGET << 16;
break;
default:
dev_printk(KERN_DEBUG, &instance->pdev->dev, "MFI FW status %#x\n",
hdr->cmd_status);
cmd->scmd->result = DID_ERROR << 16;
break;
}
atomic_dec(&instance->fw_outstanding);
scsi_dma_unmap(cmd->scmd);
cmd->scmd->scsi_done(cmd->scmd);
megasas_return_cmd(instance, cmd);
break;
case MFI_CMD_SMP:
case MFI_CMD_STP:
case MFI_CMD_NVME:
megasas_complete_int_cmd(instance, cmd);
break;
case MFI_CMD_DCMD:
opcode = le32_to_cpu(cmd->frame->dcmd.opcode);
/* Check for LD map update */
if ((opcode == MR_DCMD_LD_MAP_GET_INFO)
&& (cmd->frame->dcmd.mbox.b[1] == 1)) {
fusion->fast_path_io = 0;
spin_lock_irqsave(instance->host->host_lock, flags);
status = cmd->frame->hdr.cmd_status;
instance->map_update_cmd = NULL;
if (status != MFI_STAT_OK) {
if (status != MFI_STAT_NOT_FOUND)
dev_warn(&instance->pdev->dev, "map syncfailed, status = 0x%x\n",
cmd->frame->hdr.cmd_status);
else {
megasas_return_cmd(instance, cmd);
spin_unlock_irqrestore(
instance->host->host_lock,
flags);
break;
}
}
megasas_return_cmd(instance, cmd);
/*
* Set fast path IO to ZERO.
* Validate Map will set proper value.
* Meanwhile all IOs will go as LD IO.
*/
if (status == MFI_STAT_OK &&
(MR_ValidateMapInfo(instance, (instance->map_id + 1)))) {
instance->map_id++;
fusion->fast_path_io = 1;
} else {
fusion->fast_path_io = 0;
}
megasas_sync_map_info(instance);
spin_unlock_irqrestore(instance->host->host_lock,
flags);
break;
}
if (opcode == MR_DCMD_CTRL_EVENT_GET_INFO ||
opcode == MR_DCMD_CTRL_EVENT_GET) {
spin_lock_irqsave(&poll_aen_lock, flags);
megasas_poll_wait_aen = 0;
spin_unlock_irqrestore(&poll_aen_lock, flags);
}
/* FW has an updated PD sequence */
if ((opcode == MR_DCMD_SYSTEM_PD_MAP_GET_INFO) &&
(cmd->frame->dcmd.mbox.b[0] == 1)) {
spin_lock_irqsave(instance->host->host_lock, flags);
status = cmd->frame->hdr.cmd_status;
instance->jbod_seq_cmd = NULL;
megasas_return_cmd(instance, cmd);
if (status == MFI_STAT_OK) {
instance->pd_seq_map_id++;
/* Re-register a pd sync seq num cmd */
if (megasas_sync_pd_seq_num(instance, true))
instance->use_seqnum_jbod_fp = false;
} else
instance->use_seqnum_jbod_fp = false;
spin_unlock_irqrestore(instance->host->host_lock, flags);
break;
}
/*
* See if got an event notification
*/
if (opcode == MR_DCMD_CTRL_EVENT_WAIT)
megasas_service_aen(instance, cmd);
else
megasas_complete_int_cmd(instance, cmd);
break;
case MFI_CMD_ABORT:
/*
* Cmd issued to abort another cmd returned
*/
megasas_complete_abort(instance, cmd);
break;
default:
dev_info(&instance->pdev->dev, "Unknown command completed! [0x%X]\n",
hdr->cmd);
megasas_complete_int_cmd(instance, cmd);
break;
}
}
|
megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
u8 alt_status)
{
int exception = 0;
struct megasas_header *hdr = &cmd->frame->hdr;
unsigned long flags;
struct fusion_context *fusion = instance->ctrl_context;
u32 opcode, status;
/* flag for the retry reset */
cmd->retry_for_fw_reset = 0;
if (cmd->scmd)
cmd->scmd->SCp.ptr = NULL;
switch (hdr->cmd) {
case MFI_CMD_INVALID:
/* Some older 1068 controller FW may keep a pended
MR_DCMD_CTRL_EVENT_GET_INFO left over from the main kernel
when booting the kdump kernel. Ignore this command to
prevent a kernel panic on shutdown of the kdump kernel. */
dev_warn(&instance->pdev->dev, "MFI_CMD_INVALID command "
"completed\n");
dev_warn(&instance->pdev->dev, "If you have a controller "
"other than PERC5, please upgrade your firmware\n");
break;
case MFI_CMD_PD_SCSI_IO:
case MFI_CMD_LD_SCSI_IO:
/*
* MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been
* issued either through an IO path or an IOCTL path. If it
* was via IOCTL, we will send it to internal completion.
*/
if (cmd->sync_cmd) {
cmd->sync_cmd = 0;
megasas_complete_int_cmd(instance, cmd);
break;
}
/* fall through */
case MFI_CMD_LD_READ:
case MFI_CMD_LD_WRITE:
if (alt_status) {
cmd->scmd->result = alt_status << 16;
exception = 1;
}
if (exception) {
atomic_dec(&instance->fw_outstanding);
scsi_dma_unmap(cmd->scmd);
cmd->scmd->scsi_done(cmd->scmd);
megasas_return_cmd(instance, cmd);
break;
}
switch (hdr->cmd_status) {
case MFI_STAT_OK:
cmd->scmd->result = DID_OK << 16;
break;
case MFI_STAT_SCSI_IO_FAILED:
case MFI_STAT_LD_INIT_IN_PROGRESS:
cmd->scmd->result =
(DID_ERROR << 16) | hdr->scsi_status;
break;
case MFI_STAT_SCSI_DONE_WITH_ERROR:
cmd->scmd->result = (DID_OK << 16) | hdr->scsi_status;
if (hdr->scsi_status == SAM_STAT_CHECK_CONDITION) {
memset(cmd->scmd->sense_buffer, 0,
SCSI_SENSE_BUFFERSIZE);
memcpy(cmd->scmd->sense_buffer, cmd->sense,
hdr->sense_len);
cmd->scmd->result |= DRIVER_SENSE << 24;
}
break;
case MFI_STAT_LD_OFFLINE:
case MFI_STAT_DEVICE_NOT_FOUND:
cmd->scmd->result = DID_BAD_TARGET << 16;
break;
default:
dev_printk(KERN_DEBUG, &instance->pdev->dev, "MFI FW status %#x\n",
hdr->cmd_status);
cmd->scmd->result = DID_ERROR << 16;
break;
}
atomic_dec(&instance->fw_outstanding);
scsi_dma_unmap(cmd->scmd);
cmd->scmd->scsi_done(cmd->scmd);
megasas_return_cmd(instance, cmd);
break;
case MFI_CMD_SMP:
case MFI_CMD_STP:
case MFI_CMD_NVME:
megasas_complete_int_cmd(instance, cmd);
break;
case MFI_CMD_DCMD:
opcode = le32_to_cpu(cmd->frame->dcmd.opcode);
/* Check for LD map update */
if ((opcode == MR_DCMD_LD_MAP_GET_INFO)
&& (cmd->frame->dcmd.mbox.b[1] == 1)) {
fusion->fast_path_io = 0;
spin_lock_irqsave(instance->host->host_lock, flags);
status = cmd->frame->hdr.cmd_status;
instance->map_update_cmd = NULL;
if (status != MFI_STAT_OK) {
if (status != MFI_STAT_NOT_FOUND)
dev_warn(&instance->pdev->dev, "map syncfailed, status = 0x%x\n",
cmd->frame->hdr.cmd_status);
else {
megasas_return_cmd(instance, cmd);
spin_unlock_irqrestore(
instance->host->host_lock,
flags);
break;
}
}
megasas_return_cmd(instance, cmd);
/*
* Set fast path IO to ZERO.
* Validate Map will set proper value.
* Meanwhile all IOs will go as LD IO.
*/
if (status == MFI_STAT_OK &&
(MR_ValidateMapInfo(instance, (instance->map_id + 1)))) {
instance->map_id++;
fusion->fast_path_io = 1;
} else {
fusion->fast_path_io = 0;
}
megasas_sync_map_info(instance);
spin_unlock_irqrestore(instance->host->host_lock,
flags);
break;
}
if (opcode == MR_DCMD_CTRL_EVENT_GET_INFO ||
opcode == MR_DCMD_CTRL_EVENT_GET) {
spin_lock_irqsave(&poll_aen_lock, flags);
megasas_poll_wait_aen = 0;
spin_unlock_irqrestore(&poll_aen_lock, flags);
}
/* FW has an updated PD sequence */
if ((opcode == MR_DCMD_SYSTEM_PD_MAP_GET_INFO) &&
(cmd->frame->dcmd.mbox.b[0] == 1)) {
spin_lock_irqsave(instance->host->host_lock, flags);
status = cmd->frame->hdr.cmd_status;
instance->jbod_seq_cmd = NULL;
megasas_return_cmd(instance, cmd);
if (status == MFI_STAT_OK) {
instance->pd_seq_map_id++;
/* Re-register a pd sync seq num cmd */
if (megasas_sync_pd_seq_num(instance, true))
instance->use_seqnum_jbod_fp = false;
} else
instance->use_seqnum_jbod_fp = false;
spin_unlock_irqrestore(instance->host->host_lock, flags);
break;
}
/*
* See if got an event notification
*/
if (opcode == MR_DCMD_CTRL_EVENT_WAIT)
megasas_service_aen(instance, cmd);
else
megasas_complete_int_cmd(instance, cmd);
break;
case MFI_CMD_ABORT:
/*
* Cmd issued to abort another cmd returned
*/
megasas_complete_abort(instance, cmd);
break;
default:
dev_info(&instance->pdev->dev, "Unknown command completed! [0x%X]\n",
hdr->cmd);
megasas_complete_int_cmd(instance, cmd);
break;
}
}
|
C
|
linux
| 0 |
CVE-2013-2094
|
https://www.cvedetails.com/cve/CVE-2013-2094/
|
CWE-189
|
https://github.com/torvalds/linux/commit/8176cced706b5e5d15887584150764894e94e02f
|
8176cced706b5e5d15887584150764894e94e02f
|
perf: Treat attr.config as u64 in perf_swevent_init()
Trinity discovered that we fail to check all 64 bits of
attr.config passed by user space, resulting to out-of-bounds
access of the perf_swevent_enabled array in
sw_perf_event_destroy().
Introduced in commit b0a873ebb ("perf: Register PMU
implementations").
Signed-off-by: Tommi Rantala <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: [email protected]
Cc: Paul Mackerras <[email protected]>
Cc: Arnaldo Carvalho de Melo <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
|
static void free_event(struct perf_event *event)
{
irq_work_sync(&event->pending);
if (!event->parent) {
if (event->attach_state & PERF_ATTACH_TASK)
static_key_slow_dec_deferred(&perf_sched_events);
if (event->attr.mmap || event->attr.mmap_data)
atomic_dec(&nr_mmap_events);
if (event->attr.comm)
atomic_dec(&nr_comm_events);
if (event->attr.task)
atomic_dec(&nr_task_events);
if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
put_callchain_buffers();
if (is_cgroup_event(event)) {
atomic_dec(&per_cpu(perf_cgroup_events, event->cpu));
static_key_slow_dec_deferred(&perf_sched_events);
}
if (has_branch_stack(event)) {
static_key_slow_dec_deferred(&perf_sched_events);
/* is system-wide event */
if (!(event->attach_state & PERF_ATTACH_TASK))
atomic_dec(&per_cpu(perf_branch_stack_events,
event->cpu));
}
}
if (event->rb) {
ring_buffer_put(event->rb);
event->rb = NULL;
}
if (is_cgroup_event(event))
perf_detach_cgroup(event);
if (event->destroy)
event->destroy(event);
if (event->ctx)
put_ctx(event->ctx);
call_rcu(&event->rcu_head, free_event_rcu);
}
|
static void free_event(struct perf_event *event)
{
irq_work_sync(&event->pending);
if (!event->parent) {
if (event->attach_state & PERF_ATTACH_TASK)
static_key_slow_dec_deferred(&perf_sched_events);
if (event->attr.mmap || event->attr.mmap_data)
atomic_dec(&nr_mmap_events);
if (event->attr.comm)
atomic_dec(&nr_comm_events);
if (event->attr.task)
atomic_dec(&nr_task_events);
if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
put_callchain_buffers();
if (is_cgroup_event(event)) {
atomic_dec(&per_cpu(perf_cgroup_events, event->cpu));
static_key_slow_dec_deferred(&perf_sched_events);
}
if (has_branch_stack(event)) {
static_key_slow_dec_deferred(&perf_sched_events);
/* is system-wide event */
if (!(event->attach_state & PERF_ATTACH_TASK))
atomic_dec(&per_cpu(perf_branch_stack_events,
event->cpu));
}
}
if (event->rb) {
ring_buffer_put(event->rb);
event->rb = NULL;
}
if (is_cgroup_event(event))
perf_detach_cgroup(event);
if (event->destroy)
event->destroy(event);
if (event->ctx)
put_ctx(event->ctx);
call_rcu(&event->rcu_head, free_event_rcu);
}
|
C
|
linux
| 0 |
CVE-2014-1700
|
https://www.cvedetails.com/cve/CVE-2014-1700/
|
CWE-399
|
https://github.com/chromium/chromium/commit/d926098e2e2be270c80a5ba25ab8a611b80b8556
|
d926098e2e2be270c80a5ba25ab8a611b80b8556
|
Connect WebUSB client interface to the devices app
This provides a basic WebUSB client interface in
content/renderer. Most of the interface is unimplemented,
but this CL hooks up navigator.usb.getDevices() to the
browser's Mojo devices app to enumerate available USB
devices.
BUG=492204
Review URL: https://codereview.chromium.org/1293253002
Cr-Commit-Position: refs/heads/master@{#344881}
|
void RenderFrameImpl::DidHideExternalPopupMenu() {
external_popup_menu_.reset();
}
|
void RenderFrameImpl::DidHideExternalPopupMenu() {
external_popup_menu_.reset();
}
|
C
|
Chrome
| 0 |
CVE-2016-8645
|
https://www.cvedetails.com/cve/CVE-2016-8645/
|
CWE-284
|
https://github.com/torvalds/linux/commit/ac6e780070e30e4c35bd395acfe9191e6268bdd3
|
ac6e780070e30e4c35bd395acfe9191e6268bdd3
|
tcp: take care of truncations done by sk_filter()
With syzkaller help, Marco Grassi found a bug in TCP stack,
crashing in tcp_collapse()
Root cause is that sk_filter() can truncate the incoming skb,
but TCP stack was not really expecting this to happen.
It probably was expecting a simple DROP or ACCEPT behavior.
We first need to make sure no part of TCP header could be removed.
Then we need to adjust TCP_SKB_CB(skb)->end_seq
Many thanks to syzkaller team and Marco for giving us a reproducer.
Signed-off-by: Eric Dumazet <[email protected]>
Reported-by: Marco Grassi <[email protected]>
Reported-by: Vladis Dronov <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
const union tcp_md5_addr *addr,
int family)
{
const struct tcp_sock *tp = tcp_sk(sk);
struct tcp_md5sig_key *key;
unsigned int size = sizeof(struct in_addr);
const struct tcp_md5sig_info *md5sig;
/* caller either holds rcu_read_lock() or socket lock */
md5sig = rcu_dereference_check(tp->md5sig_info,
lockdep_sock_is_held(sk));
if (!md5sig)
return NULL;
#if IS_ENABLED(CONFIG_IPV6)
if (family == AF_INET6)
size = sizeof(struct in6_addr);
#endif
hlist_for_each_entry_rcu(key, &md5sig->head, node) {
if (key->family != family)
continue;
if (!memcmp(&key->addr, addr, size))
return key;
}
return NULL;
}
|
struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
const union tcp_md5_addr *addr,
int family)
{
const struct tcp_sock *tp = tcp_sk(sk);
struct tcp_md5sig_key *key;
unsigned int size = sizeof(struct in_addr);
const struct tcp_md5sig_info *md5sig;
/* caller either holds rcu_read_lock() or socket lock */
md5sig = rcu_dereference_check(tp->md5sig_info,
lockdep_sock_is_held(sk));
if (!md5sig)
return NULL;
#if IS_ENABLED(CONFIG_IPV6)
if (family == AF_INET6)
size = sizeof(struct in6_addr);
#endif
hlist_for_each_entry_rcu(key, &md5sig->head, node) {
if (key->family != family)
continue;
if (!memcmp(&key->addr, addr, size))
return key;
}
return NULL;
}
|
C
|
linux
| 0 |
CVE-2011-2918
|
https://www.cvedetails.com/cve/CVE-2011-2918/
|
CWE-399
|
https://github.com/torvalds/linux/commit/a8b0ca17b80e92faab46ee7179ba9e99ccb61233
|
a8b0ca17b80e92faab46ee7179ba9e99ccb61233
|
perf: Remove the nmi parameter from the swevent and overflow interface
The nmi parameter indicated if we could do wakeups from the current
context, if not, we would set some state and self-IPI and let the
resulting interrupt do the wakeup.
For the various event classes:
- hardware: nmi=0; PMI is in fact an NMI or we run irq_work_run from
the PMI-tail (ARM etc.)
- tracepoint: nmi=0; since tracepoint could be from NMI context.
- software: nmi=[0,1]; some, like the schedule thing cannot
perform wakeups, and hence need 0.
As one can see, there is very little nmi=1 usage, and the down-side of
not using it is that on some platforms some software events can have a
jiffy delay in wakeup (when arch_irq_work_raise isn't implemented).
The up-side however is that we can remove the nmi parameter and save a
bunch of conditionals in fast paths.
Signed-off-by: Peter Zijlstra <[email protected]>
Cc: Michael Cree <[email protected]>
Cc: Will Deacon <[email protected]>
Cc: Deng-Cheng Zhu <[email protected]>
Cc: Anton Blanchard <[email protected]>
Cc: Eric B Munson <[email protected]>
Cc: Heiko Carstens <[email protected]>
Cc: Paul Mundt <[email protected]>
Cc: David S. Miller <[email protected]>
Cc: Frederic Weisbecker <[email protected]>
Cc: Jason Wessel <[email protected]>
Cc: Don Zickus <[email protected]>
Link: http://lkml.kernel.org/n/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
|
xscale2pmu_enable_event(struct hw_perf_event *hwc, int idx)
{
unsigned long flags, ien, evtsel;
ien = xscale2pmu_read_int_enable();
evtsel = xscale2pmu_read_event_select();
switch (idx) {
case XSCALE_CYCLE_COUNTER:
ien |= XSCALE2_CCOUNT_INT_EN;
break;
case XSCALE_COUNTER0:
ien |= XSCALE2_COUNT0_INT_EN;
evtsel &= ~XSCALE2_COUNT0_EVT_MASK;
evtsel |= hwc->config_base << XSCALE2_COUNT0_EVT_SHFT;
break;
case XSCALE_COUNTER1:
ien |= XSCALE2_COUNT1_INT_EN;
evtsel &= ~XSCALE2_COUNT1_EVT_MASK;
evtsel |= hwc->config_base << XSCALE2_COUNT1_EVT_SHFT;
break;
case XSCALE_COUNTER2:
ien |= XSCALE2_COUNT2_INT_EN;
evtsel &= ~XSCALE2_COUNT2_EVT_MASK;
evtsel |= hwc->config_base << XSCALE2_COUNT2_EVT_SHFT;
break;
case XSCALE_COUNTER3:
ien |= XSCALE2_COUNT3_INT_EN;
evtsel &= ~XSCALE2_COUNT3_EVT_MASK;
evtsel |= hwc->config_base << XSCALE2_COUNT3_EVT_SHFT;
break;
default:
WARN_ONCE(1, "invalid counter number (%d)\n", idx);
return;
}
raw_spin_lock_irqsave(&pmu_lock, flags);
xscale2pmu_write_event_select(evtsel);
xscale2pmu_write_int_enable(ien);
raw_spin_unlock_irqrestore(&pmu_lock, flags);
}
|
xscale2pmu_enable_event(struct hw_perf_event *hwc, int idx)
{
unsigned long flags, ien, evtsel;
ien = xscale2pmu_read_int_enable();
evtsel = xscale2pmu_read_event_select();
switch (idx) {
case XSCALE_CYCLE_COUNTER:
ien |= XSCALE2_CCOUNT_INT_EN;
break;
case XSCALE_COUNTER0:
ien |= XSCALE2_COUNT0_INT_EN;
evtsel &= ~XSCALE2_COUNT0_EVT_MASK;
evtsel |= hwc->config_base << XSCALE2_COUNT0_EVT_SHFT;
break;
case XSCALE_COUNTER1:
ien |= XSCALE2_COUNT1_INT_EN;
evtsel &= ~XSCALE2_COUNT1_EVT_MASK;
evtsel |= hwc->config_base << XSCALE2_COUNT1_EVT_SHFT;
break;
case XSCALE_COUNTER2:
ien |= XSCALE2_COUNT2_INT_EN;
evtsel &= ~XSCALE2_COUNT2_EVT_MASK;
evtsel |= hwc->config_base << XSCALE2_COUNT2_EVT_SHFT;
break;
case XSCALE_COUNTER3:
ien |= XSCALE2_COUNT3_INT_EN;
evtsel &= ~XSCALE2_COUNT3_EVT_MASK;
evtsel |= hwc->config_base << XSCALE2_COUNT3_EVT_SHFT;
break;
default:
WARN_ONCE(1, "invalid counter number (%d)\n", idx);
return;
}
raw_spin_lock_irqsave(&pmu_lock, flags);
xscale2pmu_write_event_select(evtsel);
xscale2pmu_write_int_enable(ien);
raw_spin_unlock_irqrestore(&pmu_lock, flags);
}
|
C
|
linux
| 0 |
null | null | null |
https://github.com/chromium/chromium/commit/7cb8e1ae121cf6b14aa0a59cc708de630c0ef965
|
7cb8e1ae121cf6b14aa0a59cc708de630c0ef965
|
Move variations prefs into the variations component
These prefs are used by variations code that is targeted for componentization.
BUG=382865
TBR=thakis
Review URL: https://codereview.chromium.org/1265423003
Cr-Commit-Position: refs/heads/master@{#343661}
|
void ChromeBrowserMainParts::PreBrowserStart() {
TRACE_EVENT0("startup", "ChromeBrowserMainParts::PreBrowserStart");
for (size_t i = 0; i < chrome_extra_parts_.size(); ++i)
chrome_extra_parts_[i]->PreBrowserStart();
three_d_observer_.reset(new ThreeDAPIObserver());
#if defined(OS_CHROMEOS)
g_browser_process->GetOomPriorityManager()->Start();
#elif defined(OS_WIN) || defined(OS_MACOSX)
const std::string group_name =
base::FieldTrialList::FindFullName("AutomaticTabDiscarding");
if (parsed_command_line().HasSwitch(switches::kEnableTabDiscarding) ||
base::StartsWith(group_name, "Enabled", base::CompareCase::SENSITIVE)) {
g_browser_process->GetOomPriorityManager()->Start();
}
#endif
}
|
void ChromeBrowserMainParts::PreBrowserStart() {
TRACE_EVENT0("startup", "ChromeBrowserMainParts::PreBrowserStart");
for (size_t i = 0; i < chrome_extra_parts_.size(); ++i)
chrome_extra_parts_[i]->PreBrowserStart();
three_d_observer_.reset(new ThreeDAPIObserver());
#if defined(OS_CHROMEOS)
g_browser_process->GetOomPriorityManager()->Start();
#elif defined(OS_WIN) || defined(OS_MACOSX)
const std::string group_name =
base::FieldTrialList::FindFullName("AutomaticTabDiscarding");
if (parsed_command_line().HasSwitch(switches::kEnableTabDiscarding) ||
base::StartsWith(group_name, "Enabled", base::CompareCase::SENSITIVE)) {
g_browser_process->GetOomPriorityManager()->Start();
}
#endif
}
|
C
|
Chrome
| 0 |
CVE-2015-3456
|
https://www.cvedetails.com/cve/CVE-2015-3456/
|
CWE-119
|
https://git.qemu.org/?p=qemu.git;a=commitdiff;h=e907746266721f305d67bc0718795fedee2e824c
|
e907746266721f305d67bc0718795fedee2e824c
| null |
static inline FDrive *drv2(FDCtrl *fdctrl)
{
if ((fdctrl->tdr & FD_TDR_BOOTSEL) < (2 << 2))
return &fdctrl->drives[2];
else
return &fdctrl->drives[1];
}
|
static inline FDrive *drv2(FDCtrl *fdctrl)
{
if ((fdctrl->tdr & FD_TDR_BOOTSEL) < (2 << 2))
return &fdctrl->drives[2];
else
return &fdctrl->drives[1];
}
|
C
|
qemu
| 0 |
CVE-2018-1000204
|
https://www.cvedetails.com/cve/CVE-2018-1000204/
| null |
https://github.com/torvalds/linux/commit/a45b599ad808c3c982fdcdc12b0b8611c2f92824
|
a45b599ad808c3c982fdcdc12b0b8611c2f92824
|
scsi: sg: allocate with __GFP_ZERO in sg_build_indirect()
This shall help avoid copying uninitialized memory to the userspace when
calling ioctl(fd, SG_IO) with an empty command.
Reported-by: [email protected]
Cc: [email protected]
Signed-off-by: Alexander Potapenko <[email protected]>
Acked-by: Douglas Gilbert <[email protected]>
Reviewed-by: Johannes Thumshirn <[email protected]>
Signed-off-by: Martin K. Petersen <[email protected]>
|
sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
{
int ret_sz = 0, i, k, rem_sz, num, mx_sc_elems;
int sg_tablesize = sfp->parentdp->sg_tablesize;
int blk_size = buff_size, order;
gfp_t gfp_mask = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN;
struct sg_device *sdp = sfp->parentdp;
if (blk_size < 0)
return -EFAULT;
if (0 == blk_size)
++blk_size; /* don't know why */
/* round request up to next highest SG_SECTOR_SZ byte boundary */
blk_size = ALIGN(blk_size, SG_SECTOR_SZ);
SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp,
"sg_build_indirect: buff_size=%d, blk_size=%d\n",
buff_size, blk_size));
/* N.B. ret_sz carried into this block ... */
mx_sc_elems = sg_build_sgat(schp, sfp, sg_tablesize);
if (mx_sc_elems < 0)
return mx_sc_elems; /* most likely -ENOMEM */
num = scatter_elem_sz;
if (unlikely(num != scatter_elem_sz_prev)) {
if (num < PAGE_SIZE) {
scatter_elem_sz = PAGE_SIZE;
scatter_elem_sz_prev = PAGE_SIZE;
} else
scatter_elem_sz_prev = num;
}
if (sdp->device->host->unchecked_isa_dma)
gfp_mask |= GFP_DMA;
if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
gfp_mask |= __GFP_ZERO;
order = get_order(num);
retry:
ret_sz = 1 << (PAGE_SHIFT + order);
for (k = 0, rem_sz = blk_size; rem_sz > 0 && k < mx_sc_elems;
k++, rem_sz -= ret_sz) {
num = (rem_sz > scatter_elem_sz_prev) ?
scatter_elem_sz_prev : rem_sz;
schp->pages[k] = alloc_pages(gfp_mask | __GFP_ZERO, order);
if (!schp->pages[k])
goto out;
if (num == scatter_elem_sz_prev) {
if (unlikely(ret_sz > scatter_elem_sz_prev)) {
scatter_elem_sz = ret_sz;
scatter_elem_sz_prev = ret_sz;
}
}
SCSI_LOG_TIMEOUT(5, sg_printk(KERN_INFO, sfp->parentdp,
"sg_build_indirect: k=%d, num=%d, ret_sz=%d\n",
k, num, ret_sz));
} /* end of for loop */
schp->page_order = order;
schp->k_use_sg = k;
SCSI_LOG_TIMEOUT(5, sg_printk(KERN_INFO, sfp->parentdp,
"sg_build_indirect: k_use_sg=%d, rem_sz=%d\n",
k, rem_sz));
schp->bufflen = blk_size;
if (rem_sz > 0) /* must have failed */
return -ENOMEM;
return 0;
out:
for (i = 0; i < k; i++)
__free_pages(schp->pages[i], order);
if (--order >= 0)
goto retry;
return -ENOMEM;
}
|
sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
{
int ret_sz = 0, i, k, rem_sz, num, mx_sc_elems;
int sg_tablesize = sfp->parentdp->sg_tablesize;
int blk_size = buff_size, order;
gfp_t gfp_mask = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN;
struct sg_device *sdp = sfp->parentdp;
if (blk_size < 0)
return -EFAULT;
if (0 == blk_size)
++blk_size; /* don't know why */
/* round request up to next highest SG_SECTOR_SZ byte boundary */
blk_size = ALIGN(blk_size, SG_SECTOR_SZ);
SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp,
"sg_build_indirect: buff_size=%d, blk_size=%d\n",
buff_size, blk_size));
/* N.B. ret_sz carried into this block ... */
mx_sc_elems = sg_build_sgat(schp, sfp, sg_tablesize);
if (mx_sc_elems < 0)
return mx_sc_elems; /* most likely -ENOMEM */
num = scatter_elem_sz;
if (unlikely(num != scatter_elem_sz_prev)) {
if (num < PAGE_SIZE) {
scatter_elem_sz = PAGE_SIZE;
scatter_elem_sz_prev = PAGE_SIZE;
} else
scatter_elem_sz_prev = num;
}
if (sdp->device->host->unchecked_isa_dma)
gfp_mask |= GFP_DMA;
if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
gfp_mask |= __GFP_ZERO;
order = get_order(num);
retry:
ret_sz = 1 << (PAGE_SHIFT + order);
for (k = 0, rem_sz = blk_size; rem_sz > 0 && k < mx_sc_elems;
k++, rem_sz -= ret_sz) {
num = (rem_sz > scatter_elem_sz_prev) ?
scatter_elem_sz_prev : rem_sz;
schp->pages[k] = alloc_pages(gfp_mask, order);
if (!schp->pages[k])
goto out;
if (num == scatter_elem_sz_prev) {
if (unlikely(ret_sz > scatter_elem_sz_prev)) {
scatter_elem_sz = ret_sz;
scatter_elem_sz_prev = ret_sz;
}
}
SCSI_LOG_TIMEOUT(5, sg_printk(KERN_INFO, sfp->parentdp,
"sg_build_indirect: k=%d, num=%d, ret_sz=%d\n",
k, num, ret_sz));
} /* end of for loop */
schp->page_order = order;
schp->k_use_sg = k;
SCSI_LOG_TIMEOUT(5, sg_printk(KERN_INFO, sfp->parentdp,
"sg_build_indirect: k_use_sg=%d, rem_sz=%d\n",
k, rem_sz));
schp->bufflen = blk_size;
if (rem_sz > 0) /* must have failed */
return -ENOMEM;
return 0;
out:
for (i = 0; i < k; i++)
__free_pages(schp->pages[i], order);
if (--order >= 0)
goto retry;
return -ENOMEM;
}
|
C
|
linux
| 1 |
CVE-2019-1559
|
https://www.cvedetails.com/cve/CVE-2019-1559/
|
CWE-200
|
https://git.openssl.org/gitweb/?p=openssl.git;a=commitdiff;h=e9bbefbf0f24c57645e7ad6a5a71ae649d18ac8e
|
e9bbefbf0f24c57645e7ad6a5a71ae649d18ac8e
| null |
int dtls1_get_record(SSL *s)
{
int ssl_major, ssl_minor;
int i, n;
SSL3_RECORD *rr;
unsigned char *p = NULL;
unsigned short version;
DTLS1_BITMAP *bitmap;
unsigned int is_next_epoch;
rr = &(s->s3->rrec);
again:
/*
* The epoch may have changed. If so, process all the pending records.
* This is a non-blocking operation.
*/
if (!dtls1_process_buffered_records(s))
return -1;
/* if we're renegotiating, then there may be buffered records */
if (dtls1_get_processed_record(s))
return 1;
/* get something from the wire */
/* check if we have the header */
if ((s->rstate != SSL_ST_READ_BODY) ||
(s->packet_length < DTLS1_RT_HEADER_LENGTH)) {
n = ssl3_read_n(s, DTLS1_RT_HEADER_LENGTH, s->s3->rbuf.len, 0);
/* read timeout is handled by dtls1_read_bytes */
if (n <= 0)
return (n); /* error or non-blocking */
/* this packet contained a partial record, dump it */
if (s->packet_length != DTLS1_RT_HEADER_LENGTH) {
s->packet_length = 0;
goto again;
}
s->rstate = SSL_ST_READ_BODY;
p = s->packet;
if (s->msg_callback)
s->msg_callback(0, 0, SSL3_RT_HEADER, p, DTLS1_RT_HEADER_LENGTH,
s, s->msg_callback_arg);
/* Pull apart the header into the DTLS1_RECORD */
rr->type = *(p++);
ssl_major = *(p++);
ssl_minor = *(p++);
version = (ssl_major << 8) | ssl_minor;
/* sequence number is 64 bits, with top 2 bytes = epoch */
n2s(p, rr->epoch);
memcpy(&(s->s3->read_sequence[2]), p, 6);
p += 6;
n2s(p, rr->length);
/*
* Lets check the version. We tolerate alerts that don't have the exact
* version number (e.g. because of protocol version errors)
*/
if (!s->first_packet && rr->type != SSL3_RT_ALERT) {
if (version != s->version) {
/* unexpected version, silently discard */
rr->length = 0;
s->packet_length = 0;
goto again;
}
}
if ((version & 0xff00) != (s->version & 0xff00)) {
/* wrong version, silently discard record */
rr->length = 0;
s->packet_length = 0;
goto again;
}
if (rr->length > SSL3_RT_MAX_ENCRYPTED_LENGTH) {
/* record too long, silently discard it */
rr->length = 0;
s->packet_length = 0;
goto again;
}
/* now s->rstate == SSL_ST_READ_BODY */
}
/* s->rstate == SSL_ST_READ_BODY, get and decode the data */
if (rr->length > s->packet_length - DTLS1_RT_HEADER_LENGTH) {
/* now s->packet_length == DTLS1_RT_HEADER_LENGTH */
i = rr->length;
n = ssl3_read_n(s, i, i, 1);
/* this packet contained a partial record, dump it */
if (n != i) {
rr->length = 0;
s->packet_length = 0;
goto again;
}
/*
* now n == rr->length, and s->packet_length ==
* DTLS1_RT_HEADER_LENGTH + rr->length
*/
}
s->rstate = SSL_ST_READ_HEADER; /* set state for later operations */
/* match epochs. NULL means the packet is dropped on the floor */
bitmap = dtls1_get_bitmap(s, rr, &is_next_epoch);
if (bitmap == NULL) {
rr->length = 0;
s->packet_length = 0; /* dump this record */
goto again; /* get another record */
}
#ifndef OPENSSL_NO_SCTP
/* Only do replay check if no SCTP bio */
if (!BIO_dgram_is_sctp(SSL_get_rbio(s))) {
#endif
/*
* Check whether this is a repeat, or aged record. Don't check if
* we're listening and this message is a ClientHello. They can look
* as if they're replayed, since they arrive from different
* connections and would be dropped unnecessarily.
*/
if (!(s->d1->listen && rr->type == SSL3_RT_HANDSHAKE &&
s->packet_length > DTLS1_RT_HEADER_LENGTH &&
s->packet[DTLS1_RT_HEADER_LENGTH] == SSL3_MT_CLIENT_HELLO) &&
!dtls1_record_replay_check(s, bitmap)) {
rr->length = 0;
s->packet_length = 0; /* dump this record */
goto again; /* get another record */
}
#ifndef OPENSSL_NO_SCTP
}
#endif
/* just read a 0 length packet */
if (rr->length == 0)
goto again;
/*
* If this record is from the next epoch (either HM or ALERT), and a
* handshake is currently in progress, buffer it since it cannot be
* processed at this time. However, do not buffer anything while
* listening.
*/
if (is_next_epoch) {
if ((SSL_in_init(s) || s->in_handshake) && !s->d1->listen) {
if (dtls1_buffer_record
(s, &(s->d1->unprocessed_rcds), rr->seq_num) < 0)
return -1;
}
rr->length = 0;
s->packet_length = 0;
goto again;
}
if (!dtls1_process_record(s, bitmap)) {
rr->length = 0;
s->packet_length = 0; /* dump this record */
goto again; /* get another record */
}
return (1);
}
|
int dtls1_get_record(SSL *s)
{
int ssl_major, ssl_minor;
int i, n;
SSL3_RECORD *rr;
unsigned char *p = NULL;
unsigned short version;
DTLS1_BITMAP *bitmap;
unsigned int is_next_epoch;
rr = &(s->s3->rrec);
again:
/*
* The epoch may have changed. If so, process all the pending records.
* This is a non-blocking operation.
*/
if (!dtls1_process_buffered_records(s))
return -1;
/* if we're renegotiating, then there may be buffered records */
if (dtls1_get_processed_record(s))
return 1;
/* get something from the wire */
/* check if we have the header */
if ((s->rstate != SSL_ST_READ_BODY) ||
(s->packet_length < DTLS1_RT_HEADER_LENGTH)) {
n = ssl3_read_n(s, DTLS1_RT_HEADER_LENGTH, s->s3->rbuf.len, 0);
/* read timeout is handled by dtls1_read_bytes */
if (n <= 0)
return (n); /* error or non-blocking */
/* this packet contained a partial record, dump it */
if (s->packet_length != DTLS1_RT_HEADER_LENGTH) {
s->packet_length = 0;
goto again;
}
s->rstate = SSL_ST_READ_BODY;
p = s->packet;
if (s->msg_callback)
s->msg_callback(0, 0, SSL3_RT_HEADER, p, DTLS1_RT_HEADER_LENGTH,
s, s->msg_callback_arg);
/* Pull apart the header into the DTLS1_RECORD */
rr->type = *(p++);
ssl_major = *(p++);
ssl_minor = *(p++);
version = (ssl_major << 8) | ssl_minor;
/* sequence number is 64 bits, with top 2 bytes = epoch */
n2s(p, rr->epoch);
memcpy(&(s->s3->read_sequence[2]), p, 6);
p += 6;
n2s(p, rr->length);
/*
* Lets check the version. We tolerate alerts that don't have the exact
* version number (e.g. because of protocol version errors)
*/
if (!s->first_packet && rr->type != SSL3_RT_ALERT) {
if (version != s->version) {
/* unexpected version, silently discard */
rr->length = 0;
s->packet_length = 0;
goto again;
}
}
if ((version & 0xff00) != (s->version & 0xff00)) {
/* wrong version, silently discard record */
rr->length = 0;
s->packet_length = 0;
goto again;
}
if (rr->length > SSL3_RT_MAX_ENCRYPTED_LENGTH) {
/* record too long, silently discard it */
rr->length = 0;
s->packet_length = 0;
goto again;
}
/* now s->rstate == SSL_ST_READ_BODY */
}
/* s->rstate == SSL_ST_READ_BODY, get and decode the data */
if (rr->length > s->packet_length - DTLS1_RT_HEADER_LENGTH) {
/* now s->packet_length == DTLS1_RT_HEADER_LENGTH */
i = rr->length;
n = ssl3_read_n(s, i, i, 1);
/* this packet contained a partial record, dump it */
if (n != i) {
rr->length = 0;
s->packet_length = 0;
goto again;
}
/*
* now n == rr->length, and s->packet_length ==
* DTLS1_RT_HEADER_LENGTH + rr->length
*/
}
s->rstate = SSL_ST_READ_HEADER; /* set state for later operations */
/* match epochs. NULL means the packet is dropped on the floor */
bitmap = dtls1_get_bitmap(s, rr, &is_next_epoch);
if (bitmap == NULL) {
rr->length = 0;
s->packet_length = 0; /* dump this record */
goto again; /* get another record */
}
#ifndef OPENSSL_NO_SCTP
/* Only do replay check if no SCTP bio */
if (!BIO_dgram_is_sctp(SSL_get_rbio(s))) {
#endif
/*
* Check whether this is a repeat, or aged record. Don't check if
* we're listening and this message is a ClientHello. They can look
* as if they're replayed, since they arrive from different
* connections and would be dropped unnecessarily.
*/
if (!(s->d1->listen && rr->type == SSL3_RT_HANDSHAKE &&
s->packet_length > DTLS1_RT_HEADER_LENGTH &&
s->packet[DTLS1_RT_HEADER_LENGTH] == SSL3_MT_CLIENT_HELLO) &&
!dtls1_record_replay_check(s, bitmap)) {
rr->length = 0;
s->packet_length = 0; /* dump this record */
goto again; /* get another record */
}
#ifndef OPENSSL_NO_SCTP
}
#endif
/* just read a 0 length packet */
if (rr->length == 0)
goto again;
/*
* If this record is from the next epoch (either HM or ALERT), and a
* handshake is currently in progress, buffer it since it cannot be
* processed at this time. However, do not buffer anything while
* listening.
*/
if (is_next_epoch) {
if ((SSL_in_init(s) || s->in_handshake) && !s->d1->listen) {
if (dtls1_buffer_record
(s, &(s->d1->unprocessed_rcds), rr->seq_num) < 0)
return -1;
}
rr->length = 0;
s->packet_length = 0;
goto again;
}
if (!dtls1_process_record(s, bitmap)) {
rr->length = 0;
s->packet_length = 0; /* dump this record */
goto again; /* get another record */
}
return (1);
}
|
C
|
openssl
| 0 |
CVE-2018-13006
|
https://www.cvedetails.com/cve/CVE-2018-13006/
|
CWE-125
|
https://github.com/gpac/gpac/commit/bceb03fd2be95097a7b409ea59914f332fb6bc86
|
bceb03fd2be95097a7b409ea59914f332fb6bc86
|
fixed 2 possible heap overflows (inc. #1088)
|
GF_Err chpl_Read(GF_Box *s,GF_BitStream *bs)
{
GF_ChapterEntry *ce;
u32 nb_chaps, len, i, count;
GF_ChapterListBox *ptr = (GF_ChapterListBox *)s;
/*reserved or ???*/
gf_bs_read_u32(bs);
nb_chaps = gf_bs_read_u8(bs);
count = 0;
while (nb_chaps) {
GF_SAFEALLOC(ce, GF_ChapterEntry);
if (!ce) return GF_OUT_OF_MEM;
ce->start_time = gf_bs_read_u64(bs);
len = gf_bs_read_u8(bs);
if (len) {
ce->name = (char *)gf_malloc(sizeof(char)*(len+1));
gf_bs_read_data(bs, ce->name, len);
ce->name[len] = 0;
} else {
ce->name = gf_strdup("");
}
for (i=0; i<count; i++) {
GF_ChapterEntry *ace = (GF_ChapterEntry *) gf_list_get(ptr->list, i);
if (ace->start_time >= ce->start_time) {
gf_list_insert(ptr->list, ce, i);
ce = NULL;
break;
}
}
if (ce) gf_list_add(ptr->list, ce);
count++;
nb_chaps--;
}
return GF_OK;
}
|
GF_Err chpl_Read(GF_Box *s,GF_BitStream *bs)
{
GF_ChapterEntry *ce;
u32 nb_chaps, len, i, count;
GF_ChapterListBox *ptr = (GF_ChapterListBox *)s;
/*reserved or ???*/
gf_bs_read_u32(bs);
nb_chaps = gf_bs_read_u8(bs);
count = 0;
while (nb_chaps) {
GF_SAFEALLOC(ce, GF_ChapterEntry);
if (!ce) return GF_OUT_OF_MEM;
ce->start_time = gf_bs_read_u64(bs);
len = gf_bs_read_u8(bs);
if (len) {
ce->name = (char *)gf_malloc(sizeof(char)*(len+1));
gf_bs_read_data(bs, ce->name, len);
ce->name[len] = 0;
} else {
ce->name = gf_strdup("");
}
for (i=0; i<count; i++) {
GF_ChapterEntry *ace = (GF_ChapterEntry *) gf_list_get(ptr->list, i);
if (ace->start_time >= ce->start_time) {
gf_list_insert(ptr->list, ce, i);
ce = NULL;
break;
}
}
if (ce) gf_list_add(ptr->list, ce);
count++;
nb_chaps--;
}
return GF_OK;
}
|
C
|
gpac
| 0 |
CVE-2018-12684
|
https://www.cvedetails.com/cve/CVE-2018-12684/
|
CWE-125
|
https://github.com/civetweb/civetweb/commit/8fd069f6dedb064339f1091069ac96f3f8bdb552
|
8fd069f6dedb064339f1091069ac96f3f8bdb552
|
Check length of memcmp
|
connect_socket(struct mg_context *ctx /* may be NULL */,
const char *host,
int port,
int use_ssl,
char *ebuf,
size_t ebuf_len,
SOCKET *sock /* output: socket, must not be NULL */,
union usa *sa /* output: socket address, must not be NULL */
)
{
int ip_ver = 0;
int conn_ret = -1;
*sock = INVALID_SOCKET;
memset(sa, 0, sizeof(*sa));
if (ebuf_len > 0) {
*ebuf = 0;
}
if (host == NULL) {
mg_snprintf(NULL,
NULL, /* No truncation check for ebuf */
ebuf,
ebuf_len,
"%s",
"NULL host");
return 0;
}
if ((port <= 0) || !is_valid_port((unsigned)port)) {
mg_snprintf(NULL,
NULL, /* No truncation check for ebuf */
ebuf,
ebuf_len,
"%s",
"invalid port");
return 0;
}
#if !defined(NO_SSL)
#if !defined(NO_SSL_DL)
#if defined(OPENSSL_API_1_1)
if (use_ssl && (TLS_client_method == NULL)) {
mg_snprintf(NULL,
NULL, /* No truncation check for ebuf */
ebuf,
ebuf_len,
"%s",
"SSL is not initialized");
return 0;
}
#else
if (use_ssl && (SSLv23_client_method == NULL)) {
mg_snprintf(NULL,
NULL, /* No truncation check for ebuf */
ebuf,
ebuf_len,
"%s",
"SSL is not initialized");
return 0;
}
#endif /* OPENSSL_API_1_1 */
#else
(void)use_ssl;
#endif /* NO_SSL_DL */
#else
(void)use_ssl;
#endif /* !defined(NO_SSL) */
if (mg_inet_pton(AF_INET, host, &sa->sin, sizeof(sa->sin))) {
sa->sin.sin_family = AF_INET;
sa->sin.sin_port = htons((uint16_t)port);
ip_ver = 4;
#if defined(USE_IPV6)
} else if (mg_inet_pton(AF_INET6, host, &sa->sin6, sizeof(sa->sin6))) {
sa->sin6.sin6_family = AF_INET6;
sa->sin6.sin6_port = htons((uint16_t)port);
ip_ver = 6;
} else if (host[0] == '[') {
/* While getaddrinfo on Windows will work with [::1],
* getaddrinfo on Linux only works with ::1 (without []). */
size_t l = strlen(host + 1);
char *h = (l > 1) ? mg_strdup_ctx(host + 1, ctx) : NULL;
if (h) {
h[l - 1] = 0;
if (mg_inet_pton(AF_INET6, h, &sa->sin6, sizeof(sa->sin6))) {
sa->sin6.sin6_family = AF_INET6;
sa->sin6.sin6_port = htons((uint16_t)port);
ip_ver = 6;
}
mg_free(h);
}
#endif
}
if (ip_ver == 0) {
mg_snprintf(NULL,
NULL, /* No truncation check for ebuf */
ebuf,
ebuf_len,
"%s",
"host not found");
return 0;
}
if (ip_ver == 4) {
*sock = socket(PF_INET, SOCK_STREAM, 0);
}
#if defined(USE_IPV6)
else if (ip_ver == 6) {
*sock = socket(PF_INET6, SOCK_STREAM, 0);
}
#endif
if (*sock == INVALID_SOCKET) {
mg_snprintf(NULL,
NULL, /* No truncation check for ebuf */
ebuf,
ebuf_len,
"socket(): %s",
strerror(ERRNO));
return 0;
}
if (0 != set_non_blocking_mode(*sock)) {
mg_snprintf(NULL,
NULL, /* No truncation check for ebuf */
ebuf,
ebuf_len,
"Cannot set socket to non-blocking: %s",
strerror(ERRNO));
closesocket(*sock);
*sock = INVALID_SOCKET;
return 0;
}
set_close_on_exec(*sock, fc(ctx));
if (ip_ver == 4) {
/* connected with IPv4 */
conn_ret = connect(*sock,
(struct sockaddr *)((void *)&sa->sin),
sizeof(sa->sin));
}
#if defined(USE_IPV6)
else if (ip_ver == 6) {
/* connected with IPv6 */
conn_ret = connect(*sock,
(struct sockaddr *)((void *)&sa->sin6),
sizeof(sa->sin6));
}
#endif
#if defined(_WIN32)
if (conn_ret != 0) {
DWORD err = WSAGetLastError(); /* could return WSAEWOULDBLOCK */
conn_ret = (int)err;
#if !defined(EINPROGRESS)
#define EINPROGRESS (WSAEWOULDBLOCK) /* Winsock equivalent */
#endif /* if !defined(EINPROGRESS) */
}
#endif
if ((conn_ret != 0) && (conn_ret != EINPROGRESS)) {
/* Data for getsockopt */
int sockerr = -1;
void *psockerr = &sockerr;
#if defined(_WIN32)
int len = (int)sizeof(sockerr);
#else
socklen_t len = (socklen_t)sizeof(sockerr);
#endif
/* Data for poll */
struct pollfd pfd[1];
int pollres;
int ms_wait = 10000; /* 10 second timeout */
/* For a non-blocking socket, the connect sequence is:
* 1) call connect (will not block)
* 2) wait until the socket is ready for writing (select or poll)
* 3) check connection state with getsockopt
*/
pfd[0].fd = *sock;
pfd[0].events = POLLOUT;
pollres = mg_poll(pfd, 1, (int)(ms_wait), &(ctx->stop_flag));
if (pollres != 1) {
/* Not connected */
mg_snprintf(NULL,
NULL, /* No truncation check for ebuf */
ebuf,
ebuf_len,
"connect(%s:%d): timeout",
host,
port);
closesocket(*sock);
*sock = INVALID_SOCKET;
return 0;
}
#if defined(_WIN32)
getsockopt(*sock, SOL_SOCKET, SO_ERROR, (char *)psockerr, &len);
#else
getsockopt(*sock, SOL_SOCKET, SO_ERROR, psockerr, &len);
#endif
if (sockerr != 0) {
/* Not connected */
mg_snprintf(NULL,
NULL, /* No truncation check for ebuf */
ebuf,
ebuf_len,
"connect(%s:%d): error %s",
host,
port,
strerror(sockerr));
closesocket(*sock);
*sock = INVALID_SOCKET;
return 0;
}
}
return 1;
}
|
connect_socket(struct mg_context *ctx /* may be NULL */,
const char *host,
int port,
int use_ssl,
char *ebuf,
size_t ebuf_len,
SOCKET *sock /* output: socket, must not be NULL */,
union usa *sa /* output: socket address, must not be NULL */
)
{
int ip_ver = 0;
int conn_ret = -1;
*sock = INVALID_SOCKET;
memset(sa, 0, sizeof(*sa));
if (ebuf_len > 0) {
*ebuf = 0;
}
if (host == NULL) {
mg_snprintf(NULL,
NULL, /* No truncation check for ebuf */
ebuf,
ebuf_len,
"%s",
"NULL host");
return 0;
}
if ((port <= 0) || !is_valid_port((unsigned)port)) {
mg_snprintf(NULL,
NULL, /* No truncation check for ebuf */
ebuf,
ebuf_len,
"%s",
"invalid port");
return 0;
}
#if !defined(NO_SSL)
#if !defined(NO_SSL_DL)
#if defined(OPENSSL_API_1_1)
if (use_ssl && (TLS_client_method == NULL)) {
mg_snprintf(NULL,
NULL, /* No truncation check for ebuf */
ebuf,
ebuf_len,
"%s",
"SSL is not initialized");
return 0;
}
#else
if (use_ssl && (SSLv23_client_method == NULL)) {
mg_snprintf(NULL,
NULL, /* No truncation check for ebuf */
ebuf,
ebuf_len,
"%s",
"SSL is not initialized");
return 0;
}
#endif /* OPENSSL_API_1_1 */
#else
(void)use_ssl;
#endif /* NO_SSL_DL */
#else
(void)use_ssl;
#endif /* !defined(NO_SSL) */
if (mg_inet_pton(AF_INET, host, &sa->sin, sizeof(sa->sin))) {
sa->sin.sin_family = AF_INET;
sa->sin.sin_port = htons((uint16_t)port);
ip_ver = 4;
#if defined(USE_IPV6)
} else if (mg_inet_pton(AF_INET6, host, &sa->sin6, sizeof(sa->sin6))) {
sa->sin6.sin6_family = AF_INET6;
sa->sin6.sin6_port = htons((uint16_t)port);
ip_ver = 6;
} else if (host[0] == '[') {
/* While getaddrinfo on Windows will work with [::1],
* getaddrinfo on Linux only works with ::1 (without []). */
size_t l = strlen(host + 1);
char *h = (l > 1) ? mg_strdup_ctx(host + 1, ctx) : NULL;
if (h) {
h[l - 1] = 0;
if (mg_inet_pton(AF_INET6, h, &sa->sin6, sizeof(sa->sin6))) {
sa->sin6.sin6_family = AF_INET6;
sa->sin6.sin6_port = htons((uint16_t)port);
ip_ver = 6;
}
mg_free(h);
}
#endif
}
if (ip_ver == 0) {
mg_snprintf(NULL,
NULL, /* No truncation check for ebuf */
ebuf,
ebuf_len,
"%s",
"host not found");
return 0;
}
if (ip_ver == 4) {
*sock = socket(PF_INET, SOCK_STREAM, 0);
}
#if defined(USE_IPV6)
else if (ip_ver == 6) {
*sock = socket(PF_INET6, SOCK_STREAM, 0);
}
#endif
if (*sock == INVALID_SOCKET) {
mg_snprintf(NULL,
NULL, /* No truncation check for ebuf */
ebuf,
ebuf_len,
"socket(): %s",
strerror(ERRNO));
return 0;
}
if (0 != set_non_blocking_mode(*sock)) {
mg_snprintf(NULL,
NULL, /* No truncation check for ebuf */
ebuf,
ebuf_len,
"Cannot set socket to non-blocking: %s",
strerror(ERRNO));
closesocket(*sock);
*sock = INVALID_SOCKET;
return 0;
}
set_close_on_exec(*sock, fc(ctx));
if (ip_ver == 4) {
/* connected with IPv4 */
conn_ret = connect(*sock,
(struct sockaddr *)((void *)&sa->sin),
sizeof(sa->sin));
}
#if defined(USE_IPV6)
else if (ip_ver == 6) {
/* connected with IPv6 */
conn_ret = connect(*sock,
(struct sockaddr *)((void *)&sa->sin6),
sizeof(sa->sin6));
}
#endif
#if defined(_WIN32)
if (conn_ret != 0) {
DWORD err = WSAGetLastError(); /* could return WSAEWOULDBLOCK */
conn_ret = (int)err;
#if !defined(EINPROGRESS)
#define EINPROGRESS (WSAEWOULDBLOCK) /* Winsock equivalent */
#endif /* if !defined(EINPROGRESS) */
}
#endif
if ((conn_ret != 0) && (conn_ret != EINPROGRESS)) {
/* Data for getsockopt */
int sockerr = -1;
void *psockerr = &sockerr;
#if defined(_WIN32)
int len = (int)sizeof(sockerr);
#else
socklen_t len = (socklen_t)sizeof(sockerr);
#endif
/* Data for poll */
struct pollfd pfd[1];
int pollres;
int ms_wait = 10000; /* 10 second timeout */
/* For a non-blocking socket, the connect sequence is:
* 1) call connect (will not block)
* 2) wait until the socket is ready for writing (select or poll)
* 3) check connection state with getsockopt
*/
pfd[0].fd = *sock;
pfd[0].events = POLLOUT;
pollres = mg_poll(pfd, 1, (int)(ms_wait), &(ctx->stop_flag));
if (pollres != 1) {
/* Not connected */
mg_snprintf(NULL,
NULL, /* No truncation check for ebuf */
ebuf,
ebuf_len,
"connect(%s:%d): timeout",
host,
port);
closesocket(*sock);
*sock = INVALID_SOCKET;
return 0;
}
#if defined(_WIN32)
getsockopt(*sock, SOL_SOCKET, SO_ERROR, (char *)psockerr, &len);
#else
getsockopt(*sock, SOL_SOCKET, SO_ERROR, psockerr, &len);
#endif
if (sockerr != 0) {
/* Not connected */
mg_snprintf(NULL,
NULL, /* No truncation check for ebuf */
ebuf,
ebuf_len,
"connect(%s:%d): error %s",
host,
port,
strerror(sockerr));
closesocket(*sock);
*sock = INVALID_SOCKET;
return 0;
}
}
return 1;
}
|
C
|
civetweb
| 0 |
CVE-2016-9191
|
https://www.cvedetails.com/cve/CVE-2016-9191/
|
CWE-20
|
https://github.com/torvalds/linux/commit/93362fa47fe98b62e4a34ab408c4a418432e7939
|
93362fa47fe98b62e4a34ab408c4a418432e7939
|
sysctl: Drop reference added by grab_header in proc_sys_readdir
Fixes CVE-2016-9191, proc_sys_readdir doesn't drop reference
added by grab_header when return from !dir_emit_dots path.
It can cause any path called unregister_sysctl_table will
wait forever.
The calltrace of CVE-2016-9191:
[ 5535.960522] Call Trace:
[ 5535.963265] [<ffffffff817cdaaf>] schedule+0x3f/0xa0
[ 5535.968817] [<ffffffff817d33fb>] schedule_timeout+0x3db/0x6f0
[ 5535.975346] [<ffffffff817cf055>] ? wait_for_completion+0x45/0x130
[ 5535.982256] [<ffffffff817cf0d3>] wait_for_completion+0xc3/0x130
[ 5535.988972] [<ffffffff810d1fd0>] ? wake_up_q+0x80/0x80
[ 5535.994804] [<ffffffff8130de64>] drop_sysctl_table+0xc4/0xe0
[ 5536.001227] [<ffffffff8130de17>] drop_sysctl_table+0x77/0xe0
[ 5536.007648] [<ffffffff8130decd>] unregister_sysctl_table+0x4d/0xa0
[ 5536.014654] [<ffffffff8130deff>] unregister_sysctl_table+0x7f/0xa0
[ 5536.021657] [<ffffffff810f57f5>] unregister_sched_domain_sysctl+0x15/0x40
[ 5536.029344] [<ffffffff810d7704>] partition_sched_domains+0x44/0x450
[ 5536.036447] [<ffffffff817d0761>] ? __mutex_unlock_slowpath+0x111/0x1f0
[ 5536.043844] [<ffffffff81167684>] rebuild_sched_domains_locked+0x64/0xb0
[ 5536.051336] [<ffffffff8116789d>] update_flag+0x11d/0x210
[ 5536.057373] [<ffffffff817cf61f>] ? mutex_lock_nested+0x2df/0x450
[ 5536.064186] [<ffffffff81167acb>] ? cpuset_css_offline+0x1b/0x60
[ 5536.070899] [<ffffffff810fce3d>] ? trace_hardirqs_on+0xd/0x10
[ 5536.077420] [<ffffffff817cf61f>] ? mutex_lock_nested+0x2df/0x450
[ 5536.084234] [<ffffffff8115a9f5>] ? css_killed_work_fn+0x25/0x220
[ 5536.091049] [<ffffffff81167ae5>] cpuset_css_offline+0x35/0x60
[ 5536.097571] [<ffffffff8115aa2c>] css_killed_work_fn+0x5c/0x220
[ 5536.104207] [<ffffffff810bc83f>] process_one_work+0x1df/0x710
[ 5536.110736] [<ffffffff810bc7c0>] ? process_one_work+0x160/0x710
[ 5536.117461] [<ffffffff810bce9b>] worker_thread+0x12b/0x4a0
[ 5536.123697] [<ffffffff810bcd70>] ? process_one_work+0x710/0x710
[ 5536.130426] [<ffffffff810c3f7e>] kthread+0xfe/0x120
[ 5536.135991] [<ffffffff817d4baf>] ret_from_fork+0x1f/0x40
[ 5536.142041] [<ffffffff810c3e80>] ? kthread_create_on_node+0x230/0x230
One cgroup maintainer mentioned that "cgroup is trying to offline
a cpuset css, which takes place under cgroup_mutex. The offlining
ends up trying to drain active usages of a sysctl table which apprently
is not happening."
The real reason is that proc_sys_readdir doesn't drop reference added
by grab_header when return from !dir_emit_dots path. So this cpuset
offline path will wait here forever.
See here for details: http://www.openwall.com/lists/oss-security/2016/11/04/13
Fixes: f0c3b5093add ("[readdir] convert procfs")
Cc: [email protected]
Reported-by: CAI Qian <[email protected]>
Tested-by: Yang Shukui <[email protected]>
Signed-off-by: Zhou Chengming <[email protected]>
Acked-by: Al Viro <[email protected]>
Signed-off-by: Eric W. Biederman <[email protected]>
|
static void init_header(struct ctl_table_header *head,
struct ctl_table_root *root, struct ctl_table_set *set,
struct ctl_node *node, struct ctl_table *table)
{
head->ctl_table = table;
head->ctl_table_arg = table;
head->used = 0;
head->count = 1;
head->nreg = 1;
head->unregistering = NULL;
head->root = root;
head->set = set;
head->parent = NULL;
head->node = node;
if (node) {
struct ctl_table *entry;
for (entry = table; entry->procname; entry++, node++)
node->header = head;
}
}
|
static void init_header(struct ctl_table_header *head,
struct ctl_table_root *root, struct ctl_table_set *set,
struct ctl_node *node, struct ctl_table *table)
{
head->ctl_table = table;
head->ctl_table_arg = table;
head->used = 0;
head->count = 1;
head->nreg = 1;
head->unregistering = NULL;
head->root = root;
head->set = set;
head->parent = NULL;
head->node = node;
if (node) {
struct ctl_table *entry;
for (entry = table; entry->procname; entry++, node++)
node->header = head;
}
}
|
C
|
linux
| 0 |
null | null | null |
https://github.com/chromium/chromium/commit/ec14f31eca3a51f665432973552ee575635132b3
|
ec14f31eca3a51f665432973552ee575635132b3
|
[EFL] Change the behavior of ewk_view_scale_set.
https://bugs.webkit.org/show_bug.cgi?id=70078
Reviewed by Eric Seidel.
Remove center point basis zoom alignment from ewk_view_scale_set to call
Page::setPageScaleFactor without any adjustment.
* ewk/ewk_view.cpp:
(ewk_view_scale_set):
* ewk/ewk_view.h:
git-svn-id: svn://svn.chromium.org/blink/trunk@103288 bbb929c8-8fbe-4397-9dbb-9b2b20218538
|
Eina_Bool ewk_view_visibility_state_set(Evas_Object* ewkView, Ewk_Page_Visibility_State pageVisibilityState, Eina_Bool initialState)
{
#if ENABLE(PAGE_VISIBILITY_API)
EWK_VIEW_SD_GET_OR_RETURN(ewkView, smartData, false);
EWK_VIEW_PRIV_GET_OR_RETURN(smartData, priv, false);
priv->page->setVisibilityState(static_cast<WebCore::PageVisibilityState>(pageVisibilityState), initialState);
return true;
#else
DBG("PAGE_VISIBILITY_API is disabled.");
return false;
#endif
}
|
Eina_Bool ewk_view_visibility_state_set(Evas_Object* ewkView, Ewk_Page_Visibility_State pageVisibilityState, Eina_Bool initialState)
{
#if ENABLE(PAGE_VISIBILITY_API)
EWK_VIEW_SD_GET_OR_RETURN(ewkView, smartData, false);
EWK_VIEW_PRIV_GET_OR_RETURN(smartData, priv, false);
priv->page->setVisibilityState(static_cast<WebCore::PageVisibilityState>(pageVisibilityState), initialState);
return true;
#else
DBG("PAGE_VISIBILITY_API is disabled.");
return false;
#endif
}
|
C
|
Chrome
| 0 |
CVE-2013-0895
|
https://www.cvedetails.com/cve/CVE-2013-0895/
|
CWE-22
|
https://github.com/chromium/chromium/commit/23803a58e481e464a787e4b2c461af9e62f03905
|
23803a58e481e464a787e4b2c461af9e62f03905
|
Fix creating target paths in file_util_posix CopyDirectory.
BUG=167840
Review URL: https://chromiumcodereview.appspot.com/11773018
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@176659 0039d316-1c4b-4281-b951-d872f2087c98
|
FILE* OpenFile(const std::string& filename, const char* mode) {
return OpenFile(FilePath(filename), mode);
}
|
FILE* OpenFile(const std::string& filename, const char* mode) {
return OpenFile(FilePath(filename), mode);
}
|
C
|
Chrome
| 0 |
CVE-2018-11594
|
https://www.cvedetails.com/cve/CVE-2018-11594/
|
CWE-119
|
https://github.com/espruino/Espruino/commit/c36d30529118aa049797db43f111ddad468aad29
|
c36d30529118aa049797db43f111ddad468aad29
|
Fix stack overflow if void void void... is repeated many times (fix #1434)
|
NO_INLINE void jspeBlockNoBrackets() {
if (JSP_SHOULD_EXECUTE) {
while (lex->tk && lex->tk!='}') {
jsvUnLock(jspeStatement());
if (JSP_HAS_ERROR) {
if (lex && !(execInfo.execute&EXEC_ERROR_LINE_REPORTED)) {
execInfo.execute = (JsExecFlags)(execInfo.execute | EXEC_ERROR_LINE_REPORTED);
JsVar *stackTrace = jsvObjectGetChild(execInfo.hiddenRoot, JSPARSE_STACKTRACE_VAR, JSV_STRING_0);
if (stackTrace) {
jsvAppendPrintf(stackTrace, "at ");
jspAppendStackTrace(stackTrace);
jsvUnLock(stackTrace);
}
}
}
if (JSP_SHOULDNT_PARSE)
return;
}
} else {
int brackets = 0;
while (lex->tk && (brackets || lex->tk != '}')) {
if (lex->tk == '{') brackets++;
if (lex->tk == '}') brackets--;
JSP_ASSERT_MATCH(lex->tk);
}
|
NO_INLINE void jspeBlockNoBrackets() {
if (JSP_SHOULD_EXECUTE) {
while (lex->tk && lex->tk!='}') {
jsvUnLock(jspeStatement());
if (JSP_HAS_ERROR) {
if (lex && !(execInfo.execute&EXEC_ERROR_LINE_REPORTED)) {
execInfo.execute = (JsExecFlags)(execInfo.execute | EXEC_ERROR_LINE_REPORTED);
JsVar *stackTrace = jsvObjectGetChild(execInfo.hiddenRoot, JSPARSE_STACKTRACE_VAR, JSV_STRING_0);
if (stackTrace) {
jsvAppendPrintf(stackTrace, "at ");
jspAppendStackTrace(stackTrace);
jsvUnLock(stackTrace);
}
}
}
if (JSP_SHOULDNT_PARSE)
return;
}
} else {
int brackets = 0;
while (lex->tk && (brackets || lex->tk != '}')) {
if (lex->tk == '{') brackets++;
if (lex->tk == '}') brackets--;
JSP_ASSERT_MATCH(lex->tk);
}
|
C
|
Espruino
| 0 |
CVE-2015-3215
|
https://www.cvedetails.com/cve/CVE-2015-3215/
|
CWE-20
|
https://github.com/YanVugenfirer/kvm-guest-drivers-windows/commit/fbfa4d1083ea84c5429992ca3e996d7d4fbc8238
|
fbfa4d1083ea84c5429992ca3e996d7d4fbc8238
|
NetKVM: BZ#1169718: More rigoruous testing of incoming packet
Signed-off-by: Joseph Hindin <[email protected]>
|
void WriteVirtIODeviceWord(ULONG_PTR ulRegister, u16 wValue)
{
#if 1
NdisRawWritePortUshort(ulRegister, wValue);
#else
static int nCounterToFail = 0;
static const int StartFail = 200, StopFail = 600;
BOOLEAN bFail = FALSE;
DPrintf(6, ("%s> R[%x] = %x\n", __FUNCTION__, (ULONG)ulRegister, wValue) );
if ((ulRegister & 0x1F) == 0x10)
{
nCounterToFail++;
bFail = nCounterToFail >= StartFail && nCounterToFail < StopFail;
}
if (!bFail) NdisRawWritePortUshort(ulRegister, wValue);
else
{
DPrintf(0, ("%s> FAILING R[%x] = %x\n", __FUNCTION__, (ULONG)ulRegister, wValue) );
}
#endif
}
|
void WriteVirtIODeviceWord(ULONG_PTR ulRegister, u16 wValue)
{
#if 1
NdisRawWritePortUshort(ulRegister, wValue);
#else
static int nCounterToFail = 0;
static const int StartFail = 200, StopFail = 600;
BOOLEAN bFail = FALSE;
DPrintf(6, ("%s> R[%x] = %x\n", __FUNCTION__, (ULONG)ulRegister, wValue) );
if ((ulRegister & 0x1F) == 0x10)
{
nCounterToFail++;
bFail = nCounterToFail >= StartFail && nCounterToFail < StopFail;
}
if (!bFail) NdisRawWritePortUshort(ulRegister, wValue);
else
{
DPrintf(0, ("%s> FAILING R[%x] = %x\n", __FUNCTION__, (ULONG)ulRegister, wValue) );
}
#endif
}
|
C
|
kvm-guest-drivers-windows
| 0 |
CVE-2016-10741
|
https://www.cvedetails.com/cve/CVE-2016-10741/
|
CWE-362
|
https://github.com/torvalds/linux/commit/04197b341f23b908193308b8d63d17ff23232598
|
04197b341f23b908193308b8d63d17ff23232598
|
xfs: don't BUG() on mixed direct and mapped I/O
We've had reports of generic/095 causing XFS to BUG() in
__xfs_get_blocks() due to the existence of delalloc blocks on a
direct I/O read. generic/095 issues a mix of various types of I/O,
including direct and memory mapped I/O to a single file. This is
clearly not supported behavior and is known to lead to such
problems. E.g., the lack of exclusion between the direct I/O and
write fault paths means that a write fault can allocate delalloc
blocks in a region of a file that was previously a hole after the
direct read has attempted to flush/inval the file range, but before
it actually reads the block mapping. In turn, the direct read
discovers a delalloc extent and cannot proceed.
While the appropriate solution here is to not mix direct and memory
mapped I/O to the same regions of the same file, the current
BUG_ON() behavior is probably overkill as it can crash the entire
system. Instead, localize the failure to the I/O in question by
returning an error for a direct I/O that cannot be handled safely
due to delalloc blocks. Be careful to allow the case of a direct
write to post-eof delalloc blocks. This can occur due to speculative
preallocation and is safe as post-eof blocks are not accompanied by
dirty pages in pagecache (conversely, preallocation within eof must
have been zeroed, and thus dirtied, before the inode size could have
been increased beyond said blocks).
Finally, provide an additional warning if a direct I/O write occurs
while the file is memory mapped. This may not catch all problematic
scenarios, but provides a hint that some known-to-be-problematic I/O
methods are in use.
Signed-off-by: Brian Foster <[email protected]>
Reviewed-by: Dave Chinner <[email protected]>
Signed-off-by: Dave Chinner <[email protected]>
|
xfs_do_writepage(
struct page *page,
struct writeback_control *wbc,
void *data)
{
struct xfs_writepage_ctx *wpc = data;
struct inode *inode = page->mapping->host;
loff_t offset;
__uint64_t end_offset;
pgoff_t end_index;
trace_xfs_writepage(inode, page, 0, 0);
ASSERT(page_has_buffers(page));
/*
* Refuse to write the page out if we are called from reclaim context.
*
* This avoids stack overflows when called from deeply used stacks in
* random callers for direct reclaim or memcg reclaim. We explicitly
* allow reclaim from kswapd as the stack usage there is relatively low.
*
* This should never happen except in the case of a VM regression so
* warn about it.
*/
if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==
PF_MEMALLOC))
goto redirty;
/*
* Given that we do not allow direct reclaim to call us, we should
* never be called while in a filesystem transaction.
*/
if (WARN_ON_ONCE(current->flags & PF_FSTRANS))
goto redirty;
/*
* Is this page beyond the end of the file?
*
* The page index is less than the end_index, adjust the end_offset
* to the highest offset that this page should represent.
* -----------------------------------------------------
* | file mapping | <EOF> |
* -----------------------------------------------------
* | Page ... | Page N-2 | Page N-1 | Page N | |
* ^--------------------------------^----------|--------
* | desired writeback range | see else |
* ---------------------------------^------------------|
*/
offset = i_size_read(inode);
end_index = offset >> PAGE_SHIFT;
if (page->index < end_index)
end_offset = (xfs_off_t)(page->index + 1) << PAGE_SHIFT;
else {
/*
* Check whether the page to write out is beyond or straddles
* i_size or not.
* -------------------------------------------------------
* | file mapping | <EOF> |
* -------------------------------------------------------
* | Page ... | Page N-2 | Page N-1 | Page N | Beyond |
* ^--------------------------------^-----------|---------
* | | Straddles |
* ---------------------------------^-----------|--------|
*/
unsigned offset_into_page = offset & (PAGE_SIZE - 1);
/*
* Skip the page if it is fully outside i_size, e.g. due to a
* truncate operation that is in progress. We must redirty the
* page so that reclaim stops reclaiming it. Otherwise
* xfs_vm_releasepage() is called on it and gets confused.
*
* Note that the end_index is unsigned long, it would overflow
* if the given offset is greater than 16TB on 32-bit system
* and if we do check the page is fully outside i_size or not
* via "if (page->index >= end_index + 1)" as "end_index + 1"
* will be evaluated to 0. Hence this page will be redirtied
* and be written out repeatedly which would result in an
* infinite loop, the user program that perform this operation
* will hang. Instead, we can verify this situation by checking
* if the page to write is totally beyond the i_size or if it's
* offset is just equal to the EOF.
*/
if (page->index > end_index ||
(page->index == end_index && offset_into_page == 0))
goto redirty;
/*
* The page straddles i_size. It must be zeroed out on each
* and every writepage invocation because it may be mmapped.
* "A file is mapped in multiples of the page size. For a file
* that is not a multiple of the page size, the remaining
* memory is zeroed when mapped, and writes to that region are
* not written out to the file."
*/
zero_user_segment(page, offset_into_page, PAGE_SIZE);
/* Adjust the end_offset to the end of file */
end_offset = offset;
}
return xfs_writepage_map(wpc, wbc, inode, page, offset, end_offset);
redirty:
redirty_page_for_writepage(wbc, page);
unlock_page(page);
return 0;
}
|
xfs_do_writepage(
struct page *page,
struct writeback_control *wbc,
void *data)
{
struct xfs_writepage_ctx *wpc = data;
struct inode *inode = page->mapping->host;
loff_t offset;
__uint64_t end_offset;
pgoff_t end_index;
trace_xfs_writepage(inode, page, 0, 0);
ASSERT(page_has_buffers(page));
/*
* Refuse to write the page out if we are called from reclaim context.
*
* This avoids stack overflows when called from deeply used stacks in
* random callers for direct reclaim or memcg reclaim. We explicitly
* allow reclaim from kswapd as the stack usage there is relatively low.
*
* This should never happen except in the case of a VM regression so
* warn about it.
*/
if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==
PF_MEMALLOC))
goto redirty;
/*
* Given that we do not allow direct reclaim to call us, we should
* never be called while in a filesystem transaction.
*/
if (WARN_ON_ONCE(current->flags & PF_FSTRANS))
goto redirty;
/*
* Is this page beyond the end of the file?
*
* The page index is less than the end_index, adjust the end_offset
* to the highest offset that this page should represent.
* -----------------------------------------------------
* | file mapping | <EOF> |
* -----------------------------------------------------
* | Page ... | Page N-2 | Page N-1 | Page N | |
* ^--------------------------------^----------|--------
* | desired writeback range | see else |
* ---------------------------------^------------------|
*/
offset = i_size_read(inode);
end_index = offset >> PAGE_SHIFT;
if (page->index < end_index)
end_offset = (xfs_off_t)(page->index + 1) << PAGE_SHIFT;
else {
/*
* Check whether the page to write out is beyond or straddles
* i_size or not.
* -------------------------------------------------------
* | file mapping | <EOF> |
* -------------------------------------------------------
* | Page ... | Page N-2 | Page N-1 | Page N | Beyond |
* ^--------------------------------^-----------|---------
* | | Straddles |
* ---------------------------------^-----------|--------|
*/
unsigned offset_into_page = offset & (PAGE_SIZE - 1);
/*
* Skip the page if it is fully outside i_size, e.g. due to a
* truncate operation that is in progress. We must redirty the
* page so that reclaim stops reclaiming it. Otherwise
* xfs_vm_releasepage() is called on it and gets confused.
*
* Note that the end_index is unsigned long, it would overflow
* if the given offset is greater than 16TB on 32-bit system
* and if we do check the page is fully outside i_size or not
* via "if (page->index >= end_index + 1)" as "end_index + 1"
* will be evaluated to 0. Hence this page will be redirtied
* and be written out repeatedly which would result in an
* infinite loop, the user program that perform this operation
* will hang. Instead, we can verify this situation by checking
* if the page to write is totally beyond the i_size or if it's
* offset is just equal to the EOF.
*/
if (page->index > end_index ||
(page->index == end_index && offset_into_page == 0))
goto redirty;
/*
* The page straddles i_size. It must be zeroed out on each
* and every writepage invocation because it may be mmapped.
* "A file is mapped in multiples of the page size. For a file
* that is not a multiple of the page size, the remaining
* memory is zeroed when mapped, and writes to that region are
* not written out to the file."
*/
zero_user_segment(page, offset_into_page, PAGE_SIZE);
/* Adjust the end_offset to the end of file */
end_offset = offset;
}
return xfs_writepage_map(wpc, wbc, inode, page, offset, end_offset);
redirty:
redirty_page_for_writepage(wbc, page);
unlock_page(page);
return 0;
}
|
C
|
linux
| 0 |
CVE-2017-12179
|
https://www.cvedetails.com/cve/CVE-2017-12179/
|
CWE-190
|
https://cgit.freedesktop.org/xorg/xserver/commit/?id=d088e3c1286b548a58e62afdc70bb40981cdb9e8
|
d088e3c1286b548a58e62afdc70bb40981cdb9e8
| null |
sort_min_max(INT16 *a, INT16 *b)
{
INT16 A, B;
if (*a < 0 || *b < 0)
return;
A = *a;
B = *b;
*a = min(A, B);
*b = max(A, B);
}
|
sort_min_max(INT16 *a, INT16 *b)
{
INT16 A, B;
if (*a < 0 || *b < 0)
return;
A = *a;
B = *b;
*a = min(A, B);
*b = max(A, B);
}
|
C
|
xserver
| 0 |
CVE-2018-12326
|
https://www.cvedetails.com/cve/CVE-2018-12326/
|
CWE-119
|
https://github.com/antirez/redis/commit/9fdcc15962f9ff4baebe6fdd947816f43f730d50
|
9fdcc15962f9ff4baebe6fdd947816f43f730d50
|
Security: fix redis-cli buffer overflow.
Thanks to Fakhri Zulkifli for reporting it.
The fix switched to dynamic allocation, copying the final prompt in the
static buffer only at the end.
|
static long long ustime(void) {
struct timeval tv;
long long ust;
gettimeofday(&tv, NULL);
ust = ((long long)tv.tv_sec)*1000000;
ust += tv.tv_usec;
return ust;
}
|
static long long ustime(void) {
struct timeval tv;
long long ust;
gettimeofday(&tv, NULL);
ust = ((long long)tv.tv_sec)*1000000;
ust += tv.tv_usec;
return ust;
}
|
C
|
redis
| 0 |
CVE-2013-0896
|
https://www.cvedetails.com/cve/CVE-2013-0896/
|
CWE-119
|
https://github.com/chromium/chromium/commit/58c433b2426f8d23ad27f1976635506ee3643034
|
58c433b2426f8d23ad27f1976635506ee3643034
|
Fix uninitialized access in QuicConnectionHelperTest
BUG=159928
Review URL: https://chromiumcodereview.appspot.com/11360153
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@166708 0039d316-1c4b-4281-b951-d872f2087c98
|
virtual ~TestConnectionHelper() {}
|
virtual ~TestConnectionHelper() {}
|
C
|
Chrome
| 0 |
CVE-2011-1079
|
https://www.cvedetails.com/cve/CVE-2011-1079/
|
CWE-20
|
https://github.com/torvalds/linux/commit/43629f8f5ea32a998d06d1bb41eefa0e821ff573
|
43629f8f5ea32a998d06d1bb41eefa0e821ff573
|
Bluetooth: bnep: fix buffer overflow
Struct ca is copied from userspace. It is not checked whether the "device"
field is NULL terminated. This potentially leads to BUG() inside of
alloc_netdev_mqs() and/or information leak by creating a device with a name
made of contents of kernel stack.
Signed-off-by: Vasiliy Kulikov <[email protected]>
Signed-off-by: Gustavo F. Padovan <[email protected]>
|
void __exit bnep_sock_cleanup(void)
{
if (bt_sock_unregister(BTPROTO_BNEP) < 0)
BT_ERR("Can't unregister BNEP socket");
proto_unregister(&bnep_proto);
}
|
void __exit bnep_sock_cleanup(void)
{
if (bt_sock_unregister(BTPROTO_BNEP) < 0)
BT_ERR("Can't unregister BNEP socket");
proto_unregister(&bnep_proto);
}
|
C
|
linux
| 0 |
CVE-2015-1274
|
https://www.cvedetails.com/cve/CVE-2015-1274/
|
CWE-254
|
https://github.com/chromium/chromium/commit/d27468a832d5316884bd02f459cbf493697fd7e1
|
d27468a832d5316884bd02f459cbf493697fd7e1
|
Switch to equalIgnoringASCIICase throughout modules/accessibility
BUG=627682
Review-Url: https://codereview.chromium.org/2793913007
Cr-Commit-Position: refs/heads/master@{#461858}
|
bool AXNodeObject::isEnabled() const {
if (isDescendantOfDisabledNode())
return false;
Node* node = this->getNode();
if (!node || !node->isElementNode())
return true;
return !toElement(node)->isDisabledFormControl();
}
|
bool AXNodeObject::isEnabled() const {
if (isDescendantOfDisabledNode())
return false;
Node* node = this->getNode();
if (!node || !node->isElementNode())
return true;
return !toElement(node)->isDisabledFormControl();
}
|
C
|
Chrome
| 0 |
null | null | null |
https://github.com/chromium/chromium/commit/99844692ee805d18d5ee7fd9c62f14d2dffa2e06
|
99844692ee805d18d5ee7fd9c62f14d2dffa2e06
|
Removing unnecessary DCHECK from SafeBrowsing interstitial.
BUG=30079
TEST=None.
Review URL: http://codereview.chromium.org/1131003
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@42049 0039d316-1c4b-4281-b951-d872f2087c98
|
void InterstitialPage::UpdateTitle(RenderViewHost* render_view_host,
int32 page_id,
const std::wstring& title) {
DCHECK(render_view_host == render_view_host_);
NavigationEntry* entry = tab_->controller().GetActiveEntry();
if (!entry) {
NOTREACHED();
return;
}
if (!new_navigation_ && !should_revert_tab_title_) {
original_tab_title_ = UTF16ToWideHack(entry->title());
should_revert_tab_title_ = true;
}
entry->set_title(WideToUTF16Hack(title));
tab_->NotifyNavigationStateChanged(TabContents::INVALIDATE_TITLE);
}
|
void InterstitialPage::UpdateTitle(RenderViewHost* render_view_host,
int32 page_id,
const std::wstring& title) {
DCHECK(render_view_host == render_view_host_);
NavigationEntry* entry = tab_->controller().GetActiveEntry();
if (!entry) {
NOTREACHED();
return;
}
if (!new_navigation_ && !should_revert_tab_title_) {
original_tab_title_ = UTF16ToWideHack(entry->title());
should_revert_tab_title_ = true;
}
entry->set_title(WideToUTF16Hack(title));
tab_->NotifyNavigationStateChanged(TabContents::INVALIDATE_TITLE);
}
|
C
|
Chrome
| 0 |
CVE-2015-1191
|
https://www.cvedetails.com/cve/CVE-2015-1191/
|
CWE-22
|
https://github.com/madler/pigz/commit/fdad1406b3ec809f4954ff7cdf9e99eb18c2458f
|
fdad1406b3ec809f4954ff7cdf9e99eb18c2458f
|
When decompressing with -N or -NT, strip any path from header name.
This uses the path of the compressed file combined with the name
from the header as the name of the decompressed output file. Any
path information in the header name is stripped. This avoids a
possible vulnerability where absolute or descending paths are put
in the gzip header.
|
local int read_extra(unsigned len, int save)
{
unsigned id, size, tmp2;
unsigned long tmp4;
/* process extra blocks */
while (len >= 4) {
id = GET2();
size = GET2();
if (g.in_eof)
return -1;
len -= 4;
if (size > len)
break;
len -= size;
if (id == 0x0001) {
/* Zip64 Extended Information Extra Field */
if (g.zip_ulen == LOW32 && size >= 8) {
g.zip_ulen = GET4();
SKIP(4);
size -= 8;
}
if (g.zip_clen == LOW32 && size >= 8) {
g.zip_clen = GET4();
SKIP(4);
size -= 8;
}
}
if (save) {
if ((id == 0x000d || id == 0x5855) && size >= 8) {
/* PKWare Unix or Info-ZIP Type 1 Unix block */
SKIP(4);
g.stamp = tolong(GET4());
size -= 8;
}
if (id == 0x5455 && size >= 5) {
/* Extended Timestamp block */
size--;
if (GET() & 1) {
g.stamp = tolong(GET4());
size -= 4;
}
}
}
SKIP(size);
}
SKIP(len);
return 0;
}
|
local int read_extra(unsigned len, int save)
{
unsigned id, size, tmp2;
unsigned long tmp4;
/* process extra blocks */
while (len >= 4) {
id = GET2();
size = GET2();
if (g.in_eof)
return -1;
len -= 4;
if (size > len)
break;
len -= size;
if (id == 0x0001) {
/* Zip64 Extended Information Extra Field */
if (g.zip_ulen == LOW32 && size >= 8) {
g.zip_ulen = GET4();
SKIP(4);
size -= 8;
}
if (g.zip_clen == LOW32 && size >= 8) {
g.zip_clen = GET4();
SKIP(4);
size -= 8;
}
}
if (save) {
if ((id == 0x000d || id == 0x5855) && size >= 8) {
/* PKWare Unix or Info-ZIP Type 1 Unix block */
SKIP(4);
g.stamp = tolong(GET4());
size -= 8;
}
if (id == 0x5455 && size >= 5) {
/* Extended Timestamp block */
size--;
if (GET() & 1) {
g.stamp = tolong(GET4());
size -= 4;
}
}
}
SKIP(size);
}
SKIP(len);
return 0;
}
|
C
|
pigz
| 0 |
CVE-2013-0918
|
https://www.cvedetails.com/cve/CVE-2013-0918/
|
CWE-264
|
https://github.com/chromium/chromium/commit/0a57375ad73780e61e1770a9d88b0529b0dbd33b
|
0a57375ad73780e61e1770a9d88b0529b0dbd33b
|
Let the browser handle external navigations from DevTools.
BUG=180555
Review URL: https://chromiumcodereview.appspot.com/12531004
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@186793 0039d316-1c4b-4281-b951-d872f2087c98
|
RenderView* RenderView::FromWebView(WebKit::WebView* webview) {
return RenderViewImpl::FromWebView(webview);
}
|
RenderView* RenderView::FromWebView(WebKit::WebView* webview) {
return RenderViewImpl::FromWebView(webview);
}
|
C
|
Chrome
| 0 |
CVE-2017-11176
|
https://www.cvedetails.com/cve/CVE-2017-11176/
|
CWE-416
|
https://github.com/torvalds/linux/commit/f991af3daabaecff34684fd51fac80319d1baad1
|
f991af3daabaecff34684fd51fac80319d1baad1
|
mqueue: fix a use-after-free in sys_mq_notify()
The retry logic for netlink_attachskb() inside sys_mq_notify()
is nasty and vulnerable:
1) The sock refcnt is already released when retry is needed
2) The fd is controllable by user-space because we already
release the file refcnt
so we when retry but the fd has been just closed by user-space
during this small window, we end up calling netlink_detachskb()
on the error path which releases the sock again, later when
the user-space closes this socket a use-after-free could be
triggered.
Setting 'sock' to NULL here should be sufficient to fix it.
Reported-by: GeneBlue <[email protected]>
Signed-off-by: Cong Wang <[email protected]>
Cc: Andrew Morton <[email protected]>
Cc: Manfred Spraul <[email protected]>
Cc: [email protected]
Signed-off-by: Linus Torvalds <[email protected]>
|
static int mq_attr_ok(struct ipc_namespace *ipc_ns, struct mq_attr *attr)
{
int mq_treesize;
unsigned long total_size;
if (attr->mq_maxmsg <= 0 || attr->mq_msgsize <= 0)
return -EINVAL;
if (capable(CAP_SYS_RESOURCE)) {
if (attr->mq_maxmsg > HARD_MSGMAX ||
attr->mq_msgsize > HARD_MSGSIZEMAX)
return -EINVAL;
} else {
if (attr->mq_maxmsg > ipc_ns->mq_msg_max ||
attr->mq_msgsize > ipc_ns->mq_msgsize_max)
return -EINVAL;
}
/* check for overflow */
if (attr->mq_msgsize > ULONG_MAX/attr->mq_maxmsg)
return -EOVERFLOW;
mq_treesize = attr->mq_maxmsg * sizeof(struct msg_msg) +
min_t(unsigned int, attr->mq_maxmsg, MQ_PRIO_MAX) *
sizeof(struct posix_msg_tree_node);
total_size = attr->mq_maxmsg * attr->mq_msgsize;
if (total_size + mq_treesize < total_size)
return -EOVERFLOW;
return 0;
}
|
static int mq_attr_ok(struct ipc_namespace *ipc_ns, struct mq_attr *attr)
{
int mq_treesize;
unsigned long total_size;
if (attr->mq_maxmsg <= 0 || attr->mq_msgsize <= 0)
return -EINVAL;
if (capable(CAP_SYS_RESOURCE)) {
if (attr->mq_maxmsg > HARD_MSGMAX ||
attr->mq_msgsize > HARD_MSGSIZEMAX)
return -EINVAL;
} else {
if (attr->mq_maxmsg > ipc_ns->mq_msg_max ||
attr->mq_msgsize > ipc_ns->mq_msgsize_max)
return -EINVAL;
}
/* check for overflow */
if (attr->mq_msgsize > ULONG_MAX/attr->mq_maxmsg)
return -EOVERFLOW;
mq_treesize = attr->mq_maxmsg * sizeof(struct msg_msg) +
min_t(unsigned int, attr->mq_maxmsg, MQ_PRIO_MAX) *
sizeof(struct posix_msg_tree_node);
total_size = attr->mq_maxmsg * attr->mq_msgsize;
if (total_size + mq_treesize < total_size)
return -EOVERFLOW;
return 0;
}
|
C
|
linux
| 0 |
CVE-2014-9904
|
https://www.cvedetails.com/cve/CVE-2014-9904/
| null |
https://github.com/torvalds/linux/commit/6217e5ede23285ddfee10d2e4ba0cc2d4c046205
|
6217e5ede23285ddfee10d2e4ba0cc2d4c046205
|
ALSA: compress: fix an integer overflow check
I previously added an integer overflow check here but looking at it now,
it's still buggy.
The bug happens in snd_compr_allocate_buffer(). We multiply
".fragments" and ".fragment_size" and that doesn't overflow but then we
save it in an unsigned int so it truncates the high bits away and we
allocate a smaller than expected size.
Fixes: b35cc8225845 ('ALSA: compress_core: integer overflow in snd_compr_allocate_buffer()')
Signed-off-by: Dan Carpenter <[email protected]>
Signed-off-by: Takashi Iwai <[email protected]>
|
static int snd_compress_add_device(struct snd_compr *device)
{
int ret;
if (!device->card)
return -EINVAL;
/* register the card */
ret = snd_card_register(device->card);
if (ret)
goto out;
return 0;
out:
pr_err("failed with %d\n", ret);
return ret;
}
|
static int snd_compress_add_device(struct snd_compr *device)
{
int ret;
if (!device->card)
return -EINVAL;
/* register the card */
ret = snd_card_register(device->card);
if (ret)
goto out;
return 0;
out:
pr_err("failed with %d\n", ret);
return ret;
}
|
C
|
linux
| 0 |
CVE-2018-16075
|
https://www.cvedetails.com/cve/CVE-2018-16075/
|
CWE-254
|
https://github.com/chromium/chromium/commit/d913f72b4875cf0814fc3f03ad7c00642097c4a4
|
d913f72b4875cf0814fc3f03ad7c00642097c4a4
|
Remove RequireCSSExtensionForFile runtime enabled flag.
The feature has long since been stable (since M64) and doesn't seem
to be a need for this flag.
BUG=788936
Change-Id: I666390b869289c328acb4a2daa5bf4154e1702c0
Reviewed-on: https://chromium-review.googlesource.com/c/1324143
Reviewed-by: Mike West <[email protected]>
Reviewed-by: Camille Lamy <[email protected]>
Commit-Queue: Dave Tapuska <[email protected]>
Cr-Commit-Position: refs/heads/master@{#607329}
|
void WebRuntimeFeatures::EnableModernMediaControls(bool enable) {
RuntimeEnabledFeatures::SetModernMediaControlsEnabled(enable);
}
|
void WebRuntimeFeatures::EnableModernMediaControls(bool enable) {
RuntimeEnabledFeatures::SetModernMediaControlsEnabled(enable);
}
|
C
|
Chrome
| 0 |
CVE-2017-5042
|
https://www.cvedetails.com/cve/CVE-2017-5042/
|
CWE-311
|
https://github.com/chromium/chromium/commit/7cde8513c12a6e8ec5d1d1eb1cfd078d9adad3ef
|
7cde8513c12a6e8ec5d1d1eb1cfd078d9adad3ef
|
Revert "PageInfo: decouple safe browsing and TLS statii."
This reverts commit ee95bc44021230127c7e6e9a8cf9d3820760f77c.
Reason for revert: suspect causing unit_tests failure on Linux MSAN Tests:
https://ci.chromium.org/p/chromium/builders/ci/Linux%20MSan%20Tests/17649
PageInfoBubbleViewTest.ChangingFlashSettingForSiteIsRemembered
PageInfoBubbleViewTest.EnsureCloseCallback
PageInfoBubbleViewTest.NotificationPermissionRevokeUkm
PageInfoBubbleViewTest.OpenPageInfoBubbleAfterNavigationStart
PageInfoBubbleViewTest.SetPermissionInfo
PageInfoBubbleViewTest.SetPermissionInfoForUsbGuard
PageInfoBubbleViewTest.SetPermissionInfoWithPolicyUsbDevices
PageInfoBubbleViewTest.SetPermissionInfoWithUsbDevice
PageInfoBubbleViewTest.SetPermissionInfoWithUserAndPolicyUsbDevices
PageInfoBubbleViewTest.UpdatingSiteDataRetainsLayout
https://logs.chromium.org/logs/chromium/buildbucket/cr-buildbucket.appspot.com/8909718923797040064/+/steps/unit_tests/0/logs/Deterministic_failure:_PageInfoBubbleViewTest.ChangingFlashSettingForSiteIsRemembered__status_CRASH_/0
[ RUN ] PageInfoBubbleViewTest.ChangingFlashSettingForSiteIsRemembered
==9056==WARNING: MemorySanitizer: use-of-uninitialized-value
#0 0x561baaab15ec in PageInfoUI::GetSecurityDescription(PageInfoUI::IdentityInfo const&) const ./../../chrome/browser/ui/page_info/page_info_ui.cc:250:3
#1 0x561bab6a1548 in PageInfoBubbleView::SetIdentityInfo(PageInfoUI::IdentityInfo const&) ./../../chrome/browser/ui/views/page_info/page_info_bubble_view.cc:802:7
#2 0x561baaaab3bb in PageInfo::PresentSiteIdentity() ./../../chrome/browser/ui/page_info/page_info.cc:969:8
#3 0x561baaaa0a21 in PageInfo::PageInfo(PageInfoUI*, Profile*, TabSpecificContentSettings*, content::WebContents*, GURL const&, security_state::SecurityLevel, security_state::VisibleSecurityState const&) ./../../chrome/browser/ui/page_info/page_info.cc:344:3
#4 0x561bab69b6dd in PageInfoBubbleView::PageInfoBubbleView(views::View*, gfx::Rect const&, aura::Window*, Profile*, content::WebContents*, GURL const&, security_state::SecurityLevel, security_state::VisibleSecurityState const&, base::OnceCallback<void (views::Widget::ClosedReason, bool)>) ./../../chrome/browser/ui/views/page_info/page_info_bubble_view.cc:576:24
...
Original change's description:
> PageInfo: decouple safe browsing and TLS statii.
>
> Previously, the Page Info bubble maintained a single variable to
> identify all reasons that a page might have a non-standard status. This
> lead to the display logic making assumptions about, for instance, the
> validity of a certificate when the page was flagged by Safe Browsing.
>
> This CL separates out the Safe Browsing status from the site identity
> status so that the page info bubble can inform the user that the site's
> certificate is invalid, even if it's also flagged by Safe Browsing.
>
> Bug: 869925
> Change-Id: I34107225b4206c8f32771ccd75e9367668d0a72b
> Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1662537
> Reviewed-by: Mustafa Emre Acer <[email protected]>
> Reviewed-by: Bret Sepulveda <[email protected]>
> Auto-Submit: Joe DeBlasio <[email protected]>
> Commit-Queue: Joe DeBlasio <[email protected]>
> Cr-Commit-Position: refs/heads/master@{#671847}
[email protected],[email protected],[email protected]
Change-Id: I8be652952e7276bcc9266124693352e467159cc4
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Bug: 869925
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1673985
Reviewed-by: Takashi Sakamoto <[email protected]>
Commit-Queue: Takashi Sakamoto <[email protected]>
Cr-Commit-Position: refs/heads/master@{#671932}
|
const security_state::VisibleSecurityState& visible_security_state() {
return visible_security_state_;
}
|
const security_state::VisibleSecurityState& visible_security_state() {
return visible_security_state_;
}
|
C
|
Chrome
| 0 |
CVE-2015-0273
|
https://www.cvedetails.com/cve/CVE-2015-0273/
| null |
https://git.php.net/?p=php-src.git;a=commit;h=71335e6ebabc1b12c057d8017fd811892ecdfd24
|
71335e6ebabc1b12c057d8017fd811892ecdfd24
| null |
static void date_object_free_storage_interval(void *object TSRMLS_DC)
{
php_interval_obj *intern = (php_interval_obj *)object;
timelib_rel_time_dtor(intern->diff);
zend_object_std_dtor(&intern->std TSRMLS_CC);
efree(object);
}
|
static void date_object_free_storage_interval(void *object TSRMLS_DC)
{
php_interval_obj *intern = (php_interval_obj *)object;
timelib_rel_time_dtor(intern->diff);
zend_object_std_dtor(&intern->std TSRMLS_CC);
efree(object);
}
|
C
|
php
| 0 |
CVE-2012-3552
|
https://www.cvedetails.com/cve/CVE-2012-3552/
|
CWE-362
|
https://github.com/torvalds/linux/commit/f6d8bd051c391c1c0458a30b2a7abcd939329259
|
f6d8bd051c391c1c0458a30b2a7abcd939329259
|
inet: add RCU protection to inet->opt
We lack proper synchronization to manipulate inet->opt ip_options
Problem is ip_make_skb() calls ip_setup_cork() and
ip_setup_cork() possibly makes a copy of ipc->opt (struct ip_options),
without any protection against another thread manipulating inet->opt.
Another thread can change inet->opt pointer and free old one under us.
Use RCU to protect inet->opt (changed to inet->inet_opt).
Instead of handling atomic refcounts, just copy ip_options when
necessary, to avoid cache line dirtying.
We cant insert an rcu_head in struct ip_options since its included in
skb->cb[], so this patch is large because I had to introduce a new
ip_options_rcu structure.
Signed-off-by: Eric Dumazet <[email protected]>
Cc: Herbert Xu <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
{
struct inet_sock *inet = inet_sk(sk);
__tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
}
|
void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
{
struct inet_sock *inet = inet_sk(sk);
__tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
}
|
C
|
linux
| 0 |
CVE-2015-6787
|
https://www.cvedetails.com/cve/CVE-2015-6787/
| null |
https://github.com/chromium/chromium/commit/f911e11e7f6b5c0d6f5ee694a9871de6619889f7
|
f911e11e7f6b5c0d6f5ee694a9871de6619889f7
|
Reland "[CI] Make paint property nodes non-ref-counted"
This reverts commit 887383b30842d9d9006e11bb6932660a3cb5b1b7.
Reason for revert: Retry in M69.
Original change's description:
> Revert "[CI] Make paint property nodes non-ref-counted"
>
> This reverts commit 70fc0b018c9517558b7aa2be00edf2debb449123.
>
> Reason for revert: Caused bugs found by clusterfuzz
>
> Original change's description:
> > [CI] Make paint property nodes non-ref-counted
> >
> > Now all paint property nodes are owned by ObjectPaintProperties
> > (and LocalFrameView temporarily before removing non-RLS mode).
> > Others just use raw pointers or references.
> >
> > Bug: 833496
> > Cq-Include-Trybots: master.tryserver.blink:linux_trusty_blink_rel;master.tryserver.chromium.linux:linux_layout_tests_slimming_paint_v2
> > Change-Id: I2d544fe153bb94698623248748df63c8aa2081ae
> > Reviewed-on: https://chromium-review.googlesource.com/1031101
> > Reviewed-by: Tien-Ren Chen <[email protected]>
> > Commit-Queue: Xianzhu Wang <[email protected]>
> > Cr-Commit-Position: refs/heads/master@{#554626}
>
> [email protected],[email protected],[email protected]
>
> Change-Id: I02bb50d6744cb81a797246a0116b677e80a3c69f
> No-Presubmit: true
> No-Tree-Checks: true
> No-Try: true
> Bug: 833496,837932,837943
> Cq-Include-Trybots: master.tryserver.blink:linux_trusty_blink_rel;master.tryserver.chromium.linux:linux_layout_tests_slimming_paint_v2
> Reviewed-on: https://chromium-review.googlesource.com/1034292
> Reviewed-by: Xianzhu Wang <[email protected]>
> Commit-Queue: Xianzhu Wang <[email protected]>
> Cr-Commit-Position: refs/heads/master@{#554653}
[email protected],[email protected],[email protected]
# Not skipping CQ checks because original CL landed > 1 day ago.
Bug: 833496, 837932, 837943
Change-Id: I0b4ef70db1f1f211ba97c30d617225355c750992
Cq-Include-Trybots: master.tryserver.blink:linux_trusty_blink_rel;master.tryserver.chromium.linux:linux_layout_tests_slimming_paint_v2
Reviewed-on: https://chromium-review.googlesource.com/1083491
Commit-Queue: Xianzhu Wang <[email protected]>
Reviewed-by: Xianzhu Wang <[email protected]>
Cr-Commit-Position: refs/heads/master@{#563930}
|
int PropertyTreeManager::EnsureCompositorClipNode(
const ClipPaintPropertyNode* clip_node) {
DCHECK(clip_node);
if (!clip_node)
return kSecondaryRootNodeId;
auto it = clip_node_map_.find(clip_node);
if (it != clip_node_map_.end())
return it->value;
int parent_id = EnsureCompositorClipNode(clip_node->Parent());
int id = GetClipTree().Insert(cc::ClipNode(), parent_id);
cc::ClipNode& compositor_node = *GetClipTree().Node(id);
compositor_node.clip = clip_node->ClipRect().Rect();
compositor_node.transform_id =
EnsureCompositorTransformNode(clip_node->LocalTransformSpace());
compositor_node.clip_type = cc::ClipNode::ClipType::APPLIES_LOCAL_CLIP;
auto result = clip_node_map_.Set(clip_node, id);
DCHECK(result.is_new_entry);
GetClipTree().set_needs_update(true);
return id;
}
|
int PropertyTreeManager::EnsureCompositorClipNode(
const ClipPaintPropertyNode* clip_node) {
DCHECK(clip_node);
if (!clip_node)
return kSecondaryRootNodeId;
auto it = clip_node_map_.find(clip_node);
if (it != clip_node_map_.end())
return it->value;
int parent_id = EnsureCompositorClipNode(clip_node->Parent());
int id = GetClipTree().Insert(cc::ClipNode(), parent_id);
cc::ClipNode& compositor_node = *GetClipTree().Node(id);
compositor_node.clip = clip_node->ClipRect().Rect();
compositor_node.transform_id =
EnsureCompositorTransformNode(clip_node->LocalTransformSpace());
compositor_node.clip_type = cc::ClipNode::ClipType::APPLIES_LOCAL_CLIP;
auto result = clip_node_map_.Set(clip_node, id);
DCHECK(result.is_new_entry);
GetClipTree().set_needs_update(true);
return id;
}
|
C
|
Chrome
| 0 |
CVE-2017-17052
|
https://www.cvedetails.com/cve/CVE-2017-17052/
|
CWE-416
|
https://github.com/torvalds/linux/commit/2b7e8665b4ff51c034c55df3cff76518d1a9ee3a
|
2b7e8665b4ff51c034c55df3cff76518d1a9ee3a
|
fork: fix incorrect fput of ->exe_file causing use-after-free
Commit 7c051267931a ("mm, fork: make dup_mmap wait for mmap_sem for
write killable") made it possible to kill a forking task while it is
waiting to acquire its ->mmap_sem for write, in dup_mmap().
However, it was overlooked that this introduced an new error path before
a reference is taken on the mm_struct's ->exe_file. Since the
->exe_file of the new mm_struct was already set to the old ->exe_file by
the memcpy() in dup_mm(), it was possible for the mmput() in the error
path of dup_mm() to drop a reference to ->exe_file which was never
taken.
This caused the struct file to later be freed prematurely.
Fix it by updating mm_init() to NULL out the ->exe_file, in the same
place it clears other things like the list of mmaps.
This bug was found by syzkaller. It can be reproduced using the
following C program:
#define _GNU_SOURCE
#include <pthread.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <sys/wait.h>
#include <unistd.h>
static void *mmap_thread(void *_arg)
{
for (;;) {
mmap(NULL, 0x1000000, PROT_READ,
MAP_POPULATE|MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
}
}
static void *fork_thread(void *_arg)
{
usleep(rand() % 10000);
fork();
}
int main(void)
{
fork();
fork();
fork();
for (;;) {
if (fork() == 0) {
pthread_t t;
pthread_create(&t, NULL, mmap_thread, NULL);
pthread_create(&t, NULL, fork_thread, NULL);
usleep(rand() % 10000);
syscall(__NR_exit_group, 0);
}
wait(NULL);
}
}
No special kernel config options are needed. It usually causes a NULL
pointer dereference in __remove_shared_vm_struct() during exit, or in
dup_mmap() (which is usually inlined into copy_process()) during fork.
Both are due to a vm_area_struct's ->vm_file being used after it's
already been freed.
Google Bug Id: 64772007
Link: http://lkml.kernel.org/r/[email protected]
Fixes: 7c051267931a ("mm, fork: make dup_mmap wait for mmap_sem for write killable")
Signed-off-by: Eric Biggers <[email protected]>
Tested-by: Mark Rutland <[email protected]>
Acked-by: Michal Hocko <[email protected]>
Cc: Dmitry Vyukov <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Konstantin Khlebnikov <[email protected]>
Cc: Oleg Nesterov <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Vlastimil Babka <[email protected]>
Cc: <[email protected]> [v4.7+]
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
static unsigned long *alloc_thread_stack_node(struct task_struct *tsk,
int node)
{
return kmem_cache_alloc_node(thread_stack_cache, THREADINFO_GFP, node);
}
|
static unsigned long *alloc_thread_stack_node(struct task_struct *tsk,
int node)
{
return kmem_cache_alloc_node(thread_stack_cache, THREADINFO_GFP, node);
}
|
C
|
linux
| 0 |
CVE-2015-6787
|
https://www.cvedetails.com/cve/CVE-2015-6787/
| null |
https://github.com/chromium/chromium/commit/f911e11e7f6b5c0d6f5ee694a9871de6619889f7
|
f911e11e7f6b5c0d6f5ee694a9871de6619889f7
|
Reland "[CI] Make paint property nodes non-ref-counted"
This reverts commit 887383b30842d9d9006e11bb6932660a3cb5b1b7.
Reason for revert: Retry in M69.
Original change's description:
> Revert "[CI] Make paint property nodes non-ref-counted"
>
> This reverts commit 70fc0b018c9517558b7aa2be00edf2debb449123.
>
> Reason for revert: Caused bugs found by clusterfuzz
>
> Original change's description:
> > [CI] Make paint property nodes non-ref-counted
> >
> > Now all paint property nodes are owned by ObjectPaintProperties
> > (and LocalFrameView temporarily before removing non-RLS mode).
> > Others just use raw pointers or references.
> >
> > Bug: 833496
> > Cq-Include-Trybots: master.tryserver.blink:linux_trusty_blink_rel;master.tryserver.chromium.linux:linux_layout_tests_slimming_paint_v2
> > Change-Id: I2d544fe153bb94698623248748df63c8aa2081ae
> > Reviewed-on: https://chromium-review.googlesource.com/1031101
> > Reviewed-by: Tien-Ren Chen <[email protected]>
> > Commit-Queue: Xianzhu Wang <[email protected]>
> > Cr-Commit-Position: refs/heads/master@{#554626}
>
> [email protected],[email protected],[email protected]
>
> Change-Id: I02bb50d6744cb81a797246a0116b677e80a3c69f
> No-Presubmit: true
> No-Tree-Checks: true
> No-Try: true
> Bug: 833496,837932,837943
> Cq-Include-Trybots: master.tryserver.blink:linux_trusty_blink_rel;master.tryserver.chromium.linux:linux_layout_tests_slimming_paint_v2
> Reviewed-on: https://chromium-review.googlesource.com/1034292
> Reviewed-by: Xianzhu Wang <[email protected]>
> Commit-Queue: Xianzhu Wang <[email protected]>
> Cr-Commit-Position: refs/heads/master@{#554653}
[email protected],[email protected],[email protected]
# Not skipping CQ checks because original CL landed > 1 day ago.
Bug: 833496, 837932, 837943
Change-Id: I0b4ef70db1f1f211ba97c30d617225355c750992
Cq-Include-Trybots: master.tryserver.blink:linux_trusty_blink_rel;master.tryserver.chromium.linux:linux_layout_tests_slimming_paint_v2
Reviewed-on: https://chromium-review.googlesource.com/1083491
Commit-Queue: Xianzhu Wang <[email protected]>
Reviewed-by: Xianzhu Wang <[email protected]>
Cr-Commit-Position: refs/heads/master@{#563930}
|
SkMatrix GetSkMatrix(
const TransformPaintPropertyNode* target_transform) const {
return SkMatrix(TransformationMatrix::ToSkMatrix44(
GeometryMapper::SourceToDestinationProjection(target_transform,
current_transform_)));
}
|
SkMatrix GetSkMatrix(
const TransformPaintPropertyNode* target_transform) const {
return SkMatrix(TransformationMatrix::ToSkMatrix44(
GeometryMapper::SourceToDestinationProjection(target_transform,
current_transform_)));
}
|
C
|
Chrome
| 0 |
CVE-2011-2799
|
https://www.cvedetails.com/cve/CVE-2011-2799/
|
CWE-399
|
https://github.com/chromium/chromium/commit/5a2de6455f565783c73e53eae2c8b953e7d48520
|
5a2de6455f565783c73e53eae2c8b953e7d48520
|
2011-06-02 Joone Hur <[email protected]>
Reviewed by Martin Robinson.
[GTK] Only load dictionaries if spell check is enabled
https://bugs.webkit.org/show_bug.cgi?id=32879
We don't need to call enchant if enable-spell-checking is false.
* webkit/webkitwebview.cpp:
(webkit_web_view_update_settings): Skip loading dictionaries when enable-spell-checking is false.
(webkit_web_view_settings_notify): Ditto.
git-svn-id: svn://svn.chromium.org/blink/trunk@87925 bbb929c8-8fbe-4397-9dbb-9b2b20218538
|
void webkit_web_view_zoom_out(WebKitWebView* webView)
{
g_return_if_fail(WEBKIT_IS_WEB_VIEW(webView));
WebKitWebViewPrivate* priv = webView->priv;
gfloat zoomMultiplierRatio;
g_object_get(priv->webSettings.get(), "zoom-step", &zoomMultiplierRatio, NULL);
webkit_web_view_set_zoom_level(webView, webkit_web_view_get_zoom_level(webView) - zoomMultiplierRatio);
}
|
void webkit_web_view_zoom_out(WebKitWebView* webView)
{
g_return_if_fail(WEBKIT_IS_WEB_VIEW(webView));
WebKitWebViewPrivate* priv = webView->priv;
gfloat zoomMultiplierRatio;
g_object_get(priv->webSettings.get(), "zoom-step", &zoomMultiplierRatio, NULL);
webkit_web_view_set_zoom_level(webView, webkit_web_view_get_zoom_level(webView) - zoomMultiplierRatio);
}
|
C
|
Chrome
| 0 |
null | null | null |
https://github.com/chromium/chromium/commit/93dd81929416a0170935e6eeac03d10aed60df18
|
93dd81929416a0170935e6eeac03d10aed60df18
|
Implement NPN_RemoveProperty
https://bugs.webkit.org/show_bug.cgi?id=43315
Reviewed by Sam Weinig.
WebKit2:
* WebProcess/Plugins/NPJSObject.cpp:
(WebKit::NPJSObject::removeProperty):
Try to remove the property.
(WebKit::NPJSObject::npClass):
Add NP_RemoveProperty.
(WebKit::NPJSObject::NP_RemoveProperty):
Call NPJSObject::removeProperty.
* WebProcess/Plugins/Netscape/NetscapeBrowserFuncs.cpp:
(WebKit::NPN_RemoveProperty):
Call the NPClass::removeProperty function.
WebKitTools:
* DumpRenderTree/DumpRenderTree.xcodeproj/project.pbxproj:
Add NPRuntimeRemoveProperty.cpp
* DumpRenderTree/TestNetscapePlugIn/PluginTest.cpp:
(PluginTest::NPN_GetStringIdentifier):
(PluginTest::NPN_GetIntIdentifier):
(PluginTest::NPN_RemoveProperty):
Add NPN_ helpers.
* DumpRenderTree/TestNetscapePlugIn/PluginTest.h:
Support more NPClass functions.
* DumpRenderTree/TestNetscapePlugIn/Tests/NPRuntimeRemoveProperty.cpp: Added.
(NPRuntimeRemoveProperty::NPRuntimeRemoveProperty):
Test for NPN_RemoveProperty.
(NPRuntimeRemoveProperty::TestObject::hasMethod):
(NPRuntimeRemoveProperty::TestObject::invoke):
Add a testRemoveProperty method.
(NPRuntimeRemoveProperty::NPP_GetValue):
Return the test object.
* DumpRenderTree/TestNetscapePlugIn/win/TestNetscapePlugin.vcproj:
* DumpRenderTree/qt/TestNetscapePlugin/TestNetscapePlugin.pro:
* GNUmakefile.am:
Add NPRuntimeRemoveProperty.cpp
LayoutTests:
Add a test for NPN_RemoveProperty.
* plugins/npruntime/remove-property-expected.txt: Added.
* plugins/npruntime/remove-property.html: Added.
git-svn-id: svn://svn.chromium.org/blink/trunk@64444 bbb929c8-8fbe-4397-9dbb-9b2b20218538
|
static bool NPN_SetProperty(NPP, NPObject* npObject, NPIdentifier propertyName, const NPVariant* value)
{
if (npObject->_class->setProperty)
return npObject->_class->setProperty(npObject, propertyName, value);
return false;
}
|
static bool NPN_SetProperty(NPP, NPObject* npObject, NPIdentifier propertyName, const NPVariant* value)
{
if (npObject->_class->setProperty)
return npObject->_class->setProperty(npObject, propertyName, value);
return false;
}
|
C
|
Chrome
| 0 |
CVE-2017-9051
|
https://www.cvedetails.com/cve/CVE-2017-9051/
|
CWE-476
|
https://github.com/libav/libav/commit/fe6eea99efac66839052af547426518efd970b24
|
fe6eea99efac66839052af547426518efd970b24
|
nsvdec: don't ignore the return value of av_get_packet()
Fixes invalid reads with corrupted files.
CC: [email protected]
Bug-Id: 1039
|
static int nsv_probe(AVProbeData *p)
{
int i;
int score;
int vsize, asize, auxcount;
score = 0;
av_log(NULL, AV_LOG_TRACE, "nsv_probe(), buf_size %d\n", p->buf_size);
/* check file header */
/* streamed files might not have any header */
if (p->buf[0] == 'N' && p->buf[1] == 'S' &&
p->buf[2] == 'V' && (p->buf[3] == 'f' || p->buf[3] == 's'))
return AVPROBE_SCORE_MAX;
/* XXX: do streamed files always start at chunk boundary ?? */
/* or do we need to search NSVs in the byte stream ? */
/* seems the servers don't bother starting clean chunks... */
/* sometimes even the first header is at 9KB or something :^) */
for (i = 1; i < p->buf_size - 3; i++) {
if (p->buf[i+0] == 'N' && p->buf[i+1] == 'S' &&
p->buf[i+2] == 'V' && p->buf[i+3] == 's') {
score = AVPROBE_SCORE_MAX/5;
/* Get the chunk size and check if at the end we are getting 0xBEEF */
auxcount = p->buf[i+19];
vsize = p->buf[i+20] | p->buf[i+21] << 8;
asize = p->buf[i+22] | p->buf[i+23] << 8;
vsize = (vsize << 4) | (auxcount >> 4);
if ((asize + vsize + i + 23) < p->buf_size - 2) {
if (p->buf[i+23+asize+vsize+1] == 0xEF &&
p->buf[i+23+asize+vsize+2] == 0xBE)
return AVPROBE_SCORE_MAX-20;
}
}
}
/* so we'll have more luck on extension... */
if (av_match_ext(p->filename, "nsv"))
return AVPROBE_SCORE_EXTENSION;
/* FIXME: add mime-type check */
return score;
}
|
static int nsv_probe(AVProbeData *p)
{
int i;
int score;
int vsize, asize, auxcount;
score = 0;
av_log(NULL, AV_LOG_TRACE, "nsv_probe(), buf_size %d\n", p->buf_size);
/* check file header */
/* streamed files might not have any header */
if (p->buf[0] == 'N' && p->buf[1] == 'S' &&
p->buf[2] == 'V' && (p->buf[3] == 'f' || p->buf[3] == 's'))
return AVPROBE_SCORE_MAX;
/* XXX: do streamed files always start at chunk boundary ?? */
/* or do we need to search NSVs in the byte stream ? */
/* seems the servers don't bother starting clean chunks... */
/* sometimes even the first header is at 9KB or something :^) */
for (i = 1; i < p->buf_size - 3; i++) {
if (p->buf[i+0] == 'N' && p->buf[i+1] == 'S' &&
p->buf[i+2] == 'V' && p->buf[i+3] == 's') {
score = AVPROBE_SCORE_MAX/5;
/* Get the chunk size and check if at the end we are getting 0xBEEF */
auxcount = p->buf[i+19];
vsize = p->buf[i+20] | p->buf[i+21] << 8;
asize = p->buf[i+22] | p->buf[i+23] << 8;
vsize = (vsize << 4) | (auxcount >> 4);
if ((asize + vsize + i + 23) < p->buf_size - 2) {
if (p->buf[i+23+asize+vsize+1] == 0xEF &&
p->buf[i+23+asize+vsize+2] == 0xBE)
return AVPROBE_SCORE_MAX-20;
}
}
}
/* so we'll have more luck on extension... */
if (av_match_ext(p->filename, "nsv"))
return AVPROBE_SCORE_EXTENSION;
/* FIXME: add mime-type check */
return score;
}
|
C
|
libav
| 0 |
CVE-2015-1793
|
https://www.cvedetails.com/cve/CVE-2015-1793/
|
CWE-254
|
https://git.openssl.org/?p=openssl.git;a=commit;h=9a0db453ba017ebcaccbee933ee6511a9ae4d1c8
|
9a0db453ba017ebcaccbee933ee6511a9ae4d1c8
| null |
static void crl_akid_check(X509_STORE_CTX *ctx, X509_CRL *crl,
X509 **pissuer, int *pcrl_score)
{
X509 *crl_issuer = NULL;
X509_NAME *cnm = X509_CRL_get_issuer(crl);
int cidx = ctx->error_depth;
int i;
if (cidx != sk_X509_num(ctx->chain) - 1)
cidx++;
crl_issuer = sk_X509_value(ctx->chain, cidx);
if (X509_check_akid(crl_issuer, crl->akid) == X509_V_OK) {
if (*pcrl_score & CRL_SCORE_ISSUER_NAME) {
*pcrl_score |= CRL_SCORE_AKID | CRL_SCORE_ISSUER_CERT;
*pissuer = crl_issuer;
return;
}
}
for (cidx++; cidx < sk_X509_num(ctx->chain); cidx++) {
crl_issuer = sk_X509_value(ctx->chain, cidx);
if (X509_NAME_cmp(X509_get_subject_name(crl_issuer), cnm))
continue;
if (X509_check_akid(crl_issuer, crl->akid) == X509_V_OK) {
*pcrl_score |= CRL_SCORE_AKID | CRL_SCORE_SAME_PATH;
*pissuer = crl_issuer;
return;
}
}
/* Anything else needs extended CRL support */
if (!(ctx->param->flags & X509_V_FLAG_EXTENDED_CRL_SUPPORT))
return;
/*
* Otherwise the CRL issuer is not on the path. Look for it in the set of
* untrusted certificates.
*/
for (i = 0; i < sk_X509_num(ctx->untrusted); i++) {
crl_issuer = sk_X509_value(ctx->untrusted, i);
if (X509_NAME_cmp(X509_get_subject_name(crl_issuer), cnm))
continue;
if (X509_check_akid(crl_issuer, crl->akid) == X509_V_OK) {
*pissuer = crl_issuer;
*pcrl_score |= CRL_SCORE_AKID;
return;
}
}
}
|
static void crl_akid_check(X509_STORE_CTX *ctx, X509_CRL *crl,
X509 **pissuer, int *pcrl_score)
{
X509 *crl_issuer = NULL;
X509_NAME *cnm = X509_CRL_get_issuer(crl);
int cidx = ctx->error_depth;
int i;
if (cidx != sk_X509_num(ctx->chain) - 1)
cidx++;
crl_issuer = sk_X509_value(ctx->chain, cidx);
if (X509_check_akid(crl_issuer, crl->akid) == X509_V_OK) {
if (*pcrl_score & CRL_SCORE_ISSUER_NAME) {
*pcrl_score |= CRL_SCORE_AKID | CRL_SCORE_ISSUER_CERT;
*pissuer = crl_issuer;
return;
}
}
for (cidx++; cidx < sk_X509_num(ctx->chain); cidx++) {
crl_issuer = sk_X509_value(ctx->chain, cidx);
if (X509_NAME_cmp(X509_get_subject_name(crl_issuer), cnm))
continue;
if (X509_check_akid(crl_issuer, crl->akid) == X509_V_OK) {
*pcrl_score |= CRL_SCORE_AKID | CRL_SCORE_SAME_PATH;
*pissuer = crl_issuer;
return;
}
}
/* Anything else needs extended CRL support */
if (!(ctx->param->flags & X509_V_FLAG_EXTENDED_CRL_SUPPORT))
return;
/*
* Otherwise the CRL issuer is not on the path. Look for it in the set of
* untrusted certificates.
*/
for (i = 0; i < sk_X509_num(ctx->untrusted); i++) {
crl_issuer = sk_X509_value(ctx->untrusted, i);
if (X509_NAME_cmp(X509_get_subject_name(crl_issuer), cnm))
continue;
if (X509_check_akid(crl_issuer, crl->akid) == X509_V_OK) {
*pissuer = crl_issuer;
*pcrl_score |= CRL_SCORE_AKID;
return;
}
}
}
|
C
|
openssl
| 0 |
CVE-2015-1213
|
https://www.cvedetails.com/cve/CVE-2015-1213/
|
CWE-119
|
https://github.com/chromium/chromium/commit/faaa2fd0a05f1622d9a8806da118d4f3b602e707
|
faaa2fd0a05f1622d9a8806da118d4f3b602e707
|
[Blink>Media] Allow autoplay muted on Android by default
There was a mistake causing autoplay muted is shipped on Android
but it will be disabled if the chromium embedder doesn't specify
content setting for "AllowAutoplay" preference. This CL makes the
AllowAutoplay preference true by default so that it is allowed by
embedders (including AndroidWebView) unless they explicitly
disable it.
Intent to ship:
https://groups.google.com/a/chromium.org/d/msg/blink-dev/Q1cnzNI2GpI/AL_eyUNABgAJ
BUG=689018
Review-Url: https://codereview.chromium.org/2677173002
Cr-Commit-Position: refs/heads/master@{#448423}
|
bool HTMLMediaElement::supportsSave() const {
return webMediaPlayer() && webMediaPlayer()->supportsSave();
}
|
bool HTMLMediaElement::supportsSave() const {
return webMediaPlayer() && webMediaPlayer()->supportsSave();
}
|
C
|
Chrome
| 0 |
CVE-2018-7480
|
https://www.cvedetails.com/cve/CVE-2018-7480/
|
CWE-415
|
https://github.com/torvalds/linux/commit/9b54d816e00425c3a517514e0d677bb3cec49258
|
9b54d816e00425c3a517514e0d677bb3cec49258
|
blkcg: fix double free of new_blkg in blkcg_init_queue
If blkg_create fails, new_blkg passed as an argument will
be freed by blkg_create, so there is no need to free it again.
Signed-off-by: Hou Tao <[email protected]>
Signed-off-by: Jens Axboe <[email protected]>
|
static u64 blkg_prfill_rwstat_field_recursive(struct seq_file *sf,
struct blkg_policy_data *pd,
int off)
{
struct blkg_rwstat rwstat = blkg_rwstat_recursive_sum(pd->blkg,
NULL, off);
return __blkg_prfill_rwstat(sf, pd, &rwstat);
}
|
static u64 blkg_prfill_rwstat_field_recursive(struct seq_file *sf,
struct blkg_policy_data *pd,
int off)
{
struct blkg_rwstat rwstat = blkg_rwstat_recursive_sum(pd->blkg,
NULL, off);
return __blkg_prfill_rwstat(sf, pd, &rwstat);
}
|
C
|
linux
| 0 |
CVE-2017-14140
|
https://www.cvedetails.com/cve/CVE-2017-14140/
|
CWE-200
|
https://github.com/torvalds/linux/commit/197e7e521384a23b9e585178f3f11c9fa08274b9
|
197e7e521384a23b9e585178f3f11c9fa08274b9
|
Sanitize 'move_pages()' permission checks
The 'move_paghes()' system call was introduced long long ago with the
same permission checks as for sending a signal (except using
CAP_SYS_NICE instead of CAP_SYS_KILL for the overriding capability).
That turns out to not be a great choice - while the system call really
only moves physical page allocations around (and you need other
capabilities to do a lot of it), you can check the return value to map
out some the virtual address choices and defeat ASLR of a binary that
still shares your uid.
So change the access checks to the more common 'ptrace_may_access()'
model instead.
This tightens the access checks for the uid, and also effectively
changes the CAP_SYS_NICE check to CAP_SYS_PTRACE, but it's unlikely that
anybody really _uses_ this legacy system call any more (we hav ebetter
NUMA placement models these days), so I expect nobody to notice.
Famous last words.
Reported-by: Otto Ebeling <[email protected]>
Acked-by: Eric W. Biederman <[email protected]>
Cc: Willy Tarreau <[email protected]>
Cc: [email protected]
Signed-off-by: Linus Torvalds <[email protected]>
|
static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma,
unsigned long addr, void *old)
{
struct page_vma_mapped_walk pvmw = {
.page = old,
.vma = vma,
.address = addr,
.flags = PVMW_SYNC | PVMW_MIGRATION,
};
struct page *new;
pte_t pte;
swp_entry_t entry;
VM_BUG_ON_PAGE(PageTail(page), page);
while (page_vma_mapped_walk(&pvmw)) {
if (PageKsm(page))
new = page;
else
new = page - pvmw.page->index +
linear_page_index(vma, pvmw.address);
get_page(new);
pte = pte_mkold(mk_pte(new, READ_ONCE(vma->vm_page_prot)));
if (pte_swp_soft_dirty(*pvmw.pte))
pte = pte_mksoft_dirty(pte);
/*
* Recheck VMA as permissions can change since migration started
*/
entry = pte_to_swp_entry(*pvmw.pte);
if (is_write_migration_entry(entry))
pte = maybe_mkwrite(pte, vma);
flush_dcache_page(new);
#ifdef CONFIG_HUGETLB_PAGE
if (PageHuge(new)) {
pte = pte_mkhuge(pte);
pte = arch_make_huge_pte(pte, vma, new, 0);
set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
if (PageAnon(new))
hugepage_add_anon_rmap(new, vma, pvmw.address);
else
page_dup_rmap(new, true);
} else
#endif
{
set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
if (PageAnon(new))
page_add_anon_rmap(new, vma, pvmw.address, false);
else
page_add_file_rmap(new, false);
}
if (vma->vm_flags & VM_LOCKED && !PageTransCompound(new))
mlock_vma_page(new);
/* No need to invalidate - it was non-present before */
update_mmu_cache(vma, pvmw.address, pvmw.pte);
}
return true;
}
|
static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma,
unsigned long addr, void *old)
{
struct page_vma_mapped_walk pvmw = {
.page = old,
.vma = vma,
.address = addr,
.flags = PVMW_SYNC | PVMW_MIGRATION,
};
struct page *new;
pte_t pte;
swp_entry_t entry;
VM_BUG_ON_PAGE(PageTail(page), page);
while (page_vma_mapped_walk(&pvmw)) {
if (PageKsm(page))
new = page;
else
new = page - pvmw.page->index +
linear_page_index(vma, pvmw.address);
get_page(new);
pte = pte_mkold(mk_pte(new, READ_ONCE(vma->vm_page_prot)));
if (pte_swp_soft_dirty(*pvmw.pte))
pte = pte_mksoft_dirty(pte);
/*
* Recheck VMA as permissions can change since migration started
*/
entry = pte_to_swp_entry(*pvmw.pte);
if (is_write_migration_entry(entry))
pte = maybe_mkwrite(pte, vma);
flush_dcache_page(new);
#ifdef CONFIG_HUGETLB_PAGE
if (PageHuge(new)) {
pte = pte_mkhuge(pte);
pte = arch_make_huge_pte(pte, vma, new, 0);
set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
if (PageAnon(new))
hugepage_add_anon_rmap(new, vma, pvmw.address);
else
page_dup_rmap(new, true);
} else
#endif
{
set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
if (PageAnon(new))
page_add_anon_rmap(new, vma, pvmw.address, false);
else
page_add_file_rmap(new, false);
}
if (vma->vm_flags & VM_LOCKED && !PageTransCompound(new))
mlock_vma_page(new);
/* No need to invalidate - it was non-present before */
update_mmu_cache(vma, pvmw.address, pvmw.pte);
}
return true;
}
|
C
|
linux
| 0 |
CVE-2016-3746
|
https://www.cvedetails.com/cve/CVE-2016-3746/
| null |
https://android.googlesource.com/platform/hardware/qcom/media/+/5b82f4f90c3d531313714df4b936f92fb0ff15cf
|
5b82f4f90c3d531313714df4b936f92fb0ff15cf
|
DO NOT MERGE mm-video-v4l2: vdec: Avoid processing ETBs/FTBs in invalid states
(per the spec) ETB/FTB should not be handled in states other than
Executing, Paused and Idle. This avoids accessing invalid buffers.
Also add a lock to protect the private-buffers from being deleted
while accessing from another thread.
Bug: 27890802
Security Vulnerability - Heap Use-After-Free and Possible LPE in
MediaServer (libOmxVdec problem #6)
CRs-Fixed: 1008882
Change-Id: Iaac2e383cd53cf9cf8042c9ed93ddc76dba3907e
|
OMX_ERRORTYPE omx_vdec::allocate_input_buffer(
OMX_IN OMX_HANDLETYPE hComp,
OMX_INOUT OMX_BUFFERHEADERTYPE** bufferHdr,
OMX_IN OMX_U32 port,
OMX_IN OMX_PTR appData,
OMX_IN OMX_U32 bytes)
{
OMX_ERRORTYPE eRet = OMX_ErrorNone;
struct vdec_setbuffer_cmd setbuffers;
OMX_BUFFERHEADERTYPE *input = NULL;
unsigned i = 0;
unsigned char *buf_addr = NULL;
int pmem_fd = -1;
(void) hComp;
(void) port;
if (bytes != drv_ctx.ip_buf.buffer_size) {
DEBUG_PRINT_LOW("Requested Size is wrong %u epected is %u",
(unsigned int)bytes, (unsigned int)drv_ctx.ip_buf.buffer_size);
return OMX_ErrorBadParameter;
}
if (!m_inp_mem_ptr) {
DEBUG_PRINT_HIGH("Allocate i/p buffer Header: Cnt(%d) Sz(%u)",
drv_ctx.ip_buf.actualcount,
(unsigned int)drv_ctx.ip_buf.buffer_size);
m_inp_mem_ptr = (OMX_BUFFERHEADERTYPE*) \
calloc( (sizeof(OMX_BUFFERHEADERTYPE)), drv_ctx.ip_buf.actualcount);
if (m_inp_mem_ptr == NULL) {
return OMX_ErrorInsufficientResources;
}
drv_ctx.ptr_inputbuffer = (struct vdec_bufferpayload *) \
calloc ((sizeof (struct vdec_bufferpayload)),drv_ctx.ip_buf.actualcount);
if (drv_ctx.ptr_inputbuffer == NULL) {
return OMX_ErrorInsufficientResources;
}
#ifdef USE_ION
drv_ctx.ip_buf_ion_info = (struct vdec_ion *) \
calloc ((sizeof (struct vdec_ion)),drv_ctx.ip_buf.actualcount);
if (drv_ctx.ip_buf_ion_info == NULL) {
return OMX_ErrorInsufficientResources;
}
#endif
for (i=0; i < drv_ctx.ip_buf.actualcount; i++) {
drv_ctx.ptr_inputbuffer [i].pmem_fd = -1;
#ifdef USE_ION
drv_ctx.ip_buf_ion_info[i].ion_device_fd = -1;
#endif
}
}
for (i=0; i< drv_ctx.ip_buf.actualcount; i++) {
if (BITMASK_ABSENT(&m_inp_bm_count,i)) {
DEBUG_PRINT_LOW("Free Input Buffer Index %d",i);
break;
}
}
if (i < drv_ctx.ip_buf.actualcount) {
struct v4l2_buffer buf;
struct v4l2_plane plane;
int rc;
DEBUG_PRINT_LOW("Allocate input Buffer");
#ifdef USE_ION
drv_ctx.ip_buf_ion_info[i].ion_device_fd = alloc_map_ion_memory(
drv_ctx.ip_buf.buffer_size,drv_ctx.op_buf.alignment,
&drv_ctx.ip_buf_ion_info[i].ion_alloc_data,
&drv_ctx.ip_buf_ion_info[i].fd_ion_data, secure_mode ? ION_SECURE
#ifndef DISABLE_INPUT_BUFFER_CACHE
: ION_FLAG_CACHED
#else
: 0
#endif
);
if (drv_ctx.ip_buf_ion_info[i].ion_device_fd < 0) {
return OMX_ErrorInsufficientResources;
}
pmem_fd = drv_ctx.ip_buf_ion_info[i].fd_ion_data.fd;
#else
pmem_fd = open (MEM_DEVICE,O_RDWR);
if (pmem_fd < 0) {
DEBUG_PRINT_ERROR("open failed for pmem/adsp for input buffer");
return OMX_ErrorInsufficientResources;
}
if (pmem_fd == 0) {
pmem_fd = open (MEM_DEVICE,O_RDWR);
if (pmem_fd < 0) {
DEBUG_PRINT_ERROR("open failed for pmem/adsp for input buffer");
return OMX_ErrorInsufficientResources;
}
}
if (!align_pmem_buffers(pmem_fd, drv_ctx.ip_buf.buffer_size,
drv_ctx.ip_buf.alignment)) {
DEBUG_PRINT_ERROR("align_pmem_buffers() failed");
close(pmem_fd);
return OMX_ErrorInsufficientResources;
}
#endif
if (!secure_mode) {
buf_addr = (unsigned char *)mmap(NULL,
drv_ctx.ip_buf.buffer_size,
PROT_READ|PROT_WRITE, MAP_SHARED, pmem_fd, 0);
if (buf_addr == MAP_FAILED) {
close(pmem_fd);
#ifdef USE_ION
free_ion_memory(&drv_ctx.ip_buf_ion_info[i]);
#endif
DEBUG_PRINT_ERROR("Map Failed to allocate input buffer");
return OMX_ErrorInsufficientResources;
}
}
*bufferHdr = (m_inp_mem_ptr + i);
if (secure_mode)
drv_ctx.ptr_inputbuffer [i].bufferaddr = *bufferHdr;
else
drv_ctx.ptr_inputbuffer [i].bufferaddr = buf_addr;
drv_ctx.ptr_inputbuffer [i].pmem_fd = pmem_fd;
drv_ctx.ptr_inputbuffer [i].buffer_len = drv_ctx.ip_buf.buffer_size;
drv_ctx.ptr_inputbuffer [i].mmaped_size = drv_ctx.ip_buf.buffer_size;
drv_ctx.ptr_inputbuffer [i].offset = 0;
buf.index = i;
buf.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
buf.memory = V4L2_MEMORY_USERPTR;
plane.bytesused = 0;
plane.length = drv_ctx.ptr_inputbuffer [i].mmaped_size;
plane.m.userptr = (unsigned long)drv_ctx.ptr_inputbuffer[i].bufferaddr;
plane.reserved[0] =drv_ctx.ptr_inputbuffer [i].pmem_fd;
plane.reserved[1] = 0;
plane.data_offset = drv_ctx.ptr_inputbuffer[i].offset;
buf.m.planes = &plane;
buf.length = 1;
DEBUG_PRINT_LOW("Set the input Buffer Idx: %d Addr: %p", i,
drv_ctx.ptr_inputbuffer[i].bufferaddr);
rc = ioctl(drv_ctx.video_driver_fd, VIDIOC_PREPARE_BUF, &buf);
if (rc) {
DEBUG_PRINT_ERROR("Failed to prepare bufs");
/*TODO: How to handle this case */
return OMX_ErrorInsufficientResources;
}
input = *bufferHdr;
BITMASK_SET(&m_inp_bm_count,i);
DEBUG_PRINT_LOW("Buffer address %p of pmem",*bufferHdr);
if (secure_mode)
input->pBuffer = (OMX_U8 *)(intptr_t)drv_ctx.ptr_inputbuffer [i].pmem_fd;
else
input->pBuffer = (OMX_U8 *)buf_addr;
input->nSize = sizeof(OMX_BUFFERHEADERTYPE);
input->nVersion.nVersion = OMX_SPEC_VERSION;
input->nAllocLen = drv_ctx.ip_buf.buffer_size;
input->pAppPrivate = appData;
input->nInputPortIndex = OMX_CORE_INPUT_PORT_INDEX;
input->pInputPortPrivate = (void *)&drv_ctx.ptr_inputbuffer [i];
if (drv_ctx.disable_dmx) {
eRet = allocate_desc_buffer(i);
}
} else {
DEBUG_PRINT_ERROR("ERROR:Input Buffer Index not found");
eRet = OMX_ErrorInsufficientResources;
}
return eRet;
}
|
OMX_ERRORTYPE omx_vdec::allocate_input_buffer(
OMX_IN OMX_HANDLETYPE hComp,
OMX_INOUT OMX_BUFFERHEADERTYPE** bufferHdr,
OMX_IN OMX_U32 port,
OMX_IN OMX_PTR appData,
OMX_IN OMX_U32 bytes)
{
OMX_ERRORTYPE eRet = OMX_ErrorNone;
struct vdec_setbuffer_cmd setbuffers;
OMX_BUFFERHEADERTYPE *input = NULL;
unsigned i = 0;
unsigned char *buf_addr = NULL;
int pmem_fd = -1;
(void) hComp;
(void) port;
if (bytes != drv_ctx.ip_buf.buffer_size) {
DEBUG_PRINT_LOW("Requested Size is wrong %u epected is %u",
(unsigned int)bytes, (unsigned int)drv_ctx.ip_buf.buffer_size);
return OMX_ErrorBadParameter;
}
if (!m_inp_mem_ptr) {
DEBUG_PRINT_HIGH("Allocate i/p buffer Header: Cnt(%d) Sz(%u)",
drv_ctx.ip_buf.actualcount,
(unsigned int)drv_ctx.ip_buf.buffer_size);
m_inp_mem_ptr = (OMX_BUFFERHEADERTYPE*) \
calloc( (sizeof(OMX_BUFFERHEADERTYPE)), drv_ctx.ip_buf.actualcount);
if (m_inp_mem_ptr == NULL) {
return OMX_ErrorInsufficientResources;
}
drv_ctx.ptr_inputbuffer = (struct vdec_bufferpayload *) \
calloc ((sizeof (struct vdec_bufferpayload)),drv_ctx.ip_buf.actualcount);
if (drv_ctx.ptr_inputbuffer == NULL) {
return OMX_ErrorInsufficientResources;
}
#ifdef USE_ION
drv_ctx.ip_buf_ion_info = (struct vdec_ion *) \
calloc ((sizeof (struct vdec_ion)),drv_ctx.ip_buf.actualcount);
if (drv_ctx.ip_buf_ion_info == NULL) {
return OMX_ErrorInsufficientResources;
}
#endif
for (i=0; i < drv_ctx.ip_buf.actualcount; i++) {
drv_ctx.ptr_inputbuffer [i].pmem_fd = -1;
#ifdef USE_ION
drv_ctx.ip_buf_ion_info[i].ion_device_fd = -1;
#endif
}
}
for (i=0; i< drv_ctx.ip_buf.actualcount; i++) {
if (BITMASK_ABSENT(&m_inp_bm_count,i)) {
DEBUG_PRINT_LOW("Free Input Buffer Index %d",i);
break;
}
}
if (i < drv_ctx.ip_buf.actualcount) {
struct v4l2_buffer buf;
struct v4l2_plane plane;
int rc;
DEBUG_PRINT_LOW("Allocate input Buffer");
#ifdef USE_ION
drv_ctx.ip_buf_ion_info[i].ion_device_fd = alloc_map_ion_memory(
drv_ctx.ip_buf.buffer_size,drv_ctx.op_buf.alignment,
&drv_ctx.ip_buf_ion_info[i].ion_alloc_data,
&drv_ctx.ip_buf_ion_info[i].fd_ion_data, secure_mode ? ION_SECURE
#ifndef DISABLE_INPUT_BUFFER_CACHE
: ION_FLAG_CACHED
#else
: 0
#endif
);
if (drv_ctx.ip_buf_ion_info[i].ion_device_fd < 0) {
return OMX_ErrorInsufficientResources;
}
pmem_fd = drv_ctx.ip_buf_ion_info[i].fd_ion_data.fd;
#else
pmem_fd = open (MEM_DEVICE,O_RDWR);
if (pmem_fd < 0) {
DEBUG_PRINT_ERROR("open failed for pmem/adsp for input buffer");
return OMX_ErrorInsufficientResources;
}
if (pmem_fd == 0) {
pmem_fd = open (MEM_DEVICE,O_RDWR);
if (pmem_fd < 0) {
DEBUG_PRINT_ERROR("open failed for pmem/adsp for input buffer");
return OMX_ErrorInsufficientResources;
}
}
if (!align_pmem_buffers(pmem_fd, drv_ctx.ip_buf.buffer_size,
drv_ctx.ip_buf.alignment)) {
DEBUG_PRINT_ERROR("align_pmem_buffers() failed");
close(pmem_fd);
return OMX_ErrorInsufficientResources;
}
#endif
if (!secure_mode) {
buf_addr = (unsigned char *)mmap(NULL,
drv_ctx.ip_buf.buffer_size,
PROT_READ|PROT_WRITE, MAP_SHARED, pmem_fd, 0);
if (buf_addr == MAP_FAILED) {
close(pmem_fd);
#ifdef USE_ION
free_ion_memory(&drv_ctx.ip_buf_ion_info[i]);
#endif
DEBUG_PRINT_ERROR("Map Failed to allocate input buffer");
return OMX_ErrorInsufficientResources;
}
}
*bufferHdr = (m_inp_mem_ptr + i);
if (secure_mode)
drv_ctx.ptr_inputbuffer [i].bufferaddr = *bufferHdr;
else
drv_ctx.ptr_inputbuffer [i].bufferaddr = buf_addr;
drv_ctx.ptr_inputbuffer [i].pmem_fd = pmem_fd;
drv_ctx.ptr_inputbuffer [i].buffer_len = drv_ctx.ip_buf.buffer_size;
drv_ctx.ptr_inputbuffer [i].mmaped_size = drv_ctx.ip_buf.buffer_size;
drv_ctx.ptr_inputbuffer [i].offset = 0;
buf.index = i;
buf.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
buf.memory = V4L2_MEMORY_USERPTR;
plane.bytesused = 0;
plane.length = drv_ctx.ptr_inputbuffer [i].mmaped_size;
plane.m.userptr = (unsigned long)drv_ctx.ptr_inputbuffer[i].bufferaddr;
plane.reserved[0] =drv_ctx.ptr_inputbuffer [i].pmem_fd;
plane.reserved[1] = 0;
plane.data_offset = drv_ctx.ptr_inputbuffer[i].offset;
buf.m.planes = &plane;
buf.length = 1;
DEBUG_PRINT_LOW("Set the input Buffer Idx: %d Addr: %p", i,
drv_ctx.ptr_inputbuffer[i].bufferaddr);
rc = ioctl(drv_ctx.video_driver_fd, VIDIOC_PREPARE_BUF, &buf);
if (rc) {
DEBUG_PRINT_ERROR("Failed to prepare bufs");
/*TODO: How to handle this case */
return OMX_ErrorInsufficientResources;
}
input = *bufferHdr;
BITMASK_SET(&m_inp_bm_count,i);
DEBUG_PRINT_LOW("Buffer address %p of pmem",*bufferHdr);
if (secure_mode)
input->pBuffer = (OMX_U8 *)(intptr_t)drv_ctx.ptr_inputbuffer [i].pmem_fd;
else
input->pBuffer = (OMX_U8 *)buf_addr;
input->nSize = sizeof(OMX_BUFFERHEADERTYPE);
input->nVersion.nVersion = OMX_SPEC_VERSION;
input->nAllocLen = drv_ctx.ip_buf.buffer_size;
input->pAppPrivate = appData;
input->nInputPortIndex = OMX_CORE_INPUT_PORT_INDEX;
input->pInputPortPrivate = (void *)&drv_ctx.ptr_inputbuffer [i];
if (drv_ctx.disable_dmx) {
eRet = allocate_desc_buffer(i);
}
} else {
DEBUG_PRINT_ERROR("ERROR:Input Buffer Index not found");
eRet = OMX_ErrorInsufficientResources;
}
return eRet;
}
|
C
|
Android
| 0 |
CVE-2014-1713
|
https://www.cvedetails.com/cve/CVE-2014-1713/
|
CWE-399
|
https://github.com/chromium/chromium/commit/f85a87ec670ad0fce9d98d90c9a705b72a288154
|
f85a87ec670ad0fce9d98d90c9a705b72a288154
|
document.location bindings fix
BUG=352374
[email protected]
Review URL: https://codereview.chromium.org/196343011
git-svn-id: svn://svn.chromium.org/blink/trunk@169176 bbb929c8-8fbe-4397-9dbb-9b2b20218538
|
static void unforgeableLongAttributeAttributeSetterCallback(v8::Local<v8::String>, v8::Local<v8::Value> jsValue, const v8::PropertyCallbackInfo<void>& info)
{
TRACE_EVENT_SET_SAMPLING_STATE("Blink", "DOMSetter");
TestObjectPythonV8Internal::unforgeableLongAttributeAttributeSetter(jsValue, info);
TRACE_EVENT_SET_SAMPLING_STATE("V8", "V8Execution");
}
|
static void unforgeableLongAttributeAttributeSetterCallback(v8::Local<v8::String>, v8::Local<v8::Value> jsValue, const v8::PropertyCallbackInfo<void>& info)
{
TRACE_EVENT_SET_SAMPLING_STATE("Blink", "DOMSetter");
TestObjectPythonV8Internal::unforgeableLongAttributeAttributeSetter(jsValue, info);
TRACE_EVENT_SET_SAMPLING_STATE("V8", "V8Execution");
}
|
C
|
Chrome
| 0 |
CVE-2016-2449
|
https://www.cvedetails.com/cve/CVE-2016-2449/
|
CWE-264
|
https://android.googlesource.com/platform/frameworks/av/+/b04aee833c5cfb6b31b8558350feb14bb1a0f353
|
b04aee833c5cfb6b31b8558350feb14bb1a0f353
|
Camera3Device: Validate template ID
Validate template ID before creating a default request.
Bug: 26866110
Bug: 27568958
Change-Id: Ifda457024f1d5c2b1382f189c1a8d5fda852d30d
|
void Camera3Device::notifyShutter(const camera3_shutter_msg_t &msg,
NotificationListener *listener) {
ssize_t idx;
{
Mutex::Autolock l(mInFlightLock);
idx = mInFlightMap.indexOfKey(msg.frame_number);
if (idx >= 0) {
InFlightRequest &r = mInFlightMap.editValueAt(idx);
{
Mutex::Autolock l(mOutputLock);
if (r.hasInputBuffer) {
if (msg.frame_number < mNextReprocessShutterFrameNumber) {
SET_ERR("Shutter notification out-of-order. Expected "
"notification for frame %d, got frame %d",
mNextReprocessShutterFrameNumber, msg.frame_number);
return;
}
mNextReprocessShutterFrameNumber = msg.frame_number + 1;
} else {
if (msg.frame_number < mNextShutterFrameNumber) {
SET_ERR("Shutter notification out-of-order. Expected "
"notification for frame %d, got frame %d",
mNextShutterFrameNumber, msg.frame_number);
return;
}
mNextShutterFrameNumber = msg.frame_number + 1;
}
}
ALOGVV("Camera %d: %s: Shutter fired for frame %d (id %d) at %" PRId64,
mId, __FUNCTION__,
msg.frame_number, r.resultExtras.requestId, msg.timestamp);
if (listener != NULL) {
listener->notifyShutter(r.resultExtras, msg.timestamp);
}
r.shutterTimestamp = msg.timestamp;
sendCaptureResult(r.pendingMetadata, r.resultExtras,
r.partialResult.collectedResult, msg.frame_number,
r.hasInputBuffer, r.aeTriggerCancelOverride);
returnOutputBuffers(r.pendingOutputBuffers.array(),
r.pendingOutputBuffers.size(), r.shutterTimestamp);
r.pendingOutputBuffers.clear();
removeInFlightRequestIfReadyLocked(idx);
}
}
if (idx < 0) {
SET_ERR("Shutter notification for non-existent frame number %d",
msg.frame_number);
}
}
|
void Camera3Device::notifyShutter(const camera3_shutter_msg_t &msg,
NotificationListener *listener) {
ssize_t idx;
{
Mutex::Autolock l(mInFlightLock);
idx = mInFlightMap.indexOfKey(msg.frame_number);
if (idx >= 0) {
InFlightRequest &r = mInFlightMap.editValueAt(idx);
{
Mutex::Autolock l(mOutputLock);
if (r.hasInputBuffer) {
if (msg.frame_number < mNextReprocessShutterFrameNumber) {
SET_ERR("Shutter notification out-of-order. Expected "
"notification for frame %d, got frame %d",
mNextReprocessShutterFrameNumber, msg.frame_number);
return;
}
mNextReprocessShutterFrameNumber = msg.frame_number + 1;
} else {
if (msg.frame_number < mNextShutterFrameNumber) {
SET_ERR("Shutter notification out-of-order. Expected "
"notification for frame %d, got frame %d",
mNextShutterFrameNumber, msg.frame_number);
return;
}
mNextShutterFrameNumber = msg.frame_number + 1;
}
}
ALOGVV("Camera %d: %s: Shutter fired for frame %d (id %d) at %" PRId64,
mId, __FUNCTION__,
msg.frame_number, r.resultExtras.requestId, msg.timestamp);
if (listener != NULL) {
listener->notifyShutter(r.resultExtras, msg.timestamp);
}
r.shutterTimestamp = msg.timestamp;
sendCaptureResult(r.pendingMetadata, r.resultExtras,
r.partialResult.collectedResult, msg.frame_number,
r.hasInputBuffer, r.aeTriggerCancelOverride);
returnOutputBuffers(r.pendingOutputBuffers.array(),
r.pendingOutputBuffers.size(), r.shutterTimestamp);
r.pendingOutputBuffers.clear();
removeInFlightRequestIfReadyLocked(idx);
}
}
if (idx < 0) {
SET_ERR("Shutter notification for non-existent frame number %d",
msg.frame_number);
}
}
|
C
|
Android
| 0 |
CVE-2013-7421
|
https://www.cvedetails.com/cve/CVE-2013-7421/
|
CWE-264
|
https://github.com/torvalds/linux/commit/5d26a105b5a73e5635eae0629b42fa0a90e07b7b
|
5d26a105b5a73e5635eae0629b42fa0a90e07b7b
|
crypto: prefix module autoloading with "crypto-"
This prefixes all crypto module loading with "crypto-" so we never run
the risk of exposing module auto-loading to userspace via a crypto API,
as demonstrated by Mathias Krause:
https://lkml.org/lkml/2013/3/4/70
Signed-off-by: Kees Cook <[email protected]>
Signed-off-by: Herbert Xu <[email protected]>
|
static int hmac_sha1_init(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
ctx->config.data_format = HASH_DATA_8_BITS;
ctx->config.algorithm = HASH_ALGO_SHA1;
ctx->config.oper_mode = HASH_OPER_MODE_HMAC;
ctx->digestsize = SHA1_DIGEST_SIZE;
return hash_init(req);
}
|
static int hmac_sha1_init(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
ctx->config.data_format = HASH_DATA_8_BITS;
ctx->config.algorithm = HASH_ALGO_SHA1;
ctx->config.oper_mode = HASH_OPER_MODE_HMAC;
ctx->digestsize = SHA1_DIGEST_SIZE;
return hash_init(req);
}
|
C
|
linux
| 0 |
CVE-2018-6085
|
https://www.cvedetails.com/cve/CVE-2018-6085/
|
CWE-20
|
https://github.com/chromium/chromium/commit/df5b1e1f88e013bc96107cc52c4a4f33a8238444
|
df5b1e1f88e013bc96107cc52c4a4f33a8238444
|
Blockfile cache: fix long-standing sparse + evict reentrancy problem
Thanks to nedwilliamson@ (on gmail) for an alternative perspective
plus a reduction to make fixing this much easier.
Bug: 826626, 518908, 537063, 802886
Change-Id: Ibfa01416f9a8e7f7b361e4f93b4b6b134728b85f
Reviewed-on: https://chromium-review.googlesource.com/985052
Reviewed-by: Matt Menke <[email protected]>
Commit-Queue: Maks Orlovich <[email protected]>
Cr-Commit-Position: refs/heads/master@{#547103}
|
void InFlightBackendIO::DoomAllEntries(
const net::CompletionCallback& callback) {
scoped_refptr<BackendIO> operation(new BackendIO(this, backend_, callback));
operation->DoomAllEntries();
PostOperation(FROM_HERE, operation.get());
}
|
void InFlightBackendIO::DoomAllEntries(
const net::CompletionCallback& callback) {
scoped_refptr<BackendIO> operation(new BackendIO(this, backend_, callback));
operation->DoomAllEntries();
PostOperation(FROM_HERE, operation.get());
}
|
C
|
Chrome
| 0 |
CVE-2016-9557
|
https://www.cvedetails.com/cve/CVE-2016-9557/
|
CWE-190
|
https://github.com/mdadams/jasper/commit/d42b2388f7f8e0332c846675133acea151fc557a
|
d42b2388f7f8e0332c846675133acea151fc557a
|
The generation of the configuration file jas_config.h has been completely
reworked in order to avoid pollution of the global namespace.
Some problematic types like uchar, ulong, and friends have been replaced
with names with a jas_ prefix.
An option max_samples has been added to the BMP and JPEG decoders to
restrict the maximum size of image that they can decode. This change
was made as a (possibly temporary) fix to address security concerns.
A max_samples command-line option has also been added to imginfo.
Whether an image component (for jas_image_t) is stored in memory or on
disk is now based on the component size (rather than the image size).
Some debug log message were added.
Some new integer overflow checks were added.
Some new safe integer add/multiply functions were added.
More pre-C99 cruft was removed. JasPer has numerous "hacks" to
handle pre-C99 compilers. JasPer now assumes C99 support. So, this
pre-C99 cruft is unnecessary and can be removed.
The regression jasper-doublefree-mem_close.jpg has been re-enabled.
Theoretically, it should work more predictably now.
|
static int jas_icccurv_output(jas_iccattrval_t *attrval, jas_stream_t *out)
{
jas_icccurv_t *curv = &attrval->data.curv;
unsigned int i;
if (jas_iccputuint32(out, curv->numents))
goto error;
for (i = 0; i < curv->numents; ++i) {
if (jas_iccputuint16(out, curv->ents[i]))
goto error;
}
return 0;
error:
return -1;
}
|
static int jas_icccurv_output(jas_iccattrval_t *attrval, jas_stream_t *out)
{
jas_icccurv_t *curv = &attrval->data.curv;
unsigned int i;
if (jas_iccputuint32(out, curv->numents))
goto error;
for (i = 0; i < curv->numents; ++i) {
if (jas_iccputuint16(out, curv->ents[i]))
goto error;
}
return 0;
error:
return -1;
}
|
C
|
jasper
| 0 |
CVE-2016-3835
|
https://www.cvedetails.com/cve/CVE-2016-3835/
|
CWE-200
|
https://android.googlesource.com/platform/hardware/qcom/media/+/7558d03e6498e970b761aa44fff6b2c659202d95
|
7558d03e6498e970b761aa44fff6b2c659202d95
|
DO NOT MERGE mm-video-v4l2: venc: add checks before accessing heap pointers
Heap pointers do not point to user virtual addresses in case
of secure session.
Set them to NULL and add checks to avoid accesing them
Bug: 28815329
Bug: 28920116
Change-Id: I94fd5808e753b58654d65e175d3857ef46ffba26
|
unsigned venc_dev::venc_flush( unsigned port)
{
struct v4l2_encoder_cmd enc;
DEBUG_PRINT_LOW("in %s", __func__);
enc.cmd = V4L2_ENC_QCOM_CMD_FLUSH;
enc.flags = V4L2_QCOM_CMD_FLUSH_OUTPUT | V4L2_QCOM_CMD_FLUSH_CAPTURE;
if (ioctl(m_nDriver_fd, VIDIOC_ENCODER_CMD, &enc)) {
DEBUG_PRINT_ERROR("Flush Port (%d) Failed ", port);
return -1;
}
return 0;
}
|
unsigned venc_dev::venc_flush( unsigned port)
{
struct v4l2_encoder_cmd enc;
DEBUG_PRINT_LOW("in %s", __func__);
enc.cmd = V4L2_ENC_QCOM_CMD_FLUSH;
enc.flags = V4L2_QCOM_CMD_FLUSH_OUTPUT | V4L2_QCOM_CMD_FLUSH_CAPTURE;
if (ioctl(m_nDriver_fd, VIDIOC_ENCODER_CMD, &enc)) {
DEBUG_PRINT_ERROR("Flush Port (%d) Failed ", port);
return -1;
}
return 0;
}
|
C
|
Android
| 0 |
CVE-2017-5093
|
https://www.cvedetails.com/cve/CVE-2017-5093/
|
CWE-20
|
https://github.com/chromium/chromium/commit/0720b02e4f303ea6b114d4ae9453e3a7ff55f8dc
|
0720b02e4f303ea6b114d4ae9453e3a7ff55f8dc
|
If JavaScript shows a dialog, cause the page to lose fullscreen.
BUG=670135, 550017, 726761, 728276
Review-Url: https://codereview.chromium.org/2906133004
Cr-Commit-Position: refs/heads/master@{#478884}
|
void WebContentsImpl::DidStartNavigationToPendingEntry(const GURL& url,
ReloadType reload_type) {
for (auto& observer : observers_)
observer.DidStartNavigationToPendingEntry(url, reload_type);
}
|
void WebContentsImpl::DidStartNavigationToPendingEntry(const GURL& url,
ReloadType reload_type) {
for (auto& observer : observers_)
observer.DidStartNavigationToPendingEntry(url, reload_type);
}
|
C
|
Chrome
| 0 |
CVE-2014-1743
|
https://www.cvedetails.com/cve/CVE-2014-1743/
|
CWE-399
|
https://github.com/chromium/chromium/commit/6d9425ec7badda912555d46ea7abcfab81fdd9b9
|
6d9425ec7badda912555d46ea7abcfab81fdd9b9
|
sync compositor: pass simple gfx types by const ref
See bug for reasoning
BUG=159273
Review URL: https://codereview.chromium.org/1417893006
Cr-Commit-Position: refs/heads/master@{#356653}
|
void AwContents::SmoothScroll(JNIEnv* env,
jobject obj,
jint target_x,
jint target_y,
jlong duration_ms) {
DCHECK_CURRENTLY_ON(BrowserThread::UI);
float scale = browser_view_renderer_.dip_scale() *
browser_view_renderer_.page_scale_factor();
render_view_host_ext_->SmoothScroll(target_x / scale, target_y / scale,
duration_ms);
}
|
void AwContents::SmoothScroll(JNIEnv* env,
jobject obj,
jint target_x,
jint target_y,
jlong duration_ms) {
DCHECK_CURRENTLY_ON(BrowserThread::UI);
float scale = browser_view_renderer_.dip_scale() *
browser_view_renderer_.page_scale_factor();
render_view_host_ext_->SmoothScroll(target_x / scale, target_y / scale,
duration_ms);
}
|
C
|
Chrome
| 0 |
CVE-2018-11596
|
https://www.cvedetails.com/cve/CVE-2018-11596/
|
CWE-119
|
https://github.com/espruino/Espruino/commit/ce1924193862d58cb43d3d4d9dada710a8361b89
|
ce1924193862d58cb43d3d4d9dada710a8361b89
|
fix jsvGetString regression
|
bool jsvIsString(const JsVar *v) { return v && (v->flags&JSV_VARTYPEMASK)>=_JSV_STRING_START && (v->flags&JSV_VARTYPEMASK)<=_JSV_STRING_END; } ///< String, or a NAME too
|
bool jsvIsString(const JsVar *v) { return v && (v->flags&JSV_VARTYPEMASK)>=_JSV_STRING_START && (v->flags&JSV_VARTYPEMASK)<=_JSV_STRING_END; } ///< String, or a NAME too
|
C
|
Espruino
| 0 |
CVE-2018-19840
|
https://www.cvedetails.com/cve/CVE-2018-19840/
|
CWE-835
|
https://github.com/dbry/WavPack/commit/070ef6f138956d9ea9612e69586152339dbefe51
|
070ef6f138956d9ea9612e69586152339dbefe51
|
issue #53: error out on zero sample rate
|
static void *find_metadata (void *wavpack_block, int desired_id, uint32_t *size)
{
WavpackHeader *wphdr = wavpack_block;
unsigned char *dp, meta_id, c1, c2;
int32_t bcount, meta_bc;
if (strncmp (wphdr->ckID, "wvpk", 4))
return NULL;
bcount = wphdr->ckSize - sizeof (WavpackHeader) + 8;
dp = (unsigned char *)(wphdr + 1);
while (bcount >= 2) {
meta_id = *dp++;
c1 = *dp++;
meta_bc = c1 << 1;
bcount -= 2;
if (meta_id & ID_LARGE) {
if (bcount < 2)
break;
c1 = *dp++;
c2 = *dp++;
meta_bc += ((uint32_t) c1 << 9) + ((uint32_t) c2 << 17);
bcount -= 2;
}
if ((meta_id & ID_UNIQUE) == desired_id) {
if ((bcount - meta_bc) >= 0) {
if (size)
*size = meta_bc - ((meta_id & ID_ODD_SIZE) ? 1 : 0);
return dp;
}
else
return NULL;
}
bcount -= meta_bc;
dp += meta_bc;
}
return NULL;
}
|
static void *find_metadata (void *wavpack_block, int desired_id, uint32_t *size)
{
WavpackHeader *wphdr = wavpack_block;
unsigned char *dp, meta_id, c1, c2;
int32_t bcount, meta_bc;
if (strncmp (wphdr->ckID, "wvpk", 4))
return NULL;
bcount = wphdr->ckSize - sizeof (WavpackHeader) + 8;
dp = (unsigned char *)(wphdr + 1);
while (bcount >= 2) {
meta_id = *dp++;
c1 = *dp++;
meta_bc = c1 << 1;
bcount -= 2;
if (meta_id & ID_LARGE) {
if (bcount < 2)
break;
c1 = *dp++;
c2 = *dp++;
meta_bc += ((uint32_t) c1 << 9) + ((uint32_t) c2 << 17);
bcount -= 2;
}
if ((meta_id & ID_UNIQUE) == desired_id) {
if ((bcount - meta_bc) >= 0) {
if (size)
*size = meta_bc - ((meta_id & ID_ODD_SIZE) ? 1 : 0);
return dp;
}
else
return NULL;
}
bcount -= meta_bc;
dp += meta_bc;
}
return NULL;
}
|
C
|
WavPack
| 0 |
CVE-2016-7115
|
https://www.cvedetails.com/cve/CVE-2016-7115/
|
CWE-119
|
https://github.com/haakonnessjoen/MAC-Telnet/commit/b69d11727d4f0f8cf719c79e3fb700f55ca03e9a
|
b69d11727d4f0f8cf719c79e3fb700f55ca03e9a
|
Merge pull request #20 from eyalitki/master
2nd round security fixes from eyalitki
|
void mndp_broadcast() {
struct mt_packet pdata;
struct utsname s_uname;
struct net_interface *interface;
unsigned int uptime;
#if defined(__APPLE__)
int mib[] = {CTL_KERN, KERN_BOOTTIME};
struct timeval boottime;
size_t tv_size = sizeof(boottime);
if (sysctl(mib, sizeof(mib)/sizeof(mib[0]), &boottime, &tv_size, NULL, 0) == -1) {
return;
}
uptime = htole32(boottime.tv_sec);
#elif defined(__linux__)
struct sysinfo s_sysinfo;
if (sysinfo(&s_sysinfo) != 0) {
return;
}
/* Seems like ping uptime is transmitted as little endian? */
uptime = htole32(s_sysinfo.uptime);
#else
struct timespec ts;
if (clock_gettime(CLOCK_UPTIME, &ts) != -1) {
uptime = htole32(((unsigned int)ts.tv_sec));
}
#endif
if (uname(&s_uname) != 0) {
return;
}
DL_FOREACH(interfaces, interface) {
struct mt_mndp_hdr *header = (struct mt_mndp_hdr *)&(pdata.data);
if (interface->has_mac == 0) {
continue;
}
mndp_init_packet(&pdata, 0, 1);
mndp_add_attribute(&pdata, MT_MNDPTYPE_ADDRESS, interface->mac_addr, ETH_ALEN);
mndp_add_attribute(&pdata, MT_MNDPTYPE_IDENTITY, s_uname.nodename, strlen(s_uname.nodename));
mndp_add_attribute(&pdata, MT_MNDPTYPE_VERSION, s_uname.release, strlen(s_uname.release));
mndp_add_attribute(&pdata, MT_MNDPTYPE_PLATFORM, PLATFORM_NAME, strlen(PLATFORM_NAME));
mndp_add_attribute(&pdata, MT_MNDPTYPE_HARDWARE, s_uname.machine, strlen(s_uname.machine));
mndp_add_attribute(&pdata, MT_MNDPTYPE_TIMESTAMP, &uptime, 4);
mndp_add_attribute(&pdata, MT_MNDPTYPE_SOFTID, MT_SOFTID_MACTELNET, strlen(MT_SOFTID_MACTELNET));
mndp_add_attribute(&pdata, MT_MNDPTYPE_IFNAME, interface->name, strlen(interface->name));
header->cksum = in_cksum((unsigned short *)&(pdata.data), pdata.size);
send_special_udp(interface, MT_MNDP_PORT, &pdata);
}
}
|
void mndp_broadcast() {
struct mt_packet pdata;
struct utsname s_uname;
struct net_interface *interface;
unsigned int uptime;
#if defined(__APPLE__)
int mib[] = {CTL_KERN, KERN_BOOTTIME};
struct timeval boottime;
size_t tv_size = sizeof(boottime);
if (sysctl(mib, sizeof(mib)/sizeof(mib[0]), &boottime, &tv_size, NULL, 0) == -1) {
return;
}
uptime = htole32(boottime.tv_sec);
#elif defined(__linux__)
struct sysinfo s_sysinfo;
if (sysinfo(&s_sysinfo) != 0) {
return;
}
/* Seems like ping uptime is transmitted as little endian? */
uptime = htole32(s_sysinfo.uptime);
#else
struct timespec ts;
if (clock_gettime(CLOCK_UPTIME, &ts) != -1) {
uptime = htole32(((unsigned int)ts.tv_sec));
}
#endif
if (uname(&s_uname) != 0) {
return;
}
DL_FOREACH(interfaces, interface) {
struct mt_mndp_hdr *header = (struct mt_mndp_hdr *)&(pdata.data);
if (interface->has_mac == 0) {
continue;
}
mndp_init_packet(&pdata, 0, 1);
mndp_add_attribute(&pdata, MT_MNDPTYPE_ADDRESS, interface->mac_addr, ETH_ALEN);
mndp_add_attribute(&pdata, MT_MNDPTYPE_IDENTITY, s_uname.nodename, strlen(s_uname.nodename));
mndp_add_attribute(&pdata, MT_MNDPTYPE_VERSION, s_uname.release, strlen(s_uname.release));
mndp_add_attribute(&pdata, MT_MNDPTYPE_PLATFORM, PLATFORM_NAME, strlen(PLATFORM_NAME));
mndp_add_attribute(&pdata, MT_MNDPTYPE_HARDWARE, s_uname.machine, strlen(s_uname.machine));
mndp_add_attribute(&pdata, MT_MNDPTYPE_TIMESTAMP, &uptime, 4);
mndp_add_attribute(&pdata, MT_MNDPTYPE_SOFTID, MT_SOFTID_MACTELNET, strlen(MT_SOFTID_MACTELNET));
mndp_add_attribute(&pdata, MT_MNDPTYPE_IFNAME, interface->name, strlen(interface->name));
header->cksum = in_cksum((unsigned short *)&(pdata.data), pdata.size);
send_special_udp(interface, MT_MNDP_PORT, &pdata);
}
}
|
C
|
MAC-Telnet
| 0 |
CVE-2016-7948
|
https://www.cvedetails.com/cve/CVE-2016-7948/
|
CWE-787
|
https://cgit.freedesktop.org/xorg/lib/libXrandr/commit/?id=a0df3e1c7728205e5c7650b2e6dce684139254a6
|
a0df3e1c7728205e5c7650b2e6dce684139254a6
| null |
SizeID XRRConfigCurrentConfiguration (XRRScreenConfiguration *config,
Rotation *rotation)
{
*rotation = (Rotation) config->current_rotation;
return (SizeID) config->current_size;
}
|
SizeID XRRConfigCurrentConfiguration (XRRScreenConfiguration *config,
Rotation *rotation)
{
*rotation = (Rotation) config->current_rotation;
return (SizeID) config->current_size;
}
|
C
|
libXrandr
| 0 |
CVE-2018-6043
|
https://www.cvedetails.com/cve/CVE-2018-6043/
|
CWE-20
|
https://github.com/chromium/chromium/commit/36fd3c9a6ba9fce9dd80c442c3ba5decd8e4c065
|
36fd3c9a6ba9fce9dd80c442c3ba5decd8e4c065
|
Reland "Launching an external protocol handler now escapes the URL."
This is a reland of 2401e58572884b3561e4348d64f11ac74667ef02
Original change's description:
> Launching an external protocol handler now escapes the URL.
>
> Fixes bug introduced in r102449.
>
> Bug: 785809
> Change-Id: I9e6dd1031dd7e7b8d378b138ab151daefdc0c6dc
> Reviewed-on: https://chromium-review.googlesource.com/778747
> Commit-Queue: Matt Giuca <[email protected]>
> Reviewed-by: Eric Lawrence <[email protected]>
> Reviewed-by: Ben Wells <[email protected]>
> Cr-Commit-Position: refs/heads/master@{#518848}
Bug: 785809
Change-Id: Ib8954584004ff5681654398db76d48cdf4437df7
Reviewed-on: https://chromium-review.googlesource.com/788551
Reviewed-by: Ben Wells <[email protected]>
Commit-Queue: Matt Giuca <[email protected]>
Cr-Commit-Position: refs/heads/master@{#519203}
|
ExternalProtocolHandler::BlockState GetBlockStateWithDelegate(
const std::string& scheme,
ExternalProtocolHandler::Delegate* delegate,
Profile* profile) {
if (delegate)
return delegate->GetBlockState(scheme, profile);
return ExternalProtocolHandler::GetBlockState(scheme, profile);
}
|
ExternalProtocolHandler::BlockState GetBlockStateWithDelegate(
const std::string& scheme,
ExternalProtocolHandler::Delegate* delegate,
Profile* profile) {
if (delegate)
return delegate->GetBlockState(scheme, profile);
return ExternalProtocolHandler::GetBlockState(scheme, profile);
}
|
C
|
Chrome
| 0 |
CVE-2017-5087
|
https://www.cvedetails.com/cve/CVE-2017-5087/
|
CWE-416
|
https://github.com/chromium/chromium/commit/11601c08e92732d2883af2057c41c17cba890844
|
11601c08e92732d2883af2057c41c17cba890844
|
[IndexedDB] Fixed transaction use-after-free vuln
Bug: 725032
Change-Id: I689ded6c74d5563403587b149c3f3e02e807e4aa
Reviewed-on: https://chromium-review.googlesource.com/518483
Reviewed-by: Joshua Bell <[email protected]>
Commit-Queue: Daniel Murphy <[email protected]>
Cr-Commit-Position: refs/heads/master@{#475952}
|
void DatabaseImpl::Commit(int64_t transaction_id) {
idb_runner_->PostTask(
FROM_HERE, base::Bind(&IDBThreadHelper::Commit, base::Unretained(helper_),
transaction_id));
}
|
void DatabaseImpl::Commit(int64_t transaction_id) {
idb_runner_->PostTask(
FROM_HERE, base::Bind(&IDBThreadHelper::Commit, base::Unretained(helper_),
transaction_id));
}
|
C
|
Chrome
| 0 |
CVE-2013-2929
|
https://www.cvedetails.com/cve/CVE-2013-2929/
|
CWE-264
|
https://github.com/torvalds/linux/commit/d049f74f2dbe71354d43d393ac3a188947811348
|
d049f74f2dbe71354d43d393ac3a188947811348
|
exec/ptrace: fix get_dumpable() incorrect tests
The get_dumpable() return value is not boolean. Most users of the
function actually want to be testing for non-SUID_DUMP_USER(1) rather than
SUID_DUMP_DISABLE(0). The SUID_DUMP_ROOT(2) is also considered a
protected state. Almost all places did this correctly, excepting the two
places fixed in this patch.
Wrong logic:
if (dumpable == SUID_DUMP_DISABLE) { /* be protective */ }
or
if (dumpable == 0) { /* be protective */ }
or
if (!dumpable) { /* be protective */ }
Correct logic:
if (dumpable != SUID_DUMP_USER) { /* be protective */ }
or
if (dumpable != 1) { /* be protective */ }
Without this patch, if the system had set the sysctl fs/suid_dumpable=2, a
user was able to ptrace attach to processes that had dropped privileges to
that user. (This may have been partially mitigated if Yama was enabled.)
The macros have been moved into the file that declares get/set_dumpable(),
which means things like the ia64 code can see them too.
CVE-2013-2929
Reported-by: Vasily Kulikov <[email protected]>
Signed-off-by: Kees Cook <[email protected]>
Cc: "Luck, Tony" <[email protected]>
Cc: Oleg Nesterov <[email protected]>
Cc: "Eric W. Biederman" <[email protected]>
Cc: <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len)
{
int copied = 0;
while (len > 0) {
char buf[128];
int this_len, retval;
this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
if (copy_from_user(buf, src, this_len))
return -EFAULT;
retval = access_process_vm(tsk, dst, buf, this_len, 1);
if (!retval) {
if (copied)
break;
return -EIO;
}
copied += retval;
src += retval;
dst += retval;
len -= retval;
}
return copied;
}
|
int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len)
{
int copied = 0;
while (len > 0) {
char buf[128];
int this_len, retval;
this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
if (copy_from_user(buf, src, this_len))
return -EFAULT;
retval = access_process_vm(tsk, dst, buf, this_len, 1);
if (!retval) {
if (copied)
break;
return -EIO;
}
copied += retval;
src += retval;
dst += retval;
len -= retval;
}
return copied;
}
|
C
|
linux
| 0 |
CVE-2018-10360
|
https://www.cvedetails.com/cve/CVE-2018-10360/
|
CWE-125
|
https://github.com/file/file/commit/a642587a9c9e2dd7feacdf513c3643ce26ad3c22
|
a642587a9c9e2dd7feacdf513c3643ce26ad3c22
|
Avoid reading past the end of buffer (Rui Reis)
|
do_os_note(struct magic_set *ms, unsigned char *nbuf, uint32_t type,
int swap, uint32_t namesz, uint32_t descsz,
size_t noff, size_t doff, int *flags)
{
if (namesz == 5 && strcmp((char *)&nbuf[noff], "SuSE") == 0 &&
type == NT_GNU_VERSION && descsz == 2) {
*flags |= FLAGS_DID_OS_NOTE;
file_printf(ms, ", for SuSE %d.%d", nbuf[doff], nbuf[doff + 1]);
return 1;
}
if (namesz == 4 && strcmp((char *)&nbuf[noff], "GNU") == 0 &&
type == NT_GNU_VERSION && descsz == 16) {
uint32_t desc[4];
memcpy(desc, &nbuf[doff], sizeof(desc));
*flags |= FLAGS_DID_OS_NOTE;
if (file_printf(ms, ", for GNU/") == -1)
return 1;
switch (elf_getu32(swap, desc[0])) {
case GNU_OS_LINUX:
if (file_printf(ms, "Linux") == -1)
return 1;
break;
case GNU_OS_HURD:
if (file_printf(ms, "Hurd") == -1)
return 1;
break;
case GNU_OS_SOLARIS:
if (file_printf(ms, "Solaris") == -1)
return 1;
break;
case GNU_OS_KFREEBSD:
if (file_printf(ms, "kFreeBSD") == -1)
return 1;
break;
case GNU_OS_KNETBSD:
if (file_printf(ms, "kNetBSD") == -1)
return 1;
break;
default:
if (file_printf(ms, "<unknown>") == -1)
return 1;
}
if (file_printf(ms, " %d.%d.%d", elf_getu32(swap, desc[1]),
elf_getu32(swap, desc[2]), elf_getu32(swap, desc[3])) == -1)
return 1;
return 1;
}
if (namesz == 7 && strcmp((char *)&nbuf[noff], "NetBSD") == 0) {
if (type == NT_NETBSD_VERSION && descsz == 4) {
*flags |= FLAGS_DID_OS_NOTE;
do_note_netbsd_version(ms, swap, &nbuf[doff]);
return 1;
}
}
if (namesz == 8 && strcmp((char *)&nbuf[noff], "FreeBSD") == 0) {
if (type == NT_FREEBSD_VERSION && descsz == 4) {
*flags |= FLAGS_DID_OS_NOTE;
do_note_freebsd_version(ms, swap, &nbuf[doff]);
return 1;
}
}
if (namesz == 8 && strcmp((char *)&nbuf[noff], "OpenBSD") == 0 &&
type == NT_OPENBSD_VERSION && descsz == 4) {
*flags |= FLAGS_DID_OS_NOTE;
if (file_printf(ms, ", for OpenBSD") == -1)
return 1;
/* Content of note is always 0 */
return 1;
}
if (namesz == 10 && strcmp((char *)&nbuf[noff], "DragonFly") == 0 &&
type == NT_DRAGONFLY_VERSION && descsz == 4) {
uint32_t desc;
*flags |= FLAGS_DID_OS_NOTE;
if (file_printf(ms, ", for DragonFly") == -1)
return 1;
memcpy(&desc, &nbuf[doff], sizeof(desc));
desc = elf_getu32(swap, desc);
if (file_printf(ms, " %d.%d.%d", desc / 100000,
desc / 10000 % 10, desc % 10000) == -1)
return 1;
return 1;
}
return 0;
}
|
do_os_note(struct magic_set *ms, unsigned char *nbuf, uint32_t type,
int swap, uint32_t namesz, uint32_t descsz,
size_t noff, size_t doff, int *flags)
{
if (namesz == 5 && strcmp((char *)&nbuf[noff], "SuSE") == 0 &&
type == NT_GNU_VERSION && descsz == 2) {
*flags |= FLAGS_DID_OS_NOTE;
file_printf(ms, ", for SuSE %d.%d", nbuf[doff], nbuf[doff + 1]);
return 1;
}
if (namesz == 4 && strcmp((char *)&nbuf[noff], "GNU") == 0 &&
type == NT_GNU_VERSION && descsz == 16) {
uint32_t desc[4];
memcpy(desc, &nbuf[doff], sizeof(desc));
*flags |= FLAGS_DID_OS_NOTE;
if (file_printf(ms, ", for GNU/") == -1)
return 1;
switch (elf_getu32(swap, desc[0])) {
case GNU_OS_LINUX:
if (file_printf(ms, "Linux") == -1)
return 1;
break;
case GNU_OS_HURD:
if (file_printf(ms, "Hurd") == -1)
return 1;
break;
case GNU_OS_SOLARIS:
if (file_printf(ms, "Solaris") == -1)
return 1;
break;
case GNU_OS_KFREEBSD:
if (file_printf(ms, "kFreeBSD") == -1)
return 1;
break;
case GNU_OS_KNETBSD:
if (file_printf(ms, "kNetBSD") == -1)
return 1;
break;
default:
if (file_printf(ms, "<unknown>") == -1)
return 1;
}
if (file_printf(ms, " %d.%d.%d", elf_getu32(swap, desc[1]),
elf_getu32(swap, desc[2]), elf_getu32(swap, desc[3])) == -1)
return 1;
return 1;
}
if (namesz == 7 && strcmp((char *)&nbuf[noff], "NetBSD") == 0) {
if (type == NT_NETBSD_VERSION && descsz == 4) {
*flags |= FLAGS_DID_OS_NOTE;
do_note_netbsd_version(ms, swap, &nbuf[doff]);
return 1;
}
}
if (namesz == 8 && strcmp((char *)&nbuf[noff], "FreeBSD") == 0) {
if (type == NT_FREEBSD_VERSION && descsz == 4) {
*flags |= FLAGS_DID_OS_NOTE;
do_note_freebsd_version(ms, swap, &nbuf[doff]);
return 1;
}
}
if (namesz == 8 && strcmp((char *)&nbuf[noff], "OpenBSD") == 0 &&
type == NT_OPENBSD_VERSION && descsz == 4) {
*flags |= FLAGS_DID_OS_NOTE;
if (file_printf(ms, ", for OpenBSD") == -1)
return 1;
/* Content of note is always 0 */
return 1;
}
if (namesz == 10 && strcmp((char *)&nbuf[noff], "DragonFly") == 0 &&
type == NT_DRAGONFLY_VERSION && descsz == 4) {
uint32_t desc;
*flags |= FLAGS_DID_OS_NOTE;
if (file_printf(ms, ", for DragonFly") == -1)
return 1;
memcpy(&desc, &nbuf[doff], sizeof(desc));
desc = elf_getu32(swap, desc);
if (file_printf(ms, " %d.%d.%d", desc / 100000,
desc / 10000 % 10, desc % 10000) == -1)
return 1;
return 1;
}
return 0;
}
|
C
|
file
| 0 |
CVE-2016-3839
|
https://www.cvedetails.com/cve/CVE-2016-3839/
|
CWE-284
|
https://android.googlesource.com/platform/system/bt/+/472271b153c5dc53c28beac55480a8d8434b2d5c
|
472271b153c5dc53c28beac55480a8d8434b2d5c
|
DO NOT MERGE Fix potential DoS caused by delivering signal to BT process
Bug: 28885210
Change-Id: I63866d894bfca47464d6e42e3fb0357c4f94d360
Conflicts:
btif/co/bta_hh_co.c
btif/src/btif_core.c
Merge conflict resolution of ag/1161415 (referencing ag/1164670)
- Directly into mnc-mr2-release
|
void btif_hl_add_socket_to_set(fd_set *p_org_set) {
btif_hl_mdl_cb_t *p_dcb = NULL;
btif_hl_mcl_cb_t *p_mcb = NULL;
btif_hl_app_cb_t *p_acb = NULL;
btif_hl_evt_cb_t evt_param;
bt_status_t status;
int len;
BTIF_TRACE_DEBUG("entering %s",__FUNCTION__);
for (const list_node_t *node = list_begin(soc_queue);
node != list_end(soc_queue); node = list_next(node)) {
btif_hl_soc_cb_t *p_scb = list_node(node);
BTIF_TRACE_DEBUG("btif_hl_add_socket_to_set first p_scb=0x%x", p_scb);
if (btif_hl_get_socket_state(p_scb) == BTIF_HL_SOC_STATE_W4_ADD) {
btif_hl_set_socket_state(p_scb, BTIF_HL_SOC_STATE_W4_READ);
FD_SET(p_scb->socket_id[1], p_org_set);
BTIF_TRACE_DEBUG("found and set socket_id=%d is_set=%d",
p_scb->socket_id[1], FD_ISSET(p_scb->socket_id[1], p_org_set));
p_mcb = BTIF_HL_GET_MCL_CB_PTR(p_scb->app_idx, p_scb->mcl_idx);
p_dcb = BTIF_HL_GET_MDL_CB_PTR(p_scb->app_idx, p_scb->mcl_idx, p_scb->mdl_idx);
p_acb = BTIF_HL_GET_APP_CB_PTR(p_scb->app_idx);
if (p_mcb && p_dcb) {
btif_hl_stop_timer_using_handle(p_mcb->mcl_handle);
evt_param.chan_cb.app_id = p_acb->app_id;
memcpy(evt_param.chan_cb.bd_addr, p_mcb->bd_addr, sizeof(BD_ADDR));
evt_param.chan_cb.channel_id = p_dcb->channel_id;
evt_param.chan_cb.fd = p_scb->socket_id[0];
evt_param.chan_cb.mdep_cfg_index = (int ) p_dcb->local_mdep_cfg_idx;
evt_param.chan_cb.cb_state = BTIF_HL_CHAN_CB_STATE_CONNECTED_PENDING;
len = sizeof(btif_hl_send_chan_state_cb_t);
status = btif_transfer_context (btif_hl_proc_cb_evt, BTIF_HL_SEND_CONNECTED_CB,
(char*) &evt_param, len, NULL);
ASSERTC(status == BT_STATUS_SUCCESS, "context transfer failed", status);
}
}
}
BTIF_TRACE_DEBUG("leaving %s",__FUNCTION__);
}
|
void btif_hl_add_socket_to_set(fd_set *p_org_set) {
btif_hl_mdl_cb_t *p_dcb = NULL;
btif_hl_mcl_cb_t *p_mcb = NULL;
btif_hl_app_cb_t *p_acb = NULL;
btif_hl_evt_cb_t evt_param;
bt_status_t status;
int len;
BTIF_TRACE_DEBUG("entering %s",__FUNCTION__);
for (const list_node_t *node = list_begin(soc_queue);
node != list_end(soc_queue); node = list_next(node)) {
btif_hl_soc_cb_t *p_scb = list_node(node);
BTIF_TRACE_DEBUG("btif_hl_add_socket_to_set first p_scb=0x%x", p_scb);
if (btif_hl_get_socket_state(p_scb) == BTIF_HL_SOC_STATE_W4_ADD) {
btif_hl_set_socket_state(p_scb, BTIF_HL_SOC_STATE_W4_READ);
FD_SET(p_scb->socket_id[1], p_org_set);
BTIF_TRACE_DEBUG("found and set socket_id=%d is_set=%d",
p_scb->socket_id[1], FD_ISSET(p_scb->socket_id[1], p_org_set));
p_mcb = BTIF_HL_GET_MCL_CB_PTR(p_scb->app_idx, p_scb->mcl_idx);
p_dcb = BTIF_HL_GET_MDL_CB_PTR(p_scb->app_idx, p_scb->mcl_idx, p_scb->mdl_idx);
p_acb = BTIF_HL_GET_APP_CB_PTR(p_scb->app_idx);
if (p_mcb && p_dcb) {
btif_hl_stop_timer_using_handle(p_mcb->mcl_handle);
evt_param.chan_cb.app_id = p_acb->app_id;
memcpy(evt_param.chan_cb.bd_addr, p_mcb->bd_addr, sizeof(BD_ADDR));
evt_param.chan_cb.channel_id = p_dcb->channel_id;
evt_param.chan_cb.fd = p_scb->socket_id[0];
evt_param.chan_cb.mdep_cfg_index = (int ) p_dcb->local_mdep_cfg_idx;
evt_param.chan_cb.cb_state = BTIF_HL_CHAN_CB_STATE_CONNECTED_PENDING;
len = sizeof(btif_hl_send_chan_state_cb_t);
status = btif_transfer_context (btif_hl_proc_cb_evt, BTIF_HL_SEND_CONNECTED_CB,
(char*) &evt_param, len, NULL);
ASSERTC(status == BT_STATUS_SUCCESS, "context transfer failed", status);
}
}
}
BTIF_TRACE_DEBUG("leaving %s",__FUNCTION__);
}
|
C
|
Android
| 0 |
CVE-2018-16073
|
https://www.cvedetails.com/cve/CVE-2018-16073/
|
CWE-285
|
https://github.com/chromium/chromium/commit/0bb3f5c715eb66bb5c1fb05fd81d902ca57f33ca
|
0bb3f5c715eb66bb5c1fb05fd81d902ca57f33ca
|
Use unique processes for data URLs on restore.
Data URLs are usually put into the process that created them, but this
info is not tracked after a tab restore. Ensure that they do not end up
in the parent frame's process (or each other's process), in case they
are malicious.
BUG=863069
Change-Id: Ib391f90c7bdf28a0a9c057c5cc7918c10aed968b
Reviewed-on: https://chromium-review.googlesource.com/1150767
Reviewed-by: Alex Moshchuk <[email protected]>
Reviewed-by: Lei Zhang <[email protected]>
Commit-Queue: Charlie Reis <[email protected]>
Cr-Commit-Position: refs/heads/master@{#581023}
|
bool has_received_shutdown_request() {
return has_received_shutdown_request_;
}
|
bool has_received_shutdown_request() {
return has_received_shutdown_request_;
}
|
C
|
Chrome
| 0 |
CVE-2017-13041
|
https://www.cvedetails.com/cve/CVE-2017-13041/
|
CWE-125
|
https://github.com/the-tcpdump-group/tcpdump/commit/f4b9e24c7384d882a7f434cc7413925bf871d63e
|
f4b9e24c7384d882a7f434cc7413925bf871d63e
|
CVE-2017-13041/ICMP6: Add more bounds checks.
This fixes a buffer over-read discovered by Kim Gwan Yeong.
Add a test using the capture file supplied by the reporter(s).
|
mldv2_report_print(netdissect_options *ndo, const u_char *bp, u_int len)
{
const struct icmp6_hdr *icp = (const struct icmp6_hdr *) bp;
u_int group, nsrcs, ngroups;
u_int i, j;
/* Minimum len is 8 */
if (len < 8) {
ND_PRINT((ndo," [invalid len %d]", len));
return;
}
ND_TCHECK(icp->icmp6_data16[1]);
ngroups = EXTRACT_16BITS(&icp->icmp6_data16[1]);
ND_PRINT((ndo,", %d group record(s)", ngroups));
if (ndo->ndo_vflag > 0) {
/* Print the group records */
group = 8;
for (i = 0; i < ngroups; i++) {
/* type(1) + auxlen(1) + numsrc(2) + grp(16) */
if (len < group + 20) {
ND_PRINT((ndo," [invalid number of groups]"));
return;
}
ND_TCHECK2(bp[group + 4], sizeof(struct in6_addr));
ND_PRINT((ndo," [gaddr %s", ip6addr_string(ndo, &bp[group + 4])));
ND_PRINT((ndo," %s", tok2str(mldv2report2str, " [v2-report-#%d]",
bp[group])));
nsrcs = (bp[group + 2] << 8) + bp[group + 3];
/* Check the number of sources and print them */
if (len < group + 20 + (nsrcs * sizeof(struct in6_addr))) {
ND_PRINT((ndo," [invalid number of sources %d]", nsrcs));
return;
}
if (ndo->ndo_vflag == 1)
ND_PRINT((ndo,", %d source(s)", nsrcs));
else {
/* Print the sources */
ND_PRINT((ndo," {"));
for (j = 0; j < nsrcs; j++) {
ND_TCHECK2(bp[group + 20 + j * sizeof(struct in6_addr)],
sizeof(struct in6_addr));
ND_PRINT((ndo," %s", ip6addr_string(ndo, &bp[group + 20 + j * sizeof(struct in6_addr)])));
}
ND_PRINT((ndo," }"));
}
/* Next group record */
group += 20 + nsrcs * sizeof(struct in6_addr);
ND_PRINT((ndo,"]"));
}
}
return;
trunc:
ND_PRINT((ndo,"[|icmp6]"));
return;
}
|
mldv2_report_print(netdissect_options *ndo, const u_char *bp, u_int len)
{
const struct icmp6_hdr *icp = (const struct icmp6_hdr *) bp;
u_int group, nsrcs, ngroups;
u_int i, j;
/* Minimum len is 8 */
if (len < 8) {
ND_PRINT((ndo," [invalid len %d]", len));
return;
}
ND_TCHECK(icp->icmp6_data16[1]);
ngroups = EXTRACT_16BITS(&icp->icmp6_data16[1]);
ND_PRINT((ndo,", %d group record(s)", ngroups));
if (ndo->ndo_vflag > 0) {
/* Print the group records */
group = 8;
for (i = 0; i < ngroups; i++) {
/* type(1) + auxlen(1) + numsrc(2) + grp(16) */
if (len < group + 20) {
ND_PRINT((ndo," [invalid number of groups]"));
return;
}
ND_TCHECK2(bp[group + 4], sizeof(struct in6_addr));
ND_PRINT((ndo," [gaddr %s", ip6addr_string(ndo, &bp[group + 4])));
ND_PRINT((ndo," %s", tok2str(mldv2report2str, " [v2-report-#%d]",
bp[group])));
nsrcs = (bp[group + 2] << 8) + bp[group + 3];
/* Check the number of sources and print them */
if (len < group + 20 + (nsrcs * sizeof(struct in6_addr))) {
ND_PRINT((ndo," [invalid number of sources %d]", nsrcs));
return;
}
if (ndo->ndo_vflag == 1)
ND_PRINT((ndo,", %d source(s)", nsrcs));
else {
/* Print the sources */
ND_PRINT((ndo," {"));
for (j = 0; j < nsrcs; j++) {
ND_TCHECK2(bp[group + 20 + j * sizeof(struct in6_addr)],
sizeof(struct in6_addr));
ND_PRINT((ndo," %s", ip6addr_string(ndo, &bp[group + 20 + j * sizeof(struct in6_addr)])));
}
ND_PRINT((ndo," }"));
}
/* Next group record */
group += 20 + nsrcs * sizeof(struct in6_addr);
ND_PRINT((ndo,"]"));
}
}
return;
trunc:
ND_PRINT((ndo,"[|icmp6]"));
return;
}
|
C
|
tcpdump
| 0 |
CVE-2012-3400
|
https://www.cvedetails.com/cve/CVE-2012-3400/
|
CWE-119
|
https://github.com/torvalds/linux/commit/adee11b2085bee90bd8f4f52123ffb07882d6256
|
adee11b2085bee90bd8f4f52123ffb07882d6256
|
udf: Avoid run away loop when partition table length is corrupted
Check provided length of partition table so that (possibly maliciously)
corrupted partition table cannot cause accessing data beyond current buffer.
Signed-off-by: Jan Kara <[email protected]>
|
static void destroy_inodecache(void)
{
kmem_cache_destroy(udf_inode_cachep);
}
|
static void destroy_inodecache(void)
{
kmem_cache_destroy(udf_inode_cachep);
}
|
C
|
linux
| 0 |
CVE-2018-20856
|
https://www.cvedetails.com/cve/CVE-2018-20856/
|
CWE-416
|
https://github.com/torvalds/linux/commit/54648cf1ec2d7f4b6a71767799c45676a138ca24
|
54648cf1ec2d7f4b6a71767799c45676a138ca24
|
block: blk_init_allocated_queue() set q->fq as NULL in the fail case
We find the memory use-after-free issue in __blk_drain_queue()
on the kernel 4.14. After read the latest kernel 4.18-rc6 we
think it has the same problem.
Memory is allocated for q->fq in the blk_init_allocated_queue().
If the elevator init function called with error return, it will
run into the fail case to free the q->fq.
Then the __blk_drain_queue() uses the same memory after the free
of the q->fq, it will lead to the unpredictable event.
The patch is to set q->fq as NULL in the fail case of
blk_init_allocated_queue().
Fixes: commit 7c94e1c157a2 ("block: introduce blk_flush_queue to drive flush machinery")
Cc: <[email protected]>
Reviewed-by: Ming Lei <[email protected]>
Reviewed-by: Bart Van Assche <[email protected]>
Signed-off-by: xiao jin <[email protected]>
Signed-off-by: Jens Axboe <[email protected]>
|
int blk_update_nr_requests(struct request_queue *q, unsigned int nr)
{
struct request_list *rl;
int on_thresh, off_thresh;
WARN_ON_ONCE(q->mq_ops);
spin_lock_irq(q->queue_lock);
q->nr_requests = nr;
blk_queue_congestion_threshold(q);
on_thresh = queue_congestion_on_threshold(q);
off_thresh = queue_congestion_off_threshold(q);
blk_queue_for_each_rl(rl, q) {
if (rl->count[BLK_RW_SYNC] >= on_thresh)
blk_set_congested(rl, BLK_RW_SYNC);
else if (rl->count[BLK_RW_SYNC] < off_thresh)
blk_clear_congested(rl, BLK_RW_SYNC);
if (rl->count[BLK_RW_ASYNC] >= on_thresh)
blk_set_congested(rl, BLK_RW_ASYNC);
else if (rl->count[BLK_RW_ASYNC] < off_thresh)
blk_clear_congested(rl, BLK_RW_ASYNC);
if (rl->count[BLK_RW_SYNC] >= q->nr_requests) {
blk_set_rl_full(rl, BLK_RW_SYNC);
} else {
blk_clear_rl_full(rl, BLK_RW_SYNC);
wake_up(&rl->wait[BLK_RW_SYNC]);
}
if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) {
blk_set_rl_full(rl, BLK_RW_ASYNC);
} else {
blk_clear_rl_full(rl, BLK_RW_ASYNC);
wake_up(&rl->wait[BLK_RW_ASYNC]);
}
}
spin_unlock_irq(q->queue_lock);
return 0;
}
|
int blk_update_nr_requests(struct request_queue *q, unsigned int nr)
{
struct request_list *rl;
int on_thresh, off_thresh;
WARN_ON_ONCE(q->mq_ops);
spin_lock_irq(q->queue_lock);
q->nr_requests = nr;
blk_queue_congestion_threshold(q);
on_thresh = queue_congestion_on_threshold(q);
off_thresh = queue_congestion_off_threshold(q);
blk_queue_for_each_rl(rl, q) {
if (rl->count[BLK_RW_SYNC] >= on_thresh)
blk_set_congested(rl, BLK_RW_SYNC);
else if (rl->count[BLK_RW_SYNC] < off_thresh)
blk_clear_congested(rl, BLK_RW_SYNC);
if (rl->count[BLK_RW_ASYNC] >= on_thresh)
blk_set_congested(rl, BLK_RW_ASYNC);
else if (rl->count[BLK_RW_ASYNC] < off_thresh)
blk_clear_congested(rl, BLK_RW_ASYNC);
if (rl->count[BLK_RW_SYNC] >= q->nr_requests) {
blk_set_rl_full(rl, BLK_RW_SYNC);
} else {
blk_clear_rl_full(rl, BLK_RW_SYNC);
wake_up(&rl->wait[BLK_RW_SYNC]);
}
if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) {
blk_set_rl_full(rl, BLK_RW_ASYNC);
} else {
blk_clear_rl_full(rl, BLK_RW_ASYNC);
wake_up(&rl->wait[BLK_RW_ASYNC]);
}
}
spin_unlock_irq(q->queue_lock);
return 0;
}
|
C
|
linux
| 0 |
CVE-2017-1000198
|
https://www.cvedetails.com/cve/CVE-2017-1000198/
|
CWE-119
|
https://github.com/open-iscsi/tcmu-runner/commit/61bd03e600d2abf309173e9186f4d465bb1b7157
|
61bd03e600d2abf309173e9186f4d465bb1b7157
|
glfs: discard glfs_check_config
Signed-off-by: Prasanna Kumar Kalever <[email protected]>
|
static char* tcmu_get_path( struct tcmu_device *dev)
{
char *config;
config = strchr(tcmu_get_dev_cfgstring(dev), '/');
if (!config) {
tcmu_err("no configuration found in cfgstring\n");
return NULL;
}
config += 1; /* get past '/' */
return config;
}
|
static char* tcmu_get_path( struct tcmu_device *dev)
{
char *config;
config = strchr(tcmu_get_dev_cfgstring(dev), '/');
if (!config) {
tcmu_err("no configuration found in cfgstring\n");
return NULL;
}
config += 1; /* get past '/' */
return config;
}
|
C
|
tcmu-runner
| 0 |
CVE-2014-6269
|
https://www.cvedetails.com/cve/CVE-2014-6269/
|
CWE-189
|
https://git.haproxy.org/?p=haproxy-1.5.git;a=commitdiff;h=b4d05093bc89f71377230228007e69a1434c1a0c
|
b4d05093bc89f71377230228007e69a1434c1a0c
| null |
smp_fetch_url_ip(struct proxy *px, struct session *l4, void *l7, unsigned int opt,
const struct arg *args, struct sample *smp, const char *kw)
{
struct http_txn *txn = l7;
struct sockaddr_storage addr;
CHECK_HTTP_MESSAGE_FIRST();
url2sa(txn->req.chn->buf->p + txn->req.sl.rq.u, txn->req.sl.rq.u_l, &addr, NULL);
if (((struct sockaddr_in *)&addr)->sin_family != AF_INET)
return 0;
smp->type = SMP_T_IPV4;
smp->data.ipv4 = ((struct sockaddr_in *)&addr)->sin_addr;
smp->flags = 0;
return 1;
}
|
smp_fetch_url_ip(struct proxy *px, struct session *l4, void *l7, unsigned int opt,
const struct arg *args, struct sample *smp, const char *kw)
{
struct http_txn *txn = l7;
struct sockaddr_storage addr;
CHECK_HTTP_MESSAGE_FIRST();
url2sa(txn->req.chn->buf->p + txn->req.sl.rq.u, txn->req.sl.rq.u_l, &addr, NULL);
if (((struct sockaddr_in *)&addr)->sin_family != AF_INET)
return 0;
smp->type = SMP_T_IPV4;
smp->data.ipv4 = ((struct sockaddr_in *)&addr)->sin_addr;
smp->flags = 0;
return 1;
}
|
C
|
haproxy
| 0 |
CVE-2018-16427
|
https://www.cvedetails.com/cve/CVE-2018-16427/
|
CWE-125
|
https://github.com/OpenSC/OpenSC/pull/1447/commits/8fe377e93b4b56060e5bbfb6f3142ceaeca744fa
|
8fe377e93b4b56060e5bbfb6f3142ceaeca744fa
|
fixed out of bounds reads
Thanks to Eric Sesterhenn from X41 D-SEC GmbH
for reporting and suggesting security fixes.
|
int sc_pkcs15emu_itacns_init_ex(sc_pkcs15_card_t *p15card, struct sc_aid *aid,
sc_pkcs15emu_opt_t *opts)
{
sc_card_t *card = p15card->card;
SC_FUNC_CALLED(card->ctx, 1);
/* Check card */
if (!(opts && opts->flags & SC_PKCS15EMU_FLAGS_NO_CHECK)) {
if (! (
(card->type > SC_CARD_TYPE_ITACNS_BASE &&
card->type < SC_CARD_TYPE_ITACNS_BASE + 1000)
|| card->type == SC_CARD_TYPE_CARDOS_CIE_V1)
)
return SC_ERROR_WRONG_CARD;
}
/* Init card */
return itacns_init(p15card);
}
|
int sc_pkcs15emu_itacns_init_ex(sc_pkcs15_card_t *p15card, struct sc_aid *aid,
sc_pkcs15emu_opt_t *opts)
{
sc_card_t *card = p15card->card;
SC_FUNC_CALLED(card->ctx, 1);
/* Check card */
if (!(opts && opts->flags & SC_PKCS15EMU_FLAGS_NO_CHECK)) {
if (! (
(card->type > SC_CARD_TYPE_ITACNS_BASE &&
card->type < SC_CARD_TYPE_ITACNS_BASE + 1000)
|| card->type == SC_CARD_TYPE_CARDOS_CIE_V1)
)
return SC_ERROR_WRONG_CARD;
}
/* Init card */
return itacns_init(p15card);
}
|
C
|
OpenSC
| 0 |
CVE-2016-9560
|
https://www.cvedetails.com/cve/CVE-2016-9560/
|
CWE-119
|
https://github.com/mdadams/jasper/commit/1abc2e5a401a4bf1d5ca4df91358ce5df111f495
|
1abc2e5a401a4bf1d5ca4df91358ce5df111f495
|
Fixed an array overflow problem in the JPC decoder.
|
jpc_streamlist_t *jpc_ppmstabtostreams(jpc_ppxstab_t *tab)
{
jpc_streamlist_t *streams;
jas_uchar *dataptr;
uint_fast32_t datacnt;
uint_fast32_t tpcnt;
jpc_ppxstabent_t *ent;
int entno;
jas_stream_t *stream;
int n;
if (!(streams = jpc_streamlist_create())) {
goto error;
}
if (!tab->numents) {
return streams;
}
entno = 0;
ent = tab->ents[entno];
dataptr = ent->data;
datacnt = ent->len;
for (;;) {
/* Get the length of the packet header data for the current
tile-part. */
if (datacnt < 4) {
goto error;
}
if (!(stream = jas_stream_memopen(0, 0))) {
goto error;
}
if (jpc_streamlist_insert(streams, jpc_streamlist_numstreams(streams),
stream)) {
goto error;
}
tpcnt = (dataptr[0] << 24) | (dataptr[1] << 16) | (dataptr[2] << 8)
| dataptr[3];
datacnt -= 4;
dataptr += 4;
/* Get the packet header data for the current tile-part. */
while (tpcnt) {
if (!datacnt) {
if (++entno >= tab->numents) {
goto error;
}
ent = tab->ents[entno];
dataptr = ent->data;
datacnt = ent->len;
}
n = JAS_MIN(tpcnt, datacnt);
if (jas_stream_write(stream, dataptr, n) != n) {
goto error;
}
tpcnt -= n;
dataptr += n;
datacnt -= n;
}
jas_stream_rewind(stream);
if (!datacnt) {
if (++entno >= tab->numents) {
break;
}
ent = tab->ents[entno];
dataptr = ent->data;
datacnt = ent->len;
}
}
return streams;
error:
if (streams) {
jpc_streamlist_destroy(streams);
}
return 0;
}
|
jpc_streamlist_t *jpc_ppmstabtostreams(jpc_ppxstab_t *tab)
{
jpc_streamlist_t *streams;
jas_uchar *dataptr;
uint_fast32_t datacnt;
uint_fast32_t tpcnt;
jpc_ppxstabent_t *ent;
int entno;
jas_stream_t *stream;
int n;
if (!(streams = jpc_streamlist_create())) {
goto error;
}
if (!tab->numents) {
return streams;
}
entno = 0;
ent = tab->ents[entno];
dataptr = ent->data;
datacnt = ent->len;
for (;;) {
/* Get the length of the packet header data for the current
tile-part. */
if (datacnt < 4) {
goto error;
}
if (!(stream = jas_stream_memopen(0, 0))) {
goto error;
}
if (jpc_streamlist_insert(streams, jpc_streamlist_numstreams(streams),
stream)) {
goto error;
}
tpcnt = (dataptr[0] << 24) | (dataptr[1] << 16) | (dataptr[2] << 8)
| dataptr[3];
datacnt -= 4;
dataptr += 4;
/* Get the packet header data for the current tile-part. */
while (tpcnt) {
if (!datacnt) {
if (++entno >= tab->numents) {
goto error;
}
ent = tab->ents[entno];
dataptr = ent->data;
datacnt = ent->len;
}
n = JAS_MIN(tpcnt, datacnt);
if (jas_stream_write(stream, dataptr, n) != n) {
goto error;
}
tpcnt -= n;
dataptr += n;
datacnt -= n;
}
jas_stream_rewind(stream);
if (!datacnt) {
if (++entno >= tab->numents) {
break;
}
ent = tab->ents[entno];
dataptr = ent->data;
datacnt = ent->len;
}
}
return streams;
error:
if (streams) {
jpc_streamlist_destroy(streams);
}
return 0;
}
|
C
|
jasper
| 0 |
CVE-2013-2908
|
https://www.cvedetails.com/cve/CVE-2013-2908/
| null |
https://github.com/chromium/chromium/commit/7edf2c655761e7505950013e62c89e3bd2f7e6dc
|
7edf2c655761e7505950013e62c89e3bd2f7e6dc
|
Call didAccessInitialDocument when javascript: URLs are used.
BUG=265221
TEST=See bug for repro.
Review URL: https://chromiumcodereview.appspot.com/22572004
git-svn-id: svn://svn.chromium.org/blink/trunk@155790 bbb929c8-8fbe-4397-9dbb-9b2b20218538
|
v8::Local<v8::Value> ScriptController::callFunction(v8::Handle<v8::Function> function, v8::Handle<v8::Object> receiver, int argc, v8::Handle<v8::Value> args[])
{
RefPtr<Frame> protect(m_frame);
return ScriptController::callFunctionWithInstrumentation(m_frame ? m_frame->document() : 0, function, receiver, argc, args);
}
|
v8::Local<v8::Value> ScriptController::callFunction(v8::Handle<v8::Function> function, v8::Handle<v8::Object> receiver, int argc, v8::Handle<v8::Value> args[])
{
RefPtr<Frame> protect(m_frame);
return ScriptController::callFunctionWithInstrumentation(m_frame ? m_frame->document() : 0, function, receiver, argc, args);
}
|
C
|
Chrome
| 0 |
null | null | null |
https://github.com/chromium/chromium/commit/d1a59e4e845a01d7d7b80ef184b672752a9eae4d
|
d1a59e4e845a01d7d7b80ef184b672752a9eae4d
|
Fixing cross-process postMessage replies on more than two iterations.
When two frames are replying to each other using event.source across processes,
after the first two replies, things break down. The root cause is that in
RenderViewImpl::GetFrameByMappedID, the lookup was incorrect. It is now
properly searching for the remote frame id and returning the local one.
BUG=153445
Review URL: https://chromiumcodereview.appspot.com/11040015
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@159924 0039d316-1c4b-4281-b951-d872f2087c98
|
WebFrame* RenderViewImpl::GetChildFrame(const string16& xpath) const {
if (xpath.empty())
return webview()->mainFrame();
std::vector<string16> xpaths;
base::SplitString(xpath, '\n', &xpaths);
WebFrame* frame = webview()->mainFrame();
for (std::vector<string16>::const_iterator i = xpaths.begin();
frame && i != xpaths.end(); ++i) {
frame = frame->findChildByExpression(*i);
}
return frame;
}
|
WebFrame* RenderViewImpl::GetChildFrame(const string16& xpath) const {
if (xpath.empty())
return webview()->mainFrame();
std::vector<string16> xpaths;
base::SplitString(xpath, '\n', &xpaths);
WebFrame* frame = webview()->mainFrame();
for (std::vector<string16>::const_iterator i = xpaths.begin();
frame && i != xpaths.end(); ++i) {
frame = frame->findChildByExpression(*i);
}
return frame;
}
|
C
|
Chrome
| 0 |
CVE-2018-6060
|
https://www.cvedetails.com/cve/CVE-2018-6060/
|
CWE-416
|
https://github.com/chromium/chromium/commit/fd6a5115103b3e6a52ce15858c5ad4956df29300
|
fd6a5115103b3e6a52ce15858c5ad4956df29300
|
Revert "Keep AudioHandlers alive until they can be safely deleted."
This reverts commit 071df33edf2c8b4375fa432a83953359f93ea9e4.
Reason for revert:
This CL seems to cause an AudioNode leak on the Linux leak bot.
The log is:
https://ci.chromium.org/buildbot/chromium.webkit/WebKit%20Linux%20Trusty%20Leak/14252
* webaudio/AudioNode/audionode-connect-method-chaining.html
* webaudio/Panner/pannernode-basic.html
* webaudio/dom-exceptions.html
Original change's description:
> Keep AudioHandlers alive until they can be safely deleted.
>
> When an AudioNode is disposed, the handler is also disposed. But add
> the handler to the orphan list so that the handler stays alive until
> the context can safely delete it. If we don't do this, the handler
> may get deleted while the audio thread is processing the handler (due
> to, say, channel count changes and such).
>
> For an realtime context, always save the handler just in case the
> audio thread is running after the context is marked as closed (because
> the audio thread doesn't instantly stop when requested).
>
> For an offline context, only need to do this when the context is
> running because the context is guaranteed to be stopped if we're not
> in the running state. Hence, there's no possibility of deleting the
> handler while the graph is running.
>
> This is a revert of
> https://chromium-review.googlesource.com/c/chromium/src/+/860779, with
> a fix for the leak.
>
> Bug: 780919
> Change-Id: Ifb6b5fcf3fbc373f5779256688731245771da33c
> Reviewed-on: https://chromium-review.googlesource.com/862723
> Reviewed-by: Hongchan Choi <[email protected]>
> Commit-Queue: Raymond Toy <[email protected]>
> Cr-Commit-Position: refs/heads/master@{#528829}
[email protected],[email protected]
Change-Id: Ibf406bf6ed34ea1f03e86a64a1e5ba6de0970c6f
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Bug: 780919
Reviewed-on: https://chromium-review.googlesource.com/863402
Reviewed-by: Taiju Tsuiki <[email protected]>
Commit-Queue: Taiju Tsuiki <[email protected]>
Cr-Commit-Position: refs/heads/master@{#528888}
|
void AudioNode::disconnect(unsigned output_index,
ExceptionState& exception_state) {
DCHECK(IsMainThread());
BaseAudioContext::GraphAutoLocker locker(context());
if (output_index >= numberOfOutputs()) {
exception_state.ThrowDOMException(
kIndexSizeError,
ExceptionMessages::IndexOutsideRange(
"output index", output_index, 0u,
ExceptionMessages::kInclusiveBound, numberOfOutputs() - 1,
ExceptionMessages::kInclusiveBound));
return;
}
DisconnectAllFromOutput(output_index);
}
|
void AudioNode::disconnect(unsigned output_index,
ExceptionState& exception_state) {
DCHECK(IsMainThread());
BaseAudioContext::GraphAutoLocker locker(context());
if (output_index >= numberOfOutputs()) {
exception_state.ThrowDOMException(
kIndexSizeError,
ExceptionMessages::IndexOutsideRange(
"output index", output_index, 0u,
ExceptionMessages::kInclusiveBound, numberOfOutputs() - 1,
ExceptionMessages::kInclusiveBound));
return;
}
DisconnectAllFromOutput(output_index);
}
|
C
|
Chrome
| 0 |
CVE-2018-16427
|
https://www.cvedetails.com/cve/CVE-2018-16427/
|
CWE-125
|
https://github.com/OpenSC/OpenSC/pull/1447/commits/8fe377e93b4b56060e5bbfb6f3142ceaeca744fa
|
8fe377e93b4b56060e5bbfb6f3142ceaeca744fa
|
fixed out of bounds reads
Thanks to Eric Sesterhenn from X41 D-SEC GmbH
for reporting and suggesting security fixes.
|
static int entersafe_gen_random(sc_card_t *card,u8 *buff,size_t size)
{
int r=SC_SUCCESS;
u8 rbuf[SC_MAX_APDU_BUFFER_SIZE]={0};
sc_apdu_t apdu;
SC_FUNC_CALLED(card->ctx, SC_LOG_DEBUG_VERBOSE);
sc_format_apdu(card,&apdu,SC_APDU_CASE_2_SHORT,0x84,0x00,0x00);
apdu.resp=rbuf;
apdu.le=size;
apdu.resplen=sizeof(rbuf);
r=sc_transmit_apdu(card,&apdu);
SC_TEST_RET(card->ctx, SC_LOG_DEBUG_NORMAL, r, "entersafe gen random failed");
if(apdu.resplen!=size)
SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_NORMAL,SC_ERROR_INTERNAL);
memcpy(buff,rbuf,size);
SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_NORMAL,r);
}
|
static int entersafe_gen_random(sc_card_t *card,u8 *buff,size_t size)
{
int r=SC_SUCCESS;
u8 rbuf[SC_MAX_APDU_BUFFER_SIZE]={0};
sc_apdu_t apdu;
SC_FUNC_CALLED(card->ctx, SC_LOG_DEBUG_VERBOSE);
sc_format_apdu(card,&apdu,SC_APDU_CASE_2_SHORT,0x84,0x00,0x00);
apdu.resp=rbuf;
apdu.le=size;
apdu.resplen=sizeof(rbuf);
r=sc_transmit_apdu(card,&apdu);
SC_TEST_RET(card->ctx, SC_LOG_DEBUG_NORMAL, r, "entersafe gen random failed");
if(apdu.resplen!=size)
SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_NORMAL,SC_ERROR_INTERNAL);
memcpy(buff,rbuf,size);
SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_NORMAL,r);
}
|
C
|
OpenSC
| 0 |
CVE-2017-9373
|
https://www.cvedetails.com/cve/CVE-2017-9373/
|
CWE-772
|
https://git.qemu.org/?p=qemu.git;a=commit;h=d68f0f778e7f4fbd674627274267f269e40f0b04
|
d68f0f778e7f4fbd674627274267f269e40f0b04
| null |
static void ahci_init_d2h(AHCIDevice *ad)
{
IDEState *ide_state = &ad->port.ifs[0];
AHCIPortRegs *pr = &ad->port_regs;
if (ad->init_d2h_sent) {
return;
}
if (ahci_write_fis_d2h(ad)) {
ad->init_d2h_sent = true;
/* We're emulating receiving the first Reg H2D Fis from the device;
* Update the SIG register, but otherwise proceed as normal. */
pr->sig = ((uint32_t)ide_state->hcyl << 24) |
(ide_state->lcyl << 16) |
(ide_state->sector << 8) |
(ide_state->nsector & 0xFF);
}
}
|
static void ahci_init_d2h(AHCIDevice *ad)
{
IDEState *ide_state = &ad->port.ifs[0];
AHCIPortRegs *pr = &ad->port_regs;
if (ad->init_d2h_sent) {
return;
}
if (ahci_write_fis_d2h(ad)) {
ad->init_d2h_sent = true;
/* We're emulating receiving the first Reg H2D Fis from the device;
* Update the SIG register, but otherwise proceed as normal. */
pr->sig = ((uint32_t)ide_state->hcyl << 24) |
(ide_state->lcyl << 16) |
(ide_state->sector << 8) |
(ide_state->nsector & 0xFF);
}
}
|
C
|
qemu
| 0 |
CVE-2016-3746
|
https://www.cvedetails.com/cve/CVE-2016-3746/
| null |
https://android.googlesource.com/platform/hardware/qcom/media/+/5b82f4f90c3d531313714df4b936f92fb0ff15cf
|
5b82f4f90c3d531313714df4b936f92fb0ff15cf
|
DO NOT MERGE mm-video-v4l2: vdec: Avoid processing ETBs/FTBs in invalid states
(per the spec) ETB/FTB should not be handled in states other than
Executing, Paused and Idle. This avoids accessing invalid buffers.
Also add a lock to protect the private-buffers from being deleted
while accessing from another thread.
Bug: 27890802
Security Vulnerability - Heap Use-After-Free and Possible LPE in
MediaServer (libOmxVdec problem #6)
CRs-Fixed: 1008882
Change-Id: Iaac2e383cd53cf9cf8042c9ed93ddc76dba3907e
|
OMX_ERRORTYPE omx_vdec::allocate_color_convert_buf::allocate_buffers_color_convert(OMX_HANDLETYPE hComp,
OMX_BUFFERHEADERTYPE **bufferHdr,OMX_U32 port,OMX_PTR appData,OMX_U32 bytes)
{
OMX_ERRORTYPE eRet = OMX_ErrorNone;
if (!enabled) {
eRet = omx->allocate_output_buffer(hComp,bufferHdr,port,appData,bytes);
return eRet;
}
if (enabled && omx->is_component_secure()) {
DEBUG_PRINT_ERROR("Notin color convert mode secure_mode %d",
omx->is_component_secure());
return OMX_ErrorUnsupportedSetting;
}
if (!bufferHdr || bytes > buffer_size_req) {
DEBUG_PRINT_ERROR("Invalid params allocate_buffers_color_convert %p", bufferHdr);
DEBUG_PRINT_ERROR("color_convert buffer_size_req %u bytes %u",
(unsigned int)buffer_size_req, (unsigned int)bytes);
return OMX_ErrorBadParameter;
}
if (allocated_count >= omx->drv_ctx.op_buf.actualcount) {
DEBUG_PRINT_ERROR("Actual count err in allocate_buffers_color_convert");
return OMX_ErrorInsufficientResources;
}
OMX_BUFFERHEADERTYPE *temp_bufferHdr = NULL;
eRet = omx->allocate_output_buffer(hComp,&temp_bufferHdr,
port,appData,omx->drv_ctx.op_buf.buffer_size);
if (eRet != OMX_ErrorNone || !temp_bufferHdr) {
DEBUG_PRINT_ERROR("Buffer allocation failed color_convert");
return eRet;
}
if ((temp_bufferHdr - omx->m_out_mem_ptr) >=
(int)omx->drv_ctx.op_buf.actualcount) {
DEBUG_PRINT_ERROR("Invalid header index %ld",
(long int)(temp_bufferHdr - omx->m_out_mem_ptr));
return OMX_ErrorUndefined;
}
unsigned int i = allocated_count;
#ifdef USE_ION
op_buf_ion_info[i].ion_device_fd = omx->alloc_map_ion_memory(
buffer_size_req,buffer_alignment_req,
&op_buf_ion_info[i].ion_alloc_data,&op_buf_ion_info[i].fd_ion_data,
ION_FLAG_CACHED);
pmem_fd[i] = op_buf_ion_info[i].fd_ion_data.fd;
if (op_buf_ion_info[i].ion_device_fd < 0) {
DEBUG_PRINT_ERROR("alloc_map_ion failed in color_convert");
return OMX_ErrorInsufficientResources;
}
pmem_baseaddress[i] = (unsigned char *)mmap(NULL,buffer_size_req,
PROT_READ|PROT_WRITE,MAP_SHARED,pmem_fd[i],0);
if (pmem_baseaddress[i] == MAP_FAILED) {
DEBUG_PRINT_ERROR("MMAP failed for Size %d",buffer_size_req);
close(pmem_fd[i]);
omx->free_ion_memory(&op_buf_ion_info[i]);
return OMX_ErrorInsufficientResources;
}
m_heap_ptr[i].video_heap_ptr = new VideoHeap (
op_buf_ion_info[i].ion_device_fd,buffer_size_req,
pmem_baseaddress[i],op_buf_ion_info[i].ion_alloc_data.handle,pmem_fd[i]);
#endif
m_pmem_info_client[i].pmem_fd = (unsigned long)m_heap_ptr[i].video_heap_ptr.get();
m_pmem_info_client[i].offset = 0;
m_platform_entry_client[i].entry = (void *)&m_pmem_info_client[i];
m_platform_entry_client[i].type = OMX_QCOM_PLATFORM_PRIVATE_PMEM;
m_platform_list_client[i].nEntries = 1;
m_platform_list_client[i].entryList = &m_platform_entry_client[i];
m_out_mem_ptr_client[i].pOutputPortPrivate = NULL;
m_out_mem_ptr_client[i].nAllocLen = buffer_size_req;
m_out_mem_ptr_client[i].nFilledLen = 0;
m_out_mem_ptr_client[i].nFlags = 0;
m_out_mem_ptr_client[i].nOutputPortIndex = OMX_CORE_OUTPUT_PORT_INDEX;
m_out_mem_ptr_client[i].nSize = sizeof(OMX_BUFFERHEADERTYPE);
m_out_mem_ptr_client[i].nVersion.nVersion = OMX_SPEC_VERSION;
m_out_mem_ptr_client[i].pPlatformPrivate = &m_platform_list_client[i];
m_out_mem_ptr_client[i].pBuffer = pmem_baseaddress[i];
m_out_mem_ptr_client[i].pAppPrivate = appData;
*bufferHdr = &m_out_mem_ptr_client[i];
DEBUG_PRINT_HIGH("IL client buffer header %p", *bufferHdr);
allocated_count++;
return eRet;
}
|
OMX_ERRORTYPE omx_vdec::allocate_color_convert_buf::allocate_buffers_color_convert(OMX_HANDLETYPE hComp,
OMX_BUFFERHEADERTYPE **bufferHdr,OMX_U32 port,OMX_PTR appData,OMX_U32 bytes)
{
OMX_ERRORTYPE eRet = OMX_ErrorNone;
if (!enabled) {
eRet = omx->allocate_output_buffer(hComp,bufferHdr,port,appData,bytes);
return eRet;
}
if (enabled && omx->is_component_secure()) {
DEBUG_PRINT_ERROR("Notin color convert mode secure_mode %d",
omx->is_component_secure());
return OMX_ErrorUnsupportedSetting;
}
if (!bufferHdr || bytes > buffer_size_req) {
DEBUG_PRINT_ERROR("Invalid params allocate_buffers_color_convert %p", bufferHdr);
DEBUG_PRINT_ERROR("color_convert buffer_size_req %u bytes %u",
(unsigned int)buffer_size_req, (unsigned int)bytes);
return OMX_ErrorBadParameter;
}
if (allocated_count >= omx->drv_ctx.op_buf.actualcount) {
DEBUG_PRINT_ERROR("Actual count err in allocate_buffers_color_convert");
return OMX_ErrorInsufficientResources;
}
OMX_BUFFERHEADERTYPE *temp_bufferHdr = NULL;
eRet = omx->allocate_output_buffer(hComp,&temp_bufferHdr,
port,appData,omx->drv_ctx.op_buf.buffer_size);
if (eRet != OMX_ErrorNone || !temp_bufferHdr) {
DEBUG_PRINT_ERROR("Buffer allocation failed color_convert");
return eRet;
}
if ((temp_bufferHdr - omx->m_out_mem_ptr) >=
(int)omx->drv_ctx.op_buf.actualcount) {
DEBUG_PRINT_ERROR("Invalid header index %ld",
(long int)(temp_bufferHdr - omx->m_out_mem_ptr));
return OMX_ErrorUndefined;
}
unsigned int i = allocated_count;
#ifdef USE_ION
op_buf_ion_info[i].ion_device_fd = omx->alloc_map_ion_memory(
buffer_size_req,buffer_alignment_req,
&op_buf_ion_info[i].ion_alloc_data,&op_buf_ion_info[i].fd_ion_data,
ION_FLAG_CACHED);
pmem_fd[i] = op_buf_ion_info[i].fd_ion_data.fd;
if (op_buf_ion_info[i].ion_device_fd < 0) {
DEBUG_PRINT_ERROR("alloc_map_ion failed in color_convert");
return OMX_ErrorInsufficientResources;
}
pmem_baseaddress[i] = (unsigned char *)mmap(NULL,buffer_size_req,
PROT_READ|PROT_WRITE,MAP_SHARED,pmem_fd[i],0);
if (pmem_baseaddress[i] == MAP_FAILED) {
DEBUG_PRINT_ERROR("MMAP failed for Size %d",buffer_size_req);
close(pmem_fd[i]);
omx->free_ion_memory(&op_buf_ion_info[i]);
return OMX_ErrorInsufficientResources;
}
m_heap_ptr[i].video_heap_ptr = new VideoHeap (
op_buf_ion_info[i].ion_device_fd,buffer_size_req,
pmem_baseaddress[i],op_buf_ion_info[i].ion_alloc_data.handle,pmem_fd[i]);
#endif
m_pmem_info_client[i].pmem_fd = (unsigned long)m_heap_ptr[i].video_heap_ptr.get();
m_pmem_info_client[i].offset = 0;
m_platform_entry_client[i].entry = (void *)&m_pmem_info_client[i];
m_platform_entry_client[i].type = OMX_QCOM_PLATFORM_PRIVATE_PMEM;
m_platform_list_client[i].nEntries = 1;
m_platform_list_client[i].entryList = &m_platform_entry_client[i];
m_out_mem_ptr_client[i].pOutputPortPrivate = NULL;
m_out_mem_ptr_client[i].nAllocLen = buffer_size_req;
m_out_mem_ptr_client[i].nFilledLen = 0;
m_out_mem_ptr_client[i].nFlags = 0;
m_out_mem_ptr_client[i].nOutputPortIndex = OMX_CORE_OUTPUT_PORT_INDEX;
m_out_mem_ptr_client[i].nSize = sizeof(OMX_BUFFERHEADERTYPE);
m_out_mem_ptr_client[i].nVersion.nVersion = OMX_SPEC_VERSION;
m_out_mem_ptr_client[i].pPlatformPrivate = &m_platform_list_client[i];
m_out_mem_ptr_client[i].pBuffer = pmem_baseaddress[i];
m_out_mem_ptr_client[i].pAppPrivate = appData;
*bufferHdr = &m_out_mem_ptr_client[i];
DEBUG_PRINT_HIGH("IL client buffer header %p", *bufferHdr);
allocated_count++;
return eRet;
}
|
C
|
Android
| 0 |
CVE-2013-6640
|
https://www.cvedetails.com/cve/CVE-2013-6640/
|
CWE-119
|
https://github.com/chromium/chromium/commit/516abadc2553489ce28faeea4917280032fbe91d
|
516abadc2553489ce28faeea4917280032fbe91d
|
Update AffiliationFetcher to use new Affiliation API wire format.
The new format is not backward compatible with the old one, therefore this CL updates the client side protobuf definitions to be in line with the API definition. However, this CL does not yet make use of any additional fields introduced in the new wire format.
BUG=437865
Review URL: https://codereview.chromium.org/996613002
Cr-Commit-Position: refs/heads/master@{#319860}
|
~MockAffiliationFetcherDelegate() {}
|
~MockAffiliationFetcherDelegate() {}
|
C
|
Chrome
| 0 |
CVE-2017-12843
|
https://www.cvedetails.com/cve/CVE-2017-12843/
|
CWE-20
|
https://github.com/cyrusimap/cyrus-imapd/commit/53c4137bd924b954432c6c59da7572c4c5ffa901
|
53c4137bd924b954432c6c59da7572c4c5ffa901
|
imapd: check for isadmin BEFORE parsing sync lines
|
static void cmd_xforever(const char *tag)
{
unsigned n = 1;
int r = 0;
while (!r) {
sleep(1);
prot_printf(imapd_out, "* FOREVER %u\r\n", n++);
prot_flush(imapd_out);
r = cmd_cancelled();
}
prot_printf(imapd_out, "%s OK %s\r\n", tag, error_message(r));
}
|
static void cmd_xforever(const char *tag)
{
unsigned n = 1;
int r = 0;
while (!r) {
sleep(1);
prot_printf(imapd_out, "* FOREVER %u\r\n", n++);
prot_flush(imapd_out);
r = cmd_cancelled();
}
prot_printf(imapd_out, "%s OK %s\r\n", tag, error_message(r));
}
|
C
|
cyrus-imapd
| 0 |
CVE-2019-12098
|
https://www.cvedetails.com/cve/CVE-2019-12098/
|
CWE-320
|
https://github.com/heimdal/heimdal/commit/2f7f3d9960aa6ea21358bdf3687cee5149aa35cf
|
2f7f3d9960aa6ea21358bdf3687cee5149aa35cf
|
CVE-2019-12098: krb5: always confirm PA-PKINIT-KX for anon PKINIT
RFC8062 Section 7 requires verification of the PA-PKINIT-KX key excahnge
when anonymous PKINIT is used. Failure to do so can permit an active
attacker to become a man-in-the-middle.
Introduced by a1ef548600c5bb51cf52a9a9ea12676506ede19f. First tagged
release Heimdal 1.4.0.
CVSS:3.0/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N (4.8)
Change-Id: I6cc1c0c24985936468af08693839ac6c3edda133
Signed-off-by: Jeffrey Altman <[email protected]>
Approved-by: Jeffrey Altman <[email protected]>
(cherry picked from commit 38c797e1ae9b9c8f99ae4aa2e73957679031fd2b)
|
pk_copy_error(krb5_context context,
hx509_context hx509ctx,
int hxret,
const char *fmt,
...)
{
va_list va;
char *s, *f;
int ret;
va_start(va, fmt);
ret = vasprintf(&f, fmt, va);
va_end(va);
if (ret == -1 || f == NULL) {
krb5_clear_error_message(context);
return;
}
s = hx509_get_error_string(hx509ctx, hxret);
if (s == NULL) {
krb5_clear_error_message(context);
free(f);
return;
}
krb5_set_error_message(context, hxret, "%s: %s", f, s);
free(s);
free(f);
}
|
pk_copy_error(krb5_context context,
hx509_context hx509ctx,
int hxret,
const char *fmt,
...)
{
va_list va;
char *s, *f;
int ret;
va_start(va, fmt);
ret = vasprintf(&f, fmt, va);
va_end(va);
if (ret == -1 || f == NULL) {
krb5_clear_error_message(context);
return;
}
s = hx509_get_error_string(hx509ctx, hxret);
if (s == NULL) {
krb5_clear_error_message(context);
free(f);
return;
}
krb5_set_error_message(context, hxret, "%s: %s", f, s);
free(s);
free(f);
}
|
C
|
heimdal
| 0 |
CVE-2019-11599
|
https://www.cvedetails.com/cve/CVE-2019-11599/
|
CWE-362
|
https://github.com/torvalds/linux/commit/04f5866e41fb70690e28397487d8bd8eea7d712a
|
04f5866e41fb70690e28397487d8bd8eea7d712a
|
coredump: fix race condition between mmget_not_zero()/get_task_mm() and core dumping
The core dumping code has always run without holding the mmap_sem for
writing, despite that is the only way to ensure that the entire vma
layout will not change from under it. Only using some signal
serialization on the processes belonging to the mm is not nearly enough.
This was pointed out earlier. For example in Hugh's post from Jul 2017:
https://lkml.kernel.org/r/[email protected]
"Not strictly relevant here, but a related note: I was very surprised
to discover, only quite recently, how handle_mm_fault() may be called
without down_read(mmap_sem) - when core dumping. That seems a
misguided optimization to me, which would also be nice to correct"
In particular because the growsdown and growsup can move the
vm_start/vm_end the various loops the core dump does around the vma will
not be consistent if page faults can happen concurrently.
Pretty much all users calling mmget_not_zero()/get_task_mm() and then
taking the mmap_sem had the potential to introduce unexpected side
effects in the core dumping code.
Adding mmap_sem for writing around the ->core_dump invocation is a
viable long term fix, but it requires removing all copy user and page
faults and to replace them with get_dump_page() for all binary formats
which is not suitable as a short term fix.
For the time being this solution manually covers the places that can
confuse the core dump either by altering the vma layout or the vma flags
while it runs. Once ->core_dump runs under mmap_sem for writing the
function mmget_still_valid() can be dropped.
Allowing mmap_sem protected sections to run in parallel with the
coredump provides some minor parallelism advantage to the swapoff code
(which seems to be safe enough by never mangling any vma field and can
keep doing swapins in parallel to the core dumping) and to some other
corner case.
In order to facilitate the backporting I added "Fixes: 86039bd3b4e6"
however the side effect of this same race condition in /proc/pid/mem
should be reproducible since before 2.6.12-rc2 so I couldn't add any
other "Fixes:" because there's no hash beyond the git genesis commit.
Because find_extend_vma() is the only location outside of the process
context that could modify the "mm" structures under mmap_sem for
reading, by adding the mmget_still_valid() check to it, all other cases
that take the mmap_sem for reading don't need the new check after
mmget_not_zero()/get_task_mm(). The expand_stack() in page fault
context also doesn't need the new check, because all tasks under core
dumping are frozen.
Link: http://lkml.kernel.org/r/[email protected]
Fixes: 86039bd3b4e6 ("userfaultfd: add new syscall to provide memory externalization")
Signed-off-by: Andrea Arcangeli <[email protected]>
Reported-by: Jann Horn <[email protected]>
Suggested-by: Oleg Nesterov <[email protected]>
Acked-by: Peter Xu <[email protected]>
Reviewed-by: Mike Rapoport <[email protected]>
Reviewed-by: Oleg Nesterov <[email protected]>
Reviewed-by: Jann Horn <[email protected]>
Acked-by: Jason Gunthorpe <[email protected]>
Acked-by: Michal Hocko <[email protected]>
Cc: <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
struct file *ib_uverbs_alloc_async_event_file(struct ib_uverbs_file *uverbs_file,
struct ib_device *ib_dev)
{
struct ib_uverbs_async_event_file *ev_file;
struct file *filp;
ev_file = kzalloc(sizeof(*ev_file), GFP_KERNEL);
if (!ev_file)
return ERR_PTR(-ENOMEM);
ib_uverbs_init_event_queue(&ev_file->ev_queue);
ev_file->uverbs_file = uverbs_file;
kref_get(&ev_file->uverbs_file->ref);
kref_init(&ev_file->ref);
filp = anon_inode_getfile("[infinibandevent]", &uverbs_async_event_fops,
ev_file, O_RDONLY);
if (IS_ERR(filp))
goto err_put_refs;
mutex_lock(&uverbs_file->device->lists_mutex);
list_add_tail(&ev_file->list,
&uverbs_file->device->uverbs_events_file_list);
mutex_unlock(&uverbs_file->device->lists_mutex);
WARN_ON(uverbs_file->async_file);
uverbs_file->async_file = ev_file;
kref_get(&uverbs_file->async_file->ref);
INIT_IB_EVENT_HANDLER(&uverbs_file->event_handler,
ib_dev,
ib_uverbs_event_handler);
ib_register_event_handler(&uverbs_file->event_handler);
/* At that point async file stuff was fully set */
return filp;
err_put_refs:
kref_put(&ev_file->uverbs_file->ref, ib_uverbs_release_file);
kref_put(&ev_file->ref, ib_uverbs_release_async_event_file);
return filp;
}
|
struct file *ib_uverbs_alloc_async_event_file(struct ib_uverbs_file *uverbs_file,
struct ib_device *ib_dev)
{
struct ib_uverbs_async_event_file *ev_file;
struct file *filp;
ev_file = kzalloc(sizeof(*ev_file), GFP_KERNEL);
if (!ev_file)
return ERR_PTR(-ENOMEM);
ib_uverbs_init_event_queue(&ev_file->ev_queue);
ev_file->uverbs_file = uverbs_file;
kref_get(&ev_file->uverbs_file->ref);
kref_init(&ev_file->ref);
filp = anon_inode_getfile("[infinibandevent]", &uverbs_async_event_fops,
ev_file, O_RDONLY);
if (IS_ERR(filp))
goto err_put_refs;
mutex_lock(&uverbs_file->device->lists_mutex);
list_add_tail(&ev_file->list,
&uverbs_file->device->uverbs_events_file_list);
mutex_unlock(&uverbs_file->device->lists_mutex);
WARN_ON(uverbs_file->async_file);
uverbs_file->async_file = ev_file;
kref_get(&uverbs_file->async_file->ref);
INIT_IB_EVENT_HANDLER(&uverbs_file->event_handler,
ib_dev,
ib_uverbs_event_handler);
ib_register_event_handler(&uverbs_file->event_handler);
/* At that point async file stuff was fully set */
return filp;
err_put_refs:
kref_put(&ev_file->uverbs_file->ref, ib_uverbs_release_file);
kref_put(&ev_file->ref, ib_uverbs_release_async_event_file);
return filp;
}
|
C
|
linux
| 0 |
Subsets and Splits
CWE-119 Function Changes
This query retrieves specific examples (before and after code changes) of vulnerabilities with CWE-119, providing basic filtering but limited insight.
Vulnerable Code with CWE IDs
The query filters and combines records from multiple datasets to list specific vulnerability details, providing a basic overview of vulnerable functions but lacking deeper insights.
Vulnerable Functions in BigVul
Retrieves details of vulnerable functions from both validation and test datasets where vulnerabilities are present, providing a basic set of data points for further analysis.
Vulnerable Code Functions
This query filters and shows raw data for vulnerable functions, which provides basic insight into specific vulnerabilities but lacks broader analytical value.
Top 100 Vulnerable Functions
Retrieves 100 samples of vulnerabilities from the training dataset, showing the CVE ID, CWE ID, and code changes before and after the vulnerability, which is a basic filtering of vulnerability data.