| CVE ID (string, 13–43 chars, nullable) | CVE Page (string, 45–48 chars, nullable) | CWE ID (string, 90 classes) | codeLink (string, 46–139 chars) | commit_id (string, 6–81 chars) | commit_message (string, 3–13.3k chars, nullable) | func_after (string, 14–241k chars) | func_before (string, 14–241k chars) | lang (string, 3 classes) | project (string, 309 classes) | vul (int8: 0 or 1) |
|---|---|---|---|---|---|---|---|---|---|---|
CVE-2018-16077
|
https://www.cvedetails.com/cve/CVE-2018-16077/
|
CWE-285
|
https://github.com/chromium/chromium/commit/90f878780cce9c4b0475fcea14d91b8f510cce11
|
90f878780cce9c4b0475fcea14d91b8f510cce11
|
Prevent sandboxed documents from reusing the default window
Bug: 377995
Change-Id: Iff66c6d214dfd0cb7ea9c80f83afeedfff703541
Reviewed-on: https://chromium-review.googlesource.com/983558
Commit-Queue: Andy Paicu <[email protected]>
Reviewed-by: Daniel Cheng <[email protected]>
Cr-Commit-Position: refs/heads/master@{#567663}
|
inline float ParentTextZoomFactor(LocalFrame* frame) {
Frame* parent = frame->Tree().Parent();
if (!parent || !parent->IsLocalFrame())
return 1;
return ToLocalFrame(parent)->TextZoomFactor();
}
|
inline float ParentTextZoomFactor(LocalFrame* frame) {
Frame* parent = frame->Tree().Parent();
if (!parent || !parent->IsLocalFrame())
return 1;
return ToLocalFrame(parent)->TextZoomFactor();
}
|
C
|
Chrome
| 0 |
CVE-2014-1713
|
https://www.cvedetails.com/cve/CVE-2014-1713/
|
CWE-399
|
https://github.com/chromium/chromium/commit/f85a87ec670ad0fce9d98d90c9a705b72a288154
|
f85a87ec670ad0fce9d98d90c9a705b72a288154
|
document.location bindings fix
BUG=352374
[email protected]
Review URL: https://codereview.chromium.org/196343011
git-svn-id: svn://svn.chromium.org/blink/trunk@169176 bbb929c8-8fbe-4397-9dbb-9b2b20218538
|
static void nullableLongAttributeAttributeGetter(const v8::PropertyCallbackInfo<v8::Value>& info)
{
TestObject* imp = V8TestObject::toNative(info.Holder());
bool isNull = false;
int jsValue = imp->nullableLongAttribute(isNull);
if (isNull) {
v8SetReturnValueNull(info);
return;
}
v8SetReturnValueInt(info, jsValue);
}
|
static void nullableLongAttributeAttributeGetter(const v8::PropertyCallbackInfo<v8::Value>& info)
{
TestObject* imp = V8TestObject::toNative(info.Holder());
bool isNull = false;
int jsValue = imp->nullableLongAttribute(isNull);
if (isNull) {
v8SetReturnValueNull(info);
return;
}
v8SetReturnValueInt(info, jsValue);
}
|
C
|
Chrome
| 0 |
CVE-2015-6782
|
https://www.cvedetails.com/cve/CVE-2015-6782/
|
CWE-20
|
https://github.com/chromium/chromium/commit/e1e0c4301aaa8228e362f2409dbde2d4d1896866
|
e1e0c4301aaa8228e362f2409dbde2d4d1896866
|
Don't change Document load progress in any page dismissal events.
This can confuse the logic for blocking modal dialogs.
BUG=536652
Review URL: https://codereview.chromium.org/1373113002
Cr-Commit-Position: refs/heads/master@{#351419}
|
void Document::updateStyle(StyleRecalcChange change)
{
TRACE_EVENT_BEGIN0("blink,blink_style", "Document::updateStyle");
unsigned initialResolverAccessCount = styleEngine().resolverAccessCount();
HTMLFrameOwnerElement::UpdateSuspendScope suspendWidgetHierarchyUpdates;
m_lifecycle.advanceTo(DocumentLifecycle::InStyleRecalc);
NthIndexCache nthIndexCache(*this);
if (styleChangeType() >= SubtreeStyleChange)
change = Force;
if (change == Force) {
m_hasNodesWithPlaceholderStyle = false;
RefPtr<ComputedStyle> documentStyle = StyleResolver::styleForDocument(*this);
StyleRecalcChange localChange = ComputedStyle::stylePropagationDiff(documentStyle.get(), layoutView()->style());
if (localChange != NoChange)
layoutView()->setStyle(documentStyle.release());
}
clearNeedsStyleRecalc();
StyleResolver& resolver = ensureStyleResolver();
bool shouldRecordStats;
TRACE_EVENT_CATEGORY_GROUP_ENABLED("blink,blink_style", &shouldRecordStats);
resolver.setStatsEnabled(shouldRecordStats);
if (Element* documentElement = this->documentElement()) {
inheritHtmlAndBodyElementStyles(change);
dirtyElementsForLayerUpdate();
if (documentElement->shouldCallRecalcStyle(change))
documentElement->recalcStyle(change);
while (dirtyElementsForLayerUpdate())
documentElement->recalcStyle(NoChange);
}
view()->recalcOverflowAfterStyleChange();
view()->setFrameTimingRequestsDirty(true);
clearChildNeedsStyleRecalc();
styleEngine().resetCSSFeatureFlags(resolver.ensureUpdatedRuleFeatureSet());
resolver.clearStyleSharingList();
ASSERT(!needsStyleRecalc());
ASSERT(!childNeedsStyleRecalc());
ASSERT(inStyleRecalc());
ASSERT(styleResolver() == &resolver);
m_lifecycle.advanceTo(DocumentLifecycle::StyleClean);
if (shouldRecordStats) {
TRACE_EVENT_END2("blink,blink_style", "Document::updateStyle",
"resolverAccessCount", styleEngine().resolverAccessCount() - initialResolverAccessCount,
"counters", resolver.stats()->toTracedValue());
} else {
TRACE_EVENT_END1("blink,blink_style", "Document::updateStyle",
"resolverAccessCount", styleEngine().resolverAccessCount() - initialResolverAccessCount);
}
}
|
void Document::updateStyle(StyleRecalcChange change)
{
TRACE_EVENT_BEGIN0("blink,blink_style", "Document::updateStyle");
unsigned initialResolverAccessCount = styleEngine().resolverAccessCount();
HTMLFrameOwnerElement::UpdateSuspendScope suspendWidgetHierarchyUpdates;
m_lifecycle.advanceTo(DocumentLifecycle::InStyleRecalc);
NthIndexCache nthIndexCache(*this);
if (styleChangeType() >= SubtreeStyleChange)
change = Force;
if (change == Force) {
m_hasNodesWithPlaceholderStyle = false;
RefPtr<ComputedStyle> documentStyle = StyleResolver::styleForDocument(*this);
StyleRecalcChange localChange = ComputedStyle::stylePropagationDiff(documentStyle.get(), layoutView()->style());
if (localChange != NoChange)
layoutView()->setStyle(documentStyle.release());
}
clearNeedsStyleRecalc();
StyleResolver& resolver = ensureStyleResolver();
bool shouldRecordStats;
TRACE_EVENT_CATEGORY_GROUP_ENABLED("blink,blink_style", &shouldRecordStats);
resolver.setStatsEnabled(shouldRecordStats);
if (Element* documentElement = this->documentElement()) {
inheritHtmlAndBodyElementStyles(change);
dirtyElementsForLayerUpdate();
if (documentElement->shouldCallRecalcStyle(change))
documentElement->recalcStyle(change);
while (dirtyElementsForLayerUpdate())
documentElement->recalcStyle(NoChange);
}
view()->recalcOverflowAfterStyleChange();
view()->setFrameTimingRequestsDirty(true);
clearChildNeedsStyleRecalc();
styleEngine().resetCSSFeatureFlags(resolver.ensureUpdatedRuleFeatureSet());
resolver.clearStyleSharingList();
ASSERT(!needsStyleRecalc());
ASSERT(!childNeedsStyleRecalc());
ASSERT(inStyleRecalc());
ASSERT(styleResolver() == &resolver);
m_lifecycle.advanceTo(DocumentLifecycle::StyleClean);
if (shouldRecordStats) {
TRACE_EVENT_END2("blink,blink_style", "Document::updateStyle",
"resolverAccessCount", styleEngine().resolverAccessCount() - initialResolverAccessCount,
"counters", resolver.stats()->toTracedValue());
} else {
TRACE_EVENT_END1("blink,blink_style", "Document::updateStyle",
"resolverAccessCount", styleEngine().resolverAccessCount() - initialResolverAccessCount);
}
}
|
C
|
Chrome
| 0 |
CVE-2013-0211
|
https://www.cvedetails.com/cve/CVE-2013-0211/
|
CWE-189
|
https://github.com/libarchive/libarchive/commit/22531545514043e04633e1c015c7540b9de9dbe4
|
22531545514043e04633e1c015c7540b9de9dbe4
|
Limit write requests to at most INT_MAX.
This prevents a certain common programming error (passing -1 to write)
from leading to other problems deeper in the library.
|
__archive_write_open_filter(struct archive_write_filter *f)
{
if (f->open == NULL)
return (ARCHIVE_OK);
return (f->open)(f);
}
|
__archive_write_open_filter(struct archive_write_filter *f)
{
if (f->open == NULL)
return (ARCHIVE_OK);
return (f->open)(f);
}
|
C
|
libarchive
| 0 |
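The libarchive commit message in this record explains the idea: cap each write request at INT_MAX so that a caller passing -1 (which wraps to a huge size_t) cannot push an absurd length deeper into the library. Below is a minimal standalone sketch of that clamping idea; `clamp_write_request` is a hypothetical name, not libarchive's actual patched write path.

```c
#include <limits.h>
#include <stddef.h>

/* Illustrative sketch only: cap a write request at INT_MAX so that a
 * caller passing (size_t)-1 cannot smuggle a huge length into lower
 * layers. Hypothetical helper, not libarchive API. */
static size_t clamp_write_request(size_t requested)
{
	if (requested > INT_MAX)
		return (size_t)INT_MAX;	/* write at most INT_MAX bytes per call */
	return requested;
}
```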
CVE-2013-6621
|
https://www.cvedetails.com/cve/CVE-2013-6621/
|
CWE-399
|
https://github.com/chromium/chromium/commit/4039d2fcaab746b6c20017ba9bb51c3a2403a76c
|
4039d2fcaab746b6c20017ba9bb51c3a2403a76c
|
Add logging to figure out which IPC we're failing to deserialize in RenderFrame.
BUG=369553
[email protected]
Review URL: https://codereview.chromium.org/263833020
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@268565 0039d316-1c4b-4281-b951-d872f2087c98
|
bool RenderFrameImpl::runModalBeforeUnloadDialog(
bool is_reload,
const blink::WebString& message) {
if (render_view()->is_swapped_out_)
return true;
if (render_view()->suppress_dialogs_until_swap_out_)
return false;
bool success = false;
base::string16 ignored_result;
render_view()->SendAndRunNestedMessageLoop(
new FrameHostMsg_RunBeforeUnloadConfirm(
routing_id_, frame_->document().url(), message, is_reload,
&success, &ignored_result));
return success;
}
|
bool RenderFrameImpl::runModalBeforeUnloadDialog(
bool is_reload,
const blink::WebString& message) {
if (render_view()->is_swapped_out_)
return true;
if (render_view()->suppress_dialogs_until_swap_out_)
return false;
bool success = false;
base::string16 ignored_result;
render_view()->SendAndRunNestedMessageLoop(
new FrameHostMsg_RunBeforeUnloadConfirm(
routing_id_, frame_->document().url(), message, is_reload,
&success, &ignored_result));
return success;
}
|
C
|
Chrome
| 0 |
CVE-2011-3055
|
https://www.cvedetails.com/cve/CVE-2011-3055/
| null |
https://github.com/chromium/chromium/commit/e9372a1bfd3588a80fcf49aa07321f0971dd6091
|
e9372a1bfd3588a80fcf49aa07321f0971dd6091
|
[V8] Pass Isolate to throwNotEnoughArgumentsError()
https://bugs.webkit.org/show_bug.cgi?id=86983
Reviewed by Adam Barth.
The objective is to pass Isolate around in V8 bindings.
This patch passes Isolate to throwNotEnoughArgumentsError().
No tests. No change in behavior.
* bindings/scripts/CodeGeneratorV8.pm:
(GenerateArgumentsCountCheck):
(GenerateEventConstructorCallback):
* bindings/scripts/test/V8/V8Float64Array.cpp:
(WebCore::Float64ArrayV8Internal::fooCallback):
* bindings/scripts/test/V8/V8TestActiveDOMObject.cpp:
(WebCore::TestActiveDOMObjectV8Internal::excitingFunctionCallback):
(WebCore::TestActiveDOMObjectV8Internal::postMessageCallback):
* bindings/scripts/test/V8/V8TestCustomNamedGetter.cpp:
(WebCore::TestCustomNamedGetterV8Internal::anotherFunctionCallback):
* bindings/scripts/test/V8/V8TestEventConstructor.cpp:
(WebCore::V8TestEventConstructor::constructorCallback):
* bindings/scripts/test/V8/V8TestEventTarget.cpp:
(WebCore::TestEventTargetV8Internal::itemCallback):
(WebCore::TestEventTargetV8Internal::dispatchEventCallback):
* bindings/scripts/test/V8/V8TestInterface.cpp:
(WebCore::TestInterfaceV8Internal::supplementalMethod2Callback):
(WebCore::V8TestInterface::constructorCallback):
* bindings/scripts/test/V8/V8TestMediaQueryListListener.cpp:
(WebCore::TestMediaQueryListListenerV8Internal::methodCallback):
* bindings/scripts/test/V8/V8TestNamedConstructor.cpp:
(WebCore::V8TestNamedConstructorConstructorCallback):
* bindings/scripts/test/V8/V8TestObj.cpp:
(WebCore::TestObjV8Internal::voidMethodWithArgsCallback):
(WebCore::TestObjV8Internal::intMethodWithArgsCallback):
(WebCore::TestObjV8Internal::objMethodWithArgsCallback):
(WebCore::TestObjV8Internal::methodWithSequenceArgCallback):
(WebCore::TestObjV8Internal::methodReturningSequenceCallback):
(WebCore::TestObjV8Internal::methodThatRequiresAllArgsAndThrowsCallback):
(WebCore::TestObjV8Internal::serializedValueCallback):
(WebCore::TestObjV8Internal::idbKeyCallback):
(WebCore::TestObjV8Internal::optionsObjectCallback):
(WebCore::TestObjV8Internal::methodWithNonOptionalArgAndOptionalArgCallback):
(WebCore::TestObjV8Internal::methodWithNonOptionalArgAndTwoOptionalArgsCallback):
(WebCore::TestObjV8Internal::methodWithCallbackArgCallback):
(WebCore::TestObjV8Internal::methodWithNonCallbackArgAndCallbackArgCallback):
(WebCore::TestObjV8Internal::overloadedMethod1Callback):
(WebCore::TestObjV8Internal::overloadedMethod2Callback):
(WebCore::TestObjV8Internal::overloadedMethod3Callback):
(WebCore::TestObjV8Internal::overloadedMethod4Callback):
(WebCore::TestObjV8Internal::overloadedMethod5Callback):
(WebCore::TestObjV8Internal::overloadedMethod6Callback):
(WebCore::TestObjV8Internal::overloadedMethod7Callback):
(WebCore::TestObjV8Internal::overloadedMethod11Callback):
(WebCore::TestObjV8Internal::overloadedMethod12Callback):
(WebCore::TestObjV8Internal::enabledAtRuntimeMethod1Callback):
(WebCore::TestObjV8Internal::enabledAtRuntimeMethod2Callback):
(WebCore::TestObjV8Internal::convert1Callback):
(WebCore::TestObjV8Internal::convert2Callback):
(WebCore::TestObjV8Internal::convert3Callback):
(WebCore::TestObjV8Internal::convert4Callback):
(WebCore::TestObjV8Internal::convert5Callback):
(WebCore::TestObjV8Internal::strictFunctionCallback):
(WebCore::V8TestObj::constructorCallback):
* bindings/scripts/test/V8/V8TestSerializedScriptValueInterface.cpp:
(WebCore::TestSerializedScriptValueInterfaceV8Internal::acceptTransferListCallback):
(WebCore::V8TestSerializedScriptValueInterface::constructorCallback):
* bindings/v8/ScriptController.cpp:
(WebCore::setValueAndClosePopupCallback):
* bindings/v8/V8Proxy.cpp:
(WebCore::V8Proxy::throwNotEnoughArgumentsError):
* bindings/v8/V8Proxy.h:
(V8Proxy):
* bindings/v8/custom/V8AudioContextCustom.cpp:
(WebCore::V8AudioContext::constructorCallback):
* bindings/v8/custom/V8DataViewCustom.cpp:
(WebCore::V8DataView::getInt8Callback):
(WebCore::V8DataView::getUint8Callback):
(WebCore::V8DataView::setInt8Callback):
(WebCore::V8DataView::setUint8Callback):
* bindings/v8/custom/V8DirectoryEntryCustom.cpp:
(WebCore::V8DirectoryEntry::getDirectoryCallback):
(WebCore::V8DirectoryEntry::getFileCallback):
* bindings/v8/custom/V8IntentConstructor.cpp:
(WebCore::V8Intent::constructorCallback):
* bindings/v8/custom/V8SVGLengthCustom.cpp:
(WebCore::V8SVGLength::convertToSpecifiedUnitsCallback):
* bindings/v8/custom/V8WebGLRenderingContextCustom.cpp:
(WebCore::getObjectParameter):
(WebCore::V8WebGLRenderingContext::getAttachedShadersCallback):
(WebCore::V8WebGLRenderingContext::getExtensionCallback):
(WebCore::V8WebGLRenderingContext::getFramebufferAttachmentParameterCallback):
(WebCore::V8WebGLRenderingContext::getParameterCallback):
(WebCore::V8WebGLRenderingContext::getProgramParameterCallback):
(WebCore::V8WebGLRenderingContext::getShaderParameterCallback):
(WebCore::V8WebGLRenderingContext::getUniformCallback):
(WebCore::vertexAttribAndUniformHelperf):
(WebCore::uniformHelperi):
(WebCore::uniformMatrixHelper):
* bindings/v8/custom/V8WebKitMutationObserverCustom.cpp:
(WebCore::V8WebKitMutationObserver::constructorCallback):
(WebCore::V8WebKitMutationObserver::observeCallback):
* bindings/v8/custom/V8WebSocketCustom.cpp:
(WebCore::V8WebSocket::constructorCallback):
(WebCore::V8WebSocket::sendCallback):
* bindings/v8/custom/V8XMLHttpRequestCustom.cpp:
(WebCore::V8XMLHttpRequest::openCallback):
git-svn-id: svn://svn.chromium.org/blink/trunk@117736 bbb929c8-8fbe-4397-9dbb-9b2b20218538
|
static v8::Handle<v8::Value> convert4Callback(const v8::Arguments& args)
{
INC_STATS("DOM.TestObj.convert4");
if (args.Length() < 1)
return V8Proxy::throwNotEnoughArgumentsError(args.GetIsolate());
TestObj* imp = V8TestObj::toNative(args.Holder());
EXCEPTION_BLOCK(d*, , V8d::HasInstance(MAYBE_MISSING_PARAMETER(args, 0, DefaultIsUndefined)) ? V8d::toNative(v8::Handle<v8::Object>::Cast(MAYBE_MISSING_PARAMETER(args, 0, DefaultIsUndefined))) : 0);
imp->convert4();
return v8::Handle<v8::Value>();
}
|
static v8::Handle<v8::Value> convert4Callback(const v8::Arguments& args)
{
INC_STATS("DOM.TestObj.convert4");
if (args.Length() < 1)
return V8Proxy::throwNotEnoughArgumentsError();
TestObj* imp = V8TestObj::toNative(args.Holder());
EXCEPTION_BLOCK(d*, , V8d::HasInstance(MAYBE_MISSING_PARAMETER(args, 0, DefaultIsUndefined)) ? V8d::toNative(v8::Handle<v8::Object>::Cast(MAYBE_MISSING_PARAMETER(args, 0, DefaultIsUndefined))) : 0);
imp->convert4();
return v8::Handle<v8::Value>();
}
|
C
|
Chrome
| 1 |
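The WebKit change in this record is a mechanical refactoring: thread the v8::Isolate through as an explicit argument to throwNotEnoughArgumentsError() instead of having the callee look it up implicitly. A toy C sketch of the before/after shape of that pattern follows; all names are invented stand-ins (struct isolate plays the role of v8::Isolate), not the V8/WebCore API.

```c
#include <stdio.h>

/* Toy model of the refactoring: pass context explicitly rather than
 * consulting ambient state. All names here are invented stand-ins. */
struct isolate { const char *name; };

static struct isolate g_isolate = { "default" };

/* before: the callee reaches for a global */
static void throw_error_implicit(void)
{
	fprintf(stderr, "not enough arguments (%s)\n", g_isolate.name);
}

/* after: the caller supplies the context, as in
 * V8Proxy::throwNotEnoughArgumentsError(args.GetIsolate()) */
static void throw_error_explicit(const struct isolate *iso)
{
	fprintf(stderr, "not enough arguments (%s)\n", iso->name);
}

int main(void)
{
	throw_error_implicit();
	throw_error_explicit(&g_isolate);
	return 0;
}
```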
CVE-2017-5120
|
https://www.cvedetails.com/cve/CVE-2017-5120/
| null |
https://github.com/chromium/chromium/commit/b7277af490d28ac7f802c015bb0ff31395768556
|
b7277af490d28ac7f802c015bb0ff31395768556
|
bindings: Support "attribute FrozenArray<T>?"
Adds a quick hack to support a case of "attribute FrozenArray<T>?".
Bug: 1028047
Change-Id: Ib3cecc4beb6bcc0fb0dbc667aca595454cc90c86
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1933866
Reviewed-by: Hitoshi Yoshida <[email protected]>
Commit-Queue: Yuki Shiino <[email protected]>
Cr-Commit-Position: refs/heads/master@{#718676}
|
static void Float32ArrayAttributeAttributeGetter(const v8::FunctionCallbackInfo<v8::Value>& info) {
v8::Local<v8::Object> holder = info.Holder();
TestObject* impl = V8TestObject::ToImpl(holder);
V8SetReturnValueFast(info, impl->float32ArrayAttribute(), impl);
}
|
static void Float32ArrayAttributeAttributeGetter(const v8::FunctionCallbackInfo<v8::Value>& info) {
v8::Local<v8::Object> holder = info.Holder();
TestObject* impl = V8TestObject::ToImpl(holder);
V8SetReturnValueFast(info, impl->float32ArrayAttribute(), impl);
}
|
C
|
Chrome
| 0 |
null | null | null |
https://github.com/chromium/chromium/commit/8353baf8d1504dbdd4ad7584ff2466de657521cd
|
8353baf8d1504dbdd4ad7584ff2466de657521cd
|
Remove WebFrame::canHaveSecureChild
To simplify the public API, ServiceWorkerNetworkProvider can do the
parent walk itself.
Follow-up to https://crrev.com/ad1850962644e19.
BUG=607543
Review-Url: https://codereview.chromium.org/2082493002
Cr-Commit-Position: refs/heads/master@{#400896}
|
void Document::implicitClose()
{
DCHECK(!inStyleRecalc());
if (processingLoadEvent() || !m_parser)
return;
if (frame() && frame()->navigationScheduler().locationChangePending()) {
suppressLoadEvent();
return;
}
m_loadEventProgress = LoadEventInProgress;
ScriptableDocumentParser* parser = scriptableDocumentParser();
m_wellFormed = parser && parser->wellFormed();
detachParser();
if (frame() && frame()->script().canExecuteScripts(NotAboutToExecuteScript)) {
ImageLoader::dispatchPendingLoadEvents();
ImageLoader::dispatchPendingErrorEvents();
HTMLLinkElement::dispatchPendingLoadEvents();
HTMLStyleElement::dispatchPendingLoadEvents();
}
if (svgExtensions())
accessSVGExtensions().dispatchSVGLoadEventToOutermostSVGElements();
if (this->domWindow())
this->domWindow()->documentWasClosed();
if (frame()) {
frame()->loader().client()->dispatchDidHandleOnloadEvents();
loader()->applicationCacheHost()->stopDeferringEvents();
}
if (!frame()) {
m_loadEventProgress = LoadEventCompleted;
return;
}
if (frame()->navigationScheduler().locationChangePending() && elapsedTime() < cLayoutScheduleThreshold) {
m_loadEventProgress = LoadEventCompleted;
return;
}
if (!localOwner() || (localOwner()->layoutObject() && !localOwner()->layoutObject()->needsLayout())) {
updateStyleAndLayoutTree();
if (view() && layoutView() && (!layoutView()->firstChild() || layoutView()->needsLayout()))
view()->layout();
}
m_loadEventProgress = LoadEventCompleted;
if (frame() && layoutView() && settings()->accessibilityEnabled()) {
if (AXObjectCache* cache = axObjectCache()) {
if (this == &axObjectCacheOwner())
cache->handleLoadComplete(this);
else
cache->handleLayoutComplete(this);
}
}
if (svgExtensions())
accessSVGExtensions().startAnimations();
}
|
void Document::implicitClose()
{
DCHECK(!inStyleRecalc());
if (processingLoadEvent() || !m_parser)
return;
if (frame() && frame()->navigationScheduler().locationChangePending()) {
suppressLoadEvent();
return;
}
m_loadEventProgress = LoadEventInProgress;
ScriptableDocumentParser* parser = scriptableDocumentParser();
m_wellFormed = parser && parser->wellFormed();
detachParser();
if (frame() && frame()->script().canExecuteScripts(NotAboutToExecuteScript)) {
ImageLoader::dispatchPendingLoadEvents();
ImageLoader::dispatchPendingErrorEvents();
HTMLLinkElement::dispatchPendingLoadEvents();
HTMLStyleElement::dispatchPendingLoadEvents();
}
if (svgExtensions())
accessSVGExtensions().dispatchSVGLoadEventToOutermostSVGElements();
if (this->domWindow())
this->domWindow()->documentWasClosed();
if (frame()) {
frame()->loader().client()->dispatchDidHandleOnloadEvents();
loader()->applicationCacheHost()->stopDeferringEvents();
}
if (!frame()) {
m_loadEventProgress = LoadEventCompleted;
return;
}
if (frame()->navigationScheduler().locationChangePending() && elapsedTime() < cLayoutScheduleThreshold) {
m_loadEventProgress = LoadEventCompleted;
return;
}
if (!localOwner() || (localOwner()->layoutObject() && !localOwner()->layoutObject()->needsLayout())) {
updateStyleAndLayoutTree();
if (view() && layoutView() && (!layoutView()->firstChild() || layoutView()->needsLayout()))
view()->layout();
}
m_loadEventProgress = LoadEventCompleted;
if (frame() && layoutView() && settings()->accessibilityEnabled()) {
if (AXObjectCache* cache = axObjectCache()) {
if (this == &axObjectCacheOwner())
cache->handleLoadComplete(this);
else
cache->handleLayoutComplete(this);
}
}
if (svgExtensions())
accessSVGExtensions().startAnimations();
}
|
C
|
Chrome
| 0 |
CVE-2019-11360
|
https://www.cvedetails.com/cve/CVE-2019-11360/
|
CWE-119
|
https://git.netfilter.org/iptables/commit/iptables/xshared.c?id=2ae1099a42e6a0f06de305ca13a842ac83d4683e
|
2ae1099a42e6a0f06de305ca13a842ac83d4683e
| null |
void xs_init_match(struct xtables_match *match)
{
if (match->udata_size != 0) {
/*
* As soon as a subsequent instance of the same match
* is used, e.g. "-m time -m time", the first instance
* is no longer reachable anyway, so we can free udata.
* Same goes for target.
*/
free(match->udata);
match->udata = calloc(1, match->udata_size);
if (match->udata == NULL)
xtables_error(RESOURCE_PROBLEM, "malloc");
}
if (match->init != NULL)
match->init(match->m);
}
|
void xs_init_match(struct xtables_match *match)
{
if (match->udata_size != 0) {
/*
* As soon as a subsequent instance of the same match
* is used, e.g. "-m time -m time", the first instance
* is no longer reachable anyway, so we can free udata.
* Same goes for target.
*/
free(match->udata);
match->udata = calloc(1, match->udata_size);
if (match->udata == NULL)
xtables_error(RESOURCE_PROBLEM, "malloc");
}
if (match->init != NULL)
match->init(match->m);
}
|
C
|
netfilter
| 0 |
CVE-2015-2925
|
https://www.cvedetails.com/cve/CVE-2015-2925/
|
CWE-254
|
https://github.com/torvalds/linux/commit/cde93be45a8a90d8c264c776fab63487b5038a65
|
cde93be45a8a90d8c264c776fab63487b5038a65
|
dcache: Handle escaped paths in prepend_path
A rename can result in a dentry that by walking up d_parent
will never reach it's mnt_root. For lack of a better term
I call this an escaped path.
prepend_path is called by four different functions __d_path,
d_absolute_path, d_path, and getcwd.
__d_path only wants to see paths that are connected to the root it passes
in. So __d_path needs prepend_path to return an error.
d_absolute_path similarly wants to see paths that are connected to
some root. Escaped paths are not connected to any mnt_root so
d_absolute_path needs prepend_path to return an error greater
than 1. So escaped paths will be treated like paths on lazily
unmounted mounts.
getcwd needs to prepend "(unreachable)" so getcwd also needs
prepend_path to return an error.
d_path is the interesting hold out. d_path just wants to print
something, and does not care about the weird cases. Which raises
the question what should be printed?
Given that <escaped_path>/<anything> should result in -ENOENT, I
believe it is desirable for escaped paths to be printed as empty
paths, as there are not really any meaningful path components when
considered from the perspective of a mount tree.
So tweak prepend_path to return an empty path with an new error
code of 3 when it encounters an escaped path.
Signed-off-by: "Eric W. Biederman" <[email protected]>
Signed-off-by: Al Viro <[email protected]>
|
static void __dentry_kill(struct dentry *dentry)
{
struct dentry *parent = NULL;
bool can_free = true;
if (!IS_ROOT(dentry))
parent = dentry->d_parent;
/*
* The dentry is now unrecoverably dead to the world.
*/
lockref_mark_dead(&dentry->d_lockref);
/*
* inform the fs via d_prune that this dentry is about to be
* unhashed and destroyed.
*/
if (dentry->d_flags & DCACHE_OP_PRUNE)
dentry->d_op->d_prune(dentry);
if (dentry->d_flags & DCACHE_LRU_LIST) {
if (!(dentry->d_flags & DCACHE_SHRINK_LIST))
d_lru_del(dentry);
}
/* if it was on the hash then remove it */
__d_drop(dentry);
__list_del_entry(&dentry->d_child);
/*
* Inform d_walk() that we are no longer attached to the
* dentry tree
*/
dentry->d_flags |= DCACHE_DENTRY_KILLED;
if (parent)
spin_unlock(&parent->d_lock);
dentry_iput(dentry);
/*
* dentry_iput drops the locks, at which point nobody (except
* transient RCU lookups) can reach this dentry.
*/
BUG_ON(dentry->d_lockref.count > 0);
this_cpu_dec(nr_dentry);
if (dentry->d_op && dentry->d_op->d_release)
dentry->d_op->d_release(dentry);
spin_lock(&dentry->d_lock);
if (dentry->d_flags & DCACHE_SHRINK_LIST) {
dentry->d_flags |= DCACHE_MAY_FREE;
can_free = false;
}
spin_unlock(&dentry->d_lock);
if (likely(can_free))
dentry_free(dentry);
}
|
static void __dentry_kill(struct dentry *dentry)
{
struct dentry *parent = NULL;
bool can_free = true;
if (!IS_ROOT(dentry))
parent = dentry->d_parent;
/*
* The dentry is now unrecoverably dead to the world.
*/
lockref_mark_dead(&dentry->d_lockref);
/*
* inform the fs via d_prune that this dentry is about to be
* unhashed and destroyed.
*/
if (dentry->d_flags & DCACHE_OP_PRUNE)
dentry->d_op->d_prune(dentry);
if (dentry->d_flags & DCACHE_LRU_LIST) {
if (!(dentry->d_flags & DCACHE_SHRINK_LIST))
d_lru_del(dentry);
}
/* if it was on the hash then remove it */
__d_drop(dentry);
__list_del_entry(&dentry->d_child);
/*
* Inform d_walk() that we are no longer attached to the
* dentry tree
*/
dentry->d_flags |= DCACHE_DENTRY_KILLED;
if (parent)
spin_unlock(&parent->d_lock);
dentry_iput(dentry);
/*
* dentry_iput drops the locks, at which point nobody (except
* transient RCU lookups) can reach this dentry.
*/
BUG_ON(dentry->d_lockref.count > 0);
this_cpu_dec(nr_dentry);
if (dentry->d_op && dentry->d_op->d_release)
dentry->d_op->d_release(dentry);
spin_lock(&dentry->d_lock);
if (dentry->d_flags & DCACHE_SHRINK_LIST) {
dentry->d_flags |= DCACHE_MAY_FREE;
can_free = false;
}
spin_unlock(&dentry->d_lock);
if (likely(can_free))
dentry_free(dentry);
}
|
C
|
linux
| 0 |
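The prepend_path commit message in this record hinges on one observation: after a cross-directory rename, walking d_parent from a dentry may never reach the mount root, and each caller needs a distinct way to report that. Here is a self-contained userspace model of the reachability walk; struct node and reaches_root() are illustrative, not kernel API.

```c
#include <stdbool.h>
#include <stddef.h>

/* Userspace model of the "escaped path" check: walk parent pointers and
 * report whether the expected root is ever reached. In the kernel,
 * prepend_path signals this case with a new positive return code (3)
 * and an empty path. These names are illustrative, not kernel API. */
struct node {
	struct node *parent;	/* a root is its own parent */
};

static bool reaches_root(const struct node *n, const struct node *root)
{
	while (n != NULL && n != root && n->parent != n)
		n = n->parent;
	return n == root;
}
```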
CVE-2018-16080
|
https://www.cvedetails.com/cve/CVE-2018-16080/
|
CWE-20
|
https://github.com/chromium/chromium/commit/c552cd7b8a0862f6b3c8c6a07f98bda3721101eb
|
c552cd7b8a0862f6b3c8c6a07f98bda3721101eb
|
Mac: turn popups into new tabs while in fullscreen.
It's platform convention to show popups as new tabs while in
non-HTML5 fullscreen. (Popups cause tabs to lose HTML5 fullscreen.)
This was implemented for Cocoa in a BrowserWindow override, but
it makes sense to just stick it into Browser and remove a ton
of override code put in just to support this.
BUG=858929, 868416
TEST=as in bugs
Change-Id: I43471f242813ec1159d9c690bab73dab3e610b7d
Reviewed-on: https://chromium-review.googlesource.com/1153455
Reviewed-by: Sidney San Martín <[email protected]>
Commit-Queue: Avi Drissman <[email protected]>
Cr-Commit-Position: refs/heads/master@{#578755}
|
void BrowserView::ShowImeWarningBubble(
const extensions::Extension* extension,
const base::Callback<void(ImeWarningBubblePermissionStatus status)>&
callback) {
ImeWarningBubbleView::ShowBubble(extension, this, callback);
}
|
void BrowserView::ShowImeWarningBubble(
const extensions::Extension* extension,
const base::Callback<void(ImeWarningBubblePermissionStatus status)>&
callback) {
ImeWarningBubbleView::ShowBubble(extension, this, callback);
}
|
C
|
Chrome
| 0 |
CVE-2018-16541
|
https://www.cvedetails.com/cve/CVE-2018-16541/
|
CWE-416
|
http://git.ghostscript.com/?p=ghostpdl.git;a=commit;h=241d91112771a6104de10b3948c3f350d6690c1d
|
241d91112771a6104de10b3948c3f350d6690c1d
| null |
gs_to_exit(const gs_memory_t *mem, int exit_status)
{
return gs_to_exit_with_code(mem, exit_status, 0);
}
|
gs_to_exit(const gs_memory_t *mem, int exit_status)
{
return gs_to_exit_with_code(mem, exit_status, 0);
}
|
C
|
ghostscript
| 0 |
CVE-2016-8654
|
https://www.cvedetails.com/cve/CVE-2016-8654/
|
CWE-119
|
https://github.com/mdadams/jasper/commit/4a59cfaf9ab3d48fca4a15c0d2674bf7138e3d1a
|
4a59cfaf9ab3d48fca4a15c0d2674bf7138e3d1a
|
Fixed a buffer overrun problem in the QMFB code in the JPC codec
that was caused by a buffer being allocated with a size that was too small
in some cases.
Added a new regression test case.
|
void jpc_ft_fwdlift_colgrp(jpc_fix_t *a, int numrows, int stride, int parity)
{
jpc_fix_t *lptr;
jpc_fix_t *hptr;
register jpc_fix_t *lptr2;
register jpc_fix_t *hptr2;
register int n;
register int i;
int llen;
llen = (numrows + 1 - parity) >> 1;
if (numrows > 1) {
/* Apply the first lifting step. */
lptr = &a[0];
hptr = &a[llen * stride];
if (parity) {
lptr2 = lptr;
hptr2 = hptr;
for (i = 0; i < JPC_QMFB_COLGRPSIZE; ++i) {
hptr2[0] -= lptr2[0];
++hptr2;
++lptr2;
}
hptr += stride;
}
n = numrows - llen - parity - (parity == (numrows & 1));
while (n-- > 0) {
lptr2 = lptr;
hptr2 = hptr;
for (i = 0; i < JPC_QMFB_COLGRPSIZE; ++i) {
hptr2[0] -= jpc_fix_asr(lptr2[0] + lptr2[stride], 1);
++lptr2;
++hptr2;
}
hptr += stride;
lptr += stride;
}
if (parity == (numrows & 1)) {
lptr2 = lptr;
hptr2 = hptr;
for (i = 0; i < JPC_QMFB_COLGRPSIZE; ++i) {
hptr2[0] -= lptr2[0];
++lptr2;
++hptr2;
}
}
/* Apply the second lifting step. */
lptr = &a[0];
hptr = &a[llen * stride];
if (!parity) {
lptr2 = lptr;
hptr2 = hptr;
for (i = 0; i < JPC_QMFB_COLGRPSIZE; ++i) {
lptr2[0] += jpc_fix_asr(hptr2[0] + 1, 1);
++lptr2;
++hptr2;
}
lptr += stride;
}
n = llen - (!parity) - (parity != (numrows & 1));
while (n-- > 0) {
lptr2 = lptr;
hptr2 = hptr;
for (i = 0; i < JPC_QMFB_COLGRPSIZE; ++i) {
lptr2[0] += jpc_fix_asr(hptr2[0] + hptr2[stride] + 2, 2);
++lptr2;
++hptr2;
}
lptr += stride;
hptr += stride;
}
if (parity != (numrows & 1)) {
lptr2 = lptr;
hptr2 = hptr;
for (i = 0; i < JPC_QMFB_COLGRPSIZE; ++i) {
lptr2[0] += jpc_fix_asr(hptr2[0] + 1, 1);
++lptr2;
++hptr2;
}
}
} else {
if (parity) {
lptr2 = &a[0];
for (i = 0; i < JPC_QMFB_COLGRPSIZE; ++i) {
lptr2[0] = jpc_fix_asl(lptr2[0], 1);
++lptr2;
}
}
}
}
|
void jpc_ft_fwdlift_colgrp(jpc_fix_t *a, int numrows, int stride, int parity)
{
jpc_fix_t *lptr;
jpc_fix_t *hptr;
register jpc_fix_t *lptr2;
register jpc_fix_t *hptr2;
register int n;
register int i;
int llen;
llen = (numrows + 1 - parity) >> 1;
if (numrows > 1) {
/* Apply the first lifting step. */
lptr = &a[0];
hptr = &a[llen * stride];
if (parity) {
lptr2 = lptr;
hptr2 = hptr;
for (i = 0; i < JPC_QMFB_COLGRPSIZE; ++i) {
hptr2[0] -= lptr2[0];
++hptr2;
++lptr2;
}
hptr += stride;
}
n = numrows - llen - parity - (parity == (numrows & 1));
while (n-- > 0) {
lptr2 = lptr;
hptr2 = hptr;
for (i = 0; i < JPC_QMFB_COLGRPSIZE; ++i) {
hptr2[0] -= jpc_fix_asr(lptr2[0] + lptr2[stride], 1);
++lptr2;
++hptr2;
}
hptr += stride;
lptr += stride;
}
if (parity == (numrows & 1)) {
lptr2 = lptr;
hptr2 = hptr;
for (i = 0; i < JPC_QMFB_COLGRPSIZE; ++i) {
hptr2[0] -= lptr2[0];
++lptr2;
++hptr2;
}
}
/* Apply the second lifting step. */
lptr = &a[0];
hptr = &a[llen * stride];
if (!parity) {
lptr2 = lptr;
hptr2 = hptr;
for (i = 0; i < JPC_QMFB_COLGRPSIZE; ++i) {
lptr2[0] += jpc_fix_asr(hptr2[0] + 1, 1);
++lptr2;
++hptr2;
}
lptr += stride;
}
n = llen - (!parity) - (parity != (numrows & 1));
while (n-- > 0) {
lptr2 = lptr;
hptr2 = hptr;
for (i = 0; i < JPC_QMFB_COLGRPSIZE; ++i) {
lptr2[0] += jpc_fix_asr(hptr2[0] + hptr2[stride] + 2, 2);
++lptr2;
++hptr2;
}
lptr += stride;
hptr += stride;
}
if (parity != (numrows & 1)) {
lptr2 = lptr;
hptr2 = hptr;
for (i = 0; i < JPC_QMFB_COLGRPSIZE; ++i) {
lptr2[0] += jpc_fix_asr(hptr2[0] + 1, 1);
++lptr2;
++hptr2;
}
}
} else {
if (parity) {
lptr2 = &a[0];
for (i = 0; i < JPC_QMFB_COLGRPSIZE; ++i) {
lptr2[0] = jpc_fix_asl(lptr2[0], 1);
++lptr2;
}
}
}
}
|
C
|
jasper
| 0 |
CVE-2016-10269
|
https://www.cvedetails.com/cve/CVE-2016-10269/
|
CWE-125
|
https://github.com/vadz/libtiff/commit/1044b43637fa7f70fb19b93593777b78bd20da86
|
1044b43637fa7f70fb19b93593777b78bd20da86
|
* libtiff/tif_pixarlog.c, libtiff/tif_luv.c: fix heap-based buffer
overflow on generation of PixarLog / LUV compressed files, with
ColorMap, TransferFunction attached and nasty plays with bitspersample.
The fix for LUV has not been tested, but suffers from the same kind
of issue as PixarLog.
Reported by Agostino Sarubbo.
Fixes http://bugzilla.maptools.org/show_bug.cgi?id=2604
|
LogL10toY(int p10) /* compute luminance from 10-bit LogL */
{
if (p10 == 0)
return (0.);
return (exp(M_LN2/64.*(p10+.5) - M_LN2*12.));
}
|
LogL10toY(int p10) /* compute luminance from 10-bit LogL */
{
if (p10 == 0)
return (0.);
return (exp(M_LN2/64.*(p10+.5) - M_LN2*12.));
}
|
C
|
libtiff
| 0 |
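The libtiff entry above describes a classic mismatch: a buffer sized from one header field while the copy length is derived from an attacker-influenced bits-per-sample value. The sketch below shows the defensive shape of a fix, validating the implied table size against the destination's real capacity before copying; names and layout are hypothetical, not libtiff internals.

```c
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

/* Hypothetical sketch: check the table size implied by bits-per-sample
 * against the destination buffer's capacity before copying, instead of
 * trusting the field. Not the actual libtiff fix. */
static bool copy_transfer_table(uint16_t *dst, size_t dst_entries,
				const uint16_t *src, unsigned bits_per_sample)
{
	size_t entries;

	if (bits_per_sample == 0 || bits_per_sample > 16)
		return false;			/* reject nonsense input */
	entries = (size_t)1 << bits_per_sample;
	if (entries > dst_entries)
		return false;			/* copy would overflow dst */
	memcpy(dst, src, entries * sizeof(*dst));
	return true;
}
```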
CVE-2013-0292
|
https://www.cvedetails.com/cve/CVE-2013-0292/
|
CWE-20
|
https://cgit.freedesktop.org/dbus/dbus-glib/commit/?id=166978a09cf5edff4028e670b6074215a4c75eca
|
166978a09cf5edff4028e670b6074215a4c75eca
| null |
tristring_equal (gconstpointer a,
gconstpointer b)
{
const char *ap = a;
const char *bp = b;
size_t len;
if (!strequal_len (ap, bp, &len))
return FALSE;
ap += len + 1;
bp += len + 1;
if (!strequal_len (ap, bp, &len))
return FALSE;
ap += len + 1;
bp += len + 1;
if (strcmp (ap, bp) != 0)
return FALSE;
return TRUE;
}
|
tristring_equal (gconstpointer a,
gconstpointer b)
{
const char *ap = a;
const char *bp = b;
size_t len;
if (!strequal_len (ap, bp, &len))
return FALSE;
ap += len + 1;
bp += len + 1;
if (!strequal_len (ap, bp, &len))
return FALSE;
ap += len + 1;
bp += len + 1;
if (strcmp (ap, bp) != 0)
return FALSE;
return TRUE;
}
|
C
|
dbus
| 0 |
CVE-2015-8952
|
https://www.cvedetails.com/cve/CVE-2015-8952/
|
CWE-19
|
https://github.com/torvalds/linux/commit/be0726d33cb8f411945884664924bed3cb8c70ee
|
be0726d33cb8f411945884664924bed3cb8c70ee
|
ext2: convert to mbcache2
The conversion is generally straightforward. We convert the filesystem from
a global cache to a per-fs one. Similarly to ext4, the tricky part is that
xattr block corresponding to found mbcache entry can get freed before we
get buffer lock for that block. So we have to check whether the entry is
still valid after getting the buffer lock.
Signed-off-by: Jan Kara <[email protected]>
Signed-off-by: Theodore Ts'o <[email protected]>
|
static int ext2_fill_super(struct super_block *sb, void *data, int silent)
{
struct buffer_head * bh;
struct ext2_sb_info * sbi;
struct ext2_super_block * es;
struct inode *root;
unsigned long block;
unsigned long sb_block = get_sb_block(&data);
unsigned long logic_sb_block;
unsigned long offset = 0;
unsigned long def_mount_opts;
long ret = -EINVAL;
int blocksize = BLOCK_SIZE;
int db_count;
int i, j;
__le32 features;
int err;
err = -ENOMEM;
sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
if (!sbi)
goto failed;
sbi->s_blockgroup_lock =
kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL);
if (!sbi->s_blockgroup_lock) {
kfree(sbi);
goto failed;
}
sb->s_fs_info = sbi;
sbi->s_sb_block = sb_block;
spin_lock_init(&sbi->s_lock);
/*
* See what the current blocksize for the device is, and
* use that as the blocksize. Otherwise (or if the blocksize
* is smaller than the default) use the default.
* This is important for devices that have a hardware
* sectorsize that is larger than the default.
*/
blocksize = sb_min_blocksize(sb, BLOCK_SIZE);
if (!blocksize) {
ext2_msg(sb, KERN_ERR, "error: unable to set blocksize");
goto failed_sbi;
}
/*
* If the superblock doesn't start on a hardware sector boundary,
* calculate the offset.
*/
if (blocksize != BLOCK_SIZE) {
logic_sb_block = (sb_block*BLOCK_SIZE) / blocksize;
offset = (sb_block*BLOCK_SIZE) % blocksize;
} else {
logic_sb_block = sb_block;
}
if (!(bh = sb_bread(sb, logic_sb_block))) {
ext2_msg(sb, KERN_ERR, "error: unable to read superblock");
goto failed_sbi;
}
/*
* Note: s_es must be initialized as soon as possible because
* some ext2 macro-instructions depend on its value
*/
es = (struct ext2_super_block *) (((char *)bh->b_data) + offset);
sbi->s_es = es;
sb->s_magic = le16_to_cpu(es->s_magic);
if (sb->s_magic != EXT2_SUPER_MAGIC)
goto cantfind_ext2;
/* Set defaults before we parse the mount options */
def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
if (def_mount_opts & EXT2_DEFM_DEBUG)
set_opt(sbi->s_mount_opt, DEBUG);
if (def_mount_opts & EXT2_DEFM_BSDGROUPS)
set_opt(sbi->s_mount_opt, GRPID);
if (def_mount_opts & EXT2_DEFM_UID16)
set_opt(sbi->s_mount_opt, NO_UID32);
#ifdef CONFIG_EXT2_FS_XATTR
if (def_mount_opts & EXT2_DEFM_XATTR_USER)
set_opt(sbi->s_mount_opt, XATTR_USER);
#endif
#ifdef CONFIG_EXT2_FS_POSIX_ACL
if (def_mount_opts & EXT2_DEFM_ACL)
set_opt(sbi->s_mount_opt, POSIX_ACL);
#endif
if (le16_to_cpu(sbi->s_es->s_errors) == EXT2_ERRORS_PANIC)
set_opt(sbi->s_mount_opt, ERRORS_PANIC);
else if (le16_to_cpu(sbi->s_es->s_errors) == EXT2_ERRORS_CONTINUE)
set_opt(sbi->s_mount_opt, ERRORS_CONT);
else
set_opt(sbi->s_mount_opt, ERRORS_RO);
sbi->s_resuid = make_kuid(&init_user_ns, le16_to_cpu(es->s_def_resuid));
sbi->s_resgid = make_kgid(&init_user_ns, le16_to_cpu(es->s_def_resgid));
set_opt(sbi->s_mount_opt, RESERVATION);
if (!parse_options((char *) data, sb))
goto failed_mount;
sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
((EXT2_SB(sb)->s_mount_opt & EXT2_MOUNT_POSIX_ACL) ?
MS_POSIXACL : 0);
sb->s_iflags |= SB_I_CGROUPWB;
if (le32_to_cpu(es->s_rev_level) == EXT2_GOOD_OLD_REV &&
(EXT2_HAS_COMPAT_FEATURE(sb, ~0U) ||
EXT2_HAS_RO_COMPAT_FEATURE(sb, ~0U) ||
EXT2_HAS_INCOMPAT_FEATURE(sb, ~0U)))
ext2_msg(sb, KERN_WARNING,
"warning: feature flags set on rev 0 fs, "
"running e2fsck is recommended");
/*
* Check feature flags regardless of the revision level, since we
* previously didn't change the revision level when setting the flags,
* so there is a chance incompat flags are set on a rev 0 filesystem.
*/
features = EXT2_HAS_INCOMPAT_FEATURE(sb, ~EXT2_FEATURE_INCOMPAT_SUPP);
if (features) {
ext2_msg(sb, KERN_ERR, "error: couldn't mount because of "
"unsupported optional features (%x)",
le32_to_cpu(features));
goto failed_mount;
}
if (!(sb->s_flags & MS_RDONLY) &&
(features = EXT2_HAS_RO_COMPAT_FEATURE(sb, ~EXT2_FEATURE_RO_COMPAT_SUPP))){
ext2_msg(sb, KERN_ERR, "error: couldn't mount RDWR because of "
"unsupported optional features (%x)",
le32_to_cpu(features));
goto failed_mount;
}
blocksize = BLOCK_SIZE << le32_to_cpu(sbi->s_es->s_log_block_size);
if (sbi->s_mount_opt & EXT2_MOUNT_DAX) {
if (blocksize != PAGE_SIZE) {
ext2_msg(sb, KERN_ERR,
"error: unsupported blocksize for dax");
goto failed_mount;
}
if (!sb->s_bdev->bd_disk->fops->direct_access) {
ext2_msg(sb, KERN_ERR,
"error: device does not support dax");
goto failed_mount;
}
}
/* If the blocksize doesn't match, re-read the thing.. */
if (sb->s_blocksize != blocksize) {
brelse(bh);
if (!sb_set_blocksize(sb, blocksize)) {
ext2_msg(sb, KERN_ERR,
"error: bad blocksize %d", blocksize);
goto failed_sbi;
}
logic_sb_block = (sb_block*BLOCK_SIZE) / blocksize;
offset = (sb_block*BLOCK_SIZE) % blocksize;
bh = sb_bread(sb, logic_sb_block);
if(!bh) {
ext2_msg(sb, KERN_ERR, "error: couldn't read"
"superblock on 2nd try");
goto failed_sbi;
}
es = (struct ext2_super_block *) (((char *)bh->b_data) + offset);
sbi->s_es = es;
if (es->s_magic != cpu_to_le16(EXT2_SUPER_MAGIC)) {
ext2_msg(sb, KERN_ERR, "error: magic mismatch");
goto failed_mount;
}
}
sb->s_maxbytes = ext2_max_size(sb->s_blocksize_bits);
sb->s_max_links = EXT2_LINK_MAX;
if (le32_to_cpu(es->s_rev_level) == EXT2_GOOD_OLD_REV) {
sbi->s_inode_size = EXT2_GOOD_OLD_INODE_SIZE;
sbi->s_first_ino = EXT2_GOOD_OLD_FIRST_INO;
} else {
sbi->s_inode_size = le16_to_cpu(es->s_inode_size);
sbi->s_first_ino = le32_to_cpu(es->s_first_ino);
if ((sbi->s_inode_size < EXT2_GOOD_OLD_INODE_SIZE) ||
!is_power_of_2(sbi->s_inode_size) ||
(sbi->s_inode_size > blocksize)) {
ext2_msg(sb, KERN_ERR,
"error: unsupported inode size: %d",
sbi->s_inode_size);
goto failed_mount;
}
}
sbi->s_frag_size = EXT2_MIN_FRAG_SIZE <<
le32_to_cpu(es->s_log_frag_size);
if (sbi->s_frag_size == 0)
goto cantfind_ext2;
sbi->s_frags_per_block = sb->s_blocksize / sbi->s_frag_size;
sbi->s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group);
sbi->s_frags_per_group = le32_to_cpu(es->s_frags_per_group);
sbi->s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group);
if (EXT2_INODE_SIZE(sb) == 0)
goto cantfind_ext2;
sbi->s_inodes_per_block = sb->s_blocksize / EXT2_INODE_SIZE(sb);
if (sbi->s_inodes_per_block == 0 || sbi->s_inodes_per_group == 0)
goto cantfind_ext2;
sbi->s_itb_per_group = sbi->s_inodes_per_group /
sbi->s_inodes_per_block;
sbi->s_desc_per_block = sb->s_blocksize /
sizeof (struct ext2_group_desc);
sbi->s_sbh = bh;
sbi->s_mount_state = le16_to_cpu(es->s_state);
sbi->s_addr_per_block_bits =
ilog2 (EXT2_ADDR_PER_BLOCK(sb));
sbi->s_desc_per_block_bits =
ilog2 (EXT2_DESC_PER_BLOCK(sb));
if (sb->s_magic != EXT2_SUPER_MAGIC)
goto cantfind_ext2;
if (sb->s_blocksize != bh->b_size) {
if (!silent)
ext2_msg(sb, KERN_ERR, "error: unsupported blocksize");
goto failed_mount;
}
if (sb->s_blocksize != sbi->s_frag_size) {
ext2_msg(sb, KERN_ERR,
"error: fragsize %lu != blocksize %lu"
"(not supported yet)",
sbi->s_frag_size, sb->s_blocksize);
goto failed_mount;
}
if (sbi->s_blocks_per_group > sb->s_blocksize * 8) {
ext2_msg(sb, KERN_ERR,
"error: #blocks per group too big: %lu",
sbi->s_blocks_per_group);
goto failed_mount;
}
if (sbi->s_frags_per_group > sb->s_blocksize * 8) {
ext2_msg(sb, KERN_ERR,
"error: #fragments per group too big: %lu",
sbi->s_frags_per_group);
goto failed_mount;
}
if (sbi->s_inodes_per_group > sb->s_blocksize * 8) {
ext2_msg(sb, KERN_ERR,
"error: #inodes per group too big: %lu",
sbi->s_inodes_per_group);
goto failed_mount;
}
if (EXT2_BLOCKS_PER_GROUP(sb) == 0)
goto cantfind_ext2;
sbi->s_groups_count = ((le32_to_cpu(es->s_blocks_count) -
le32_to_cpu(es->s_first_data_block) - 1)
/ EXT2_BLOCKS_PER_GROUP(sb)) + 1;
db_count = (sbi->s_groups_count + EXT2_DESC_PER_BLOCK(sb) - 1) /
EXT2_DESC_PER_BLOCK(sb);
sbi->s_group_desc = kmalloc (db_count * sizeof (struct buffer_head *), GFP_KERNEL);
if (sbi->s_group_desc == NULL) {
ext2_msg(sb, KERN_ERR, "error: not enough memory");
goto failed_mount;
}
bgl_lock_init(sbi->s_blockgroup_lock);
sbi->s_debts = kcalloc(sbi->s_groups_count, sizeof(*sbi->s_debts), GFP_KERNEL);
if (!sbi->s_debts) {
ext2_msg(sb, KERN_ERR, "error: not enough memory");
goto failed_mount_group_desc;
}
for (i = 0; i < db_count; i++) {
block = descriptor_loc(sb, logic_sb_block, i);
sbi->s_group_desc[i] = sb_bread(sb, block);
if (!sbi->s_group_desc[i]) {
for (j = 0; j < i; j++)
brelse (sbi->s_group_desc[j]);
ext2_msg(sb, KERN_ERR,
"error: unable to read group descriptors");
goto failed_mount_group_desc;
}
}
if (!ext2_check_descriptors (sb)) {
ext2_msg(sb, KERN_ERR, "group descriptors corrupted");
goto failed_mount2;
}
sbi->s_gdb_count = db_count;
get_random_bytes(&sbi->s_next_generation, sizeof(u32));
spin_lock_init(&sbi->s_next_gen_lock);
/* per fileystem reservation list head & lock */
spin_lock_init(&sbi->s_rsv_window_lock);
sbi->s_rsv_window_root = RB_ROOT;
/*
* Add a single, static dummy reservation to the start of the
* reservation window list --- it gives us a placeholder for
* append-at-start-of-list which makes the allocation logic
* _much_ simpler.
*/
sbi->s_rsv_window_head.rsv_start = EXT2_RESERVE_WINDOW_NOT_ALLOCATED;
sbi->s_rsv_window_head.rsv_end = EXT2_RESERVE_WINDOW_NOT_ALLOCATED;
sbi->s_rsv_window_head.rsv_alloc_hit = 0;
sbi->s_rsv_window_head.rsv_goal_size = 0;
ext2_rsv_window_add(sb, &sbi->s_rsv_window_head);
err = percpu_counter_init(&sbi->s_freeblocks_counter,
ext2_count_free_blocks(sb), GFP_KERNEL);
if (!err) {
err = percpu_counter_init(&sbi->s_freeinodes_counter,
ext2_count_free_inodes(sb), GFP_KERNEL);
}
if (!err) {
err = percpu_counter_init(&sbi->s_dirs_counter,
ext2_count_dirs(sb), GFP_KERNEL);
}
if (err) {
ext2_msg(sb, KERN_ERR, "error: insufficient memory");
goto failed_mount3;
}
#ifdef CONFIG_EXT2_FS_XATTR
sbi->s_mb_cache = ext2_xattr_create_cache();
if (!sbi->s_mb_cache) {
ext2_msg(sb, KERN_ERR, "Failed to create an mb_cache");
goto failed_mount3;
}
#endif
/*
* set up enough so that it can read an inode
*/
sb->s_op = &ext2_sops;
sb->s_export_op = &ext2_export_ops;
sb->s_xattr = ext2_xattr_handlers;
#ifdef CONFIG_QUOTA
sb->dq_op = &dquot_operations;
sb->s_qcop = &dquot_quotactl_ops;
sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP;
#endif
root = ext2_iget(sb, EXT2_ROOT_INO);
if (IS_ERR(root)) {
ret = PTR_ERR(root);
goto failed_mount3;
}
if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
iput(root);
ext2_msg(sb, KERN_ERR, "error: corrupt root inode, run e2fsck");
goto failed_mount3;
}
sb->s_root = d_make_root(root);
if (!sb->s_root) {
ext2_msg(sb, KERN_ERR, "error: get root inode failed");
ret = -ENOMEM;
goto failed_mount3;
}
if (EXT2_HAS_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_HAS_JOURNAL))
ext2_msg(sb, KERN_WARNING,
"warning: mounting ext3 filesystem as ext2");
if (ext2_setup_super (sb, es, sb->s_flags & MS_RDONLY))
sb->s_flags |= MS_RDONLY;
ext2_write_super(sb);
return 0;
cantfind_ext2:
if (!silent)
ext2_msg(sb, KERN_ERR,
"error: can't find an ext2 filesystem on dev %s.",
sb->s_id);
goto failed_mount;
failed_mount3:
if (sbi->s_mb_cache)
ext2_xattr_destroy_cache(sbi->s_mb_cache);
percpu_counter_destroy(&sbi->s_freeblocks_counter);
percpu_counter_destroy(&sbi->s_freeinodes_counter);
percpu_counter_destroy(&sbi->s_dirs_counter);
failed_mount2:
for (i = 0; i < db_count; i++)
brelse(sbi->s_group_desc[i]);
failed_mount_group_desc:
kfree(sbi->s_group_desc);
kfree(sbi->s_debts);
failed_mount:
brelse(bh);
failed_sbi:
sb->s_fs_info = NULL;
kfree(sbi->s_blockgroup_lock);
kfree(sbi);
failed:
return ret;
}
|
static int ext2_fill_super(struct super_block *sb, void *data, int silent)
{
struct buffer_head * bh;
struct ext2_sb_info * sbi;
struct ext2_super_block * es;
struct inode *root;
unsigned long block;
unsigned long sb_block = get_sb_block(&data);
unsigned long logic_sb_block;
unsigned long offset = 0;
unsigned long def_mount_opts;
long ret = -EINVAL;
int blocksize = BLOCK_SIZE;
int db_count;
int i, j;
__le32 features;
int err;
err = -ENOMEM;
sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
if (!sbi)
goto failed;
sbi->s_blockgroup_lock =
kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL);
if (!sbi->s_blockgroup_lock) {
kfree(sbi);
goto failed;
}
sb->s_fs_info = sbi;
sbi->s_sb_block = sb_block;
spin_lock_init(&sbi->s_lock);
/*
* See what the current blocksize for the device is, and
* use that as the blocksize. Otherwise (or if the blocksize
* is smaller than the default) use the default.
* This is important for devices that have a hardware
* sectorsize that is larger than the default.
*/
blocksize = sb_min_blocksize(sb, BLOCK_SIZE);
if (!blocksize) {
ext2_msg(sb, KERN_ERR, "error: unable to set blocksize");
goto failed_sbi;
}
/*
* If the superblock doesn't start on a hardware sector boundary,
* calculate the offset.
*/
if (blocksize != BLOCK_SIZE) {
logic_sb_block = (sb_block*BLOCK_SIZE) / blocksize;
offset = (sb_block*BLOCK_SIZE) % blocksize;
} else {
logic_sb_block = sb_block;
}
if (!(bh = sb_bread(sb, logic_sb_block))) {
ext2_msg(sb, KERN_ERR, "error: unable to read superblock");
goto failed_sbi;
}
/*
* Note: s_es must be initialized as soon as possible because
* some ext2 macro-instructions depend on its value
*/
es = (struct ext2_super_block *) (((char *)bh->b_data) + offset);
sbi->s_es = es;
sb->s_magic = le16_to_cpu(es->s_magic);
if (sb->s_magic != EXT2_SUPER_MAGIC)
goto cantfind_ext2;
/* Set defaults before we parse the mount options */
def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
if (def_mount_opts & EXT2_DEFM_DEBUG)
set_opt(sbi->s_mount_opt, DEBUG);
if (def_mount_opts & EXT2_DEFM_BSDGROUPS)
set_opt(sbi->s_mount_opt, GRPID);
if (def_mount_opts & EXT2_DEFM_UID16)
set_opt(sbi->s_mount_opt, NO_UID32);
#ifdef CONFIG_EXT2_FS_XATTR
if (def_mount_opts & EXT2_DEFM_XATTR_USER)
set_opt(sbi->s_mount_opt, XATTR_USER);
#endif
#ifdef CONFIG_EXT2_FS_POSIX_ACL
if (def_mount_opts & EXT2_DEFM_ACL)
set_opt(sbi->s_mount_opt, POSIX_ACL);
#endif
if (le16_to_cpu(sbi->s_es->s_errors) == EXT2_ERRORS_PANIC)
set_opt(sbi->s_mount_opt, ERRORS_PANIC);
else if (le16_to_cpu(sbi->s_es->s_errors) == EXT2_ERRORS_CONTINUE)
set_opt(sbi->s_mount_opt, ERRORS_CONT);
else
set_opt(sbi->s_mount_opt, ERRORS_RO);
sbi->s_resuid = make_kuid(&init_user_ns, le16_to_cpu(es->s_def_resuid));
sbi->s_resgid = make_kgid(&init_user_ns, le16_to_cpu(es->s_def_resgid));
set_opt(sbi->s_mount_opt, RESERVATION);
if (!parse_options((char *) data, sb))
goto failed_mount;
sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
((EXT2_SB(sb)->s_mount_opt & EXT2_MOUNT_POSIX_ACL) ?
MS_POSIXACL : 0);
sb->s_iflags |= SB_I_CGROUPWB;
if (le32_to_cpu(es->s_rev_level) == EXT2_GOOD_OLD_REV &&
(EXT2_HAS_COMPAT_FEATURE(sb, ~0U) ||
EXT2_HAS_RO_COMPAT_FEATURE(sb, ~0U) ||
EXT2_HAS_INCOMPAT_FEATURE(sb, ~0U)))
ext2_msg(sb, KERN_WARNING,
"warning: feature flags set on rev 0 fs, "
"running e2fsck is recommended");
/*
* Check feature flags regardless of the revision level, since we
* previously didn't change the revision level when setting the flags,
* so there is a chance incompat flags are set on a rev 0 filesystem.
*/
features = EXT2_HAS_INCOMPAT_FEATURE(sb, ~EXT2_FEATURE_INCOMPAT_SUPP);
if (features) {
ext2_msg(sb, KERN_ERR, "error: couldn't mount because of "
"unsupported optional features (%x)",
le32_to_cpu(features));
goto failed_mount;
}
if (!(sb->s_flags & MS_RDONLY) &&
(features = EXT2_HAS_RO_COMPAT_FEATURE(sb, ~EXT2_FEATURE_RO_COMPAT_SUPP))){
ext2_msg(sb, KERN_ERR, "error: couldn't mount RDWR because of "
"unsupported optional features (%x)",
le32_to_cpu(features));
goto failed_mount;
}
blocksize = BLOCK_SIZE << le32_to_cpu(sbi->s_es->s_log_block_size);
if (sbi->s_mount_opt & EXT2_MOUNT_DAX) {
if (blocksize != PAGE_SIZE) {
ext2_msg(sb, KERN_ERR,
"error: unsupported blocksize for dax");
goto failed_mount;
}
if (!sb->s_bdev->bd_disk->fops->direct_access) {
ext2_msg(sb, KERN_ERR,
"error: device does not support dax");
goto failed_mount;
}
}
/* If the blocksize doesn't match, re-read the thing.. */
if (sb->s_blocksize != blocksize) {
brelse(bh);
if (!sb_set_blocksize(sb, blocksize)) {
ext2_msg(sb, KERN_ERR,
"error: bad blocksize %d", blocksize);
goto failed_sbi;
}
logic_sb_block = (sb_block*BLOCK_SIZE) / blocksize;
offset = (sb_block*BLOCK_SIZE) % blocksize;
bh = sb_bread(sb, logic_sb_block);
if(!bh) {
ext2_msg(sb, KERN_ERR, "error: couldn't read"
"superblock on 2nd try");
goto failed_sbi;
}
es = (struct ext2_super_block *) (((char *)bh->b_data) + offset);
sbi->s_es = es;
if (es->s_magic != cpu_to_le16(EXT2_SUPER_MAGIC)) {
ext2_msg(sb, KERN_ERR, "error: magic mismatch");
goto failed_mount;
}
}
sb->s_maxbytes = ext2_max_size(sb->s_blocksize_bits);
sb->s_max_links = EXT2_LINK_MAX;
if (le32_to_cpu(es->s_rev_level) == EXT2_GOOD_OLD_REV) {
sbi->s_inode_size = EXT2_GOOD_OLD_INODE_SIZE;
sbi->s_first_ino = EXT2_GOOD_OLD_FIRST_INO;
} else {
sbi->s_inode_size = le16_to_cpu(es->s_inode_size);
sbi->s_first_ino = le32_to_cpu(es->s_first_ino);
if ((sbi->s_inode_size < EXT2_GOOD_OLD_INODE_SIZE) ||
!is_power_of_2(sbi->s_inode_size) ||
(sbi->s_inode_size > blocksize)) {
ext2_msg(sb, KERN_ERR,
"error: unsupported inode size: %d",
sbi->s_inode_size);
goto failed_mount;
}
}
sbi->s_frag_size = EXT2_MIN_FRAG_SIZE <<
le32_to_cpu(es->s_log_frag_size);
if (sbi->s_frag_size == 0)
goto cantfind_ext2;
sbi->s_frags_per_block = sb->s_blocksize / sbi->s_frag_size;
sbi->s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group);
sbi->s_frags_per_group = le32_to_cpu(es->s_frags_per_group);
sbi->s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group);
if (EXT2_INODE_SIZE(sb) == 0)
goto cantfind_ext2;
sbi->s_inodes_per_block = sb->s_blocksize / EXT2_INODE_SIZE(sb);
if (sbi->s_inodes_per_block == 0 || sbi->s_inodes_per_group == 0)
goto cantfind_ext2;
sbi->s_itb_per_group = sbi->s_inodes_per_group /
sbi->s_inodes_per_block;
sbi->s_desc_per_block = sb->s_blocksize /
sizeof (struct ext2_group_desc);
sbi->s_sbh = bh;
sbi->s_mount_state = le16_to_cpu(es->s_state);
sbi->s_addr_per_block_bits =
ilog2 (EXT2_ADDR_PER_BLOCK(sb));
sbi->s_desc_per_block_bits =
ilog2 (EXT2_DESC_PER_BLOCK(sb));
if (sb->s_magic != EXT2_SUPER_MAGIC)
goto cantfind_ext2;
if (sb->s_blocksize != bh->b_size) {
if (!silent)
ext2_msg(sb, KERN_ERR, "error: unsupported blocksize");
goto failed_mount;
}
if (sb->s_blocksize != sbi->s_frag_size) {
ext2_msg(sb, KERN_ERR,
"error: fragsize %lu != blocksize %lu"
"(not supported yet)",
sbi->s_frag_size, sb->s_blocksize);
goto failed_mount;
}
if (sbi->s_blocks_per_group > sb->s_blocksize * 8) {
ext2_msg(sb, KERN_ERR,
"error: #blocks per group too big: %lu",
sbi->s_blocks_per_group);
goto failed_mount;
}
if (sbi->s_frags_per_group > sb->s_blocksize * 8) {
ext2_msg(sb, KERN_ERR,
"error: #fragments per group too big: %lu",
sbi->s_frags_per_group);
goto failed_mount;
}
if (sbi->s_inodes_per_group > sb->s_blocksize * 8) {
ext2_msg(sb, KERN_ERR,
"error: #inodes per group too big: %lu",
sbi->s_inodes_per_group);
goto failed_mount;
}
if (EXT2_BLOCKS_PER_GROUP(sb) == 0)
goto cantfind_ext2;
sbi->s_groups_count = ((le32_to_cpu(es->s_blocks_count) -
le32_to_cpu(es->s_first_data_block) - 1)
/ EXT2_BLOCKS_PER_GROUP(sb)) + 1;
db_count = (sbi->s_groups_count + EXT2_DESC_PER_BLOCK(sb) - 1) /
EXT2_DESC_PER_BLOCK(sb);
sbi->s_group_desc = kmalloc (db_count * sizeof (struct buffer_head *), GFP_KERNEL);
if (sbi->s_group_desc == NULL) {
ext2_msg(sb, KERN_ERR, "error: not enough memory");
goto failed_mount;
}
bgl_lock_init(sbi->s_blockgroup_lock);
sbi->s_debts = kcalloc(sbi->s_groups_count, sizeof(*sbi->s_debts), GFP_KERNEL);
if (!sbi->s_debts) {
ext2_msg(sb, KERN_ERR, "error: not enough memory");
goto failed_mount_group_desc;
}
for (i = 0; i < db_count; i++) {
block = descriptor_loc(sb, logic_sb_block, i);
sbi->s_group_desc[i] = sb_bread(sb, block);
if (!sbi->s_group_desc[i]) {
for (j = 0; j < i; j++)
brelse (sbi->s_group_desc[j]);
ext2_msg(sb, KERN_ERR,
"error: unable to read group descriptors");
goto failed_mount_group_desc;
}
}
if (!ext2_check_descriptors (sb)) {
ext2_msg(sb, KERN_ERR, "group descriptors corrupted");
goto failed_mount2;
}
sbi->s_gdb_count = db_count;
get_random_bytes(&sbi->s_next_generation, sizeof(u32));
spin_lock_init(&sbi->s_next_gen_lock);
/* per fileystem reservation list head & lock */
spin_lock_init(&sbi->s_rsv_window_lock);
sbi->s_rsv_window_root = RB_ROOT;
/*
* Add a single, static dummy reservation to the start of the
* reservation window list --- it gives us a placeholder for
* append-at-start-of-list which makes the allocation logic
* _much_ simpler.
*/
sbi->s_rsv_window_head.rsv_start = EXT2_RESERVE_WINDOW_NOT_ALLOCATED;
sbi->s_rsv_window_head.rsv_end = EXT2_RESERVE_WINDOW_NOT_ALLOCATED;
sbi->s_rsv_window_head.rsv_alloc_hit = 0;
sbi->s_rsv_window_head.rsv_goal_size = 0;
ext2_rsv_window_add(sb, &sbi->s_rsv_window_head);
err = percpu_counter_init(&sbi->s_freeblocks_counter,
ext2_count_free_blocks(sb), GFP_KERNEL);
if (!err) {
err = percpu_counter_init(&sbi->s_freeinodes_counter,
ext2_count_free_inodes(sb), GFP_KERNEL);
}
if (!err) {
err = percpu_counter_init(&sbi->s_dirs_counter,
ext2_count_dirs(sb), GFP_KERNEL);
}
if (err) {
ext2_msg(sb, KERN_ERR, "error: insufficient memory");
goto failed_mount3;
}
/*
* set up enough so that it can read an inode
*/
sb->s_op = &ext2_sops;
sb->s_export_op = &ext2_export_ops;
sb->s_xattr = ext2_xattr_handlers;
#ifdef CONFIG_QUOTA
sb->dq_op = &dquot_operations;
sb->s_qcop = &dquot_quotactl_ops;
sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP;
#endif
root = ext2_iget(sb, EXT2_ROOT_INO);
if (IS_ERR(root)) {
ret = PTR_ERR(root);
goto failed_mount3;
}
if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
iput(root);
ext2_msg(sb, KERN_ERR, "error: corrupt root inode, run e2fsck");
goto failed_mount3;
}
sb->s_root = d_make_root(root);
if (!sb->s_root) {
ext2_msg(sb, KERN_ERR, "error: get root inode failed");
ret = -ENOMEM;
goto failed_mount3;
}
if (EXT2_HAS_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_HAS_JOURNAL))
ext2_msg(sb, KERN_WARNING,
"warning: mounting ext3 filesystem as ext2");
if (ext2_setup_super (sb, es, sb->s_flags & MS_RDONLY))
sb->s_flags |= MS_RDONLY;
ext2_write_super(sb);
return 0;
cantfind_ext2:
if (!silent)
ext2_msg(sb, KERN_ERR,
"error: can't find an ext2 filesystem on dev %s.",
sb->s_id);
goto failed_mount;
failed_mount3:
percpu_counter_destroy(&sbi->s_freeblocks_counter);
percpu_counter_destroy(&sbi->s_freeinodes_counter);
percpu_counter_destroy(&sbi->s_dirs_counter);
failed_mount2:
for (i = 0; i < db_count; i++)
brelse(sbi->s_group_desc[i]);
failed_mount_group_desc:
kfree(sbi->s_group_desc);
kfree(sbi->s_debts);
failed_mount:
brelse(bh);
failed_sbi:
sb->s_fs_info = NULL;
kfree(sbi->s_blockgroup_lock);
kfree(sbi);
failed:
return ret;
}
|
C
|
linux
| 1 |
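The tricky part called out in the ext2 commit message above — an xattr block found via the cache can be freed before the buffer lock is taken, so validity must be rechecked under the lock — is an instance of the revalidate-after-lock pattern. A minimal pthreads sketch follows, with invented names (not mbcache/ext2 API).

```c
#include <pthread.h>
#include <stdbool.h>

/* Minimal revalidate-after-lock sketch: an entry found by a lockless
 * lookup may be invalidated while we wait for its lock, so its state
 * must be rechecked once the lock is held. Names are invented. */
struct cached_entry {
	pthread_mutex_t lock;
	bool valid;
};

static bool use_entry(struct cached_entry *e)
{
	pthread_mutex_lock(&e->lock);
	if (!e->valid) {			/* reused/freed while we waited */
		pthread_mutex_unlock(&e->lock);
		return false;			/* caller retries the lookup */
	}
	/* ... safe to use the entry under the lock ... */
	pthread_mutex_unlock(&e->lock);
	return true;
}
```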
CVE-2011-4621
|
https://www.cvedetails.com/cve/CVE-2011-4621/
| null |
https://github.com/torvalds/linux/commit/f26f9aff6aaf67e9a430d16c266f91b13a5bff64
|
f26f9aff6aaf67e9a430d16c266f91b13a5bff64
|
Sched: fix skip_clock_update optimization
idle_balance() drops/retakes rq->lock, leaving the previous task
vulnerable to set_tsk_need_resched(). Clear it after we return
from balancing instead, and in setup_thread_stack() as well, so
no successfully descheduled or never scheduled task has it set.
Need resched confused the skip_clock_update logic, which assumes
that the next call to update_rq_clock() will come nearly immediately
after being set. Make the optimization robust against the waking
a sleeper before it successfully deschedules case by checking that
the current task has not been dequeued before setting the flag,
since it is that useless clock update we're trying to save, and
clear unconditionally in schedule() proper instead of conditionally
in put_prev_task().
Signed-off-by: Mike Galbraith <[email protected]>
Reported-by: Bjoern B. Brandenburg <[email protected]>
Tested-by: Yong Zhang <[email protected]>
Signed-off-by: Peter Zijlstra <[email protected]>
Cc: [email protected]
LKML-Reference: <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
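A minimal userspace model of the pattern this fix describes, with illustrative names (struct task, struct rq and the *_sketch helpers are not kernel APIs; this is a hedged sketch, not the scheduler code):
#include <stdbool.h>
struct task { bool on_rq; bool need_resched; };
struct rq { bool skip_clock_update; struct task *curr; };
/* Only request skipping the next clock update when the task being
 * preempted is still queued; if it was already dequeued, the clock
 * update we would skip is one we actually need. */
static void resched_task_sketch(struct rq *rq, struct task *t)
{
	t->need_resched = true;
	if (rq->curr == t && t->on_rq)
		rq->skip_clock_update = true;
}
/* Clear the flags unconditionally in the scheduler proper, not
 * conditionally in put_prev_task(), so no stale flag survives. */
static void schedule_sketch(struct rq *rq)
{
	rq->curr->need_resched = false;
	rq->skip_clock_update = false;
}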
|
long sched_group_rt_runtime(struct task_group *tg)
{
u64 rt_runtime_us;
if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF)
return -1;
rt_runtime_us = tg->rt_bandwidth.rt_runtime;
do_div(rt_runtime_us, NSEC_PER_USEC);
return rt_runtime_us;
}
|
long sched_group_rt_runtime(struct task_group *tg)
{
u64 rt_runtime_us;
if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF)
return -1;
rt_runtime_us = tg->rt_bandwidth.rt_runtime;
do_div(rt_runtime_us, NSEC_PER_USEC);
return rt_runtime_us;
}
|
C
|
linux
| 0 |
CVE-2018-12714
|
https://www.cvedetails.com/cve/CVE-2018-12714/
|
CWE-787
|
https://github.com/torvalds/linux/commit/81f9c4e4177d31ced6f52a89bb70e93bfb77ca03
|
81f9c4e4177d31ced6f52a89bb70e93bfb77ca03
|
Merge tag 'trace-v4.18-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace
Pull tracing fixes from Steven Rostedt:
"This contains a few fixes and a clean up.
- a bad merge caused an "endif" to go in the wrong place in
scripts/Makefile.build
- softirq tracing fix for tracing that corrupts lockdep and causes a
false splat
- histogram documentation typo fixes
- fix a bad memory reference when passing in no filter to the filter
code
- simplify code by using the swap macro instead of open coding the
swap"
* tag 'trace-v4.18-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace:
tracing: Fix SKIP_STACK_VALIDATION=1 build due to bad merge with -mrecord-mcount
tracing: Fix some errors in histogram documentation
tracing: Use swap macro in update_max_tr
softirq: Reorder trace_softirqs_on to prevent lockdep splat
tracing: Check for no filter when processing event filters
|
static void enable_trace_buffered_event(void *data)
{
/* Probably not needed, but do it anyway */
smp_rmb();
this_cpu_dec(trace_buffered_event_cnt);
}
|
static void enable_trace_buffered_event(void *data)
{
/* Probably not needed, but do it anyway */
smp_rmb();
this_cpu_dec(trace_buffered_event_cnt);
}
|
C
|
linux
| 0 |
CVE-2018-19134
|
https://www.cvedetails.com/cve/CVE-2018-19134/
|
CWE-704
|
http://git.ghostscript.com/?p=ghostpdl.git;a=commitdiff;h=693baf02152119af6e6afd30bb8ec76d14f84bbf
|
693baf02152119af6e6afd30bb8ec76d14f84bbf
| null |
gx_dc_devn_masked_load(gx_device_color * pdevc, const gs_gstate * pgs,
gx_device * dev, gs_color_select_t select)
{
int code = (*gx_dc_type_data_devn.load) (pdevc, pgs, dev, select);
if (code < 0)
return code;
FINISH_PATTERN_LOAD
}
|
gx_dc_devn_masked_load(gx_device_color * pdevc, const gs_gstate * pgs,
gx_device * dev, gs_color_select_t select)
{
int code = (*gx_dc_type_data_devn.load) (pdevc, pgs, dev, select);
if (code < 0)
return code;
FINISH_PATTERN_LOAD
}
|
C
|
ghostscript
| 0 |
CVE-2015-6527
|
https://www.cvedetails.com/cve/CVE-2015-6527/
|
CWE-17
|
https://git.php.net/?p=php-src.git;a=commit;h=6aeee47b2cd47915ccfa3b41433a3f57aea24dd5
|
6aeee47b2cd47915ccfa3b41433a3f57aea24dd5
| null |
PHP_FUNCTION(implode)
{
zval *arg1, *arg2 = NULL, *arr;
zend_string *delim;
#ifndef FAST_ZPP
if (zend_parse_parameters(ZEND_NUM_ARGS(), "z|z", &arg1, &arg2) == FAILURE) {
return;
}
#else
ZEND_PARSE_PARAMETERS_START(1, 2)
Z_PARAM_ZVAL(arg1)
Z_PARAM_OPTIONAL
Z_PARAM_ZVAL(arg2)
ZEND_PARSE_PARAMETERS_END();
#endif
if (arg2 == NULL) {
if (Z_TYPE_P(arg1) != IS_ARRAY) {
php_error_docref(NULL, E_WARNING, "Argument must be an array");
return;
}
delim = ZSTR_EMPTY_ALLOC();
arr = arg1;
} else {
if (Z_TYPE_P(arg1) == IS_ARRAY) {
delim = zval_get_string(arg2);
arr = arg1;
} else if (Z_TYPE_P(arg2) == IS_ARRAY) {
delim = zval_get_string(arg1);
arr = arg2;
} else {
php_error_docref(NULL, E_WARNING, "Invalid arguments passed");
return;
}
}
php_implode(delim, arr, return_value);
zend_string_release(delim);
}
|
PHP_FUNCTION(implode)
{
zval *arg1, *arg2 = NULL, *arr;
zend_string *delim;
#ifndef FAST_ZPP
if (zend_parse_parameters(ZEND_NUM_ARGS(), "z|z", &arg1, &arg2) == FAILURE) {
return;
}
#else
ZEND_PARSE_PARAMETERS_START(1, 2)
Z_PARAM_ZVAL(arg1)
Z_PARAM_OPTIONAL
Z_PARAM_ZVAL(arg2)
ZEND_PARSE_PARAMETERS_END();
#endif
if (arg2 == NULL) {
if (Z_TYPE_P(arg1) != IS_ARRAY) {
php_error_docref(NULL, E_WARNING, "Argument must be an array");
return;
}
delim = ZSTR_EMPTY_ALLOC();
arr = arg1;
} else {
if (Z_TYPE_P(arg1) == IS_ARRAY) {
delim = zval_get_string(arg2);
arr = arg1;
} else if (Z_TYPE_P(arg2) == IS_ARRAY) {
delim = zval_get_string(arg1);
arr = arg2;
} else {
php_error_docref(NULL, E_WARNING, "Invalid arguments passed");
return;
}
}
php_implode(delim, arr, return_value);
zend_string_release(delim);
}
|
C
|
php
| 0 |
CVE-2013-2905
|
https://www.cvedetails.com/cve/CVE-2013-2905/
|
CWE-264
|
https://github.com/chromium/chromium/commit/afb848acb43ba316097ab4fddfa38dbd80bc6a71
|
afb848acb43ba316097ab4fddfa38dbd80bc6a71
|
Posix: fix named SHM mappings permissions.
Make sure that named mappings in /dev/shm/ aren't created with
broad permissions.
BUG=254159
[email protected], [email protected]
Review URL: https://codereview.chromium.org/17779002
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@209814 0039d316-1c4b-4281-b951-d872f2087c98
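The generic POSIX pattern this fix points at — creating a named shared-memory object with owner-only permissions — looks like this (a hedged sketch, not Chromium's code; create_private_shm is an illustrative name):
#include <fcntl.h>
#include <sys/mman.h>
#include <sys/stat.h>
/* Create a named POSIX shm object readable/writable by the owner only.
 * O_EXCL guards against adopting a pre-existing object that someone
 * else created with a broader mode. */
int create_private_shm(const char *name)
{
	return shm_open(name, O_CREAT | O_EXCL | O_RDWR, S_IRUSR | S_IWUSR);
}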
|
SharedMemory::~SharedMemory() {
Close();
}
|
SharedMemory::~SharedMemory() {
Close();
}
|
C
|
Chrome
| 0 |
CVE-2015-1213
|
https://www.cvedetails.com/cve/CVE-2015-1213/
|
CWE-119
|
https://github.com/chromium/chromium/commit/faaa2fd0a05f1622d9a8806da118d4f3b602e707
|
faaa2fd0a05f1622d9a8806da118d4f3b602e707
|
[Blink>Media] Allow autoplay muted on Android by default
There was a mistake causing autoplay muted to be shipped on Android,
but it would be disabled if the chromium embedder didn't specify a
content setting for the "AllowAutoplay" preference. This CL makes the
AllowAutoplay preference true by default so that it is allowed by
embedders (including AndroidWebView) unless they explicitly
disable it.
Intent to ship:
https://groups.google.com/a/chromium.org/d/msg/blink-dev/Q1cnzNI2GpI/AL_eyUNABgAJ
BUG=689018
Review-Url: https://codereview.chromium.org/2677173002
Cr-Commit-Position: refs/heads/master@{#448423}
|
~AudioSourceProviderClientLockScope() {
if (m_client)
m_client->unlock();
}
|
~AudioSourceProviderClientLockScope() {
if (m_client)
m_client->unlock();
}
|
C
|
Chrome
| 0 |
CVE-2009-3605
|
https://www.cvedetails.com/cve/CVE-2009-3605/
|
CWE-189
|
https://cgit.freedesktop.org/poppler/poppler/commit/?id=7b2d314a61fd0e12f47c62996cb49ec0d1ba747a
|
7b2d314a61fd0e12f47c62996cb49ec0d1ba747a
| null |
void GfxPath::append(GfxPath *path) {
int i;
if (n + path->n > size) {
size = n + path->n;
subpaths = (GfxSubpath **)
greallocn(subpaths, size, sizeof(GfxSubpath *));
}
for (i = 0; i < path->n; ++i) {
subpaths[n++] = path->subpaths[i]->copy();
}
justMoved = gFalse;
}
|
void GfxPath::append(GfxPath *path) {
int i;
if (n + path->n > size) {
size = n + path->n;
subpaths = (GfxSubpath **)
greallocn(subpaths, size, sizeof(GfxSubpath *));
}
for (i = 0; i < path->n; ++i) {
subpaths[n++] = path->subpaths[i]->copy();
}
justMoved = gFalse;
}
|
CPP
|
poppler
| 0 |
CVE-2019-11599
|
https://www.cvedetails.com/cve/CVE-2019-11599/
|
CWE-362
|
https://github.com/torvalds/linux/commit/04f5866e41fb70690e28397487d8bd8eea7d712a
|
04f5866e41fb70690e28397487d8bd8eea7d712a
|
coredump: fix race condition between mmget_not_zero()/get_task_mm() and core dumping
The core dumping code has always run without holding the mmap_sem for
writing, despite that is the only way to ensure that the entire vma
layout will not change from under it. Only using some signal
serialization on the processes belonging to the mm is not nearly enough.
This was pointed out earlier. For example in Hugh's post from Jul 2017:
https://lkml.kernel.org/r/[email protected]
"Not strictly relevant here, but a related note: I was very surprised
to discover, only quite recently, how handle_mm_fault() may be called
without down_read(mmap_sem) - when core dumping. That seems a
misguided optimization to me, which would also be nice to correct"
In particular because the growsdown and growsup can move the
vm_start/vm_end the various loops the core dump does around the vma will
not be consistent if page faults can happen concurrently.
Pretty much all users calling mmget_not_zero()/get_task_mm() and then
taking the mmap_sem had the potential to introduce unexpected side
effects in the core dumping code.
Adding mmap_sem for writing around the ->core_dump invocation is a
viable long term fix, but it requires removing all copy user and page
faults and to replace them with get_dump_page() for all binary formats
which is not suitable as a short term fix.
For the time being this solution manually covers the places that can
confuse the core dump either by altering the vma layout or the vma flags
while it runs. Once ->core_dump runs under mmap_sem for writing the
function mmget_still_valid() can be dropped.
Allowing mmap_sem protected sections to run in parallel with the
coredump provides some minor parallelism advantage to the swapoff code
(which seems to be safe enough by never mangling any vma field and can
keep doing swapins in parallel to the core dumping) and to some other
corner case.
In order to facilitate the backporting I added "Fixes: 86039bd3b4e6"
however the side effect of this same race condition in /proc/pid/mem
should be reproducible since before 2.6.12-rc2 so I couldn't add any
other "Fixes:" because there's no hash beyond the git genesis commit.
Because find_extend_vma() is the only location outside of the process
context that could modify the "mm" structures under mmap_sem for
reading, by adding the mmget_still_valid() check to it, all other cases
that take the mmap_sem for reading don't need the new check after
mmget_not_zero()/get_task_mm(). The expand_stack() in page fault
context also doesn't need the new check, because all tasks under core
dumping are frozen.
Link: http://lkml.kernel.org/r/[email protected]
Fixes: 86039bd3b4e6 ("userfaultfd: add new syscall to provide memory externalization")
Signed-off-by: Andrea Arcangeli <[email protected]>
Reported-by: Jann Horn <[email protected]>
Suggested-by: Oleg Nesterov <[email protected]>
Acked-by: Peter Xu <[email protected]>
Reviewed-by: Mike Rapoport <[email protected]>
Reviewed-by: Oleg Nesterov <[email protected]>
Reviewed-by: Jann Horn <[email protected]>
Acked-by: Jason Gunthorpe <[email protected]>
Acked-by: Michal Hocko <[email protected]>
Cc: <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
static int pid_numa_maps_open(struct inode *inode, struct file *file)
{
return proc_maps_open(inode, file, &proc_pid_numa_maps_op,
sizeof(struct numa_maps_private));
}
|
static int pid_numa_maps_open(struct inode *inode, struct file *file)
{
return proc_maps_open(inode, file, &proc_pid_numa_maps_op,
sizeof(struct numa_maps_private));
}
|
C
|
linux
| 0 |
CVE-2019-11487
|
https://www.cvedetails.com/cve/CVE-2019-11487/
|
CWE-416
|
https://github.com/torvalds/linux/commit/6b3a707736301c2128ca85ce85fb13f60b5e350a
|
6b3a707736301c2128ca85ce85fb13f60b5e350a
|
Merge branch 'page-refs' (page ref overflow)
Merge page ref overflow branch.
Jann Horn reported that he can overflow the page ref count with
sufficient memory (and a filesystem that is intentionally extremely
slow).
Admittedly it's not exactly easy. To have more than four billion
references to a page requires a minimum of 32GB of kernel memory just
for the pointers to the pages, much less any metadata to keep track of
those pointers. Jann needed a total of 140GB of memory and a specially
crafted filesystem that leaves all reads pending (in order to not ever
free the page references and just keep adding more).
Still, we have a fairly straightforward way to limit the two obvious
user-controllable sources of page references: direct-IO like page
references gotten through get_user_pages(), and the splice pipe page
duplication. So let's just do that.
* branch page-refs:
fs: prevent page refcount overflow in pipe_buf_get
mm: prevent get_user_pages() from overflowing page refcount
mm: add 'try_get_page()' helper function
mm: make page ref count overflow check tighter and more explicit
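A userspace analogue of the try_get_page() idea listed above (the type and helper are illustrative, not the kernel implementation):
#include <limits.h>
#include <stdatomic.h>
#include <stdbool.h>
typedef struct { atomic_int refcount; } page_sketch_t;
/* Refuse to take a reference when the count is zero/negative or would
 * overflow, instead of incrementing unconditionally. */
static bool try_get_page_sketch(page_sketch_t *p)
{
	int old = atomic_load(&p->refcount);
	while (old > 0 && old < INT_MAX) {
		if (atomic_compare_exchange_weak(&p->refcount, &old, old + 1))
			return true;	/* reference taken */
		/* on failure the CAS refreshed old; retry */
	}
	return false;	/* saturated or dead: the caller must fail the operation */
}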
|
static struct fuse_req *__fuse_request_alloc(unsigned npages, gfp_t flags)
{
struct fuse_req *req = kmem_cache_zalloc(fuse_req_cachep, flags);
if (req) {
struct page **pages = NULL;
struct fuse_page_desc *page_descs = NULL;
WARN_ON(npages > FUSE_MAX_MAX_PAGES);
if (npages > FUSE_REQ_INLINE_PAGES) {
pages = fuse_req_pages_alloc(npages, flags,
&page_descs);
if (!pages) {
kmem_cache_free(fuse_req_cachep, req);
return NULL;
}
} else if (npages) {
pages = req->inline_pages;
page_descs = req->inline_page_descs;
}
fuse_request_init(req, pages, page_descs, npages);
}
return req;
}
|
static struct fuse_req *__fuse_request_alloc(unsigned npages, gfp_t flags)
{
struct fuse_req *req = kmem_cache_zalloc(fuse_req_cachep, flags);
if (req) {
struct page **pages = NULL;
struct fuse_page_desc *page_descs = NULL;
WARN_ON(npages > FUSE_MAX_MAX_PAGES);
if (npages > FUSE_REQ_INLINE_PAGES) {
pages = fuse_req_pages_alloc(npages, flags,
&page_descs);
if (!pages) {
kmem_cache_free(fuse_req_cachep, req);
return NULL;
}
} else if (npages) {
pages = req->inline_pages;
page_descs = req->inline_page_descs;
}
fuse_request_init(req, pages, page_descs, npages);
}
return req;
}
|
C
|
linux
| 0 |
CVE-2017-7177
|
https://www.cvedetails.com/cve/CVE-2017-7177/
|
CWE-358
|
https://github.com/inliniac/suricata/commit/4a04f814b15762eb446a5ead4d69d021512df6f8
|
4a04f814b15762eb446a5ead4d69d021512df6f8
|
defrag - take protocol into account during re-assembly
The IP protocol was not being used to match fragments with
their packets, allowing a carefully constructed packet with a
different protocol to be matched. Re-assembly could then complete,
creating a packet that would not be re-assembled by the
destination host.
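The matching rule the fix establishes can be sketched as follows (field and function names are illustrative, not Suricata's):
#include <stdbool.h>
#include <stdint.h>
typedef struct {
	uint32_t src, dst;	/* IPv4 addresses, for brevity */
	uint16_t id;		/* IP ID shared by the fragments */
	uint8_t proto;
} frag_key_t;
/* A fragment belongs to a tracker only when src, dst, IP ID *and*
 * protocol all agree; the protocol comparison is what was missing. */
static bool frag_key_match(const frag_key_t *a, const frag_key_t *b)
{
	return a->src == b->src && a->dst == b->dst &&
	       a->id == b->id && a->proto == b->proto;
}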
|
IPV6DefragDoSturgesNovakTest(int policy, u_char *expected, size_t expected_len)
{
int i;
int ret = 0;
DefragInit();
/*
* Build the packets.
*/
int id = 1;
Packet *packets[17];
memset(packets, 0x00, sizeof(packets));
/*
* Original fragments.
*/
/* A*24 at 0. */
packets[0] = IPV6BuildTestPacket(IPPROTO_ICMPV6, id, 0, 1, 'A', 24);
/* B*16 at 32. */
packets[1] = IPV6BuildTestPacket(IPPROTO_ICMPV6, id, 32 >> 3, 1, 'B', 16);
/* C*24 at 48. */
packets[2] = IPV6BuildTestPacket(IPPROTO_ICMPV6, id, 48 >> 3, 1, 'C', 24);
/* D*8 at 80. */
packets[3] = IPV6BuildTestPacket(IPPROTO_ICMPV6, id, 80 >> 3, 1, 'D', 8);
/* E*16 at 104. */
packets[4] = IPV6BuildTestPacket(IPPROTO_ICMPV6, id, 104 >> 3, 1, 'E', 16);
/* F*24 at 120. */
packets[5] = IPV6BuildTestPacket(IPPROTO_ICMPV6, id, 120 >> 3, 1, 'F', 24);
/* G*16 at 144. */
packets[6] = IPV6BuildTestPacket(IPPROTO_ICMPV6, id, 144 >> 3, 1, 'G', 16);
/* H*16 at 160. */
packets[7] = IPV6BuildTestPacket(IPPROTO_ICMPV6, id, 160 >> 3, 1, 'H', 16);
/* I*8 at 176. */
packets[8] = IPV6BuildTestPacket(IPPROTO_ICMPV6, id, 176 >> 3, 1, 'I', 8);
/*
* Overlapping subsequent fragments.
*/
/* J*32 at 8. */
packets[9] = IPV6BuildTestPacket(IPPROTO_ICMPV6, id, 8 >> 3, 1, 'J', 32);
/* K*24 at 48. */
packets[10] = IPV6BuildTestPacket(IPPROTO_ICMPV6, id, 48 >> 3, 1, 'K', 24);
/* L*24 at 72. */
packets[11] = IPV6BuildTestPacket(IPPROTO_ICMPV6, id, 72 >> 3, 1, 'L', 24);
/* M*24 at 96. */
packets[12] = IPV6BuildTestPacket(IPPROTO_ICMPV6, id, 96 >> 3, 1, 'M', 24);
/* N*8 at 128. */
packets[13] = IPV6BuildTestPacket(IPPROTO_ICMPV6, id, 128 >> 3, 1, 'N', 8);
/* O*8 at 152. */
packets[14] = IPV6BuildTestPacket(IPPROTO_ICMPV6, id, 152 >> 3, 1, 'O', 8);
/* P*8 at 160. */
packets[15] = IPV6BuildTestPacket(IPPROTO_ICMPV6, id, 160 >> 3, 1, 'P', 8);
/* Q*16 at 176. */
packets[16] = IPV6BuildTestPacket(IPPROTO_ICMPV6, id, 176 >> 3, 0, 'Q', 16);
default_policy = policy;
/* Send all but the last. */
for (i = 0; i < 9; i++) {
Packet *tp = Defrag(NULL, NULL, packets[i], NULL);
if (tp != NULL) {
SCFree(tp);
goto end;
}
if (ENGINE_ISSET_EVENT(packets[i], IPV6_FRAG_OVERLAP)) {
goto end;
}
}
int overlap = 0;
for (; i < 16; i++) {
Packet *tp = Defrag(NULL, NULL, packets[i], NULL);
if (tp != NULL) {
SCFree(tp);
goto end;
}
if (ENGINE_ISSET_EVENT(packets[i], IPV6_FRAG_OVERLAP)) {
overlap++;
}
}
if (!overlap)
goto end;
/* And now the last one. */
Packet *reassembled = Defrag(NULL, NULL, packets[16], NULL);
if (reassembled == NULL)
goto end;
if (memcmp(GET_PKT_DATA(reassembled) + 40, expected, expected_len) != 0)
goto end;
if (IPV6_GET_PLEN(reassembled) != 192)
goto end;
SCFree(reassembled);
/* Make sure all frags were returned to the pool. */
if (defrag_context->frag_pool->outstanding != 0) {
printf("defrag_context->frag_pool->outstanding %u: ", defrag_context->frag_pool->outstanding);
goto end;
}
ret = 1;
end:
for (i = 0; i < 17; i++) {
SCFree(packets[i]);
}
DefragDestroy();
return ret;
}
|
IPV6DefragDoSturgesNovakTest(int policy, u_char *expected, size_t expected_len)
{
int i;
int ret = 0;
DefragInit();
/*
* Build the packets.
*/
int id = 1;
Packet *packets[17];
memset(packets, 0x00, sizeof(packets));
/*
* Original fragments.
*/
/* A*24 at 0. */
packets[0] = IPV6BuildTestPacket(id, 0, 1, 'A', 24);
/* B*16 at 32. */
packets[1] = IPV6BuildTestPacket(id, 32 >> 3, 1, 'B', 16);
/* C*24 at 48. */
packets[2] = IPV6BuildTestPacket(id, 48 >> 3, 1, 'C', 24);
/* D*8 at 80. */
packets[3] = IPV6BuildTestPacket(id, 80 >> 3, 1, 'D', 8);
/* E*16 at 104. */
packets[4] = IPV6BuildTestPacket(id, 104 >> 3, 1, 'E', 16);
/* F*24 at 120. */
packets[5] = IPV6BuildTestPacket(id, 120 >> 3, 1, 'F', 24);
/* G*16 at 144. */
packets[6] = IPV6BuildTestPacket(id, 144 >> 3, 1, 'G', 16);
/* H*16 at 160. */
packets[7] = IPV6BuildTestPacket(id, 160 >> 3, 1, 'H', 16);
/* I*8 at 176. */
packets[8] = IPV6BuildTestPacket(id, 176 >> 3, 1, 'I', 8);
/*
* Overlapping subsequent fragments.
*/
/* J*32 at 8. */
packets[9] = IPV6BuildTestPacket(id, 8 >> 3, 1, 'J', 32);
/* K*24 at 48. */
packets[10] = IPV6BuildTestPacket(id, 48 >> 3, 1, 'K', 24);
/* L*24 at 72. */
packets[11] = IPV6BuildTestPacket(id, 72 >> 3, 1, 'L', 24);
/* M*24 at 96. */
packets[12] = IPV6BuildTestPacket(id, 96 >> 3, 1, 'M', 24);
/* N*8 at 128. */
packets[13] = IPV6BuildTestPacket(id, 128 >> 3, 1, 'N', 8);
/* O*8 at 152. */
packets[14] = IPV6BuildTestPacket(id, 152 >> 3, 1, 'O', 8);
/* P*8 at 160. */
packets[15] = IPV6BuildTestPacket(id, 160 >> 3, 1, 'P', 8);
/* Q*16 at 176. */
packets[16] = IPV6BuildTestPacket(id, 176 >> 3, 0, 'Q', 16);
default_policy = policy;
/* Send all but the last. */
for (i = 0; i < 9; i++) {
Packet *tp = Defrag(NULL, NULL, packets[i], NULL);
if (tp != NULL) {
SCFree(tp);
goto end;
}
if (ENGINE_ISSET_EVENT(packets[i], IPV6_FRAG_OVERLAP)) {
goto end;
}
}
int overlap = 0;
for (; i < 16; i++) {
Packet *tp = Defrag(NULL, NULL, packets[i], NULL);
if (tp != NULL) {
SCFree(tp);
goto end;
}
if (ENGINE_ISSET_EVENT(packets[i], IPV6_FRAG_OVERLAP)) {
overlap++;
}
}
if (!overlap)
goto end;
/* And now the last one. */
Packet *reassembled = Defrag(NULL, NULL, packets[16], NULL);
if (reassembled == NULL)
goto end;
if (memcmp(GET_PKT_DATA(reassembled) + 40, expected, expected_len) != 0)
goto end;
if (IPV6_GET_PLEN(reassembled) != 192)
goto end;
SCFree(reassembled);
/* Make sure all frags were returned to the pool. */
if (defrag_context->frag_pool->outstanding != 0) {
printf("defrag_context->frag_pool->outstanding %u: ", defrag_context->frag_pool->outstanding);
goto end;
}
ret = 1;
end:
for (i = 0; i < 17; i++) {
SCFree(packets[i]);
}
DefragDestroy();
return ret;
}
|
C
|
suricata
| 1 |
CVE-2016-3156
|
https://www.cvedetails.com/cve/CVE-2016-3156/
|
CWE-399
|
https://github.com/torvalds/linux/commit/fbd40ea0180a2d328c5adc61414dc8bab9335ce2
|
fbd40ea0180a2d328c5adc61414dc8bab9335ce2
|
ipv4: Don't do expensive useless work during inetdev destroy.
When an inetdev is destroyed, every address assigned to the interface
is removed. And in this scenario we do two pointless things which can
be very expensive if the number of assigned interfaces is large:
1) Address promotion. We are deleting all addresses, so there is no
point in doing this.
2) A full nf conntrack table purge for every address. We only need to
do this once, as is already caught by the existing
masq_dev_notifier so masq_inet_event() can skip this.
Reported-by: Solar Designer <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
Tested-by: Cyrill Gorcunov <[email protected]>
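Reduced to a self-contained sketch, the teardown pattern described above looks like this (types and helpers are illustrative, not the kernel's):
#include <stdbool.h>
#include <stddef.h>
struct addr_sketch { struct addr_sketch *next; };
static void del_address(struct addr_sketch *a, bool promote) { (void)a; (void)promote; }
static void purge_conntrack_once(void) { }
/* When the whole device is going away, delete every address without
 * per-address promotion, and do the expensive purge exactly once at
 * the end rather than once per address. */
static void inetdev_destroy_sketch(struct addr_sketch *list)
{
	struct addr_sketch *a, *next;
	for (a = list; a != NULL; a = next) {
		next = a->next;
		del_address(a, /* promote = */ false);
	}
	purge_conntrack_once();
}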
|
struct in_device *inetdev_by_index(struct net *net, int ifindex)
{
struct net_device *dev;
struct in_device *in_dev = NULL;
rcu_read_lock();
dev = dev_get_by_index_rcu(net, ifindex);
if (dev)
in_dev = rcu_dereference_rtnl(dev->ip_ptr);
rcu_read_unlock();
return in_dev;
}
|
struct in_device *inetdev_by_index(struct net *net, int ifindex)
{
struct net_device *dev;
struct in_device *in_dev = NULL;
rcu_read_lock();
dev = dev_get_by_index_rcu(net, ifindex);
if (dev)
in_dev = rcu_dereference_rtnl(dev->ip_ptr);
rcu_read_unlock();
return in_dev;
}
|
C
|
linux
| 0 |
CVE-2017-7471
|
https://www.cvedetails.com/cve/CVE-2017-7471/
|
CWE-732
|
https://git.qemu.org/?p=qemu.git;a=commitdiff;h=9c6b899f7a46893ab3b671e341a2234e9c0c060e
|
9c6b899f7a46893ab3b671e341a2234e9c0c060e
| null |
static void local_mapped_file_attr(int dirfd, const char *name,
struct stat *stbuf)
{
FILE *fp;
char buf[ATTR_MAX];
int map_dirfd;
map_dirfd = openat_dir(dirfd, VIRTFS_META_DIR);
if (map_dirfd == -1) {
return;
}
fp = local_fopenat(map_dirfd, name, "r");
close_preserve_errno(map_dirfd);
if (!fp) {
return;
}
memset(buf, 0, ATTR_MAX);
while (fgets(buf, ATTR_MAX, fp)) {
if (!strncmp(buf, "virtfs.uid", 10)) {
stbuf->st_uid = atoi(buf+11);
} else if (!strncmp(buf, "virtfs.gid", 10)) {
stbuf->st_gid = atoi(buf+11);
} else if (!strncmp(buf, "virtfs.mode", 11)) {
stbuf->st_mode = atoi(buf+12);
} else if (!strncmp(buf, "virtfs.rdev", 11)) {
stbuf->st_rdev = atoi(buf+12);
}
memset(buf, 0, ATTR_MAX);
}
fclose(fp);
}
|
static void local_mapped_file_attr(int dirfd, const char *name,
struct stat *stbuf)
{
FILE *fp;
char buf[ATTR_MAX];
int map_dirfd;
map_dirfd = openat_dir(dirfd, VIRTFS_META_DIR);
if (map_dirfd == -1) {
return;
}
fp = local_fopenat(map_dirfd, name, "r");
close_preserve_errno(map_dirfd);
if (!fp) {
return;
}
memset(buf, 0, ATTR_MAX);
while (fgets(buf, ATTR_MAX, fp)) {
if (!strncmp(buf, "virtfs.uid", 10)) {
stbuf->st_uid = atoi(buf+11);
} else if (!strncmp(buf, "virtfs.gid", 10)) {
stbuf->st_gid = atoi(buf+11);
} else if (!strncmp(buf, "virtfs.mode", 11)) {
stbuf->st_mode = atoi(buf+12);
} else if (!strncmp(buf, "virtfs.rdev", 11)) {
stbuf->st_rdev = atoi(buf+12);
}
memset(buf, 0, ATTR_MAX);
}
fclose(fp);
}
|
C
|
qemu
| 0 |
null | null | null |
https://github.com/chromium/chromium/commit/961d0cda4cfc3bcf04aa48ccc32772d63af12d9b
|
961d0cda4cfc3bcf04aa48ccc32772d63af12d9b
|
Extract generation logic from the accessory controller into a separate one
This change adds a controller that is responsible for mediating
communication between ChromePasswordManagerClient and
PasswordAccessoryController for password generation. It is also
responsible for managing the modal dialog used to present the generated
password.
In the future it will make it easier to add manual generation to the
password accessory.
Bug: 845458
Change-Id: I0adbb2de9b9f5012745ae3963154f7d3247b3051
Reviewed-on: https://chromium-review.googlesource.com/c/1448181
Commit-Queue: Ioana Pandele <[email protected]>
Reviewed-by: Fabio Tirelo <[email protected]>
Reviewed-by: Vasilii Sukhanov <[email protected]>
Reviewed-by: Friedrich [CET] <[email protected]>
Cr-Commit-Position: refs/heads/master@{#629542}
|
ManualFillingControllerImpl::ManualFillingControllerImpl(
content::WebContents* web_contents)
: web_contents_(web_contents),
view_(ManualFillingViewInterface::Create(this)),
weak_factory_(this) {
if (PasswordAccessoryController::AllowedForWebContents(web_contents)) {
pwd_controller_ =
PasswordAccessoryController::GetOrCreate(web_contents)->AsWeakPtr();
DCHECK(pwd_controller_);
}
}
|
ManualFillingControllerImpl::ManualFillingControllerImpl(
content::WebContents* web_contents)
: web_contents_(web_contents),
view_(ManualFillingViewInterface::Create(this)),
weak_factory_(this) {
if (PasswordAccessoryController::AllowedForWebContents(web_contents)) {
pwd_controller_ =
PasswordAccessoryController::GetOrCreate(web_contents)->AsWeakPtr();
DCHECK(pwd_controller_);
}
}
|
C
|
Chrome
| 0 |
CVE-2014-3191
|
https://www.cvedetails.com/cve/CVE-2014-3191/
|
CWE-416
|
https://github.com/chromium/chromium/commit/11a4cc4a6d6e665d9a118fada4b7c658d6f70d95
|
11a4cc4a6d6e665d9a118fada4b7c658d6f70d95
|
Defer call to updateWidgetPositions() outside of RenderLayerScrollableArea.
updateWidgetPositions() can destroy the render tree, so it should never
be called from inside RenderLayerScrollableArea. Leaving it there allows
for the potential of use-after-free bugs.
BUG=402407
[email protected]
Review URL: https://codereview.chromium.org/490473003
git-svn-id: svn://svn.chromium.org/blink/trunk@180681 bbb929c8-8fbe-4397-9dbb-9b2b20218538
|
int RenderLayerScrollableArea::horizontalScrollbarHeight(OverlayScrollbarSizeRelevancy relevancy) const
{
if (!m_hBar || (m_hBar->isOverlayScrollbar() && (relevancy == IgnoreOverlayScrollbarSize || !m_hBar->shouldParticipateInHitTesting())))
return 0;
return m_hBar->height();
}
|
int RenderLayerScrollableArea::horizontalScrollbarHeight(OverlayScrollbarSizeRelevancy relevancy) const
{
if (!m_hBar || (m_hBar->isOverlayScrollbar() && (relevancy == IgnoreOverlayScrollbarSize || !m_hBar->shouldParticipateInHitTesting())))
return 0;
return m_hBar->height();
}
|
C
|
Chrome
| 0 |
CVE-2016-10218
|
https://www.cvedetails.com/cve/CVE-2016-10218/
|
CWE-476
|
http://git.ghostscript.com/?p=ghostpdl.git;a=commit;h=d621292fb2c8157d9899dcd83fd04dd250e30fe4
|
d621292fb2c8157d9899dcd83fd04dd250e30fe4
| null |
pdf14_patt_trans_image_fill(gx_device * dev, const gs_gstate * pgs,
const gs_matrix *pmat, const gs_image_common_t *pic,
const gs_int_rect * prect,
const gx_drawing_color * pdcolor,
const gx_clip_path * pcpath, gs_memory_t * mem,
gx_image_enum_common_t ** pinfo)
{
const gs_image_t *pim = (const gs_image_t *)pic;
pdf14_device * p14dev = (pdf14_device *)dev;
gx_color_tile *ptile;
int code;
gs_int_rect group_rect;
gx_image_enum *penum;
gs_rect bbox_in, bbox_out;
gx_pattern_trans_t *fill_trans_buffer;
ptile = pdcolor->colors.pattern.p_tile;
/* Set up things in the ptile so that we get the proper
blending etc */
/* Set the blending procs and the is_additive setting based
upon the number of channels */
if (ptile->ttrans->n_chan-1 < 4) {
ptile->ttrans->blending_procs = &rgb_blending_procs;
ptile->ttrans->is_additive = true;
} else {
ptile->ttrans->blending_procs = &cmyk_blending_procs;
ptile->ttrans->is_additive = false;
}
/* Set the blending mode in the ptile based upon the current
setting in the gs_gstate */
ptile->blending_mode = pgs->blend_mode;
/* Based upon if the tiles overlap pick the type of rect
fill that we will want to use */
if (ptile->has_overlap) {
/* This one does blending since there is tile overlap */
ptile->ttrans->pat_trans_fill = &tile_rect_trans_blend;
} else {
/* This one does no blending since there is no tile overlap */
ptile->ttrans->pat_trans_fill = &tile_rect_trans_simple;
}
/* Set the procs so that we use the proper filling method. */
gx_set_pattern_procs_trans((gx_device_color*) pdcolor);
/* Let the imaging stuff get set up */
code = gx_default_begin_typed_image(dev, pgs, pmat, pic,
prect, pdcolor,pcpath, mem, pinfo);
if (code < 0)
return code;
/* Now Push the group */
/* First apply the inverse of the image matrix to our
image size to get our bounding box. */
bbox_in.p.x = 0;
bbox_in.p.y = 0;
bbox_in.q.x = pim->Width;
bbox_in.q.y = pim->Height;
code = gs_bbox_transform_inverse(&bbox_in, &(pim->ImageMatrix),
&bbox_out);
if (code < 0)
return code;
/* That in turn will get hit by the matrix in the gs_gstate */
code = compute_group_device_int_rect(p14dev, &group_rect,
&bbox_out, (gs_gstate *)pgs);
if (code < 0)
return code;
if (!(pim->Width == 0 || pim->Height == 0)) {
if_debug2m('?', p14dev->ctx->memory,
"[v*] Pushing trans group patt_trans_image_fill, uid = %ld id = %ld \n",
ptile->uid.id, ptile->id);
code = pdf14_push_transparency_group(p14dev->ctx, &group_rect, 1, 0, 255,255,
pgs->blend_mode, 0, 0,
ptile->ttrans->n_chan-1, false, NULL,
NULL, (gs_gstate *)pgs, dev);
/* Set up the output buffer information now that we have
pushed the group */
fill_trans_buffer = new_pattern_trans_buff(pgs->memory);
pdf14_get_buffer_information(dev, fill_trans_buffer, NULL, false);
/* Store this in the appropriate place in pdcolor. This
is released later in pdf14_pattern_trans_render when
we are all done with the mask fill */
ptile->ttrans->fill_trans_buffer = fill_trans_buffer;
/* Change the renderer to handle this case so we can catch the
end. We will then pop the group and reset the pdcolor proc.
Keep the base renderer also. */
penum = (gx_image_enum *) *pinfo;
ptile->ttrans->image_render = penum->render;
penum->render = &pdf14_pattern_trans_render;
ptile->trans_group_popped = false;
}
return code;
}
|
pdf14_patt_trans_image_fill(gx_device * dev, const gs_gstate * pgs,
const gs_matrix *pmat, const gs_image_common_t *pic,
const gs_int_rect * prect,
const gx_drawing_color * pdcolor,
const gx_clip_path * pcpath, gs_memory_t * mem,
gx_image_enum_common_t ** pinfo)
{
const gs_image_t *pim = (const gs_image_t *)pic;
pdf14_device * p14dev = (pdf14_device *)dev;
gx_color_tile *ptile;
int code;
gs_int_rect group_rect;
gx_image_enum *penum;
gs_rect bbox_in, bbox_out;
gx_pattern_trans_t *fill_trans_buffer;
ptile = pdcolor->colors.pattern.p_tile;
/* Set up things in the ptile so that we get the proper
blending etc */
/* Set the blending procs and the is_additive setting based
upon the number of channels */
if (ptile->ttrans->n_chan-1 < 4) {
ptile->ttrans->blending_procs = &rgb_blending_procs;
ptile->ttrans->is_additive = true;
} else {
ptile->ttrans->blending_procs = &cmyk_blending_procs;
ptile->ttrans->is_additive = false;
}
/* Set the blending mode in the ptile based upon the current
setting in the gs_gstate */
ptile->blending_mode = pgs->blend_mode;
/* Based upon if the tiles overlap pick the type of rect
fill that we will want to use */
if (ptile->has_overlap) {
/* This one does blending since there is tile overlap */
ptile->ttrans->pat_trans_fill = &tile_rect_trans_blend;
} else {
/* This one does no blending since there is no tile overlap */
ptile->ttrans->pat_trans_fill = &tile_rect_trans_simple;
}
/* Set the procs so that we use the proper filling method. */
gx_set_pattern_procs_trans((gx_device_color*) pdcolor);
/* Let the imaging stuff get set up */
code = gx_default_begin_typed_image(dev, pgs, pmat, pic,
prect, pdcolor,pcpath, mem, pinfo);
if (code < 0)
return code;
/* Now Push the group */
/* First apply the inverse of the image matrix to our
image size to get our bounding box. */
bbox_in.p.x = 0;
bbox_in.p.y = 0;
bbox_in.q.x = pim->Width;
bbox_in.q.y = pim->Height;
code = gs_bbox_transform_inverse(&bbox_in, &(pim->ImageMatrix),
&bbox_out);
if (code < 0)
return code;
/* That in turn will get hit by the matrix in the gs_gstate */
code = compute_group_device_int_rect(p14dev, &group_rect,
&bbox_out, (gs_gstate *)pgs);
if (code < 0)
return code;
if (!(pim->Width == 0 || pim->Height == 0)) {
if_debug2m('?', p14dev->ctx->memory,
"[v*] Pushing trans group patt_trans_image_fill, uid = %ld id = %ld \n",
ptile->uid.id, ptile->id);
code = pdf14_push_transparency_group(p14dev->ctx, &group_rect, 1, 0, 255,255,
pgs->blend_mode, 0, 0,
ptile->ttrans->n_chan-1, false, NULL,
NULL, (gs_gstate *)pgs, dev);
/* Set up the output buffer information now that we have
pushed the group */
fill_trans_buffer = new_pattern_trans_buff(pgs->memory);
pdf14_get_buffer_information(dev, fill_trans_buffer, NULL, false);
/* Store this in the appropriate place in pdcolor. This
is released later in pdf14_pattern_trans_render when
we are all done with the mask fill */
ptile->ttrans->fill_trans_buffer = fill_trans_buffer;
/* Change the renderer to handle this case so we can catch the
end. We will then pop the group and reset the pdcolor proc.
Keep the base renderer also. */
penum = (gx_image_enum *) *pinfo;
ptile->ttrans->image_render = penum->render;
penum->render = &pdf14_pattern_trans_render;
ptile->trans_group_popped = false;
}
return code;
}
|
C
|
ghostscript
| 0 |
CVE-2017-15088
|
https://www.cvedetails.com/cve/CVE-2017-15088/
|
CWE-119
|
https://github.com/krb5/krb5/commit/fbb687db1088ddd894d975996e5f6a4252b9a2b4
|
fbb687db1088ddd894d975996e5f6a4252b9a2b4
|
Fix PKINIT cert matching data construction
Rewrite X509_NAME_oneline_ex() and its call sites to use dynamic
allocation and to perform proper error checking.
ticket: 8617
target_version: 1.16
target_version: 1.15-next
target_version: 1.14-next
tags: pullup
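The pattern the rewrite describes — rendering an X509_NAME into a dynamically sized, fully error-checked buffer instead of a fixed one — can be sketched with stock OpenSSL calls (a hedged sketch, not the actual krb5 code):
#include <stdlib.h>
#include <string.h>
#include <openssl/bio.h>
#include <openssl/x509.h>
/* Returns a NUL-terminated heap string on success, NULL on any failure. */
static char *x509_name_to_string(X509_NAME *name)
{
	BIO *bio = BIO_new(BIO_s_mem());
	char *data, *out = NULL;
	long len;
	if (bio == NULL)
		return NULL;
	if (X509_NAME_print_ex(bio, name, 0, XN_FLAG_SEP_COMMA_PLUS) < 0)
		goto cleanup;
	len = BIO_get_mem_data(bio, &data);
	if (len < 0)
		goto cleanup;
	out = malloc(len + 1);
	if (out != NULL) {
		memcpy(out, data, len);
		out[len] = '\0';
	}
cleanup:
	BIO_free(bio);
	return out;
}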
|
static void compat_dh_get0_key(const DH *dh, const BIGNUM **pub,
const BIGNUM **priv)
{
if (pub != NULL)
*pub = dh->pub_key;
if (priv != NULL)
*priv = dh->priv_key;
}
|
static void compat_dh_get0_key(const DH *dh, const BIGNUM **pub,
const BIGNUM **priv)
{
if (pub != NULL)
*pub = dh->pub_key;
if (priv != NULL)
*priv = dh->priv_key;
}
|
C
|
krb5
| 0 |
CVE-2017-15386
|
https://www.cvedetails.com/cve/CVE-2017-15386/
|
CWE-20
|
https://github.com/chromium/chromium/commit/ba3b1b344017bbf36283464b51014fad15c2f3f4
|
ba3b1b344017bbf36283464b51014fad15c2f3f4
|
If a page shows a popup, end fullscreen.
This was implemented in Blink r159834, but it is susceptible
to a popup/fullscreen race. This CL reverts that implementation
and re-implements it in WebContents.
BUG=752003
TEST=WebContentsImplBrowserTest.PopupsFromJavaScriptEndFullscreen
Change-Id: Ia345cdeda273693c3231ad8f486bebfc3d83927f
Reviewed-on: https://chromium-review.googlesource.com/606987
Commit-Queue: Avi Drissman <[email protected]>
Reviewed-by: Charlie Reis <[email protected]>
Reviewed-by: Philip Jägenstedt <[email protected]>
Cr-Commit-Position: refs/heads/master@{#498171}
|
void WebContentsImpl::OnDialogClosed(int render_process_id,
int render_frame_id,
IPC::Message* reply_msg,
bool dialog_was_suppressed,
bool success,
const base::string16& user_input) {
RenderFrameHostImpl* rfh = RenderFrameHostImpl::FromID(render_process_id,
render_frame_id);
last_dialog_suppressed_ = dialog_was_suppressed;
if (is_showing_before_unload_dialog_ && !success) {
if (rfh && rfh == rfh->frame_tree_node()->current_frame_host()) {
rfh->frame_tree_node()->BeforeUnloadCanceled();
controller_.DiscardNonCommittedEntries();
}
for (auto& observer : observers_)
observer.BeforeUnloadDialogCancelled();
}
if (rfh) {
rfh->JavaScriptDialogClosed(reply_msg, success, user_input);
std::vector<protocol::PageHandler*> page_handlers =
protocol::PageHandler::EnabledForWebContents(this);
for (auto* handler : page_handlers)
handler->DidCloseJavaScriptDialog(success, user_input);
} else {
delete reply_msg;
}
is_showing_javascript_dialog_ = false;
is_showing_before_unload_dialog_ = false;
}
|
void WebContentsImpl::OnDialogClosed(int render_process_id,
int render_frame_id,
IPC::Message* reply_msg,
bool dialog_was_suppressed,
bool success,
const base::string16& user_input) {
RenderFrameHostImpl* rfh = RenderFrameHostImpl::FromID(render_process_id,
render_frame_id);
last_dialog_suppressed_ = dialog_was_suppressed;
if (is_showing_before_unload_dialog_ && !success) {
if (rfh && rfh == rfh->frame_tree_node()->current_frame_host()) {
rfh->frame_tree_node()->BeforeUnloadCanceled();
controller_.DiscardNonCommittedEntries();
}
for (auto& observer : observers_)
observer.BeforeUnloadDialogCancelled();
}
if (rfh) {
rfh->JavaScriptDialogClosed(reply_msg, success, user_input);
std::vector<protocol::PageHandler*> page_handlers =
protocol::PageHandler::EnabledForWebContents(this);
for (auto* handler : page_handlers)
handler->DidCloseJavaScriptDialog(success, user_input);
} else {
delete reply_msg;
}
is_showing_javascript_dialog_ = false;
is_showing_before_unload_dialog_ = false;
}
|
C
|
Chrome
| 0 |
CVE-2013-6635
|
https://www.cvedetails.com/cve/CVE-2013-6635/
|
CWE-399
|
https://github.com/chromium/chromium/commit/6b96dd532af164a73f2aac757bafff58211aca2c
|
6b96dd532af164a73f2aac757bafff58211aca2c
|
Revert "Load web contents after tab is created."
This reverts commit 4c55f398def3214369aefa9f2f2e8f5940d3799d.
BUG=432562
[email protected],[email protected],[email protected]
Review URL: https://codereview.chromium.org/894003005
Cr-Commit-Position: refs/heads/master@{#314469}
|
ScopedJavaLocalRef<jstring> WebContentsAndroid::GetURL(JNIEnv* env,
jobject obj) const {
return ConvertUTF8ToJavaString(env, web_contents_->GetURL().spec());
}
|
ScopedJavaLocalRef<jstring> WebContentsAndroid::GetURL(JNIEnv* env,
jobject obj) const {
return ConvertUTF8ToJavaString(env, web_contents_->GetURL().spec());
}
|
C
|
Chrome
| 0 |
CVE-2017-5118
|
https://www.cvedetails.com/cve/CVE-2017-5118/
|
CWE-732
|
https://github.com/chromium/chromium/commit/0ab2412a104d2f235d7b9fe19d30ef605a410832
|
0ab2412a104d2f235d7b9fe19d30ef605a410832
|
Inherit CSP when we inherit the security origin
This prevents attacks that use main window navigation to get out of the
existing CSP constraints, such as the one in the related bug
Bug: 747847
Change-Id: I1e57b50da17f65d38088205b0a3c7c49ef2ae4d8
Reviewed-on: https://chromium-review.googlesource.com/592027
Reviewed-by: Mike West <[email protected]>
Commit-Queue: Andy Paicu <[email protected]>
Cr-Commit-Position: refs/heads/master@{#492333}
|
WebSandboxFlags WebLocalFrameImpl::EffectiveSandboxFlags() const {
if (!GetFrame())
return WebSandboxFlags::kNone;
return static_cast<WebSandboxFlags>(
GetFrame()->Loader().EffectiveSandboxFlags());
}
|
WebSandboxFlags WebLocalFrameImpl::EffectiveSandboxFlags() const {
if (!GetFrame())
return WebSandboxFlags::kNone;
return static_cast<WebSandboxFlags>(
GetFrame()->Loader().EffectiveSandboxFlags());
}
|
C
|
Chrome
| 0 |
CVE-2011-3188
|
https://www.cvedetails.com/cve/CVE-2011-3188/
| null |
https://github.com/torvalds/linux/commit/6e5714eaf77d79ae1c8b47e3e040ff5411b717ec
|
6e5714eaf77d79ae1c8b47e3e040ff5411b717ec
|
net: Compute protocol sequence numbers and fragment IDs using MD5.
Computers have become a lot faster since we compromised on the
partial MD4 hash which we use currently for performance reasons.
MD5 is a much safer choice, and is inline with both RFC1948 and
other ISS generators (OpenBSD, Solaris, etc.)
Furthermore, only having 24-bits of the sequence number be truly
unpredictable is a very serious limitation. So the periodic
regeneration and 8-bit counter have been removed. We compute and
use a full 32-bit sequence number.
For ipv6, DCCP was found to use a 32-bit truncated initial sequence
number (it needs 43-bits) and that is fixed here as well.
Reported-by: Dan Kaminsky <[email protected]>
Tested-by: Willy Tarreau <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
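Conceptually the new scheme is a keyed hash over the connection 4-tuple whose full 32-bit output is used, offset by a fine-grained clock. A toy sketch (mix32 is a non-cryptographic stand-in for the MD5 step, not a kernel API):
#include <stdint.h>
/* Placeholder mixer; the real implementation runs MD5 over the words
 * together with a per-boot secret. */
static uint32_t mix32(const uint32_t *w, int n)
{
	uint32_t h = 0x9e3779b9u;
	for (int i = 0; i < n; i++) {
		h ^= w[i];
		h *= 0x85ebca6bu;
		h ^= h >> 16;
	}
	return h;
}
static uint32_t secure_isn_sketch(uint32_t saddr, uint32_t daddr,
				  uint16_t sport, uint16_t dport,
				  const uint32_t secret[4], uint64_t clock_ns)
{
	uint32_t in[7] = { saddr, daddr,
			   ((uint32_t)sport << 16) | dport,
			   secret[0], secret[1], secret[2], secret[3] };
	/* all 32 bits of the hash are unpredictable, unlike the old
	 * scheme where only 24 bits were */
	return mix32(in, 7) + (uint32_t)(clock_ns >> 6);
}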
|
static void rt_do_flush(struct net *net, int process_context)
{
unsigned int i;
struct rtable *rth, *next;
for (i = 0; i <= rt_hash_mask; i++) {
struct rtable __rcu **pprev;
struct rtable *list;
if (process_context && need_resched())
cond_resched();
rth = rcu_dereference_raw(rt_hash_table[i].chain);
if (!rth)
continue;
spin_lock_bh(rt_hash_lock_addr(i));
list = NULL;
pprev = &rt_hash_table[i].chain;
rth = rcu_dereference_protected(*pprev,
lockdep_is_held(rt_hash_lock_addr(i)));
while (rth) {
next = rcu_dereference_protected(rth->dst.rt_next,
lockdep_is_held(rt_hash_lock_addr(i)));
if (!net ||
net_eq(dev_net(rth->dst.dev), net)) {
rcu_assign_pointer(*pprev, next);
rcu_assign_pointer(rth->dst.rt_next, list);
list = rth;
} else {
pprev = &rth->dst.rt_next;
}
rth = next;
}
spin_unlock_bh(rt_hash_lock_addr(i));
for (; list; list = next) {
next = rcu_dereference_protected(list->dst.rt_next, 1);
rt_free(list);
}
}
}
|
static void rt_do_flush(struct net *net, int process_context)
{
unsigned int i;
struct rtable *rth, *next;
for (i = 0; i <= rt_hash_mask; i++) {
struct rtable __rcu **pprev;
struct rtable *list;
if (process_context && need_resched())
cond_resched();
rth = rcu_dereference_raw(rt_hash_table[i].chain);
if (!rth)
continue;
spin_lock_bh(rt_hash_lock_addr(i));
list = NULL;
pprev = &rt_hash_table[i].chain;
rth = rcu_dereference_protected(*pprev,
lockdep_is_held(rt_hash_lock_addr(i)));
while (rth) {
next = rcu_dereference_protected(rth->dst.rt_next,
lockdep_is_held(rt_hash_lock_addr(i)));
if (!net ||
net_eq(dev_net(rth->dst.dev), net)) {
rcu_assign_pointer(*pprev, next);
rcu_assign_pointer(rth->dst.rt_next, list);
list = rth;
} else {
pprev = &rth->dst.rt_next;
}
rth = next;
}
spin_unlock_bh(rt_hash_lock_addr(i));
for (; list; list = next) {
next = rcu_dereference_protected(list->dst.rt_next, 1);
rt_free(list);
}
}
}
|
C
|
linux
| 0 |
CVE-2017-6903
|
https://www.cvedetails.com/cve/CVE-2017-6903/
|
CWE-269
|
https://github.com/iortcw/iortcw/commit/b6ff2bcb1e4e6976d61e316175c6d7c99860fe20
|
b6ff2bcb1e4e6976d61e316175c6d7c99860fe20
|
All: Don't load .pk3s as .dlls, and don't load user config files from .pk3s
|
int FS_FindVM(void **startSearch, char *found, int foundlen, const char *name, qboolean unpure, int enableQvm)
{
searchpath_t *search, *lastSearch;
directory_t *dir;
pack_t *pack;
char dllName[MAX_OSPATH], qvmName[MAX_OSPATH];
char *netpath;
if(!fs_searchpaths)
Com_Error(ERR_FATAL, "Filesystem call made without initialization");
if(enableQvm)
Com_sprintf(qvmName, sizeof(qvmName), "vm/%s.mp.qvm", name);
Q_strncpyz(dllName, Sys_GetDLLName(name), sizeof(dllName));
lastSearch = *startSearch;
if(*startSearch == NULL)
search = fs_searchpaths;
else
search = lastSearch->next;
while(search)
{
if(search->dir && (unpure || !Q_stricmp(name, "qagame")))
{
dir = search->dir;
netpath = FS_BuildOSPath(dir->path, dir->gamedir, dllName);
if(enableQvm && FS_FOpenFileReadDir(qvmName, search, NULL, qfalse, unpure) > 0)
{
*startSearch = search;
return VMI_COMPILED;
}
if(dir->allowUnzippedDLLs && FS_FileInPathExists(netpath))
{
Q_strncpyz(found, netpath, foundlen);
*startSearch = search;
return VMI_NATIVE;
}
}
else if(search->pack)
{
pack = search->pack;
if(lastSearch && lastSearch->pack)
{
if(!FS_FilenameCompare(lastSearch->pack->pakPathname, pack->pakPathname))
{
search = search->next;
continue;
}
}
if(enableQvm && FS_FOpenFileReadDir(qvmName, search, NULL, qfalse, unpure) > 0)
{
*startSearch = search;
return VMI_COMPILED;
}
#ifndef DEDICATED
if (Q_stricmp(name, "qagame"))
{
netpath = FS_BuildOSPath(fs_homepath->string, pack->pakGamename, dllName);
if (FS_FOpenFileReadDir(dllName, search, NULL, qfalse, unpure) > 0
&& FS_CL_ExtractFromPakFile(search, netpath, dllName, NULL))
{
Com_Printf( "Loading %s dll from %s\n", name, pack->pakFilename );
Q_strncpyz(found, netpath, foundlen);
*startSearch = search;
return VMI_NATIVE;
}
}
#endif
}
search = search->next;
}
return -1;
}
|
int FS_FindVM(void **startSearch, char *found, int foundlen, const char *name, qboolean unpure, int enableQvm)
{
searchpath_t *search, *lastSearch;
directory_t *dir;
pack_t *pack;
char dllName[MAX_OSPATH], qvmName[MAX_OSPATH];
char *netpath;
if(!fs_searchpaths)
Com_Error(ERR_FATAL, "Filesystem call made without initialization");
if(enableQvm)
Com_sprintf(qvmName, sizeof(qvmName), "vm/%s.mp.qvm", name);
Q_strncpyz(dllName, Sys_GetDLLName(name), sizeof(dllName));
lastSearch = *startSearch;
if(*startSearch == NULL)
search = fs_searchpaths;
else
search = lastSearch->next;
while(search)
{
if(search->dir && (unpure || !Q_stricmp(name, "qagame")))
{
dir = search->dir;
netpath = FS_BuildOSPath(dir->path, dir->gamedir, dllName);
if(enableQvm && FS_FOpenFileReadDir(qvmName, search, NULL, qfalse, unpure) > 0)
{
*startSearch = search;
return VMI_COMPILED;
}
if(dir->allowUnzippedDLLs && FS_FileInPathExists(netpath))
{
Q_strncpyz(found, netpath, foundlen);
*startSearch = search;
return VMI_NATIVE;
}
}
else if(search->pack)
{
pack = search->pack;
if(lastSearch && lastSearch->pack)
{
if(!FS_FilenameCompare(lastSearch->pack->pakPathname, pack->pakPathname))
{
search = search->next;
continue;
}
}
if(enableQvm && FS_FOpenFileReadDir(qvmName, search, NULL, qfalse, unpure) > 0)
{
*startSearch = search;
return VMI_COMPILED;
}
#ifndef DEDICATED
if (Q_stricmp(name, "qagame"))
{
netpath = FS_BuildOSPath(fs_homepath->string, pack->pakGamename, dllName);
if (FS_FOpenFileReadDir(dllName, search, NULL, qfalse, unpure) > 0
&& FS_CL_ExtractFromPakFile(search, netpath, dllName, NULL))
{
Com_Printf( "Loading %s dll from %s\n", name, pack->pakFilename );
Q_strncpyz(found, netpath, foundlen);
*startSearch = search;
return VMI_NATIVE;
}
}
#endif
}
search = search->next;
}
return -1;
}
|
C
|
OpenJK
| 0 |
CVE-2014-2270
|
https://www.cvedetails.com/cve/CVE-2014-2270/
|
CWE-119
|
https://github.com/file/file/commit/447558595a3650db2886cd2f416ad0beba965801
|
447558595a3650db2886cd2f416ad0beba965801
|
PR/313: Aaron Reffett: Check properly for exceeding the offset.
|
match(struct magic_set *ms, struct magic *magic, uint32_t nmagic,
const unsigned char *s, size_t nbytes, size_t offset, int mode, int text,
int flip, int recursion_level, int *printed_something, int *need_separator,
int *returnval)
{
uint32_t magindex = 0;
unsigned int cont_level = 0;
int returnvalv = 0, e; /* if a match is found it is set to 1*/
int firstline = 1; /* a flag to print X\n X\n- X */
int print = (ms->flags & (MAGIC_MIME|MAGIC_APPLE)) == 0;
if (returnval == NULL)
returnval = &returnvalv;
if (file_check_mem(ms, cont_level) == -1)
return -1;
for (magindex = 0; magindex < nmagic; magindex++) {
int flush = 0;
struct magic *m = &magic[magindex];
if (m->type != FILE_NAME)
if ((IS_STRING(m->type) &&
#define FLT (STRING_BINTEST | STRING_TEXTTEST)
((text && (m->str_flags & FLT) == STRING_BINTEST) ||
(!text && (m->str_flags & FLT) == STRING_TEXTTEST))) ||
(m->flag & mode) != mode) {
/* Skip sub-tests */
while (magindex + 1 < nmagic &&
magic[magindex + 1].cont_level != 0 &&
++magindex)
continue;
continue; /* Skip to next top-level test*/
}
ms->offset = m->offset;
ms->line = m->lineno;
/* if main entry matches, print it... */
switch (mget(ms, s, m, nbytes, offset, cont_level, mode, text,
flip, recursion_level + 1, printed_something,
need_separator, returnval)) {
case -1:
return -1;
case 0:
flush = m->reln != '!';
break;
default:
if (m->type == FILE_INDIRECT)
*returnval = 1;
switch (magiccheck(ms, m)) {
case -1:
return -1;
case 0:
flush++;
break;
default:
flush = 0;
break;
}
break;
}
if (flush) {
/*
* main entry didn't match,
* flush its continuations
*/
while (magindex < nmagic - 1 &&
magic[magindex + 1].cont_level != 0)
magindex++;
continue;
}
if ((e = handle_annotation(ms, m)) != 0) {
*need_separator = 1;
*printed_something = 1;
*returnval = 1;
return e;
}
/*
* If we are going to print something, we'll need to print
* a blank before we print something else.
*/
if (*m->desc) {
*need_separator = 1;
*printed_something = 1;
if (print_sep(ms, firstline) == -1)
return -1;
}
if (print && mprint(ms, m) == -1)
return -1;
ms->c.li[cont_level].off = moffset(ms, m);
/* and any continuations that match */
if (file_check_mem(ms, ++cont_level) == -1)
return -1;
while (magic[magindex+1].cont_level != 0 &&
++magindex < nmagic) {
m = &magic[magindex];
ms->line = m->lineno; /* for messages */
if (cont_level < m->cont_level)
continue;
if (cont_level > m->cont_level) {
/*
* We're at the end of the level
* "cont_level" continuations.
*/
cont_level = m->cont_level;
}
ms->offset = m->offset;
if (m->flag & OFFADD) {
ms->offset +=
ms->c.li[cont_level - 1].off;
}
#ifdef ENABLE_CONDITIONALS
if (m->cond == COND_ELSE ||
m->cond == COND_ELIF) {
if (ms->c.li[cont_level].last_match == 1)
continue;
}
#endif
switch (mget(ms, s, m, nbytes, offset, cont_level, mode,
text, flip, recursion_level + 1, printed_something,
need_separator, returnval)) {
case -1:
return -1;
case 0:
if (m->reln != '!')
continue;
flush = 1;
break;
default:
if (m->type == FILE_INDIRECT)
*returnval = 1;
flush = 0;
break;
}
switch (flush ? 1 : magiccheck(ms, m)) {
case -1:
return -1;
case 0:
#ifdef ENABLE_CONDITIONALS
ms->c.li[cont_level].last_match = 0;
#endif
break;
default:
#ifdef ENABLE_CONDITIONALS
ms->c.li[cont_level].last_match = 1;
#endif
if (m->type == FILE_CLEAR)
ms->c.li[cont_level].got_match = 0;
else if (ms->c.li[cont_level].got_match) {
if (m->type == FILE_DEFAULT)
break;
} else
ms->c.li[cont_level].got_match = 1;
if ((e = handle_annotation(ms, m)) != 0) {
*need_separator = 1;
*printed_something = 1;
*returnval = 1;
return e;
}
/*
* If we are going to print something,
* make sure that we have a separator first.
*/
if (*m->desc) {
if (!*printed_something) {
*printed_something = 1;
if (print_sep(ms, firstline)
== -1)
return -1;
}
}
/*
* This continuation matched. Print
* its message, with a blank before it
* if the previous item printed and
* this item isn't empty.
*/
/* space if previous printed */
if (*need_separator
&& ((m->flag & NOSPACE) == 0)
&& *m->desc) {
if (print &&
file_printf(ms, " ") == -1)
return -1;
*need_separator = 0;
}
if (print && mprint(ms, m) == -1)
return -1;
ms->c.li[cont_level].off = moffset(ms, m);
if (*m->desc)
*need_separator = 1;
/*
* If we see any continuations
* at a higher level,
* process them.
*/
if (file_check_mem(ms, ++cont_level) == -1)
return -1;
break;
}
}
if (*printed_something) {
firstline = 0;
if (print)
*returnval = 1;
}
if ((ms->flags & MAGIC_CONTINUE) == 0 && *printed_something) {
return *returnval; /* don't keep searching */
}
}
return *returnval; /* This is hit if -k is set or there is no match */
}
|
match(struct magic_set *ms, struct magic *magic, uint32_t nmagic,
const unsigned char *s, size_t nbytes, size_t offset, int mode, int text,
int flip, int recursion_level, int *printed_something, int *need_separator,
int *returnval)
{
uint32_t magindex = 0;
unsigned int cont_level = 0;
int returnvalv = 0, e; /* if a match is found it is set to 1*/
int firstline = 1; /* a flag to print X\n X\n- X */
int print = (ms->flags & (MAGIC_MIME|MAGIC_APPLE)) == 0;
if (returnval == NULL)
returnval = &returnvalv;
if (file_check_mem(ms, cont_level) == -1)
return -1;
for (magindex = 0; magindex < nmagic; magindex++) {
int flush = 0;
struct magic *m = &magic[magindex];
if (m->type != FILE_NAME)
if ((IS_STRING(m->type) &&
#define FLT (STRING_BINTEST | STRING_TEXTTEST)
((text && (m->str_flags & FLT) == STRING_BINTEST) ||
(!text && (m->str_flags & FLT) == STRING_TEXTTEST))) ||
(m->flag & mode) != mode) {
/* Skip sub-tests */
while (magindex + 1 < nmagic &&
magic[magindex + 1].cont_level != 0 &&
++magindex)
continue;
continue; /* Skip to next top-level test*/
}
ms->offset = m->offset;
ms->line = m->lineno;
/* if main entry matches, print it... */
switch (mget(ms, s, m, nbytes, offset, cont_level, mode, text,
flip, recursion_level + 1, printed_something,
need_separator, returnval)) {
case -1:
return -1;
case 0:
flush = m->reln != '!';
break;
default:
if (m->type == FILE_INDIRECT)
*returnval = 1;
switch (magiccheck(ms, m)) {
case -1:
return -1;
case 0:
flush++;
break;
default:
flush = 0;
break;
}
break;
}
if (flush) {
/*
* main entry didn't match,
* flush its continuations
*/
while (magindex < nmagic - 1 &&
magic[magindex + 1].cont_level != 0)
magindex++;
continue;
}
if ((e = handle_annotation(ms, m)) != 0) {
*need_separator = 1;
*printed_something = 1;
*returnval = 1;
return e;
}
/*
* If we are going to print something, we'll need to print
* a blank before we print something else.
*/
if (*m->desc) {
*need_separator = 1;
*printed_something = 1;
if (print_sep(ms, firstline) == -1)
return -1;
}
if (print && mprint(ms, m) == -1)
return -1;
ms->c.li[cont_level].off = moffset(ms, m);
/* and any continuations that match */
if (file_check_mem(ms, ++cont_level) == -1)
return -1;
while (magic[magindex+1].cont_level != 0 &&
++magindex < nmagic) {
m = &magic[magindex];
ms->line = m->lineno; /* for messages */
if (cont_level < m->cont_level)
continue;
if (cont_level > m->cont_level) {
/*
* We're at the end of the level
* "cont_level" continuations.
*/
cont_level = m->cont_level;
}
ms->offset = m->offset;
if (m->flag & OFFADD) {
ms->offset +=
ms->c.li[cont_level - 1].off;
}
#ifdef ENABLE_CONDITIONALS
if (m->cond == COND_ELSE ||
m->cond == COND_ELIF) {
if (ms->c.li[cont_level].last_match == 1)
continue;
}
#endif
switch (mget(ms, s, m, nbytes, offset, cont_level, mode,
text, flip, recursion_level + 1, printed_something,
need_separator, returnval)) {
case -1:
return -1;
case 0:
if (m->reln != '!')
continue;
flush = 1;
break;
default:
if (m->type == FILE_INDIRECT)
*returnval = 1;
flush = 0;
break;
}
switch (flush ? 1 : magiccheck(ms, m)) {
case -1:
return -1;
case 0:
#ifdef ENABLE_CONDITIONALS
ms->c.li[cont_level].last_match = 0;
#endif
break;
default:
#ifdef ENABLE_CONDITIONALS
ms->c.li[cont_level].last_match = 1;
#endif
if (m->type == FILE_CLEAR)
ms->c.li[cont_level].got_match = 0;
else if (ms->c.li[cont_level].got_match) {
if (m->type == FILE_DEFAULT)
break;
} else
ms->c.li[cont_level].got_match = 1;
if ((e = handle_annotation(ms, m)) != 0) {
*need_separator = 1;
*printed_something = 1;
*returnval = 1;
return e;
}
/*
* If we are going to print something,
* make sure that we have a separator first.
*/
if (*m->desc) {
if (!*printed_something) {
*printed_something = 1;
if (print_sep(ms, firstline)
== -1)
return -1;
}
}
/*
* This continuation matched. Print
* its message, with a blank before it
* if the previous item printed and
* this item isn't empty.
*/
/* space if previous printed */
if (*need_separator
&& ((m->flag & NOSPACE) == 0)
&& *m->desc) {
if (print &&
file_printf(ms, " ") == -1)
return -1;
*need_separator = 0;
}
if (print && mprint(ms, m) == -1)
return -1;
ms->c.li[cont_level].off = moffset(ms, m);
if (*m->desc)
*need_separator = 1;
/*
* If we see any continuations
* at a higher level,
* process them.
*/
if (file_check_mem(ms, ++cont_level) == -1)
return -1;
break;
}
}
if (*printed_something) {
firstline = 0;
if (print)
*returnval = 1;
}
if ((ms->flags & MAGIC_CONTINUE) == 0 && *printed_something) {
return *returnval; /* don't keep searching */
}
}
return *returnval; /* This is hit if -k is set or there is no match */
}
|
C
|
file
| 0 |
CVE-2018-7480
|
https://www.cvedetails.com/cve/CVE-2018-7480/
|
CWE-415
|
https://github.com/torvalds/linux/commit/9b54d816e00425c3a517514e0d677bb3cec49258
|
9b54d816e00425c3a517514e0d677bb3cec49258
|
blkcg: fix double free of new_blkg in blkcg_init_queue
If blkg_create fails, the new_blkg passed as an argument will
be freed by blkg_create, so there is no need to free it again.
Signed-off-by: Hou Tao <[email protected]>
Signed-off-by: Jens Axboe <[email protected]>
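The ownership rule behind the fix, reduced to a self-contained sketch (names are illustrative): once the object is handed to the create function, that function owns it even on failure, so the caller must not free it again.
#include <stdlib.h>
struct blkg_sketch { int dummy; };
/* Owns new_blkg: frees it itself on the error path. */
static struct blkg_sketch *blkg_create_sketch(struct blkg_sketch *new_blkg,
					      int fail)
{
	if (fail) {
		free(new_blkg);
		return NULL;
	}
	return new_blkg;
}
static int init_queue_sketch(int fail)
{
	struct blkg_sketch *new_blkg = malloc(sizeof(*new_blkg));
	struct blkg_sketch *blkg;
	if (new_blkg == NULL)
		return -1;
	blkg = blkg_create_sketch(new_blkg, fail);
	if (blkg == NULL)
		return -1;	/* do NOT free(new_blkg) here: that was the double free */
	free(blkg);
	return 0;
}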
|
static void blkg_free(struct blkcg_gq *blkg)
{
int i;
if (!blkg)
return;
for (i = 0; i < BLKCG_MAX_POLS; i++)
if (blkg->pd[i])
blkcg_policy[i]->pd_free_fn(blkg->pd[i]);
if (blkg->blkcg != &blkcg_root)
blk_exit_rl(&blkg->rl);
blkg_rwstat_exit(&blkg->stat_ios);
blkg_rwstat_exit(&blkg->stat_bytes);
kfree(blkg);
}
|
static void blkg_free(struct blkcg_gq *blkg)
{
int i;
if (!blkg)
return;
for (i = 0; i < BLKCG_MAX_POLS; i++)
if (blkg->pd[i])
blkcg_policy[i]->pd_free_fn(blkg->pd[i]);
if (blkg->blkcg != &blkcg_root)
blk_exit_rl(&blkg->rl);
blkg_rwstat_exit(&blkg->stat_ios);
blkg_rwstat_exit(&blkg->stat_bytes);
kfree(blkg);
}
|
C
|
linux
| 0 |
CVE-2018-19489
|
https://www.cvedetails.com/cve/CVE-2018-19489/
|
CWE-362
|
https://git.qemu.org/?p=qemu.git;a=commit;h=1d20398694a3b67a388d955b7a945ba4aa90a8a8
|
1d20398694a3b67a388d955b7a945ba4aa90a8a8
| null |
static void coroutine_fn v9fs_unlinkat(void *opaque)
{
int err = 0;
V9fsString name;
int32_t dfid, flags, rflags = 0;
size_t offset = 7;
V9fsPath path;
V9fsFidState *dfidp;
V9fsPDU *pdu = opaque;
v9fs_string_init(&name);
err = pdu_unmarshal(pdu, offset, "dsd", &dfid, &name, &flags);
if (err < 0) {
goto out_nofid;
}
if (name_is_illegal(name.data)) {
err = -ENOENT;
goto out_nofid;
}
if (!strcmp(".", name.data)) {
err = -EINVAL;
goto out_nofid;
}
if (!strcmp("..", name.data)) {
err = -ENOTEMPTY;
goto out_nofid;
}
if (flags & ~P9_DOTL_AT_REMOVEDIR) {
err = -EINVAL;
goto out_nofid;
}
if (flags & P9_DOTL_AT_REMOVEDIR) {
rflags |= AT_REMOVEDIR;
}
dfidp = get_fid(pdu, dfid);
if (dfidp == NULL) {
err = -EINVAL;
goto out_nofid;
}
/*
* IF the file is unlinked, we cannot reopen
* the file later. So don't reclaim fd
*/
v9fs_path_init(&path);
err = v9fs_co_name_to_path(pdu, &dfidp->path, name.data, &path);
if (err < 0) {
goto out_err;
}
err = v9fs_mark_fids_unreclaim(pdu, &path);
if (err < 0) {
goto out_err;
}
err = v9fs_co_unlinkat(pdu, &dfidp->path, &name, rflags);
if (!err) {
err = offset;
}
out_err:
put_fid(pdu, dfidp);
v9fs_path_free(&path);
out_nofid:
pdu_complete(pdu, err);
v9fs_string_free(&name);
}
|
static void coroutine_fn v9fs_unlinkat(void *opaque)
{
int err = 0;
V9fsString name;
int32_t dfid, flags, rflags = 0;
size_t offset = 7;
V9fsPath path;
V9fsFidState *dfidp;
V9fsPDU *pdu = opaque;
v9fs_string_init(&name);
err = pdu_unmarshal(pdu, offset, "dsd", &dfid, &name, &flags);
if (err < 0) {
goto out_nofid;
}
if (name_is_illegal(name.data)) {
err = -ENOENT;
goto out_nofid;
}
if (!strcmp(".", name.data)) {
err = -EINVAL;
goto out_nofid;
}
if (!strcmp("..", name.data)) {
err = -ENOTEMPTY;
goto out_nofid;
}
if (flags & ~P9_DOTL_AT_REMOVEDIR) {
err = -EINVAL;
goto out_nofid;
}
if (flags & P9_DOTL_AT_REMOVEDIR) {
rflags |= AT_REMOVEDIR;
}
dfidp = get_fid(pdu, dfid);
if (dfidp == NULL) {
err = -EINVAL;
goto out_nofid;
}
/*
* IF the file is unlinked, we cannot reopen
* the file later. So don't reclaim fd
*/
v9fs_path_init(&path);
err = v9fs_co_name_to_path(pdu, &dfidp->path, name.data, &path);
if (err < 0) {
goto out_err;
}
err = v9fs_mark_fids_unreclaim(pdu, &path);
if (err < 0) {
goto out_err;
}
err = v9fs_co_unlinkat(pdu, &dfidp->path, &name, rflags);
if (!err) {
err = offset;
}
out_err:
put_fid(pdu, dfidp);
v9fs_path_free(&path);
out_nofid:
pdu_complete(pdu, err);
v9fs_string_free(&name);
}
|
C
|
qemu
| 0 |
CVE-2015-8374
|
https://www.cvedetails.com/cve/CVE-2015-8374/
|
CWE-200
|
https://github.com/torvalds/linux/commit/0305cd5f7fca85dae392b9ba85b116896eb7c1c7
|
0305cd5f7fca85dae392b9ba85b116896eb7c1c7
|
Btrfs: fix truncation of compressed and inlined extents
When truncating a file to a smaller size which consists of an inline
extent that is compressed, we did not discard (or make unusable) the
data between the new file size and the old file size, wasting metadata
space and allowing for the truncated data to be leaked and the data
corruption/loss mentioned below.
We were also not correctly decrementing the number of bytes used by the
inode, we were setting it to zero, giving a wrong report for callers of
the stat(2) syscall. The fsck tool also reported an error about a mismatch
between the nbytes of the file versus the real space used by the file.
Now because we weren't discarding the truncated region of the file, it
was possible for a caller of the clone ioctl to actually read the data
that was truncated, allowing for a security breach without requiring root
access to the system, using only standard filesystem operations. The
scenario is the following:
1) User A creates a file which consists of an inline and compressed
extent with a size of 2000 bytes - the file is not accessible to
any other users (no read, write or execution permission for anyone
else);
2) The user truncates the file to a size of 1000 bytes;
3) User A makes the file world readable;
4) User B creates a file consisting of an inline extent of 2000 bytes;
5) User B issues a clone operation from user A's file into its own
file (using a length argument of 0, clone the whole range);
6) User B now gets to see the 1000 bytes that user A truncated from
its file before it made its file world readable. User B also lost
the bytes in the range [1000, 2000[ bytes from its own file, but
that might be ok if his/her intention was reading stale data from
user A that was never supposed to be public.
Note that this contrasts with the case where we truncate a file from 2000
bytes to 1000 bytes and then truncate it back from 1000 to 2000 bytes. In
this case reading any byte from the range [1000, 2000[ will return a value
of 0x00, instead of the original data.
This problem exists since the clone ioctl was added and happens both with
and without my recent data loss and file corruption fixes for the clone
ioctl (patch "Btrfs: fix file corruption and data loss after cloning
inline extents").
So fix this by truncating the compressed inline extents as we do for the
non-compressed case, which involves decompressing, if the data isn't already
in the page cache, compressing the truncated version of the extent, writing
the compressed content into the inline extent and then truncate it.
The following test case for fstests reproduces the problem. In order for
the test to pass both this fix and my previous fix for the clone ioctl
that forbids cloning a smaller inline extent into a larger one,
which is titled "Btrfs: fix file corruption and data loss after cloning
inline extents", are needed. Without that other fix the test fails in a
different way that does not leak the truncated data; instead part of the
destination file gets replaced with zeroes (because the destination file
has a larger inline extent than the source).
seq=`basename $0`
seqres=$RESULT_DIR/$seq
echo "QA output created by $seq"
tmp=/tmp/$$
status=1 # failure is the default!
trap "_cleanup; exit \$status" 0 1 2 3 15
_cleanup()
{
rm -f $tmp.*
}
# get standard environment, filters and checks
. ./common/rc
. ./common/filter
# real QA test starts here
_need_to_be_root
_supported_fs btrfs
_supported_os Linux
_require_scratch
_require_cloner
rm -f $seqres.full
_scratch_mkfs >>$seqres.full 2>&1
_scratch_mount "-o compress"
# Create our test files. File foo is going to be the source of a clone operation
# and consists of a single inline extent with an uncompressed size of 512 bytes,
# while file bar consists of a single inline extent with an uncompressed size of
# 256 bytes. For our test's purpose, it's important that file bar has an inline
# extent with a size smaller than foo's inline extent.
$XFS_IO_PROG -f -c "pwrite -S 0xa1 0 128" \
-c "pwrite -S 0x2a 128 384" \
$SCRATCH_MNT/foo | _filter_xfs_io
$XFS_IO_PROG -f -c "pwrite -S 0xbb 0 256" $SCRATCH_MNT/bar | _filter_xfs_io
# Now durably persist all metadata and data. We do this to make sure that we get
# on disk an inline extent with a size of 512 bytes for file foo.
sync
# Now truncate our file foo to a smaller size. Because it consists of a
# compressed and inline extent, btrfs did not shrink the inline extent to the
# new size (if the extent was not compressed, btrfs would shrink it to 128
# bytes), it only updates the inode's i_size to 128 bytes.
$XFS_IO_PROG -c "truncate 128" $SCRATCH_MNT/foo
# Now clone foo's inline extent into bar.
# This clone operation should fail with errno EOPNOTSUPP because the source
# file consists only of an inline extent and the file's size is smaller than
# the inline extent of the destination (128 bytes < 256 bytes). However the
# clone ioctl was not prepared to deal with a file that has a size smaller
# than the size of its inline extent (something that happens only for compressed
# inline extents), resulting in copying the full inline extent from the source
# file into the destination file.
#
# Note that btrfs' clone operation for inline extents consists of removing the
# inline extent from the destination inode and copy the inline extent from the
# source inode into the destination inode, meaning that if the destination
# inode's inline extent is larger (N bytes) than the source inode's inline
# extent (M bytes), some bytes (N - M bytes) will be lost from the destination
# file. Btrfs could copy the source inline extent's data into the destination's
# inline extent so that we would not lose any data, but that's currently not
# done due to the complexity that would be needed to deal with such cases
# (specially when one or both extents are compressed), returning EOPNOTSUPP, as
# it's normally not a very common case to clone very small files (only case
# where we get inline extents) and copying inline extents does not save any
# space (unlike for normal, non-inlined extents).
$CLONER_PROG -s 0 -d 0 -l 0 $SCRATCH_MNT/foo $SCRATCH_MNT/bar
# Now because the above clone operation used to succeed, and due to foo's inline
# extent not being shrunk by the truncate operation, our file bar got the whole
# inline extent copied from foo, making us lose the last 128 bytes from bar
# which got replaced by the bytes in range [128, 256[ from foo before foo was
# truncated - in other words, data loss from bar and being able to read old and
# stale data from foo that should not be possible to read anymore through normal
# filesystem operations. Contrast with the case where we truncate a file from a
# size N to a smaller size M, truncate it back to size N and then read the range
# [M, N[, we should always get the value 0x00 for all the bytes in that range.
# We expected the clone operation to fail with errno EOPNOTSUPP and therefore
# not modify our file's bar data/metadata. So its content should be 256 bytes
# long with all bytes having the value 0xbb.
#
# Without the btrfs bug fix, the clone operation succeeded and resulted in
# leaking truncated data from foo, the bytes that belonged to its range
# [128, 256[, and losing data from bar in that same range. So reading the
# file gave us the following content:
#
# 0000000 a1 a1 a1 a1 a1 a1 a1 a1 a1 a1 a1 a1 a1 a1 a1 a1
# *
# 0000200 2a 2a 2a 2a 2a 2a 2a 2a 2a 2a 2a 2a 2a 2a 2a 2a
# *
# 0000400
echo "File bar's content after the clone operation:"
od -t x1 $SCRATCH_MNT/bar
# Also because foo's inline extent was not shrunk by the truncate
# operation, btrfs' fsck, which is run by the fstests framework every time a
# test completes, failed reporting the following error:
#
# root 5 inode 257 errors 400, nbytes wrong
status=0
exit
Cc: [email protected]
Signed-off-by: Filipe Manana <[email protected]>
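The core of the fix — decompress, truncate the plain data, recompress, store the shorter blob so no stale bytes survive past the new EOF — can be sketched generically with zlib's one-shot helpers. This is an illustration of the round-trip only, not btrfs code, and assumes linking with -lz:
#include <stdio.h>
#include <string.h>
#include <zlib.h>
int main(void)
{
	unsigned char plain[2000], comp[4096], plain2[4096], comp2[4096];
	uLongf clen = sizeof(comp), plen = sizeof(plain2), clen2 = sizeof(comp2);
	memset(plain, 'A', sizeof(plain));	/* 2000-byte "inline extent" */
	if (compress(comp, &clen, plain, sizeof(plain)) != Z_OK)
		return 1;			/* original compressed extent */
	if (uncompress(plain2, &plen, comp, clen) != Z_OK)
		return 1;			/* 1) decompress */
	uLong new_size = 1000;			/* 2) truncate plain form */
	if (compress(comp2, &clen2, plain2, new_size) != Z_OK)
		return 1;			/* 3) recompress truncated data */
	printf("compressed inline extent: %lu -> %lu bytes\n",
	       (unsigned long)clen, (unsigned long)clen2);
	return 0;
}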
|
static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
struct inode *inode = NULL;
struct btrfs_trans_handle *trans;
struct btrfs_root *root = BTRFS_I(dir)->root;
int err = 0;
int drop_on_err = 0;
u64 objectid = 0;
u64 index = 0;
/*
* 2 items for inode and ref
* 2 items for dir items
* 1 for xattr if selinux is on
*/
trans = btrfs_start_transaction(root, 5);
if (IS_ERR(trans))
return PTR_ERR(trans);
err = btrfs_find_free_ino(root, &objectid);
if (err)
goto out_fail;
inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
dentry->d_name.len, btrfs_ino(dir), objectid,
S_IFDIR | mode, &index);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
goto out_fail;
}
drop_on_err = 1;
/* these must be set before we unlock the inode */
inode->i_op = &btrfs_dir_inode_operations;
inode->i_fop = &btrfs_dir_file_operations;
err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
if (err)
goto out_fail_inode;
btrfs_i_size_write(inode, 0);
err = btrfs_update_inode(trans, root, inode);
if (err)
goto out_fail_inode;
err = btrfs_add_link(trans, dir, inode, dentry->d_name.name,
dentry->d_name.len, 0, index);
if (err)
goto out_fail_inode;
d_instantiate(dentry, inode);
/*
* mkdir is special. We're unlocking after we call d_instantiate
* to avoid a race with nfsd calling d_instantiate.
*/
unlock_new_inode(inode);
drop_on_err = 0;
out_fail:
btrfs_end_transaction(trans, root);
if (drop_on_err) {
inode_dec_link_count(inode);
iput(inode);
}
btrfs_balance_delayed_items(root);
btrfs_btree_balance_dirty(root);
return err;
out_fail_inode:
unlock_new_inode(inode);
goto out_fail;
}
|
static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
struct inode *inode = NULL;
struct btrfs_trans_handle *trans;
struct btrfs_root *root = BTRFS_I(dir)->root;
int err = 0;
int drop_on_err = 0;
u64 objectid = 0;
u64 index = 0;
/*
* 2 items for inode and ref
* 2 items for dir items
* 1 for xattr if selinux is on
*/
trans = btrfs_start_transaction(root, 5);
if (IS_ERR(trans))
return PTR_ERR(trans);
err = btrfs_find_free_ino(root, &objectid);
if (err)
goto out_fail;
inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
dentry->d_name.len, btrfs_ino(dir), objectid,
S_IFDIR | mode, &index);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
goto out_fail;
}
drop_on_err = 1;
/* these must be set before we unlock the inode */
inode->i_op = &btrfs_dir_inode_operations;
inode->i_fop = &btrfs_dir_file_operations;
err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
if (err)
goto out_fail_inode;
btrfs_i_size_write(inode, 0);
err = btrfs_update_inode(trans, root, inode);
if (err)
goto out_fail_inode;
err = btrfs_add_link(trans, dir, inode, dentry->d_name.name,
dentry->d_name.len, 0, index);
if (err)
goto out_fail_inode;
d_instantiate(dentry, inode);
/*
* mkdir is special. We're unlocking after we call d_instantiate
* to avoid a race with nfsd calling d_instantiate.
*/
unlock_new_inode(inode);
drop_on_err = 0;
out_fail:
btrfs_end_transaction(trans, root);
if (drop_on_err) {
inode_dec_link_count(inode);
iput(inode);
}
btrfs_balance_delayed_items(root);
btrfs_btree_balance_dirty(root);
return err;
out_fail_inode:
unlock_new_inode(inode);
goto out_fail;
}
|
C
|
linux
| 0 |
CVE-2013-0839
|
https://www.cvedetails.com/cve/CVE-2013-0839/
|
CWE-399
|
https://github.com/chromium/chromium/commit/dd3b6fe574edad231c01c78e4647a74c38dc4178
|
dd3b6fe574edad231c01c78e4647a74c38dc4178
|
Remove parent* arg from GDataEntry ctor.
* Remove static FromDocumentEntry from GDataEntry, GDataFile, GDataDirectory. Replace with InitFromDocumentEntry.
* Move common code from GDataFile::InitFromDocumentEntry and GDataDirectory::InitFromDocumentEntry to GDataEntry::InitFromDocumentEntry.
* Add GDataDirectoryService::FromDocumentEntry and use this everywhere.
* Make ctors of GDataFile, GDataDirectory private, so these must be created by GDataDirectoryService's CreateGDataFile and
CreateGDataDirectory. Make GDataEntry ctor protected.
BUG=141494
TEST=unit tests.
Review URL: https://chromiumcodereview.appspot.com/10854083
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@151008 0039d316-1c4b-4281-b951-d872f2087c98
|
void GDataFileSystem::CloseFileOnUIThread(
const FilePath& file_path,
const FileOperationCallback& callback) {
DCHECK(BrowserThread::CurrentlyOn(BrowserThread::UI));
if (open_files_.find(file_path) == open_files_.end()) {
MessageLoop::current()->PostTask(
FROM_HERE,
base::Bind(callback, GDATA_FILE_ERROR_NOT_FOUND));
return;
}
GetEntryInfoByPathAsyncOnUIThread(
file_path,
base::Bind(&GDataFileSystem::OnGetEntryInfoCompleteForCloseFile,
ui_weak_ptr_,
file_path,
base::Bind(&GDataFileSystem::OnCloseFileFinished,
ui_weak_ptr_,
file_path,
callback)));
}
|
void GDataFileSystem::CloseFileOnUIThread(
const FilePath& file_path,
const FileOperationCallback& callback) {
DCHECK(BrowserThread::CurrentlyOn(BrowserThread::UI));
if (open_files_.find(file_path) == open_files_.end()) {
MessageLoop::current()->PostTask(
FROM_HERE,
base::Bind(callback, GDATA_FILE_ERROR_NOT_FOUND));
return;
}
GetEntryInfoByPathAsyncOnUIThread(
file_path,
base::Bind(&GDataFileSystem::OnGetEntryInfoCompleteForCloseFile,
ui_weak_ptr_,
file_path,
base::Bind(&GDataFileSystem::OnCloseFileFinished,
ui_weak_ptr_,
file_path,
callback)));
}
|
C
|
Chrome
| 0 |
CVE-2016-5688
|
https://www.cvedetails.com/cve/CVE-2016-5688/
|
CWE-119
|
https://github.com/ImageMagick/ImageMagick/commit/aecd0ada163a4d6c769cec178955d5f3e9316f2f
|
aecd0ada163a4d6c769cec178955d5f3e9316f2f
|
Set pixel cache to undefined if any resource limit is exceeded
|
MagickExport MagickBooleanType CopyImagePixels(Image *image,
const Image *source_image,const RectangleInfo *geometry,
const OffsetInfo *offset,ExceptionInfo *exception)
{
#define CopyImageTag "Copy/Image"
CacheView
*image_view,
*source_view;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(source_image != (Image *) NULL);
assert(geometry != (RectangleInfo *) NULL);
assert(offset != (OffsetInfo *) NULL);
if ((offset->x < 0) || (offset->y < 0) ||
((ssize_t) (offset->x+geometry->width) > (ssize_t) image->columns) ||
((ssize_t) (offset->y+geometry->height) > (ssize_t) image->rows))
ThrowBinaryException(OptionError,"GeometryDoesNotContainImage",
image->filename);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
/*
Copy image pixels.
*/
status=MagickTrue;
progress=0;
source_view=AcquireVirtualCacheView(source_image,exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(image,source_image,geometry->height,1)
#endif
for (y=0; y < (ssize_t) geometry->height; y++)
{
MagickBooleanType
sync;
register const Quantum
*magick_restrict p;
register ssize_t
x;
register Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(source_view,geometry->x,y+geometry->y,
geometry->width,1,exception);
q=QueueCacheViewAuthenticPixels(image_view,offset->x,y+offset->y,
geometry->width,1,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) geometry->width; x++)
{
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel=GetPixelChannelChannel(image,i);
PixelTrait traits=GetPixelChannelTraits(image,channel);
PixelTrait source_traits=GetPixelChannelTraits(source_image,channel);
if ((traits == UndefinedPixelTrait) ||
(source_traits == UndefinedPixelTrait))
continue;
SetPixelChannel(image,channel,p[i],q);
}
p+=GetPixelChannels(source_image);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_CopyImage)
#endif
proceed=SetImageProgress(image,CopyImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
source_view=DestroyCacheView(source_view);
image_view=DestroyCacheView(image_view);
return(status);
}
|
MagickExport MagickBooleanType CopyImagePixels(Image *image,
const Image *source_image,const RectangleInfo *geometry,
const OffsetInfo *offset,ExceptionInfo *exception)
{
#define CopyImageTag "Copy/Image"
CacheView
*image_view,
*source_view;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(source_image != (Image *) NULL);
assert(geometry != (RectangleInfo *) NULL);
assert(offset != (OffsetInfo *) NULL);
if ((offset->x < 0) || (offset->y < 0) ||
((ssize_t) (offset->x+geometry->width) > (ssize_t) image->columns) ||
((ssize_t) (offset->y+geometry->height) > (ssize_t) image->rows))
ThrowBinaryException(OptionError,"GeometryDoesNotContainImage",
image->filename);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
/*
Copy image pixels.
*/
status=MagickTrue;
progress=0;
source_view=AcquireVirtualCacheView(source_image,exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(image,source_image,geometry->height,1)
#endif
for (y=0; y < (ssize_t) geometry->height; y++)
{
MagickBooleanType
sync;
register const Quantum
*magick_restrict p;
register ssize_t
x;
register Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(source_view,geometry->x,y+geometry->y,
geometry->width,1,exception);
q=QueueCacheViewAuthenticPixels(image_view,offset->x,y+offset->y,
geometry->width,1,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) geometry->width; x++)
{
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel=GetPixelChannelChannel(image,i);
PixelTrait traits=GetPixelChannelTraits(image,channel);
PixelTrait source_traits=GetPixelChannelTraits(source_image,channel);
if ((traits == UndefinedPixelTrait) ||
(source_traits == UndefinedPixelTrait))
continue;
SetPixelChannel(image,channel,p[i],q);
}
p+=GetPixelChannels(source_image);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_CopyImage)
#endif
proceed=SetImageProgress(image,CopyImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
source_view=DestroyCacheView(source_view);
image_view=DestroyCacheView(image_view);
return(status);
}
|
C
|
ImageMagick
| 0 |
CVE-2018-1000880
|
https://www.cvedetails.com/cve/CVE-2018-1000880/
|
CWE-415
|
https://github.com/libarchive/libarchive/pull/1105/commits/9c84b7426660c09c18cc349f6d70b5f8168b5680
|
9c84b7426660c09c18cc349f6d70b5f8168b5680
|
warc: consume data once read
The warc decoder only used read-ahead; it wouldn't actually consume
data that had previously been printed. This means that if you specify
an invalid content length, it will just reprint the same data over
and over and over again until it hits the desired length.
This means that a WARC resource with e.g.
Content-Length: 666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666665
but only a few hundred bytes of data, causes a quasi-infinite loop.
Consume data in subsequent calls to _warc_read.
Found with an AFL + afl-rb + qsym setup.
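The bug class is easy to model: a decoder that emits peeked bytes in a loop bounded by a declared length, without ever advancing its input cursor, replays the same bytes until the length is exhausted. A hedged, stand-alone sketch (struct stream, read_ahead and consume are illustrative names, not libarchive's API):
#include <stdio.h>
#include <string.h>
struct stream {
	const char *buf;
	size_t len;
	size_t pos;
};
/* Peek at buffered bytes without advancing. */
static size_t read_ahead(struct stream *s, const char **out)
{
	*out = s->buf + s->pos;
	return s->len - s->pos;
}
/* Advance the cursor: the step the vulnerable decoder was missing. */
static void consume(struct stream *s, size_t n)
{
	s->pos += n;
}
int main(void)
{
	struct stream s = { "payload", 7, 0 };
	size_t declared = 100;	/* attacker-controlled Content-Length */
	while (declared > 0) {
		const char *p;
		size_t avail = read_ahead(&s, &p);
		if (avail == 0)
			break;	/* real data exhausted: stop, don't replay */
		size_t n = avail < declared ? avail : declared;
		fwrite(p, 1, n, stdout);
		consume(&s, n);	/* next iteration sees new data */
		declared -= n;
	}
	return 0;
}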
|
_warc_rduri(const char *buf, size_t bsz)
{
static const char _key[] = "\r\nWARC-Target-URI:";
const char *val, *uri, *eol, *p;
warc_string_t res = {0U, NULL};
if ((val = xmemmem(buf, bsz, _key, sizeof(_key) - 1U)) == NULL) {
/* no bother */
return res;
}
/* overread whitespace */
val += sizeof(_key) - 1U;
if ((eol = _warc_find_eol(val, buf + bsz - val)) == NULL) {
/* no end of line */
return res;
}
while (val < eol && (*val == ' ' || *val == '\t'))
++val;
/* overread URL designators */
if ((uri = xmemmem(val, eol - val, "://", 3U)) == NULL) {
/* not touching that! */
return res;
}
/* spaces inside uri are not allowed, CRLF should follow */
for (p = val; p < eol; p++) {
if (isspace((unsigned char)*p))
return res;
}
/* there must be at least space for ftp */
if (uri < (val + 3U))
return res;
/* move uri to point to after :// */
uri += 3U;
/* now then, inspect the URI */
if (memcmp(val, "file", 4U) == 0) {
/* perfect, nothing left to do here */
} else if (memcmp(val, "http", 4U) == 0 ||
memcmp(val, "ftp", 3U) == 0) {
/* overread domain, and the first / */
while (uri < eol && *uri++ != '/');
} else {
/* not sure what to do? best to bugger off */
return res;
}
res.str = uri;
res.len = eol - uri;
return res;
}
|
_warc_rduri(const char *buf, size_t bsz)
{
static const char _key[] = "\r\nWARC-Target-URI:";
const char *val, *uri, *eol, *p;
warc_string_t res = {0U, NULL};
if ((val = xmemmem(buf, bsz, _key, sizeof(_key) - 1U)) == NULL) {
/* no bother */
return res;
}
/* overread whitespace */
val += sizeof(_key) - 1U;
if ((eol = _warc_find_eol(val, buf + bsz - val)) == NULL) {
/* no end of line */
return res;
}
while (val < eol && (*val == ' ' || *val == '\t'))
++val;
/* overread URL designators */
if ((uri = xmemmem(val, eol - val, "://", 3U)) == NULL) {
/* not touching that! */
return res;
}
/* spaces inside uri are not allowed, CRLF should follow */
for (p = val; p < eol; p++) {
if (isspace((unsigned char)*p))
return res;
}
/* there must be at least space for ftp */
if (uri < (val + 3U))
return res;
/* move uri to point to after :// */
uri += 3U;
/* now then, inspect the URI */
if (memcmp(val, "file", 4U) == 0) {
/* perfect, nothing left to do here */
} else if (memcmp(val, "http", 4U) == 0 ||
memcmp(val, "ftp", 3U) == 0) {
/* overread domain, and the first / */
while (uri < eol && *uri++ != '/');
} else {
/* not sure what to do? best to bugger off */
return res;
}
res.str = uri;
res.len = eol - uri;
return res;
}
|
C
|
libarchive
| 0 |
CVE-2018-1066
|
https://www.cvedetails.com/cve/CVE-2018-1066/
|
CWE-476
|
https://github.com/torvalds/linux/commit/cabfb3680f78981d26c078a26e5c748531257ebb
|
cabfb3680f78981d26c078a26e5c748531257ebb
|
CIFS: Enable encryption during session setup phase
In order to allow encryption on SMB connection we need to exchange
a session key and generate encryption and decryption keys.
Signed-off-by: Pavel Shilovsky <[email protected]>
|
SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_fid, u64 volatile_fid, int index,
struct cifs_search_info *srch_inf)
{
struct smb2_query_directory_req *req;
struct smb2_query_directory_rsp *rsp = NULL;
struct kvec iov[2];
struct kvec rsp_iov;
int rc = 0;
int len;
int resp_buftype = CIFS_NO_BUFFER;
unsigned char *bufptr;
struct TCP_Server_Info *server;
struct cifs_ses *ses = tcon->ses;
__le16 asteriks = cpu_to_le16('*');
char *end_of_smb;
unsigned int output_size = CIFSMaxBufSize;
size_t info_buf_size;
int flags = 0;
if (ses && (ses->server))
server = ses->server;
else
return -EIO;
rc = small_smb2_init(SMB2_QUERY_DIRECTORY, tcon, (void **) &req);
if (rc)
return rc;
if (encryption_required(tcon))
flags |= CIFS_TRANSFORM_REQ;
switch (srch_inf->info_level) {
case SMB_FIND_FILE_DIRECTORY_INFO:
req->FileInformationClass = FILE_DIRECTORY_INFORMATION;
info_buf_size = sizeof(FILE_DIRECTORY_INFO) - 1;
break;
case SMB_FIND_FILE_ID_FULL_DIR_INFO:
req->FileInformationClass = FILEID_FULL_DIRECTORY_INFORMATION;
info_buf_size = sizeof(SEARCH_ID_FULL_DIR_INFO) - 1;
break;
default:
cifs_dbg(VFS, "info level %u isn't supported\n",
srch_inf->info_level);
rc = -EINVAL;
goto qdir_exit;
}
req->FileIndex = cpu_to_le32(index);
req->PersistentFileId = persistent_fid;
req->VolatileFileId = volatile_fid;
len = 0x2;
bufptr = req->Buffer;
memcpy(bufptr, &asteriks, len);
req->FileNameOffset =
cpu_to_le16(sizeof(struct smb2_query_directory_req) - 1 - 4);
req->FileNameLength = cpu_to_le16(len);
/*
* BB could be 30 bytes or so longer if we used SMB2 specific
* buffer lengths, but this is safe and close enough.
*/
output_size = min_t(unsigned int, output_size, server->maxBuf);
output_size = min_t(unsigned int, output_size, 2 << 15);
req->OutputBufferLength = cpu_to_le32(output_size);
iov[0].iov_base = (char *)req;
/* 4 for RFC1001 length and 1 for Buffer */
iov[0].iov_len = get_rfc1002_length(req) + 4 - 1;
iov[1].iov_base = (char *)(req->Buffer);
iov[1].iov_len = len;
inc_rfc1001_len(req, len - 1 /* Buffer */);
rc = SendReceive2(xid, ses, iov, 2, &resp_buftype, flags, &rsp_iov);
cifs_small_buf_release(req);
rsp = (struct smb2_query_directory_rsp *)rsp_iov.iov_base;
if (rc) {
if (rc == -ENODATA &&
rsp->hdr.sync_hdr.Status == STATUS_NO_MORE_FILES) {
srch_inf->endOfSearch = true;
rc = 0;
}
cifs_stats_fail_inc(tcon, SMB2_QUERY_DIRECTORY_HE);
goto qdir_exit;
}
rc = validate_buf(le16_to_cpu(rsp->OutputBufferOffset),
le32_to_cpu(rsp->OutputBufferLength), &rsp->hdr,
info_buf_size);
if (rc)
goto qdir_exit;
srch_inf->unicode = true;
if (srch_inf->ntwrk_buf_start) {
if (srch_inf->smallBuf)
cifs_small_buf_release(srch_inf->ntwrk_buf_start);
else
cifs_buf_release(srch_inf->ntwrk_buf_start);
}
srch_inf->ntwrk_buf_start = (char *)rsp;
srch_inf->srch_entries_start = srch_inf->last_entry = 4 /* rfclen */ +
(char *)&rsp->hdr + le16_to_cpu(rsp->OutputBufferOffset);
/* 4 for rfc1002 length field */
end_of_smb = get_rfc1002_length(rsp) + 4 + (char *)&rsp->hdr;
srch_inf->entries_in_buffer =
num_entries(srch_inf->srch_entries_start, end_of_smb,
&srch_inf->last_entry, info_buf_size);
srch_inf->index_of_last_entry += srch_inf->entries_in_buffer;
cifs_dbg(FYI, "num entries %d last_index %lld srch start %p srch end %p\n",
srch_inf->entries_in_buffer, srch_inf->index_of_last_entry,
srch_inf->srch_entries_start, srch_inf->last_entry);
if (resp_buftype == CIFS_LARGE_BUFFER)
srch_inf->smallBuf = false;
else if (resp_buftype == CIFS_SMALL_BUFFER)
srch_inf->smallBuf = true;
else
cifs_dbg(VFS, "illegal search buffer type\n");
return rc;
qdir_exit:
free_rsp_buf(resp_buftype, rsp);
return rc;
}
|
SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_fid, u64 volatile_fid, int index,
struct cifs_search_info *srch_inf)
{
struct smb2_query_directory_req *req;
struct smb2_query_directory_rsp *rsp = NULL;
struct kvec iov[2];
struct kvec rsp_iov;
int rc = 0;
int len;
int resp_buftype = CIFS_NO_BUFFER;
unsigned char *bufptr;
struct TCP_Server_Info *server;
struct cifs_ses *ses = tcon->ses;
__le16 asteriks = cpu_to_le16('*');
char *end_of_smb;
unsigned int output_size = CIFSMaxBufSize;
size_t info_buf_size;
int flags = 0;
if (ses && (ses->server))
server = ses->server;
else
return -EIO;
rc = small_smb2_init(SMB2_QUERY_DIRECTORY, tcon, (void **) &req);
if (rc)
return rc;
if (encryption_required(tcon))
flags |= CIFS_TRANSFORM_REQ;
switch (srch_inf->info_level) {
case SMB_FIND_FILE_DIRECTORY_INFO:
req->FileInformationClass = FILE_DIRECTORY_INFORMATION;
info_buf_size = sizeof(FILE_DIRECTORY_INFO) - 1;
break;
case SMB_FIND_FILE_ID_FULL_DIR_INFO:
req->FileInformationClass = FILEID_FULL_DIRECTORY_INFORMATION;
info_buf_size = sizeof(SEARCH_ID_FULL_DIR_INFO) - 1;
break;
default:
cifs_dbg(VFS, "info level %u isn't supported\n",
srch_inf->info_level);
rc = -EINVAL;
goto qdir_exit;
}
req->FileIndex = cpu_to_le32(index);
req->PersistentFileId = persistent_fid;
req->VolatileFileId = volatile_fid;
len = 0x2;
bufptr = req->Buffer;
memcpy(bufptr, &asteriks, len);
req->FileNameOffset =
cpu_to_le16(sizeof(struct smb2_query_directory_req) - 1 - 4);
req->FileNameLength = cpu_to_le16(len);
/*
* BB could be 30 bytes or so longer if we used SMB2 specific
* buffer lengths, but this is safe and close enough.
*/
output_size = min_t(unsigned int, output_size, server->maxBuf);
output_size = min_t(unsigned int, output_size, 2 << 15);
req->OutputBufferLength = cpu_to_le32(output_size);
iov[0].iov_base = (char *)req;
/* 4 for RFC1001 length and 1 for Buffer */
iov[0].iov_len = get_rfc1002_length(req) + 4 - 1;
iov[1].iov_base = (char *)(req->Buffer);
iov[1].iov_len = len;
inc_rfc1001_len(req, len - 1 /* Buffer */);
rc = SendReceive2(xid, ses, iov, 2, &resp_buftype, flags, &rsp_iov);
cifs_small_buf_release(req);
rsp = (struct smb2_query_directory_rsp *)rsp_iov.iov_base;
if (rc) {
if (rc == -ENODATA &&
rsp->hdr.sync_hdr.Status == STATUS_NO_MORE_FILES) {
srch_inf->endOfSearch = true;
rc = 0;
}
cifs_stats_fail_inc(tcon, SMB2_QUERY_DIRECTORY_HE);
goto qdir_exit;
}
rc = validate_buf(le16_to_cpu(rsp->OutputBufferOffset),
le32_to_cpu(rsp->OutputBufferLength), &rsp->hdr,
info_buf_size);
if (rc)
goto qdir_exit;
srch_inf->unicode = true;
if (srch_inf->ntwrk_buf_start) {
if (srch_inf->smallBuf)
cifs_small_buf_release(srch_inf->ntwrk_buf_start);
else
cifs_buf_release(srch_inf->ntwrk_buf_start);
}
srch_inf->ntwrk_buf_start = (char *)rsp;
srch_inf->srch_entries_start = srch_inf->last_entry = 4 /* rfclen */ +
(char *)&rsp->hdr + le16_to_cpu(rsp->OutputBufferOffset);
/* 4 for rfc1002 length field */
end_of_smb = get_rfc1002_length(rsp) + 4 + (char *)&rsp->hdr;
srch_inf->entries_in_buffer =
num_entries(srch_inf->srch_entries_start, end_of_smb,
&srch_inf->last_entry, info_buf_size);
srch_inf->index_of_last_entry += srch_inf->entries_in_buffer;
cifs_dbg(FYI, "num entries %d last_index %lld srch start %p srch end %p\n",
srch_inf->entries_in_buffer, srch_inf->index_of_last_entry,
srch_inf->srch_entries_start, srch_inf->last_entry);
if (resp_buftype == CIFS_LARGE_BUFFER)
srch_inf->smallBuf = false;
else if (resp_buftype == CIFS_SMALL_BUFFER)
srch_inf->smallBuf = true;
else
cifs_dbg(VFS, "illegal search buffer type\n");
return rc;
qdir_exit:
free_rsp_buf(resp_buftype, rsp);
return rc;
}
|
C
|
linux
| 0 |
CVE-2018-19045
|
https://www.cvedetails.com/cve/CVE-2018-19045/
|
CWE-200
|
https://github.com/acassen/keepalived/commit/c6247a9ef2c7b33244ab1d3aa5d629ec49f0a067
|
c6247a9ef2c7b33244ab1d3aa5d629ec49f0a067
|
Add command line and configuration option to set umask
Issue #1048 identified that files created by keepalived are created
with mode 0666. This commit changes the default to 0644, and also
allows the umask to be specified in the configuration or as a command
line option.
Signed-off-by: Quentin Armitage <[email protected]>
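A minimal sketch of the configurable-umask idea: parse an octal string from the configuration and apply it, so that a subsequent creat(..., 0666) yields 0644. parse_umask is a hypothetical helper, not keepalived's actual parser:
#include <stdio.h>
#include <stdlib.h>
#include <sys/stat.h>
/* Parse an octal mode string such as "022"; reject anything else. */
static int parse_umask(const char *str, mode_t *out)
{
	char *end;
	unsigned long val = strtoul(str, &end, 8);
	if (*str == '\0' || *end != '\0' || val > 0777)
		return -1;
	*out = (mode_t)val;
	return 0;
}
int main(void)
{
	mode_t mask;
	if (parse_umask("022", &mask) != 0) {
		fprintf(stderr, "invalid umask\n");
		return 1;
	}
	umask(mask);	/* files opened with mode 0666 are now created 0644 */
	return 0;
}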
|
lvs_timeouts(vector_t *strvec)
{
unsigned val;
size_t i;
if (vector_size(strvec) < 3) {
report_config_error(CONFIG_GENERAL_ERROR, "lvs_timeouts requires at least one option");
return;
}
for (i = 1; i < vector_size(strvec); i++) {
if (!strcmp(strvec_slot(strvec, i), "tcp")) {
if (i == vector_size(strvec) - 1) {
report_config_error(CONFIG_GENERAL_ERROR, "No value specified for lvs_timeout tcp - ignoring");
continue;
}
if (!read_unsigned_strvec(strvec, i + 1, &val, 0, LVS_MAX_TIMEOUT, false))
report_config_error(CONFIG_GENERAL_ERROR, "Invalid lvs_timeout tcp (%s) - ignoring", FMT_STR_VSLOT(strvec, i+1));
else
global_data->lvs_tcp_timeout = val;
i++; /* skip over value */
continue;
}
if (!strcmp(strvec_slot(strvec, i), "tcpfin")) {
if (i == vector_size(strvec) - 1) {
report_config_error(CONFIG_GENERAL_ERROR, "No value specified for lvs_timeout tcpfin - ignoring");
continue;
}
if (!read_unsigned_strvec(strvec, i + 1, &val, 0, LVS_MAX_TIMEOUT, false))
report_config_error(CONFIG_GENERAL_ERROR, "Invalid lvs_timeout tcpfin (%s) - ignoring", FMT_STR_VSLOT(strvec, i+1));
else
global_data->lvs_tcpfin_timeout = val;
i++; /* skip over value */
continue;
}
if (!strcmp(strvec_slot(strvec, i), "udp")) {
if (i == vector_size(strvec) - 1) {
report_config_error(CONFIG_GENERAL_ERROR, "No value specified for lvs_timeout udp - ignoring");
continue;
}
if (!read_unsigned_strvec(strvec, i + 1, &val, 0, LVS_MAX_TIMEOUT, false))
report_config_error(CONFIG_GENERAL_ERROR, "Invalid lvs_timeout udp (%s) - ignoring", FMT_STR_VSLOT(strvec, i+1));
else
global_data->lvs_udp_timeout = val;
i++; /* skip over value */
continue;
}
report_config_error(CONFIG_GENERAL_ERROR, "Unknown option %s specified for lvs_timeouts", FMT_STR_VSLOT(strvec, i));
}
}
|
lvs_timeouts(vector_t *strvec)
{
unsigned val;
size_t i;
if (vector_size(strvec) < 3) {
report_config_error(CONFIG_GENERAL_ERROR, "lvs_timeouts requires at least one option");
return;
}
for (i = 1; i < vector_size(strvec); i++) {
if (!strcmp(strvec_slot(strvec, i), "tcp")) {
if (i == vector_size(strvec) - 1) {
report_config_error(CONFIG_GENERAL_ERROR, "No value specified for lvs_timeout tcp - ignoring");
continue;
}
if (!read_unsigned_strvec(strvec, i + 1, &val, 0, LVS_MAX_TIMEOUT, false))
report_config_error(CONFIG_GENERAL_ERROR, "Invalid lvs_timeout tcp (%s) - ignoring", FMT_STR_VSLOT(strvec, i+1));
else
global_data->lvs_tcp_timeout = val;
i++; /* skip over value */
continue;
}
if (!strcmp(strvec_slot(strvec, i), "tcpfin")) {
if (i == vector_size(strvec) - 1) {
report_config_error(CONFIG_GENERAL_ERROR, "No value specified for lvs_timeout tcpfin - ignoring");
continue;
}
if (!read_unsigned_strvec(strvec, i + 1, &val, 0, LVS_MAX_TIMEOUT, false))
report_config_error(CONFIG_GENERAL_ERROR, "Invalid lvs_timeout tcpfin (%s) - ignoring", FMT_STR_VSLOT(strvec, i+1));
else
global_data->lvs_tcpfin_timeout = val;
i++; /* skip over value */
continue;
}
if (!strcmp(strvec_slot(strvec, i), "udp")) {
if (i == vector_size(strvec) - 1) {
report_config_error(CONFIG_GENERAL_ERROR, "No value specified for lvs_timeout udp - ignoring");
continue;
}
if (!read_unsigned_strvec(strvec, i + 1, &val, 0, LVS_MAX_TIMEOUT, false))
report_config_error(CONFIG_GENERAL_ERROR, "Invalid lvs_timeout udp (%s) - ignoring", FMT_STR_VSLOT(strvec, i+1));
else
global_data->lvs_udp_timeout = val;
i++; /* skip over value */
continue;
}
report_config_error(CONFIG_GENERAL_ERROR, "Unknown option %s specified for lvs_timeouts", FMT_STR_VSLOT(strvec, i));
}
}
|
C
|
keepalived
| 0 |
CVE-2016-7912
|
https://www.cvedetails.com/cve/CVE-2016-7912/
|
CWE-416
|
https://github.com/torvalds/linux/commit/38740a5b87d53ceb89eb2c970150f6e94e00373a
|
38740a5b87d53ceb89eb2c970150f6e94e00373a
|
usb: gadget: f_fs: Fix use-after-free
When using asynchronous read or write operations on the USB endpoints the
issuer of the IO request is notified by calling the ki_complete() callback
of the submitted kiocb when the URB has been completed.
Calling this ki_complete() callback will free kiocb. Make sure that the
structure is no longer accessed beyond that point, otherwise undefined
behaviour might occur.
Fixes: 2e4c7553cd6f ("usb: gadget: f_fs: add aio support")
Cc: <[email protected]> # v3.15+
Signed-off-by: Lars-Peter Clausen <[email protected]>
Signed-off-by: Felipe Balbi <[email protected]>
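The pattern behind the fix can be shown in plain C: a completion callback owns and frees its context, so the caller must copy out any field it still needs before invoking it. All names below are illustrative, not the f_fs ones:
#include <stdio.h>
#include <stdlib.h>
struct request {
	int result;
	void (*complete)(struct request *req);
};
/* The callback frees req, like ki_complete() freeing the kiocb. */
static void complete_and_free(struct request *req)
{
	printf("completed with %d\n", req->result);
	free(req);
}
static void finish_request(struct request *req)
{
	int result = req->result;	/* fixed: cache before the callback */
	req->complete(req);		/* req is dangling after this line */
	/* The buggy variant would now touch req->result again. */
	printf("post-completion bookkeeping for result %d\n", result);
}
int main(void)
{
	struct request *req = malloc(sizeof(*req));
	if (!req)
		return 1;
	req->result = 42;
	req->complete = complete_and_free;
	finish_request(req);
	return 0;
}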
|
static int ffs_aio_cancel(struct kiocb *kiocb)
{
struct ffs_io_data *io_data = kiocb->private;
struct ffs_epfile *epfile = kiocb->ki_filp->private_data;
int value;
ENTER();
spin_lock_irq(&epfile->ffs->eps_lock);
if (likely(io_data && io_data->ep && io_data->req))
value = usb_ep_dequeue(io_data->ep, io_data->req);
else
value = -EINVAL;
spin_unlock_irq(&epfile->ffs->eps_lock);
return value;
}
|
static int ffs_aio_cancel(struct kiocb *kiocb)
{
struct ffs_io_data *io_data = kiocb->private;
struct ffs_epfile *epfile = kiocb->ki_filp->private_data;
int value;
ENTER();
spin_lock_irq(&epfile->ffs->eps_lock);
if (likely(io_data && io_data->ep && io_data->req))
value = usb_ep_dequeue(io_data->ep, io_data->req);
else
value = -EINVAL;
spin_unlock_irq(&epfile->ffs->eps_lock);
return value;
}
|
C
|
linux
| 0 |
null | null | null |
https://github.com/chromium/chromium/commit/df831400bcb63db4259b5858281b1727ba972a2a
|
df831400bcb63db4259b5858281b1727ba972a2a
|
WebKit2: Support window bounce when panning.
https://bugs.webkit.org/show_bug.cgi?id=58065
<rdar://problem/9244367>
Reviewed by Adam Roben.
Make gestureDidScroll synchronous, as once we scroll, we need to know
whether or not we are at the beginning or end of the scrollable document.
If we are at either end of the scrollable document, we call the Windows 7
API to bounce the window to give an indication that you are past an end
of the document.
* UIProcess/WebPageProxy.cpp:
(WebKit::WebPageProxy::gestureDidScroll): Pass a boolean for the reply, and return it.
* UIProcess/WebPageProxy.h:
* UIProcess/win/WebView.cpp:
(WebKit::WebView::WebView): Initialize a new variable.
(WebKit::WebView::onGesture): Once we send the message to scroll, check if we have gone to
an end of the document, and if we have, bounce the window.
* UIProcess/win/WebView.h:
* WebProcess/WebPage/WebPage.h:
* WebProcess/WebPage/WebPage.messages.in: GestureDidScroll is now sync.
* WebProcess/WebPage/win/WebPageWin.cpp:
(WebKit::WebPage::gestureDidScroll): When we are done scrolling, check if we have a vertical
scrollbar and if we are at the beginning or the end of the scrollable document.
git-svn-id: svn://svn.chromium.org/blink/trunk@83197 bbb929c8-8fbe-4397-9dbb-9b2b20218538
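The behavioral change — the scroll call now answers synchronously whether it hit an end of the document, so the caller can trigger edge feedback such as the window bounce — reduces to a small sketch; scroll_by is a hypothetical stand-in for the real synchronous IPC:
#include <stdio.h>
/* Scroll and report whether the position was clamped at either end. */
static int scroll_by(int *pos, int delta, int max)
{
	*pos += delta;
	if (*pos <= 0)   { *pos = 0;   return 1; }	/* at beginning */
	if (*pos >= max) { *pos = max; return 1; }	/* at end */
	return 0;
}
int main(void)
{
	int pos = 10;
	if (scroll_by(&pos, -50, 100))
		printf("at an edge: bounce the window\n");
	return 0;
}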
|
HIMC WebView::getIMMContext()
{
return Ime::ImmGetContext(m_window);
}
|
HIMC WebView::getIMMContext()
{
return Ime::ImmGetContext(m_window);
}
|
C
|
Chrome
| 0 |
CVE-2016-4071
|
https://www.cvedetails.com/cve/CVE-2016-4071/
|
CWE-20
|
https://git.php.net/?p=php-src.git;a=commit;h=6e25966544fb1d2f3d7596e060ce9c9269bbdcf8
|
6e25966544fb1d2f3d7596e060ce9c9269bbdcf8
| null |
static int netsnmp_session_set_contextEngineID(struct snmp_session *s, char * contextEngineID)
{
size_t ebuf_len = 32, eout_len = 0;
u_char *ebuf = (u_char *) emalloc(ebuf_len);
if (ebuf == NULL) {
php_error_docref(NULL, E_WARNING, "malloc failure setting contextEngineID");
return (-1);
}
if (!snmp_hex_to_binary(&ebuf, &ebuf_len, &eout_len, 1, contextEngineID)) {
php_error_docref(NULL, E_WARNING, "Bad engine ID value '%s'", contextEngineID);
efree(ebuf);
return (-1);
}
if (s->contextEngineID) {
efree(s->contextEngineID);
}
s->contextEngineID = ebuf;
s->contextEngineIDLen = eout_len;
return (0);
}
|
static int netsnmp_session_set_contextEngineID(struct snmp_session *s, char * contextEngineID)
{
size_t ebuf_len = 32, eout_len = 0;
u_char *ebuf = (u_char *) emalloc(ebuf_len);
if (ebuf == NULL) {
php_error_docref(NULL, E_WARNING, "malloc failure setting contextEngineID");
return (-1);
}
if (!snmp_hex_to_binary(&ebuf, &ebuf_len, &eout_len, 1, contextEngineID)) {
php_error_docref(NULL, E_WARNING, "Bad engine ID value '%s'", contextEngineID);
efree(ebuf);
return (-1);
}
if (s->contextEngineID) {
efree(s->contextEngineID);
}
s->contextEngineID = ebuf;
s->contextEngineIDLen = eout_len;
return (0);
}
|
C
|
php
| 0 |
CVE-2016-1665
|
https://www.cvedetails.com/cve/CVE-2016-1665/
|
CWE-20
|
https://github.com/chromium/chromium/commit/282f53ffdc3b1902da86f6a0791af736837efbf8
|
282f53ffdc3b1902da86f6a0791af736837efbf8
|
[signin] Add metrics to track the source for refresh token updated events
This CL adds a source for update and revoke credentials operations. It then
surfaces the source in the chrome://signin-internals page.
This CL also records the following histograms that track refresh token events:
* Signin.RefreshTokenUpdated.ToValidToken.Source
* Signin.RefreshTokenUpdated.ToInvalidToken.Source
* Signin.RefreshTokenRevoked.Source
These histograms are needed to validate the assumptions of how often tokens
are revoked by the browser and the sources for the token revocations.
Bug: 896182
Change-Id: I2fcab80ee8e5699708e695bc3289fa6d34859a90
Reviewed-on: https://chromium-review.googlesource.com/c/1286464
Reviewed-by: Jochen Eisinger <[email protected]>
Reviewed-by: David Roger <[email protected]>
Reviewed-by: Ilya Sherman <[email protected]>
Commit-Queue: Mihai Sardarescu <[email protected]>
Cr-Commit-Position: refs/heads/master@{#606181}
|
views::View* ProfileChooserView::CreateProfileChooserView(
AvatarMenu* avatar_menu) {
views::View* view = new views::View();
views::GridLayout* layout = CreateSingleColumnLayout(view, menu_width_);
Indexes other_profiles;
views::View* sync_error_view = nullptr;
views::View* current_profile_view = nullptr;
views::View* current_profile_accounts = nullptr;
views::View* option_buttons_view = nullptr;
views::View* autofill_home_view = nullptr;
bool current_profile_signed_in = false;
for (size_t i = 0; i < avatar_menu->GetNumberOfItems(); ++i) {
const AvatarMenu::Item& item = avatar_menu->GetItemAt(i);
if (item.active) {
option_buttons_view = CreateOptionsView(
item.signed_in && profiles::IsLockAvailable(browser_->profile()),
avatar_menu);
current_profile_view = CreateCurrentProfileView(item, false);
autofill_home_view = CreateAutofillHomeView();
current_profile_signed_in = item.signed_in;
if (!IsProfileChooser(view_mode_))
current_profile_accounts = CreateCurrentProfileAccountsView(item);
sync_error_view = CreateSyncErrorViewIfNeeded(item);
} else {
other_profiles.push_back(i);
}
}
if (sync_error_view) {
layout->StartRow(1.0, 0);
layout->AddView(sync_error_view);
layout->StartRow(views::GridLayout::kFixedSize, 0);
layout->AddView(new views::Separator());
}
if (!current_profile_view) {
current_profile_view = CreateGuestProfileView();
option_buttons_view = CreateOptionsView(false, avatar_menu);
}
if (!(dice_enabled_ && sync_error_view)) {
layout->StartRow(1.0, 0);
layout->AddView(current_profile_view);
}
if (!IsProfileChooser(view_mode_)) {
DCHECK(current_profile_accounts);
layout->StartRow(views::GridLayout::kFixedSize, 0);
layout->AddView(new views::Separator());
layout->StartRow(1.0, 0);
layout->AddView(current_profile_accounts);
}
if (browser_->profile()->IsSupervised()) {
layout->StartRow(1.0, 0);
layout->AddView(CreateSupervisedUserDisclaimerView());
}
if (autofill_home_view) {
const int content_list_vert_spacing =
ChromeLayoutProvider::Get()->GetDistanceMetric(
DISTANCE_CONTENT_LIST_VERTICAL_MULTI);
if (!current_profile_signed_in) {
layout->StartRow(0, 0);
layout->AddView(new views::Separator());
layout->AddPaddingRow(1.0, content_list_vert_spacing);
}
layout->StartRow(0, 0);
layout->AddView(autofill_home_view);
layout->AddPaddingRow(1.0, content_list_vert_spacing);
}
layout->StartRow(views::GridLayout::kFixedSize, 0);
layout->AddView(new views::Separator());
if (option_buttons_view) {
layout->StartRow(views::GridLayout::kFixedSize, 0);
layout->AddView(option_buttons_view);
}
return view;
}
|
views::View* ProfileChooserView::CreateProfileChooserView(
AvatarMenu* avatar_menu) {
views::View* view = new views::View();
views::GridLayout* layout = CreateSingleColumnLayout(view, menu_width_);
Indexes other_profiles;
views::View* sync_error_view = nullptr;
views::View* current_profile_view = nullptr;
views::View* current_profile_accounts = nullptr;
views::View* option_buttons_view = nullptr;
views::View* autofill_home_view = nullptr;
bool current_profile_signed_in = false;
for (size_t i = 0; i < avatar_menu->GetNumberOfItems(); ++i) {
const AvatarMenu::Item& item = avatar_menu->GetItemAt(i);
if (item.active) {
option_buttons_view = CreateOptionsView(
item.signed_in && profiles::IsLockAvailable(browser_->profile()),
avatar_menu);
current_profile_view = CreateCurrentProfileView(item, false);
autofill_home_view = CreateAutofillHomeView();
current_profile_signed_in = item.signed_in;
if (!IsProfileChooser(view_mode_))
current_profile_accounts = CreateCurrentProfileAccountsView(item);
sync_error_view = CreateSyncErrorViewIfNeeded(item);
} else {
other_profiles.push_back(i);
}
}
if (sync_error_view) {
layout->StartRow(1.0, 0);
layout->AddView(sync_error_view);
layout->StartRow(views::GridLayout::kFixedSize, 0);
layout->AddView(new views::Separator());
}
if (!current_profile_view) {
current_profile_view = CreateGuestProfileView();
option_buttons_view = CreateOptionsView(false, avatar_menu);
}
if (!(dice_enabled_ && sync_error_view)) {
layout->StartRow(1.0, 0);
layout->AddView(current_profile_view);
}
if (!IsProfileChooser(view_mode_)) {
DCHECK(current_profile_accounts);
layout->StartRow(views::GridLayout::kFixedSize, 0);
layout->AddView(new views::Separator());
layout->StartRow(1.0, 0);
layout->AddView(current_profile_accounts);
}
if (browser_->profile()->IsSupervised()) {
layout->StartRow(1.0, 0);
layout->AddView(CreateSupervisedUserDisclaimerView());
}
if (autofill_home_view) {
const int content_list_vert_spacing =
ChromeLayoutProvider::Get()->GetDistanceMetric(
DISTANCE_CONTENT_LIST_VERTICAL_MULTI);
if (!current_profile_signed_in) {
layout->StartRow(0, 0);
layout->AddView(new views::Separator());
layout->AddPaddingRow(1.0, content_list_vert_spacing);
}
layout->StartRow(0, 0);
layout->AddView(autofill_home_view);
layout->AddPaddingRow(1.0, content_list_vert_spacing);
}
layout->StartRow(views::GridLayout::kFixedSize, 0);
layout->AddView(new views::Separator());
if (option_buttons_view) {
layout->StartRow(views::GridLayout::kFixedSize, 0);
layout->AddView(option_buttons_view);
}
return view;
}
|
C
|
Chrome
| 0 |
CVE-2015-3193
|
https://www.cvedetails.com/cve/CVE-2015-3193/
|
CWE-200
|
https://git.openssl.org/?p=openssl.git;a=commit;h=d73cc256c8e256c32ed959456101b73ba9842f72
|
d73cc256c8e256c32ed959456101b73ba9842f72
| null |
int test_add(BIO *bp)
{
BIGNUM a, b, c;
int i;
BN_init(&a);
BN_init(&b);
BN_init(&c);
BN_bntest_rand(&a, 512, 0, 0);
for (i = 0; i < num0; i++) {
BN_bntest_rand(&b, 450 + i, 0, 0);
a.neg = rand_neg();
b.neg = rand_neg();
BN_add(&c, &a, &b);
if (bp != NULL) {
if (!results) {
BN_print(bp, &a);
BIO_puts(bp, " + ");
BN_print(bp, &b);
BIO_puts(bp, " - ");
}
BN_print(bp, &c);
BIO_puts(bp, "\n");
}
a.neg = !a.neg;
b.neg = !b.neg;
BN_add(&c, &c, &b);
BN_add(&c, &c, &a);
if (!BN_is_zero(&c)) {
fprintf(stderr, "Add test failed!\n");
return 0;
}
}
BN_free(&a);
BN_free(&b);
BN_free(&c);
return (1);
}
|
int test_add(BIO *bp)
{
BIGNUM a, b, c;
int i;
BN_init(&a);
BN_init(&b);
BN_init(&c);
BN_bntest_rand(&a, 512, 0, 0);
for (i = 0; i < num0; i++) {
BN_bntest_rand(&b, 450 + i, 0, 0);
a.neg = rand_neg();
b.neg = rand_neg();
BN_add(&c, &a, &b);
if (bp != NULL) {
if (!results) {
BN_print(bp, &a);
BIO_puts(bp, " + ");
BN_print(bp, &b);
BIO_puts(bp, " - ");
}
BN_print(bp, &c);
BIO_puts(bp, "\n");
}
a.neg = !a.neg;
b.neg = !b.neg;
BN_add(&c, &c, &b);
BN_add(&c, &c, &a);
if (!BN_is_zero(&c)) {
fprintf(stderr, "Add test failed!\n");
return 0;
}
}
BN_free(&a);
BN_free(&b);
BN_free(&c);
return (1);
}
|
C
|
openssl
| 0 |
CVE-2017-5061
|
https://www.cvedetails.com/cve/CVE-2017-5061/
|
CWE-362
|
https://github.com/chromium/chromium/commit/5d78b84d39bd34bc9fce9d01c0dcd5a22a330d34
|
5d78b84d39bd34bc9fce9d01c0dcd5a22a330d34
|
(Reland) Discard compositor frames from unloaded web content
This is a reland of https://codereview.chromium.org/2707243005/ with a
small change to fix an uninitialized memory error that fails on MSAN
bots.
BUG=672847
[email protected], [email protected]
CQ_INCLUDE_TRYBOTS=master.tryserver.blink:linux_trusty_blink_rel;master.tryserver.chromium.linux:linux_site_isolation
Review-Url: https://codereview.chromium.org/2731283003
Cr-Commit-Position: refs/heads/master@{#454954}
|
void LayerTreeHost::SetLayerTreeMutator(
std::unique_ptr<LayerTreeMutator> mutator) {
proxy_->SetMutator(std::move(mutator));
}
|
void LayerTreeHost::SetLayerTreeMutator(
std::unique_ptr<LayerTreeMutator> mutator) {
proxy_->SetMutator(std::move(mutator));
}
|
C
|
Chrome
| 0 |
CVE-2015-8215
|
https://www.cvedetails.com/cve/CVE-2015-8215/
|
CWE-20
|
https://github.com/torvalds/linux/commit/77751427a1ff25b27d47a4c36b12c3c8667855ac
|
77751427a1ff25b27d47a4c36b12c3c8667855ac
|
ipv6: addrconf: validate new MTU before applying it
Currently we don't check if the new MTU is valid or not and this allows
one to configure an MTU smaller than the minimum allowed by RFCs or even
bigger than the interface's own MTU, which is a problem as it may lead to packet
drops.
If you have a daemon like NetworkManager running, this may be exploited
by remote attackers by forging RA packets with an invalid MTU, possibly
leading to a DoS. (NetworkManager currently only validates for values
too small, but not for too big ones.)
The fix is just to make sure the new value is valid. That is, between
IPV6_MIN_MTU and interface's MTU.
Note that similar check is already performed at
ndisc_router_discovery(), for when kernel itself parses the RA.
Signed-off-by: Marcelo Ricardo Leitner <[email protected]>
Signed-off-by: Sabrina Dubroca <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
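The added check reduces to a range test: apply a remotely supplied MTU only if it lies between the IPv6 minimum and the interface's own MTU. A hedged sketch, with dev_mtu standing in for idev->dev->mtu:
#include <stdio.h>
#define IPV6_MIN_MTU 1280	/* minimum IPv6 link MTU per the RFCs */
static int ipv6_mtu_valid(unsigned int new_mtu, unsigned int dev_mtu)
{
	return new_mtu >= IPV6_MIN_MTU && new_mtu <= dev_mtu;
}
int main(void)
{
	unsigned int dev_mtu = 1500;
	unsigned int candidates[] = { 68, 1280, 1400, 9000 };
	for (int i = 0; i < 4; i++)
		printf("mtu %u -> %s\n", candidates[i],
		       ipv6_mtu_valid(candidates[i], dev_mtu) ? "apply" : "drop");
	return 0;
}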
|
static int inet6_fill_ifaddr(struct sk_buff *skb, struct inet6_ifaddr *ifa,
u32 portid, u32 seq, int event, unsigned int flags)
{
struct nlmsghdr *nlh;
u32 preferred, valid;
nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct ifaddrmsg), flags);
if (nlh == NULL)
return -EMSGSIZE;
put_ifaddrmsg(nlh, ifa->prefix_len, ifa->flags, rt_scope(ifa->scope),
ifa->idev->dev->ifindex);
if (!((ifa->flags&IFA_F_PERMANENT) &&
(ifa->prefered_lft == INFINITY_LIFE_TIME))) {
preferred = ifa->prefered_lft;
valid = ifa->valid_lft;
if (preferred != INFINITY_LIFE_TIME) {
long tval = (jiffies - ifa->tstamp)/HZ;
if (preferred > tval)
preferred -= tval;
else
preferred = 0;
if (valid != INFINITY_LIFE_TIME) {
if (valid > tval)
valid -= tval;
else
valid = 0;
}
}
} else {
preferred = INFINITY_LIFE_TIME;
valid = INFINITY_LIFE_TIME;
}
if (!ipv6_addr_any(&ifa->peer_addr)) {
if (nla_put(skb, IFA_LOCAL, 16, &ifa->addr) < 0 ||
nla_put(skb, IFA_ADDRESS, 16, &ifa->peer_addr) < 0)
goto error;
} else
if (nla_put(skb, IFA_ADDRESS, 16, &ifa->addr) < 0)
goto error;
if (put_cacheinfo(skb, ifa->cstamp, ifa->tstamp, preferred, valid) < 0)
goto error;
if (nla_put_u32(skb, IFA_FLAGS, ifa->flags) < 0)
goto error;
nlmsg_end(skb, nlh);
return 0;
error:
nlmsg_cancel(skb, nlh);
return -EMSGSIZE;
}
|
static int inet6_fill_ifaddr(struct sk_buff *skb, struct inet6_ifaddr *ifa,
u32 portid, u32 seq, int event, unsigned int flags)
{
struct nlmsghdr *nlh;
u32 preferred, valid;
nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct ifaddrmsg), flags);
if (nlh == NULL)
return -EMSGSIZE;
put_ifaddrmsg(nlh, ifa->prefix_len, ifa->flags, rt_scope(ifa->scope),
ifa->idev->dev->ifindex);
if (!((ifa->flags&IFA_F_PERMANENT) &&
(ifa->prefered_lft == INFINITY_LIFE_TIME))) {
preferred = ifa->prefered_lft;
valid = ifa->valid_lft;
if (preferred != INFINITY_LIFE_TIME) {
long tval = (jiffies - ifa->tstamp)/HZ;
if (preferred > tval)
preferred -= tval;
else
preferred = 0;
if (valid != INFINITY_LIFE_TIME) {
if (valid > tval)
valid -= tval;
else
valid = 0;
}
}
} else {
preferred = INFINITY_LIFE_TIME;
valid = INFINITY_LIFE_TIME;
}
if (!ipv6_addr_any(&ifa->peer_addr)) {
if (nla_put(skb, IFA_LOCAL, 16, &ifa->addr) < 0 ||
nla_put(skb, IFA_ADDRESS, 16, &ifa->peer_addr) < 0)
goto error;
} else
if (nla_put(skb, IFA_ADDRESS, 16, &ifa->addr) < 0)
goto error;
if (put_cacheinfo(skb, ifa->cstamp, ifa->tstamp, preferred, valid) < 0)
goto error;
if (nla_put_u32(skb, IFA_FLAGS, ifa->flags) < 0)
goto error;
nlmsg_end(skb, nlh);
return 0;
error:
nlmsg_cancel(skb, nlh);
return -EMSGSIZE;
}
|
C
|
linux
| 0 |
CVE-2014-0196
|
https://www.cvedetails.com/cve/CVE-2014-0196/
|
CWE-362
|
https://github.com/torvalds/linux/commit/4291086b1f081b869c6d79e5b7441633dc3ace00
|
4291086b1f081b869c6d79e5b7441633dc3ace00
|
n_tty: Fix n_tty_write crash when echoing in raw mode
The tty atomic_write_lock does not provide an exclusion guarantee for
the tty driver if the termios settings are LECHO & !OPOST. And since
it is unexpected and not allowed to call TTY buffer helpers like
tty_insert_flip_string concurrently, this may lead to crashes when
concurrect writers call pty_write. In that case the following two
writers:
* the ECHOing from a workqueue and
* pty_write from the process
race and can overflow the corresponding TTY buffer like follows.
If we look into tty_insert_flip_string_fixed_flag, there is:
int space = __tty_buffer_request_room(port, goal, flags);
struct tty_buffer *tb = port->buf.tail;
...
memcpy(char_buf_ptr(tb, tb->used), chars, space);
...
tb->used += space;
so the race of the two can result in something like this:
  A: __tty_buffer_request_room
  B: __tty_buffer_request_room
  A: memcpy(buf(tb->used), ...)
  A: tb->used += space;
  B: memcpy(buf(tb->used), ...) ->BOOM
B's memcpy is past the tty_buffer due to the previous A's tb->used
increment.
Since the N_TTY line discipline input processing can output
concurrently with a tty write, obtain the N_TTY ldisc output_lock to
serialize echo output with normal tty writes. This ensures the tty
buffer helper tty_insert_flip_string is not called concurrently and
everything is fine.
Note that this is nicely reproducible by an ordinary user using
forkpty and some setup around that (raw termios + ECHO). And it is
present in kernels at least after commit
d945cb9cce20ac7143c2de8d88b187f62db99bdc (pty: Rework the pty layer to
use the normal buffering logic) in 2.6.31-rc3.
js: add more info to the commit log
js: switch to bool
js: lock unconditionally
js: lock only the tty->ops->write call
References: CVE-2014-0196
Reported-and-tested-by: Jiri Slaby <[email protected]>
Signed-off-by: Peter Hurley <[email protected]>
Signed-off-by: Jiri Slaby <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Alan Cox <[email protected]>
Cc: <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
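The shape of the fix — making reserve-and-copy into the shared buffer atomic by serializing both producers under one lock — can be sketched with pthreads. buffer_append is illustrative, not the n_tty code; build with -lpthread:
#include <pthread.h>
#include <stdio.h>
#include <string.h>
static char buf[64];
static size_t used;
static pthread_mutex_t output_lock = PTHREAD_MUTEX_INITIALIZER;
/* Reserve space and copy under one lock, so two producers can never
 * copy at the same offset and run past the reserved region. */
static void buffer_append(const char *s, size_t n)
{
	pthread_mutex_lock(&output_lock);
	if (used + n <= sizeof(buf)) {
		memcpy(buf + used, s, n);
		used += n;
	}
	pthread_mutex_unlock(&output_lock);
}
static void *echo_worker(void *arg)	/* the echo-from-workqueue producer */
{
	(void)arg;
	for (int i = 0; i < 8; i++)
		buffer_append("e", 1);
	return NULL;
}
int main(void)
{
	pthread_t t;
	pthread_create(&t, NULL, echo_worker, NULL);
	for (int i = 0; i < 8; i++)
		buffer_append("w", 1);	/* the pty_write-like producer */
	pthread_join(t, NULL);
	printf("%.*s\n", (int)used, buf);
	return 0;
}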
|
static inline void finish_erasing(struct n_tty_data *ldata)
{
if (ldata->erasing) {
echo_char_raw('/', ldata);
ldata->erasing = 0;
}
}
|
static inline void finish_erasing(struct n_tty_data *ldata)
{
if (ldata->erasing) {
echo_char_raw('/', ldata);
ldata->erasing = 0;
}
}
|
C
|
linux
| 0 |
CVE-2016-5164
|
https://www.cvedetails.com/cve/CVE-2016-5164/
|
CWE-79
|
https://github.com/chromium/chromium/commit/93bc623489bdcfc7e9127614fcfb3258edf3f0f9
|
93bc623489bdcfc7e9127614fcfb3258edf3f0f9
|
[DevTools] Copy objects from debugger context to inspected context properly.
BUG=637594
Review-Url: https://codereview.chromium.org/2253643002
Cr-Commit-Position: refs/heads/master@{#412436}
|
void V8Debugger::setPauseOnExceptionsState(PauseOnExceptionsState pauseOnExceptionsState)
{
DCHECK(enabled());
v8::HandleScope scope(m_isolate);
v8::Context::Scope contextScope(debuggerContext());
v8::Local<v8::Value> argv[] = { v8::Int32::New(m_isolate, pauseOnExceptionsState) };
callDebuggerMethod("setPauseOnExceptionsState", 1, argv);
}
|
void V8Debugger::setPauseOnExceptionsState(PauseOnExceptionsState pauseOnExceptionsState)
{
DCHECK(enabled());
v8::HandleScope scope(m_isolate);
v8::Context::Scope contextScope(debuggerContext());
v8::Local<v8::Value> argv[] = { v8::Int32::New(m_isolate, pauseOnExceptionsState) };
callDebuggerMethod("setPauseOnExceptionsState", 1, argv);
}
|
C
|
Chrome
| 0 |
CVE-2016-7969
|
https://www.cvedetails.com/cve/CVE-2016-7969/
|
CWE-125
|
https://github.com/libass/libass/pull/240/commits/b72b283b936a600c730e00875d7d067bded3fc26
|
b72b283b936a600c730e00875d7d067bded3fc26
|
Fix line wrapping mode 0/3 bugs
This fixes two separate bugs:
a) Don't move a linebreak into the first symbol. This results in a empty
line at the front, which does not help to equalize line lengths at all.
b) When moving a linebreak into a symbol that already is a break, the
number of lines must be decremented. Otherwise, uninitialized memory
is possibly used for later layout operations.
Found by fuzzer test case
id:000085,sig:11,src:003377+003350,op:splice,rep:8.
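A minimal sketch of the two guards, with hypothetical names (breaks[i] marks whether glyph i starts a new line, n_lines is the running count); this illustrates the rule, not libass's actual layout code:
/* Move the break belonging to some line from old_pos to pos. */
static void move_break(int *breaks, int *n_lines, int old_pos, int pos)
{
    if (pos == 0)
        return;           /* (a) a break at glyph 0 only creates an empty leading line */
    breaks[old_pos] = 0;
    if (breaks[pos])
        (*n_lines)--;     /* (b) target already breaks: two lines just merged into one */
    breaks[pos] = 1;
}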
|
static void fill_composite_hash(CompositeHashKey *hk, CombinedBitmapInfo *info)
{
hk->filter = info->filter;
hk->bitmap_count = info->bitmap_count;
hk->bitmaps = info->bitmaps;
}
|
static void fill_composite_hash(CompositeHashKey *hk, CombinedBitmapInfo *info)
{
hk->filter = info->filter;
hk->bitmap_count = info->bitmap_count;
hk->bitmaps = info->bitmaps;
}
|
C
|
libass
| 0 |
CVE-2012-2842
|
https://www.cvedetails.com/cve/CVE-2012-2842/
|
CWE-399
|
https://github.com/chromium/chromium/commit/e3171b346e6919f4162ea128d0f7b342cf878fd4
|
e3171b346e6919f4162ea128d0f7b342cf878fd4
|
ash: Fix right-alignment of power-status text.
It turns out setting ALIGN_RIGHT on a Label isn't enough to get proper
right-aligned text. Label has to be explicitly told that it is multi-lined.
BUG=none
TEST=none
Review URL: https://chromiumcodereview.appspot.com/9918026
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@129898 0039d316-1c4b-4281-b951-d872f2087c98
|
views::View* TrayPower::CreateDefaultView(user::LoginStatus status) {
date_.reset(new tray::DateView(tray::DateView::DATE));
if (status != user::LOGGED_IN_NONE && status != user::LOGGED_IN_LOCKED)
date_->set_actionable(true);
views::View* container = new views::View;
views::BoxLayout* layout = new views::BoxLayout(views::BoxLayout::kHorizontal,
kTrayPopupPaddingHorizontal, 10, 0);
layout->set_spread_blank_space(true);
container->SetLayoutManager(layout);
container->set_background(views::Background::CreateSolidBackground(
SkColorSetRGB(0xf1, 0xf1, 0xf1)));
container->AddChildView(date_.get());
PowerSupplyStatus power_status =
ash::Shell::GetInstance()->tray_delegate()->GetPowerSupplyStatus();
if (power_status.battery_is_present) {
power_.reset(new tray::PowerPopupView());
power_->UpdatePowerStatus(power_status);
container->AddChildView(power_.get());
}
return container;
}
|
views::View* TrayPower::CreateDefaultView(user::LoginStatus status) {
date_.reset(new tray::DateView(tray::DateView::DATE));
if (status != user::LOGGED_IN_NONE && status != user::LOGGED_IN_LOCKED)
date_->set_actionable(true);
views::View* container = new views::View;
views::BoxLayout* layout = new views::BoxLayout(views::BoxLayout::kHorizontal,
kTrayPopupPaddingHorizontal, 10, 0);
layout->set_spread_blank_space(true);
container->SetLayoutManager(layout);
container->set_background(views::Background::CreateSolidBackground(
SkColorSetRGB(0xf1, 0xf1, 0xf1)));
container->AddChildView(date_.get());
PowerSupplyStatus power_status =
ash::Shell::GetInstance()->tray_delegate()->GetPowerSupplyStatus();
if (power_status.battery_is_present) {
power_.reset(new tray::PowerPopupView());
power_->UpdatePowerStatus(power_status);
container->AddChildView(power_.get());
}
return container;
}
|
C
|
Chrome
| 0 |
CVE-2014-5045
|
https://www.cvedetails.com/cve/CVE-2014-5045/
|
CWE-59
|
https://github.com/torvalds/linux/commit/295dc39d941dc2ae53d5c170365af4c9d5c16212
|
295dc39d941dc2ae53d5c170365af4c9d5c16212
|
fs: umount on symlink leaks mnt count
Currently umount on symlink blocks following umount:
/vz is separate mount
# ls /vz/ -al | grep test
drwxr-xr-x. 2 root root 4096 Jul 19 01:14 testdir
lrwxrwxrwx. 1 root root 11 Jul 19 01:16 testlink -> /vz/testdir
# umount -l /vz/testlink
umount: /vz/testlink: not mounted (expected)
# lsof /vz
# umount /vz
umount: /vz: device is busy. (unexpected)
In this case mountpoint_last() gets an extra refcount on path->mnt
Signed-off-by: Vasily Averin <[email protected]>
Acked-by: Ian Kent <[email protected]>
Acked-by: Jeff Layton <[email protected]>
Cc: [email protected]
Signed-off-by: Christoph Hellwig <[email protected]>
|
static int unlazy_walk(struct nameidata *nd, struct dentry *dentry)
{
struct fs_struct *fs = current->fs;
struct dentry *parent = nd->path.dentry;
BUG_ON(!(nd->flags & LOOKUP_RCU));
/*
* After legitimizing the bastards, terminate_walk()
* will do the right thing for non-RCU mode, and all our
* subsequent exit cases should rcu_read_unlock()
* before returning. Do vfsmount first; if dentry
* can't be legitimized, just set nd->path.dentry to NULL
* and rely on dput(NULL) being a no-op.
*/
if (!legitimize_mnt(nd->path.mnt, nd->m_seq))
return -ECHILD;
nd->flags &= ~LOOKUP_RCU;
if (!lockref_get_not_dead(&parent->d_lockref)) {
nd->path.dentry = NULL;
goto out;
}
/*
* For a negative lookup, the lookup sequence point is the parents
* sequence point, and it only needs to revalidate the parent dentry.
*
* For a positive lookup, we need to move both the parent and the
* dentry from the RCU domain to be properly refcounted. And the
* sequence number in the dentry validates *both* dentry counters,
* since we checked the sequence number of the parent after we got
* the child sequence number. So we know the parent must still
* be valid if the child sequence number is still valid.
*/
if (!dentry) {
if (read_seqcount_retry(&parent->d_seq, nd->seq))
goto out;
BUG_ON(nd->inode != parent->d_inode);
} else {
if (!lockref_get_not_dead(&dentry->d_lockref))
goto out;
if (read_seqcount_retry(&dentry->d_seq, nd->seq))
goto drop_dentry;
}
/*
* Sequence counts matched. Now make sure that the root is
* still valid and get it if required.
*/
if (nd->root.mnt && !(nd->flags & LOOKUP_ROOT)) {
spin_lock(&fs->lock);
if (nd->root.mnt != fs->root.mnt || nd->root.dentry != fs->root.dentry)
goto unlock_and_drop_dentry;
path_get(&nd->root);
spin_unlock(&fs->lock);
}
rcu_read_unlock();
return 0;
unlock_and_drop_dentry:
spin_unlock(&fs->lock);
drop_dentry:
rcu_read_unlock();
dput(dentry);
goto drop_root_mnt;
out:
rcu_read_unlock();
drop_root_mnt:
if (!(nd->flags & LOOKUP_ROOT))
nd->root.mnt = NULL;
return -ECHILD;
}
|
static int unlazy_walk(struct nameidata *nd, struct dentry *dentry)
{
struct fs_struct *fs = current->fs;
struct dentry *parent = nd->path.dentry;
BUG_ON(!(nd->flags & LOOKUP_RCU));
/*
* After legitimizing the bastards, terminate_walk()
* will do the right thing for non-RCU mode, and all our
* subsequent exit cases should rcu_read_unlock()
* before returning. Do vfsmount first; if dentry
* can't be legitimized, just set nd->path.dentry to NULL
* and rely on dput(NULL) being a no-op.
*/
if (!legitimize_mnt(nd->path.mnt, nd->m_seq))
return -ECHILD;
nd->flags &= ~LOOKUP_RCU;
if (!lockref_get_not_dead(&parent->d_lockref)) {
nd->path.dentry = NULL;
goto out;
}
/*
* For a negative lookup, the lookup sequence point is the parents
* sequence point, and it only needs to revalidate the parent dentry.
*
* For a positive lookup, we need to move both the parent and the
* dentry from the RCU domain to be properly refcounted. And the
* sequence number in the dentry validates *both* dentry counters,
* since we checked the sequence number of the parent after we got
* the child sequence number. So we know the parent must still
* be valid if the child sequence number is still valid.
*/
if (!dentry) {
if (read_seqcount_retry(&parent->d_seq, nd->seq))
goto out;
BUG_ON(nd->inode != parent->d_inode);
} else {
if (!lockref_get_not_dead(&dentry->d_lockref))
goto out;
if (read_seqcount_retry(&dentry->d_seq, nd->seq))
goto drop_dentry;
}
/*
* Sequence counts matched. Now make sure that the root is
* still valid and get it if required.
*/
if (nd->root.mnt && !(nd->flags & LOOKUP_ROOT)) {
spin_lock(&fs->lock);
if (nd->root.mnt != fs->root.mnt || nd->root.dentry != fs->root.dentry)
goto unlock_and_drop_dentry;
path_get(&nd->root);
spin_unlock(&fs->lock);
}
rcu_read_unlock();
return 0;
unlock_and_drop_dentry:
spin_unlock(&fs->lock);
drop_dentry:
rcu_read_unlock();
dput(dentry);
goto drop_root_mnt;
out:
rcu_read_unlock();
drop_root_mnt:
if (!(nd->flags & LOOKUP_ROOT))
nd->root.mnt = NULL;
return -ECHILD;
}
|
C
|
linux
| 0 |
CVE-2014-2739
|
https://www.cvedetails.com/cve/CVE-2014-2739/
|
CWE-20
|
https://github.com/torvalds/linux/commit/b2853fd6c2d0f383dbdf7427e263eb576a633867
|
b2853fd6c2d0f383dbdf7427e263eb576a633867
|
IB/core: Don't resolve passive side RoCE L2 address in CMA REQ handler
The code that resolves the passive side source MAC within the rdma_cm
connection request handler was both redundant and buggy, so remove it.
It was redundant since later, when an RC QP is modified to RTR state,
the resolution will take place in the ib_core module. It was buggy
because this callback also deals with UD SIDR exchange, for which we
incorrectly looked at the REQ member of the CM event and dereferenced
a random value.
Fixes: dd5f03beb4f7 ("IB/core: Ethernet L2 attributes in verbs/cm structures")
Signed-off-by: Moni Shoua <[email protected]>
Signed-off-by: Or Gerlitz <[email protected]>
Signed-off-by: Roland Dreier <[email protected]>
|
static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
struct sockaddr *dst_addr)
{
if (!src_addr || !src_addr->sa_family) {
src_addr = (struct sockaddr *) &id->route.addr.src_addr;
src_addr->sa_family = dst_addr->sa_family;
if (dst_addr->sa_family == AF_INET6) {
((struct sockaddr_in6 *) src_addr)->sin6_scope_id =
((struct sockaddr_in6 *) dst_addr)->sin6_scope_id;
} else if (dst_addr->sa_family == AF_IB) {
((struct sockaddr_ib *) src_addr)->sib_pkey =
((struct sockaddr_ib *) dst_addr)->sib_pkey;
}
}
return rdma_bind_addr(id, src_addr);
}
|
static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
struct sockaddr *dst_addr)
{
if (!src_addr || !src_addr->sa_family) {
src_addr = (struct sockaddr *) &id->route.addr.src_addr;
src_addr->sa_family = dst_addr->sa_family;
if (dst_addr->sa_family == AF_INET6) {
((struct sockaddr_in6 *) src_addr)->sin6_scope_id =
((struct sockaddr_in6 *) dst_addr)->sin6_scope_id;
} else if (dst_addr->sa_family == AF_IB) {
((struct sockaddr_ib *) src_addr)->sib_pkey =
((struct sockaddr_ib *) dst_addr)->sib_pkey;
}
}
return rdma_bind_addr(id, src_addr);
}
|
C
|
linux
| 0 |
CVE-2015-6768
|
https://www.cvedetails.com/cve/CVE-2015-6768/
|
CWE-264
|
https://github.com/chromium/chromium/commit/4c8b008f055f79e622344627fed7f820375a4f01
|
4c8b008f055f79e622344627fed7f820375a4f01
|
Change Document::detach() to RELEASE_ASSERT all subframes are gone.
BUG=556724,577105
Review URL: https://codereview.chromium.org/1667573002
Cr-Commit-Position: refs/heads/master@{#373642}
|
Element* Document::elementFromPoint(int x, int y) const
{
if (!layoutView())
return 0;
return TreeScope::elementFromPoint(x, y);
}
|
Element* Document::elementFromPoint(int x, int y) const
{
if (!layoutView())
return 0;
return TreeScope::elementFromPoint(x, y);
}
|
C
|
Chrome
| 0 |
CVE-2016-10030
|
https://www.cvedetails.com/cve/CVE-2016-10030/
|
CWE-284
|
https://github.com/SchedMD/slurm/commit/92362a92fffe60187df61f99ab11c249d44120ee
|
92362a92fffe60187df61f99ab11c249d44120ee
|
Fix security issue in _prolog_error().
Fix security issue caused by insecure file path handling triggered by
the failure of a Prolog script. To exploit this a user needs to
anticipate or cause the Prolog to fail for their job.
(This commit is slightly different from the fix to the 15.08 branch.)
CVE-2016-10030.
|
_rpc_step_complete(slurm_msg_t *msg)
{
step_complete_msg_t *req = (step_complete_msg_t *)msg->data;
int rc = SLURM_SUCCESS;
int fd;
uid_t req_uid;
uint16_t protocol_version;
debug3("Entering _rpc_step_complete");
fd = stepd_connect(conf->spooldir, conf->node_name,
req->job_id, req->job_step_id, &protocol_version);
if (fd == -1) {
error("stepd_connect to %u.%u failed: %m",
req->job_id, req->job_step_id);
rc = ESLURM_INVALID_JOB_ID;
goto done;
}
/* step completion messages are only allowed from other slurmstepd,
so only root or SlurmUser is allowed here */
req_uid = g_slurm_auth_get_uid(msg->auth_cred, conf->auth_info);
if (!_slurm_authorized_user(req_uid)) {
debug("step completion from uid %ld for job %u.%u",
(long) req_uid, req->job_id, req->job_step_id);
rc = ESLURM_USER_ID_MISSING; /* or bad in this case */
goto done2;
}
rc = stepd_completion(fd, protocol_version, req);
if (rc == -1)
rc = ESLURMD_JOB_NOTRUNNING;
done2:
close(fd);
done:
slurm_send_rc_msg(msg, rc);
return rc;
}
|
_rpc_step_complete(slurm_msg_t *msg)
{
step_complete_msg_t *req = (step_complete_msg_t *)msg->data;
int rc = SLURM_SUCCESS;
int fd;
uid_t req_uid;
uint16_t protocol_version;
debug3("Entering _rpc_step_complete");
fd = stepd_connect(conf->spooldir, conf->node_name,
req->job_id, req->job_step_id, &protocol_version);
if (fd == -1) {
error("stepd_connect to %u.%u failed: %m",
req->job_id, req->job_step_id);
rc = ESLURM_INVALID_JOB_ID;
goto done;
}
/* step completion messages are only allowed from other slurmstepd,
so only root or SlurmUser is allowed here */
req_uid = g_slurm_auth_get_uid(msg->auth_cred, conf->auth_info);
if (!_slurm_authorized_user(req_uid)) {
debug("step completion from uid %ld for job %u.%u",
(long) req_uid, req->job_id, req->job_step_id);
rc = ESLURM_USER_ID_MISSING; /* or bad in this case */
goto done2;
}
rc = stepd_completion(fd, protocol_version, req);
if (rc == -1)
rc = ESLURMD_JOB_NOTRUNNING;
done2:
close(fd);
done:
slurm_send_rc_msg(msg, rc);
return rc;
}
|
C
|
slurm
| 0 |
CVE-2014-3980
|
https://www.cvedetails.com/cve/CVE-2014-3980/
|
CWE-264
|
https://github.com/ueno/libfep/commit/293d9d3f
|
293d9d3f
|
Don't use abstract Unix domain sockets
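For context on why the change matters, a sketch of the two address setups (shaped after the code below, simplified): a Linux abstract socket is named by a leading NUL in sun_path and has no filesystem node, so it bypasses file permissions entirely; a filesystem socket is an actual path subject to normal access control.
#include <stddef.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/un.h>
/* Filesystem socket: the patched code keeps only this form. */
static socklen_t fill_path_addr(struct sockaddr_un *sun, const char *address)
{
    memset(sun, 0, sizeof(*sun));
    sun->sun_family = AF_UNIX;
    memcpy(sun->sun_path, address, strlen(address));
    return sizeof(struct sockaddr_un);
}
/* Abstract socket (Linux only): leading NUL, no inode, any local
 * user may connect. This is the form the patch removes. */
static socklen_t fill_abstract_addr(struct sockaddr_un *sun, const char *address)
{
    memset(sun, 0, sizeof(*sun));
    sun->sun_family = AF_UNIX;
    sun->sun_path[0] = '\0';
    memcpy(sun->sun_path + 1, address, strlen(address));
    return offsetof(struct sockaddr_un, sun_path) + strlen(address) + 1;
}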
|
fep_client_open (const char *address)
{
FepClient *client;
struct sockaddr_un sun;
ssize_t sun_len;
int retval;
if (!address)
address = getenv ("LIBFEP_CONTROL_SOCK");
if (!address)
return NULL;
if (strlen (address) + 1 >= sizeof(sun.sun_path))
{
fep_log (FEP_LOG_LEVEL_WARNING,
"unix domain socket path too long: %d + 1 >= %d",
strlen (address),
sizeof (sun.sun_path));
free (address);
return NULL;
}
client = xzalloc (sizeof(FepClient));
client->filter_running = false;
client->messages = NULL;
memset (&sun, 0, sizeof(struct sockaddr_un));
sun.sun_family = AF_UNIX;
memcpy (sun.sun_path, address, strlen (address));
sun_len = sizeof (struct sockaddr_un);
client->control = socket (AF_UNIX, SOCK_STREAM, 0);
if (client->control < 0)
{
free (client);
return NULL;
}
retval = connect (client->control,
(const struct sockaddr *) &sun,
sun_len);
if (retval < 0)
{
close (client->control);
free (client);
return NULL;
}
return client;
}
|
fep_client_open (const char *address)
{
FepClient *client;
struct sockaddr_un sun;
ssize_t sun_len;
int retval;
if (!address)
address = getenv ("LIBFEP_CONTROL_SOCK");
if (!address)
return NULL;
if (strlen (address) + 1 >= sizeof(sun.sun_path))
{
fep_log (FEP_LOG_LEVEL_WARNING,
"unix domain socket path too long: %d + 1 >= %d",
strlen (address),
sizeof (sun.sun_path));
free (address);
return NULL;
}
client = xzalloc (sizeof(FepClient));
client->filter_running = false;
client->messages = NULL;
memset (&sun, 0, sizeof(struct sockaddr_un));
sun.sun_family = AF_UNIX;
#ifdef __linux__
sun.sun_path[0] = '\0';
memcpy (sun.sun_path + 1, address, strlen (address));
sun_len = offsetof (struct sockaddr_un, sun_path) + strlen (address) + 1;
#else
memcpy (sun.sun_path, address, strlen (address));
sun_len = sizeof (struct sockaddr_un);
#endif
client->control = socket (AF_UNIX, SOCK_STREAM, 0);
if (client->control < 0)
{
free (client);
return NULL;
}
retval = connect (client->control,
(const struct sockaddr *) &sun,
sun_len);
if (retval < 0)
{
close (client->control);
free (client);
return NULL;
}
return client;
}
|
C
|
libfep
| 1 |
CVE-2017-9739
|
https://www.cvedetails.com/cve/CVE-2017-9739/
|
CWE-125
|
http://git.ghostscript.com/?p=ghostpdl.git;a=commit;h=c501a58f8d5650c8ba21d447c0d6f07eafcb0f15
|
c501a58f8d5650c8ba21d447c0d6f07eafcb0f15
| null |
static void Ins_IF( INS_ARG )
{
Int nIfs;
Bool Out;
if ( args[0] != 0 )
return;
nIfs = 1;
Out = 0;
do
{
if ( SKIP_Code() == FAILURE )
return;
switch ( CUR.opcode )
{
case 0x58: /* IF */
nIfs++;
break;
case 0x1b: /* ELSE */
Out = (nIfs == 1);
break;
case 0x59: /* EIF */
nIfs--;
Out = (nIfs == 0);
break;
}
} while ( Out == 0 );
}
|
static void Ins_IF( INS_ARG )
{
Int nIfs;
Bool Out;
if ( args[0] != 0 )
return;
nIfs = 1;
Out = 0;
do
{
if ( SKIP_Code() == FAILURE )
return;
switch ( CUR.opcode )
{
case 0x58: /* IF */
nIfs++;
break;
case 0x1b: /* ELSE */
Out = (nIfs == 1);
break;
case 0x59: /* EIF */
nIfs--;
Out = (nIfs == 0);
break;
}
} while ( Out == 0 );
}
|
C
|
ghostscript
| 0 |
CVE-2014-5077
|
https://www.cvedetails.com/cve/CVE-2014-5077/
| null |
https://github.com/torvalds/linux/commit/1be9a950c646c9092fb3618197f7b6bfb50e82aa
|
1be9a950c646c9092fb3618197f7b6bfb50e82aa
|
net: sctp: inherit auth_capable on INIT collisions
Jason reported an oops caused by SCTP on his ARM machine with
SCTP authentication enabled:
Internal error: Oops: 17 [#1] ARM
CPU: 0 PID: 104 Comm: sctp-test Not tainted 3.13.0-68744-g3632f30c9b20-dirty #1
task: c6eefa40 ti: c6f52000 task.ti: c6f52000
PC is at sctp_auth_calculate_hmac+0xc4/0x10c
LR is at sg_init_table+0x20/0x38
pc : [<c024bb80>] lr : [<c00f32dc>] psr: 40000013
sp : c6f538e8 ip : 00000000 fp : c6f53924
r10: c6f50d80 r9 : 00000000 r8 : 00010000
r7 : 00000000 r6 : c7be4000 r5 : 00000000 r4 : c6f56254
r3 : c00c8170 r2 : 00000001 r1 : 00000008 r0 : c6f1e660
Flags: nZcv IRQs on FIQs on Mode SVC_32 ISA ARM Segment user
Control: 0005397f Table: 06f28000 DAC: 00000015
Process sctp-test (pid: 104, stack limit = 0xc6f521c0)
Stack: (0xc6f538e8 to 0xc6f54000)
[...]
Backtrace:
[<c024babc>] (sctp_auth_calculate_hmac+0x0/0x10c) from [<c0249af8>] (sctp_packet_transmit+0x33c/0x5c8)
[<c02497bc>] (sctp_packet_transmit+0x0/0x5c8) from [<c023e96c>] (sctp_outq_flush+0x7fc/0x844)
[<c023e170>] (sctp_outq_flush+0x0/0x844) from [<c023ef78>] (sctp_outq_uncork+0x24/0x28)
[<c023ef54>] (sctp_outq_uncork+0x0/0x28) from [<c0234364>] (sctp_side_effects+0x1134/0x1220)
[<c0233230>] (sctp_side_effects+0x0/0x1220) from [<c02330b0>] (sctp_do_sm+0xac/0xd4)
[<c0233004>] (sctp_do_sm+0x0/0xd4) from [<c023675c>] (sctp_assoc_bh_rcv+0x118/0x160)
[<c0236644>] (sctp_assoc_bh_rcv+0x0/0x160) from [<c023d5bc>] (sctp_inq_push+0x6c/0x74)
[<c023d550>] (sctp_inq_push+0x0/0x74) from [<c024a6b0>] (sctp_rcv+0x7d8/0x888)
While we already had various kind of bugs in that area
ec0223ec48a9 ("net: sctp: fix sctp_sf_do_5_1D_ce to verify if
we/peer is AUTH capable") and b14878ccb7fa ("net: sctp: cache
auth_enable per endpoint"), this one is a bit of a different
kind.
Giving a bit more background on why SCTP authentication is
needed can be found in RFC4895:
SCTP uses 32-bit verification tags to protect itself against
blind attackers. These values are not changed during the
lifetime of an SCTP association.
Looking at new SCTP extensions, there is the need to have a
method of proving that an SCTP chunk(s) was really sent by
the original peer that started the association and not by a
malicious attacker.
To cause this bug, we're triggering an INIT collision between
peers; normal SCTP handshake where both sides intend to
authenticate packets contains RANDOM; CHUNKS; HMAC-ALGO
parameters that are being negotiated among peers:
---------- INIT[RANDOM; CHUNKS; HMAC-ALGO] ---------->
<------- INIT-ACK[RANDOM; CHUNKS; HMAC-ALGO] ---------
-------------------- COOKIE-ECHO -------------------->
<-------------------- COOKIE-ACK ---------------------
RFC4895 says that each endpoint therefore knows its own random
number and the peer's random number *after* the association
has been established. The local and peer's random number along
with the shared key are then part of the secret used for
calculating the HMAC in the AUTH chunk.
Now, in our scenario, we have 2 threads with 1 non-blocking
SEQ_PACKET socket each, setting up common shared SCTP_AUTH_KEY
and SCTP_AUTH_ACTIVE_KEY properly, and each of them calling
sctp_bindx(3), listen(2) and connect(2) against each other,
thus the handshake looks similar to this, e.g.:
---------- INIT[RANDOM; CHUNKS; HMAC-ALGO] ---------->
<------- INIT-ACK[RANDOM; CHUNKS; HMAC-ALGO] ---------
<--------- INIT[RANDOM; CHUNKS; HMAC-ALGO] -----------
-------- INIT-ACK[RANDOM; CHUNKS; HMAC-ALGO] -------->
...
Since such collisions can also happen with verification tags,
the RFC4895 for AUTH rather vaguely says under section 6.1:
In case of INIT collision, the rules governing the handling
of this Random Number follow the same pattern as those for
the Verification Tag, as explained in Section 5.2.4 of
RFC 2960 [5]. Therefore, each endpoint knows its own Random
Number and the peer's Random Number after the association
has been established.
In RFC2960, section 5.2.4, we're eventually hitting Action B:
B) In this case, both sides may be attempting to start an
association at about the same time but the peer endpoint
started its INIT after responding to the local endpoint's
INIT. Thus it may have picked a new Verification Tag not
being aware of the previous Tag it had sent this endpoint.
The endpoint should stay in or enter the ESTABLISHED
state but it MUST update its peer's Verification Tag from
the State Cookie, stop any init or cookie timers that may be
running and send a COOKIE ACK.
In other words, the handling of the Random parameter is the
same as behavior for the Verification Tag as described in
Action B of section 5.2.4.
Looking at the code, we exactly hit the sctp_sf_do_dupcook_b()
case which triggers an SCTP_CMD_UPDATE_ASSOC command to the
side effect interpreter, and in fact it properly copies over
peer_{random, hmacs, chunks} parameters from the newly created
association to update the existing one.
Also, the old asoc_shared_key is being released and based on
the new params, sctp_auth_asoc_init_active_key() updated.
However, the issue observed in this case is that the previous
asoc->peer.auth_capable was 0, and has *not* been updated, so
that instead of creating a new secret, we're doing an early
return from the function sctp_auth_asoc_init_active_key()
leaving asoc->asoc_shared_key as NULL. However, we now have to
authenticate chunks from the updated chunk list (e.g. COOKIE-ACK).
That in fact causes the server side when responding with ...
<------------------ AUTH; COOKIE-ACK -----------------
... to trigger a NULL pointer dereference, since in
sctp_packet_transmit(), it discovers that an AUTH chunk is
being queued for xmit, and thus it calls sctp_auth_calculate_hmac().
Since the asoc->active_key_id is still inherited from the
endpoint, and the same as encoded into the chunk, it uses
asoc->asoc_shared_key, which is still NULL, as an asoc_key
and dereferences it in ...
crypto_hash_setkey(desc.tfm, &asoc_key->data[0], asoc_key->len)
... causing an oops. All this happens because sctp_make_cookie_ack()
called with the *new* association has the peer.auth_capable=1
and therefore marks the chunk with auth=1 after checking
sctp_auth_send_cid(), but it is *actually* sent later on over
the then *updated* association's transport that didn't initialize
its shared key due to peer.auth_capable=0. Since control chunks
in that case are not sent by the temporary association which
is scheduled for deletion, they are issued for xmit via
SCTP_CMD_REPLY in the interpreter with the context of the
*updated* association. peer.auth_capable was 0 in the updated
association (which went from COOKIE_WAIT into ESTABLISHED state),
since all previous processing that performed sctp_process_init()
was being done on temporary associations that we eventually
throw away each time.
The correct fix is to update to the new peer.auth_capable
value as well in the collision case via sctp_assoc_update(),
so that in case the collision migrated from 0 -> 1,
sctp_auth_asoc_init_active_key() can properly recalculate
the secret. This therefore fixes the observed server panic.
Fixes: 730fc3d05cd4 ("[SCTP]: Implete SCTP-AUTH parameter processing")
Reported-by: Jason Gunthorpe <[email protected]>
Signed-off-by: Daniel Borkmann <[email protected]>
Tested-by: Jason Gunthorpe <[email protected]>
Cc: Vlad Yasevich <[email protected]>
Acked-by: Vlad Yasevich <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
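A self-contained analogue of the fix (hypothetical names; the real change is one assignment in sctp_assoc_update() before the active key is recalculated):
#include <stdlib.h>
struct assoc {
    int auth_capable;  /* peer advertised AUTH support */
    char *shared_key;  /* NULL until the active key is computed */
};
static void init_active_key(struct assoc *a)
{
    if (!a->auth_capable)
        return;                  /* the early return that left the key NULL */
    a->shared_key = malloc(32);  /* stand-in for the real HMAC secret */
}
/* INIT collision: fold the newly negotiated (temporary) association
 * into the existing one. Without the auth_capable copy, a 0 -> 1
 * transition leaves shared_key NULL while AUTH chunks are queued. */
static void assoc_update(struct assoc *old, const struct assoc *new)
{
    old->auth_capable = new->auth_capable;  /* the essence of the fix */
    free(old->shared_key);
    old->shared_key = NULL;
    init_active_key(old);
}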
|
static u8 sctp_trans_score(const struct sctp_transport *trans)
{
return sctp_trans_state_to_prio_map[trans->state];
}
|
static u8 sctp_trans_score(const struct sctp_transport *trans)
{
return sctp_trans_state_to_prio_map[trans->state];
}
|
C
|
linux
| 0 |
CVE-2011-4131
|
https://www.cvedetails.com/cve/CVE-2011-4131/
|
CWE-189
|
https://github.com/torvalds/linux/commit/bf118a342f10dafe44b14451a1392c3254629a1f
|
bf118a342f10dafe44b14451a1392c3254629a1f
|
NFSv4: include bitmap in nfsv4 get acl data
The NFSv4 bitmap size is unbounded: a server can return an arbitrary
sized bitmap in an FATTR4_WORD0_ACL request. Replace using the
nfs4_fattr_bitmap_maxsz as a guess to the maximum bitmask returned by a server
with the inclusion of the bitmap (xdr length plus bitmasks) and the acl data
xdr length to the (cached) acl page data.
This is a general solution to commit e5012d1f "NFSv4.1: update
nfs4_fattr_bitmap_maxsz" and fixes hitting a BUG_ON in xdr_shrink_bufhead
when getting ACLs.
Fix a bug in decode_getacl that returned -EINVAL on ACLs > page when getxattr
was called with a NULL buffer, preventing ACL > PAGE_SIZE from being retrieved.
Cc: [email protected]
Signed-off-by: Andy Adamson <[email protected]>
Signed-off-by: Trond Myklebust <[email protected]>
|
static void nfs4_xdr_enc_layoutreturn(struct rpc_rqst *req,
struct xdr_stream *xdr,
struct nfs4_layoutreturn_args *args)
{
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
encode_compound_hdr(xdr, req, &hdr);
encode_sequence(xdr, &args->seq_args, &hdr);
encode_putfh(xdr, NFS_FH(args->inode), &hdr);
encode_layoutreturn(xdr, args, &hdr);
encode_nops(&hdr);
}
|
static void nfs4_xdr_enc_layoutreturn(struct rpc_rqst *req,
struct xdr_stream *xdr,
struct nfs4_layoutreturn_args *args)
{
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
encode_compound_hdr(xdr, req, &hdr);
encode_sequence(xdr, &args->seq_args, &hdr);
encode_putfh(xdr, NFS_FH(args->inode), &hdr);
encode_layoutreturn(xdr, args, &hdr);
encode_nops(&hdr);
}
|
C
|
linux
| 0 |
CVE-2016-3751
|
https://www.cvedetails.com/cve/CVE-2016-3751/
| null |
https://android.googlesource.com/platform/external/libpng/+/9d4853418ab2f754c2b63e091c29c5529b8b86ca
|
9d4853418ab2f754c2b63e091c29c5529b8b86ca
|
DO NOT MERGE Update libpng to 1.6.20
BUG:23265085
Change-Id: I85199805636d771f3597b691b63bc0bf46084833
(cherry picked from commit bbe98b40cda082024b669fa508931042eed18f82)
|
static double pcerr(const png_modifier *pm, int in_depth, int out_depth)
{
/* Percentage error permitted in the linear values. Note that the specified
* value is a percentage but this routine returns a simple number.
*/
if (pm->assume_16_bit_calculations ||
(pm->calculations_use_input_precision ? in_depth : out_depth) == 16)
return pm->maxpc16 * .01;
else
return pm->maxpc8 * .01;
}
|
static double pcerr(PNG_CONST png_modifier *pm, int in_depth, int out_depth)
{
/* Percentage error permitted in the linear values. Note that the specified
* value is a percentage but this routine returns a simple number.
*/
if (pm->assume_16_bit_calculations ||
(pm->calculations_use_input_precision ? in_depth : out_depth) == 16)
return pm->maxpc16 * .01;
else
return pm->maxpc8 * .01;
}
|
C
|
Android
| 1 |
CVE-2019-11360
|
https://www.cvedetails.com/cve/CVE-2019-11360/
|
CWE-119
|
https://git.netfilter.org/iptables/commit/iptables/xshared.c?id=2ae1099a42e6a0f06de305ca13a842ac83d4683e
|
2ae1099a42e6a0f06de305ca13a842ac83d4683e
| null |
static mainfunc_t subcmd_get(const char *cmd, const struct subcommand *cb)
{
for (; cb->name != NULL; ++cb)
if (strcmp(cb->name, cmd) == 0)
return cb->main;
return NULL;
}
|
static mainfunc_t subcmd_get(const char *cmd, const struct subcommand *cb)
{
for (; cb->name != NULL; ++cb)
if (strcmp(cb->name, cmd) == 0)
return cb->main;
return NULL;
}
|
C
|
netfilter
| 0 |
CVE-2018-12714
|
https://www.cvedetails.com/cve/CVE-2018-12714/
|
CWE-787
|
https://github.com/torvalds/linux/commit/81f9c4e4177d31ced6f52a89bb70e93bfb77ca03
|
81f9c4e4177d31ced6f52a89bb70e93bfb77ca03
|
Merge tag 'trace-v4.18-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace
Pull tracing fixes from Steven Rostedt:
"This contains a few fixes and a clean up.
- a bad merge caused an "endif" to go in the wrong place in
scripts/Makefile.build
- softirq tracing fix for tracing that corrupts lockdep and causes a
false splat
- histogram documentation typo fixes
- fix a bad memory reference when passing in no filter to the filter
code
- simplify code by using the swap macro instead of open coding the
swap"
* tag 'trace-v4.18-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace:
tracing: Fix SKIP_STACK_VALIDATION=1 build due to bad merge with -mrecord-mcount
tracing: Fix some errors in histogram documentation
tracing: Use swap macro in update_max_tr
softirq: Reorder trace_softirqs_on to prevent lockdep splat
tracing: Check for no filter when processing event filters
|
static inline void trace_access_lock(int cpu)
{
(void)cpu;
mutex_lock(&access_lock);
}
|
static inline void trace_access_lock(int cpu)
{
(void)cpu;
mutex_lock(&access_lock);
}
|
C
|
linux
| 0 |
null | null | null |
https://github.com/chromium/chromium/commit/f1a142d29ad1dfaecd3b609051b476440289ec72
|
f1a142d29ad1dfaecd3b609051b476440289ec72
|
Fix print media page size by using the value we compute.
BUG=82472
TEST=NONE (in bug)
Review URL: http://codereview.chromium.org/8344016
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@106160 0039d316-1c4b-4281-b951-d872f2087c98
|
PrintWebViewHelper::PrintPreviewContext::PrintPreviewContext()
: frame_(NULL),
total_page_count_(0),
current_page_index_(0),
generate_draft_pages_(true),
print_ready_metafile_page_count_(0),
error_(PREVIEW_ERROR_NONE),
state_(UNINITIALIZED) {
}
|
PrintWebViewHelper::PrintPreviewContext::PrintPreviewContext()
: frame_(NULL),
total_page_count_(0),
current_page_index_(0),
generate_draft_pages_(true),
print_ready_metafile_page_count_(0),
error_(PREVIEW_ERROR_NONE),
state_(UNINITIALIZED) {
}
|
C
|
Chrome
| 0 |
CVE-2019-5799
|
https://www.cvedetails.com/cve/CVE-2019-5799/
|
CWE-20
|
https://github.com/chromium/chromium/commit/108147dfd1ea159fd3632ef92ccc4ab8952980c7
|
108147dfd1ea159fd3632ef92ccc4ab8952980c7
|
Inherit the navigation initiator when navigating instead of the parent/opener
Spec PR: https://github.com/w3c/webappsec-csp/pull/358
Bug: 905301, 894228, 836148
Change-Id: I43ada2266d42d1cd56dbe3c6dd89d115e878a83a
Reviewed-on: https://chromium-review.googlesource.com/c/1314633
Commit-Queue: Andy Paicu <[email protected]>
Reviewed-by: Mike West <[email protected]>
Cr-Commit-Position: refs/heads/master@{#610850}
|
bool DocumentLoader::ShouldContinueForResponse() const {
if (substitute_data_.IsValid())
return true;
int status_code = response_.HttpStatusCode();
if (status_code == 204 || status_code == 205) {
return false;
}
if (IsContentDispositionAttachment(
response_.HttpHeaderField(http_names::kContentDisposition))) {
return false;
}
if (!CanShowMIMEType(response_.MimeType(), frame_))
return false;
return true;
}
|
bool DocumentLoader::ShouldContinueForResponse() const {
if (substitute_data_.IsValid())
return true;
int status_code = response_.HttpStatusCode();
if (status_code == 204 || status_code == 205) {
return false;
}
if (IsContentDispositionAttachment(
response_.HttpHeaderField(http_names::kContentDisposition))) {
return false;
}
if (!CanShowMIMEType(response_.MimeType(), frame_))
return false;
return true;
}
|
C
|
Chrome
| 0 |
CVE-2016-5189
|
https://www.cvedetails.com/cve/CVE-2016-5189/
|
CWE-284
|
https://github.com/chromium/chromium/commit/2440e872debd68ae7c2a8bf9ddb34df2cce378cd
|
2440e872debd68ae7c2a8bf9ddb34df2cce378cd
|
[GCPW] Disallow sign in of consumer accounts when mdm is enabled.
Unless the registry key "mdm_aca" is explicitly set to 1, always
fail sign in of consumer accounts when mdm enrollment is enabled.
Consumer accounts are defined as accounts with gmail.com or
googlemail.com domain.
Bug: 944049
Change-Id: Icb822f3737d90931de16a8d3317616dd2b159edd
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1532903
Commit-Queue: Tien Mai <[email protected]>
Reviewed-by: Roger Tawa <[email protected]>
Cr-Commit-Position: refs/heads/master@{#646278}
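The consumer-account rule boils down to a case-insensitive domain suffix check; a hypothetical C sketch of the predicate (the real code is Chromium C++, and these names are illustrative):
#include <string.h>
#include <strings.h>
/* Returns nonzero if email belongs to a consumer (gmail.com or
 * googlemail.com) account. */
static int is_consumer_account(const char *email)
{
    static const char *domains[] = { "gmail.com", "googlemail.com" };
    const char *at = strrchr(email, '@');
    size_t i;
    if (!at)
        return 0;
    for (i = 0; i < sizeof(domains) / sizeof(*domains); i++)
        if (strcasecmp(at + 1, domains[i]) == 0)
            return 1;
    return 0;
}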
|
base::string16 GetEmailDomains() {
std::vector<wchar_t> email_domains(16);
ULONG length = email_domains.size();
HRESULT hr = GetGlobalFlag(kEmailDomainsKey, &email_domains[0], &length);
if (FAILED(hr)) {
if (hr == HRESULT_FROM_WIN32(ERROR_MORE_DATA)) {
email_domains.resize(length + 1);
length = email_domains.size();
hr = GetGlobalFlag(kEmailDomainsKey, &email_domains[0], &length);
if (FAILED(hr))
email_domains[0] = 0;
}
}
return base::string16(&email_domains[0]);
}
|
base::string16 GetEmailDomains() {
std::vector<wchar_t> email_domains(16);
ULONG length = email_domains.size();
HRESULT hr = GetGlobalFlag(kEmailDomainsKey, &email_domains[0], &length);
if (FAILED(hr)) {
if (hr == HRESULT_FROM_WIN32(ERROR_MORE_DATA)) {
email_domains.resize(length + 1);
length = email_domains.size();
hr = GetGlobalFlag(kEmailDomainsKey, &email_domains[0], &length);
if (FAILED(hr))
email_domains[0] = 0;
}
}
return base::string16(&email_domains[0]);
}
|
C
|
Chrome
| 0 |
CVE-2017-9060
|
https://www.cvedetails.com/cve/CVE-2017-9060/
|
CWE-772
|
https://git.qemu.org/?p=qemu.git;a=commit;h=dd248ed7e204ee8a1873914e02b8b526e8f1b80d
|
dd248ed7e204ee8a1873914e02b8b526e8f1b80d
| null |
static void virtio_gpu_cursor_bh(void *opaque)
{
VirtIOGPU *g = opaque;
virtio_gpu_handle_cursor(&g->parent_obj, g->cursor_vq);
}
|
static void virtio_gpu_cursor_bh(void *opaque)
{
VirtIOGPU *g = opaque;
virtio_gpu_handle_cursor(&g->parent_obj, g->cursor_vq);
}
|
C
|
qemu
| 0 |
CVE-2016-5165
|
https://www.cvedetails.com/cve/CVE-2016-5165/
|
CWE-79
|
https://github.com/chromium/chromium/commit/19b8593007150b9a78da7d13f6e5f8feb10881a7
|
19b8593007150b9a78da7d13f6e5f8feb10881a7
|
Add CPU metrics provider and Add CPU/GPU provider for UKM.
Bug: 907674
Change-Id: I61b88aeac8d2a7ff81d812fa5a267f48203ec7e2
Reviewed-on: https://chromium-review.googlesource.com/c/1381376
Commit-Queue: Nik Bhagat <[email protected]>
Reviewed-by: Robert Kaplow <[email protected]>
Cr-Commit-Position: refs/heads/master@{#618037}
|
void MetricsLog::RegisterPrefs(PrefRegistrySimple* registry) {
EnvironmentRecorder::RegisterPrefs(registry);
}
|
void MetricsLog::RegisterPrefs(PrefRegistrySimple* registry) {
EnvironmentRecorder::RegisterPrefs(registry);
}
|
C
|
Chrome
| 0 |
CVE-2016-0826
|
https://www.cvedetails.com/cve/CVE-2016-0826/
|
CWE-264
|
https://android.googlesource.com/platform/frameworks/av/+/c9ab2b0bb05a7e19fb057e79b36e232809d70122
|
c9ab2b0bb05a7e19fb057e79b36e232809d70122
|
Camera: Disallow dumping clients directly
Camera service dumps should only be initiated through
ICameraService::dump.
Bug: 26265403
Change-Id: If3ca4718ed74bf33ad8a416192689203029e2803
|
void CameraService::releaseSound() {
Mutex::Autolock lock(mSoundLock);
LOG1("CameraService::releaseSound ref=%d", mSoundRef);
if (--mSoundRef) return;
for (int i = 0; i < NUM_SOUNDS; i++) {
if (mSoundPlayer[i] != 0) {
mSoundPlayer[i]->disconnect();
mSoundPlayer[i].clear();
}
}
}
|
void CameraService::releaseSound() {
Mutex::Autolock lock(mSoundLock);
LOG1("CameraService::releaseSound ref=%d", mSoundRef);
if (--mSoundRef) return;
for (int i = 0; i < NUM_SOUNDS; i++) {
if (mSoundPlayer[i] != 0) {
mSoundPlayer[i]->disconnect();
mSoundPlayer[i].clear();
}
}
}
|
C
|
Android
| 0 |
CVE-2018-16088
|
https://www.cvedetails.com/cve/CVE-2018-16088/
| null |
https://github.com/chromium/chromium/commit/4379a7fcff8190aa7ba72307b398161c32102c52
|
4379a7fcff8190aa7ba72307b398161c32102c52
|
Only allow downloading in response to real keyboard modifiers
BUG=848531
Change-Id: I97554c8d312243b55647f1376945aee32dbd95bf
Reviewed-on: https://chromium-review.googlesource.com/1082216
Reviewed-by: Mike West <[email protected]>
Commit-Queue: Jochen Eisinger <[email protected]>
Cr-Commit-Position: refs/heads/master@{#564051}
|
void FrameLoader::SetReferrerForFrameRequest(FrameLoadRequest& frame_request) {
ResourceRequest& request = frame_request.GetResourceRequest();
Document* origin_document = frame_request.OriginDocument();
if (!origin_document)
return;
if (request.DidSetHTTPReferrer())
return;
if (frame_request.GetShouldSendReferrer() == kNeverSendReferrer)
return;
Referrer referrer = SecurityPolicy::GenerateReferrer(
origin_document->GetReferrerPolicy(), request.Url(),
origin_document->OutgoingReferrer());
request.SetHTTPReferrer(referrer);
request.SetHTTPOriginToMatchReferrerIfNeeded();
}
|
void FrameLoader::SetReferrerForFrameRequest(FrameLoadRequest& frame_request) {
ResourceRequest& request = frame_request.GetResourceRequest();
Document* origin_document = frame_request.OriginDocument();
if (!origin_document)
return;
if (request.DidSetHTTPReferrer())
return;
if (frame_request.GetShouldSendReferrer() == kNeverSendReferrer)
return;
Referrer referrer = SecurityPolicy::GenerateReferrer(
origin_document->GetReferrerPolicy(), request.Url(),
origin_document->OutgoingReferrer());
request.SetHTTPReferrer(referrer);
request.SetHTTPOriginToMatchReferrerIfNeeded();
}
|
C
|
Chrome
| 0 |
CVE-2017-9202
|
https://www.cvedetails.com/cve/CVE-2017-9202/
|
CWE-369
|
https://github.com/jsummers/imageworsener/commit/dc49c807926b96e503bd7c0dec35119eecd6c6fe
|
dc49c807926b96e503bd7c0dec35119eecd6c6fe
|
Double-check that the input image's density is valid
Fixes a bug that could result in division by zero, at least for a JPEG
source image.
Fixes issues #19, #20
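The defensive check amounts to validating the density before it is ever used as a divisor; a minimal sketch under assumed names (the real fix lives in imageworsener's JPEG reader):
/* Reject non-positive or NaN densities before any size computation
 * divides by them; a malformed JPEG can otherwise supply density 0. */
static int density_is_valid(double xdens, double ydens)
{
    return xdens > 0.0 && ydens > 0.0 &&
           xdens == xdens && ydens == ydens;  /* NaN never compares equal */
}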
|
IW_IMPL(void) iw_set_warning_fn(struct iw_context *ctx, iw_warningfn_type warnfn)
{
ctx->warning_fn = warnfn;
}
|
IW_IMPL(void) iw_set_warning_fn(struct iw_context *ctx, iw_warningfn_type warnfn)
{
ctx->warning_fn = warnfn;
}
|
C
|
imageworsener
| 0 |
null | null | null |
https://github.com/chromium/chromium/commit/9d02cda7a634fbd6e53d98091f618057f0174387
|
9d02cda7a634fbd6e53d98091f618057f0174387
|
Coverity: Fixing pass by value.
CID=101462, 101458, 101437, 101471, 101467
BUG=NONE
TEST=NONE
Review URL: http://codereview.chromium.org/9006023
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@115257 0039d316-1c4b-4281-b951-d872f2087c98
|
base::Time ExtensionPrefs::GetInstallTime(
const std::string& extension_id) const {
const DictionaryValue* extension = GetExtensionPref(extension_id);
if (!extension) {
NOTREACHED();
return base::Time();
}
std::string install_time_str;
if (!extension->GetString(kPrefInstallTime, &install_time_str))
return base::Time();
int64 install_time_i64 = 0;
if (!base::StringToInt64(install_time_str, &install_time_i64))
return base::Time();
return base::Time::FromInternalValue(install_time_i64);
}
|
base::Time ExtensionPrefs::GetInstallTime(
const std::string& extension_id) const {
const DictionaryValue* extension = GetExtensionPref(extension_id);
if (!extension) {
NOTREACHED();
return base::Time();
}
std::string install_time_str;
if (!extension->GetString(kPrefInstallTime, &install_time_str))
return base::Time();
int64 install_time_i64 = 0;
if (!base::StringToInt64(install_time_str, &install_time_i64))
return base::Time();
return base::Time::FromInternalValue(install_time_i64);
}
|
C
|
Chrome
| 0 |
CVE-2019-14323
|
https://www.cvedetails.com/cve/CVE-2019-14323/
|
CWE-119
|
https://github.com/troglobit/ssdp-responder/commit/ce04b1f29a137198182f60bbb628d5ceb8171765
|
ce04b1f29a137198182f60bbb628d5ceb8171765
|
Fix #1: Ensure recv buf is always NUL terminated
Signed-off-by: Joachim Nilsson <[email protected]>
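The fix pattern is the standard one for datagram reads: reserve one byte, terminate at the received length, and only then let string functions near the buffer. A sketch, not the project's exact code:
#include <string.h>
#include <sys/types.h>
#include <sys/socket.h>
static void handle_packet(int sd)
{
    char buf[1024];
    ssize_t len = recv(sd, buf, sizeof(buf) - 1, 0);  /* leave room for NUL */
    if (len <= 0)
        return;
    buf[len] = '\0';              /* strstr()/strchr() below are now safe */
    if (strstr(buf, "M-SEARCH"))
        return;                   /* illustrative: would answer the search here */
}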
|
static void lsb_init(void)
{
FILE *fp;
char *ptr;
char line[80];
const char *file = "/etc/lsb-release";
fp = fopen(file, "r");
if (!fp) {
fallback:
logit(LOG_WARNING, "No %s found on system, using built-in server string.", file);
return;
}
while (fgets(line, sizeof(line), fp)) {
line[strlen(line) - 1] = 0;
ptr = strstr(line, "DISTRIB_ID");
if (ptr && (ptr = strchr(ptr, '=')))
os = strdup(++ptr);
ptr = strstr(line, "DISTRIB_RELEASE");
if (ptr && (ptr = strchr(ptr, '=')))
ver = strdup(++ptr);
}
fclose(fp);
if (os && ver)
snprintf(server_string, sizeof(server_string), "%s/%s UPnP/1.0 %s/%s",
os, ver, PACKAGE_NAME, PACKAGE_VERSION);
else
goto fallback;
logit(LOG_DEBUG, "Server: %s", server_string);
}
|
static void lsb_init(void)
{
FILE *fp;
char *ptr;
char line[80];
const char *file = "/etc/lsb-release";
fp = fopen(file, "r");
if (!fp) {
fallback:
logit(LOG_WARNING, "No %s found on system, using built-in server string.", file);
return;
}
while (fgets(line, sizeof(line), fp)) {
line[strlen(line) - 1] = 0;
ptr = strstr(line, "DISTRIB_ID");
if (ptr && (ptr = strchr(ptr, '=')))
os = strdup(++ptr);
ptr = strstr(line, "DISTRIB_RELEASE");
if (ptr && (ptr = strchr(ptr, '=')))
ver = strdup(++ptr);
}
fclose(fp);
if (os && ver)
snprintf(server_string, sizeof(server_string), "%s/%s UPnP/1.0 %s/%s",
os, ver, PACKAGE_NAME, PACKAGE_VERSION);
else
goto fallback;
logit(LOG_DEBUG, "Server: %s", server_string);
}
|
C
|
ssdp-responder
| 0 |
CVE-2016-10066
|
https://www.cvedetails.com/cve/CVE-2016-10066/
|
CWE-119
|
https://github.com/ImageMagick/ImageMagick/commit/f6e9d0d9955e85bdd7540b251cd50d598dacc5e6
|
f6e9d0d9955e85bdd7540b251cd50d598dacc5e6
| null |
static void ipa_draw_arc(wmfAPI * API, wmfDrawArc_t * draw_arc)
{
util_draw_arc(API, draw_arc, magick_arc_open);
}
|
static void ipa_draw_arc(wmfAPI * API, wmfDrawArc_t * draw_arc)
{
util_draw_arc(API, draw_arc, magick_arc_open);
}
|
C
|
ImageMagick
| 0 |
CVE-2016-1705
|
https://www.cvedetails.com/cve/CVE-2016-1705/
| null |
https://github.com/chromium/chromium/commit/4afb628e068367d5b73440537555902cd12416f8
|
4afb628e068367d5b73440537555902cd12416f8
|
gpu/android : Add support for partial swap with surface control.
Add support for PostSubBuffer to GLSurfaceEGLSurfaceControl. This should
allow the display compositor to draw the minimum sub-rect necessary from
the damage tracking in BufferQueue on the client-side, and also to pass
this damage rect to the framework.
[email protected]
Bug: 926020
Change-Id: I73d3320cab68250d4c6865bf21c5531682d8bf61
Reviewed-on: https://chromium-review.googlesource.com/c/1457467
Commit-Queue: Khushal <[email protected]>
Commit-Queue: Antoine Labour <[email protected]>
Reviewed-by: Antoine Labour <[email protected]>
Auto-Submit: Khushal <[email protected]>
Cr-Commit-Position: refs/heads/master@{#629852}
|
void CompositorImpl::DidSwapBuffers(const gfx::Size& swap_size) {
client_->DidSwapBuffers(swap_size);
if (swap_completed_with_size_for_testing_)
swap_completed_with_size_for_testing_.Run(swap_size);
}
|
void CompositorImpl::DidSwapBuffers(const gfx::Size& swap_size) {
client_->DidSwapBuffers(swap_size);
if (swap_completed_with_size_for_testing_)
swap_completed_with_size_for_testing_.Run(swap_size);
}
|
C
|
Chrome
| 0 |
CVE-2011-2349
|
https://www.cvedetails.com/cve/CVE-2011-2349/
|
CWE-399
|
https://github.com/chromium/chromium/commit/e755d9faf5c7d75a8ea290892cb1b5cc07c412ec
|
e755d9faf5c7d75a8ea290892cb1b5cc07c412ec
|
cros: The next 100 clang plugin errors.
BUG=none
TEST=none
TBR=dpolukhin
Review URL: http://codereview.chromium.org/7022008
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@85418 0039d316-1c4b-4281-b951-d872f2087c98
|
int PluginSelectionPolicy::FindFirstAllowed(
const GURL& url,
const std::vector<webkit::npapi::WebPluginInfo>& info) {
for (std::vector<webkit::npapi::WebPluginInfo>::size_type i = 0;
i < info.size(); ++i) {
if (IsAllowed(url, info[i].path))
return i;
}
return -1;
}
|
int PluginSelectionPolicy::FindFirstAllowed(
const GURL& url,
const std::vector<webkit::npapi::WebPluginInfo>& info) {
for (std::vector<webkit::npapi::WebPluginInfo>::size_type i = 0;
i < info.size(); ++i) {
if (IsAllowed(url, info[i].path))
return i;
}
return -1;
}
|
C
|
Chrome
| 0 |
CVE-2016-9084
|
https://www.cvedetails.com/cve/CVE-2016-9084/
|
CWE-190
|
https://github.com/torvalds/linux/commit/05692d7005a364add85c6e25a6c4447ce08f913a
|
05692d7005a364add85c6e25a6c4447ce08f913a
|
vfio/pci: Fix integer overflows, bitmask check
The VFIO_DEVICE_SET_IRQS ioctl did not sufficiently sanitize
user-supplied integers, potentially allowing memory corruption. This
patch adds appropriate integer overflow checks, checks the range bounds
for VFIO_IRQ_SET_DATA_NONE, and also verifies that only single element
in the VFIO_IRQ_SET_DATA_TYPE_MASK bitmask is set.
VFIO_IRQ_SET_ACTION_TYPE_MASK is already correctly checked later in
vfio_pci_set_irqs_ioctl().
Furthermore, a kzalloc is changed to a kcalloc because the use of a
kzalloc with an integer multiplication allowed an integer overflow
condition to be reached without this patch. kcalloc checks for overflow
and should prevent a similar occurrence.
Signed-off-by: Vlad Tsyrklevich <[email protected]>
Signed-off-by: Alex Williamson <[email protected]>
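Two of the patterns from the description, in a self-contained sketch with hypothetical names: an overflow-safe range check on user-supplied (start, count), a power-of-two test for the single-bit DATA_TYPE mask, and calloc() so the element-count multiplication is checked:
#include <stdint.h>
#include <stdlib.h>
/* start + count must stay within max_irqs; written so the sum
 * itself cannot wrap around. */
static int range_ok(uint32_t start, uint32_t count, uint32_t max_irqs)
{
    return count <= max_irqs && start <= max_irqs - count;
}
/* Exactly one DATA_* flag bit may be set. */
static int one_bit_set(uint32_t flags)
{
    return flags != 0 && (flags & (flags - 1)) == 0;
}
static int *alloc_triggers(size_t count)
{
    /* calloc checks count * sizeof(int) for overflow; the replaced
     * kzalloc with an open-coded multiply did not. */
    return calloc(count, sizeof(int));
}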
|
static int vfio_pci_set_req_trigger(struct vfio_pci_device *vdev,
unsigned index, unsigned start,
unsigned count, uint32_t flags, void *data)
{
if (index != VFIO_PCI_REQ_IRQ_INDEX || start != 0 || count > 1)
return -EINVAL;
return vfio_pci_set_ctx_trigger_single(&vdev->req_trigger,
count, flags, data);
}
|
static int vfio_pci_set_req_trigger(struct vfio_pci_device *vdev,
unsigned index, unsigned start,
unsigned count, uint32_t flags, void *data)
{
if (index != VFIO_PCI_REQ_IRQ_INDEX || start != 0 || count > 1)
return -EINVAL;
return vfio_pci_set_ctx_trigger_single(&vdev->req_trigger,
count, flags, data);
}
|
C
|
linux
| 0 |
null | null | null |
https://github.com/chromium/chromium/commit/3a353ebdb7753a3fbeb401c4c0e0f3358ccbb90b
|
3a353ebdb7753a3fbeb401c4c0e0f3358ccbb90b
|
Support pausing media when a context is frozen.
Media is resumed when the context is unpaused. This feature will be used
for bfcache and pausing iframes feature policy.
BUG=907125
Change-Id: Ic3925ea1a4544242b7bf0b9ad8c9cb9f63976bbd
Reviewed-on: https://chromium-review.googlesource.com/c/1410126
Commit-Queue: Dave Tapuska <[email protected]>
Reviewed-by: Kentaro Hara <[email protected]>
Reviewed-by: Mounir Lamouri <[email protected]>
Cr-Commit-Position: refs/heads/master@{#623319}
|
PeriodicWave* BaseAudioContext::createPeriodicWave(
const Vector<float>& real,
const Vector<float>& imag,
const PeriodicWaveConstraints* options,
ExceptionState& exception_state) {
DCHECK(IsMainThread());
bool disable = options->disableNormalization();
return PeriodicWave::Create(*this, real, imag, disable, exception_state);
}
|
PeriodicWave* BaseAudioContext::createPeriodicWave(
const Vector<float>& real,
const Vector<float>& imag,
const PeriodicWaveConstraints* options,
ExceptionState& exception_state) {
DCHECK(IsMainThread());
bool disable = options->disableNormalization();
return PeriodicWave::Create(*this, real, imag, disable, exception_state);
}
|
C
|
Chrome
| 0 |
CVE-2012-1179
|
https://www.cvedetails.com/cve/CVE-2012-1179/
|
CWE-264
|
https://github.com/torvalds/linux/commit/4a1d704194a441bf83c636004a479e01360ec850
|
4a1d704194a441bf83c636004a479e01360ec850
|
mm: thp: fix pmd_bad() triggering in code paths holding mmap_sem read mode
commit 1a5a9906d4e8d1976b701f889d8f35d54b928f25 upstream.
In some cases it may happen that pmd_none_or_clear_bad() is called with
the mmap_sem hold in read mode. In those cases the huge page faults can
allocate hugepmds under pmd_none_or_clear_bad() and that can trigger a
false positive from pmd_bad() that will not like to see a pmd
materializing as trans huge.
It's not khugepaged causing the problem, khugepaged holds the mmap_sem
in write mode (and all those sites must hold the mmap_sem in read mode
to prevent pagetables from going away from under them; during code review it
seems vm86 mode on 32bit kernels requires that too unless it's
restricted to 1 thread per process or UP builds). The race is only with
the huge pagefaults that can convert a pmd_none() into a
pmd_trans_huge().
Effectively all these pmd_none_or_clear_bad() sites running with
mmap_sem in read mode are somewhat speculative with the page faults, and
the result is always undefined when they run simultaneously. This is
probably why it wasn't common to run into this. For example if the
madvise(MADV_DONTNEED) runs zap_page_range() shortly before the page
fault, the hugepage will not be zapped, if the page fault runs first it
will be zapped.
Altering pmd_bad() not to error out if it finds hugepmds won't be enough
to fix this, because zap_pmd_range would then proceed to call
zap_pte_range (which would be incorrect if the pmd became a
pmd_trans_huge()).
The simplest way to fix this is to read the pmd in the local stack
(regardless of what we read, no need of actual CPU barriers, only
compiler barrier needed), and be sure it is not changing under the code
that computes its value. Even if the real pmd is changing under the
value we hold on the stack, we don't care. If we actually end up in
zap_pte_range it means the pmd was not none already and it was not huge,
and it can't become huge from under us (khugepaged locking explained
above).
All we need is to enforce that there is no way anymore that in a code
path like below, pmd_trans_huge can be false, but pmd_none_or_clear_bad
can run into a hugepmd. The overhead of a barrier() is just a compiler
tweak and should not be measurable (I only added it for THP builds). I
don't exclude different compiler versions may have prevented the race
too by caching the value of *pmd on the stack (that hasn't been
verified, but it wouldn't be impossible considering
pmd_none_or_clear_bad, pmd_bad, pmd_trans_huge, pmd_none are all inlines
and there's no external function called in between pmd_trans_huge and
pmd_none_or_clear_bad).
if (pmd_trans_huge(*pmd)) {
if (next-addr != HPAGE_PMD_SIZE) {
VM_BUG_ON(!rwsem_is_locked(&tlb->mm->mmap_sem));
split_huge_page_pmd(vma->vm_mm, pmd);
} else if (zap_huge_pmd(tlb, vma, pmd, addr))
continue;
/* fall through */
}
if (pmd_none_or_clear_bad(pmd))
Because this race condition could be exercised without special
privileges this was reported in CVE-2012-1179.
The race was identified and fully explained by Ulrich who debugged it.
I'm quoting his accurate explanation below, for reference.
====== start quote =======
mapcount 0 page_mapcount 1
kernel BUG at mm/huge_memory.c:1384!
At some point prior to the panic, a "bad pmd ..." message similar to the
following is logged on the console:
mm/memory.c:145: bad pmd ffff8800376e1f98(80000000314000e7).
The "bad pmd ..." message is logged by pmd_clear_bad() before it clears
the page's PMD table entry.
143 void pmd_clear_bad(pmd_t *pmd)
144 {
-> 145 pmd_ERROR(*pmd);
146 pmd_clear(pmd);
147 }
After the PMD table entry has been cleared, there is an inconsistency
between the actual number of PMD table entries that are mapping the page
and the page's map count (_mapcount field in struct page). When the page
is subsequently reclaimed, __split_huge_page() detects this inconsistency.
1381 if (mapcount != page_mapcount(page))
1382 printk(KERN_ERR "mapcount %d page_mapcount %d\n",
1383 mapcount, page_mapcount(page));
-> 1384 BUG_ON(mapcount != page_mapcount(page));
The root cause of the problem is a race of two threads in a multithreaded
process. Thread B incurs a page fault on a virtual address that has never
been accessed (PMD entry is zero) while Thread A is executing an madvise()
system call on a virtual address within the same 2 MB (huge page) range.
virtual address space
.---------------------.
| |
| |
.-|---------------------|
| | |
| | |<-- B(fault)
| | |
2 MB | |/////////////////////|-.
huge < |/////////////////////| > A(range)
page | |/////////////////////|-'
| | |
| | |
'-|---------------------|
| |
| |
'---------------------'
- Thread A is executing an madvise(..., MADV_DONTNEED) system call
on the virtual address range "A(range)" shown in the picture.
sys_madvise
// Acquire the semaphore in shared mode.
down_read(&current->mm->mmap_sem)
...
madvise_vma
switch (behavior)
case MADV_DONTNEED:
madvise_dontneed
zap_page_range
unmap_vmas
unmap_page_range
zap_pud_range
zap_pmd_range
//
// Assume that this huge page has never been accessed.
// I.e. content of the PMD entry is zero (not mapped).
//
if (pmd_trans_huge(*pmd)) {
// We don't get here due to the above assumption.
}
//
// Assume that Thread B incurred a page fault and
.---------> // sneaks in here as shown below.
| //
| if (pmd_none_or_clear_bad(pmd))
| {
| if (unlikely(pmd_bad(*pmd)))
| pmd_clear_bad
| {
| pmd_ERROR
| // Log "bad pmd ..." message here.
| pmd_clear
| // Clear the page's PMD entry.
| // Thread B incremented the map count
| // in page_add_new_anon_rmap(), but
| // now the page is no longer mapped
| // by a PMD entry (-> inconsistency).
| }
| }
|
v
- Thread B is handling a page fault on virtual address "B(fault)" shown
in the picture.
...
do_page_fault
__do_page_fault
// Acquire the semaphore in shared mode.
down_read_trylock(&mm->mmap_sem)
...
handle_mm_fault
if (pmd_none(*pmd) && transparent_hugepage_enabled(vma))
// We get here due to the above assumption (PMD entry is zero).
do_huge_pmd_anonymous_page
alloc_hugepage_vma
// Allocate a new transparent huge page here.
...
__do_huge_pmd_anonymous_page
...
spin_lock(&mm->page_table_lock)
...
page_add_new_anon_rmap
// Here we increment the page's map count (starts at -1).
atomic_set(&page->_mapcount, 0)
set_pmd_at
// Here we set the page's PMD entry which will be cleared
// when Thread A calls pmd_clear_bad().
...
spin_unlock(&mm->page_table_lock)
The mmap_sem does not prevent the race because both threads are acquiring
it in shared mode (down_read). Thread B holds the page_table_lock while
the page's map count and PMD table entry are updated. However, Thread A
does not synchronize on that lock.
====== end quote =======
[[email protected]: checkpatch fixes]
Reported-by: Ulrich Obergfell <[email protected]>
Signed-off-by: Andrea Arcangeli <[email protected]>
Acked-by: Johannes Weiner <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: Hugh Dickins <[email protected]>
Cc: Dave Jones <[email protected]>
Acked-by: Larry Woodman <[email protected]>
Acked-by: Rik van Riel <[email protected]>
Cc: Mark Salter <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
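Condensed, the helper this patch introduces has roughly the following shape (kernel-flavored sketch, abbreviated from the description above rather than quoted from the patch):
static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd)
{
    pmd_t pmdval = *pmd;
    barrier();  /* compiler barrier: all checks below see one snapshot */
    if (pmd_none(pmdval) || pmd_trans_huge(pmdval))
        return 1;                 /* caller skips; no false pmd_bad() hit */
    if (unlikely(pmd_bad(pmdval))) {
        pmd_clear_bad(pmd);
        return 1;
    }
    return 0;
}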
|
static void __sync_task_rss_stat(struct task_struct *task, struct mm_struct *mm)
{
int i;
for (i = 0; i < NR_MM_COUNTERS; i++) {
if (task->rss_stat.count[i]) {
add_mm_counter(mm, i, task->rss_stat.count[i]);
task->rss_stat.count[i] = 0;
}
}
task->rss_stat.events = 0;
}
|
static void __sync_task_rss_stat(struct task_struct *task, struct mm_struct *mm)
{
int i;
for (i = 0; i < NR_MM_COUNTERS; i++) {
if (task->rss_stat.count[i]) {
add_mm_counter(mm, i, task->rss_stat.count[i]);
task->rss_stat.count[i] = 0;
}
}
task->rss_stat.events = 0;
}
|
C
|
linux
| 0 |
CVE-2017-8068
|
https://www.cvedetails.com/cve/CVE-2017-8068/
|
CWE-119
|
https://github.com/torvalds/linux/commit/5593523f968bc86d42a035c6df47d5e0979b5ace
|
5593523f968bc86d42a035c6df47d5e0979b5ace
|
pegasus: Use heap buffers for all register access
Allocating USB buffers on the stack is not portable, and no longer
works on x86_64 (with VMAP_STACK enabled as per default).
Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
References: https://bugs.debian.org/852556
Reported-by: Lisandro Damián Nicanor Pérez Meyer <[email protected]>
Tested-by: Lisandro Damián Nicanor Pérez Meyer <[email protected]>
Signed-off-by: Ben Hutchings <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
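The portable pattern applied throughout the patch: USB transfer buffers must be heap memory, because with VMAP_STACK the stack is vmalloc'd and not DMA-able. A condensed kernel-flavored sketch with a hypothetical register-read helper (the request constants are illustrative, not pegasus's):
static int read_reg(struct usb_device *udev, u16 reg, u8 *value)
{
    u8 *buf = kmalloc(1, GFP_KERNEL);  /* heap: safe to hand to DMA */
    int ret;
    if (!buf)
        return -ENOMEM;
    ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
                          0x00 /* hypothetical request */,
                          USB_DIR_IN | USB_TYPE_VENDOR,
                          0, reg, buf, 1, 1000);
    if (ret >= 0)
        *value = *buf;
    kfree(buf);
    return ret;
}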
|
static void pegasus_disconnect(struct usb_interface *intf)
{
struct pegasus *pegasus = usb_get_intfdata(intf);
usb_set_intfdata(intf, NULL);
if (!pegasus) {
dev_dbg(&intf->dev, "unregistering non-bound device?\n");
return;
}
pegasus->flags |= PEGASUS_UNPLUG;
cancel_delayed_work(&pegasus->carrier_check);
unregister_netdev(pegasus->net);
unlink_all_urbs(pegasus);
free_all_urbs(pegasus);
if (pegasus->rx_skb != NULL) {
dev_kfree_skb(pegasus->rx_skb);
pegasus->rx_skb = NULL;
}
free_netdev(pegasus->net);
pegasus_dec_workqueue();
}
|
static void pegasus_disconnect(struct usb_interface *intf)
{
struct pegasus *pegasus = usb_get_intfdata(intf);
usb_set_intfdata(intf, NULL);
if (!pegasus) {
dev_dbg(&intf->dev, "unregistering non-bound device?\n");
return;
}
pegasus->flags |= PEGASUS_UNPLUG;
cancel_delayed_work(&pegasus->carrier_check);
unregister_netdev(pegasus->net);
unlink_all_urbs(pegasus);
free_all_urbs(pegasus);
if (pegasus->rx_skb != NULL) {
dev_kfree_skb(pegasus->rx_skb);
pegasus->rx_skb = NULL;
}
free_netdev(pegasus->net);
pegasus_dec_workqueue();
}
|
C
|
linux
| 0 |
CVE-2011-2806
|
https://www.cvedetails.com/cve/CVE-2011-2806/
|
CWE-119
|
https://github.com/chromium/chromium/commit/01e4ee2fda0a5e57a8d0c8cb829022eb84fdff12
|
01e4ee2fda0a5e57a8d0c8cb829022eb84fdff12
|
Rename isPositioned to isOutOfFlowPositioned for clarity
https://bugs.webkit.org/show_bug.cgi?id=89836
Reviewed by Antti Koivisto.
RenderObject and RenderStyle had an isPositioned() method that was
confusing, because it excluded relative positioning. Rename to
isOutOfFlowPositioned(), which makes it clearer that it only applies
to absolute and fixed positioning.
Simple rename; no behavior change.
Source/WebCore:
* css/CSSComputedStyleDeclaration.cpp:
(WebCore::getPositionOffsetValue):
* css/StyleResolver.cpp:
(WebCore::StyleResolver::collectMatchingRulesForList):
* dom/Text.cpp:
(WebCore::Text::rendererIsNeeded):
* editing/DeleteButtonController.cpp:
(WebCore::isDeletableElement):
* editing/TextIterator.cpp:
(WebCore::shouldEmitNewlinesBeforeAndAfterNode):
* rendering/AutoTableLayout.cpp:
(WebCore::shouldScaleColumns):
* rendering/InlineFlowBox.cpp:
(WebCore::InlineFlowBox::addToLine):
(WebCore::InlineFlowBox::placeBoxesInInlineDirection):
(WebCore::InlineFlowBox::requiresIdeographicBaseline):
(WebCore::InlineFlowBox::adjustMaxAscentAndDescent):
(WebCore::InlineFlowBox::computeLogicalBoxHeights):
(WebCore::InlineFlowBox::placeBoxesInBlockDirection):
(WebCore::InlineFlowBox::flipLinesInBlockDirection):
(WebCore::InlineFlowBox::computeOverflow):
(WebCore::InlineFlowBox::computeOverAnnotationAdjustment):
(WebCore::InlineFlowBox::computeUnderAnnotationAdjustment):
* rendering/InlineIterator.h:
(WebCore::isIteratorTarget):
* rendering/LayoutState.cpp:
(WebCore::LayoutState::LayoutState):
* rendering/RenderBlock.cpp:
(WebCore::RenderBlock::MarginInfo::MarginInfo):
(WebCore::RenderBlock::styleWillChange):
(WebCore::RenderBlock::styleDidChange):
(WebCore::RenderBlock::addChildToContinuation):
(WebCore::RenderBlock::addChildToAnonymousColumnBlocks):
(WebCore::RenderBlock::containingColumnsBlock):
(WebCore::RenderBlock::columnsBlockForSpanningElement):
(WebCore::RenderBlock::addChildIgnoringAnonymousColumnBlocks):
(WebCore::getInlineRun):
(WebCore::RenderBlock::isSelfCollapsingBlock):
(WebCore::RenderBlock::layoutBlock):
(WebCore::RenderBlock::addOverflowFromBlockChildren):
(WebCore::RenderBlock::expandsToEncloseOverhangingFloats):
(WebCore::RenderBlock::handlePositionedChild):
(WebCore::RenderBlock::moveRunInUnderSiblingBlockIfNeeded):
(WebCore::RenderBlock::collapseMargins):
(WebCore::RenderBlock::clearFloatsIfNeeded):
(WebCore::RenderBlock::simplifiedNormalFlowLayout):
(WebCore::RenderBlock::isSelectionRoot):
(WebCore::RenderBlock::blockSelectionGaps):
(WebCore::RenderBlock::clearFloats):
(WebCore::RenderBlock::markAllDescendantsWithFloatsForLayout):
(WebCore::RenderBlock::markSiblingsWithFloatsForLayout):
(WebCore::isChildHitTestCandidate):
(WebCore::InlineMinMaxIterator::next):
(WebCore::RenderBlock::computeBlockPreferredLogicalWidths):
(WebCore::RenderBlock::firstLineBoxBaseline):
(WebCore::RenderBlock::lastLineBoxBaseline):
(WebCore::RenderBlock::updateFirstLetter):
(WebCore::shouldCheckLines):
(WebCore::getHeightForLineCount):
(WebCore::RenderBlock::adjustForBorderFit):
(WebCore::inNormalFlow):
(WebCore::RenderBlock::adjustLinePositionForPagination):
(WebCore::RenderBlock::adjustBlockChildForPagination):
(WebCore::RenderBlock::renderName):
* rendering/RenderBlock.h:
(WebCore::RenderBlock::shouldSkipCreatingRunsForObject):
* rendering/RenderBlockLineLayout.cpp:
(WebCore::RenderBlock::setMarginsForRubyRun):
(WebCore::RenderBlock::computeInlineDirectionPositionsForLine):
(WebCore::RenderBlock::computeBlockDirectionPositionsForLine):
(WebCore::RenderBlock::layoutInlineChildren):
(WebCore::requiresLineBox):
(WebCore::RenderBlock::LineBreaker::skipTrailingWhitespace):
(WebCore::RenderBlock::LineBreaker::skipLeadingWhitespace):
(WebCore::RenderBlock::LineBreaker::nextLineBreak):
* rendering/RenderBox.cpp:
(WebCore::RenderBox::removeFloatingOrPositionedChildFromBlockLists):
(WebCore::RenderBox::styleWillChange):
(WebCore::RenderBox::styleDidChange):
(WebCore::RenderBox::updateBoxModelInfoFromStyle):
(WebCore::RenderBox::offsetFromContainer):
(WebCore::RenderBox::positionLineBox):
(WebCore::RenderBox::computeRectForRepaint):
(WebCore::RenderBox::computeLogicalWidthInRegion):
(WebCore::RenderBox::renderBoxRegionInfo):
(WebCore::RenderBox::computeLogicalHeight):
(WebCore::RenderBox::computePercentageLogicalHeight):
(WebCore::RenderBox::computeReplacedLogicalWidthUsing):
(WebCore::RenderBox::computeReplacedLogicalHeightUsing):
(WebCore::RenderBox::availableLogicalHeightUsing):
(WebCore::percentageLogicalHeightIsResolvable):
* rendering/RenderBox.h:
(WebCore::RenderBox::stretchesToViewport):
(WebCore::RenderBox::isDeprecatedFlexItem):
* rendering/RenderBoxModelObject.cpp:
(WebCore::RenderBoxModelObject::adjustedPositionRelativeToOffsetParent):
(WebCore::RenderBoxModelObject::mapAbsoluteToLocalPoint):
* rendering/RenderBoxModelObject.h:
(WebCore::RenderBoxModelObject::requiresLayer):
* rendering/RenderDeprecatedFlexibleBox.cpp:
(WebCore::childDoesNotAffectWidthOrFlexing):
(WebCore::RenderDeprecatedFlexibleBox::layoutBlock):
(WebCore::RenderDeprecatedFlexibleBox::layoutHorizontalBox):
(WebCore::RenderDeprecatedFlexibleBox::layoutVerticalBox):
(WebCore::RenderDeprecatedFlexibleBox::renderName):
* rendering/RenderFieldset.cpp:
(WebCore::RenderFieldset::findLegend):
* rendering/RenderFlexibleBox.cpp:
(WebCore::RenderFlexibleBox::computePreferredLogicalWidths):
(WebCore::RenderFlexibleBox::autoMarginOffsetInMainAxis):
(WebCore::RenderFlexibleBox::availableAlignmentSpaceForChild):
(WebCore::RenderFlexibleBox::computeMainAxisPreferredSizes):
(WebCore::RenderFlexibleBox::computeNextFlexLine):
(WebCore::RenderFlexibleBox::resolveFlexibleLengths):
(WebCore::RenderFlexibleBox::prepareChildForPositionedLayout):
(WebCore::RenderFlexibleBox::layoutAndPlaceChildren):
(WebCore::RenderFlexibleBox::layoutColumnReverse):
(WebCore::RenderFlexibleBox::adjustAlignmentForChild):
(WebCore::RenderFlexibleBox::flipForRightToLeftColumn):
* rendering/RenderGrid.cpp:
(WebCore::RenderGrid::renderName):
* rendering/RenderImage.cpp:
(WebCore::RenderImage::computeIntrinsicRatioInformation):
* rendering/RenderInline.cpp:
(WebCore::RenderInline::addChildIgnoringContinuation):
(WebCore::RenderInline::addChildToContinuation):
(WebCore::RenderInline::generateCulledLineBoxRects):
(WebCore):
(WebCore::RenderInline::culledInlineFirstLineBox):
(WebCore::RenderInline::culledInlineLastLineBox):
(WebCore::RenderInline::culledInlineVisualOverflowBoundingBox):
(WebCore::RenderInline::computeRectForRepaint):
(WebCore::RenderInline::dirtyLineBoxes):
* rendering/RenderLayer.cpp:
(WebCore::checkContainingBlockChainForPagination):
(WebCore::RenderLayer::updateLayerPosition):
(WebCore::isPositionedContainer):
(WebCore::RenderLayer::calculateClipRects):
(WebCore::RenderLayer::shouldBeNormalFlowOnly):
* rendering/RenderLayerCompositor.cpp:
(WebCore::RenderLayerCompositor::requiresCompositingForPosition):
* rendering/RenderLineBoxList.cpp:
(WebCore::RenderLineBoxList::dirtyLinesFromChangedChild):
* rendering/RenderListItem.cpp:
(WebCore::getParentOfFirstLineBox):
* rendering/RenderMultiColumnBlock.cpp:
(WebCore::RenderMultiColumnBlock::renderName):
* rendering/RenderObject.cpp:
(WebCore::RenderObject::markContainingBlocksForLayout):
(WebCore::RenderObject::setPreferredLogicalWidthsDirty):
(WebCore::RenderObject::invalidateContainerPreferredLogicalWidths):
(WebCore::RenderObject::styleWillChange):
(WebCore::RenderObject::offsetParent):
* rendering/RenderObject.h:
(WebCore::RenderObject::isOutOfFlowPositioned):
(WebCore::RenderObject::isInFlowPositioned):
(WebCore::RenderObject::hasClip):
(WebCore::RenderObject::isFloatingOrOutOfFlowPositioned):
* rendering/RenderObjectChildList.cpp:
(WebCore::RenderObjectChildList::removeChildNode):
* rendering/RenderReplaced.cpp:
(WebCore::hasAutoHeightOrContainingBlockWithAutoHeight):
* rendering/RenderRubyRun.cpp:
(WebCore::RenderRubyRun::rubyText):
* rendering/RenderTable.cpp:
(WebCore::RenderTable::addChild):
(WebCore::RenderTable::computeLogicalWidth):
(WebCore::RenderTable::layout):
* rendering/style/RenderStyle.h:
Source/WebKit/blackberry:
* Api/WebPage.cpp:
(BlackBerry::WebKit::isPositionedContainer):
(BlackBerry::WebKit::isNonRenderViewFixedPositionedContainer):
(BlackBerry::WebKit::isFixedPositionedContainer):
Source/WebKit2:
* WebProcess/WebPage/qt/LayerTreeHostQt.cpp:
(WebKit::updateOffsetFromViewportForSelf):
git-svn-id: svn://svn.chromium.org/blink/trunk@121123 bbb929c8-8fbe-4397-9dbb-9b2b20218538
|
String CSSComputedStyleDeclaration::cssText() const
{
String result("");
for (unsigned i = 0; i < numComputedProperties; i++) {
if (i)
result += " ";
result += getPropertyName(computedProperties[i]);
result += ": ";
result += getPropertyValue(computedProperties[i]);
result += ";";
}
return result;
}
|
String CSSComputedStyleDeclaration::cssText() const
{
String result("");
for (unsigned i = 0; i < numComputedProperties; i++) {
if (i)
result += " ";
result += getPropertyName(computedProperties[i]);
result += ": ";
result += getPropertyValue(computedProperties[i]);
result += ";";
}
return result;
}
|
C
|
Chrome
| 0 |
CVE-2018-17206
|
https://www.cvedetails.com/cve/CVE-2018-17206/
| null |
https://github.com/openvswitch/ovs/commit/9237a63c47bd314b807cda0bd2216264e82edbe8
|
9237a63c47bd314b807cda0bd2216264e82edbe8
|
ofp-actions: Avoid buffer overread in BUNDLE action decoding.
Reported-at: https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=9052
Signed-off-by: Ben Pfaff <[email protected]>
Acked-by: Justin Pettit <[email protected]>
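The one-line summary implies a missing length check when reading the BUNDLE member list; the sketch below shows that class of check under stated assumptions (ovs_be16 and OFPERR_OFPBAC_BAD_LEN are real OVS names, but the exact condition in the patch may differ).
/* 'slaves_len' is assumed to be the number of bytes remaining in the
 * message after the fixed BUNDLE header; each member port id is an
 * ovs_be16, so the declared count must fit in that space. */
static enum ofperr
check_bundle_len(size_t n_slaves, size_t slaves_len)
{
    if (n_slaves > slaves_len / sizeof(ovs_be16)) {
        return OFPERR_OFPBAC_BAD_LEN;  /* member list overruns buffer */
    }
    return 0;
}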
|
decode_OFPAT_RAW10_STRIP_VLAN(struct ofpbuf *out)
{
ofpact_put_STRIP_VLAN(out)->ofpact.raw = OFPAT_RAW10_STRIP_VLAN;
return 0;
}
|
decode_OFPAT_RAW10_STRIP_VLAN(struct ofpbuf *out)
{
ofpact_put_STRIP_VLAN(out)->ofpact.raw = OFPAT_RAW10_STRIP_VLAN;
return 0;
}
|
C
|
ovs
| 0 |
CVE-2014-9672
|
https://www.cvedetails.com/cve/CVE-2014-9672/
|
CWE-119
|
https://git.savannah.gnu.org/cgit/freetype/freetype2.git/commit/?id=18a8f0d9943369449bc4de92d411c78fb08d616c
|
18a8f0d9943369449bc4de92d411c78fb08d616c
| null |
create_lwfn_name( char* ps_name,
Str255 lwfn_file_name )
{
int max = 5, count = 0;
FT_Byte* p = lwfn_file_name;
FT_Byte* q = (FT_Byte*)ps_name;
lwfn_file_name[0] = 0;
while ( *q )
{
if ( ft_isupper( *q ) )
{
if ( count )
max = 3;
count = 0;
}
if ( count < max && ( ft_isalnum( *q ) || *q == '_' ) )
{
*++p = *q;
lwfn_file_name[0]++;
count++;
}
q++;
}
}
|
create_lwfn_name( char* ps_name,
Str255 lwfn_file_name )
{
int max = 5, count = 0;
FT_Byte* p = lwfn_file_name;
FT_Byte* q = (FT_Byte*)ps_name;
lwfn_file_name[0] = 0;
while ( *q )
{
if ( ft_isupper( *q ) )
{
if ( count )
max = 3;
count = 0;
}
if ( count < max && ( ft_isalnum( *q ) || *q == '_' ) )
{
*++p = *q;
lwfn_file_name[0]++;
count++;
}
q++;
}
}
|
C
|
savannah
| 0 |
CVE-2012-2895
|
https://www.cvedetails.com/cve/CVE-2012-2895/
|
CWE-119
|
https://github.com/chromium/chromium/commit/3475f5e448ddf5e48888f3d0563245cc46e3c98b
|
3475f5e448ddf5e48888f3d0563245cc46e3c98b
|
ash: Add launcher overflow bubble.
- Host a LauncherView in bubble to display overflown items;
- Mouse wheel and two-finger scroll to scroll the LauncherView in bubble in case overflow bubble is overflown;
- Fit bubble when items are added/removed;
- Keep launcher bar on screen when the bubble is shown;
BUG=128054
TEST=Verify launcher overflown items are in a bubble instead of menu.
Review URL: https://chromiumcodereview.appspot.com/10659003
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@146460 0039d316-1c4b-4281-b951-d872f2087c98
|
int ShelfLayoutManager::GetWorkAreaSize(const State& state, int size) const {
if (state.visibility_state == VISIBLE)
return size;
if (state.visibility_state == AUTO_HIDE)
return kAutoHideSize;
return 0;
}
|
int ShelfLayoutManager::GetWorkAreaSize(const State& state, int size) const {
if (state.visibility_state == VISIBLE)
return size;
if (state.visibility_state == AUTO_HIDE)
return kAutoHideSize;
return 0;
}
|
C
|
Chrome
| 0 |
CVE-2018-1000880
|
https://www.cvedetails.com/cve/CVE-2018-1000880/
|
CWE-415
|
https://github.com/libarchive/libarchive/pull/1105/commits/9c84b7426660c09c18cc349f6d70b5f8168b5680
|
9c84b7426660c09c18cc349f6d70b5f8168b5680
|
warc: consume data once read
The warc decoder only used read ahead, it wouldn't actually consume
data that had previously been printed. This means that if you specify
an invalid content length, it will just reprint the same data over
and over and over again until it hits the desired length.
This means that a WARC resource with e.g.
Content-Length: 666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666665
but only a few hundred bytes of data, causes a quasi-infinite loop.
Consume data in subsequent calls to _warc_read.
Found with an AFL + afl-rb + qsym setup.
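A sketch of the fix's essence, assuming libarchive's internal read-ahead/consume pair (the surrounding bookkeeping is simplified away): bytes that were only peeked at must also be consumed, otherwise a bogus Content-Length replays the same window forever.
/* Inside a hypothetical _warc_read() step; 'a' and 'w' are the
 * archive_read and warc_s state from the function above. */
ssize_t avail;
const void *buf = __archive_read_ahead(a, 1, &avail);  /* peek only */
if (buf == NULL || avail <= 0)
	return (ARCHIVE_EOF);
/* ... hand 'avail' bytes to the caller ... */
__archive_read_consume(a, avail);  /* the step the old code skipped:
                                    * without it the next call sees
                                    * the very same bytes again */
w->consumed += avail;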
|
_warc_cleanup(struct archive_read *a)
{
struct warc_s *w = a->format->data;
if (w->pool.len > 0U) {
free(w->pool.str);
}
archive_string_free(&w->sver);
free(w);
a->format->data = NULL;
return (ARCHIVE_OK);
}
|
_warc_cleanup(struct archive_read *a)
{
struct warc_s *w = a->format->data;
if (w->pool.len > 0U) {
free(w->pool.str);
}
archive_string_free(&w->sver);
free(w);
a->format->data = NULL;
return (ARCHIVE_OK);
}
|
C
|
libarchive
| 0 |
null | null | null |
https://github.com/chromium/chromium/commit/fb83de09f2c986ee91741f3a2776feea0e18e3f6
|
fb83de09f2c986ee91741f3a2776feea0e18e3f6
|
Revert "[Picture in Picture] Call parent function in OnGestureEvent."
This reverts commit e60d9aef9d1eeeff4e5954ba137ed5009261f626.
Reason for revert: Causes the close button to receive gesture events even when it's not the target of the tap. This causes the PiP window to unexpectedly close.
Bug: 895773
Original change's description:
> [Picture in Picture] Call parent function in OnGestureEvent.
>
> Change-Id: I854654be22abd217c3f8ed557bc3fb9118c557c6
> Reviewed-on: https://chromium-review.googlesource.com/1192326
> Reviewed-by: CJ DiMeglio <[email protected]>
> Commit-Queue: apacible <[email protected]>
> Cr-Commit-Position: refs/heads/master@{#586820}
# Not skipping CQ checks because original CL landed > 1 day ago.
Change-Id: I2f36d78713f0b811a0a2681e09284c394e146a5c
Reviewed-on: https://chromium-review.googlesource.com/c/1318397
Commit-Queue: Tommy Steimel <[email protected]>
Reviewed-by: CJ DiMeglio <[email protected]>
Reviewed-by: Mounir Lamouri <[email protected]>
Cr-Commit-Position: refs/heads/master@{#607039}
|
OverlayWindowViews::OverlayWindowViews(
content::PictureInPictureWindowController* controller)
: controller_(controller),
window_background_view_(new views::View()),
video_view_(new views::View()),
controls_scrim_view_(new views::View()),
controls_parent_view_(new views::View()),
close_controls_view_(new views::CloseImageButton(this)),
#if defined(OS_CHROMEOS)
resize_handle_view_(new views::ResizeHandleButton(this)),
#endif
play_pause_controls_view_(new views::ToggleImageButton(this)),
hide_controls_timer_(
FROM_HERE,
base::TimeDelta::FromMilliseconds(2500 /* 2.5 seconds */),
base::BindRepeating(&OverlayWindowViews::UpdateControlsVisibility,
base::Unretained(this),
false /* is_visible */)) {
views::Widget::InitParams params(views::Widget::InitParams::TYPE_WINDOW);
params.ownership = views::Widget::InitParams::WIDGET_OWNS_NATIVE_WIDGET;
params.bounds = CalculateAndUpdateWindowBounds();
params.keep_on_top = true;
params.visible_on_all_workspaces = true;
params.remove_standard_frame = true;
params.name = "PictureInPictureWindow";
params.delegate = new OverlayWindowWidgetDelegate(this);
Init(params);
SetUpViews();
#if defined(OS_CHROMEOS)
GetNativeWindow()->SetProperty(ash::kWindowPipTypeKey, true);
#endif // defined(OS_CHROMEOS)
is_initialized_ = true;
}
|
OverlayWindowViews::OverlayWindowViews(
content::PictureInPictureWindowController* controller)
: controller_(controller),
window_background_view_(new views::View()),
video_view_(new views::View()),
controls_scrim_view_(new views::View()),
controls_parent_view_(new views::View()),
close_controls_view_(new views::CloseImageButton(this)),
#if defined(OS_CHROMEOS)
resize_handle_view_(new views::ResizeHandleButton(this)),
#endif
play_pause_controls_view_(new views::ToggleImageButton(this)),
hide_controls_timer_(
FROM_HERE,
base::TimeDelta::FromMilliseconds(2500 /* 2.5 seconds */),
base::BindRepeating(&OverlayWindowViews::UpdateControlsVisibility,
base::Unretained(this),
false /* is_visible */)) {
views::Widget::InitParams params(views::Widget::InitParams::TYPE_WINDOW);
params.ownership = views::Widget::InitParams::WIDGET_OWNS_NATIVE_WIDGET;
params.bounds = CalculateAndUpdateWindowBounds();
params.keep_on_top = true;
params.visible_on_all_workspaces = true;
params.remove_standard_frame = true;
params.name = "PictureInPictureWindow";
params.delegate = new OverlayWindowWidgetDelegate(this);
Init(params);
SetUpViews();
#if defined(OS_CHROMEOS)
GetNativeWindow()->SetProperty(ash::kWindowPipTypeKey, true);
#endif // defined(OS_CHROMEOS)
is_initialized_ = true;
}
|
C
|
Chrome
| 0 |
CVE-2018-13006
|
https://www.cvedetails.com/cve/CVE-2018-13006/
|
CWE-125
|
https://github.com/gpac/gpac/commit/bceb03fd2be95097a7b409ea59914f332fb6bc86
|
bceb03fd2be95097a7b409ea59914f332fb6bc86
|
fixed 2 possible heap overflows (inc. #1088)
|
GF_Err rssr_Size(GF_Box *s)
{
s->size += 4;
return GF_OK;
}
|
GF_Err rssr_Size(GF_Box *s)
{
s->size += 4;
return GF_OK;
}
|
C
|
gpac
| 0 |
CVE-2014-4344
|
https://www.cvedetails.com/cve/CVE-2014-4344/
|
CWE-476
|
https://github.com/krb5/krb5/commit/a7886f0ed1277c69142b14a2c6629175a6331edc
|
a7886f0ed1277c69142b14a2c6629175a6331edc
|
Fix null deref in SPNEGO acceptor [CVE-2014-4344]
When processing a continuation token, acc_ctx_cont was dereferencing
the initial byte of the token without checking the length. This could
result in a null dereference.
CVE-2014-4344:
In MIT krb5 1.5 and newer, an unauthenticated or partially
authenticated remote attacker can cause a NULL dereference and
application crash during a SPNEGO negotiation by sending an empty
token as the second or later context token from initiator to acceptor.
The attacker must provide at least one valid context token in the
security context negotiation before sending the empty token. This can
be done by an unauthenticated attacker by forcing SPNEGO to
renegotiate the underlying mechanism, or by using IAKERB to wrap an
unauthenticated AS-REQ as the first token.
CVSSv2 Vector: AV:N/AC:L/Au:N/C:N/I:N/A:C/E:POC/RL:OF/RC:C
[[email protected]: CVE summary, CVSSv2 vector]
(cherry picked from commit 524688ce87a15fc75f87efc8c039ba4c7d5c197b)
ticket: 7970
version_fixed: 1.12.2
status: resolved
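Per the description above, the essence of the fix is a length check before the first-byte dereference; a hedged sketch follows (variable names are invented, the GSS-API constants are real).
/* Reject an empty continuation token instead of reading byte 0. */
if (input_token == GSS_C_NO_BUFFER || input_token->length == 0) {
	*minor_status = 0;
	return GSS_S_DEFECTIVE_TOKEN;
}
ptr = (unsigned char *)input_token->value;  /* now safe to touch */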
|
spnego_gss_complete_auth_token(
OM_uint32 *minor_status,
const gss_ctx_id_t context_handle,
gss_buffer_t input_message_buffer)
{
OM_uint32 ret;
ret = gss_complete_auth_token(minor_status,
context_handle,
input_message_buffer);
return (ret);
}
|
spnego_gss_complete_auth_token(
OM_uint32 *minor_status,
const gss_ctx_id_t context_handle,
gss_buffer_t input_message_buffer)
{
OM_uint32 ret;
ret = gss_complete_auth_token(minor_status,
context_handle,
input_message_buffer);
return (ret);
}
|
C
|
krb5
| 0 |
CVE-2017-16535
|
https://www.cvedetails.com/cve/CVE-2017-16535/
|
CWE-125
|
https://github.com/torvalds/linux/commit/1c0edc3633b56000e18d82fc241e3995ca18a69e
|
1c0edc3633b56000e18d82fc241e3995ca18a69e
|
USB: core: fix out-of-bounds access bug in usb_get_bos_descriptor()
Andrey used the syzkaller fuzzer to find an out-of-bounds memory
access in usb_get_bos_descriptor(). The code wasn't checking that the
next usb_dev_cap_header structure could fit into the remaining buffer
space.
This patch fixes the error and also reduces the bNumDeviceCaps field
in the header to match the actual number of capabilities found, in
cases where there are fewer than expected.
Reported-by: Andrey Konovalov <[email protected]>
Signed-off-by: Alan Stern <[email protected]>
Tested-by: Andrey Konovalov <[email protected]>
CC: <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
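A sketch of the two changes the message describes (the loop skeleton is simplified; the field names follow the USB 3.0 BOS definitions in ch9.h): verify each capability header fits in what remains, and shrink bNumDeviceCaps when the walk stops early.
struct usb_dev_cap_header *cap;
int i;
for (i = 0; i < num; i++) {
	cap = (struct usb_dev_cap_header *)buffer;
	if (total_len < sizeof(*cap) ||       /* header must fit...   */
	    total_len < cap->bLength) {       /* ...and the full cap  */
		dev->bos->desc->bNumDeviceCaps = i;  /* report only what
						      * was really parsed */
		break;
	}
	total_len -= cap->bLength;
	buffer += cap->bLength;
	/* ... dispatch on cap->bDevCapabilityType ... */
}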
|
static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
int asnum, struct usb_host_interface *ifp, int num_ep,
unsigned char *buffer, int size)
{
unsigned char *buffer0 = buffer;
struct usb_endpoint_descriptor *d;
struct usb_host_endpoint *endpoint;
int n, i, j, retval;
unsigned int maxp;
const unsigned short *maxpacket_maxes;
d = (struct usb_endpoint_descriptor *) buffer;
buffer += d->bLength;
size -= d->bLength;
if (d->bLength >= USB_DT_ENDPOINT_AUDIO_SIZE)
n = USB_DT_ENDPOINT_AUDIO_SIZE;
else if (d->bLength >= USB_DT_ENDPOINT_SIZE)
n = USB_DT_ENDPOINT_SIZE;
else {
dev_warn(ddev, "config %d interface %d altsetting %d has an "
"invalid endpoint descriptor of length %d, skipping\n",
cfgno, inum, asnum, d->bLength);
goto skip_to_next_endpoint_or_interface_descriptor;
}
i = d->bEndpointAddress & ~USB_ENDPOINT_DIR_MASK;
if (i >= 16 || i == 0) {
dev_warn(ddev, "config %d interface %d altsetting %d has an "
"invalid endpoint with address 0x%X, skipping\n",
cfgno, inum, asnum, d->bEndpointAddress);
goto skip_to_next_endpoint_or_interface_descriptor;
}
/* Only store as many endpoints as we have room for */
if (ifp->desc.bNumEndpoints >= num_ep)
goto skip_to_next_endpoint_or_interface_descriptor;
/* Check for duplicate endpoint addresses */
for (i = 0; i < ifp->desc.bNumEndpoints; ++i) {
if (ifp->endpoint[i].desc.bEndpointAddress ==
d->bEndpointAddress) {
dev_warn(ddev, "config %d interface %d altsetting %d has a duplicate endpoint with address 0x%X, skipping\n",
cfgno, inum, asnum, d->bEndpointAddress);
goto skip_to_next_endpoint_or_interface_descriptor;
}
}
endpoint = &ifp->endpoint[ifp->desc.bNumEndpoints];
++ifp->desc.bNumEndpoints;
memcpy(&endpoint->desc, d, n);
INIT_LIST_HEAD(&endpoint->urb_list);
/*
* Fix up bInterval values outside the legal range.
* Use 10 or 8 ms if no proper value can be guessed.
*/
i = 0; /* i = min, j = max, n = default */
j = 255;
if (usb_endpoint_xfer_int(d)) {
i = 1;
switch (to_usb_device(ddev)->speed) {
case USB_SPEED_SUPER_PLUS:
case USB_SPEED_SUPER:
case USB_SPEED_HIGH:
/*
* Many device manufacturers are using full-speed
* bInterval values in high-speed interrupt endpoint
* descriptors. Try to fix those and fall back to an
* 8-ms default value otherwise.
*/
n = fls(d->bInterval*8);
if (n == 0)
n = 7; /* 8 ms = 2^(7-1) uframes */
j = 16;
/*
* Adjust bInterval for quirked devices.
*/
/*
* This quirk fixes bIntervals reported in ms.
*/
if (to_usb_device(ddev)->quirks &
USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL) {
n = clamp(fls(d->bInterval) + 3, i, j);
i = j = n;
}
/*
* This quirk fixes bIntervals reported in
* linear microframes.
*/
if (to_usb_device(ddev)->quirks &
USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL) {
n = clamp(fls(d->bInterval), i, j);
i = j = n;
}
break;
default: /* USB_SPEED_FULL or _LOW */
/*
* For low-speed, 10 ms is the official minimum.
* But some "overclocked" devices might want faster
* polling so we'll allow it.
*/
n = 10;
break;
}
} else if (usb_endpoint_xfer_isoc(d)) {
i = 1;
j = 16;
switch (to_usb_device(ddev)->speed) {
case USB_SPEED_HIGH:
n = 7; /* 8 ms = 2^(7-1) uframes */
break;
default: /* USB_SPEED_FULL */
n = 4; /* 8 ms = 2^(4-1) frames */
break;
}
}
if (d->bInterval < i || d->bInterval > j) {
dev_warn(ddev, "config %d interface %d altsetting %d "
"endpoint 0x%X has an invalid bInterval %d, "
"changing to %d\n",
cfgno, inum, asnum,
d->bEndpointAddress, d->bInterval, n);
endpoint->desc.bInterval = n;
}
/* Some buggy low-speed devices have Bulk endpoints, which is
* explicitly forbidden by the USB spec. In an attempt to make
* them usable, we will try treating them as Interrupt endpoints.
*/
if (to_usb_device(ddev)->speed == USB_SPEED_LOW &&
usb_endpoint_xfer_bulk(d)) {
dev_warn(ddev, "config %d interface %d altsetting %d "
"endpoint 0x%X is Bulk; changing to Interrupt\n",
cfgno, inum, asnum, d->bEndpointAddress);
endpoint->desc.bmAttributes = USB_ENDPOINT_XFER_INT;
endpoint->desc.bInterval = 1;
if (usb_endpoint_maxp(&endpoint->desc) > 8)
endpoint->desc.wMaxPacketSize = cpu_to_le16(8);
}
/* Validate the wMaxPacketSize field */
maxp = usb_endpoint_maxp(&endpoint->desc);
/* Find the highest legal maxpacket size for this endpoint */
i = 0; /* additional transactions per microframe */
switch (to_usb_device(ddev)->speed) {
case USB_SPEED_LOW:
maxpacket_maxes = low_speed_maxpacket_maxes;
break;
case USB_SPEED_FULL:
maxpacket_maxes = full_speed_maxpacket_maxes;
break;
case USB_SPEED_HIGH:
/* Bits 12..11 are allowed only for HS periodic endpoints */
if (usb_endpoint_xfer_int(d) || usb_endpoint_xfer_isoc(d)) {
i = maxp & (BIT(12) | BIT(11));
maxp &= ~i;
}
/* fallthrough */
default:
maxpacket_maxes = high_speed_maxpacket_maxes;
break;
case USB_SPEED_SUPER:
case USB_SPEED_SUPER_PLUS:
maxpacket_maxes = super_speed_maxpacket_maxes;
break;
}
j = maxpacket_maxes[usb_endpoint_type(&endpoint->desc)];
if (maxp > j) {
dev_warn(ddev, "config %d interface %d altsetting %d endpoint 0x%X has invalid maxpacket %d, setting to %d\n",
cfgno, inum, asnum, d->bEndpointAddress, maxp, j);
maxp = j;
endpoint->desc.wMaxPacketSize = cpu_to_le16(i | maxp);
}
/*
* Some buggy high speed devices have bulk endpoints using
* maxpacket sizes other than 512. High speed HCDs may not
* be able to handle that particular bug, so let's warn...
*/
if (to_usb_device(ddev)->speed == USB_SPEED_HIGH
&& usb_endpoint_xfer_bulk(d)) {
if (maxp != 512)
dev_warn(ddev, "config %d interface %d altsetting %d "
"bulk endpoint 0x%X has invalid maxpacket %d\n",
cfgno, inum, asnum, d->bEndpointAddress,
maxp);
}
/* Parse a possible SuperSpeed endpoint companion descriptor */
if (to_usb_device(ddev)->speed >= USB_SPEED_SUPER)
usb_parse_ss_endpoint_companion(ddev, cfgno,
inum, asnum, endpoint, buffer, size);
/* Skip over any Class Specific or Vendor Specific descriptors;
* find the next endpoint or interface descriptor */
endpoint->extra = buffer;
i = find_next_descriptor(buffer, size, USB_DT_ENDPOINT,
USB_DT_INTERFACE, &n);
endpoint->extralen = i;
retval = buffer - buffer0 + i;
if (n > 0)
dev_dbg(ddev, "skipped %d descriptor%s after %s\n",
n, plural(n), "endpoint");
return retval;
skip_to_next_endpoint_or_interface_descriptor:
i = find_next_descriptor(buffer, size, USB_DT_ENDPOINT,
USB_DT_INTERFACE, NULL);
return buffer - buffer0 + i;
}
|
static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
int asnum, struct usb_host_interface *ifp, int num_ep,
unsigned char *buffer, int size)
{
unsigned char *buffer0 = buffer;
struct usb_endpoint_descriptor *d;
struct usb_host_endpoint *endpoint;
int n, i, j, retval;
unsigned int maxp;
const unsigned short *maxpacket_maxes;
d = (struct usb_endpoint_descriptor *) buffer;
buffer += d->bLength;
size -= d->bLength;
if (d->bLength >= USB_DT_ENDPOINT_AUDIO_SIZE)
n = USB_DT_ENDPOINT_AUDIO_SIZE;
else if (d->bLength >= USB_DT_ENDPOINT_SIZE)
n = USB_DT_ENDPOINT_SIZE;
else {
dev_warn(ddev, "config %d interface %d altsetting %d has an "
"invalid endpoint descriptor of length %d, skipping\n",
cfgno, inum, asnum, d->bLength);
goto skip_to_next_endpoint_or_interface_descriptor;
}
i = d->bEndpointAddress & ~USB_ENDPOINT_DIR_MASK;
if (i >= 16 || i == 0) {
dev_warn(ddev, "config %d interface %d altsetting %d has an "
"invalid endpoint with address 0x%X, skipping\n",
cfgno, inum, asnum, d->bEndpointAddress);
goto skip_to_next_endpoint_or_interface_descriptor;
}
/* Only store as many endpoints as we have room for */
if (ifp->desc.bNumEndpoints >= num_ep)
goto skip_to_next_endpoint_or_interface_descriptor;
/* Check for duplicate endpoint addresses */
for (i = 0; i < ifp->desc.bNumEndpoints; ++i) {
if (ifp->endpoint[i].desc.bEndpointAddress ==
d->bEndpointAddress) {
dev_warn(ddev, "config %d interface %d altsetting %d has a duplicate endpoint with address 0x%X, skipping\n",
cfgno, inum, asnum, d->bEndpointAddress);
goto skip_to_next_endpoint_or_interface_descriptor;
}
}
endpoint = &ifp->endpoint[ifp->desc.bNumEndpoints];
++ifp->desc.bNumEndpoints;
memcpy(&endpoint->desc, d, n);
INIT_LIST_HEAD(&endpoint->urb_list);
/*
* Fix up bInterval values outside the legal range.
* Use 10 or 8 ms if no proper value can be guessed.
*/
i = 0; /* i = min, j = max, n = default */
j = 255;
if (usb_endpoint_xfer_int(d)) {
i = 1;
switch (to_usb_device(ddev)->speed) {
case USB_SPEED_SUPER_PLUS:
case USB_SPEED_SUPER:
case USB_SPEED_HIGH:
/*
* Many device manufacturers are using full-speed
* bInterval values in high-speed interrupt endpoint
* descriptors. Try to fix those and fall back to an
* 8-ms default value otherwise.
*/
n = fls(d->bInterval*8);
if (n == 0)
n = 7; /* 8 ms = 2^(7-1) uframes */
j = 16;
/*
* Adjust bInterval for quirked devices.
*/
/*
* This quirk fixes bIntervals reported in ms.
*/
if (to_usb_device(ddev)->quirks &
USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL) {
n = clamp(fls(d->bInterval) + 3, i, j);
i = j = n;
}
/*
* This quirk fixes bIntervals reported in
* linear microframes.
*/
if (to_usb_device(ddev)->quirks &
USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL) {
n = clamp(fls(d->bInterval), i, j);
i = j = n;
}
break;
default: /* USB_SPEED_FULL or _LOW */
/*
* For low-speed, 10 ms is the official minimum.
* But some "overclocked" devices might want faster
* polling so we'll allow it.
*/
n = 10;
break;
}
} else if (usb_endpoint_xfer_isoc(d)) {
i = 1;
j = 16;
switch (to_usb_device(ddev)->speed) {
case USB_SPEED_HIGH:
n = 7; /* 8 ms = 2^(7-1) uframes */
break;
default: /* USB_SPEED_FULL */
n = 4; /* 8 ms = 2^(4-1) frames */
break;
}
}
if (d->bInterval < i || d->bInterval > j) {
dev_warn(ddev, "config %d interface %d altsetting %d "
"endpoint 0x%X has an invalid bInterval %d, "
"changing to %d\n",
cfgno, inum, asnum,
d->bEndpointAddress, d->bInterval, n);
endpoint->desc.bInterval = n;
}
/* Some buggy low-speed devices have Bulk endpoints, which is
* explicitly forbidden by the USB spec. In an attempt to make
* them usable, we will try treating them as Interrupt endpoints.
*/
if (to_usb_device(ddev)->speed == USB_SPEED_LOW &&
usb_endpoint_xfer_bulk(d)) {
dev_warn(ddev, "config %d interface %d altsetting %d "
"endpoint 0x%X is Bulk; changing to Interrupt\n",
cfgno, inum, asnum, d->bEndpointAddress);
endpoint->desc.bmAttributes = USB_ENDPOINT_XFER_INT;
endpoint->desc.bInterval = 1;
if (usb_endpoint_maxp(&endpoint->desc) > 8)
endpoint->desc.wMaxPacketSize = cpu_to_le16(8);
}
/* Validate the wMaxPacketSize field */
maxp = usb_endpoint_maxp(&endpoint->desc);
/* Find the highest legal maxpacket size for this endpoint */
i = 0; /* additional transactions per microframe */
switch (to_usb_device(ddev)->speed) {
case USB_SPEED_LOW:
maxpacket_maxes = low_speed_maxpacket_maxes;
break;
case USB_SPEED_FULL:
maxpacket_maxes = full_speed_maxpacket_maxes;
break;
case USB_SPEED_HIGH:
/* Bits 12..11 are allowed only for HS periodic endpoints */
if (usb_endpoint_xfer_int(d) || usb_endpoint_xfer_isoc(d)) {
i = maxp & (BIT(12) | BIT(11));
maxp &= ~i;
}
/* fallthrough */
default:
maxpacket_maxes = high_speed_maxpacket_maxes;
break;
case USB_SPEED_SUPER:
case USB_SPEED_SUPER_PLUS:
maxpacket_maxes = super_speed_maxpacket_maxes;
break;
}
j = maxpacket_maxes[usb_endpoint_type(&endpoint->desc)];
if (maxp > j) {
dev_warn(ddev, "config %d interface %d altsetting %d endpoint 0x%X has invalid maxpacket %d, setting to %d\n",
cfgno, inum, asnum, d->bEndpointAddress, maxp, j);
maxp = j;
endpoint->desc.wMaxPacketSize = cpu_to_le16(i | maxp);
}
/*
* Some buggy high speed devices have bulk endpoints using
* maxpacket sizes other than 512. High speed HCDs may not
* be able to handle that particular bug, so let's warn...
*/
if (to_usb_device(ddev)->speed == USB_SPEED_HIGH
&& usb_endpoint_xfer_bulk(d)) {
if (maxp != 512)
dev_warn(ddev, "config %d interface %d altsetting %d "
"bulk endpoint 0x%X has invalid maxpacket %d\n",
cfgno, inum, asnum, d->bEndpointAddress,
maxp);
}
/* Parse a possible SuperSpeed endpoint companion descriptor */
if (to_usb_device(ddev)->speed >= USB_SPEED_SUPER)
usb_parse_ss_endpoint_companion(ddev, cfgno,
inum, asnum, endpoint, buffer, size);
/* Skip over any Class Specific or Vendor Specific descriptors;
* find the next endpoint or interface descriptor */
endpoint->extra = buffer;
i = find_next_descriptor(buffer, size, USB_DT_ENDPOINT,
USB_DT_INTERFACE, &n);
endpoint->extralen = i;
retval = buffer - buffer0 + i;
if (n > 0)
dev_dbg(ddev, "skipped %d descriptor%s after %s\n",
n, plural(n), "endpoint");
return retval;
skip_to_next_endpoint_or_interface_descriptor:
i = find_next_descriptor(buffer, size, USB_DT_ENDPOINT,
USB_DT_INTERFACE, NULL);
return buffer - buffer0 + i;
}
|
C
|
linux
| 0 |
CVE-2019-13272
|
https://www.cvedetails.com/cve/CVE-2019-13272/
|
CWE-264
|
https://github.com/torvalds/linux/commit/6994eefb0053799d2e07cd140df6c2ea106c41ee
|
6994eefb0053799d2e07cd140df6c2ea106c41ee
|
ptrace: Fix ->ptracer_cred handling for PTRACE_TRACEME
Fix two issues:
When called for PTRACE_TRACEME, ptrace_link() would obtain an RCU
reference to the parent's objective credentials, then give that pointer
to get_cred(). However, the object lifetime rules for things like
struct cred do not permit unconditionally turning an RCU reference into
a stable reference.
PTRACE_TRACEME records the parent's credentials as if the parent was
acting as the subject, but that's not the case. If a malicious
unprivileged child uses PTRACE_TRACEME and the parent is privileged, and
at a later point, the parent process becomes attacker-controlled
(because it drops privileges and calls execve()), the attacker ends up
with control over two processes with a privileged ptrace relationship,
which can be abused to ptrace a suid binary and obtain root privileges.
Fix both of these by always recording the credentials of the process
that is requesting the creation of the ptrace relationship:
current_cred() can't change under us, and current is the proper subject
for access control.
This change is theoretically userspace-visible, but I am not aware of
any code that it will actually break.
Fixes: 64b875f7ac8a ("ptrace: Capture the ptracer's creds not PT_PTRACE_CAP")
Signed-off-by: Jann Horn <[email protected]>
Acked-by: Oleg Nesterov <[email protected]>
Cc: [email protected]
Signed-off-by: Linus Torvalds <[email protected]>
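The corresponding change is small enough to sketch; this mirrors the shape the commit text describes (treat it as a paraphrase, not the verbatim diff): always record the requester's own credentials.
static void ptrace_link(struct task_struct *child,
			struct task_struct *new_parent)
{
	/* current is the process asking for the ptrace relationship,
	 * whether via PTRACE_ATTACH or PTRACE_TRACEME, so its stable
	 * credentials are the right subject for later access checks. */
	__ptrace_link(child, new_parent, current_cred());
}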
|
static int __ptrace_may_access(struct task_struct *task, unsigned int mode)
{
const struct cred *cred = current_cred(), *tcred;
struct mm_struct *mm;
kuid_t caller_uid;
kgid_t caller_gid;
if (!(mode & PTRACE_MODE_FSCREDS) == !(mode & PTRACE_MODE_REALCREDS)) {
WARN(1, "denying ptrace access check without PTRACE_MODE_*CREDS\n");
return -EPERM;
}
/* May we inspect the given task?
* This check is used both for attaching with ptrace
* and for allowing access to sensitive information in /proc.
*
* ptrace_attach denies several cases that /proc allows
* because setting up the necessary parent/child relationship
* or halting the specified task is impossible.
*/
/* Don't let security modules deny introspection */
if (same_thread_group(task, current))
return 0;
rcu_read_lock();
if (mode & PTRACE_MODE_FSCREDS) {
caller_uid = cred->fsuid;
caller_gid = cred->fsgid;
} else {
/*
* Using the euid would make more sense here, but something
* in userland might rely on the old behavior, and this
* shouldn't be a security problem since
* PTRACE_MODE_REALCREDS implies that the caller explicitly
* used a syscall that requests access to another process
* (and not a filesystem syscall to procfs).
*/
caller_uid = cred->uid;
caller_gid = cred->gid;
}
tcred = __task_cred(task);
if (uid_eq(caller_uid, tcred->euid) &&
uid_eq(caller_uid, tcred->suid) &&
uid_eq(caller_uid, tcred->uid) &&
gid_eq(caller_gid, tcred->egid) &&
gid_eq(caller_gid, tcred->sgid) &&
gid_eq(caller_gid, tcred->gid))
goto ok;
if (ptrace_has_cap(tcred->user_ns, mode))
goto ok;
rcu_read_unlock();
return -EPERM;
ok:
rcu_read_unlock();
/*
* If a task drops privileges and becomes nondumpable (through a syscall
* like setresuid()) while we are trying to access it, we must ensure
* that the dumpability is read after the credentials; otherwise,
* we may be able to attach to a task that we shouldn't be able to
* attach to (as if the task had dropped privileges without becoming
* nondumpable).
* Pairs with a write barrier in commit_creds().
*/
smp_rmb();
mm = task->mm;
if (mm &&
((get_dumpable(mm) != SUID_DUMP_USER) &&
!ptrace_has_cap(mm->user_ns, mode)))
return -EPERM;
return security_ptrace_access_check(task, mode);
}
|
static int __ptrace_may_access(struct task_struct *task, unsigned int mode)
{
const struct cred *cred = current_cred(), *tcred;
struct mm_struct *mm;
kuid_t caller_uid;
kgid_t caller_gid;
if (!(mode & PTRACE_MODE_FSCREDS) == !(mode & PTRACE_MODE_REALCREDS)) {
WARN(1, "denying ptrace access check without PTRACE_MODE_*CREDS\n");
return -EPERM;
}
/* May we inspect the given task?
* This check is used both for attaching with ptrace
* and for allowing access to sensitive information in /proc.
*
* ptrace_attach denies several cases that /proc allows
* because setting up the necessary parent/child relationship
* or halting the specified task is impossible.
*/
/* Don't let security modules deny introspection */
if (same_thread_group(task, current))
return 0;
rcu_read_lock();
if (mode & PTRACE_MODE_FSCREDS) {
caller_uid = cred->fsuid;
caller_gid = cred->fsgid;
} else {
/*
* Using the euid would make more sense here, but something
* in userland might rely on the old behavior, and this
* shouldn't be a security problem since
* PTRACE_MODE_REALCREDS implies that the caller explicitly
* used a syscall that requests access to another process
* (and not a filesystem syscall to procfs).
*/
caller_uid = cred->uid;
caller_gid = cred->gid;
}
tcred = __task_cred(task);
if (uid_eq(caller_uid, tcred->euid) &&
uid_eq(caller_uid, tcred->suid) &&
uid_eq(caller_uid, tcred->uid) &&
gid_eq(caller_gid, tcred->egid) &&
gid_eq(caller_gid, tcred->sgid) &&
gid_eq(caller_gid, tcred->gid))
goto ok;
if (ptrace_has_cap(tcred->user_ns, mode))
goto ok;
rcu_read_unlock();
return -EPERM;
ok:
rcu_read_unlock();
/*
* If a task drops privileges and becomes nondumpable (through a syscall
* like setresuid()) while we are trying to access it, we must ensure
* that the dumpability is read after the credentials; otherwise,
* we may be able to attach to a task that we shouldn't be able to
* attach to (as if the task had dropped privileges without becoming
* nondumpable).
* Pairs with a write barrier in commit_creds().
*/
smp_rmb();
mm = task->mm;
if (mm &&
((get_dumpable(mm) != SUID_DUMP_USER) &&
!ptrace_has_cap(mm->user_ns, mode)))
return -EPERM;
return security_ptrace_access_check(task, mode);
}
|
C
|
linux
| 0 |
CVE-2018-16425
|
https://www.cvedetails.com/cve/CVE-2018-16425/
|
CWE-415
|
https://github.com/OpenSC/OpenSC/commit/360e95d45ac4123255a4c796db96337f332160ad#diff-d643a0fa169471dbf2912f4866dc49c5
|
360e95d45ac4123255a4c796db96337f332160ad#diff-d643a0fa169471dbf2912f4866dc49c5
|
fixed out of bounds writes
Thanks to Eric Sesterhenn from X41 D-SEC GmbH
for reporting the problems.
|
int uncompress_gzip(void* uncompressed, size_t *uncompressed_len,
const void* compressed, size_t compressed_len)
{
return SC_ERROR_NOT_SUPPORTED;
}
|
int uncompress_gzip(void* uncompressed, size_t *uncompressed_len,
const void* compressed, size_t compressed_len)
{
return SC_ERROR_NOT_SUPPORTED;
}
|
C
|
OpenSC
| 0 |
null | null | null |
https://github.com/chromium/chromium/commit/74c1ec481b33194dc7a428f2d58fc89640b313ae
|
74c1ec481b33194dc7a428f2d58fc89640b313ae
|
Fix glGetFramebufferAttachmentParameteriv so it returns
current names for buffers.
TEST=unit_tests and conformance tests
BUG=none
Review URL: http://codereview.chromium.org/3135003
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@55831 0039d316-1c4b-4281-b951-d872f2087c98
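Conceptually, the fix translates driver-level (service) object names back into client-visible ids before returning them; the sketch below is a toy C illustration with an invented lookup helper, since the real logic lives in Chromium's GLES2 decoder.
GLint service_id = 0;
glGetFramebufferAttachmentParameteriv(
	GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
	GL_FRAMEBUFFER_ATTACHMENT_OBJECT_NAME, &service_id);
/* lookup_client_id() is hypothetical: map the GL driver's object
 * name back to the id the command-buffer client originally created,
 * so the client never sees service-side names. */
GLint client_id = lookup_client_id(id_map, service_id);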
|
error::Error GLES2DecoderImpl::HandleSwapBuffers(
uint32 immediate_data_size, const gles2::SwapBuffers& c) {
if (offscreen_target_frame_buffer_.get()) {
ScopedGLErrorSuppressor suppressor(this);
if (!UpdateOffscreenFrameBufferSize())
return error::kLostContext;
if (parent_) {
ScopedFrameBufferBinder binder(this,
offscreen_target_frame_buffer_->id());
offscreen_saved_color_texture_->Copy(
offscreen_saved_color_texture_->size());
}
} else {
context_->SwapBuffers();
}
if (swap_buffers_callback_.get()) {
swap_buffers_callback_->Run();
}
return error::kNoError;
}
|
error::Error GLES2DecoderImpl::HandleSwapBuffers(
uint32 immediate_data_size, const gles2::SwapBuffers& c) {
if (offscreen_target_frame_buffer_.get()) {
ScopedGLErrorSuppressor suppressor(this);
if (!UpdateOffscreenFrameBufferSize())
return error::kLostContext;
if (parent_) {
ScopedFrameBufferBinder binder(this,
offscreen_target_frame_buffer_->id());
offscreen_saved_color_texture_->Copy(
offscreen_saved_color_texture_->size());
}
} else {
context_->SwapBuffers();
}
if (swap_buffers_callback_.get()) {
swap_buffers_callback_->Run();
}
return error::kNoError;
}
|
C
|
Chrome
| 0 |
CVE-2017-6850
|
https://www.cvedetails.com/cve/CVE-2017-6850/
|
CWE-476
|
https://github.com/mdadams/jasper/commit/e96fc4fdd525fa0ede28074a7e2b1caf94b58b0d
|
e96fc4fdd525fa0ede28074a7e2b1caf94b58b0d
|
Fixed bugs due to uninitialized data in the JP2 decoder.
Also, added some comments marking I/O stream interfaces that probably
need to be changed (in the long term) to fix integer overflow problems.
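A generic illustration of the first fix class named above (zeroing freshly allocated decoder state); jas_malloc() and jp2_box_t are jasper's real names, but the surrounding fragment is invented and assumes <string.h> for memset.
jp2_box_t *box;
if (!(box = jas_malloc(sizeof(jp2_box_t))))
	return 0;
/* Previously-uninitialized fields could be read on error paths;
 * zeroing the whole struct up front makes those reads harmless. */
memset(box, 0, sizeof(jp2_box_t));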
|
static int sfile_close(jas_stream_obj_t *obj)
{
FILE *fp;
JAS_DBGLOG(100, ("sfile_close(%p)\n", obj));
fp = JAS_CAST(FILE *, obj);
return fclose(fp);
}
|
static int sfile_close(jas_stream_obj_t *obj)
{
FILE *fp;
JAS_DBGLOG(100, ("sfile_close(%p)\n", obj));
fp = JAS_CAST(FILE *, obj);
return fclose(fp);
}
|
C
|
jasper
| 0 |
CVE-2018-6040
|
https://www.cvedetails.com/cve/CVE-2018-6040/
|
CWE-732
|
https://github.com/chromium/chromium/commit/209f225b2d51334eaf69ffdf002e25eaa1e0d448
|
209f225b2d51334eaf69ffdf002e25eaa1e0d448
|
Fixed bug where PlzNavigate CSP in a iframe did not get the inherited CSP
When inheriting the CSP from a parent document to a local-scheme CSP,
it does not always get propagated to the PlzNavigate CSP. This means
that PlzNavigate CSP checks (like `frame-src`) would be ran against
a blank policy instead of the proper inherited policy.
Bug: 778658
Change-Id: I61bb0d432e1cea52f199e855624cb7b3078f56a9
Reviewed-on: https://chromium-review.googlesource.com/765969
Commit-Queue: Andy Paicu <[email protected]>
Reviewed-by: Mike West <[email protected]>
Cr-Commit-Position: refs/heads/master@{#518245}
|
EventQueue* Document::GetEventQueue() const {
if (!dom_window_)
return nullptr;
return dom_window_->GetEventQueue();
}
|
EventQueue* Document::GetEventQueue() const {
if (!dom_window_)
return nullptr;
return dom_window_->GetEventQueue();
}
|
C
|
Chrome
| 0 |
CVE-2015-3412
|
https://www.cvedetails.com/cve/CVE-2015-3412/
|
CWE-254
|
https://git.php.net/?p=php-src.git;a=commit;h=4435b9142ff9813845d5c97ab29a5d637bedb257
|
4435b9142ff9813845d5c97ab29a5d637bedb257
| null |
PHP_METHOD(domdocument, registerNodeClass)
{
zval *id;
xmlDoc *docp;
char *baseclass = NULL, *extendedclass = NULL;
int baseclass_len = 0, extendedclass_len = 0;
zend_class_entry *basece = NULL, *ce = NULL;
dom_object *intern;
if (zend_parse_method_parameters(ZEND_NUM_ARGS() TSRMLS_CC, getThis(), "Oss!", &id, dom_document_class_entry, &baseclass, &baseclass_len, &extendedclass, &extendedclass_len) == FAILURE) {
return;
}
if (baseclass_len) {
zend_class_entry **pce;
if (zend_lookup_class(baseclass, baseclass_len, &pce TSRMLS_CC) == FAILURE) {
php_error_docref(NULL TSRMLS_CC, E_ERROR, "Class %s does not exist", baseclass);
return;
}
basece = *pce;
}
if (basece == NULL || ! instanceof_function(basece, dom_node_class_entry TSRMLS_CC)) {
php_error_docref(NULL TSRMLS_CC, E_ERROR, "Class %s is not derived from DOMNode.", baseclass);
return;
}
if (extendedclass_len) {
zend_class_entry **pce;
if (zend_lookup_class(extendedclass, extendedclass_len, &pce TSRMLS_CC) == FAILURE) {
php_error_docref(NULL TSRMLS_CC, E_ERROR, "Class %s does not exist", extendedclass);
}
ce = *pce;
}
if (ce == NULL || instanceof_function(ce, basece TSRMLS_CC)) {
DOM_GET_OBJ(docp, id, xmlDocPtr, intern);
if (dom_set_doc_classmap(intern->document, basece, ce TSRMLS_CC) == FAILURE) {
php_error_docref(NULL TSRMLS_CC, E_ERROR, "Class %s could not be registered.", extendedclass);
}
RETURN_TRUE;
} else {
php_error_docref(NULL TSRMLS_CC, E_ERROR, "Class %s is not derived from %s.", extendedclass, baseclass);
}
RETURN_FALSE;
}
|
PHP_METHOD(domdocument, registerNodeClass)
{
zval *id;
xmlDoc *docp;
char *baseclass = NULL, *extendedclass = NULL;
int baseclass_len = 0, extendedclass_len = 0;
zend_class_entry *basece = NULL, *ce = NULL;
dom_object *intern;
if (zend_parse_method_parameters(ZEND_NUM_ARGS() TSRMLS_CC, getThis(), "Oss!", &id, dom_document_class_entry, &baseclass, &baseclass_len, &extendedclass, &extendedclass_len) == FAILURE) {
return;
}
if (baseclass_len) {
zend_class_entry **pce;
if (zend_lookup_class(baseclass, baseclass_len, &pce TSRMLS_CC) == FAILURE) {
php_error_docref(NULL TSRMLS_CC, E_ERROR, "Class %s does not exist", baseclass);
return;
}
basece = *pce;
}
if (basece == NULL || ! instanceof_function(basece, dom_node_class_entry TSRMLS_CC)) {
php_error_docref(NULL TSRMLS_CC, E_ERROR, "Class %s is not derived from DOMNode.", baseclass);
return;
}
if (extendedclass_len) {
zend_class_entry **pce;
if (zend_lookup_class(extendedclass, extendedclass_len, &pce TSRMLS_CC) == FAILURE) {
php_error_docref(NULL TSRMLS_CC, E_ERROR, "Class %s does not exist", extendedclass);
}
ce = *pce;
}
if (ce == NULL || instanceof_function(ce, basece TSRMLS_CC)) {
DOM_GET_OBJ(docp, id, xmlDocPtr, intern);
if (dom_set_doc_classmap(intern->document, basece, ce TSRMLS_CC) == FAILURE) {
php_error_docref(NULL TSRMLS_CC, E_ERROR, "Class %s could not be registered.", extendedclass);
}
RETURN_TRUE;
} else {
php_error_docref(NULL TSRMLS_CC, E_ERROR, "Class %s is not derived from %s.", extendedclass, baseclass);
}
RETURN_FALSE;
}
|
C
|
php
| 0 |