Columns (each record below repeats these fields in this order, one field per line):

  CVE ID           string, 13-43 chars
  CVE Page         string, 45-48 chars
  CWE ID           string, 90 distinct values
  codeLink         string, 46-139 chars
  commit_id        string, 6-81 chars
  commit_message   string, 3-13.3k chars
  func_after       string, 14-241k chars
  func_before      string, 14-241k chars
  lang             string, 3 distinct values
  project          string, 309 distinct values
  vul              int8, 0 or 1
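The rows below each pair a security-fixing commit with a single function, captured before and after the fix, in the style of the Big-Vul corpus. As a minimal sketch of how such a dump could be consumed, assuming it is published as a Hugging Face dataset: the repository path in the snippet is a placeholder, and the column semantics (func_before = pre-fix body, func_after = post-fix body, vul = 1 when the fixing commit changed that function) are inferred from the rows rather than stated by the dump itself.

```python
# Sketch only: load a dump with the columns listed above and inspect the
# rows flagged as vulnerable. "user/bigvul-style-dump" is a placeholder
# path, not a confirmed dataset name.
import difflib

from datasets import load_dataset

ds = load_dataset("user/bigvul-style-dump", split="train")  # placeholder

# vul == 1 rows are those where the fixing commit changed the function,
# so func_before and func_after differ; vul == 0 rows keep both columns
# byte-identical.
vulnerable = ds.filter(lambda row: row["vul"] == 1)

for row in vulnerable.select(range(min(3, len(vulnerable)))):
    print(row["CVE ID"], row["CWE ID"], row["project"], row["commit_id"])
    diff = difflib.unified_diff(
        row["func_before"].splitlines(),
        row["func_after"].splitlines(),
        fromfile="func_before",
        tofile="func_after",
        lineterm="",
    )
    print("\n".join(diff))
```

In the preview that follows, most rows carry vul = 0 (the two function columns are identical); only a couple of rows, such as CVE-2011-3055 and CVE-2016-6720, carry an actual before/after delta.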
CVE-2016-3834
https://www.cvedetails.com/cve/CVE-2016-3834/
CWE-200
https://android.googlesource.com/platform/frameworks/av/+/1f24c730ab6ca5aff1e3137b340b8aeaeda4bdbc
1f24c730ab6ca5aff1e3137b340b8aeaeda4bdbc
DO NOT MERGE: Camera: Adjust pointers to ANW buffers to avoid infoleak Subtract address of a random static object from pointers being routed through app process. Bug: 28466701 Change-Id: Idcbfe81e9507433769672f3dc6d67db5eeed4e04
void CameraSource::ProxyListener::dataCallbackTimestamp( nsecs_t timestamp, int32_t msgType, const sp<IMemory>& dataPtr) { mSource->dataCallbackTimestamp(timestamp / 1000, msgType, dataPtr); }
void CameraSource::ProxyListener::dataCallbackTimestamp( nsecs_t timestamp, int32_t msgType, const sp<IMemory>& dataPtr) { mSource->dataCallbackTimestamp(timestamp / 1000, msgType, dataPtr); }
C
Android
0
CVE-2011-4127
https://www.cvedetails.com/cve/CVE-2011-4127/
CWE-264
https://github.com/torvalds/linux/commit/ec8013beddd717d1740cfefb1a9b900deef85462
ec8013beddd717d1740cfefb1a9b900deef85462
dm: do not forward ioctls from logical volumes to the underlying device A logical volume can map to just part of underlying physical volume. In this case, it must be treated like a partition. Based on a patch from Alasdair G Kergon. Cc: Alasdair G Kergon <[email protected]> Cc: [email protected] Signed-off-by: Paolo Bonzini <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
static int linear_iterate_devices(struct dm_target *ti, iterate_devices_callout_fn fn, void *data) { struct linear_c *lc = ti->private; return fn(ti, lc->dev, lc->start, ti->len, data); }
static int linear_iterate_devices(struct dm_target *ti, iterate_devices_callout_fn fn, void *data) { struct linear_c *lc = ti->private; return fn(ti, lc->dev, lc->start, ti->len, data); }
C
linux
0
CVE-2016-10269
https://www.cvedetails.com/cve/CVE-2016-10269/
CWE-125
https://github.com/vadz/libtiff/commit/1044b43637fa7f70fb19b93593777b78bd20da86
1044b43637fa7f70fb19b93593777b78bd20da86
* libtiff/tif_pixarlog.c, libtiff/tif_luv.c: fix heap-based buffer overflow on generation of PixarLog / LUV compressed files, with ColorMap, TransferFunction attached and nasty plays with bitspersample. The fix for LUV has not been tested, but suffers from the same kind of issue of PixarLog. Reported by Agostino Sarubbo. Fixes http://bugzilla.maptools.org/show_bug.cgi?id=2604
LogLuvEncode24(TIFF* tif, uint8* bp, tmsize_t cc, uint16 s) { static const char module[] = "LogLuvEncode24"; LogLuvState* sp = EncoderState(tif); tmsize_t i; tmsize_t npixels; tmsize_t occ; uint8* op; uint32* tp; assert(s == 0); assert(sp != NULL); npixels = cc / sp->pixel_size; if (sp->user_datafmt == SGILOGDATAFMT_RAW) tp = (uint32*) bp; else { tp = (uint32*) sp->tbuf; if(sp->tbuflen < npixels) { TIFFErrorExt(tif->tif_clientdata, module, "Translation buffer too short"); return (0); } (*sp->tfunc)(sp, bp, npixels); } /* write out encoded pixels */ op = tif->tif_rawcp; occ = tif->tif_rawdatasize - tif->tif_rawcc; for (i = npixels; i--; ) { if (occ < 3) { tif->tif_rawcp = op; tif->tif_rawcc = tif->tif_rawdatasize - occ; if (!TIFFFlushData1(tif)) return (-1); op = tif->tif_rawcp; occ = tif->tif_rawdatasize - tif->tif_rawcc; } *op++ = (uint8)(*tp >> 16); *op++ = (uint8)(*tp >> 8 & 0xff); *op++ = (uint8)(*tp++ & 0xff); occ -= 3; } tif->tif_rawcp = op; tif->tif_rawcc = tif->tif_rawdatasize - occ; return (1); }
LogLuvEncode24(TIFF* tif, uint8* bp, tmsize_t cc, uint16 s) { static const char module[] = "LogLuvEncode24"; LogLuvState* sp = EncoderState(tif); tmsize_t i; tmsize_t npixels; tmsize_t occ; uint8* op; uint32* tp; assert(s == 0); assert(sp != NULL); npixels = cc / sp->pixel_size; if (sp->user_datafmt == SGILOGDATAFMT_RAW) tp = (uint32*) bp; else { tp = (uint32*) sp->tbuf; if(sp->tbuflen < npixels) { TIFFErrorExt(tif->tif_clientdata, module, "Translation buffer too short"); return (0); } (*sp->tfunc)(sp, bp, npixels); } /* write out encoded pixels */ op = tif->tif_rawcp; occ = tif->tif_rawdatasize - tif->tif_rawcc; for (i = npixels; i--; ) { if (occ < 3) { tif->tif_rawcp = op; tif->tif_rawcc = tif->tif_rawdatasize - occ; if (!TIFFFlushData1(tif)) return (-1); op = tif->tif_rawcp; occ = tif->tif_rawdatasize - tif->tif_rawcc; } *op++ = (uint8)(*tp >> 16); *op++ = (uint8)(*tp >> 8 & 0xff); *op++ = (uint8)(*tp++ & 0xff); occ -= 3; } tif->tif_rawcp = op; tif->tif_rawcc = tif->tif_rawdatasize - occ; return (1); }
C
libtiff
0
null
null
null
https://github.com/chromium/chromium/commit/befb46ae3385fa13975521e9a2281e35805b339e
befb46ae3385fa13975521e9a2281e35805b339e
2009-10-23 Chris Evans <[email protected]> Reviewed by Adam Barth. Added test for bug 27239 (ignore Refresh for view source mode). https://bugs.webkit.org/show_bug.cgi?id=27239 * http/tests/security/view-source-no-refresh.html: Added * http/tests/security/view-source-no-refresh-expected.txt: Added * http/tests/security/resources/view-source-no-refresh.php: Added 2009-10-23 Chris Evans <[email protected]> Reviewed by Adam Barth. Ignore the Refresh header if we're in view source mode. https://bugs.webkit.org/show_bug.cgi?id=27239 Test: http/tests/security/view-source-no-refresh.html * loader/FrameLoader.cpp: ignore Refresh in view-source mode. git-svn-id: svn://svn.chromium.org/blink/trunk@50018 bbb929c8-8fbe-4397-9dbb-9b2b20218538
String FrameLoader::outgoingOrigin() const { return m_frame->document()->securityOrigin()->toString(); }
String FrameLoader::outgoingOrigin() const { return m_frame->document()->securityOrigin()->toString(); }
C
Chrome
0
CVE-2018-9510
https://www.cvedetails.com/cve/CVE-2018-9510/
CWE-200
https://android.googlesource.com/platform/system/bt/+/6e4b8e505173f803a5fc05abc09f64eef89dc308
6e4b8e505173f803a5fc05abc09f64eef89dc308
Checks the SMP length to fix OOB read Bug: 111937065 Test: manual Change-Id: I330880a6e1671d0117845430db4076dfe1aba688 Merged-In: I330880a6e1671d0117845430db4076dfe1aba688 (cherry picked from commit fceb753bda651c4135f3f93a510e5fcb4c7542b8)
void smp_send_init(tSMP_CB* p_cb, tSMP_INT_DATA* p_data) { SMP_TRACE_DEBUG("%s", __func__); smp_send_cmd(SMP_OPCODE_INIT, p_cb); }
void smp_send_init(tSMP_CB* p_cb, tSMP_INT_DATA* p_data) { SMP_TRACE_DEBUG("%s", __func__); smp_send_cmd(SMP_OPCODE_INIT, p_cb); }
C
Android
0
CVE-2015-8935
https://www.cvedetails.com/cve/CVE-2015-8935/
CWE-79
https://github.com/php/php-src/commit/996faf964bba1aec06b153b370a7f20d3dd2bb8b?w=1
996faf964bba1aec06b153b370a7f20d3dd2bb8b
Update header handling to RFC 7230
SAPI_API double sapi_get_request_time(TSRMLS_D) { if(SG(global_request_time)) return SG(global_request_time); if (sapi_module.get_request_time && SG(server_context)) { SG(global_request_time) = sapi_module.get_request_time(TSRMLS_C); } else { struct timeval tp = {0}; if (!gettimeofday(&tp, NULL)) { SG(global_request_time) = (double)(tp.tv_sec + tp.tv_usec / 1000000.00); } else { SG(global_request_time) = (double)time(0); } } return SG(global_request_time); }
SAPI_API double sapi_get_request_time(TSRMLS_D) { if(SG(global_request_time)) return SG(global_request_time); if (sapi_module.get_request_time && SG(server_context)) { SG(global_request_time) = sapi_module.get_request_time(TSRMLS_C); } else { struct timeval tp = {0}; if (!gettimeofday(&tp, NULL)) { SG(global_request_time) = (double)(tp.tv_sec + tp.tv_usec / 1000000.00); } else { SG(global_request_time) = (double)time(0); } } return SG(global_request_time); }
C
php-src
0
CVE-2011-3055
https://www.cvedetails.com/cve/CVE-2011-3055/
null
https://github.com/chromium/chromium/commit/e9372a1bfd3588a80fcf49aa07321f0971dd6091
e9372a1bfd3588a80fcf49aa07321f0971dd6091
[V8] Pass Isolate to throwNotEnoughArgumentsError() https://bugs.webkit.org/show_bug.cgi?id=86983 Reviewed by Adam Barth. The objective is to pass Isolate around in V8 bindings. This patch passes Isolate to throwNotEnoughArgumentsError(). No tests. No change in behavior. * bindings/scripts/CodeGeneratorV8.pm: (GenerateArgumentsCountCheck): (GenerateEventConstructorCallback): * bindings/scripts/test/V8/V8Float64Array.cpp: (WebCore::Float64ArrayV8Internal::fooCallback): * bindings/scripts/test/V8/V8TestActiveDOMObject.cpp: (WebCore::TestActiveDOMObjectV8Internal::excitingFunctionCallback): (WebCore::TestActiveDOMObjectV8Internal::postMessageCallback): * bindings/scripts/test/V8/V8TestCustomNamedGetter.cpp: (WebCore::TestCustomNamedGetterV8Internal::anotherFunctionCallback): * bindings/scripts/test/V8/V8TestEventConstructor.cpp: (WebCore::V8TestEventConstructor::constructorCallback): * bindings/scripts/test/V8/V8TestEventTarget.cpp: (WebCore::TestEventTargetV8Internal::itemCallback): (WebCore::TestEventTargetV8Internal::dispatchEventCallback): * bindings/scripts/test/V8/V8TestInterface.cpp: (WebCore::TestInterfaceV8Internal::supplementalMethod2Callback): (WebCore::V8TestInterface::constructorCallback): * bindings/scripts/test/V8/V8TestMediaQueryListListener.cpp: (WebCore::TestMediaQueryListListenerV8Internal::methodCallback): * bindings/scripts/test/V8/V8TestNamedConstructor.cpp: (WebCore::V8TestNamedConstructorConstructorCallback): * bindings/scripts/test/V8/V8TestObj.cpp: (WebCore::TestObjV8Internal::voidMethodWithArgsCallback): (WebCore::TestObjV8Internal::intMethodWithArgsCallback): (WebCore::TestObjV8Internal::objMethodWithArgsCallback): (WebCore::TestObjV8Internal::methodWithSequenceArgCallback): (WebCore::TestObjV8Internal::methodReturningSequenceCallback): (WebCore::TestObjV8Internal::methodThatRequiresAllArgsAndThrowsCallback): (WebCore::TestObjV8Internal::serializedValueCallback): (WebCore::TestObjV8Internal::idbKeyCallback): (WebCore::TestObjV8Internal::optionsObjectCallback): (WebCore::TestObjV8Internal::methodWithNonOptionalArgAndOptionalArgCallback): (WebCore::TestObjV8Internal::methodWithNonOptionalArgAndTwoOptionalArgsCallback): (WebCore::TestObjV8Internal::methodWithCallbackArgCallback): (WebCore::TestObjV8Internal::methodWithNonCallbackArgAndCallbackArgCallback): (WebCore::TestObjV8Internal::overloadedMethod1Callback): (WebCore::TestObjV8Internal::overloadedMethod2Callback): (WebCore::TestObjV8Internal::overloadedMethod3Callback): (WebCore::TestObjV8Internal::overloadedMethod4Callback): (WebCore::TestObjV8Internal::overloadedMethod5Callback): (WebCore::TestObjV8Internal::overloadedMethod6Callback): (WebCore::TestObjV8Internal::overloadedMethod7Callback): (WebCore::TestObjV8Internal::overloadedMethod11Callback): (WebCore::TestObjV8Internal::overloadedMethod12Callback): (WebCore::TestObjV8Internal::enabledAtRuntimeMethod1Callback): (WebCore::TestObjV8Internal::enabledAtRuntimeMethod2Callback): (WebCore::TestObjV8Internal::convert1Callback): (WebCore::TestObjV8Internal::convert2Callback): (WebCore::TestObjV8Internal::convert3Callback): (WebCore::TestObjV8Internal::convert4Callback): (WebCore::TestObjV8Internal::convert5Callback): (WebCore::TestObjV8Internal::strictFunctionCallback): (WebCore::V8TestObj::constructorCallback): * bindings/scripts/test/V8/V8TestSerializedScriptValueInterface.cpp: (WebCore::TestSerializedScriptValueInterfaceV8Internal::acceptTransferListCallback): (WebCore::V8TestSerializedScriptValueInterface::constructorCallback): * 
bindings/v8/ScriptController.cpp: (WebCore::setValueAndClosePopupCallback): * bindings/v8/V8Proxy.cpp: (WebCore::V8Proxy::throwNotEnoughArgumentsError): * bindings/v8/V8Proxy.h: (V8Proxy): * bindings/v8/custom/V8AudioContextCustom.cpp: (WebCore::V8AudioContext::constructorCallback): * bindings/v8/custom/V8DataViewCustom.cpp: (WebCore::V8DataView::getInt8Callback): (WebCore::V8DataView::getUint8Callback): (WebCore::V8DataView::setInt8Callback): (WebCore::V8DataView::setUint8Callback): * bindings/v8/custom/V8DirectoryEntryCustom.cpp: (WebCore::V8DirectoryEntry::getDirectoryCallback): (WebCore::V8DirectoryEntry::getFileCallback): * bindings/v8/custom/V8IntentConstructor.cpp: (WebCore::V8Intent::constructorCallback): * bindings/v8/custom/V8SVGLengthCustom.cpp: (WebCore::V8SVGLength::convertToSpecifiedUnitsCallback): * bindings/v8/custom/V8WebGLRenderingContextCustom.cpp: (WebCore::getObjectParameter): (WebCore::V8WebGLRenderingContext::getAttachedShadersCallback): (WebCore::V8WebGLRenderingContext::getExtensionCallback): (WebCore::V8WebGLRenderingContext::getFramebufferAttachmentParameterCallback): (WebCore::V8WebGLRenderingContext::getParameterCallback): (WebCore::V8WebGLRenderingContext::getProgramParameterCallback): (WebCore::V8WebGLRenderingContext::getShaderParameterCallback): (WebCore::V8WebGLRenderingContext::getUniformCallback): (WebCore::vertexAttribAndUniformHelperf): (WebCore::uniformHelperi): (WebCore::uniformMatrixHelper): * bindings/v8/custom/V8WebKitMutationObserverCustom.cpp: (WebCore::V8WebKitMutationObserver::constructorCallback): (WebCore::V8WebKitMutationObserver::observeCallback): * bindings/v8/custom/V8WebSocketCustom.cpp: (WebCore::V8WebSocket::constructorCallback): (WebCore::V8WebSocket::sendCallback): * bindings/v8/custom/V8XMLHttpRequestCustom.cpp: (WebCore::V8XMLHttpRequest::openCallback): git-svn-id: svn://svn.chromium.org/blink/trunk@117736 bbb929c8-8fbe-4397-9dbb-9b2b20218538
v8::Handle<v8::Value> V8WebGLRenderingContext::getProgramParameterCallback(const v8::Arguments& args) { INC_STATS("DOM.WebGLRenderingContext.getProgramParameter()"); if (args.Length() != 2) return V8Proxy::throwNotEnoughArgumentsError(args.GetIsolate()); ExceptionCode ec = 0; WebGLRenderingContext* context = V8WebGLRenderingContext::toNative(args.Holder()); if (args.Length() > 0 && !isUndefinedOrNull(args[0]) && !V8WebGLProgram::HasInstance(args[0])) { V8Proxy::throwTypeError(); return notHandledByInterceptor(); } WebGLProgram* program = V8WebGLProgram::HasInstance(args[0]) ? V8WebGLProgram::toNative(v8::Handle<v8::Object>::Cast(args[0])) : 0; unsigned pname = toInt32(args[1]); WebGLGetInfo info = context->getProgramParameter(program, pname, ec); if (ec) { V8Proxy::setDOMException(ec, args.GetIsolate()); return v8::Undefined(); } return toV8Object(info, args.GetIsolate()); }
v8::Handle<v8::Value> V8WebGLRenderingContext::getProgramParameterCallback(const v8::Arguments& args) { INC_STATS("DOM.WebGLRenderingContext.getProgramParameter()"); if (args.Length() != 2) return V8Proxy::throwNotEnoughArgumentsError(); ExceptionCode ec = 0; WebGLRenderingContext* context = V8WebGLRenderingContext::toNative(args.Holder()); if (args.Length() > 0 && !isUndefinedOrNull(args[0]) && !V8WebGLProgram::HasInstance(args[0])) { V8Proxy::throwTypeError(); return notHandledByInterceptor(); } WebGLProgram* program = V8WebGLProgram::HasInstance(args[0]) ? V8WebGLProgram::toNative(v8::Handle<v8::Object>::Cast(args[0])) : 0; unsigned pname = toInt32(args[1]); WebGLGetInfo info = context->getProgramParameter(program, pname, ec); if (ec) { V8Proxy::setDOMException(ec, args.GetIsolate()); return v8::Undefined(); } return toV8Object(info, args.GetIsolate()); }
C
Chrome
1
CVE-2019-5755
https://www.cvedetails.com/cve/CVE-2019-5755/
CWE-189
https://github.com/chromium/chromium/commit/971548cdca2d4c0a6fedd3db0c94372c2a27eac3
971548cdca2d4c0a6fedd3db0c94372c2a27eac3
Make MediaStreamDispatcherHost per-request instead of per-frame. Instead of having RenderFrameHost own a single MSDH to handle all requests from a frame, MSDH objects will be owned by a strong binding. A consequence of this is that an additional requester ID is added to requests to MediaStreamManager, so that an MSDH is able to cancel only requests generated by it. In practice, MSDH will continue to be per frame in most cases since each frame normally makes a single request for an MSDH object. This fixes a lifetime issue caused by the IO thread executing tasks after the RenderFrameHost dies. Drive-by: Fix some minor lint issues. Bug: 912520 Change-Id: I52742ffc98b9fc57ce8e6f5093a61aed86d3e516 Reviewed-on: https://chromium-review.googlesource.com/c/1369799 Reviewed-by: Emircan Uysaler <[email protected]> Reviewed-by: Ken Buchanan <[email protected]> Reviewed-by: Olga Sharonova <[email protected]> Commit-Queue: Guido Urdaneta <[email protected]> Cr-Commit-Position: refs/heads/master@{#616347}
void MediaStreamManager::SetUpRequest(const std::string& label) { DCHECK_CURRENTLY_ON(BrowserThread::IO); DeviceRequest* request = FindRequest(label); if (!request) { DVLOG(1) << "SetUpRequest label " << label << " doesn't exist!!"; return; // This can happen if the request has been canceled. } request->SetAudioType(AdjustAudioStreamTypeBasedOnCommandLineSwitches( request->controls.audio.stream_type)); request->SetVideoType(request->controls.video.stream_type); const bool is_display_capture = request->video_type() == MEDIA_DISPLAY_VIDEO_CAPTURE; if (is_display_capture && !SetUpDisplayCaptureRequest(request)) { FinalizeRequestFailed(label, request, MEDIA_DEVICE_SCREEN_CAPTURE_FAILURE); return; } const bool is_tab_capture = request->audio_type() == MEDIA_GUM_TAB_AUDIO_CAPTURE || request->video_type() == MEDIA_GUM_TAB_VIDEO_CAPTURE; if (is_tab_capture) { if (!SetUpTabCaptureRequest(request, label)) { FinalizeRequestFailed(label, request, MEDIA_DEVICE_TAB_CAPTURE_FAILURE); } return; } const bool is_screen_capture = request->video_type() == MEDIA_GUM_DESKTOP_VIDEO_CAPTURE; if (is_screen_capture && !SetUpScreenCaptureRequest(request)) { FinalizeRequestFailed(label, request, MEDIA_DEVICE_SCREEN_CAPTURE_FAILURE); return; } if (!is_tab_capture && !is_screen_capture && !is_display_capture) { if (IsDeviceMediaType(request->audio_type()) || IsDeviceMediaType(request->video_type())) { StartEnumeration(request, label); return; } if (!SetUpDeviceCaptureRequest(request, MediaDeviceEnumeration())) { FinalizeRequestFailed(label, request, MEDIA_DEVICE_NO_HARDWARE); return; } } ReadOutputParamsAndPostRequestToUI(label, request, MediaDeviceEnumeration()); }
void MediaStreamManager::SetUpRequest(const std::string& label) { DCHECK_CURRENTLY_ON(BrowserThread::IO); DeviceRequest* request = FindRequest(label); if (!request) { DVLOG(1) << "SetUpRequest label " << label << " doesn't exist!!"; return; // This can happen if the request has been canceled. } request->SetAudioType(AdjustAudioStreamTypeBasedOnCommandLineSwitches( request->controls.audio.stream_type)); request->SetVideoType(request->controls.video.stream_type); const bool is_display_capture = request->video_type() == MEDIA_DISPLAY_VIDEO_CAPTURE; if (is_display_capture && !SetUpDisplayCaptureRequest(request)) { FinalizeRequestFailed(label, request, MEDIA_DEVICE_SCREEN_CAPTURE_FAILURE); return; } const bool is_tab_capture = request->audio_type() == MEDIA_GUM_TAB_AUDIO_CAPTURE || request->video_type() == MEDIA_GUM_TAB_VIDEO_CAPTURE; if (is_tab_capture) { if (!SetUpTabCaptureRequest(request, label)) { FinalizeRequestFailed(label, request, MEDIA_DEVICE_TAB_CAPTURE_FAILURE); } return; } const bool is_screen_capture = request->video_type() == MEDIA_GUM_DESKTOP_VIDEO_CAPTURE; if (is_screen_capture && !SetUpScreenCaptureRequest(request)) { FinalizeRequestFailed(label, request, MEDIA_DEVICE_SCREEN_CAPTURE_FAILURE); return; } if (!is_tab_capture && !is_screen_capture && !is_display_capture) { if (IsDeviceMediaType(request->audio_type()) || IsDeviceMediaType(request->video_type())) { StartEnumeration(request, label); return; } if (!SetUpDeviceCaptureRequest(request, MediaDeviceEnumeration())) { FinalizeRequestFailed(label, request, MEDIA_DEVICE_NO_HARDWARE); return; } } ReadOutputParamsAndPostRequestToUI(label, request, MediaDeviceEnumeration()); }
C
Chrome
0
CVE-2013-6401
https://www.cvedetails.com/cve/CVE-2013-6401/
CWE-310
https://github.com/akheron/jansson/commit/8f80c2d83808150724d31793e6ade92749b1faa4
8f80c2d83808150724d31793e6ade92749b1faa4
CVE-2013-6401: Change hash function, randomize hashes Thanks to Florian Weimer and Eric Sesterhenn for reporting, reviewing and testing.
void *json_object_iter_next(json_t *json, void *iter) { json_object_t *object; if(!json_is_object(json) || iter == NULL) return NULL; object = json_to_object(json); return hashtable_iter_next(&object->hashtable, iter); }
void *json_object_iter_next(json_t *json, void *iter) { json_object_t *object; if(!json_is_object(json) || iter == NULL) return NULL; object = json_to_object(json); return hashtable_iter_next(&object->hashtable, iter); }
C
jansson
0
CVE-2019-14763
https://www.cvedetails.com/cve/CVE-2019-14763/
CWE-189
https://github.com/torvalds/linux/commit/c91815b596245fd7da349ecc43c8def670d2269e
c91815b596245fd7da349ecc43c8def670d2269e
usb: dwc3: gadget: never call ->complete() from ->ep_queue() This is a requirement which has always existed but, somehow, wasn't reflected in the documentation and problems weren't found until now when Tuba Yavuz found a possible deadlock happening between dwc3 and f_hid. She described the situation as follows: spin_lock_irqsave(&hidg->write_spinlock, flags); // first acquire /* we our function has been disabled by host */ if (!hidg->req) { free_ep_req(hidg->in_ep, hidg->req); goto try_again; } [...] status = usb_ep_queue(hidg->in_ep, hidg->req, GFP_ATOMIC); => [...] => usb_gadget_giveback_request => f_hidg_req_complete => spin_lock_irqsave(&hidg->write_spinlock, flags); // second acquire Note that this happens because dwc3 would call ->complete() on a failed usb_ep_queue() due to failed Start Transfer command. This is, anyway, a theoretical situation because dwc3 currently uses "No Response Update Transfer" command for Bulk and Interrupt endpoints. It's still good to make this case impossible to happen even if the "No Reponse Update Transfer" command is changed. Reported-by: Tuba Yavuz <[email protected]> Signed-off-by: Felipe Balbi <[email protected]> Cc: stable <[email protected]> Signed-off-by: Greg Kroah-Hartman <[email protected]>
static void dwc3_disconnect_gadget(struct dwc3 *dwc) { if (dwc->gadget_driver && dwc->gadget_driver->disconnect) { spin_unlock(&dwc->lock); dwc->gadget_driver->disconnect(&dwc->gadget); spin_lock(&dwc->lock); } }
static void dwc3_disconnect_gadget(struct dwc3 *dwc) { if (dwc->gadget_driver && dwc->gadget_driver->disconnect) { spin_unlock(&dwc->lock); dwc->gadget_driver->disconnect(&dwc->gadget); spin_lock(&dwc->lock); } }
C
linux
0
CVE-2016-3760
https://www.cvedetails.com/cve/CVE-2016-3760/
CWE-20
https://android.googlesource.com/platform/packages/apps/Bluetooth/+/122feb9a0b04290f55183ff2f0384c6c53756bd8
122feb9a0b04290f55183ff2f0384c6c53756bd8
Add guest mode functionality (3/3) Add a flag to enable() to start Bluetooth in restricted mode. In restricted mode, all devices that are paired during restricted mode are deleted upon leaving restricted mode. Right now restricted mode is only entered while a guest user is active. Bug: 27410683 Change-Id: If4a8855faf362d7f6de509d7ddc7197d1ac75cee
static bool initNative(JNIEnv* env, jobject obj) { ALOGV("%s:",__FUNCTION__); sJniAdapterServiceObj = env->NewGlobalRef(obj); sJniCallbacksObj = env->NewGlobalRef(env->GetObjectField(obj, sJniCallbacksField)); if (sBluetoothInterface) { int ret = sBluetoothInterface->init(&sBluetoothCallbacks); if (ret != BT_STATUS_SUCCESS) { ALOGE("Error while setting the callbacks: %d\n", ret); sBluetoothInterface = NULL; return JNI_FALSE; } ret = sBluetoothInterface->set_os_callouts(&sBluetoothOsCallouts); if (ret != BT_STATUS_SUCCESS) { ALOGE("Error while setting Bluetooth callouts: %d\n", ret); sBluetoothInterface->cleanup(); sBluetoothInterface = NULL; return JNI_FALSE; } if ( (sBluetoothSocketInterface = (btsock_interface_t *) sBluetoothInterface->get_profile_interface(BT_PROFILE_SOCKETS_ID)) == NULL) { ALOGE("Error getting socket interface"); } return JNI_TRUE; } return JNI_FALSE; }
static bool initNative(JNIEnv* env, jobject obj) { ALOGV("%s:",__FUNCTION__); sJniAdapterServiceObj = env->NewGlobalRef(obj); sJniCallbacksObj = env->NewGlobalRef(env->GetObjectField(obj, sJniCallbacksField)); if (sBluetoothInterface) { int ret = sBluetoothInterface->init(&sBluetoothCallbacks); if (ret != BT_STATUS_SUCCESS) { ALOGE("Error while setting the callbacks: %d\n", ret); sBluetoothInterface = NULL; return JNI_FALSE; } ret = sBluetoothInterface->set_os_callouts(&sBluetoothOsCallouts); if (ret != BT_STATUS_SUCCESS) { ALOGE("Error while setting Bluetooth callouts: %d\n", ret); sBluetoothInterface->cleanup(); sBluetoothInterface = NULL; return JNI_FALSE; } if ( (sBluetoothSocketInterface = (btsock_interface_t *) sBluetoothInterface->get_profile_interface(BT_PROFILE_SOCKETS_ID)) == NULL) { ALOGE("Error getting socket interface"); } return JNI_TRUE; } return JNI_FALSE; }
C
Android
0
CVE-2013-0829
https://www.cvedetails.com/cve/CVE-2013-0829/
CWE-264
https://github.com/chromium/chromium/commit/d123966ec156cd2f92fdada36be39694641b479e
d123966ec156cd2f92fdada36be39694641b479e
File permission fix: now we selectively grant read permission for Sandboxed files We also need to check the read permission and call GrantReadFile() for sandboxed files for CreateSnapshotFile(). BUG=162114 TEST=manual Review URL: https://codereview.chromium.org/11280231 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@170181 0039d316-1c4b-4281-b951-d872f2087c98
void FileAPIMessageFilter::DidGetMetadata( int request_id, base::PlatformFileError result, const base::PlatformFileInfo& info, const FilePath& platform_path) { if (result == base::PLATFORM_FILE_OK) Send(new FileSystemMsg_DidReadMetadata(request_id, info, platform_path)); else Send(new FileSystemMsg_DidFail(request_id, result)); UnregisterOperation(request_id); }
void FileAPIMessageFilter::DidGetMetadata( int request_id, base::PlatformFileError result, const base::PlatformFileInfo& info, const FilePath& platform_path) { if (result == base::PLATFORM_FILE_OK) Send(new FileSystemMsg_DidReadMetadata(request_id, info, platform_path)); else Send(new FileSystemMsg_DidFail(request_id, result)); UnregisterOperation(request_id); }
C
Chrome
0
CVE-2010-5313
https://www.cvedetails.com/cve/CVE-2010-5313/
CWE-362
https://github.com/torvalds/linux/commit/fc3a9157d3148ab91039c75423da8ef97be3e105
fc3a9157d3148ab91039c75423da8ef97be3e105
KVM: X86: Don't report L2 emulation failures to user-space This patch prevents that emulation failures which result from emulating an instruction for an L2-Guest results in being reported to userspace. Without this patch a malicious L2-Guest would be able to kill the L1 by triggering a race-condition between an vmexit and the instruction emulator. With this patch the L2 will most likely only kill itself in this situation. Signed-off-by: Joerg Roedel <[email protected]> Signed-off-by: Marcelo Tosatti <[email protected]>
static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu, struct kvm_vcpu_events *events) { events->exception.injected = vcpu->arch.exception.pending && !kvm_exception_is_soft(vcpu->arch.exception.nr); events->exception.nr = vcpu->arch.exception.nr; events->exception.has_error_code = vcpu->arch.exception.has_error_code; events->exception.pad = 0; events->exception.error_code = vcpu->arch.exception.error_code; events->interrupt.injected = vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft; events->interrupt.nr = vcpu->arch.interrupt.nr; events->interrupt.soft = 0; events->interrupt.shadow = kvm_x86_ops->get_interrupt_shadow(vcpu, KVM_X86_SHADOW_INT_MOV_SS | KVM_X86_SHADOW_INT_STI); events->nmi.injected = vcpu->arch.nmi_injected; events->nmi.pending = vcpu->arch.nmi_pending; events->nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu); events->nmi.pad = 0; events->sipi_vector = vcpu->arch.sipi_vector; events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING | KVM_VCPUEVENT_VALID_SIPI_VECTOR | KVM_VCPUEVENT_VALID_SHADOW); memset(&events->reserved, 0, sizeof(events->reserved)); }
static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu, struct kvm_vcpu_events *events) { events->exception.injected = vcpu->arch.exception.pending && !kvm_exception_is_soft(vcpu->arch.exception.nr); events->exception.nr = vcpu->arch.exception.nr; events->exception.has_error_code = vcpu->arch.exception.has_error_code; events->exception.pad = 0; events->exception.error_code = vcpu->arch.exception.error_code; events->interrupt.injected = vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft; events->interrupt.nr = vcpu->arch.interrupt.nr; events->interrupt.soft = 0; events->interrupt.shadow = kvm_x86_ops->get_interrupt_shadow(vcpu, KVM_X86_SHADOW_INT_MOV_SS | KVM_X86_SHADOW_INT_STI); events->nmi.injected = vcpu->arch.nmi_injected; events->nmi.pending = vcpu->arch.nmi_pending; events->nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu); events->nmi.pad = 0; events->sipi_vector = vcpu->arch.sipi_vector; events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING | KVM_VCPUEVENT_VALID_SIPI_VECTOR | KVM_VCPUEVENT_VALID_SHADOW); memset(&events->reserved, 0, sizeof(events->reserved)); }
C
linux
0
CVE-2016-10066
https://www.cvedetails.com/cve/CVE-2016-10066/
CWE-119
https://github.com/ImageMagick/ImageMagick/commit/f6e9d0d9955e85bdd7540b251cd50d598dacc5e6
f6e9d0d9955e85bdd7540b251cd50d598dacc5e6
null
static MagickBooleanType ParseImageResourceBlocks(Image *image, const unsigned char *blocks,size_t length, MagickBooleanType *has_merged_image) { const unsigned char *p; StringInfo *profile; unsigned int count, long_sans; unsigned short id, short_sans; if (length < 16) return(MagickFalse); profile=BlobToStringInfo((const void *) NULL,length); SetStringInfoDatum(profile,blocks); (void) SetImageProfile(image,"8bim",profile); profile=DestroyStringInfo(profile); for (p=blocks; (p >= blocks) && (p < (blocks+length-16)); ) { if (LocaleNCompare((const char *) p,"8BIM",4) != 0) break; p=PushLongPixel(MSBEndian,p,&long_sans); p=PushShortPixel(MSBEndian,p,&id); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushLongPixel(MSBEndian,p,&count); switch (id) { case 0x03ed: { char value[MaxTextExtent]; unsigned short resolution; /* Resolution info. */ p=PushShortPixel(MSBEndian,p,&resolution); image->x_resolution=(double) resolution; (void) FormatLocaleString(value,MaxTextExtent,"%g", image->x_resolution); (void) SetImageProperty(image,"tiff:XResolution",value); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushShortPixel(MSBEndian,p,&resolution); image->y_resolution=(double) resolution; (void) FormatLocaleString(value,MaxTextExtent,"%g", image->y_resolution); (void) SetImageProperty(image,"tiff:YResolution",value); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushShortPixel(MSBEndian,p,&short_sans); image->units=PixelsPerInchResolution; break; } case 0x0421: { if (*(p+4) == 0) *has_merged_image=MagickFalse; p+=count; break; } default: { p+=count; break; } } if ((count & 0x01) != 0) p++; } return(MagickTrue); }
static MagickBooleanType ParseImageResourceBlocks(Image *image, const unsigned char *blocks,size_t length, MagickBooleanType *has_merged_image) { const unsigned char *p; StringInfo *profile; unsigned int count, long_sans; unsigned short id, short_sans; if (length < 16) return(MagickFalse); profile=BlobToStringInfo((const void *) NULL,length); SetStringInfoDatum(profile,blocks); (void) SetImageProfile(image,"8bim",profile); profile=DestroyStringInfo(profile); for (p=blocks; (p >= blocks) && (p < (blocks+length-16)); ) { if (LocaleNCompare((const char *) p,"8BIM",4) != 0) break; p=PushLongPixel(MSBEndian,p,&long_sans); p=PushShortPixel(MSBEndian,p,&id); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushLongPixel(MSBEndian,p,&count); switch (id) { case 0x03ed: { char value[MaxTextExtent]; unsigned short resolution; /* Resolution info. */ p=PushShortPixel(MSBEndian,p,&resolution); image->x_resolution=(double) resolution; (void) FormatLocaleString(value,MaxTextExtent,"%g", image->x_resolution); (void) SetImageProperty(image,"tiff:XResolution",value); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushShortPixel(MSBEndian,p,&resolution); image->y_resolution=(double) resolution; (void) FormatLocaleString(value,MaxTextExtent,"%g", image->y_resolution); (void) SetImageProperty(image,"tiff:YResolution",value); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushShortPixel(MSBEndian,p,&short_sans); image->units=PixelsPerInchResolution; break; } case 0x0421: { if (*(p+4) == 0) *has_merged_image=MagickFalse; p+=count; break; } default: { p+=count; break; } } if ((count & 0x01) != 0) p++; } return(MagickTrue); }
C
ImageMagick
0
CVE-2016-5202
null
null
https://github.com/chromium/chromium/commit/79708b391b2e91d63b5d009ec6202c7d7ededf93
79708b391b2e91d63b5d009ec6202c7d7ededf93
Ensure that OpenVR only adds placeholder buttons when needed. The current implementation of the OpenVRGamepadHelper always adds the optional grip and secondary axes buttons; however, if those buttons are missing and no additional buttons need to be supported, they should not be included. A prime example of this is the vive controller, which has a trigger, a grip, and a touchpad, but no secondary axis button. This is essentially the controller that the new TestGamepadOptionalData test builds. Bug: 964026 Change-Id: I1de93b5bd7bd0d9e75013cf33b8f333e5d70270f Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1627914 Reviewed-by: Bill Orr <[email protected]> Commit-Queue: Alexander Cooper <[email protected]> Cr-Commit-Position: refs/heads/master@{#662843}
device::XrButtonId GetAxisId(unsigned int offset) { return static_cast<device::XrButtonId>(device::XrButtonId::kAxisPrimary + offset); }
device::XrButtonId GetAxisId(unsigned int offset) { return static_cast<device::XrButtonId>(device::XrButtonId::kAxisPrimary + offset); }
C
Chrome
0
CVE-2016-5126
https://www.cvedetails.com/cve/CVE-2016-5126/
CWE-119
https://git.qemu.org/?p=qemu.git;a=commit;h=a6b3167fa0e825aebb5a7cd8b437b6d41584a196
a6b3167fa0e825aebb5a7cd8b437b6d41584a196
null
static bool is_request_lun_aligned(int64_t sector_num, int nb_sectors, IscsiLun *iscsilun) { if ((sector_num * BDRV_SECTOR_SIZE) % iscsilun->block_size || (nb_sectors * BDRV_SECTOR_SIZE) % iscsilun->block_size) { error_report("iSCSI misaligned request: " "iscsilun->block_size %u, sector_num %" PRIi64 ", nb_sectors %d", iscsilun->block_size, sector_num, nb_sectors); return 0; } return 1; }
static bool is_request_lun_aligned(int64_t sector_num, int nb_sectors, IscsiLun *iscsilun) { if ((sector_num * BDRV_SECTOR_SIZE) % iscsilun->block_size || (nb_sectors * BDRV_SECTOR_SIZE) % iscsilun->block_size) { error_report("iSCSI misaligned request: " "iscsilun->block_size %u, sector_num %" PRIi64 ", nb_sectors %d", iscsilun->block_size, sector_num, nb_sectors); return 0; } return 1; }
C
qemu
0
CVE-2015-6763
https://www.cvedetails.com/cve/CVE-2015-6763/
null
https://github.com/chromium/chromium/commit/f1574f25e1402e748bf2bd7e28ce3dd96ceb1ca4
f1574f25e1402e748bf2bd7e28ce3dd96ceb1ca4
MacViews: Enable secure text input for password Textfields. In Cocoa the NSTextInputContext automatically enables secure text input when activated and it's in the secure text entry mode. RenderWidgetHostViewMac did the similar thing for ages following the WebKit example. views::Textfield needs to do the same thing in a fashion that's sycnrhonized with RenderWidgetHostViewMac, otherwise the race conditions are possible when the Textfield gets focus, activates the secure text input mode and the RWHVM loses focus immediately afterwards and disables the secure text input instead of leaving it in the enabled state. BUG=818133,677220 Change-Id: I6db6c4b59e4a1a72cbb7f8c7056f71b04a3df08b Reviewed-on: https://chromium-review.googlesource.com/943064 Commit-Queue: Michail Pishchagin <[email protected]> Reviewed-by: Pavel Feldman <[email protected]> Reviewed-by: Avi Drissman <[email protected]> Reviewed-by: Peter Kasting <[email protected]> Cr-Commit-Position: refs/heads/master@{#542517}
void LocalDOMWindow::setDefaultStatus(const String& string) { default_status_ = string; }
void LocalDOMWindow::setDefaultStatus(const String& string) { default_status_ = string; }
C
Chrome
0
CVE-2010-1152
https://www.cvedetails.com/cve/CVE-2010-1152/
CWE-20
https://github.com/memcached/memcached/commit/d9cd01ede97f4145af9781d448c62a3318952719
d9cd01ede97f4145af9781d448c62a3318952719
Use strncmp when checking for large ascii multigets.
static void handle_binary_protocol_error(conn *c) { write_bin_error(c, PROTOCOL_BINARY_RESPONSE_EINVAL, 0); if (settings.verbose) { fprintf(stderr, "Protocol error (opcode %02x), close connection %d\n", c->binary_header.request.opcode, c->sfd); } c->write_and_go = conn_closing; }
static void handle_binary_protocol_error(conn *c) { write_bin_error(c, PROTOCOL_BINARY_RESPONSE_EINVAL, 0); if (settings.verbose) { fprintf(stderr, "Protocol error (opcode %02x), close connection %d\n", c->binary_header.request.opcode, c->sfd); } c->write_and_go = conn_closing; }
C
memcached
0
CVE-2016-5219
https://www.cvedetails.com/cve/CVE-2016-5219/
CWE-416
https://github.com/chromium/chromium/commit/a4150b688a754d3d10d2ca385155b1c95d77d6ae
a4150b688a754d3d10d2ca385155b1c95d77d6ae
Add GL_PROGRAM_COMPLETION_QUERY_CHROMIUM This makes the query of GL_COMPLETION_STATUS_KHR to programs much cheaper by minimizing the round-trip to the GPU thread. Bug: 881152, 957001 Change-Id: Iadfa798af29225e752c710ca5c25f50b3dd3101a Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1586630 Commit-Queue: Kenneth Russell <[email protected]> Reviewed-by: Kentaro Hara <[email protected]> Reviewed-by: Geoff Lang <[email protected]> Reviewed-by: Kenneth Russell <[email protected]> Cr-Commit-Position: refs/heads/master@{#657568}
bool GLES2Implementation::GetSamplerParameterivHelper(GLuint /* sampler */, GLenum /* pname */, GLint* /* params */) { return false; }
bool GLES2Implementation::GetSamplerParameterivHelper(GLuint /* sampler */, GLenum /* pname */, GLint* /* params */) { return false; }
C
Chrome
0
CVE-2019-11487
https://www.cvedetails.com/cve/CVE-2019-11487/
CWE-416
https://github.com/torvalds/linux/commit/6b3a707736301c2128ca85ce85fb13f60b5e350a
6b3a707736301c2128ca85ce85fb13f60b5e350a
Merge branch 'page-refs' (page ref overflow) Merge page ref overflow branch. Jann Horn reported that he can overflow the page ref count with sufficient memory (and a filesystem that is intentionally extremely slow). Admittedly it's not exactly easy. To have more than four billion references to a page requires a minimum of 32GB of kernel memory just for the pointers to the pages, much less any metadata to keep track of those pointers. Jann needed a total of 140GB of memory and a specially crafted filesystem that leaves all reads pending (in order to not ever free the page references and just keep adding more). Still, we have a fairly straightforward way to limit the two obvious user-controllable sources of page references: direct-IO like page references gotten through get_user_pages(), and the splice pipe page duplication. So let's just do that. * branch page-refs: fs: prevent page refcount overflow in pipe_buf_get mm: prevent get_user_pages() from overflowing page refcount mm: add 'try_get_page()' helper function mm: make page ref count overflow check tighter and more explicit
static struct page *alloc_buddy_huge_page(struct hstate *h, gfp_t gfp_mask, int nid, nodemask_t *nmask) { int order = huge_page_order(h); struct page *page; gfp_mask |= __GFP_COMP|__GFP_RETRY_MAYFAIL|__GFP_NOWARN; if (nid == NUMA_NO_NODE) nid = numa_mem_id(); page = __alloc_pages_nodemask(gfp_mask, order, nid, nmask); if (page) __count_vm_event(HTLB_BUDDY_PGALLOC); else __count_vm_event(HTLB_BUDDY_PGALLOC_FAIL); return page; }
static struct page *alloc_buddy_huge_page(struct hstate *h, gfp_t gfp_mask, int nid, nodemask_t *nmask) { int order = huge_page_order(h); struct page *page; gfp_mask |= __GFP_COMP|__GFP_RETRY_MAYFAIL|__GFP_NOWARN; if (nid == NUMA_NO_NODE) nid = numa_mem_id(); page = __alloc_pages_nodemask(gfp_mask, order, nid, nmask); if (page) __count_vm_event(HTLB_BUDDY_PGALLOC); else __count_vm_event(HTLB_BUDDY_PGALLOC_FAIL); return page; }
C
linux
0
null
null
null
https://github.com/chromium/chromium/commit/690d0a9175790c4bd3abd066932bc08203c164ca
690d0a9175790c4bd3abd066932bc08203c164ca
Avoid excessive nesting / recursion in browser URL handling. BUG=31517 TEST=ChildProcessSecurityPolicyTest Review URL: http://codereview.chromium.org/525038 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@35585 0039d316-1c4b-4281-b951-d872f2087c98
~SecurityState() { scheme_policy_.clear(); }
~SecurityState() { scheme_policy_.clear(); }
C
Chrome
0
CVE-2018-7730
https://www.cvedetails.com/cve/CVE-2018-7730/
CWE-125
https://cgit.freedesktop.org/exempi/commit/?id=6cbd34025e5fd3ba47b29b602096e456507ce83b
6cbd34025e5fd3ba47b29b602096e456507ce83b
null
bool PSIR_FileWriter::IsLegacyChanged() { if ( ! this->changed ) return false; if ( this->legacyDeleted ) return true; InternalRsrcMap::iterator irPos = this->imgRsrcs.begin(); InternalRsrcMap::iterator irEnd = this->imgRsrcs.end(); for ( ; irPos != irEnd; ++irPos ) { const InternalRsrcInfo & rsrcInfo = irPos->second; if ( rsrcInfo.changed && (rsrcInfo.id != kPSIR_XMP) ) return true; } return false; // Can get here if the XMP is the only thing changed. } // PSIR_FileWriter::IsLegacyChanged
bool PSIR_FileWriter::IsLegacyChanged() { if ( ! this->changed ) return false; if ( this->legacyDeleted ) return true; InternalRsrcMap::iterator irPos = this->imgRsrcs.begin(); InternalRsrcMap::iterator irEnd = this->imgRsrcs.end(); for ( ; irPos != irEnd; ++irPos ) { const InternalRsrcInfo & rsrcInfo = irPos->second; if ( rsrcInfo.changed && (rsrcInfo.id != kPSIR_XMP) ) return true; } return false; // Can get here if the XMP is the only thing changed. } // PSIR_FileWriter::IsLegacyChanged
CPP
exempi
0
CVE-2015-8215
https://www.cvedetails.com/cve/CVE-2015-8215/
CWE-20
https://github.com/torvalds/linux/commit/77751427a1ff25b27d47a4c36b12c3c8667855ac
77751427a1ff25b27d47a4c36b12c3c8667855ac
ipv6: addrconf: validate new MTU before applying it Currently we don't check if the new MTU is valid or not and this allows one to configure a smaller than minimum allowed by RFCs or even bigger than interface own MTU, which is a problem as it may lead to packet drops. If you have a daemon like NetworkManager running, this may be exploited by remote attackers by forging RA packets with an invalid MTU, possibly leading to a DoS. (NetworkManager currently only validates for values too small, but not for too big ones.) The fix is just to make sure the new value is valid. That is, between IPV6_MIN_MTU and interface's MTU. Note that similar check is already performed at ndisc_router_discovery(), for when kernel itself parses the RA. Signed-off-by: Marcelo Ricardo Leitner <[email protected]> Signed-off-by: Sabrina Dubroca <[email protected]> Signed-off-by: David S. Miller <[email protected]>
static int inet6_addr_modify(struct inet6_ifaddr *ifp, u32 ifa_flags, u32 prefered_lft, u32 valid_lft) { u32 flags; clock_t expires; unsigned long timeout; bool was_managetempaddr; bool had_prefixroute; ASSERT_RTNL(); if (!valid_lft || (prefered_lft > valid_lft)) return -EINVAL; if (ifa_flags & IFA_F_MANAGETEMPADDR && (ifp->flags & IFA_F_TEMPORARY || ifp->prefix_len != 64)) return -EINVAL; timeout = addrconf_timeout_fixup(valid_lft, HZ); if (addrconf_finite_timeout(timeout)) { expires = jiffies_to_clock_t(timeout * HZ); valid_lft = timeout; flags = RTF_EXPIRES; } else { expires = 0; flags = 0; ifa_flags |= IFA_F_PERMANENT; } timeout = addrconf_timeout_fixup(prefered_lft, HZ); if (addrconf_finite_timeout(timeout)) { if (timeout == 0) ifa_flags |= IFA_F_DEPRECATED; prefered_lft = timeout; } spin_lock_bh(&ifp->lock); was_managetempaddr = ifp->flags & IFA_F_MANAGETEMPADDR; had_prefixroute = ifp->flags & IFA_F_PERMANENT && !(ifp->flags & IFA_F_NOPREFIXROUTE); ifp->flags &= ~(IFA_F_DEPRECATED | IFA_F_PERMANENT | IFA_F_NODAD | IFA_F_HOMEADDRESS | IFA_F_MANAGETEMPADDR | IFA_F_NOPREFIXROUTE); ifp->flags |= ifa_flags; ifp->tstamp = jiffies; ifp->valid_lft = valid_lft; ifp->prefered_lft = prefered_lft; spin_unlock_bh(&ifp->lock); if (!(ifp->flags&IFA_F_TENTATIVE)) ipv6_ifa_notify(0, ifp); if (!(ifa_flags & IFA_F_NOPREFIXROUTE)) { addrconf_prefix_route(&ifp->addr, ifp->prefix_len, ifp->idev->dev, expires, flags); } else if (had_prefixroute) { enum cleanup_prefix_rt_t action; unsigned long rt_expires; write_lock_bh(&ifp->idev->lock); action = check_cleanup_prefix_route(ifp, &rt_expires); write_unlock_bh(&ifp->idev->lock); if (action != CLEANUP_PREFIX_RT_NOP) { cleanup_prefix_route(ifp, rt_expires, action == CLEANUP_PREFIX_RT_DEL); } } if (was_managetempaddr || ifp->flags & IFA_F_MANAGETEMPADDR) { if (was_managetempaddr && !(ifp->flags & IFA_F_MANAGETEMPADDR)) valid_lft = prefered_lft = 0; manage_tempaddrs(ifp->idev, ifp, valid_lft, prefered_lft, !was_managetempaddr, jiffies); } addrconf_verify_rtnl(); return 0; }
static int inet6_addr_modify(struct inet6_ifaddr *ifp, u32 ifa_flags, u32 prefered_lft, u32 valid_lft) { u32 flags; clock_t expires; unsigned long timeout; bool was_managetempaddr; bool had_prefixroute; ASSERT_RTNL(); if (!valid_lft || (prefered_lft > valid_lft)) return -EINVAL; if (ifa_flags & IFA_F_MANAGETEMPADDR && (ifp->flags & IFA_F_TEMPORARY || ifp->prefix_len != 64)) return -EINVAL; timeout = addrconf_timeout_fixup(valid_lft, HZ); if (addrconf_finite_timeout(timeout)) { expires = jiffies_to_clock_t(timeout * HZ); valid_lft = timeout; flags = RTF_EXPIRES; } else { expires = 0; flags = 0; ifa_flags |= IFA_F_PERMANENT; } timeout = addrconf_timeout_fixup(prefered_lft, HZ); if (addrconf_finite_timeout(timeout)) { if (timeout == 0) ifa_flags |= IFA_F_DEPRECATED; prefered_lft = timeout; } spin_lock_bh(&ifp->lock); was_managetempaddr = ifp->flags & IFA_F_MANAGETEMPADDR; had_prefixroute = ifp->flags & IFA_F_PERMANENT && !(ifp->flags & IFA_F_NOPREFIXROUTE); ifp->flags &= ~(IFA_F_DEPRECATED | IFA_F_PERMANENT | IFA_F_NODAD | IFA_F_HOMEADDRESS | IFA_F_MANAGETEMPADDR | IFA_F_NOPREFIXROUTE); ifp->flags |= ifa_flags; ifp->tstamp = jiffies; ifp->valid_lft = valid_lft; ifp->prefered_lft = prefered_lft; spin_unlock_bh(&ifp->lock); if (!(ifp->flags&IFA_F_TENTATIVE)) ipv6_ifa_notify(0, ifp); if (!(ifa_flags & IFA_F_NOPREFIXROUTE)) { addrconf_prefix_route(&ifp->addr, ifp->prefix_len, ifp->idev->dev, expires, flags); } else if (had_prefixroute) { enum cleanup_prefix_rt_t action; unsigned long rt_expires; write_lock_bh(&ifp->idev->lock); action = check_cleanup_prefix_route(ifp, &rt_expires); write_unlock_bh(&ifp->idev->lock); if (action != CLEANUP_PREFIX_RT_NOP) { cleanup_prefix_route(ifp, rt_expires, action == CLEANUP_PREFIX_RT_DEL); } } if (was_managetempaddr || ifp->flags & IFA_F_MANAGETEMPADDR) { if (was_managetempaddr && !(ifp->flags & IFA_F_MANAGETEMPADDR)) valid_lft = prefered_lft = 0; manage_tempaddrs(ifp->idev, ifp, valid_lft, prefered_lft, !was_managetempaddr, jiffies); } addrconf_verify_rtnl(); return 0; }
C
linux
0
CVE-2016-6720
https://www.cvedetails.com/cve/CVE-2016-6720/
CWE-200
https://android.googlesource.com/platform/frameworks/av/+/640b04121d7cd2cac90e2f7c82b97fce05f074a5
640b04121d7cd2cac90e2f7c82b97fce05f074a5
IOMX: do not clear buffer if it's allocated by component The component might depends on their buffers to be initialized in certain ways to work. Don't clear unless we're allocating it. bug: 31586647 Change-Id: Ia0a125797e414998ef0cd8ce03672f5b1e0bbf7a (cherry picked from commit ea76573aa276f51950007217a97903c4fe64f685)
status_t OMXNodeInstance::allocateBufferWithBackup( OMX_U32 portIndex, const sp<IMemory> &params, OMX::buffer_id *buffer, OMX_U32 allottedSize) { if (params == NULL || buffer == NULL) { ALOGE("b/25884056"); return BAD_VALUE; } Mutex::Autolock autoLock(mLock); if (allottedSize > params->size() || portIndex >= NELEM(mNumPortBuffers)) { return BAD_VALUE; } bool copy = mMetadataType[portIndex] == kMetadataBufferTypeInvalid; BufferMeta *buffer_meta = new BufferMeta( params, portIndex, (portIndex == kPortIndexInput) && copy /* copyToOmx */, (portIndex == kPortIndexOutput) && copy /* copyFromOmx */, NULL /* data */); OMX_BUFFERHEADERTYPE *header; OMX_ERRORTYPE err = OMX_AllocateBuffer( mHandle, &header, portIndex, buffer_meta, allottedSize); if (err != OMX_ErrorNone) { CLOG_ERROR(allocateBufferWithBackup, err, SIMPLE_BUFFER(portIndex, (size_t)allottedSize, params->pointer())); delete buffer_meta; buffer_meta = NULL; *buffer = 0; return StatusFromOMXError(err); } CHECK_EQ(header->pAppPrivate, buffer_meta); *buffer = makeBufferID(header); addActiveBuffer(portIndex, *buffer); sp<GraphicBufferSource> bufferSource(getGraphicBufferSource()); if (bufferSource != NULL && portIndex == kPortIndexInput) { bufferSource->addCodecBuffer(header); } CLOG_BUFFER(allocateBufferWithBackup, NEW_BUFFER_FMT(*buffer, portIndex, "%zu@%p :> %u@%p", params->size(), params->pointer(), allottedSize, header->pBuffer)); return OK; }
status_t OMXNodeInstance::allocateBufferWithBackup( OMX_U32 portIndex, const sp<IMemory> &params, OMX::buffer_id *buffer, OMX_U32 allottedSize) { if (params == NULL || buffer == NULL) { ALOGE("b/25884056"); return BAD_VALUE; } Mutex::Autolock autoLock(mLock); if (allottedSize > params->size() || portIndex >= NELEM(mNumPortBuffers)) { return BAD_VALUE; } bool copy = mMetadataType[portIndex] == kMetadataBufferTypeInvalid; BufferMeta *buffer_meta = new BufferMeta( params, portIndex, (portIndex == kPortIndexInput) && copy /* copyToOmx */, (portIndex == kPortIndexOutput) && copy /* copyFromOmx */, NULL /* data */); OMX_BUFFERHEADERTYPE *header; OMX_ERRORTYPE err = OMX_AllocateBuffer( mHandle, &header, portIndex, buffer_meta, allottedSize); if (err != OMX_ErrorNone) { CLOG_ERROR(allocateBufferWithBackup, err, SIMPLE_BUFFER(portIndex, (size_t)allottedSize, params->pointer())); delete buffer_meta; buffer_meta = NULL; *buffer = 0; return StatusFromOMXError(err); } CHECK_EQ(header->pAppPrivate, buffer_meta); memset(header->pBuffer, 0, header->nAllocLen); *buffer = makeBufferID(header); addActiveBuffer(portIndex, *buffer); sp<GraphicBufferSource> bufferSource(getGraphicBufferSource()); if (bufferSource != NULL && portIndex == kPortIndexInput) { bufferSource->addCodecBuffer(header); } CLOG_BUFFER(allocateBufferWithBackup, NEW_BUFFER_FMT(*buffer, portIndex, "%zu@%p :> %u@%p", params->size(), params->pointer(), allottedSize, header->pBuffer)); return OK; }
C
Android
1
CVE-2019-5827
https://www.cvedetails.com/cve/CVE-2019-5827/
CWE-190
https://github.com/chromium/chromium/commit/517ac71c9ee27f856f9becde8abea7d1604af9d4
517ac71c9ee27f856f9becde8abea7d1604af9d4
sqlite: backport bugfixes for dbfuzz2 Bug: 952406 Change-Id: Icbec429742048d6674828726c96d8e265c41b595 Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1568152 Reviewed-by: Chris Mumford <[email protected]> Commit-Queue: Darwin Huang <[email protected]> Cr-Commit-Position: refs/heads/master@{#651030}
static int dbpageColumn( sqlite3_vtab_cursor *pCursor, sqlite3_context *ctx, int i ){ DbpageCursor *pCsr = (DbpageCursor *)pCursor; int rc = SQLITE_OK; switch( i ){ case 0: { /* pgno */ sqlite3_result_int(ctx, pCsr->pgno); break; } case 1: { /* data */ DbPage *pDbPage = 0; rc = sqlite3PagerGet(pCsr->pPager, pCsr->pgno, (DbPage**)&pDbPage, 0); if( rc==SQLITE_OK ){ sqlite3_result_blob(ctx, sqlite3PagerGetData(pDbPage), pCsr->szPage, SQLITE_TRANSIENT); } sqlite3PagerUnref(pDbPage); break; } default: { /* schema */ sqlite3 *db = sqlite3_context_db_handle(ctx); sqlite3_result_text(ctx, db->aDb[pCsr->iDb].zDbSName, -1, SQLITE_STATIC); break; } } return SQLITE_OK; }
static int dbpageColumn( sqlite3_vtab_cursor *pCursor, sqlite3_context *ctx, int i ){ DbpageCursor *pCsr = (DbpageCursor *)pCursor; int rc = SQLITE_OK; switch( i ){ case 0: { /* pgno */ sqlite3_result_int(ctx, pCsr->pgno); break; } case 1: { /* data */ DbPage *pDbPage = 0; rc = sqlite3PagerGet(pCsr->pPager, pCsr->pgno, (DbPage**)&pDbPage, 0); if( rc==SQLITE_OK ){ sqlite3_result_blob(ctx, sqlite3PagerGetData(pDbPage), pCsr->szPage, SQLITE_TRANSIENT); } sqlite3PagerUnref(pDbPage); break; } default: { /* schema */ sqlite3 *db = sqlite3_context_db_handle(ctx); sqlite3_result_text(ctx, db->aDb[pCsr->iDb].zDbSName, -1, SQLITE_STATIC); break; } } return SQLITE_OK; }
C
Chrome
0
CVE-2015-1274
https://www.cvedetails.com/cve/CVE-2015-1274/
CWE-254
https://github.com/chromium/chromium/commit/d27468a832d5316884bd02f459cbf493697fd7e1
d27468a832d5316884bd02f459cbf493697fd7e1
Switch to equalIgnoringASCIICase throughout modules/accessibility BUG=627682 Review-Url: https://codereview.chromium.org/2793913007 Cr-Commit-Position: refs/heads/master@{#461858}
AXObject::AXRange AXLayoutObject::textControlSelection() const { if (!getLayoutObject()) return AXRange(); LayoutObject* layout = nullptr; if (getLayoutObject()->isTextControl()) { layout = getLayoutObject(); } else { Element* focusedElement = getDocument()->focusedElement(); if (focusedElement && focusedElement->layoutObject() && focusedElement->layoutObject()->isTextControl()) layout = focusedElement->layoutObject(); } if (!layout) return AXRange(); AXObject* axObject = axObjectCache().getOrCreate(layout); if (!axObject || !axObject->isAXLayoutObject()) return AXRange(); VisibleSelection selection = layout->frame()->selection().computeVisibleSelectionInDOMTreeDeprecated(); TextControlElement* textControl = toLayoutTextControl(layout)->textControlElement(); ASSERT(textControl); int start = textControl->selectionStart(); int end = textControl->selectionEnd(); return AXRange(axObject, start, selection.visibleStart().affinity(), axObject, end, selection.visibleEnd().affinity()); }
AXObject::AXRange AXLayoutObject::textControlSelection() const { if (!getLayoutObject()) return AXRange(); LayoutObject* layout = nullptr; if (getLayoutObject()->isTextControl()) { layout = getLayoutObject(); } else { Element* focusedElement = getDocument()->focusedElement(); if (focusedElement && focusedElement->layoutObject() && focusedElement->layoutObject()->isTextControl()) layout = focusedElement->layoutObject(); } if (!layout) return AXRange(); AXObject* axObject = axObjectCache().getOrCreate(layout); if (!axObject || !axObject->isAXLayoutObject()) return AXRange(); VisibleSelection selection = layout->frame()->selection().computeVisibleSelectionInDOMTreeDeprecated(); TextControlElement* textControl = toLayoutTextControl(layout)->textControlElement(); ASSERT(textControl); int start = textControl->selectionStart(); int end = textControl->selectionEnd(); return AXRange(axObject, start, selection.visibleStart().affinity(), axObject, end, selection.visibleEnd().affinity()); }
C
Chrome
0
CVE-2011-4112
https://www.cvedetails.com/cve/CVE-2011-4112/
CWE-264
https://github.com/torvalds/linux/commit/550fd08c2cebad61c548def135f67aba284c6162
550fd08c2cebad61c548def135f67aba284c6162
net: Audit drivers to identify those needing IFF_TX_SKB_SHARING cleared After the last patch, We are left in a state in which only drivers calling ether_setup have IFF_TX_SKB_SHARING set (we assume that drivers touching real hardware call ether_setup for their net_devices and don't hold any state in their skbs. There are a handful of drivers that violate this assumption of course, and need to be fixed up. This patch identifies those drivers, and marks them as not being able to support the safe transmission of skbs by clearning the IFF_TX_SKB_SHARING flag in priv_flags Signed-off-by: Neil Horman <[email protected]> CC: Karsten Keil <[email protected]> CC: "David S. Miller" <[email protected]> CC: Jay Vosburgh <[email protected]> CC: Andy Gospodarek <[email protected]> CC: Patrick McHardy <[email protected]> CC: Krzysztof Halasa <[email protected]> CC: "John W. Linville" <[email protected]> CC: Greg Kroah-Hartman <[email protected]> CC: Marcel Holtmann <[email protected]> CC: Johannes Berg <[email protected]> Signed-off-by: David S. Miller <[email protected]>
static void tun_sock_destruct(struct sock *sk) { free_netdev(tun_sk(sk)->tun->dev); }
static void tun_sock_destruct(struct sock *sk) { free_netdev(tun_sk(sk)->tun->dev); }
C
linux
0
CVE-2013-0904
https://www.cvedetails.com/cve/CVE-2013-0904/
CWE-119
https://github.com/chromium/chromium/commit/b2b21468c1f7f08b30a7c1755316f6026c50eb2a
b2b21468c1f7f08b30a7c1755316f6026c50eb2a
Separate repaint and layout requirements of StyleDifference (Step 1) Previously StyleDifference was an enum that proximately bigger values imply smaller values (e.g. StyleDifferenceLayout implies StyleDifferenceRepaint). This causes unnecessary repaints in some cases on layout change. Convert StyleDifference to a structure containing relatively independent flags. This change doesn't directly improve the result, but can make further repaint optimizations possible. Step 1 doesn't change any functionality. RenderStyle still generate the legacy StyleDifference enum when comparing styles and convert the result to the new StyleDifference. Implicit requirements are not handled during the conversion. Converted call sites to use the new StyleDifference according to the following conversion rules: - diff == StyleDifferenceEqual (&& !context) => diff.hasNoChange() - diff == StyleDifferenceRepaint => diff.needsRepaintObjectOnly() - diff == StyleDifferenceRepaintLayer => diff.needsRepaintLayer() - diff == StyleDifferenceRepaint || diff == StyleDifferenceRepaintLayer => diff.needsRepaintLayer() - diff >= StyleDifferenceRepaint => diff.needsRepaint() || diff.needsLayout() - diff >= StyleDifferenceRepaintLayer => diff.needsRepaintLayer() || diff.needsLayout() - diff > StyleDifferenceRepaintLayer => diff.needsLayout() - diff == StyleDifferencePositionedMovementLayoutOnly => diff.needsPositionedMovementLayoutOnly() - diff == StyleDifferenceLayout => diff.needsFullLayout() BUG=358460 TEST=All existing layout tests. [email protected], [email protected], [email protected] Committed: https://src.chromium.org/viewvc/blink?view=rev&revision=171983 Review URL: https://codereview.chromium.org/236203020 git-svn-id: svn://svn.chromium.org/blink/trunk@172331 bbb929c8-8fbe-4397-9dbb-9b2b20218538
void RenderFlexibleBox::flipForWrapReverse(const Vector<LineContext>& lineContexts, LayoutUnit crossAxisStartEdge) { LayoutUnit contentExtent = crossAxisContentExtent(); RenderBox* child = m_orderIterator.first(); for (size_t lineNumber = 0; lineNumber < lineContexts.size(); ++lineNumber) { for (size_t childNumber = 0; childNumber < lineContexts[lineNumber].numberOfChildren; ++childNumber, child = m_orderIterator.next()) { ASSERT(child); LayoutUnit lineCrossAxisExtent = lineContexts[lineNumber].crossAxisExtent; LayoutUnit originalOffset = lineContexts[lineNumber].crossAxisOffset - crossAxisStartEdge; LayoutUnit newOffset = contentExtent - originalOffset - lineCrossAxisExtent; adjustAlignmentForChild(child, newOffset - originalOffset); } } }
void RenderFlexibleBox::flipForWrapReverse(const Vector<LineContext>& lineContexts, LayoutUnit crossAxisStartEdge) { LayoutUnit contentExtent = crossAxisContentExtent(); RenderBox* child = m_orderIterator.first(); for (size_t lineNumber = 0; lineNumber < lineContexts.size(); ++lineNumber) { for (size_t childNumber = 0; childNumber < lineContexts[lineNumber].numberOfChildren; ++childNumber, child = m_orderIterator.next()) { ASSERT(child); LayoutUnit lineCrossAxisExtent = lineContexts[lineNumber].crossAxisExtent; LayoutUnit originalOffset = lineContexts[lineNumber].crossAxisOffset - crossAxisStartEdge; LayoutUnit newOffset = contentExtent - originalOffset - lineCrossAxisExtent; adjustAlignmentForChild(child, newOffset - originalOffset); } } }
C
Chrome
0
CVE-2016-2543
https://www.cvedetails.com/cve/CVE-2016-2543/
null
https://github.com/torvalds/linux/commit/030e2c78d3a91dd0d27fef37e91950dde333eba1
030e2c78d3a91dd0d27fef37e91950dde333eba1
ALSA: seq: Fix missing NULL check at remove_events ioctl snd_seq_ioctl_remove_events() calls snd_seq_fifo_clear() unconditionally even if there is no FIFO assigned, and this leads to an Oops due to NULL dereference. The fix is just to add a proper NULL check. Reported-by: Dmitry Vyukov <[email protected]> Tested-by: Dmitry Vyukov <[email protected]> Cc: <[email protected]> Signed-off-by: Takashi Iwai <[email protected]>
int snd_seq_set_queue_tempo(int client, struct snd_seq_queue_tempo *tempo) { if (!snd_seq_queue_check_access(tempo->queue, client)) return -EPERM; return snd_seq_queue_timer_set_tempo(tempo->queue, client, tempo); }
int snd_seq_set_queue_tempo(int client, struct snd_seq_queue_tempo *tempo) { if (!snd_seq_queue_check_access(tempo->queue, client)) return -EPERM; return snd_seq_queue_timer_set_tempo(tempo->queue, client, tempo); }
C
linux
0
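Hedged aside, not part of the record above: the fix the commit message describes is a NULL check on the client's FIFO before snd_seq_fifo_clear() runs from the remove_events ioctl. A sketch of the shape of that guard; the client/fifo field names are assumptions, not the exact ALSA structures, and this fragment omits the real headers.

static int remove_events_sketch(struct snd_seq_client *client)
{
        /* Previously the clear ran unconditionally and dereferenced NULL
         * when no FIFO had been assigned to the client. */
        if (!client->data.user.fifo)
                return -ENXIO;
        snd_seq_fifo_clear(client->data.user.fifo);
        return 0;
}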
CVE-2012-2895
https://www.cvedetails.com/cve/CVE-2012-2895/
CWE-119
https://github.com/chromium/chromium/commit/3475f5e448ddf5e48888f3d0563245cc46e3c98b
3475f5e448ddf5e48888f3d0563245cc46e3c98b
ash: Add launcher overflow bubble. - Host a LauncherView in bubble to display overflown items; - Mouse wheel and two-finger scroll to scroll the LauncherView in bubble in case overflow bubble is overflown; - Fit bubble when items are added/removed; - Keep launcher bar on screen when the bubble is shown; BUG=128054 TEST=Verify launcher overflown items are in a bubble instead of menu. Review URL: https://chromiumcodereview.appspot.com/10659003 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@146460 0039d316-1c4b-4281-b951-d872f2087c98
void LauncherView::OnBoundsAnimatorProgressed(views::BoundsAnimator* animator) { FOR_EACH_OBSERVER(LauncherIconObserver, observers_, OnLauncherIconPositionsChanged()); PreferredSizeChanged(); }
void LauncherView::OnBoundsAnimatorProgressed(views::BoundsAnimator* animator) { FOR_EACH_OBSERVER(LauncherIconObserver, observers_, OnLauncherIconPositionsChanged()); }
C
Chrome
1
CVE-2015-1344
https://www.cvedetails.com/cve/CVE-2015-1344/
CWE-264
https://github.com/lxc/lxcfs/commit/8ee2a503e102b1a43ec4d83113dc275ab20a869a
8ee2a503e102b1a43ec4d83113dc275ab20a869a
Implement privilege check when moving tasks When writing pids to a tasks file in lxcfs, lxcfs was checking for privilege over the tasks file but not over the pid being moved. Since the cgm_movepid request is done as root on the host, not with the requestor's credentials, we must copy the check which cgmanager was doing to ensure that the requesting task is allowed to change the victim task's cgroup membership. This is CVE-2015-1344 https://bugs.launchpad.net/ubuntu/+source/lxcfs/+bug/1512854 Signed-off-by: Serge Hallyn <[email protected]>
static int send_creds(int sock, struct ucred *cred, char v, bool pingfirst) { struct msghdr msg = { 0 }; struct iovec iov; struct cmsghdr *cmsg; char cmsgbuf[CMSG_SPACE(sizeof(*cred))]; char buf[1]; buf[0] = 'p'; if (pingfirst) { if (msgrecv(sock, buf, 1) != 1) { fprintf(stderr, "%s: Error getting reply from server over socketpair\n", __func__); return SEND_CREDS_FAIL; } } msg.msg_control = cmsgbuf; msg.msg_controllen = sizeof(cmsgbuf); cmsg = CMSG_FIRSTHDR(&msg); cmsg->cmsg_len = CMSG_LEN(sizeof(struct ucred)); cmsg->cmsg_level = SOL_SOCKET; cmsg->cmsg_type = SCM_CREDENTIALS; memcpy(CMSG_DATA(cmsg), cred, sizeof(*cred)); msg.msg_name = NULL; msg.msg_namelen = 0; buf[0] = v; iov.iov_base = buf; iov.iov_len = sizeof(buf); msg.msg_iov = &iov; msg.msg_iovlen = 1; if (sendmsg(sock, &msg, 0) < 0) { fprintf(stderr, "%s: failed at sendmsg: %s\n", __func__, strerror(errno)); if (errno == 3) return SEND_CREDS_NOTSK; return SEND_CREDS_FAIL; } return SEND_CREDS_OK; }
static int send_creds(int sock, struct ucred *cred, char v, bool pingfirst) { struct msghdr msg = { 0 }; struct iovec iov; struct cmsghdr *cmsg; char cmsgbuf[CMSG_SPACE(sizeof(*cred))]; char buf[1]; buf[0] = 'p'; if (pingfirst) { if (msgrecv(sock, buf, 1) != 1) { fprintf(stderr, "%s: Error getting reply from server over socketpair\n", __func__); return SEND_CREDS_FAIL; } } msg.msg_control = cmsgbuf; msg.msg_controllen = sizeof(cmsgbuf); cmsg = CMSG_FIRSTHDR(&msg); cmsg->cmsg_len = CMSG_LEN(sizeof(struct ucred)); cmsg->cmsg_level = SOL_SOCKET; cmsg->cmsg_type = SCM_CREDENTIALS; memcpy(CMSG_DATA(cmsg), cred, sizeof(*cred)); msg.msg_name = NULL; msg.msg_namelen = 0; buf[0] = v; iov.iov_base = buf; iov.iov_len = sizeof(buf); msg.msg_iov = &iov; msg.msg_iovlen = 1; if (sendmsg(sock, &msg, 0) < 0) { fprintf(stderr, "%s: failed at sendmsg: %s\n", __func__, strerror(errno)); if (errno == 3) return SEND_CREDS_NOTSK; return SEND_CREDS_FAIL; } return SEND_CREDS_OK; }
C
lxcfs
0
CVE-2018-16427
https://www.cvedetails.com/cve/CVE-2018-16427/
CWE-125
https://github.com/OpenSC/OpenSC/pull/1447/commits/8fe377e93b4b56060e5bbfb6f3142ceaeca744fa
8fe377e93b4b56060e5bbfb6f3142ceaeca744fa
fixed out of bounds reads Thanks to Eric Sesterhenn from X41 D-SEC GmbH for reporting and suggesting security fixes.
static int hextoint(char *src, unsigned int len) { char hex[16]; char *end; int res; if(len >= sizeof(hex)) return -1; strncpy(hex, src, len+1); hex[len] = '\0'; res = strtol(hex, &end, 0x10); if(end != (char*)&hex[len]) return -1; return res; }
static int hextoint(char *src, unsigned int len) { char hex[16]; char *end; int res; if(len >= sizeof(hex)) return -1; strncpy(hex, src, len+1); hex[len] = '\0'; res = strtol(hex, &end, 0x10); if(end != (char*)&hex[len]) return -1; return res; }
C
OpenSC
0
CVE-2018-6096
https://www.cvedetails.com/cve/CVE-2018-6096/
null
https://github.com/chromium/chromium/commit/36f801fdbec07d116a6f4f07bb363f10897d6a51
36f801fdbec07d116a6f4f07bb363f10897d6a51
If a page calls |window.focus()|, kick it out of fullscreen. BUG=776418, 800056 Change-Id: I1880fe600e4814c073f247c43b1c1ac80c8fc017 Reviewed-on: https://chromium-review.googlesource.com/852378 Reviewed-by: Nasko Oskov <[email protected]> Reviewed-by: Philip Jägenstedt <[email protected]> Commit-Queue: Avi Drissman <[email protected]> Cr-Commit-Position: refs/heads/master@{#533790}
const char* DialogTypeToString(ChromeClient::DialogType dialog_type) { switch (dialog_type) { case ChromeClient::kAlertDialog: return "alert"; case ChromeClient::kConfirmDialog: return "confirm"; case ChromeClient::kPromptDialog: return "prompt"; case ChromeClient::kPrintDialog: return "print"; case ChromeClient::kHTMLDialog: NOTREACHED(); } NOTREACHED(); return ""; }
const char* DialogTypeToString(ChromeClient::DialogType dialog_type) { switch (dialog_type) { case ChromeClient::kAlertDialog: return "alert"; case ChromeClient::kConfirmDialog: return "confirm"; case ChromeClient::kPromptDialog: return "prompt"; case ChromeClient::kPrintDialog: return "print"; case ChromeClient::kHTMLDialog: NOTREACHED(); } NOTREACHED(); return ""; }
C
Chrome
0
CVE-2014-7826
https://www.cvedetails.com/cve/CVE-2014-7826/
CWE-264
https://github.com/torvalds/linux/commit/086ba77a6db00ed858ff07451bedee197df868c9
086ba77a6db00ed858ff07451bedee197df868c9
tracing/syscalls: Ignore numbers outside NR_syscalls' range ARM has some private syscalls (for example, set_tls(2)) which lie outside the range of NR_syscalls. If any of these are called while syscall tracing is being performed, out-of-bounds array access will occur in the ftrace and perf sys_{enter,exit} handlers. # trace-cmd record -e raw_syscalls:* true && trace-cmd report ... true-653 [000] 384.675777: sys_enter: NR 192 (0, 1000, 3, 4000022, ffffffff, 0) true-653 [000] 384.675812: sys_exit: NR 192 = 1995915264 true-653 [000] 384.675971: sys_enter: NR 983045 (76f74480, 76f74000, 76f74b28, 76f74480, 76f76f74, 1) true-653 [000] 384.675988: sys_exit: NR 983045 = 0 ... # trace-cmd record -e syscalls:* true [ 17.289329] Unable to handle kernel paging request at virtual address aaaaaace [ 17.289590] pgd = 9e71c000 [ 17.289696] [aaaaaace] *pgd=00000000 [ 17.289985] Internal error: Oops: 5 [#1] PREEMPT SMP ARM [ 17.290169] Modules linked in: [ 17.290391] CPU: 0 PID: 704 Comm: true Not tainted 3.18.0-rc2+ #21 [ 17.290585] task: 9f4dab00 ti: 9e710000 task.ti: 9e710000 [ 17.290747] PC is at ftrace_syscall_enter+0x48/0x1f8 [ 17.290866] LR is at syscall_trace_enter+0x124/0x184 Fix this by ignoring out-of-NR_syscalls-bounds syscall numbers. Commit cd0980fc8add "tracing: Check invalid syscall nr while tracing syscalls" added the check for less than zero, but it should have also checked for greater than NR_syscalls. Link: http://lkml.kernel.org/p/[email protected] Fixes: cd0980fc8add "tracing: Check invalid syscall nr while tracing syscalls" Cc: [email protected] # 2.6.33+ Signed-off-by: Rabin Vincent <[email protected]> Signed-off-by: Steven Rostedt <[email protected]>
static struct syscall_metadata *syscall_nr_to_meta(int nr) { if (!syscalls_metadata || nr >= NR_syscalls || nr < 0) return NULL; return syscalls_metadata[nr]; }
static struct syscall_metadata *syscall_nr_to_meta(int nr) { if (!syscalls_metadata || nr >= NR_syscalls || nr < 0) return NULL; return syscalls_metadata[nr]; }
C
linux
0
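Aside for illustration, not a dataset field: syscall_nr_to_meta() above already shows the range test; the same guard has to sit in the ftrace/perf sys_enter and sys_exit handlers so an ARM private syscall number (the commit's example, NR 983045 from set_tls(2)) never indexes past the metadata table. A fragment sketching that guard, handler body elided:

static void trace_sys_enter_sketch(struct pt_regs *regs, long id)
{
        if (id < 0 || id >= NR_syscalls)
                return;   /* out-of-range number: ignore, do not index the table */
        /* ... look up syscalls_metadata[id] and record the trace event ... */
}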
CVE-2017-5548
https://www.cvedetails.com/cve/CVE-2017-5548/
CWE-119
https://github.com/torvalds/linux/commit/05a974efa4bdf6e2a150e3f27dc6fcf0a9ad5655
05a974efa4bdf6e2a150e3f27dc6fcf0a9ad5655
ieee802154: atusb: do not use the stack for buffers to make them DMA able From 4.9 we should really avoid using the stack here as this will not be DMA able on various platforms. This changes the buffers already being present in time of 4.9 being released. This should go into stable as well. Reported-by: Dan Carpenter <[email protected]> Cc: [email protected] Signed-off-by: Stefan Schmidt <[email protected]> Signed-off-by: Marcel Holtmann <[email protected]>
atusb_set_frame_retries(struct ieee802154_hw *hw, s8 retries) { struct atusb *atusb = hw->priv; struct device *dev = &atusb->usb_dev->dev; if (atusb->fw_ver_maj == 0 && atusb->fw_ver_min < 3) { dev_info(dev, "Automatic frame retransmission is only available from " "firmware version 0.3. Please update if you want this feature."); return -EINVAL; } return atusb_write_subreg(atusb, SR_MAX_FRAME_RETRIES, retries); }
atusb_set_frame_retries(struct ieee802154_hw *hw, s8 retries) { struct atusb *atusb = hw->priv; struct device *dev = &atusb->usb_dev->dev; if (atusb->fw_ver_maj == 0 && atusb->fw_ver_min < 3) { dev_info(dev, "Automatic frame retransmission is only available from " "firmware version 0.3. Please update if you want this feature."); return -EINVAL; } return atusb_write_subreg(atusb, SR_MAX_FRAME_RETRIES, retries); }
C
linux
0
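Illustrative aside, not a dataset field: the commit above moves USB transfer buffers off the stack because stack memory is not guaranteed to be DMA-able. A hedged sketch of the pattern, not the actual atusb patch; the request constants and helper name are placeholders.

#include <linux/usb.h>
#include <linux/slab.h>

static int atusb_read_one_byte_sketch(struct usb_device *udev)
{
        int ret;
        u8 *buf = kmalloc(1, GFP_KERNEL);   /* heap allocation is DMA-able */

        if (!buf)
                return -ENOMEM;
        ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
                              0x01, USB_TYPE_VENDOR | USB_DIR_IN,
                              0, 0, buf, 1, 1000);
        if (ret >= 0)
                ret = buf[0];
        kfree(buf);
        return ret;
}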
CVE-2017-14223
https://www.cvedetails.com/cve/CVE-2017-14223/
CWE-399
https://github.com/FFmpeg/FFmpeg/commit/afc9c683ed9db01edb357bc8c19edad4282b3a97
afc9c683ed9db01edb357bc8c19edad4282b3a97
avformat/asfdec: Fix DoS in asf_build_simple_index() Fixes: Missing EOF check in loop No testcase Found-by: Xiaohei and Wangchu from Alibaba Security Team Signed-off-by: Michael Niedermayer <[email protected]>
static int asf_read_stream_properties(AVFormatContext *s, int64_t size) { ASFContext *asf = s->priv_data; AVIOContext *pb = s->pb; AVStream *st; ASFStream *asf_st; ff_asf_guid g; enum AVMediaType type; int type_specific_size, sizeX; unsigned int tag1; int64_t pos1, pos2, start_time; int test_for_ext_stream_audio, is_dvr_ms_audio = 0; if (s->nb_streams == ASF_MAX_STREAMS) { av_log(s, AV_LOG_ERROR, "too many streams\n"); return AVERROR(EINVAL); } pos1 = avio_tell(pb); st = avformat_new_stream(s, NULL); if (!st) return AVERROR(ENOMEM); avpriv_set_pts_info(st, 32, 1, 1000); /* 32 bit pts in ms */ start_time = asf->hdr.preroll; if (!(asf->hdr.flags & 0x01)) { // if we aren't streaming... int64_t fsize = avio_size(pb); if (fsize <= 0 || (int64_t)asf->hdr.file_size <= 0 || 20*FFABS(fsize - (int64_t)asf->hdr.file_size) < FFMIN(fsize, asf->hdr.file_size)) st->duration = asf->hdr.play_time / (10000000 / 1000) - start_time; } ff_get_guid(pb, &g); test_for_ext_stream_audio = 0; if (!ff_guidcmp(&g, &ff_asf_audio_stream)) { type = AVMEDIA_TYPE_AUDIO; } else if (!ff_guidcmp(&g, &ff_asf_video_stream)) { type = AVMEDIA_TYPE_VIDEO; } else if (!ff_guidcmp(&g, &ff_asf_jfif_media)) { type = AVMEDIA_TYPE_VIDEO; st->codecpar->codec_id = AV_CODEC_ID_MJPEG; } else if (!ff_guidcmp(&g, &ff_asf_command_stream)) { type = AVMEDIA_TYPE_DATA; } else if (!ff_guidcmp(&g, &ff_asf_ext_stream_embed_stream_header)) { test_for_ext_stream_audio = 1; type = AVMEDIA_TYPE_UNKNOWN; } else { return -1; } ff_get_guid(pb, &g); avio_skip(pb, 8); /* total_size */ type_specific_size = avio_rl32(pb); avio_rl32(pb); st->id = avio_rl16(pb) & 0x7f; /* stream id */ asf->asfid2avid[st->id] = s->nb_streams - 1; asf_st = &asf->streams[st->id]; avio_rl32(pb); if (test_for_ext_stream_audio) { ff_get_guid(pb, &g); if (!ff_guidcmp(&g, &ff_asf_ext_stream_audio_stream)) { type = AVMEDIA_TYPE_AUDIO; is_dvr_ms_audio = 1; ff_get_guid(pb, &g); avio_rl32(pb); avio_rl32(pb); avio_rl32(pb); ff_get_guid(pb, &g); avio_rl32(pb); } } st->codecpar->codec_type = type; if (type == AVMEDIA_TYPE_AUDIO) { int ret = ff_get_wav_header(s, pb, st->codecpar, type_specific_size, 0); if (ret < 0) return ret; if (is_dvr_ms_audio) { st->request_probe = 1; st->codecpar->codec_tag = 0; } if (st->codecpar->codec_id == AV_CODEC_ID_AAC) st->need_parsing = AVSTREAM_PARSE_NONE; else st->need_parsing = AVSTREAM_PARSE_FULL; /* We have to init the frame size at some point .... 
*/ pos2 = avio_tell(pb); if (size >= (pos2 + 8 - pos1 + 24)) { asf_st->ds_span = avio_r8(pb); asf_st->ds_packet_size = avio_rl16(pb); asf_st->ds_chunk_size = avio_rl16(pb); avio_rl16(pb); // ds_data_size avio_r8(pb); // ds_silence_data } if (asf_st->ds_span > 1) { if (!asf_st->ds_chunk_size || (asf_st->ds_packet_size / asf_st->ds_chunk_size <= 1) || asf_st->ds_packet_size % asf_st->ds_chunk_size) asf_st->ds_span = 0; // disable descrambling } } else if (type == AVMEDIA_TYPE_VIDEO && size - (avio_tell(pb) - pos1 + 24) >= 51) { avio_rl32(pb); avio_rl32(pb); avio_r8(pb); avio_rl16(pb); /* size */ sizeX = avio_rl32(pb); /* size */ st->codecpar->width = avio_rl32(pb); st->codecpar->height = avio_rl32(pb); /* not available for asf */ avio_rl16(pb); /* panes */ st->codecpar->bits_per_coded_sample = avio_rl16(pb); /* depth */ tag1 = avio_rl32(pb); avio_skip(pb, 20); if (sizeX > 40) { st->codecpar->extradata_size = ffio_limit(pb, sizeX - 40); st->codecpar->extradata = av_mallocz(st->codecpar->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE); if (!st->codecpar->extradata) return AVERROR(ENOMEM); avio_read(pb, st->codecpar->extradata, st->codecpar->extradata_size); } /* Extract palette from extradata if bpp <= 8 */ /* This code assumes that extradata contains only palette */ /* This is true for all paletted codecs implemented in libavcodec */ if (st->codecpar->extradata_size && (st->codecpar->bits_per_coded_sample <= 8)) { #if HAVE_BIGENDIAN int i; for (i = 0; i < FFMIN(st->codecpar->extradata_size, AVPALETTE_SIZE) / 4; i++) asf_st->palette[i] = av_bswap32(((uint32_t *)st->codecpar->extradata)[i]); #else memcpy(asf_st->palette, st->codecpar->extradata, FFMIN(st->codecpar->extradata_size, AVPALETTE_SIZE)); #endif asf_st->palette_changed = 1; } st->codecpar->codec_tag = tag1; st->codecpar->codec_id = ff_codec_get_id(ff_codec_bmp_tags, tag1); if (tag1 == MKTAG('D', 'V', 'R', ' ')) { st->need_parsing = AVSTREAM_PARSE_FULL; /* issue658 contains wrong w/h and MS even puts a fake seq header * with wrong w/h in extradata while a correct one is in the stream. * maximum lameness */ st->codecpar->width = st->codecpar->height = 0; av_freep(&st->codecpar->extradata); st->codecpar->extradata_size = 0; } if (st->codecpar->codec_id == AV_CODEC_ID_H264) st->need_parsing = AVSTREAM_PARSE_FULL_ONCE; if (st->codecpar->codec_id == AV_CODEC_ID_MPEG4) st->need_parsing = AVSTREAM_PARSE_FULL_ONCE; } pos2 = avio_tell(pb); avio_skip(pb, size - (pos2 - pos1 + 24)); return 0; }
static int asf_read_stream_properties(AVFormatContext *s, int64_t size) { ASFContext *asf = s->priv_data; AVIOContext *pb = s->pb; AVStream *st; ASFStream *asf_st; ff_asf_guid g; enum AVMediaType type; int type_specific_size, sizeX; unsigned int tag1; int64_t pos1, pos2, start_time; int test_for_ext_stream_audio, is_dvr_ms_audio = 0; if (s->nb_streams == ASF_MAX_STREAMS) { av_log(s, AV_LOG_ERROR, "too many streams\n"); return AVERROR(EINVAL); } pos1 = avio_tell(pb); st = avformat_new_stream(s, NULL); if (!st) return AVERROR(ENOMEM); avpriv_set_pts_info(st, 32, 1, 1000); /* 32 bit pts in ms */ start_time = asf->hdr.preroll; if (!(asf->hdr.flags & 0x01)) { // if we aren't streaming... int64_t fsize = avio_size(pb); if (fsize <= 0 || (int64_t)asf->hdr.file_size <= 0 || 20*FFABS(fsize - (int64_t)asf->hdr.file_size) < FFMIN(fsize, asf->hdr.file_size)) st->duration = asf->hdr.play_time / (10000000 / 1000) - start_time; } ff_get_guid(pb, &g); test_for_ext_stream_audio = 0; if (!ff_guidcmp(&g, &ff_asf_audio_stream)) { type = AVMEDIA_TYPE_AUDIO; } else if (!ff_guidcmp(&g, &ff_asf_video_stream)) { type = AVMEDIA_TYPE_VIDEO; } else if (!ff_guidcmp(&g, &ff_asf_jfif_media)) { type = AVMEDIA_TYPE_VIDEO; st->codecpar->codec_id = AV_CODEC_ID_MJPEG; } else if (!ff_guidcmp(&g, &ff_asf_command_stream)) { type = AVMEDIA_TYPE_DATA; } else if (!ff_guidcmp(&g, &ff_asf_ext_stream_embed_stream_header)) { test_for_ext_stream_audio = 1; type = AVMEDIA_TYPE_UNKNOWN; } else { return -1; } ff_get_guid(pb, &g); avio_skip(pb, 8); /* total_size */ type_specific_size = avio_rl32(pb); avio_rl32(pb); st->id = avio_rl16(pb) & 0x7f; /* stream id */ asf->asfid2avid[st->id] = s->nb_streams - 1; asf_st = &asf->streams[st->id]; avio_rl32(pb); if (test_for_ext_stream_audio) { ff_get_guid(pb, &g); if (!ff_guidcmp(&g, &ff_asf_ext_stream_audio_stream)) { type = AVMEDIA_TYPE_AUDIO; is_dvr_ms_audio = 1; ff_get_guid(pb, &g); avio_rl32(pb); avio_rl32(pb); avio_rl32(pb); ff_get_guid(pb, &g); avio_rl32(pb); } } st->codecpar->codec_type = type; if (type == AVMEDIA_TYPE_AUDIO) { int ret = ff_get_wav_header(s, pb, st->codecpar, type_specific_size, 0); if (ret < 0) return ret; if (is_dvr_ms_audio) { st->request_probe = 1; st->codecpar->codec_tag = 0; } if (st->codecpar->codec_id == AV_CODEC_ID_AAC) st->need_parsing = AVSTREAM_PARSE_NONE; else st->need_parsing = AVSTREAM_PARSE_FULL; /* We have to init the frame size at some point .... 
*/ pos2 = avio_tell(pb); if (size >= (pos2 + 8 - pos1 + 24)) { asf_st->ds_span = avio_r8(pb); asf_st->ds_packet_size = avio_rl16(pb); asf_st->ds_chunk_size = avio_rl16(pb); avio_rl16(pb); // ds_data_size avio_r8(pb); // ds_silence_data } if (asf_st->ds_span > 1) { if (!asf_st->ds_chunk_size || (asf_st->ds_packet_size / asf_st->ds_chunk_size <= 1) || asf_st->ds_packet_size % asf_st->ds_chunk_size) asf_st->ds_span = 0; // disable descrambling } } else if (type == AVMEDIA_TYPE_VIDEO && size - (avio_tell(pb) - pos1 + 24) >= 51) { avio_rl32(pb); avio_rl32(pb); avio_r8(pb); avio_rl16(pb); /* size */ sizeX = avio_rl32(pb); /* size */ st->codecpar->width = avio_rl32(pb); st->codecpar->height = avio_rl32(pb); /* not available for asf */ avio_rl16(pb); /* panes */ st->codecpar->bits_per_coded_sample = avio_rl16(pb); /* depth */ tag1 = avio_rl32(pb); avio_skip(pb, 20); if (sizeX > 40) { st->codecpar->extradata_size = ffio_limit(pb, sizeX - 40); st->codecpar->extradata = av_mallocz(st->codecpar->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE); if (!st->codecpar->extradata) return AVERROR(ENOMEM); avio_read(pb, st->codecpar->extradata, st->codecpar->extradata_size); } /* Extract palette from extradata if bpp <= 8 */ /* This code assumes that extradata contains only palette */ /* This is true for all paletted codecs implemented in libavcodec */ if (st->codecpar->extradata_size && (st->codecpar->bits_per_coded_sample <= 8)) { #if HAVE_BIGENDIAN int i; for (i = 0; i < FFMIN(st->codecpar->extradata_size, AVPALETTE_SIZE) / 4; i++) asf_st->palette[i] = av_bswap32(((uint32_t *)st->codecpar->extradata)[i]); #else memcpy(asf_st->palette, st->codecpar->extradata, FFMIN(st->codecpar->extradata_size, AVPALETTE_SIZE)); #endif asf_st->palette_changed = 1; } st->codecpar->codec_tag = tag1; st->codecpar->codec_id = ff_codec_get_id(ff_codec_bmp_tags, tag1); if (tag1 == MKTAG('D', 'V', 'R', ' ')) { st->need_parsing = AVSTREAM_PARSE_FULL; /* issue658 contains wrong w/h and MS even puts a fake seq header * with wrong w/h in extradata while a correct one is in the stream. * maximum lameness */ st->codecpar->width = st->codecpar->height = 0; av_freep(&st->codecpar->extradata); st->codecpar->extradata_size = 0; } if (st->codecpar->codec_id == AV_CODEC_ID_H264) st->need_parsing = AVSTREAM_PARSE_FULL_ONCE; if (st->codecpar->codec_id == AV_CODEC_ID_MPEG4) st->need_parsing = AVSTREAM_PARSE_FULL_ONCE; } pos2 = avio_tell(pb); avio_skip(pb, size - (pos2 - pos1 + 24)); return 0; }
C
FFmpeg
0
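Aside, not part of the record: the commit message ("Missing EOF check in loop") points at asf_build_simple_index(); the fix is to test for end-of-file inside the read loop so a truncated file cannot keep it spinning. A simplified sketch of that loop shape using real libavformat helpers; the surrounding function is invented.

#include "avformat.h"

static int build_index_sketch(AVFormatContext *s, int entries)
{
        AVIOContext *pb = s->pb;
        int i;

        for (i = 0; i < entries; i++) {
                if (avio_feof(pb))              /* the missing check: bail out on EOF */
                        return AVERROR_INVALIDDATA;
                (void)avio_rl32(pb);            /* consume the next index entry */
        }
        return 0;
}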
CVE-2017-5107
https://www.cvedetails.com/cve/CVE-2017-5107/
CWE-200
https://github.com/chromium/chromium/commit/c25b198675380f713a56649c857b4367601d4a3d
c25b198675380f713a56649c857b4367601d4a3d
[Lock Screen Media Controls] Tweak UI based on new mocks This CL rearranges the different components of the CrOS lock screen media controls based on the newest mocks. This involves resizing most of the child views and their spacings. The artwork was also resized and re-positioned. Additionally, the close button was moved from the main view to the header row child view. Artist and title data about the current session will eventually be placed to the right of the artwork, but right now this space is empty. See the bug for before and after pictures. Bug: 991647 Change-Id: I7b97f31982ccf2912bd2564d5241bfd849d21d92 Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1746554 Reviewed-by: Xiyuan Xia <[email protected]> Reviewed-by: Becca Hughes <[email protected]> Commit-Queue: Mia Bergeron <[email protected]> Cr-Commit-Position: refs/heads/master@{#686253}
void LockScreenMediaControlsView::SeekTo(double seek_progress) { DCHECK(position_.has_value()); media_controller_remote_->SeekTo(seek_progress * position_->duration()); }
void LockScreenMediaControlsView::SeekTo(double seek_progress) { DCHECK(position_.has_value()); media_controller_remote_->SeekTo(seek_progress * position_->duration()); }
C
Chrome
0
CVE-2015-1221
https://www.cvedetails.com/cve/CVE-2015-1221/
null
https://github.com/chromium/chromium/commit/a69c7b5d863dacbb08bfaa04359e3bc0bb4470dc
a69c7b5d863dacbb08bfaa04359e3bc0bb4470dc
Make TypingCommand::insertText() to take SelectionInDOMTree instead of VisibleSelection This patch makes |TypingCommand::insertText()| to take |SelectionInDOMTree| instead of |VisibleSelection| to reduce usage of |VisibleSelection| for improving code health. BUG=657237 TEST=n/a Review-Url: https://codereview.chromium.org/2733183002 Cr-Commit-Position: refs/heads/master@{#455368}
static Range* findRangeOfStringAlgorithm( Document& document, const String& target, const EphemeralRangeTemplate<Strategy>& referenceRange, FindOptions options) { if (target.isEmpty()) return nullptr; EphemeralRangeTemplate<Strategy> documentRange = EphemeralRangeTemplate<Strategy>::rangeOfContents(document); EphemeralRangeTemplate<Strategy> searchRange(documentRange); bool forward = !(options & Backwards); bool startInReferenceRange = false; if (referenceRange.isNotNull()) { startInReferenceRange = options & StartInSelection; if (forward && startInReferenceRange) searchRange = EphemeralRangeTemplate<Strategy>( referenceRange.startPosition(), documentRange.endPosition()); else if (forward) searchRange = EphemeralRangeTemplate<Strategy>( referenceRange.endPosition(), documentRange.endPosition()); else if (startInReferenceRange) searchRange = EphemeralRangeTemplate<Strategy>( documentRange.startPosition(), referenceRange.endPosition()); else searchRange = EphemeralRangeTemplate<Strategy>( documentRange.startPosition(), referenceRange.startPosition()); } Range* resultRange = findStringBetweenPositions(target, searchRange, options); if (resultRange && startInReferenceRange && normalizeRange(EphemeralRangeTemplate<Strategy>(resultRange)) == referenceRange) { if (forward) searchRange = EphemeralRangeTemplate<Strategy>( fromPositionInDOMTree<Strategy>(resultRange->endPosition()), searchRange.endPosition()); else searchRange = EphemeralRangeTemplate<Strategy>( searchRange.startPosition(), fromPositionInDOMTree<Strategy>(resultRange->startPosition())); resultRange = findStringBetweenPositions(target, searchRange, options); } if (!resultRange && options & WrapAround) return findStringBetweenPositions(target, documentRange, options); return resultRange; }
static Range* findRangeOfStringAlgorithm( Document& document, const String& target, const EphemeralRangeTemplate<Strategy>& referenceRange, FindOptions options) { if (target.isEmpty()) return nullptr; EphemeralRangeTemplate<Strategy> documentRange = EphemeralRangeTemplate<Strategy>::rangeOfContents(document); EphemeralRangeTemplate<Strategy> searchRange(documentRange); bool forward = !(options & Backwards); bool startInReferenceRange = false; if (referenceRange.isNotNull()) { startInReferenceRange = options & StartInSelection; if (forward && startInReferenceRange) searchRange = EphemeralRangeTemplate<Strategy>( referenceRange.startPosition(), documentRange.endPosition()); else if (forward) searchRange = EphemeralRangeTemplate<Strategy>( referenceRange.endPosition(), documentRange.endPosition()); else if (startInReferenceRange) searchRange = EphemeralRangeTemplate<Strategy>( documentRange.startPosition(), referenceRange.endPosition()); else searchRange = EphemeralRangeTemplate<Strategy>( documentRange.startPosition(), referenceRange.startPosition()); } Range* resultRange = findStringBetweenPositions(target, searchRange, options); if (resultRange && startInReferenceRange && normalizeRange(EphemeralRangeTemplate<Strategy>(resultRange)) == referenceRange) { if (forward) searchRange = EphemeralRangeTemplate<Strategy>( fromPositionInDOMTree<Strategy>(resultRange->endPosition()), searchRange.endPosition()); else searchRange = EphemeralRangeTemplate<Strategy>( searchRange.startPosition(), fromPositionInDOMTree<Strategy>(resultRange->startPosition())); resultRange = findStringBetweenPositions(target, searchRange, options); } if (!resultRange && options & WrapAround) return findStringBetweenPositions(target, documentRange, options); return resultRange; }
C
Chrome
0
null
null
null
https://github.com/chromium/chromium/commit/44a637b47793512bfb1d2589d43b8dc492a97629
44a637b47793512bfb1d2589d43b8dc492a97629
Desist libxml from continuing the parse after a SAX callback has stopped the parse. Attempt 2 -- now with less compile fail on Mac / Clang. BUG=95465 TBR=cdn TEST=covered by existing tests under ASAN Review URL: http://codereview.chromium.org/7892003 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@100953 0039d316-1c4b-4281-b951-d872f2087c98
xmlCtxtReadFd(xmlParserCtxtPtr ctxt, int fd, const char *URL, const char *encoding, int options) { xmlParserInputBufferPtr input; xmlParserInputPtr stream; if (fd < 0) return (NULL); if (ctxt == NULL) return (NULL); xmlCtxtReset(ctxt); input = xmlParserInputBufferCreateFd(fd, XML_CHAR_ENCODING_NONE); if (input == NULL) return (NULL); input->closecallback = NULL; stream = xmlNewIOInputStream(ctxt, input, XML_CHAR_ENCODING_NONE); if (stream == NULL) { xmlFreeParserInputBuffer(input); return (NULL); } inputPush(ctxt, stream); return (xmlDoRead(ctxt, URL, encoding, options, 1)); }
xmlCtxtReadFd(xmlParserCtxtPtr ctxt, int fd, const char *URL, const char *encoding, int options) { xmlParserInputBufferPtr input; xmlParserInputPtr stream; if (fd < 0) return (NULL); if (ctxt == NULL) return (NULL); xmlCtxtReset(ctxt); input = xmlParserInputBufferCreateFd(fd, XML_CHAR_ENCODING_NONE); if (input == NULL) return (NULL); input->closecallback = NULL; stream = xmlNewIOInputStream(ctxt, input, XML_CHAR_ENCODING_NONE); if (stream == NULL) { xmlFreeParserInputBuffer(input); return (NULL); } inputPush(ctxt, stream); return (xmlDoRead(ctxt, URL, encoding, options, 1)); }
C
Chrome
0
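Loosely hedged aside, not a dataset field: the commit wants libxml's parse loops to notice that a SAX callback has already stopped the parse instead of continuing to consume input. A sketch of that shape; ctxt->instate and XML_PARSER_EOF are real libxml2 names, but the loop condition here is a simplification, not the actual patched loop.

#include <libxml/parser.h>

static void parse_loop_sketch(xmlParserCtxtPtr ctxt)
{
        while (ctxt->input->cur < ctxt->input->end) {   /* simplified "more input" test */
                if (ctxt->instate == XML_PARSER_EOF)    /* a callback halted the parse */
                        return;
                /* ... dispatch the next SAX event ... */
        }
}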
CVE-2017-9217
https://www.cvedetails.com/cve/CVE-2017-9217/
CWE-20
https://github.com/systemd/systemd/commit/a924f43f30f9c4acaf70618dd2a055f8b0f166be
a924f43f30f9c4acaf70618dd2a055f8b0f166be
resolved: bugfix of null pointer p->question dereferencing (#6020) See https://bugs.launchpad.net/ubuntu/+source/systemd/+bug/1621396
DnsPacket *dns_packet_ref(DnsPacket *p) { if (!p) return NULL; assert(!p->on_stack); assert(p->n_ref > 0); p->n_ref++; return p; }
DnsPacket *dns_packet_ref(DnsPacket *p) { if (!p) return NULL; assert(!p->on_stack); assert(p->n_ref > 0); p->n_ref++; return p; }
C
systemd
0
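Aside for illustration only: the fix referenced above is a NULL check on p->question before it is dereferenced. A hedged sketch of that guard; the n_keys field and the surrounding function are assumptions based on resolved's data model, not the exact patch, and the internal headers are omitted.

static int reply_sketch(DnsPacket *p) {
        if (!p)
                return -EINVAL;
        if (!p->question || p->question->n_keys == 0)   /* assumed fields */
                return -EBADMSG;                        /* no question section: nothing to answer */
        /* ... continue, now safe to use p->question ... */
        return 0;
}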
CVE-2018-16427
https://www.cvedetails.com/cve/CVE-2018-16427/
CWE-125
https://github.com/OpenSC/OpenSC/pull/1447/commits/8fe377e93b4b56060e5bbfb6f3142ceaeca744fa
8fe377e93b4b56060e5bbfb6f3142ceaeca744fa
fixed out of bounds reads Thanks to Eric Sesterhenn from X41 D-SEC GmbH for reporting and suggesting security fixes.
static int piv_find_discovery(sc_card_t *card) { int r = 0; u8 rbuf[256]; size_t rbuflen = sizeof(rbuf); u8 * arbuf = rbuf; piv_private_data_t * priv = PIV_DATA(card); SC_FUNC_CALLED(card->ctx, SC_LOG_DEBUG_VERBOSE); /* * During piv_match or piv_card_reader_lock_obtained, * we use the discovery object to test if card present, and * if PIV AID is active. So we can not use the cache */ /* If not valid, read, cache and test */ if (!(priv->obj_cache[PIV_OBJ_DISCOVERY].flags & PIV_OBJ_CACHE_VALID)) { r = piv_process_discovery(card); } else { /* if already in cache,force read */ r = piv_get_data(card, PIV_OBJ_DISCOVERY, &arbuf, &rbuflen); if (r >= 0) /* make sure it is PIV AID */ r = piv_parse_discovery(card, rbuf, rbuflen, 1); } LOG_FUNC_RETURN(card->ctx, r); }
static int piv_find_discovery(sc_card_t *card) { int r = 0; u8 rbuf[256]; size_t rbuflen = sizeof(rbuf); u8 * arbuf = rbuf; piv_private_data_t * priv = PIV_DATA(card); SC_FUNC_CALLED(card->ctx, SC_LOG_DEBUG_VERBOSE); /* * During piv_match or piv_card_reader_lock_obtained, * we use the discovery object to test if card present, and * if PIV AID is active. So we can not use the cache */ /* If not valid, read, cache and test */ if (!(priv->obj_cache[PIV_OBJ_DISCOVERY].flags & PIV_OBJ_CACHE_VALID)) { r = piv_process_discovery(card); } else { /* if already in cache,force read */ r = piv_get_data(card, PIV_OBJ_DISCOVERY, &arbuf, &rbuflen); if (r >= 0) /* make sure it is PIV AID */ r = piv_parse_discovery(card, rbuf, rbuflen, 1); } LOG_FUNC_RETURN(card->ctx, r); }
C
OpenSC
0
CVE-2014-1713
https://www.cvedetails.com/cve/CVE-2014-1713/
CWE-399
https://github.com/chromium/chromium/commit/f85a87ec670ad0fce9d98d90c9a705b72a288154
f85a87ec670ad0fce9d98d90c9a705b72a288154
document.location bindings fix BUG=352374 [email protected] Review URL: https://codereview.chromium.org/196343011 git-svn-id: svn://svn.chromium.org/blink/trunk@169176 bbb929c8-8fbe-4397-9dbb-9b2b20218538
static void shadowRootAttributeAttributeSetterCallback(v8::Local<v8::String>, v8::Local<v8::Value> jsValue, const v8::PropertyCallbackInfo<void>& info) { TRACE_EVENT_SET_SAMPLING_STATE("Blink", "DOMSetter"); TestObjectPythonV8Internal::shadowRootAttributeAttributeSetter(jsValue, info); TRACE_EVENT_SET_SAMPLING_STATE("V8", "V8Execution"); }
static void shadowRootAttributeAttributeSetterCallback(v8::Local<v8::String>, v8::Local<v8::Value> jsValue, const v8::PropertyCallbackInfo<void>& info) { TRACE_EVENT_SET_SAMPLING_STATE("Blink", "DOMSetter"); TestObjectPythonV8Internal::shadowRootAttributeAttributeSetter(jsValue, info); TRACE_EVENT_SET_SAMPLING_STATE("V8", "V8Execution"); }
C
Chrome
0
CVE-2014-9665
https://www.cvedetails.com/cve/CVE-2014-9665/
CWE-119
https://git.savannah.gnu.org/cgit/freetype/freetype2.git/commit/?id=b3500af717010137046ec4076d1e1c0641e33727
b3500af717010137046ec4076d1e1c0641e33727
null
Conic_To( RAS_ARGS Long cx, Long cy, Long x, Long y ) { Long y1, y2, y3, x3, ymin, ymax; TStates state_bez; ras.arc = ras.arcs; ras.arc[2].x = ras.lastX; ras.arc[2].y = ras.lastY; ras.arc[1].x = cx; ras.arc[1].y = cy; ras.arc[0].x = x; ras.arc[0].y = y; do { y1 = ras.arc[2].y; y2 = ras.arc[1].y; y3 = ras.arc[0].y; x3 = ras.arc[0].x; /* first, categorize the Bezier arc */ if ( y1 <= y3 ) { ymin = y1; ymax = y3; } else { ymin = y3; ymax = y1; } if ( y2 < ymin || y2 > ymax ) { /* this arc has no given direction, split it! */ Split_Conic( ras.arc ); ras.arc += 2; } else if ( y1 == y3 ) { /* this arc is flat, ignore it and pop it from the Bezier stack */ ras.arc -= 2; } else { /* the arc is y-monotonous, either ascending or descending */ /* detect a change of direction */ state_bez = y1 < y3 ? Ascending_State : Descending_State; if ( ras.state != state_bez ) { Bool o = state_bez == Ascending_State ? IS_BOTTOM_OVERSHOOT( y1 ) : IS_TOP_OVERSHOOT( y1 ); /* finalize current profile if any */ if ( ras.state != Unknown_State && End_Profile( RAS_VARS o ) ) goto Fail; /* create a new profile */ if ( New_Profile( RAS_VARS state_bez, o ) ) goto Fail; } /* now call the appropriate routine */ if ( state_bez == Ascending_State ) { if ( Bezier_Up( RAS_VARS 2, Split_Conic, ras.minY, ras.maxY ) ) goto Fail; } else if ( Bezier_Down( RAS_VARS 2, Split_Conic, ras.minY, ras.maxY ) ) goto Fail; } } while ( ras.arc >= ras.arcs ); ras.lastX = x3; ras.lastY = y3; return SUCCESS; Fail: return FAILURE; }
Conic_To( RAS_ARGS Long cx, Long cy, Long x, Long y ) { Long y1, y2, y3, x3, ymin, ymax; TStates state_bez; ras.arc = ras.arcs; ras.arc[2].x = ras.lastX; ras.arc[2].y = ras.lastY; ras.arc[1].x = cx; ras.arc[1].y = cy; ras.arc[0].x = x; ras.arc[0].y = y; do { y1 = ras.arc[2].y; y2 = ras.arc[1].y; y3 = ras.arc[0].y; x3 = ras.arc[0].x; /* first, categorize the Bezier arc */ if ( y1 <= y3 ) { ymin = y1; ymax = y3; } else { ymin = y3; ymax = y1; } if ( y2 < ymin || y2 > ymax ) { /* this arc has no given direction, split it! */ Split_Conic( ras.arc ); ras.arc += 2; } else if ( y1 == y3 ) { /* this arc is flat, ignore it and pop it from the Bezier stack */ ras.arc -= 2; } else { /* the arc is y-monotonous, either ascending or descending */ /* detect a change of direction */ state_bez = y1 < y3 ? Ascending_State : Descending_State; if ( ras.state != state_bez ) { Bool o = state_bez == Ascending_State ? IS_BOTTOM_OVERSHOOT( y1 ) : IS_TOP_OVERSHOOT( y1 ); /* finalize current profile if any */ if ( ras.state != Unknown_State && End_Profile( RAS_VARS o ) ) goto Fail; /* create a new profile */ if ( New_Profile( RAS_VARS state_bez, o ) ) goto Fail; } /* now call the appropriate routine */ if ( state_bez == Ascending_State ) { if ( Bezier_Up( RAS_VARS 2, Split_Conic, ras.minY, ras.maxY ) ) goto Fail; } else if ( Bezier_Down( RAS_VARS 2, Split_Conic, ras.minY, ras.maxY ) ) goto Fail; } } while ( ras.arc >= ras.arcs ); ras.lastX = x3; ras.lastY = y3; return SUCCESS; Fail: return FAILURE; }
C
savannah
0
CVE-2015-8374
https://www.cvedetails.com/cve/CVE-2015-8374/
CWE-200
https://github.com/torvalds/linux/commit/0305cd5f7fca85dae392b9ba85b116896eb7c1c7
0305cd5f7fca85dae392b9ba85b116896eb7c1c7
Btrfs: fix truncation of compressed and inlined extents When truncating a file to a smaller size which consists of an inline extent that is compressed, we did not discard (or made unusable) the data between the new file size and the old file size, wasting metadata space and allowing for the truncated data to be leaked and the data corruption/loss mentioned below. We were also not correctly decrementing the number of bytes used by the inode, we were setting it to zero, giving a wrong report for callers of the stat(2) syscall. The fsck tool also reported an error about a mismatch between the nbytes of the file versus the real space used by the file. Now because we weren't discarding the truncated region of the file, it was possible for a caller of the clone ioctl to actually read the data that was truncated, allowing for a security breach without requiring root access to the system, using only standard filesystem operations. The scenario is the following: 1) User A creates a file which consists of an inline and compressed extent with a size of 2000 bytes - the file is not accessible to any other users (no read, write or execution permission for anyone else); 2) The user truncates the file to a size of 1000 bytes; 3) User A makes the file world readable; 4) User B creates a file consisting of an inline extent of 2000 bytes; 5) User B issues a clone operation from user A's file into its own file (using a length argument of 0, clone the whole range); 6) User B now gets to see the 1000 bytes that user A truncated from its file before it made its file world readbale. User B also lost the bytes in the range [1000, 2000[ bytes from its own file, but that might be ok if his/her intention was reading stale data from user A that was never supposed to be public. Note that this contrasts with the case where we truncate a file from 2000 bytes to 1000 bytes and then truncate it back from 1000 to 2000 bytes. In this case reading any byte from the range [1000, 2000[ will return a value of 0x00, instead of the original data. This problem exists since the clone ioctl was added and happens both with and without my recent data loss and file corruption fixes for the clone ioctl (patch "Btrfs: fix file corruption and data loss after cloning inline extents"). So fix this by truncating the compressed inline extents as we do for the non-compressed case, which involves decompressing, if the data isn't already in the page cache, compressing the truncated version of the extent, writing the compressed content into the inline extent and then truncate it. The following test case for fstests reproduces the problem. In order for the test to pass both this fix and my previous fix for the clone ioctl that forbids cloning a smaller inline extent into a larger one, which is titled "Btrfs: fix file corruption and data loss after cloning inline extents", are needed. Without that other fix the test fails in a different way that does not leak the truncated data, instead part of destination file gets replaced with zeroes (because the destination file has a larger inline extent than the source). seq=`basename $0` seqres=$RESULT_DIR/$seq echo "QA output created by $seq" tmp=/tmp/$$ status=1 # failure is the default! trap "_cleanup; exit \$status" 0 1 2 3 15 _cleanup() { rm -f $tmp.* } # get standard environment, filters and checks . ./common/rc . 
./common/filter # real QA test starts here _need_to_be_root _supported_fs btrfs _supported_os Linux _require_scratch _require_cloner rm -f $seqres.full _scratch_mkfs >>$seqres.full 2>&1 _scratch_mount "-o compress" # Create our test files. File foo is going to be the source of a clone operation # and consists of a single inline extent with an uncompressed size of 512 bytes, # while file bar consists of a single inline extent with an uncompressed size of # 256 bytes. For our test's purpose, it's important that file bar has an inline # extent with a size smaller than foo's inline extent. $XFS_IO_PROG -f -c "pwrite -S 0xa1 0 128" \ -c "pwrite -S 0x2a 128 384" \ $SCRATCH_MNT/foo | _filter_xfs_io $XFS_IO_PROG -f -c "pwrite -S 0xbb 0 256" $SCRATCH_MNT/bar | _filter_xfs_io # Now durably persist all metadata and data. We do this to make sure that we get # on disk an inline extent with a size of 512 bytes for file foo. sync # Now truncate our file foo to a smaller size. Because it consists of a # compressed and inline extent, btrfs did not shrink the inline extent to the # new size (if the extent was not compressed, btrfs would shrink it to 128 # bytes), it only updates the inode's i_size to 128 bytes. $XFS_IO_PROG -c "truncate 128" $SCRATCH_MNT/foo # Now clone foo's inline extent into bar. # This clone operation should fail with errno EOPNOTSUPP because the source # file consists only of an inline extent and the file's size is smaller than # the inline extent of the destination (128 bytes < 256 bytes). However the # clone ioctl was not prepared to deal with a file that has a size smaller # than the size of its inline extent (something that happens only for compressed # inline extents), resulting in copying the full inline extent from the source # file into the destination file. # # Note that btrfs' clone operation for inline extents consists of removing the # inline extent from the destination inode and copy the inline extent from the # source inode into the destination inode, meaning that if the destination # inode's inline extent is larger (N bytes) than the source inode's inline # extent (M bytes), some bytes (N - M bytes) will be lost from the destination # file. Btrfs could copy the source inline extent's data into the destination's # inline extent so that we would not lose any data, but that's currently not # done due to the complexity that would be needed to deal with such cases # (specially when one or both extents are compressed), returning EOPNOTSUPP, as # it's normally not a very common case to clone very small files (only case # where we get inline extents) and copying inline extents does not save any # space (unlike for normal, non-inlined extents). $CLONER_PROG -s 0 -d 0 -l 0 $SCRATCH_MNT/foo $SCRATCH_MNT/bar # Now because the above clone operation used to succeed, and due to foo's inline # extent not being shinked by the truncate operation, our file bar got the whole # inline extent copied from foo, making us lose the last 128 bytes from bar # which got replaced by the bytes in range [128, 256[ from foo before foo was # truncated - in other words, data loss from bar and being able to read old and # stale data from foo that should not be possible to read anymore through normal # filesystem operations. Contrast with the case where we truncate a file from a # size N to a smaller size M, truncate it back to size N and then read the range # [M, N[, we should always get the value 0x00 for all the bytes in that range. 
# We expected the clone operation to fail with errno EOPNOTSUPP and therefore # not modify our file's bar data/metadata. So its content should be 256 bytes # long with all bytes having the value 0xbb. # # Without the btrfs bug fix, the clone operation succeeded and resulted in # leaking truncated data from foo, the bytes that belonged to its range # [128, 256[, and losing data from bar in that same range. So reading the # file gave us the following content: # # 0000000 a1 a1 a1 a1 a1 a1 a1 a1 a1 a1 a1 a1 a1 a1 a1 a1 # * # 0000200 2a 2a 2a 2a 2a 2a 2a 2a 2a 2a 2a 2a 2a 2a 2a 2a # * # 0000400 echo "File bar's content after the clone operation:" od -t x1 $SCRATCH_MNT/bar # Also because the foo's inline extent was not shrunk by the truncate # operation, btrfs' fsck, which is run by the fstests framework everytime a # test completes, failed reporting the following error: # # root 5 inode 257 errors 400, nbytes wrong status=0 exit Cc: [email protected] Signed-off-by: Filipe Manana <[email protected]>
static int wait_snapshoting_atomic_t(atomic_t *a) { schedule(); return 0; }
static int wait_snapshoting_atomic_t(atomic_t *a) { schedule(); return 0; }
C
linux
0
null
null
null
https://github.com/chromium/chromium/commit/ea3d1d84be3d6f97bf50e76511c9e26af6895533
ea3d1d84be3d6f97bf50e76511c9e26af6895533
Fix passing pointers between processes. BUG=31880 Review URL: http://codereview.chromium.org/558036 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@37555 0039d316-1c4b-4281-b951-d872f2087c98
bool WebPluginImpl::acceptsInputEvents() { return accepts_input_events_; }
bool WebPluginImpl::acceptsInputEvents() { return accepts_input_events_; }
C
Chrome
0
CVE-2013-2635
https://www.cvedetails.com/cve/CVE-2013-2635/
CWE-399
https://github.com/torvalds/linux/commit/84d73cd3fb142bf1298a8c13fd4ca50fd2432372
84d73cd3fb142bf1298a8c13fd4ca50fd2432372
rtnl: fix info leak on RTM_GETLINK request for VF devices Initialize the mac address buffer with 0 as the driver specific function will probably not fill the whole buffer. In fact, all in-kernel drivers fill only ETH_ALEN of the MAX_ADDR_LEN bytes, i.e. 6 of the 32 possible bytes. Therefore we currently leak 26 bytes of stack memory to userland via the netlink interface. Signed-off-by: Mathias Krause <[email protected]> Signed-off-by: David S. Miller <[email protected]>
static void copy_rtnl_link_stats64(void *v, const struct rtnl_link_stats64 *b) { memcpy(v, b, sizeof(*b)); }
static void copy_rtnl_link_stats64(void *v, const struct rtnl_link_stats64 *b) { memcpy(v, b, sizeof(*b)); }
C
linux
0
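Illustrative aside, not a dataset field: the leak described above comes from handing a partially filled buffer to userspace, since drivers write only ETH_ALEN of the MAX_ADDR_LEN mac bytes. The fix pattern is to zero the structure before the driver callback runs; ndo_get_vf_config() and struct ifla_vf_info are real kernel names, the wrapper is hypothetical.

#include <linux/netdevice.h>
#include <linux/if_link.h>
#include <linux/string.h>

static int get_vf_config_sketch(struct net_device *dev, int vf,
                                struct ifla_vf_info *ivi)
{
        memset(ivi, 0, sizeof(*ivi));   /* no uninitialised stack bytes reach netlink */
        return dev->netdev_ops->ndo_get_vf_config(dev, vf, ivi);
}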
CVE-2019-13136
https://www.cvedetails.com/cve/CVE-2019-13136/
CWE-190
https://github.com/ImageMagick/ImageMagick/commit/fe5f4b85e6b1b54d3b4588a77133c06ade46d891
fe5f4b85e6b1b54d3b4588a77133c06ade46d891
https://github.com/ImageMagick/ImageMagick/issues/1602
static MagickBooleanType WritePTIFImage(const ImageInfo *image_info, Image *image,ExceptionInfo *exception) { Image *images, *next, *pyramid_image; ImageInfo *write_info; MagickBooleanType status; PointInfo resolution; size_t columns, rows; /* Create pyramid-encoded TIFF image. */ images=NewImageList(); for (next=image; next != (Image *) NULL; next=GetNextImageInList(next)) { Image *clone_image; clone_image=CloneImage(next,0,0,MagickFalse,exception); if (clone_image == (Image *) NULL) break; clone_image->previous=NewImageList(); clone_image->next=NewImageList(); (void) SetImageProperty(clone_image,"tiff:subfiletype","none",exception); AppendImageToList(&images,clone_image); columns=next->columns; rows=next->rows; resolution=next->resolution; while ((columns > 64) && (rows > 64)) { columns/=2; rows/=2; resolution.x/=2; resolution.y/=2; pyramid_image=ResizeImage(next,columns,rows,image->filter,exception); if (pyramid_image == (Image *) NULL) break; DestroyBlob(pyramid_image); pyramid_image->blob=ReferenceBlob(next->blob); pyramid_image->resolution=resolution; (void) SetImageProperty(pyramid_image,"tiff:subfiletype","REDUCEDIMAGE", exception); AppendImageToList(&images,pyramid_image); } } status=MagickFalse; if (images != (Image *) NULL) { /* Write pyramid-encoded TIFF image. */ images=GetFirstImageInList(images); write_info=CloneImageInfo(image_info); write_info->adjoin=MagickTrue; (void) CopyMagickString(write_info->magick,"TIFF",MagickPathExtent); (void) CopyMagickString(images->magick,"TIFF",MagickPathExtent); status=WriteTIFFImage(write_info,images,exception); images=DestroyImageList(images); write_info=DestroyImageInfo(write_info); } return(status); }
static MagickBooleanType WritePTIFImage(const ImageInfo *image_info, Image *image,ExceptionInfo *exception) { Image *images, *next, *pyramid_image; ImageInfo *write_info; MagickBooleanType status; PointInfo resolution; size_t columns, rows; /* Create pyramid-encoded TIFF image. */ images=NewImageList(); for (next=image; next != (Image *) NULL; next=GetNextImageInList(next)) { Image *clone_image; clone_image=CloneImage(next,0,0,MagickFalse,exception); if (clone_image == (Image *) NULL) break; clone_image->previous=NewImageList(); clone_image->next=NewImageList(); (void) SetImageProperty(clone_image,"tiff:subfiletype","none",exception); AppendImageToList(&images,clone_image); columns=next->columns; rows=next->rows; resolution=next->resolution; while ((columns > 64) && (rows > 64)) { columns/=2; rows/=2; resolution.x/=2; resolution.y/=2; pyramid_image=ResizeImage(next,columns,rows,image->filter,exception); if (pyramid_image == (Image *) NULL) break; DestroyBlob(pyramid_image); pyramid_image->blob=ReferenceBlob(next->blob); pyramid_image->resolution=resolution; (void) SetImageProperty(pyramid_image,"tiff:subfiletype","REDUCEDIMAGE", exception); AppendImageToList(&images,pyramid_image); } } status=MagickFalse; if (images != (Image *) NULL) { /* Write pyramid-encoded TIFF image. */ images=GetFirstImageInList(images); write_info=CloneImageInfo(image_info); write_info->adjoin=MagickTrue; (void) CopyMagickString(write_info->magick,"TIFF",MagickPathExtent); (void) CopyMagickString(images->magick,"TIFF",MagickPathExtent); status=WriteTIFFImage(write_info,images,exception); images=DestroyImageList(images); write_info=DestroyImageInfo(write_info); } return(status); }
C
ImageMagick
0
CVE-2018-8087
https://www.cvedetails.com/cve/CVE-2018-8087/
CWE-772
https://github.com/torvalds/linux/commit/0ddcff49b672239dda94d70d0fcf50317a9f4b51
0ddcff49b672239dda94d70d0fcf50317a9f4b51
mac80211_hwsim: fix possible memory leak in hwsim_new_radio_nl() 'hwname' is malloced in hwsim_new_radio_nl() and should be freed before leaving from the error handling cases, otherwise it will cause memory leak. Fixes: ff4dd73dd2b4 ("mac80211_hwsim: check HWSIM_ATTR_RADIO_NAME length") Signed-off-by: Wei Yongjun <[email protected]> Reviewed-by: Ben Hutchings <[email protected]> Signed-off-by: Johannes Berg <[email protected]>
static int hwsim_cloned_frame_received_nl(struct sk_buff *skb_2, struct genl_info *info) { struct mac80211_hwsim_data *data2; struct ieee80211_rx_status rx_status; const u8 *dst; int frame_data_len; void *frame_data; struct sk_buff *skb = NULL; if (!info->attrs[HWSIM_ATTR_ADDR_RECEIVER] || !info->attrs[HWSIM_ATTR_FRAME] || !info->attrs[HWSIM_ATTR_RX_RATE] || !info->attrs[HWSIM_ATTR_SIGNAL]) goto out; dst = (void *)nla_data(info->attrs[HWSIM_ATTR_ADDR_RECEIVER]); frame_data_len = nla_len(info->attrs[HWSIM_ATTR_FRAME]); frame_data = (void *)nla_data(info->attrs[HWSIM_ATTR_FRAME]); /* Allocate new skb here */ skb = alloc_skb(frame_data_len, GFP_KERNEL); if (skb == NULL) goto err; if (frame_data_len > IEEE80211_MAX_DATA_LEN) goto err; /* Copy the data */ skb_put_data(skb, frame_data, frame_data_len); data2 = get_hwsim_data_ref_from_addr(dst); if (!data2) goto out; if (hwsim_net_get_netgroup(genl_info_net(info)) != data2->netgroup) goto out; if (info->snd_portid != data2->wmediumd) goto out; /* check if radio is configured properly */ if (data2->idle || !data2->started) goto out; /* A frame is received from user space */ memset(&rx_status, 0, sizeof(rx_status)); if (info->attrs[HWSIM_ATTR_FREQ]) { /* throw away off-channel packets, but allow both the temporary * ("hw" scan/remain-on-channel) and regular channel, since the * internal datapath also allows this */ mutex_lock(&data2->mutex); rx_status.freq = nla_get_u32(info->attrs[HWSIM_ATTR_FREQ]); if (rx_status.freq != data2->channel->center_freq && (!data2->tmp_chan || rx_status.freq != data2->tmp_chan->center_freq)) { mutex_unlock(&data2->mutex); goto out; } mutex_unlock(&data2->mutex); } else { rx_status.freq = data2->channel->center_freq; } rx_status.band = data2->channel->band; rx_status.rate_idx = nla_get_u32(info->attrs[HWSIM_ATTR_RX_RATE]); rx_status.signal = nla_get_u32(info->attrs[HWSIM_ATTR_SIGNAL]); memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status)); data2->rx_pkts++; data2->rx_bytes += skb->len; ieee80211_rx_irqsafe(data2->hw, skb); return 0; err: pr_debug("mac80211_hwsim: error occurred in %s\n", __func__); out: dev_kfree_skb(skb); return -EINVAL; }
static int hwsim_cloned_frame_received_nl(struct sk_buff *skb_2, struct genl_info *info) { struct mac80211_hwsim_data *data2; struct ieee80211_rx_status rx_status; const u8 *dst; int frame_data_len; void *frame_data; struct sk_buff *skb = NULL; if (!info->attrs[HWSIM_ATTR_ADDR_RECEIVER] || !info->attrs[HWSIM_ATTR_FRAME] || !info->attrs[HWSIM_ATTR_RX_RATE] || !info->attrs[HWSIM_ATTR_SIGNAL]) goto out; dst = (void *)nla_data(info->attrs[HWSIM_ATTR_ADDR_RECEIVER]); frame_data_len = nla_len(info->attrs[HWSIM_ATTR_FRAME]); frame_data = (void *)nla_data(info->attrs[HWSIM_ATTR_FRAME]); /* Allocate new skb here */ skb = alloc_skb(frame_data_len, GFP_KERNEL); if (skb == NULL) goto err; if (frame_data_len > IEEE80211_MAX_DATA_LEN) goto err; /* Copy the data */ skb_put_data(skb, frame_data, frame_data_len); data2 = get_hwsim_data_ref_from_addr(dst); if (!data2) goto out; if (hwsim_net_get_netgroup(genl_info_net(info)) != data2->netgroup) goto out; if (info->snd_portid != data2->wmediumd) goto out; /* check if radio is configured properly */ if (data2->idle || !data2->started) goto out; /* A frame is received from user space */ memset(&rx_status, 0, sizeof(rx_status)); if (info->attrs[HWSIM_ATTR_FREQ]) { /* throw away off-channel packets, but allow both the temporary * ("hw" scan/remain-on-channel) and regular channel, since the * internal datapath also allows this */ mutex_lock(&data2->mutex); rx_status.freq = nla_get_u32(info->attrs[HWSIM_ATTR_FREQ]); if (rx_status.freq != data2->channel->center_freq && (!data2->tmp_chan || rx_status.freq != data2->tmp_chan->center_freq)) { mutex_unlock(&data2->mutex); goto out; } mutex_unlock(&data2->mutex); } else { rx_status.freq = data2->channel->center_freq; } rx_status.band = data2->channel->band; rx_status.rate_idx = nla_get_u32(info->attrs[HWSIM_ATTR_RX_RATE]); rx_status.signal = nla_get_u32(info->attrs[HWSIM_ATTR_SIGNAL]); memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status)); data2->rx_pkts++; data2->rx_bytes += skb->len; ieee80211_rx_irqsafe(data2->hw, skb); return 0; err: pr_debug("mac80211_hwsim: error occurred in %s\n", __func__); out: dev_kfree_skb(skb); return -EINVAL; }
C
linux
0
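Aside, not part of the record above: the fix frees the duplicated radio name on every error path taken after the allocation. A sketch of the pattern with the radio-creation step reduced to a hypothetical helper that takes ownership of the string on success.

#include <linux/slab.h>
#include <linux/string.h>
#include <net/netlink.h>

static int new_radio_sketch(struct nlattr *name_attr)
{
        int err;
        char *hwname = kstrndup(nla_data(name_attr), nla_len(name_attr), GFP_KERNEL);

        if (!hwname)
                return -ENOMEM;
        err = create_radio(hwname);     /* hypothetical helper; owns hwname on success */
        if (err) {
                kfree(hwname);          /* previously leaked on this path */
                return err;
        }
        return 0;
}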
CVE-2012-5156
https://www.cvedetails.com/cve/CVE-2012-5156/
CWE-399
https://github.com/chromium/chromium/commit/b15c87071f906301bccc824ce013966ca93998c7
b15c87071f906301bccc824ce013966ca93998c7
Validate and report peer's PID to WorkerProcessIpcDelegate so it will be able to duplicate handles to and from the worker process. As a side effect WorkerProcessLauncher::Delegate is now responsible for retrieving the client's PID and deciding whether a launch failed due to a permanent error condition. BUG=134694 Review URL: https://chromiumcodereview.appspot.com/11143025 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@162778 0039d316-1c4b-4281-b951-d872f2087c98
virtual ~MockIpcDelegate() {}
virtual ~MockIpcDelegate() {}
C
Chrome
0
null
null
null
https://github.com/chromium/chromium/commit/62b8b6e168a12263aab6b88dbef0b900cc37309f
62b8b6e168a12263aab6b88dbef0b900cc37309f
Add partial magnifier to ash palette. The partial magnifier will magnify a small portion of the screen, similar to a spyglass. TEST=./out/Release/ash_unittests --gtest_filter=PartialMagnificationControllerTest.* [email protected] BUG=616112 Review-Url: https://codereview.chromium.org/2239553002 Cr-Commit-Position: refs/heads/master@{#414124}
WmWindow* PaletteTray::GetWindow() { return shelf()->GetWindow(); }
WmWindow* PaletteTray::GetWindow() { return shelf()->GetWindow(); }
C
Chrome
0
CVE-2016-3837
https://www.cvedetails.com/cve/CVE-2016-3837/
CWE-200
https://android.googlesource.com/platform/frameworks/opt/net/wifi/+/a209ff12ba9617c10550678ff93d01fb72a33399
a209ff12ba9617c10550678ff93d01fb72a33399
Deal correctly with short strings The parseMacAddress function anticipates only properly formed MAC addresses (6 hexadecimal octets separated by ":"). This change properly deals with situations where the string is shorter than expected, making sure that the passed in char* reference in parseHexByte never exceeds the end of the string. BUG: 28164077 TEST: Added a main function: int main(int argc, char **argv) { unsigned char addr[6]; if (argc > 1) { memset(addr, 0, sizeof(addr)); parseMacAddress(argv[1], addr); printf("Result: %02x:%02x:%02x:%02x:%02x:%02x\n", addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]); } } Tested with "", "a" "ab" "ab:c" "abxc". Change-Id: I0db8d0037e48b62333d475296a45b22ab0efe386
static jobject android_net_wifi_get_driver_version(JNIEnv *env, jclass cls, jint iface) { JNIHelper helper(env); int buffer_length = 256; char *buffer = (char *)malloc(buffer_length); if (!buffer) return NULL; memset(buffer, 0, buffer_length); wifi_interface_handle handle = getIfaceHandle(helper, cls, iface); ALOGD("android_net_wifi_get_driver_version = %p", handle); if (handle == 0) { return NULL; } wifi_error result = hal_fn.wifi_get_driver_version(handle, buffer, buffer_length); if (result == WIFI_SUCCESS) { ALOGD("buffer is %p, length is %d", buffer, buffer_length); JNIObject<jstring> driver_version = helper.newStringUTF(buffer); free(buffer); return driver_version.detach(); } else { ALOGD("Fail to get driver version"); free(buffer); return NULL; } }
static jobject android_net_wifi_get_driver_version(JNIEnv *env, jclass cls, jint iface) { JNIHelper helper(env); int buffer_length = 256; char *buffer = (char *)malloc(buffer_length); if (!buffer) return NULL; memset(buffer, 0, buffer_length); wifi_interface_handle handle = getIfaceHandle(helper, cls, iface); ALOGD("android_net_wifi_get_driver_version = %p", handle); if (handle == 0) { return NULL; } wifi_error result = hal_fn.wifi_get_driver_version(handle, buffer, buffer_length); if (result == WIFI_SUCCESS) { ALOGD("buffer is %p, length is %d", buffer, buffer_length); JNIObject<jstring> driver_version = helper.newStringUTF(buffer); free(buffer); return driver_version.detach(); } else { ALOGD("Fail to get driver version"); free(buffer); return NULL; } }
C
Android
0
null
null
null
https://github.com/chromium/chromium/commit/77b498aa18e610ed8ac3863ae048573d4f943b16
77b498aa18e610ed8ac3863ae048573d4f943b16
Add CHECKs for file descriptors used in select() by InotifyReaderTask Since these are used in an fd_set, there's the possibility that invalid file descriptors cause stack overflow/corruption. BUG=chromium:105162 TEST=No functional changes, compiles and passes tests. Review URL: http://codereview.chromium.org/8681006 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@111369 0039d316-1c4b-4281-b951-d872f2087c98
bool InotifyReader::RemoveWatch(Watch watch, FilePathWatcherImpl* watcher) { if (!valid_) return false; base::AutoLock auto_lock(lock_); watchers_[watch].erase(watcher); if (watchers_[watch].empty()) { watchers_.erase(watch); return (inotify_rm_watch(inotify_fd_, watch) == 0); } return true; }
bool InotifyReader::RemoveWatch(Watch watch, FilePathWatcherImpl* watcher) { if (!valid_) return false; base::AutoLock auto_lock(lock_); watchers_[watch].erase(watcher); if (watchers_[watch].empty()) { watchers_.erase(watch); return (inotify_rm_watch(inotify_fd_, watch) == 0); } return true; }
C
Chrome
0
CVE-2015-1285
https://www.cvedetails.com/cve/CVE-2015-1285/
CWE-200
https://github.com/chromium/chromium/commit/39595f8d4dffcb644d438106dcb64a30c139ff0e
39595f8d4dffcb644d438106dcb64a30c139ff0e
[reland] Do not set default wallpaper unless it should do so. [email protected], [email protected] Bug: 751382 Change-Id: Id0793dfe467f737526a95b1e66ed01fbb8860bda Reviewed-on: https://chromium-review.googlesource.com/619754 Commit-Queue: Xiaoqian Dai <[email protected]> Reviewed-by: Alexander Alekseev <[email protected]> Reviewed-by: Biao She <[email protected]> Cr-Original-Commit-Position: refs/heads/master@{#498325} Reviewed-on: https://chromium-review.googlesource.com/646430 Cr-Commit-Position: refs/heads/master@{#498982}
bool ShouldUseCustomizedDefaultWallpaper() { PrefService* pref_service = g_browser_process->local_state(); return !(pref_service->FindPreference( prefs::kCustomizationDefaultWallpaperURL)->IsDefaultValue()); }
bool ShouldUseCustomizedDefaultWallpaper() { PrefService* pref_service = g_browser_process->local_state(); return !(pref_service->FindPreference( prefs::kCustomizationDefaultWallpaperURL)->IsDefaultValue()); }
C
Chrome
0
CVE-2017-5847
https://www.cvedetails.com/cve/CVE-2017-5847/
CWE-125
https://github.com/GStreamer/gst-plugins-ugly/commit/d21017b52a585f145e8d62781bcc1c5fefc7ee37
d21017b52a585f145e8d62781bcc1c5fefc7ee37
asfdemux: Check that we have enough data available before parsing bool/uint extended content descriptors https://bugzilla.gnome.org/show_bug.cgi?id=777955
gst_asf_demux_push_obj (GstASFDemux * demux, guint32 obj_id) { const gchar *nick; nick = gst_asf_get_guid_nick (asf_object_guids, obj_id); if (g_str_has_prefix (nick, "ASF_OBJ_")) nick += strlen ("ASF_OBJ_"); if (demux->objpath == NULL) { demux->objpath = g_strdup (nick); } else { gchar *newpath; newpath = g_strdup_printf ("%s/%s", demux->objpath, nick); g_free (demux->objpath); demux->objpath = newpath; } return (const gchar *) demux->objpath; }
gst_asf_demux_push_obj (GstASFDemux * demux, guint32 obj_id) { const gchar *nick; nick = gst_asf_get_guid_nick (asf_object_guids, obj_id); if (g_str_has_prefix (nick, "ASF_OBJ_")) nick += strlen ("ASF_OBJ_"); if (demux->objpath == NULL) { demux->objpath = g_strdup (nick); } else { gchar *newpath; newpath = g_strdup_printf ("%s/%s", demux->objpath, nick); g_free (demux->objpath); demux->objpath = newpath; } return (const gchar *) demux->objpath; }
C
gst-plugins-ugly
0
CVE-2015-8382
https://www.cvedetails.com/cve/CVE-2015-8382/
CWE-119
https://git.php.net/?p=php-src.git;a=commit;h=c351b47ce85a3a147cfa801fa9f0149ab4160834
c351b47ce85a3a147cfa801fa9f0149ab4160834
null
PHPAPI pcre* pcre_get_compiled_regex(char *regex, pcre_extra **extra, int *preg_options TSRMLS_DC) { pcre_cache_entry * pce = pcre_get_compiled_regex_cache(regex, strlen(regex) TSRMLS_CC); if (extra) { *extra = pce ? pce->extra : NULL; } if (preg_options) { *preg_options = pce ? pce->preg_options : 0; } return pce ? pce->re : NULL; }
PHPAPI pcre* pcre_get_compiled_regex(char *regex, pcre_extra **extra, int *preg_options TSRMLS_DC) { pcre_cache_entry * pce = pcre_get_compiled_regex_cache(regex, strlen(regex) TSRMLS_CC); if (extra) { *extra = pce ? pce->extra : NULL; } if (preg_options) { *preg_options = pce ? pce->preg_options : 0; } return pce ? pce->re : NULL; }
C
php
0
CVE-2011-2877
https://www.cvedetails.com/cve/CVE-2011-2877/
CWE-20
https://github.com/chromium/chromium/commit/d31f450c723ba46b53c1762e51188557447d85fd
d31f450c723ba46b53c1762e51188557447d85fd
[WK2] LayerTreeCoordinator should release unused UpdatedAtlases https://bugs.webkit.org/show_bug.cgi?id=95072 Reviewed by Jocelyn Turcotte. Release graphic buffers that haven't been used for a while in order to save memory. This way we can give back memory to the system when no user interaction happens after a period of time, for example when we are in the background. * Shared/ShareableBitmap.h: * WebProcess/WebPage/CoordinatedGraphics/LayerTreeCoordinator.cpp: (WebKit::LayerTreeCoordinator::LayerTreeCoordinator): (WebKit::LayerTreeCoordinator::beginContentUpdate): (WebKit): (WebKit::LayerTreeCoordinator::scheduleReleaseInactiveAtlases): (WebKit::LayerTreeCoordinator::releaseInactiveAtlasesTimerFired): * WebProcess/WebPage/CoordinatedGraphics/LayerTreeCoordinator.h: (LayerTreeCoordinator): * WebProcess/WebPage/UpdateAtlas.cpp: (WebKit::UpdateAtlas::UpdateAtlas): (WebKit::UpdateAtlas::didSwapBuffers): Don't call buildLayoutIfNeeded here. It's enought to call it in beginPaintingOnAvailableBuffer and this way we can track whether this atlas is used with m_areaAllocator. (WebKit::UpdateAtlas::beginPaintingOnAvailableBuffer): * WebProcess/WebPage/UpdateAtlas.h: (WebKit::UpdateAtlas::addTimeInactive): (WebKit::UpdateAtlas::isInactive): (WebKit::UpdateAtlas::isInUse): (UpdateAtlas): git-svn-id: svn://svn.chromium.org/blink/trunk@128473 bbb929c8-8fbe-4397-9dbb-9b2b20218538
bool LayerTreeHost::supportsAcceleratedCompositing() { return true; }
bool LayerTreeHost::supportsAcceleratedCompositing() { return true; }
C
Chrome
0
CVE-2012-6546
https://www.cvedetails.com/cve/CVE-2012-6546/
CWE-200
https://github.com/torvalds/linux/commit/3c0c5cfdcd4d69ffc4b9c0907cec99039f30a50a
3c0c5cfdcd4d69ffc4b9c0907cec99039f30a50a
atm: fix info leak via getsockname() The ATM code fails to initialize the two padding bytes of struct sockaddr_atmpvc inserted for alignment. Add an explicit memset(0) before filling the structure to avoid the info leak. Signed-off-by: Mathias Krause <[email protected]> Signed-off-by: David S. Miller <[email protected]>
static int pvc_getname(struct socket *sock, struct sockaddr *sockaddr, int *sockaddr_len, int peer) { struct sockaddr_atmpvc *addr; struct atm_vcc *vcc = ATM_SD(sock); if (!vcc->dev || !test_bit(ATM_VF_ADDR, &vcc->flags)) return -ENOTCONN; *sockaddr_len = sizeof(struct sockaddr_atmpvc); addr = (struct sockaddr_atmpvc *)sockaddr; memset(addr, 0, sizeof(*addr)); addr->sap_family = AF_ATMPVC; addr->sap_addr.itf = vcc->dev->number; addr->sap_addr.vpi = vcc->vpi; addr->sap_addr.vci = vcc->vci; return 0; }
static int pvc_getname(struct socket *sock, struct sockaddr *sockaddr, int *sockaddr_len, int peer) { struct sockaddr_atmpvc *addr; struct atm_vcc *vcc = ATM_SD(sock); if (!vcc->dev || !test_bit(ATM_VF_ADDR, &vcc->flags)) return -ENOTCONN; *sockaddr_len = sizeof(struct sockaddr_atmpvc); addr = (struct sockaddr_atmpvc *)sockaddr; addr->sap_family = AF_ATMPVC; addr->sap_addr.itf = vcc->dev->number; addr->sap_addr.vpi = vcc->vpi; addr->sap_addr.vci = vcc->vci; return 0; }
C
linux
1
CVE-2014-1738
https://www.cvedetails.com/cve/CVE-2014-1738/
CWE-264
https://github.com/torvalds/linux/commit/2145e15e0557a01b9195d1c7199a1b92cb9be81f
2145e15e0557a01b9195d1c7199a1b92cb9be81f
floppy: don't write kernel-only members to FDRAWCMD ioctl output Do not leak kernel-only floppy_raw_cmd structure members to userspace. This includes the linked-list pointer and the pointer to the allocated DMA space. Signed-off-by: Matthew Daley <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
static void format_interrupt(void) { switch (interpret_errors()) { case 1: cont->error(); case 2: break; case 0: cont->done(1); } cont->redo(); }
static void format_interrupt(void) { switch (interpret_errors()) { case 1: cont->error(); case 2: break; case 0: cont->done(1); } cont->redo(); }
C
linux
0
CVE-2016-0826
https://www.cvedetails.com/cve/CVE-2016-0826/
CWE-264
https://android.googlesource.com/platform/frameworks/av/+/c9ab2b0bb05a7e19fb057e79b36e232809d70122
c9ab2b0bb05a7e19fb057e79b36e232809d70122
Camera: Disallow dumping clients directly Camera service dumps should only be initiated through ICameraService::dump. Bug: 26265403 Change-Id: If3ca4718ed74bf33ad8a416192689203029e2803
void CameraService::BasicClient::opChanged(int32_t op, const String16& packageName) { String8 name(packageName); String8 myName(mClientPackageName); if (op != AppOpsManager::OP_CAMERA) { ALOGW("Unexpected app ops notification received: %d", op); return; } int32_t res; res = mAppOpsManager.checkOp(AppOpsManager::OP_CAMERA, mClientUid, mClientPackageName); ALOGV("checkOp returns: %d, %s ", res, res == AppOpsManager::MODE_ALLOWED ? "ALLOWED" : res == AppOpsManager::MODE_IGNORED ? "IGNORED" : res == AppOpsManager::MODE_ERRORED ? "ERRORED" : "UNKNOWN"); if (res != AppOpsManager::MODE_ALLOWED) { ALOGI("Camera %d: Access for \"%s\" revoked", mCameraId, myName.string()); mClientPid = getCallingPid(); notifyError(); disconnect(); } }
void CameraService::BasicClient::opChanged(int32_t op, const String16& packageName) { String8 name(packageName); String8 myName(mClientPackageName); if (op != AppOpsManager::OP_CAMERA) { ALOGW("Unexpected app ops notification received: %d", op); return; } int32_t res; res = mAppOpsManager.checkOp(AppOpsManager::OP_CAMERA, mClientUid, mClientPackageName); ALOGV("checkOp returns: %d, %s ", res, res == AppOpsManager::MODE_ALLOWED ? "ALLOWED" : res == AppOpsManager::MODE_IGNORED ? "IGNORED" : res == AppOpsManager::MODE_ERRORED ? "ERRORED" : "UNKNOWN"); if (res != AppOpsManager::MODE_ALLOWED) { ALOGI("Camera %d: Access for \"%s\" revoked", mCameraId, myName.string()); mClientPid = getCallingPid(); notifyError(); disconnect(); } }
C
Android
0
CVE-2014-3538
https://www.cvedetails.com/cve/CVE-2014-3538/
CWE-399
https://github.com/file/file/commit/4a284c89d6ef11aca34da65da7d673050a5ea320
4a284c89d6ef11aca34da65da7d673050a5ea320
* Enforce limit of 8K on regex searches that have no limits * Allow the l modifier for regex to mean line count. Default to byte count. If line count is specified, assume a max of 80 characters per line to limit the byte count. * Don't allow conversions to be used for dates, allowing the mask field to be used as an offset. * Bump the version of the magic format so that regex changes are visible.
parse_apple(struct magic_set *ms, struct magic_entry *me, const char *line) { struct magic *m = &me->mp[0]; return parse_extra(ms, me, line, offsetof(struct magic, apple), sizeof(m->apple), "APPLE", 0); }
parse_apple(struct magic_set *ms, struct magic_entry *me, const char *line) { struct magic *m = &me->mp[0]; return parse_extra(ms, me, line, offsetof(struct magic, apple), sizeof(m->apple), "APPLE", 0); }
C
file
0
CVE-2017-6903
https://www.cvedetails.com/cve/CVE-2017-6903/
CWE-269
https://github.com/ioquake/ioq3/commit/376267d534476a875d8b9228149c4ee18b74a4fd
376267d534476a875d8b9228149c4ee18b74a4fd
Don't load .pk3s as .dlls, and don't load user config files from .pk3s.
void CL_StartDemoLoop( void ) { Cbuf_AddText ("d1\n"); Key_SetCatcher( 0 ); }
void CL_StartDemoLoop( void ) { Cbuf_AddText ("d1\n"); Key_SetCatcher( 0 ); }
C
OpenJK
0
CVE-2016-4486
https://www.cvedetails.com/cve/CVE-2016-4486/
CWE-200
https://github.com/torvalds/linux/commit/5f8e44741f9f216e33736ea4ec65ca9ac03036e6
5f8e44741f9f216e33736ea4ec65ca9ac03036e6
net: fix infoleak in rtnetlink The stack object “map” has a total size of 32 bytes. Its last 4 bytes are padding generated by compiler. These padding bytes are not initialized and sent out via “nla_put”. Signed-off-by: Kangjie Lu <[email protected]> Signed-off-by: David S. Miller <[email protected]>
static int rtnl_phys_port_name_fill(struct sk_buff *skb, struct net_device *dev) { char name[IFNAMSIZ]; int err; err = dev_get_phys_port_name(dev, name, sizeof(name)); if (err) { if (err == -EOPNOTSUPP) return 0; return err; } if (nla_put(skb, IFLA_PHYS_PORT_NAME, strlen(name), name)) return -EMSGSIZE; return 0; }
static int rtnl_phys_port_name_fill(struct sk_buff *skb, struct net_device *dev) { char name[IFNAMSIZ]; int err; err = dev_get_phys_port_name(dev, name, sizeof(name)); if (err) { if (err == -EOPNOTSUPP) return 0; return err; } if (nla_put(skb, IFLA_PHYS_PORT_NAME, strlen(name), name)) return -EMSGSIZE; return 0; }
C
linux
0
CVE-2013-2140
https://www.cvedetails.com/cve/CVE-2013-2140/
CWE-20
https://github.com/torvalds/linux/commit/604c499cbbcc3d5fe5fb8d53306aa0fae1990109
604c499cbbcc3d5fe5fb8d53306aa0fae1990109
xen/blkback: Check device permissions before allowing OP_DISCARD We need to make sure that the device is not RO or that the request is not past the number of sectors we want to issue the DISCARD operation for. This fixes CVE-2013-2140. Cc: [email protected] Acked-by: Jan Beulich <[email protected]> Acked-by: Ian Campbell <[email protected]> [v1: Made it pr_warn instead of pr_debug] Signed-off-by: Konrad Rzeszutek Wilk <[email protected]>
static struct pending_req *alloc_req(struct xen_blkif *blkif) { struct pending_req *req = NULL; unsigned long flags; spin_lock_irqsave(&blkif->pending_free_lock, flags); if (!list_empty(&blkif->pending_free)) { req = list_entry(blkif->pending_free.next, struct pending_req, free_list); list_del(&req->free_list); } spin_unlock_irqrestore(&blkif->pending_free_lock, flags); return req; }
static struct pending_req *alloc_req(struct xen_blkif *blkif) { struct pending_req *req = NULL; unsigned long flags; spin_lock_irqsave(&blkif->pending_free_lock, flags); if (!list_empty(&blkif->pending_free)) { req = list_entry(blkif->pending_free.next, struct pending_req, free_list); list_del(&req->free_list); } spin_unlock_irqrestore(&blkif->pending_free_lock, flags); return req; }
C
linux
0
CVE-2018-16068
https://www.cvedetails.com/cve/CVE-2018-16068/
CWE-20
https://github.com/chromium/chromium/commit/66e24a8793615bd9d5c238b1745b093090e1f72d
66e24a8793615bd9d5c238b1745b093090e1f72d
[mojo-core] Validate data pipe endpoint metadata Ensures that we don't blindly trust specified buffer size and offset metadata when deserializing data pipe consumer and producer handles. Bug: 877182 Change-Id: I30f3eceafb5cee06284c2714d08357ef911d6fd9 Reviewed-on: https://chromium-review.googlesource.com/1192922 Reviewed-by: Reilly Grant <[email protected]> Commit-Queue: Ken Rockot <[email protected]> Cr-Commit-Position: refs/heads/master@{#586704}
MojoResult DataPipeConsumerDispatcher::AddWatcherRef( const scoped_refptr<WatcherDispatcher>& watcher, uintptr_t context) { base::AutoLock lock(lock_); if (is_closed_ || in_transit_) return MOJO_RESULT_INVALID_ARGUMENT; return watchers_.Add(watcher, context, GetHandleSignalsStateNoLock()); }
MojoResult DataPipeConsumerDispatcher::AddWatcherRef( const scoped_refptr<WatcherDispatcher>& watcher, uintptr_t context) { base::AutoLock lock(lock_); if (is_closed_ || in_transit_) return MOJO_RESULT_INVALID_ARGUMENT; return watchers_.Add(watcher, context, GetHandleSignalsStateNoLock()); }
C
Chrome
0
CVE-2017-5129
https://www.cvedetails.com/cve/CVE-2017-5129/
CWE-416
https://github.com/chromium/chromium/commit/783c28d59c4c748ef9b787d4717882c90c5b227b
783c28d59c4c748ef9b787d4717882c90c5b227b
Keep ScriptProcessorHandler alive across threads When posting a task from the ScriptProcessorHandler::Process to fire a process event, we need to keep the handler alive in case the ScriptProcessorNode goes away (because it has no onaudioprocess handler) and removes the its handler. Bug: 765495 Test: Change-Id: Ib4fa39d7b112c7051897700a1eff9f59a4a7a054 Reviewed-on: https://chromium-review.googlesource.com/677137 Reviewed-by: Hongchan Choi <[email protected]> Reviewed-by: Kentaro Hara <[email protected]> Commit-Queue: Raymond Toy <[email protected]> Cr-Commit-Position: refs/heads/master@{#503629}
void ScriptProcessorHandler::Process(size_t frames_to_process) { AudioBus* input_bus = Input(0).Bus(); AudioBus* output_bus = Output(0).Bus(); unsigned double_buffer_index = this->DoubleBufferIndex(); bool is_double_buffer_index_good = double_buffer_index < 2 && double_buffer_index < input_buffers_.size() && double_buffer_index < output_buffers_.size(); DCHECK(is_double_buffer_index_good); if (!is_double_buffer_index_good) return; AudioBuffer* input_buffer = input_buffers_[double_buffer_index].Get(); AudioBuffer* output_buffer = output_buffers_[double_buffer_index].Get(); unsigned number_of_input_channels = internal_input_bus_->NumberOfChannels(); bool buffers_are_good = output_buffer && BufferSize() == output_buffer->length() && buffer_read_write_index_ + frames_to_process <= BufferSize(); if (internal_input_bus_->NumberOfChannels()) buffers_are_good = buffers_are_good && input_buffer && BufferSize() == input_buffer->length(); DCHECK(buffers_are_good); if (!buffers_are_good) return; bool is_frames_to_process_good = frames_to_process && BufferSize() >= frames_to_process && !(BufferSize() % frames_to_process); DCHECK(is_frames_to_process_good); if (!is_frames_to_process_good) return; unsigned number_of_output_channels = output_bus->NumberOfChannels(); bool channels_are_good = (number_of_input_channels == number_of_input_channels_) && (number_of_output_channels == number_of_output_channels_); DCHECK(channels_are_good); if (!channels_are_good) return; for (unsigned i = 0; i < number_of_input_channels; ++i) internal_input_bus_->SetChannelMemory( i, input_buffer->getChannelData(i).View()->Data() + buffer_read_write_index_, frames_to_process); if (number_of_input_channels) internal_input_bus_->CopyFrom(*input_bus); for (unsigned i = 0; i < number_of_output_channels; ++i) { memcpy(output_bus->Channel(i)->MutableData(), output_buffer->getChannelData(i).View()->Data() + buffer_read_write_index_, sizeof(float) * frames_to_process); } buffer_read_write_index_ = (buffer_read_write_index_ + frames_to_process) % BufferSize(); if (!buffer_read_write_index_) { MutexTryLocker try_locker(process_event_lock_); if (!try_locker.Locked()) { output_buffer->Zero(); } else if (Context()->GetExecutionContext()) { if (Context()->HasRealtimeConstraint()) { TaskRunnerHelper::Get(TaskType::kMediaElementEvent, Context()->GetExecutionContext()) ->PostTask( BLINK_FROM_HERE, CrossThreadBind(&ScriptProcessorHandler::FireProcessEvent, WrapRefPtr(this), double_buffer_index_)); } else { std::unique_ptr<WaitableEvent> waitable_event = WTF::MakeUnique<WaitableEvent>(); TaskRunnerHelper::Get(TaskType::kMediaElementEvent, Context()->GetExecutionContext()) ->PostTask( BLINK_FROM_HERE, CrossThreadBind(&ScriptProcessorHandler:: FireProcessEventForOfflineAudioContext, WrapRefPtr(this), double_buffer_index_, CrossThreadUnretained(waitable_event.get()))); waitable_event->Wait(); } } SwapBuffers(); } }
void ScriptProcessorHandler::Process(size_t frames_to_process) { AudioBus* input_bus = Input(0).Bus(); AudioBus* output_bus = Output(0).Bus(); unsigned double_buffer_index = this->DoubleBufferIndex(); bool is_double_buffer_index_good = double_buffer_index < 2 && double_buffer_index < input_buffers_.size() && double_buffer_index < output_buffers_.size(); DCHECK(is_double_buffer_index_good); if (!is_double_buffer_index_good) return; AudioBuffer* input_buffer = input_buffers_[double_buffer_index].Get(); AudioBuffer* output_buffer = output_buffers_[double_buffer_index].Get(); unsigned number_of_input_channels = internal_input_bus_->NumberOfChannels(); bool buffers_are_good = output_buffer && BufferSize() == output_buffer->length() && buffer_read_write_index_ + frames_to_process <= BufferSize(); if (internal_input_bus_->NumberOfChannels()) buffers_are_good = buffers_are_good && input_buffer && BufferSize() == input_buffer->length(); DCHECK(buffers_are_good); if (!buffers_are_good) return; bool is_frames_to_process_good = frames_to_process && BufferSize() >= frames_to_process && !(BufferSize() % frames_to_process); DCHECK(is_frames_to_process_good); if (!is_frames_to_process_good) return; unsigned number_of_output_channels = output_bus->NumberOfChannels(); bool channels_are_good = (number_of_input_channels == number_of_input_channels_) && (number_of_output_channels == number_of_output_channels_); DCHECK(channels_are_good); if (!channels_are_good) return; for (unsigned i = 0; i < number_of_input_channels; ++i) internal_input_bus_->SetChannelMemory( i, input_buffer->getChannelData(i).View()->Data() + buffer_read_write_index_, frames_to_process); if (number_of_input_channels) internal_input_bus_->CopyFrom(*input_bus); for (unsigned i = 0; i < number_of_output_channels; ++i) { memcpy(output_bus->Channel(i)->MutableData(), output_buffer->getChannelData(i).View()->Data() + buffer_read_write_index_, sizeof(float) * frames_to_process); } buffer_read_write_index_ = (buffer_read_write_index_ + frames_to_process) % BufferSize(); if (!buffer_read_write_index_) { MutexTryLocker try_locker(process_event_lock_); if (!try_locker.Locked()) { output_buffer->Zero(); } else if (Context()->GetExecutionContext()) { if (Context()->HasRealtimeConstraint()) { TaskRunnerHelper::Get(TaskType::kMediaElementEvent, Context()->GetExecutionContext()) ->PostTask(BLINK_FROM_HERE, CrossThreadBind( &ScriptProcessorHandler::FireProcessEvent, CrossThreadUnretained(this), double_buffer_index_)); } else { std::unique_ptr<WaitableEvent> waitable_event = WTF::MakeUnique<WaitableEvent>(); TaskRunnerHelper::Get(TaskType::kMediaElementEvent, Context()->GetExecutionContext()) ->PostTask(BLINK_FROM_HERE, CrossThreadBind( &ScriptProcessorHandler:: FireProcessEventForOfflineAudioContext, CrossThreadUnretained(this), double_buffer_index_, CrossThreadUnretained(waitable_event.get()))); waitable_event->Wait(); } } SwapBuffers(); } }
C
Chrome
1
CVE-2013-0840
https://www.cvedetails.com/cve/CVE-2013-0840/
null
https://github.com/chromium/chromium/commit/7f48b71cb22bb2fc9fcec2013e9eaff55381a43d
7f48b71cb22bb2fc9fcec2013e9eaff55381a43d
Filter more incoming URLs in the CreateWindow path. BUG=170532 Review URL: https://chromiumcodereview.appspot.com/12036002 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@178728 0039d316-1c4b-4281-b951-d872f2087c98
RenderViewHost* RenderViewHost::FromID(int render_process_id, int render_view_id) { RenderProcessHost* process = RenderProcessHost::FromID(render_process_id); if (!process) return NULL; RenderWidgetHost* widget = process->GetRenderWidgetHostByID(render_view_id); if (!widget || !widget->IsRenderView()) return NULL; return static_cast<RenderViewHostImpl*>(RenderWidgetHostImpl::From(widget)); }
RenderViewHost* RenderViewHost::FromID(int render_process_id, int render_view_id) { RenderProcessHost* process = RenderProcessHost::FromID(render_process_id); if (!process) return NULL; RenderWidgetHost* widget = process->GetRenderWidgetHostByID(render_view_id); if (!widget || !widget->IsRenderView()) return NULL; return static_cast<RenderViewHostImpl*>(RenderWidgetHostImpl::From(widget)); }
C
Chrome
0
CVE-2016-10210
https://www.cvedetails.com/cve/CVE-2016-10210/
CWE-476
https://github.com/VirusTotal/yara/commit/3119b232c9c453c98d8fa8b6ae4e37ba18117cd4
3119b232c9c453c98d8fa8b6ae4e37ba18117cd4
re_lexer: Make reading escape sequences more robust (#586) * Add test for issue #503 * re_lexer: Make reading escape sequences more robust This commit fixes parsing incomplete escape sequences at the end of a regular expression and parsing things like \xxy (invalid hex digits) which before were silently turned into (char)255. Close #503 * Update re_lexer.c
int main(int argc, char** argv) { yr_initialize(); test_boolean_operators(); test_comparison_operators(); test_arithmetic_operators(); test_bitwise_operators(); test_syntax(); test_anonymous_strings(); test_strings(); test_wildcard_strings(); test_hex_strings(); test_count(); test_at(); test_in(); test_offset(); test_length(); test_of(); test_for(); test_re(); test_filesize(); test_comments(); test_modules(); test_integer_functions(); test_entrypoint(); test_global_rules(); #if defined(HASH_MODULE) test_hash_module(); #endif test_file_descriptor(); yr_finalize(); return 0; }
int main(int argc, char** argv) { yr_initialize(); test_boolean_operators(); test_comparison_operators(); test_arithmetic_operators(); test_bitwise_operators(); test_syntax(); test_anonymous_strings(); test_strings(); test_wildcard_strings(); test_hex_strings(); test_count(); test_at(); test_in(); test_offset(); test_length(); test_of(); test_for(); test_re(); test_filesize(); test_comments(); test_modules(); test_integer_functions(); test_entrypoint(); test_global_rules(); #if defined(HASH_MODULE) test_hash_module(); #endif test_file_descriptor(); yr_finalize(); return 0; }
C
yara
0
CVE-2018-8897
https://www.cvedetails.com/cve/CVE-2018-8897/
CWE-362
https://github.com/torvalds/linux/commit/d8ba61ba58c88d5207c1ba2f7d9a2280e7d03be9
d8ba61ba58c88d5207c1ba2f7d9a2280e7d03be9
x86/entry/64: Don't use IST entry for #BP stack There's nothing IST-worthy about #BP/int3. We don't allow kprobes in the small handful of places in the kernel that run at CPL0 with an invalid stack, and 32-bit kernels have used normal interrupt gates for #BP forever. Furthermore, we don't allow kprobes in places that have usergs while in kernel mode, so "paranoid" is also unnecessary. Signed-off-by: Andy Lutomirski <[email protected]> Signed-off-by: Linus Torvalds <[email protected]> Signed-off-by: Thomas Gleixner <[email protected]> Cc: [email protected]
static inline void idt_init_desc(gate_desc *gate, const struct idt_data *d) { unsigned long addr = (unsigned long) d->addr; gate->offset_low = (u16) addr; gate->segment = (u16) d->segment; gate->bits = d->bits; gate->offset_middle = (u16) (addr >> 16); #ifdef CONFIG_X86_64 gate->offset_high = (u32) (addr >> 32); gate->reserved = 0; #endif }
static inline void idt_init_desc(gate_desc *gate, const struct idt_data *d) { unsigned long addr = (unsigned long) d->addr; gate->offset_low = (u16) addr; gate->segment = (u16) d->segment; gate->bits = d->bits; gate->offset_middle = (u16) (addr >> 16); #ifdef CONFIG_X86_64 gate->offset_high = (u32) (addr >> 32); gate->reserved = 0; #endif }
C
linux
0
CVE-2019-1547
https://www.cvedetails.com/cve/CVE-2019-1547/
CWE-311
https://git.openssl.org/gitweb/?p=openssl.git;a=commitdiff;h=21c856b75d81eff61aa63b4f036bb64a85bf6d46
21c856b75d81eff61aa63b4f036bb64a85bf6d46
null
void EC_GROUP_clear_free(EC_GROUP *group) { if (!group) return; if (group->meth->group_clear_finish != 0) group->meth->group_clear_finish(group); else if (group->meth->group_finish != 0) group->meth->group_finish(group); EC_EX_DATA_clear_free_all_data(&group->extra_data); if (EC_GROUP_VERSION(group) && group->mont_data) BN_MONT_CTX_free(group->mont_data); if (group->generator != NULL) EC_POINT_clear_free(group->generator); BN_clear_free(&group->order); BN_clear_free(&group->cofactor); if (group->seed) { OPENSSL_cleanse(group->seed, group->seed_len); OPENSSL_free(group->seed); } OPENSSL_cleanse(group, sizeof(*group)); OPENSSL_free(group); }
void EC_GROUP_clear_free(EC_GROUP *group) { if (!group) return; if (group->meth->group_clear_finish != 0) group->meth->group_clear_finish(group); else if (group->meth->group_finish != 0) group->meth->group_finish(group); EC_EX_DATA_clear_free_all_data(&group->extra_data); if (EC_GROUP_VERSION(group) && group->mont_data) BN_MONT_CTX_free(group->mont_data); if (group->generator != NULL) EC_POINT_clear_free(group->generator); BN_clear_free(&group->order); BN_clear_free(&group->cofactor); if (group->seed) { OPENSSL_cleanse(group->seed, group->seed_len); OPENSSL_free(group->seed); } OPENSSL_cleanse(group, sizeof(*group)); OPENSSL_free(group); }
C
openssl
0
CVE-2013-4623
https://www.cvedetails.com/cve/CVE-2013-4623/
CWE-20
https://github.com/polarssl/polarssl/commit/1922a4e6aade7b1d685af19d4d9339ddb5c02859
1922a4e6aade7b1d685af19d4d9339ddb5c02859
ssl_parse_certificate() now calls x509parse_crt_der() directly
static void ssl_mac_sha1( unsigned char *secret, unsigned char *buf, size_t len, unsigned char *ctr, int type ) { unsigned char header[11]; unsigned char padding[40]; sha1_context sha1; memcpy( header, ctr, 8 ); header[ 8] = (unsigned char) type; header[ 9] = (unsigned char)( len >> 8 ); header[10] = (unsigned char)( len ); memset( padding, 0x36, 40 ); sha1_starts( &sha1 ); sha1_update( &sha1, secret, 20 ); sha1_update( &sha1, padding, 40 ); sha1_update( &sha1, header, 11 ); sha1_update( &sha1, buf, len ); sha1_finish( &sha1, buf + len ); memset( padding, 0x5C, 40 ); sha1_starts( &sha1 ); sha1_update( &sha1, secret, 20 ); sha1_update( &sha1, padding, 40 ); sha1_update( &sha1, buf + len, 20 ); sha1_finish( &sha1, buf + len ); }
static void ssl_mac_sha1( unsigned char *secret, unsigned char *buf, size_t len, unsigned char *ctr, int type ) { unsigned char header[11]; unsigned char padding[40]; sha1_context sha1; memcpy( header, ctr, 8 ); header[ 8] = (unsigned char) type; header[ 9] = (unsigned char)( len >> 8 ); header[10] = (unsigned char)( len ); memset( padding, 0x36, 40 ); sha1_starts( &sha1 ); sha1_update( &sha1, secret, 20 ); sha1_update( &sha1, padding, 40 ); sha1_update( &sha1, header, 11 ); sha1_update( &sha1, buf, len ); sha1_finish( &sha1, buf + len ); memset( padding, 0x5C, 40 ); sha1_starts( &sha1 ); sha1_update( &sha1, secret, 20 ); sha1_update( &sha1, padding, 40 ); sha1_update( &sha1, buf + len, 20 ); sha1_finish( &sha1, buf + len ); }
C
polarssl
0
CVE-2011-2861
https://www.cvedetails.com/cve/CVE-2011-2861/
CWE-20
https://github.com/chromium/chromium/commit/8262245d384be025f13e2a5b3a03b7e5c98374ce
8262245d384be025f13e2a5b3a03b7e5c98374ce
DevTools: move DevToolsAgent/Client into content. BUG=84078 TEST= Review URL: http://codereview.chromium.org/7461019 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@93596 0039d316-1c4b-4281-b951-d872f2087c98
static void StopAltErrorPageFetcher(WebDataSource* data_source) { if (data_source) { NavigationState* state = NavigationState::FromDataSource(data_source); if (state) state->set_alt_error_page_fetcher(NULL); } }
static void StopAltErrorPageFetcher(WebDataSource* data_source) { if (data_source) { NavigationState* state = NavigationState::FromDataSource(data_source); if (state) state->set_alt_error_page_fetcher(NULL); } }
C
Chrome
0
CVE-2012-4565
https://www.cvedetails.com/cve/CVE-2012-4565/
CWE-189
https://github.com/torvalds/linux/commit/8f363b77ee4fbf7c3bbcf5ec2c5ca482d396d664
8f363b77ee4fbf7c3bbcf5ec2c5ca482d396d664
net: fix divide by zero in tcp algorithm illinois Reading TCP stats when using TCP Illinois congestion control algorithm can cause a divide by zero kernel oops. The division by zero occur in tcp_illinois_info() at: do_div(t, ca->cnt_rtt); where ca->cnt_rtt can become zero (when rtt_reset is called) Steps to Reproduce: 1. Register tcp_illinois: # sysctl -w net.ipv4.tcp_congestion_control=illinois 2. Monitor internal TCP information via command "ss -i" # watch -d ss -i 3. Establish new TCP conn to machine Either it fails at the initial conn, or else it needs to wait for a loss or a reset. This is only related to reading stats. The function avg_delay() also performs the same divide, but is guarded with a (ca->cnt_rtt > 0) at its calling point in update_params(). Thus, simply fix tcp_illinois_info(). Function tcp_illinois_info() / get_info() is called without socket lock. Thus, eliminate any race condition on ca->cnt_rtt by using a local stack variable. Simply reuse info.tcpv_rttcnt, as its already set to ca->cnt_rtt. Function avg_delay() is not affected by this race condition, as its called with the socket lock. Cc: Petr Matousek <[email protected]> Signed-off-by: Jesper Dangaard Brouer <[email protected]> Acked-by: Eric Dumazet <[email protected]> Acked-by: Stephen Hemminger <[email protected]> Signed-off-by: David S. Miller <[email protected]>
static inline u32 max_delay(const struct illinois *ca) { return ca->max_rtt - ca->base_rtt; }
static inline u32 max_delay(const struct illinois *ca) { return ca->max_rtt - ca->base_rtt; }
C
linux
0
CVE-2016-9919
https://www.cvedetails.com/cve/CVE-2016-9919/
CWE-20
https://github.com/torvalds/linux/commit/79dc7e3f1cd323be4c81aa1a94faa1b3ed987fb2
79dc7e3f1cd323be4c81aa1a94faa1b3ed987fb2
net: handle no dst on skb in icmp6_send Andrey reported the following while fuzzing the kernel with syzkaller: kasan: CONFIG_KASAN_INLINE enabled kasan: GPF could be caused by NULL-ptr deref or user memory access general protection fault: 0000 [#1] SMP KASAN Modules linked in: CPU: 0 PID: 3859 Comm: a.out Not tainted 4.9.0-rc6+ #429 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Bochs 01/01/2011 task: ffff8800666d4200 task.stack: ffff880067348000 RIP: 0010:[<ffffffff833617ec>] [<ffffffff833617ec>] icmp6_send+0x5fc/0x1e30 net/ipv6/icmp.c:451 RSP: 0018:ffff88006734f2c0 EFLAGS: 00010206 RAX: ffff8800666d4200 RBX: 0000000000000000 RCX: 0000000000000000 RDX: 0000000000000000 RSI: dffffc0000000000 RDI: 0000000000000018 RBP: ffff88006734f630 R08: ffff880064138418 R09: 0000000000000003 R10: dffffc0000000000 R11: 0000000000000005 R12: 0000000000000000 R13: ffffffff84e7e200 R14: ffff880064138484 R15: ffff8800641383c0 FS: 00007fb3887a07c0(0000) GS:ffff88006cc00000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 0000000020000000 CR3: 000000006b040000 CR4: 00000000000006f0 Stack: ffff8800666d4200 ffff8800666d49f8 ffff8800666d4200 ffffffff84c02460 ffff8800666d4a1a 1ffff1000ccdaa2f ffff88006734f498 0000000000000046 ffff88006734f440 ffffffff832f4269 ffff880064ba7456 0000000000000000 Call Trace: [<ffffffff83364ddc>] icmpv6_param_prob+0x2c/0x40 net/ipv6/icmp.c:557 [< inline >] ip6_tlvopt_unknown net/ipv6/exthdrs.c:88 [<ffffffff83394405>] ip6_parse_tlv+0x555/0x670 net/ipv6/exthdrs.c:157 [<ffffffff8339a759>] ipv6_parse_hopopts+0x199/0x460 net/ipv6/exthdrs.c:663 [<ffffffff832ee773>] ipv6_rcv+0xfa3/0x1dc0 net/ipv6/ip6_input.c:191 ... icmp6_send / icmpv6_send is invoked for both rx and tx paths. In both cases the dst->dev should be preferred for determining the L3 domain if the dst has been set on the skb. Fallback to the skb->dev if it has not. This covers the case reported here where icmp6_send is invoked on Rx before the route lookup. Fixes: 5d41ce29e ("net: icmp6_send should use dst dev to determine L3 domain") Reported-by: Andrey Konovalov <[email protected]> Signed-off-by: David Ahern <[email protected]> Signed-off-by: David S. Miller <[email protected]>
struct ctl_table * __net_init ipv6_icmp_sysctl_init(struct net *net) { struct ctl_table *table; table = kmemdup(ipv6_icmp_table_template, sizeof(ipv6_icmp_table_template), GFP_KERNEL); if (table) table[0].data = &net->ipv6.sysctl.icmpv6_time; return table; }
struct ctl_table * __net_init ipv6_icmp_sysctl_init(struct net *net) { struct ctl_table *table; table = kmemdup(ipv6_icmp_table_template, sizeof(ipv6_icmp_table_template), GFP_KERNEL); if (table) table[0].data = &net->ipv6.sysctl.icmpv6_time; return table; }
C
linux
0
CVE-2016-7097
https://www.cvedetails.com/cve/CVE-2016-7097/
CWE-285
https://github.com/torvalds/linux/commit/073931017b49d9458aa351605b43a7e34598caef
073931017b49d9458aa351605b43a7e34598caef
posix_acl: Clear SGID bit when setting file permissions When file permissions are modified via chmod(2) and the user is not in the owning group or capable of CAP_FSETID, the setgid bit is cleared in inode_change_ok(). Setting a POSIX ACL via setxattr(2) sets the file permissions as well as the new ACL, but doesn't clear the setgid bit in a similar way; this allows to bypass the check in chmod(2). Fix that. References: CVE-2016-7097 Reviewed-by: Christoph Hellwig <[email protected]> Reviewed-by: Jeff Layton <[email protected]> Signed-off-by: Jan Kara <[email protected]> Signed-off-by: Andreas Gruenbacher <[email protected]>
int orangefs_init_acl(struct inode *inode, struct inode *dir) { struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode); struct posix_acl *default_acl, *acl; umode_t mode = inode->i_mode; int error = 0; ClearModeFlag(orangefs_inode); error = posix_acl_create(dir, &mode, &default_acl, &acl); if (error) return error; if (default_acl) { error = orangefs_set_acl(inode, default_acl, ACL_TYPE_DEFAULT); posix_acl_release(default_acl); } if (acl) { if (!error) error = orangefs_set_acl(inode, acl, ACL_TYPE_ACCESS); posix_acl_release(acl); } /* If mode of the inode was changed, then do a forcible ->setattr */ if (mode != inode->i_mode) { SetModeFlag(orangefs_inode); inode->i_mode = mode; orangefs_flush_inode(inode); } return error; }
int orangefs_init_acl(struct inode *inode, struct inode *dir) { struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode); struct posix_acl *default_acl, *acl; umode_t mode = inode->i_mode; int error = 0; ClearModeFlag(orangefs_inode); error = posix_acl_create(dir, &mode, &default_acl, &acl); if (error) return error; if (default_acl) { error = orangefs_set_acl(inode, default_acl, ACL_TYPE_DEFAULT); posix_acl_release(default_acl); } if (acl) { if (!error) error = orangefs_set_acl(inode, acl, ACL_TYPE_ACCESS); posix_acl_release(acl); } /* If mode of the inode was changed, then do a forcible ->setattr */ if (mode != inode->i_mode) { SetModeFlag(orangefs_inode); inode->i_mode = mode; orangefs_flush_inode(inode); } return error; }
C
linux
0
CVE-2018-11596
https://www.cvedetails.com/cve/CVE-2018-11596/
CWE-119
https://github.com/espruino/Espruino/commit/ce1924193862d58cb43d3d4d9dada710a8361b89
ce1924193862d58cb43d3d4d9dada710a8361b89
fix jsvGetString regression
void jsvArrayPushAll(JsVar *target, JsVar *source, bool checkDuplicates) { assert(jsvIsArray(target)); assert(jsvIsArray(source)); JsvObjectIterator it; jsvObjectIteratorNew(&it, source); while (jsvObjectIteratorHasValue(&it)) { JsVar *v = jsvObjectIteratorGetValue(&it); bool add = true; if (checkDuplicates) { JsVar *idx = jsvGetIndexOf(target, v, false); if (idx) { add = false; jsvUnLock(idx); } } if (add) jsvArrayPush(target, v); jsvUnLock(v); jsvObjectIteratorNext(&it); } jsvObjectIteratorFree(&it); }
void jsvArrayPushAll(JsVar *target, JsVar *source, bool checkDuplicates) { assert(jsvIsArray(target)); assert(jsvIsArray(source)); JsvObjectIterator it; jsvObjectIteratorNew(&it, source); while (jsvObjectIteratorHasValue(&it)) { JsVar *v = jsvObjectIteratorGetValue(&it); bool add = true; if (checkDuplicates) { JsVar *idx = jsvGetIndexOf(target, v, false); if (idx) { add = false; jsvUnLock(idx); } } if (add) jsvArrayPush(target, v); jsvUnLock(v); jsvObjectIteratorNext(&it); } jsvObjectIteratorFree(&it); }
C
Espruino
0
CVE-2013-6636
https://www.cvedetails.com/cve/CVE-2013-6636/
CWE-20
https://github.com/chromium/chromium/commit/5cfe3023574666663d970ce48cdbc8ed15ce61d9
5cfe3023574666663d970ce48cdbc8ed15ce61d9
Clear out some minor TODOs. BUG=none Review URL: https://codereview.chromium.org/1047063002 Cr-Commit-Position: refs/heads/master@{#322959}
void AutofillDialogViews::OverlayView::OnNativeThemeChanged( const ui::NativeTheme* theme) { set_background(views::Background::CreateSolidBackground( theme->GetSystemColor(ui::NativeTheme::kColorId_DialogBackground))); }
void AutofillDialogViews::OverlayView::OnNativeThemeChanged( const ui::NativeTheme* theme) { set_background(views::Background::CreateSolidBackground( theme->GetSystemColor(ui::NativeTheme::kColorId_DialogBackground))); }
C
Chrome
0
CVE-2016-1679
https://www.cvedetails.com/cve/CVE-2016-1679/
null
https://github.com/chromium/chromium/commit/b5bdf3778209179111c9f865af00940e74aa20e7
b5bdf3778209179111c9f865af00940e74aa20e7
V8ValueConverter::ToV8Value should not trigger setters BUG=606390 Review URL: https://codereview.chromium.org/1918793003 Cr-Commit-Position: refs/heads/master@{#390045}
base::Value* reference_value() const { return reference_value_.get(); }
base::Value* reference_value() const { return reference_value_.get(); }
C
Chrome
0
CVE-2016-6271
https://www.cvedetails.com/cve/CVE-2016-6271/
CWE-254
https://github.com/BelledonneCommunications/bzrtp/commit/bbb1e6e2f467ee4bd7b9a8c800e4f07343d7d99b
bbb1e6e2f467ee4bd7b9a8c800e4f07343d7d99b
Add ZRTP Commit packet hvi check on DHPart2 packet reception
uint8_t *messageTypeInttoString(uint32_t messageType) { switch(messageType) { case MSGTYPE_HELLO : return (uint8_t *)"Hello "; break; case MSGTYPE_HELLOACK : return (uint8_t *)"HelloACK"; break; case MSGTYPE_COMMIT : return (uint8_t *)"Commit "; break; case MSGTYPE_DHPART1 : return (uint8_t *)"DHPart1 "; break; case MSGTYPE_DHPART2 : return (uint8_t *)"DHPart2 "; break; case MSGTYPE_CONFIRM1 : return (uint8_t *)"Confirm1"; break; case MSGTYPE_CONFIRM2 : return (uint8_t *)"Confirm2"; break; case MSGTYPE_CONF2ACK : return (uint8_t *)"Conf2ACK"; break; case MSGTYPE_ERROR : return (uint8_t *)"Error "; break; case MSGTYPE_ERRORACK : return (uint8_t *)"ErrorACK"; break; case MSGTYPE_GOCLEAR : return (uint8_t *)"GoClear "; break; case MSGTYPE_CLEARACK : return (uint8_t *)"ClearACK"; break; case MSGTYPE_SASRELAY : return (uint8_t *)"SASrelay"; break; case MSGTYPE_RELAYACK : return (uint8_t *)"RelayACK"; break; case MSGTYPE_PING : return (uint8_t *)"Ping "; break; case MSGTYPE_PINGACK : return (uint8_t *)"PingACK "; break; } return NULL; }
uint8_t *messageTypeInttoString(uint32_t messageType) { switch(messageType) { case MSGTYPE_HELLO : return (uint8_t *)"Hello "; break; case MSGTYPE_HELLOACK : return (uint8_t *)"HelloACK"; break; case MSGTYPE_COMMIT : return (uint8_t *)"Commit "; break; case MSGTYPE_DHPART1 : return (uint8_t *)"DHPart1 "; break; case MSGTYPE_DHPART2 : return (uint8_t *)"DHPart2 "; break; case MSGTYPE_CONFIRM1 : return (uint8_t *)"Confirm1"; break; case MSGTYPE_CONFIRM2 : return (uint8_t *)"Confirm2"; break; case MSGTYPE_CONF2ACK : return (uint8_t *)"Conf2ACK"; break; case MSGTYPE_ERROR : return (uint8_t *)"Error "; break; case MSGTYPE_ERRORACK : return (uint8_t *)"ErrorACK"; break; case MSGTYPE_GOCLEAR : return (uint8_t *)"GoClear "; break; case MSGTYPE_CLEARACK : return (uint8_t *)"ClearACK"; break; case MSGTYPE_SASRELAY : return (uint8_t *)"SASrelay"; break; case MSGTYPE_RELAYACK : return (uint8_t *)"RelayACK"; break; case MSGTYPE_PING : return (uint8_t *)"Ping "; break; case MSGTYPE_PINGACK : return (uint8_t *)"PingACK "; break; } return NULL; }
C
bzrtp
0
CVE-2015-8963
https://www.cvedetails.com/cve/CVE-2015-8963/
CWE-416
https://github.com/torvalds/linux/commit/12ca6ad2e3a896256f086497a7c7406a547ee373
12ca6ad2e3a896256f086497a7c7406a547ee373
perf: Fix race in swevent hash There's a race on CPU unplug where we free the swevent hash array while it can still have events on. This will result in a use-after-free which is BAD. Simply do not free the hash array on unplug. This leaves the thing around and no use-after-free takes place. When the last swevent dies, we do a for_each_possible_cpu() iteration anyway to clean these up, at which time we'll free it, so no leakage will occur. Reported-by: Sasha Levin <[email protected]> Tested-by: Sasha Levin <[email protected]> Signed-off-by: Peter Zijlstra (Intel) <[email protected]> Cc: Arnaldo Carvalho de Melo <[email protected]> Cc: Frederic Weisbecker <[email protected]> Cc: Jiri Olsa <[email protected]> Cc: Linus Torvalds <[email protected]> Cc: Peter Zijlstra <[email protected]> Cc: Stephane Eranian <[email protected]> Cc: Thomas Gleixner <[email protected]> Cc: Vince Weaver <[email protected]> Signed-off-by: Ingo Molnar <[email protected]>
static int __perf_event_enable(void *info) { struct perf_event *event = info; struct perf_event_context *ctx = event->ctx; struct perf_event *leader = event->group_leader; struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); int err; /* * There's a time window between 'ctx->is_active' check * in perf_event_enable function and this place having: * - IRQs on * - ctx->lock unlocked * * where the task could be killed and 'ctx' deactivated * by perf_event_exit_task. */ if (!ctx->is_active) return -EINVAL; raw_spin_lock(&ctx->lock); update_context_time(ctx); if (event->state >= PERF_EVENT_STATE_INACTIVE) goto unlock; /* * set current task's cgroup time reference point */ perf_cgroup_set_timestamp(current, ctx); __perf_event_mark_enabled(event); if (!event_filter_match(event)) { if (is_cgroup_event(event)) perf_cgroup_defer_enabled(event); goto unlock; } /* * If the event is in a group and isn't the group leader, * then don't put it on unless the group is on. */ if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE) goto unlock; if (!group_can_go_on(event, cpuctx, 1)) { err = -EEXIST; } else { if (event == leader) err = group_sched_in(event, cpuctx, ctx); else err = event_sched_in(event, cpuctx, ctx); } if (err) { /* * If this event can't go on and it's part of a * group, then the whole group has to come off. */ if (leader != event) { group_sched_out(leader, cpuctx, ctx); perf_mux_hrtimer_restart(cpuctx); } if (leader->attr.pinned) { update_group_times(leader); leader->state = PERF_EVENT_STATE_ERROR; } } unlock: raw_spin_unlock(&ctx->lock); return 0; }
static int __perf_event_enable(void *info) { struct perf_event *event = info; struct perf_event_context *ctx = event->ctx; struct perf_event *leader = event->group_leader; struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); int err; /* * There's a time window between 'ctx->is_active' check * in perf_event_enable function and this place having: * - IRQs on * - ctx->lock unlocked * * where the task could be killed and 'ctx' deactivated * by perf_event_exit_task. */ if (!ctx->is_active) return -EINVAL; raw_spin_lock(&ctx->lock); update_context_time(ctx); if (event->state >= PERF_EVENT_STATE_INACTIVE) goto unlock; /* * set current task's cgroup time reference point */ perf_cgroup_set_timestamp(current, ctx); __perf_event_mark_enabled(event); if (!event_filter_match(event)) { if (is_cgroup_event(event)) perf_cgroup_defer_enabled(event); goto unlock; } /* * If the event is in a group and isn't the group leader, * then don't put it on unless the group is on. */ if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE) goto unlock; if (!group_can_go_on(event, cpuctx, 1)) { err = -EEXIST; } else { if (event == leader) err = group_sched_in(event, cpuctx, ctx); else err = event_sched_in(event, cpuctx, ctx); } if (err) { /* * If this event can't go on and it's part of a * group, then the whole group has to come off. */ if (leader != event) { group_sched_out(leader, cpuctx, ctx); perf_mux_hrtimer_restart(cpuctx); } if (leader->attr.pinned) { update_group_times(leader); leader->state = PERF_EVENT_STATE_ERROR; } } unlock: raw_spin_unlock(&ctx->lock); return 0; }
C
linux
0
CVE-2017-7533
https://www.cvedetails.com/cve/CVE-2017-7533/
CWE-362
https://github.com/torvalds/linux/commit/49d31c2f389acfe83417083e1208422b4091cd9e
49d31c2f389acfe83417083e1208422b4091cd9e
dentry name snapshots take_dentry_name_snapshot() takes a safe snapshot of dentry name; if the name is a short one, it gets copied into caller-supplied structure, otherwise an extra reference to external name is grabbed (those are never modified). In either case the pointer to stable string is stored into the same structure. dentry must be held by the caller of take_dentry_name_snapshot(), but may be freely dropped afterwards - the snapshot will stay until destroyed by release_dentry_name_snapshot(). Intended use: struct name_snapshot s; take_dentry_name_snapshot(&s, dentry); ... access s.name ... release_dentry_name_snapshot(&s); Replaces fsnotify_oldname_...(), gets used in fsnotify to obtain the name to pass down with event. Signed-off-by: Al Viro <[email protected]>
void d_drop(struct dentry *dentry) { spin_lock(&dentry->d_lock); __d_drop(dentry); spin_unlock(&dentry->d_lock); }
void d_drop(struct dentry *dentry) { spin_lock(&dentry->d_lock); __d_drop(dentry); spin_unlock(&dentry->d_lock); }
C
linux
0
CVE-2019-11811
https://www.cvedetails.com/cve/CVE-2019-11811/
CWE-416
https://github.com/torvalds/linux/commit/401e7e88d4ef80188ffa07095ac00456f901b8c4
401e7e88d4ef80188ffa07095ac00456f901b8c4
ipmi_si: fix use-after-free of resource->name When we excute the following commands, we got oops rmmod ipmi_si cat /proc/ioports [ 1623.482380] Unable to handle kernel paging request at virtual address ffff00000901d478 [ 1623.482382] Mem abort info: [ 1623.482383] ESR = 0x96000007 [ 1623.482385] Exception class = DABT (current EL), IL = 32 bits [ 1623.482386] SET = 0, FnV = 0 [ 1623.482387] EA = 0, S1PTW = 0 [ 1623.482388] Data abort info: [ 1623.482389] ISV = 0, ISS = 0x00000007 [ 1623.482390] CM = 0, WnR = 0 [ 1623.482393] swapper pgtable: 4k pages, 48-bit VAs, pgdp = 00000000d7d94a66 [ 1623.482395] [ffff00000901d478] pgd=000000dffbfff003, pud=000000dffbffe003, pmd=0000003f5d06e003, pte=0000000000000000 [ 1623.482399] Internal error: Oops: 96000007 [#1] SMP [ 1623.487407] Modules linked in: ipmi_si(E) nls_utf8 isofs rpcrdma ib_iser ib_srpt target_core_mod ib_srp scsi_transport_srp ib_ipoib rdma_ucm ib_umad rdma_cm ib_cm dm_mirror dm_region_hash dm_log iw_cm dm_mod aes_ce_blk crypto_simd cryptd aes_ce_cipher ses ghash_ce sha2_ce enclosure sha256_arm64 sg sha1_ce hisi_sas_v2_hw hibmc_drm sbsa_gwdt hisi_sas_main ip_tables mlx5_ib ib_uverbs marvell ib_core mlx5_core ixgbe mdio hns_dsaf ipmi_devintf hns_enet_drv ipmi_msghandler hns_mdio [last unloaded: ipmi_si] [ 1623.532410] CPU: 30 PID: 11438 Comm: cat Kdump: loaded Tainted: G E 5.0.0-rc3+ #168 [ 1623.541498] Hardware name: Huawei TaiShan 2280 /BC11SPCD, BIOS 1.37 11/21/2017 [ 1623.548822] pstate: a0000005 (NzCv daif -PAN -UAO) [ 1623.553684] pc : string+0x28/0x98 [ 1623.557040] lr : vsnprintf+0x368/0x5e8 [ 1623.560837] sp : ffff000013213a80 [ 1623.564191] x29: ffff000013213a80 x28: ffff00001138abb5 [ 1623.569577] x27: ffff000013213c18 x26: ffff805f67d06049 [ 1623.574963] x25: 0000000000000000 x24: ffff00001138abb5 [ 1623.580349] x23: 0000000000000fb7 x22: ffff0000117ed000 [ 1623.585734] x21: ffff000011188fd8 x20: ffff805f67d07000 [ 1623.591119] x19: ffff805f67d06061 x18: ffffffffffffffff [ 1623.596505] x17: 0000000000000200 x16: 0000000000000000 [ 1623.601890] x15: ffff0000117ed748 x14: ffff805f67d07000 [ 1623.607276] x13: ffff805f67d0605e x12: 0000000000000000 [ 1623.612661] x11: 0000000000000000 x10: 0000000000000000 [ 1623.618046] x9 : 0000000000000000 x8 : 000000000000000f [ 1623.623432] x7 : ffff805f67d06061 x6 : fffffffffffffffe [ 1623.628817] x5 : 0000000000000012 x4 : ffff00000901d478 [ 1623.634203] x3 : ffff0a00ffffff04 x2 : ffff805f67d07000 [ 1623.639588] x1 : ffff805f67d07000 x0 : ffffffffffffffff [ 1623.644974] Process cat (pid: 11438, stack limit = 0x000000008d4cbc10) [ 1623.651592] Call trace: [ 1623.654068] string+0x28/0x98 [ 1623.657071] vsnprintf+0x368/0x5e8 [ 1623.660517] seq_vprintf+0x70/0x98 [ 1623.668009] seq_printf+0x7c/0xa0 [ 1623.675530] r_show+0xc8/0xf8 [ 1623.682558] seq_read+0x330/0x440 [ 1623.689877] proc_reg_read+0x78/0xd0 [ 1623.697346] __vfs_read+0x60/0x1a0 [ 1623.704564] vfs_read+0x94/0x150 [ 1623.711339] ksys_read+0x6c/0xd8 [ 1623.717939] __arm64_sys_read+0x24/0x30 [ 1623.725077] el0_svc_common+0x120/0x148 [ 1623.732035] el0_svc_handler+0x30/0x40 [ 1623.738757] el0_svc+0x8/0xc [ 1623.744520] Code: d1000406 aa0103e2 54000149 b4000080 (39400085) [ 1623.753441] ---[ end trace f91b6a4937de9835 ]--- [ 1623.760871] Kernel panic - not syncing: Fatal exception [ 1623.768935] SMP: stopping secondary CPUs [ 1623.775718] Kernel Offset: disabled [ 1623.781998] CPU features: 0x002,21006008 [ 1623.788777] Memory Limit: none [ 1623.798329] Starting crashdump kernel... [ 1623.805202] Bye! If io_setup is called successful in try_smi_init() but try_smi_init() goes out_err before calling ipmi_register_smi(), so ipmi_unregister_smi() will not be called while removing module. It leads to the resource that allocated in io_setup() can not be freed, but the name(DEVICE_NAME) of resource is freed while removing the module. It causes use-after-free when cat /proc/ioports. Fix this by calling io_cleanup() while try_smi_init() goes to out_err. and don't call io_cleanup() until io_setup() returns successful to avoid warning prints. Fixes: 93c303d2045b ("ipmi_si: Clean up shutdown a bit") Cc: [email protected] Reported-by: NuoHan Qiao <[email protected]> Suggested-by: Corey Minyard <[email protected]> Signed-off-by: Yang Yingliang <[email protected]> Signed-off-by: Corey Minyard <[email protected]>
static unsigned char intf_mem_inl(const struct si_sm_io *io, unsigned int offset) { return (readl((io->addr)+(offset * io->regspacing)) >> io->regshift) & 0xff; }
static unsigned char intf_mem_inl(const struct si_sm_io *io, unsigned int offset) { return (readl((io->addr)+(offset * io->regspacing)) >> io->regshift) & 0xff; }
C
linux
0
CVE-2018-6063
https://www.cvedetails.com/cve/CVE-2018-6063/
CWE-787
https://github.com/chromium/chromium/commit/673ce95d481ea9368c4d4d43ac756ba1d6d9e608
673ce95d481ea9368c4d4d43ac756ba1d6d9e608
Correct mojo::WrapSharedMemoryHandle usage Fixes some incorrect uses of mojo::WrapSharedMemoryHandle which were assuming that the call actually has any control over the memory protection applied to a handle when mapped. Where fixing usage is infeasible for this CL, TODOs are added to annotate follow-up work. Also updates the API and documentation to (hopefully) improve clarity and avoid similar mistakes from being made in the future. BUG=792900 Cq-Include-Trybots: master.tryserver.chromium.android:android_optional_gpu_tests_rel;master.tryserver.chromium.linux:linux_optional_gpu_tests_rel;master.tryserver.chromium.mac:mac_optional_gpu_tests_rel;master.tryserver.chromium.win:win_optional_gpu_tests_rel Change-Id: I0578aaa9ca3bfcb01aaf2451315d1ede95458477 Reviewed-on: https://chromium-review.googlesource.com/818282 Reviewed-by: Wei Li <[email protected]> Reviewed-by: Lei Zhang <[email protected]> Reviewed-by: John Abd-El-Malek <[email protected]> Reviewed-by: Daniel Cheng <[email protected]> Reviewed-by: Sadrul Chowdhury <[email protected]> Reviewed-by: Yuzhu Shen <[email protected]> Reviewed-by: Robert Sesek <[email protected]> Commit-Queue: Ken Rockot <[email protected]> Cr-Commit-Position: refs/heads/master@{#530268}
bool SharedMemoryHandleProvider::InitForSize(size_t size) { #if DCHECK_IS_ON() DCHECK_EQ(map_ref_count_, 0); #endif DCHECK(!shared_memory_); shared_memory_.emplace(); if (shared_memory_->CreateAnonymous(size)) { mapped_size_ = size; read_only_flag_ = false; return true; } return false; }
bool SharedMemoryHandleProvider::InitForSize(size_t size) { #if DCHECK_IS_ON() DCHECK_EQ(map_ref_count_, 0); #endif DCHECK(!shared_memory_); shared_memory_.emplace(); if (shared_memory_->CreateAnonymous(size)) { mapped_size_ = size; read_only_flag_ = false; return true; } return false; }
C
Chrome
0
CVE-2014-9644
https://www.cvedetails.com/cve/CVE-2014-9644/
CWE-264
https://github.com/torvalds/linux/commit/4943ba16bbc2db05115707b3ff7b4874e9e3c560
4943ba16bbc2db05115707b3ff7b4874e9e3c560
crypto: include crypto- module prefix in template This adds the module loading prefix "crypto-" to the template lookup as well. For example, attempting to load 'vfat(blowfish)' via AF_ALG now correctly includes the "crypto-" prefix at every level, correctly rejecting "vfat": net-pf-38 algif-hash crypto-vfat(blowfish) crypto-vfat(blowfish)-all crypto-vfat Reported-by: Mathias Krause <[email protected]> Signed-off-by: Kees Cook <[email protected]> Acked-by: Mathias Krause <[email protected]> Signed-off-by: Herbert Xu <[email protected]>
static int crypto_ccm_decrypt(struct aead_request *req) { struct crypto_aead *aead = crypto_aead_reqtfm(req); struct crypto_ccm_ctx *ctx = crypto_aead_ctx(aead); struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req); struct ablkcipher_request *abreq = &pctx->abreq; struct scatterlist *dst; unsigned int authsize = crypto_aead_authsize(aead); unsigned int cryptlen = req->cryptlen; u8 *authtag = pctx->auth_tag; u8 *odata = pctx->odata; u8 *iv = req->iv; int err; if (cryptlen < authsize) return -EINVAL; cryptlen -= authsize; err = crypto_ccm_check_iv(iv); if (err) return err; pctx->flags = aead_request_flags(req); scatterwalk_map_and_copy(authtag, req->src, cryptlen, authsize, 0); memset(iv + 15 - iv[0], 0, iv[0] + 1); sg_init_table(pctx->src, 2); sg_set_buf(pctx->src, authtag, 16); scatterwalk_sg_chain(pctx->src, 2, req->src); dst = pctx->src; if (req->src != req->dst) { sg_init_table(pctx->dst, 2); sg_set_buf(pctx->dst, authtag, 16); scatterwalk_sg_chain(pctx->dst, 2, req->dst); dst = pctx->dst; } ablkcipher_request_set_tfm(abreq, ctx->ctr); ablkcipher_request_set_callback(abreq, pctx->flags, crypto_ccm_decrypt_done, req); ablkcipher_request_set_crypt(abreq, pctx->src, dst, cryptlen + 16, iv); err = crypto_ablkcipher_decrypt(abreq); if (err) return err; err = crypto_ccm_auth(req, req->dst, cryptlen); if (err) return err; /* verify */ if (crypto_memneq(authtag, odata, authsize)) return -EBADMSG; return err; }
static int crypto_ccm_decrypt(struct aead_request *req) { struct crypto_aead *aead = crypto_aead_reqtfm(req); struct crypto_ccm_ctx *ctx = crypto_aead_ctx(aead); struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req); struct ablkcipher_request *abreq = &pctx->abreq; struct scatterlist *dst; unsigned int authsize = crypto_aead_authsize(aead); unsigned int cryptlen = req->cryptlen; u8 *authtag = pctx->auth_tag; u8 *odata = pctx->odata; u8 *iv = req->iv; int err; if (cryptlen < authsize) return -EINVAL; cryptlen -= authsize; err = crypto_ccm_check_iv(iv); if (err) return err; pctx->flags = aead_request_flags(req); scatterwalk_map_and_copy(authtag, req->src, cryptlen, authsize, 0); memset(iv + 15 - iv[0], 0, iv[0] + 1); sg_init_table(pctx->src, 2); sg_set_buf(pctx->src, authtag, 16); scatterwalk_sg_chain(pctx->src, 2, req->src); dst = pctx->src; if (req->src != req->dst) { sg_init_table(pctx->dst, 2); sg_set_buf(pctx->dst, authtag, 16); scatterwalk_sg_chain(pctx->dst, 2, req->dst); dst = pctx->dst; } ablkcipher_request_set_tfm(abreq, ctx->ctr); ablkcipher_request_set_callback(abreq, pctx->flags, crypto_ccm_decrypt_done, req); ablkcipher_request_set_crypt(abreq, pctx->src, dst, cryptlen + 16, iv); err = crypto_ablkcipher_decrypt(abreq); if (err) return err; err = crypto_ccm_auth(req, req->dst, cryptlen); if (err) return err; /* verify */ if (crypto_memneq(authtag, odata, authsize)) return -EBADMSG; return err; }
C
linux
0
CVE-2018-6942
https://www.cvedetails.com/cve/CVE-2018-6942/
CWE-476
https://git.savannah.gnu.org/cgit/freetype/freetype2.git/commit/?id=29c759284e305ec428703c9a5831d0b1fc3497ef
29c759284e305ec428703c9a5831d0b1fc3497ef
null
Ins_FDEF( TT_ExecContext exc, FT_Long* args ) { FT_ULong n; TT_DefRecord* rec; TT_DefRecord* limit; #ifdef TT_SUPPORT_SUBPIXEL_HINTING_INFINALITY /* arguments to opcodes are skipped by `SKIP_Code' */ FT_Byte opcode_pattern[9][12] = { /* #0 inline delta function 1 */ { 0x4B, /* PPEM */ 0x53, /* GTEQ */ 0x23, /* SWAP */ 0x4B, /* PPEM */ 0x51, /* LTEQ */ 0x5A, /* AND */ 0x58, /* IF */ 0x38, /* SHPIX */ 0x1B, /* ELSE */ 0x21, /* POP */ 0x21, /* POP */ 0x59 /* EIF */ }, /* #1 inline delta function 2 */ { 0x4B, /* PPEM */ 0x54, /* EQ */ 0x58, /* IF */ 0x38, /* SHPIX */ 0x1B, /* ELSE */ 0x21, /* POP */ 0x21, /* POP */ 0x59 /* EIF */ }, /* #2 diagonal stroke function */ { 0x20, /* DUP */ 0x20, /* DUP */ 0xB0, /* PUSHB_1 */ /* 1 */ 0x60, /* ADD */ 0x46, /* GC_cur */ 0xB0, /* PUSHB_1 */ /* 64 */ 0x23, /* SWAP */ 0x42 /* WS */ }, /* #3 VacuFormRound function */ { 0x45, /* RCVT */ 0x23, /* SWAP */ 0x46, /* GC_cur */ 0x60, /* ADD */ 0x20, /* DUP */ 0xB0 /* PUSHB_1 */ /* 38 */ }, /* #4 TTFautohint bytecode (old) */ { 0x20, /* DUP */ 0x64, /* ABS */ 0xB0, /* PUSHB_1 */ /* 32 */ 0x60, /* ADD */ 0x66, /* FLOOR */ 0x23, /* SWAP */ 0xB0 /* PUSHB_1 */ }, /* #5 spacing function 1 */ { 0x01, /* SVTCA_x */ 0xB0, /* PUSHB_1 */ /* 24 */ 0x43, /* RS */ 0x58 /* IF */ }, /* #6 spacing function 2 */ { 0x01, /* SVTCA_x */ 0x18, /* RTG */ 0xB0, /* PUSHB_1 */ /* 24 */ 0x43, /* RS */ 0x58 /* IF */ }, /* #7 TypeMan Talk DiagEndCtrl function */ { 0x01, /* SVTCA_x */ 0x20, /* DUP */ 0xB0, /* PUSHB_1 */ /* 3 */ 0x25, /* CINDEX */ }, /* #8 TypeMan Talk Align */ { 0x06, /* SPVTL */ 0x7D, /* RDTG */ }, }; FT_UShort opcode_patterns = 9; FT_UShort opcode_pointer[9] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 }; FT_UShort opcode_size[9] = { 12, 8, 8, 6, 7, 4, 5, 4, 2 }; FT_UShort i; #endif /* TT_SUPPORT_SUBPIXEL_HINTING_INFINALITY */ /* FDEF is only allowed in `prep' or `fpgm' */ if ( exc->curRange == tt_coderange_glyph ) { exc->error = FT_THROW( DEF_In_Glyf_Bytecode ); return; } /* some font programs are broken enough to redefine functions! */ /* We will then parse the current table. */ rec = exc->FDefs; limit = rec + exc->numFDefs; n = (FT_ULong)args[0]; for ( ; rec < limit; rec++ ) { if ( rec->opc == n ) break; } if ( rec == limit ) { /* check that there is enough room for new functions */ if ( exc->numFDefs >= exc->maxFDefs ) { exc->error = FT_THROW( Too_Many_Function_Defs ); return; } exc->numFDefs++; } /* Although FDEF takes unsigned 32-bit integer, */ /* func # must be within unsigned 16-bit integer */ if ( n > 0xFFFFU ) { exc->error = FT_THROW( Too_Many_Function_Defs ); return; } rec->range = exc->curRange; rec->opc = (FT_UInt16)n; rec->start = exc->IP + 1; rec->active = TRUE; rec->inline_delta = FALSE; rec->sph_fdef_flags = 0x0000; if ( n > exc->maxFunc ) exc->maxFunc = (FT_UInt16)n; #ifdef TT_SUPPORT_SUBPIXEL_HINTING_INFINALITY /* We don't know for sure these are typeman functions, */ /* however they are only active when RS 22 is called */ if ( n >= 64 && n <= 66 ) rec->sph_fdef_flags |= SPH_FDEF_TYPEMAN_STROKES; #endif /* Now skip the whole function definition. */ /* We don't allow nested IDEFS & FDEFs. 
*/ while ( SkipCode( exc ) == SUCCESS ) { #ifdef TT_SUPPORT_SUBPIXEL_HINTING_INFINALITY if ( SUBPIXEL_HINTING_INFINALITY ) { for ( i = 0; i < opcode_patterns; i++ ) { if ( opcode_pointer[i] < opcode_size[i] && exc->opcode == opcode_pattern[i][opcode_pointer[i]] ) { opcode_pointer[i] += 1; if ( opcode_pointer[i] == opcode_size[i] ) { FT_TRACE6(( "sph: Function %d, opcode ptrn: %d, %s %s\n", i, n, exc->face->root.family_name, exc->face->root.style_name )); switch ( i ) { case 0: rec->sph_fdef_flags |= SPH_FDEF_INLINE_DELTA_1; exc->face->sph_found_func_flags |= SPH_FDEF_INLINE_DELTA_1; break; case 1: rec->sph_fdef_flags |= SPH_FDEF_INLINE_DELTA_2; exc->face->sph_found_func_flags |= SPH_FDEF_INLINE_DELTA_2; break; case 2: switch ( n ) { /* needs to be implemented still */ case 58: rec->sph_fdef_flags |= SPH_FDEF_DIAGONAL_STROKE; exc->face->sph_found_func_flags |= SPH_FDEF_DIAGONAL_STROKE; } break; case 3: switch ( n ) { case 0: rec->sph_fdef_flags |= SPH_FDEF_VACUFORM_ROUND_1; exc->face->sph_found_func_flags |= SPH_FDEF_VACUFORM_ROUND_1; } break; case 4: /* probably not necessary to detect anymore */ rec->sph_fdef_flags |= SPH_FDEF_TTFAUTOHINT_1; exc->face->sph_found_func_flags |= SPH_FDEF_TTFAUTOHINT_1; break; case 5: switch ( n ) { case 0: case 1: case 2: case 4: case 7: case 8: rec->sph_fdef_flags |= SPH_FDEF_SPACING_1; exc->face->sph_found_func_flags |= SPH_FDEF_SPACING_1; } break; case 6: switch ( n ) { case 0: case 1: case 2: case 4: case 7: case 8: rec->sph_fdef_flags |= SPH_FDEF_SPACING_2; exc->face->sph_found_func_flags |= SPH_FDEF_SPACING_2; } break; case 7: rec->sph_fdef_flags |= SPH_FDEF_TYPEMAN_DIAGENDCTRL; exc->face->sph_found_func_flags |= SPH_FDEF_TYPEMAN_DIAGENDCTRL; break; case 8: #if 0 rec->sph_fdef_flags |= SPH_FDEF_TYPEMAN_DIAGENDCTRL; exc->face->sph_found_func_flags |= SPH_FDEF_TYPEMAN_DIAGENDCTRL; #endif break; } opcode_pointer[i] = 0; } } else opcode_pointer[i] = 0; } /* Set sph_compatibility_mode only when deltas are detected */ exc->face->sph_compatibility_mode = ( ( exc->face->sph_found_func_flags & SPH_FDEF_INLINE_DELTA_1 ) | ( exc->face->sph_found_func_flags & SPH_FDEF_INLINE_DELTA_2 ) ); } #endif /* TT_SUPPORT_SUBPIXEL_HINTING_INFINALITY */ switch ( exc->opcode ) { case 0x89: /* IDEF */ case 0x2C: /* FDEF */ exc->error = FT_THROW( Nested_DEFS ); return; case 0x2D: /* ENDF */ rec->end = exc->IP; return; } } }
Ins_FDEF( TT_ExecContext exc, FT_Long* args ) { FT_ULong n; TT_DefRecord* rec; TT_DefRecord* limit; #ifdef TT_SUPPORT_SUBPIXEL_HINTING_INFINALITY /* arguments to opcodes are skipped by `SKIP_Code' */ FT_Byte opcode_pattern[9][12] = { /* #0 inline delta function 1 */ { 0x4B, /* PPEM */ 0x53, /* GTEQ */ 0x23, /* SWAP */ 0x4B, /* PPEM */ 0x51, /* LTEQ */ 0x5A, /* AND */ 0x58, /* IF */ 0x38, /* SHPIX */ 0x1B, /* ELSE */ 0x21, /* POP */ 0x21, /* POP */ 0x59 /* EIF */ }, /* #1 inline delta function 2 */ { 0x4B, /* PPEM */ 0x54, /* EQ */ 0x58, /* IF */ 0x38, /* SHPIX */ 0x1B, /* ELSE */ 0x21, /* POP */ 0x21, /* POP */ 0x59 /* EIF */ }, /* #2 diagonal stroke function */ { 0x20, /* DUP */ 0x20, /* DUP */ 0xB0, /* PUSHB_1 */ /* 1 */ 0x60, /* ADD */ 0x46, /* GC_cur */ 0xB0, /* PUSHB_1 */ /* 64 */ 0x23, /* SWAP */ 0x42 /* WS */ }, /* #3 VacuFormRound function */ { 0x45, /* RCVT */ 0x23, /* SWAP */ 0x46, /* GC_cur */ 0x60, /* ADD */ 0x20, /* DUP */ 0xB0 /* PUSHB_1 */ /* 38 */ }, /* #4 TTFautohint bytecode (old) */ { 0x20, /* DUP */ 0x64, /* ABS */ 0xB0, /* PUSHB_1 */ /* 32 */ 0x60, /* ADD */ 0x66, /* FLOOR */ 0x23, /* SWAP */ 0xB0 /* PUSHB_1 */ }, /* #5 spacing function 1 */ { 0x01, /* SVTCA_x */ 0xB0, /* PUSHB_1 */ /* 24 */ 0x43, /* RS */ 0x58 /* IF */ }, /* #6 spacing function 2 */ { 0x01, /* SVTCA_x */ 0x18, /* RTG */ 0xB0, /* PUSHB_1 */ /* 24 */ 0x43, /* RS */ 0x58 /* IF */ }, /* #7 TypeMan Talk DiagEndCtrl function */ { 0x01, /* SVTCA_x */ 0x20, /* DUP */ 0xB0, /* PUSHB_1 */ /* 3 */ 0x25, /* CINDEX */ }, /* #8 TypeMan Talk Align */ { 0x06, /* SPVTL */ 0x7D, /* RDTG */ }, }; FT_UShort opcode_patterns = 9; FT_UShort opcode_pointer[9] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 }; FT_UShort opcode_size[9] = { 12, 8, 8, 6, 7, 4, 5, 4, 2 }; FT_UShort i; #endif /* TT_SUPPORT_SUBPIXEL_HINTING_INFINALITY */ /* FDEF is only allowed in `prep' or `fpgm' */ if ( exc->curRange == tt_coderange_glyph ) { exc->error = FT_THROW( DEF_In_Glyf_Bytecode ); return; } /* some font programs are broken enough to redefine functions! */ /* We will then parse the current table. */ rec = exc->FDefs; limit = rec + exc->numFDefs; n = (FT_ULong)args[0]; for ( ; rec < limit; rec++ ) { if ( rec->opc == n ) break; } if ( rec == limit ) { /* check that there is enough room for new functions */ if ( exc->numFDefs >= exc->maxFDefs ) { exc->error = FT_THROW( Too_Many_Function_Defs ); return; } exc->numFDefs++; } /* Although FDEF takes unsigned 32-bit integer, */ /* func # must be within unsigned 16-bit integer */ if ( n > 0xFFFFU ) { exc->error = FT_THROW( Too_Many_Function_Defs ); return; } rec->range = exc->curRange; rec->opc = (FT_UInt16)n; rec->start = exc->IP + 1; rec->active = TRUE; rec->inline_delta = FALSE; rec->sph_fdef_flags = 0x0000; if ( n > exc->maxFunc ) exc->maxFunc = (FT_UInt16)n; #ifdef TT_SUPPORT_SUBPIXEL_HINTING_INFINALITY /* We don't know for sure these are typeman functions, */ /* however they are only active when RS 22 is called */ if ( n >= 64 && n <= 66 ) rec->sph_fdef_flags |= SPH_FDEF_TYPEMAN_STROKES; #endif /* Now skip the whole function definition. */ /* We don't allow nested IDEFS & FDEFs. 
*/ while ( SkipCode( exc ) == SUCCESS ) { #ifdef TT_SUPPORT_SUBPIXEL_HINTING_INFINALITY if ( SUBPIXEL_HINTING_INFINALITY ) { for ( i = 0; i < opcode_patterns; i++ ) { if ( opcode_pointer[i] < opcode_size[i] && exc->opcode == opcode_pattern[i][opcode_pointer[i]] ) { opcode_pointer[i] += 1; if ( opcode_pointer[i] == opcode_size[i] ) { FT_TRACE6(( "sph: Function %d, opcode ptrn: %d, %s %s\n", i, n, exc->face->root.family_name, exc->face->root.style_name )); switch ( i ) { case 0: rec->sph_fdef_flags |= SPH_FDEF_INLINE_DELTA_1; exc->face->sph_found_func_flags |= SPH_FDEF_INLINE_DELTA_1; break; case 1: rec->sph_fdef_flags |= SPH_FDEF_INLINE_DELTA_2; exc->face->sph_found_func_flags |= SPH_FDEF_INLINE_DELTA_2; break; case 2: switch ( n ) { /* needs to be implemented still */ case 58: rec->sph_fdef_flags |= SPH_FDEF_DIAGONAL_STROKE; exc->face->sph_found_func_flags |= SPH_FDEF_DIAGONAL_STROKE; } break; case 3: switch ( n ) { case 0: rec->sph_fdef_flags |= SPH_FDEF_VACUFORM_ROUND_1; exc->face->sph_found_func_flags |= SPH_FDEF_VACUFORM_ROUND_1; } break; case 4: /* probably not necessary to detect anymore */ rec->sph_fdef_flags |= SPH_FDEF_TTFAUTOHINT_1; exc->face->sph_found_func_flags |= SPH_FDEF_TTFAUTOHINT_1; break; case 5: switch ( n ) { case 0: case 1: case 2: case 4: case 7: case 8: rec->sph_fdef_flags |= SPH_FDEF_SPACING_1; exc->face->sph_found_func_flags |= SPH_FDEF_SPACING_1; } break; case 6: switch ( n ) { case 0: case 1: case 2: case 4: case 7: case 8: rec->sph_fdef_flags |= SPH_FDEF_SPACING_2; exc->face->sph_found_func_flags |= SPH_FDEF_SPACING_2; } break; case 7: rec->sph_fdef_flags |= SPH_FDEF_TYPEMAN_DIAGENDCTRL; exc->face->sph_found_func_flags |= SPH_FDEF_TYPEMAN_DIAGENDCTRL; break; case 8: #if 0 rec->sph_fdef_flags |= SPH_FDEF_TYPEMAN_DIAGENDCTRL; exc->face->sph_found_func_flags |= SPH_FDEF_TYPEMAN_DIAGENDCTRL; #endif break; } opcode_pointer[i] = 0; } } else opcode_pointer[i] = 0; } /* Set sph_compatibility_mode only when deltas are detected */ exc->face->sph_compatibility_mode = ( ( exc->face->sph_found_func_flags & SPH_FDEF_INLINE_DELTA_1 ) | ( exc->face->sph_found_func_flags & SPH_FDEF_INLINE_DELTA_2 ) ); } #endif /* TT_SUPPORT_SUBPIXEL_HINTING_INFINALITY */ switch ( exc->opcode ) { case 0x89: /* IDEF */ case 0x2C: /* FDEF */ exc->error = FT_THROW( Nested_DEFS ); return; case 0x2D: /* ENDF */ rec->end = exc->IP; return; } } }
C
savannah
0
CVE-2017-8929
https://www.cvedetails.com/cve/CVE-2017-8929/
CWE-416
https://github.com/VirusTotal/yara/commit/053e67e3ec81cc9268ce30eaf0d6663d8639ed1e
053e67e3ec81cc9268ce30eaf0d6663d8639ed1e
Fix issue #658
int yr_execute_code( YR_RULES* rules, YR_SCAN_CONTEXT* context, int timeout, time_t start_time) { int64_t mem[MEM_SIZE]; int32_t sp = 0; uint8_t* ip = rules->code_start; YR_VALUE args[MAX_FUNCTION_ARGS]; YR_VALUE *stack; YR_VALUE r1; YR_VALUE r2; YR_VALUE r3; #ifdef PROFILING_ENABLED YR_RULE* current_rule = NULL; #endif YR_RULE* rule; YR_MATCH* match; YR_OBJECT_FUNCTION* function; YR_OBJECT** obj_ptr; YR_ARENA* obj_arena; char* identifier; char* args_fmt; int i; int found; int count; int result = ERROR_SUCCESS; int stop = FALSE; int cycle = 0; int tidx = context->tidx; int stack_size; #ifdef PROFILING_ENABLED clock_t start = clock(); #endif yr_get_configuration(YR_CONFIG_STACK_SIZE, (void*) &stack_size); stack = (YR_VALUE*) yr_malloc(stack_size * sizeof(YR_VALUE)); if (stack == NULL) return ERROR_INSUFFICIENT_MEMORY; FAIL_ON_ERROR_WITH_CLEANUP( yr_arena_create(1024, 0, &obj_arena), yr_free(stack)); while(!stop) { switch(*ip) { case OP_NOP: break; case OP_HALT: assert(sp == 0); // When HALT is reached the stack should be empty. stop = TRUE; break; case OP_PUSH: r1.i = *(uint64_t*)(ip + 1); ip += sizeof(uint64_t); push(r1); break; case OP_POP: pop(r1); break; case OP_CLEAR_M: r1.i = *(uint64_t*)(ip + 1); ip += sizeof(uint64_t); mem[r1.i] = 0; break; case OP_ADD_M: r1.i = *(uint64_t*)(ip + 1); ip += sizeof(uint64_t); pop(r2); if (!is_undef(r2)) mem[r1.i] += r2.i; break; case OP_INCR_M: r1.i = *(uint64_t*)(ip + 1); ip += sizeof(uint64_t); mem[r1.i]++; break; case OP_PUSH_M: r1.i = *(uint64_t*)(ip + 1); ip += sizeof(uint64_t); r1.i = mem[r1.i]; push(r1); break; case OP_POP_M: r1.i = *(uint64_t*)(ip + 1); ip += sizeof(uint64_t); pop(r2); mem[r1.i] = r2.i; break; case OP_SWAPUNDEF: r1.i = *(uint64_t*)(ip + 1); ip += sizeof(uint64_t); pop(r2); if (is_undef(r2)) { r1.i = mem[r1.i]; push(r1); } else { push(r2); } break; case OP_JNUNDEF: pop(r1); push(r1); ip = jmp_if(!is_undef(r1), ip); break; case OP_JLE: pop(r2); pop(r1); push(r1); push(r2); ip = jmp_if(r1.i <= r2.i, ip); break; case OP_JTRUE: pop(r1); push(r1); ip = jmp_if(!is_undef(r1) && r1.i, ip); break; case OP_JFALSE: pop(r1); push(r1); ip = jmp_if(is_undef(r1) || !r1.i, ip); break; case OP_AND: pop(r2); pop(r1); if (is_undef(r1) || is_undef(r2)) r1.i = 0; else r1.i = r1.i && r2.i; push(r1); break; case OP_OR: pop(r2); pop(r1); if (is_undef(r1)) { push(r2); } else if (is_undef(r2)) { push(r1); } else { r1.i = r1.i || r2.i; push(r1); } break; case OP_NOT: pop(r1); if (is_undef(r1)) r1.i = UNDEFINED; else r1.i= !r1.i; push(r1); break; case OP_MOD: pop(r2); pop(r1); ensure_defined(r2); ensure_defined(r1); if (r2.i != 0) r1.i = r1.i % r2.i; else r1.i = UNDEFINED; push(r1); break; case OP_SHR: pop(r2); pop(r1); ensure_defined(r2); ensure_defined(r1); r1.i = r1.i >> r2.i; push(r1); break; case OP_SHL: pop(r2); pop(r1); ensure_defined(r2); ensure_defined(r1); r1.i = r1.i << r2.i; push(r1); break; case OP_BITWISE_NOT: pop(r1); ensure_defined(r1); r1.i = ~r1.i; push(r1); break; case OP_BITWISE_AND: pop(r2); pop(r1); ensure_defined(r2); ensure_defined(r1); r1.i = r1.i & r2.i; push(r1); break; case OP_BITWISE_OR: pop(r2); pop(r1); ensure_defined(r2); ensure_defined(r1); r1.i = r1.i | r2.i; push(r1); break; case OP_BITWISE_XOR: pop(r2); pop(r1); ensure_defined(r2); ensure_defined(r1); r1.i = r1.i ^ r2.i; push(r1); break; case OP_PUSH_RULE: rule = *(YR_RULE**)(ip + 1); ip += sizeof(uint64_t); r1.i = rule->t_flags[tidx] & RULE_TFLAGS_MATCH ? 
1 : 0; push(r1); break; case OP_INIT_RULE: #ifdef PROFILING_ENABLED current_rule = *(YR_RULE**)(ip + 1); #endif ip += sizeof(uint64_t); break; case OP_MATCH_RULE: pop(r1); rule = *(YR_RULE**)(ip + 1); ip += sizeof(uint64_t); if (!is_undef(r1) && r1.i) rule->t_flags[tidx] |= RULE_TFLAGS_MATCH; else if (RULE_IS_GLOBAL(rule)) rule->ns->t_flags[tidx] |= NAMESPACE_TFLAGS_UNSATISFIED_GLOBAL; #ifdef PROFILING_ENABLED rule->clock_ticks += clock() - start; start = clock(); #endif assert(sp == 0); break; case OP_OBJ_LOAD: identifier = *(char**)(ip + 1); ip += sizeof(uint64_t); r1.o = (YR_OBJECT*) yr_hash_table_lookup( context->objects_table, identifier, NULL); assert(r1.o != NULL); push(r1); break; case OP_OBJ_FIELD: identifier = *(char**)(ip + 1); ip += sizeof(uint64_t); pop(r1); ensure_defined(r1); r1.o = yr_object_lookup_field(r1.o, identifier); assert(r1.o != NULL); push(r1); break; case OP_OBJ_VALUE: pop(r1); ensure_defined(r1); switch(r1.o->type) { case OBJECT_TYPE_INTEGER: r1.i = ((YR_OBJECT_INTEGER*) r1.o)->value; break; case OBJECT_TYPE_FLOAT: if (isnan(((YR_OBJECT_DOUBLE*) r1.o)->value)) r1.i = UNDEFINED; else r1.d = ((YR_OBJECT_DOUBLE*) r1.o)->value; break; case OBJECT_TYPE_STRING: if (((YR_OBJECT_STRING*) r1.o)->value == NULL) r1.i = UNDEFINED; else r1.p = ((YR_OBJECT_STRING*) r1.o)->value; break; default: assert(FALSE); } push(r1); break; case OP_INDEX_ARRAY: pop(r1); // index pop(r2); // array ensure_defined(r1); ensure_defined(r2); assert(r2.o->type == OBJECT_TYPE_ARRAY); r1.o = yr_object_array_get_item(r2.o, 0, (int) r1.i); if (r1.o == NULL) r1.i = UNDEFINED; push(r1); break; case OP_LOOKUP_DICT: pop(r1); // key pop(r2); // dictionary ensure_defined(r1); ensure_defined(r2); assert(r2.o->type == OBJECT_TYPE_DICTIONARY); r1.o = yr_object_dict_get_item( r2.o, 0, r1.ss->c_string); if (r1.o == NULL) r1.i = UNDEFINED; push(r1); break; case OP_CALL: args_fmt = *(char**)(ip + 1); ip += sizeof(uint64_t); i = (int) strlen(args_fmt); count = 0; while (i > 0) { pop(r1); if (is_undef(r1)) // count the number of undefined args count++; args[i - 1] = r1; i--; } pop(r2); ensure_defined(r2); if (count > 0) { r1.i = UNDEFINED; push(r1); break; } function = (YR_OBJECT_FUNCTION*) r2.o; result = ERROR_INTERNAL_FATAL_ERROR; for (i = 0; i < MAX_OVERLOADED_FUNCTIONS; i++) { if (function->prototypes[i].arguments_fmt == NULL) break; if (strcmp(function->prototypes[i].arguments_fmt, args_fmt) == 0) { result = function->prototypes[i].code(args, context, function); break; } } // if i == MAX_OVERLOADED_FUNCTIONS at this point no matching // prototype was found, but this shouldn't happen. assert(i < MAX_OVERLOADED_FUNCTIONS); // make a copy of the returned object and push the copy into the stack // function->return_obj can't be pushed because it can change in // subsequent calls to the same function. if (result == ERROR_SUCCESS) result = yr_object_copy(function->return_obj, &r1.o); // a pointer to the copied object is stored in a arena in order to // free the object before exiting yr_execute_code if (result == ERROR_SUCCESS) result = yr_arena_write_data(obj_arena, &r1.o, sizeof(r1.o), NULL); stop = (result != ERROR_SUCCESS); push(r1); break; case OP_FOUND: pop(r1); r1.i = r1.s->matches[tidx].tail != NULL ? 
1 : 0; push(r1); break; case OP_FOUND_AT: pop(r2); pop(r1); if (is_undef(r1)) { r1.i = 0; push(r1); break; } match = r2.s->matches[tidx].head; r3.i = FALSE; while (match != NULL) { if (r1.i == match->base + match->offset) { r3.i = TRUE; break; } if (r1.i < match->base + match->offset) break; match = match->next; } push(r3); break; case OP_FOUND_IN: pop(r3); pop(r2); pop(r1); ensure_defined(r1); ensure_defined(r2); match = r3.s->matches[tidx].head; r3.i = FALSE; while (match != NULL && !r3.i) { if (match->base + match->offset >= r1.i && match->base + match->offset <= r2.i) { r3.i = TRUE; } if (match->base + match->offset > r2.i) break; match = match->next; } push(r3); break; case OP_COUNT: pop(r1); r1.i = r1.s->matches[tidx].count; push(r1); break; case OP_OFFSET: pop(r2); pop(r1); ensure_defined(r1); match = r2.s->matches[tidx].head; i = 1; r3.i = UNDEFINED; while (match != NULL && r3.i == UNDEFINED) { if (r1.i == i) r3.i = match->base + match->offset; i++; match = match->next; } push(r3); break; case OP_LENGTH: pop(r2); pop(r1); ensure_defined(r1); match = r2.s->matches[tidx].head; i = 1; r3.i = UNDEFINED; while (match != NULL && r3.i == UNDEFINED) { if (r1.i == i) r3.i = match->match_length; i++; match = match->next; } push(r3); break; case OP_OF: found = 0; count = 0; pop(r1); while (!is_undef(r1)) { if (r1.s->matches[tidx].tail != NULL) found++; count++; pop(r1); } pop(r2); if (is_undef(r2)) r1.i = found >= count ? 1 : 0; else r1.i = found >= r2.i ? 1 : 0; push(r1); break; case OP_FILESIZE: r1.i = context->file_size; push(r1); break; case OP_ENTRYPOINT: r1.i = context->entry_point; push(r1); break; case OP_INT8: pop(r1); r1.i = read_int8_t_little_endian(context->iterator, (size_t) r1.i); push(r1); break; case OP_INT16: pop(r1); r1.i = read_int16_t_little_endian(context->iterator, (size_t) r1.i); push(r1); break; case OP_INT32: pop(r1); r1.i = read_int32_t_little_endian(context->iterator, (size_t) r1.i); push(r1); break; case OP_UINT8: pop(r1); r1.i = read_uint8_t_little_endian(context->iterator, (size_t) r1.i); push(r1); break; case OP_UINT16: pop(r1); r1.i = read_uint16_t_little_endian(context->iterator, (size_t) r1.i); push(r1); break; case OP_UINT32: pop(r1); r1.i = read_uint32_t_little_endian(context->iterator, (size_t) r1.i); push(r1); break; case OP_INT8BE: pop(r1); r1.i = read_int8_t_big_endian(context->iterator, (size_t) r1.i); push(r1); break; case OP_INT16BE: pop(r1); r1.i = read_int16_t_big_endian(context->iterator, (size_t) r1.i); push(r1); break; case OP_INT32BE: pop(r1); r1.i = read_int32_t_big_endian(context->iterator, (size_t) r1.i); push(r1); break; case OP_UINT8BE: pop(r1); r1.i = read_uint8_t_big_endian(context->iterator, (size_t) r1.i); push(r1); break; case OP_UINT16BE: pop(r1); r1.i = read_uint16_t_big_endian(context->iterator, (size_t) r1.i); push(r1); break; case OP_UINT32BE: pop(r1); r1.i = read_uint32_t_big_endian(context->iterator, (size_t) r1.i); push(r1); break; case OP_CONTAINS: pop(r2); pop(r1); ensure_defined(r1); ensure_defined(r2); r1.i = memmem(r1.ss->c_string, r1.ss->length, r2.ss->c_string, r2.ss->length) != NULL; push(r1); break; case OP_IMPORT: r1.i = *(uint64_t*)(ip + 1); ip += sizeof(uint64_t); result = yr_modules_load((char*) r1.p, context); if (result != ERROR_SUCCESS) stop = TRUE; break; case OP_MATCHES: pop(r2); pop(r1); ensure_defined(r2); ensure_defined(r1); if (r1.ss->length == 0) { r1.i = FALSE; push(r1); break; } result = yr_re_exec( (uint8_t*) r2.re->code, (uint8_t*) r1.ss->c_string, r1.ss->length, 0, r2.re->flags | RE_FLAGS_SCAN, 
NULL, NULL, &found); if (result != ERROR_SUCCESS) stop = TRUE; r1.i = found >= 0; push(r1); break; case OP_INT_TO_DBL: r1.i = *(uint64_t*)(ip + 1); ip += sizeof(uint64_t); r2 = stack[sp - r1.i]; if (is_undef(r2)) stack[sp - r1.i].i = UNDEFINED; else stack[sp - r1.i].d = (double) r2.i; break; case OP_STR_TO_BOOL: pop(r1); ensure_defined(r1); r1.i = r1.ss->length > 0; push(r1); break; case OP_INT_EQ: pop(r2); pop(r1); ensure_defined(r2); ensure_defined(r1); r1.i = r1.i == r2.i; push(r1); break; case OP_INT_NEQ: pop(r2); pop(r1); ensure_defined(r2); ensure_defined(r1); r1.i = r1.i != r2.i; push(r1); break; case OP_INT_LT: pop(r2); pop(r1); ensure_defined(r2); ensure_defined(r1); r1.i = r1.i < r2.i; push(r1); break; case OP_INT_GT: pop(r2); pop(r1); ensure_defined(r2); ensure_defined(r1); r1.i = r1.i > r2.i; push(r1); break; case OP_INT_LE: pop(r2); pop(r1); ensure_defined(r2); ensure_defined(r1); r1.i = r1.i <= r2.i; push(r1); break; case OP_INT_GE: pop(r2); pop(r1); ensure_defined(r2); ensure_defined(r1); r1.i = r1.i >= r2.i; push(r1); break; case OP_INT_ADD: pop(r2); pop(r1); ensure_defined(r2); ensure_defined(r1); r1.i = r1.i + r2.i; push(r1); break; case OP_INT_SUB: pop(r2); pop(r1); ensure_defined(r2); ensure_defined(r1); r1.i = r1.i - r2.i; push(r1); break; case OP_INT_MUL: pop(r2); pop(r1); ensure_defined(r2); ensure_defined(r1); r1.i = r1.i * r2.i; push(r1); break; case OP_INT_DIV: pop(r2); pop(r1); ensure_defined(r2); ensure_defined(r1); if (r2.i != 0) r1.i = r1.i / r2.i; else r1.i = UNDEFINED; push(r1); break; case OP_INT_MINUS: pop(r1); ensure_defined(r1); r1.i = -r1.i; push(r1); break; case OP_DBL_LT: pop(r2); pop(r1); ensure_defined(r2); ensure_defined(r1); r1.i = r1.d < r2.d; push(r1); break; case OP_DBL_GT: pop(r2); pop(r1); ensure_defined(r2); ensure_defined(r1); r1.i = r1.d > r2.d; push(r1); break; case OP_DBL_LE: pop(r2); pop(r1); ensure_defined(r2); ensure_defined(r1); r1.i = r1.d <= r2.d; push(r1); break; case OP_DBL_GE: pop(r2); pop(r1); ensure_defined(r2); ensure_defined(r1); r1.i = r1.d >= r2.d; push(r1); break; case OP_DBL_EQ: pop(r2); pop(r1); ensure_defined(r2); ensure_defined(r1); r1.i = r1.d == r2.d; push(r1); break; case OP_DBL_NEQ: pop(r2); pop(r1); ensure_defined(r2); ensure_defined(r1); r1.i = r1.d != r2.d; push(r1); break; case OP_DBL_ADD: pop(r2); pop(r1); ensure_defined(r2); ensure_defined(r1); r1.d = r1.d + r2.d; push(r1); break; case OP_DBL_SUB: pop(r2); pop(r1); ensure_defined(r2); ensure_defined(r1); r1.d = r1.d - r2.d; push(r1); break; case OP_DBL_MUL: pop(r2); pop(r1); ensure_defined(r2); ensure_defined(r1); r1.d = r1.d * r2.d; push(r1); break; case OP_DBL_DIV: pop(r2); pop(r1); ensure_defined(r2); ensure_defined(r1); r1.d = r1.d / r2.d; push(r1); break; case OP_DBL_MINUS: pop(r1); ensure_defined(r1); r1.d = -r1.d; push(r1); break; case OP_STR_EQ: case OP_STR_NEQ: case OP_STR_LT: case OP_STR_LE: case OP_STR_GT: case OP_STR_GE: pop(r2); pop(r1); ensure_defined(r1); ensure_defined(r2); switch(*ip) { case OP_STR_EQ: r1.i = (sized_string_cmp(r1.ss, r2.ss) == 0); break; case OP_STR_NEQ: r1.i = (sized_string_cmp(r1.ss, r2.ss) != 0); break; case OP_STR_LT: r1.i = (sized_string_cmp(r1.ss, r2.ss) < 0); break; case OP_STR_LE: r1.i = (sized_string_cmp(r1.ss, r2.ss) <= 0); break; case OP_STR_GT: r1.i = (sized_string_cmp(r1.ss, r2.ss) > 0); break; case OP_STR_GE: r1.i = (sized_string_cmp(r1.ss, r2.ss) >= 0); break; } push(r1); break; default: assert(FALSE); } if (timeout > 0) // timeout == 0 means no timeout { if (++cycle == 10) { if (difftime(time(NULL), 
start_time) > timeout) { #ifdef PROFILING_ENABLED assert(current_rule != NULL); current_rule->clock_ticks += clock() - start; #endif result = ERROR_SCAN_TIMEOUT; stop = TRUE; } cycle = 0; } } ip++; } obj_ptr = (YR_OBJECT**) yr_arena_base_address(obj_arena); while (obj_ptr != NULL) { yr_object_destroy(*obj_ptr); obj_ptr = (YR_OBJECT**) yr_arena_next_address( obj_arena, obj_ptr, sizeof(YR_OBJECT*)); } yr_arena_destroy(obj_arena); yr_modules_unload_all(context); yr_free(stack); return result; }
int yr_execute_code( YR_RULES* rules, YR_SCAN_CONTEXT* context, int timeout, time_t start_time) { int64_t mem[MEM_SIZE]; int32_t sp = 0; uint8_t* ip = rules->code_start; YR_VALUE args[MAX_FUNCTION_ARGS]; YR_VALUE *stack; YR_VALUE r1; YR_VALUE r2; YR_VALUE r3; #ifdef PROFILING_ENABLED YR_RULE* current_rule = NULL; #endif YR_RULE* rule; YR_MATCH* match; YR_OBJECT_FUNCTION* function; char* identifier; char* args_fmt; int i; int found; int count; int result = ERROR_SUCCESS; int stop = FALSE; int cycle = 0; int tidx = context->tidx; int stack_size; #ifdef PROFILING_ENABLED clock_t start = clock(); #endif yr_get_configuration(YR_CONFIG_STACK_SIZE, (void*) &stack_size); stack = (YR_VALUE*) yr_malloc(stack_size * sizeof(YR_VALUE)); if (stack == NULL) return ERROR_INSUFFICIENT_MEMORY; while(!stop) { switch(*ip) { case OP_NOP: break; case OP_HALT: assert(sp == 0); // When HALT is reached the stack should be empty. stop = TRUE; break; case OP_PUSH: r1.i = *(uint64_t*)(ip + 1); ip += sizeof(uint64_t); push(r1); break; case OP_POP: pop(r1); break; case OP_CLEAR_M: r1.i = *(uint64_t*)(ip + 1); ip += sizeof(uint64_t); mem[r1.i] = 0; break; case OP_ADD_M: r1.i = *(uint64_t*)(ip + 1); ip += sizeof(uint64_t); pop(r2); if (!is_undef(r2)) mem[r1.i] += r2.i; break; case OP_INCR_M: r1.i = *(uint64_t*)(ip + 1); ip += sizeof(uint64_t); mem[r1.i]++; break; case OP_PUSH_M: r1.i = *(uint64_t*)(ip + 1); ip += sizeof(uint64_t); r1.i = mem[r1.i]; push(r1); break; case OP_POP_M: r1.i = *(uint64_t*)(ip + 1); ip += sizeof(uint64_t); pop(r2); mem[r1.i] = r2.i; break; case OP_SWAPUNDEF: r1.i = *(uint64_t*)(ip + 1); ip += sizeof(uint64_t); pop(r2); if (is_undef(r2)) { r1.i = mem[r1.i]; push(r1); } else { push(r2); } break; case OP_JNUNDEF: pop(r1); push(r1); ip = jmp_if(!is_undef(r1), ip); break; case OP_JLE: pop(r2); pop(r1); push(r1); push(r2); ip = jmp_if(r1.i <= r2.i, ip); break; case OP_JTRUE: pop(r1); push(r1); ip = jmp_if(!is_undef(r1) && r1.i, ip); break; case OP_JFALSE: pop(r1); push(r1); ip = jmp_if(is_undef(r1) || !r1.i, ip); break; case OP_AND: pop(r2); pop(r1); if (is_undef(r1) || is_undef(r2)) r1.i = 0; else r1.i = r1.i && r2.i; push(r1); break; case OP_OR: pop(r2); pop(r1); if (is_undef(r1)) { push(r2); } else if (is_undef(r2)) { push(r1); } else { r1.i = r1.i || r2.i; push(r1); } break; case OP_NOT: pop(r1); if (is_undef(r1)) r1.i = UNDEFINED; else r1.i= !r1.i; push(r1); break; case OP_MOD: pop(r2); pop(r1); ensure_defined(r2); ensure_defined(r1); if (r2.i != 0) r1.i = r1.i % r2.i; else r1.i = UNDEFINED; push(r1); break; case OP_SHR: pop(r2); pop(r1); ensure_defined(r2); ensure_defined(r1); r1.i = r1.i >> r2.i; push(r1); break; case OP_SHL: pop(r2); pop(r1); ensure_defined(r2); ensure_defined(r1); r1.i = r1.i << r2.i; push(r1); break; case OP_BITWISE_NOT: pop(r1); ensure_defined(r1); r1.i = ~r1.i; push(r1); break; case OP_BITWISE_AND: pop(r2); pop(r1); ensure_defined(r2); ensure_defined(r1); r1.i = r1.i & r2.i; push(r1); break; case OP_BITWISE_OR: pop(r2); pop(r1); ensure_defined(r2); ensure_defined(r1); r1.i = r1.i | r2.i; push(r1); break; case OP_BITWISE_XOR: pop(r2); pop(r1); ensure_defined(r2); ensure_defined(r1); r1.i = r1.i ^ r2.i; push(r1); break; case OP_PUSH_RULE: rule = *(YR_RULE**)(ip + 1); ip += sizeof(uint64_t); r1.i = rule->t_flags[tidx] & RULE_TFLAGS_MATCH ? 
1 : 0; push(r1); break; case OP_INIT_RULE: #ifdef PROFILING_ENABLED current_rule = *(YR_RULE**)(ip + 1); #endif ip += sizeof(uint64_t); break; case OP_MATCH_RULE: pop(r1); rule = *(YR_RULE**)(ip + 1); ip += sizeof(uint64_t); if (!is_undef(r1) && r1.i) rule->t_flags[tidx] |= RULE_TFLAGS_MATCH; else if (RULE_IS_GLOBAL(rule)) rule->ns->t_flags[tidx] |= NAMESPACE_TFLAGS_UNSATISFIED_GLOBAL; #ifdef PROFILING_ENABLED rule->clock_ticks += clock() - start; start = clock(); #endif break; case OP_OBJ_LOAD: identifier = *(char**)(ip + 1); ip += sizeof(uint64_t); r1.o = (YR_OBJECT*) yr_hash_table_lookup( context->objects_table, identifier, NULL); assert(r1.o != NULL); push(r1); break; case OP_OBJ_FIELD: identifier = *(char**)(ip + 1); ip += sizeof(uint64_t); pop(r1); ensure_defined(r1); r1.o = yr_object_lookup_field(r1.o, identifier); assert(r1.o != NULL); push(r1); break; case OP_OBJ_VALUE: pop(r1); ensure_defined(r1); switch(r1.o->type) { case OBJECT_TYPE_INTEGER: r1.i = ((YR_OBJECT_INTEGER*) r1.o)->value; break; case OBJECT_TYPE_FLOAT: if (isnan(((YR_OBJECT_DOUBLE*) r1.o)->value)) r1.i = UNDEFINED; else r1.d = ((YR_OBJECT_DOUBLE*) r1.o)->value; break; case OBJECT_TYPE_STRING: if (((YR_OBJECT_STRING*) r1.o)->value == NULL) r1.i = UNDEFINED; else r1.p = ((YR_OBJECT_STRING*) r1.o)->value; break; default: assert(FALSE); } push(r1); break; case OP_INDEX_ARRAY: pop(r1); // index pop(r2); // array ensure_defined(r1); ensure_defined(r2); assert(r2.o->type == OBJECT_TYPE_ARRAY); r1.o = yr_object_array_get_item(r2.o, 0, (int) r1.i); if (r1.o == NULL) r1.i = UNDEFINED; push(r1); break; case OP_LOOKUP_DICT: pop(r1); // key pop(r2); // dictionary ensure_defined(r1); ensure_defined(r2); assert(r2.o->type == OBJECT_TYPE_DICTIONARY); r1.o = yr_object_dict_get_item( r2.o, 0, r1.ss->c_string); if (r1.o == NULL) r1.i = UNDEFINED; push(r1); break; case OP_CALL: args_fmt = *(char**)(ip + 1); ip += sizeof(uint64_t); i = (int) strlen(args_fmt); count = 0; while (i > 0) { pop(r1); if (is_undef(r1)) // count the number of undefined args count++; args[i - 1] = r1; i--; } pop(r2); ensure_defined(r2); if (count > 0) { r1.i = UNDEFINED; push(r1); break; } function = (YR_OBJECT_FUNCTION*) r2.o; result = ERROR_INTERNAL_FATAL_ERROR; for (i = 0; i < MAX_OVERLOADED_FUNCTIONS; i++) { if (function->prototypes[i].arguments_fmt == NULL) break; if (strcmp(function->prototypes[i].arguments_fmt, args_fmt) == 0) { result = function->prototypes[i].code(args, context, function); break; } } assert(i < MAX_OVERLOADED_FUNCTIONS); if (result == ERROR_SUCCESS) { r1.o = function->return_obj; push(r1); } else { stop = TRUE; } break; case OP_FOUND: pop(r1); r1.i = r1.s->matches[tidx].tail != NULL ? 
1 : 0; push(r1); break; case OP_FOUND_AT: pop(r2); pop(r1); if (is_undef(r1)) { r1.i = 0; push(r1); break; } match = r2.s->matches[tidx].head; r3.i = FALSE; while (match != NULL) { if (r1.i == match->base + match->offset) { r3.i = TRUE; break; } if (r1.i < match->base + match->offset) break; match = match->next; } push(r3); break; case OP_FOUND_IN: pop(r3); pop(r2); pop(r1); ensure_defined(r1); ensure_defined(r2); match = r3.s->matches[tidx].head; r3.i = FALSE; while (match != NULL && !r3.i) { if (match->base + match->offset >= r1.i && match->base + match->offset <= r2.i) { r3.i = TRUE; } if (match->base + match->offset > r2.i) break; match = match->next; } push(r3); break; case OP_COUNT: pop(r1); r1.i = r1.s->matches[tidx].count; push(r1); break; case OP_OFFSET: pop(r2); pop(r1); ensure_defined(r1); match = r2.s->matches[tidx].head; i = 1; r3.i = UNDEFINED; while (match != NULL && r3.i == UNDEFINED) { if (r1.i == i) r3.i = match->base + match->offset; i++; match = match->next; } push(r3); break; case OP_LENGTH: pop(r2); pop(r1); ensure_defined(r1); match = r2.s->matches[tidx].head; i = 1; r3.i = UNDEFINED; while (match != NULL && r3.i == UNDEFINED) { if (r1.i == i) r3.i = match->match_length; i++; match = match->next; } push(r3); break; case OP_OF: found = 0; count = 0; pop(r1); while (!is_undef(r1)) { if (r1.s->matches[tidx].tail != NULL) found++; count++; pop(r1); } pop(r2); if (is_undef(r2)) r1.i = found >= count ? 1 : 0; else r1.i = found >= r2.i ? 1 : 0; push(r1); break; case OP_FILESIZE: r1.i = context->file_size; push(r1); break; case OP_ENTRYPOINT: r1.i = context->entry_point; push(r1); break; case OP_INT8: pop(r1); r1.i = read_int8_t_little_endian(context->iterator, (size_t) r1.i); push(r1); break; case OP_INT16: pop(r1); r1.i = read_int16_t_little_endian(context->iterator, (size_t) r1.i); push(r1); break; case OP_INT32: pop(r1); r1.i = read_int32_t_little_endian(context->iterator, (size_t) r1.i); push(r1); break; case OP_UINT8: pop(r1); r1.i = read_uint8_t_little_endian(context->iterator, (size_t) r1.i); push(r1); break; case OP_UINT16: pop(r1); r1.i = read_uint16_t_little_endian(context->iterator, (size_t) r1.i); push(r1); break; case OP_UINT32: pop(r1); r1.i = read_uint32_t_little_endian(context->iterator, (size_t) r1.i); push(r1); break; case OP_INT8BE: pop(r1); r1.i = read_int8_t_big_endian(context->iterator, (size_t) r1.i); push(r1); break; case OP_INT16BE: pop(r1); r1.i = read_int16_t_big_endian(context->iterator, (size_t) r1.i); push(r1); break; case OP_INT32BE: pop(r1); r1.i = read_int32_t_big_endian(context->iterator, (size_t) r1.i); push(r1); break; case OP_UINT8BE: pop(r1); r1.i = read_uint8_t_big_endian(context->iterator, (size_t) r1.i); push(r1); break; case OP_UINT16BE: pop(r1); r1.i = read_uint16_t_big_endian(context->iterator, (size_t) r1.i); push(r1); break; case OP_UINT32BE: pop(r1); r1.i = read_uint32_t_big_endian(context->iterator, (size_t) r1.i); push(r1); break; case OP_CONTAINS: pop(r2); pop(r1); ensure_defined(r1); ensure_defined(r2); r1.i = memmem(r1.ss->c_string, r1.ss->length, r2.ss->c_string, r2.ss->length) != NULL; push(r1); break; case OP_IMPORT: r1.i = *(uint64_t*)(ip + 1); ip += sizeof(uint64_t); result = yr_modules_load((char*) r1.p, context); if (result != ERROR_SUCCESS) stop = TRUE; break; case OP_MATCHES: pop(r2); pop(r1); ensure_defined(r2); ensure_defined(r1); if (r1.ss->length == 0) { r1.i = FALSE; push(r1); break; } result = yr_re_exec( (uint8_t*) r2.re->code, (uint8_t*) r1.ss->c_string, r1.ss->length, 0, r2.re->flags | RE_FLAGS_SCAN, 
NULL, NULL, &found); if (result != ERROR_SUCCESS) stop = TRUE; r1.i = found >= 0; push(r1); break; case OP_INT_TO_DBL: r1.i = *(uint64_t*)(ip + 1); ip += sizeof(uint64_t); r2 = stack[sp - r1.i]; if (is_undef(r2)) stack[sp - r1.i].i = UNDEFINED; else stack[sp - r1.i].d = (double) r2.i; break; case OP_STR_TO_BOOL: pop(r1); ensure_defined(r1); r1.i = r1.ss->length > 0; push(r1); break; case OP_INT_EQ: pop(r2); pop(r1); ensure_defined(r2); ensure_defined(r1); r1.i = r1.i == r2.i; push(r1); break; case OP_INT_NEQ: pop(r2); pop(r1); ensure_defined(r2); ensure_defined(r1); r1.i = r1.i != r2.i; push(r1); break; case OP_INT_LT: pop(r2); pop(r1); ensure_defined(r2); ensure_defined(r1); r1.i = r1.i < r2.i; push(r1); break; case OP_INT_GT: pop(r2); pop(r1); ensure_defined(r2); ensure_defined(r1); r1.i = r1.i > r2.i; push(r1); break; case OP_INT_LE: pop(r2); pop(r1); ensure_defined(r2); ensure_defined(r1); r1.i = r1.i <= r2.i; push(r1); break; case OP_INT_GE: pop(r2); pop(r1); ensure_defined(r2); ensure_defined(r1); r1.i = r1.i >= r2.i; push(r1); break; case OP_INT_ADD: pop(r2); pop(r1); ensure_defined(r2); ensure_defined(r1); r1.i = r1.i + r2.i; push(r1); break; case OP_INT_SUB: pop(r2); pop(r1); ensure_defined(r2); ensure_defined(r1); r1.i = r1.i - r2.i; push(r1); break; case OP_INT_MUL: pop(r2); pop(r1); ensure_defined(r2); ensure_defined(r1); r1.i = r1.i * r2.i; push(r1); break; case OP_INT_DIV: pop(r2); pop(r1); ensure_defined(r2); ensure_defined(r1); if (r2.i != 0) r1.i = r1.i / r2.i; else r1.i = UNDEFINED; push(r1); break; case OP_INT_MINUS: pop(r1); ensure_defined(r1); r1.i = -r1.i; push(r1); break; case OP_DBL_LT: pop(r2); pop(r1); ensure_defined(r2); ensure_defined(r1); r1.i = r1.d < r2.d; push(r1); break; case OP_DBL_GT: pop(r2); pop(r1); ensure_defined(r2); ensure_defined(r1); r1.i = r1.d > r2.d; push(r1); break; case OP_DBL_LE: pop(r2); pop(r1); ensure_defined(r2); ensure_defined(r1); r1.i = r1.d <= r2.d; push(r1); break; case OP_DBL_GE: pop(r2); pop(r1); ensure_defined(r2); ensure_defined(r1); r1.i = r1.d >= r2.d; push(r1); break; case OP_DBL_EQ: pop(r2); pop(r1); ensure_defined(r2); ensure_defined(r1); r1.i = r1.d == r2.d; push(r1); break; case OP_DBL_NEQ: pop(r2); pop(r1); ensure_defined(r2); ensure_defined(r1); r1.i = r1.d != r2.d; push(r1); break; case OP_DBL_ADD: pop(r2); pop(r1); ensure_defined(r2); ensure_defined(r1); r1.d = r1.d + r2.d; push(r1); break; case OP_DBL_SUB: pop(r2); pop(r1); ensure_defined(r2); ensure_defined(r1); r1.d = r1.d - r2.d; push(r1); break; case OP_DBL_MUL: pop(r2); pop(r1); ensure_defined(r2); ensure_defined(r1); r1.d = r1.d * r2.d; push(r1); break; case OP_DBL_DIV: pop(r2); pop(r1); ensure_defined(r2); ensure_defined(r1); r1.d = r1.d / r2.d; push(r1); break; case OP_DBL_MINUS: pop(r1); ensure_defined(r1); r1.d = -r1.d; push(r1); break; case OP_STR_EQ: case OP_STR_NEQ: case OP_STR_LT: case OP_STR_LE: case OP_STR_GT: case OP_STR_GE: pop(r2); pop(r1); ensure_defined(r1); ensure_defined(r2); switch(*ip) { case OP_STR_EQ: r1.i = (sized_string_cmp(r1.ss, r2.ss) == 0); break; case OP_STR_NEQ: r1.i = (sized_string_cmp(r1.ss, r2.ss) != 0); break; case OP_STR_LT: r1.i = (sized_string_cmp(r1.ss, r2.ss) < 0); break; case OP_STR_LE: r1.i = (sized_string_cmp(r1.ss, r2.ss) <= 0); break; case OP_STR_GT: r1.i = (sized_string_cmp(r1.ss, r2.ss) > 0); break; case OP_STR_GE: r1.i = (sized_string_cmp(r1.ss, r2.ss) >= 0); break; } push(r1); break; default: assert(FALSE); } if (timeout > 0) // timeout == 0 means no timeout { if (++cycle == 10) { if (difftime(time(NULL), 
start_time) > timeout) { #ifdef PROFILING_ENABLED assert(current_rule != NULL); current_rule->clock_ticks += clock() - start; #endif result = ERROR_SCAN_TIMEOUT; stop = TRUE; } cycle = 0; } } ip++; } yr_modules_unload_all(context); yr_free(stack); return result; }
C
yara
1
CVE-2014-3690
https://www.cvedetails.com/cve/CVE-2014-3690/
CWE-399
https://github.com/torvalds/linux/commit/d974baa398f34393db76be45f7d4d04fbdbb4a0a
d974baa398f34393db76be45f7d4d04fbdbb4a0a
x86,kvm,vmx: Preserve CR4 across VM entry CR4 isn't constant; at least the TSD and PCE bits can vary. TBH, treating CR0 and CR3 as constant scares me a bit, too, but it looks like it's correct. This adds a branch and a read from cr4 to each vm entry. Because it is extremely likely that consecutive entries into the same vcpu will have the same host cr4 value, this fixes up the vmcs instead of restoring cr4 after the fact. A subsequent patch will add a kernel-wide cr4 shadow, reducing the overhead in the common case to just two memory reads and a branch. Signed-off-by: Andy Lutomirski <[email protected]> Acked-by: Paolo Bonzini <[email protected]> Cc: [email protected] Cc: Petr Matousek <[email protected]> Cc: Gleb Natapov <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
static u32 vmx_get_preemption_timer_value(struct kvm_vcpu *vcpu) { ktime_t remaining = hrtimer_get_remaining(&to_vmx(vcpu)->nested.preemption_timer); u64 value; if (ktime_to_ns(remaining) <= 0) return 0; value = ktime_to_ns(remaining) * vcpu->arch.virtual_tsc_khz; do_div(value, 1000000); return value >> VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE; }
static u32 vmx_get_preemption_timer_value(struct kvm_vcpu *vcpu) { ktime_t remaining = hrtimer_get_remaining(&to_vmx(vcpu)->nested.preemption_timer); u64 value; if (ktime_to_ns(remaining) <= 0) return 0; value = ktime_to_ns(remaining) * vcpu->arch.virtual_tsc_khz; do_div(value, 1000000); return value >> VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE; }
C
linux
0
CVE-2016-3698
https://www.cvedetails.com/cve/CVE-2016-3698/
CWE-284
https://github.com/jpirko/libndp/commit/a4892df306e0532487f1634ba6d4c6d4bb381c7f
a4892df306e0532487f1634ba6d4c6d4bb381c7f
libndp: validate the IPv6 hop limit None of the NDP messages should ever come from a non-local network; as stated in RFC4861's 6.1.1 (RS), 6.1.2 (RA), 7.1.1 (NS), 7.1.2 (NA), and 8.1. (redirect): - The IP Hop Limit field has a value of 255, i.e., the packet could not possibly have been forwarded by a router. This fixes CVE-2016-3698. Reported by: Julien BERNARD <[email protected]> Signed-off-by: Lubomir Rintel <[email protected]> Signed-off-by: Jiri Pirko <[email protected]>
static void ndp_msg_addrto_adjust_all_nodes(struct in6_addr *addr) { struct in6_addr any = IN6ADDR_ANY_INIT; if (memcmp(addr, &any, sizeof(any))) return; addr->s6_addr32[0] = htonl(0xFF020000); addr->s6_addr32[1] = 0; addr->s6_addr32[2] = 0; addr->s6_addr32[3] = htonl(0x1); }
static void ndp_msg_addrto_adjust_all_nodes(struct in6_addr *addr) { struct in6_addr any = IN6ADDR_ANY_INIT; if (memcmp(addr, &any, sizeof(any))) return; addr->s6_addr32[0] = htonl(0xFF020000); addr->s6_addr32[1] = 0; addr->s6_addr32[2] = 0; addr->s6_addr32[3] = htonl(0x1); }
C
libndp
0
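The libndp commit message above states the RFC 4861 rule that every valid NDP message must arrive with an IPv6 hop limit of 255 (i.e., it cannot have been forwarded by a router). As a small illustration of that rule only, here is a self-contained C sketch; ndp_hop_limit_ok is a name invented for this example and is not libndp's actual API.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Accept an NDP message only if its IPv6 hop limit is exactly 255. */
static bool ndp_hop_limit_ok(uint8_t hop_limit)
{
	return hop_limit == 255;
}

int main(void)
{
	printf("hop limit 255 accepted: %d\n", ndp_hop_limit_ok(255));	/* prints 1 */
	printf("hop limit 64 accepted:  %d\n", ndp_hop_limit_ok(64));	/* prints 0 */
	return 0;
}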
CVE-2011-4930
https://www.cvedetails.com/cve/CVE-2011-4930/
CWE-134
https://htcondor-git.cs.wisc.edu/?p=condor.git;a=commitdiff;h=5e5571d1a431eb3c61977b6dd6ec90186ef79867
5e5571d1a431eb3c61977b6dd6ec90186ef79867
null
BaseShadow::jobWantsGracefulRemoval() { bool job_wants_graceful_removal = false; ClassAd *thejobAd = getJobAd(); if( thejobAd ) { thejobAd->LookupBool( ATTR_WANT_GRACEFUL_REMOVAL, job_wants_graceful_removal ); } return job_wants_graceful_removal; }
BaseShadow::jobWantsGracefulRemoval() { bool job_wants_graceful_removal = false; ClassAd *thejobAd = getJobAd(); if( thejobAd ) { thejobAd->LookupBool( ATTR_WANT_GRACEFUL_REMOVAL, job_wants_graceful_removal ); } return job_wants_graceful_removal; }
CPP
htcondor
0
CVE-2012-6712
https://www.cvedetails.com/cve/CVE-2012-6712/
CWE-119
https://github.com/torvalds/linux/commit/2da424b0773cea3db47e1e81db71eeebde8269d4
2da424b0773cea3db47e1e81db71eeebde8269d4
iwlwifi: Sanity check for sta_id In my testing, I saw some strange behavior: [ 421.739708] iwlwifi 0000:01:00.0: ACTIVATE a non DRIVER active station id 148 addr 00:00:00:00:00:00 [ 421.739719] iwlwifi 0000:01:00.0: iwl_sta_ucode_activate Added STA id 148 addr 00:00:00:00:00:00 to uCode It is not clear how this happened, but add a sanity check to prevent memory corruption. Signed-off-by: Wey-Yi Guy <[email protected]> Signed-off-by: John W. Linville <[email protected]>
int iwl_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx, struct iwl_link_quality_cmd *lq, u8 flags, bool init) { int ret = 0; unsigned long flags_spin; struct iwl_host_cmd cmd = { .id = REPLY_TX_LINK_QUALITY_CMD, .len = { sizeof(struct iwl_link_quality_cmd), }, .flags = flags, .data = { lq, }, }; if (WARN_ON(lq->sta_id == IWL_INVALID_STATION)) return -EINVAL; spin_lock_irqsave(&priv->shrd->sta_lock, flags_spin); if (!(priv->stations[lq->sta_id].used & IWL_STA_DRIVER_ACTIVE)) { spin_unlock_irqrestore(&priv->shrd->sta_lock, flags_spin); return -EINVAL; } spin_unlock_irqrestore(&priv->shrd->sta_lock, flags_spin); iwl_dump_lq_cmd(priv, lq); if (WARN_ON(init && (cmd.flags & CMD_ASYNC))) return -EINVAL; if (is_lq_table_valid(priv, ctx, lq)) ret = iwl_trans_send_cmd(trans(priv), &cmd); else ret = -EINVAL; if (cmd.flags & CMD_ASYNC) return ret; if (init) { IWL_DEBUG_INFO(priv, "init LQ command complete, " "clearing sta addition status for sta %d\n", lq->sta_id); spin_lock_irqsave(&priv->shrd->sta_lock, flags_spin); priv->stations[lq->sta_id].used &= ~IWL_STA_UCODE_INPROGRESS; spin_unlock_irqrestore(&priv->shrd->sta_lock, flags_spin); } return ret; }
int iwl_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx, struct iwl_link_quality_cmd *lq, u8 flags, bool init) { int ret = 0; unsigned long flags_spin; struct iwl_host_cmd cmd = { .id = REPLY_TX_LINK_QUALITY_CMD, .len = { sizeof(struct iwl_link_quality_cmd), }, .flags = flags, .data = { lq, }, }; if (WARN_ON(lq->sta_id == IWL_INVALID_STATION)) return -EINVAL; spin_lock_irqsave(&priv->shrd->sta_lock, flags_spin); if (!(priv->stations[lq->sta_id].used & IWL_STA_DRIVER_ACTIVE)) { spin_unlock_irqrestore(&priv->shrd->sta_lock, flags_spin); return -EINVAL; } spin_unlock_irqrestore(&priv->shrd->sta_lock, flags_spin); iwl_dump_lq_cmd(priv, lq); if (WARN_ON(init && (cmd.flags & CMD_ASYNC))) return -EINVAL; if (is_lq_table_valid(priv, ctx, lq)) ret = iwl_trans_send_cmd(trans(priv), &cmd); else ret = -EINVAL; if (cmd.flags & CMD_ASYNC) return ret; if (init) { IWL_DEBUG_INFO(priv, "init LQ command complete, " "clearing sta addition status for sta %d\n", lq->sta_id); spin_lock_irqsave(&priv->shrd->sta_lock, flags_spin); priv->stations[lq->sta_id].used &= ~IWL_STA_UCODE_INPROGRESS; spin_unlock_irqrestore(&priv->shrd->sta_lock, flags_spin); } return ret; }
C
linux
0
CVE-2017-5546
https://www.cvedetails.com/cve/CVE-2017-5546/
null
https://github.com/torvalds/linux/commit/c4e490cf148e85ead0d1b1c2caaba833f1d5b29f
c4e490cf148e85ead0d1b1c2caaba833f1d5b29f
mm/slab.c: fix SLAB freelist randomization duplicate entries This patch fixes a bug in the freelist randomization code. When a high random number is used, the freelist will contain duplicate entries, which results in different allocations sharing the same chunk and leads to odd behaviours and crashes. It should be uncommon, but it depends on the machine; we saw it happening more often on some machines (every few hours of running tests). Fixes: c7ce4f60ac19 ("mm: SLAB freelist randomization") Link: http://lkml.kernel.org/r/[email protected] Signed-off-by: John Sperbeck <[email protected]> Signed-off-by: Thomas Garnier <[email protected]> Cc: Christoph Lameter <[email protected]> Cc: Pekka Enberg <[email protected]> Cc: David Rientjes <[email protected]> Cc: Joonsoo Kim <[email protected]> Cc: <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
static void slab_destroy_debugcheck(struct kmem_cache *cachep, struct page *page) { }
static void slab_destroy_debugcheck(struct kmem_cache *cachep, struct page *page) { }
C
linux
0
CVE-2018-15857
https://www.cvedetails.com/cve/CVE-2018-15857/
CWE-416
https://github.com/xkbcommon/libxkbcommon/commit/c1e5ac16e77a21f87bdf3bc4dea61b037a17dddb
c1e5ac16e77a21f87bdf3bc4dea61b037a17dddb
xkbcomp: fix pointer value for FreeStmt Signed-off-by: Peter Hutterer <[email protected]>
FreeStmt(ParseCommon *stmt) { ParseCommon *next; while (stmt) { next = stmt->next; switch (stmt->type) { case STMT_INCLUDE: FreeInclude((IncludeStmt *) stmt); /* stmt is already free'd here. */ stmt = NULL; break; case STMT_EXPR: FreeExpr((ExprDef *) stmt); break; case STMT_VAR: FreeStmt((ParseCommon *) ((VarDef *) stmt)->name); FreeStmt((ParseCommon *) ((VarDef *) stmt)->value); break; case STMT_TYPE: FreeStmt((ParseCommon *) ((KeyTypeDef *) stmt)->body); break; case STMT_INTERP: FreeStmt((ParseCommon *) ((InterpDef *) stmt)->match); FreeStmt((ParseCommon *) ((InterpDef *) stmt)->def); break; case STMT_VMOD: FreeStmt((ParseCommon *) ((VModDef *) stmt)->value); break; case STMT_SYMBOLS: FreeStmt((ParseCommon *) ((SymbolsDef *) stmt)->symbols); break; case STMT_MODMAP: FreeStmt((ParseCommon *) ((ModMapDef *) stmt)->keys); break; case STMT_GROUP_COMPAT: FreeStmt((ParseCommon *) ((GroupCompatDef *) stmt)->def); break; case STMT_LED_MAP: FreeStmt((ParseCommon *) ((LedMapDef *) stmt)->body); break; case STMT_LED_NAME: FreeStmt((ParseCommon *) ((LedNameDef *) stmt)->name); break; default: break; } free(stmt); stmt = next; } }
FreeStmt(ParseCommon *stmt) { ParseCommon *next; while (stmt) { next = stmt->next; switch (stmt->type) { case STMT_INCLUDE: FreeInclude((IncludeStmt *) stmt); /* stmt is already free'd here. */ stmt = NULL; break; case STMT_EXPR: FreeExpr((ExprDef *) stmt); break; case STMT_VAR: FreeStmt((ParseCommon *) ((VarDef *) stmt)->name); FreeStmt((ParseCommon *) ((VarDef *) stmt)->value); break; case STMT_TYPE: FreeStmt((ParseCommon *) ((KeyTypeDef *) stmt)->body); break; case STMT_INTERP: FreeStmt((ParseCommon *) ((InterpDef *) stmt)->match); FreeStmt((ParseCommon *) ((InterpDef *) stmt)->def); break; case STMT_VMOD: FreeStmt((ParseCommon *) ((VModDef *) stmt)->value); break; case STMT_SYMBOLS: FreeStmt((ParseCommon *) ((SymbolsDef *) stmt)->symbols); break; case STMT_MODMAP: FreeStmt((ParseCommon *) ((ModMapDef *) stmt)->keys); break; case STMT_GROUP_COMPAT: FreeStmt((ParseCommon *) ((GroupCompatDef *) stmt)->def); break; case STMT_LED_MAP: FreeStmt((ParseCommon *) ((LedMapDef *) stmt)->body); break; case STMT_LED_NAME: FreeStmt((ParseCommon *) ((LedNameDef *) stmt)->name); break; default: break; } free(stmt); stmt = next; } }
C
libxkbcommon
0
CVE-2013-6376
https://www.cvedetails.com/cve/CVE-2013-6376/
CWE-189
https://github.com/torvalds/linux/commit/17d68b763f09a9ce824ae23eb62c9efc57b69271
17d68b763f09a9ce824ae23eb62c9efc57b69271
KVM: x86: fix guest-initiated crash with x2apic (CVE-2013-6376) A guest can cause a BUG_ON() leading to a host kernel crash. When the guest writes to the ICR to request an IPI while in x2apic mode, the following things happen: the destination is read from ICR2, which is a register that the guest can control; kvm_irq_delivery_to_apic_fast uses the high 16 bits of ICR2 as the cluster id; and a BUG_ON is triggered, which is a protection against accessing map->logical_map with an out-of-bounds access and manages to avoid that anything really unsafe occurs. The logic in the code is correct from a real HW point of view. The problem is that KVM supports only one cluster with ID 0 in clustered mode, but the code that has the bug does not take this into account. Reported-by: Lars Bull <[email protected]> Cc: [email protected] Signed-off-by: Gleb Natapov <[email protected]> Signed-off-by: Paolo Bonzini <[email protected]>
int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest) { return dest == 0xff || kvm_apic_id(apic) == dest; }
int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest) { return dest == 0xff || kvm_apic_id(apic) == dest; }
C
linux
0
CVE-2014-3153
https://www.cvedetails.com/cve/CVE-2014-3153/
CWE-264
https://github.com/torvalds/linux/commit/e9c243a5a6de0be8e584c604d353412584b592f8
e9c243a5a6de0be8e584c604d353412584b592f8
futex-prevent-requeue-pi-on-same-futex.patch futex: Forbid uaddr == uaddr2 in futex_requeue(..., requeue_pi=1) If uaddr == uaddr2, then we have broken the rule of only requeueing from a non-pi futex to a pi futex with this call. If we attempt this, then dangling pointers may be left for rt_waiter resulting in an exploitable condition. This change brings futex_requeue() in line with futex_wait_requeue_pi() which performs the same check as per commit 6f7b0a2a5c0f ("futex: Forbid uaddr == uaddr2 in futex_wait_requeue_pi()") [ tglx: Compare the resulting keys as well, as uaddrs might be different depending on the mapping ] Fixes CVE-2014-3153. Reported-by: Pinkie Pie Signed-off-by: Will Drewry <[email protected]> Signed-off-by: Kees Cook <[email protected]> Cc: [email protected] Signed-off-by: Thomas Gleixner <[email protected]> Reviewed-by: Darren Hart <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
static struct futex_hash_bucket *hash_futex(union futex_key *key) { u32 hash = jhash2((u32*)&key->both.word, (sizeof(key->both.word)+sizeof(key->both.ptr))/4, key->both.offset); return &futex_queues[hash & (futex_hashsize - 1)]; }
static struct futex_hash_bucket *hash_futex(union futex_key *key) { u32 hash = jhash2((u32*)&key->both.word, (sizeof(key->both.word)+sizeof(key->both.ptr))/4, key->both.offset); return &futex_queues[hash & (futex_hashsize - 1)]; }
C
linux
0
CVE-2010-1149
https://www.cvedetails.com/cve/CVE-2010-1149/
CWE-200
https://cgit.freedesktop.org/udisks/commit/?id=0fcc7cb3b66f23fac53ae08647aa0007a2bd56c4
0fcc7cb3b66f23fac53ae08647aa0007a2bd56c4
null
update_info_presentation (Device *device) { gboolean hide; gboolean nopolicy; hide = FALSE; if (g_udev_device_has_property (device->priv->d, "UDISKS_PRESENTATION_HIDE")) hide = g_udev_device_get_property_as_boolean (device->priv->d, "UDISKS_PRESENTATION_HIDE"); device_set_device_presentation_hide (device, hide); nopolicy = FALSE; if (g_udev_device_has_property (device->priv->d, "UDISKS_PRESENTATION_NOPOLICY")) nopolicy = g_udev_device_get_property_as_boolean (device->priv->d, "UDISKS_PRESENTATION_NOPOLICY"); device_set_device_presentation_nopolicy (device, nopolicy); device_set_device_presentation_name (device, g_udev_device_get_property (device->priv->d, "UDISKS_PRESENTATION_NAME")); device_set_device_presentation_icon_name (device, g_udev_device_get_property (device->priv->d, "UDISKS_PRESENTATION_ICON_NAME")); return TRUE; }
update_info_presentation (Device *device) { gboolean hide; gboolean nopolicy; hide = FALSE; if (g_udev_device_has_property (device->priv->d, "UDISKS_PRESENTATION_HIDE")) hide = g_udev_device_get_property_as_boolean (device->priv->d, "UDISKS_PRESENTATION_HIDE"); device_set_device_presentation_hide (device, hide); nopolicy = FALSE; if (g_udev_device_has_property (device->priv->d, "UDISKS_PRESENTATION_NOPOLICY")) nopolicy = g_udev_device_get_property_as_boolean (device->priv->d, "UDISKS_PRESENTATION_NOPOLICY"); device_set_device_presentation_nopolicy (device, nopolicy); device_set_device_presentation_name (device, g_udev_device_get_property (device->priv->d, "UDISKS_PRESENTATION_NAME")); device_set_device_presentation_icon_name (device, g_udev_device_get_property (device->priv->d, "UDISKS_PRESENTATION_ICON_NAME")); return TRUE; }
C
udisks
0
null
null
null
https://github.com/chromium/chromium/commit/7cb8e1ae121cf6b14aa0a59cc708de630c0ef965
7cb8e1ae121cf6b14aa0a59cc708de630c0ef965
Move variations prefs into the variations component These prefs are used by variations code that is targeted for componentization. BUG=382865 TBR=thakis Review URL: https://codereview.chromium.org/1265423003 Cr-Commit-Position: refs/heads/master@{#343661}
bool ProcessSingletonNotificationCallback( const base::CommandLine& command_line, const base::FilePath& current_directory) { if (!g_browser_process || g_browser_process->IsShuttingDown()) { #if defined(OS_WIN) browser_watcher::ExitFunnel::RecordSingleEvent( chrome::kBrowserExitCodesRegistryPath, L"ProcessSingletonIsShuttingDown"); #endif // defined(OS_WIN) return false; } if (command_line.HasSwitch(switches::kOriginalProcessStartTime)) { std::string start_time_string = command_line.GetSwitchValueASCII(switches::kOriginalProcessStartTime); int64 remote_start_time; if (base::StringToInt64(start_time_string, &remote_start_time)) { base::TimeDelta elapsed = base::Time::Now() - base::Time::FromInternalValue(remote_start_time); if (command_line.HasSwitch(switches::kFastStart)) { UMA_HISTOGRAM_LONG_TIMES( "Startup.WarmStartTimeFromRemoteProcessStartFast", elapsed); } else { UMA_HISTOGRAM_LONG_TIMES( "Startup.WarmStartTimeFromRemoteProcessStart", elapsed); } } } g_browser_process->platform_part()->PlatformSpecificCommandLineProcessing( command_line); base::FilePath user_data_dir = g_browser_process->profile_manager()->user_data_dir(); base::FilePath startup_profile_dir = GetStartupProfilePath(user_data_dir, command_line); StartupBrowserCreator::ProcessCommandLineAlreadyRunning( command_line, current_directory, startup_profile_dir); return true; }
bool ProcessSingletonNotificationCallback( const base::CommandLine& command_line, const base::FilePath& current_directory) { if (!g_browser_process || g_browser_process->IsShuttingDown()) { #if defined(OS_WIN) browser_watcher::ExitFunnel::RecordSingleEvent( chrome::kBrowserExitCodesRegistryPath, L"ProcessSingletonIsShuttingDown"); #endif // defined(OS_WIN) return false; } if (command_line.HasSwitch(switches::kOriginalProcessStartTime)) { std::string start_time_string = command_line.GetSwitchValueASCII(switches::kOriginalProcessStartTime); int64 remote_start_time; if (base::StringToInt64(start_time_string, &remote_start_time)) { base::TimeDelta elapsed = base::Time::Now() - base::Time::FromInternalValue(remote_start_time); if (command_line.HasSwitch(switches::kFastStart)) { UMA_HISTOGRAM_LONG_TIMES( "Startup.WarmStartTimeFromRemoteProcessStartFast", elapsed); } else { UMA_HISTOGRAM_LONG_TIMES( "Startup.WarmStartTimeFromRemoteProcessStart", elapsed); } } } g_browser_process->platform_part()->PlatformSpecificCommandLineProcessing( command_line); base::FilePath user_data_dir = g_browser_process->profile_manager()->user_data_dir(); base::FilePath startup_profile_dir = GetStartupProfilePath(user_data_dir, command_line); StartupBrowserCreator::ProcessCommandLineAlreadyRunning( command_line, current_directory, startup_profile_dir); return true; }
C
Chrome
0
CVE-2013-0882
https://www.cvedetails.com/cve/CVE-2013-0882/
CWE-119
https://github.com/chromium/chromium/commit/25f9415f43d607d3d01f542f067e3cc471983e6b
25f9415f43d607d3d01f542f067e3cc471983e6b
Add HTMLFormControlElement::supportsAutofocus to fix a FIXME comment. This virtual function should return true if the form control can handle the 'autofocus' attribute if it is specified. Note: HTMLInputElement::supportsAutofocus reuses InputType::isInteractiveContent because interactiveness is required for autofocus capability. BUG=none TEST=none; no behavior changes. Review URL: https://codereview.chromium.org/143343003 git-svn-id: svn://svn.chromium.org/blink/trunk@165432 bbb929c8-8fbe-4397-9dbb-9b2b20218538
HTMLKeygenElement::HTMLKeygenElement(Document& document, HTMLFormElement* form) : HTMLFormControlElementWithState(keygenTag, document, form) { ScriptWrappable::init(this); ensureUserAgentShadowRoot(); }
HTMLKeygenElement::HTMLKeygenElement(Document& document, HTMLFormElement* form) : HTMLFormControlElementWithState(keygenTag, document, form) { ScriptWrappable::init(this); ensureUserAgentShadowRoot(); }
C
Chrome
0
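Illustrative aside on the record above (commit 25f9415f): the commit message introduces an HTMLFormControlElement::supportsAutofocus() virtual and has HTMLInputElement reuse InputType::isInteractiveContent(). A minimal sketch of that shape follows; the default return value and exact signatures are inferred from the message and typical Blink style of the time, not taken from the diff.

    // HTMLFormControlElement.h -- base hook, overridden by controls that
    // can honour the 'autofocus' content attribute.
    virtual bool supportsAutofocus() const { return false; }

    // HTMLInputElement.cpp -- inputs defer to their InputType, so only
    // interactive input types report autofocus support.
    bool HTMLInputElement::supportsAutofocus() const
    {
        return m_inputType->isInteractiveContent();
    }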
CVE-2014-3690
https://www.cvedetails.com/cve/CVE-2014-3690/
CWE-399
https://github.com/torvalds/linux/commit/d974baa398f34393db76be45f7d4d04fbdbb4a0a
d974baa398f34393db76be45f7d4d04fbdbb4a0a
x86,kvm,vmx: Preserve CR4 across VM entry CR4 isn't constant; at least the TSD and PCE bits can vary. TBH, treating CR0 and CR3 as constant scares me a bit, too, but it looks like it's correct. This adds a branch and a read from cr4 to each vm entry. Because it is extremely likely that consecutive entries into the same vcpu will have the same host cr4 value, this fixes up the vmcs instead of restoring cr4 after the fact. A subsequent patch will add a kernel-wide cr4 shadow, reducing the overhead in the common case to just two memory reads and a branch. Signed-off-by: Andy Lutomirski <[email protected]> Acked-by: Paolo Bonzini <[email protected]> Cc: [email protected] Cc: Petr Matousek <[email protected]> Cc: Gleb Natapov <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
static void __vmx_load_host_state(struct vcpu_vmx *vmx) { if (!vmx->host_state.loaded) return; ++vmx->vcpu.stat.host_state_reload; vmx->host_state.loaded = 0; #ifdef CONFIG_X86_64 if (is_long_mode(&vmx->vcpu)) rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base); #endif if (vmx->host_state.gs_ldt_reload_needed) { kvm_load_ldt(vmx->host_state.ldt_sel); #ifdef CONFIG_X86_64 load_gs_index(vmx->host_state.gs_sel); #else loadsegment(gs, vmx->host_state.gs_sel); #endif } if (vmx->host_state.fs_reload_needed) loadsegment(fs, vmx->host_state.fs_sel); #ifdef CONFIG_X86_64 if (unlikely(vmx->host_state.ds_sel | vmx->host_state.es_sel)) { loadsegment(ds, vmx->host_state.ds_sel); loadsegment(es, vmx->host_state.es_sel); } #endif reload_tss(); #ifdef CONFIG_X86_64 wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base); #endif if (vmx->host_state.msr_host_bndcfgs) wrmsrl(MSR_IA32_BNDCFGS, vmx->host_state.msr_host_bndcfgs); /* * If the FPU is not active (through the host task or * the guest vcpu), then restore the cr0.TS bit. */ if (!user_has_fpu() && !vmx->vcpu.guest_fpu_loaded) stts(); load_gdt(this_cpu_ptr(&host_gdt)); }
static void __vmx_load_host_state(struct vcpu_vmx *vmx) { if (!vmx->host_state.loaded) return; ++vmx->vcpu.stat.host_state_reload; vmx->host_state.loaded = 0; #ifdef CONFIG_X86_64 if (is_long_mode(&vmx->vcpu)) rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base); #endif if (vmx->host_state.gs_ldt_reload_needed) { kvm_load_ldt(vmx->host_state.ldt_sel); #ifdef CONFIG_X86_64 load_gs_index(vmx->host_state.gs_sel); #else loadsegment(gs, vmx->host_state.gs_sel); #endif } if (vmx->host_state.fs_reload_needed) loadsegment(fs, vmx->host_state.fs_sel); #ifdef CONFIG_X86_64 if (unlikely(vmx->host_state.ds_sel | vmx->host_state.es_sel)) { loadsegment(ds, vmx->host_state.ds_sel); loadsegment(es, vmx->host_state.es_sel); } #endif reload_tss(); #ifdef CONFIG_X86_64 wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base); #endif if (vmx->host_state.msr_host_bndcfgs) wrmsrl(MSR_IA32_BNDCFGS, vmx->host_state.msr_host_bndcfgs); /* * If the FPU is not active (through the host task or * the guest vcpu), then restore the cr0.TS bit. */ if (!user_has_fpu() && !vmx->vcpu.guest_fpu_loaded) stts(); load_gdt(this_cpu_ptr(&host_gdt)); }
C
linux
0
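Illustrative aside on the CVE-2014-3690 record above: the commit message explains that host CR4 is not constant (at least TSD and PCE can vary), so the VM-entry path now compares the live CR4 with a cached copy and refreshes the HOST_CR4 VMCS field only when it changed. A minimal sketch of that fast path follows, assuming a vmcs_host_cr4 field cached in the vCPU's host state as the message implies; treat it as an approximation of the patch.

    /* In vmx_vcpu_run(), shortly before VM entry: */
    unsigned long cr4 = read_cr4();

    if (unlikely(cr4 != vmx->host_state.vmcs_host_cr4)) {
            /*
             * Host CR4 changed since the last entry on this vCPU (e.g.
             * TSD or PCE was toggled): rewrite the VMCS host-state field
             * so VM exit restores the correct value.
             */
            vmcs_writel(HOST_CR4, cr4);
            vmx->host_state.vmcs_host_cr4 = cr4;
    }

Because consecutive entries on the same vCPU almost always see the same host CR4, the common case stays a single register read and a well-predicted branch, which is the overhead the commit message calls out.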
CVE-2015-1274
https://www.cvedetails.com/cve/CVE-2015-1274/
CWE-254
https://github.com/chromium/chromium/commit/d27468a832d5316884bd02f459cbf493697fd7e1
d27468a832d5316884bd02f459cbf493697fd7e1
Switch to equalIgnoringASCIICase throughout modules/accessibility BUG=627682 Review-Url: https://codereview.chromium.org/2793913007 Cr-Commit-Position: refs/heads/master@{#461858}
bool AXObject::computeAncestorExposesActiveDescendant() const { const AXObject* parent = parentObjectUnignored(); if (!parent) return false; if (parent->supportsActiveDescendant() && !parent->getAttribute(aria_activedescendantAttr).isEmpty()) { return true; } return parent->ancestorExposesActiveDescendant(); }
bool AXObject::computeAncestorExposesActiveDescendant() const { const AXObject* parent = parentObjectUnignored(); if (!parent) return false; if (parent->supportsActiveDescendant() && !parent->getAttribute(aria_activedescendantAttr).isEmpty()) { return true; } return parent->ancestorExposesActiveDescendant(); }
C
Chrome
0
null
null
null
https://github.com/chromium/chromium/commit/df831400bcb63db4259b5858281b1727ba972a2a
df831400bcb63db4259b5858281b1727ba972a2a
WebKit2: Support window bounce when panning. https://bugs.webkit.org/show_bug.cgi?id=58065 <rdar://problem/9244367> Reviewed by Adam Roben. Make gestureDidScroll synchronous, as once we scroll, we need to know whether or not we are at the beginning or end of the scrollable document. If we are at either end of the scrollable document, we call the Windows 7 API to bounce the window to give an indication that you are past an end of the document. * UIProcess/WebPageProxy.cpp: (WebKit::WebPageProxy::gestureDidScroll): Pass a boolean for the reply, and return it. * UIProcess/WebPageProxy.h: * UIProcess/win/WebView.cpp: (WebKit::WebView::WebView): Initialize a new variable. (WebKit::WebView::onGesture): Once we send the message to scroll, check if we have gone to an end of the document, and if we have, bounce the window. * UIProcess/win/WebView.h: * WebProcess/WebPage/WebPage.h: * WebProcess/WebPage/WebPage.messages.in: GestureDidScroll is now sync. * WebProcess/WebPage/win/WebPageWin.cpp: (WebKit::WebPage::gestureDidScroll): When we are done scrolling, check if we have a vertical scrollbar and if we are at the beginning or the end of the scrollable document. git-svn-id: svn://svn.chromium.org/blink/trunk@83197 bbb929c8-8fbe-4397-9dbb-9b2b20218538
void WebPageProxy::getStatusBarIsVisible(bool& statusBarIsVisible) { statusBarIsVisible = m_uiClient.statusBarIsVisible(this); }
void WebPageProxy::getStatusBarIsVisible(bool& statusBarIsVisible) { statusBarIsVisible = m_uiClient.statusBarIsVisible(this); }
C
Chrome
0
CVE-2018-16229
https://www.cvedetails.com/cve/CVE-2018-16229/
CWE-125
https://github.com/the-tcpdump-group/tcpdump/commit/211124b972e74f0da66bc8b16f181f78793e2f66
211124b972e74f0da66bc8b16f181f78793e2f66
(for 4.9.3) CVE-2018-16229/DCCP: Fix printing "Timestamp" and "Timestamp Echo" options Add some comments. Moreover: Put a function definition name at the beginning of the line. (This change was ported from commit 6df4852 in the master branch.) Ryan Ackroyd had independently identified this buffer over-read later by means of fuzzing and provided the packet capture file for the test.
void dccp_print(netdissect_options *ndo, const u_char *bp, const u_char *data2, u_int len) { const struct dccp_hdr *dh; const struct ip *ip; const struct ip6_hdr *ip6; const u_char *cp; u_short sport, dport; u_int hlen; u_int fixed_hdrlen; uint8_t dccph_type; dh = (const struct dccp_hdr *)bp; ip = (const struct ip *)data2; if (IP_V(ip) == 6) ip6 = (const struct ip6_hdr *)data2; else ip6 = NULL; /* make sure we have enough data to look at the X bit */ cp = (const u_char *)(dh + 1); if (cp > ndo->ndo_snapend) { ND_PRINT((ndo, "[Invalid packet|dccp]")); return; } if (len < sizeof(struct dccp_hdr)) { ND_PRINT((ndo, "truncated-dccp - %u bytes missing!", len - (u_int)sizeof(struct dccp_hdr))); return; } /* get the length of the generic header */ fixed_hdrlen = dccp_basic_hdr_len(dh); if (len < fixed_hdrlen) { ND_PRINT((ndo, "truncated-dccp - %u bytes missing!", len - fixed_hdrlen)); return; } ND_TCHECK2(*dh, fixed_hdrlen); sport = EXTRACT_16BITS(&dh->dccph_sport); dport = EXTRACT_16BITS(&dh->dccph_dport); hlen = dh->dccph_doff * 4; if (ip6) { ND_PRINT((ndo, "%s.%d > %s.%d: ", ip6addr_string(ndo, &ip6->ip6_src), sport, ip6addr_string(ndo, &ip6->ip6_dst), dport)); } else { ND_PRINT((ndo, "%s.%d > %s.%d: ", ipaddr_string(ndo, &ip->ip_src), sport, ipaddr_string(ndo, &ip->ip_dst), dport)); } ND_PRINT((ndo, "DCCP")); if (ndo->ndo_qflag) { ND_PRINT((ndo, " %d", len - hlen)); if (hlen > len) { ND_PRINT((ndo, " [bad hdr length %u - too long, > %u]", hlen, len)); } return; } /* other variables in generic header */ if (ndo->ndo_vflag) { ND_PRINT((ndo, " (CCVal %d, CsCov %d, ", DCCPH_CCVAL(dh), DCCPH_CSCOV(dh))); } /* checksum calculation */ if (ndo->ndo_vflag && ND_TTEST2(bp[0], len)) { uint16_t sum = 0, dccp_sum; dccp_sum = EXTRACT_16BITS(&dh->dccph_checksum); ND_PRINT((ndo, "cksum 0x%04x ", dccp_sum)); if (IP_V(ip) == 4) sum = dccp_cksum(ndo, ip, dh, len); else if (IP_V(ip) == 6) sum = dccp6_cksum(ndo, ip6, dh, len); if (sum != 0) ND_PRINT((ndo, "(incorrect -> 0x%04x)",in_cksum_shouldbe(dccp_sum, sum))); else ND_PRINT((ndo, "(correct)")); } if (ndo->ndo_vflag) ND_PRINT((ndo, ")")); ND_PRINT((ndo, " ")); dccph_type = DCCPH_TYPE(dh); switch (dccph_type) { case DCCP_PKT_REQUEST: { const struct dccp_hdr_request *dhr = (const struct dccp_hdr_request *)(bp + fixed_hdrlen); fixed_hdrlen += 4; if (len < fixed_hdrlen) { ND_PRINT((ndo, "truncated-%s - %u bytes missing!", tok2str(dccp_pkt_type_str, "", dccph_type), len - fixed_hdrlen)); return; } ND_TCHECK(*dhr); ND_PRINT((ndo, "%s (service=%d) ", tok2str(dccp_pkt_type_str, "", dccph_type), EXTRACT_32BITS(&dhr->dccph_req_service))); break; } case DCCP_PKT_RESPONSE: { const struct dccp_hdr_response *dhr = (const struct dccp_hdr_response *)(bp + fixed_hdrlen); fixed_hdrlen += 12; if (len < fixed_hdrlen) { ND_PRINT((ndo, "truncated-%s - %u bytes missing!", tok2str(dccp_pkt_type_str, "", dccph_type), len - fixed_hdrlen)); return; } ND_TCHECK(*dhr); ND_PRINT((ndo, "%s (service=%d) ", tok2str(dccp_pkt_type_str, "", dccph_type), EXTRACT_32BITS(&dhr->dccph_resp_service))); break; } case DCCP_PKT_DATA: ND_PRINT((ndo, "%s ", tok2str(dccp_pkt_type_str, "", dccph_type))); break; case DCCP_PKT_ACK: { fixed_hdrlen += 8; if (len < fixed_hdrlen) { ND_PRINT((ndo, "truncated-%s - %u bytes missing!", tok2str(dccp_pkt_type_str, "", dccph_type), len - fixed_hdrlen)); return; } ND_PRINT((ndo, "%s ", tok2str(dccp_pkt_type_str, "", dccph_type))); break; } case DCCP_PKT_DATAACK: { fixed_hdrlen += 8; if (len < fixed_hdrlen) { ND_PRINT((ndo, "truncated-%s - %u bytes missing!", 
tok2str(dccp_pkt_type_str, "", dccph_type), len - fixed_hdrlen)); return; } ND_PRINT((ndo, "%s ", tok2str(dccp_pkt_type_str, "", dccph_type))); break; } case DCCP_PKT_CLOSEREQ: fixed_hdrlen += 8; if (len < fixed_hdrlen) { ND_PRINT((ndo, "truncated-%s - %u bytes missing!", tok2str(dccp_pkt_type_str, "", dccph_type), len - fixed_hdrlen)); return; } ND_PRINT((ndo, "%s ", tok2str(dccp_pkt_type_str, "", dccph_type))); break; case DCCP_PKT_CLOSE: fixed_hdrlen += 8; if (len < fixed_hdrlen) { ND_PRINT((ndo, "truncated-%s - %u bytes missing!", tok2str(dccp_pkt_type_str, "", dccph_type), len - fixed_hdrlen)); return; } ND_PRINT((ndo, "%s ", tok2str(dccp_pkt_type_str, "", dccph_type))); break; case DCCP_PKT_RESET: { const struct dccp_hdr_reset *dhr = (const struct dccp_hdr_reset *)(bp + fixed_hdrlen); fixed_hdrlen += 12; if (len < fixed_hdrlen) { ND_PRINT((ndo, "truncated-%s - %u bytes missing!", tok2str(dccp_pkt_type_str, "", dccph_type), len - fixed_hdrlen)); return; } ND_TCHECK(*dhr); ND_PRINT((ndo, "%s (code=%s) ", tok2str(dccp_pkt_type_str, "", dccph_type), dccp_reset_code(dhr->dccph_reset_code))); break; } case DCCP_PKT_SYNC: fixed_hdrlen += 8; if (len < fixed_hdrlen) { ND_PRINT((ndo, "truncated-%s - %u bytes missing!", tok2str(dccp_pkt_type_str, "", dccph_type), len - fixed_hdrlen)); return; } ND_PRINT((ndo, "%s ", tok2str(dccp_pkt_type_str, "", dccph_type))); break; case DCCP_PKT_SYNCACK: fixed_hdrlen += 8; if (len < fixed_hdrlen) { ND_PRINT((ndo, "truncated-%s - %u bytes missing!", tok2str(dccp_pkt_type_str, "", dccph_type), len - fixed_hdrlen)); return; } ND_PRINT((ndo, "%s ", tok2str(dccp_pkt_type_str, "", dccph_type))); break; default: ND_PRINT((ndo, "%s ", tok2str(dccp_pkt_type_str, "unknown-type-%u", dccph_type))); break; } if ((DCCPH_TYPE(dh) != DCCP_PKT_DATA) && (DCCPH_TYPE(dh) != DCCP_PKT_REQUEST)) dccp_print_ack_no(ndo, bp); if (ndo->ndo_vflag < 2) return; ND_PRINT((ndo, "seq %" PRIu64, dccp_seqno(bp))); /* process options */ if (hlen > fixed_hdrlen){ u_int optlen; cp = bp + fixed_hdrlen; ND_PRINT((ndo, " <")); hlen -= fixed_hdrlen; while(1){ optlen = dccp_print_option(ndo, cp, hlen); if (!optlen) break; if (hlen <= optlen) break; hlen -= optlen; cp += optlen; ND_PRINT((ndo, ", ")); } ND_PRINT((ndo, ">")); } return; trunc: ND_PRINT((ndo, "%s", tstr)); return; }
void dccp_print(netdissect_options *ndo, const u_char *bp, const u_char *data2, u_int len) { const struct dccp_hdr *dh; const struct ip *ip; const struct ip6_hdr *ip6; const u_char *cp; u_short sport, dport; u_int hlen; u_int fixed_hdrlen; uint8_t dccph_type; dh = (const struct dccp_hdr *)bp; ip = (const struct ip *)data2; if (IP_V(ip) == 6) ip6 = (const struct ip6_hdr *)data2; else ip6 = NULL; /* make sure we have enough data to look at the X bit */ cp = (const u_char *)(dh + 1); if (cp > ndo->ndo_snapend) { ND_PRINT((ndo, "[Invalid packet|dccp]")); return; } if (len < sizeof(struct dccp_hdr)) { ND_PRINT((ndo, "truncated-dccp - %u bytes missing!", len - (u_int)sizeof(struct dccp_hdr))); return; } /* get the length of the generic header */ fixed_hdrlen = dccp_basic_hdr_len(dh); if (len < fixed_hdrlen) { ND_PRINT((ndo, "truncated-dccp - %u bytes missing!", len - fixed_hdrlen)); return; } ND_TCHECK2(*dh, fixed_hdrlen); sport = EXTRACT_16BITS(&dh->dccph_sport); dport = EXTRACT_16BITS(&dh->dccph_dport); hlen = dh->dccph_doff * 4; if (ip6) { ND_PRINT((ndo, "%s.%d > %s.%d: ", ip6addr_string(ndo, &ip6->ip6_src), sport, ip6addr_string(ndo, &ip6->ip6_dst), dport)); } else { ND_PRINT((ndo, "%s.%d > %s.%d: ", ipaddr_string(ndo, &ip->ip_src), sport, ipaddr_string(ndo, &ip->ip_dst), dport)); } ND_PRINT((ndo, "DCCP")); if (ndo->ndo_qflag) { ND_PRINT((ndo, " %d", len - hlen)); if (hlen > len) { ND_PRINT((ndo, " [bad hdr length %u - too long, > %u]", hlen, len)); } return; } /* other variables in generic header */ if (ndo->ndo_vflag) { ND_PRINT((ndo, " (CCVal %d, CsCov %d, ", DCCPH_CCVAL(dh), DCCPH_CSCOV(dh))); } /* checksum calculation */ if (ndo->ndo_vflag && ND_TTEST2(bp[0], len)) { uint16_t sum = 0, dccp_sum; dccp_sum = EXTRACT_16BITS(&dh->dccph_checksum); ND_PRINT((ndo, "cksum 0x%04x ", dccp_sum)); if (IP_V(ip) == 4) sum = dccp_cksum(ndo, ip, dh, len); else if (IP_V(ip) == 6) sum = dccp6_cksum(ndo, ip6, dh, len); if (sum != 0) ND_PRINT((ndo, "(incorrect -> 0x%04x)",in_cksum_shouldbe(dccp_sum, sum))); else ND_PRINT((ndo, "(correct)")); } if (ndo->ndo_vflag) ND_PRINT((ndo, ")")); ND_PRINT((ndo, " ")); dccph_type = DCCPH_TYPE(dh); switch (dccph_type) { case DCCP_PKT_REQUEST: { const struct dccp_hdr_request *dhr = (const struct dccp_hdr_request *)(bp + fixed_hdrlen); fixed_hdrlen += 4; if (len < fixed_hdrlen) { ND_PRINT((ndo, "truncated-%s - %u bytes missing!", tok2str(dccp_pkt_type_str, "", dccph_type), len - fixed_hdrlen)); return; } ND_TCHECK(*dhr); ND_PRINT((ndo, "%s (service=%d) ", tok2str(dccp_pkt_type_str, "", dccph_type), EXTRACT_32BITS(&dhr->dccph_req_service))); break; } case DCCP_PKT_RESPONSE: { const struct dccp_hdr_response *dhr = (const struct dccp_hdr_response *)(bp + fixed_hdrlen); fixed_hdrlen += 12; if (len < fixed_hdrlen) { ND_PRINT((ndo, "truncated-%s - %u bytes missing!", tok2str(dccp_pkt_type_str, "", dccph_type), len - fixed_hdrlen)); return; } ND_TCHECK(*dhr); ND_PRINT((ndo, "%s (service=%d) ", tok2str(dccp_pkt_type_str, "", dccph_type), EXTRACT_32BITS(&dhr->dccph_resp_service))); break; } case DCCP_PKT_DATA: ND_PRINT((ndo, "%s ", tok2str(dccp_pkt_type_str, "", dccph_type))); break; case DCCP_PKT_ACK: { fixed_hdrlen += 8; if (len < fixed_hdrlen) { ND_PRINT((ndo, "truncated-%s - %u bytes missing!", tok2str(dccp_pkt_type_str, "", dccph_type), len - fixed_hdrlen)); return; } ND_PRINT((ndo, "%s ", tok2str(dccp_pkt_type_str, "", dccph_type))); break; } case DCCP_PKT_DATAACK: { fixed_hdrlen += 8; if (len < fixed_hdrlen) { ND_PRINT((ndo, "truncated-%s - %u bytes missing!", 
tok2str(dccp_pkt_type_str, "", dccph_type), len - fixed_hdrlen)); return; } ND_PRINT((ndo, "%s ", tok2str(dccp_pkt_type_str, "", dccph_type))); break; } case DCCP_PKT_CLOSEREQ: fixed_hdrlen += 8; if (len < fixed_hdrlen) { ND_PRINT((ndo, "truncated-%s - %u bytes missing!", tok2str(dccp_pkt_type_str, "", dccph_type), len - fixed_hdrlen)); return; } ND_PRINT((ndo, "%s ", tok2str(dccp_pkt_type_str, "", dccph_type))); break; case DCCP_PKT_CLOSE: fixed_hdrlen += 8; if (len < fixed_hdrlen) { ND_PRINT((ndo, "truncated-%s - %u bytes missing!", tok2str(dccp_pkt_type_str, "", dccph_type), len - fixed_hdrlen)); return; } ND_PRINT((ndo, "%s ", tok2str(dccp_pkt_type_str, "", dccph_type))); break; case DCCP_PKT_RESET: { const struct dccp_hdr_reset *dhr = (const struct dccp_hdr_reset *)(bp + fixed_hdrlen); fixed_hdrlen += 12; if (len < fixed_hdrlen) { ND_PRINT((ndo, "truncated-%s - %u bytes missing!", tok2str(dccp_pkt_type_str, "", dccph_type), len - fixed_hdrlen)); return; } ND_TCHECK(*dhr); ND_PRINT((ndo, "%s (code=%s) ", tok2str(dccp_pkt_type_str, "", dccph_type), dccp_reset_code(dhr->dccph_reset_code))); break; } case DCCP_PKT_SYNC: fixed_hdrlen += 8; if (len < fixed_hdrlen) { ND_PRINT((ndo, "truncated-%s - %u bytes missing!", tok2str(dccp_pkt_type_str, "", dccph_type), len - fixed_hdrlen)); return; } ND_PRINT((ndo, "%s ", tok2str(dccp_pkt_type_str, "", dccph_type))); break; case DCCP_PKT_SYNCACK: fixed_hdrlen += 8; if (len < fixed_hdrlen) { ND_PRINT((ndo, "truncated-%s - %u bytes missing!", tok2str(dccp_pkt_type_str, "", dccph_type), len - fixed_hdrlen)); return; } ND_PRINT((ndo, "%s ", tok2str(dccp_pkt_type_str, "", dccph_type))); break; default: ND_PRINT((ndo, "%s ", tok2str(dccp_pkt_type_str, "unknown-type-%u", dccph_type))); break; } if ((DCCPH_TYPE(dh) != DCCP_PKT_DATA) && (DCCPH_TYPE(dh) != DCCP_PKT_REQUEST)) dccp_print_ack_no(ndo, bp); if (ndo->ndo_vflag < 2) return; ND_PRINT((ndo, "seq %" PRIu64, dccp_seqno(bp))); /* process options */ if (hlen > fixed_hdrlen){ u_int optlen; cp = bp + fixed_hdrlen; ND_PRINT((ndo, " <")); hlen -= fixed_hdrlen; while(1){ optlen = dccp_print_option(ndo, cp, hlen); if (!optlen) break; if (hlen <= optlen) break; hlen -= optlen; cp += optlen; ND_PRINT((ndo, ", ")); } ND_PRINT((ndo, ">")); } return; trunc: ND_PRINT((ndo, "%s", tstr)); return; }
C
tcpdump
0
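Illustrative aside on the CVE-2018-16229 record above: the over-read was in dccp_print_option(), which printed the Timestamp (type 41) and Timestamp Echo (type 42) option payloads without first validating the option length. The fragment below sketches the kind of length check the fix adds inside that function's option switch; the accepted lengths and error strings are approximations of the upstream patch, not a verbatim copy.

    case 41:        /* Timestamp: fixed 6-byte option */
            if (optlen == 6)
                    ND_PRINT((ndo, " %u", EXTRACT_32BITS(option + 2)));
            else
                    ND_PRINT((ndo, " [optlen != 6]"));
            break;
    case 42:        /* Timestamp Echo: 6, 8 or 10 bytes */
            if (optlen == 6 || optlen == 8 || optlen == 10)
                    ND_PRINT((ndo, " %u", EXTRACT_32BITS(option + 2)));
            else
                    ND_PRINT((ndo, " [bad optlen]"));
            break;

Here option points at the start of the TLV and optlen is its declared length; with the check in place a short or malformed option produces an error string instead of blindly dereferencing bytes the option does not actually carry.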