func
stringlengths 0
484k
| target
int64 0
1
| cwe
listlengths 0
4
| project
stringclasses 799
values | commit_id
stringlengths 40
40
| hash
float64 1,215,700,430,453,689,100,000,000B
340,281,914,521,452,260,000,000,000,000B
| size
int64 1
24k
| message
stringlengths 0
13.3k
|
---|---|---|---|---|---|---|---|
static inline int numeric_entity_is_allowed(unsigned uni_cp, int document_type)
{
	/* Less restrictive than unicode_cp_is_allowed(): decides whether a
	 * *numeric* character reference may name this code point. */
	switch (document_type) {
	case ENT_HTML_DOC_HTML401:
		/* All non-SGML characters (those marked UNUSED in DESCSET)
		 * should still be representable with numeric entities. */
		return uni_cp <= 0x10FFFF;
	case ENT_HTML_DOC_HTML5:
		/* HTML5 8.1.4: numeric references may name any code point other
		 * than U+0000, U+000D, noncharacters, and control characters
		 * that are not space characters (U+0009, U+000A, U+000C).
		 * Surrogates appear to be permitted. */
		if (uni_cp >= 0x20 && uni_cp <= 0x7E)
			return 1; /* printable ASCII */
		if (uni_cp == 0x09 || uni_cp == 0x0A || uni_cp == 0x0C)
			return 1; /* allowed control whitespace; U+000B/U+000D are not */
		if (uni_cp < 0xA0 || uni_cp > 0x10FFFF)
			return 0;
		if ((uni_cp & 0xFFFF) >= 0xFFFE)
			return 0; /* last two code points of each plane are noncharacters */
		if (uni_cp >= 0xFDD0 && uni_cp <= 0xFDEF)
			return 0; /* U+FDD0..U+FDEF noncharacter block */
		return 1;
	case ENT_HTML_DOC_XHTML:
	case ENT_HTML_DOC_XML1:
		/* XML 1.0 requires character references to match the Char
		 * production; see <http://www.w3.org/TR/REC-xml/#NT-CharRef> */
		return unicode_cp_is_allowed(uni_cp, document_type);
	default:
		return 1;
	}
}
| 0 |
[
"CWE-190"
] |
php-src
|
0da8b8b801f9276359262f1ef8274c7812d3dfda
| 249,681,271,228,156,140,000,000,000,000,000,000,000 | 28 |
Fix bug #72135 - don't create strings with lengths outside int range
|
/* Look up the virtual address of the "main" symbol in the ELF symbol table.
 * Returns UT64_MAX when the symbol table is unavailable or "main" is absent. */
static ut64 getmainsymbol(ELFOBJ *bin) {
	struct r_bin_elf_symbol_t *symbols = Elf_(r_bin_elf_get_symbols) (bin);
	int idx;
	if (!symbols) {
		return UT64_MAX;
	}
	for (idx = 0; !symbols[idx].last; idx++) {
		if (strcmp (symbols[idx].name, "main")) {
			continue;
		}
		/* Translate the symbol's physical offset to a virtual address. */
		return Elf_(r_bin_elf_p2v) (bin, symbols[idx].offset);
	}
	return UT64_MAX;
}
| 0 |
[
"CWE-125"
] |
radare2
|
c6d0076c924891ad9948a62d89d0bcdaf965f0cd
| 299,639,023,661,061,870,000,000,000,000,000,000,000 | 14 |
Fix #8731 - Crash in ELF parser with negative 32bit number
|
/*
 * Recursively parse the <backingStore> child of the current XML node into
 * src->backingStore.
 *
 * Returns 0 when no <backingStore> element exists or a chain terminator is
 * reached, -1 on parse error.  ctxt->node is saved and restored
 * automatically via VIR_XPATH_NODE_AUTORESTORE.
 */
virDomainDiskBackingStoreParse(xmlXPathContextPtr ctxt,
                               virStorageSourcePtr src,
                               unsigned int flags,
                               virDomainXMLOptionPtr xmlopt)
{
    VIR_XPATH_NODE_AUTORESTORE(ctxt);
    xmlNodePtr source;
    g_autoptr(virStorageSource) backingStore = NULL;
    g_autofree char *type = NULL;
    g_autofree char *format = NULL;
    g_autofree char *idx = NULL;

    /* No <backingStore> element at all: nothing to parse, not an error. */
    if (!(ctxt->node = virXPathNode("./backingStore", ctxt)))
        return 0;

    /* terminator does not have a type */
    if (!(type = virXMLPropString(ctxt->node, "type"))) {
        if (!(src->backingStore = virStorageSourceNew()))
            return -1;
        return 0;
    }

    /* The index attribute is only honoured for live (active) definitions. */
    if (!(flags & VIR_DOMAIN_DEF_PARSE_INACTIVE))
        idx = virXMLPropString(ctxt->node, "index");

    if (!(format = virXPathString("string(./format/@type)", ctxt))) {
        virReportError(VIR_ERR_XML_ERROR, "%s",
                       _("missing disk backing store format"));
        return -1;
    }

    if (!(source = virXPathNode("./source", ctxt))) {
        virReportError(VIR_ERR_XML_ERROR, "%s",
                       _("missing disk backing store source"));
        return -1;
    }

    if (!(backingStore = virDomainStorageSourceParseBase(type, format, idx)))
        return -1;

    /* backing store is always read-only */
    backingStore->readonly = true;

    /* Parse this layer's <source>, then recurse into its own backing store. */
    if (virDomainStorageSourceParse(source, ctxt, backingStore, flags, xmlopt) < 0 ||
        virDomainDiskBackingStoreParse(ctxt, backingStore, flags, xmlopt) < 0)
        return -1;

    /* Hand ownership of the parsed chain to @src. */
    src->backingStore = g_steal_pointer(&backingStore);
    return 0;
}
| 0 |
[
"CWE-212"
] |
libvirt
|
a5b064bf4b17a9884d7d361733737fb614ad8979
| 256,973,968,503,482,900,000,000,000,000,000,000,000 | 51 |
conf: Don't format http cookies unless VIR_DOMAIN_DEF_FORMAT_SECURE is used
Starting with 3b076391befc3fe72deb0c244ac6c2b4c100b410
(v6.1.0-122-g3b076391be) we support http cookies. Since they may contain
somewhat sensitive information we should not format them into the XML
unless VIR_DOMAIN_DEF_FORMAT_SECURE is asserted.
Reported-by: Han Han <[email protected]>
Signed-off-by: Peter Krempa <[email protected]>
Reviewed-by: Erik Skultety <[email protected]>
|
/*
 * Interactively offer to install the VALIDATE PASSWORD plugin and, on
 * success, prompt for and apply a validate_password_policy level.
 *
 * Returns 1 when the plugin was installed, 0 otherwise.
 *
 * Fixes over the previous version:
 *  - the result of my_malloc() is now checked before use (previously a
 *    failed allocation led to a NULL dereference in my_stpcpy());
 *  - `strength` is initialized at declaration so it can never be read
 *    uninitialized even if the selection loop is restructured later.
 */
int set_plugin()
{
  int reply;
  int plugin_set= 0;
  char *strength= NULL;
  bool option_read= FALSE;
  reply= get_response((const char *) "\n\nVALIDATE PASSWORD PLUGIN can be used "
                                     "to test passwords\nand improve security. "
                                     "It checks the strength of password\nand "
                                     "allows the users to set only those "
                                     "passwords which are\nsecure enough. "
                                     "Would you like to setup VALIDATE "
                                     "PASSWORD plugin?\n\nPress y|Y for Yes, "
                                     "any other key for No: ");
  if (reply == (int) 'y' || reply == (int) 'Y')
  {
    /* Plugin shared-object suffix differs per platform. */
#ifdef _WIN32
    const char *query_tmp= "INSTALL PLUGIN validate_password SONAME "
                           "'validate_password.dll'";
#else
    const char *query_tmp= "INSTALL PLUGIN validate_password SONAME "
                           "'validate_password.so'";
#endif
    if (!execute_query(&query_tmp, strlen(query_tmp)))
    {
      plugin_set= 1;
      /* Keep prompting until a valid policy level is entered. */
      while(!option_read)
      {
        reply= get_response((const char *) "\n\nThere are three levels of "
                                           "password validation policy.\n\n"
                                           "Please enter 0 for LOW, 1 for "
                                           "MEDIUM and 2 for STRONG: ");
        switch (reply){
        case (int ) '0':
          strength= (char *) "LOW";
          option_read= TRUE;
          break;
        case (int) '1':
          strength= (char *) "MEDIUM";
          option_read= TRUE;
          break;
        case (int) '2':
          strength= (char *) "STRONG";
          option_read= TRUE;
          break;
        default:
          fprintf(stdout, "\nInvalid option provided.\n");
        }
      }
      char *query, *end;
      int tmp= sizeof("SET GLOBAL validate_password_policy = ") + 3;
      int strength_length= strlen(strength);
      /*
        query string needs memory which is atleast the length of initial part
        of query plus twice the size of variable being appended.
      */
      query= (char *)my_malloc(PSI_NOT_INSTRUMENTED,
                               (strength_length * 2 + tmp) * sizeof(char),
                               MYF(MY_WME));
      if (query == NULL)
      {
        /* OOM: my_malloc(MY_WME) already reported the error; skip the
           policy assignment but keep the (already installed) plugin. */
        return(plugin_set);
      }
      end= my_stpcpy(query, "SET GLOBAL validate_password_policy = ");
      *end++ = '\'';
      /* Escape the value even though it comes from a fixed set, for safety. */
      end+= mysql_real_escape_string(&mysql, end, strength, strength_length);
      *end++ = '\'';
      if (!execute_query((const char **) &query,(unsigned int) (end-query)))
        DBUG_PRINT("info", ("query success!"));
      my_free(query);
    }
    else
      fprintf(stdout, "\nVALIDATE PASSWORD PLUGIN is not available.\n"
                      "Proceeding with the further steps without the plugin.\n");
  }
  return(plugin_set);
}
| 0 |
[
"CWE-284",
"CWE-295"
] |
mysql-server
|
3bd5589e1a5a93f9c224badf983cd65c45215390
| 323,958,935,381,689,700,000,000,000,000,000,000,000 | 76 |
WL#6791 : Redefine client --ssl option to imply enforced encryption
# Changed the meaning of the --ssl=1 option of all client binaries
to mean force ssl, not try ssl and fail over to eunecrypted
# Added a new MYSQL_OPT_SSL_ENFORCE mysql_options()
option to specify that an ssl connection is required.
# Added a new macro SSL_SET_OPTIONS() to the client
SSL handling headers that sets all the relevant SSL options at
once.
# Revamped all of the current native clients to use the new macro
# Removed some Windows line endings.
# Added proper handling of the new option into the ssl helper
headers.
# If SSL is mandatory assume that the media is secure enough
for the sha256 plugin to do unencrypted password exchange even
before establishing a connection.
# Set the default ssl cipher to DHE-RSA-AES256-SHA if none is
specified.
# updated test cases that require a non-default cipher to spawn
a mysql command line tool binary since mysqltest has no support
for specifying ciphers.
# updated the replication slave connection code to always enforce
SSL if any of the SSL config options is present.
# test cases added and updated.
# added a mysql_get_option() API to return mysql_options()
values. Used the new API inside the sha256 plugin.
# Fixed compilation warnings because of unused variables.
# Fixed test failures (mysql_ssl and bug13115401)
# Fixed whitespace issues.
# Fully implemented the mysql_get_option() function.
# Added a test case for mysql_get_option()
# fixed some trailing whitespace issues
# fixed some uint/int warnings in mysql_client_test.c
# removed shared memory option from non-windows get_options
tests
# moved MYSQL_OPT_LOCAL_INFILE to the uint options
|
/*
 * Completion callback for the remote (server) leg of the gensec session
 * setup.  Receives the server's security blob, handles the guest-session
 * special case, and either finishes the handshake or kicks off the next
 * local gensec step.
 */
static void cli_session_setup_gensec_remote_done(struct tevent_req *subreq)
{
	struct tevent_req *req =
		tevent_req_callback_data(subreq,
		struct tevent_req);
	struct cli_session_setup_gensec_state *state =
		tevent_req_data(req,
		struct cli_session_setup_gensec_state);
	NTSTATUS status;

	/* Drop any buffers from the previous round before receiving anew. */
	TALLOC_FREE(state->inbuf);
	TALLOC_FREE(state->recv_iov);

	status = cli_sesssetup_blob_recv(subreq, state, &state->blob_in,
					 &state->inbuf, &state->recv_iov);
	TALLOC_FREE(subreq);
	data_blob_free(&state->blob_out);

	/* MORE_PROCESSING_REQUIRED means the exchange simply continues. */
	if (!NT_STATUS_IS_OK(status) &&
	    !NT_STATUS_EQUAL(status, NT_STATUS_MORE_PROCESSING_REQUIRED))
	{
		tevent_req_nterror(req, status);
		return;
	}

	if (NT_STATUS_IS_OK(status)) {
		struct smbXcli_session *session = NULL;
		bool is_guest = false;

		if (smbXcli_conn_protocol(state->cli->conn) >= PROTOCOL_SMB2_02) {
			session = state->cli->smb2.session;
		} else {
			session = state->cli->smb1.session;
		}

		is_guest = smbXcli_session_is_guest(session);
		if (is_guest) {
			/*
			 * We can't finish the gensec handshake, we don't
			 * have a negotiated session key.
			 *
			 * So just pretend we are completely done.
			 *
			 * Note that smbXcli_session_is_guest()
			 * always returns false if we require signing.
			 */
			state->blob_in = data_blob_null;
			state->local_ready = true;
		}

		state->remote_ready = true;
	}

	/* Both sides done: finalize; otherwise run the next local step. */
	if (state->local_ready && state->remote_ready) {
		cli_session_setup_gensec_ready(req);
		return;
	}

	cli_session_setup_gensec_local_next(req);
}
| 0 |
[
"CWE-94"
] |
samba
|
94295b7aa22d2544af5323bca70d3dcb97fd7c64
| 121,087,376,748,242,800,000,000,000,000,000,000,000 | 59 |
CVE-2016-2019: s3:libsmb: add comment regarding smbXcli_session_is_guest() with mandatory signing
BUG: https://bugzilla.samba.org/show_bug.cgi?id=11860
Signed-off-by: Stefan Metzmacher <[email protected]>
|
/*
 * Validate the parsed command-line options for tpm2_import.
 * Every missing/conflicting option is reported (all diagnostics are
 * emitted, not just the first), and the worst status is returned:
 * tool_rc_success or tool_rc_option_error.
 */
static tool_rc check_options(void) {

    tool_rc result = tool_rc_success;

    if (ctx.import_tpm) {
        /* TPM-wrapped import: requires a SymSeed, forbids a key password. */
        if (!ctx.input_seed_file) {
            LOG_ERR("Expected SymSeed to be specified via \"-s\","
                    " missing option.");
            result = tool_rc_option_error;
        }

        /* If a key file is specified we choose aes else null
           for symmetricAlgdefinition */
        ctx.key_type = ctx.input_enc_key_file ? TPM2_ALG_AES : TPM2_ALG_NULL;

        if (ctx.key_auth_str) {
            LOG_ERR("Cannot specify key password when importing a TPM key.\n"
                    "use tpm2_changeauth after import");
            result = tool_rc_option_error;
        }
    } else {
        /* OpenSSL-key import: the key algorithm must be given explicitly. */
        if (!ctx.key_type) {
            LOG_ERR("Expected key type to be specified via \"-G\","
                    " missing option.");
            result = tool_rc_option_error;
        }

        if (ctx.cp_hash_path) {
            LOG_WARN("CAUTION CpHash calculation includes parameters that"
                    "have a derived/random seed!");
        }
    }

    /* Options required regardless of import mode. */
    if (!ctx.input_key_file) {
        LOG_ERR("Expected to be imported key data to be specified via \"-i\","
                " missing option.");
        result = tool_rc_option_error;
    }

    if (!ctx.public_key_file) {
        LOG_ERR("Expected output public file missing, specify \"-u\","
                " missing option.");
        result = tool_rc_option_error;
    }

    if (!ctx.private_key_file) {
        LOG_ERR("Expected output private file missing, specify \"-r\","
                " missing option.");
        result = tool_rc_option_error;
    }

    if (!ctx.parent.ctx_path) {
        LOG_ERR("Expected parent key to be specified via \"-C\","
                " missing option.");
        result = tool_rc_option_error;
    }

    return result;
}
| 0 |
[
"CWE-798"
] |
tpm2-tools
|
c069e4f179d5e6653a84fb236816c375dca82515
| 8,323,635,901,329,901,000,000,000,000,000,000,000 | 67 |
tpm2_import: fix fixed AES key CVE-2021-3565
tpm2_import used a fixed AES key for the inner wrapper, which means that
a MITM attack would be able to unwrap the imported key. Even the
use of an encrypted session will not prevent this. The TPM only
encrypts the first parameter which is the fixed symmetric key.
To fix this, ensure the key size is 16 bytes or bigger and use
OpenSSL to generate a secure random AES key.
Fixes: #2738
Signed-off-by: William Roberts <[email protected]>
|
static void pty_chr_update_read_handler_locked(CharDriverState *chr)
{
PtyCharDriver *s = chr->opaque;
GPollFD pfd;
int rc;
QIOChannelFile *fioc = QIO_CHANNEL_FILE(s->ioc);
pfd.fd = fioc->fd;
pfd.events = G_IO_OUT;
pfd.revents = 0;
do {
rc = g_poll(&pfd, 1, 0);
} while (rc == -1 && errno == EINTR);
assert(rc >= 0);
if (pfd.revents & G_IO_HUP) {
pty_chr_state(chr, 0);
} else {
pty_chr_state(chr, 1);
}
}
| 0 |
[
"CWE-416"
] |
qemu
|
a4afa548fc6dd9842ed86639b4d37d4d1c4ad480
| 31,362,492,708,349,070,000,000,000,000,000,000,000 | 21 |
char: move front end handlers in CharBackend
Since the hanlders are associated with a CharBackend, rather than the
CharDriverState, it is more appropriate to store in CharBackend. This
avoids the handler copy dance in qemu_chr_fe_set_handlers() then
mux_chr_update_read_handler(), by storing the CharBackend pointer
directly.
Also a mux CharDriver should go through mux->backends[focused], since
chr->be will stay NULL. Before that, it was possible to call
chr->handler by mistake with surprising results, for ex through
qemu_chr_be_can_write(), which would result in calling the last set
handler front end, not the one with focus.
Signed-off-by: Marc-André Lureau <[email protected]>
Message-Id: <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
|
/*
 * Flush the client's pending update buffer to its socket.
 * On write failure the client is logged and closed.
 * Returns TRUE on success, FALSE otherwise.
 */
rfbSendUpdateBuf(rfbClientPtr cl)
{
    /* Nothing to do for a client whose socket is already gone. */
    if (cl->sock < 0)
        return FALSE;

    if (rfbWriteExact(cl, cl->updateBuf, cl->ublen) >= 0) {
        cl->ublen = 0;
        return TRUE;
    }

    rfbLogPerror("rfbSendUpdateBuf: write");
    rfbCloseClient(cl);
    return FALSE;
}
| 0 |
[] |
libvncserver
|
804335f9d296440bb708ca844f5d89b58b50b0c6
| 218,710,907,035,448,860,000,000,000,000,000,000,000 | 14 |
Thread safety for zrle, zlib, tight.
Proposed tight security type fix for debian bug 517422.
|
/*
 * Fetch IM values in delayed mode: snapshot the current IM values and
 * answer the request from that local copy via _XimGetIMValueData().
 */
_XimDelayModeGetIMValues(
    Xim			  im,
    XIMArg		 *arg)
{
    XimDefIMValues	  im_values;

    _XimGetCurrentIMValues(im, &im_values);
    return(_XimGetIMValueData(im, (XPointer)&im_values, arg,
			im->core.im_resources, im->core.im_num_resources));
}
| 0 |
[
"CWE-190"
] |
libx11
|
1a566c9e00e5f35c1f9e7f3d741a02e5170852b2
| 338,729,451,245,497,630,000,000,000,000,000,000,000 | 10 |
Zero out buffers in functions
It looks like uninitialized stack or heap memory can leak
out via padding bytes.
Signed-off-by: Matthieu Herrb <[email protected]>
Reviewed-by: Matthieu Herrb <[email protected]>
|
/*
 * Read the current sample rate from a UAC2 clock source.
 *
 * Issues a class-specific GET_CUR request for the SAM_FREQ control of the
 * given @clock on the audio control interface.  Returns the rate in Hz, or
 * 0 if the request failed (a warning is logged; the error is not
 * propagated to the caller).
 */
static int get_sample_rate_v2(struct snd_usb_audio *chip, int iface,
			      int altsetting, int clock)
{
	struct usb_device *dev = chip->dev;
	__le32 data;	/* device returns the rate as little-endian 32-bit */
	int err;

	err = snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0), UAC2_CS_CUR,
			      USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN,
			      UAC2_CS_CONTROL_SAM_FREQ << 8,
			      snd_usb_ctrl_intf(chip) | (clock << 8),
			      &data, sizeof(data));
	if (err < 0) {
		dev_warn(&dev->dev, "%d:%d: cannot get freq (v2): err %d\n",
			 iface, altsetting, err);
		return 0;	/* treated as "unknown rate" by callers */
	}

	return le32_to_cpu(data);
}
| 0 |
[] |
sound
|
447d6275f0c21f6cc97a88b3a0c601436a4cdf2a
| 8,380,453,448,635,180,000,000,000,000,000,000,000 | 20 |
ALSA: usb-audio: Add sanity checks for endpoint accesses
Add some sanity check codes before actually accessing the endpoint via
get_endpoint() in order to avoid the invalid access through a
malformed USB descriptor. Mostly just checking bNumEndpoints, but in
one place (snd_microii_spdif_default_get()), the validity of iface and
altsetting index is checked as well.
Bugzilla: https://bugzilla.suse.com/show_bug.cgi?id=971125
Cc: <[email protected]>
Signed-off-by: Takashi Iwai <[email protected]>
|
  /*
   * Stub authorizer verification: accepts every peer unconditionally.
   *
   * NOTE(review): no cryptographic check is performed and the challenge
   * mechanism is ignored — presumably this override belongs to a test or
   * "none" auth dispatcher; confirm it is never used where real cephx
   * verification is required.
   */
  bool ms_verify_authorizer(Connection *con, int peer_type, int protocol,
			    bufferlist& authorizer, bufferlist& authorizer_reply,
			    bool& isvalid, CryptoKey& session_key,
			    std::unique_ptr<AuthAuthorizerChallenge> *challenge) override {
    isvalid = true;
    return true;
  }
| 0 |
[
"CWE-287",
"CWE-284"
] |
ceph
|
5ead97120e07054d80623dada90a5cc764c28468
| 156,014,642,889,801,580,000,000,000,000,000,000,000 | 7 |
auth/cephx: add authorizer challenge
Allow the accepting side of a connection to reject an initial authorizer
with a random challenge. The connecting side then has to respond with an
updated authorizer proving they are able to decrypt the service's challenge
and that the new authorizer was produced for this specific connection
instance.
The accepting side requires this challenge and response unconditionally
if the client side advertises they have the feature bit. Servers wishing
to require this improved level of authentication simply have to require
the appropriate feature.
Signed-off-by: Sage Weil <[email protected]>
(cherry picked from commit f80b848d3f830eb6dba50123e04385173fa4540b)
# Conflicts:
# src/auth/Auth.h
# src/auth/cephx/CephxProtocol.cc
# src/auth/cephx/CephxProtocol.h
# src/auth/none/AuthNoneProtocol.h
# src/msg/Dispatcher.h
# src/msg/async/AsyncConnection.cc
- const_iterator
- ::decode vs decode
- AsyncConnection ctor arg noise
- get_random_bytes(), not cct->random()
|
  // Default-construct an empty canonical-output object: no backing buffer
  // yet (buffer_ == NULL), zero capacity and zero written length.
  CanonOutputT() : buffer_(NULL), buffer_len_(0), cur_len_(0) {}
| 0 |
[] |
envoy
|
3b5acb2f43548862dadb243de7cf3994986a8e04
| 186,598,519,041,358,840,000,000,000,000,000,000,000 | 1 |
http, url: Bring back chromium_url and http_parser_parse_url (#198)
* Revert GURL as HTTP URL parser utility
This reverts:
1. commit c9c4709c844b90b9bb2935d784a428d667c9df7d
2. commit d828958b591a6d79f4b5fa608ece9962b7afbe32
3. commit 2d69e30c51f2418faf267aaa6c1126fce9948c62
Signed-off-by: Dhi Aurrahman <[email protected]>
|
  // Base-class default: this item does not map to an updatable view field,
  // so return 0 (NULL); subclasses that wrap a real field override this.
  Item_field *field_for_view_update() { return 0; }
| 0 |
[
"CWE-617"
] |
server
|
2e7891080667c59ac80f788eef4d59d447595772
| 221,477,565,287,919,360,000,000,000,000,000,000,000 | 1 |
MDEV-25635 Assertion failure when pushing from HAVING into WHERE of view
This bug could manifest itself after pushing a where condition over a
mergeable derived table / view / CTE DT into a grouping view / derived
table / CTE V whose item list contained set functions with constant
arguments such as MIN(2), SUM(1) etc. In such cases the field references
used in the condition pushed into the view V that correspond set functions
are wrapped into Item_direct_view_ref wrappers. Due to a wrong implementation
of the virtual method const_item() for the class Item_direct_view_ref the
wrapped set functions with constant arguments could be erroneously taken
for constant items. This could lead to a wrong result set returned by the
main select query in 10.2. In 10.4 where a possibility of pushing condition
from HAVING into WHERE had been added this could cause a crash.
Approved by Sergey Petrunya <[email protected]>
|
  /*
   * Render a circle of the given radius centered at (center_x, center_y).
   * Coordinates and radius are in 26.6 fixed-point (FT_F26Dot6): the low
   * 6 bits (& 63) carry the sub-pixel fraction used to position the
   * outline, while the integer part (>> 6) selects the target pixel.
   */
  circle_draw( FT_F26Dot6  center_x,
               FT_F26Dot6  center_y,
               FT_F26Dot6  radius,
               FTDemo_Handle*   handle,
               FTDemo_Display*  display,
               grColor          color )
  {
    FT_Outline  outline;


    ft_outline_new_circle( &outline, radius, handle );

    /* Shift the outline by the sub-pixel fraction of the center. */
    FT_Outline_Translate( &outline, center_x & 63, center_y & 63 );

    ft_outline_draw( &outline, 1., (center_x >> 6), (center_y >> 6), handle, display, color );

    FT_Outline_Done( handle->library, &outline );
  }
| 0 |
[
"CWE-120"
] |
freetype2-demos
|
b995299b73ba4cd259f221f500d4e63095508bec
| 266,875,825,959,375,660,000,000,000,000,000,000,000 | 16 |
Fix Savannah bug #30054.
* src/ftdiff.c, src/ftgrid.c, src/ftmulti.c, src/ftstring.c,
src/ftview.c: Use precision for `%s' where appropriate to avoid
buffer overflows.
|
/*
 * Typefind probe for TTML subtitle documents: if the XML root element is
 * <tt>, suggest TTML_XML_CAPS with maximum confidence.
 * NOTE(review): the meaning of the "2" argument to
 * xml_check_first_element() is not visible here — presumably a depth or
 * element-name-length parameter; confirm against its definition.
 */
ttml_xml_type_find (GstTypeFind * tf, gpointer unused)
{
  if (xml_check_first_element (tf, "tt", 2, FALSE)) {
    gst_type_find_suggest (tf, GST_TYPE_FIND_MAXIMUM, TTML_XML_CAPS);
  }
}
| 0 |
[
"CWE-125"
] |
gst-plugins-base
|
2fdccfd64fc609e44e9c4b8eed5bfdc0ab9c9095
| 286,906,739,885,102,700,000,000,000,000,000,000,000 | 6 |
typefind: bounds check windows ico detection
Fixes out of bounds read
https://bugzilla.gnome.org/show_bug.cgi?id=774902
|
/*
 * Return a list of (device, mountpoint, fstype, opts) tuples for all
 * logical drives (and their volume mount points) on Windows.
 *
 * Fixes over the previous version:
 *  - Py_CLEAR() instead of Py_DECREF() after appending tuples: Py_DECREF
 *    left py_tuple dangling, so a failure in a later loop iteration would
 *    reach the error path's Py_XDECREF and double-free it (CWE-415).
 *  - mp_flag is reset to TRUE for every volume; previously it stayed FALSE
 *    after the first volume exhausted its mount points, silently skipping
 *    mount-point enumeration for all subsequent volumes.
 */
psutil_disk_partitions(PyObject *self, PyObject *args) {
    DWORD num_bytes;
    char drive_strings[255];
    char *drive_letter = drive_strings;
    char mp_buf[MAX_PATH];
    char mp_path[MAX_PATH];
    int all;
    int type;
    int ret;
    unsigned int old_mode = 0;
    char opts[20];
    HANDLE mp_h;
    BOOL mp_flag= TRUE;
    LPTSTR fs_type[MAX_PATH + 1] = { 0 };
    DWORD pflags = 0;
    PyObject *py_all;
    PyObject *py_retlist = PyList_New(0);
    PyObject *py_tuple = NULL;

    if (py_retlist == NULL) {
        return NULL;
    }

    // avoid to visualize a message box in case something goes wrong
    // see https://github.com/giampaolo/psutil/issues/264
    old_mode = SetErrorMode(SEM_FAILCRITICALERRORS);

    if (! PyArg_ParseTuple(args, "O", &py_all))
        goto error;
    all = PyObject_IsTrue(py_all);

    Py_BEGIN_ALLOW_THREADS
    num_bytes = GetLogicalDriveStrings(254, drive_letter);
    Py_END_ALLOW_THREADS

    if (num_bytes == 0) {
        PyErr_SetFromWindowsErr(0);
        goto error;
    }

    // drive_strings is a double-NUL-terminated list of "C:\\", "D:\\", ...
    while (*drive_letter != 0) {
        py_tuple = NULL;
        opts[0] = 0;
        fs_type[0] = 0;

        Py_BEGIN_ALLOW_THREADS
        type = GetDriveType(drive_letter);
        Py_END_ALLOW_THREADS

        // by default we only show hard drives and cd-roms
        if (all == 0) {
            if ((type == DRIVE_UNKNOWN) ||
                    (type == DRIVE_NO_ROOT_DIR) ||
                    (type == DRIVE_REMOTE) ||
                    (type == DRIVE_RAMDISK)) {
                goto next;
            }
            // floppy disk: skip it by default as it introduces a
            // considerable slowdown.
            if ((type == DRIVE_REMOVABLE) &&
                    (strcmp(drive_letter, "A:\\") == 0)) {
                goto next;
            }
        }

        // NOTE(review): _ARRAYSIZE(drive_letter) is the size of a pointer,
        // not of the buffer; harmless here because lpVolumeNameBuffer is
        // NULL, but worth confirming upstream.
        ret = GetVolumeInformation(
            (LPCTSTR)drive_letter, NULL, _ARRAYSIZE(drive_letter),
            NULL, NULL, &pflags, (LPTSTR)fs_type, _ARRAYSIZE(fs_type));
        if (ret == 0) {
            // We might get here in case of a floppy hard drive, in
            // which case the error is (21, "device not ready").
            // Let's pretend it didn't happen as we already have
            // the drive name and type ('removable').
            strcat_s(opts, _countof(opts), "");
            SetLastError(0);
        }
        else {
            if (pflags & FILE_READ_ONLY_VOLUME)
                strcat_s(opts, _countof(opts), "ro");
            else
                strcat_s(opts, _countof(opts), "rw");
            if (pflags & FILE_VOLUME_IS_COMPRESSED)
                strcat_s(opts, _countof(opts), ",compressed");

            // Check for mount points on this volume and add/get info
            // (checks first to know if we can even have mount points)
            if (pflags & FILE_SUPPORTS_REPARSE_POINTS) {
                mp_h = FindFirstVolumeMountPoint(drive_letter, mp_buf, MAX_PATH);
                if (mp_h != INVALID_HANDLE_VALUE) {
                    // Reset per volume: a previous volume leaves this FALSE
                    // once its mount points are exhausted.
                    mp_flag = TRUE;
                    while (mp_flag) {
                        // Append full mount path with drive letter
                        strcpy_s(mp_path, _countof(mp_path), drive_letter);
                        strcat_s(mp_path, _countof(mp_path), mp_buf);

                        py_tuple = Py_BuildValue(
                            "(ssss)",
                            drive_letter,
                            mp_path,
                            fs_type,  // Typically NTFS
                            opts);
                        if (!py_tuple ||
                                PyList_Append(py_retlist, py_tuple) == -1) {
                            FindVolumeMountPointClose(mp_h);
                            goto error;
                        }
                        // Py_CLEAR also NULLs py_tuple so a later failure
                        // cannot double-free it via Py_XDECREF in `error`.
                        Py_CLEAR(py_tuple);

                        // Continue looking for more mount points
                        mp_flag = FindNextVolumeMountPoint(mp_h, mp_buf,
                                                           MAX_PATH);
                    }
                    FindVolumeMountPointClose(mp_h);
                }
            }
        }

        if (strlen(opts) > 0)
            strcat_s(opts, _countof(opts), ",");
        strcat_s(opts, _countof(opts), psutil_get_drive_type(type));

        py_tuple = Py_BuildValue(
            "(ssss)",
            drive_letter,
            drive_letter,
            fs_type,  // either FAT, FAT32, NTFS, HPFS, CDFS, UDF or NWFS
            opts);
        if (!py_tuple)
            goto error;
        if (PyList_Append(py_retlist, py_tuple))
            goto error;
        Py_CLEAR(py_tuple);

next:
        // advance past this drive string's terminating NUL
        drive_letter = strchr(drive_letter, 0) + 1;
    }

    SetErrorMode(old_mode);
    return py_retlist;

error:
    SetErrorMode(old_mode);
    Py_XDECREF(py_tuple);
    Py_DECREF(py_retlist);
    return NULL;
}
| 1 |
[
"CWE-415"
] |
psutil
|
7d512c8e4442a896d56505be3e78f1156f443465
| 83,376,880,753,088,590,000,000,000,000,000,000,000 | 149 |
Use Py_CLEAR instead of Py_DECREF to also set the variable to NULL (#1616)
These files contain loops that convert system data into python objects
and during the process they create objects and dereference their
refcounts after they have been added to the resulting list.
However, in case of errors during the creation of those python objects,
the refcount to previously allocated objects is dropped again with
Py_XDECREF, which should be a no-op in case the paramater is NULL. Even
so, in most of these loops the variables pointing to the objects are
never set to NULL, even after Py_DECREF is called at the end of the loop
iteration. This means, after the first iteration, if an error occurs
those python objects will get their refcount dropped two times,
resulting in a possible double-free.
|
/*
 * Decode a base64 string into a newly allocated, NUL-terminated buffer.
 * Returns NULL when @enc is NULL; the caller frees the result with g_free().
 *
 * Fix: the previous version passed an *uninitialized* gsize to
 * g_base64_decode_inplace(), whose length parameter is in/out — decoding
 * then read a garbage input length, crashing on invalid headers.  Use the
 * simpler g_base64_decode(), which takes only an output-length pointer.
 */
base64_decode_string (const char *enc)
{
  gsize len = 0;
  char *dec;

  if (enc == NULL)
    return NULL;

  dec = (char *) g_base64_decode (enc, &len);
  /* The decoded payload is binary; terminate it so callers may treat it
   * as a C string (the result buffer always has room for this byte). */
  dec[len] = '\0';
  return dec;
}
| 1 |
[] |
cockpit
|
c51f6177576d7e12614c64d316cf0b67addd17c9
| 273,534,858,954,204,460,000,000,000,000,000,000,000 | 11 |
ws: Fix bug parsing invalid base64 headers
The len parameter to g_base64_decode_inplace() is a inout
parameter, and needs to be initialized. Lets just use
the simpler g_base64_decode() function. This fixes a segfault.
Closes #10819
|
/*
 * Remove every subscription attached to @port (as source when @is_src,
 * otherwise as destination) and unsubscribe the connected peer ports.
 *
 * Caller context: invoked while tearing down @port of @client; @grp is the
 * port's subscription group for the given direction.  The subscriber info
 * is freed only once both endpoints have dropped it (ref_count).
 */
static void clear_subscriber_list(struct snd_seq_client *client,
				  struct snd_seq_client_port *port,
				  struct snd_seq_port_subs_info *grp,
				  int is_src)
{
	struct list_head *p, *n;

	/* _safe iteration: entries are removed/freed while walking */
	list_for_each_safe(p, n, &grp->list_head) {
		struct snd_seq_subscribers *subs;
		struct snd_seq_client *c;
		struct snd_seq_client_port *aport;

		subs = get_subscriber(p, is_src);
		/* find the peer port on the other end of this subscription */
		if (is_src)
			aport = get_client_port(&subs->info.dest, &c);
		else
			aport = get_client_port(&subs->info.sender, &c);
		delete_and_unsubscribe_port(client, port, subs, is_src, false);

		if (!aport) {
			/* looks like the connected port is being deleted.
			 * we decrease the counter, and when both ports are deleted
			 * remove the subscriber info
			 */
			if (atomic_dec_and_test(&subs->ref_count))
				kfree(subs);
			continue;
		}

		/* ok we got the connected port */
		delete_and_unsubscribe_port(c, aport, subs, !is_src, true);
		kfree(subs);
		/* release the references taken by get_client_port() */
		snd_seq_port_unlock(aport);
		snd_seq_client_unlock(c);
	}
}
| 0 |
[
"CWE-416",
"CWE-362"
] |
linux
|
71105998845fb012937332fe2e806d443c09e026
| 239,758,878,717,212,500,000,000,000,000,000,000,000 | 36 |
ALSA: seq: Fix use-after-free at creating a port
There is a potential race window opened at creating and deleting a
port via ioctl, as spotted by fuzzing. snd_seq_create_port() creates
a port object and returns its pointer, but it doesn't take the
refcount, thus it can be deleted immediately by another thread.
Meanwhile, snd_seq_ioctl_create_port() still calls the function
snd_seq_system_client_ev_port_start() with the created port object
that is being deleted, and this triggers use-after-free like:
BUG: KASAN: use-after-free in snd_seq_ioctl_create_port+0x504/0x630 [snd_seq] at addr ffff8801f2241cb1
=============================================================================
BUG kmalloc-512 (Tainted: G B ): kasan: bad access detected
-----------------------------------------------------------------------------
INFO: Allocated in snd_seq_create_port+0x94/0x9b0 [snd_seq] age=1 cpu=3 pid=4511
___slab_alloc+0x425/0x460
__slab_alloc+0x20/0x40
kmem_cache_alloc_trace+0x150/0x190
snd_seq_create_port+0x94/0x9b0 [snd_seq]
snd_seq_ioctl_create_port+0xd1/0x630 [snd_seq]
snd_seq_do_ioctl+0x11c/0x190 [snd_seq]
snd_seq_ioctl+0x40/0x80 [snd_seq]
do_vfs_ioctl+0x54b/0xda0
SyS_ioctl+0x79/0x90
entry_SYSCALL_64_fastpath+0x16/0x75
INFO: Freed in port_delete+0x136/0x1a0 [snd_seq] age=1 cpu=2 pid=4717
__slab_free+0x204/0x310
kfree+0x15f/0x180
port_delete+0x136/0x1a0 [snd_seq]
snd_seq_delete_port+0x235/0x350 [snd_seq]
snd_seq_ioctl_delete_port+0xc8/0x180 [snd_seq]
snd_seq_do_ioctl+0x11c/0x190 [snd_seq]
snd_seq_ioctl+0x40/0x80 [snd_seq]
do_vfs_ioctl+0x54b/0xda0
SyS_ioctl+0x79/0x90
entry_SYSCALL_64_fastpath+0x16/0x75
Call Trace:
[<ffffffff81b03781>] dump_stack+0x63/0x82
[<ffffffff81531b3b>] print_trailer+0xfb/0x160
[<ffffffff81536db4>] object_err+0x34/0x40
[<ffffffff815392d3>] kasan_report.part.2+0x223/0x520
[<ffffffffa07aadf4>] ? snd_seq_ioctl_create_port+0x504/0x630 [snd_seq]
[<ffffffff815395fe>] __asan_report_load1_noabort+0x2e/0x30
[<ffffffffa07aadf4>] snd_seq_ioctl_create_port+0x504/0x630 [snd_seq]
[<ffffffffa07aa8f0>] ? snd_seq_ioctl_delete_port+0x180/0x180 [snd_seq]
[<ffffffff8136be50>] ? taskstats_exit+0xbc0/0xbc0
[<ffffffffa07abc5c>] snd_seq_do_ioctl+0x11c/0x190 [snd_seq]
[<ffffffffa07abd10>] snd_seq_ioctl+0x40/0x80 [snd_seq]
[<ffffffff8136d433>] ? acct_account_cputime+0x63/0x80
[<ffffffff815b515b>] do_vfs_ioctl+0x54b/0xda0
.....
We may fix this in a few different ways, and in this patch, it's fixed
simply by taking the refcount properly at snd_seq_create_port() and
letting the caller unref the object after use. Also, there is another
potential use-after-free by sprintf() call in snd_seq_create_port(),
and this is moved inside the lock.
This fix covers CVE-2017-15265.
Reported-and-tested-by: Michael23 Yu <[email protected]>
Suggested-by: Linus Torvalds <[email protected]>
Cc: <[email protected]>
Signed-off-by: Takashi Iwai <[email protected]>
|
/*
 * Compute an AES-CMAC over @buffer (optionally prefixed by @micBxBuffer)
 * using the key selected by @keyID.  Multicast key identifiers are
 * rejected up front and never reach the CMAC engine.
 */
SecureElementStatus_t SecureElementComputeAesCmac( uint8_t* micBxBuffer, uint8_t* buffer, uint16_t size,
                                                   KeyIdentifier_t keyID, uint32_t* cmac )
{
    if( keyID < LORAMAC_CRYPTO_MULTICAST_KEYS )
    {
        return ComputeCmac( micBxBuffer, buffer, size, keyID, cmac );
    }
    // Never accept multicast key identifier for cmac computation
    return SECURE_ELEMENT_ERROR_INVALID_KEY_ID;
}
| 0 |
[
"CWE-120",
"CWE-787"
] |
LoRaMac-node
|
e3063a91daa7ad8a687223efa63079f0c24568e4
| 237,220,962,777,869,260,000,000,000,000,000,000,000 | 10 |
Added received buffer size checks.
|
/*
 * Allocate backing storage for a buffer resource according to its bind
 * flags: plain host memory for VIRGL_BIND_CUSTOM, nothing for STAGING
 * (guest memory only), otherwise a GL buffer object with the matching
 * target.  Returns 0 on success, -ENOMEM/-EINVAL on failure.
 */
vrend_resource_alloc_buffer(struct vrend_resource *gr, uint32_t flags)
{
   const uint32_t bind = gr->base.bind;
   const uint32_t size = gr->base.width0;

   if (bind == VIRGL_BIND_CUSTOM) {
      /* use iovec directly when attached */
      gr->storage_bits |= VREND_STORAGE_HOST_SYSTEM_MEMORY;
      gr->ptr = calloc(1, size);
      if (!gr->ptr)
         return -ENOMEM;
   } else if (bind == VIRGL_BIND_STAGING) {
      /* staging buffers only use guest memory -- nothing to do. */
   } else if (bind == VIRGL_BIND_INDEX_BUFFER) {
      gr->target = GL_ELEMENT_ARRAY_BUFFER_ARB;
      vrend_create_buffer(gr, size, flags);
   } else if (bind == VIRGL_BIND_STREAM_OUTPUT) {
      gr->target = GL_TRANSFORM_FEEDBACK_BUFFER;
      vrend_create_buffer(gr, size, flags);
   } else if (bind == VIRGL_BIND_VERTEX_BUFFER) {
      gr->target = GL_ARRAY_BUFFER_ARB;
      vrend_create_buffer(gr, size, flags);
   } else if (bind == VIRGL_BIND_CONSTANT_BUFFER) {
      gr->target = GL_UNIFORM_BUFFER;
      vrend_create_buffer(gr, size, flags);
   } else if (bind == VIRGL_BIND_QUERY_BUFFER) {
      gr->target = GL_QUERY_BUFFER;
      vrend_create_buffer(gr, size, flags);
   } else if (bind == VIRGL_BIND_COMMAND_ARGS) {
      gr->target = GL_DRAW_INDIRECT_BUFFER;
      vrend_create_buffer(gr, size, flags);
   } else if (bind == 0 || bind == VIRGL_BIND_SHADER_BUFFER) {
      /* unspecified or SSBO-style binding: fall back to a generic buffer */
      gr->target = GL_ARRAY_BUFFER_ARB;
      vrend_create_buffer(gr, size, flags);
   } else if (bind & VIRGL_BIND_SAMPLER_VIEW) {
      /*
       * On Desktop we use GL_ARB_texture_buffer_object on GLES we use
       * GL_EXT_texture_buffer (it is in the ANDRIOD extension pack).
       */
#if GL_TEXTURE_BUFFER != GL_TEXTURE_BUFFER_EXT
#error "GL_TEXTURE_BUFFER enums differ, they shouldn't."
#endif

   /* need to check GL version here */
      if (has_feature(feat_arb_or_gles_ext_texture_buffer)) {
         gr->target = GL_TEXTURE_BUFFER;
      } else {
         gr->target = GL_PIXEL_PACK_BUFFER_ARB;
      }
      vrend_create_buffer(gr, size, flags);
   } else {
      /* any other combination of bind flags is a guest protocol error */
      vrend_printf("%s: Illegal buffer binding flags 0x%x\n", __func__, bind);
      return -EINVAL;
   }
   return 0;
}
| 0 |
[
"CWE-787"
] |
virglrenderer
|
95e581fd181b213c2ed7cdc63f2abc03eaaa77ec
| 316,706,643,189,554,740,000,000,000,000,000,000,000 | 57 |
vrend: Add test to resource OOB write and fix it
v2: Also check that no depth != 1 has been send when none is due
Closes: #250
Signed-off-by: Gert Wollny <[email protected]>
Reviewed-by: Chia-I Wu <[email protected]>
|
/*
 * Run the ingress qdisc (traffic-control filter) over an incoming skb.
 *
 * Returns a TC_ACT_* verdict: TC_ACT_OK when no ingress qdisc is attached
 * or the qdisc accepts the packet, TC_ACT_SHOT when a redirect loop is
 * detected, or whatever the qdisc's enqueue hook decides.
 */
static int ing_filter(struct sk_buff *skb)
{
	struct Qdisc *q;
	struct net_device *dev = skb->dev;
	int result = TC_ACT_OK;

	if (dev->qdisc_ingress) {
		__u32 ttl = (__u32) G_TC_RTTL(skb->tc_verd);
		/* guard against mirred/redirect loops bouncing the skb forever */
		if (MAX_RED_LOOP < ttl++) {
			printk("Redir loop detected Dropping packet (%s->%s)\n",
				skb->input_dev->name, skb->dev->name);
			return TC_ACT_SHOT;
		}

		skb->tc_verd = SET_TC_RTTL(skb->tc_verd,ttl);
		skb->tc_verd = SET_TC_AT(skb->tc_verd,AT_INGRESS);

		spin_lock(&dev->ingress_lock);
		/* re-check under the lock: the qdisc may be detached concurrently */
		if ((q = dev->qdisc_ingress) != NULL)
			result = q->enqueue(skb, q);
		spin_unlock(&dev->ingress_lock);
	}

	return result;
}
| 0 |
[] |
linux
|
e89e9cf539a28df7d0eb1d0a545368e9920b34ac
| 319,876,174,648,257,800,000,000,000,000,000,000,000 | 27 |
[IPv4/IPv6]: UFO Scatter-gather approach
Attached is kernel patch for UDP Fragmentation Offload (UFO) feature.
1. This patch incorporate the review comments by Jeff Garzik.
2. Renamed USO as UFO (UDP Fragmentation Offload)
3. udp sendfile support with UFO
This patches uses scatter-gather feature of skb to generate large UDP
datagram. Below is a "how-to" on changes required in network device
driver to use the UFO interface.
UDP Fragmentation Offload (UFO) Interface:
-------------------------------------------
UFO is a feature wherein the Linux kernel network stack will offload the
IP fragmentation functionality of large UDP datagram to hardware. This
will reduce the overhead of stack in fragmenting the large UDP datagram to
MTU sized packets
1) Drivers indicate their capability of UFO using
dev->features |= NETIF_F_UFO | NETIF_F_HW_CSUM | NETIF_F_SG
NETIF_F_HW_CSUM is required for UFO over ipv6.
2) UFO packet will be submitted for transmission using driver xmit routine.
UFO packet will have a non-zero value for
"skb_shinfo(skb)->ufo_size"
skb_shinfo(skb)->ufo_size will indicate the length of data part in each IP
fragment going out of the adapter after IP fragmentation by hardware.
skb->data will contain MAC/IP/UDP header and skb_shinfo(skb)->frags[]
contains the data payload. The skb->ip_summed will be set to CHECKSUM_HW
indicating that hardware has to do checksum calculation. Hardware should
compute the UDP checksum of complete datagram and also ip header checksum of
each fragmented IP packet.
For IPV6 the UFO provides the fragment identification-id in
skb_shinfo(skb)->ip6_frag_id. The adapter should use this ID for generating
IPv6 fragments.
Signed-off-by: Ananda Raju <[email protected]>
Signed-off-by: Rusty Russell <[email protected]> (forwarded)
Signed-off-by: Arnaldo Carvalho de Melo <[email protected]>
|
gpu_data_for_centered_image(ImageRenderData *ans, unsigned int screen_width_px, unsigned int screen_height_px, unsigned int width, unsigned int height) {
static const ImageRef source_rect = { .src_rect = { .left=0, .top=0, .bottom=1, .right=1 }};
const ImageRef *ref = &source_rect;
float width_frac = 2 * MIN(1, width / (float)screen_width_px), height_frac = 2 * MIN(1, height / (float)screen_height_px);
float hmargin = (2 - width_frac) / 2;
float vmargin = (2 - height_frac) / 2;
const ImageRect r = { .left = -1 + hmargin, .right = -1 + hmargin + width_frac, .top = 1 - vmargin, .bottom = 1 - vmargin - height_frac };
set_vertex_data(ans, ref, &r);
}
| 0 |
[
"CWE-787"
] |
kitty
|
82c137878c2b99100a3cdc1c0f0efea069313901
| 108,651,783,320,714,670,000,000,000,000,000,000,000 | 9 |
Graphics protocol: Dont return filename in the error message when opening file fails, since filenames can contain control characters
Fixes #3128
|
static WERROR dcesrv_dssetup_DsRoleDemoteDc(struct dcesrv_call_state *dce_call, TALLOC_CTX *mem_ctx,
struct dssetup_DsRoleDemoteDc *r)
{
DCESRV_FAULT(DCERPC_FAULT_OP_RNG_ERROR);
}
| 0 |
[
"CWE-200"
] |
samba
|
0a3aa5f908e351201dc9c4d4807b09ed9eedff77
| 59,250,306,635,275,885,000,000,000,000,000,000,000 | 5 |
CVE-2022-32746 ldb: Make use of functions for appending to an ldb_message
This aims to minimise usage of the error-prone pattern of searching for
a just-added message element in order to make modifications to it (and
potentially finding the wrong element).
BUG: https://bugzilla.samba.org/show_bug.cgi?id=15009
Signed-off-by: Joseph Sutton <[email protected]>
|
int MonClient::authenticate(double timeout)
{
std::lock_guard lock(monc_lock);
if (active_con) {
ldout(cct, 5) << "already authenticated" << dendl;
return 0;
}
sub.want("monmap", monmap.get_epoch() ? monmap.get_epoch() + 1 : 0, 0);
sub.want("config", 0, 0);
if (!_opened())
_reopen_session();
utime_t until = ceph_clock_now();
until += timeout;
if (timeout > 0.0)
ldout(cct, 10) << "authenticate will time out at " << until << dendl;
authenticate_err = 1; // == in progress
while (!active_con && authenticate_err >= 0) {
if (timeout > 0.0) {
int r = auth_cond.WaitUntil(monc_lock, until);
if (r == ETIMEDOUT && !active_con) {
ldout(cct, 0) << "authenticate timed out after " << timeout << dendl;
authenticate_err = -r;
}
} else {
auth_cond.Wait(monc_lock);
}
}
if (active_con) {
ldout(cct, 5) << __func__ << " success, global_id "
<< active_con->get_global_id() << dendl;
// active_con should not have been set if there was an error
ceph_assert(authenticate_err >= 0);
authenticated = true;
}
if (authenticate_err < 0 && auth_registry.no_keyring_disabled_cephx()) {
lderr(cct) << __func__ << " NOTE: no keyring found; disabled cephx authentication" << dendl;
}
return authenticate_err;
}
| 0 |
[
"CWE-294"
] |
ceph
|
2927fd91d41e505237cc73f9700e5c6a63e5cb4f
| 187,773,702,045,180,780,000,000,000,000,000,000,000 | 44 |
mon/MonClient: bring back CEPHX_V2 authorizer challenges
Commit c58c5754dfd2 ("msg/async/ProtocolV1: use AuthServer and
AuthClient") introduced a backwards compatibility issue into msgr1.
To fix it, commit 321548010578 ("mon/MonClient: skip CEPHX_V2
challenge if client doesn't support it") set out to skip authorizer
challenges for peers that don't support CEPHX_V2. However, it
made it so that authorizer challenges are skipped for all peers in
both msgr1 and msgr2 cases, effectively disabling the protection
against replay attacks that was put in place in commit f80b848d3f83
("auth/cephx: add authorizer challenge", CVE-2018-1128).
This is because con->get_features() always returns 0 at that
point. In msgr1 case, the peer shares its features along with the
authorizer, but while they are available in connect_msg.features they
aren't assigned to con until ProtocolV1::open(). In msgr2 case, the
peer doesn't share its features until much later (in CLIENT_IDENT
frame, i.e. after the authentication phase). The result is that
!CEPHX_V2 branch is taken in all cases and replay attack protection
is lost.
Only clusters with cephx_service_require_version set to 2 on the
service daemons would not be silently downgraded. But, since the
default is 1 and there are no reports of looping on BADAUTHORIZER
faults, I'm pretty sure that no one has ever done that. Note that
cephx_require_version set to 2 would have no effect even though it
is supposed to be stronger than cephx_service_require_version
because MonClient::handle_auth_request() didn't check it.
To fix:
- for msgr1, check connect_msg.features (as was done before commit
c58c5754dfd2) and challenge if CEPHX_V2 is supported. Together
with two preceding patches that resurrect proper cephx_* option
handling in msgr1, this covers both "I want old clients to work"
and "I wish to require better authentication" use cases.
- for msgr2, don't check anything and always challenge. CEPHX_V2
predates msgr2, anyone speaking msgr2 must support it.
Signed-off-by: Ilya Dryomov <[email protected]>
(cherry picked from commit 4a82c72e3bdddcb625933e83af8b50a444b961f1)
Conflicts:
src/msg/async/ProtocolV1.cc [ commit c58c5754dfd2
("msg/async/ProtocolV1: use AuthServer and AuthClient") not
in nautilus. This means that only msgr2 is affected, so drop
ProtocolV1.cc hunk. As a result, skip_authorizer_challenge is
never set, but this is fine because msgr1 still uses old ms_*
auth methods and tests CEPHX_V2 appropriately. ]
|
static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
struct ath_txq *txq,
struct ath_atx_tid *tid,
struct sk_buff *skb)
{
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_frame_info *fi = get_frame_info(skb);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ath_buf *bf;
int fragno;
u16 seqno;
bf = ath_tx_get_buffer(sc);
if (!bf) {
ath_dbg(common, XMIT, "TX buffers are full\n");
return NULL;
}
ATH_TXBUF_RESET(bf);
if (tid) {
fragno = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
seqno = tid->seq_next;
hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);
if (fragno)
hdr->seq_ctrl |= cpu_to_le16(fragno);
if (!ieee80211_has_morefrags(hdr->frame_control))
INCR(tid->seq_next, IEEE80211_SEQ_MAX);
bf->bf_state.seqno = seqno;
}
bf->bf_mpdu = skb;
bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
skb->len, DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
bf->bf_mpdu = NULL;
bf->bf_buf_addr = 0;
ath_err(ath9k_hw_common(sc->sc_ah),
"dma_mapping_error() on TX\n");
ath_tx_return_buffer(sc, bf);
return NULL;
}
fi->bf = bf;
return bf;
}
| 0 |
[
"CWE-362",
"CWE-241"
] |
linux
|
21f8aaee0c62708654988ce092838aa7df4d25d8
| 57,244,717,207,161,490,000,000,000,000,000,000,000 | 51 |
ath9k: protect tid->sched check
We check tid->sched without a lock taken on ath_tx_aggr_sleep(). That
is race condition which can result of doing list_del(&tid->list) twice
(second time with poisoned list node) and cause crash like shown below:
[424271.637220] BUG: unable to handle kernel paging request at 00100104
[424271.637328] IP: [<f90fc072>] ath_tx_aggr_sleep+0x62/0xe0 [ath9k]
...
[424271.639953] Call Trace:
[424271.639998] [<f90f6900>] ? ath9k_get_survey+0x110/0x110 [ath9k]
[424271.640083] [<f90f6942>] ath9k_sta_notify+0x42/0x50 [ath9k]
[424271.640177] [<f809cfef>] sta_ps_start+0x8f/0x1c0 [mac80211]
[424271.640258] [<c10f730e>] ? free_compound_page+0x2e/0x40
[424271.640346] [<f809e915>] ieee80211_rx_handlers+0x9d5/0x2340 [mac80211]
[424271.640437] [<c112f048>] ? kmem_cache_free+0x1d8/0x1f0
[424271.640510] [<c1345a84>] ? kfree_skbmem+0x34/0x90
[424271.640578] [<c10fc23c>] ? put_page+0x2c/0x40
[424271.640640] [<c1345a84>] ? kfree_skbmem+0x34/0x90
[424271.640706] [<c1345a84>] ? kfree_skbmem+0x34/0x90
[424271.640787] [<f809dde3>] ? ieee80211_rx_handlers_result+0x73/0x1d0 [mac80211]
[424271.640897] [<f80a07a0>] ieee80211_prepare_and_rx_handle+0x520/0xad0 [mac80211]
[424271.641009] [<f809e22d>] ? ieee80211_rx_handlers+0x2ed/0x2340 [mac80211]
[424271.641104] [<c13846ce>] ? ip_output+0x7e/0xd0
[424271.641182] [<f80a1057>] ieee80211_rx+0x307/0x7c0 [mac80211]
[424271.641266] [<f90fa6ee>] ath_rx_tasklet+0x88e/0xf70 [ath9k]
[424271.641358] [<f80a0f2c>] ? ieee80211_rx+0x1dc/0x7c0 [mac80211]
[424271.641445] [<f90f82db>] ath9k_tasklet+0xcb/0x130 [ath9k]
Bug report:
https://bugzilla.kernel.org/show_bug.cgi?id=70551
Reported-and-tested-by: Max Sydorenko <[email protected]>
Cc: [email protected]
Signed-off-by: Stanislaw Gruszka <[email protected]>
Signed-off-by: John W. Linville <[email protected]>
|
int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
{
int err;
u32 ptr32;
if (!access_ok(VERIFY_READ, from, sizeof(compat_siginfo_t)))
return -EFAULT;
err = __get_user(to->si_signo, &from->si_signo);
err |= __get_user(to->si_errno, &from->si_errno);
err |= __get_user(to->si_code, &from->si_code);
err |= __get_user(to->si_pid, &from->si_pid);
err |= __get_user(to->si_uid, &from->si_uid);
err |= __get_user(ptr32, &from->si_ptr);
to->si_ptr = compat_ptr(ptr32);
return err;
}
| 0 |
[
"CWE-399"
] |
linux-2.6
|
e40cd10ccff3d9fbffd57b93780bee4b7b9bff51
| 252,412,625,442,733,600,000,000,000,000,000,000,000 | 19 |
x86: clear DF before calling signal handler
The Linux kernel currently does not clear the direction flag before
calling a signal handler, whereas the x86/x86-64 ABI requires that.
Linux had this behavior/bug forever, but this becomes a real problem
with gcc version 4.3, which assumes that the direction flag is
correctly cleared at the entry of a function.
This patches changes the setup_frame() functions to clear the
direction before entering the signal handler.
Signed-off-by: Aurelien Jarno <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
Acked-by: H. Peter Anvin <[email protected]>
|
void open() { list.rewind(); }
| 0 |
[
"CWE-617"
] |
server
|
2e7891080667c59ac80f788eef4d59d447595772
| 123,521,548,973,830,350,000,000,000,000,000,000,000 | 1 |
MDEV-25635 Assertion failure when pushing from HAVING into WHERE of view
This bug could manifest itself after pushing a where condition over a
mergeable derived table / view / CTE DT into a grouping view / derived
table / CTE V whose item list contained set functions with constant
arguments such as MIN(2), SUM(1) etc. In such cases the field references
used in the condition pushed into the view V that correspond set functions
are wrapped into Item_direct_view_ref wrappers. Due to a wrong implementation
of the virtual method const_item() for the class Item_direct_view_ref the
wrapped set functions with constant arguments could be erroneously taken
for constant items. This could lead to a wrong result set returned by the
main select query in 10.2. In 10.4 where a possibility of pushing condition
from HAVING into WHERE had been added this could cause a crash.
Approved by Sergey Petrunya <[email protected]>
|
static struct nfs4_ff_layout_mirror *ff_layout_alloc_mirror(gfp_t gfp_flags)
{
struct nfs4_ff_layout_mirror *mirror;
mirror = kzalloc(sizeof(*mirror), gfp_flags);
if (mirror != NULL) {
spin_lock_init(&mirror->lock);
refcount_set(&mirror->ref, 1);
INIT_LIST_HEAD(&mirror->mirrors);
}
return mirror;
}
| 0 |
[
"CWE-787"
] |
linux
|
ed34695e15aba74f45247f1ee2cf7e09d449f925
| 184,560,789,726,157,730,000,000,000,000,000,000,000 | 12 |
pNFS/flexfiles: fix incorrect size check in decode_nfs_fh()
We (adam zabrocki, alexander matrosov, alexander tereshkin, maksym
bazalii) observed the check:
if (fh->size > sizeof(struct nfs_fh))
should not use the size of the nfs_fh struct which includes an extra two
bytes from the size field.
struct nfs_fh {
unsigned short size;
unsigned char data[NFS_MAXFHSIZE];
}
but should determine the size from data[NFS_MAXFHSIZE] so the memcpy
will not write 2 bytes beyond destination. The proposed fix is to
compare against the NFS_MAXFHSIZE directly, as is done elsewhere in fs
code base.
Fixes: d67ae825a59d ("pnfs/flexfiles: Add the FlexFile Layout Driver")
Signed-off-by: Nikola Livic <[email protected]>
Signed-off-by: Dan Carpenter <[email protected]>
Signed-off-by: Trond Myklebust <[email protected]>
|
static int tipc_sk_insert(struct tipc_sock *tsk)
{
struct sock *sk = &tsk->sk;
struct net *net = sock_net(sk);
struct tipc_net *tn = net_generic(net, tipc_net_id);
u32 remaining = (TIPC_MAX_PORT - TIPC_MIN_PORT) + 1;
u32 portid = prandom_u32() % remaining + TIPC_MIN_PORT;
while (remaining--) {
portid++;
if ((portid < TIPC_MIN_PORT) || (portid > TIPC_MAX_PORT))
portid = TIPC_MIN_PORT;
tsk->portid = portid;
sock_hold(&tsk->sk);
if (!rhashtable_lookup_insert_fast(&tn->sk_rht, &tsk->node,
tsk_rht_params))
return 0;
sock_put(&tsk->sk);
}
return -1;
}
| 0 |
[
"CWE-703"
] |
linux
|
45e093ae2830cd1264677d47ff9a95a71f5d9f9c
| 49,892,430,050,799,670,000,000,000,000,000,000,000 | 22 |
tipc: check nl sock before parsing nested attributes
Make sure the socket for which the user is listing publication exists
before parsing the socket netlink attributes.
Prior to this patch a call without any socket caused a NULL pointer
dereference in tipc_nl_publ_dump().
Tested-and-reported-by: Baozeng Ding <[email protected]>
Signed-off-by: Richard Alpe <[email protected]>
Acked-by: Jon Maloy <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
void Inspect::operator()(Assignment_Ptr assn)
{
append_token(assn->variable(), assn);
append_colon_separator();
assn->value()->perform(this);
if (assn->is_default()) {
append_optional_space();
append_string("!default");
}
append_delimiter();
}
| 0 |
[
"CWE-476"
] |
libsass
|
38f4c3699d06b64128bebc7cf1e8b3125be74dc4
| 230,841,634,087,045,750,000,000,000,000,000,000,000 | 11 |
Fix possible bug with handling empty reference combinators
Fixes #2665
|
static void dump_pagetable(unsigned long address)
{
pgd_t *base = __va(read_cr3());
pgd_t *pgd = &base[pgd_index(address)];
pmd_t *pmd;
pte_t *pte;
#ifdef CONFIG_X86_PAE
printk("*pdpt = %016Lx ", pgd_val(*pgd));
if (!low_pfn(pgd_val(*pgd) >> PAGE_SHIFT) || !pgd_present(*pgd))
goto out;
#endif
pmd = pmd_offset(pud_offset(pgd, address), address);
printk(KERN_CONT "*pde = %0*Lx ", sizeof(*pmd) * 2, (u64)pmd_val(*pmd));
/*
* We must not directly access the pte in the highpte
* case if the page table is located in highmem.
* And let's rather not kmap-atomic the pte, just in case
* it's allocated already:
*/
if (!low_pfn(pmd_pfn(*pmd)) || !pmd_present(*pmd) || pmd_large(*pmd))
goto out;
pte = pte_offset_kernel(pmd, address);
printk("*pte = %0*Lx ", sizeof(*pte) * 2, (u64)pte_val(*pte));
out:
printk("\n");
}
| 0 |
[
"CWE-264"
] |
linux
|
548acf19234dbda5a52d5a8e7e205af46e9da840
| 186,573,364,097,277,500,000,000,000,000,000,000,000 | 29 |
x86/mm: Expand the exception table logic to allow new handling options
Huge amounts of help from Andy Lutomirski and Borislav Petkov to
produce this. Andy provided the inspiration to add classes to the
exception table with a clever bit-squeezing trick, Boris pointed
out how much cleaner it would all be if we just had a new field.
Linus Torvalds blessed the expansion with:
' I'd rather not be clever in order to save just a tiny amount of space
in the exception table, which isn't really criticial for anybody. '
The third field is another relative function pointer, this one to a
handler that executes the actions.
We start out with three handlers:
1: Legacy - just jumps the to fixup IP
2: Fault - provide the trap number in %ax to the fixup code
3: Cleaned up legacy for the uaccess error hack
Signed-off-by: Tony Luck <[email protected]>
Reviewed-by: Borislav Petkov <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Link: http://lkml.kernel.org/r/f6af78fcbd348cf4939875cfda9c19689b5e50b8.1455732970.git.tony.luck@intel.com
Signed-off-by: Ingo Molnar <[email protected]>
|
static int tipc_ehdr_build(struct net *net, struct tipc_aead *aead,
u8 tx_key, struct sk_buff *skb,
struct tipc_crypto *__rx)
{
struct tipc_msg *hdr = buf_msg(skb);
struct tipc_ehdr *ehdr;
u32 user = msg_user(hdr);
u64 seqno;
int ehsz;
/* Make room for encryption header */
ehsz = (user != LINK_CONFIG) ? EHDR_SIZE : EHDR_CFG_SIZE;
WARN_ON(skb_headroom(skb) < ehsz);
ehdr = (struct tipc_ehdr *)skb_push(skb, ehsz);
/* Obtain a seqno first:
* Use the key seqno (= cluster wise) if dest is unknown or we're in
* cluster key mode, otherwise it's better for a per-peer seqno!
*/
if (!__rx || aead->mode == CLUSTER_KEY)
seqno = atomic64_inc_return(&aead->seqno);
else
seqno = atomic64_inc_return(&__rx->sndnxt);
/* Revoke the key if seqno is wrapped around */
if (unlikely(!seqno))
return tipc_crypto_key_revoke(net, tx_key);
/* Word 1-2 */
ehdr->seqno = cpu_to_be64(seqno);
/* Words 0, 3- */
ehdr->version = TIPC_EVERSION;
ehdr->user = 0;
ehdr->keepalive = 0;
ehdr->tx_key = tx_key;
ehdr->destined = (__rx) ? 1 : 0;
ehdr->rx_key_active = (__rx) ? __rx->key.active : 0;
ehdr->rx_nokey = (__rx) ? __rx->nokey : 0;
ehdr->master_key = aead->crypto->key_master;
ehdr->reserved_1 = 0;
ehdr->reserved_2 = 0;
switch (user) {
case LINK_CONFIG:
ehdr->user = LINK_CONFIG;
memcpy(ehdr->id, tipc_own_id(net), NODE_ID_LEN);
break;
default:
if (user == LINK_PROTOCOL && msg_type(hdr) == STATE_MSG) {
ehdr->user = LINK_PROTOCOL;
ehdr->keepalive = msg_is_keepalive(hdr);
}
ehdr->addr = hdr->hdr[3];
break;
}
return ehsz;
}
| 0 |
[
"CWE-20"
] |
linux
|
fa40d9734a57bcbfa79a280189799f76c88f7bb0
| 90,022,925,516,424,000,000,000,000,000,000,000,000 | 59 |
tipc: fix size validations for the MSG_CRYPTO type
The function tipc_crypto_key_rcv is used to parse MSG_CRYPTO messages
to receive keys from other nodes in the cluster in order to decrypt any
further messages from them.
This patch verifies that any supplied sizes in the message body are
valid for the received message.
Fixes: 1ef6f7c9390f ("tipc: add automatic session key exchange")
Signed-off-by: Max VA <[email protected]>
Acked-by: Ying Xue <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
Acked-by: Jon Maloy <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
static void build_guest_fsinfo_for_virtual_device(char const *syspath,
GuestFilesystemInfo *fs,
Error **errp)
{
DIR *dir;
char *dirpath;
struct dirent *entry;
dirpath = g_strdup_printf("%s/slaves", syspath);
dir = opendir(dirpath);
if (!dir) {
if (errno != ENOENT) {
error_setg_errno(errp, errno, "opendir(\"%s\")", dirpath);
}
g_free(dirpath);
return;
}
for (;;) {
errno = 0;
entry = readdir(dir);
if (entry == NULL) {
if (errno) {
error_setg_errno(errp, errno, "readdir(\"%s\")", dirpath);
}
break;
}
if (entry->d_type == DT_LNK) {
char *path;
g_debug(" slave device '%s'", entry->d_name);
path = g_strdup_printf("%s/slaves/%s", syspath, entry->d_name);
build_guest_fsinfo_for_device(path, fs, errp);
g_free(path);
if (*errp) {
break;
}
}
}
g_free(dirpath);
closedir(dir);
}
| 0 |
[
"CWE-190"
] |
qemu
|
141b197408ab398c4f474ac1a728ab316e921f2b
| 158,328,791,243,104,520,000,000,000,000,000,000,000 | 45 |
qga: check bytes count read by guest-file-read
While reading file content via 'guest-file-read' command,
'qmp_guest_file_read' routine allocates buffer of count+1
bytes. It could overflow for large values of 'count'.
Add check to avoid it.
Reported-by: Fakhri Zulkifli <[email protected]>
Signed-off-by: Prasad J Pandit <[email protected]>
Cc: [email protected]
Signed-off-by: Michael Roth <[email protected]>
|
static inline char *
string_extract_single_quoted (string, sindex)
char *string;
int *sindex;
{
register int i;
size_t slen;
char *t;
DECLARE_MBSTATE;
/* Don't need slen for ADVANCE_CHAR unless multibyte chars possible. */
slen = (MB_CUR_MAX > 1) ? strlen (string + *sindex) + *sindex : 0;
i = *sindex;
while (string[i] && string[i] != '\'')
ADVANCE_CHAR (string, slen, i);
t = substring (string, *sindex, i);
if (string[i])
i++;
*sindex = i;
return (t);
| 0 |
[
"CWE-20"
] |
bash
|
4f747edc625815f449048579f6e65869914dd715
| 233,935,451,620,095,700,000,000,000,000,000,000,000 | 23 |
Bash-4.4 patch 7
|
void KrecipesView::editRecipe()
{
KrePanel vis_panel = panelMap[ rightPanel->visiblePanel() ];
switch ( vis_panel ) {
case RecipeView:
actionRecipe( viewPanel->currentRecipes() [ 0 ], 1 );
break;
case SelectP:
selectPanel->getActionsHandler()->edit();
break;
case MatcherP:
ingredientMatcherPanel->getActionsHandler()->edit();
default:
break;
}
}
| 0 |
[] |
krecipes
|
cd1490fb5fe82cbe9172a43be13298001b446ecd
| 21,274,183,578,996,853,000,000,000,000,000,000,000 | 17 |
Use WebKit instead of KHTML for printing recipes, fixes sourceforge #2990118 and
#2960140.
svn path=/trunk/extragear/utils/krecipes/; revision=1137824
|
static void urlParsePostBody(struct URL *url,
const struct HttpConnection *http,
const char *buf, int len) {
struct HashMap contentType;
initHashMap(&contentType, urlDestroyHashMapEntry, NULL);
const char *ctHeader = getFromHashMap(&http->header, "content-type");
urlParseHeaderLine(&contentType, ctHeader, ctHeader ? strlen(ctHeader) : 0);
if (getRefFromHashMap(&contentType, "application/x-www-form-urlencoded")) {
urlParseQueryString(&url->args, buf, len);
} else if (getRefFromHashMap(&contentType, "multipart/form-data")) {
const char *boundary = getFromHashMap(&contentType, "boundary");
if (boundary && *boundary) {
const char *lastPart = NULL;
for (const char *part = buf; len > 0; ) {
const char *ptr;
if ((part == buf && (ptr = urlMemstr(part, len, "--")) != NULL) ||
(ptr = urlMemstr(part, len, "\r\n--")) != NULL) {
len -= ptr - part + (part == buf ? 2 : 4);
part = ptr + (part == buf ? 2 : 4);
if (!urlMemcmp(part, len, boundary)) {
int i = strlen(boundary);
len -= i;
part += i;
if (!urlMemcmp(part, len, "\r\n")) {
len -= 2;
part += 2;
if (lastPart) {
urlParsePart(url, lastPart, ptr - lastPart);
} else {
if (ptr != buf) {
info("[http] Ignoring prologue before \"multipart/form-data\"!");
}
}
lastPart = part;
} else if (!urlMemcmp(part, len, "--\r\n")) {
len -= 4;
part += 4;
urlParsePart(url, lastPart, ptr - lastPart);
lastPart = NULL;
if (len > 0) {
info("[http] Ignoring epilogue past end of \"multipart/"
"form-data\"!");
}
}
}
}
/* elf-2018.09.09: Detection of broken multipart/form-data
fixes DoS vulnerability.
On 9/9/18 10:43 AM, Imre Rad wrote:
Hi Markus, Marc!
I identified a vulnerability today in Shellinabox, it is
remote a denial of service, shellinaboxd eating up 100% cpu
and not processing subsequent requests after the attack was
mounted.
*/
else {
warn ("[http] Ignorning broken multipart/form-data");
break;
}
}
if (lastPart) {
warn("[http] Missing final \"boundary\" for \"multipart/form-data\"!");
}
} else {
warn("[http] Missing \"boundary\" information for \"multipart/form-data\"!");
}
}
destroyHashMap(&contentType);
}
| 0 |
[
"CWE-400",
"CWE-703",
"CWE-835"
] |
shellinabox
|
4f0ecc31ac6f985e0dd3f5a52cbfc0e9251f6361
| 285,165,302,624,117,740,000,000,000,000,000,000,000 | 71 |
Rolling code for version 2.21
|
static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg,
const struct bpf_reg_state *off_reg,
u32 *alu_limit, u8 opcode)
{
bool off_is_neg = off_reg->smin_value < 0;
bool mask_to_left = (opcode == BPF_ADD && off_is_neg) ||
(opcode == BPF_SUB && !off_is_neg);
u32 max = 0, ptr_limit = 0;
if (!tnum_is_const(off_reg->var_off) &&
(off_reg->smin_value < 0) != (off_reg->smax_value < 0))
return REASON_BOUNDS;
switch (ptr_reg->type) {
case PTR_TO_STACK:
/* Offset 0 is out-of-bounds, but acceptable start for the
* left direction, see BPF_REG_FP. Also, unknown scalar
* offset where we would need to deal with min/max bounds is
* currently prohibited for unprivileged.
*/
max = MAX_BPF_STACK + mask_to_left;
ptr_limit = -(ptr_reg->var_off.value + ptr_reg->off);
break;
case PTR_TO_MAP_VALUE:
max = ptr_reg->map_ptr->value_size;
ptr_limit = (mask_to_left ?
ptr_reg->smin_value :
ptr_reg->umax_value) + ptr_reg->off;
break;
default:
return REASON_TYPE;
}
if (ptr_limit >= max)
return REASON_LIMIT;
*alu_limit = ptr_limit;
return 0;
}
| 0 |
[
"CWE-125"
] |
bpf
|
049c4e13714ecbca567b4d5f6d563f05d431c80e
| 197,233,882,200,896,700,000,000,000,000,000,000,000 | 38 |
bpf: Fix alu32 const subreg bound tracking on bitwise operations
Fix a bug in the verifier's scalar32_min_max_*() functions which leads to
incorrect tracking of 32 bit bounds for the simulation of and/or/xor bitops.
When both the src & dst subreg is a known constant, then the assumption is
that scalar_min_max_*() will take care to update bounds correctly. However,
this is not the case, for example, consider a register R2 which has a tnum
of 0xffffffff00000000, meaning, lower 32 bits are known constant and in this
case of value 0x00000001. R2 is then and'ed with a register R3 which is a
64 bit known constant, here, 0x100000002.
What can be seen in line '10:' is that 32 bit bounds reach an invalid state
where {u,s}32_min_value > {u,s}32_max_value. The reason is scalar32_min_max_*()
delegates 32 bit bounds updates to scalar_min_max_*(), however, that really
only takes place when both the 64 bit src & dst register is a known constant.
Given scalar32_min_max_*() is intended to be designed as closely as possible
to scalar_min_max_*(), update the 32 bit bounds in this situation through
__mark_reg32_known() which will set all {u,s}32_{min,max}_value to the correct
constant, which is 0x00000000 after the fix (given 0x00000001 & 0x00000002 in
32 bit space). This is possible given var32_off already holds the final value
as dst_reg->var_off is updated before calling scalar32_min_max_*().
Before fix, invalid tracking of R2:
[...]
9: R0_w=inv1337 R1=ctx(id=0,off=0,imm=0) R2_w=inv(id=0,smin_value=-9223372036854775807 (0x8000000000000001),smax_value=9223372032559808513 (0x7fffffff00000001),umin_value=1,umax_value=0xffffffff00000001,var_off=(0x1; 0xffffffff00000000),s32_min_value=1,s32_max_value=1,u32_min_value=1,u32_max_value=1) R3_w=inv4294967298 R10=fp0
9: (5f) r2 &= r3
10: R0_w=inv1337 R1=ctx(id=0,off=0,imm=0) R2_w=inv(id=0,smin_value=0,smax_value=4294967296 (0x100000000),umin_value=0,umax_value=0x100000000,var_off=(0x0; 0x100000000),s32_min_value=1,s32_max_value=0,u32_min_value=1,u32_max_value=0) R3_w=inv4294967298 R10=fp0
[...]
After fix, correct tracking of R2:
[...]
9: R0_w=inv1337 R1=ctx(id=0,off=0,imm=0) R2_w=inv(id=0,smin_value=-9223372036854775807 (0x8000000000000001),smax_value=9223372032559808513 (0x7fffffff00000001),umin_value=1,umax_value=0xffffffff00000001,var_off=(0x1; 0xffffffff00000000),s32_min_value=1,s32_max_value=1,u32_min_value=1,u32_max_value=1) R3_w=inv4294967298 R10=fp0
9: (5f) r2 &= r3
10: R0_w=inv1337 R1=ctx(id=0,off=0,imm=0) R2_w=inv(id=0,smin_value=0,smax_value=4294967296 (0x100000000),umin_value=0,umax_value=0x100000000,var_off=(0x0; 0x100000000),s32_min_value=0,s32_max_value=0,u32_min_value=0,u32_max_value=0) R3_w=inv4294967298 R10=fp0
[...]
Fixes: 3f50f132d840 ("bpf: Verifier, do explicit ALU32 bounds tracking")
Fixes: 2921c90d4718 ("bpf: Fix a verifier failure with xor")
Reported-by: Manfred Paul (@_manfp)
Reported-by: Thadeu Lima de Souza Cascardo <[email protected]>
Signed-off-by: Daniel Borkmann <[email protected]>
Reviewed-by: John Fastabend <[email protected]>
Acked-by: Alexei Starovoitov <[email protected]>
|
static void preloadobjstms(fz_context *ctx, pdf_document *doc)
{
pdf_obj *obj;
int num;
int xref_len = pdf_xref_len(ctx, doc);
for (num = 0; num < xref_len; num++)
{
if (pdf_get_xref_entry(ctx, doc, num)->type == 'o')
{
obj = pdf_load_object(ctx, doc, num);
pdf_drop_obj(ctx, obj);
}
}
}
| 0 |
[
"CWE-119"
] |
mupdf
|
520cc26d18c9ee245b56e9e91f9d4fcae02be5f0
| 199,911,020,102,260,400,000,000,000,000,000,000,000 | 15 |
Bug 689699: Avoid buffer overrun.
When cleaning a pdf file, various lists (of pdf_xref_len length) are
defined early on.
If we trigger a repair during the clean, this can cause pdf_xref_len
to increase causing an overrun.
Fix this by watching for changes in the length, and checking accesses
to the list for validity.
This also appears to fix bugs 698700-698703.
|
int streamParseStrictIDOrReply(client *c, robj *o, streamID *id, uint64_t missing_seq, int *seq_given) {
return streamGenericParseIDOrReply(c,o,id,missing_seq,1,seq_given);
}
| 0 |
[
"CWE-703",
"CWE-401"
] |
redis
|
4a7a4e42db8ff757cdf3f4a824f66426036034ef
| 146,731,482,412,628,700,000,000,000,000,000,000,000 | 3 |
Fix memory leak in streamGetEdgeID (#10753)
si is initialized by streamIteratorStart(), we should call
streamIteratorStop() on it when done.
regression introduced in #9127 (redis 7.0)
|
GF_Err dims_on_child_box(GF_Box *s, GF_Box *a, Bool is_rem)
{
GF_DIMSSampleEntryBox *ptr = (GF_DIMSSampleEntryBox *)s;
switch (a->type) {
case GF_ISOM_BOX_TYPE_DIMC:
BOX_FIELD_ASSIGN(config, GF_DIMSSceneConfigBox)
break;
case GF_ISOM_BOX_TYPE_DIST:
BOX_FIELD_ASSIGN(scripts, GF_DIMSScriptTypesBox)
break;
}
return GF_OK;
}
| 0 |
[
"CWE-476"
] |
gpac
|
d527325a9b72218612455a534a508f9e1753f76e
| 195,695,596,086,208,930,000,000,000,000,000,000,000 | 13 |
fixed #1768
|
/* Send a pre-built (raw) DNS message back to CLIENT, copying the raw wire
 * image from MESSAGE into the client's send buffer and patching in the
 * client's query ID.  On any failure the client is dropped with the
 * failing result code. */
ns_client_sendraw(ns_client_t *client, dns_message_t *message) {
	isc_result_t result;
	unsigned char *data;
	isc_buffer_t buffer;
	isc_region_t r;
	isc_region_t *mr;

	REQUIRE(NS_CLIENT_VALID(client));

	CTRACE("sendraw");

	/* The raw message must have been captured earlier; bail out if not. */
	mr = dns_message_getrawmessage(message);
	if (mr == NULL) {
		result = ISC_R_UNEXPECTEDEND;
		goto done;
	}

	client_allocsendbuf(client, &buffer, &data);
	if (mr->length > isc_buffer_length(&buffer)) {
		result = ISC_R_NOSPACE;
		goto done;
	}

	/*
	 * Copy message to buffer and fixup id.
	 */
	isc_buffer_availableregion(&buffer, &r);
	result = isc_buffer_copyregion(&buffer, mr);
	if (result != ISC_R_SUCCESS) {
		goto done;
	}
	/* Overwrite the first two octets (the DNS header ID) with the
	 * client's own message ID. */
	r.base[0] = (client->message->id >> 8) & 0xff;
	r.base[1] = client->message->id & 0xff;

#ifdef HAVE_DNSTAP
	/* Log the outgoing response via dnstap, classifying it as an
	 * update (UR), client (CR) or authoritative (AR) response based
	 * on the request's opcode and RD flag. */
	if (client->view != NULL) {
		bool tcp = TCP_CLIENT(client);
		dns_dtmsgtype_t dtmsgtype;
		if (client->message->opcode == dns_opcode_update) {
			dtmsgtype = DNS_DTTYPE_UR;
		} else if ((client->message->flags & DNS_MESSAGEFLAG_RD) != 0) {
			dtmsgtype = DNS_DTTYPE_CR;
		} else {
			dtmsgtype = DNS_DTTYPE_AR;
		}
		dns_dt_send(client->view, dtmsgtype, &client->peeraddr,
			    &client->destsockaddr, tcp, NULL,
			    &client->requesttime, NULL, &buffer);
	}
#endif

	client_sendpkg(client, &buffer);

	return;
done:
	/* Error path: release any TCP buffer and drop the client with the
	 * failing result. */
	if (client->tcpbuf != NULL) {
		isc_mem_put(client->mctx, client->tcpbuf,
			    NS_CLIENT_TCP_BUFFER_SIZE);
		client->tcpbuf = NULL;
	}
	ns_client_drop(client, result);
}
| 0 |
[
"CWE-617"
] |
bind9
|
15996f0cb15631b95a801e3e88928494a69ad6ee
| 180,156,904,690,619,540,000,000,000,000,000,000,000 | 64 |
ns_client_error() could assert if rcode was overridden to NOERROR
The client->rcode_override was originally created to force the server
to send SERVFAIL in some cases when it would normally have sent FORMERR.
More recently, it was used in a3ba95116ed04594ea59a8124bf781b30367a7a2
commit (part of GL #2790) to force the sending of a TC=1 NOERROR
response, triggering a retry via TCP, when a UDP packet could not be
sent due to ISC_R_MAXSIZE.
This ran afoul of a pre-existing INSIST in ns_client_error() when
RRL was in use. the INSIST was based on the assumption that
ns_client_error() could never result in a non-error rcode. as
that assumption is no longer valid, the INSIST has been removed.
|
/* ieee802154 hw op: program CHANNEL into the transceiver's SR_CHANNEL
 * subregister.  PAGE is unused here.  Returns 0 on success or the
 * negative error from the USB register write. */
static int atusb_set_channel(struct ieee802154_hw *hw, u8 page, u8 channel)
{
	struct atusb *atusb = hw->priv;
	int ret = atusb_write_subreg(atusb, SR_CHANNEL, channel);

	return ret < 0 ? ret : 0;
}
| 0 |
[
"CWE-416"
] |
linux
|
7fd25e6fc035f4b04b75bca6d7e8daa069603a76
| 309,576,404,590,828,750,000,000,000,000,000,000,000 | 10 |
ieee802154: atusb: fix use-after-free at disconnect
The disconnect callback was accessing the hardware-descriptor private
data after having having freed it.
Fixes: 7490b008d123 ("ieee802154: add support for atusb transceiver")
Cc: stable <[email protected]> # 4.2
Cc: Alexander Aring <[email protected]>
Reported-by: [email protected]
Signed-off-by: Johan Hovold <[email protected]>
Signed-off-by: Stefan Schmidt <[email protected]>
|
do_add_recipient (ctrl_t ctrl, const char *name,
                  certlist_t *recplist, int is_encrypt_to, int recp_required)
{
  /* Resolve NAME to a certificate and append it to RECPLIST. */
  int rc = gpgsm_add_to_certlist (ctrl, name, 0, recplist, is_encrypt_to);
  if (rc)
    {
      if (recp_required)
        {
          /* Required recipient: log an error and emit an INV_RECP status
             line so callers/frontends see the failure.  */
          log_error ("can't encrypt to '%s': %s\n", name, gpg_strerror (rc));
          gpgsm_status2 (ctrl, STATUS_INV_RECP,
                         get_inv_recpsgnr_code (rc), name, NULL);
        }
      else
        /* Optional recipient (e.g. encrypt-to): warn but carry on.  */
        log_info (_("Note: won't be able to encrypt to '%s': %s\n"),
                  name, gpg_strerror (rc));
    }
}
| 0 |
[] |
gnupg
|
abd5f6752d693b7f313c19604f0723ecec4d39a6
| 252,912,023,658,385,630,000,000,000,000,000,000,000 | 17 |
dirmngr,gpgsm: Return NULL on fail
* dirmngr/ldapserver.c (ldapserver_parse_one): Set SERVER to NULL.
* sm/gpgsm.c (parse_keyserver_line): Ditto.
--
Reported-by: Joshua Rogers <[email protected]>
"If something inside the ldapserver_parse_one function failed,
'server' would be freed, then returned, leading to a
use-after-free. This code is likely copied from sm/gpgsm.c, which
was also susceptible to this bug."
Signed-off-by: Werner Koch <[email protected]>
|
/* Module entry point: bind the FLAGS command onto the chanserv service. */
void _modinit(module_t *m)
{
	service_named_bind_command("chanserv", &cs_flags);
}
| 1 |
[
"CWE-284"
] |
atheme
|
c597156adc60a45b5f827793cd420945f47bc03b
| 35,445,785,264,083,520,000,000,000,000,000,000,000 | 4 |
chanserv/flags: make Anope FLAGS compatibility an option
Previously, ChanServ FLAGS behavior could be modified by registering or
dropping the keyword nicks "LIST", "CLEAR", and "MODIFY".
Now, a configuration option is available that when turned on (default),
disables registration of these keyword nicks and enables this
compatibility feature. When turned off, registration of these keyword
nicks is possible, and compatibility to Anope's FLAGS command is
disabled.
Fixes atheme/atheme#397
|
/*
 * Walk the guest page tables for ADDR using MMU, recording the table gfns,
 * pte gpas and access bits seen at each level into WALKER.  Returns 1 when
 * the translation succeeds; returns 0 on failure, with walker->fault
 * describing the fault on the error path.
 */
static int FNAME(walk_addr_generic)(struct guest_walker *walker,
				    struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
				    gpa_t addr, u64 access)
{
	int ret;
	pt_element_t pte;
	pt_element_t __user *ptep_user;
	gfn_t table_gfn;
	u64 pt_access, pte_access;
	unsigned index, accessed_dirty, pte_pkey;
	u64 nested_access;
	gpa_t pte_gpa;
	bool have_ad;
	int offset;
	u64 walk_nx_mask = 0;
	const int write_fault = access & PFERR_WRITE_MASK;
	const int user_fault = access & PFERR_USER_MASK;
	const int fetch_fault = access & PFERR_FETCH_MASK;
	u16 errcode = 0;
	gpa_t real_gpa;
	gfn_t gfn;

	trace_kvm_mmu_pagetable_walk(addr, access);
retry_walk:
	walker->level = mmu->root_level;
	pte = mmu->get_guest_pgd(vcpu);
	have_ad = PT_HAVE_ACCESSED_DIRTY(mmu);

#if PTTYPE == 64
	walk_nx_mask = 1ULL << PT64_NX_SHIFT;
	/* 32-bit PAE paging: load the relevant PDPTE as the walk root. */
	if (walker->level == PT32E_ROOT_LEVEL) {
		pte = mmu->get_pdptr(vcpu, (addr >> 30) & 3);
		trace_kvm_mmu_paging_element(pte, walker->level);
		if (!FNAME(is_present_gpte)(pte))
			goto error;
		--walker->level;
	}
#endif
	walker->max_level = walker->level;
	ASSERT(!(is_long_mode(vcpu) && !is_pae(vcpu)));

	/*
	 * FIXME: on Intel processors, loads of the PDPTE registers for PAE paging
	 * by the MOV to CR instruction are treated as reads and do not cause the
	 * processor to set the dirty flag in any EPT paging-structure entry.
	 */
	nested_access = (have_ad ? PFERR_WRITE_MASK : 0) | PFERR_USER_MASK;

	pte_access = ~0;
	++walker->level;

	do {
		unsigned long host_addr;

		pt_access = pte_access;
		--walker->level;

		index = PT_INDEX(addr, walker->level);
		table_gfn = gpte_to_gfn(pte);
		offset = index * sizeof(pt_element_t);
		pte_gpa = gfn_to_gpa(table_gfn) + offset;

		BUG_ON(walker->level < 1);
		walker->table_gfn[walker->level - 1] = table_gfn;
		walker->pte_gpa[walker->level - 1] = pte_gpa;

		/* Translate the table gfn through the nested (L0) tables. */
		real_gpa = kvm_translate_gpa(vcpu, mmu, gfn_to_gpa(table_gfn),
					     nested_access, &walker->fault);

		/*
		 * FIXME: This can happen if emulation (for of an INS/OUTS
		 * instruction) triggers a nested page fault. The exit
		 * qualification / exit info field will incorrectly have
		 * "guest page access" as the nested page fault's cause,
		 * instead of "guest page structure access". To fix this,
		 * the x86_exception struct should be augmented with enough
		 * information to fix the exit_qualification or exit_info_1
		 * fields.
		 */
		if (unlikely(real_gpa == UNMAPPED_GVA))
			return 0;

		host_addr = kvm_vcpu_gfn_to_hva_prot(vcpu, gpa_to_gfn(real_gpa),
					    &walker->pte_writable[walker->level - 1]);
		if (unlikely(kvm_is_error_hva(host_addr)))
			goto error;

		ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
		if (unlikely(__get_user(pte, ptep_user)))
			goto error;
		walker->ptep_user[walker->level - 1] = ptep_user;

		trace_kvm_mmu_paging_element(pte, walker->level);

		/*
		 * Inverting the NX it lets us AND it like other
		 * permission bits.
		 */
		pte_access = pt_access & (pte ^ walk_nx_mask);

		if (unlikely(!FNAME(is_present_gpte)(pte)))
			goto error;

		if (unlikely(FNAME(is_rsvd_bits_set)(mmu, pte, walker->level))) {
			errcode = PFERR_RSVD_MASK | PFERR_PRESENT_MASK;
			goto error;
		}

		walker->ptes[walker->level - 1] = pte;

		/* Convert to ACC_*_MASK flags for struct guest_walker. */
		walker->pt_access[walker->level - 1] = FNAME(gpte_access)(pt_access ^ walk_nx_mask);
	} while (!FNAME(is_last_gpte)(mmu, walker->level, pte));

	pte_pkey = FNAME(gpte_pkeys)(vcpu, pte);
	accessed_dirty = have_ad ? pte_access & PT_GUEST_ACCESSED_MASK : 0;

	/* Convert to ACC_*_MASK flags for struct guest_walker. */
	walker->pte_access = FNAME(gpte_access)(pte_access ^ walk_nx_mask);
	errcode = permission_fault(vcpu, mmu, walker->pte_access, pte_pkey, access);
	if (unlikely(errcode))
		goto error;

	gfn = gpte_to_gfn_lvl(pte, walker->level);
	gfn += (addr & PT_LVL_OFFSET_MASK(walker->level)) >> PAGE_SHIFT;

	/* Legacy 32-bit PSE-36: the pte carries extra physical address bits. */
	if (PTTYPE == 32 && walker->level > PG_LEVEL_4K && is_cpuid_PSE36())
		gfn += pse36_gfn_delta(pte);

	real_gpa = kvm_translate_gpa(vcpu, mmu, gfn_to_gpa(gfn), access, &walker->fault);
	if (real_gpa == UNMAPPED_GVA)
		return 0;

	walker->gfn = real_gpa >> PAGE_SHIFT;

	if (!write_fault)
		FNAME(protect_clean_gpte)(mmu, &walker->pte_access, pte);
	else
		/*
		 * On a write fault, fold the dirty bit into accessed_dirty.
		 * For modes without A/D bits support accessed_dirty will be
		 * always clear.
		 */
		accessed_dirty &= pte >>
			(PT_GUEST_DIRTY_SHIFT - PT_GUEST_ACCESSED_SHIFT);

	if (unlikely(!accessed_dirty)) {
		ret = FNAME(update_accessed_dirty_bits)(vcpu, mmu, walker,
							addr, write_fault);
		if (unlikely(ret < 0))
			goto error;
		else if (ret)
			goto retry_walk;
	}

	pgprintk("%s: pte %llx pte_access %x pt_access %x\n",
		 __func__, (u64)pte, walker->pte_access,
		 walker->pt_access[walker->level - 1]);
	return 1;

error:
	errcode |= write_fault | user_fault;
	if (fetch_fault && (is_efer_nx(mmu) || is_cr4_smep(mmu)))
		errcode |= PFERR_FETCH_MASK;

	walker->fault.vector = PF_VECTOR;
	walker->fault.error_code_valid = true;
	walker->fault.error_code = errcode;

#if PTTYPE == PTTYPE_EPT
	/*
	 * Use PFERR_RSVD_MASK in error_code to tell if EPT
	 * misconfiguration requires to be injected. The detection is
	 * done by is_rsvd_bits_set() above.
	 *
	 * We set up the value of exit_qualification to inject:
	 * [2:0] - Derive from the access bits. The exit_qualification might be
	 *         out of date if it is serving an EPT misconfiguration.
	 * [5:3] - Calculated by the page walk of the guest EPT page tables
	 * [7:8] - Derived from [7:8] of real exit_qualification
	 *
	 * The other bits are set to 0.
	 */
	if (!(errcode & PFERR_RSVD_MASK)) {
		vcpu->arch.exit_qualification &= (EPT_VIOLATION_GVA_IS_VALID |
						  EPT_VIOLATION_GVA_TRANSLATED);
		if (write_fault)
			vcpu->arch.exit_qualification |= EPT_VIOLATION_ACC_WRITE;
		if (user_fault)
			vcpu->arch.exit_qualification |= EPT_VIOLATION_ACC_READ;
		if (fetch_fault)
			vcpu->arch.exit_qualification |= EPT_VIOLATION_ACC_INSTR;

		/*
		 * Note, pte_access holds the raw RWX bits from the EPTE, not
		 * ACC_*_MASK flags!
		 */
		vcpu->arch.exit_qualification |= (pte_access & VMX_EPT_RWX_MASK) <<
						 EPT_VIOLATION_RWX_SHIFT;
	}
#endif
	walker->fault.address = addr;
	walker->fault.nested_page_fault = mmu != vcpu->arch.walk_mmu;
	walker->fault.async_page_fault = false;

	trace_kvm_mmu_walker_error(walker->fault.error_code);
	return 0;
}
| 0 |
[
"CWE-416"
] |
linux
|
f122dfe4476890d60b8c679128cd2259ec96a24c
| 313,159,861,811,772,040,000,000,000,000,000,000,000 | 208 |
KVM: x86: Use __try_cmpxchg_user() to update guest PTE A/D bits
Use the recently introduced __try_cmpxchg_user() to update guest PTE A/D
bits instead of mapping the PTE into kernel address space. The VM_PFNMAP
path is broken as it assumes that vm_pgoff is the base pfn of the mapped
VMA range, which is conceptually wrong as vm_pgoff is the offset relative
to the file and has nothing to do with the pfn. The horrific hack worked
for the original use case (backing guest memory with /dev/mem), but leads
to accessing "random" pfns for pretty much any other VM_PFNMAP case.
Fixes: bd53cb35a3e9 ("X86/KVM: Handle PFNs outside of kernel reach when touching GPTEs")
Debugged-by: Tadeusz Struk <[email protected]>
Tested-by: Tadeusz Struk <[email protected]>
Reported-by: [email protected]
Cc: [email protected]
Signed-off-by: Sean Christopherson <[email protected]>
Message-Id: <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
|
/* bind(2): copy the user sockaddr into the kernel, run the LSM hook, then
 * dispatch to the protocol's bind op.  Returns 0 or a negative errno. */
SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
{
	struct socket *sock;
	struct sockaddr_storage address;
	int err, fput_needed;

	sock = sockfd_lookup_light(fd, &err, &fput_needed);
	if (!sock)
		return err;

	err = move_addr_to_kernel(umyaddr, addrlen, (struct sockaddr *)&address);
	if (err >= 0) {
		err = security_socket_bind(sock, (struct sockaddr *)&address,
					   addrlen);
		if (!err)
			err = sock->ops->bind(sock,
					      (struct sockaddr *)&address,
					      addrlen);
	}
	fput_light(sock->file, fput_needed);
	return err;
}
| 0 |
[] |
linux-2.6
|
644595f89620ba8446cc555be336d24a34464950
| 100,680,354,279,348,760,000,000,000,000,000,000,000 | 22 |
compat: Handle COMPAT_USE_64BIT_TIME in net/socket.c
Use helper functions aware of COMPAT_USE_64BIT_TIME to write struct
timeval and struct timespec to userspace in net/socket.c.
Signed-off-by: H. Peter Anvin <[email protected]>
|
/* Variant of create_length_to_internal_length() for columns with no data:
   the declared length must already be 0, and both the key length and the
   packed (storage) length are forced to 0. */
void create_length_to_internal_length_null()
{
  DBUG_ASSERT(length == 0);
  key_length= pack_length= 0;
}
| 0 |
[
"CWE-416",
"CWE-703"
] |
server
|
08c7ab404f69d9c4ca6ca7a9cf7eec74c804f917
| 223,700,204,635,508,270,000,000,000,000,000,000,000 | 5 |
MDEV-24176 Server crashes after insert in the table with virtual
column generated using date_format() and if()
vcol_info->expr is allocated on expr_arena at parsing stage. Since
expr item is allocated on expr_arena all its containee items must be
allocated on expr_arena too. Otherwise fix_session_expr() will
encounter prematurely freed item.
When table is reopened from cache vcol_info contains stale
expression. We refresh expression via TABLE::vcol_fix_exprs() but
first we must prepare a proper context (Vcol_expr_context) which meets
some requirements:
1. As noted above expr update must be done on expr_arena as there may
be new items created. It was a bug in fix_session_expr_for_read() and
was just not reproduced because of no second refix. Now refix is done
for more cases so it does reproduce. Tests affected: vcol.binlog
2. Also name resolution context must be narrowed to the single table.
Tested by: vcol.update main.default vcol.vcol_syntax gcol.gcol_bugfixes
3. sql_mode must be clean and not fail expr update.
sql_mode such as MODE_NO_BACKSLASH_ESCAPES, MODE_NO_ZERO_IN_DATE, etc
must not affect vcol expression update. If the table was created
successfully any further evaluation must not fail. Tests affected:
main.func_like
Reviewed by: Sergei Golubchik <[email protected]>
|
//! Convert pixel values from RGB to CMY color spaces.
CImg<T>& RGBtoCMY() {
if (_spectrum!=3)
throw CImgInstanceException(_cimg_instance
"RGBtoCMY(): Instance is not a RGB image.",
cimg_instance);
T *p1 = data(0,0,0,0), *p2 = data(0,0,0,1), *p3 = data(0,0,0,2);
const longT whd = (longT)width()*height()*depth();
cimg_pragma_openmp(parallel for cimg_openmp_if_size(whd,2048))
for (longT N = 0; N<whd; ++N) {
const Tfloat
R = (Tfloat)p1[N],
G = (Tfloat)p2[N],
B = (Tfloat)p3[N],
C = 255 - R,
M = 255 - G,
Y = 255 - B;
p1[N] = (T)cimg::cut(C,0,255),
p2[N] = (T)cimg::cut(M,0,255),
p3[N] = (T)cimg::cut(Y,0,255);
}
return *this;
| 0 |
[
"CWE-119",
"CWE-787"
] |
CImg
|
ac8003393569aba51048c9d67e1491559877b1d1
| 266,396,579,249,694,450,000,000,000,000,000,000,000 | 23 |
.
|
set_sigint_handler ()
{
  /* Respect the recorded disposition for SIGINT, in priority order:
     hard-ignored, ignored, trapped, then the shell's own handler.  */
  if (sigmodes[SIGINT] & SIG_HARD_IGNORE)
    return ((SigHandler *)SIG_IGN);

  else if (sigmodes[SIGINT] & SIG_IGNORED)
    return ((SigHandler *)set_signal_handler (SIGINT, SIG_IGN)); /* XXX */

  else if (sigmodes[SIGINT] & SIG_TRAPPED)
    return ((SigHandler *)set_signal_handler (SIGINT, trap_handler));

  /* The signal is not trapped, so set the handler to the shell's special
     interrupt handler. */
  else if (interactive)	/* XXX - was interactive_shell */
    return (set_signal_handler (SIGINT, sigint_sighandler));
  else
    return (set_signal_handler (SIGINT, termsig_sighandler));
}
| 0 |
[] |
bash
|
955543877583837c85470f7fb8a97b7aa8d45e6c
| 220,071,258,754,061,300,000,000,000,000,000,000,000 | 18 |
bash-4.4-rc2 release
|
// Default-construct a parser with no request object attached yet.
// NOTE(review): request_ is presumably wired up later (e.g. via an Init or
// setter before parsing starts) -- TODO confirm against the class header.
RequestParser::RequestParser() : request_(nullptr) {
}
| 0 |
[
"CWE-22"
] |
webcc
|
55a45fd5039061d5cc62e9f1b9d1f7e97a15143f
| 210,038,562,691,637,730,000,000,000,000,000,000,000 | 2 |
fix static file serving security issue; fix url path encoding issue
|
/*
 * BPF_MAP_CREATE command handler: validate ATTR, allocate a map through its
 * type-specific ops, optionally attach BTF type info, run LSM and ID setup,
 * and hand a new map fd back to userspace.  Returns the fd on success or a
 * negative errno.
 */
static int map_create(union bpf_attr *attr)
{
	int numa_node = bpf_map_attr_numa_node(attr);
	struct bpf_map *map;
	int f_flags;
	int err;

	err = CHECK_ATTR(BPF_MAP_CREATE);
	if (err)
		return -EINVAL;

	/* struct_ops maps carry a vmlinux value type and must not also
	 * specify per-map key/value BTF ids; other maps need a value id
	 * whenever a key id is given. */
	if (attr->btf_vmlinux_value_type_id) {
		if (attr->map_type != BPF_MAP_TYPE_STRUCT_OPS ||
		    attr->btf_key_type_id || attr->btf_value_type_id)
			return -EINVAL;
	} else if (attr->btf_key_type_id && !attr->btf_value_type_id) {
		return -EINVAL;
	}

	/* map_extra is only meaningful for bloom filters. */
	if (attr->map_type != BPF_MAP_TYPE_BLOOM_FILTER &&
	    attr->map_extra != 0)
		return -EINVAL;

	f_flags = bpf_get_file_flag(attr->map_flags);
	if (f_flags < 0)
		return f_flags;

	if (numa_node != NUMA_NO_NODE &&
	    ((unsigned int)numa_node >= nr_node_ids ||
	     !node_online(numa_node)))
		return -EINVAL;

	/* find map type and init map: hashtable vs rbtree vs bloom vs ... */
	map = find_and_alloc_map(attr);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = bpf_obj_name_cpy(map->name, attr->map_name,
			       sizeof(attr->map_name));
	if (err < 0)
		goto free_map;

	atomic64_set(&map->refcnt, 1);
	atomic64_set(&map->usercnt, 1);
	mutex_init(&map->freeze_mutex);

	/* -EINVAL sentinels: offsets filled in by map_check_btf() if the
	 * value type actually contains a spin lock / timer. */
	map->spin_lock_off = -EINVAL;
	map->timer_off = -EINVAL;
	if (attr->btf_key_type_id || attr->btf_value_type_id ||
	    /* Even the map's value is a kernel's struct,
	     * the bpf_prog.o must have BTF to begin with
	     * to figure out the corresponding kernel's
	     * counter part. Thus, attr->btf_fd has
	     * to be valid also.
	     */
	    attr->btf_vmlinux_value_type_id) {
		struct btf *btf;

		btf = btf_get_by_fd(attr->btf_fd);
		if (IS_ERR(btf)) {
			err = PTR_ERR(btf);
			goto free_map;
		}
		/* Kernel (vmlinux/module) BTF cannot be used as a map's BTF. */
		if (btf_is_kernel(btf)) {
			btf_put(btf);
			err = -EACCES;
			goto free_map;
		}
		map->btf = btf;

		if (attr->btf_value_type_id) {
			err = map_check_btf(map, btf, attr->btf_key_type_id,
					    attr->btf_value_type_id);
			if (err)
				goto free_map;
		}

		map->btf_key_type_id = attr->btf_key_type_id;
		map->btf_value_type_id = attr->btf_value_type_id;
		map->btf_vmlinux_value_type_id =
			attr->btf_vmlinux_value_type_id;
	}

	err = security_bpf_map_alloc(map);
	if (err)
		goto free_map;

	/* After this point the map is visible via BPF_MAP_GET_FD_BY_ID, so
	 * failures must use put-with-uref instead of direct free. */
	err = bpf_map_alloc_id(map);
	if (err)
		goto free_map_sec;

	bpf_map_save_memcg(map);

	err = bpf_map_new_fd(map, f_flags);
	if (err < 0) {
		/* failed to allocate fd.
		 * bpf_map_put_with_uref() is needed because the above
		 * bpf_map_alloc_id() has published the map
		 * to the userspace and the userspace may
		 * have refcnt-ed it through BPF_MAP_GET_FD_BY_ID.
		 */
		bpf_map_put_with_uref(map);
		return err;
	}

	return err;

free_map_sec:
	security_bpf_map_free(map);
free_map:
	btf_put(map->btf);
	map->ops->map_free(map);
	return err;
}
| 0 |
[
"CWE-367"
] |
bpf
|
353050be4c19e102178ccc05988101887c25ae53
| 39,824,529,893,475,023,000,000,000,000,000,000,000 | 114 |
bpf: Fix toctou on read-only map's constant scalar tracking
Commit a23740ec43ba ("bpf: Track contents of read-only maps as scalars") is
checking whether maps are read-only both from BPF program side and user space
side, and then, given their content is constant, reading out their data via
map->ops->map_direct_value_addr() which is then subsequently used as known
scalar value for the register, that is, it is marked as __mark_reg_known()
with the read value at verification time. Before a23740ec43ba, the register
content was marked as an unknown scalar so the verifier could not make any
assumptions about the map content.
The current implementation however is prone to a TOCTOU race, meaning, the
value read as known scalar for the register is not guaranteed to be exactly
the same at a later point when the program is executed, and as such, the
prior made assumptions of the verifier with regards to the program will be
invalid which can cause issues such as OOB access, etc.
While the BPF_F_RDONLY_PROG map flag is always fixed and required to be
specified at map creation time, the map->frozen property is initially set to
false for the map given the map value needs to be populated, e.g. for global
data sections. Once complete, the loader "freezes" the map from user space
such that no subsequent updates/deletes are possible anymore. For the rest
of the lifetime of the map, this freeze one-time trigger cannot be undone
anymore after a successful BPF_MAP_FREEZE cmd return. Meaning, any new BPF_*
cmd calls which would update/delete map entries will be rejected with -EPERM
since map_get_sys_perms() removes the FMODE_CAN_WRITE permission. This also
means that pending update/delete map entries must still complete before this
guarantee is given. This corner case is not an issue for loaders since they
create and prepare such program private map in successive steps.
However, a malicious user is able to trigger this TOCTOU race in two different
ways: i) via userfaultfd, and ii) via batched updates. For i) userfaultfd is
used to expand the competition interval, so that map_update_elem() can modify
the contents of the map after map_freeze() and bpf_prog_load() were executed.
This works, because userfaultfd halts the parallel thread which triggered a
map_update_elem() at the time where we copy key/value from the user buffer and
this already passed the FMODE_CAN_WRITE capability test given at that time the
map was not "frozen". Then, the main thread performs the map_freeze() and
bpf_prog_load(), and once that had completed successfully, the other thread
is woken up to complete the pending map_update_elem() which then changes the
map content. For ii) the idea of the batched update is similar, meaning, when
there are a large number of updates to be processed, it can increase the
competition interval between the two. It is therefore possible in practice to
modify the contents of the map after executing map_freeze() and bpf_prog_load().
One way to fix both i) and ii) at the same time is to expand the use of the
map's map->writecnt. The latter was introduced in fc9702273e2e ("bpf: Add mmap()
support for BPF_MAP_TYPE_ARRAY") and further refined in 1f6cb19be2e2 ("bpf:
Prevent re-mmap()'ing BPF map as writable for initially r/o mapping") with
the rationale to make a writable mmap()'ing of a map mutually exclusive with
read-only freezing. The counter indicates writable mmap() mappings and then
prevents/fails the freeze operation. Its semantics can be expanded beyond
just mmap() by generally indicating ongoing write phases. This would essentially
span any parallel regular and batched flavor of update/delete operation and
then also have map_freeze() fail with -EBUSY. For the check_mem_access() in
the verifier we expand upon the bpf_map_is_rdonly() check ensuring that all
last pending writes have completed via bpf_map_write_active() test. Once the
map->frozen is set and bpf_map_write_active() indicates a map->writecnt of 0
only then we are really guaranteed to use the map's data as known constants.
For map->frozen being set and pending writes in process of still being completed
we fall back to marking that register as unknown scalar so we don't end up
making assumptions about it. With this, both TOCTOU reproducers from i) and
ii) are fixed.
Note that the map->writecnt has been converted into a atomic64 in the fix in
order to avoid a double freeze_mutex mutex_{un,}lock() pair when updating
map->writecnt in the various map update/delete BPF_* cmd flavors. Spanning
the freeze_mutex over entire map update/delete operations in syscall side
would not be possible due to then causing everything to be serialized.
Similarly, something like synchronize_rcu() after setting map->frozen to wait
for update/deletes to complete is not possible either since it would also
have to span the user copy which can sleep. On the libbpf side, this won't
break d66562fba1ce ("libbpf: Add BPF object skeleton support") as the
anonymous mmap()-ed "map initialization image" is remapped as a BPF map-backed
mmap()-ed memory where for .rodata it's non-writable.
Fixes: a23740ec43ba ("bpf: Track contents of read-only maps as scalars")
Reported-by: [email protected]
Signed-off-by: Daniel Borkmann <[email protected]>
Acked-by: Andrii Nakryiko <[email protected]>
Signed-off-by: Alexei Starovoitov <[email protected]>
|
/* Input-handler match callback: accept devices that can produce sound
 * events, or key devices with at least one ordinary key (below BTN_MISC)
 * or braille-dot key. */
static bool kbd_match(struct input_handler *handler, struct input_dev *dev)
{
	int i;

	if (test_bit(EV_SND, dev->evbit))
		return true;

	if (!test_bit(EV_KEY, dev->evbit))
		return false;

	for (i = KEY_RESERVED; i < BTN_MISC; i++)
		if (test_bit(i, dev->keybit))
			return true;
	for (i = KEY_BRL_DOT1; i <= KEY_BRL_DOT10; i++)
		if (test_bit(i, dev->keybit))
			return true;

	return false;
}
| 0 |
[
"CWE-416"
] |
linux
|
6ca03f90527e499dd5e32d6522909e2ad390896b
| 31,316,148,261,356,230,000,000,000,000,000,000,000 | 18 |
vt: keyboard, simplify vt_kdgkbsent
Use 'strlen' of the string, add one for NUL terminator and simply do
'copy_to_user' instead of the explicit 'for' loop. This makes the
KDGKBSENT case more compact.
The only thing we need to take care about is NULL 'func_table[i]'. Use
an empty string in that case.
The original check for overflow could never trigger as the func_buf
strings are always shorter or equal to 'struct kbsentry's.
Cc: <[email protected]>
Signed-off-by: Jiri Slaby <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Greg Kroah-Hartman <[email protected]>
|
/* Dissect a Kafka "compact"-style string: a zigzag varint length followed
 * by that many bytes of UTF-8 payload.  Length -1 encodes a null string.
 * Optionally reports the payload's offset and length via P_STRING_OFFSET /
 * P_STRING_LENGTH, and returns the offset just past the field. */
dissect_kafka_string_new(tvbuff_t *tvb, packet_info *pinfo _U_, proto_tree *tree, int hf_item, int offset, int *p_string_offset, int *p_string_length)
{
    gint64 val;
    guint len;
    proto_item *pi;

    len = tvb_get_varint(tvb, offset, 5, &val, ENC_VARINT_ZIGZAG);

    if (len == 0) {
        /* Varint could not be decoded: flag it, consume the maximum varint
         * width (5 bytes) to keep the dissector moving, treat as empty. */
        pi = proto_tree_add_string_format_value(tree, hf_item, tvb, offset+len, 0, NULL, "<INVALID>");
        expert_add_info(pinfo, pi, &ei_kafka_bad_varint);
        len = 5;
        val = 0;
    } else if (val > 0) {
        // there is payload available, possibly with 0 octets
        proto_tree_add_item(tree, hf_item, tvb, offset+len, (gint)val, ENC_UTF_8);
    } else if (val == 0) {
        // there is empty payload (0 octets)
        proto_tree_add_string_format_value(tree, hf_item, tvb, offset+len, 0, NULL, "<EMPTY>");
    } else if (val == -1) {
        // there is no payload (null)
        proto_tree_add_string_format_value(tree, hf_item, tvb, offset+len, 0, NULL, "<NULL>");
        val = 0;
    } else {
        /* Any other negative length is malformed. */
        pi = proto_tree_add_string_format_value(tree, hf_item, tvb, offset+len, 0, NULL, "<INVALID>");
        expert_add_info(pinfo, pi, &ei_kafka_bad_string_length);
        val = 0;
    }

    if (p_string_offset != NULL) {
        *p_string_offset = offset+len;
    }
    if (p_string_length != NULL) {
        *p_string_length = (gint)val;
    }

    return offset+len+(gint)val;
}
| 0 |
[
"CWE-401"
] |
wireshark
|
f4374967bbf9c12746b8ec3cd54dddada9dd353e
| 42,766,368,779,534,260,000,000,000,000,000,000,000 | 38 |
Kafka: Limit our decompression size.
Don't assume that the Internet has our best interests at heart when it
gives us the size of our decompression buffer. Assign an arbitrary limit
of 50 MB.
This fixes #16739 in that it takes care of
** (process:17681): WARNING **: 20:03:07.440: Dissector bug, protocol Kafka, in packet 31: ../epan/proto.c:7043: failed assertion "end >= fi->start"
which is different from the original error output. It looks like *that*
might have taken care of in one of the other recent Kafka bug fixes.
The decompression routines return a success or failure status. Use
gbooleans instead of ints for that.
|
/* Resolve a portcon statement's security context: either look up a named
 * context by its symbol, or resolve an in-line (anonymous) context.
 * Returns SEPOL_OK on success or the underlying resolver's error code. */
int cil_resolve_portcon(struct cil_tree_node *current, void *extra_args)
{
	struct cil_portcon *portcon = current->data;
	struct cil_symtab_datum *context_datum = NULL;
	int rc;

	if (portcon->context_str != NULL) {
		/* Named context: resolve the symbol and cache the pointer. */
		rc = cil_resolve_name(current, portcon->context_str, CIL_SYM_CONTEXTS, extra_args, &context_datum);
		if (rc != SEPOL_OK) {
			return rc;
		}
		portcon->context = (struct cil_context*)context_datum;
		return SEPOL_OK;
	}

	/* Anonymous context: resolve its components in place. */
	rc = cil_resolve_context(current, portcon->context, extra_args);
	if (rc != SEPOL_OK) {
		return rc;
	}
	return SEPOL_OK;
}
| 0 |
[
"CWE-125"
] |
selinux
|
340f0eb7f3673e8aacaf0a96cbfcd4d12a405521
| 254,362,680,320,195,700,000,000,000,000,000,000,000 | 24 |
libsepol/cil: Check for statements not allowed in optional blocks
While there are some checks for invalid statements in an optional
block when resolving the AST, there are no checks when building the
AST.
OSS-Fuzz found the following policy which caused a null dereference
in cil_tree_get_next_path().
(blockinherit b3)
(sid SID)
(sidorder(SID))
(optional o
(ibpkeycon :(1 0)s)
(block b3
(filecon""block())
(filecon""block())))
The problem is that the blockinherit copies block b3 before
the optional block is disabled. When the optional is disabled,
block b3 is deleted along with everything else in the optional.
Later, when filecon statements with the same path are found an
error message is produced and in trying to find out where the block
was copied from, the reference to the deleted block is used. The
error handling code assumes (rightly) that if something was copied
from a block then that block should still exist.
It is clear that in-statements, blocks, and macros cannot be in an
optional, because that allows nodes to be copied from the optional
block to somewhere outside even though the optional could be disabled
later. When optionals are disabled the AST is reset and the
resolution is restarted at the point of resolving macro calls, so
anything resolved before macro calls will never be re-resolved.
This includes tunableifs, in-statements, blockinherits,
blockabstracts, and macro definitions. Tunable declarations also
cannot be in an optional block because they are needed to resolve
tunableifs. It should be fine to allow blockinherit statements in
an optional, because that is copying nodes from outside the optional
to the optional and if the optional is later disabled, everything
will be deleted anyway.
Check and quit with an error if a tunable declaration, in-statement,
block, blockabstract, or macro definition is found within an
optional when either building or resolving the AST.
Signed-off-by: James Carter <[email protected]>
|
/* Return nonzero while CACHE still points inside tp->recv_sack_cache[];
 * used as the bound when iterating over the cached SACK blocks. */
static int tcp_sack_cache_ok(const struct tcp_sock *tp, const struct tcp_sack_block *cache)
{
	return cache < tp->recv_sack_cache + ARRAY_SIZE(tp->recv_sack_cache);
}
| 0 |
[] |
net-next
|
fdf5af0daf8019cec2396cdef8fb042d80fe71fa
| 10,458,078,271,409,826,000,000,000,000,000,000,000 | 4 |
tcp: drop SYN+FIN messages
Denys Fedoryshchenko reported that SYN+FIN attacks were bringing his
linux machines to their limits.
Dont call conn_request() if the TCP flags includes SYN flag
Reported-by: Denys Fedoryshchenko <[email protected]>
Signed-off-by: Eric Dumazet <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
/* Fold a dying child event's counts and times back into its parent event,
 * unlink it from the parent's child list, and drop the reference the child
 * held on the parent. */
static void sync_child_event(struct perf_event *child_event,
			       struct task_struct *child)
{
	struct perf_event *parent_event = child_event->parent;
	u64 child_val;

	if (child_event->attr.inherit_stat)
		perf_event_read_event(child_event, child);

	child_val = perf_event_count(child_event);

	/*
	 * Add back the child's count to the parent's count:
	 */
	atomic64_add(child_val, &parent_event->child_count);
	atomic64_add(child_event->total_time_enabled,
		     &parent_event->child_total_time_enabled);
	atomic64_add(child_event->total_time_running,
		     &parent_event->child_total_time_running);

	/*
	 * Remove this event from the parent's list
	 */
	WARN_ON_ONCE(parent_event->ctx->parent_ctx);
	mutex_lock(&parent_event->child_mutex);
	list_del_init(&child_event->child_list);
	mutex_unlock(&parent_event->child_mutex);

	/*
	 * Release the parent event, if this was the last
	 * reference to it.
	 */
	put_event(parent_event);
}
| 0 |
[
"CWE-703",
"CWE-189"
] |
linux
|
8176cced706b5e5d15887584150764894e94e02f
| 294,720,815,319,598,500,000,000,000,000,000,000,000 | 34 |
perf: Treat attr.config as u64 in perf_swevent_init()
Trinity discovered that we fail to check all 64 bits of
attr.config passed by user space, resulting to out-of-bounds
access of the perf_swevent_enabled array in
sw_perf_event_destroy().
Introduced in commit b0a873ebb ("perf: Register PMU
implementations").
Signed-off-by: Tommi Rantala <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: [email protected]
Cc: Paul Mackerras <[email protected]>
Cc: Arnaldo Carvalho de Melo <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
|
/* Invoke the device class's optional handle_attach hook, if one is set. */
void usb_device_handle_attach(USBDevice *dev)
{
    USBDeviceClass *klass = USB_DEVICE_GET_CLASS(dev);
    if (klass->handle_attach) {
        klass->handle_attach(dev);
    }
}
| 0 |
[
"CWE-119"
] |
qemu
|
9f8e9895c504149d7048e9fc5eb5cbb34b16e49a
| 244,323,472,503,348,300,000,000,000,000,000,000,000 | 7 |
usb: sanity check setup_index+setup_len in post_load
CVE-2013-4541
s->setup_len and s->setup_index are fed into usb_packet_copy as
size/offset into s->data_buf, it's possible for invalid state to exploit
this to load arbitrary data.
setup_len and setup_index should be checked to make sure
they are not negative.
Cc: Gerd Hoffmann <[email protected]>
Signed-off-by: Michael S. Tsirkin <[email protected]>
Reviewed-by: Gerd Hoffmann <[email protected]>
Signed-off-by: Juan Quintela <[email protected]>
|
static inline char *alloc_secdata(void)
{
return (char *)get_zeroed_page(GFP_KERNEL);
}
| 0 |
[] |
linux-2.6
|
ee18d64c1f632043a02e6f5ba5e045bb26a5465f
| 235,608,650,024,836,700,000,000,000,000,000,000,000 | 4 |
KEYS: Add a keyctl to install a process's session keyring on its parent [try #6]
Add a keyctl to install a process's session keyring onto its parent. This
replaces the parent's session keyring. Because the COW credential code does
not permit one process to change another process's credentials directly, the
change is deferred until userspace next starts executing again. Normally this
will be after a wait*() syscall.
To support this, three new security hooks have been provided:
cred_alloc_blank() to allocate unset security creds, cred_transfer() to fill in
the blank security creds and key_session_to_parent() - which asks the LSM if
the process may replace its parent's session keyring.
The replacement may only happen if the process has the same ownership details
as its parent, and the process has LINK permission on the session keyring, and
the session keyring is owned by the process, and the LSM permits it.
Note that this requires alteration to each architecture's notify_resume path.
This has been done for all arches barring blackfin, m68k* and xtensa, all of
which need assembly alteration to support TIF_NOTIFY_RESUME. This allows the
replacement to be performed at the point the parent process resumes userspace
execution.
This allows the userspace AFS pioctl emulation to fully emulate newpag() and
the VIOCSETTOK and VIOCSETTOK2 pioctls, all of which require the ability to
alter the parent process's PAG membership. However, since kAFS doesn't use
PAGs per se, but rather dumps the keys into the session keyring, the session
keyring of the parent must be replaced if, for example, VIOCSETTOK is passed
the newpag flag.
This can be tested with the following program:
#include <stdio.h>
#include <stdlib.h>
#include <keyutils.h>
#define KEYCTL_SESSION_TO_PARENT 18
#define OSERROR(X, S) do { if ((long)(X) == -1) { perror(S); exit(1); } } while(0)
int main(int argc, char **argv)
{
key_serial_t keyring, key;
long ret;
keyring = keyctl_join_session_keyring(argv[1]);
OSERROR(keyring, "keyctl_join_session_keyring");
key = add_key("user", "a", "b", 1, keyring);
OSERROR(key, "add_key");
ret = keyctl(KEYCTL_SESSION_TO_PARENT);
OSERROR(ret, "KEYCTL_SESSION_TO_PARENT");
return 0;
}
Compiled and linked with -lkeyutils, you should see something like:
[dhowells@andromeda ~]$ keyctl show
Session Keyring
-3 --alswrv 4043 4043 keyring: _ses
355907932 --alswrv 4043 -1 \_ keyring: _uid.4043
[dhowells@andromeda ~]$ /tmp/newpag
[dhowells@andromeda ~]$ keyctl show
Session Keyring
-3 --alswrv 4043 4043 keyring: _ses
1055658746 --alswrv 4043 4043 \_ user: a
[dhowells@andromeda ~]$ /tmp/newpag hello
[dhowells@andromeda ~]$ keyctl show
Session Keyring
-3 --alswrv 4043 4043 keyring: hello
340417692 --alswrv 4043 4043 \_ user: a
Where the test program creates a new session keyring, sticks a user key named
'a' into it and then installs it on its parent.
Signed-off-by: David Howells <[email protected]>
Signed-off-by: James Morris <[email protected]>
|
mrb_yield_with_class(mrb_state *mrb, mrb_value b, mrb_int argc, const mrb_value *argv, mrb_value self, struct RClass *c)
{
struct RProc *p;
mrb_sym mid = mrb->c->ci->mid;
mrb_callinfo *ci;
mrb_value val;
mrb_int n;
check_block(mrb, b);
ci = mrb->c->ci;
n = mrb_ci_nregs(ci);
p = mrb_proc_ptr(b);
ci = cipush(mrb, n, CINFO_SKIP, c, p, mid, 0 /* dummy */);
ci->nk = 0;
if (argc >= CALL_MAXARGS) {
ci->n = 15;
n = 3;
}
else {
ci->n = argc;
n = argc + 2;
}
mrb_stack_extend(mrb, n);
mrb->c->ci->stack[0] = self;
if (ci->n == 15) {
mrb->c->ci->stack[1] = mrb_ary_new_from_values(mrb, argc, argv);
argc = 1;
}
else if (argc > 0) {
stack_copy(mrb->c->ci->stack+1, argv, argc);
}
mrb->c->ci->stack[argc+1] = mrb_nil_value(); /* clear blk */
if (MRB_PROC_CFUNC_P(p)) {
ci->cci = CINFO_DIRECT;
val = MRB_PROC_CFUNC(p)(mrb, self);
cipop(mrb);
}
else {
val = mrb_run(mrb, p, self);
}
return val;
}
| 0 |
[
"CWE-416",
"CWE-787"
] |
mruby
|
aaa28a508903041dd7399d4159a8ace9766b022f
| 312,833,174,662,784,700,000,000,000,000,000,000,000 | 43 |
vm.c: stack may be reallocated in functions calls.
Probably due to recursive VM calls via `mrb_funcall()`.
|
mono_image_load_module_dynamic (MonoReflectionAssemblyBuilder *ab, MonoString *fileName)
{
char *name;
MonoImage *image;
MonoImageOpenStatus status;
MonoDynamicAssembly *assembly;
guint32 module_count;
MonoImage **new_modules;
gboolean *new_modules_loaded;
name = mono_string_to_utf8 (fileName);
image = mono_image_open (name, &status);
if (!image) {
MonoException *exc;
if (status == MONO_IMAGE_ERROR_ERRNO)
exc = mono_get_exception_file_not_found (fileName);
else
exc = mono_get_exception_bad_image_format (name);
g_free (name);
mono_raise_exception (exc);
}
g_free (name);
assembly = ab->dynamic_assembly;
image->assembly = (MonoAssembly*)assembly;
module_count = image->assembly->image->module_count;
new_modules = g_new0 (MonoImage *, module_count + 1);
new_modules_loaded = g_new0 (gboolean, module_count + 1);
if (image->assembly->image->modules)
memcpy (new_modules, image->assembly->image->modules, module_count * sizeof (MonoImage *));
if (image->assembly->image->modules_loaded)
memcpy (new_modules_loaded, image->assembly->image->modules_loaded, module_count * sizeof (gboolean));
new_modules [module_count] = image;
new_modules_loaded [module_count] = TRUE;
mono_image_addref (image);
g_free (image->assembly->image->modules);
image->assembly->image->modules = new_modules;
image->assembly->image->modules_loaded = new_modules_loaded;
image->assembly->image->module_count ++;
mono_assembly_load_references (image, &status);
if (status) {
mono_image_close (image);
mono_raise_exception (mono_get_exception_file_not_found (fileName));
}
return mono_module_get_object (mono_domain_get (), image);
}
| 0 |
[
"CWE-20"
] |
mono
|
4905ef1130feb26c3150b28b97e4a96752e0d399
| 251,125,416,184,071,060,000,000,000,000,000,000,000 | 53 |
Handle invalid instantiation of generic methods.
* verify.c: Add new function to internal verifier API to check
method instantiations.
* reflection.c (mono_reflection_bind_generic_method_parameters):
Check the instantiation before returning it.
Fixes #655847
|
static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info)
{
u64 chunk_offset;
u64 sys_chunk_offset;
u64 alloc_profile;
int ret;
chunk_offset = find_next_chunk(fs_info);
alloc_profile = btrfs_metadata_alloc_profile(fs_info);
ret = __btrfs_alloc_chunk(trans, chunk_offset, alloc_profile);
if (ret)
return ret;
sys_chunk_offset = find_next_chunk(fs_info);
alloc_profile = btrfs_system_alloc_profile(fs_info);
ret = __btrfs_alloc_chunk(trans, sys_chunk_offset, alloc_profile);
return ret;
}
| 0 |
[
"CWE-476",
"CWE-284"
] |
linux
|
09ba3bc9dd150457c506e4661380a6183af651c1
| 247,313,597,408,530,600,000,000,000,000,000,000,000 | 19 |
btrfs: merge btrfs_find_device and find_device
Both btrfs_find_device() and find_device() does the same thing except
that the latter does not take the seed device onto account in the device
scanning context. We can merge them.
Signed-off-by: Anand Jain <[email protected]>
Reviewed-by: David Sterba <[email protected]>
Signed-off-by: David Sterba <[email protected]>
|
zfont_global_glyph_code(const gs_memory_t *mem, gs_const_string *gstr, gs_glyph *pglyph)
{
ref v;
int code = name_ref(mem, gstr->data, gstr->size, &v, 0);
if (code < 0)
return code;
*pglyph = (gs_glyph)name_index(mem, &v);
return 0;
}
| 0 |
[
"CWE-704"
] |
ghostpdl
|
548bb434e81dadcc9f71adf891a3ef5bea8e2b4e
| 153,868,075,304,476,300,000,000,000,000,000,000,000 | 10 |
PS interpreter - add some type checking
These were 'probably' safe anyway, since they mostly treat the objects
as integers without checking, which at least can't result in a crash.
Nevertheless, we ought to check.
The return from comparedictkeys could be wrong if one of the keys had
a value which was not an array, it could incorrectly decide the two
were in fact the same.
|
mysql_ping(MYSQL *mysql)
{
int res;
DBUG_ENTER("mysql_ping");
res= simple_command(mysql,COM_PING,0,0,0);
if (res == CR_SERVER_LOST && mysql->reconnect)
res= simple_command(mysql,COM_PING,0,0,0);
DBUG_RETURN(res);
}
| 0 |
[] |
mysql-server
|
3d8134d2c9b74bc8883ffe2ef59c168361223837
| 123,930,945,900,362,860,000,000,000,000,000,000,000 | 9 |
Bug#25988681: USE-AFTER-FREE IN MYSQL_STMT_CLOSE()
Description: If mysql_stmt_close() encountered error,
it recorded error in prepared statement
but then frees memory assigned to prepared
statement. If mysql_stmt_error() is used
to get error information, it will result
into use after free.
In all cases where mysql_stmt_close() can
fail, error would have been set by
cli_advanced_command in MYSQL structure.
Solution: Don't copy error from MYSQL using set_stmt_errmsg.
There is no automated way to test the fix since
it is in mysql_stmt_close() which does not expect
any reply from server.
Reviewed-By: Georgi Kodinov <[email protected]>
Reviewed-By: Ramil Kalimullin <[email protected]>
|
long do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
{
/* This is only valid for single tasks */
if (pid <= 0 || tgid <= 0)
return -EINVAL;
/* Not even root can pretend to send signals from the kernel.
* Nor can they impersonate a kill()/tgkill(), which adds source info.
*/
if (info->si_code != SI_QUEUE) {
/* We used to allow any < 0 si_code */
WARN_ON_ONCE(info->si_code < 0);
return -EPERM;
}
info->si_signo = sig;
return do_send_specific(tgid, pid, sig, info);
}
| 1 |
[] |
linux-2.6
|
243b422af9ea9af4ead07a8ad54c90d4f9b6081a
| 283,021,141,878,868,740,000,000,000,000,000,000,000 | 18 |
Relax si_code check in rt_sigqueueinfo and rt_tgsigqueueinfo
Commit da48524eb206 ("Prevent rt_sigqueueinfo and rt_tgsigqueueinfo
from spoofing the signal code") made the check on si_code too strict.
There are several legitimate places where glibc wants to queue a
negative si_code different from SI_QUEUE:
- This was first noticed with glibc's aio implementation, which wants
to queue a signal with si_code SI_ASYNCIO; the current kernel
causes glibc's tst-aio4 test to fail because rt_sigqueueinfo()
fails with EPERM.
- Further examination of the glibc source shows that getaddrinfo_a()
wants to use SI_ASYNCNL (which the kernel does not even define).
The timer_create() fallback code wants to queue signals with SI_TIMER.
As suggested by Oleg Nesterov <[email protected]>, loosen the check to
forbid only the problematic SI_TKILL case.
Reported-by: Klaus Dittrich <[email protected]>
Acked-by: Julien Tinnes <[email protected]>
Cc: <[email protected]>
Signed-off-by: Roland Dreier <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
getUint8(uint8_t* buffer)
{
return buffer[0];
}
| 0 |
[
"CWE-122"
] |
libiec61850
|
033ab5b6488250c8c3b838f25a7cbc3e099230bb
| 334,908,515,751,420,370,000,000,000,000,000,000,000 | 4 |
- COTP: fixed possible heap buffer overflow when handling message with invalid (zero) value in length field (#250)
|
sasl_authxid_can_login(struct sasl_session *const restrict p, const char *const restrict authxid,
struct myuser **const restrict muo, char *const restrict val_name,
char *const restrict val_eid, const char *const restrict other_val_eid)
{
return_val_if_fail(p != NULL, false);
return_val_if_fail(p->si != NULL, false);
return_val_if_fail(p->mechptr != NULL, false);
struct myuser *const mu = myuser_find_by_nick(authxid);
if (! mu)
{
(void) slog(LG_DEBUG, "%s: myuser_find_by_nick: does not exist", MOWGLI_FUNC_NAME);
return false;
}
if (metadata_find(mu, "private:freeze:freezer"))
{
(void) logcommand(p->si, CMDLOG_LOGIN, "failed LOGIN to \2%s\2 (frozen)", entity(mu)->name);
return false;
}
if (muo)
*muo = mu;
(void) mowgli_strlcpy(val_name, entity(mu)->name, NICKLEN + 1);
(void) mowgli_strlcpy(val_eid, entity(mu)->id, IDLEN + 1);
if (p->mechptr->password_based && (mu->flags & MU_NOPASSWORD))
{
(void) logcommand(p->si, CMDLOG_LOGIN, "failed LOGIN %s to \2%s\2 (password authentication disabled)",
p->mechptr->name, entity(mu)->name);
return false;
}
if (strcmp(val_eid, other_val_eid) == 0)
// We have already executed the user_can_login hook for this user
return true;
struct hook_user_login_check req = {
.si = p->si,
.mu = mu,
.allowed = true,
};
(void) hook_call_user_can_login(&req);
if (! req.allowed)
(void) logcommand(p->si, CMDLOG_LOGIN, "failed LOGIN to \2%s\2 (denied by hook)", entity(mu)->name);
return req.allowed;
}
| 0 |
[
"CWE-287",
"CWE-288"
] |
atheme
|
4e664c75d0b280a052eb8b5e81aa41944e593c52
| 230,184,601,148,723,400,000,000,000,000,000,000,000 | 53 |
saslserv/main: Track EID we're pending login to
The existing model does not remember that we've sent a SVSLOGIN for a
given SASL session, and simply assumes that if a client is introduced
with a SASL session open, that session must have succeeded. The security
of this approach requires ircd to implicitly abort SASL sessions on
client registration.
This also means that if a client successfully authenticates and then
does something else its pending login is forgotten about, even though a
SVSLOGIN has been sent for it, and the ircd is going to think it's
logged in.
This change removes the dependency on ircd's state machine by keeping
explicit track of the pending login, i.e. the one we've most recently
sent a SVSLOGIN for. The next commit will ensure that a client abort
(even an implicit one) doesn't blow that information away.
|
void t_go_generator::generate_deserialize_container(ofstream& out,
t_type* orig_type,
bool pointer_field,
bool declare,
string prefix) {
t_type* ttype = get_true_type(orig_type);
string eq(" = ");
if (declare) {
eq = " := ";
}
// Declare variables, read header
if (ttype->is_map()) {
out << indent() << "_, _, size, err := iprot.ReadMapBegin()" << endl;
out << indent() << "if err != nil {" << endl;
out << indent() << " return thrift.PrependError(\"error reading map begin: \", err)" << endl;
out << indent() << "}" << endl;
out << indent() << "tMap := make(" << type_to_go_type(orig_type) << ", size)" << endl;
out << indent() << prefix << eq << " " << (pointer_field ? "&" : "") << "tMap" << endl;
} else if (ttype->is_set()) {
t_set* t = (t_set*)ttype;
out << indent() << "_, size, err := iprot.ReadSetBegin()" << endl;
out << indent() << "if err != nil {" << endl;
out << indent() << " return thrift.PrependError(\"error reading set begin: \", err)" << endl;
out << indent() << "}" << endl;
out << indent() << "tSet := make(map["
<< type_to_go_key_type(t->get_elem_type()->get_true_type()) << "]struct{}, size)" << endl;
out << indent() << prefix << eq << " " << (pointer_field ? "&" : "") << "tSet" << endl;
} else if (ttype->is_list()) {
out << indent() << "_, size, err := iprot.ReadListBegin()" << endl;
out << indent() << "if err != nil {" << endl;
out << indent() << " return thrift.PrependError(\"error reading list begin: \", err)" << endl;
out << indent() << "}" << endl;
out << indent() << "tSlice := make(" << type_to_go_type(orig_type) << ", 0, size)" << endl;
out << indent() << prefix << eq << " " << (pointer_field ? "&" : "") << "tSlice" << endl;
} else {
throw "INVALID TYPE IN generate_deserialize_container '" + ttype->get_name() + "' for prefix '"
+ prefix + "'";
}
// For loop iterates over elements
out << indent() << "for i := 0; i < size; i ++ {" << endl;
indent_up();
if (pointer_field) {
prefix = "(*" + prefix + ")";
}
if (ttype->is_map()) {
generate_deserialize_map_element(out, (t_map*)ttype, declare, prefix);
} else if (ttype->is_set()) {
generate_deserialize_set_element(out, (t_set*)ttype, declare, prefix);
} else if (ttype->is_list()) {
generate_deserialize_list_element(out, (t_list*)ttype, declare, prefix);
}
indent_down();
out << indent() << "}" << endl;
// Read container end
if (ttype->is_map()) {
out << indent() << "if err := iprot.ReadMapEnd(); err != nil {" << endl;
out << indent() << " return thrift.PrependError(\"error reading map end: \", err)" << endl;
out << indent() << "}" << endl;
} else if (ttype->is_set()) {
out << indent() << "if err := iprot.ReadSetEnd(); err != nil {" << endl;
out << indent() << " return thrift.PrependError(\"error reading set end: \", err)" << endl;
out << indent() << "}" << endl;
} else if (ttype->is_list()) {
out << indent() << "if err := iprot.ReadListEnd(); err != nil {" << endl;
out << indent() << " return thrift.PrependError(\"error reading list end: \", err)" << endl;
out << indent() << "}" << endl;
}
}
| 0 |
[
"CWE-77"
] |
thrift
|
2007783e874d524a46b818598a45078448ecc53e
| 16,221,944,225,506,617,000,000,000,000,000,000,000 | 74 |
THRIFT-3893 Command injection in format_go_output
Client: Go
Patch: Jens Geyer
|
static CURLcode ossl_connect_step3(struct Curl_easy *data,
struct connectdata *conn, int sockindex)
{
CURLcode result = CURLE_OK;
struct ssl_connect_data *connssl = &conn->ssl[sockindex];
DEBUGASSERT(ssl_connect_3 == connssl->connecting_state);
/*
* We check certificates to authenticate the server; otherwise we risk
* man-in-the-middle attack; NEVERTHELESS, if we're told explicitly not to
* verify the peer, ignore faults and failures from the server cert
* operations.
*/
result = servercert(data, conn, connssl, (SSL_CONN_CONFIG(verifypeer) ||
SSL_CONN_CONFIG(verifyhost)));
if(!result)
connssl->connecting_state = ssl_connect_done;
return result;
}
| 0 |
[] |
curl
|
139a54ed0a172adaaf1a78d6f4fff50b2c3f9e08
| 126,271,105,218,148,050,000,000,000,000,000,000,000 | 23 |
openssl: don't leak the SRP credentials in redirects either
Follow-up to 620ea21410030
Reported-by: Harry Sintonen
Closes #8751
|
static void migrate_dead(unsigned int dead_cpu, struct task_struct *p)
{
struct rq *rq = cpu_rq(dead_cpu);
/* Must be exiting, otherwise would be on tasklist. */
BUG_ON(!p->exit_state);
/* Cannot have done final schedule yet: would have vanished. */
BUG_ON(p->state == TASK_DEAD);
get_task_struct(p);
/*
* Drop lock around migration; if someone else moves it,
* that's OK. No task can be added to this CPU, so iteration is
* fine.
*/
raw_spin_unlock_irq(&rq->lock);
move_task_off_dead_cpu(dead_cpu, p);
raw_spin_lock_irq(&rq->lock);
put_task_struct(p);
}
| 0 |
[
"CWE-703",
"CWE-835"
] |
linux
|
f26f9aff6aaf67e9a430d16c266f91b13a5bff64
| 246,223,990,373,990,420,000,000,000,000,000,000,000 | 23 |
Sched: fix skip_clock_update optimization
idle_balance() drops/retakes rq->lock, leaving the previous task
vulnerable to set_tsk_need_resched(). Clear it after we return
from balancing instead, and in setup_thread_stack() as well, so
no successfully descheduled or never scheduled task has it set.
Need resched confused the skip_clock_update logic, which assumes
that the next call to update_rq_clock() will come nearly immediately
after being set. Make the optimization robust against the waking
a sleeper before it sucessfully deschedules case by checking that
the current task has not been dequeued before setting the flag,
since it is that useless clock update we're trying to save, and
clear unconditionally in schedule() proper instead of conditionally
in put_prev_task().
Signed-off-by: Mike Galbraith <[email protected]>
Reported-by: Bjoern B. Brandenburg <[email protected]>
Tested-by: Yong Zhang <[email protected]>
Signed-off-by: Peter Zijlstra <[email protected]>
Cc: [email protected]
LKML-Reference: <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
|
static void naludmx_queue_param_set(GF_NALUDmxCtx *ctx, char *data, u32 size, u32 ps_type, s32 ps_id)
{
GF_List *list = NULL, *alt_list = NULL;
GF_NALUFFParam *sl;
u32 i, count, crc;
if (!size) return;
crc = gf_crc_32(data, size);
if (ctx->codecid==GF_CODECID_HEVC) {
switch (ps_type) {
case GF_HEVC_NALU_VID_PARAM:
if (!ctx->vps) ctx->vps = gf_list_new();
list = ctx->vps;
break;
case GF_HEVC_NALU_SEQ_PARAM:
list = ctx->sps;
break;
case GF_HEVC_NALU_PIC_PARAM:
list = ctx->pps;
break;
default:
assert(0);
return;
}
} else if (ctx->codecid==GF_CODECID_VVC) {
switch (ps_type) {
case GF_VVC_NALU_VID_PARAM:
if (!ctx->vps) ctx->vps = gf_list_new();
list = ctx->vps;
break;
case GF_VVC_NALU_SEQ_PARAM:
list = ctx->sps;
break;
case GF_VVC_NALU_PIC_PARAM:
list = ctx->pps;
break;
case GF_VVC_NALU_DEC_PARAM:
if (!ctx->vvc_dci) ctx->vvc_dci = gf_list_new();
list = ctx->vvc_dci;
break;
case GF_VVC_NALU_APS_PREFIX:
if (!ctx->vvc_aps_pre) ctx->vvc_aps_pre = gf_list_new();
list = ctx->vvc_aps_pre;
break;
default:
assert(0);
return;
}
} else {
switch (ps_type) {
case GF_AVC_NALU_SVC_SUBSEQ_PARAM:
case GF_AVC_NALU_SEQ_PARAM:
list = ctx->sps;
break;
case GF_AVC_NALU_PIC_PARAM:
list = ctx->pps;
alt_list = ctx->pps_svc;
break;
case GF_AVC_NALU_SEQ_PARAM_EXT:
if (!ctx->sps_ext) ctx->sps_ext = gf_list_new();
list = ctx->sps_ext;
break;
default:
assert(0);
return;
}
}
sl = NULL;
count = gf_list_count(list);
for (i=0; i<count; i++) {
sl = gf_list_get(list, i);
if (sl->id != ps_id) {
sl = NULL;
continue;
}
//same ID, same CRC, we don't change our state
if (sl->crc == crc) return;
break;
}
//handle alt PPS list for SVC
if (!sl && alt_list) {
count = gf_list_count(alt_list);
for (i=0; i<count; i++) {
sl = gf_list_get(alt_list, i);
if (sl->id != ps_id) {
sl = NULL;
continue;
}
//same ID, same CRC, we don't change our state
if (sl->crc == crc) return;
break;
}
}
if (sl) {
//otherwise we keep this new param set
sl->data = gf_realloc(sl->data, size);
memcpy(sl->data, data, size);
sl->size = size;
sl->crc = crc;
ctx->ps_modified = GF_TRUE;
return;
}
//TODO we might want to purge the list after a while !!
GF_SAFEALLOC(sl, GF_NALUFFParam);
if (!sl) return;
sl->data = gf_malloc(sizeof(char) * size);
if (!sl->data) {
gf_free(sl);
return;
}
memcpy(sl->data, data, size);
sl->size = size;
sl->id = ps_id;
sl->crc = crc;
ctx->ps_modified = GF_TRUE;
gf_list_add(list, sl);
}
| 0 |
[
"CWE-476"
] |
gpac
|
b43f9d1a4b4e33d08edaef6d313e6ce4bdf554d3
| 251,019,785,237,596,850,000,000,000,000,000,000,000 | 121 |
fixed #2223
|
static size_t buffered_output(pn_transport_t *transport)
{
size_t count = 0;
pni_ssl_t *ssl = transport->ssl;
if (ssl) {
count += ssl->out_count;
if (ssl->bio_net_io) { // pick up any bytes waiting for network io
count += BIO_ctrl_pending(ssl->bio_net_io);
}
}
return count;
}
| 0 |
[] |
qpid-proton
|
4aea0fd8502f5e9af7f22fd60645eeec07bce0b2
| 329,392,008,417,751,140,000,000,000,000,000,000,000 | 12 |
PROTON-2014: [c] Ensure SSL mutual authentication
(cherry picked from commit 97c7733f07712665f3d08091c82c393e4c3adbf7)
|
xmlDictCreateSub(xmlDictPtr sub) {
xmlDictPtr dict = xmlDictCreate();
if ((dict != NULL) && (sub != NULL)) {
#ifdef DICT_DEBUG_PATTERNS
fprintf(stderr, "R");
#endif
dict->subdict = sub;
xmlDictReference(dict->subdict);
}
return(dict);
}
| 1 |
[
"CWE-399"
] |
libxml2
|
8973d58b7498fa5100a876815476b81fd1a2412a
| 122,348,190,032,430,250,000,000,000,000,000,000,000 | 12 |
Add hash randomization to hash and dict structures
Following http://www.ocert.org/advisories/ocert-2011-003.html
it seems that having hash randomization might be a good idea
when using XML with untrusted data
* configure.in: lookup for rand, srand and time
* dict.c: add randomization to dictionaries hash tables
* hash.c: add randomization to normal hash tables
|
static Status Compute(OpKernelContext* context,
const typename TTypes<Tidx, 1>::ConstTensor& arr,
const typename TTypes<T, 1>::ConstTensor& weights,
typename TTypes<T, 1>::Tensor& output,
const Tidx num_bins) {
Tensor all_nonneg_t;
TF_RETURN_IF_ERROR(context->allocate_temp(
DT_BOOL, TensorShape({}), &all_nonneg_t, AllocatorAttributes()));
all_nonneg_t.scalar<bool>().device(context->eigen_cpu_device()) =
(arr >= Tidx(0)).all();
if (!all_nonneg_t.scalar<bool>()()) {
return errors::InvalidArgument("Input arr must be non-negative!");
}
// Allocate partial output bin sums for each worker thread. Worker ids in
// ParallelForWithWorkerId range from 0 to NumThreads() inclusive.
ThreadPool* thread_pool =
context->device()->tensorflow_cpu_worker_threads()->workers;
const int64 num_threads = thread_pool->NumThreads() + 1;
Tensor partial_bins_t;
TF_RETURN_IF_ERROR(context->allocate_temp(
DT_BOOL, TensorShape({num_threads, num_bins}), &partial_bins_t));
auto partial_bins = partial_bins_t.matrix<bool>();
partial_bins.setZero();
thread_pool->ParallelForWithWorkerId(
arr.size(), 8 /* cost */,
[&](int64 start_ind, int64 limit_ind, int64 worker_id) {
for (int64 i = start_ind; i < limit_ind; i++) {
Tidx value = arr(i);
if (value < num_bins) {
partial_bins(worker_id, value) = true;
}
}
});
// Sum the partial bins along the 0th axis.
Eigen::array<int, 1> reduce_dim({0});
output.device(context->eigen_cpu_device()) =
partial_bins.any(reduce_dim).cast<T>();
return Status::OK();
}
| 0 |
[
"CWE-703",
"CWE-787"
] |
tensorflow
|
eebb96c2830d48597d055d247c0e9aebaea94cd5
| 243,152,964,418,099,170,000,000,000,000,000,000,000 | 41 |
Fix an invalid address vulnerability in `tf.raw_ops.RaggedBincount`.
PiperOrigin-RevId: 368293153
Change-Id: I4b4e493d3fd05e7dc55a55de3a041a80a4f275c3
|
static int snd_ctl_dev_disconnect(struct snd_device *device)
{
struct snd_card *card = device->device_data;
struct snd_ctl_file *ctl;
int err, cardnum;
if (snd_BUG_ON(!card))
return -ENXIO;
cardnum = card->number;
if (snd_BUG_ON(cardnum < 0 || cardnum >= SNDRV_CARDS))
return -ENXIO;
read_lock(&card->ctl_files_rwlock);
list_for_each_entry(ctl, &card->ctl_files, list) {
wake_up(&ctl->change_sleep);
kill_fasync(&ctl->fasync, SIGIO, POLL_ERR);
}
read_unlock(&card->ctl_files_rwlock);
if ((err = snd_unregister_device(SNDRV_DEVICE_TYPE_CONTROL,
card, -1)) < 0)
return err;
return 0;
}
| 0 |
[
"CWE-190",
"CWE-189"
] |
linux
|
ac902c112d90a89e59916f751c2745f4dbdbb4bd
| 33,369,253,287,297,530,000,000,000,000,000,000,000 | 24 |
ALSA: control: Handle numid overflow
Each control gets automatically assigned its numids when the control is created.
The allocation is done by incrementing the numid by the amount of allocated
numids per allocation. This means that excessive creation and destruction of
controls (e.g. via SNDRV_CTL_IOCTL_ELEM_ADD/REMOVE) can cause the id to
eventually overflow. Currently when this happens for the control that caused the
overflow kctl->id.numid + kctl->count will also over flow causing it to be
smaller than kctl->id.numid. Most of the code assumes that this is something
that can not happen, so we need to make sure that it won't happen
Signed-off-by: Lars-Peter Clausen <[email protected]>
Acked-by: Jaroslav Kysela <[email protected]>
Cc: <[email protected]>
Signed-off-by: Takashi Iwai <[email protected]>
|
mime_application_hash (GAppInfo *app)
{
const char *id;
id = g_app_info_get_id (app);
if (id == NULL)
{
return GPOINTER_TO_UINT (app);
}
return g_str_hash (id);
}
| 0 |
[
"CWE-20"
] |
nautilus
|
1630f53481f445ada0a455e9979236d31a8d3bb0
| 289,658,580,841,261,770,000,000,000,000,000,000,000 | 13 |
mime-actions: use file metadata for trusting desktop files
Currently we only trust desktop files that have the executable bit
set, and don't replace the displayed icon or the displayed name until
it's trusted, which prevents for running random programs by a malicious
desktop file.
However, the executable permission is preserved if the desktop file
comes from a compressed file.
To prevent this, add a metadata::trusted metadata to the file once the
user acknowledges the file as trusted. This adds metadata to the file,
which cannot be added unless it has access to the computer.
Also remove the SHEBANG "trusted" content we were putting inside the
desktop file, since that doesn't add more security since it can come
with the file itself.
https://bugzilla.gnome.org/show_bug.cgi?id=777991
|
void GrpcStreamClientHandler::onRemoteClose(Grpc::Status::GrpcStatus status,
const std::string& message) {
context->onGrpcClose(token, status, message);
}
| 0 |
[
"CWE-476"
] |
envoy
|
8788a3cf255b647fd14e6b5e2585abaaedb28153
| 26,611,688,597,433,670,000,000,000,000,000,000,000 | 4 |
1.4 - Do not call into the VM unless the VM Context has been created. (#24)
* Ensure that the in VM Context is created before onDone is called.
Signed-off-by: John Plevyak <[email protected]>
* Update as per offline discussion.
Signed-off-by: John Plevyak <[email protected]>
* Set in_vm_context_created_ in onNetworkNewConnection.
Signed-off-by: John Plevyak <[email protected]>
* Add guards to other network calls.
Signed-off-by: John Plevyak <[email protected]>
* Fix common/wasm tests.
Signed-off-by: John Plevyak <[email protected]>
* Patch tests.
Signed-off-by: John Plevyak <[email protected]>
* Remove unecessary file from cherry-pick.
Signed-off-by: John Plevyak <[email protected]>
|
TEST_F(QueryPlannerTest, CantExplodeWithEmptyBounds2) {
addIndex(BSON("a" << 1 << "b" << 1 << "c" << 1));
runQuerySortProj(fromjson("{a: {$gt: 3, $lt: 0}}"), BSON("b" << 1), BSONObj());
assertNumSolutions(2U);
assertSolutionExists(
"{sort: {pattern: {b:1}, limit: 0, node: {sortKeyGen: {node: "
"{cscan: {dir: 1}}}}}}");
assertSolutionExists(
"{sort: {pattern: {b:1}, limit: 0, node: {sortKeyGen: {node: "
"{fetch: {node: {ixscan: {pattern: {a:1,b:1,c:1}}}}}}}}}");
}
| 0 |
[] |
mongo
|
ee97c0699fd55b498310996ee002328e533681a3
| 19,533,249,080,697,270,000,000,000,000,000,000,000 | 12 |
SERVER-36993 Fix crash due to incorrect $or pushdown for indexed $expr.
|
md_skip_unicode_whitespace(const CHAR* label, OFF off, SZ size)
{
SZ char_size;
unsigned codepoint;
while(off < size) {
codepoint = md_decode_unicode(label, off, size, &char_size);
if(!ISUNICODEWHITESPACE_(codepoint) && !ISNEWLINE_(label[off]))
break;
off += char_size;
}
return off;
}
| 0 |
[
"CWE-125",
"CWE-908"
] |
md4c
|
4fc808d8fe8d8904f8525bb4231d854f45e23a19
| 200,878,028,662,726,400,000,000,000,000,000,000,000 | 14 |
md_analyze_line: Avoid reading 1 byte beyond the input size.
Fixes #155.
|
qb_rb_chunk_peek(struct qb_ringbuffer_s * rb, void **data_out, int32_t timeout)
{
uint32_t read_pt;
uint32_t chunk_size;
uint32_t chunk_magic;
int32_t res = 0;
if (rb == NULL) {
return -EINVAL;
}
if (rb->notifier.timedwait_fn) {
res = rb->notifier.timedwait_fn(rb->notifier.instance, timeout);
}
if (res < 0 && res != -EIDRM) {
if (res == -ETIMEDOUT) {
return 0;
} else {
errno = -res;
qb_util_perror(LOG_ERR, "sem_timedwait");
}
return res;
}
read_pt = rb->shared_hdr->read_pt;
chunk_magic = QB_RB_CHUNK_MAGIC_GET(rb, read_pt);
if (chunk_magic != QB_RB_CHUNK_MAGIC) {
if (rb->notifier.post_fn) {
(void)rb->notifier.post_fn(rb->notifier.instance, res);
}
#ifdef EBADMSG
return -EBADMSG;
#else
return -EINVAL;
#endif
}
chunk_size = QB_RB_CHUNK_SIZE_GET(rb, read_pt);
*data_out = QB_RB_CHUNK_DATA_GET(rb, read_pt);
return chunk_size;
}
| 0 |
[
"CWE-59"
] |
libqb
|
e322e98dc264bc5911d6fe1d371e55ac9f95a71e
| 26,898,046,077,076,614,000,000,000,000,000,000,000 | 38 |
ipc: use O_EXCL on SHM files, and randomize the names
Signed-off-by: Christine Caulfield <[email protected]>
|
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
hwaddr *xlat, hwaddr *plen,
bool is_write)
{
IOMMUTLBEntry iotlb;
MemoryRegionSection *section;
MemoryRegion *mr;
rcu_read_lock();
for (;;) {
AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
section = address_space_translate_internal(d, addr, &addr, plen, true);
mr = section->mr;
if (!mr->iommu_ops) {
break;
}
iotlb = mr->iommu_ops->translate(mr, addr, is_write);
addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
| (addr & iotlb.addr_mask));
*plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
if (!(iotlb.perm & (1 << is_write))) {
mr = &io_mem_unassigned;
break;
}
as = iotlb.target_as;
}
if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
*plen = MIN(page, *plen);
}
*xlat = addr;
rcu_read_unlock();
return mr;
}
| 0 |
[] |
qemu
|
c3c1bb99d1c11978d9ce94d1bdcf0705378c1459
| 129,442,289,371,245,500,000,000,000,000,000,000,000 | 39 |
exec: Respect as_tranlsate_internal length clamp
address_space_translate_internal will clamp the *plen length argument
based on the size of the memory region being queried. The iommu walker
logic in addresss_space_translate was ignoring this by discarding the
post fn call value of *plen. Fix by just always using *plen as the
length argument throughout the fn, removing the len local variable.
This fixes a bootloader bug when a single elf section spans multiple
QEMU memory regions.
Signed-off-by: Peter Crosthwaite <[email protected]>
Message-Id: <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
|
const char* ExpressionCond::getOpName() const {
return "$cond";
}
| 0 |
[
"CWE-835"
] |
mongo
|
0a076417d1d7fba3632b73349a1fd29a83e68816
| 158,598,152,899,594,500,000,000,000,000,000,000,000 | 3 |
SERVER-38070 fix infinite loop in agg expression
|
void zfree_no_tcache(void *ptr) {
if (ptr == NULL) return;
update_zmalloc_stat_free(zmalloc_size(ptr));
dallocx(ptr, MALLOCX_TCACHE_NONE);
}
| 0 |
[
"CWE-190"
] |
redis
|
d32f2e9999ce003bad0bd2c3bca29f64dcce4433
| 121,042,024,051,275,300,000,000,000,000,000,000,000 | 5 |
Fix integer overflow (CVE-2021-21309). (#8522)
On 32-bit systems, setting the proto-max-bulk-len config parameter to a high value may result with integer overflow and a subsequent heap overflow when parsing an input bulk (CVE-2021-21309).
This fix has two parts:
Set a reasonable limit to the config parameter.
Add additional checks to prevent the problem in other potential but unknown code paths.
|
WandExport void DrawSkewX(DrawingWand *wand,const double degrees)
{
assert(wand != (DrawingWand *) NULL);
assert(wand->signature == MagickWandSignature);
if (wand->debug != MagickFalse)
(void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
(void) MVGPrintf(wand,"skewX %.20g\n",degrees);
}
| 0 |
[
"CWE-476"
] |
ImageMagick
|
6ad5fc3c9b652eec27fc0b1a0817159f8547d5d9
| 170,225,377,880,240,230,000,000,000,000,000,000,000 | 8 |
https://github.com/ImageMagick/ImageMagick/issues/716
|
int ssl_cipher_ptr_id_cmp(const SSL_CIPHER *const *ap,
const SSL_CIPHER *const *bp)
{
long l;
l = (*ap)->id - (*bp)->id;
if (l == 0L)
return (0);
else
return ((l > 0) ? 1 : -1);
}
| 0 |
[
"CWE-310"
] |
openssl
|
56f1acf5ef8a432992497a04792ff4b3b2c6f286
| 86,009,423,120,954,900,000,000,000,000,000,000,000 | 11 |
Disable SSLv2 default build, default negotiation and weak ciphers.
SSLv2 is by default disabled at build-time. Builds that are not
configured with "enable-ssl2" will not support SSLv2. Even if
"enable-ssl2" is used, users who want to negotiate SSLv2 via the
version-flexible SSLv23_method() will need to explicitly call either
of:
SSL_CTX_clear_options(ctx, SSL_OP_NO_SSLv2);
or
SSL_clear_options(ssl, SSL_OP_NO_SSLv2);
as appropriate. Even if either of those is used, or the application
explicitly uses the version-specific SSLv2_method() or its client
or server variants, SSLv2 ciphers vulnerable to exhaustive search
key recovery have been removed. Specifically, the SSLv2 40-bit
EXPORT ciphers, and SSLv2 56-bit DES are no longer available.
Mitigation for CVE-2016-0800
Reviewed-by: Emilia Käsper <[email protected]>
|
static void set_pixel_format(VncState *vs,
int bits_per_pixel, int depth,
int big_endian_flag, int true_color_flag,
int red_max, int green_max, int blue_max,
int red_shift, int green_shift, int blue_shift)
{
if (!true_color_flag) {
vnc_client_error(vs);
return;
}
vs->client_pf.rmax = red_max;
vs->client_pf.rbits = hweight_long(red_max);
vs->client_pf.rshift = red_shift;
vs->client_pf.rmask = red_max << red_shift;
vs->client_pf.gmax = green_max;
vs->client_pf.gbits = hweight_long(green_max);
vs->client_pf.gshift = green_shift;
vs->client_pf.gmask = green_max << green_shift;
vs->client_pf.bmax = blue_max;
vs->client_pf.bbits = hweight_long(blue_max);
vs->client_pf.bshift = blue_shift;
vs->client_pf.bmask = blue_max << blue_shift;
vs->client_pf.bits_per_pixel = bits_per_pixel;
vs->client_pf.bytes_per_pixel = bits_per_pixel / 8;
vs->client_pf.depth = bits_per_pixel == 32 ? 24 : bits_per_pixel;
vs->client_be = big_endian_flag;
set_pixel_conversion(vs);
graphic_hw_invalidate(NULL);
graphic_hw_update(NULL);
}
| 0 |
[
"CWE-125"
] |
qemu
|
bea60dd7679364493a0d7f5b54316c767cf894ef
| 324,496,784,351,134,270,000,000,000,000,000,000,000 | 33 |
ui/vnc: fix potential memory corruption issues
this patch makes the VNC server work correctly if the
server surface and the guest surface have different sizes.
Basically the server surface is adjusted to not exceed VNC_MAX_WIDTH
x VNC_MAX_HEIGHT and additionally the width is rounded up to multiple of
VNC_DIRTY_PIXELS_PER_BIT.
If we have a resolution whose width is not dividable by VNC_DIRTY_PIXELS_PER_BIT
we now get a small black bar on the right of the screen.
If the surface is too big to fit the limits only the upper left area is shown.
On top of that this fixes 2 memory corruption issues:
The first was actually discovered during playing
around with a Windows 7 vServer. During resolution
change in Windows 7 it happens sometimes that Windows
changes to an intermediate resolution where
server_stride % cmp_bytes != 0 (in vnc_refresh_server_surface).
This happens only if width % VNC_DIRTY_PIXELS_PER_BIT != 0.
The second is a theoretical issue, but is maybe exploitable
by the guest. If for some reason the guest surface size is bigger
than VNC_MAX_WIDTH x VNC_MAX_HEIGHT we end up in severe corruption since
this limit is nowhere enforced.
Signed-off-by: Peter Lieven <[email protected]>
Signed-off-by: Gerd Hoffmann <[email protected]>
|
std::string help() const override {
return "Revokes privileges from a role";
}
| 0 |
[
"CWE-613"
] |
mongo
|
e55d6e2292e5dbe2f97153251d8193d1cc89f5d7
| 123,861,696,082,239,340,000,000,000,000,000,000,000 | 3 |
SERVER-38984 Validate unique User ID on UserCache hit
|
void CClient::Update()
{
if(State() == IClient::STATE_DEMOPLAYBACK)
{
m_DemoPlayer.Update();
if(m_DemoPlayer.IsPlaying())
{
// update timers
const CDemoPlayer::CPlaybackInfo *pInfo = m_DemoPlayer.Info();
m_CurGameTick = pInfo->m_Info.m_CurrentTick;
m_PrevGameTick = pInfo->m_PreviousTick;
m_GameIntraTick = pInfo->m_IntraTick;
m_GameTickTime = pInfo->m_TickTime;
}
else
{
// disconnect on error
Disconnect();
}
}
else if(State() == IClient::STATE_ONLINE && m_RecivedSnapshots >= 3)
{
// switch snapshot
int Repredict = 0;
int64 Freq = time_freq();
int64 Now = m_GameTime.Get(time_get());
int64 PredNow = m_PredictedTime.Get(time_get());
while(1)
{
CSnapshotStorage::CHolder *pCur = m_aSnapshots[SNAP_CURRENT];
int64 TickStart = (pCur->m_Tick)*time_freq()/50;
if(TickStart < Now)
{
CSnapshotStorage::CHolder *pNext = m_aSnapshots[SNAP_CURRENT]->m_pNext;
if(pNext)
{
m_aSnapshots[SNAP_PREV] = m_aSnapshots[SNAP_CURRENT];
m_aSnapshots[SNAP_CURRENT] = pNext;
// set ticks
m_CurGameTick = m_aSnapshots[SNAP_CURRENT]->m_Tick;
m_PrevGameTick = m_aSnapshots[SNAP_PREV]->m_Tick;
if(m_aSnapshots[SNAP_CURRENT] && m_aSnapshots[SNAP_PREV])
{
GameClient()->OnNewSnapshot();
Repredict = 1;
}
}
else
break;
}
else
break;
}
if(m_aSnapshots[SNAP_CURRENT] && m_aSnapshots[SNAP_PREV])
{
int64 CurtickStart = (m_aSnapshots[SNAP_CURRENT]->m_Tick)*time_freq()/50;
int64 PrevtickStart = (m_aSnapshots[SNAP_PREV]->m_Tick)*time_freq()/50;
int PrevPredTick = (int)(PredNow*50/time_freq());
int NewPredTick = PrevPredTick+1;
m_GameIntraTick = (Now - PrevtickStart) / (float)(CurtickStart-PrevtickStart);
m_GameTickTime = (Now - PrevtickStart) / (float)Freq; //(float)SERVER_TICK_SPEED);
CurtickStart = NewPredTick*time_freq()/50;
PrevtickStart = PrevPredTick*time_freq()/50;
m_PredIntraTick = (PredNow - PrevtickStart) / (float)(CurtickStart-PrevtickStart);
if(NewPredTick < m_aSnapshots[SNAP_PREV]->m_Tick-SERVER_TICK_SPEED || NewPredTick > m_aSnapshots[SNAP_PREV]->m_Tick+SERVER_TICK_SPEED)
{
m_pConsole->Print(IConsole::OUTPUT_LEVEL_ADDINFO, "client", "prediction time reset!");
m_PredictedTime.Init(m_aSnapshots[SNAP_CURRENT]->m_Tick*time_freq()/50);
}
if(NewPredTick > m_PredTick)
{
m_PredTick = NewPredTick;
Repredict = 1;
// send input
SendInput();
}
}
// only do sane predictions
if(Repredict)
{
if(m_PredTick > m_CurGameTick && m_PredTick < m_CurGameTick+50)
GameClient()->OnPredict();
}
// fetch server info if we don't have it
if(State() >= IClient::STATE_LOADING &&
m_CurrentServerInfoRequestTime >= 0 &&
time_get() > m_CurrentServerInfoRequestTime)
{
m_ServerBrowser.Request(m_ServerAddress);
m_CurrentServerInfoRequestTime = time_get()+time_freq()*2;
}
}
// STRESS TEST: join the server again
if(g_Config.m_DbgStress)
{
static int64 ActionTaken = 0;
int64 Now = time_get();
if(State() == IClient::STATE_OFFLINE)
{
if(Now > ActionTaken+time_freq()*2)
{
m_pConsole->Print(IConsole::OUTPUT_LEVEL_DEBUG, "stress", "reconnecting!");
Connect(g_Config.m_DbgStressServer);
ActionTaken = Now;
}
}
else
{
if(Now > ActionTaken+time_freq()*(10+g_Config.m_DbgStress))
{
m_pConsole->Print(IConsole::OUTPUT_LEVEL_DEBUG, "stress", "disconnecting!");
Disconnect();
ActionTaken = Now;
}
}
}
// pump the network
PumpNetwork();
// update the maser server registry
MasterServer()->Update();
// update the server browser
m_ServerBrowser.Update(m_ResortServerBrowser);
m_ResortServerBrowser = false;
}
| 0 |
[
"CWE-119",
"CWE-787"
] |
teeworlds
|
ff254722a2683867fcb3e67569ffd36226c4bc62
| 269,614,721,713,107,000,000,000,000,000,000,000,000 | 140 |
added some checks to snap handling
|
void GetSizeSplitsVector(const TfLiteTensor* size_splits,
std::vector<int64_t>* size_splits_vector) {
const auto num_elements = NumElements(size_splits);
for (int i = 0; i < num_elements; ++i) {
size_splits_vector->push_back(GetTensorData<T>(size_splits)[i]);
}
}
| 0 |
[
"CWE-125",
"CWE-787"
] |
tensorflow
|
1970c2158b1ffa416d159d03c3370b9a462aee35
| 137,991,284,238,334,580,000,000,000,000,000,000,000 | 7 |
[tflite]: Insert `nullptr` checks when obtaining tensors.
As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages.
We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`).
PiperOrigin-RevId: 332521299
Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
|
void operator()(OpKernelContext* context, const Tensor& x,
const Tensor& scale, const Tensor& offset,
const Tensor& estimated_mean,
const Tensor& estimated_variance, const Tensor* side_input,
U epsilon, U exponential_avg_factor,
FusedBatchNormActivationMode activation_mode, Tensor* y,
Tensor* batch_mean, Tensor* batch_var, Tensor* saved_mean,
Tensor* saved_inv_var, TensorFormat tensor_format,
bool use_reserved_space) {
auto* stream = context->op_device_context()->stream();
OP_REQUIRES(context, stream, errors::Internal("No GPU stream available"));
const int64_t batch_size = GetTensorDim(x, tensor_format, 'N');
const int64_t channels = GetTensorDim(x, tensor_format, 'C');
const int64_t height = GetTensorDim(x, tensor_format, 'H');
const int64_t width = GetTensorDim(x, tensor_format, 'W');
// If use_reserved_space we have reserve_space_3 output (only in
// FusedBatchNormV3 op).
#if GOOGLE_CUDA
// Check if cuDNN batch normalization has a fast NHWC implementation:
// (1) In inference mode it's always fast.
// (2) Tensorflow enabled batchnorm spatial persistence, we are called
// from
// FusedBatchNormV3, i.e. use_reserved_space is true.
const bool fast_nhwc_batch_norm =
!is_training ||
(BatchnormSpatialPersistentEnabled() &&
DataTypeToEnum<T>::value == DT_HALF && use_reserved_space);
#else
// fast NHWC implementation is a CUDA only feature
const bool fast_nhwc_batch_norm = false;
#endif
// If input tensor is in NHWC format, and we have a fast cuDNN
// implementation, there is no need to do data format conversion.
TensorFormat compute_format =
fast_nhwc_batch_norm && tensor_format == FORMAT_NHWC ? FORMAT_NHWC
: FORMAT_NCHW;
VLOG(2) << "FusedBatchNorm:"
<< " batch_size: " << batch_size << " channels: " << channels
<< " height: " << height << " width:" << width
<< " x shape: " << x.shape().DebugString()
<< " scale shape: " << scale.shape().DebugString()
<< " offset shape: " << offset.shape().DebugString()
<< " activation mode: " << ToString(activation_mode)
<< " tensor format: " << ToString(tensor_format)
<< " compute format: " << ToString(compute_format);
auto maybe_make_dummy_output = [context, use_reserved_space]() -> Status {
if (use_reserved_space) {
Tensor* dummy_reserve_space = nullptr;
return context->allocate_output(5, {}, &dummy_reserve_space);
}
return Status::OK();
};
// If input is empty, return NaN mean/variance
if (x.shape().num_elements() == 0) {
OP_REQUIRES_OK(context, maybe_make_dummy_output());
functor::SetNanFunctor<GPUDevice, U> f;
f(context->eigen_device<GPUDevice>(), batch_mean->flat<U>());
f(context->eigen_device<GPUDevice>(), batch_var->flat<U>());
return;
}
// In inference mode we use custom CUDA kernel, because cuDNN does not
// support side input and activations for inference.
const bool has_side_input = side_input != nullptr;
const bool has_activation =
activation_mode != FusedBatchNormActivationMode::kIdentity;
if (!is_training && (has_side_input || has_activation)) {
OP_REQUIRES_OK(context, maybe_make_dummy_output());
FusedBatchNormInferenceFunctor<GPUDevice, T, U> inference_functor;
if (has_side_input) {
inference_functor(context, tensor_format, x.tensor<T, 4>(),
scale.vec<U>(), offset.vec<U>(),
estimated_mean.vec<U>(), estimated_variance.vec<U>(),
side_input->tensor<T, 4>(), epsilon, activation_mode,
y->tensor<T, 4>());
} else {
typename TTypes<T, 4>::ConstTensor empty_tensor(nullptr, 0, 0, 0, 0);
inference_functor(context, tensor_format, x.tensor<T, 4>(),
scale.vec<U>(), offset.vec<U>(),
estimated_mean.vec<U>(), estimated_variance.vec<U>(),
empty_tensor, epsilon, activation_mode,
y->tensor<T, 4>());
}
return;
}
Tensor x_maybe_transformed = x;
Tensor x_transformed;
Tensor y_transformed;
se::DeviceMemory<T> y_ptr;
if (tensor_format == compute_format) {
y_ptr = StreamExecutorUtil::AsDeviceMemory<T>(*y);
} else if (tensor_format == FORMAT_NHWC && compute_format == FORMAT_NCHW) {
OP_REQUIRES_OK(context, context->allocate_temp(
DataTypeToEnum<T>::value,
ShapeFromFormat(compute_format, batch_size,
height, width, channels),
&x_transformed));
functor::NHWCToNCHW<GPUDevice, T, 4>()(
context->eigen_device<GPUDevice>(),
const_cast<const Tensor&>(x_maybe_transformed).tensor<T, 4>(),
x_transformed.tensor<T, 4>());
x_maybe_transformed = x_transformed;
OP_REQUIRES_OK(context, context->allocate_temp(
DataTypeToEnum<T>::value,
ShapeFromFormat(compute_format, batch_size,
height, width, channels),
&y_transformed));
y_ptr = StreamExecutorUtil::AsDeviceMemory<T>(y_transformed);
} else {
context->SetStatus(errors::Internal(
"Unsupported tensor format: ", ToString(tensor_format),
" and compute format: ", ToString(compute_format)));
return;
}
const se::dnn::DataLayout data_layout =
compute_format == FORMAT_NHWC ? se::dnn::DataLayout::kBatchYXDepth
: se::dnn::DataLayout::kBatchDepthYX;
se::dnn::BatchDescriptor x_desc;
x_desc.set_count(batch_size)
.set_feature_map_count(channels)
.set_height(height)
.set_width(width)
.set_layout(data_layout);
se::dnn::BatchDescriptor scale_offset_desc;
scale_offset_desc.set_count(1)
.set_feature_map_count(channels)
.set_height(1)
.set_width(1)
.set_layout(se::dnn::DataLayout::kBatchDepthYX);
auto x_ptr = StreamExecutorUtil::AsDeviceMemory<T>(x_maybe_transformed);
auto scale_ptr = StreamExecutorUtil::AsDeviceMemory<U>(scale);
auto offset_ptr = StreamExecutorUtil::AsDeviceMemory<U>(offset);
auto estimated_mean_ptr =
StreamExecutorUtil::AsDeviceMemory<U>(estimated_mean);
auto estimated_variance_ptr =
StreamExecutorUtil::AsDeviceMemory<U>(estimated_variance);
auto side_input_ptr =
side_input != nullptr
? StreamExecutorUtil::AsDeviceMemory<T>(*side_input)
: se::DeviceMemory<T>();
auto batch_mean_ptr = StreamExecutorUtil::AsDeviceMemory<U>(*batch_mean);
auto batch_var_ptr = StreamExecutorUtil::AsDeviceMemory<U>(*batch_var);
auto saved_mean_ptr = StreamExecutorUtil::AsDeviceMemory<U>(*saved_mean);
auto saved_inv_var_ptr =
StreamExecutorUtil::AsDeviceMemory<U>(*saved_inv_var);
std::unique_ptr<functor::CudnnBatchNormAllocatorInOutput<U>>
reserve_space_allocator;
std::unique_ptr<functor::CudnnBatchNormAllocatorInTemp<uint8>>
workspace_allocator;
if (use_reserved_space) {
reserve_space_allocator.reset(
new functor::CudnnBatchNormAllocatorInOutput<U>(context, 5));
workspace_allocator.reset(
new functor::CudnnBatchNormAllocatorInTemp<uint8>(context));
}
if (!batch_mean->SharesBufferWith(estimated_mean) &&
exponential_avg_factor != 1.0f) {
OP_REQUIRES(
context,
stream
->ThenMemcpyD2D(&batch_mean_ptr, estimated_mean_ptr,
estimated_mean.NumElements() * sizeof(U))
.ok(),
errors::Internal("MatrixTriangularSolveOp: failed to copy rhs "
"from device"));
}
if (!batch_var->SharesBufferWith(estimated_variance) &&
exponential_avg_factor != 1.0f) {
OP_REQUIRES(
context,
stream
->ThenMemcpyD2D(&batch_var_ptr, estimated_variance_ptr,
estimated_variance.NumElements() * sizeof(U))
.ok(),
errors::Internal("MatrixTriangularSolveOp: failed to copy rhs "
"from device"));
}
bool cudnn_launch_status =
stream
->ThenBatchNormalizationForward(
x_ptr, scale_ptr, offset_ptr, estimated_mean_ptr,
estimated_variance_ptr, side_input_ptr, x_desc,
scale_offset_desc, static_cast<double>(epsilon),
static_cast<double>(exponential_avg_factor),
AsDnnActivationMode(activation_mode), &y_ptr, &batch_mean_ptr,
&batch_var_ptr, &saved_mean_ptr, &saved_inv_var_ptr,
is_training, reserve_space_allocator.get(),
workspace_allocator.get())
.ok();
if (!cudnn_launch_status) {
context->SetStatus(
errors::Internal("cuDNN launch failure : input shape (",
x.shape().DebugString(), ")"));
return;
}
if (tensor_format == FORMAT_NHWC && compute_format == FORMAT_NCHW) {
functor::NCHWToNHWC<GPUDevice, T, 4>()(
context->eigen_device<GPUDevice>(),
const_cast<const Tensor&>(y_transformed).tensor<T, 4>(),
y->tensor<T, 4>());
}
}
| 0 |
[
"CWE-125"
] |
tensorflow
|
aab9998916c2ffbd8f0592059fad352622f89cda
| 237,398,188,338,344,200,000,000,000,000,000,000,000 | 222 |
Add shape checks to FusedBatchNorm kernels.
PiperOrigin-RevId: 399755576
Change-Id: If8049fde109cc33badb5509d174b9b95aee1ea5e
|
int read_record_hdr(int ifd, void *buffer, struct record_header *record_hdr,
struct file_header *file_hdr, int arch_64, int endian_mismatch,
int oneof, size_t b_size, uint64_t flags)
{
int rc;
do {
if ((rc = sa_fread(ifd, buffer, (size_t) file_hdr->rec_size, SOFT_SIZE, oneof)) != 0)
/* End of sa data file */
return rc;
/* Remap record header structure to that expected by current version */
if (remap_struct(rec_types_nr, file_hdr->rec_types_nr, buffer,
file_hdr->rec_size, RECORD_HEADER_SIZE, b_size) < 0)
return 2;
memcpy(record_hdr, buffer, RECORD_HEADER_SIZE);
/* Normalize endianness */
if (endian_mismatch) {
swap_struct(rec_types_nr, record_hdr, arch_64);
}
/* Raw output in debug mode */
if (DISPLAY_DEBUG_MODE(flags)) {
printf("# uptime_cs; %llu; ust_time; %llu; extra_next; %u; record_type; %d; HH:MM:SS; %02d:%02d:%02d\n",
record_hdr->uptime_cs, record_hdr->ust_time,
record_hdr->extra_next, record_hdr->record_type,
record_hdr->hour, record_hdr->minute, record_hdr->second);
}
/*
* Skip unknown extra structures if present.
* This will be done later for R_COMMENT and R_RESTART records, as extra structures
* are saved after the comment or the number of CPU.
*/
if ((record_hdr->record_type != R_COMMENT) && (record_hdr->record_type != R_RESTART) &&
record_hdr->extra_next && (skip_extra_struct(ifd, endian_mismatch, arch_64) < 0))
return 2;
}
while ((record_hdr->record_type >= R_EXTRA_MIN) && (record_hdr->record_type <= R_EXTRA_MAX)) ;
return 0;
}
| 0 |
[
"CWE-415"
] |
sysstat
|
a5c8abd4a481ee6e27a3acf00e6d9b0f023e20ed
| 241,691,196,470,695,950,000,000,000,000,000,000,000 | 43 |
Fix #242: Double free in check_file_actlst()
Avoid freeing buffer() twice.
Signed-off-by: Sebastien GODARD <[email protected]>
|
int main(int argc, char **argv)
{
g_test_init(&argc, &argv, NULL);
qtest_add_func("fuzz/lsi53c895a/lsi_do_dma_empty_queue",
test_lsi_do_dma_empty_queue);
qtest_add_func("fuzz/lsi53c895a/lsi_do_msgout_cancel_req",
test_lsi_do_msgout_cancel_req);
return g_test_run();
}
| 0 |
[] |
qemu
|
4367a20cc442c56b05611b4224de9a61908f9eac
| 107,235,085,817,723,560,000,000,000,000,000,000,000 | 12 |
scsi/lsi53c895a: really fix use-after-free in lsi_do_msgout (CVE-2022-0216)
Set current_req to NULL, not current_req->req, to prevent reusing a free'd
buffer in case of repeated SCSI cancel requests. Also apply the fix to
CLEAR QUEUE and BUS DEVICE RESET messages as well, since they also cancel
the request.
Thanks to Alexander Bulekov for providing a reproducer.
Fixes: CVE-2022-0216
Resolves: https://gitlab.com/qemu-project/qemu/-/issues/972
Signed-off-by: Mauro Matteo Cascella <[email protected]>
Tested-by: Alexander Bulekov <[email protected]>
Message-Id: <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
|
static uint32_t softfloat_mul(uint32_t x, uint64_t mantissa)
{
uint64_t l = x * (mantissa & 0xffffffff);
uint64_t h = x * (mantissa >> 32);
h += l >> 32;
l &= 0xffffffff;
l += 1 << av_log2(h >> 21);
h += l >> 32;
return h >> 20;
}
| 0 |
[
"CWE-787"
] |
FFmpeg
|
4c3e1956ee35fdcc5ffdb28782050164b4623c0b
| 272,664,741,241,978,270,000,000,000,000,000,000,000 | 10 |
lagarith: reallocate rgb_planes when needed
Fixes invalid writes on pixel format changes.
Found-by: Mateusz "j00ru" Jurczyk and Gynvael Coldwind
CC:[email protected]
|
BOOL license_write_new_license_request_packet(rdpLicense* license, wStream* s)
{
UINT32 PlatformId = PLATFORMID;
UINT32 PreferredKeyExchangeAlg = KEY_EXCHANGE_ALG_RSA;
Stream_Write_UINT32(s, PreferredKeyExchangeAlg); /* PreferredKeyExchangeAlg (4 bytes) */
Stream_Write_UINT32(s, PlatformId); /* PlatformId (4 bytes) */
Stream_Write(s, license->ClientRandom, 32); /* ClientRandom (32 bytes) */
if (/* EncryptedPremasterSecret */
!license_write_encrypted_premaster_secret_blob(s, license->EncryptedPremasterSecret,
license->ModulusLength) ||
/* ClientUserName */
!license_write_binary_blob(s, license->ClientUserName) ||
/* ClientMachineName */
!license_write_binary_blob(s, license->ClientMachineName))
{
return FALSE;
}
#ifdef WITH_DEBUG_LICENSE
WLog_DBG(TAG, "PreferredKeyExchangeAlg: 0x%08" PRIX32 "", PreferredKeyExchangeAlg);
WLog_DBG(TAG, "ClientRandom:");
winpr_HexDump(TAG, WLOG_DEBUG, license->ClientRandom, 32);
WLog_DBG(TAG, "EncryptedPremasterSecret");
winpr_HexDump(TAG, WLOG_DEBUG, license->EncryptedPremasterSecret->data,
license->EncryptedPremasterSecret->length);
WLog_DBG(TAG, "ClientUserName (%" PRIu16 "): %s", license->ClientUserName->length,
(char*)license->ClientUserName->data);
WLog_DBG(TAG, "ClientMachineName (%" PRIu16 "): %s", license->ClientMachineName->length,
(char*)license->ClientMachineName->data);
#endif
return TRUE;
}
| 0 |
[
"CWE-125"
] |
FreeRDP
|
6ade7b4cbfd71c54b3d724e8f2d6ac76a58e879a
| 213,071,577,410,407,560,000,000,000,000,000,000,000 | 34 |
Fixed OOB Read in license_read_new_or_upgrade_license_packet
CVE-2020-11099 thanks to @antonio-morales for finding this.
|
gx_default_create_compositor(gx_device * dev, gx_device ** pcdev,
const gs_composite_t * pcte,
gs_gstate * pgs, gs_memory_t * memory,
gx_device *cdev)
{
return pcte->type->procs.create_default_compositor
(pcte, pcdev, dev, pgs, memory);
}
| 0 |
[] |
ghostpdl
|
c9b362ba908ca4b1d7c72663a33229588012d7d9
| 51,776,582,757,893,030,000,000,000,000,000,000,000 | 8 |
Bug 699670: disallow copying of the epo device
The erasepage optimisation (epo) subclass device shouldn't be allowed to be
copied because the subclass private data, child and parent pointers end up
being shared between the original device and the copy.
Add an epo_finish_copydevice which NULLs the three offending pointers, and
then communicates to the caller that copying is not allowed.
This also exposed a separate issue with the stype for subclasses devices.
Devices are, I think, unique in having two stype objects associated with them:
the usual one in the memory manager header, and the other stored in the device
structere directly. In order for the stype to be correct, we have to use the
stype for the incoming device, with the ssize of the original device (ssize
should reflect the size of the memory allocation). We correctly did so with the
stype in the device structure, but then used the prototype device's stype to
patch the memory manager stype - meaning the ssize potentially no longer
matched the allocated memory. This caused problems in the garbager where there
is an implicit assumption that the size of a single object clump (c_alone == 1)
is also the size (+ memory manager overheads) of the single object it contains.
The solution is to use the same stype instance to patch the memory manager
data as we do in the device structure (with the correct ssize).
|
static void ipip_tunnel_bind_dev(struct net_device *dev)
{
struct net_device *tdev = NULL;
struct ip_tunnel *tunnel;
struct iphdr *iph;
tunnel = netdev_priv(dev);
iph = &tunnel->parms.iph;
if (iph->daddr) {
struct flowi fl = { .oif = tunnel->parms.link,
.nl_u = { .ip4_u =
{ .daddr = iph->daddr,
.saddr = iph->saddr,
.tos = RT_TOS(iph->tos) } },
.proto = IPPROTO_IPIP };
struct rtable *rt;
if (!ip_route_output_key(dev_net(dev), &rt, &fl)) {
tdev = rt->u.dst.dev;
ip_rt_put(rt);
}
dev->flags |= IFF_POINTOPOINT;
}
if (!tdev && tunnel->parms.link)
tdev = __dev_get_by_index(dev_net(dev), tunnel->parms.link);
if (tdev) {
dev->hard_header_len = tdev->hard_header_len + sizeof(struct iphdr);
dev->mtu = tdev->mtu - sizeof(struct iphdr);
}
dev->iflink = tunnel->parms.link;
}
| 0 |
[] |
linux-2.6
|
d5aa407f59f5b83d2c50ec88f5bf56d40f1f8978
| 238,331,087,350,966,200,000,000,000,000,000,000,000 | 33 |
tunnels: fix netns vs proto registration ordering
Same stuff as in ip_gre patch: receive hook can be called before netns
setup is done, oopsing in net_generic().
Signed-off-by: Alexey Dobriyan <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
int ssh_scp_push_file(ssh_scp scp, const char *filename, size_t size, int mode){
return ssh_scp_push_file64(scp, filename, (uint64_t) size, mode);
}
| 1 |
[] |
libssh
|
4aea835974996b2deb011024c53f4ff4329a95b5
| 65,560,746,232,969,330,000,000,000,000,000,000,000 | 3 |
CVE-2019-14889: scp: Reformat scp.c
Fixes T181
Signed-off-by: Anderson Toshiyuki Sasaki <[email protected]>
Reviewed-by: Andreas Schneider <[email protected]>
(cherry picked from commit 42c727d0c186a1e2fa84a31ab40e16e58b404ab3)
|
static int forgive(int n,int& err)
{
err = WSAGetLastError() ;
if ( !n && !err ) return FINISH ;
#ifndef WIN32
if ( n == 0 ) return FINISH ; // server hungup
#endif
bool bForgive = err == WSAEWOULDBLOCK || err == WSAENOTCONN ;
bool bError = n == SOCKET_ERROR ;
if ( bError && bForgive ) return 0 ;
return n ;
}
| 0 |
[
"CWE-476"
] |
exiv2
|
ae20c30805b330275b2aa0303a42e1f2bbd53661
| 45,267,609,222,989,980,000,000,000,000,000,000,000 | 12 |
Avoid null pointer exception due to NULL return value from strchr.
This fixes #793.
|
PJ_DEF(pj_status_t) pjmedia_wav_player_port_create( pj_pool_t *pool,
const char *filename,
unsigned ptime,
unsigned options,
pj_ssize_t buff_size,
pjmedia_port **p_port )
{
pjmedia_wave_hdr wave_hdr;
pj_ssize_t size_to_read, size_read;
struct file_reader_port *fport;
pjmedia_audio_format_detail *ad;
pj_off_t pos;
pj_str_t name;
unsigned samples_per_frame;
pj_status_t status = PJ_SUCCESS;
/* Check arguments. */
PJ_ASSERT_RETURN(pool && filename && p_port, PJ_EINVAL);
/* Check the file really exists. */
if (!pj_file_exists(filename)) {
return PJ_ENOTFOUND;
}
/* Normalize ptime */
if (ptime == 0)
ptime = 20;
/* Normalize buff_size */
if (buff_size < 1) buff_size = PJMEDIA_FILE_PORT_BUFSIZE;
/* Create fport instance. */
fport = create_file_port(pool);
if (!fport) {
return PJ_ENOMEM;
}
/* Get the file size. */
fport->fsize = pj_file_size(filename);
/* Size must be more than WAVE header size */
if (fport->fsize <= sizeof(pjmedia_wave_hdr)) {
return PJMEDIA_ENOTVALIDWAVE;
}
/* Open file. */
status = pj_file_open( pool, filename, PJ_O_RDONLY, &fport->fd);
if (status != PJ_SUCCESS)
return status;
/* Read the file header plus fmt header only. */
size_read = size_to_read = sizeof(wave_hdr) - 8;
status = pj_file_read( fport->fd, &wave_hdr, &size_read);
if (status != PJ_SUCCESS) {
pj_file_close(fport->fd);
return status;
}
if (size_read != size_to_read) {
pj_file_close(fport->fd);
return PJMEDIA_ENOTVALIDWAVE;
}
/* Normalize WAVE header fields values from little-endian to host
* byte order.
*/
pjmedia_wave_hdr_file_to_host(&wave_hdr);
/* Validate WAVE file. */
if (wave_hdr.riff_hdr.riff != PJMEDIA_RIFF_TAG ||
wave_hdr.riff_hdr.wave != PJMEDIA_WAVE_TAG ||
wave_hdr.fmt_hdr.fmt != PJMEDIA_FMT_TAG)
{
pj_file_close(fport->fd);
TRACE_((THIS_FILE,
"actual value|expected riff=%x|%x, wave=%x|%x fmt=%x|%x",
wave_hdr.riff_hdr.riff, PJMEDIA_RIFF_TAG,
wave_hdr.riff_hdr.wave, PJMEDIA_WAVE_TAG,
wave_hdr.fmt_hdr.fmt, PJMEDIA_FMT_TAG));
return PJMEDIA_ENOTVALIDWAVE;
}
/* Validate format and its attributes (i.e: bits per sample, block align) */
switch (wave_hdr.fmt_hdr.fmt_tag) {
case PJMEDIA_WAVE_FMT_TAG_PCM:
if (wave_hdr.fmt_hdr.bits_per_sample != 16 ||
wave_hdr.fmt_hdr.block_align != 2 * wave_hdr.fmt_hdr.nchan)
status = PJMEDIA_EWAVEUNSUPP;
break;
case PJMEDIA_WAVE_FMT_TAG_ALAW:
case PJMEDIA_WAVE_FMT_TAG_ULAW:
if (wave_hdr.fmt_hdr.bits_per_sample != 8 ||
wave_hdr.fmt_hdr.block_align != wave_hdr.fmt_hdr.nchan)
status = PJMEDIA_ENOTVALIDWAVE;
break;
default:
status = PJMEDIA_EWAVEUNSUPP;
break;
}
if (status != PJ_SUCCESS) {
pj_file_close(fport->fd);
return status;
}
fport->fmt_tag = (pjmedia_wave_fmt_tag)wave_hdr.fmt_hdr.fmt_tag;
fport->bytes_per_sample = (pj_uint16_t)
(wave_hdr.fmt_hdr.bits_per_sample / 8);
/* If length of fmt_header is greater than 16, skip the remaining
* fmt header data.
*/
if (wave_hdr.fmt_hdr.len > 16) {
size_to_read = wave_hdr.fmt_hdr.len - 16;
status = pj_file_setpos(fport->fd, size_to_read, PJ_SEEK_CUR);
if (status != PJ_SUCCESS) {
pj_file_close(fport->fd);
return status;
}
}
/* Repeat reading the WAVE file until we have 'data' chunk */
for (;;) {
pjmedia_wave_subchunk subchunk;
size_read = 8;
status = pj_file_read(fport->fd, &subchunk, &size_read);
if (status != PJ_SUCCESS || size_read != 8) {
pj_file_close(fport->fd);
return PJMEDIA_EWAVETOOSHORT;
}
/* Normalize endianness */
PJMEDIA_WAVE_NORMALIZE_SUBCHUNK(&subchunk);
/* Break if this is "data" chunk */
if (subchunk.id == PJMEDIA_DATA_TAG) {
wave_hdr.data_hdr.data = PJMEDIA_DATA_TAG;
wave_hdr.data_hdr.len = subchunk.len;
break;
}
/* Otherwise skip the chunk contents */
size_to_read = subchunk.len;
status = pj_file_setpos(fport->fd, size_to_read, PJ_SEEK_CUR);
if (status != PJ_SUCCESS) {
pj_file_close(fport->fd);
return status;
}
}
/* Current file position now points to start of data */
status = pj_file_getpos(fport->fd, &pos);
fport->start_data = (unsigned)pos;
fport->data_len = wave_hdr.data_hdr.len;
fport->data_left = wave_hdr.data_hdr.len;
/* Validate length. */
if (wave_hdr.data_hdr.len > fport->fsize - fport->start_data) {
/* Actual data length may be shorter than declared. We should still
* try to play whatever data is there instead of immediately returning
* error.
*/
wave_hdr.data_hdr.len = (pj_uint32_t)fport->fsize - fport->start_data;
// pj_file_close(fport->fd);
// return PJMEDIA_EWAVEUNSUPP;
}
if (wave_hdr.data_hdr.len < ptime * wave_hdr.fmt_hdr.sample_rate *
wave_hdr.fmt_hdr.nchan / 1000)
{
pj_file_close(fport->fd);
return PJMEDIA_EWAVETOOSHORT;
}
/* It seems like we have a valid WAVE file. */
/* Initialize */
fport->options = options;
/* Update port info. */
ad = pjmedia_format_get_audio_format_detail(&fport->base.info.fmt, 1);
pj_strdup2(pool, &name, filename);
samples_per_frame = ptime * wave_hdr.fmt_hdr.sample_rate *
wave_hdr.fmt_hdr.nchan / 1000;
pjmedia_port_info_init(&fport->base.info, &name, SIGNATURE,
wave_hdr.fmt_hdr.sample_rate,
wave_hdr.fmt_hdr.nchan,
BITS_PER_SAMPLE,
samples_per_frame);
/* If file is shorter than buffer size, adjust buffer size to file
* size. Otherwise EOF callback will be called multiple times when
* fill_buffer() is called.
*/
if (wave_hdr.data_hdr.len < (unsigned)buff_size)
buff_size = wave_hdr.data_hdr.len;
/* Create file buffer.
*/
fport->bufsize = (pj_uint32_t)buff_size;
/* samples_per_frame must be smaller than bufsize (because get_frame()
* doesn't handle this case).
*/
if (samples_per_frame * fport->bytes_per_sample >= fport->bufsize) {
pj_file_close(fport->fd);
return PJ_EINVAL;
}
/* Create buffer. */
fport->buf = (char*) pj_pool_alloc(pool, fport->bufsize);
if (!fport->buf) {
pj_file_close(fport->fd);
return PJ_ENOMEM;
}
fport->readpos = fport->buf;
/* Set initial position of the file. */
fport->fpos = fport->start_data;
/* Fill up the buffer. */
status = fill_buffer(fport);
if (status != PJ_SUCCESS) {
pj_file_close(fport->fd);
return status;
}
/* Done. */
*p_port = &fport->base;
PJ_LOG(4,(THIS_FILE,
"File player '%.*s' created: samp.rate=%d, ch=%d, bufsize=%uKB, "
"filesize=%luKB",
(int)fport->base.info.name.slen,
fport->base.info.name.ptr,
ad->clock_rate,
ad->channel_count,
fport->bufsize / 1000,
(unsigned long)(fport->fsize / 1000)));
return PJ_SUCCESS;
}
| 1 |
[
"CWE-703",
"CWE-835"
] |
pjproject
|
947bc1ee6d05be10204b918df75a503415fd3213
| 7,827,034,508,051,481,000,000,000,000,000,000,000 | 249 |
Merge pull request from GHSA-rwgw-vwxg-q799
* Prevent potential infinite loop when parsing WAV format file
* Check if subchunk is negative.
* Fix and add checks
* Change data type from pj_ssize_t to long.
* Modify check
* Fix leak file descriptor and modify check on wav_playlist
* Move overflow/underflow check to pj_file_setpos()
* Use macro to simplify check
* modification based on comments
* Remove unnecessary casting
* Modification based on comments
|
/* Claim ownership of otherwise-unreferenced FAT cluster chains for 'owner'.
 *
 * Scans the FAT starting at start_cluster, looking for allocated, non-bad
 * clusters that have neither a recorded owner nor any references counted
 * in num_refs[].  Each such cluster is treated as the head of an orphaned
 * chain, and the whole chain is walked and tagged with 'owner'.
 *
 * fs            - filesystem descriptor (provides the FAT and cluster count)
 * owner         - file recorded as the owner of every claimed cluster
 * num_refs      - per-cluster reference counts, indexed by cluster number
 * start_cluster - first FAT index to scan; 0 means "start at the first
 *                 data cluster" (cluster 2 -- entries 0 and 1 are reserved)
 */
static void tag_free(DOS_FS * fs, DOS_FILE * owner, uint32_t *num_refs,
		     uint32_t start_cluster)
{
    int prev;
    uint32_t i, walk;

    if (start_cluster == 0)
	start_cluster = 2;

    /* fs->clusters counts data clusters only; valid FAT indices run from
     * 2 to fs->clusters + 1 because of the two reserved entries. */
    for (i = start_cluster; i < fs->clusters + 2; i++) {
	FAT_ENTRY curEntry;

	get_fat(&curEntry, fs->fat, i, fs);

	/* If the current entry is the head of an un-owned chain: allocated
	 * (nonzero), not marked bad, no recorded owner, no references. */
	if (curEntry.value && !FAT_IS_BAD(fs, curEntry.value) &&
	    !get_owner(fs, i) && !num_refs[i]) {
	    prev = 0;
	    /* Walk the chain, claiming ownership as we go.
	     * NOTE(review): the walk != -1 end-of-chain test relies on the
	     * int constant -1 converting to UINT32_MAX, which next_cluster()
	     * presumably returns at end of chain -- confirm against its
	     * definition. */
	    for (walk = i; walk != -1; walk = next_cluster(fs, walk)) {
		if (!get_owner(fs, walk)) {
		    set_owner(fs, walk, owner);
		} else {
		    /* We've run into cross-links between orphaned chains,
		     * or a cycle with a tail.
		     * Terminate this orphan chain (break the link).
		     */
		    set_fat(fs, prev, -1);
		    /* This is not necessary because 'walk' is owned and thus
		     * will never become the head of a chain (the only case
		     * that would matter during reclaim to files).
		     * It's easier to decrement than to prove that it's
		     * unnecessary.
		     */
		    num_refs[walk]--;
		    break;
		}
		prev = walk;
	    }
	}
    }
}
| 0 |
[
"CWE-703",
"CWE-189"
] |
dosfstools
|
07908124838afcc99c577d1d3e84cef2dbd39cb7
| 19,692,885,943,177,140,000,000,000,000,000,000,000 | 42 |
set_fat(): Fix off-by-2 error leading to corruption in FAT12
In FAT12 two 12 bit entries are combined to a 24 bit value (three
bytes). Therefore, when an even numbered FAT entry is set in FAT12, it
must be be combined with the following entry. To prevent accessing
beyond the end of the FAT array, it must be checked that the cluster is
not the last one.
Previously, the check tested that the requested cluster was equal to
fs->clusters - 1. However, fs->clusters is the number of data clusters
not including the two reserved FAT entries at the start so the test
triggered two clusters early.
If the third to last entry was written on a FAT12 filesystem with an
odd number of clusters, the second to last entry would be corrupted.
This corruption may also lead to invalid memory accesses when the
corrupted entry becomes out of bounds and is used later.
Change the test to fs->clusters + 1 to fix.
Reported-by: Hanno Böck
Signed-off-by: Andreas Bombe <[email protected]>
|
/* Reset a window's charset state to the defaults in nwin_default:
 * restore the display attribute flags, install the standard charset
 * mapping "BBBB02", then apply any user-configured charset on top. */
ResetCharsets(p)
struct win *p;
{
  /* Restore attribute defaults from the template window. */
  p->w_c1 = nwin_default.c1;
  p->w_gr = nwin_default.gr;

  /* Standard mapping first, then the user override when one is set. */
  SetCharsets(p, "BBBB02");
  if (nwin_default.charset != 0)
    SetCharsets(p, nwin_default.charset);

#ifdef ENCODINGS
  ResetEncoding(p);
#endif
}
| 0 |
[] |
screen
|
c5db181b6e017cfccb8d7842ce140e59294d9f62
| 134,070,153,311,797,330,000,000,000,000,000,000,000 | 12 |
ansi: add support for xterm OSC 11
It allows for getting and setting the background color. Notably, Vim uses
OSC 11 to learn whether it's running on a light or dark colored terminal
and choose a color scheme accordingly.
Tested with gnome-terminal and xterm. When called with "?" argument the
current background color is returned:
$ echo -ne "\e]11;?\e\\"
$ 11;rgb:2323/2727/2929
Signed-off-by: Lubomir Rintel <[email protected]>
(cherry picked from commit 7059bff20a28778f9d3acf81cad07b1388d02309)
Signed-off-by: Amadeusz Sławiński <[email protected]>
|
Subsets and Splits
CWE 416 & 19
The query filters records related to specific CWEs (Common Weakness Enumerations), providing a basic overview of entries with these vulnerabilities but without deeper analysis.
CWE Frequency in Train Set
Counts the occurrences of each CWE (Common Weakness Enumeration) in the dataset, providing a basic distribution but limited insight.