Dataset schema (column, type, observed range):

    func        string   lengths 0 .. 484k
    target      int64    0 .. 1
    cwe         list     lengths 0 .. 4
    project     string   799 classes
    commit_id   string   lengths 40 .. 40
    hash        float64  1,215,700,430,453,689,100,000,000B .. 340,281,914,521,452,260,000,000,000,000B
    size        int64    1 .. 24k
    message     string   lengths 0 .. 13.3k

func:
create_footnote_ref(struct footnote_list *list, const uint8_t *name, size_t name_size)
{
    struct footnote_ref *ref = calloc(1, sizeof(struct footnote_ref));
    if (!ref)
        return NULL;
    ref->id = hash_link_ref(name, name_size);
    return ref;
}
target: 0
cwe: []
project: redcarpet
commit_id: e5a10516d07114d582d13b9125b733008c61c242
hash: 80,420,070,848,556,740,000,000,000,000,000,000,000
size: 10
message:
Avoid rewinding previous inline when auto-linking

When a bit like "[email protected]" is processed, first the emphasis is rendered, then the 1 is output verbatim. When the `@` is encountered, Redcarpet tries to find the "local part" of the address and stops when it encounters an invalid char (i.e. here the `!`). The problem is that when it searches for the local part, Redcarpet rewinds the characters, but here the emphasis is already rendered, so the previous HTML tag is rewound as well and is not correctly closed.

func:
static int php_cli_server_client_read_request_on_message_begin(php_http_parser *parser)
{
    return 0;
}
target: 0
cwe: []
project: php-src
commit_id: 2438490addfbfba51e12246a74588b2382caa08a
hash: 235,222,942,945,654,800,000,000,000,000,000,000,000
size: 4
message: slim post data

func:
_gd2GetHeader (gdIOCtxPtr in, int *sx, int *sy,
               int *cs, int *vers, int *fmt,
               int *ncx, int *ncy, t_chunk_info ** chunkIdx)
{
    int i;
    int ch;
    char id[5];
    t_chunk_info *cidx;
    int sidx;
    int nc;

    GD2_DBG (printf ("Reading gd2 header info\n"));

    for (i = 0; i < 4; i++) {
        ch = gdGetC (in);
        if (ch == EOF) {
            goto fail1;
        };
        id[i] = ch;
    };
    id[4] = 0;

    GD2_DBG (printf ("Got file code: %s\n", id));

    /* Equiv. of 'magick'. */
    if (strcmp (id, GD2_ID) != 0) {
        GD2_DBG (printf ("Not a valid gd2 file\n"));
        goto fail1;
    };

    /* Version */
    if (gdGetWord (vers, in) != 1) {
        goto fail1;
    };
    GD2_DBG (printf ("Version: %d\n", *vers));
    if ((*vers != 1) && (*vers != 2)) {
        GD2_DBG (printf ("Bad version: %d\n", *vers));
        goto fail1;
    };

    /* Image Size */
    if (!gdGetWord (sx, in)) {
        GD2_DBG (printf ("Could not get x-size\n"));
        goto fail1;
    }
    if (!gdGetWord (sy, in)) {
        GD2_DBG (printf ("Could not get y-size\n"));
        goto fail1;
    }
    GD2_DBG (printf ("Image is %dx%d\n", *sx, *sy));

    /* Chunk Size (pixels, not bytes!) */
    if (gdGetWord (cs, in) != 1) {
        goto fail1;
    };
    GD2_DBG (printf ("ChunkSize: %d\n", *cs));
    if ((*cs < GD2_CHUNKSIZE_MIN) || (*cs > GD2_CHUNKSIZE_MAX)) {
        GD2_DBG (printf ("Bad chunk size: %d\n", *cs));
        goto fail1;
    };

    /* Data Format */
    if (gdGetWord (fmt, in) != 1) {
        goto fail1;
    };
    GD2_DBG (printf ("Format: %d\n", *fmt));
    if ((*fmt != GD2_FMT_RAW) && (*fmt != GD2_FMT_COMPRESSED) &&
        (*fmt != GD2_FMT_TRUECOLOR_RAW) &&
        (*fmt != GD2_FMT_TRUECOLOR_COMPRESSED)) {
        GD2_DBG (printf ("Bad data format: %d\n", *fmt));
        goto fail1;
    };

    /* # of chunks wide */
    if (gdGetWord (ncx, in) != 1) {
        goto fail1;
    };
    GD2_DBG (printf ("%d Chunks Wide\n", *ncx));

    /* # of chunks high */
    if (gdGetWord (ncy, in) != 1) {
        goto fail1;
    };
    GD2_DBG (printf ("%d Chunks vertically\n", *ncy));

    if (gd2_compressed (*fmt)) {
        if (*ncx <= 0 || *ncy <= 0 || *ncx > INT_MAX / *ncy) {
            GD2_DBG(printf ("Illegal chunk counts: %d * %d\n", *ncx, *ncy));
            goto fail1;
        }
        nc = (*ncx) * (*ncy);

        GD2_DBG (printf ("Reading %d chunk index entries\n", nc));
        if (overflow2(sizeof(t_chunk_info), nc)) {
            goto fail1;
        }
        sidx = sizeof (t_chunk_info) * nc;
        if (sidx <= 0) {
            goto fail1;
        }
        cidx = gdCalloc (sidx, 1);
        if (cidx == NULL) {
            goto fail1;
        }

        for (i = 0; i < nc; i++) {
            if (gdGetInt (&cidx[i].offset, in) != 1) {
                goto fail2;
            };
            if (gdGetInt (&cidx[i].size, in) != 1) {
                goto fail2;
            };
            if (cidx[i].offset < 0 || cidx[i].size < 0)
                goto fail2;
        };
        *chunkIdx = cidx;
    };

    GD2_DBG (printf ("gd2 header complete\n"));

    return 1;

fail2:
    gdFree(cidx);
fail1:
    return 0;
}
target: 0
cwe: [ "CWE-415", "CWE-190" ]
project: libgd
commit_id: 69d2fd2c597ffc0c217de1238b9bf4d4bceba8e6
hash: 32,482,377,115,082,415,000,000,000,000,000,000,000
size: 130
message:
Fix #354: Signed Integer Overflow gd_io.c

GD2 stores the number of horizontal and vertical chunks as words (i.e. 2 byte unsigned). These values are multiplied and assigned to an int when reading the image, which can cause integer overflows. We have to avoid that, and also make sure that both chunk counts are actually greater than zero. If illegal chunk counts are detected, we bail out from reading the image.

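The guard the message describes is visible inside gd2_compressed() handling above; as a standalone illustration, here is a minimal sketch of the same pattern (function name hypothetical): reject non-positive counts and any product that would exceed INT_MAX before using it.

    #include <limits.h>

    /* Sketch of the overflow guard described above: two 16-bit chunk
     * counts read from the file are multiplied into an int, so both
     * the signs and the product must be checked before any allocation
     * is sized from the result. */
    static int checked_chunk_count(int ncx, int ncy, int *out)
    {
        if (ncx <= 0 || ncy <= 0)      /* counts must be positive */
            return -1;
        if (ncx > INT_MAX / ncy)       /* ncx * ncy would overflow */
            return -1;
        *out = ncx * ncy;
        return 0;
    }
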
func:
exsltSaxonEvalFunction (xmlXPathParserContextPtr ctxt, int nargs)
{
    xmlXPathCompExprPtr expr;
    xmlXPathObjectPtr ret;

    if (nargs != 1) {
        xmlXPathSetArityError(ctxt);
        return;
    }

    if (!xmlXPathStackIsExternal(ctxt)) {
        xmlXPathSetTypeError(ctxt);
        return;
    }

    expr = (xmlXPathCompExprPtr) xmlXPathPopExternal(ctxt);

    ret = xmlXPathCompiledEval(expr, ctxt->context);
    if (ret == NULL) {
        xmlXPathSetError(ctxt, XPATH_EXPR_ERROR);
        return;
    }

    valuePush(ctxt, ret);
}
target: 0
cwe: [ "CWE-119" ]
project: libxslt
commit_id: 8b90c9a699e0eaa98bbeec63a473ddc73aaa238c
hash: 160,658,948,293,205,700,000,000,000,000,000,000,000
size: 24
message:
Fix saxon:line-number with namespace nodes

exsltSaxonLineNumberFunction must make sure not to pass namespace "nodes" to xmlGetLineNo. Otherwise, an OOB heap read results which typically leads to a segfault.

Found with afl-fuzz and ASan.

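A minimal sketch of the check the message implies (the actual patch is not shown here; the shape of the filter is an assumption): in libxml2, namespace declarations masquerade as nodes with type XML_NAMESPACE_DECL but are really xmlNs structs, so they must be filtered out before xmlGetLineNo() reads xmlNode fields.

    #include <libxml/tree.h>

    /* Sketch: refuse namespace "nodes" before calling xmlGetLineNo(),
     * which dereferences fields a real xmlNode has but an xmlNs cast
     * to xmlNodePtr does not. */
    static long safe_line_number(xmlNodePtr node)
    {
        if (node == NULL || node->type == XML_NAMESPACE_DECL)
            return -1;   /* no line number for namespace declarations */
        return xmlGetLineNo(node);
    }
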
func:
void CalendarRegressionTest::TestT11632(void)
{
    UErrorCode status = U_ZERO_ERROR;
    GregorianCalendar cal(TimeZone::createTimeZone("Pacific/Apia"), status);
    if (U_FAILURE(status)) {
        dataerrln("Error creating Calendar: %s", u_errorName(status));
        return;
    }
    failure(status, "Calendar::createInstance(status)");
    cal.clear();
    failure(status, "clear calendar");
    cal.set(UCAL_HOUR, 597);
    failure(status, "set hour value in calendar");
    SimpleDateFormat sdf(UnicodeString("y-MM-dd'T'HH:mm:ss"), status);
    failure(status, "initializing SimpleDateFormat");
    sdf.setCalendar(cal);
    UnicodeString dstr;
    UDate d = cal.getTime(status);
    if (!failure(status, "getTime for date")) {
        sdf.format(d, dstr);
        std::string utf8;
        dstr.toUTF8String(utf8);
        assertEquals("correct datetime displayed for hour value",
                     UnicodeString("1970-01-25T21:00:00"), dstr);
        cal.clear();
        failure(status, "clear calendar");
        cal.set(UCAL_HOUR, 300);
        failure(status, "set hour value in calendar");
        sdf.setCalendar(cal);
        d = cal.getTime(status);
        if (!failure(status, "getTime for initial date")) {
            dstr.remove();
            sdf.format(d, dstr);
            dstr.toUTF8String(utf8);
            assertEquals("correct datetime displayed for hour value",
                         UnicodeString("1970-01-13T12:00:00"), dstr);
        }
    }
}
target: 1
cwe: [ "CWE-190" ]
project: icu
commit_id: 71dd84d4ffd6600a70e5bca56a22b957e6642bd4
hash: 62,007,454,359,432,360,000,000,000,000,000,000,000
size: 36
message:
ICU-12504 in ICU4C Persian cal, use int64_t math for one operation to avoid overflow; add tests in C and J

X-SVN-Rev: 40654

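The fix pattern named in the message, in a minimal sketch (names and the constant are illustrative, not the actual ICU code): a product of two plain 32-bit ints overflows, so one operand is widened to int64_t before multiplying.

    #include <stdint.h>

    /* Sketch: widen before multiplying so the product is computed in
     * 64-bit math instead of overflowing a 32-bit intermediate. */
    static int64_t hours_to_millis(int32_t hours)
    {
        return (int64_t)hours * 3600000;   /* 64-bit multiply */
    }
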
func:
static bool cmd_identify(IDEState *s, uint8_t cmd)
{
    if (s->bs && s->drive_kind != IDE_CD) {
        if (s->drive_kind != IDE_CFATA) {
            ide_identify(s);
        } else {
            ide_cfata_identify(s);
        }
        s->status = READY_STAT | SEEK_STAT;
        ide_transfer_start(s, s->io_buffer, 512, ide_transfer_stop);
        ide_set_irq(s->bus);
        return false;
    } else {
        if (s->drive_kind == IDE_CD) {
            ide_set_signature(s);
        }
        ide_abort_command(s);
    }
    return true;
}
target: 0
cwe: [ "CWE-189" ]
project: qemu
commit_id: 940973ae0b45c9b6817bab8e4cf4df99a9ef83d7
hash: 330,734,498,269,851,950,000,000,000,000,000,000,000
size: 21
message:
ide: Correct improper smart self test counter reset in ide core.

The SMART self test counter was incorrectly being reset to zero, not 1. This had the effect that on every 21st SMART EXECUTE OFFLINE:
 * We would write off the beginning of a dynamically allocated buffer
 * We forgot the SMART history

Fix this.

Signed-off-by: Benoit Canet <[email protected]>
Message-id: [email protected]
Reviewed-by: Markus Armbruster <[email protected]>
Cc: [email protected]
Acked-by: Kevin Wolf <[email protected]>
[PMM: tweaked commit message as per suggestions from Markus]
Signed-off-by: Peter Maydell <[email protected]>

func:
STACK_OF(X509) *X509_chain_up_ref(STACK_OF(X509) *chain)
{
    STACK_OF(X509) *ret = sk_X509_dup(chain);
    int i;

    if (ret == NULL)
        return NULL;
    for (i = 0; i < sk_X509_num(ret); i++) {
        X509 *x = sk_X509_value(ret, i);
        if (!X509_up_ref(x))
            goto err;
    }
    return ret;
 err:
    while (i-- > 0)
        X509_free(sk_X509_value(ret, i));
    sk_X509_free(ret);
    return NULL;
}
target: 0
cwe: [ "CWE-476" ]
project: openssl
commit_id: 8130d654d1de922ea224fa18ee3bc7262edc39c0
hash: 69,054,893,055,948,000,000,000,000,000,000,000,000
size: 21
message:
Fix Null pointer deref in X509_issuer_and_serial_hash()

The OpenSSL public API function X509_issuer_and_serial_hash() attempts to create a unique hash value based on the issuer and serial number data contained within an X509 certificate. However it fails to correctly handle any errors that may occur while parsing the issuer field (which might occur if the issuer field is maliciously constructed). This may subsequently result in a NULL pointer deref and a crash leading to a potential denial of service attack.

The function X509_issuer_and_serial_hash() is never directly called by OpenSSL itself so applications are only vulnerable if they use this function directly and they use it on certificates that may have been obtained from untrusted sources.

CVE-2021-23841

Reviewed-by: Richard Levitte <[email protected]>
Reviewed-by: Paul Dale <[email protected]>

func:
static int ssl_parse_client_hello( ssl_context *ssl )
{
    int ret;
    unsigned int i, j;
    size_t n;
    unsigned int ciph_len, sess_len;
    unsigned int comp_len;
    unsigned int ext_len = 0;
    unsigned char *buf, *p, *ext;
    int renegotiation_info_seen = 0;
    int handshake_failure = 0;

    SSL_DEBUG_MSG( 2, ( "=> parse client hello" ) );

    if( ssl->renegotiation == SSL_INITIAL_HANDSHAKE &&
        ( ret = ssl_fetch_input( ssl, 5 ) ) != 0 )
    {
        SSL_DEBUG_RET( 1, "ssl_fetch_input", ret );
        return( ret );
    }

    buf = ssl->in_hdr;

#if defined(POLARSSL_SSL_SRV_SUPPORT_SSLV2_CLIENT_HELLO)
    if( ( buf[0] & 0x80 ) != 0 )
        return ssl_parse_client_hello_v2( ssl );
#endif

    SSL_DEBUG_BUF( 4, "record header", buf, 5 );

    SSL_DEBUG_MSG( 3, ( "client hello v3, message type: %d", buf[0] ) );
    SSL_DEBUG_MSG( 3, ( "client hello v3, message len.: %d",
                   ( buf[3] << 8 ) | buf[4] ) );
    SSL_DEBUG_MSG( 3, ( "client hello v3, protocol ver: [%d:%d]",
                   buf[1], buf[2] ) );

    /*
     * SSLv3 Client Hello
     *
     * Record layer:
     *     0  .   0   message type
     *     1  .   2   protocol version
     *     3  .   4   message length
     */
    if( buf[0] != SSL_MSG_HANDSHAKE ||
        buf[1] != SSL_MAJOR_VERSION_3 )
    {
        SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
        return( POLARSSL_ERR_SSL_BAD_HS_CLIENT_HELLO );
    }

    n = ( buf[3] << 8 ) | buf[4];

    if( n < 45 || n > 512 )
    {
        SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
        return( POLARSSL_ERR_SSL_BAD_HS_CLIENT_HELLO );
    }

    if( ssl->renegotiation == SSL_INITIAL_HANDSHAKE &&
        ( ret = ssl_fetch_input( ssl, 5 + n ) ) != 0 )
    {
        SSL_DEBUG_RET( 1, "ssl_fetch_input", ret );
        return( ret );
    }

    buf = ssl->in_msg;
    if( !ssl->renegotiation )
        n = ssl->in_left - 5;
    else
        n = ssl->in_msglen;

    ssl->handshake->update_checksum( ssl, buf, n );

    /*
     * SSL layer:
     *     0  .   0   handshake type
     *     1  .   3   handshake length
     *     4  .   5   protocol version
     *     6  .   9   UNIX time()
     *    10  .  37   random bytes
     *    38  .  38   session id length
     *    39  . 38+x  session id
     *   39+x . 40+x  ciphersuitelist length
     *   41+x .  ..   ciphersuitelist
     *    ..  .  ..   compression alg.
     *    ..  .  ..   extensions
     */
    SSL_DEBUG_BUF( 4, "record contents", buf, n );

    SSL_DEBUG_MSG( 3, ( "client hello v3, handshake type: %d", buf[0] ) );
    SSL_DEBUG_MSG( 3, ( "client hello v3, handshake len.: %d",
                   ( buf[1] << 16 ) | ( buf[2] << 8 ) | buf[3] ) );
    SSL_DEBUG_MSG( 3, ( "client hello v3, max. version: [%d:%d]",
                   buf[4], buf[5] ) );

    /*
     * Check the handshake type and protocol version
     */
    if( buf[0] != SSL_HS_CLIENT_HELLO ||
        buf[4] != SSL_MAJOR_VERSION_3 )
    {
        SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
        return( POLARSSL_ERR_SSL_BAD_HS_CLIENT_HELLO );
    }

    ssl->major_ver = SSL_MAJOR_VERSION_3;
    ssl->minor_ver = ( buf[5] <= SSL_MINOR_VERSION_3 )
                     ? buf[5] : SSL_MINOR_VERSION_3;

    if( ssl->minor_ver < ssl->min_minor_ver )
    {
        SSL_DEBUG_MSG( 1, ( "client only supports ssl smaller than minimum"
                            " [%d:%d] < [%d:%d]",
                            ssl->major_ver, ssl->minor_ver,
                            ssl->min_major_ver, ssl->min_minor_ver ) );

        ssl_send_alert_message( ssl, SSL_ALERT_LEVEL_FATAL,
                                SSL_ALERT_MSG_PROTOCOL_VERSION );

        return( POLARSSL_ERR_SSL_BAD_HS_PROTOCOL_VERSION );
    }

    ssl->max_major_ver = buf[4];
    ssl->max_minor_ver = buf[5];

    memcpy( ssl->handshake->randbytes, buf + 6, 32 );

    /*
     * Check the handshake message length
     */
    if( buf[1] != 0 ||
        n != (unsigned int) 4 + ( ( buf[2] << 8 ) | buf[3] ) )
    {
        SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
        return( POLARSSL_ERR_SSL_BAD_HS_CLIENT_HELLO );
    }

    /*
     * Check the session length
     */
    sess_len = buf[38];

    if( sess_len > 32 )
    {
        SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
        return( POLARSSL_ERR_SSL_BAD_HS_CLIENT_HELLO );
    }

    ssl->session_negotiate->length = sess_len;
    memset( ssl->session_negotiate->id, 0,
            sizeof( ssl->session_negotiate->id ) );
    memcpy( ssl->session_negotiate->id, buf + 39,
            ssl->session_negotiate->length );

    /*
     * Check the ciphersuitelist length
     */
    ciph_len = ( buf[39 + sess_len] << 8 ) | ( buf[40 + sess_len] );

    if( ciph_len < 2 || ciph_len > 256 || ( ciph_len % 2 ) != 0 )
    {
        SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
        return( POLARSSL_ERR_SSL_BAD_HS_CLIENT_HELLO );
    }

    /*
     * Check the compression algorithms length
     */
    comp_len = buf[41 + sess_len + ciph_len];

    if( comp_len < 1 || comp_len > 16 )
    {
        SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
        return( POLARSSL_ERR_SSL_BAD_HS_CLIENT_HELLO );
    }

    /*
     * Check the extension length
     */
    if( n > 42 + sess_len + ciph_len + comp_len )
    {
        ext_len = ( buf[42 + sess_len + ciph_len + comp_len] << 8 )
                | ( buf[43 + sess_len + ciph_len + comp_len]      );

        if( ( ext_len > 0 && ext_len < 4 ) ||
            n != 44 + sess_len + ciph_len + comp_len + ext_len )
        {
            SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
            SSL_DEBUG_BUF( 3, "Ext",
                           buf + 44 + sess_len + ciph_len + comp_len,
                           ext_len );
            return( POLARSSL_ERR_SSL_BAD_HS_CLIENT_HELLO );
        }
    }

    ssl->session_negotiate->compression = SSL_COMPRESS_NULL;
#if defined(POLARSSL_ZLIB_SUPPORT)
    for( i = 0; i < comp_len; ++i )
    {
        if( buf[42 + sess_len + ciph_len + i] == SSL_COMPRESS_DEFLATE )
        {
            ssl->session_negotiate->compression = SSL_COMPRESS_DEFLATE;
            break;
        }
    }
#endif

    SSL_DEBUG_BUF( 3, "client hello, random bytes", buf + 6, 32 );
    SSL_DEBUG_BUF( 3, "client hello, session id", buf + 38, sess_len );
    SSL_DEBUG_BUF( 3, "client hello, ciphersuitelist",
                   buf + 41 + sess_len, ciph_len );
    SSL_DEBUG_BUF( 3, "client hello, compression",
                   buf + 42 + sess_len + ciph_len, comp_len );

    /*
     * Check for TLS_EMPTY_RENEGOTIATION_INFO_SCSV
     */
    for( i = 0, p = buf + 41 + sess_len; i < ciph_len; i += 2, p += 2 )
    {
        if( p[0] == 0 && p[1] == SSL_EMPTY_RENEGOTIATION_INFO )
        {
            SSL_DEBUG_MSG( 3, ( "received TLS_EMPTY_RENEGOTIATION_INFO " ) );
            if( ssl->renegotiation == SSL_RENEGOTIATION )
            {
                SSL_DEBUG_MSG( 1, ( "received RENEGOTIATION SCSV during renegotiation" ) );

                if( ( ret = ssl_send_fatal_handshake_failure( ssl ) ) != 0 )
                    return( ret );

                return( POLARSSL_ERR_SSL_BAD_HS_CLIENT_HELLO );
            }
            ssl->secure_renegotiation = SSL_SECURE_RENEGOTIATION;
            break;
        }
    }

    /*
     * Search for a matching ciphersuite
     */
    for( i = 0; ssl->ciphersuites[ssl->minor_ver][i] != 0; i++ )
    {
        for( j = 0, p = buf + 41 + sess_len; j < ciph_len; j += 2, p += 2 )
        {
            if( p[0] == 0 && p[1] == ssl->ciphersuites[ssl->minor_ver][i] )
                goto have_ciphersuite;
        }
    }

    SSL_DEBUG_MSG( 1, ( "got no ciphersuites in common" ) );

    return( POLARSSL_ERR_SSL_NO_CIPHER_CHOSEN );

have_ciphersuite:
    ssl->session_negotiate->ciphersuite = ssl->ciphersuites[ssl->minor_ver][i];

    ssl_optimize_checksum( ssl, ssl->session_negotiate->ciphersuite );

    ext = buf + 44 + sess_len + ciph_len + comp_len;

    while( ext_len )
    {
        unsigned int ext_id   = ( ( ext[0] << 8 ) | ( ext[1] ) );
        unsigned int ext_size = ( ( ext[2] << 8 ) | ( ext[3] ) );

        if( ext_size + 4 > ext_len )
        {
            SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
            return( POLARSSL_ERR_SSL_BAD_HS_CLIENT_HELLO );
        }

        switch( ext_id )
        {
        case TLS_EXT_SERVERNAME:
            SSL_DEBUG_MSG( 3, ( "found ServerName extension" ) );
            if( ssl->f_sni == NULL )
                break;

            ret = ssl_parse_servername_ext( ssl, ext + 4, ext_size );
            if( ret != 0 )
                return( ret );
            break;

        case TLS_EXT_RENEGOTIATION_INFO:
            SSL_DEBUG_MSG( 3, ( "found renegotiation extension" ) );
            renegotiation_info_seen = 1;

            ret = ssl_parse_renegotiation_info( ssl, ext + 4, ext_size );
            if( ret != 0 )
                return( ret );
            break;

        case TLS_EXT_SIG_ALG:
            SSL_DEBUG_MSG( 3, ( "found signature_algorithms extension" ) );
            if( ssl->renegotiation == SSL_RENEGOTIATION )
                break;

            ret = ssl_parse_signature_algorithms_ext( ssl, ext + 4, ext_size );
            if( ret != 0 )
                return( ret );
            break;

        default:
            SSL_DEBUG_MSG( 3, ( "unknown extension found: %d (ignoring)",
                           ext_id ) );
        }

        ext_len -= 4 + ext_size;
        ext += 4 + ext_size;

        if( ext_len > 0 && ext_len < 4 )
        {
            SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
            return( POLARSSL_ERR_SSL_BAD_HS_CLIENT_HELLO );
        }
    }

    /*
     * Renegotiation security checks
     */
    if( ssl->secure_renegotiation == SSL_LEGACY_RENEGOTIATION &&
        ssl->allow_legacy_renegotiation == SSL_LEGACY_BREAK_HANDSHAKE )
    {
        SSL_DEBUG_MSG( 1, ( "legacy renegotiation, breaking off handshake" ) );
        handshake_failure = 1;
    }
    else if( ssl->renegotiation == SSL_RENEGOTIATION &&
             ssl->secure_renegotiation == SSL_SECURE_RENEGOTIATION &&
             renegotiation_info_seen == 0 )
    {
        SSL_DEBUG_MSG( 1, ( "renegotiation_info extension missing (secure)" ) );
        handshake_failure = 1;
    }
    else if( ssl->renegotiation == SSL_RENEGOTIATION &&
             ssl->secure_renegotiation == SSL_LEGACY_RENEGOTIATION &&
             ssl->allow_legacy_renegotiation == SSL_LEGACY_NO_RENEGOTIATION )
    {
        SSL_DEBUG_MSG( 1, ( "legacy renegotiation not allowed" ) );
        handshake_failure = 1;
    }
    else if( ssl->renegotiation == SSL_RENEGOTIATION &&
             ssl->secure_renegotiation == SSL_LEGACY_RENEGOTIATION &&
             renegotiation_info_seen == 1 )
    {
        SSL_DEBUG_MSG( 1, ( "renegotiation_info extension present (legacy)" ) );
        handshake_failure = 1;
    }

    if( handshake_failure == 1 )
    {
        if( ( ret = ssl_send_fatal_handshake_failure( ssl ) ) != 0 )
            return( ret );

        return( POLARSSL_ERR_SSL_BAD_HS_CLIENT_HELLO );
    }

    ssl->in_left = 0;
    ssl->state++;

    SSL_DEBUG_MSG( 2, ( "<= parse client hello" ) );

    return( 0 );
}
target: 0
cwe: [ "CWE-310" ]
project: polarssl
commit_id: 43f9799ce61c6392a014d0a2ea136b4b3a9ee194
hash: 86,688,058,331,029,580,000,000,000,000,000,000,000
size: 365
message: RSA blinding on CRT operations to counter timing attacks

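RSA blinding, the countermeasure this one-line message names, works by never running the private-key (CRT) operation directly on attacker-chosen input: for a random r coprime to n, the library computes (x * r^e)^d = x^d * r (mod n) and then strips the factor with r^-1 mod n, so timing no longer correlates with x. A self-contained toy demonstration follows; the tiny textbook key, the plain powmod standing in for the library's CRT exponentiation, and all names are illustrative, not PolarSSL's code.

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t mulmod(uint64_t a, uint64_t b, uint64_t n) {
        return (a * b) % n;              /* safe for these tiny values */
    }
    static uint64_t powmod(uint64_t b, uint64_t e, uint64_t n) {
        uint64_t r = 1;
        for (b %= n; e; e >>= 1, b = mulmod(b, b, n))
            if (e & 1) r = mulmod(r, b, n);
        return r;
    }

    int main(void) {
        /* textbook toy key: n = 3233 = 61*53, e = 17, d = 2753,
         * phi(n) = 3120, and e*d = 1 (mod phi(n)) */
        uint64_t n = 3233, e = 17, d = 2753;
        uint64_t x = 1234;               /* input to the private op */
        uint64_t r = 99;                 /* random blinding factor, gcd(r,n)=1 */
        uint64_t rinv = powmod(r, 3119, n);      /* r^(phi-1) = r^-1 mod n */
        uint64_t blinded = mulmod(x, powmod(r, e, n), n);  /* x * r^e */
        uint64_t y = powmod(blinded, d, n);      /* private (CRT) op, blinded */
        y = mulmod(y, rinv, n);                  /* unblind: y = x^d mod n */
        printf("%llu == %llu\n", (unsigned long long)y,
               (unsigned long long)powmod(x, d, n));
        return 0;
    }
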
func:
static int elo_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
    struct elo_priv *priv;
    int ret;
    struct usb_device *udev;

    if (!hid_is_usb(hdev))
        return -EINVAL;

    priv = kzalloc(sizeof(*priv), GFP_KERNEL);
    if (!priv)
        return -ENOMEM;

    INIT_DELAYED_WORK(&priv->work, elo_work);
    udev = interface_to_usbdev(to_usb_interface(hdev->dev.parent));
    priv->usbdev = usb_get_dev(udev);

    hid_set_drvdata(hdev, priv);

    ret = hid_parse(hdev);
    if (ret) {
        hid_err(hdev, "parse failed\n");
        goto err_free;
    }

    ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
    if (ret) {
        hid_err(hdev, "hw start failed\n");
        goto err_free;
    }

    if (elo_broken_firmware(priv->usbdev)) {
        hid_info(hdev, "broken firmware found, installing workaround\n");
        queue_delayed_work(wq, &priv->work, ELO_PERIODIC_READ_INTERVAL);
    }

    return 0;
err_free:
    kfree(priv);
    return ret;
}
target: 1
cwe: [ "CWE-200", "CWE-401" ]
project: linux
commit_id: 817b8b9c5396d2b2d92311b46719aad5d3339dbe
hash: 47,515,603,588,365,830,000,000,000,000,000,000,000
size: 41
message:
HID: elo: fix memory leak in elo_probe

When hid_parse() in elo_probe() fails, it forgets to call usb_put_dev to decrease the refcount. Fix this by adding usb_put_dev() in the error handling code of elo_probe().

Fixes: fbf42729d0e9 ("HID: elo: update the reference count of the usb device structure")
Reported-by: syzkaller <[email protected]>
Signed-off-by: Dongliang Mu <[email protected]>
Signed-off-by: Jiri Kosina <[email protected]>

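A minimal userland sketch of the leak class fixed here (the kernel patch itself adds usb_put_dev() to the err_free path above; the toy refcount and names below are illustrative): once a counted reference has been taken, every later error exit must drop it before freeing the private data.

    #include <stdlib.h>

    struct dev  { int refs; };
    struct priv { struct dev *dev; };

    static struct dev *dev_get(struct dev *d) { d->refs++; return d; }
    static void dev_put(struct dev *d)        { d->refs--; }

    /* Sketch: the error path must undo dev_get() (like usb_put_dev())
     * and then free priv, in reverse order of acquisition. */
    static int probe(struct dev *d, int parse_ok)
    {
        struct priv *p = calloc(1, sizeof(*p));
        if (!p)
            return -1;
        p->dev = dev_get(d);      /* counted reference, like usb_get_dev() */
        if (!parse_ok)
            goto err_free;
        return 0;
    err_free:
        dev_put(p->dev);          /* the previously missing put */
        free(p);
        return -1;
    }
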
func:
QQueueItem(SQL::Query* Q, const std::string& S, SQLConnection* C)
    : q(Q), query(S), c(C)
{
}
target: 0
cwe: [ "CWE-476" ]
project: inspircd
commit_id: 8745660fcdac7c1b80c94cfc0ff60928cd4dd4b7
hash: 74,578,557,688,248,960,000,000,000,000,000,000,000
size: 1
message: Initialise and deallocate the MySQL library correctly.

func:
static void primaryexp (LexState *ls, expdesc *v) {
  /* primaryexp -> NAME | '(' expr ')' */
  switch (ls->t.token) {
    case '(': {
      int line = ls->linenumber;
      luaX_next(ls);
      expr(ls, v);
      check_match(ls, ')', '(', line);
      luaK_dischargevars(ls->fs, v);
      return;
    }
    case TK_NAME: {
      singlevar(ls, v);
      return;
    }
    default: {
      luaX_syntaxerror(ls, "unexpected symbol");
    }
  }
}
target: 0
cwe: [ "CWE-125" ]
project: lua
commit_id: 1f3c6f4534c6411313361697d98d1145a1f030fa
hash: 101,157,195,668,261,070,000,000,000,000,000,000,000
size: 20
message: Bug: Lua can generate wrong code when _ENV is <const>

func:
static int cuse_channel_release(struct inode *inode, struct file *file)
{
    struct fuse_dev *fud = file->private_data;
    struct cuse_conn *cc = fc_to_cc(fud->fc);
    int rc;

    /* remove from the conntbl, no more access from this point on */
    mutex_lock(&cuse_lock);
    list_del_init(&cc->list);
    mutex_unlock(&cuse_lock);

    /* remove device */
    if (cc->dev)
        device_unregister(cc->dev);
    if (cc->cdev) {
        unregister_chrdev_region(cc->cdev->dev, 1);
        cdev_del(cc->cdev);
    }

    rc = fuse_dev_release(inode, file);   /* puts the base reference */

    return rc;
}
target: 1
cwe: [ "CWE-399" ]
project: linux
commit_id: 2c5816b4beccc8ba709144539f6fdd764f8fa49c
hash: 154,068,305,918,509,360,000,000,000,000,000,000,000
size: 23
message:
cuse: fix memory leak

The problem is that fuse_dev_alloc() acquires an extra reference to cc.fc, and the original ref count is never dropped.

Reported-by: Colin Ian King <[email protected]>
Signed-off-by: Miklos Szeredi <[email protected]>
Fixes: cc080e9e9be1 ("fuse: introduce per-instance fuse_dev structure")
Cc: <[email protected]> # v4.2+

func:
static bool fill_dynamic_entry(ELFOBJ *bin, ut64 entry_offset, Elf_(Dyn) *d) {
    ut8 sdyn[sizeof (Elf_(Dyn))] = {0};
    int j = 0;
    int len = r_buf_read_at (bin->b, entry_offset, sdyn, sizeof (Elf_(Dyn)));
    if (len < 1) {
        return false;
    }
    d->d_tag = R_BIN_ELF_READWORD (sdyn, j);
    d->d_un.d_ptr = R_BIN_ELF_READWORD (sdyn, j);
    return true;
}
target: 0
cwe: [ "CWE-787" ]
project: radare2
commit_id: 3ecdbf8e21186a9c5a4d3cfa3b1e9fd27045340e
hash: 142,749,595,712,062,510,000,000,000,000,000,000,000
size: 13
message:
Fix 4 byte oobread in msp430 disassembler ##crash

* Only crashes with asan builds
* Add missing =SN register
* Reported by cnitlrt via huntrdev
* BountyID: 1c22055b-b015-47a8-a57b-4982978751d0

func:
static void d_wait_lookup(struct dentry *dentry)
{
    if (d_in_lookup(dentry)) {
        DECLARE_WAITQUEUE(wait, current);
        add_wait_queue(dentry->d_wait, &wait);
        do {
            set_current_state(TASK_UNINTERRUPTIBLE);
            spin_unlock(&dentry->d_lock);
            schedule();
            spin_lock(&dentry->d_lock);
        } while (d_in_lookup(dentry));
    }
}
target: 0
cwe: [ "CWE-362", "CWE-399" ]
project: linux
commit_id: 49d31c2f389acfe83417083e1208422b4091cd9e
hash: 57,211,178,282,332,080,000,000,000,000,000,000,000
size: 13
message:
dentry name snapshots

take_dentry_name_snapshot() takes a safe snapshot of dentry name; if the name is a short one, it gets copied into caller-supplied structure, otherwise an extra reference to external name is grabbed (those are never modified). In either case the pointer to stable string is stored into the same structure.

dentry must be held by the caller of take_dentry_name_snapshot(), but may be freely dropped afterwards - the snapshot will stay until destroyed by release_dentry_name_snapshot().

Intended use:
    struct name_snapshot s;

    take_dentry_name_snapshot(&s, dentry);
    ...
    access s.name
    ...
    release_dentry_name_snapshot(&s);

Replaces fsnotify_oldname_...(), gets used in fsnotify to obtain the name to pass down with event.

Signed-off-by: Al Viro <[email protected]>

func:
static long vnc_client_read_plain(VncState *vs)
{
    int ret;
    VNC_DEBUG("Read plain %p size %zd offset %zd\n",
              vs->input.buffer, vs->input.capacity, vs->input.offset);
    buffer_reserve(&vs->input, 4096);
    ret = vnc_client_read_buf(vs, buffer_end(&vs->input), 4096);
    if (!ret)
        return 0;
    vs->input.offset += ret;
    return ret;
}
target: 0
cwe: [ "CWE-125" ]
project: qemu
commit_id: bea60dd7679364493a0d7f5b54316c767cf894ef
hash: 221,613,960,545,921,560,000,000,000,000,000,000,000
size: 12
message:
ui/vnc: fix potential memory corruption issues

this patch makes the VNC server work correctly if the server surface and the guest surface have different sizes. Basically the server surface is adjusted to not exceed VNC_MAX_WIDTH x VNC_MAX_HEIGHT and additionally the width is rounded up to multiple of VNC_DIRTY_PIXELS_PER_BIT.

If we have a resolution whose width is not dividable by VNC_DIRTY_PIXELS_PER_BIT we now get a small black bar on the right of the screen. If the surface is too big to fit the limits only the upper left area is shown.

On top of that this fixes 2 memory corruption issues: The first was actually discovered during playing around with a Windows 7 vServer. During resolution change in Windows 7 it happens sometimes that Windows changes to an intermediate resolution where server_stride % cmp_bytes != 0 (in vnc_refresh_server_surface). This happens only if width % VNC_DIRTY_PIXELS_PER_BIT != 0.

The second is a theoretical issue, but is maybe exploitable by the guest. If for some reason the guest surface size is bigger than VNC_MAX_WIDTH x VNC_MAX_HEIGHT we end up in severe corruption since this limit is nowhere enforced.

Signed-off-by: Peter Lieven <[email protected]>
Signed-off-by: Gerd Hoffmann <[email protected]>

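A minimal sketch of the two adjustments the message describes: clamp the surface to the VNC maximums and round the width up to a multiple of VNC_DIRTY_PIXELS_PER_BIT. The constant values below are assumptions for illustration (and the maximum width is assumed to itself be such a multiple, so the round-up cannot exceed it); the real values live in QEMU's vnc.h.

    #define VNC_MAX_WIDTH            2560   /* illustrative */
    #define VNC_MAX_HEIGHT           2048   /* illustrative */
    #define VNC_DIRTY_PIXELS_PER_BIT 16     /* illustrative */

    /* Sketch: enforce the limits and make every dirty-bitmap bit
     * cover a full run of pixels. */
    static void clamp_server_surface(int *w, int *h)
    {
        if (*w > VNC_MAX_WIDTH)  *w = VNC_MAX_WIDTH;
        if (*h > VNC_MAX_HEIGHT) *h = VNC_MAX_HEIGHT;
        *w = (*w + VNC_DIRTY_PIXELS_PER_BIT - 1)
             / VNC_DIRTY_PIXELS_PER_BIT * VNC_DIRTY_PIXELS_PER_BIT;
    }
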
func:
int32_t FontData::BoundOffset(int32_t offset) {
  return offset + bound_offset_;
}
target: 0
cwe: [ "CWE-703", "CWE-189" ]
project: sfntly
commit_id: de776d4ef06ca29c240de3444348894f032b03ff
hash: 265,764,298,529,319,840,000,000,000,000,000,000,000
size: 3
message:
Check for integer overflow in sfntly::FontData::Bound().

Also delete dead code and cleanup some nits.

This is cl/96914065.

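The unchecked addition in BoundOffset() above is the kind of sum the message's check targets: a signed 32-bit offset plus a signed bound can wrap. A minimal sketch of such a guard (function name hypothetical):

    #include <stdint.h>

    /* Sketch: validate offset + bound in int32_t before using the
     * sum to index font data. */
    static int checked_bound(int32_t offset, int32_t bound, int32_t *out)
    {
        if (offset < 0 || bound < 0 || offset > INT32_MAX - bound)
            return -1;                 /* would overflow int32_t */
        *out = offset + bound;
        return 0;
    }
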
func:
mono_field_get_object (MonoDomain *domain, MonoClass *klass, MonoClassField *field)
{
    MonoReflectionField *res;
    static MonoClass *monofield_klass;

    CHECK_OBJECT (MonoReflectionField *, field, klass);
    if (!monofield_klass)
        monofield_klass = mono_class_from_name (mono_defaults.corlib,
                                                "System.Reflection", "MonoField");
    res = (MonoReflectionField *)mono_object_new (domain, monofield_klass);
    res->klass = klass;
    res->field = field;
    MONO_OBJECT_SETREF (res, name, mono_string_new (domain, mono_field_get_name (field)));

    if (is_field_on_inst (field)) {
        res->attrs = get_field_on_inst_generic_type (field)->attrs;
        MONO_OBJECT_SETREF (res, type, mono_type_get_object (domain, field->type));
    } else {
        if (field->type)
            MONO_OBJECT_SETREF (res, type, mono_type_get_object (domain, field->type));
        res->attrs = mono_field_get_flags (field);
    }
    CACHE_OBJECT (MonoReflectionField *, field, res, klass);
}
target: 0
cwe: [ "CWE-20" ]
project: mono
commit_id: 65292a69c837b8a5f7a392d34db63de592153358
hash: 262,571,583,432,090,400,000,000,000,000,000,000,000
size: 23
message:
Handle invalid instantiation of generic methods.

* verify.c: Add new function to internal verifier API to check method instantiations.
* reflection.c (mono_reflection_bind_generic_method_parameters): Check the instantiation before returning it.

Fixes #655847

func:
static void cbq_watchdog(unsigned long arg)
{
    struct Qdisc *sch = (struct Qdisc *)arg;

    sch->flags &= ~TCQ_F_THROTTLED;
    netif_schedule(sch->dev);
}
target: 0
cwe: [ "CWE-200" ]
project: linux-2.6
commit_id: 8a47077a0b5aa2649751c46e7a27884e6686ccbf
hash: 155,497,662,974,472,300,000,000,000,000,000,000,000
size: 7
message:
[NETLINK]: Missing padding fields in dumped structures

Plug holes with padding fields and initialize them to zero.

Signed-off-by: Patrick McHardy <[email protected]>
Signed-off-by: David S. Miller <[email protected]>

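The infoleak class this commit plugs, in a minimal sketch (struct layout illustrative): a struct copied out to user space can contain compiler-inserted padding holes carrying stale kernel bytes, so the holes are made explicit and the whole struct zeroed before filling.

    #include <string.h>
    #include <stdint.h>

    struct sample_stats {
        uint64_t bytes;
        uint32_t packets;
        uint32_t pad;        /* explicit padding field, zeroed below */
    };

    /* Sketch: memset clears both named fields and any remaining
     * padding before the struct is dumped to user space. */
    static void fill_stats(struct sample_stats *st)
    {
        memset(st, 0, sizeof(*st));
        st->bytes = 1;
        st->packets = 1;
    }
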
func:
perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
{
    struct ring_buffer *rb = NULL, *old_rb = NULL;
    int ret = -EINVAL;

    if (!output_event)
        goto set;

    /* don't allow circular references */
    if (event == output_event)
        goto out;

    /*
     * Don't allow cross-cpu buffers
     */
    if (output_event->cpu != event->cpu)
        goto out;

    /*
     * If its not a per-cpu rb, it must be the same task.
     */
    if (output_event->cpu == -1 && output_event->ctx != event->ctx)
        goto out;

set:
    mutex_lock(&event->mmap_mutex);
    /* Can't redirect output if we've got an active mmap() */
    if (atomic_read(&event->mmap_count))
        goto unlock;

    if (output_event) {
        /* get the rb we want to redirect to */
        rb = ring_buffer_get(output_event);
        if (!rb)
            goto unlock;
    }

    old_rb = event->rb;
    rcu_assign_pointer(event->rb, rb);
    if (old_rb)
        ring_buffer_detach(event, old_rb);
    ret = 0;
unlock:
    mutex_unlock(&event->mmap_mutex);

    if (old_rb)
        ring_buffer_put(old_rb);
out:
    return ret;
}
target: 0
cwe: [ "CWE-703", "CWE-189" ]
project: linux
commit_id: 8176cced706b5e5d15887584150764894e94e02f
hash: 104,277,506,485,943,390,000,000,000,000,000,000,000
size: 50
message:
perf: Treat attr.config as u64 in perf_swevent_init()

Trinity discovered that we fail to check all 64 bits of attr.config passed by user space, resulting to out-of-bounds access of the perf_swevent_enabled array in sw_perf_event_destroy().

Introduced in commit b0a873ebb ("perf: Register PMU implementations").

Signed-off-by: Tommi Rantala <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: [email protected]
Cc: Paul Mackerras <[email protected]>
Cc: Arnaldo Carvalho de Melo <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>

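A minimal sketch of the truncation bug the message describes (bound value illustrative): attr.config is a u64, so narrowing it to a 32-bit type before the bounds check lets values like 0x100000000 alias index 0, pass the check, and index out of bounds later. The fix is to compare the full 64 bits.

    #include <stdint.h>

    #define SW_EVENT_MAX 12u    /* illustrative bound, not the real value */

    /* Buggy shape: u32 id = (u32)config; if (id >= SW_EVENT_MAX) ...
     * Fixed shape: keep the full width for the comparison. */
    static int sw_event_ok(uint64_t config)
    {
        return config < SW_EVENT_MAX;   /* no truncating cast first */
    }
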
func:
pkcs11dsa_isprivate(const dst_key_t *key) {
    pk11_object_t *dsa = key->keydata.pkey;
    CK_ATTRIBUTE *attr;

    if (dsa == NULL)
        return (false);
    attr = pk11_attribute_bytype(dsa, CKA_VALUE2);
    return (attr != NULL || dsa->ontoken);
}
target: 0
cwe: [ "CWE-617" ]
project: bind9
commit_id: 8d807cc21655eaa6e6a08afafeec3682c0f3f2ab
hash: 242,535,550,416,423,370,000,000,000,000,000,000,000
size: 9
message:
Fix crash in pk11_numbits() when native-pkcs11 is used

When pk11_numbits() is passed a user provided input that contains all zeroes (via crafted DNS message), it would crash with assertion failure. Fix that by properly handling such input.

func:
static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
    struct mii_ioctl_data *data = if_mii(ifr);
    struct tg3 *tp = netdev_priv(dev);
    int err;

    if (tg3_flag(tp, USE_PHYLIB)) {
        struct phy_device *phydev;
        if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
            return -EAGAIN;
        phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
        return phy_mii_ioctl(phydev, ifr, cmd);
    }

    switch (cmd) {
    case SIOCGMIIPHY:
        data->phy_id = tp->phy_addr;

        /* fallthru */
    case SIOCGMIIREG: {
        u32 mii_regval;

        if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
            break;          /* We have no PHY */

        if (!netif_running(dev))
            return -EAGAIN;

        spin_lock_bh(&tp->lock);
        err = __tg3_readphy(tp, data->phy_id & 0x1f,
                            data->reg_num & 0x1f, &mii_regval);
        spin_unlock_bh(&tp->lock);

        data->val_out = mii_regval;

        return err;
    }

    case SIOCSMIIREG:
        if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
            break;          /* We have no PHY */

        if (!netif_running(dev))
            return -EAGAIN;

        spin_lock_bh(&tp->lock);
        err = __tg3_writephy(tp, data->phy_id & 0x1f,
                             data->reg_num & 0x1f, data->val_in);
        spin_unlock_bh(&tp->lock);

        return err;

    case SIOCSHWTSTAMP:
        return tg3_hwtstamp_ioctl(dev, ifr, cmd);

    default:
        /* do nothing */
        break;
    }
    return -EOPNOTSUPP;
}
target: 0
cwe: [ "CWE-476", "CWE-119" ]
project: linux
commit_id: 715230a44310a8cf66fbfb5a46f9a62a9b2de424
hash: 201,646,525,651,014,260,000,000,000,000,000,000,000
size: 61
message:
tg3: fix length overflow in VPD firmware parsing

Commit 184b89044fb6e2a74611dafa69b1dce0d98612c6 ("tg3: Use VPD fw version when present") introduced VPD parsing that contained a potential length overflow.

Limit the hardware's reported firmware string length (max 255 bytes) to stay inside the driver's firmware string length (32 bytes). On overflow, truncate the formatted firmware string instead of potentially overwriting portions of the tg3 struct.

http://cansecwest.com/slides/2013/PrivateCore%20CSW%202013.pdf

Signed-off-by: Kees Cook <[email protected]>
Reported-by: Oded Horovitz <[email protected]>
Reported-by: Brad Spengler <[email protected]>
Cc: [email protected]
Cc: Matt Carlson <[email protected]>
Signed-off-by: David S. Miller <[email protected]>

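A minimal sketch of the clamp-and-truncate fix the message describes (buffer size taken from the message; the helper and its name are illustrative): the device may report up to 255 bytes, but the destination holds 32, so the copy is bounded by the destination and always NUL-terminated.

    #include <string.h>
    #include <stddef.h>

    #define FW_STR_LEN 32    /* driver's firmware string buffer size */

    /* Sketch: truncate instead of letting a device-reported length
     * overwrite fields adjacent to the destination buffer. */
    static void copy_fw_version(char dst[FW_STR_LEN],
                                const char *vpd, size_t vpd_len)
    {
        size_t n = vpd_len < FW_STR_LEN - 1 ? vpd_len : FW_STR_LEN - 1;
        memcpy(dst, vpd, n);
        dst[n] = '\0';
    }
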
func:
GF_Box *gitn_New()
{
    ISOM_DECL_BOX_ALLOC(GroupIdToNameBox, GF_ISOM_BOX_TYPE_GITN);
    return (GF_Box *)tmp;
}
target: 0
cwe: [ "CWE-125" ]
project: gpac
commit_id: bceb03fd2be95097a7b409ea59914f332fb6bc86
hash: 315,418,981,447,605,520,000,000,000,000,000,000,000
size: 5
message: fixed 2 possible heap overflows (inc. #1088)

func:
TEST_F(ConnectionHandlerTest, TransportProtocolDefault) {
  Network::ListenerCallbacks* listener_callbacks;
  auto listener = new NiceMock<Network::MockListener>();
  TestListener* test_listener =
      addListener(1, true, false, "test_listener", listener, &listener_callbacks);
  EXPECT_CALL(*socket_factory_, localAddress()).WillRepeatedly(ReturnRef(local_address_));
  handler_->addListener(absl::nullopt, *test_listener);

  Network::MockConnectionSocket* accepted_socket = new NiceMock<Network::MockConnectionSocket>();
  EXPECT_CALL(*accepted_socket, detectedTransportProtocol())
      .WillOnce(Return(absl::string_view("")));
  EXPECT_CALL(*accepted_socket, setDetectedTransportProtocol(absl::string_view("raw_buffer")));
  EXPECT_CALL(manager_, findFilterChain(_)).WillOnce(Return(nullptr));
  listener_callbacks->onAccept(Network::ConnectionSocketPtr{accepted_socket});
  EXPECT_CALL(*listener, onDestroy());
}
target: 0
cwe: [ "CWE-400" ]
project: envoy
commit_id: dfddb529e914d794ac552e906b13d71233609bf7
hash: 157,004,555,303,717,910,000,000,000,000,000,000,000
size: 17
message:
listener: Add configurable accepted connection limits (#153)

Add support for per-listener limits on accepted connections.

Signed-off-by: Tony Allen <[email protected]>

func:
zzip_mem_disk_load(ZZIP_MEM_DISK* dir, ZZIP_DISK* disk)
{
    if (! dir || ! disk) { errno = EINVAL; return -1; }
    if (dir->list) zzip_mem_disk_unload(dir);
    ___ long count = 0;
    ___ struct zzip_disk_entry* entry = zzip_disk_findfirst(disk);
    for (; entry ; entry = zzip_disk_findnext(disk, entry)) {
        ZZIP_MEM_ENTRY* item = zzip_mem_entry_new(disk, entry);
        if (! item) goto error;
        if (dir->last) { dir->last->zz_next = item; }   /* chain last */
        else { dir->list = item; };
        dir->last = item;                               /* to earlier */
        count++;
    }
    ____;
    dir->disk = disk;
    return count;
    ____;
error:
    zzip_mem_disk_unload (dir);
    return -1;
}
target: 0
cwe: [ "CWE-119" ]
project: zziplib
commit_id: 596d9dfce2624e849417d4301e8d67935608aa5e
hash: 147,757,430,096,148,950,000,000,000,000,000,000,000
size: 19
message: memdisk (.)

func:
MagickExport MagickBooleanType GetImageEntropy(const Image *image,
  double *entropy, ExceptionInfo *exception)
{
  ChannelStatistics *channel_statistics;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename);
  channel_statistics = GetImageStatistics(image, exception);
  if (channel_statistics == (ChannelStatistics *) NULL)
    return (MagickFalse);
  *entropy = channel_statistics[CompositePixelChannel].entropy;
  channel_statistics = (ChannelStatistics *) RelinquishMagickMemory(
    channel_statistics);
  return (MagickTrue);
}
target: 0
cwe: [ "CWE-119", "CWE-787" ]
project: ImageMagick
commit_id: 025e77fcb2f45b21689931ba3bf74eac153afa48
hash: 179,885,456,324,776,730,000,000,000,000,000,000,000
size: 18
message: https://github.com/ImageMagick/ImageMagick/issues/1615

func:
static int match_file(const void *p, struct file *file, unsigned fd)
{
    return file_has_perm(p, file, file_to_av(file)) ? fd + 1 : 0;
}
target: 0
cwe: [ "CWE-264" ]
project: linux
commit_id: 7b0d0b40cd78cadb525df760ee4cac151533c2b5
hash: 63,473,617,946,073,450,000,000,000,000,000,000,000
size: 4
message:
selinux: Permit bounded transitions under NO_NEW_PRIVS or NOSUID.

If the callee SID is bounded by the caller SID, then allowing the transition to occur poses no risk of privilege escalation and we can therefore safely allow the transition to occur. Add this exemption for both the case where a transition was explicitly requested by the application and the case where an automatic transition is defined in policy.

Signed-off-by: Stephen Smalley <[email protected]>
Reviewed-by: Andy Lutomirski <[email protected]>
Signed-off-by: Paul Moore <[email protected]>

func:
preload_image(j_compress_ptr cinfo, cjpeg_source_ptr sinfo)
{
  tga_source_ptr source = (tga_source_ptr)sinfo;
  JDIMENSION row;
  cd_progress_ptr progress = (cd_progress_ptr)cinfo->progress;

  /* Read the data into a virtual array in input-file row order. */
  for (row = 0; row < cinfo->image_height; row++) {
    if (progress != NULL) {
      progress->pub.pass_counter = (long)row;
      progress->pub.pass_limit = (long)cinfo->image_height;
      (*progress->pub.progress_monitor) ((j_common_ptr)cinfo);
    }
    source->pub.buffer = (*cinfo->mem->access_virt_sarray)
      ((j_common_ptr)cinfo, source->whole_image, row, (JDIMENSION)1, TRUE);
    (*source->get_pixel_rows) (cinfo, sinfo);
  }
  if (progress != NULL)
    progress->completed_extra_passes++;

  /* Set up to read from the virtual array in unscrambled order */
  source->pub.get_pixel_rows = get_memory_row;
  source->current_row = 0;
  /* And read the first row */
  return get_memory_row(cinfo, sinfo);
}
target: 0
cwe: [ "CWE-834" ]
project: libjpeg-turbo
commit_id: 909a8cfc7bca9b2e6707425bdb74da997e8fa499
hash: 306,336,154,109,201,200,000,000,000,000,000,000,000
size: 26
message:
Fix CVE-2018-11813

Refer to change log for details.

Fixes #242

func:
int unit_add_dependency_by_name_inverse(Unit *u, UnitDependency d,
                                        const char *name, const char *path,
                                        bool add_reference)
{
    Unit *other;
    int r;
    _cleanup_free_ char *s = NULL;

    assert(u);
    assert(name || path);

    if (!(name = resolve_template(u, name, path, &s)))
        return -ENOMEM;

    if ((r = manager_load_unit(u->manager, name, path, NULL, &other)) < 0)
        return r;

    r = unit_add_dependency(other, d, u, add_reference);
    return r;
}
target: 0
cwe: []
project: systemd
commit_id: 5ba6985b6c8ef85a8bcfeb1b65239c863436e75b
hash: 31,241,670,634,751,950,000,000,000,000,000,000,000
size: 18
message:
core: allow PIDs to be watched by two units at the same time

In some cases it is interesting to map a PID to two units at the same time. For example, when a user logs in via a getty, which is reexeced to /sbin/login, that binary will be explicitly referenced as main pid of the getty service, as well as implicitly referenced as part of the session scope.

func:
static uint64_t toWire64(uint64_t x) { return htonll(x); }
target: 0
cwe: [ "CWE-20" ]
project: thrift
commit_id: cfaadcc4adcfde2a8232c62ec89870b73ef40df1
hash: 271,699,206,677,180,480,000,000,000,000,000,000,000
size: 1
message:
THRIFT-3231 CPP: Limit recursion depth to 64

Client: cpp
Patch: Ben Craig <[email protected]>

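A minimal sketch of the guard this commit's title names (the struct, walk, and limit placement are illustrative, not Thrift's protocol reader): a decoder of recursive structures tracks nesting depth and fails once a fixed limit is exceeded, so crafted input cannot exhaust the stack.

    #include <stddef.h>

    #define MAX_RECURSION_DEPTH 64   /* the limit named in the message */

    struct node { struct node *child; };

    /* Sketch: reject over-deep input instead of recursing without bound. */
    static int check_depth(const struct node *n, size_t depth)
    {
        if (depth > MAX_RECURSION_DEPTH)
            return -1;
        if (n->child)
            return check_depth(n->child, depth + 1);
        return 0;
    }
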
func:
virDomainObjGetState(virDomainObjPtr dom, int *reason)
{
    if (reason)
        *reason = dom->state.reason;

    return dom->state.state;
}
target: 0
cwe: [ "CWE-212" ]
project: libvirt
commit_id: a5b064bf4b17a9884d7d361733737fb614ad8979
hash: 94,567,746,335,057,750,000,000,000,000,000,000,000
size: 7
message:
conf: Don't format http cookies unless VIR_DOMAIN_DEF_FORMAT_SECURE is used

Starting with 3b076391befc3fe72deb0c244ac6c2b4c100b410 (v6.1.0-122-g3b076391be) we support http cookies. Since they may contain somewhat sensitive information we should not format them into the XML unless VIR_DOMAIN_DEF_FORMAT_SECURE is asserted.

Reported-by: Han Han <[email protected]>
Signed-off-by: Peter Krempa <[email protected]>
Reviewed-by: Erik Skultety <[email protected]>

func:
explicit printer(ostream& os) : out(os) {}
target: 0
cwe: [ "CWE-770" ]
project: ceph
commit_id: ab29bed2fc9f961fe895de1086a8208e21ddaddc
hash: 34,812,138,172,673,350,000,000,000,000,000,000,000
size: 2
message:
rgw: fix issues with 'enforce bounds' patch

The patch to enforce bounds on max-keys/max-uploads/max-parts had a few issues that would prevent us from compiling it. Instead of changing the code provided by the submitter, we're addressing them in a separate commit to maintain the DCO.

Signed-off-by: Joao Eduardo Luis <[email protected]>
Signed-off-by: Abhishek Lekshmanan <[email protected]>
(cherry picked from commit 29bc434a6a81a2e5c5b8cfc4c8d5c82ca5bf538a)

mimic specific fixes: As the largeish change from master g_conf() isn't in mimic yet, use the g_conf global structure, also make rgw_op use the value from req_info ceph context as we do for all the requests

func:
extensions::ScriptExecutor* script_executor() {
  return script_executor_.get();
}
target: 0
cwe: []
project: electron
commit_id: e9fa834757f41c0b9fe44a4dffe3d7d437f52d34
hash: 109,073,670,362,603,720,000,000,000,000,000,000,000
size: 3
message:
fix: ensure ElectronBrowser mojo service is only bound to appropriate render frames (#33344)

* fix: ensure ElectronBrowser mojo service is only bound to authorized render frames

Notes: no-notes

* refactor: extract electron API IPC to its own mojo interface
* fix: just check main frame not primary main frame

Co-authored-by: Samuel Attard <[email protected]>
Co-authored-by: Samuel Attard <[email protected]>

func:
static llparse_match_t llparse__match_sequence_to_lower(
    llhttp__internal_t* s, const unsigned char* p,
    const unsigned char* endp,
    const unsigned char* seq, uint32_t seq_len) {
  uint32_t index;
  llparse_match_t res;

  index = s->_index;
  for (; p != endp; p++) {
    unsigned char current;

    current = ((*p) >= 'A' && (*p) <= 'Z' ? (*p | 0x20) : (*p));
    if (current == seq[index]) {
      if (++index == seq_len) {
        res.status = kMatchComplete;
        goto reset;
      }
    } else {
      res.status = kMatchMismatch;
      goto reset;
    }
  }
  s->_index = index;
  res.status = kMatchPause;
  res.current = p;
  return res;
reset:
  s->_index = 0;
  res.current = p;
  return res;
}
target: 0
cwe: [ "CWE-444" ]
project: node
commit_id: 641f786bb1a1f6eb1ff8750782ed939780f2b31a
hash: 221,493,400,566,688,900,000,000,000,000,000,000,000
size: 31
message:
http: unset `F_CHUNKED` on new `Transfer-Encoding`

Duplicate `Transfer-Encoding` header should be a treated as a single, but with original header values concatenated with a comma separator. In the light of this, even if the past `Transfer-Encoding` ended with `chunked`, we should be not let the `F_CHUNKED` to leak into the next header, because mere presence of another header indicates that `chunked` is not the last transfer-encoding token.

CVE-ID: CVE-2020-8287
Refs: https://github.com/nodejs-private/llhttp-private/pull/3
Refs: https://hackerone.com/bugs?report_id=1002188&subject=nodejs
PR-URL: https://github.com/nodejs-private/node-private/pull/228
Reviewed-By: Fedor Indutny <[email protected]>
Reviewed-By: Rich Trott <[email protected]>

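A minimal sketch of the rule the message states (the flag's bit value and the handler are illustrative, not llhttp's actual state machine): duplicate Transfer-Encoding headers behave like one comma-joined value, so "chunked" only counts if it is the final token; seeing another Transfer-Encoding header after F_CHUNKED was set must clear it, and it is set again only if the new header's value ends in "chunked".

    #define F_CHUNKED (1u << 0)   /* illustrative bit position */

    /* Sketch: a new Transfer-Encoding header means the earlier
     * "chunked" was not the last token, so the flag must not leak
     * across headers. */
    static void on_transfer_encoding_header(unsigned int *flags)
    {
        *flags &= ~F_CHUNKED;
    }
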
func:
DEFUN (no_ip_community_list_name_expanded,
       no_ip_community_list_name_expanded_cmd,
       "no ip community-list expanded WORD (deny|permit) .LINE",
       NO_STR
       IP_STR
       COMMUNITY_LIST_STR
       "Specify an expanded community-list\n"
       "Community list name\n"
       "Specify community to reject\n"
       "Specify community to accept\n"
       "An ordered list as a regular-expression\n")
{
    return community_list_unset_vty (vty, argc, argv, COMMUNITY_LIST_EXPANDED);
}
target: 0
cwe: [ "CWE-125" ]
project: frr
commit_id: 6d58272b4cf96f0daa846210dd2104877900f921
hash: 225,316,903,838,927,620,000,000,000,000,000,000,000
size: 14
message:
[bgpd] cleanup, compact and consolidate capability parsing code

2007-07-26 Paul Jakma <[email protected]>

* (general) Clean up and compact capability parsing slightly. Consolidate validation of length and logging of generic TLV, and memcpy of capability data, thus removing such from cap specific code (not always present or correct).
* bgp_open.h: Add structures for the generic capability TLV header and for the data formats of the various specific capabilities we support. Hence remove the badly named, or else misdefined, struct capability.
* bgp_open.c: (bgp_capability_vty_out) Use struct capability_mp_data. Do the length checks *before* memcpy()'ing based on that length (stored capability - should have been validated anyway on input, but..).
  (bgp_afi_safi_valid_indices) new function to validate (afi,safi) which is about to be used as index into arrays, consolidates several instances of same, at least one of which appeared to be incomplete..
  (bgp_capability_mp) Much condensed.
  (bgp_capability_orf_entry) New, process one ORF entry
  (bgp_capability_orf) Condensed. Fixed to process all ORF entries.
  (bgp_capability_restart) Condensed, and fixed to use a cap-specific type, rather than abusing capability_mp.
  (struct message capcode_str) added to aid generic logging.
  (size_t cap_minsizes[]) added to aid generic validation of capability length field.
  (bgp_capability_parse) Generic logging and validation of TLV consolidated here. Code compacted as much as possible.
* bgp_packet.c: (bgp_open_receive) Capability parsers now use streams, so no more need here to manually fudge the input stream getp.
  (bgp_capability_msg_parse) use struct capability_mp_data. Validate lengths /before/ memcpy. Use bgp_afi_safi_valid_indices.
  (bgp_capability_receive) Exported for use by test harness.
* bgp_vty.c: (bgp_show_summary) fix conversion warning
  (bgp_show_peer) ditto
* bgp_debug.h: Fix storage 'extern' after type 'const'.
* lib/log.c: (mes_lookup) warning about code not being in same-number array slot should be debug, not warning. E.g. BGP has several discontiguous number spaces, allocating from different parts of a space is not uncommon (e.g. IANA assigned versus vendor-assigned code points in some number space).

func:
static int smack_ptrace_access_check(struct task_struct *ctp, unsigned int mode)
{
    struct smack_known *skp;

    skp = smk_of_task_struct_obj(ctp);

    return smk_ptrace_rule_check(current, skp, mode, __func__);
}
target: 0
cwe: [ "CWE-416" ]
project: linux
commit_id: a3727a8bac0a9e77c70820655fd8715523ba3db7
hash: 248,234,029,095,336,000,000,000,000,000,000,000,000
size: 8
message:
selinux,smack: fix subjective/objective credential use mixups

Jann Horn reported a problem with commit eb1231f73c4d ("selinux: clarify task subjective and objective credentials") where some LSM hooks were attempting to access the subjective credentials of a task other than the current task. Generally speaking, it is not safe to access another task's subjective credentials and doing so can cause a number of problems.

Further, while looking into the problem, I realized that Smack was suffering from a similar problem brought about by a similar commit 1fb057dcde11 ("smack: differentiate between subjective and objective task credentials").

This patch addresses this problem by restoring the use of the task's objective credentials in those cases where the task is other than the current executing task. Not only does this resolve the problem reported by Jann, it is arguably the correct thing to do in these cases.

Cc: [email protected]
Fixes: eb1231f73c4d ("selinux: clarify task subjective and objective credentials")
Fixes: 1fb057dcde11 ("smack: differentiate between subjective and objective task credentials")
Reported-by: Jann Horn <[email protected]>
Acked-by: Eric W. Biederman <[email protected]>
Acked-by: Casey Schaufler <[email protected]>
Signed-off-by: Paul Moore <[email protected]>

func:
static void _cimg_blur_box_apply(T *ptr, const float boxsize, const int N,
                                 const ulongT off, const int order,
                                 const bool boundary_conditions,
                                 const unsigned int nb_iter) {
  // Smooth.
  if (boxsize>1 && nb_iter) {
    const int w2 = (int)(boxsize - 1)/2;
    const unsigned int winsize = 2*w2 + 1U;
    const double frac = (boxsize - winsize)/2.;
    CImg<T> win(winsize);
    for (unsigned int iter = 0; iter<nb_iter; ++iter) {
      Tdouble sum = 0; // window sum
      for (int x = -w2; x<=w2; ++x) {
        win[x + w2] = __cimg_blur_box_apply(ptr,N,off,boundary_conditions,x);
        sum+=win[x + w2];
      }
      int ifirst = 0, ilast = 2*w2;
      T prev = __cimg_blur_box_apply(ptr,N,off,boundary_conditions,-w2 - 1),
        next = __cimg_blur_box_apply(ptr,N,off,boundary_conditions,w2 + 1);
      for (int x = 0; x < N - 1; ++x) {
        const double sum2 = sum + frac * (prev + next);
        ptr[x*off] = (T)(sum2/boxsize);
        prev = win[ifirst];
        sum-=prev;
        ifirst = (int)((ifirst + 1)%winsize);
        ilast = (int)((ilast + 1)%winsize);
        win[ilast] = next;
        sum+=next;
        next = __cimg_blur_box_apply(ptr,N,off,boundary_conditions,x + w2 + 2);
      }
      const double sum2 = sum + frac * (prev + next);
      ptr[(N - 1)*off] = (T)(sum2/boxsize);
    }
  }

  // Derive.
  switch (order) {
  case 0 :
    break;
  case 1 : {
    Tfloat
      p = __cimg_blur_box_apply(ptr,N,off,boundary_conditions,-1),
      c = __cimg_blur_box_apply(ptr,N,off,boundary_conditions,0),
      n = __cimg_blur_box_apply(ptr,N,off,boundary_conditions,1);
    for (int x = 0; x<N - 1; ++x) {
      ptr[x*off] = (T)((n-p)/2.);
      p = c; c = n;
      n = __cimg_blur_box_apply(ptr,N,off,boundary_conditions,x + 2);
    }
    ptr[(N - 1)*off] = (T)((n-p)/2.);
  } break;
  case 2: {
    Tfloat
      p = __cimg_blur_box_apply(ptr,N,off,boundary_conditions,-1),
      c = __cimg_blur_box_apply(ptr,N,off,boundary_conditions,0),
      n = __cimg_blur_box_apply(ptr,N,off,boundary_conditions,1);
    for (int x = 0; x<N - 1; ++x) {
      ptr[x*off] = (T)(n - 2*c + p);
      p = c; c = n;
      n = __cimg_blur_box_apply(ptr,N,off,boundary_conditions,x + 2);
    }
    ptr[(N - 1)*off] = (T)(n - 2*c + p);
  } break;
  }
target: 0
cwe: [ "CWE-119", "CWE-787" ]
project: CImg
commit_id: ac8003393569aba51048c9d67e1491559877b1d1
hash: 70,679,817,756,522,345,000,000,000,000,000,000,000
size: 67
message: .

func:
CheckPredicate(Expr *predicate)
{
    /*
     * transformExpr() should have already rejected subqueries, aggregates,
     * and window functions, based on the EXPR_KIND_ for a predicate.
     */

    /*
     * A predicate using mutable functions is probably wrong, for the same
     * reasons that we don't allow an index expression to use one.
     */
    if (CheckMutability(predicate))
        ereport(ERROR,
                (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
                 errmsg("functions in index predicate must be marked IMMUTABLE")));
}
target: 0
cwe: [ "CWE-362" ]
project: postgres
commit_id: 5f173040e324f6c2eebb90d86cf1b0cdb5890f0a
hash: 294,856,541,015,474,500,000,000,000,000,000,000,000
size: 16
message:
Avoid repeated name lookups during table and index DDL.

If the name lookups come to different conclusions due to concurrent activity, we might perform some parts of the DDL on a different table than other parts. At least in the case of CREATE INDEX, this can be used to cause the permissions checks to be performed against a different table than the index creation, allowing for a privilege escalation attack.

This changes the calling convention for DefineIndex, CreateTrigger, transformIndexStmt, transformAlterTableStmt, CheckIndexCompatible (in 9.2 and newer), and AlterTable (in 9.1 and older). In addition, CheckRelationOwnership is removed in 9.2 and newer and the calling convention is changed in older branches. A field has also been added to the Constraint node (FkConstraint in 8.4). Third-party code calling these functions or using the Constraint node will require updating.

Report by Andres Freund. Patch by Robert Haas and Andres Freund, reviewed by Tom Lane.

Security: CVE-2014-0062

func:
crypto_cert_get_count(pkinit_identity_crypto_context id_cryptoctx,
                      int *cert_count)
{
    int count;

    *cert_count = 0;
    if (id_cryptoctx == NULL || id_cryptoctx->creds[0] == NULL)
        return EINVAL;

    for (count = 0;
         count <= MAX_CREDS_ALLOWED && id_cryptoctx->creds[count] != NULL;
         count++);
    *cert_count = count;
    return 0;
}
target: 0
cwe: [ "CWE-119", "CWE-787" ]
project: krb5
commit_id: fbb687db1088ddd894d975996e5f6a4252b9a2b4
hash: 291,265,076,214,624,450,000,000,000,000,000,000,000
size: 15
message:
Fix PKINIT cert matching data construction

Rewrite X509_NAME_oneline_ex() and its call sites to use dynamic allocation and to perform proper error checking.

ticket: 8617
target_version: 1.16
target_version: 1.15-next
target_version: 1.14-next
tags: pullup

func:
SPICE_GNUC_VISIBLE int spice_server_migrate_start(SpiceServer *reds)
{
    spice_debug("trace");
    if (!reds->config->mig_spice) {
        return -1;
    }
    return 0;
}
target: 0
cwe: []
project: spice
commit_id: ca5bbc5692e052159bce1a75f55dc60b36078749
hash: 20,306,059,763,767,928,000,000,000,000,000,000,000
size: 8
message:
With OpenSSL 1.1: Disable client-initiated renegotiation.

Fixes issue #49
Fixes BZ#1904459

Signed-off-by: Julien Ropé <[email protected]>
Reported-by: BlackKD
Acked-by: Frediano Ziglio <[email protected]>

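One way to implement what the title describes under OpenSSL 1.1, in a minimal sketch: OpenSSL 1.1.0h and later expose SSL_OP_NO_RENEGOTIATION, which makes the library reject peer-initiated renegotiation outright. Whether the actual commit uses this option or an info callback is an assumption here.

    #include <openssl/ssl.h>

    /* Sketch: refuse client-initiated renegotiation where the
     * option is available. */
    static void disable_client_renegotiation(SSL_CTX *ctx)
    {
    #ifdef SSL_OP_NO_RENEGOTIATION
        SSL_CTX_set_options(ctx, SSL_OP_NO_RENEGOTIATION);
    #endif
    }
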
func:
CAMLprim value caml_fill_string(value s, value offset, value len, value init)
{
  memset(&Byte(s, Long_val(offset)), Int_val(init), Long_val(len));
  return Val_unit;
}
target: 0
cwe: [ "CWE-200" ]
project: ocaml
commit_id: 659615c7b100a89eafe6253e7a5b9d84d0e8df74
hash: 71,299,743,578,244,670,000,000,000,000,000,000,000
size: 5
message:
fix PR#7003 and a few other bugs caused by misuse of Int_val

git-svn-id: http://caml.inria.fr/svn/ocaml/trunk@16525 f963ae5c-01c2-4b8c-9fe0-0dff7051ff02

func:
auth_server_connection_add_request(struct auth_server_connection *conn,
                                   struct auth_client_request *request)
{
    unsigned int id;

    i_assert(conn->handshake_received);

    id = ++conn->client->request_id_counter;
    if (id == 0) {
        /* wrapped - ID 0 not allowed */
        id = ++conn->client->request_id_counter;
    }
    i_assert(hash_table_lookup(conn->requests, POINTER_CAST(id)) == NULL);
    hash_table_insert(conn->requests, POINTER_CAST(id), request);
    return id;
}
target: 0
cwe: []
project: core
commit_id: a9b135760aea6d1790d447d351c56b78889dac22
hash: 148,152,109,430,448,310,000,000,000,000,000,000,000
size: 16
message:
lib-auth: Remove request after abort

Otherwise the request will still stay in hash table and get dereferenced when all requests are aborted, causing an attempt to access free'd memory.

Found by Apollon Oikonomopoulos <[email protected]>

Broken in 1a29ed2f96da1be22fa5a4d96c7583aa81b8b060

func:
ext4_fsblk_t ext4_inode_bitmap(struct super_block *sb,
                               struct ext4_group_desc *bg)
{
    return le32_to_cpu(bg->bg_inode_bitmap_lo) |
        (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
         (ext4_fsblk_t)le32_to_cpu(bg->bg_inode_bitmap_hi) << 32 : 0);
}
target: 0
cwe: [ "CWE-703" ]
project: linux
commit_id: 744692dc059845b2a3022119871846e74d4f6e11
hash: 257,554,268,072,047,800,000,000,000,000,000,000,000
size: 7
message:
ext4: use ext4_get_block_write in buffer write

Allocate uninitialized extent before ext4 buffer write and convert the extent to initialized after io completes. The purpose is to make sure an extent can only be marked initialized after it has been written with new data so we can safely drop the i_mutex lock in ext4 DIO read without exposing stale data. This helps to improve multi-thread DIO read performance on high-speed disks. Skip the nobh and data=journal mount cases to make things simple for now.

Signed-off-by: Jiaying Zhang <[email protected]>
Signed-off-by: "Theodore Ts'o" <[email protected]>

static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, u32 exit_reason, u32 exit_intr_info, unsigned long exit_qualification) { /* update guest state fields: */ vmcs12->guest_cr0 = vmcs12_guest_cr0(vcpu, vmcs12); vmcs12->guest_cr4 = vmcs12_guest_cr4(vcpu, vmcs12); vmcs12->guest_rsp = kvm_register_read(vcpu, VCPU_REGS_RSP); vmcs12->guest_rip = kvm_register_read(vcpu, VCPU_REGS_RIP); vmcs12->guest_rflags = vmcs_readl(GUEST_RFLAGS); vmcs12->guest_es_selector = vmcs_read16(GUEST_ES_SELECTOR); vmcs12->guest_cs_selector = vmcs_read16(GUEST_CS_SELECTOR); vmcs12->guest_ss_selector = vmcs_read16(GUEST_SS_SELECTOR); vmcs12->guest_ds_selector = vmcs_read16(GUEST_DS_SELECTOR); vmcs12->guest_fs_selector = vmcs_read16(GUEST_FS_SELECTOR); vmcs12->guest_gs_selector = vmcs_read16(GUEST_GS_SELECTOR); vmcs12->guest_ldtr_selector = vmcs_read16(GUEST_LDTR_SELECTOR); vmcs12->guest_tr_selector = vmcs_read16(GUEST_TR_SELECTOR); vmcs12->guest_es_limit = vmcs_read32(GUEST_ES_LIMIT); vmcs12->guest_cs_limit = vmcs_read32(GUEST_CS_LIMIT); vmcs12->guest_ss_limit = vmcs_read32(GUEST_SS_LIMIT); vmcs12->guest_ds_limit = vmcs_read32(GUEST_DS_LIMIT); vmcs12->guest_fs_limit = vmcs_read32(GUEST_FS_LIMIT); vmcs12->guest_gs_limit = vmcs_read32(GUEST_GS_LIMIT); vmcs12->guest_ldtr_limit = vmcs_read32(GUEST_LDTR_LIMIT); vmcs12->guest_tr_limit = vmcs_read32(GUEST_TR_LIMIT); vmcs12->guest_gdtr_limit = vmcs_read32(GUEST_GDTR_LIMIT); vmcs12->guest_idtr_limit = vmcs_read32(GUEST_IDTR_LIMIT); vmcs12->guest_es_ar_bytes = vmcs_read32(GUEST_ES_AR_BYTES); vmcs12->guest_cs_ar_bytes = vmcs_read32(GUEST_CS_AR_BYTES); vmcs12->guest_ss_ar_bytes = vmcs_read32(GUEST_SS_AR_BYTES); vmcs12->guest_ds_ar_bytes = vmcs_read32(GUEST_DS_AR_BYTES); vmcs12->guest_fs_ar_bytes = vmcs_read32(GUEST_FS_AR_BYTES); vmcs12->guest_gs_ar_bytes = vmcs_read32(GUEST_GS_AR_BYTES); vmcs12->guest_ldtr_ar_bytes = vmcs_read32(GUEST_LDTR_AR_BYTES); vmcs12->guest_tr_ar_bytes = vmcs_read32(GUEST_TR_AR_BYTES); vmcs12->guest_es_base = vmcs_readl(GUEST_ES_BASE); vmcs12->guest_cs_base = vmcs_readl(GUEST_CS_BASE); vmcs12->guest_ss_base = vmcs_readl(GUEST_SS_BASE); vmcs12->guest_ds_base = vmcs_readl(GUEST_DS_BASE); vmcs12->guest_fs_base = vmcs_readl(GUEST_FS_BASE); vmcs12->guest_gs_base = vmcs_readl(GUEST_GS_BASE); vmcs12->guest_ldtr_base = vmcs_readl(GUEST_LDTR_BASE); vmcs12->guest_tr_base = vmcs_readl(GUEST_TR_BASE); vmcs12->guest_gdtr_base = vmcs_readl(GUEST_GDTR_BASE); vmcs12->guest_idtr_base = vmcs_readl(GUEST_IDTR_BASE); vmcs12->guest_interruptibility_info = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO); vmcs12->guest_pending_dbg_exceptions = vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS); if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED) vmcs12->guest_activity_state = GUEST_ACTIVITY_HLT; else vmcs12->guest_activity_state = GUEST_ACTIVITY_ACTIVE; if (nested_cpu_has_preemption_timer(vmcs12)) { if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_VMX_PREEMPTION_TIMER) vmcs12->vmx_preemption_timer_value = vmx_get_preemption_timer_value(vcpu); hrtimer_cancel(&to_vmx(vcpu)->nested.preemption_timer); } /* * In some cases (usually, nested EPT), L2 is allowed to change its * own CR3 without exiting. If it has changed it, we must keep it. * Of course, if L0 is using shadow page tables, GUEST_CR3 was defined * by L0, not L1 or L2, so we mustn't unconditionally copy it to vmcs12. * * Additionally, restore L2's PDPTR to vmcs12. 
*/ if (enable_ept) { vmcs12->guest_cr3 = vmcs_read64(GUEST_CR3); vmcs12->guest_pdptr0 = vmcs_read64(GUEST_PDPTR0); vmcs12->guest_pdptr1 = vmcs_read64(GUEST_PDPTR1); vmcs12->guest_pdptr2 = vmcs_read64(GUEST_PDPTR2); vmcs12->guest_pdptr3 = vmcs_read64(GUEST_PDPTR3); } if (nested_cpu_has_vid(vmcs12)) vmcs12->guest_intr_status = vmcs_read16(GUEST_INTR_STATUS); vmcs12->vm_entry_controls = (vmcs12->vm_entry_controls & ~VM_ENTRY_IA32E_MODE) | (vm_entry_controls_get(to_vmx(vcpu)) & VM_ENTRY_IA32E_MODE); if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_DEBUG_CONTROLS) { kvm_get_dr(vcpu, 7, (unsigned long *)&vmcs12->guest_dr7); vmcs12->guest_ia32_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL); } /* TODO: These cannot have changed unless we have MSR bitmaps and * the relevant bit asks not to trap the change */ if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_PAT) vmcs12->guest_ia32_pat = vmcs_read64(GUEST_IA32_PAT); if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_EFER) vmcs12->guest_ia32_efer = vcpu->arch.efer; vmcs12->guest_sysenter_cs = vmcs_read32(GUEST_SYSENTER_CS); vmcs12->guest_sysenter_esp = vmcs_readl(GUEST_SYSENTER_ESP); vmcs12->guest_sysenter_eip = vmcs_readl(GUEST_SYSENTER_EIP); if (vmx_mpx_supported()) vmcs12->guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS); if (nested_cpu_has_xsaves(vmcs12)) vmcs12->xss_exit_bitmap = vmcs_read64(XSS_EXIT_BITMAP); /* update exit information fields: */ vmcs12->vm_exit_reason = exit_reason; vmcs12->exit_qualification = exit_qualification; vmcs12->vm_exit_intr_info = exit_intr_info; if ((vmcs12->vm_exit_intr_info & (INTR_INFO_VALID_MASK | INTR_INFO_DELIVER_CODE_MASK)) == (INTR_INFO_VALID_MASK | INTR_INFO_DELIVER_CODE_MASK)) vmcs12->vm_exit_intr_error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE); vmcs12->idt_vectoring_info_field = 0; vmcs12->vm_exit_instruction_len = vmcs_read32(VM_EXIT_INSTRUCTION_LEN); vmcs12->vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); if (!(vmcs12->vm_exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY)) { /* vm_entry_intr_info_field is cleared on exit. Emulate this * instead of reading the real value. */ vmcs12->vm_entry_intr_info_field &= ~INTR_INFO_VALID_MASK; /* * Transfer the event that L0 or L1 may wanted to inject into * L2 to IDT_VECTORING_INFO_FIELD. */ vmcs12_save_pending_event(vcpu, vmcs12); } /* * Drop what we picked up for L2 via vmx_complete_interrupts. It is * preserved above and would only end up incorrectly in L1. */ vcpu->arch.nmi_injected = false; kvm_clear_exception_queue(vcpu); kvm_clear_interrupt_queue(vcpu); }
0
[ "CWE-399" ]
linux
54a20552e1eae07aa240fa370a0293e006b5faed
280,190,975,402,851,140,000,000,000,000,000,000,000
143
KVM: x86: work around infinite loop in microcode when #AC is delivered It was found that a guest can DoS a host by triggering an infinite stream of "alignment check" (#AC) exceptions. This causes the microcode to enter an infinite loop where the core never receives another interrupt. The host kernel panics pretty quickly due to the effects (CVE-2015-5307). Signed-off-by: Eric Northup <[email protected]> Cc: [email protected] Signed-off-by: Paolo Bonzini <[email protected]>
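The patch itself (not shown in this record) works around the microcode flaw by intercepting #AC in the VMCS exception bitmap, so the host regains control between faults instead of the core spinning forever. A minimal sketch of that shape, assuming the kernel's vmcs_read32()/vmcs_write32() accessors and the AC_VECTOR constant; this is illustrative, not the literal patch:

static void vmx_intercept_alignment_check(void)
{
	u32 eb = vmcs_read32(EXCEPTION_BITMAP);

	/* Trap #AC to the host; the exit handler can simply re-inject
	 * the exception, breaking the guest-side infinite fault loop. */
	eb |= (1u << AC_VECTOR);
	vmcs_write32(EXCEPTION_BITMAP, eb);
}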
ProcGlobalSemas(void) { /* * We need a sema per backend (including autovacuum), plus one for each * auxiliary process. */ return MaxBackends + NUM_AUXILIARY_PROCS; }
0
[ "CWE-89" ]
postgres
2b3a8b20c2da9f39ffecae25ab7c66974fbc0d3b
242,526,343,581,117,200,000,000,000,000,000,000,000
8
Be more careful to not lose sync in the FE/BE protocol. If any error occurred while we were in the middle of reading a protocol message from the client, we could lose sync, and incorrectly try to interpret a part of another message as a new protocol message. That will usually lead to an "invalid frontend message" error that terminates the connection. However, this is a security issue because an attacker might be able to deliberately cause an error, inject a Query message in what's supposed to be just user data, and have the server execute it. We were quite careful to not have CHECK_FOR_INTERRUPTS() calls or other operations that could ereport(ERROR) in the middle of processing a message, but a query cancel interrupt or statement timeout could nevertheless cause it to happen. Also, the V2 fastpath and COPY handling were not so careful. It's very difficult to recover in the V2 COPY protocol, so we will just terminate the connection on error. In practice, that's what happened previously anyway, as we lost protocol sync. To fix, add a new variable in pqcomm.c, PqCommReadingMsg, that is set whenever we're in the middle of reading a message. When it's set, we cannot safely ERROR out and continue running, because we might've read only part of a message. PqCommReadingMsg acts somewhat similarly to critical sections in that if an error occurs while it's set, the error handler will force the connection to be terminated, as if the error was FATAL. It's not implemented by promoting ERROR to FATAL in elog.c, like ERROR is promoted to PANIC in critical sections, because we want to be able to use PG_TRY/CATCH to recover and regain protocol sync. pq_getmessage() takes advantage of that to prevent an OOM error from terminating the connection. To prevent unnecessary connection terminations, add a holdoff mechanism similar to HOLD/RESUME_INTERRUPTS() that can be used to hold off query cancel interrupts, but still allow die interrupts. The rules on which interrupts are processed when are now a bit more complicated, so refactor ProcessInterrupts() and the calls to it in signal handlers so that the signal handlers always call it if ImmediateInterruptOK is set, and ProcessInterrupts() can decide to not do anything if the other conditions are not met. Reported by Emil Lenngren. Patch reviewed by Noah Misch and Andres Freund. Backpatch to all supported versions. Security: CVE-2015-0244
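A distilled sketch of the guard described above, using the pq_startmsgread()/pq_endmsgread() helper names and simplified error handling; the real implementation lives in pqcomm.c:

static bool PqCommReadingMsg = false;

void
pq_startmsgread(void)
{
	/* A nested start means we already lost track of a message
	 * boundary, so give up on the connection entirely. */
	if (PqCommReadingMsg)
		ereport(FATAL,
				(errcode(ERRCODE_PROTOCOL_VIOLATION),
				 errmsg("terminating connection because protocol synchronization was lost")));
	PqCommReadingMsg = true;
}

void
pq_endmsgread(void)
{
	PqCommReadingMsg = false;
}

/* In the error handler: any ERROR raised while PqCommReadingMsg is set
 * must terminate the connection, as if it had been FATAL, because we
 * can no longer tell where the next message starts. */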
static long kvm_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { long r = -EINVAL; switch (ioctl) { case KVM_GET_API_VERSION: if (arg) goto out; r = KVM_API_VERSION; break; case KVM_CREATE_VM: r = kvm_dev_ioctl_create_vm(arg); break; case KVM_CHECK_EXTENSION: r = kvm_vm_ioctl_check_extension_generic(NULL, arg); break; case KVM_GET_VCPU_MMAP_SIZE: if (arg) goto out; r = PAGE_SIZE; /* struct kvm_run */ #ifdef CONFIG_X86 r += PAGE_SIZE; /* pio data page */ #endif #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET r += PAGE_SIZE; /* coalesced mmio ring page */ #endif break; case KVM_TRACE_ENABLE: case KVM_TRACE_PAUSE: case KVM_TRACE_DISABLE: r = -EOPNOTSUPP; break; default: return kvm_arch_dev_ioctl(filp, ioctl, arg); } out: return r; }
0
[ "CWE-416", "CWE-284" ]
linux
a0f1d21c1ccb1da66629627a74059dd7f5ac9c61
296,430,333,705,776,700,000,000,000,000,000,000,000
39
KVM: use after free in kvm_ioctl_create_device() We should move the ops->destroy(dev) after the list_del(&dev->vm_node) so that we don't use "dev" after freeing it. Fixes: a28ebea2adc4 ("KVM: Protect device ops->create and list_add with kvm->lock") Signed-off-by: Dan Carpenter <[email protected]> Reviewed-by: David Hildenbrand <[email protected]> Signed-off-by: Radim Krčmář <[email protected]>
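The fix is a two-line reordering; distilled:

/* Before (use after free): ->destroy() frees "dev", then list_del()
 * dereferences the freed memory. */
ops->destroy(dev);
list_del(&dev->vm_node);

/* After: unlink the node while "dev" is still alive, destroy last. */
list_del(&dev->vm_node);
ops->destroy(dev);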
void addReplyHumanLongDouble(client *c, long double d) { robj *o = createStringObjectFromLongDouble(d,1); addReplyBulk(c,o); decrRefCount(o); }
0
[ "CWE-254" ]
redis
874804da0c014a7d704b3d285aa500098a931f50
293,775,133,833,592,800,000,000,000,000,000,000,000
5
Security: Cross Protocol Scripting protection. This is an attempt at mitigating cross protocol scripting, an attack targeting services that use line-oriented protocols like Redis: such a service can accept an HTTP request as valid protocol traffic by discarding the invalid parts and acting on the payload sent, for example, via a POST request. For this to be effective, the networking code was modified so that, when we detect POST or Host: and terminate the connection asynchronously, no further input from that connection is ever processed. It was later verified that in a pipelined request containing a POST command, the successive commands are not executed.
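A sketch of the detection shape, with illustrative names (the helper below follows Redis conventions but is not the literal patch):

#include <strings.h>
#include <stdbool.h>

/* First token of an inline command that can only be HTTP, never a
 * Redis command: treat it as a cross-protocol probe. */
static bool looks_like_http(const char *first_arg)
{
    return strcasecmp(first_arg, "post") == 0 ||
           strcasecmp(first_arg, "host:") == 0;
}

/* On a match: log a security warning, flag the client for asynchronous
 * close, and stop parsing its input buffer so pipelined payloads are
 * never executed as commands. */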
flatpak_dir_get_system_default (void) { g_autoptr(GFile) path = flatpak_get_system_default_base_dir_location (); return flatpak_dir_new_full (path, FALSE, NULL); }
0
[ "CWE-668" ]
flatpak
cd2142888fc4c199723a0dfca1f15ea8788a5483
220,310,967,116,927,430,000,000,000,000,000,000,000
5
Don't expose /proc when running apply_extra As shown by CVE-2019-5736, it is sometimes possible for the sandbox app to access outside files using /proc/self/exe. This is not typically an issue for flatpak as the sandbox runs as the user which has no permissions to e.g. modify the host files. However, when installing apps using extra-data into the system repo we *do* actually run a sandbox as root. So, in this case we disable mounting /proc in the sandbox, which will neuter attacks like this.
static const char *columnTypeImpl( NameContext *pNC, #ifndef SQLITE_ENABLE_COLUMN_METADATA Expr *pExpr #else Expr *pExpr, const char **pzOrigDb, const char **pzOrigTab, const char **pzOrigCol #endif ){ char const *zType = 0; int j; #ifdef SQLITE_ENABLE_COLUMN_METADATA char const *zOrigDb = 0; char const *zOrigTab = 0; char const *zOrigCol = 0; #endif assert( pExpr!=0 ); assert( pNC->pSrcList!=0 ); switch( pExpr->op ){ case TK_COLUMN: { /* The expression is a column. Locate the table the column is being ** extracted from in NameContext.pSrcList. This table may be a real ** database table or a subquery. */ Table *pTab = 0; /* Table structure column is extracted from */ Select *pS = 0; /* Select the column is extracted from */ int iCol = pExpr->iColumn; /* Index of column in pTab */ while( pNC && !pTab ){ SrcList *pTabList = pNC->pSrcList; for(j=0;j<pTabList->nSrc && pTabList->a[j].iCursor!=pExpr->iTable;j++); if( j<pTabList->nSrc ){ pTab = pTabList->a[j].pTab; pS = pTabList->a[j].pSelect; }else{ pNC = pNC->pNext; } } if( pTab==0 ){ /* At one time, code such as "SELECT new.x" within a trigger would ** cause this condition to run. Since then, we have restructured how ** trigger code is generated and so this condition is no longer ** possible. However, it can still be true for statements like ** the following: ** ** CREATE TABLE t1(col INTEGER); ** SELECT (SELECT t1.col) FROM t1; ** ** when columnType() is called on the expression "t1.col" in the ** sub-select. In this case, set the column type to NULL, even ** though it should really be "INTEGER". ** ** This is not a problem, as the column type of "t1.col" is never ** used. When columnType() is called on the expression ** "(SELECT t1.col)", the correct type is returned (see the TK_SELECT ** branch below). */ break; } assert( pTab && pExpr->y.pTab==pTab ); if( pS ){ /* The "table" is actually a sub-select or a view in the FROM clause ** of the SELECT statement. Return the declaration type and origin ** data for the result-set column of the sub-select. */ if( iCol>=0 && iCol<pS->pEList->nExpr ){ /* If iCol is less than zero, then the expression requests the ** rowid of the sub-select or view. This expression is legal (see ** test case misc2.2.2) - it always evaluates to NULL. */ NameContext sNC; Expr *p = pS->pEList->a[iCol].pExpr; sNC.pSrcList = pS->pSrc; sNC.pNext = pNC; sNC.pParse = pNC->pParse; zType = columnType(&sNC, p,&zOrigDb,&zOrigTab,&zOrigCol); } }else{ /* A real table or a CTE table */ assert( !pS ); #ifdef SQLITE_ENABLE_COLUMN_METADATA if( iCol<0 ) iCol = pTab->iPKey; assert( iCol==XN_ROWID || (iCol>=0 && iCol<pTab->nCol) ); if( iCol<0 ){ zType = "INTEGER"; zOrigCol = "rowid"; }else{ zOrigCol = pTab->aCol[iCol].zName; zType = sqlite3ColumnType(&pTab->aCol[iCol],0); } zOrigTab = pTab->zName; if( pNC->pParse && pTab->pSchema ){ int iDb = sqlite3SchemaToIndex(pNC->pParse->db, pTab->pSchema); zOrigDb = pNC->pParse->db->aDb[iDb].zDbSName; } #else assert( iCol==XN_ROWID || (iCol>=0 && iCol<pTab->nCol) ); if( iCol<0 ){ zType = "INTEGER"; }else{ zType = sqlite3ColumnType(&pTab->aCol[iCol],0); } #endif } break; } #ifndef SQLITE_OMIT_SUBQUERY case TK_SELECT: { /* The expression is a sub-select. Return the declaration type and ** origin info for the single column in the result set of the SELECT ** statement.
*/ NameContext sNC; Select *pS = pExpr->x.pSelect; Expr *p = pS->pEList->a[0].pExpr; assert( ExprHasProperty(pExpr, EP_xIsSelect) ); sNC.pSrcList = pS->pSrc; sNC.pNext = pNC; sNC.pParse = pNC->pParse; zType = columnType(&sNC, p, &zOrigDb, &zOrigTab, &zOrigCol); break; } #endif } #ifdef SQLITE_ENABLE_COLUMN_METADATA if( pzOrigDb ){ assert( pzOrigTab && pzOrigCol ); *pzOrigDb = zOrigDb; *pzOrigTab = zOrigTab; *pzOrigCol = zOrigCol; } #endif return zType; }
0
[ "CWE-20" ]
sqlite
e59c562b3f6894f84c715772c4b116d7b5c01348
110,967,134,761,565,830,000,000,000,000,000,000,000
138
Fix a crash that could occur if a sub-select that uses both DISTINCT and window functions also used an ORDER BY that is the same as its select list. FossilOrigin-Name: bcdd66c1691955c697f3d756c2b035acfe98f6aad72e90b0021bab6e9023b3ba
static irqreturn_t xudc_irq(int irq, void *_udc) { struct xusb_udc *udc = _udc; u32 intrstatus; u32 ier; u8 index; u32 bufintr; unsigned long flags; spin_lock_irqsave(&udc->lock, flags); /* * Event interrupts are level sensitive hence first disable * IER, read ISR and figure out active interrupts. */ ier = udc->read_fn(udc->addr + XUSB_IER_OFFSET); ier &= ~XUSB_STATUS_INTR_EVENT_MASK; udc->write_fn(udc->addr, XUSB_IER_OFFSET, ier); /* Read the Interrupt Status Register.*/ intrstatus = udc->read_fn(udc->addr + XUSB_STATUS_OFFSET); /* Call the handler for the event interrupt.*/ if (intrstatus & XUSB_STATUS_INTR_EVENT_MASK) { /* * Check if there is any action to be done for : * - USB Reset received {XUSB_STATUS_RESET_MASK} * - USB Suspend received {XUSB_STATUS_SUSPEND_MASK} * - USB Resume received {XUSB_STATUS_RESUME_MASK} * - USB Disconnect received {XUSB_STATUS_DISCONNECT_MASK} */ xudc_startup_handler(udc, intrstatus); } /* Check the buffer completion interrupts */ if (intrstatus & XUSB_STATUS_INTR_BUFF_COMP_ALL_MASK) { /* Enable Reset, Suspend, Resume and Disconnect */ ier = udc->read_fn(udc->addr + XUSB_IER_OFFSET); ier |= XUSB_STATUS_INTR_EVENT_MASK; udc->write_fn(udc->addr, XUSB_IER_OFFSET, ier); if (intrstatus & XUSB_STATUS_EP0_BUFF1_COMP_MASK) xudc_ctrl_ep_handler(udc, intrstatus); for (index = 1; index < 8; index++) { bufintr = ((intrstatus & (XUSB_STATUS_EP1_BUFF1_COMP_MASK << (index - 1))) || (intrstatus & (XUSB_STATUS_EP1_BUFF2_COMP_MASK << (index - 1)))); if (bufintr) { xudc_nonctrl_ep_handler(udc, index, intrstatus); } } } spin_unlock_irqrestore(&udc->lock, flags); return IRQ_HANDLED; }
0
[ "CWE-20", "CWE-129" ]
linux
7f14c7227f342d9932f9b918893c8814f86d2a0d
188,303,936,376,683,160,000,000,000,000,000,000,000
60
USB: gadget: validate endpoint index for xilinx udc Ensure that the host may not manipulate the index to point past the endpoint array. Signed-off-by: Szymon Heidrich <[email protected]> Cc: stable <[email protected]> Signed-off-by: Greg Kroah-Hartman <[email protected]>
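A sketch of the validation shape, assuming an 8-entry endpoint array as in the handler above; the helper name is illustrative:

static struct xusb_ep *xudc_get_ep(struct xusb_udc *udc, u16 w_index)
{
	u8 epnum = w_index & USB_ENDPOINT_NUMBER_MASK;

	/* w_index comes from the host and must not be trusted to stay
	 * within the endpoint array. */
	if (epnum >= 8)
		return NULL;
	return &udc->ep[epnum];
}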
static int de_thread(struct task_struct *tsk) { struct signal_struct *sig = tsk->signal; struct sighand_struct *oldsighand = tsk->sighand; spinlock_t *lock = &oldsighand->siglock; if (thread_group_empty(tsk)) goto no_thread_group; /* * Kill all other threads in the thread group. */ spin_lock_irq(lock); if (signal_group_exit(sig)) { /* * Another group action in progress, just * return so that the signal is processed. */ spin_unlock_irq(lock); return -EAGAIN; } sig->group_exit_task = tsk; sig->notify_count = zap_other_threads(tsk); if (!thread_group_leader(tsk)) sig->notify_count--; while (sig->notify_count) { __set_current_state(TASK_UNINTERRUPTIBLE); spin_unlock_irq(lock); schedule(); spin_lock_irq(lock); } spin_unlock_irq(lock); /* * At this point all other threads have exited, all we have to * do is to wait for the thread group leader to become inactive, * and to assume its PID: */ if (!thread_group_leader(tsk)) { struct task_struct *leader = tsk->group_leader; sig->notify_count = -1; /* for exit_notify() */ for (;;) { write_lock_irq(&tasklist_lock); if (likely(leader->exit_state)) break; __set_current_state(TASK_UNINTERRUPTIBLE); write_unlock_irq(&tasklist_lock); schedule(); } /* * The only record we have of the real-time age of a * process, regardless of execs it's done, is start_time. * All the past CPU time is accumulated in signal_struct * from sister threads now dead. But in this non-leader * exec, nothing survives from the original leader thread, * whose birth marks the true age of this process now. * When we take on its identity by switching to its PID, we * also take its birthdate (always earlier than our own). */ tsk->start_time = leader->start_time; BUG_ON(!same_thread_group(leader, tsk)); BUG_ON(has_group_leader_pid(tsk)); /* * An exec() starts a new thread group with the * TGID of the previous thread group. Rehash the * two threads with a switched PID, and release * the former thread group leader: */ /* Become a process group leader with the old leader's pid. * The old leader becomes a thread of this thread group. * Note: The old leader also uses this pid until release_task * is called. Odd but simple and correct. */ detach_pid(tsk, PIDTYPE_PID); tsk->pid = leader->pid; attach_pid(tsk, PIDTYPE_PID, task_pid(leader)); transfer_pid(leader, tsk, PIDTYPE_PGID); transfer_pid(leader, tsk, PIDTYPE_SID); list_replace_rcu(&leader->tasks, &tsk->tasks); list_replace_init(&leader->sibling, &tsk->sibling); tsk->group_leader = tsk; leader->group_leader = tsk; tsk->exit_signal = SIGCHLD; leader->exit_signal = -1; BUG_ON(leader->exit_state != EXIT_ZOMBIE); leader->exit_state = EXIT_DEAD; /* * We are going to release_task()->ptrace_unlink() silently, * the tracer can sleep in do_wait(). EXIT_DEAD guarantees * the tracer won't block again waiting for this thread. */ if (unlikely(leader->ptrace)) __wake_up_parent(leader, leader->parent); write_unlock_irq(&tasklist_lock); release_task(leader); } sig->group_exit_task = NULL; sig->notify_count = 0; no_thread_group: /* we have changed execution domain */ tsk->exit_signal = SIGCHLD; exit_itimers(sig); flush_itimer_signals(); if (atomic_read(&oldsighand->count) != 1) { struct sighand_struct *newsighand; /* * This ->sighand is shared with the CLONE_SIGHAND * but not CLONE_THREAD task, switch to the new one.
*/ newsighand = kmem_cache_alloc(sighand_cachep, GFP_KERNEL); if (!newsighand) return -ENOMEM; atomic_set(&newsighand->count, 1); memcpy(newsighand->action, oldsighand->action, sizeof(newsighand->action)); write_lock_irq(&tasklist_lock); spin_lock(&oldsighand->siglock); rcu_assign_pointer(tsk->sighand, newsighand); spin_unlock(&oldsighand->siglock); write_unlock_irq(&tasklist_lock); __cleanup_sighand(oldsighand); } BUG_ON(!thread_group_leader(tsk)); return 0; }
0
[ "CWE-264" ]
linux
259e5e6c75a910f3b5e656151dc602f53f9d7548
114,399,256,879,819,810,000,000,000,000,000,000,000
145
Add PR_{GET,SET}_NO_NEW_PRIVS to prevent execve from granting privs With this change, calling prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0) disables privilege granting operations at execve-time. For example, a process will not be able to execute a setuid binary to change their uid or gid if this bit is set. The same is true for file capabilities. Additionally, LSM_UNSAFE_NO_NEW_PRIVS is defined to ensure that LSMs respect the requested behavior. To determine if the NO_NEW_PRIVS bit is set, a task may call prctl(PR_GET_NO_NEW_PRIVS, 0, 0, 0, 0); It returns 1 if set and 0 if it is not set. If any of the arguments are non-zero, it will return -1 and set errno to -EINVAL. (PR_SET_NO_NEW_PRIVS behaves similarly.) This functionality is desired for the proposed seccomp filter patch series. By using PR_SET_NO_NEW_PRIVS, it allows a task to modify the system call behavior for itself and its child tasks without being able to impact the behavior of a more privileged task. Another potential use is making certain privileged operations unprivileged. For example, chroot may be considered "safe" if it cannot affect privileged tasks. Note, this patch causes execve to fail when PR_SET_NO_NEW_PRIVS is set and AppArmor is in use. It is fixed in a subsequent patch. Signed-off-by: Andy Lutomirski <[email protected]> Signed-off-by: Will Drewry <[email protected]> Acked-by: Eric Paris <[email protected]> Acked-by: Kees Cook <[email protected]> v18: updated change desc v17: using new define values as per 3.4 Signed-off-by: James Morris <[email protected]>
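A minimal userland check of the API described above; PR_SET_NO_NEW_PRIVS/PR_GET_NO_NEW_PRIVS are defined in newer <linux/prctl.h>, so fallback values are provided for older headers:

#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_SET_NO_NEW_PRIVS
#define PR_SET_NO_NEW_PRIVS 38
#endif
#ifndef PR_GET_NO_NEW_PRIVS
#define PR_GET_NO_NEW_PRIVS 39
#endif

int main(void)
{
    /* All unused arguments must be zero, per the description above. */
    if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0) != 0) {
        perror("PR_SET_NO_NEW_PRIVS");
        return 1;
    }
    /* Expected to print 1: the bit is now set, is inherited by child
     * tasks, and execve() can no longer grant setuid/fscaps privileges. */
    printf("no_new_privs = %d\n", prctl(PR_GET_NO_NEW_PRIVS, 0, 0, 0, 0));
    return 0;
}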
input_osc_104(struct input_ctx *ictx, const char *p) { struct window_pane *wp = ictx->wp; char *copy, *s; long idx; if (wp == NULL) return; if (*p == '\0') { window_pane_reset_palette(wp); return; } copy = s = xstrdup(p); while (*s != '\0') { idx = strtol(s, &s, 10); if (*s != '\0' && *s != ';') goto bad; if (idx < 0 || idx >= 0x100) goto bad; window_pane_unset_palette(wp, idx); if (*s == ';') s++; } free(copy); return; bad: log_debug("bad OSC 104: %s", p); free(copy); }
0
[ "CWE-787" ]
tmux
a868bacb46e3c900530bed47a1c6f85b0fbe701c
57,203,727,889,777,520,000,000,000,000,000,000,000
33
Do not write after the end of the array and overwrite the stack when colon-separated SGR sequences contain empty arguments. Reported by Sergey Nizovtsev.
static int timezone_initialize(php_timezone_obj *tzobj, /*const*/ char *tz TSRMLS_DC) { timelib_time *dummy_t = ecalloc(1, sizeof(timelib_time)); int dst, not_found; char *orig_tz = tz; dummy_t->z = timelib_parse_zone(&tz, &dst, dummy_t, &not_found, DATE_TIMEZONEDB, php_date_parse_tzfile_wrapper); if (not_found) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Unknown or bad timezone (%s)", orig_tz); efree(dummy_t); return FAILURE; } else { set_timezone_from_timelib_time(tzobj, dummy_t); free(dummy_t->tz_abbr); efree(dummy_t); return SUCCESS; } }
0
[]
php-src
c377f1a715476934133f3254d1e0d4bf3743e2d2
283,262,877,432,479,700,000,000,000,000,000,000,000
18
Fix bug #68942 (Use after free vulnerability in unserialize() with DateTimeZone)
bool has_type_information () const { return false; }
0
[ "CWE-284" ]
Fast-DDS
d2aeab37eb4fad4376b68ea4dfbbf285a2926384
93,999,101,374,204,520,000,000,000,000,000,000,000
4
check remote permissions (#1387) * Refs 5346. Blackbox test Signed-off-by: Iker Luengo <[email protected]> * Refs 5346. one-way string compare Signed-off-by: Iker Luengo <[email protected]> * Refs 5346. Do not add partition separator on last partition Signed-off-by: Iker Luengo <[email protected]> * Refs 5346. Uncrustify Signed-off-by: Iker Luengo <[email protected]> * Refs 5346. Uncrustify Signed-off-by: Iker Luengo <[email protected]> * Refs 3680. Access control unit testing It only covers Partition and Topic permissions Signed-off-by: Iker Luengo <[email protected]> * Refs #3680. Fix partition check on Permissions plugin. Signed-off-by: Iker Luengo <[email protected]> * Refs 3680. Uncrustify Signed-off-by: Iker Luengo <[email protected]> * Refs 3680. Fix tests on mac Signed-off-by: Iker Luengo <[email protected]> * Refs 3680. Fix windows tests Signed-off-by: Iker Luengo <[email protected]> * Refs 3680. Avoid memory leak on test Signed-off-by: Iker Luengo <[email protected]> * Refs 3680. Proxy data mocks should not return temporary objects Signed-off-by: Iker Luengo <[email protected]> * refs 3680. uncrustify Signed-off-by: Iker Luengo <[email protected]> Co-authored-by: Miguel Company <[email protected]>
static void free_variant_list(HLSContext *c) { int i; for (i = 0; i < c->n_variants; i++) { struct variant *var = c->variants[i]; av_freep(&var->playlists); av_free(var); } av_freep(&c->variants); c->n_variants = 0; }
0
[ "CWE-703" ]
FFmpeg
7ba100d3e6e8b1e5d5342feb960a7f081d6e15af
334,609,251,894,488,900,000,000,000,000,000,000,000
11
avformat/hls: Fix DoS due to infinite loop Fixes: loop.m3u The default max iteration count of 1000 is arbitrary and ideas for a better solution are welcome Found-by: Xiaohei and Wangchu from Alibaba Security Team Previous version reviewed-by: Steven Liu <[email protected]> Signed-off-by: Michael Niedermayer <[email protected]> (cherry picked from commit 7ec414892ddcad88313848494b6fc5f437c9ca4a) Signed-off-by: Michael Niedermayer <[email protected]>
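A sketch of the loop cap described above, with parse_playlist() used illustratively and an error code standing in for "looped too long":

#define MAX_RELOADS 1000   /* arbitrary cap, per the commit message */

static int reload_playlist_bounded(HLSContext *c, struct playlist *pls)
{
    int i, ret;

    for (i = 0; i < MAX_RELOADS; i++) {
        ret = parse_playlist(c, pls->url, pls, NULL);
        if (ret < 0)
            return ret;                 /* genuine parse error */
        if (pls->n_segments > 0)
            return 0;                   /* progress was made */
    }
    /* A playlist that keeps pointing back at itself would otherwise
     * never terminate. */
    return AVERROR_INVALIDDATA;
}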
ModuleExport size_t RegisterSVGImage(void) { char version[MagickPathExtent]; MagickInfo *entry; *version='\0'; #if defined(LIBXML_DOTTED_VERSION) (void) CopyMagickString(version,"XML " LIBXML_DOTTED_VERSION, MagickPathExtent); #endif #if defined(MAGICKCORE_RSVG_DELEGATE) #if !GLIB_CHECK_VERSION(2,35,0) g_type_init(); #endif (void) FormatLocaleString(version,MagickPathExtent,"RSVG %d.%d.%d", LIBRSVG_MAJOR_VERSION,LIBRSVG_MINOR_VERSION,LIBRSVG_MICRO_VERSION); #endif entry=AcquireMagickInfo("SVG","SVG","Scalable Vector Graphics"); entry->decoder=(DecodeImageHandler *) ReadSVGImage; entry->encoder=(EncodeImageHandler *) WriteSVGImage; entry->flags^=CoderBlobSupportFlag; #if defined(MAGICKCORE_RSVG_DELEGATE) entry->flags^=CoderDecoderThreadSupportFlag; #endif entry->mime_type=ConstantString("image/svg+xml"); if (*version != '\0') entry->version=ConstantString(version); entry->magick=(IsImageFormatHandler *) IsSVG; (void) RegisterMagickInfo(entry); entry=AcquireMagickInfo("SVG","SVGZ","Compressed Scalable Vector Graphics"); #if defined(MAGICKCORE_XML_DELEGATE) entry->decoder=(DecodeImageHandler *) ReadSVGImage; #endif entry->encoder=(EncodeImageHandler *) WriteSVGImage; entry->flags^=CoderBlobSupportFlag; #if defined(MAGICKCORE_RSVG_DELEGATE) entry->flags^=CoderDecoderThreadSupportFlag; #endif entry->mime_type=ConstantString("image/svg+xml"); if (*version != '\0') entry->version=ConstantString(version); entry->magick=(IsImageFormatHandler *) IsSVG; (void) RegisterMagickInfo(entry); entry=AcquireMagickInfo("SVG","MSVG", "ImageMagick's own SVG internal renderer"); #if defined(MAGICKCORE_XML_DELEGATE) entry->decoder=(DecodeImageHandler *) ReadSVGImage; #endif entry->encoder=(EncodeImageHandler *) WriteSVGImage; entry->flags^=CoderBlobSupportFlag; #if defined(MAGICKCORE_RSVG_DELEGATE) entry->flags^=CoderDecoderThreadSupportFlag; #endif entry->magick=(IsImageFormatHandler *) IsSVG; (void) RegisterMagickInfo(entry); return(MagickImageCoderSignature); }
0
[ "CWE-20", "CWE-674", "CWE-787" ]
ImageMagick
ec9c8944af2bfc65c697ca44f93a727a99b405f1
98,214,514,292,574,200,000,000,000,000,000,000,000
60
[FG-VD-19-136] ImageMagick Convert SVG MacOS Denial Of Service
void replace_all(std::string & subject, const std::string & search, const std::string & replace) { size_t pos = 0; while ((pos = subject.find(search, pos)) != std::string::npos) { subject.replace(pos, search.length(), replace); pos += replace.length(); } }
1
[ "CWE-284" ]
Fast-DDS
d2aeab37eb4fad4376b68ea4dfbbf285a2926384
274,974,469,713,376,750,000,000,000,000,000,000,000
7
check remote permissions (#1387) * Refs 5346. Blackbox test Signed-off-by: Iker Luengo <[email protected]> * Refs 5346. one-way string compare Signed-off-by: Iker Luengo <[email protected]> * Refs 5346. Do not add partition separator on last partition Signed-off-by: Iker Luengo <[email protected]> * Refs 5346. Uncrustify Signed-off-by: Iker Luengo <[email protected]> * Refs 5346. Uncrustify Signed-off-by: Iker Luengo <[email protected]> * Refs 3680. Access control unit testing It only covers Partition and Topic permissions Signed-off-by: Iker Luengo <[email protected]> * Refs #3680. Fix partition check on Permissions plugin. Signed-off-by: Iker Luengo <[email protected]> * Refs 3680. Uncrustify Signed-off-by: Iker Luengo <[email protected]> * Refs 3680. Fix tests on mac Signed-off-by: Iker Luengo <[email protected]> * Refs 3680. Fix windows tests Signed-off-by: Iker Luengo <[email protected]> * Refs 3680. Avoid memory leak on test Signed-off-by: Iker Luengo <[email protected]> * Refs 3680. Proxy data mocks should not return temporary objects Signed-off-by: Iker Luengo <[email protected]> * refs 3680. uncrustify Signed-off-by: Iker Luengo <[email protected]> Co-authored-by: Miguel Company <[email protected]>
static int check_trust(X509_STORE_CTX *ctx, int num_untrusted) { int i; X509 *x = NULL; X509 *mx; SSL_DANE *dane = ctx->dane; int num = sk_X509_num(ctx->chain); int trust; /* * Check for a DANE issuer at depth 1 or greater, if it is a DANE-TA(2) * match, we're done, otherwise we'll merely record the match depth. */ if (DANETLS_HAS_TA(dane) && num_untrusted > 0 && num_untrusted < num) { switch (trust = check_dane_issuer(ctx, num_untrusted)) { case X509_TRUST_TRUSTED: case X509_TRUST_REJECTED: return trust; } } /* * Check trusted certificates in chain at depth num_untrusted and up. * Note, that depths 0..num_untrusted-1 may also contain trusted * certificates, but the caller is expected to have already checked those, * and wants to incrementally check just any added since. */ for (i = num_untrusted; i < num; i++) { x = sk_X509_value(ctx->chain, i); trust = X509_check_trust(x, ctx->param->trust, 0); /* If explicitly trusted return trusted */ if (trust == X509_TRUST_TRUSTED) goto trusted; if (trust == X509_TRUST_REJECTED) goto rejected; } /* * If we are looking at a trusted certificate, and accept partial chains, * the chain is PKIX trusted. */ if (num_untrusted < num) { if (ctx->param->flags & X509_V_FLAG_PARTIAL_CHAIN) goto trusted; return X509_TRUST_UNTRUSTED; } if (num_untrusted == num && ctx->param->flags & X509_V_FLAG_PARTIAL_CHAIN) { /* * Last-resort call with no new trusted certificates, check the leaf * for a direct trust store match. */ i = 0; x = sk_X509_value(ctx->chain, i); mx = lookup_cert_match(ctx, x); if (!mx) return X509_TRUST_UNTRUSTED; /* * Check explicit auxiliary trust/reject settings. If none are set, * we'll accept X509_TRUST_UNTRUSTED when not self-signed. */ trust = X509_check_trust(mx, ctx->param->trust, 0); if (trust == X509_TRUST_REJECTED) { X509_free(mx); goto rejected; } /* Replace leaf with trusted match */ (void) sk_X509_set(ctx->chain, 0, mx); X509_free(x); ctx->num_untrusted = 0; goto trusted; } /* * If no trusted certs in chain at all return untrusted and allow * standard (no issuer cert) etc errors to be indicated. */ return X509_TRUST_UNTRUSTED; rejected: if (!verify_cb_cert(ctx, x, i, X509_V_ERR_CERT_REJECTED)) return X509_TRUST_REJECTED; return X509_TRUST_UNTRUSTED; trusted: if (!DANETLS_ENABLED(dane)) return X509_TRUST_TRUSTED; if (dane->pdpth < 0) dane->pdpth = num_untrusted; /* With DANE, PKIX alone is not trusted until we have both */ if (dane->mdpth >= 0) return X509_TRUST_TRUSTED; return X509_TRUST_UNTRUSTED; }
0
[ "CWE-295" ]
openssl
2a40b7bc7b94dd7de897a74571e7024f0cf0d63b
192,816,346,217,656,520,000,000,000,000,000,000,000
96
check_chain_extensions: Do not override error return value by check_curve The X509_V_FLAG_X509_STRICT flag enables additional security checks of the certificates present in a certificate chain. It is not set by default. Starting from OpenSSL version 1.1.1h a check to disallow certificates with explicitly encoded elliptic curve parameters in the chain was added to the strict checks. An error in the implementation of this check meant that the result of a previous check to confirm that certificates in the chain are valid CA certificates was overwritten. This effectively bypasses the check that non-CA certificates must not be able to issue other certificates. If a "purpose" has been configured then a subsequent check that the certificate is consistent with that purpose also checks that it is a valid CA. Therefore where a purpose is set the certificate chain will still be rejected even when the strict flag has been used. A purpose is set by default in libssl client and server certificate verification routines, but it can be overridden by an application. Affected applications explicitly set the X509_V_FLAG_X509_STRICT verification flag and either do not set a purpose for the certificate verification or, in the case of TLS client or server applications, override the default purpose to make it not set. CVE-2021-3450 Reviewed-by: Matt Caswell <[email protected]> Reviewed-by: Paul Dale <[email protected]>
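The distilled bug shape, with hypothetical check names standing in for the CA and explicit-curve checks:

static int check_cert_strict(X509 *x)
{
    int ok = is_valid_ca(x);          /* hypothetical check #1 */

    /* The buggy version did "ok = has_named_curve(x);" here, silently
     * discarding a failure from the CA check. Keep failures sticky: */
    if (ok && !has_named_curve(x))    /* hypothetical check #2 */
        ok = 0;
    return ok;
}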
int LibRaw::copy_mem_image(void* scan0, int stride, int bgr) { // the image memory pointed to by scan0 is assumed to be in the format returned by get_mem_image_format if((imgdata.progress_flags & LIBRAW_PROGRESS_THUMB_MASK) < LIBRAW_PROGRESS_PRE_INTERPOLATE) return LIBRAW_OUT_OF_ORDER_CALL; if(libraw_internal_data.output_data.histogram) { int perc, val, total, t_white=0x2000,c; perc = S.width * S.height * 0.01; /* 99th percentile white level */ if (IO.fuji_width) perc /= 2; if (!((O.highlight & ~2) || O.no_auto_bright)) for (t_white=c=0; c < P1.colors; c++) { for (val=0x2000, total=0; --val > 32; ) if ((total += libraw_internal_data.output_data.histogram[c][val]) > perc) break; if (t_white < val) t_white = val; } gamma_curve (O.gamm[0], O.gamm[1], 2, (t_white << 3)/O.bright); } int s_iheight = S.iheight; int s_iwidth = S.iwidth; int s_width = S.width; int s_hwight = S.height; S.iheight = S.height; S.iwidth = S.width; if (S.flip & 4) SWAP(S.height,S.width); uchar *ppm; ushort *ppm2; int c, row, col, soff, rstep, cstep; soff = flip_index (0, 0); cstep = flip_index (0, 1) - soff; rstep = flip_index (1, 0) - flip_index (0, S.width); for (row=0; row < S.height; row++, soff += rstep) { uchar *bufp = ((uchar*)scan0)+row*stride; ppm2 = (ushort*) (ppm = bufp); // keep trivial decisions in the outer loop for speed if (bgr) { if (O.output_bps == 8) { for (col=0; col < S.width; col++, soff += cstep) FORBGR *ppm++ = imgdata.color.curve[imgdata.image[soff][c]]>>8; } else { for (col=0; col < S.width; col++, soff += cstep) FORBGR *ppm2++ = imgdata.color.curve[imgdata.image[soff][c]]; } } else { if (O.output_bps == 8) { for (col=0; col < S.width; col++, soff += cstep) FORRGB *ppm++ = imgdata.color.curve[imgdata.image[soff][c]]>>8; } else { for (col=0; col < S.width; col++, soff += cstep) FORRGB *ppm2++ = imgdata.color.curve[imgdata.image[soff][c]]; } } // bufp += stride; // go to the next line } S.iheight = s_iheight; S.iwidth = s_iwidth; S.width = s_width; S.height = s_hwight; return 0; }
0
[ "CWE-119", "CWE-787" ]
LibRaw
2f912f5b33582961b1cdbd9fd828589f8b78f21d
152,808,223,174,303,570,000,000,000,000,000,000,000
76
fixed wrong data_maximum calcluation; prevent out-of-buffer in exp_bef
PHP_FUNCTION(stream_get_contents) { php_stream *stream; zval *zsrc; long maxlen = PHP_STREAM_COPY_ALL, desiredpos = -1L; long len; char *contents = NULL; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "r|ll", &zsrc, &maxlen, &desiredpos) == FAILURE) { RETURN_FALSE; } php_stream_from_zval(stream, &zsrc); if (desiredpos >= 0) { int seek_res = 0; off_t position; position = php_stream_tell(stream); if (position >= 0 && desiredpos > position) { /* use SEEK_CUR to allow emulation in streams that don't support seeking */ seek_res = php_stream_seek(stream, desiredpos - position, SEEK_CUR); } else if (desiredpos < position) { /* desired position before position or error on tell */ seek_res = php_stream_seek(stream, desiredpos, SEEK_SET); } if (seek_res != 0) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Failed to seek to position %ld in the stream", desiredpos); RETURN_FALSE; } } len = php_stream_copy_to_mem(stream, &contents, maxlen, 0); if (contents) { if (len > INT_MAX) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "content truncated from %ld to %d bytes", len, INT_MAX); len = INT_MAX; } RETVAL_STRINGL(contents, len, 0); } else { RETVAL_EMPTY_STRING(); } }
0
[ "CWE-20" ]
php-src
52b93f0cfd3cba7ff98cc5198df6ca4f23865f80
5,625,363,537,531,280,000,000,000,000,000,000,000
47
Fixed bug #69353 (Missing null byte checks for paths in various PHP extensions)
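The common guard for this bug class is rejecting any path argument that contains an embedded NUL before it reaches C string APIs; a minimal sketch:

#include <string.h>
#include <stdbool.h>

/* "foo.txt\0.jpg" would be silently truncated at the NUL by the
 * underlying C calls, bypassing extension or prefix checks. */
static bool path_is_clean(const char *path, size_t path_len)
{
    return memchr(path, '\0', path_len) == NULL;
}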
ZEND_VM_COLD_CONST_HANDLER(46, ZEND_JMPZ_EX, CONST|TMPVAR|CV, JMP_ADDR) { USE_OPLINE zend_free_op free_op1; zval *val; int ret; val = GET_OP1_ZVAL_PTR_UNDEF(BP_VAR_R); if (Z_TYPE_INFO_P(val) == IS_TRUE) { ZVAL_TRUE(EX_VAR(opline->result.var)); ZEND_VM_NEXT_OPCODE(); } else if (EXPECTED(Z_TYPE_INFO_P(val) <= IS_TRUE)) { ZVAL_FALSE(EX_VAR(opline->result.var)); if (OP1_TYPE == IS_CV && UNEXPECTED(Z_TYPE_INFO_P(val) == IS_UNDEF)) { SAVE_OPLINE(); ZVAL_UNDEFINED_OP1(); if (UNEXPECTED(EG(exception))) { HANDLE_EXCEPTION(); } } ZEND_VM_JMP_EX(OP_JMP_ADDR(opline, opline->op2), 0); } SAVE_OPLINE(); ret = i_zend_is_true(val); FREE_OP1(); if (ret) { ZVAL_TRUE(EX_VAR(opline->result.var)); opline++; } else { ZVAL_FALSE(EX_VAR(opline->result.var)); opline = OP_JMP_ADDR(opline, opline->op2); } ZEND_VM_JMP(opline); }
0
[ "CWE-787" ]
php-src
f1ce8d5f5839cb2069ea37ff424fb96b8cd6932d
14,497,612,277,121,296,000,000,000,000,000,000,000
36
Fix #73122: Integer Overflow when concatenating strings We must avoid integer overflows in memory allocations, so we introduce an additional check in the VM, and bail out in the rare case of an overflow. Since the recent fix for bug #74960 still doesn't catch all possible overflows, we fix that right away.
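The overflow guard itself is the standard pre-addition check; a sketch:

#include <stdint.h>
#include <stddef.h>

/* Returns 0 on overflow; otherwise stores len1 + len2 in *out. A wrapped
 * sum would make the subsequent allocation far too small, and the copy
 * would then write past it. */
static int checked_concat_len(size_t len1, size_t len2, size_t *out)
{
    if (len1 > SIZE_MAX - len2)
        return 0;
    *out = len1 + len2;
    return 1;
}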
bool is_comp_vector(const unsigned int arg) const { unsigned int siz = _cimg_mp_size(arg); if (siz>128) return false; const int *ptr = memtype.data(arg + 1); bool is_tmp = true; while (siz-->0) if (*(ptr++)) { is_tmp = false; break; } return is_tmp; }
0
[ "CWE-770" ]
cimg
619cb58dd90b4e03ac68286c70ed98acbefd1c90
319,986,062,029,805,580,000,000,000,000,000,000,000
8
CImg<>::load_bmp() and CImg<>::load_pandore(): Check that the dimensions encoded in the file do not exceed the file size.
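A sketch of the plausibility check described above; the bytes-per-pixel and header-size parameters are illustrative:

#include <stdint.h>

static int bmp_dims_plausible(uint64_t width, uint64_t height,
                              uint64_t bytes_per_pixel,
                              uint64_t file_size, uint64_t header_size)
{
    uint64_t prod, need;

    if (width == 0 || height == 0 || bytes_per_pixel == 0)
        return 0;
    /* Multiply stepwise so the overflow checks themselves are safe. */
    if (height > UINT64_MAX / bytes_per_pixel)
        return 0;
    prod = height * bytes_per_pixel;
    if (width > UINT64_MAX / prod)
        return 0;
    need = width * prod;
    /* The header cannot claim more pixel data than the file holds. */
    return header_size <= file_size && need <= file_size - header_size;
}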
static long fuse_dir_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct fuse_conn *fc = get_fuse_conn(file->f_mapping->host); /* FUSE_IOCTL_DIR only supported for API version >= 7.18 */ if (fc->minor < 18) return -ENOTTY; return fuse_ioctl_common(file, cmd, arg, FUSE_IOCTL_DIR); }
0
[ "CWE-459" ]
linux
5d069dbe8aaf2a197142558b6fb2978189ba3454
121,253,100,841,537,320,000,000,000,000,000,000,000
11
fuse: fix bad inode Jan Kara's analysis of the syzbot report (edited): The reproducer opens a directory on FUSE filesystem, it then attaches dnotify mark to the open directory. After that a fuse_do_getattr() call finds that attributes returned by the server are inconsistent, and calls make_bad_inode() which, among other things does: inode->i_mode = S_IFREG; This then confuses dnotify which doesn't tear down its structures properly and eventually crashes. Avoid calling make_bad_inode() on a live inode: switch to a private flag on the fuse inode. Also add the test to ops which the bad_inode_ops would have caught. This bug goes back to the initial merge of fuse in 2.6.14... Reported-by: [email protected] Signed-off-by: Miklos Szeredi <[email protected]> Tested-by: Jan Kara <[email protected]> Cc: <[email protected]>
static unsigned int find_best_mtu(const struct t3c_data *d, unsigned short mtu) { int i = 0; while (i < d->nmtus - 1 && d->mtus[i + 1] <= mtu) ++i; return i; }
0
[ "CWE-703" ]
linux
67f1aee6f45059fd6b0f5b0ecb2c97ad0451f6b3
163,222,296,668,433,120,000,000,000,000,000,000,000
8
iw_cxgb3: Fix incorrectly returning error on success The cxgb3_*_send() functions return NET_XMIT_ values, which are positive integer values. So don't treat positive return values as an error. Signed-off-by: Steve Wise <[email protected]> Signed-off-by: Hariprasad Shenai <[email protected]> Signed-off-by: Doug Ledford <[email protected]>
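Distilled, the bug is misreading a positive status as failure:

/* Before: NET_XMIT_CN (a positive congestion hint: the packet was
 * still queued) was treated the same as a failure. */
if (cxgb3_ofld_send(dev, skb))
        goto err;

/* After: only negative values are real errors. */
if (cxgb3_ofld_send(dev, skb) < 0)
        goto err;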
static RTSPTransportField *find_transport(RTSPMessageHeader *h, enum RTSPLowerTransport lower_transport) { RTSPTransportField *th; int i; for(i=0;i<h->nb_transports;i++) { th = &h->transports[i]; if (th->lower_transport == lower_transport) return th; } return NULL; }
0
[ "CWE-119", "CWE-787" ]
FFmpeg
a5d25faa3f4b18dac737fdb35d0dd68eb0dc2156
324,549,268,478,093,500,000,000,000,000,000,000,000
12
ffserver: Check chunk size Fixes out of array access Fixes: poc_ffserver.py Found-by: Paul Cher <[email protected]> Signed-off-by: Michael Niedermayer <[email protected]>
int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) { const struct sys_reg_desc *r; void __user *uaddr = (void __user *)(unsigned long)reg->addr; if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX) return demux_c15_get(reg->id, uaddr); if (KVM_REG_SIZE(reg->id) != sizeof(__u64)) return -ENOENT; r = index_to_sys_reg_desc(vcpu, reg->id); if (!r) return get_invariant_sys_reg(reg->id, uaddr); if (r->get_user) return (r->get_user)(vcpu, r, reg, uaddr); return reg_to_user(uaddr, &vcpu_sys_reg(vcpu, r->reg), reg->id); }
0
[ "CWE-20", "CWE-617" ]
linux
9e3f7a29694049edd728e2400ab57ad7553e5aa9
277,046,693,583,979,880,000,000,000,000,000,000,000
20
arm64: KVM: pmu: Fix AArch32 cycle counter access We're missing the handling code for the cycle counter accessed from a 32bit guest, leading to unexpected results. Cc: [email protected] # 4.6+ Signed-off-by: Wei Huang <[email protected]> Signed-off-by: Marc Zyngier <[email protected]>
static int scsi_disk_initfn(SCSIDevice *dev) { DriveInfo *dinfo; uint8_t scsi_type; if (!dev->conf.bs) { scsi_type = TYPE_DISK; /* will die in scsi_initfn() */ } else { dinfo = drive_get_by_blockdev(dev->conf.bs); scsi_type = dinfo->media_cd ? TYPE_ROM : TYPE_DISK; } return scsi_initfn(dev, scsi_type); }
0
[ "CWE-119", "CWE-787" ]
qemu
103b40f51e4012b3b0ad20f615562a1806d7f49a
234,099,693,805,431,860,000,000,000,000,000,000,000
14
scsi-disk: commonize iovec creation between reads and writes Also, consistently use qiov.size instead of iov.iov_len. Signed-off-by: Paolo Bonzini <[email protected]> Signed-off-by: Kevin Wolf <[email protected]>
static MagickBooleanType HorizontalFilter(const ResizeFilter *resize_filter, const Image *image,Image *resize_image,const double x_factor, const MagickSizeType span,MagickOffsetType *offset,ExceptionInfo *exception) { #define ResizeImageTag "Resize/Image" CacheView *image_view, *resize_view; ClassType storage_class; ContributionInfo **magick_restrict contributions; MagickBooleanType status; double scale, support; ssize_t x; /* Apply filter to resize horizontally from image to resize image. */ scale=MagickMax(1.0/x_factor+MagickEpsilon,1.0); support=scale*GetResizeFilterSupport(resize_filter); storage_class=support > 0.5 ? DirectClass : image->storage_class; if (SetImageStorageClass(resize_image,storage_class,exception) == MagickFalse) return(MagickFalse); if (support < 0.5) { /* Support too small even for nearest neighbour: Reduce to point sampling. */ support=(double) 0.5; scale=1.0; } contributions=AcquireContributionThreadSet((size_t) (2.0*support+3.0)); if (contributions == (ContributionInfo **) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(MagickFalse); } status=MagickTrue; scale=PerceptibleReciprocal(scale); image_view=AcquireVirtualCacheView(image,exception); resize_view=AcquireAuthenticCacheView(resize_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,resize_image,resize_image->columns,1) #endif for (x=0; x < (ssize_t) resize_image->columns; x++) { const int id = GetOpenMPThreadId(); double bisect, density; register const Quantum *magick_restrict p; register ContributionInfo *magick_restrict contribution; register Quantum *magick_restrict q; register ssize_t y; ssize_t n, start, stop; if (status == MagickFalse) continue; bisect=(double) (x+0.5)/x_factor+MagickEpsilon; start=(ssize_t) MagickMax(bisect-support+0.5,0.0); stop=(ssize_t) MagickMin(bisect+support+0.5,(double) image->columns); density=0.0; contribution=contributions[id]; for (n=0; n < (stop-start); n++) { contribution[n].pixel=start+n; contribution[n].weight=GetResizeFilterWeight(resize_filter,scale* ((double) (start+n)-bisect+0.5)); density+=contribution[n].weight; } if (n == 0) continue; if ((density != 0.0) && (density != 1.0)) { register ssize_t i; /* Normalize. 
*/ density=PerceptibleReciprocal(density); for (i=0; i < n; i++) contribution[i].weight*=density; } p=GetCacheViewVirtualPixels(image_view,contribution[0].pixel,0,(size_t) (contribution[n-1].pixel-contribution[0].pixel+1),image->rows,exception); q=QueueCacheViewAuthenticPixels(resize_view,x,0,1,resize_image->rows, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (y=0; y < (ssize_t) resize_image->rows; y++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double alpha, gamma, pixel; PixelChannel channel; PixelTrait resize_traits, traits; register ssize_t j; ssize_t k; channel=GetPixelChannelChannel(image,i); traits=GetPixelChannelTraits(image,channel); resize_traits=GetPixelChannelTraits(resize_image,channel); if ((traits == UndefinedPixelTrait) || (resize_traits == UndefinedPixelTrait)) continue; if (((resize_traits & CopyPixelTrait) != 0) || (GetPixelWriteMask(resize_image,q) <= (QuantumRange/2))) { j=(ssize_t) (MagickMin(MagickMax(bisect,(double) start),(double) stop-1.0)+0.5); k=y*(contribution[n-1].pixel-contribution[0].pixel+1)+ (contribution[j-start].pixel-contribution[0].pixel); SetPixelChannel(resize_image,channel,p[k*GetPixelChannels(image)+i], q); continue; } pixel=0.0; if ((resize_traits & BlendPixelTrait) == 0) { /* No alpha blending. */ for (j=0; j < n; j++) { k=y*(contribution[n-1].pixel-contribution[0].pixel+1)+ (contribution[j].pixel-contribution[0].pixel); alpha=contribution[j].weight; pixel+=alpha*p[k*GetPixelChannels(image)+i]; } SetPixelChannel(resize_image,channel,ClampToQuantum(pixel),q); continue; } /* Alpha blending. */ gamma=0.0; for (j=0; j < n; j++) { k=y*(contribution[n-1].pixel-contribution[0].pixel+1)+ (contribution[j].pixel-contribution[0].pixel); alpha=contribution[j].weight*QuantumScale* GetPixelAlpha(image,p+k*GetPixelChannels(image)); pixel+=alpha*p[k*GetPixelChannels(image)+i]; gamma+=alpha; } gamma=PerceptibleReciprocal(gamma); SetPixelChannel(resize_image,channel,ClampToQuantum(gamma*pixel),q); } q+=GetPixelChannels(resize_image); } if (SyncCacheViewAuthenticPixels(resize_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_HorizontalFilter) #endif proceed=SetImageProgress(image,ResizeImageTag,(*offset)++,span); if (proceed == MagickFalse) status=MagickFalse; } } resize_view=DestroyCacheView(resize_view); image_view=DestroyCacheView(image_view); contributions=DestroyContributionThreadSet(contributions); return(status); }
0
[ "CWE-125" ]
ImageMagick
c5402b6e0fcf8b694ae2af6a6652ebb8ce0ccf46
179,598,649,965,247,030,000,000,000,000,000,000,000
216
https://github.com/ImageMagick/ImageMagick/issues/717
int virtio_queue_ready(VirtQueue *vq) { return vq->vring.avail != 0; }
0
[ "CWE-94" ]
qemu
cc45995294b92d95319b4782750a3580cabdbc0c
96,218,979,630,495,820,000,000,000,000,000,000,000
4
virtio: out-of-bounds buffer write on invalid state load CVE-2013-4151 QEMU 1.0 out-of-bounds buffer write in virtio_load@hw/virtio/virtio.c So we have this code since way back when: num = qemu_get_be32(f); for (i = 0; i < num; i++) { vdev->vq[i].vring.num = qemu_get_be32(f); array of vqs has size VIRTIO_PCI_QUEUE_MAX, so on invalid input this will write beyond end of buffer. Signed-off-by: Michael S. Tsirkin <[email protected]> Reviewed-by: Michael Roth <[email protected]> Signed-off-by: Juan Quintela <[email protected]>
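The fix shape, continuing the fragment quoted above: bound the loaded count before using it as a loop limit over the fixed-size vq[] array (illustrative, not the literal patch):

num = qemu_get_be32(f);
if (num > VIRTIO_PCI_QUEUE_MAX) {
    /* the migration stream is untrusted input: refuse it rather
     * than write past the end of vdev->vq[] */
    return -1;
}
for (i = 0; i < num; i++)
    vdev->vq[i].vring.num = qemu_get_be32(f);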
NCR_GetRemoteAddress(NCR_Instance inst) { return &inst->remote_addr; }
0
[]
chrony
a78bf9725a7b481ebff0e0c321294ba767f2c1d8
339,928,670,303,856,780,000,000,000,000,000,000,000
4
ntp: restrict authentication of server/peer to specified key When a server/peer was specified with a key number to enable authentication with a symmetric key, packets received from the server/peer were accepted if they were authenticated with any of the keys contained in the key file and not just the specified key. This allowed an attacker who knew one key of a client/peer to modify packets from its servers/peers that were authenticated with other keys in a man-in-the-middle (MITM) attack. For example, in a network where each NTP association had a separate key and all hosts had only keys they needed, a client of a server could not attack other clients of the server, but it could attack the server and also attack its own clients (i.e. modify packets from other servers). To prevent the server/peer from being authenticated with other keys, extend the authentication test to check whether the key ID in the received packet is equal to the configured key number. As a consequence, it's no longer possible to authenticate two peers to each other with two different keys; both peers have to be configured to use the same key. This issue was discovered by Matt Street of Cisco ASIG.
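The essence of the fix, sketched with illustrative names: verify the MAC as before, and additionally require the packet's key ID to match the key number configured for the association:

#include <stdint.h>

static int packet_auth_ok(uint32_t packet_key_id,
                          uint32_t configured_key_id,
                          int mac_valid)
{
    /* A valid MAC under some *other* key in the key file is no longer
     * sufficient; the key ID must be the configured one. */
    return mac_valid && packet_key_id == configured_key_id;
}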
int main(int argc, char** argv) { /* Kernel starts us with all fd's closed. * But it's dangerous: * fprintf(stderr) can dump messages into random fds, etc. * Ensure that if any of fd 0,1,2 is closed, we open it to /dev/null. */ int fd = xopen("/dev/null", O_RDWR); while (fd < 2) fd = xdup(fd); if (fd > 2) close(fd); if (argc < 8) { /* percent specifier: %s %c %p %u %g %t %e %h */ /* argv: [0] [1] [2] [3] [4] [5] [6] [7] [8]*/ error_msg_and_die("Usage: %s SIGNO CORE_SIZE_LIMIT PID UID GID TIME BINARY_NAME [HOSTNAME]", argv[0]); } /* Not needed on 2.6.30. * At least 2.6.18 has a bug where * argv[1] = "SIGNO CORE_SIZE_LIMIT PID ..." * argv[2] = "CORE_SIZE_LIMIT PID ..." * and so on. Fixing it: */ if (strchr(argv[1], ' ')) { int i; for (i = 1; argv[i]; i++) { strchrnul(argv[i], ' ')[0] = '\0'; } } logmode = LOGMODE_JOURNAL; /* Parse abrt.conf */ load_abrt_conf(); /* ... and plugins/CCpp.conf */ bool setting_MakeCompatCore; bool setting_SaveBinaryImage; { map_string_t *settings = new_map_string(); load_abrt_plugin_conf_file("CCpp.conf", settings); const char *value; value = get_map_string_item_or_NULL(settings, "MakeCompatCore"); setting_MakeCompatCore = value && string_to_bool(value); value = get_map_string_item_or_NULL(settings, "SaveBinaryImage"); setting_SaveBinaryImage = value && string_to_bool(value); value = get_map_string_item_or_NULL(settings, "VerboseLog"); if (value) g_verbose = xatoi_positive(value); free_map_string(settings); } errno = 0; const char* signal_str = argv[1]; int signal_no = xatoi_positive(signal_str); off_t ulimit_c = strtoull(argv[2], NULL, 10); if (ulimit_c < 0) /* unlimited? */ { /* set to max possible >0 value */ ulimit_c = ~((off_t)1 << (sizeof(off_t)*8-1)); } const char *pid_str = argv[3]; pid_t pid = xatoi_positive(argv[3]); uid_t uid = xatoi_positive(argv[4]); if (errno || pid <= 0) { perror_msg_and_die("PID '%s' or limit '%s' is bogus", argv[3], argv[2]); } { char *s = xmalloc_fopen_fgetline_fclose(VAR_RUN"/abrt/saved_core_pattern"); /* If we have a saved pattern and it's not a "|PROG ARGS" thing... */ if (s && s[0] != '|') core_basename = s; else free(s); } struct utsname uts; if (!argv[8]) /* no HOSTNAME? */ { uname(&uts); argv[8] = uts.nodename; } char path[PATH_MAX]; int src_fd_binary = -1; char *executable = get_executable(pid, setting_SaveBinaryImage ? 
&src_fd_binary : NULL); if (executable && strstr(executable, "/abrt-hook-ccpp")) { error_msg_and_die("PID %lu is '%s', not dumping it to avoid recursion", (long)pid, executable); } user_pwd = get_cwd(pid); /* may be NULL on error */ log_notice("user_pwd:'%s'", user_pwd); sprintf(path, "/proc/%lu/status", (long)pid); proc_pid_status = xmalloc_xopen_read_close(path, /*maxsz:*/ NULL); uid_t fsuid = uid; uid_t tmp_fsuid = get_fsuid(); int suid_policy = dump_suid_policy(); if (tmp_fsuid != uid) { /* use root for suided apps unless it's explicitly set to UNSAFE */ fsuid = 0; if (suid_policy == DUMP_SUID_UNSAFE) { fsuid = tmp_fsuid; } } /* Open a fd to compat coredump, if requested and is possible */ if (setting_MakeCompatCore && ulimit_c != 0) /* note: checks "user_pwd == NULL" inside; updates core_basename */ user_core_fd = open_user_core(uid, fsuid, pid, &argv[1]); if (executable == NULL) { /* readlink on /proc/$PID/exe failed, don't create abrt dump dir */ error_msg("Can't read /proc/%lu/exe link", (long)pid); goto create_user_core; } const char *signame = NULL; switch (signal_no) { case SIGILL : signame = "ILL" ; break; case SIGFPE : signame = "FPE" ; break; case SIGSEGV: signame = "SEGV"; break; case SIGBUS : signame = "BUS" ; break; //Bus error (bad memory access) case SIGABRT: signame = "ABRT"; break; //usually when abort() was called // We have real-world reports from users who see buggy programs // dying with SIGTRAP, uncommented it too: case SIGTRAP: signame = "TRAP"; break; //Trace/breakpoint trap // These usually aren't caused by bugs: //case SIGQUIT: signame = "QUIT"; break; //Quit from keyboard //case SIGSYS : signame = "SYS" ; break; //Bad argument to routine (SVr4) //case SIGXCPU: signame = "XCPU"; break; //CPU time limit exceeded (4.2BSD) //case SIGXFSZ: signame = "XFSZ"; break; //File size limit exceeded (4.2BSD) default: goto create_user_core; // not a signal we care about } if (!daemon_is_ok()) { /* not an error, exit with exit code 0 */ log("abrtd is not running. If it crashed, " "/proc/sys/kernel/core_pattern contains a stale value, " "consider resetting it to 'core'" ); goto create_user_core; } if (g_settings_nMaxCrashReportsSize > 0) { /* If free space is less than 1/4 of MaxCrashReportsSize... */ if (low_free_space(g_settings_nMaxCrashReportsSize, g_settings_dump_location)) goto create_user_core; } /* Check /var/tmp/abrt/last-ccpp marker, do not dump repeated crashes * if they happen too often. Else, write new marker value. */ snprintf(path, sizeof(path), "%s/last-ccpp", g_settings_dump_location); if (check_recent_crash_file(path, executable)) { /* It is a repeating crash */ goto create_user_core; } const char *last_slash = strrchr(executable, '/'); if (last_slash && strncmp(++last_slash, "abrt", 4) == 0) { /* If abrtd/abrt-foo crashes, we don't want to create a _directory_, * since that can make new copy of abrtd to process it, * and maybe crash again... * Unlike dirs, mere files are ignored by abrtd. 
*/ snprintf(path, sizeof(path), "%s/%s-coredump", g_settings_dump_location, last_slash); int abrt_core_fd = xopen3(path, O_WRONLY | O_CREAT | O_TRUNC, 0600); off_t core_size = copyfd_eof(STDIN_FILENO, abrt_core_fd, COPYFD_SPARSE); if (core_size < 0 || fsync(abrt_core_fd) != 0) { unlink(path); /* copyfd_eof logs the error including errno string, * but it does not log file name */ error_msg_and_die("Error saving '%s'", path); } log("Saved core dump of pid %lu (%s) to %s (%llu bytes)", (long)pid, executable, path, (long long)core_size); return 0; } unsigned path_len = snprintf(path, sizeof(path), "%s/ccpp-%s-%lu.new", g_settings_dump_location, iso_date_string(NULL), (long)pid); if (path_len >= (sizeof(path) - sizeof("/"FILENAME_COREDUMP))) { goto create_user_core; } /* use fsuid instead of uid, so we don't expose any sensitive * information of suided app in /var/tmp/abrt * * dd_create_skeleton() creates a new directory and leaves ownership to * the current user, hence, we have to call dd_reset_ownership() after the * directory is populated. */ dd = dd_create_skeleton(path, fsuid, DEFAULT_DUMP_DIR_MODE, /*no flags*/0); if (dd) { char *rootdir = get_rootdir(pid); dd_create_basic_files(dd, fsuid, NULL); char source_filename[sizeof("/proc/%lu/somewhat_long_name") + sizeof(long)*3]; int source_base_ofs = sprintf(source_filename, "/proc/%lu/smaps", (long)pid); source_base_ofs -= strlen("smaps"); char *dest_filename = concat_path_file(dd->dd_dirname, "also_somewhat_longish_name"); char *dest_base = strrchr(dest_filename, '/') + 1; // Disabled for now: /proc/PID/smaps tends to be BIG, // and not much more informative than /proc/PID/maps: //copy_file_ext(source_filename, dest_filename, 0640, dd->dd_uid, dd->dd_gid, O_RDONLY, O_WRONLY | O_CREAT | O_TRUNC | O_EXCL); strcpy(source_filename + source_base_ofs, "maps"); strcpy(dest_base, FILENAME_MAPS); copy_file_ext(source_filename, dest_filename, 0640, dd->dd_uid, dd->dd_gid, O_RDONLY, O_WRONLY | O_CREAT | O_TRUNC | O_EXCL); strcpy(source_filename + source_base_ofs, "limits"); strcpy(dest_base, FILENAME_LIMITS); copy_file_ext(source_filename, dest_filename, 0640, dd->dd_uid, dd->dd_gid, O_RDONLY, O_WRONLY | O_CREAT | O_TRUNC | O_EXCL); strcpy(source_filename + source_base_ofs, "cgroup"); strcpy(dest_base, FILENAME_CGROUP); copy_file_ext(source_filename, dest_filename, 0640, dd->dd_uid, dd->dd_gid, O_RDONLY, O_WRONLY | O_CREAT | O_TRUNC | O_EXCL); strcpy(dest_base, FILENAME_OPEN_FDS); dump_fd_info(dest_filename, source_filename, source_base_ofs, dd->dd_uid, dd->dd_gid); free(dest_filename); dd_save_text(dd, FILENAME_ANALYZER, "CCpp"); dd_save_text(dd, FILENAME_TYPE, "CCpp"); dd_save_text(dd, FILENAME_EXECUTABLE, executable); dd_save_text(dd, FILENAME_PID, pid_str); dd_save_text(dd, FILENAME_PROC_PID_STATUS, proc_pid_status); if (user_pwd) dd_save_text(dd, FILENAME_PWD, user_pwd); if (rootdir) { if (strcmp(rootdir, "/") != 0) dd_save_text(dd, FILENAME_ROOTDIR, rootdir); } char *reason = xasprintf("%s killed by SIG%s", last_slash, signame ? signame : signal_str); dd_save_text(dd, FILENAME_REASON, reason); free(reason); char *cmdline = get_cmdline(pid); dd_save_text(dd, FILENAME_CMDLINE, cmdline ? : ""); free(cmdline); char *environ = get_environ(pid); dd_save_text(dd, FILENAME_ENVIRON, environ ? 
: ""); free(environ); char *fips_enabled = xmalloc_fopen_fgetline_fclose("/proc/sys/crypto/fips_enabled"); if (fips_enabled) { if (strcmp(fips_enabled, "0") != 0) dd_save_text(dd, "fips_enabled", fips_enabled); free(fips_enabled); } dd_save_text(dd, FILENAME_ABRT_VERSION, VERSION); if (src_fd_binary > 0) { strcpy(path + path_len, "/"FILENAME_BINARY); int dst_fd = create_or_die(path); off_t sz = copyfd_eof(src_fd_binary, dst_fd, COPYFD_SPARSE); if (fsync(dst_fd) != 0 || close(dst_fd) != 0 || sz < 0) { dd_delete(dd); error_msg_and_die("Error saving '%s'", path); } close(src_fd_binary); } strcpy(path + path_len, "/"FILENAME_COREDUMP); int abrt_core_fd = create_or_die(path); /* We write both coredumps at once. * We can't write user coredump first, since it might be truncated * and thus can't be copied and used as abrt coredump; * and if we write abrt coredump first and then copy it as user one, * then we have a race when process exits but coredump does not exist yet: * $ echo -e '#include<signal.h>\nmain(){raise(SIGSEGV);}' | gcc -o test -x c - * $ rm -f core*; ulimit -c unlimited; ./test; ls -l core* * 21631 Segmentation fault (core dumped) ./test * ls: cannot access core*: No such file or directory <=== BAD */ off_t core_size = copyfd_sparse(STDIN_FILENO, abrt_core_fd, user_core_fd, ulimit_c); if (fsync(abrt_core_fd) != 0 || close(abrt_core_fd) != 0 || core_size < 0) { unlink(path); dd_delete(dd); if (user_core_fd >= 0) { xchdir(user_pwd); unlink(core_basename); } /* copyfd_sparse logs the error including errno string, * but it does not log file name */ error_msg_and_die("Error writing '%s'", path); } if (user_core_fd >= 0 /* error writing user coredump? */ && (fsync(user_core_fd) != 0 || close(user_core_fd) != 0 /* user coredump is too big? */ || (ulimit_c == 0 /* paranoia */ || core_size > ulimit_c) ) ) { /* nuke it (silently) */ xchdir(user_pwd); unlink(core_basename); } /* Because of #1211835 and #1126850 */ #if 0 /* Save JVM crash log if it exists. (JVM's coredump per se * is nearly useless for JVM developers) */ { char *java_log = xasprintf("/tmp/jvm-%lu/hs_error.log", (long)pid); int src_fd = open(java_log, O_RDONLY); free(java_log); /* If we couldn't open the error log in /tmp directory we can try to * read the log from the current directory. It may produce AVC, it * may produce some error log but all these are expected. */ if (src_fd < 0) { java_log = xasprintf("%s/hs_err_pid%lu.log", user_pwd, (long)pid); src_fd = open(java_log, O_RDONLY); free(java_log); } if (src_fd >= 0) { strcpy(path + path_len, "/hs_err.log"); int dst_fd = create_or_die(path); off_t sz = copyfd_eof(src_fd, dst_fd, COPYFD_SPARSE); if (close(dst_fd) != 0 || sz < 0) { dd_delete(dd); error_msg_and_die("Error saving '%s'", path); } close(src_fd); } } #endif /* And finally set the right uid and gid */ dd_reset_ownership(dd); /* We close dumpdir before we start catering for crash storm case. * Otherwise, delete_dump_dir's from other concurrent * CCpp's won't be able to delete our dump (their delete_dump_dir * will wait for us), and we won't be able to delete their dumps. * Classic deadlock. 
*/ dd_close(dd); path[path_len] = '\0'; /* path now contains only directory name */ char *newpath = xstrndup(path, path_len - (sizeof(".new")-1)); if (rename(path, newpath) == 0) strcpy(path, newpath); free(newpath); log("Saved core dump of pid %lu (%s) to %s (%llu bytes)", (long)pid, executable, path, (long long)core_size); notify_new_path(path); /* rhbz#539551: "abrt going crazy when crashing process is respawned" */ if (g_settings_nMaxCrashReportsSize > 0) { /* x1.25 and round up to 64m: go a bit up, so that usual in-daemon trimming * kicks in first, and we don't "fight" with it: */ unsigned maxsize = g_settings_nMaxCrashReportsSize + g_settings_nMaxCrashReportsSize / 4; maxsize |= 63; trim_problem_dirs(g_settings_dump_location, maxsize * (double)(1024*1024), path); } free(rootdir); return 0; } /* We didn't create abrt dump, but may need to create compat coredump */ create_user_core: if (user_core_fd >= 0) { off_t core_size = copyfd_size(STDIN_FILENO, user_core_fd, ulimit_c, COPYFD_SPARSE); if (fsync(user_core_fd) != 0 || close(user_core_fd) != 0 || core_size < 0) { /* perror first, otherwise unlink may trash errno */ perror_msg("Error writing '%s'", full_core_basename); xchdir(user_pwd); unlink(core_basename); return 1; } if (ulimit_c == 0 || core_size > ulimit_c) { xchdir(user_pwd); unlink(core_basename); return 1; } log("Saved core dump of pid %lu to %s (%llu bytes)", (long)pid, full_core_basename, (long long)core_size); } return 0; }
1
[ "CWE-200" ]
abrt
af945ff58a698ce00c45059a05994ef53a13e192
72,132,926,788,203,175,000,000,000,000,000,000,000
429
ccpp: do not overwrite existing files by compat cores Implement all checks used in kernel's do_coredump() and require a non-relative path if suid_dumpable is 2. Related: #1212818 Signed-off-by: Jakub Filak <[email protected]>
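As a hedged illustration of the checks this commit message describes (a minimal C sketch, not abrt's actual code; `open_compat_core`, `core_basename`, and `suid_dumpable` are hypothetical names for the inputs): refuse relative core paths when suid_dumpable is 2, and create the file with O_EXCL|O_NOFOLLOW so an existing file can never be overwritten or followed through a symlink.

#include <fcntl.h>
#include <stdio.h>

/* Hypothetical sketch of kernel do_coredump()-style checks applied
 * before writing a compat core file. */
static int open_compat_core(const char *core_basename, int suid_dumpable)
{
    /* suid_dumpable == 2 ("suidsafe"): only absolute paths are acceptable. */
    if (suid_dumpable == 2 && core_basename[0] != '/')
        return -1;

    /* O_EXCL + O_NOFOLLOW: never overwrite or follow an existing file. */
    int fd = open(core_basename,
                  O_WRONLY | O_CREAT | O_EXCL | O_NOFOLLOW, 0600);
    if (fd < 0)
        perror("open compat core");
    return fd;
}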
TfLiteStatus LogicalAndEval(TfLiteContext* context, TfLiteNode* node) {
  return LogicalImpl(context, node, LogicalAnd);
}
0
[ "CWE-125", "CWE-787" ]
tensorflow
1970c2158b1ffa416d159d03c3370b9a462aee35
125,493,966,230,740,230,000,000,000,000,000,000,000
3
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
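The pattern this message describes is easiest to see in a small hedged C sketch (hypothetical `tensor_t` and `get_input`, not the real TFLite API): once a getter is allowed to return NULL, every caller must check the result before dereferencing it.

#include <stddef.h>

/* Hypothetical stand-ins for the refactored getter and its caller. */
typedef struct { int type; } tensor_t;

static tensor_t *get_input(tensor_t *tensors, size_t n, size_t idx)
{
    return idx < n ? &tensors[idx] : NULL;  /* may now return NULL */
}

static int prepare(tensor_t *tensors, size_t n)
{
    tensor_t *input = get_input(tensors, n, 0);
    if (input == NULL)      /* the inserted null check */
        return -1;
    return input->type;     /* safe to dereference only after the check */
}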
TEST_F(RouterTest, NoRetryWithBodyLimit) {
  NiceMock<Http::MockRequestEncoder> encoder1;
  Http::ResponseDecoder* response_decoder = nullptr;
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_, newStream(_, _))
      .WillOnce(Invoke(
          [&](Http::ResponseDecoder& decoder,
              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {
            response_decoder = &decoder;
            callbacks.onPoolReady(encoder1, cm_.thread_local_cluster_.conn_pool_.host_,
                                  upstream_stream_info_, Http::Protocol::Http10);
            return nullptr;
          }));
  // Set a per route body limit which disallows any buffering.
  EXPECT_CALL(callbacks_.route_->route_entry_, retryShadowBufferLimit()).WillOnce(Return(0));
  Http::TestRequestHeaderMapImpl headers{{"x-envoy-retry-on", "5xx"}, {"x-envoy-internal", "true"}};
  HttpTestUtility::addDefaultHeaders(headers);
  router_.decodeHeaders(headers, false);
  // Unlike RetryUpstreamReset above the data won't be buffered as the body exceeds the buffer limit
  EXPECT_CALL(*router_.retry_state_, enabled()).WillOnce(Return(true));
  EXPECT_CALL(callbacks_, addDecodedData(_, _)).Times(0);
  Buffer::OwnedImpl body("t");
  router_.decodeData(body, false);
  EXPECT_EQ(1U, callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
  Http::ResponseHeaderMapPtr response_headers(
      new Http::TestResponseHeaderMapImpl{{":status", "200"}});
  response_decoder->decodeHeaders(std::move(response_headers), true);
}
0
[ "CWE-703" ]
envoy
18871dbfb168d3512a10c78dd267ff7c03f564c6
70,807,064,220,949,615,000,000,000,000,000,000,000
30
[1.18] CVE-2022-21655 Crash with direct_response Signed-off-by: Otto van der Schaaf <[email protected]>
LibRaw_buffer_datastream::LibRaw_buffer_datastream(void *buffer, size_t bsize)
{
  buf = (unsigned char*)buffer;
  streampos = 0;
  streamsize = bsize;
}
0
[ "CWE-703" ]
LibRaw
11909cc59e712e09b508dda729b99aeaac2b29ad
17,531,642,823,392,957,000,000,000,000,000,000,000
4
cumulated data checks patch
void setIsCodeWithScope(bool isCodeWithScope) {
    if (isCodeWithScope) {
        _startPosition |= 1 << 31;
    }
    else {
        _startPosition &= ~(1 << 31);
    }
}
0
[ "CWE-20" ]
mongo
3a7e85ea1f672f702660e5472566234b1d19038e
135,717,227,435,334,400,000,000,000,000,000,000,000
8
SERVER-17264: improve bson validation for utf-8 strings (cherry picked from commit 394a8569ff14a215c0691aa34440227b2e62a4de) Conflicts: src/mongo/bson/bson_validate_test.cpp
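To make "improve bson validation for utf-8 strings" concrete, here is a simplified C sketch of UTF-8 structural validation (an assumption-laden illustration, not MongoDB's implementation): it checks lead bytes and the required count of 10xxxxxx continuation bytes, and deliberately omits overlong-form and surrogate rejection, which a production validator must also perform.

#include <stdbool.h>
#include <stddef.h>

static bool is_valid_utf8(const unsigned char *s, size_t len)
{
    size_t i = 0;
    while (i < len) {
        unsigned char c = s[i];
        size_t extra;
        if (c < 0x80)                extra = 0;  /* ASCII */
        else if ((c & 0xE0) == 0xC0) extra = 1;  /* 2-byte sequence */
        else if ((c & 0xF0) == 0xE0) extra = 2;  /* 3-byte sequence */
        else if ((c & 0xF8) == 0xF0) extra = 3;  /* 4-byte sequence */
        else return false;  /* stray continuation or invalid lead byte */

        if (extra > 0 && i + extra >= len)
            return false;   /* sequence truncated at end of buffer */

        for (size_t k = 1; k <= extra; k++)
            if ((s[i + k] & 0xC0) != 0x80)
                return false;  /* continuation must be 10xxxxxx */
        i += extra + 1;
    }
    return true;
}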
static int path_init(int dfd, const struct filename *name, unsigned int flags, struct nameidata *nd) { int retval = 0; const char *s = name->name; nd->last_type = LAST_ROOT; /* if there are only slashes... */ nd->flags = flags | LOOKUP_JUMPED | LOOKUP_PARENT; nd->depth = 0; nd->base = NULL; if (flags & LOOKUP_ROOT) { struct dentry *root = nd->root.dentry; struct inode *inode = root->d_inode; if (*s) { if (!d_can_lookup(root)) return -ENOTDIR; retval = inode_permission(inode, MAY_EXEC); if (retval) return retval; } nd->path = nd->root; nd->inode = inode; if (flags & LOOKUP_RCU) { rcu_read_lock(); nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq); nd->m_seq = read_seqbegin(&mount_lock); } else { path_get(&nd->path); } goto done; } nd->root.mnt = NULL; nd->m_seq = read_seqbegin(&mount_lock); if (*s == '/') { if (flags & LOOKUP_RCU) { rcu_read_lock(); nd->seq = set_root_rcu(nd); } else { set_root(nd); path_get(&nd->root); } nd->path = nd->root; } else if (dfd == AT_FDCWD) { if (flags & LOOKUP_RCU) { struct fs_struct *fs = current->fs; unsigned seq; rcu_read_lock(); do { seq = read_seqcount_begin(&fs->seq); nd->path = fs->pwd; nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq); } while (read_seqcount_retry(&fs->seq, seq)); } else { get_fs_pwd(current->fs, &nd->path); } } else { /* Caller must check execute permissions on the starting path component */ struct fd f = fdget_raw(dfd); struct dentry *dentry; if (!f.file) return -EBADF; dentry = f.file->f_path.dentry; if (*s) { if (!d_can_lookup(dentry)) { fdput(f); return -ENOTDIR; } } nd->path = f.file->f_path; if (flags & LOOKUP_RCU) { if (f.flags & FDPUT_FPUT) nd->base = f.file; nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq); rcu_read_lock(); } else { path_get(&nd->path); fdput(f); } } nd->inode = nd->path.dentry->d_inode; if (!(flags & LOOKUP_RCU)) goto done; if (likely(!read_seqcount_retry(&nd->path.dentry->d_seq, nd->seq))) goto done; if (!(nd->flags & LOOKUP_ROOT)) nd->root.mnt = NULL; rcu_read_unlock(); return -ECHILD; done: current->total_link_count = 0; return link_path_walk(s, nd); }
0
[ "CWE-416" ]
linux
f15133df088ecadd141ea1907f2c96df67c729f0
257,045,125,992,217,740,000,000,000,000,000,000,000
101
path_openat(): fix double fput() path_openat() jumps to the wrong place after do_tmpfile() - it has already done path_cleanup() (as part of path_lookupat() called by do_tmpfile()), so doing that again can lead to double fput(). Cc: [email protected] # v3.11+ Signed-off-by: Al Viro <[email protected]>
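The bug class here is one cleanup running twice on the same reference. A minimal hedged C sketch of the general defence (hypothetical `struct file` and `put_file`, not kernel code): make the release idempotent by clearing the pointer; path_openat()'s actual fix is the control-flow equivalent of not jumping back past a point where cleanup already ran.

#include <stdlib.h>

struct file { int fd; };

/* Releasing through a pointer-to-pointer and nulling it makes an
 * accidental second release harmless. */
static void put_file(struct file **f)
{
    if (*f) {
        free(*f);
        *f = NULL;  /* a second put_file(&f) is now a no-op */
    }
}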
do_add_counters(struct net *net, const void __user *user, unsigned int len, int compat)
{
	unsigned int i;
	struct xt_counters_info tmp;
	struct xt_counters *paddc;
	struct xt_table *t;
	const struct xt_table_info *private;
	int ret = 0;
	struct ip6t_entry *iter;
	unsigned int addend;

	paddc = xt_copy_counters_from_user(user, len, &tmp, compat);
	if (IS_ERR(paddc))
		return PTR_ERR(paddc);
	t = xt_find_table_lock(net, AF_INET6, tmp.name);
	if (IS_ERR(t)) {
		ret = PTR_ERR(t);
		goto free;
	}

	local_bh_disable();
	private = t->private;
	if (private->number != tmp.num_counters) {
		ret = -EINVAL;
		goto unlock_up_free;
	}

	i = 0;
	addend = xt_write_recseq_begin();
	xt_entry_foreach(iter, private->entries, private->size) {
		struct xt_counters *tmp;

		tmp = xt_get_this_cpu_counter(&iter->counters);
		ADD_COUNTER(*tmp, paddc[i].bcnt, paddc[i].pcnt);
		++i;
	}
	xt_write_recseq_end(addend);
unlock_up_free:
	local_bh_enable();
	xt_table_unlock(t);
	module_put(t->me);
free:
	vfree(paddc);

	return ret;
}
0
[ "CWE-476" ]
linux
57ebd808a97d7c5b1e1afb937c2db22beba3c1f8
211,555,614,847,694,970,000,000,000,000,000,000,000
47
netfilter: add back stackpointer size checks The rationale for removing the check is only correct for rulesets generated by ip(6)tables. In iptables, a jump can only occur to a user-defined chain, i.e. because we size the stack based on number of user-defined chains we cannot exceed stack size. However, the underlying binary format has no such restriction, and the validation step only ensures that the jump target is a valid rule start point. IOW, it's possible to build a rule blob that has no user-defined chains but does contain a jump. If this happens, no jump stack gets allocated and a crash occurs because no jumpstack was allocated. Fixes: 7814b6ec6d0d6 ("netfilter: xtables: don't save/restore jumpstack offset") Reported-by: [email protected] Signed-off-by: Florian Westphal <[email protected]> Signed-off-by: Pablo Neira Ayuso <[email protected]>
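A hedged sketch of the restored check (simplified types, not the real xtables structures): when a ruleset declares no user-defined chains, its jump stack is never allocated, so any jump must be rejected at validation time rather than crashing at traversal time.

/* Hypothetical simplification: `stacksize` is the depth of the
 * allocated jump stack (0 if no user-defined chains exist). */
struct ruleset { unsigned int stacksize; };

static int validate_jump(const struct ruleset *rs, unsigned int depth)
{
    if (depth >= rs->stacksize)
        return -1;  /* covers the stacksize == 0 "no chains" case */
    return 0;
}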
TEST(HeaderMapImplTest, Equality) {
  TestHeaderMapImpl headers1;
  TestHeaderMapImpl headers2;
  EXPECT_EQ(headers1, headers2);

  headers1.addCopy("hello", "world");
  EXPECT_FALSE(headers1 == headers2);

  headers2.addCopy("foo", "bar");
  EXPECT_FALSE(headers1 == headers2);
}
0
[ "CWE-400", "CWE-703" ]
envoy
afc39bea36fd436e54262f150c009e8d72db5014
57,258,938,247,125,220,000,000,000,000,000,000,000
11
Track byteSize of HeaderMap internally. Introduces a cached byte size updated internally in HeaderMap. The value is stored as an optional, and is cleared whenever a non-const pointer or reference to a HeaderEntry is accessed. The cached value can be set with refreshByteSize() which performs an iteration over the HeaderMap to sum the size of each key and value in the HeaderMap. Signed-off-by: Asra Ali <[email protected]>
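The caching scheme this message describes translates into a small hedged C sketch (hypothetical `header_map` types, not Envoy's API): keep a byte-size total beside the map, invalidate it whenever a mutable reference is handed out, and recompute it on demand, which is what refreshByteSize() does in the real change.

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

struct header { char key[32]; char value[64]; };  /* NUL-terminated */
struct header_map {
    struct header entries[16];
    size_t count;        /* caller keeps count <= 16 */
    size_t cached_size;
    bool   cache_valid;
};

static struct header *get_mut(struct header_map *m, size_t i)
{
    m->cache_valid = false;  /* caller may change key/value lengths */
    return &m->entries[i];
}

static size_t byte_size(struct header_map *m)
{
    if (!m->cache_valid) {   /* refreshByteSize() equivalent */
        size_t total = 0;
        for (size_t i = 0; i < m->count; i++)
            total += strlen(m->entries[i].key) + strlen(m->entries[i].value);
        m->cached_size = total;
        m->cache_valid = true;
    }
    return m->cached_size;
}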
struct file *open_exec(const char *name)
{
	struct filename *filename = getname_kernel(name);
	struct file *f = ERR_CAST(filename);

	if (!IS_ERR(filename)) {
		f = do_open_execat(AT_FDCWD, filename, 0);
		putname(filename);
	}
	return f;
}
0
[ "CWE-362" ]
linux
8b01fc86b9f425899f8a3a8fc1c47d73c2c20543
170,447,977,591,365,680,000,000,000,000,000,000,000
11
fs: take i_mutex during prepare_binprm for set[ug]id executables This prevents a race between chown() and execve(), where chowning a setuid-user binary to root would momentarily make the binary setuid root. This patch was mostly written by Linus Torvalds. Signed-off-by: Jann Horn <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
test_policies_getinfo_helper_policies(void *arg) { (void)arg; int rv = 0; size_t ipv4_len = 0, ipv6_len = 0; char *answer = NULL; const char *errmsg = NULL; routerinfo_t mock_my_routerinfo; memset(&mock_my_routerinfo, 0, sizeof(mock_my_routerinfo)); rv = getinfo_helper_policies(NULL, "exit-policy/default", &answer, &errmsg); tt_assert(rv == 0); tt_assert(answer != NULL); tt_assert(strlen(answer) > 0); tor_free(answer); rv = getinfo_helper_policies(NULL, "exit-policy/reject-private/default", &answer, &errmsg); tt_assert(rv == 0); tt_assert(answer != NULL); tt_assert(strlen(answer) > 0); tor_free(answer); memset(&mock_my_routerinfo, 0, sizeof(routerinfo_t)); MOCK(router_get_my_routerinfo, mock_router_get_my_routerinfo); mock_my_routerinfo.exit_policy = smartlist_new(); mock_desc_routerinfo = &mock_my_routerinfo; memset(&mock_options, 0, sizeof(or_options_t)); MOCK(get_options, mock_get_options); rv = getinfo_helper_policies(NULL, "exit-policy/reject-private/relay", &answer, &errmsg); tt_assert(rv == 0); tt_assert(answer != NULL); tt_assert(strlen(answer) == 0); tor_free(answer); rv = getinfo_helper_policies(NULL, "exit-policy/ipv4", &answer, &errmsg); tt_assert(rv == 0); tt_assert(answer != NULL); ipv4_len = strlen(answer); tt_assert(ipv4_len == 0 || ipv4_len == strlen(DEFAULT_POLICY_STRING)); tt_assert(ipv4_len == 0 || !strcasecmp(answer, DEFAULT_POLICY_STRING)); tor_free(answer); rv = getinfo_helper_policies(NULL, "exit-policy/ipv6", &answer, &errmsg); tt_assert(rv == 0); tt_assert(answer != NULL); ipv6_len = strlen(answer); tt_assert(ipv6_len == 0 || ipv6_len == strlen(DEFAULT_POLICY_STRING)); tt_assert(ipv6_len == 0 || !strcasecmp(answer, DEFAULT_POLICY_STRING)); tor_free(answer); rv = getinfo_helper_policies(NULL, "exit-policy/full", &answer, &errmsg); tt_assert(rv == 0); tt_assert(answer != NULL); /* It's either empty or it's the default */ tt_assert(strlen(answer) == 0 || !strcasecmp(answer, DEFAULT_POLICY_STRING)); tor_free(answer); mock_my_routerinfo.addr = TEST_IPV4_ADDR; tor_addr_parse(&mock_my_routerinfo.ipv6_addr, TEST_IPV6_ADDR); append_exit_policy_string(&mock_my_routerinfo.exit_policy, "accept *4:*"); append_exit_policy_string(&mock_my_routerinfo.exit_policy, "reject *6:*"); mock_options.IPv6Exit = 1; tor_addr_from_ipv4h(&mock_options.OutboundBindAddressIPv4_, TEST_IPV4_ADDR); tor_addr_parse(&mock_options.OutboundBindAddressIPv6_, TEST_IPV6_ADDR); mock_options.ExitPolicyRejectPrivate = 1; mock_options.ExitPolicyRejectLocalInterfaces = 1; rv = getinfo_helper_policies(NULL, "exit-policy/reject-private/relay", &answer, &errmsg); tt_assert(rv == 0); tt_assert(answer != NULL); tt_assert(strlen(answer) > 0); tor_free(answer); mock_options.ExitPolicyRejectPrivate = 1; mock_options.ExitPolicyRejectLocalInterfaces = 0; rv = getinfo_helper_policies(NULL, "exit-policy/reject-private/relay", &answer, &errmsg); tt_assert(rv == 0); tt_assert(answer != NULL); tt_assert(strlen(answer) > 0); tor_free(answer); mock_options.ExitPolicyRejectPrivate = 0; mock_options.ExitPolicyRejectLocalInterfaces = 1; rv = getinfo_helper_policies(NULL, "exit-policy/reject-private/relay", &answer, &errmsg); tt_assert(rv == 0); tt_assert(answer != NULL); tt_assert(strlen(answer) > 0); tor_free(answer); mock_options.ExitPolicyRejectPrivate = 0; mock_options.ExitPolicyRejectLocalInterfaces = 0; rv = getinfo_helper_policies(NULL, "exit-policy/reject-private/relay", &answer, &errmsg); tt_assert(rv == 0); tt_assert(answer != NULL); tt_assert(strlen(answer) == 0); tor_free(answer); rv = getinfo_helper_policies(NULL, 
"exit-policy/ipv4", &answer, &errmsg); tt_assert(rv == 0); tt_assert(answer != NULL); ipv4_len = strlen(answer); tt_assert(ipv4_len > 0); tor_free(answer); rv = getinfo_helper_policies(NULL, "exit-policy/ipv6", &answer, &errmsg); tt_assert(rv == 0); tt_assert(answer != NULL); ipv6_len = strlen(answer); tt_assert(ipv6_len > 0); tor_free(answer); rv = getinfo_helper_policies(NULL, "exit-policy/full", &answer, &errmsg); tt_assert(rv == 0); tt_assert(answer != NULL); tt_assert(strlen(answer) > 0); tt_assert(strlen(answer) == ipv4_len + ipv6_len + 1); tor_free(answer); done: tor_free(answer); UNMOCK(get_options); UNMOCK(router_get_my_routerinfo); addr_policy_list_free(mock_my_routerinfo.exit_policy); }
0
[]
tor
1afc2ed956a35b40dfd1d207652af5b50c295da7
10,980,865,872,106,732,000,000,000,000,000,000,000
144
Fix policies.c instance of the "if (r=(a-b)) return r" pattern I think this one probably can't underflow, since the input ranges are small. But let's not tempt fate. This patch also replaces the "cmp" functions here with just "eq" functions, since nothing actually checked for anything besides 0 and nonzero. Related to 21278.
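The anti-pattern this message names, "return a - b;", can underflow or lose information when the difference is narrowed to int. A short sketch of the portable replacement (illustrative only):

/* Returns -1, 0, or 1 with no subtraction, so no underflow is possible. */
static int safe_cmp_u64(unsigned long long a, unsigned long long b)
{
    return (a > b) - (a < b);
}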
int nfs_revalidate_mapping_nolock(struct inode *inode, struct address_space *mapping)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	int ret = 0;

	if ((nfsi->cache_validity & NFS_INO_REVAL_PAGECACHE)
			|| nfs_attribute_timeout(inode) || NFS_STALE(inode)) {
		ret = __nfs_revalidate_inode(NFS_SERVER(inode), inode);
		if (ret < 0)
			goto out;
	}
	if (nfsi->cache_validity & NFS_INO_INVALID_DATA)
		ret = nfs_invalidate_mapping_nolock(inode, mapping);
out:
	return ret;
}
0
[ "CWE-703" ]
linux
dc0b027dfadfcb8a5504f7d8052754bf8d501ab9
88,644,495,348,454,980,000,000,000,000,000,000,000
16
NFSv4: Convert the open and close ops to use fmode Signed-off-by: Trond Myklebust <[email protected]>
bracket( const char *p, /* pattern (next to '[') */ const char *pend, const char *s, /* string */ const char *send, int flags, rb_encoding *enc) { const int nocase = flags & FNM_CASEFOLD; const int escape = !(flags & FNM_NOESCAPE); unsigned int c1, c2; int r; int ok = 0, not = 0; if (p >= pend) return NULL; if (*p == '!' || *p == '^') { not = 1; p++; } while (*p != ']') { const char *t1 = p; if (escape && *t1 == '\\') t1++; if (!*t1) return NULL; p = t1 + (r = rb_enc_mbclen(t1, pend, enc)); if (p >= pend) return NULL; if (p[0] == '-' && p[1] != ']') { const char *t2 = p + 1; int r2; if (escape && *t2 == '\\') t2++; if (!*t2) return NULL; p = t2 + (r2 = rb_enc_mbclen(t2, pend, enc)); if (ok) continue; if ((r <= (send-s) && memcmp(t1, s, r) == 0) || (r2 <= (send-s) && memcmp(t2, s, r2) == 0)) { ok = 1; continue; } c1 = rb_enc_codepoint(s, send, enc); if (nocase) c1 = rb_enc_toupper(c1, enc); c2 = rb_enc_codepoint(t1, pend, enc); if (nocase) c2 = rb_enc_toupper(c2, enc); if (c1 < c2) continue; c2 = rb_enc_codepoint(t2, pend, enc); if (nocase) c2 = rb_enc_toupper(c2, enc); if (c1 > c2) continue; } else { if (ok) continue; if (r <= (send-s) && memcmp(t1, s, r) == 0) { ok = 1; continue; } if (!nocase) continue; c1 = rb_enc_toupper(rb_enc_codepoint(s, send, enc), enc); c2 = rb_enc_toupper(rb_enc_codepoint(p, pend, enc), enc); if (c1 != c2) continue; } ok = 1; } return ok == not ? NULL : (char *)p + 1; }
0
[ "CWE-22" ]
ruby
bd5661a3cbb38a8c3a3ea10cd76c88bbef7871b8
227,243,615,470,637,000,000,000,000,000,000,000,000
67
dir.c: check NUL bytes * dir.c (GlobPathValue): should be used in rb_push_glob only. other methods should use FilePathValue. https://hackerone.com/reports/302338 * dir.c (rb_push_glob): expand GlobPathValue git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@62989 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
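A hedged sketch of the NUL-byte check the commit adds (illustrative C, not Ruby's internals): a path received as a language string carries an explicit length, so an embedded '\0' is detectable by comparing strlen() against that length before the path reaches C APIs that stop at the first NUL.

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

/* True if buf contains a '\0' before its declared end. */
static bool has_embedded_nul(const char *buf, size_t declared_len)
{
    return strlen(buf) != declared_len;
}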
getlinecol(
    long *cp,  // pointer to columns
    long *rp)  // pointer to rows
{
    char_u tbuf[TBUFSZ];

    if (T_NAME != NULL && *T_NAME != NUL && invoke_tgetent(tbuf, T_NAME) == NULL)
    {
        if (*cp == 0)
            *cp = tgetnum("co");
        if (*rp == 0)
            *rp = tgetnum("li");
    }
}
0
[ "CWE-125", "CWE-787" ]
vim
e178af5a586ea023622d460779fdcabbbfac0908
333,438,497,275,443,500,000,000,000,000,000,000,000
14
patch 8.2.5160: accessing invalid memory after changing terminal size Problem: Accessing invalid memory after changing terminal size. Solution: Adjust cmdline_row and msg_row to the value of Rows.
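The fix idea reads as a clamp: after the terminal shrinks, cached row positions must be bounded by the new height before they index screen memory. A minimal sketch, assuming plain variables named after Vim's globals (not Vim's actual code):

static int Rows = 24, cmdline_row = 23, msg_row = 23;

/* Called when the terminal is resized: keep row positions in range. */
static void on_resize(int new_rows)
{
    Rows = new_rows;
    if (cmdline_row >= Rows) cmdline_row = Rows - 1;
    if (msg_row     >= Rows) msg_row     = Rows - 1;
}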
STATIC void GC_do_blocking_inner(ptr_t data, void * context GC_ATTR_UNUSED)
{
    struct blocking_data * d = (struct blocking_data *) data;

    GC_ASSERT(GC_is_initialized);
    GC_ASSERT(GC_blocked_sp == NULL);
#   ifdef SPARC
        GC_blocked_sp = GC_save_regs_in_stack();
#   else
        GC_blocked_sp = (ptr_t) &d; /* save approx. sp */
#   endif
#   ifdef IA64
        GC_blocked_register_sp = GC_save_regs_in_stack();
#   endif

    d -> client_data = (d -> fn)(d -> client_data);

#   ifdef SPARC
        GC_ASSERT(GC_blocked_sp != NULL);
#   else
        GC_ASSERT(GC_blocked_sp == (ptr_t) &d);
#   endif
    GC_blocked_sp = NULL;
}
0
[ "CWE-119" ]
bdwgc
7292c02fac2066d39dd1bcc37d1a7054fd1e32ee
170,702,851,963,079,600,000,000,000,000,000,000,000
23
Fix malloc routines to prevent size value wrap-around See issue #135 on Github. * allchblk.c (GC_allochblk, GC_allochblk_nth): Use OBJ_SZ_TO_BLOCKS_CHECKED instead of OBJ_SZ_TO_BLOCKS. * malloc.c (GC_alloc_large): Likewise. * alloc.c (GC_expand_hp_inner): Type of "bytes" local variable changed from word to size_t; cast ROUNDUP_PAGESIZE argument to size_t; prevent overflow when computing GC_heapsize+bytes > GC_max_heapsize. * dbg_mlc.c (GC_debug_malloc, GC_debug_malloc_ignore_off_page, GC_debug_malloc_atomic_ignore_off_page, GC_debug_generic_malloc, GC_debug_generic_malloc_inner, GC_debug_generic_malloc_inner_ignore_off_page, GC_debug_malloc_stubborn, GC_debug_malloc_atomic, GC_debug_malloc_uncollectable, GC_debug_malloc_atomic_uncollectable): Use SIZET_SAT_ADD (instead of "+" operator) to add extra bytes to lb value. * fnlz_mlc.c (GC_finalized_malloc): Likewise. * gcj_mlc.c (GC_debug_gcj_malloc): Likewise. * include/private/gc_priv.h (ROUNDUP_GRANULE_SIZE, ROUNDED_UP_GRANULES, ADD_SLOP, ROUNDUP_PAGESIZE): Likewise. * include/private/gcconfig.h (GET_MEM): Likewise. * mallocx.c (GC_malloc_many, GC_memalign): Likewise. * os_dep.c (GC_wince_get_mem, GC_win32_get_mem): Likewise. * typd_mlc.c (GC_malloc_explicitly_typed, GC_malloc_explicitly_typed_ignore_off_page, GC_calloc_explicitly_typed): Likewise. * headers.c (GC_scratch_alloc): Change type of bytes_to_get from word to size_t (because ROUNDUP_PAGESIZE_IF_MMAP result type changed). * include/private/gc_priv.h: Include limits.h (unless SIZE_MAX already defined). * include/private/gc_priv.h (GC_SIZE_MAX, GC_SQRT_SIZE_MAX): Move from malloc.c file. * include/private/gc_priv.h (SIZET_SAT_ADD): New macro (defined before include gcconfig.h). * include/private/gc_priv.h (EXTRA_BYTES, GC_page_size): Change type to size_t. * os_dep.c (GC_page_size): Likewise. * include/private/gc_priv.h (ROUNDUP_GRANULE_SIZE, ROUNDED_UP_GRANULES, ADD_SLOP, ROUNDUP_PAGESIZE): Add comment about the argument. * include/private/gcconfig.h (GET_MEM): Likewise. * include/private/gc_priv.h (ROUNDUP_GRANULE_SIZE, ROUNDED_UP_GRANULES, ADD_SLOP, OBJ_SZ_TO_BLOCKS, ROUNDUP_PAGESIZE, ROUNDUP_PAGESIZE_IF_MMAP): Rename argument to "lb". * include/private/gc_priv.h (OBJ_SZ_TO_BLOCKS_CHECKED): New macro. * include/private/gcconfig.h (GC_win32_get_mem, GC_wince_get_mem, GC_unix_get_mem): Change argument type from word to int. * os_dep.c (GC_unix_mmap_get_mem, GC_unix_get_mem, GC_unix_sbrk_get_mem, GC_wince_get_mem, GC_win32_get_mem): Likewise. * malloc.c (GC_alloc_large_and_clear): Call OBJ_SZ_TO_BLOCKS only if no value wrap around is guaranteed. * malloc.c (GC_generic_malloc): Do not check for lb_rounded < lb case (because ROUNDED_UP_GRANULES and GRANULES_TO_BYTES guarantees no value wrap around). * mallocx.c (GC_generic_malloc_ignore_off_page): Likewise. * misc.c (GC_init_size_map): Change "i" local variable type from int to size_t. * os_dep.c (GC_write_fault_handler, catch_exception_raise): Likewise. * misc.c (GC_envfile_init): Cast len to size_t when passed to ROUNDUP_PAGESIZE_IF_MMAP. * os_dep.c (GC_setpagesize): Cast GC_sysinfo.dwPageSize and GETPAGESIZE() to size_t (when setting GC_page_size). * os_dep.c (GC_unix_mmap_get_mem, GC_unmap_start, GC_remove_protection): Expand ROUNDUP_PAGESIZE macro but without value wrap-around checking (the argument is of word type). * os_dep.c (GC_unix_mmap_get_mem): Replace -GC_page_size with ~GC_page_size+1 (because GC_page_size is unsigned); remove redundant cast to size_t. 
* os_dep.c (GC_unix_sbrk_get_mem): Add explicit cast of GC_page_size to SBRK_ARG_T. * os_dep.c (GC_wince_get_mem): Change type of res_bytes local variable to size_t. * typd_mlc.c: Do not include limits.h. * typd_mlc.c (GC_SIZE_MAX, GC_SQRT_SIZE_MAX): Remove (as defined in gc_priv.h now).
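The SIZET_SAT_ADD macro this ChangeLog introduces has a simple shape; the sketch below is an illustrative equivalent, not the bdwgc definition: adding slop or header bytes to a requested size must saturate at SIZE_MAX instead of wrapping around to a small value that would make a later allocation succeed with too little memory.

#include <stddef.h>
#include <stdint.h>

/* Saturating add: a + b, clamped at SIZE_MAX on overflow. */
static size_t sizet_sat_add(size_t a, size_t b)
{
    return a < SIZE_MAX - b ? a + b : SIZE_MAX;
}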
gc_test(mrb_state *mrb, mrb_value self)
{
  test_mrb_field_write_barrier();
  test_mrb_write_barrier();
  test_add_gray_list();
  test_gc_gray_mark();
  test_incremental_gc();
  test_incremental_sweep_phase();
  return mrb_nil_value();
}
0
[ "CWE-416" ]
mruby
5c114c91d4ff31859fcd84cf8bf349b737b90d99
78,788,240,209,628,740,000,000,000,000,000,000,000
10
Clear unused stack region that may refer to freed objects; fix #3596
static void *genunicodedata(void *_gt, int32 *len)
{
    GTextField *gt = _gt;
    unichar_t *temp;

    *len = gt->sel_end - gt->sel_start + 1;
    temp = malloc((*len + 2) * sizeof(unichar_t));
    temp[0] = 0xfeff;  /* KDE expects a byte order flag */
    u_strncpy(temp + 1, gt->text + gt->sel_start, gt->sel_end - gt->sel_start);
    temp[*len + 1] = 0;
    return( temp );
}
0
[ "CWE-119", "CWE-787" ]
fontforge
626f751752875a0ddd74b9e217b6f4828713573c
236,815,847,732,770,640,000,000,000,000,000,000,000
10
Warn users before discarding their unsaved scripts (#3852) * Warn users before discarding their unsaved scripts This closes #3846.
GetCode(gdIOCtx *fd, CODE_STATIC_DATA *scd, int code_size, int flag, int *ZeroDataBlockP)
{
	int rv;

	rv = GetCode_(fd, scd, code_size, flag, ZeroDataBlockP);
	if (VERBOSE) {
		printf("[GetCode(,%d,%d) returning %d]\n", code_size, flag, rv);
	}
	return rv;
}
0
[ "CWE-681" ]
libgd
a11f47475e6443b7f32d21f2271f28f417e2ac04
111,383,781,829,771,710,000,000,000,000,000,000,000
12
Fix #420: Potential infinite loop in gdImageCreateFromGifCtx Due to a signedness confusion in `GetCode_` a corrupt GIF file can trigger an infinite loop. Furthermore we make sure that a GIF without any palette entries is treated as invalid *after* open palette entries have been removed. CVE-2018-5711 See also https://bugs.php.net/bug.php?id=75571.
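The signedness confusion the message describes is a classic GIF-parsing hazard; here is a hedged sketch of the safe shape (illustrative C, not libgd's code): keep the sub-block length unsigned and distinguish EOF explicitly, so a corrupt length byte can never become a negative count that keeps the read loop alive forever.

#include <stdio.h>

/* Read one GIF data sub-block. Returns the block length (0..255) or -1. */
static int read_data_block(FILE *fp, unsigned char buf[256])
{
    int c = getc(fp);
    if (c == EOF)
        return -1;                        /* EOF is not a valid length */
    unsigned char len = (unsigned char)c; /* never negative */
    if (len > 0 && fread(buf, 1, len, fp) != len)
        return -1;                        /* truncated block */
    return len;
}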
xfs_attr_fillstate(xfs_da_state_t *state)
{
	xfs_da_state_path_t *path;
	xfs_da_state_blk_t *blk;
	int level;

	trace_xfs_attr_fillstate(state->args);

	/*
	 * Roll down the "path" in the state structure, storing the on-disk
	 * block number for those buffers in the "path".
	 */
	path = &state->path;
	ASSERT((path->active >= 0) && (path->active < XFS_DA_NODE_MAXDEPTH));
	for (blk = path->blk, level = 0; level < path->active; blk++, level++) {
		if (blk->bp) {
			blk->disk_blkno = XFS_BUF_ADDR(blk->bp);
			blk->bp = NULL;
		} else {
			blk->disk_blkno = 0;
		}
	}

	/*
	 * Roll down the "altpath" in the state structure, storing the on-disk
	 * block number for those buffers in the "altpath".
	 */
	path = &state->altpath;
	ASSERT((path->active >= 0) && (path->active < XFS_DA_NODE_MAXDEPTH));
	for (blk = path->blk, level = 0; level < path->active; blk++, level++) {
		if (blk->bp) {
			blk->disk_blkno = XFS_BUF_ADDR(blk->bp);
			blk->bp = NULL;
		} else {
			blk->disk_blkno = 0;
		}
	}

	return(0);
}
0
[ "CWE-241", "CWE-19" ]
linux
8275cdd0e7ac550dcce2b3ef6d2fb3b808c1ae59
208,415,182,590,536,900,000,000,000,000,000,000,000
40
xfs: remote attribute overwrite causes transaction overrun Commit e461fcb ("xfs: remote attribute lookups require the value length") passes the remote attribute length in the xfs_da_args structure on lookup so that CRC calculations and validity checking can be performed correctly by related code. This, unfortunately, has the side effect of changing the args->valuelen parameter in cases where it shouldn't. That is, when we replace a remote attribute, the incoming replacement stores the value and length in args->value and args->valuelen, but then the lookup which finds the existing remote attribute overwrites args->valuelen with the length of the remote attribute being replaced. Hence when we go to create the new attribute, we create it of the size of the existing remote attribute, not the size it is supposed to be. When the new attribute is much smaller than the old attribute, this results in a transaction overrun and an ASSERT() failure on a debug kernel: XFS: Assertion failed: tp->t_blk_res_used <= tp->t_blk_res, file: fs/xfs/xfs_trans.c, line: 331 Fix this by keeping the remote attribute value length separate to the attribute value length in the xfs_da_args structure. This enables us to pass the length of the remote attribute to be removed without overwriting the new attribute's length. Also, ensure that when we save remote block contexts for a later rename we zero the original state variables so that we don't confuse the state of the attribute to be removed with the state of the new attribute that we just added. [Spotted by Brian Foster.] Signed-off-by: Dave Chinner <[email protected]> Reviewed-by: Brian Foster <[email protected]> Signed-off-by: Dave Chinner <[email protected]>
ecma_date_value_to_time_string (ecma_number_t datetime_number) /**<datetime */
{
  return ecma_date_to_string_format (datetime_number, "$h:$m:$s GMT$z$Z");
} /* ecma_date_value_to_time_string */
0
[ "CWE-416" ]
jerryscript
3bcd48f72d4af01d1304b754ef19fe1a02c96049
48,169,004,609,168,620,000,000,000,000,000,000,000
4
Improve parse_identifier (#4691) Ascii string length is no longer computed during string allocation. JerryScript-DCO-1.0-Signed-off-by: Daniel Batiz [email protected]
int hns_rcb_common_get_cfg(struct dsaf_device *dsaf_dev, int comm_index)
{
	struct rcb_common_cb *rcb_common;
	enum dsaf_mode dsaf_mode = dsaf_dev->dsaf_mode;
	u16 max_vfn;
	u16 max_q_per_vf;
	int ring_num = hns_rcb_get_ring_num(dsaf_dev);

	rcb_common =
		devm_kzalloc(dsaf_dev->dev, sizeof(*rcb_common) +
			ring_num * sizeof(struct ring_pair_cb), GFP_KERNEL);
	if (!rcb_common) {
		dev_err(dsaf_dev->dev, "rcb common devm_kzalloc fail!\n");
		return -ENOMEM;
	}
	rcb_common->comm_index = comm_index;
	rcb_common->ring_num = ring_num;
	rcb_common->dsaf_dev = dsaf_dev;

	rcb_common->desc_num = dsaf_dev->desc_num;

	hns_rcb_get_queue_mode(dsaf_mode, &max_vfn, &max_q_per_vf);
	rcb_common->max_vfn = max_vfn;
	rcb_common->max_q_per_vf = max_q_per_vf;

	rcb_common->io_base = hns_rcb_common_get_vaddr(rcb_common);
	rcb_common->phy_base = hns_rcb_common_get_paddr(rcb_common);

	dsaf_dev->rcb_common[comm_index] = rcb_common;

	return 0;
}
0
[ "CWE-119", "CWE-703" ]
linux
412b65d15a7f8a93794653968308fc100f2aa87c
308,723,964,016,802,400,000,000,000,000,000,000,000
32
net: hns: fix ethtool_get_strings overflow in hns driver hns_get_sset_count() returns HNS_NET_STATS_CNT and the data space allocated is not enough for ethtool_get_strings(), which will cause random memory corruption. When SLAB and DEBUG_SLAB are both enabled, memory corruptions like the following can be observed without this patch: [ 43.115200] Slab corruption (Not tainted): Acpi-ParseExt start=ffff801fb0b69030, len=80 [ 43.115206] Redzone: 0x9f911029d006462/0x5f78745f31657070. [ 43.115208] Last user: [<5f7272655f746b70>](0x5f7272655f746b70) [ 43.115214] 010: 70 70 65 31 5f 74 78 5f 70 6b 74 00 6b 6b 6b 6b ppe1_tx_pkt.kkkk [ 43.115217] 030: 70 70 65 31 5f 74 78 5f 70 6b 74 5f 6f 6b 00 6b ppe1_tx_pkt_ok.k [ 43.115218] Next obj: start=ffff801fb0b69098, len=80 [ 43.115220] Redzone: 0x706d655f6f666966/0x9f911029d74e35b. [ 43.115229] Last user: [<ffff0000084b11b0>](acpi_os_release_object+0x28/0x38) [ 43.115231] 000: 74 79 00 6b 6b 6b 6b 6b 70 70 65 31 5f 74 78 5f ty.kkkkkppe1_tx_ [ 43.115232] 010: 70 6b 74 5f 65 72 72 5f 63 73 75 6d 5f 66 61 69 pkt_err_csum_fai Signed-off-by: Timmy Li <[email protected]> Signed-off-by: David S. Miller <[email protected]>
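The invariant being restored here is that the buffer handed to the "get strings" callback is sized from the "get count" callback, so both must report the same number and the fill loop must be bounded by it. A hedged C sketch with hypothetical names (`STAT_CNT`, `NAME_LEN`), not the hns driver code:

#include <stdio.h>

#define STAT_CNT 3
#define NAME_LEN 32

static int get_sset_count(void) { return STAT_CNT; }

/* buf must hold get_sset_count() * NAME_LEN bytes. */
static void get_strings(char *buf)
{
    static const char *names[STAT_CNT] = { "rx_pkts", "tx_pkts", "drops" };
    for (int i = 0; i < get_sset_count(); i++)  /* same bound as the count */
        snprintf(buf + i * NAME_LEN, NAME_LEN, "%s", names[i]);
}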
R_API void r_bin_java_print_constant_value_attr_summary(RBinJavaAttrInfo *attr) {
	if (!attr) {
		eprintf ("Attempting to print an invalid RBinJavaAttrInfo *ConstantValue.\n");
		return;
	}
	printf ("Constant Value Attribute Information:\n");
	printf ("  Attribute Offset: 0x%08"PFMT64x "\n", attr->file_offset);
	printf ("  Attribute Name Index: %d (%s)\n", attr->name_idx, attr->name);
	printf ("  Attribute Length: %d\n", attr->length);
	printf ("  ConstantValue Index: %d\n", attr->info.constant_value_attr.constantvalue_idx);
}
0
[ "CWE-119", "CWE-788" ]
radare2
6c4428f018d385fc80a33ecddcb37becea685dd5
134,506,894,236,181,390,000,000,000,000,000,000,000
11
Improve boundary checks to fix oobread segfaults ##crash * Reported by Cen Zhang via huntr.dev * Reproducer: bins/fuzzed/javaoob-havoc.class
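The boundary-check pattern behind fixes like this one is worth stating once in full; a minimal sketch (illustrative, not radare2's code) written so the range arithmetic itself cannot overflow:

#include <stdbool.h>
#include <stddef.h>

/* True if reading `need` bytes at `offset` stays inside a buffer of
 * `buf_size` bytes. The subtraction form avoids offset + need overflow. */
static bool in_bounds(size_t buf_size, size_t offset, size_t need)
{
    return offset <= buf_size && need <= buf_size - offset;
}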
static NTSTATUS ldapsrv_CompareRequest(struct ldapsrv_call *call) { struct ldap_CompareRequest *req = &call->request->r.CompareRequest; struct ldap_Result *compare; struct ldapsrv_reply *compare_r; TALLOC_CTX *local_ctx; struct ldb_context *samdb = call->conn->ldb; struct ldb_result *res = NULL; struct ldb_dn *dn; const char *attrs[1]; const char *errstr = NULL; const char *filter = NULL; int result = LDAP_SUCCESS; int ldb_ret; DEBUG(10, ("CompareRequest")); DEBUGADD(10, (" dn: %s\n", req->dn)); local_ctx = talloc_named(call, 0, "CompareRequest local_memory_context"); NT_STATUS_HAVE_NO_MEMORY(local_ctx); dn = ldb_dn_new(local_ctx, samdb, req->dn); NT_STATUS_HAVE_NO_MEMORY(dn); DEBUG(10, ("CompareRequest: dn: [%s]\n", req->dn)); filter = talloc_asprintf(local_ctx, "(%s=%*s)", req->attribute, (int)req->value.length, req->value.data); NT_STATUS_HAVE_NO_MEMORY(filter); DEBUGADD(10, ("CompareRequest: attribute: [%s]\n", filter)); attrs[0] = NULL; compare_r = ldapsrv_init_reply(call, LDAP_TAG_CompareResponse); NT_STATUS_HAVE_NO_MEMORY(compare_r); if (result == LDAP_SUCCESS) { ldb_ret = ldb_search(samdb, local_ctx, &res, dn, LDB_SCOPE_BASE, attrs, "%s", filter); if (ldb_ret != LDB_SUCCESS) { result = map_ldb_error(local_ctx, ldb_ret, ldb_errstring(samdb), &errstr); DEBUG(10,("CompareRequest: error: %s\n", errstr)); } else if (res->count == 0) { DEBUG(10,("CompareRequest: doesn't matched\n")); result = LDAP_COMPARE_FALSE; errstr = NULL; } else if (res->count == 1) { DEBUG(10,("CompareRequest: matched\n")); result = LDAP_COMPARE_TRUE; errstr = NULL; } else if (res->count > 1) { result = LDAP_OTHER; map_ldb_error(local_ctx, LDB_ERR_OTHER, NULL, &errstr); errstr = talloc_asprintf(local_ctx, "%s. Too many objects match!", errstr); DEBUG(10,("CompareRequest: %d results: %s\n", res->count, errstr)); } } compare = &compare_r->msg->r.CompareResponse; compare->dn = NULL; compare->resultcode = result; compare->errormessage = (errstr?talloc_strdup(compare_r,errstr):NULL); compare->referral = NULL; talloc_free(local_ctx); return ldapsrv_queue_reply(call, compare_r); }
0
[]
samba
86fe9d48883f87c928bf31ccbd275db420386803
47,750,664,480,989,580,000,000,000,000,000,000,000
70
CVE-2021-3670 ldap_server: Set timeout on requests based on MaxQueryDuration BUG: https://bugzilla.samba.org/show_bug.cgi?id=14694 Signed-off-by: Joseph Sutton <[email protected]> Reviewed-by: Douglas Bagnall <[email protected]>
h2_end_headers(struct worker *wrk, struct h2_sess *h2, struct req *req, struct h2_req *r2) { h2_error h2e; ssize_t cl; ASSERT_RXTHR(h2); assert(r2->state == H2_S_OPEN); h2e = h2h_decode_fini(h2); h2->new_req = NULL; if (h2e != NULL) { Lck_Lock(&h2->sess->mtx); VSLb(h2->vsl, SLT_Debug, "HPACK/FINI %s", h2e->name); Lck_Unlock(&h2->sess->mtx); assert(!WS_IsReserved(r2->req->ws)); h2_del_req(wrk, r2); return (h2e); } VSLb_ts_req(req, "Req", req->t_req); // XXX: Smarter to do this already at HPACK time into tail end of // XXX: WS, then copy back once all headers received. // XXX: Have I mentioned H/2 Is hodge-podge ? http_CollectHdrSep(req->http, H_Cookie, "; "); // rfc7540,l,3114,3120 cl = http_GetContentLength(req->http); assert(cl >= -2); if (cl == -2) { VSLb(h2->vsl, SLT_Debug, "Non-parseable Content-Length"); return (H2SE_PROTOCOL_ERROR); } if (req->req_body_status == NULL) { if (cl == -1) req->req_body_status = BS_EOF; else req->req_body_status = BS_LENGTH; req->htc->content_length = cl; } else { /* A HEADER frame contained END_STREAM */ assert (req->req_body_status == BS_NONE); r2->state = H2_S_CLOS_REM; if (cl > 0) return (H2CE_PROTOCOL_ERROR); //rfc7540,l,1838,1840 } if (req->http->hd[HTTP_HDR_METHOD].b == NULL) { VSLb(h2->vsl, SLT_Debug, "Missing :method"); return (H2SE_PROTOCOL_ERROR); //rfc7540,l,3087,3090 } if (req->http->hd[HTTP_HDR_URL].b == NULL) { VSLb(h2->vsl, SLT_Debug, "Missing :path"); return (H2SE_PROTOCOL_ERROR); //rfc7540,l,3087,3090 } AN(req->http->hd[HTTP_HDR_PROTO].b); assert(req->req_step == R_STP_TRANSPORT); VCL_TaskEnter(req->privs); VCL_TaskEnter(req->top->privs); req->task->func = h2_do_req; req->task->priv = req; r2->scheduled = 1; if (Pool_Task(wrk->pool, req->task, TASK_QUEUE_STR) != 0) { r2->scheduled = 0; r2->state = H2_S_CLOSED; return (H2SE_REFUSED_STREAM); //rfc7540,l,3326,3329 } return (0); }
0
[ "CWE-444" ]
varnish-cache
d4c67d2a1a05304598895c24663c58a2e2932708
105,786,925,817,631,430,000,000,000,000,000,000,000
69
Take content length into account on H/2 request bodies When receiving H/2 data frames, make sure to take the advertised content length into account, and fail appropriately if the combined sum of the data frames does not match the content length.
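The accounting this commit adds is simple to sketch in C (hypothetical `struct stream`, not Varnish's types): sum DATA frame payloads as they arrive and fail the stream if the running total ever exceeds, or finishes short of, the advertised Content-Length.

#include <stdbool.h>
#include <stdint.h>

struct stream { uint64_t cl; uint64_t received; };  /* cl = declared length */

static bool on_data(struct stream *s, uint64_t frame_len, bool end_stream)
{
    s->received += frame_len;
    if (s->received > s->cl)
        return false;  /* body longer than Content-Length */
    if (end_stream && s->received != s->cl)
        return false;  /* body ended short of Content-Length */
    return true;
}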
do_ed_script (char const *inname, char const *outname, bool *outname_needs_removal, FILE *ofp) { static char const editor_program[] = EDITOR_PROGRAM; file_offset beginning_of_this_line; size_t chars_read; FILE *tmpfp = 0; int tmpfd = -1; /* placate gcc's -Wmaybe-uninitialized */ int exclusive = *outname_needs_removal ? 0 : O_EXCL; char const **ed_argv; int stdin_dup, status; if (! dry_run && ! skip_rest_of_patch) { /* Write ed script to a temporary file. This causes ed to abort on invalid commands such as when line numbers or ranges exceed the number of available lines. When ed reads from a pipe, it rejects invalid commands and treats the next line as a new command, which can lead to arbitrary command execution. */ tmpfd = make_tempfile (&TMPEDNAME, 'e', NULL, O_RDWR | O_BINARY, 0); if (tmpfd == -1) pfatal ("Can't create temporary file %s", quotearg (TMPEDNAME)); TMPEDNAME_needs_removal = true; tmpfp = fdopen (tmpfd, "w+b"); if (! tmpfp) pfatal ("Can't open stream for file %s", quotearg (TMPEDNAME)); } for (;;) { char ed_command_letter; beginning_of_this_line = file_tell (pfp); chars_read = get_line (); if (! chars_read) { next_intuit_at(beginning_of_this_line,p_input_line); break; } ed_command_letter = get_ed_command_letter (buf); if (ed_command_letter) { if (tmpfp) if (fwrite (buf, sizeof *buf, chars_read, tmpfp) < chars_read) write_fatal (); if (ed_command_letter != 'd' && ed_command_letter != 's') { p_pass_comments_through = true; while ((chars_read = get_line ()) != 0) { if (tmpfp) if (fwrite (buf, sizeof *buf, chars_read, tmpfp) < chars_read) write_fatal (); if (chars_read == 2 && strEQ (buf, ".\n")) break; } p_pass_comments_through = false; } } else { next_intuit_at(beginning_of_this_line,p_input_line); break; } } if (dry_run || skip_rest_of_patch) return; if (fwrite ("w\nq\n", sizeof (char), (size_t) 4, tmpfp) < (size_t) 4 || fflush (tmpfp) != 0) write_fatal (); if (lseek (tmpfd, 0, SEEK_SET) == -1) pfatal ("Can't rewind to the beginning of file %s", quotearg (TMPEDNAME)); if (inerrno != ENOENT) { *outname_needs_removal = true; copy_file (inname, outname, 0, exclusive, instat.st_mode, true); } fflush (stdout); if ((stdin_dup = dup (0)) == -1 || dup2 (tmpfd, 0) == -1) pfatal ("Failed to duplicate standard input"); assert (outname[0] != '!' && outname[0] != '-'); ed_argv = alloca (4 * sizeof * ed_argv); ed_argv[0] = editor_program; ed_argv[1] = "-"; ed_argv[2] = outname; ed_argv[3] = (char *) NULL; status = execute (editor_program, editor_program, (char **)ed_argv, false, false, false, false, true, false, NULL); if (status) fatal ("%s FAILED", editor_program); if (dup2 (stdin_dup, 0) == -1 || close (stdin_dup) == -1) pfatal ("Failed to duplicate standard input"); fclose (tmpfp); if (ofp) { FILE *ifp = fopen (outname, binary_transput ? "rb" : "r"); int c; if (!ifp) pfatal ("can't open '%s'", outname); while ((c = getc (ifp)) != EOF) if (putc (c, ofp) == EOF) write_fatal (); if (ferror (ifp) || fclose (ifp) != 0) read_fatal (); } }
0
[ "CWE-415" ]
patch
9c986353e420ead6e706262bf204d6e03322c300
114,287,915,870,181,740,000,000,000,000,000,000,000
110
Fix swapping fake lines in pch_swap * src/pch.c (pch_swap): Fix swapping p_bfake and p_efake when there is a blank line in the middle of a context-diff hunk: that empty line stays in the middle of the hunk and isn't swapped. Fixes: https://savannah.gnu.org/bugs/index.php?53133
ZipStream(zip* z, const String& name) : m_zipFile(nullptr) {
  if (name.empty()) {
    return;
  }

  struct zip_stat zipStat;
  if (zip_stat(z, name.c_str(), 0, &zipStat) != 0) {
    return;
  }

  m_zipFile = zip_fopen(z, name.c_str(), 0);
}
0
[ "CWE-22" ]
hhvm
65c95a01541dd2fbc9c978ac53bed235b5376686
172,481,323,188,526,800,000,000,000,000,000,000,000
12
ZipArchive::extractTo bug 70350 Summary:Don't allow upward directory traversal when extracting zip archive files. Files in zip files with `..` or starting at main root `/` should be normalized to something where the file being extracted winds up within the directory or a subdirectory where the actual extraction is taking place. http://git.php.net/?p=php-src.git;a=commit;h=f9c2bf73adb2ede0a486b0db466c264f2b27e0bb Reviewed By: FBNeal Differential Revision: D2798452 fb-gh-sync-id: 844549c93e011d1e991bb322bf85822246b04e30 shipit-source-id: 844549c93e011d1e991bb322bf85822246b04e30
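The normalization rule described here reduces to two rejections: no absolute entry names and no ".." path component. A simplified C sketch of such a sanitizer (an assumption-laden illustration, not HHVM's actual check):

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

/* True if an archive entry name cannot escape the extraction root. */
static bool safe_entry_name(const char *name)
{
    if (name[0] == '/')
        return false;  /* absolute path */
    for (const char *p = name; *p; ) {
        const char *slash = strchr(p, '/');
        size_t comp = slash ? (size_t)(slash - p) : strlen(p);
        if (comp == 2 && p[0] == '.' && p[1] == '.')
            return false;  /* ".." component */
        p += comp + (slash ? 1 : 0);
    }
    return true;
}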
path_end (const char *url)
{
  enum url_scheme scheme = url_scheme (url);
  const char *seps;
  if (scheme == SCHEME_INVALID)
    scheme = SCHEME_HTTP;  /* use http semantics for rel links */
  /* +2 to ignore the first two separators ':' and '/' */
  seps = init_seps (scheme) + 2;
  return strpbrk_or_eos (url, seps);
}
0
[]
wget
59b920874daa565a1323ffa1e756e80493190686
113,017,514,559,027,920,000,000,000,000,000,000,000
10
Support non-ASCII URLs * src/url.c [HAVE_ICONV]: Include iconv.h and langinfo.h. (convert_fname): New function. [HAVE_ICONV]: Convert file name from remote encoding to local encoding. (url_file_name): Call convert_fname. (filechr_table): Don't consider bytes in 128..159 as control characters. * tests/Test-ftp-iri.px: Fix the expected file name to match the new file-name recoding. State the remote encoding explicitly on the Wget command line. * NEWS: Mention the URI recoding when built with libiconv.
TEST_F(QueryPlannerTest, OrBelowElemMatchInexactCovered) {
    // true means multikey
    addIndex(BSON("a.b" << 1), true);
    runQuery(fromjson("{a: {$elemMatch: {$or: [{b: 'x'}, {b: /z/}]}}}"));

    assertNumSolutions(2U);
    assertSolutionExists("{cscan: {dir: 1}}");
    assertSolutionExists(
        "{fetch: {filter: {a: {$elemMatch: {$or: [{b: 'x'}, {b: /z/}]}}},"
        "node: {ixscan: {filter: null, pattern: {'a.b': 1}}}}}");
}
0
[]
mongo
ee97c0699fd55b498310996ee002328e533681a3
249,421,474,257,303,880,000,000,000,000,000,000,000
11
SERVER-36993 Fix crash due to incorrect $or pushdown for indexed $expr.
cma_select_ib_ps(struct rdma_id_private *id_priv)
{
	enum rdma_ucm_port_space ps = 0;
	struct sockaddr_ib *sib;
	u64 sid_ps, mask, sid;

	sib = (struct sockaddr_ib *) cma_src_addr(id_priv);
	mask = be64_to_cpu(sib->sib_sid_mask) & RDMA_IB_IP_PS_MASK;
	sid = be64_to_cpu(sib->sib_sid) & mask;

	if ((id_priv->id.ps == RDMA_PS_IB) && (sid == (RDMA_IB_IP_PS_IB & mask))) {
		sid_ps = RDMA_IB_IP_PS_IB;
		ps = RDMA_PS_IB;
	} else if (((id_priv->id.ps == RDMA_PS_IB) || (id_priv->id.ps == RDMA_PS_TCP)) &&
		   (sid == (RDMA_IB_IP_PS_TCP & mask))) {
		sid_ps = RDMA_IB_IP_PS_TCP;
		ps = RDMA_PS_TCP;
	} else if (((id_priv->id.ps == RDMA_PS_IB) || (id_priv->id.ps == RDMA_PS_UDP)) &&
		   (sid == (RDMA_IB_IP_PS_UDP & mask))) {
		sid_ps = RDMA_IB_IP_PS_UDP;
		ps = RDMA_PS_UDP;
	}

	if (ps) {
		sib->sib_sid = cpu_to_be64(sid_ps | ntohs(cma_port((struct sockaddr *) sib)));
		sib->sib_sid_mask = cpu_to_be64(RDMA_IB_IP_PS_MASK |
						be64_to_cpu(sib->sib_sid_mask));
	}
	return ps;
}
0
[ "CWE-416" ]
linux
bc0bdc5afaa740d782fbf936aaeebd65e5c2921d
253,734,360,500,647,270,000,000,000,000,000,000,000
30
RDMA/cma: Do not change route.addr.src_addr.ss_family If the state is not idle then rdma_bind_addr() will immediately fail and no change to global state should happen. For instance if the state is already RDMA_CM_LISTEN then this will corrupt the src_addr and would cause the test in cma_cancel_operation(): if (cma_any_addr(cma_src_addr(id_priv)) && !id_priv->cma_dev) To view a mangled src_addr, eg with a IPv6 loopback address but an IPv4 family, failing the test. This would manifest as this trace from syzkaller: BUG: KASAN: use-after-free in __list_add_valid+0x93/0xa0 lib/list_debug.c:26 Read of size 8 at addr ffff8881546491e0 by task syz-executor.1/32204 CPU: 1 PID: 32204 Comm: syz-executor.1 Not tainted 5.12.0-rc8-syzkaller #0 Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011 Call Trace: __dump_stack lib/dump_stack.c:79 [inline] dump_stack+0x141/0x1d7 lib/dump_stack.c:120 print_address_description.constprop.0.cold+0x5b/0x2f8 mm/kasan/report.c:232 __kasan_report mm/kasan/report.c:399 [inline] kasan_report.cold+0x7c/0xd8 mm/kasan/report.c:416 __list_add_valid+0x93/0xa0 lib/list_debug.c:26 __list_add include/linux/list.h:67 [inline] list_add_tail include/linux/list.h:100 [inline] cma_listen_on_all drivers/infiniband/core/cma.c:2557 [inline] rdma_listen+0x787/0xe00 drivers/infiniband/core/cma.c:3751 ucma_listen+0x16a/0x210 drivers/infiniband/core/ucma.c:1102 ucma_write+0x259/0x350 drivers/infiniband/core/ucma.c:1732 vfs_write+0x28e/0xa30 fs/read_write.c:603 ksys_write+0x1ee/0x250 fs/read_write.c:658 do_syscall_64+0x2d/0x70 arch/x86/entry/common.c:46 entry_SYSCALL_64_after_hwframe+0x44/0xae Which is indicating that an rdma_id_private was destroyed without doing cma_cancel_listens(). Instead of trying to re-use the src_addr memory to indirectly create an any address build one explicitly on the stack and bind to that as any other normal flow would do. Link: https://lore.kernel.org/r/[email protected] Cc: [email protected] Fixes: 732d41c545bb ("RDMA/cma: Make the locking for automatic state transition more clear") Reported-by: [email protected] Tested-by: Hao Sun <[email protected]> Reviewed-by: Leon Romanovsky <[email protected]> Signed-off-by: Jason Gunthorpe <[email protected]>
static void delete_char(struct vc_data *vc, unsigned int nr)
{
	unsigned short *p = (unsigned short *) vc->vc_pos;

	vc_uniscr_delete(vc, nr);
	scr_memcpyw(p, p + nr, (vc->vc_cols - vc->vc_x - nr) * 2);
	scr_memsetw(p + vc->vc_cols - vc->vc_x - nr, vc->vc_video_erase_char,
		    nr * 2);
	vc->vc_need_wrap = 0;
	if (con_should_update(vc))
		do_update_region(vc, (unsigned long) p,
				 vc->vc_cols - vc->vc_x);
}
0
[ "CWE-416", "CWE-362" ]
linux
ca4463bf8438b403596edd0ec961ca0d4fbe0220
85,952,579,391,231,360,000,000,000,000,000,000,000
13
vt: vt_ioctl: fix VT_DISALLOCATE freeing in-use virtual console The VT_DISALLOCATE ioctl can free a virtual console while tty_release() is still running, causing a use-after-free in con_shutdown(). This occurs because VT_DISALLOCATE considers a virtual console's 'struct vc_data' to be unused as soon as the corresponding tty's refcount hits 0. But actually it may be still being closed. Fix this by making vc_data be reference-counted via the embedded 'struct tty_port'. A newly allocated virtual console has refcount 1. Opening it for the first time increments the refcount to 2. Closing it for the last time decrements the refcount (in tty_operations::cleanup() so that it happens late enough), as does VT_DISALLOCATE. Reproducer: #include <fcntl.h> #include <linux/vt.h> #include <sys/ioctl.h> #include <unistd.h> int main() { if (fork()) { for (;;) close(open("/dev/tty5", O_RDWR)); } else { int fd = open("/dev/tty10", O_RDWR); for (;;) ioctl(fd, VT_DISALLOCATE, 5); } } KASAN report: BUG: KASAN: use-after-free in con_shutdown+0x76/0x80 drivers/tty/vt/vt.c:3278 Write of size 8 at addr ffff88806a4ec108 by task syz_vt/129 CPU: 0 PID: 129 Comm: syz_vt Not tainted 5.6.0-rc2 #11 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS ?-20191223_100556-anatol 04/01/2014 Call Trace: [...] con_shutdown+0x76/0x80 drivers/tty/vt/vt.c:3278 release_tty+0xa8/0x410 drivers/tty/tty_io.c:1514 tty_release_struct+0x34/0x50 drivers/tty/tty_io.c:1629 tty_release+0x984/0xed0 drivers/tty/tty_io.c:1789 [...] Allocated by task 129: [...] kzalloc include/linux/slab.h:669 [inline] vc_allocate drivers/tty/vt/vt.c:1085 [inline] vc_allocate+0x1ac/0x680 drivers/tty/vt/vt.c:1066 con_install+0x4d/0x3f0 drivers/tty/vt/vt.c:3229 tty_driver_install_tty drivers/tty/tty_io.c:1228 [inline] tty_init_dev+0x94/0x350 drivers/tty/tty_io.c:1341 tty_open_by_driver drivers/tty/tty_io.c:1987 [inline] tty_open+0x3ca/0xb30 drivers/tty/tty_io.c:2035 [...] Freed by task 130: [...] kfree+0xbf/0x1e0 mm/slab.c:3757 vt_disallocate drivers/tty/vt/vt_ioctl.c:300 [inline] vt_ioctl+0x16dc/0x1e30 drivers/tty/vt/vt_ioctl.c:818 tty_ioctl+0x9db/0x11b0 drivers/tty/tty_io.c:2660 [...] Fixes: 4001d7b7fc27 ("vt: push down the tty lock so we can see what is left to tackle") Cc: <[email protected]> # v3.4+ Reported-by: [email protected] Acked-by: Jiri Slaby <[email protected]> Signed-off-by: Eric Biggers <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Greg Kroah-Hartman <[email protected]>
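The shape of this fix is worth isolating: give the shared object a reference count so that each of the two paths that might free it (VT_DISALLOCATE and the final tty close) only drops a reference, and the memory goes away exactly once. A hedged single-threaded C sketch with a hypothetical `vc_data` (the kernel uses tty_port refcounting and atomic operations):

#include <stdlib.h>

struct vc_data { int refcount; /* ...console state... */ };

static struct vc_data *vc_get(struct vc_data *vc)
{
    vc->refcount++;
    return vc;
}

static void vc_put(struct vc_data *vc)
{
    if (--vc->refcount == 0)
        free(vc);  /* last user frees; no use-after-free window */
}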
uint32 get_partition_id_cols_range_for_endpoint(partition_info *part_info,
                                                bool is_left_endpoint,
                                                bool include_endpoint,
                                                uint32 nparts)
{
  uint min_part_id= 0, max_part_id= part_info->num_parts, loc_part_id;
  part_column_list_val *range_col_array= part_info->range_col_array;
  uint num_columns= part_info->part_field_list.elements;
  DBUG_ENTER("get_partition_id_cols_range_for_endpoint");

  /* Find the matching partition (including taking endpoint into account). */
  do
  {
    /* Midpoint, adjusted down, so it can never be > last partition. */
    loc_part_id= (max_part_id + min_part_id) >> 1;
    if (0 <= cmp_rec_and_tuple_prune(range_col_array +
                                       loc_part_id * num_columns,
                                     nparts, is_left_endpoint,
                                     include_endpoint))
      min_part_id= loc_part_id + 1;
    else
      max_part_id= loc_part_id;
  } while (max_part_id > min_part_id);
  loc_part_id= max_part_id;

  /* Given value must be LESS THAN the found partition. */
  DBUG_ASSERT(loc_part_id == part_info->num_parts ||
              (0 > cmp_rec_and_tuple_prune(range_col_array +
                                             loc_part_id * num_columns,
                                           nparts, is_left_endpoint,
                                           include_endpoint)));
  /* Given value must be GREATER THAN or EQUAL to the previous partition. */
  DBUG_ASSERT(loc_part_id == 0 ||
              (0 <= cmp_rec_and_tuple_prune(range_col_array +
                                              (loc_part_id - 1) * num_columns,
                                            nparts, is_left_endpoint,
                                            include_endpoint)));

  if (!is_left_endpoint)
  {
    /* Set the end after this partition if not already after the last. */
    if (loc_part_id < part_info->num_parts)
      loc_part_id++;
  }
  DBUG_RETURN(loc_part_id);
}
0
[]
mysql-server
be901b60ae59c93848c829d1b0b2cb523ab8692e
123,427,431,703,679,250,000,000,000,000,000,000,000
47
Bug#26390632: CREATE TABLE CAN CAUSE MYSQL TO EXIT. Analysis ======== CREATE TABLE of InnoDB table with a partition name which exceeds the path limit can cause the server to exit. During the preparation of the partition name, there was no check to identify whether the complete path name for partition exceeds the max supported path length, causing the server to exit during subsequent processing. Fix === During the preparation of partition name, check and report an error if the partition path name exceeds the maximum path name limit. This is a 5.5 patch.
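The fix reduces to checking for truncation when the partition path is assembled. A hedged C sketch (hypothetical helper and "#P#" naming taken as an assumption about the on-disk partition file convention, not MySQL's actual code): snprintf reports the would-be length, so a return value at or beyond the buffer size means the path is too long and must be rejected instead of used.

#include <stddef.h>
#include <stdio.h>

/* Returns 0 on success, -1 if the composed path would not fit in buf. */
static int build_partition_path(char *buf, size_t bufsz,
                                const char *dir, const char *table,
                                const char *part)
{
    int n = snprintf(buf, bufsz, "%s/%s#P#%s.ibd", dir, table, part);
    return (n < 0 || (size_t)n >= bufsz) ? -1 : 0;  /* too long: error out */
}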
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLiteDepthwiseConvParams*>(node->builtin_data); OpData* data = reinterpret_cast<OpData*>(node->user_data); // TODO(ahentz): use could use GetOptionalInputTensor() here, but we need to // decide whether we are OK with optional tensors being completely absent, as // opposed to having -1 as their index. bool hasBias = NumInputs(node) == 3; TF_LITE_ENSURE(context, hasBias || NumInputs(node) == 2); const TfLiteTensor* input = GetInput(context, node, kInputTensor); const TfLiteTensor* filter = GetInput(context, node, kFilterTensor); const TfLiteTensor* bias = nullptr; TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); TF_LITE_ENSURE_EQ(context, NumDimensions(input), 4); TF_LITE_ENSURE_EQ(context, NumDimensions(filter), 4); const TfLiteType data_type = input->type; const TfLiteType filter_type = filter->type; const bool is_hybrid = data_type == kTfLiteFloat32 && filter_type == kTfLiteInt8; TF_LITE_ENSURE(context, data_type == kTfLiteFloat32 || data_type == kTfLiteUInt8 || data_type == kTfLiteInt8 || data_type == kTfLiteInt16); TF_LITE_ENSURE_TYPES_EQ(context, output->type, data_type); if (!is_hybrid) { TF_LITE_ENSURE(context, filter->type == data_type || data_type == kTfLiteInt16); } // Filter in DepthwiseConv is expected to be [1, H, W, O]. TF_LITE_ENSURE_EQ(context, SizeOfDimension(filter, 0), 1); if (hasBias) { bias = GetInput(context, node, kBiasTensor); if (data_type == kTfLiteUInt8 || data_type == kTfLiteInt8) { TF_LITE_ENSURE_TYPES_EQ(context, bias->type, kTfLiteInt32); TF_LITE_ENSURE_EQ(context, bias->params.zero_point, 0); } else if (data_type == kTfLiteInt16) { TF_LITE_ENSURE_TYPES_EQ(context, bias->type, kTfLiteInt64); TF_LITE_ENSURE_EQ(context, bias->params.zero_point, 0); TF_LITE_ENSURE_EQ(context, input->params.zero_point, 0); TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0); } else { TF_LITE_ENSURE_TYPES_EQ(context, bias->type, data_type); } TF_LITE_ENSURE_EQ(context, NumDimensions(bias), 1); TF_LITE_ENSURE_EQ(context, SizeOfDimension(filter, 3), SizeOfDimension(bias, 0)); } int channels_out = SizeOfDimension(filter, 3); int width = SizeOfDimension(input, 2); int height = SizeOfDimension(input, 1); int filter_width = SizeOfDimension(filter, 2); int filter_height = SizeOfDimension(filter, 1); int batches = SizeOfDimension(input, 0); // Matching GetWindowedOutputSize in TensorFlow. auto padding = params->padding; int out_width, out_height; data->padding = ComputePaddingHeightWidth( params->stride_height, params->stride_width, params->dilation_height_factor, params->dilation_width_factor, height, width, filter_height, filter_width, padding, &out_height, &out_width); // Note that quantized inference requires that all tensors have their // parameters set. This is usually done during quantized training or // calibration. 
if (data_type != kTfLiteFloat32) { TF_LITE_ENSURE_EQ(context, filter->quantization.type, kTfLiteAffineQuantization); const auto* affine_quantization = reinterpret_cast<TfLiteAffineQuantization*>( filter->quantization.params); TF_LITE_ENSURE(context, affine_quantization); TF_LITE_ENSURE(context, affine_quantization->scale); TF_LITE_ENSURE(context, (affine_quantization->scale->size == 1 || affine_quantization->scale->size == channels_out)); data->per_channel_output_multiplier.resize(channels_out); data->per_channel_output_shift.resize(channels_out); TF_LITE_ENSURE_STATUS(tflite::PopulateConvolutionQuantizationParams( context, input, filter, bias, output, params->activation, &data->output_multiplier, &data->output_shift, &data->output_activation_min, &data->output_activation_max, data->per_channel_output_multiplier.data(), data->per_channel_output_shift.data(), channels_out)); } if (is_hybrid) { const auto* affine_quantization = reinterpret_cast<TfLiteAffineQuantization*>( filter->quantization.params); TF_LITE_ENSURE(context, affine_quantization); TF_LITE_ENSURE(context, affine_quantization->scale); TF_LITE_ENSURE_EQ( context, affine_quantization->scale->size, filter->dims->data[affine_quantization->quantized_dimension]); int temporaries_count = 0; data->input_quantized_index = temporaries_count; if (data->input_quantized_id == kTensorNotAllocated) { TF_LITE_ENSURE_OK( context, context->AddTensors(context, 1, &data->input_quantized_id)); } ++temporaries_count; data->scaling_factors_index = temporaries_count; if (data->scaling_factors_id == kTensorNotAllocated) { TF_LITE_ENSURE_OK( context, context->AddTensors(context, 1, &data->scaling_factors_id)); } ++temporaries_count; data->input_offset_index = temporaries_count; if (data->input_offset_id == kTensorNotAllocated) { TF_LITE_ENSURE_OK( context, context->AddTensors(context, 1, &data->input_offset_id)); } ++temporaries_count; TfLiteIntArrayFree(node->temporaries); node->temporaries = TfLiteIntArrayCreate(temporaries_count); node->temporaries->data[data->input_quantized_index] = data->input_quantized_id; TfLiteTensor* input_quantized = GetTemporary(context, node, data->input_quantized_index); input_quantized->type = kTfLiteInt8; input_quantized->allocation_type = kTfLiteArenaRw; if (!TfLiteIntArrayEqual(input_quantized->dims, input->dims)) { TfLiteIntArray* input_quantized_size = TfLiteIntArrayCopy(input->dims); TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, input_quantized, input_quantized_size)); } node->temporaries->data[data->scaling_factors_index] = data->scaling_factors_id; TfLiteTensor* scaling_factors = GetTemporary(context, node, data->scaling_factors_index); scaling_factors->type = kTfLiteFloat32; scaling_factors->allocation_type = kTfLiteArenaRw; const int batch_size = SizeOfDimension(input, 0); int scaling_dims[1] = {batch_size}; if (!TfLiteIntArrayEqualsArray(scaling_factors->dims, 1, scaling_dims)) { TfLiteIntArray* scaling_factors_size = TfLiteIntArrayCreate(1); scaling_factors_size->data[0] = batch_size; TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, scaling_factors, scaling_factors_size)); } node->temporaries->data[data->input_offset_index] = data->input_offset_id; TfLiteTensor* input_offsets = GetTemporary(context, node, data->input_offset_index); input_offsets->type = kTfLiteInt32; input_offsets->allocation_type = kTfLiteArenaRw; if (!TfLiteIntArrayEqualsArray(input_offsets->dims, 1, scaling_dims)) { TfLiteIntArray* input_offsets_size = TfLiteIntArrayCreate(1); input_offsets_size->data[0] = batch_size; 
TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, input_offsets, input_offsets_size)); } } TfLiteIntArray* outputSize = TfLiteIntArrayCreate(4); outputSize->data[0] = batches; outputSize->data[1] = out_height; outputSize->data[2] = out_width; outputSize->data[3] = channels_out; return context->ResizeTensor(context, output, outputSize); }
1
[ "CWE-125", "CWE-787" ]
tensorflow
1970c2158b1ffa416d159d03c3370b9a462aee35
225,061,722,387,927,900,000,000,000,000,000,000,000
174
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56