Dataset columns (each record below lists these fields in order; ranges are the min/max reported by the viewer):

  func       string   length 0 to 484k (function source code)
  target     int64    0 to 1
  cwe        list     length 0 to 4 (CWE identifiers)
  project    string   799 distinct values
  commit_id  string   length 40 (full SHA-1 hex)
  hash       float64  min 1,215,700,430,453,689,100,000,000 / max 340,281,914,521,452,260,000,000,000,000
  size       int64    1 to 24k
  message    string   length 0 to 13.3k (commit message)
UINT32_Marshal(UINT32 *source, BYTE **buffer, INT32 *size) { if (buffer != NULL) { if ((size == NULL) || ((UINT32)*size >= sizeof(UINT32))) { (*buffer)[0] = (BYTE)((*source >> 24) & 0xff); (*buffer)[1] = (BYTE)((*source >> 16) & 0xff); (*buffer)[2] = (BYTE)((*source >> 8) & 0xff); (*buffer)[3] = (BYTE)((*source >> 0) & 0xff); *buffer += sizeof(UINT32); if (size != NULL) { *size -= sizeof(UINT32); } } else { pAssert(FALSE); } } return sizeof(UINT32); }
0
[ "CWE-787" ]
libtpms
3ef9b26cb9f28bd64d738bff9505a20d4eb56acd
285,910,005,728,659,120,000,000,000,000,000,000,000
21
tpm2: Add maxSize parameter to TPM2B_Marshal for sanity checks Add maxSize parameter to TPM2B_Marshal and assert on it checking the size of the data intended to be marshaled versus the maximum buffer size. Signed-off-by: Stefan Berger <[email protected]>
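The commit message above describes the fix pattern: marshalling routines take an explicit maximum buffer size and check it before writing. A minimal standalone sketch of that idea in C; the type and function names (`tpm2b_t`, `tpm2b_marshal_checked`) are illustrative stand-ins, not libtpms' actual API:

```c
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Hypothetical TPM2B-style buffer: a 16-bit size followed by payload. */
typedef struct {
    uint16_t size;
    uint8_t  buffer[64];
} tpm2b_t;

/* Marshal src into out, refusing to write past max_size bytes.
 * Returns the number of bytes written, or 0 on overflow. */
static size_t tpm2b_marshal_checked(const tpm2b_t *src,
                                    uint8_t *out, size_t max_size)
{
    size_t need = sizeof(src->size) + src->size;
    assert(src->size <= sizeof(src->buffer)); /* sanity check, as the fix adds */
    if (need > max_size)
        return 0;                             /* would overflow caller's buffer */
    out[0] = (uint8_t)(src->size >> 8);       /* big-endian UINT16 size field */
    out[1] = (uint8_t)(src->size & 0xff);
    memcpy(out + 2, src->buffer, src->size);
    return need;
}
```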
FLAC__bool write_bitbuffer_(FLAC__StreamEncoder *encoder, uint32_t samples, FLAC__bool is_last_block) { const FLAC__byte *buffer; size_t bytes; FLAC__ASSERT(FLAC__bitwriter_is_byte_aligned(encoder->private_->frame)); if(!FLAC__bitwriter_get_buffer(encoder->private_->frame, &buffer, &bytes)) { encoder->protected_->state = FLAC__STREAM_ENCODER_MEMORY_ALLOCATION_ERROR; return false; } if(encoder->protected_->verify) { encoder->private_->verify.output.data = buffer; encoder->private_->verify.output.bytes = bytes; if(encoder->private_->verify.state_hint == ENCODER_IN_MAGIC) { encoder->private_->verify.needs_magic_hack = true; } else { if(!FLAC__stream_decoder_process_single(encoder->private_->verify.decoder) || (!is_last_block && (FLAC__stream_encoder_get_verify_decoder_state(encoder) == FLAC__STREAM_DECODER_END_OF_STREAM))) { FLAC__bitwriter_release_buffer(encoder->private_->frame); FLAC__bitwriter_clear(encoder->private_->frame); if(encoder->protected_->state != FLAC__STREAM_ENCODER_VERIFY_MISMATCH_IN_AUDIO_DATA) encoder->protected_->state = FLAC__STREAM_ENCODER_VERIFY_DECODER_ERROR; return false; } } } if(write_frame_(encoder, buffer, bytes, samples, is_last_block) != FLAC__STREAM_ENCODER_WRITE_STATUS_OK) { FLAC__bitwriter_release_buffer(encoder->private_->frame); FLAC__bitwriter_clear(encoder->private_->frame); encoder->protected_->state = FLAC__STREAM_ENCODER_CLIENT_ERROR; return false; } FLAC__bitwriter_release_buffer(encoder->private_->frame); FLAC__bitwriter_clear(encoder->private_->frame); if(samples > 0) { encoder->private_->streaminfo.data.stream_info.min_framesize = flac_min(bytes, encoder->private_->streaminfo.data.stream_info.min_framesize); encoder->private_->streaminfo.data.stream_info.max_framesize = flac_max(bytes, encoder->private_->streaminfo.data.stream_info.max_framesize); } return true; }
0
[ "CWE-787" ]
flac
e1575e4a7c5157cbf4e4a16dbd39b74f7174c7be
56,325,365,369,468,100,000,000,000,000,000,000,000
48
libFlac: Exit at EOS in verify mode When verify mode is enabled, once decoder flags end of stream, encode processing is considered complete. CVE-2021-0561 Signed-off-by: Ralph Giles <[email protected]>
static void powermate_free_buffers(struct usb_device *udev, struct powermate_device *pm) { usb_free_coherent(udev, POWERMATE_PAYLOAD_SIZE_MAX, pm->data, pm->data_dma); kfree(pm->configcr); }
0
[ "CWE-703" ]
linux
9c6ba456711687b794dcf285856fc14e2c76074f
143,805,899,120,175,700,000,000,000,000,000,000,000
6
Input: powermate - fix oops with malicious USB descriptors The powermate driver expects at least one valid USB endpoint in its probe function. If given malicious descriptors that specify 0 for the number of endpoints, it will crash. Validate the number of endpoints on the interface before using them. The full report for this issue can be found here: http://seclists.org/bugtraq/2016/Mar/85 Reported-by: Ralf Spenneberg <[email protected]> Cc: stable <[email protected]> Signed-off-by: Josh Boyer <[email protected]> Signed-off-by: Dmitry Torokhov <[email protected]>
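The described fix validates the endpoint count before touching endpoint 0. Roughly the same guard outside the kernel, with a simplified stand-in for the USB descriptor type:

```c
#include <errno.h>

struct usb_interface_desc {
    int bNumEndpoints;
    /* ... endpoint array follows in the real descriptor ... */
};

/* Probe-time guard: a malicious device can legally report zero
 * endpoints, so check before indexing endpoint[0]. */
static int powermate_style_probe(const struct usb_interface_desc *desc)
{
    if (desc->bNumEndpoints < 1)
        return -EINVAL;   /* reject instead of crashing on endpoint[0] */
    /* ... safe to use the first endpoint here ... */
    return 0;
}
```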
TEST_F(ConnectionManagerUtilityTest, MtlsSetForwardClientCert) { auto ssl = std::make_shared<NiceMock<Ssl::MockConnectionInfo>>(); ON_CALL(*ssl, peerCertificatePresented()).WillByDefault(Return(true)); const std::vector<std::string> local_uri_sans{"test://foo.com/be"}; EXPECT_CALL(*ssl, uriSanLocalCertificate()).WillOnce(Return(local_uri_sans)); std::string expected_sha("abcdefg"); EXPECT_CALL(*ssl, sha256PeerCertificateDigest()).WillOnce(ReturnRef(expected_sha)); const std::vector<std::string> peer_uri_sans{"test://foo.com/fe"}; EXPECT_CALL(*ssl, uriSanPeerCertificate()).WillRepeatedly(Return(peer_uri_sans)); std::string expected_pem("%3D%3Dabc%0Ade%3D"); EXPECT_CALL(*ssl, urlEncodedPemEncodedPeerCertificate()).WillOnce(ReturnRef(expected_pem)); std::string expected_chain_pem(expected_pem + "%3D%3Dlmn%0Aop%3D"); EXPECT_CALL(*ssl, urlEncodedPemEncodedPeerCertificateChain()) .WillOnce(ReturnRef(expected_chain_pem)); std::vector<std::string> expected_dns = {"www.example.com"}; EXPECT_CALL(*ssl, dnsSansPeerCertificate()).WillOnce(Return(expected_dns)); ON_CALL(connection_, ssl()).WillByDefault(Return(ssl)); ON_CALL(config_, forwardClientCert()) .WillByDefault(Return(Http::ForwardClientCertType::AppendForward)); std::vector<Http::ClientCertDetailsType> details = std::vector<Http::ClientCertDetailsType>(); details.push_back(Http::ClientCertDetailsType::URI); details.push_back(Http::ClientCertDetailsType::Cert); details.push_back(Http::ClientCertDetailsType::Chain); details.push_back(Http::ClientCertDetailsType::DNS); ON_CALL(config_, setCurrentClientCertDetails()).WillByDefault(ReturnRef(details)); TestRequestHeaderMapImpl headers; EXPECT_EQ((MutateRequestRet{"10.0.0.3:50000", false, Tracing::Reason::NotTraceable}), callMutateRequestHeaders(headers, Protocol::Http2)); EXPECT_TRUE(headers.has("x-forwarded-client-cert")); EXPECT_EQ("By=test://foo.com/be;" "Hash=abcdefg;" "URI=test://foo.com/fe;" "Cert=\"%3D%3Dabc%0Ade%3D\";" "Chain=\"%3D%3Dabc%0Ade%3D%3D%3Dlmn%0Aop%3D\";" "DNS=www.example.com", headers.get_("x-forwarded-client-cert")); }
0
[ "CWE-22" ]
envoy
5333b928d8bcffa26ab19bf018369a835f697585
213,759,417,632,754,660,000,000,000,000,000,000,000
38
Implement handling of escaped slash characters in URL path Fixes: CVE-2021-29492 Signed-off-by: Yan Avlasov <[email protected]>
void TY_(AddStringLiteral)( Lexer* lexer, ctmbstr str ) { byte c; while(0 != (c = *str++) ) TY_(AddCharToLexer)( lexer, c ); }
0
[ "CWE-119" ]
tidy-html5
c18f27a58792f7fbd0b30a0ff50d6b40a82f940d
9,222,284,398,788,250,000,000,000,000,000,000,000
6
Issue #217 - avoid len going negative, ever...
static void TDES_CTR(const BYTE *key, // IN INT32 keySizeInBits, // IN INT32 dSize, // IN const BYTE *dIn, // IN BYTE *iv, // IN BYTE *dOut, // OUT INT16 blockSize // IN ) { tpmCryptKeySchedule_t keySchedule; int i; BYTE tmp[MAX_SYM_BLOCK_SIZE]; BYTE *pT; TDES_set_encrypt_key(key, keySizeInBits, (tpmKeyScheduleTDES *)&keySchedule.TDES); for(; dSize > 0; dSize -= blockSize) { // Encrypt the current value of the IV(counter) TDES_encrypt(iv, tmp, (tpmKeyScheduleTDES *)&keySchedule.TDES); //increment the counter (counter is big-endian so start at end) for(i = blockSize - 1; i >= 0; i--) if((iv[i] += 1) != 0) break; // XOR the encrypted counter value with input and put into output pT = tmp; for(i = (dSize < blockSize) ? dSize : blockSize; i > 0; i--) *dOut++ = *dIn++ ^ *pT++; } }
0
[ "CWE-330" ]
libtpms
32c159ab53db703749a8f90430cdc7b20b00975e
213,106,295,345,909,600,000,000,000,000,000,000,000
31
tpm2: CryptSym: fix AES output IV The TPM is supposed to provide the output IV in the ivInOut parameter in CryptSymmetricEncrypt. In the case of using the openssl routines, the output IV is missed, and the resulting output from the TPM is in the input IV. OpenSSL unfortunately does not export EVP_CIPHER_CTX_iv() until tags/OpenSSL_1_1_0, so we have to fall back to the reference code for previous OpenSSL versions. Signed-off-by: William Roberts <[email protected]> Signed-off-by: Stefan Berger <[email protected]>
void smtp_server_connection_timeout_reset(struct smtp_server_connection *conn) { if (conn->to_idle != NULL) timeout_reset(conn->to_idle); }
0
[ "CWE-77" ]
core
321c339756f9b2b98fb7326359d1333adebb5295
279,731,981,580,565,550,000,000,000,000,000,000,000
5
lib-smtp: smtp-server-connection - Fix STARTTLS command injection vulnerability. The input handler kept reading more commands even though the input was locked by the STARTTLS command, thereby causing it to read the command pipelined beyond STARTTLS. This causes a STARTTLS command injection vulnerability.
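The bug class behind this fix: any bytes the client pipelined in plaintext after STARTTLS must be discarded before the TLS handshake, or a smuggled command executes inside the encrypted session. A schematic sketch; the buffer type and names are invented for illustration and are not Dovecot's actual I/O layer:

```c
#include <stddef.h>

struct input_buf {
    unsigned char data[4096];
    size_t used;     /* bytes buffered but not yet parsed */
};

/* After accepting STARTTLS, throw away anything the client already
 * pipelined in plaintext; a command smuggled here would otherwise be
 * executed as if it had arrived over the TLS session. */
static void starttls_accepted(struct input_buf *in)
{
    in->used = 0;    /* drop pre-handshake leftovers before TLS starts */
}
```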
xmlPopInputCallbacks(void) { if (!xmlInputCallbackInitialized) return(-1); if (xmlInputCallbackNr <= 0) return(-1); xmlInputCallbackNr--; xmlInputCallbackTable[xmlInputCallbackNr].matchcallback = NULL; xmlInputCallbackTable[xmlInputCallbackNr].opencallback = NULL; xmlInputCallbackTable[xmlInputCallbackNr].readcallback = NULL; xmlInputCallbackTable[xmlInputCallbackNr].closecallback = NULL; return(xmlInputCallbackNr); }
0
[ "CWE-134" ]
libxml2
4472c3a5a5b516aaf59b89be602fbce52756c3e9
81,383,372,838,899,830,000,000,000,000,000,000,000
16
Fix some format string warnings with possible format string vulnerability For https://bugzilla.gnome.org/show_bug.cgi?id=761029 Decorate every method in libxml2 with the appropriate LIBXML_ATTR_FORMAT(fmt,args) macro and add some cleanups following the reports.
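The "decorate every method" fix refers to compiler format attributes, which let GCC and Clang type-check printf-style arguments at build time; libxml2's LIBXML_ATTR_FORMAT(fmt,args) macro wraps this. A minimal self-contained illustration of the same idea:

```c
#include <stdarg.h>
#include <stdio.h>

#if defined(__GNUC__)
# define ATTR_FORMAT(fmt, args) __attribute__((__format__(__printf__, fmt, args)))
#else
# define ATTR_FORMAT(fmt, args)
#endif

/* The attribute tells the compiler that argument 1 is a printf format
 * and variadic arguments start at 2, so mismatched conversions are
 * diagnosed at compile time. */
static void my_log(const char *fmt, ...) ATTR_FORMAT(1, 2);

static void my_log(const char *fmt, ...)
{
    va_list ap;
    va_start(ap, fmt);
    vfprintf(stderr, fmt, ap);
    va_end(ap);
}
```

With the attribute in place, calls like `my_log(user_input)` (a user-controlled format string) or `my_log("%s")` with a missing argument trigger -Wformat warnings.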
void nfs41_shutdown_client(struct nfs_client *clp) { if (nfs4_has_session(clp)) { nfs4_cleanup_callback(clp); nfs4_shutdown_ds_clients(clp); nfs4_destroy_session(clp->cl_session); nfs4_destroy_clientid(clp); } }
0
[ "CWE-703" ]
linux
dd99e9f98fbf423ff6d365b37a98e8879170f17c
298,852,868,655,691,900,000,000,000,000,000,000,000
10
NFSv4: Initialise connection to the server in nfs4_alloc_client() Set up the connection to the NFSv4 server in nfs4_alloc_client(), before we've added the struct nfs_client to the net-namespace's nfs_client_list so that a downed server won't cause other mounts to hang in the trunking detection code. Reported-by: Michael Wakabayashi <[email protected]> Fixes: 5c6e5b60aae4 ("NFS: Fix an Oops in the pNFS files and flexfiles connection setup to the DS") Signed-off-by: Trond Myklebust <[email protected]>
TEST_P(Security, BuiltinAuthenticationAndAccessAndCryptoPlugin_PermissionsDisableDiscoveryDisableAccessNone_validation_ok_disable_discovery_enable_access_none) // *INDENT-ON* { PubSubReader<HelloWorldType> reader(TEST_TOPIC_NAME); PubSubWriter<HelloWorldType> writer(TEST_TOPIC_NAME); std::string governance_file("governance_disable_discovery_disable_access_none.smime"); BuiltinAuthenticationAndAccessAndCryptoPlugin_Permissions_validation_ok_common(reader, writer, governance_file); }
0
[ "CWE-284" ]
Fast-DDS
d2aeab37eb4fad4376b68ea4dfbbf285a2926384
227,568,209,078,496,550,000,000,000,000,000,000,000
9
check remote permissions (#1387) * Refs 5346. Blackbox test Signed-off-by: Iker Luengo <[email protected]> * Refs 5346. one-way string compare Signed-off-by: Iker Luengo <[email protected]> * Refs 5346. Do not add partition separator on last partition Signed-off-by: Iker Luengo <[email protected]> * Refs 5346. Uncrustify Signed-off-by: Iker Luengo <[email protected]> * Refs 5346. Uncrustify Signed-off-by: Iker Luengo <[email protected]> * Refs 3680. Access control unit testing It only covers Partition and Topic permissions Signed-off-by: Iker Luengo <[email protected]> * Refs #3680. Fix partition check on Permissions plugin. Signed-off-by: Iker Luengo <[email protected]> * Refs 3680. Uncrustify Signed-off-by: Iker Luengo <[email protected]> * Refs 3680. Fix tests on mac Signed-off-by: Iker Luengo <[email protected]> * Refs 3680. Fix windows tests Signed-off-by: Iker Luengo <[email protected]> * Refs 3680. Avoid memory leak on test Signed-off-by: Iker Luengo <[email protected]> * Refs 3680. Proxy data mocks should not return temporary objects Signed-off-by: Iker Luengo <[email protected]> * refs 3680. uncrustify Signed-off-by: Iker Luengo <[email protected]> Co-authored-by: Miguel Company <[email protected]>
bool alwaysSetRequestIdInResponse() const override { return always_set_request_id_in_response_; }
0
[ "CWE-22" ]
envoy
5333b928d8bcffa26ab19bf018369a835f697585
230,279,222,708,545,580,000,000,000,000,000,000,000
1
Implement handling of escaped slash characters in URL path Fixes: CVE-2021-29492 Signed-off-by: Yan Avlasov <[email protected]>
absl::string_view ExtractorImpl::extractJWT(absl::string_view value_str, absl::string_view::size_type after) const { const auto starting = value_str.find_first_of(ConstantBase64UrlEncodingCharsPlusDot, after); if (starting == value_str.npos) { return value_str; } // There should be two dots (periods; 0x2e) inside the string, but we don't verify that here auto ending = value_str.find_first_not_of(ConstantBase64UrlEncodingCharsPlusDot, starting); if (ending == value_str.npos) { // Base64Url-encoded string occupies the rest of the line return value_str.substr(starting); } return value_str.substr(starting, ending - starting); }
0
[]
envoy
2c60632d41555ec8b3d9ef5246242be637a2db0f
45,194,522,056,722,450,000,000,000,000,000,000,000
13
http: header map security fixes for duplicate headers (#197) Previously header matching did not match on all headers for non-inline headers. This patch changes the default behavior to always logically match on all headers. Multiple individual headers will be logically concatenated with ',' similar to what is done with inline headers. This makes the behavior effectively consistent. This behavior can be temporarily reverted by setting the runtime value "envoy.reloadable_features.header_match_on_all_headers" to "false". Targeted fixes have been additionally performed on the following extensions which make them consider all duplicate headers by default as a comma concatenated list: 1) Any extension using CEL matching on headers. 2) The header to metadata filter. 3) The JWT filter. 4) The Lua filter. Like primary header matching used in routing, RBAC, etc. this behavior can be disabled by setting the runtime value "envoy.reloadable_features.header_match_on_all_headers" to false. Finally, the setCopy() header map API previously only set the first header in the case of duplicate non-inline headers. setCopy() now behaves similarly to the other set*() APIs and replaces all found headers with a single value. This may have had security implications in the extauth filter which uses this API. This behavior can be disabled by setting the runtime value "envoy.reloadable_features.http_set_copy_replace_all_headers" to false. Fixes https://github.com/envoyproxy/envoy-setec/issues/188 Signed-off-by: Matt Klein <[email protected]>
static void controloptions (lua_State *L, int opt, const char **fmt, Header *h) { switch (opt) { case ' ': return; /* ignore white spaces */ case '>': h->endian = BIG; return; case '<': h->endian = LITTLE; return; case '!': { int a = getnum(fmt, MAXALIGN); if (!isp2(a)) luaL_error(L, "alignment %d is not a power of 2", a); h->align = a; return; } default: { const char *msg = lua_pushfstring(L, "invalid format option '%c'", opt); luaL_argerror(L, 1, msg); } } }
1
[ "CWE-61", "CWE-190", "CWE-787" ]
redis
ef764dde1cca2f25d00686673d1bc89448819571
328,502,044,187,717,850,000,000,000,000,000,000,000
19
[FIX] revisit CVE-2015-8080 vulnerability
register_assoc (graphid i, graphid j) { if (!used_assoc) init_assoc (); used_assoc[i] = true; used_assoc[j] = true; }
0
[]
bison
b7aab2dbad43aaf14eebe78d54aafa245a000988
322,774,504,722,624,540,000,000,000,000,000,000,000
7
fix: crash when redefining the EOF token Reported by Agency for Defense Development. https://lists.gnu.org/r/bug-bison/2020-08/msg00008.html On an empty grammar such as %token FOO BAR FOO 0 %% input: %empty we crash because when we find FOO 0, we decrement ntokens (since FOO was discovered to be EOF, which is already known to be a token, so we increment ntokens for it, and need to cancel this). This "works well" when EOF is properly defined in one go, but here it is first defined and later only assigned token code 0. In the meanwhile BAR was given the token number that we just decremented. To fix this, assign symbol numbers after parsing, not during parsing, so that we have also seen all the explicit token codes. To maintain the current numbers (I'd like to keep no difference in the output, not just equivalence), we need to make sure the symbols are numbered in the same order: that of appearance in the source file. So we need the locations to be correct, which was almost the case, except for nterms that appeared several times as LHS (i.e., several times as "foo: ..."). Fixing the use of location_of_lhs sufficed (it appears it was intended for this use, but its implementation was unfinished: it was always set to "false" only). * src/symtab.c (symbol_location_as_lhs_set): Update location_of_lhs. (symbol_code_set): Remove broken hack that decremented ntokens. (symbol_class_set, dummy_symbol_get): Don't set number, ntokens and nnterms. (symbol_check_defined): Do it. (symbols): Don't count nsyms here. Actually, don't count nsyms at all: let it be done in... * src/reader.c (check_and_convert_grammar): here. Define nsyms from ntokens and nnterms after parsing. * tests/input.at (EOF redeclared): New. * examples/c/bistromathic/bistromathic.test: Adjust the traces: in "%nterm <double> exp %% input: ...", exp used to be numbered before input.
struct vfsmount *lookup_mnt(struct path *path) { struct mount *child_mnt; br_read_lock(&vfsmount_lock); child_mnt = __lookup_mnt(path->mnt, path->dentry, 1); if (child_mnt) { mnt_add_count(child_mnt, 1); br_read_unlock(&vfsmount_lock); return &child_mnt->mnt; } else { br_read_unlock(&vfsmount_lock); return NULL; } }
0
[ "CWE-284", "CWE-264" ]
linux
3151527ee007b73a0ebd296010f1c0454a919c7d
3,041,775,296,748,296,000,000,000,000,000,000,000
15
userns: Don't allow creation if the user is chrooted Guarantee that the policy of which files may be accessed that is established by setting the root directory will not be violated by user namespaces by verifying that the root directory points to the root of the mount namespace at the time of user namespace creation. Changing the root is a privileged operation, and as a matter of policy it serves to limit unprivileged processes to files below the current root directory. For reasons of simplicity and comprehensibility the privilege to change the root directory is gated solely on the CAP_SYS_CHROOT capability in the user namespace. Therefore when creating a user namespace we must ensure that the policy of which files may be accessed cannot be violated by changing the root directory. Anyone who runs a process in a chroot and would like to use user namespaces can set up the same view of filesystems with a mount namespace instead. As a result, this is not a practical limitation for using user namespaces. Cc: [email protected] Acked-by: Serge Hallyn <[email protected]> Reported-by: Andy Lutomirski <[email protected]> Signed-off-by: "Eric W. Biederman" <[email protected]>
ExecWithCheckOptions(ResultRelInfo *resultRelInfo, TupleTableSlot *slot, EState *estate) { Relation rel = resultRelInfo->ri_RelationDesc; TupleDesc tupdesc = RelationGetDescr(rel); ExprContext *econtext; ListCell *l1, *l2; /* * We will use the EState's per-tuple context for evaluating constraint * expressions (creating it if it's not already there). */ econtext = GetPerTupleExprContext(estate); /* Arrange for econtext's scan tuple to be the tuple under test */ econtext->ecxt_scantuple = slot; /* Check each of the constraints */ forboth(l1, resultRelInfo->ri_WithCheckOptions, l2, resultRelInfo->ri_WithCheckOptionExprs) { WithCheckOption *wco = (WithCheckOption *) lfirst(l1); ExprState *wcoExpr = (ExprState *) lfirst(l2); /* * WITH CHECK OPTION checks are intended to ensure that the new tuple * is visible (in the case of a view) or that it passes the * 'with-check' policy (in the case of row security). * If the qual evaluates to NULL or FALSE, then the new tuple won't be * included in the view or doesn't pass the 'with-check' policy for the * table. We need ExecQual to return FALSE for NULL to handle the view * case (the opposite of what we do above for CHECK constraints). */ if (!ExecQual((List *) wcoExpr, econtext, false)) { char *val_desc; Bitmapset *modifiedCols; modifiedCols = GetModifiedColumns(resultRelInfo, estate); val_desc = ExecBuildSlotValueDescription(RelationGetRelid(rel), slot, tupdesc, modifiedCols, 64); ereport(ERROR, (errcode(ERRCODE_WITH_CHECK_OPTION_VIOLATION), errmsg("new row violates WITH CHECK OPTION for \"%s\"", wco->viewname), val_desc ? errdetail("Failing row contains %s.", val_desc) : 0)); } } }
0
[ "CWE-209" ]
postgres
804b6b6db4dcfc590a468e7be390738f9f7755fb
69,281,795,697,146,830,000,000,000,000,000,000,000
55
Fix column-privilege leak in error-message paths While building error messages to return to the user, BuildIndexValueDescription, ExecBuildSlotValueDescription and ri_ReportViolation would happily include the entire key or entire row in the result returned to the user, even if the user didn't have access to view all of the columns being included. Instead, include only those columns which the user is providing or which the user has select rights on. If the user does not have any rights to view the table or any of the columns involved then no detail is provided and a NULL value is returned from BuildIndexValueDescription and ExecBuildSlotValueDescription. Note that, for key cases, the user must have access to all of the columns for the key to be shown; a partial key will not be returned. Further, in master only, do not return any data for cases where row security is enabled on the relation and row security should be applied for the user. This required a bit of refactoring and moving of things around related to RLS- note the addition of utils/misc/rls.c. Back-patch all the way, as column-level privileges are now in all supported versions. This has been assigned CVE-2014-8161, but since the issue and the patch have already been publicized on pgsql-hackers, there's no point in trying to hide this commit.
int evtchn_get(evtchn_port_t evtchn) { int irq; struct irq_info *info; int err = -ENOENT; if (evtchn >= xen_evtchn_max_channels()) return -EINVAL; mutex_lock(&irq_mapping_update_lock); irq = get_evtchn_to_irq(evtchn); if (irq == -1) goto done; info = info_for_irq(irq); if (!info) goto done; err = -EINVAL; if (info->refcnt <= 0) goto done; info->refcnt++; err = 0; done: mutex_unlock(&irq_mapping_update_lock); return err; }
0
[ "CWE-400", "CWE-703" ]
linux
e99502f76271d6bc4e374fe368c50c67a1fd3070
313,042,986,819,520,200,000,000,000,000,000,000,000
31
xen/events: defer eoi in case of excessive number of events In case rogue guests are sending events at high frequency it might happen that xen_evtchn_do_upcall() won't stop processing events in dom0. As this is done in irq handling a crash might be the result. In order to avoid that, delay further inter-domain events after some time in xen_evtchn_do_upcall() by forcing eoi processing into a worker on the same cpu, thus inhibiting new events coming in. The time after which eoi processing is to be delayed is configurable via a new module parameter "event_loop_timeout" which specifies the maximum event loop time in jiffies (default: 2, the value was chosen after some tests showing that a value of 2 was the lowest with an only slight drop of dom0 network throughput while multiple guests performed an event storm). How long eoi processing will be delayed can be specified via another parameter "event_eoi_delay" (again in jiffies, default 10, again the value was chosen after testing with different delay values). This is part of XSA-332. Cc: [email protected] Reported-by: Julien Grall <[email protected]> Signed-off-by: Juergen Gross <[email protected]> Reviewed-by: Stefano Stabellini <[email protected]> Reviewed-by: Wei Liu <[email protected]>
static int __ext4_check_blockref(const char *function, struct inode *inode, __le32 *p, unsigned int max) { __le32 *bref = p; unsigned int blk; while (bref < p+max) { blk = le32_to_cpu(*bref++); if (blk && unlikely(!ext4_data_block_valid(EXT4_SB(inode->i_sb), blk, 1))) { __ext4_error(inode->i_sb, function, "invalid block reference %u " "in inode #%lu", blk, inode->i_ino); return -EIO; } } return 0; }
0
[ "CWE-703" ]
linux
744692dc059845b2a3022119871846e74d4f6e11
278,641,004,562,202,800,000,000,000,000,000,000,000
19
ext4: use ext4_get_block_write in buffer write Allocate uninitialized extent before ext4 buffer write and convert the extent to initialized after io completes. The purpose is to make sure an extent can only be marked initialized after it has been written with new data so we can safely drop the i_mutex lock in ext4 DIO read without exposing stale data. This helps to improve multi-thread DIO read performance on high-speed disks. Skip the nobh and data=journal mount cases to make things simple for now. Signed-off-by: Jiaying Zhang <[email protected]> Signed-off-by: "Theodore Ts'o" <[email protected]>
GF_Err gf_isom_enum_sample_aux_data(GF_ISOFile *the_file, u32 trackNumber, u32 sample_number, u32 *sai_idx, u32 *sai_type, u32 *sai_parameter, u8 **sai_data, u32 *sai_size) { GF_TrackBox *trak; u32 i, count; if (!sai_type || !sai_idx || !sai_data || !sai_size) return GF_BAD_PARAM; if (sai_parameter) *sai_parameter = 0; *sai_type = 0; trak = gf_isom_get_track_from_file(the_file, trackNumber); if (!trak) return GF_BAD_PARAM; if (!trak->Media->information->sampleTable->sai_sizes) return GF_OK; if (!trak->Media->information->sampleTable->sai_offsets) return GF_OK; #ifndef GPAC_DISABLE_ISOM_FRAGMENTS if (sample_number <= trak->sample_count_at_seg_start) return GF_BAD_PARAM; sample_number -= trak->sample_count_at_seg_start; #endif count = gf_list_count(trak->Media->information->sampleTable->sai_sizes); for (i=0; i<count; i++) { GF_Err e; GF_SampleAuxiliaryInfoSizeBox *saiz; GF_SampleAuxiliaryInfoOffsetBox *saio=NULL; u32 j; saiz = (GF_SampleAuxiliaryInfoSizeBox*)gf_list_get(trak->Media->information->sampleTable->sai_sizes, i); switch (saiz->aux_info_type) { case GF_ISOM_CENC_SCHEME: case GF_ISOM_CBC_SCHEME: case GF_ISOM_CENS_SCHEME: case GF_ISOM_CBCS_SCHEME: case GF_ISOM_PIFF_SCHEME: case 0: continue; default: break; } if (*sai_idx>i) continue; for (j=0; j<gf_list_count(trak->Media->information->sampleTable->sai_offsets); j++) { saio = (GF_SampleAuxiliaryInfoOffsetBox*)gf_list_get(trak->Media->information->sampleTable->sai_offsets, j); if ((saio->aux_info_type == saiz->aux_info_type) && (saio->aux_info_type_parameter == saiz->aux_info_type_parameter)) break; saio = NULL; } if (!saio) continue; if (!saio->offsets && !saio->sai_data) continue; u64 offset = saio->offsets ? saio->offsets[0] : 0; u32 nb_saio = saio->entry_count; if ((nb_saio>1) && (saio->entry_count != saiz->sample_count)) continue; *sai_type = saiz->aux_info_type; if (sai_parameter) *sai_parameter = saiz->aux_info_type_parameter; (*sai_idx)++; if (nb_saio == 1) { for (j=0; j < sample_number-1; j++) { u32 size = saiz->default_sample_info_size ? saiz->default_sample_info_size : saiz->sample_info_size[j]; offset += size; } } else { offset = saio->offsets[sample_number-1]; } *sai_size = saiz->default_sample_info_size ? saiz->default_sample_info_size : saiz->sample_info_size[j]; if (*sai_size) { *sai_data = gf_malloc( *sai_size); if (! *sai_data) return GF_OUT_OF_MEM; } e = GF_OK; if (saio->sai_data) { if (offset + *sai_size <= saio->sai_data->dataSize) { memcpy(*sai_data, saio->sai_data->data + offset, *sai_size); } else { e = GF_IO_ERR; } } else { u64 cur_position = gf_bs_get_position(the_file->movieFileMap->bs); gf_bs_seek(the_file->movieFileMap->bs, offset); u32 nb_read = gf_bs_read_data(the_file->movieFileMap->bs, *sai_data, *sai_size); if (nb_read != *sai_size) e = GF_IO_ERR; gf_bs_seek(the_file->movieFileMap->bs, cur_position); } if (e) { gf_free(*sai_data); *sai_data = NULL; *sai_size = 0; GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[isobmf] Failed to clone sai data: %s\n", gf_error_to_string(e) )); } return e; } return GF_OK; }
0
[ "CWE-787" ]
gpac
f0a41d178a2dc5ac185506d9fa0b0a58356b16f7
204,210,131,977,082,750,000,000,000,000,000,000,000
98
fixed #2120
void vrend_renderer_detach_res_ctx(int ctx_id, int res_handle) { struct vrend_context *ctx = vrend_lookup_renderer_ctx(ctx_id); if (!ctx) return; vrend_renderer_detach_res_ctx_p(ctx, res_handle); }
0
[ "CWE-787" ]
virglrenderer
cbc8d8b75be360236cada63784046688aeb6d921
181,255,118,765,326,240,000,000,000,000,000,000,000
7
vrend: check transfer bounds for negative values too and report error Closes #138 Signed-off-by: Gert Wollny <[email protected]> Reviewed-by: Emil Velikov <[email protected]>
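The one-line summary refers to transfer boxes whose dimensions live in signed fields: they must be rejected when negative, not only when too large, because a negative width reinterpreted as unsigned becomes huge. A generic sketch of such a check (the struct is a stand-in, not virglrenderer's):

```c
struct transfer_box { int x, y, w, h; };

/* Bounds check that also catches negative values: a negative width
 * would wrap to an enormous unsigned size later and overflow the copy. */
static int box_valid(const struct transfer_box *b, int tex_w, int tex_h)
{
    if (b->x < 0 || b->y < 0 || b->w < 0 || b->h < 0)
        return 0;
    if (b->x > tex_w - b->w || b->y > tex_h - b->h)
        return 0;     /* written this way so x + w never overflows int */
    return 1;
}
```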
static int handle_name_request(enum request_types request_type, const char *name, const char *domain_name, struct berval **berval) { int ret; char *fq_name = NULL; struct passwd pwd; struct group grp; char *sid_str = NULL; enum sss_id_type id_type; size_t buf_len; char *buf = NULL; struct sss_nss_kv *kv_list = NULL; ret = asprintf(&fq_name, "%s%c%s", name, SSSD_DOMAIN_SEPARATOR, domain_name); if (ret == -1) { ret = LDAP_OPERATIONS_ERROR; fq_name = NULL; /* content is undefined according to asprintf(3) */ goto done; } if (request_type == REQ_SIMPLE) { ret = sss_nss_getsidbyname(fq_name, &sid_str, &id_type); if (ret != 0) { if (ret == ENOENT) { ret = LDAP_NO_SUCH_OBJECT; } else { ret = LDAP_OPERATIONS_ERROR; } goto done; } ret = pack_ber_sid(sid_str, berval); } else { ret = get_buffer(&buf_len, &buf); if (ret != LDAP_SUCCESS) { goto done; } ret = getpwnam_r_wrapper(MAX_BUF, fq_name, &pwd, &buf, &buf_len); if (ret == 0) { if (request_type == REQ_FULL_WITH_GROUPS) { ret = sss_nss_getorigbyname(pwd.pw_name, &kv_list, &id_type); if (ret != 0 || !(id_type == SSS_ID_TYPE_UID || id_type == SSS_ID_TYPE_BOTH)) { if (ret == ENOENT) { ret = LDAP_NO_SUCH_OBJECT; } else { ret = LDAP_OPERATIONS_ERROR; } goto done; } } ret = pack_ber_user((request_type == REQ_FULL ? RESP_USER : RESP_USER_GROUPLIST), domain_name, pwd.pw_name, pwd.pw_uid, pwd.pw_gid, pwd.pw_gecos, pwd.pw_dir, pwd.pw_shell, kv_list, berval); } else if (ret == ENOMEM || ret == ERANGE) { ret = LDAP_OPERATIONS_ERROR; goto done; } else { /* no user entry found */ /* according to the getpwnam() man page there are a couple of * error codes which can indicate that the user was not found. To * be on the safe side we fail back to the group lookup on all * errors. */ ret = getgrnam_r_wrapper(MAX_BUF, fq_name, &grp, &buf, &buf_len); if (ret != 0) { if (ret == ENOMEM || ret == ERANGE) { ret = LDAP_OPERATIONS_ERROR; } else { ret = LDAP_NO_SUCH_OBJECT; } goto done; } if (request_type == REQ_FULL_WITH_GROUPS) { ret = sss_nss_getorigbyname(grp.gr_name, &kv_list, &id_type); if (ret != 0 || !(id_type == SSS_ID_TYPE_GID || id_type == SSS_ID_TYPE_BOTH)) { if (ret == ENOENT) { ret = LDAP_NO_SUCH_OBJECT; } else { ret = LDAP_OPERATIONS_ERROR; } goto done; } } ret = pack_ber_group((request_type == REQ_FULL ? RESP_GROUP : RESP_GROUP_MEMBERS), domain_name, grp.gr_name, grp.gr_gid, grp.gr_mem, kv_list, berval); } } done: sss_nss_free_kv(kv_list); free(fq_name); free(sid_str); free(buf); return ret; }
1
[ "CWE-19" ]
freeipa
c15a407cbfaed163a933ab137eed16387efe25d2
104,569,710,063,349,910,000,000,000,000,000,000,000
106
extdom: make nss buffer configurable The get*_r_wrapper() calls expect a maximum buffer size to avoid memory shortage if too many threads try to allocate buffers e.g. for large groups. With this patch this size can be configured by setting ipaExtdomMaxNssBufSize in the plugin config object cn=ipa_extdom_extop,cn=plugins,cn=config. Related to https://fedorahosted.org/freeipa/ticket/4908 Reviewed-By: Alexander Bokovoy <[email protected]>
void hci_sock_cleanup(void) { if (bt_sock_unregister(BTPROTO_HCI) < 0) BT_ERR("HCI socket unregistration failed"); proto_unregister(&hci_sk_proto); }
0
[ "CWE-200" ]
linux
3f68ba07b1da811bf383b4b701b129bfcb2e4988
297,389,465,243,242,400,000,000,000,000,000,000,000
7
Bluetooth: HCI - Fix info leak via getsockname() The HCI code fails to initialize the hci_channel member of struct sockaddr_hci and that for leaks two bytes kernel stack via the getsockname() syscall. Initialize hci_channel with 0 to avoid the info leak. Signed-off-by: Mathias Krause <[email protected]> Cc: Marcel Holtmann <[email protected]> Cc: Gustavo Padovan <[email protected]> Cc: Johan Hedberg <[email protected]> Signed-off-by: David S. Miller <[email protected]>
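The leak pattern here is a partially initialized struct copied out to user space, carrying stale stack bytes in the untouched fields. The standard fix is to zero the whole struct first, sketched below in plain userspace C rather than the kernel's getsockname() path:

```c
#include <string.h>

struct sockaddr_hci_like {
    unsigned short family;
    unsigned short dev;
    unsigned short channel;   /* the field that was left uninitialized */
};

static void fill_name(struct sockaddr_hci_like *out,
                      unsigned short family, unsigned short dev)
{
    memset(out, 0, sizeof(*out)); /* no stack garbage reaches the caller */
    out->family = family;
    out->dev = dev;
    /* channel intentionally stays 0 */
}
```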
new_cached_dir(char *s, time_t published) { cached_dir_t *d = tor_malloc_zero(sizeof(cached_dir_t)); d->refcnt = 1; d->dir = s; d->dir_len = strlen(s); d->published = published; if (tor_gzip_compress(&(d->dir_z), &(d->dir_z_len), d->dir, d->dir_len, ZLIB_METHOD)) { log_warn(LD_BUG, "Error compressing directory"); } return d; }
0
[ "CWE-264" ]
tor
00fffbc1a15e2696a89c721d0c94dc333ff419ef
32,244,452,581,394,370,000,000,000,000,000,000,000
13
Don't give the Guard flag to relays without the CVE-2011-2768 fix
void set_block_number(uint64_t block_number) { block_number_ = block_number; }
0
[ "CWE-20" ]
libvpx
f00890eecdf8365ea125ac16769a83aa6b68792d
157,808,430,453,024,060,000,000,000,000,000,000,000
1
update libwebm to libwebm-1.0.0.27-352-g6ab9fcf https://chromium.googlesource.com/webm/libwebm/+log/af81f26..6ab9fcf Change-Id: I9d56e1fbaba9b96404b4fbabefddc1a85b79c25d
void CairoOutputDev::updateLineWidth(GfxState *state) { LOG(printf ("line width: %f\n", state->getLineWidth())); if (state->getLineWidth() == 0.0) { /* find out how big pixels (device unit) are in the x and y directions * choose the smaller of the two as our line width */ double x = 1.0, y = 1.0; cairo_device_to_user_distance(cairo, &x, &y); cairo_set_line_width (cairo, MIN(fabs(x),fabs(y))); } else { cairo_set_line_width (cairo, state->getLineWidth()); } if (cairo_shape) cairo_set_line_width (cairo_shape, cairo_get_line_width (cairo)); }
0
[]
poppler
abf167af8b15e5f3b510275ce619e6fdb42edd40
201,179,922,522,010,830,000,000,000,000,000,000,000
14
Implement tiling/patterns in SplashOutputDev Fixes bug 13518
term_job_running_check(term_T *term, int check_job_status) { /* Also consider the job finished when the channel is closed, to avoid a * race condition when updating the title. */ if (term != NULL && term->tl_job != NULL && channel_is_open(term->tl_job->jv_channel)) { if (check_job_status) job_status(term->tl_job); return (term->tl_job->jv_status == JOB_STARTED || term->tl_job->jv_channel->ch_keep_open); } return FALSE; }
0
[ "CWE-476" ]
vim
cd929f7ba8cc5b6d6dcf35c8b34124e969fed6b8
32,944,484,890,436,150,000,000,000,000,000,000,000
15
patch 8.1.0633: crash when out of memory while opening a terminal window Problem: Crash when out of memory while opening a terminal window. Solution: Handle out-of-memory more gracefully.
void SetAttrValue(const NameAttrList& value, AttrValue* out) { *out->mutable_func() = value; }
0
[ "CWE-369", "CWE-674" ]
tensorflow
e07e1c3d26492c06f078c7e5bf2d138043e199c1
196,226,186,515,969,120,000,000,000,000,000,000,000
3
Prevent memory overflow in ParseAttrValue from nested tensors. PiperOrigin-RevId: 370108442 Change-Id: I84d64a5e8895a6aeffbf4749841b4c54d51b5889
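The fix caps recursion depth while parsing nested structures, since unbounded nesting otherwise exhausts the stack. A generic depth-limited recursive walk in C; the cap value and node shape are illustrative, not TensorFlow's:

```c
#include <stdbool.h>
#include <stddef.h>

#define MAX_NEST_DEPTH 100   /* illustrative cap, not TensorFlow's value */

struct node {
    struct node *child;   /* NULL when this is a leaf */
};

/* Reject input nested deeper than MAX_NEST_DEPTH instead of recursing
 * until the stack overflows. */
static bool validate(const struct node *n, int depth)
{
    if (depth > MAX_NEST_DEPTH)
        return false;
    if (n->child == NULL)
        return true;
    return validate(n->child, depth + 1);
}
```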
void CServer::ProcessClientPacket(CNetChunk *pPacket) { int ClientID = pPacket->m_ClientID; CUnpacker Unpacker; Unpacker.Reset(pPacket->m_pData, pPacket->m_DataSize); // unpack msgid and system flag int Msg = Unpacker.GetInt(); int Sys = Msg&1; Msg >>= 1; if(Unpacker.Error()) return; if(Sys) { // system message if(Msg == NETMSG_INFO) { if((pPacket->m_Flags&NET_CHUNKFLAG_VITAL) != 0 && m_aClients[ClientID].m_State == CClient::STATE_AUTH) { const char *pVersion = Unpacker.GetString(CUnpacker::SANITIZE_CC); if(str_comp(pVersion, GameServer()->NetVersion()) != 0) { // wrong version char aReason[256]; str_format(aReason, sizeof(aReason), "Wrong version. Server is running '%s' and client '%s'", GameServer()->NetVersion(), pVersion); m_NetServer.Drop(ClientID, aReason); return; } const char *pPassword = Unpacker.GetString(CUnpacker::SANITIZE_CC); if(Config()->m_Password[0] != 0 && str_comp(Config()->m_Password, pPassword) != 0) { // wrong password m_NetServer.Drop(ClientID, "Wrong password"); return; } m_aClients[ClientID].m_Version = Unpacker.GetInt(); m_aClients[ClientID].m_State = CClient::STATE_CONNECTING; SendMap(ClientID); } } else if(Msg == NETMSG_REQUEST_MAP_DATA) { if((pPacket->m_Flags&NET_CHUNKFLAG_VITAL) != 0 && (m_aClients[ClientID].m_State == CClient::STATE_CONNECTING || m_aClients[ClientID].m_State == CClient::STATE_CONNECTING_AS_SPEC)) { int ChunkSize = MAP_CHUNK_SIZE; // send map chunks for(int i = 0; i < m_MapChunksPerRequest && m_aClients[ClientID].m_MapChunk >= 0; ++i) { int Chunk = m_aClients[ClientID].m_MapChunk; int Offset = Chunk * ChunkSize; // check for last part if(Offset+ChunkSize >= m_CurrentMapSize) { ChunkSize = m_CurrentMapSize-Offset; m_aClients[ClientID].m_MapChunk = -1; } else m_aClients[ClientID].m_MapChunk++; CMsgPacker Msg(NETMSG_MAP_DATA, true); Msg.AddRaw(&m_pCurrentMapData[Offset], ChunkSize); SendMsg(&Msg, MSGFLAG_VITAL|MSGFLAG_FLUSH, ClientID); if(Config()->m_Debug) { char aBuf[64]; str_format(aBuf, sizeof(aBuf), "sending chunk %d with size %d", Chunk, ChunkSize); Console()->Print(IConsole::OUTPUT_LEVEL_DEBUG, "server", aBuf); } } } } else if(Msg == NETMSG_READY) { if((pPacket->m_Flags&NET_CHUNKFLAG_VITAL) != 0 && (m_aClients[ClientID].m_State == CClient::STATE_CONNECTING || m_aClients[ClientID].m_State == CClient::STATE_CONNECTING_AS_SPEC)) { char aAddrStr[NETADDR_MAXSTRSIZE]; net_addr_str(m_NetServer.ClientAddr(ClientID), aAddrStr, sizeof(aAddrStr), true); char aBuf[256]; str_format(aBuf, sizeof(aBuf), "player is ready. ClientID=%d addr=%s", ClientID, aAddrStr); Console()->Print(IConsole::OUTPUT_LEVEL_ADDINFO, "server", aBuf); bool ConnectAsSpec = m_aClients[ClientID].m_State == CClient::STATE_CONNECTING_AS_SPEC; m_aClients[ClientID].m_State = CClient::STATE_READY; GameServer()->OnClientConnected(ClientID, ConnectAsSpec); SendConnectionReady(ClientID); } } else if(Msg == NETMSG_ENTERGAME) { if((pPacket->m_Flags&NET_CHUNKFLAG_VITAL) != 0 && m_aClients[ClientID].m_State == CClient::STATE_READY && GameServer()->IsClientReady(ClientID)) { char aAddrStr[NETADDR_MAXSTRSIZE]; net_addr_str(m_NetServer.ClientAddr(ClientID), aAddrStr, sizeof(aAddrStr), true); char aBuf[256]; str_format(aBuf, sizeof(aBuf), "player has entered the game. ClientID=%d addr=%s", ClientID, aAddrStr); Console()->Print(IConsole::OUTPUT_LEVEL_STANDARD, "server", aBuf); m_aClients[ClientID].m_State = CClient::STATE_INGAME; SendServerInfo(ClientID); GameServer()->OnClientEnter(ClientID); } } else if(Msg == NETMSG_INPUT) { CClient::CInput *pInput; int64 TagTime; int64 Now = time_get(); m_aClients[ClientID].m_LastAckedSnapshot = Unpacker.GetInt(); int IntendedTick = Unpacker.GetInt(); int Size = Unpacker.GetInt(); // check for errors if(Unpacker.Error() || Size/4 > MAX_INPUT_SIZE) return; if(m_aClients[ClientID].m_LastAckedSnapshot > 0) m_aClients[ClientID].m_SnapRate = CClient::SNAPRATE_FULL; // add message to report the input timing // skip packets that are old if(IntendedTick > m_aClients[ClientID].m_LastInputTick) { int TimeLeft = ((TickStartTime(IntendedTick)-Now)*1000) / time_freq(); CMsgPacker Msg(NETMSG_INPUTTIMING, true); Msg.AddInt(IntendedTick); Msg.AddInt(TimeLeft); SendMsg(&Msg, 0, ClientID); } m_aClients[ClientID].m_LastInputTick = IntendedTick; pInput = &m_aClients[ClientID].m_aInputs[m_aClients[ClientID].m_CurrentInput]; if(IntendedTick <= Tick()) IntendedTick = Tick()+1; pInput->m_GameTick = IntendedTick; for(int i = 0; i < Size/4; i++) pInput->m_aData[i] = Unpacker.GetInt(); int PingCorrection = clamp(Unpacker.GetInt(), 0, 50); if(m_aClients[ClientID].m_Snapshots.Get(m_aClients[ClientID].m_LastAckedSnapshot, &TagTime, 0, 0) >= 0) { m_aClients[ClientID].m_Latency = (int)(((Now-TagTime)*1000)/time_freq()); m_aClients[ClientID].m_Latency = max(0, m_aClients[ClientID].m_Latency - PingCorrection); } mem_copy(m_aClients[ClientID].m_LatestInput.m_aData, pInput->m_aData, MAX_INPUT_SIZE*sizeof(int)); m_aClients[ClientID].m_CurrentInput++; m_aClients[ClientID].m_CurrentInput %= 200; // call the mod with the fresh input data if(m_aClients[ClientID].m_State == CClient::STATE_INGAME) GameServer()->OnClientDirectInput(ClientID, m_aClients[ClientID].m_LatestInput.m_aData); } else if(Msg == NETMSG_RCON_CMD) { const char *pCmd = Unpacker.GetString(); if((pPacket->m_Flags&NET_CHUNKFLAG_VITAL) != 0 && Unpacker.Error() == 0 && m_aClients[ClientID].m_Authed) { char aBuf[256]; str_format(aBuf, sizeof(aBuf), "ClientID=%d rcon='%s'", ClientID, pCmd); Console()->Print(IConsole::OUTPUT_LEVEL_ADDINFO, "server", aBuf); m_RconClientID = ClientID; m_RconAuthLevel = m_aClients[ClientID].m_Authed; Console()->SetAccessLevel(m_aClients[ClientID].m_Authed == AUTHED_ADMIN ? IConsole::ACCESS_LEVEL_ADMIN : IConsole::ACCESS_LEVEL_MOD); Console()->ExecuteLineFlag(pCmd, CFGFLAG_SERVER); Console()->SetAccessLevel(IConsole::ACCESS_LEVEL_ADMIN); m_RconClientID = IServer::RCON_CID_SERV; m_RconAuthLevel = AUTHED_ADMIN; } } else if(Msg == NETMSG_RCON_AUTH) { const char *pPw = Unpacker.GetString(CUnpacker::SANITIZE_CC); if((pPacket->m_Flags&NET_CHUNKFLAG_VITAL) != 0 && Unpacker.Error() == 0) { if(Config()->m_SvRconPassword[0] == 0 && Config()->m_SvRconModPassword[0] == 0) { if(!m_aClients[ClientID].m_NoRconNote) { SendRconLine(ClientID, "No rcon password set on server. Set sv_rcon_password and/or sv_rcon_mod_password to enable the remote console."); m_aClients[ClientID].m_NoRconNote = true; } } else if(Config()->m_SvRconPassword[0] && str_comp(pPw, Config()->m_SvRconPassword) == 0) { CMsgPacker Msg(NETMSG_RCON_AUTH_ON, true); SendMsg(&Msg, MSGFLAG_VITAL, ClientID); m_aClients[ClientID].m_Authed = AUTHED_ADMIN; m_aClients[ClientID].m_pRconCmdToSend = Console()->FirstCommandInfo(IConsole::ACCESS_LEVEL_ADMIN, CFGFLAG_SERVER); if(m_aClients[ClientID].m_Version >= MIN_MAPLIST_CLIENTVERSION) m_aClients[ClientID].m_pMapListEntryToSend = m_pFirstMapEntry; SendRconLine(ClientID, "Admin authentication successful. Full remote console access granted."); char aBuf[256]; str_format(aBuf, sizeof(aBuf), "ClientID=%d authed (admin)", ClientID); Console()->Print(IConsole::OUTPUT_LEVEL_STANDARD, "server", aBuf); } else if(Config()->m_SvRconModPassword[0] && str_comp(pPw, Config()->m_SvRconModPassword) == 0) { CMsgPacker Msg(NETMSG_RCON_AUTH_ON, true); SendMsg(&Msg, MSGFLAG_VITAL, ClientID); m_aClients[ClientID].m_Authed = AUTHED_MOD; m_aClients[ClientID].m_pRconCmdToSend = Console()->FirstCommandInfo(IConsole::ACCESS_LEVEL_MOD, CFGFLAG_SERVER); SendRconLine(ClientID, "Moderator authentication successful. Limited remote console access granted."); const IConsole::CCommandInfo *pInfo = Console()->GetCommandInfo("sv_map", CFGFLAG_SERVER, false); if(pInfo && pInfo->GetAccessLevel() == IConsole::ACCESS_LEVEL_MOD && m_aClients[ClientID].m_Version >= MIN_MAPLIST_CLIENTVERSION) m_aClients[ClientID].m_pMapListEntryToSend = m_pFirstMapEntry; char aBuf[256]; str_format(aBuf, sizeof(aBuf), "ClientID=%d authed (moderator)", ClientID); Console()->Print(IConsole::OUTPUT_LEVEL_STANDARD, "server", aBuf); } else if(Config()->m_SvRconMaxTries && m_ServerBan.IsBannable(m_NetServer.ClientAddr(ClientID))) { m_aClients[ClientID].m_AuthTries++; char aBuf[128]; str_format(aBuf, sizeof(aBuf), "Wrong password %d/%d.", m_aClients[ClientID].m_AuthTries, Config()->m_SvRconMaxTries); SendRconLine(ClientID, aBuf); if(m_aClients[ClientID].m_AuthTries >= Config()->m_SvRconMaxTries) { if(!Config()->m_SvRconBantime) m_NetServer.Drop(ClientID, "Too many remote console authentication tries"); else m_ServerBan.BanAddr(m_NetServer.ClientAddr(ClientID), Config()->m_SvRconBantime*60, "Too many remote console authentication tries"); } } else { SendRconLine(ClientID, "Wrong password."); } } } else if(Msg == NETMSG_PING) { CMsgPacker Msg(NETMSG_PING_REPLY, true); SendMsg(&Msg, 0, ClientID); } else { if(Config()->m_Debug) { char aHex[] = "0123456789ABCDEF"; char aBuf[512]; for(int b = 0; b < pPacket->m_DataSize && b < 32; b++) { aBuf[b*3] = aHex[((const unsigned char *)pPacket->m_pData)[b]>>4]; aBuf[b*3+1] = aHex[((const unsigned char *)pPacket->m_pData)[b]&0xf]; aBuf[b*3+2] = ' '; aBuf[b*3+3] = 0; } char aBufMsg[256]; str_format(aBufMsg, sizeof(aBufMsg), "strange message ClientID=%d msg=%d data_size=%d", ClientID, Msg, pPacket->m_DataSize); Console()->Print(IConsole::OUTPUT_LEVEL_DEBUG, "server", aBufMsg); Console()->Print(IConsole::OUTPUT_LEVEL_DEBUG, "server", aBuf); } } } else { // game message if((pPacket->m_Flags&NET_CHUNKFLAG_VITAL) != 0 && m_aClients[ClientID].m_State >= CClient::STATE_READY) GameServer()->OnMessage(Msg, &Unpacker, ClientID); } }
0
[ "CWE-20", "CWE-703", "CWE-400" ]
teeworlds
c68402fa7e279d42886d5951d1ea8ac2facc1ea5
251,902,769,085,037,940,000,000,000,000,000,000,000
283
changed a check
static void __io_complete_rw(struct io_kiocb *req, long res, long res2, unsigned int issue_flags) { int cflags = 0; if (req->rw.kiocb.ki_flags & IOCB_WRITE) kiocb_end_write(req); if (res != req->result) { if ((res == -EAGAIN || res == -EOPNOTSUPP) && io_rw_should_reissue(req)) { req->flags |= REQ_F_REISSUE; return; } req_set_fail(req); } if (req->flags & REQ_F_BUFFER_SELECTED) cflags = io_put_rw_kbuf(req); __io_req_complete(req, issue_flags, res, cflags); }
0
[ "CWE-125" ]
linux
89c2b3b74918200e46699338d7bcc19b1ea12110
6,434,978,445,296,537,000,000,000,000,000,000,000
19
io_uring: reexpand under-reexpanded iters [ 74.211232] BUG: KASAN: stack-out-of-bounds in iov_iter_revert+0x809/0x900 [ 74.212778] Read of size 8 at addr ffff888025dc78b8 by task syz-executor.0/828 [ 74.214756] CPU: 0 PID: 828 Comm: syz-executor.0 Not tainted 5.14.0-rc3-next-20210730 #1 [ 74.216525] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.14.0-0-g155821a1990b-prebuilt.qemu.org 04/01/2014 [ 74.219033] Call Trace: [ 74.219683] dump_stack_lvl+0x8b/0xb3 [ 74.220706] print_address_description.constprop.0+0x1f/0x140 [ 74.224226] kasan_report.cold+0x7f/0x11b [ 74.226085] iov_iter_revert+0x809/0x900 [ 74.227960] io_write+0x57d/0xe40 [ 74.232647] io_issue_sqe+0x4da/0x6a80 [ 74.242578] __io_queue_sqe+0x1ac/0xe60 [ 74.245358] io_submit_sqes+0x3f6e/0x76a0 [ 74.248207] __do_sys_io_uring_enter+0x90c/0x1a20 [ 74.257167] do_syscall_64+0x3b/0x90 [ 74.257984] entry_SYSCALL_64_after_hwframe+0x44/0xae old_size = iov_iter_count(); ... iov_iter_revert(old_size - iov_iter_count()); If iov_iter_revert() is done base on the initial size as above, and the iter is truncated and not reexpanded in the middle, it miscalculates borders causing problems. This trace is due to no one reexpanding after generic_write_checks(). Now iters store how many bytes has been truncated, so reexpand them to the initial state right before reverting. Cc: [email protected] Reported-by: Palash Oswal <[email protected]> Reported-by: Sudip Mukherjee <[email protected]> Reported-and-tested-by: [email protected] Signed-off-by: Pavel Begunkov <[email protected]> Signed-off-by: Al Viro <[email protected]>
static int nfs4_check_lease(struct nfs_client *clp) { struct rpc_cred *cred; int status = -NFS4ERR_EXPIRED; /* Is the client already known to have an expired lease? */ if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state)) return 0; cred = nfs4_get_renew_cred(clp); if (cred == NULL) { cred = nfs4_get_setclientid_cred(clp); if (cred == NULL) goto out; } status = nfs4_proc_renew(clp, cred); put_rpccred(cred); out: nfs4_recovery_handle_error(clp, status); return status; }
0
[ "CWE-703" ]
linux
dc0b027dfadfcb8a5504f7d8052754bf8d501ab9
245,623,559,355,766,220,000,000,000,000,000,000,000
20
NFSv4: Convert the open and close ops to use fmode Signed-off-by: Trond Myklebust <[email protected]>
int zmq::stream_engine_t::write_subscription_msg (msg_t *msg_) { msg_t subscription; // Inject the subscription message, so that also // ZMQ 2.x peers receive published messages. int rc = subscription.init_size (1); errno_assert (rc == 0); *(unsigned char*) subscription.data () = 1; rc = session->push_msg (&subscription); if (rc == -1) return -1; process_msg = &stream_engine_t::push_msg_to_session; return push_msg_to_session (msg_); }
0
[]
libzmq
77f14aad95cdf0d2a244ae9b4a025e5ba0adf01a
290,677,005,048,957,730,000,000,000,000,000,000,000
16
Problem: stream_engine.cpp security can be downgraded Solution: accept only the mechanism defined by the socket options. I've not tested this yet, so it's a speculative fix.
bgp_put_cap_add_path(struct bgp_proto *p, byte *buf) { *buf++ = 69; /* Capability 69: Support for ADD-PATH */ *buf++ = 4; /* Capability data length */ *buf++ = 0; /* Appropriate AF */ *buf++ = BGP_AF; *buf++ = 1; /* SAFI 1 */ *buf++ = p->cf->add_path; return buf; }
0
[ "CWE-787" ]
bird
1657c41c96b3c07d9265b07dd4912033ead4124b
122,709,515,670,293,510,000,000,000,000,000,000,000
13
BGP: Fix bugs in handling of shutdown messages There is an improper check for valid message size, which may lead to stack overflow and buffer leaks to log when a large message is received. Thanks to Daniel McCarney for bugreport and analysis.
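The message describes an improper length check on a received shutdown message. The defensive pattern it implies, sketched generically (all names invented): validate the claimed length against both the bytes actually received and the destination capacity before copying:

```c
#include <stddef.h>
#include <string.h>

#define MSG_MAX 128   /* illustrative protocol limit */

/* Copy a length-prefixed message only after checking the claimed
 * length against the wire data actually received and the output
 * buffer; returns the message length, or -1 on a malformed packet. */
static int read_shutdown_msg(const unsigned char *pkt, size_t pkt_len,
                             char *out, size_t out_len)
{
    if (pkt_len < 1)
        return -1;
    size_t msg_len = pkt[0];
    if (msg_len > pkt_len - 1 || msg_len >= out_len || msg_len > MSG_MAX)
        return -1;            /* an oversized length would smash the buffer */
    memcpy(out, pkt + 1, msg_len);
    out[msg_len] = '\0';
    return (int)msg_len;
}
```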
xmlNsErr(xmlParserCtxtPtr ctxt, xmlParserErrors error, const char *msg, const xmlChar * info1, const xmlChar * info2, const xmlChar * info3) { if ((ctxt != NULL) && (ctxt->disableSAX != 0) && (ctxt->instate == XML_PARSER_EOF)) return; if (ctxt != NULL) ctxt->errNo = error; __xmlRaiseError(NULL, NULL, NULL, ctxt, NULL, XML_FROM_NAMESPACE, error, XML_ERR_ERROR, NULL, 0, (const char *) info1, (const char *) info2, (const char *) info3, 0, 0, msg, info1, info2, info3); if (ctxt != NULL) ctxt->nsWellFormed = 0; }
0
[ "CWE-125" ]
libxml2
77404b8b69bc122d12231807abf1a837d121b551
286,242,552,186,002,100,000,000,000,000,000,000,000
17
Make sure the parser returns when getting a Stop order patch backported from chromium bug fixes, assuming author is Chris
static void clamp_pollexp_and_set_MAXSTRAT(void) { if (G.poll_exp < MINPOLL) G.poll_exp = MINPOLL; if (G.poll_exp > BIGPOLL) G.poll_exp = BIGPOLL; G.polladj_count = 0; G.stratum = MAXSTRAT; }
0
[ "CWE-399" ]
busybox
150dc7a2b483b8338a3e185c478b4b23ee884e71
281,588,508,830,511,000,000,000,000,000,000,000,000
9
ntpd: respond only to client and symmetric active packets The busybox NTP implementation doesn't check the NTP mode of packets received on the server port and responds to any packet with the right size. This includes responses from another NTP server. An attacker can send a packet with a spoofed source address in order to create an infinite loop of responses between two busybox NTP servers. Adding more packets to the loop increases the traffic between the servers until one of them has a fully loaded CPU and/or network. Signed-off-by: Miroslav Lichvar <[email protected]> Signed-off-by: Denys Vlasenko <[email protected]>
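The fix filters on the NTP mode bits so the server answers only genuine requests. In the NTP header the low three bits of the first byte carry the mode; client is 3 and symmetric active is 1. A sketch of that filter:

```c
#include <stdbool.h>
#include <stdint.h>

#define NTP_MODE_SYM_ACTIVE 1
#define NTP_MODE_CLIENT     3

/* First NTP header byte: LI (2 bits) | VN (3 bits) | mode (3 bits). */
static bool should_respond(uint8_t li_vn_mode)
{
    int mode = li_vn_mode & 0x07;
    /* Ignore server responses (mode 4) and everything else; replying
     * to them enables the response-loop attack described above. */
    return mode == NTP_MODE_CLIENT || mode == NTP_MODE_SYM_ACTIVE;
}
```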
napi_status napi_get_prototype(napi_env env, napi_value object, napi_value* result) { NAPI_PREAMBLE(env); CHECK_ARG(env, result); v8::Local<v8::Context> context = env->context(); v8::Local<v8::Object> obj; CHECK_TO_OBJECT(env, context, obj, object); v8::Local<v8::Value> val = obj->GetPrototype(); *result = v8impl::JsValueFromV8LocalValue(val); return GET_RETURN_STATUS(env); }
0
[ "CWE-191" ]
node
656260b4b65fec3b10f6da3fdc9f11fb941aafb5
208,080,718,081,198,450,000,000,000,000,000,000,000
15
napi: fix memory corruption vulnerability Fixes: https://hackerone.com/reports/784186 CVE-ID: CVE-2020-8174 PR-URL: https://github.com/nodejs-private/node-private/pull/195 Reviewed-By: Anna Henningsen <[email protected]> Reviewed-By: Gabriel Schulhof <[email protected]> Reviewed-By: Michael Dawson <[email protected]> Reviewed-By: Colin Ihrig <[email protected]> Reviewed-By: Rich Trott <[email protected]>
Render_Stroke( int num_indices, int first_index ) { int start_x, start_y, step_x, step_y, x, y; int i; FT_Size size; FT_Stroker stroker = NULL; error = FTDemo_Get_Size( handle, &size ); if ( error ) { /* probably a non-existent bitmap font size */ return error; } INIT_SIZE( size, start_x, start_y, step_x, step_y, x, y ); i = first_index; error = FT_Stroker_New( handle->library, &stroker ); if ( error ) goto Exit; FT_Stroker_Set( stroker, 64, FT_STROKER_LINECAP_ROUND, FT_STROKER_LINEJOIN_ROUND, 0 ); while ( i < num_indices ) { int gindex; FT_GlyphSlot slot; if ( handle->encoding == FT_ENCODING_NONE ) gindex = i; else gindex = FTDemo_Get_Index( handle, i ); error = FT_Load_Glyph( size->face, gindex, handle->load_flags | FT_LOAD_NO_BITMAP ); slot = size->face->glyph; if ( !error && slot->format == FT_GLYPH_FORMAT_OUTLINE ) { FT_Glyph glyph; error = FT_Get_Glyph( slot, &glyph ); if ( error ) goto Next; error = FT_Glyph_Stroke( &glyph, stroker, 1 ); if ( error ) { FT_Done_Glyph( glyph ); goto Next; } error = FTDemo_Draw_Glyph( handle, display, glyph, &x, &y ); FT_Done_Glyph( glyph ); if ( error ) status.Fail++; else if ( X_TOO_LONG( x, size, display ) ) { x = start_x; y += step_y; if ( Y_TOO_LONG( y, size, display ) ) break; } } else { Next: status.Fail++; } i++; } Exit: if ( stroker ) FT_Stroker_Done( stroker ); return error; }
0
[ "CWE-120" ]
freetype2-demos
b995299b73ba4cd259f221f500d4e63095508bec
170,237,171,338,737,800,000,000,000,000,000,000,000
90
Fix Savannah bug #30054. * src/ftdiff.c, src/ftgrid.c, src/ftmulti.c, src/ftstring.c, src/ftview.c: Use precision for `%s' where appropriate to avoid buffer overflows.
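"Use precision for %s" means bounding how many bytes a string conversion may write into a fixed buffer. Both forms below are standard C (shown here with snprintf; the sizes are illustrative):

```c
#include <stdio.h>

void demo(const char *untrusted_name)
{
    char buf[64];

    /* Fixed precision: at most 40 bytes of the string are emitted. */
    snprintf(buf, sizeof(buf), "font: %.40s", untrusted_name);

    /* Runtime precision via '*': the int argument bounds the %s. */
    snprintf(buf, sizeof(buf), "font: %.*s", 40, untrusted_name);
}
```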
void TDStretch::overlapStereo(short *poutput, const short *input) const { int i; short temp; int cnt2; for (i = 0; i < overlapLength ; i ++) { temp = (short)(overlapLength - i); cnt2 = 2 * i; poutput[cnt2] = (input[cnt2] * i + pMidBuffer[cnt2] * temp ) / overlapLength; poutput[cnt2 + 1] = (input[cnt2 + 1] * i + pMidBuffer[cnt2 + 1] * temp ) / overlapLength; } }
0
[ "CWE-617" ]
soundtouch
107f2c5d201a4dfea1b7f15c5957ff2ac9e5f260
251,328,148,460,662,900,000,000,000,000,000,000,000
14
Replaced illegal-number-of-channel assertions with run-time exception
static int collect_expired_timers(struct timer_base *base, struct hlist_head *heads) { unsigned long now = READ_ONCE(jiffies); /* * NOHZ optimization. After a long idle sleep we need to forward the * base to current jiffies. Avoid a loop by searching the bitfield for * the next expiring timer. */ if ((long)(now - base->clk) > 2) { unsigned long next = __next_timer_interrupt(base); /* * If the next timer is ahead of time forward to current * jiffies, otherwise forward to the next expiry time: */ if (time_after(next, now)) { /* * The call site will increment base->clk and then * terminate the expiry loop immediately. */ base->clk = now; return 0; } base->clk = next; } return __collect_expired_timers(base, heads); }
0
[ "CWE-200", "CWE-330" ]
linux
f227e3ec3b5cad859ad15666874405e8c1bbc1d4
152,740,800,052,398,940,000,000,000,000,000,000,000
29
random32: update the net random state on interrupt and activity This modifies the first 32 bits out of the 128 bits of a random CPU's net_rand_state on interrupt or CPU activity to complicate remote observations that could lead to guessing the network RNG's internal state. Note that depending on some network devices' interrupt rate moderation or binding, this re-seeding might happen on every packet or even almost never. In addition, with NOHZ some CPUs might not even get timer interrupts, leaving their local state rarely updated, while they are running networked processes making use of the random state. For this reason, we also perform this update in update_process_times() in order to at least update the state when there is user or system activity, since it's the only case we care about. Reported-by: Amit Klein <[email protected]> Suggested-by: Linus Torvalds <[email protected]> Cc: Eric Dumazet <[email protected]> Cc: "Jason A. Donenfeld" <[email protected]> Cc: Andy Lutomirski <[email protected]> Cc: Kees Cook <[email protected]> Cc: Thomas Gleixner <[email protected]> Cc: Peter Zijlstra <[email protected]> Cc: <[email protected]> Signed-off-by: Willy Tarreau <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
isoent_setup_file_location(struct iso9660 *iso9660, int location) { struct isoent *isoent; struct isoent *np; struct isofile *file; size_t size; int block; int depth; int joliet; int symlocation; int total_block; iso9660->total_file_block = 0; if ((isoent = iso9660->el_torito.catalog) != NULL) { isoent->file->content.location = location; block = (int)((archive_entry_size(isoent->file->entry) + LOGICAL_BLOCK_SIZE -1) >> LOGICAL_BLOCK_BITS); location += block; iso9660->total_file_block += block; } if ((isoent = iso9660->el_torito.boot) != NULL) { isoent->file->content.location = location; size = fd_boot_image_size(iso9660->el_torito.media_type); if (size == 0) size = (size_t)archive_entry_size(isoent->file->entry); block = ((int)size + LOGICAL_BLOCK_SIZE -1) >> LOGICAL_BLOCK_BITS; location += block; iso9660->total_file_block += block; isoent->file->content.blocks = block; } depth = 0; symlocation = -16; if (!iso9660->opt.rr && iso9660->opt.joliet) { joliet = 1; np = iso9660->joliet.rootent; } else { joliet = 0; np = iso9660->primary.rootent; } do { _isoent_file_location(iso9660, np, &symlocation); if (np->subdirs.first != NULL && (joliet || ((iso9660->opt.rr == OPT_RR_DISABLED && depth + 2 < iso9660->primary.max_depth) || (iso9660->opt.rr && depth + 1 < iso9660->primary.max_depth)))) { /* Enter to sub directories. */ np = np->subdirs.first; depth++; continue; } while (np != np->parent) { if (np->drnext == NULL) { /* Return to the parent directory. */ np = np->parent; depth--; } else { np = np->drnext; break; } } } while (np != np->parent); total_block = 0; for (file = iso9660->data_file_list.first; file != NULL; file = file->datanext) { if (!file->write_content) continue; file->cur_content = &(file->content); do { file->cur_content->location = location; location += file->cur_content->blocks; total_block += file->cur_content->blocks; /* Next fragument */ file->cur_content = file->cur_content->next; } while (file->cur_content != NULL); } iso9660->total_file_block += total_block; }
0
[ "CWE-190" ]
libarchive
3014e19820ea53c15c90f9d447ca3e668a0b76c6
190,032,120,284,037,700,000,000,000,000,000,000,000
85
Issue 711: Be more careful about verifying filename lengths when writing ISO9660 archives * Don't cast size_t to int, since this can lead to overflow on machines where sizeof(int) < sizeof(size_t) * Check a + b > limit by writing it as a > limit || b > limit || a + b > limit to avoid problems when a + b wraps around.
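The wrap-around-safe length check this commit message describes is a general C idiom, not specific to libarchive. A minimal standalone sketch of the pattern (the function name and its use of a limit are illustrative assumptions, not code from the commit):

#include <stddef.h>

/* Accept a and b only when a + b <= limit. Checking each operand first
 * means the addition below cannot wrap, provided limit < SIZE_MAX / 2,
 * which holds for the small filename-length limits involved here. */
static int within_limit(size_t a, size_t b, size_t limit)
{
    if (a > limit || b > limit)
        return 0;
    return a + b <= limit;
}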
Delete(asdl_seq * targets, int lineno, int col_offset, int end_lineno,
       int end_col_offset, PyArena *arena)
{
    stmt_ty p;
    p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p));
    if (!p)
        return NULL;
    p->kind = Delete_kind;
    p->v.Delete.targets = targets;
    p->lineno = lineno;
    p->col_offset = col_offset;
    p->end_lineno = end_lineno;
    p->end_col_offset = end_col_offset;
    return p;
}
0
[ "CWE-125" ]
cpython
dcfcd146f8e6fc5c2fc16a4c192a0c5f5ca8c53c
311,728,946,706,392,930,000,000,000,000,000,000,000
15
bpo-35766: Merge typed_ast back into CPython (GH-11645)
absl::optional<uint64_t> toInteger(const Http::RequestHeaderMap&) const override {
    return absl::nullopt;
}
0
[ "CWE-22" ]
envoy
5333b928d8bcffa26ab19bf018369a835f697585
34,860,484,933,759,497,000,000,000,000,000,000,000
3
Implement handling of escaped slash characters in URL path Fixes: CVE-2021-29492 Signed-off-by: Yan Avlasov <[email protected]>
static inline Quantum GetPixelCyan(const Image *restrict image,
    const Quantum *restrict pixel)
{
    return(pixel[image->channel_map[CyanPixelChannel].offset]);
}
0
[ "CWE-119", "CWE-787" ]
ImageMagick
450bd716ed3b9186dd10f9e60f630a3d9eeea2a4
70,546,412,762,520,530,000,000,000,000,000,000,000
5
xmlSchemaCheckRCaseNSRecurseCheckCardinality(xmlSchemaParserCtxtPtr ctxt,
                                             xmlSchemaParticlePtr r,
                                             xmlSchemaParticlePtr b)
{
    xmlSchemaParticlePtr part;
    /* TODO: Error codes (rcase-NSRecurseCheckCardinality). */
    if ((r->children == NULL) || (r->children->children == NULL))
        return (-1);
    /*
     * SPEC "For a group particle to be a `valid restriction` of a
     * wildcard particle..."
     *
     * SPEC (1) "Every member of the {particles} of the group is a `valid
     * restriction` of the wildcard as defined by
     * Particle Valid (Restriction) ($3.9.6)."
     */
    part = (xmlSchemaParticlePtr) r->children->children;
    do {
        if (xmlSchemaCheckCOSParticleRestrict(ctxt, part, b))
            return (1);
        part = (xmlSchemaParticlePtr) part->next;
    } while (part != NULL);
    /*
     * SPEC (2) "The effective total range of the group [...] is a
     * valid restriction of B's occurrence range as defined by
     * Occurrence Range OK ($3.9.6)."
     */
    if (xmlSchemaCheckParticleRangeOK(
            xmlSchemaGetParticleTotalRangeMin(r),
            xmlSchemaGetParticleTotalRangeMax(r),
            b->minOccurs, b->maxOccurs) != 0)
        return (1);
    return (0);
}
0
[ "CWE-134" ]
libxml2
4472c3a5a5b516aaf59b89be602fbce52756c3e9
133,698,552,973,933,650,000,000,000,000,000,000,000
34
Fix some format string warnings with possible format string vulnerability For https://bugzilla.gnome.org/show_bug.cgi?id=761029 Decorate every method in libxml2 with the appropriate LIBXML_ATTR_FORMAT(fmt,args) macro and add some cleanups following the reports.
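The decoration the message refers to leans on the compiler's printf-style argument checking. A hedged sketch of how such a macro is commonly shaped (the exact body in libxml2's headers may differ, and the my_error declaration is purely illustrative):

/* On GCC/Clang, tell the compiler that parameter `fmt` is a printf format
 * string and that the variadic arguments start at `args`; expand to
 * nothing on other compilers. */
#ifdef __GNUC__
#define LIBXML_ATTR_FORMAT(fmt, args) \
    __attribute__((__format__(__printf__, fmt, args)))
#else
#define LIBXML_ATTR_FORMAT(fmt, args)
#endif

/* The compiler can now flag mismatched or attacker-controlled formats. */
void my_error(void *ctx, const char *msg, ...) LIBXML_ATTR_FORMAT(2, 3);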
int CLASS foveon_fixed (void *ptr, int size, const char *name)
{
    void *dp;
    unsigned dim[3];

    if (!name) return 0;
    dp = foveon_camf_matrix (dim, name);
    if (!dp) return 0;
    memcpy (ptr, dp, size*4);
    free (dp);
    return 1;
}
0
[ "CWE-703" ]
LibRaw
11909cc59e712e09b508dda729b99aeaac2b29ad
277,827,628,208,523,140,000,000,000,000,000,000,000
12
cumulated data checks patch
static MagickBooleanType WriteMSLImage(const ImageInfo *image_info,
    Image *image, ExceptionInfo *exception)
{
    Image *msl_image;

    MagickBooleanType status;

    assert(image_info != (const ImageInfo *) NULL);
    assert(image_info->signature == MagickCoreSignature);
    assert(image != (Image *) NULL);
    assert(image->signature == MagickCoreSignature);
    if (image->debug != MagickFalse)
        (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
    msl_image=CloneImage(image,0,0,MagickTrue,exception);
    status=ProcessMSLScript(image_info,&msl_image,exception);
    msl_image=DestroyImageList(msl_image);
    return(status);
}
0
[ "CWE-772" ]
ImageMagick
c9c4ef4e7ca83d8a00effd16723f37946e89fbad
68,756,492,873,778,490,000,000,000,000,000,000,000
20
Fixed leaking of the image when writing an MSL image (#1360).
xmlDocGetRootElement(const xmlDoc *doc) {
    xmlNodePtr ret;

    if (doc == NULL) return(NULL);
    ret = doc->children;
    while (ret != NULL) {
        if (ret->type == XML_ELEMENT_NODE)
            return(ret);
        ret = ret->next;
    }
    return(ret);
}
0
[ "CWE-20" ]
libxml2
bdd66182ef53fe1f7209ab6535fda56366bd7ac9
172,197,858,247,498,170,000,000,000,000,000,000,000
12
Avoid building recursive entities For https://bugzilla.gnome.org/show_bug.cgi?id=762100 When we detect a recusive entity we should really not build the associated data, moreover if someone bypass libxml2 fatal errors and still tries to serialize a broken entity make sure we don't risk to get ito a recursion * parser.c: xmlParserEntityCheck() don't build if entity loop were found and remove the associated text content * tree.c: xmlStringGetNodeList() avoid a potential recursion
GF_Err stbl_AddDTS(GF_SampleTableBox *stbl, u64 DTS, u32 *sampleNumber, u32 LastAUDefDuration, u32 nb_pack)
{
    u32 i, j, sampNum;
    u64 *DTSs, curDTS;
    Bool inserted;
    GF_SttsEntry *ent;
    GF_TimeToSampleBox *stts = stbl->TimeToSample;

    //reset the reading cache when adding a sample
    stts->r_FirstSampleInEntry = 0;

    *sampleNumber = 0;

    CHECK_PACK(GF_BAD_PARAM)

    //if we don't have an entry, that's the first one...
    if (!stts->nb_entries) {
        //assert the first DTS is 0. If not, that will break the whole file
        if (DTS) return GF_BAD_PARAM;
        stts->alloc_size = 1;
        stts->nb_entries = 1;
        stts->entries = gf_malloc(sizeof(GF_SttsEntry));
        if (!stts->entries) return GF_OUT_OF_MEM;
        stts->entries[0].sampleCount = nb_pack;
        stts->entries[0].sampleDelta = (nb_pack>1) ? 0 : LastAUDefDuration;
        (*sampleNumber) = 1;
        stts->w_currentSampleNum = nb_pack;
        return GF_OK;
    }

    //check the last DTS - we allow 0-duration samples (same DTS)
    if (DTS >= stts->w_LastDTS) {
        u32 nb_extra = 0;
        ent = &stts->entries[stts->nb_entries-1];
        if (!ent->sampleDelta && (ent->sampleCount>1)) {
            ent->sampleDelta = (u32) ( DTS / ent->sampleCount);
            stts->w_LastDTS = DTS - ent->sampleDelta;
        }
        //OK, we're adding at the end
        if ((DTS == stts->w_LastDTS + ent->sampleDelta)
            //for raw audio, consider (dts==last_dts) and (dts==last_dts+2*delta) as sample append to cope with
            //timescale vs samplerate precision
            || ((nb_pack>1) && ((DTS == stts->w_LastDTS) || (DTS == stts->w_LastDTS + 2*ent->sampleDelta) ))
        ) {
            (*sampleNumber) = stts->w_currentSampleNum + 1;
            ent->sampleCount += nb_pack;
            stts->w_currentSampleNum += nb_pack;
            stts->w_LastDTS = DTS + ent->sampleDelta * (nb_pack-1);
            return GF_OK;
        }

        //we need to split the entry
        if (ent->sampleCount == 1) {
            //FIXME - we need more tests with timed text
#if 0
            if (stts->w_LastDTS)
                ent->sampleDelta += (u32) (DTS - stts->w_LastDTS);
            else
                ent->sampleDelta = (u32) DTS;
#else
            //use this one and adjust...
            ent->sampleDelta = (u32) (DTS - stts->w_LastDTS);
#endif
            ent->sampleCount ++;
            //little opt, merge last entry with previous one if same delta
            if ((stts->nb_entries>=2) && (ent->sampleDelta== stts->entries[stts->nb_entries-2].sampleDelta)) {
                stts->entries[stts->nb_entries-2].sampleCount += ent->sampleCount;
                stts->nb_entries--;
            }
            stts->w_currentSampleNum ++;
            stts->w_LastDTS = DTS;
            (*sampleNumber) = stts->w_currentSampleNum;
            return GF_OK;
        }

        //we definitely need to split the entry ;)
        ent->sampleCount --;

        if (nb_pack>1) nb_extra = 1;

        if (stts->alloc_size <= stts->nb_entries + nb_extra) {
            ALLOC_INC(stts->alloc_size);
            stts->entries = gf_realloc(stts->entries, sizeof(GF_SttsEntry)*stts->alloc_size);
            if (!stts->entries) return GF_OUT_OF_MEM;
            memset(&stts->entries[stts->nb_entries], 0, sizeof(GF_SttsEntry)*(stts->alloc_size-stts->nb_entries) );
        }

        if (nb_extra)
            nb_extra = stts->entries[stts->nb_entries-1].sampleDelta;

        ent = &stts->entries[stts->nb_entries];
        stts->nb_entries++;
        if (nb_pack==1) {
            ent->sampleCount = 2;
            ent->sampleDelta = (u32) (DTS - stts->w_LastDTS);
            stts->w_LastDTS = DTS;
            (*sampleNumber) = stts->w_currentSampleNum+1;
            stts->w_currentSampleNum += 1;
            return GF_OK;
        }

        ent->sampleCount = 1;
        ent->sampleDelta = (u32) (DTS - stts->w_LastDTS);

        ent = &stts->entries[stts->nb_entries];
        stts->nb_entries++;
        ent->sampleCount = nb_pack;
        ent->sampleDelta = nb_extra;
        stts->w_LastDTS = DTS;
        (*sampleNumber) = stts->w_currentSampleNum + 1;
        stts->w_currentSampleNum += nb_pack;
        return GF_OK;
    }

    //unpack the DTSs and locate new sample...
    DTSs = (u64*)gf_malloc(sizeof(u64) * (stbl->SampleSize->sampleCount+2) );
    if (!DTSs) return GF_OUT_OF_MEM;
    curDTS = 0;
    sampNum = 0;
    ent = NULL;
    inserted = 0;
    for (i=0; i<stts->nb_entries; i++) {
        ent = & stts->entries[i];
        for (j = 0; j<ent->sampleCount; j++) {
            if (!inserted && (curDTS > DTS)) {
                DTSs[sampNum] = DTS;
                sampNum++;
                *sampleNumber = sampNum;
                inserted = 1;
            }
            DTSs[sampNum] = curDTS;
            curDTS += ent->sampleDelta;
            sampNum ++;
        }
    }
    if (!inserted) {
        gf_free(DTSs);
        return GF_BAD_PARAM;
    }

    /*we will at most insert 3 new entries*/
    if (stts->nb_entries+3 >= stts->alloc_size) {
        stts->alloc_size += 3;
        stts->entries = gf_realloc(stts->entries, sizeof(GF_SttsEntry)*stts->alloc_size);
        if (!stts->entries) return GF_OUT_OF_MEM;
        memset(&stts->entries[stts->nb_entries], 0, sizeof(GF_SttsEntry)*(stts->alloc_size - stts->nb_entries) );
    }

    /*repack the DTSs*/
    j=0;
    stts->nb_entries = 1;
    stts->entries[0].sampleCount = 1;
    stts->entries[0].sampleDelta = (u32) DTSs[1] /* - (DTS[0] which is 0)*/;
    for (i=1; i<stbl->SampleSize->sampleCount+1; i++) {
        if (i == stbl->SampleSize->sampleCount) {
            //and by default, our last sample has the same delta as the prev
            stts->entries[j].sampleCount++;
        } else if (stts->entries[j].sampleDelta == (u32) ( DTSs[i+1] - DTSs[i]) ) {
            stts->entries[j].sampleCount ++;
        } else {
            stts->nb_entries ++;
            j++;
            stts->entries[j].sampleCount = 1;
            stts->entries[j].sampleDelta = (u32) (DTSs[i+1] - DTSs[i]);
        }
    }
    gf_free(DTSs);

    //reset the cache to the end
    stts->w_currentSampleNum = stbl->SampleSize->sampleCount + 1;
    return GF_OK;
}
0
[ "CWE-120", "CWE-787" ]
gpac
77ed81c069e10b3861d88f72e1c6be1277ee7eae
12,676,770,305,267,652,000,000,000,000,000,000,000
176
fixed #1774 (fuzz)
static int smack_msg_queue_msgrcv(struct kern_ipc_perm *isp, struct msg_msg *msg,
                                  struct task_struct *target, long type, int mode)
{
    return smk_curacc_msq(isp, MAY_READWRITE);
}
0
[ "CWE-416" ]
linux
a3727a8bac0a9e77c70820655fd8715523ba3db7
101,320,284,735,909,360,000,000,000,000,000,000,000
5
selinux,smack: fix subjective/objective credential use mixups Jann Horn reported a problem with commit eb1231f73c4d ("selinux: clarify task subjective and objective credentials") where some LSM hooks were attempting to access the subjective credentials of a task other than the current task. Generally speaking, it is not safe to access another task's subjective credentials and doing so can cause a number of problems. Further, while looking into the problem, I realized that Smack was suffering from a similar problem brought about by a similar commit 1fb057dcde11 ("smack: differentiate between subjective and objective task credentials"). This patch addresses this problem by restoring the use of the task's objective credentials in those cases where the task is other than the current executing task. Not only does this resolve the problem reported by Jann, it is arguably the correct thing to do in these cases. Cc: [email protected] Fixes: eb1231f73c4d ("selinux: clarify task subjective and objective credentials") Fixes: 1fb057dcde11 ("smack: differentiate between subjective and objective task credentials") Reported-by: Jann Horn <[email protected]> Acked-by: Eric W. Biederman <[email protected]> Acked-by: Casey Schaufler <[email protected]> Signed-off-by: Paul Moore <[email protected]>
static void gf_sg_dom_event_bubble(GF_Node *node, GF_DOM_Event *event, GF_List *use_stack, u32 cur_par_idx)
{
    GF_Node *parent;

    if (!node || node->sgprivate->scenegraph->abort_bubbling) return;

    /*get the node's parent*/
    parent = gf_node_get_parent(node, 0);

    if (!parent) {
        /*top of the graph, use Document*/
        if (node->sgprivate->scenegraph->RootNode==node)
            gf_sg_fire_dom_event(node->sgprivate->scenegraph->dom_evt, event, node->sgprivate->scenegraph, NULL);
        return;
    }
    if (cur_par_idx) {
        GF_Node *used_node = (GF_Node *)gf_list_get(use_stack, cur_par_idx-1);
        /*if the node is a used one, switch to the <use> subtree*/
        if (used_node==node) {
            parent = (GF_Node *)gf_list_get(use_stack, cur_par_idx);
            if (cur_par_idx>1) cur_par_idx-=2;
            else cur_par_idx = 0;
            /*if no events attached,bubble by default*/
            if (parent->sgprivate->interact) {
                Bool can_bubble = gf_sg_fire_dom_event(parent->sgprivate->interact->dom_evt, event, node->sgprivate->scenegraph, parent);
                if (!can_bubble) {
                    return;
                }
            }
            gf_sg_dom_event_bubble(parent, event, use_stack, cur_par_idx);
            return;
        }
    }
    /*if no events attached,bubble by default*/
    if (parent->sgprivate->interact) {
        Bool can_bubble;
        can_bubble = gf_sg_fire_dom_event(parent->sgprivate->interact->dom_evt, event, node->sgprivate->scenegraph, parent);
        if(!can_bubble) return;
    }
    gf_sg_dom_event_bubble(parent, event, use_stack, cur_par_idx);
}
0
[ "CWE-416" ]
gpac
9723dd0955894f2cb7be13b94cf7a47f2754b893
183,530,452,590,577,800,000,000,000,000,000,000,000
42
fixed #2109
void sas_porte_link_reset_err(struct work_struct *work)
{
    struct asd_sas_event *ev = to_asd_sas_event(work);
    struct asd_sas_phy *phy = ev->phy;

    sas_deform_port(phy, 1);
}
0
[ "CWE-284" ]
linux
0558f33c06bb910e2879e355192227a8e8f0219d
176,852,295,991,652,500,000,000,000,000,000,000,000
7
scsi: libsas: direct call probe and destruct In commit 87c8331fcf72 ("[SCSI] libsas: prevent domain rediscovery competing with ata error handling") introduced disco mutex to prevent rediscovery competing with ata error handling and put the whole revalidation in the mutex. But the rphy add/remove needs to wait for the error handling which also grabs the disco mutex. This may leads to dead lock.So the probe and destruct event were introduce to do the rphy add/remove asynchronously and out of the lock. The asynchronously processed workers makes the whole discovery process not atomic, the other events may interrupt the process. For example, if a loss of signal event inserted before the probe event, the sas_deform_port() is called and the port will be deleted. And sas_port_delete() may run before the destruct event, but the port-x:x is the top parent of end device or expander. This leads to a kernel WARNING such as: [ 82.042979] sysfs group 'power' not found for kobject 'phy-1:0:22' [ 82.042983] ------------[ cut here ]------------ [ 82.042986] WARNING: CPU: 54 PID: 1714 at fs/sysfs/group.c:237 sysfs_remove_group+0x94/0xa0 [ 82.043059] Call trace: [ 82.043082] [<ffff0000082e7624>] sysfs_remove_group+0x94/0xa0 [ 82.043085] [<ffff00000864e320>] dpm_sysfs_remove+0x60/0x70 [ 82.043086] [<ffff00000863ee10>] device_del+0x138/0x308 [ 82.043089] [<ffff00000869a2d0>] sas_phy_delete+0x38/0x60 [ 82.043091] [<ffff00000869a86c>] do_sas_phy_delete+0x6c/0x80 [ 82.043093] [<ffff00000863dc20>] device_for_each_child+0x58/0xa0 [ 82.043095] [<ffff000008696f80>] sas_remove_children+0x40/0x50 [ 82.043100] [<ffff00000869d1bc>] sas_destruct_devices+0x64/0xa0 [ 82.043102] [<ffff0000080e93bc>] process_one_work+0x1fc/0x4b0 [ 82.043104] [<ffff0000080e96c0>] worker_thread+0x50/0x490 [ 82.043105] [<ffff0000080f0364>] kthread+0xfc/0x128 [ 82.043107] [<ffff0000080836c0>] ret_from_fork+0x10/0x50 Make probe and destruct a direct call in the disco and revalidate function, but put them outside the lock. The whole discovery or revalidate won't be interrupted by other events. And the DISCE_PROBE and DISCE_DESTRUCT event are deleted as a result of the direct call. Introduce a new list to destruct the sas_port and put the port delete after the destruct. This makes sure the right order of destroying the sysfs kobject and fix the warning above. In sas_ex_revalidate_domain() have a loop to find all broadcasted device, and sometimes we have a chance to find the same expander twice. Because the sas_port will be deleted at the end of the whole revalidate process, sas_port with the same name cannot be added before this. Otherwise the sysfs will complain of creating duplicate filename. Since the LLDD will send broadcast for every device change, we can only process one expander's revalidation. [mkp: kbuild test robot warning] Signed-off-by: Jason Yan <[email protected]> CC: John Garry <[email protected]> CC: Johannes Thumshirn <[email protected]> CC: Ewan Milne <[email protected]> CC: Christoph Hellwig <[email protected]> CC: Tomas Henzl <[email protected]> CC: Dan Williams <[email protected]> Reviewed-by: Hannes Reinecke <[email protected]> Signed-off-by: Martin K. Petersen <[email protected]>
void rose_stop_idletimer(struct sock *sk)
{
    del_timer(&rose_sk(sk)->idletimer);
}
1
[ "CWE-416" ]
linux
9cc02ede696272c5271a401e4f27c262359bc2f6
111,486,110,913,219,350,000,000,000,000,000,000,000
4
net: rose: fix UAF bugs caused by timer handler There are UAF bugs in rose_heartbeat_expiry(), rose_timer_expiry() and rose_idletimer_expiry(). The root cause is that del_timer() could not stop the timer handler that is running and the refcount of sock is not managed properly. One of the UAF bugs is shown below: (thread 1) | (thread 2) | rose_bind | rose_connect | rose_start_heartbeat rose_release | (wait a time) case ROSE_STATE_0 | rose_destroy_socket | rose_heartbeat_expiry rose_stop_heartbeat | sock_put(sk) | ... sock_put(sk) // FREE | | bh_lock_sock(sk) // USE The sock is deallocated by sock_put() in rose_release() and then used by bh_lock_sock() in rose_heartbeat_expiry(). Although rose_destroy_socket() calls rose_stop_heartbeat(), it could not stop the timer that is running. The KASAN report triggered by POC is shown below: BUG: KASAN: use-after-free in _raw_spin_lock+0x5a/0x110 Write of size 4 at addr ffff88800ae59098 by task swapper/3/0 ... Call Trace: <IRQ> dump_stack_lvl+0xbf/0xee print_address_description+0x7b/0x440 print_report+0x101/0x230 ? irq_work_single+0xbb/0x140 ? _raw_spin_lock+0x5a/0x110 kasan_report+0xed/0x120 ? _raw_spin_lock+0x5a/0x110 kasan_check_range+0x2bd/0x2e0 _raw_spin_lock+0x5a/0x110 rose_heartbeat_expiry+0x39/0x370 ? rose_start_heartbeat+0xb0/0xb0 call_timer_fn+0x2d/0x1c0 ? rose_start_heartbeat+0xb0/0xb0 expire_timers+0x1f3/0x320 __run_timers+0x3ff/0x4d0 run_timer_softirq+0x41/0x80 __do_softirq+0x233/0x544 irq_exit_rcu+0x41/0xa0 sysvec_apic_timer_interrupt+0x8c/0xb0 </IRQ> <TASK> asm_sysvec_apic_timer_interrupt+0x1b/0x20 RIP: 0010:default_idle+0xb/0x10 RSP: 0018:ffffc9000012fea0 EFLAGS: 00000202 RAX: 000000000000bcae RBX: ffff888006660f00 RCX: 000000000000bcae RDX: 0000000000000001 RSI: ffffffff843a11c0 RDI: ffffffff843a1180 RBP: dffffc0000000000 R08: dffffc0000000000 R09: ffffed100da36d46 R10: dfffe9100da36d47 R11: ffffffff83cf0950 R12: 0000000000000000 R13: 1ffff11000ccc1e0 R14: ffffffff8542af28 R15: dffffc0000000000 ... Allocated by task 146: __kasan_kmalloc+0xc4/0xf0 sk_prot_alloc+0xdd/0x1a0 sk_alloc+0x2d/0x4e0 rose_create+0x7b/0x330 __sock_create+0x2dd/0x640 __sys_socket+0xc7/0x270 __x64_sys_socket+0x71/0x80 do_syscall_64+0x43/0x90 entry_SYSCALL_64_after_hwframe+0x46/0xb0 Freed by task 152: kasan_set_track+0x4c/0x70 kasan_set_free_info+0x1f/0x40 ____kasan_slab_free+0x124/0x190 kfree+0xd3/0x270 __sk_destruct+0x314/0x460 rose_release+0x2fa/0x3b0 sock_close+0xcb/0x230 __fput+0x2d9/0x650 task_work_run+0xd6/0x160 exit_to_user_mode_loop+0xc7/0xd0 exit_to_user_mode_prepare+0x4e/0x80 syscall_exit_to_user_mode+0x20/0x40 do_syscall_64+0x4f/0x90 entry_SYSCALL_64_after_hwframe+0x46/0xb0 This patch adds refcount of sock when we use functions such as rose_start_heartbeat() and so on to start timer, and decreases the refcount of sock when timer is finished or deleted by functions such as rose_stop_heartbeat() and so on. As a result, the UAF bugs could be mitigated. Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2") Signed-off-by: Duoming Zhou <[email protected]> Tested-by: Duoming Zhou <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Paolo Abeni <[email protected]>
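The repair pattern this message describes — holding a reference on the sock for as long as a timer may fire on it — can be sketched as a kernel-style fragment. The helper names below are illustrative, not the rose code itself, and the fragment assumes the usual kernel definitions of sock_hold(), sock_put(), mod_timer() and del_timer():

/* Pin the sock before arming the timer so the handler can never run
 * against freed memory; unpin when a still-pending timer is cancelled. */
static void start_sk_timer(struct sock *sk, struct timer_list *t,
                           unsigned long expires)
{
    sock_hold(sk);          /* reference owned by the pending timer */
    mod_timer(t, expires);
}

static void stop_sk_timer(struct sock *sk, struct timer_list *t)
{
    if (del_timer(t))       /* nonzero only if the timer was pending */
        sock_put(sk);       /* drop the timer's reference */
}

The handler itself must drop its reference (or re-arm without dropping) before returning, so every path balances sock_hold() with exactly one sock_put().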
void md_rdev_clear(struct md_rdev *rdev)
{
    if (rdev->sb_page) {
        put_page(rdev->sb_page);
        rdev->sb_loaded = 0;
        rdev->sb_page = NULL;
        rdev->sb_start = 0;
        rdev->sectors = 0;
    }
    if (rdev->bb_page) {
        put_page(rdev->bb_page);
        rdev->bb_page = NULL;
    }
    kfree(rdev->badblocks.page);
    rdev->badblocks.page = NULL;
}
0
[ "CWE-200" ]
linux
b6878d9e03043695dbf3fa1caa6dfc09db225b16
130,547,192,007,932,840,000,000,000,000,000,000,000
16
md: use kzalloc() when bitmap is disabled In drivers/md/md.c get_bitmap_file() uses kmalloc() for creating a mdu_bitmap_file_t called "file". 5769 file = kmalloc(sizeof(*file), GFP_NOIO); 5770 if (!file) 5771 return -ENOMEM; This structure is copied to user space at the end of the function. 5786 if (err == 0 && 5787 copy_to_user(arg, file, sizeof(*file))) 5788 err = -EFAULT But if bitmap is disabled only the first byte of "file" is initialized with zero, so it's possible to read some bytes (up to 4095) of kernel space memory from user space. This is an information leak. 5775 /* bitmap disabled, zero the first byte and copy out */ 5776 if (!mddev->bitmap_info.file) 5777 file->pathname[0] = '\0'; Signed-off-by: Benjamin Randazzo <[email protected]> Signed-off-by: NeilBrown <[email protected]>
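The leak class in this message is generic: kmalloc() returns uninitialized memory, and any bytes not explicitly written travel to user space in the later copy_to_user(). A self-contained userland analogue using calloc() (the struct and function are illustrative, not the md code):

#include <stdlib.h>
#include <string.h>

struct reply { char pathname[4096]; };

/* calloc() zero-fills the allocation, so the unwritten tail of pathname
 * cannot carry stale heap contents to an untrusted reader; with plain
 * malloc() it would. */
struct reply *make_reply(const char *path /* may be NULL */)
{
    struct reply *r = calloc(1, sizeof(*r));
    if (!r)
        return NULL;
    if (path)
        strncpy(r->pathname, path, sizeof(r->pathname) - 1);
    return r;
}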
void textview_set_position(TextView *textview, gint pos)
{
    GtkTextView *text = GTK_TEXT_VIEW(textview->text);

    gtkut_text_view_set_position(text, pos);
}
0
[ "CWE-601" ]
claws
ac286a71ed78429e16c612161251b9ea90ccd431
251,486,796,625,401,850,000,000,000,000,000,000,000
6
harden link checker before accepting click
Client::handleRequestBodyProducerAborted()
{
    if (requestSender != NULL)
        debugs(9,3, HERE << "fyi: request body aborted while we were sending");

    fwd->dontRetry(true); // the problem is not with the server
    stopConsumingFrom(requestBodySource); // requestSender, if any, will notice

    // kids extend this
}
0
[ "CWE-20" ]
squid
6c9c44d0e9cf7b72bb233360c5308aa063af3d69
286,897,834,303,967,730,000,000,000,000,000,000,000
10
Handle more partial responses (#791)
ZEND_API int ZEND_FASTCALL zend_binary_zval_strcmp(zval *s1, zval *s2) /* {{{ */
{
    return zend_binary_strcmp(Z_STRVAL_P(s1), Z_STRLEN_P(s1), Z_STRVAL_P(s2), Z_STRLEN_P(s2));
}
0
[ "CWE-787" ]
php-src
f1ce8d5f5839cb2069ea37ff424fb96b8cd6932d
179,897,608,411,664,880,000,000,000,000,000,000,000
4
Fix #73122: Integer Overflow when concatenating strings We must avoid integer overflows in memory allocations, so we introduce an additional check in the VM, and bail out in the rare case of an overflow. Since the recent fix for bug #74960 still doesn't catch all possible overflows, we fix that right away.
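The guard described here is the usual defense for concatenation: reject lengths whose sum would wrap before sizing the allocation. A minimal C sketch of the idea (not the Zend VM code; names are illustrative):

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Concatenate two buffers, bailing out when la + lb + 1 would overflow
 * size_t and silently under-allocate. SIZE_MAX - lb cannot underflow,
 * so the comparison itself is safe for all inputs. */
char *concat_checked(const char *a, size_t la, const char *b, size_t lb)
{
    if (la >= SIZE_MAX - lb)
        return NULL;                 /* would overflow: refuse */
    char *out = malloc(la + lb + 1);
    if (!out)
        return NULL;
    memcpy(out, a, la);
    memcpy(out + la, b, lb);
    out[la + lb] = '\0';
    return out;
}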
div_sf(VALUE s, VALUE *f)
{
    VALUE n = sec_to_ns(s);

    if (f)
        *f = f_mod(n, INT2FIX(1));
    return f_floor(n);
}
0
[]
date
3959accef8da5c128f8a8e2fd54e932a4fb253b0
216,589,023,179,287,950,000,000,000,000,000,000,000
8
Add length limit option for methods that parses date strings `Date.parse` now raises an ArgumentError when a given date string is longer than 128. You can configure the limit by giving `limit` keyword arguments like `Date.parse(str, limit: 1000)`. If you pass `limit: nil`, the limit is disabled. Not only `Date.parse` but also the following methods are changed. * Date._parse * Date.parse * DateTime.parse * Date._iso8601 * Date.iso8601 * DateTime.iso8601 * Date._rfc3339 * Date.rfc3339 * DateTime.rfc3339 * Date._xmlschema * Date.xmlschema * DateTime.xmlschema * Date._rfc2822 * Date.rfc2822 * DateTime.rfc2822 * Date._rfc822 * Date.rfc822 * DateTime.rfc822 * Date._jisx0301 * Date.jisx0301 * DateTime.jisx0301
irc_nick_color_for_msg (struct t_irc_server *server, int server_message,
                        struct t_irc_nick *nick, const char *nickname)
{
    static char color[16][64];
    static int index_color = 0;
    char *color_found;

    if (server_message
        && !weechat_config_boolean (irc_config_look_color_nicks_in_server_messages))
    {
        return IRC_COLOR_CHAT_NICK;
    }

    if (nick)
        return nick->color;

    if (nickname)
    {
        if (server
            && (irc_server_strcasecmp (server, nickname, server->nick) == 0))
        {
            return IRC_COLOR_CHAT_NICK_SELF;
        }
        color_found = irc_nick_find_color (nickname);
        index_color = (index_color + 1) % 16;
        snprintf (color[index_color], sizeof (color[index_color]),
                  "%s", color_found);
        if (color_found)
            free (color_found);
        return color[index_color];
    }

    return IRC_COLOR_CHAT_NICK;
}
0
[ "CWE-120", "CWE-787" ]
weechat
40ccacb4330a64802b1f1e28ed9a6b6d3ca9197f
236,335,659,829,761,900,000,000,000,000,000,000
35
irc: fix crash when a new message 005 is received with longer nick prefixes Thanks to Stuart Nevans Locke for reporting the issue.
int SWFShape_getFlags(SWFShape shape)
{
    if(shape->useVersion == SWF_SHAPE4)
        return shape->flags;
    else
        return 0;
}
0
[ "CWE-20", "CWE-476" ]
libming
6e76e8c71cb51c8ba0aa9737a636b9ac3029887f
333,504,991,753,184,800,000,000,000,000,000,000,000
7
SWFShape_setLeftFillStyle: prevent fill overflow
static int nci_open_device(struct nci_dev *ndev)
{
    int rc = 0;

    mutex_lock(&ndev->req_lock);

    if (test_bit(NCI_UNREG, &ndev->flags)) {
        rc = -ENODEV;
        goto done;
    }

    if (test_bit(NCI_UP, &ndev->flags)) {
        rc = -EALREADY;
        goto done;
    }

    if (ndev->ops->open(ndev)) {
        rc = -EIO;
        goto done;
    }

    atomic_set(&ndev->cmd_cnt, 1);

    set_bit(NCI_INIT, &ndev->flags);

    if (ndev->ops->init)
        rc = ndev->ops->init(ndev);

    if (!rc) {
        rc = __nci_request(ndev, nci_reset_req, (void *)0,
                           msecs_to_jiffies(NCI_RESET_TIMEOUT));
    }

    if (!rc && ndev->ops->setup) {
        rc = ndev->ops->setup(ndev);
    }

    if (!rc) {
        struct nci_core_init_v2_cmd nci_init_v2_cmd = {
            .feature1 = NCI_FEATURE_DISABLE,
            .feature2 = NCI_FEATURE_DISABLE
        };
        const void *opt = NULL;

        if (ndev->nci_ver & NCI_VER_2_MASK)
            opt = &nci_init_v2_cmd;
        rc = __nci_request(ndev, nci_init_req, opt,
                           msecs_to_jiffies(NCI_INIT_TIMEOUT));
    }

    if (!rc && ndev->ops->post_setup)
        rc = ndev->ops->post_setup(ndev);

    if (!rc) {
        rc = __nci_request(ndev, nci_init_complete_req, (void *)0,
                           msecs_to_jiffies(NCI_INIT_TIMEOUT));
    }

    clear_bit(NCI_INIT, &ndev->flags);

    if (!rc) {
        set_bit(NCI_UP, &ndev->flags);
        nci_clear_target_list(ndev);
        atomic_set(&ndev->state, NCI_IDLE);
    } else {
        /* Init failed, cleanup */
        skb_queue_purge(&ndev->cmd_q);
        skb_queue_purge(&ndev->rx_q);
        skb_queue_purge(&ndev->tx_q);

        ndev->ops->close(ndev);
        ndev->flags = 0;
    }

done:
    mutex_unlock(&ndev->req_lock);
    return rc;
}
0
[]
linux
48b71a9e66c2eab60564b1b1c85f4928ed04e406
54,831,679,667,944,130,000,000,000,000,000,000,000
79
NFC: add NCI_UNREG flag to eliminate the race There are two sites that calls queue_work() after the destroy_workqueue() and lead to possible UAF. The first site is nci_send_cmd(), which can happen after the nci_close_device as below nfcmrvl_nci_unregister_dev | nfc_genl_dev_up nci_close_device | flush_workqueue | del_timer_sync | nci_unregister_device | nfc_get_device destroy_workqueue | nfc_dev_up nfc_unregister_device | nci_dev_up device_del | nci_open_device | __nci_request | nci_send_cmd | queue_work !!! Another site is nci_cmd_timer, awaked by the nci_cmd_work from the nci_send_cmd. ... | ... nci_unregister_device | queue_work destroy_workqueue | nfc_unregister_device | ... device_del | nci_cmd_work | mod_timer | ... | nci_cmd_timer | queue_work !!! For the above two UAF, the root cause is that the nfc_dev_up can race between the nci_unregister_device routine. Therefore, this patch introduce NCI_UNREG flag to easily eliminate the possible race. In addition, the mutex_lock in nci_close_device can act as a barrier. Signed-off-by: Lin Ma <[email protected]> Fixes: 6a2968aaf50c ("NFC: basic NCI protocol implementation") Reviewed-by: Jakub Kicinski <[email protected]> Reviewed-by: Krzysztof Kozlowski <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Jakub Kicinski <[email protected]>
bool Item_direct_view_ref::excl_dep_on_grouping_fields(st_select_lex *sel)
{
    if (item_equal)
    {
        DBUG_ASSERT(real_item()->type() == Item::FIELD_ITEM);
        return find_matching_grouping_field(this, sel) != NULL;
    }
    return (*ref)->excl_dep_on_grouping_fields(sel);
}
0
[ "CWE-416" ]
server
c02ebf3510850ba78a106be9974c94c3b97d8585
21,907,464,686,110,724,000,000,000,000,000,000,000
9
MDEV-24176 Preparations 1. moved fix_vcol_exprs() call to open_table() mysql_alter_table() doesn't do lock_tables() so it cannot win from fix_vcol_exprs() from there. Tests affected: main.default_session 2. Vanilla cleanups and comments.
dns_cache_lookup(struct module_env* env,
    uint8_t* qname, size_t qnamelen, uint16_t qtype, uint16_t qclass,
    uint16_t flags, struct regional* region, struct regional* scratch,
    int no_partial, uint8_t* dpname, size_t dpnamelen)
{
    struct lruhash_entry* e;
    struct query_info k;
    hashvalue_type h;
    time_t now = *env->now;
    struct ub_packed_rrset_key* rrset;

    /* lookup first, this has both NXdomains and ANSWER responses */
    k.qname = qname;
    k.qname_len = qnamelen;
    k.qtype = qtype;
    k.qclass = qclass;
    k.local_alias = NULL;
    h = query_info_hash(&k, flags);
    e = slabhash_lookup(env->msg_cache, h, &k, 0);
    if(e) {
        struct msgreply_entry* key = (struct msgreply_entry*)e->key;
        struct reply_info* data = (struct reply_info*)e->data;
        struct dns_msg* msg = tomsg(env, &key->key, data, region, now, 0,
            scratch);
        if(msg) {
            lock_rw_unlock(&e->lock);
            return msg;
        }
        /* could be msg==NULL; due to TTL or not all rrsets available */
        lock_rw_unlock(&e->lock);
    }

    /* see if a DNAME exists. Checked for first, to enforce that DNAMEs
     * are more important, the CNAME is resynthesized and thus
     * consistent with the DNAME */
    if(!no_partial &&
        (rrset=find_closest_of_type(env, qname, qnamelen, qclass, now,
        LDNS_RR_TYPE_DNAME, 1, 0, NULL, 0))) {
        /* synthesize a DNAME+CNAME message based on this */
        enum sec_status sec_status = sec_status_unchecked;
        struct dns_msg* msg = synth_dname_msg(rrset, region, now, &k,
            &sec_status);
        if(msg) {
            struct ub_packed_rrset_key* cname_rrset;
            lock_rw_unlock(&rrset->entry.lock);
            /* now, after unlocking the DNAME rrset lock,
             * check the sec_status, and see if we need to look
             * up the CNAME record associated before it can
             * be used */
            /* normally, only secure DNAMEs allowed from cache*/
            if(sec_status == sec_status_secure)
                return msg;
            /* but if we have a CNAME cached with this name, then we
             * have previously already allowed this name to pass.
             * the next cache lookup is going to fetch that CNAME itself,
             * but it is better to have the (unsigned)DNAME + CNAME in
             * that case */
            cname_rrset = rrset_cache_lookup(
                env->rrset_cache, qname, qnamelen,
                LDNS_RR_TYPE_CNAME, qclass, 0, now, 0);
            if(cname_rrset) {
                /* CNAME already synthesized by
                 * synth_dname_msg routine, so we can
                 * straight up return the msg */
                lock_rw_unlock(&cname_rrset->entry.lock);
                return msg;
            }
        } else {
            lock_rw_unlock(&rrset->entry.lock);
        }
    }

    /* see if we have CNAME for this domain,
     * but not for DS records (which are part of the parent) */
    if(!no_partial && qtype != LDNS_RR_TYPE_DS &&
       (rrset=rrset_cache_lookup(env->rrset_cache, qname, qnamelen,
        LDNS_RR_TYPE_CNAME, qclass, 0, now, 0))) {
        uint8_t* wc = NULL;
        size_t wl;
        /* if the rrset is not a wildcard expansion, with wcname */
        /* because, if we return that CNAME rrset on its own, it is
         * missing the NSEC or NSEC3 proof */
        if(!(val_rrset_wildcard(rrset, &wc, &wl) && wc != NULL)) {
            struct dns_msg* msg = rrset_msg(rrset, region, now, &k);
            if(msg) {
                lock_rw_unlock(&rrset->entry.lock);
                return msg;
            }
        }
        lock_rw_unlock(&rrset->entry.lock);
    }

    /* construct DS, DNSKEY messages from rrset cache. */
    if((qtype == LDNS_RR_TYPE_DS || qtype == LDNS_RR_TYPE_DNSKEY) &&
        (rrset=rrset_cache_lookup(env->rrset_cache, qname, qnamelen,
        qtype, qclass, 0, now, 0))) {
        /* if the rrset is from the additional section, and the
         * signatures have fallen off, then do not synthesize a msg
         * instead, allow a full query for signed results to happen.
         * Forego all rrset data from additional section, because
         * some signatures may not be present and cause validation
         * failure.
         */
        struct packed_rrset_data *d = (struct packed_rrset_data*)
            rrset->entry.data;
        if(d->trust != rrset_trust_add_noAA &&
            d->trust != rrset_trust_add_AA &&
            (qtype == LDNS_RR_TYPE_DS ||
                (d->trust != rrset_trust_auth_noAA &&
                 d->trust != rrset_trust_auth_AA) )) {
            struct dns_msg* msg = rrset_msg(rrset, region, now, &k);
            if(msg) {
                lock_rw_unlock(&rrset->entry.lock);
                return msg;
            }
        }
        lock_rw_unlock(&rrset->entry.lock);
    }

    /* stop downwards cache search on NXDOMAIN.
     * Empty nonterminals are NOERROR, so an NXDOMAIN for foo
     * means bla.foo also does not exist. The DNSSEC proofs are
     * the same. We search upwards for NXDOMAINs. */
    if(env->cfg->harden_below_nxdomain) {
        while(!dname_is_root(k.qname)) {
            if(dpname && dpnamelen && !dname_subdomain_c(k.qname, dpname))
                break; /* no synth nxdomain above the stub */
            dname_remove_label(&k.qname, &k.qname_len);
            h = query_info_hash(&k, flags);
            e = slabhash_lookup(env->msg_cache, h, &k, 0);
            if(!e && k.qtype != LDNS_RR_TYPE_A &&
                env->cfg->qname_minimisation) {
                k.qtype = LDNS_RR_TYPE_A;
                h = query_info_hash(&k, flags);
                e = slabhash_lookup(env->msg_cache, h, &k, 0);
            }
            if(e) {
                struct reply_info* data = (struct reply_info*)e->data;
                struct dns_msg* msg;
                if(FLAGS_GET_RCODE(data->flags) == LDNS_RCODE_NXDOMAIN
                    && data->security == sec_status_secure
                    && (data->an_numrrsets == 0 ||
                        ntohs(data->rrsets[0]->rk.type) != LDNS_RR_TYPE_CNAME)
                    && (msg=tomsg(env, &k, data, region, now, 0, scratch))) {
                    lock_rw_unlock(&e->lock);
                    msg->qinfo.qname=qname;
                    msg->qinfo.qname_len=qnamelen;
                    /* check that DNSSEC really works out */
                    msg->rep->security = sec_status_unchecked;
                    iter_scrub_nxdomain(msg);
                    return msg;
                }
                lock_rw_unlock(&e->lock);
            }
            k.qtype = qtype;
        }
    }

    /* fill common RR types for ANY response to avoid requery */
    if(qtype == LDNS_RR_TYPE_ANY) {
        return fill_any(env, qname, qnamelen, qtype, qclass, region);
    }

    return NULL;
}
0
[ "CWE-613", "CWE-703" ]
unbound
f6753a0f1018133df552347a199e0362fc1dac68
297,972,438,308,458,750,000,000,000,000,000,000,000
166
- Fix the novel ghost domain issues CVE-2022-30698 and CVE-2022-30699.
QUtil::tell(FILE* stream)
{
#if HAVE_FSEEKO
    return static_cast<qpdf_offset_t>(ftello(stream));
#elif HAVE_FSEEKO64
    return static_cast<qpdf_offset_t>(ftello64(stream));
#else
# if defined _MSC_VER || defined __BORLANDC__
    return _ftelli64(stream);
# else
    return static_cast<qpdf_offset_t>(ftell(stream));
# endif
#endif
}
0
[ "CWE-125" ]
qpdf
6d46346eb93d5032c08cf1e39023b5d57260a766
335,932,067,563,811,100,000,000,000,000,000,000,000
14
Detect integer overflow/underflow
static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
    return pmd_set_flags(pmd, _PAGE_DIRTY);
}
0
[ "CWE-119", "CWE-787" ]
linux
027ef6c87853b0a9df53175063028edb4950d476
222,320,194,844,218,500,000,000,000,000,000,000,000
4
mm: thp: fix pmd_present for split_huge_page and PROT_NONE with THP In many places !pmd_present has been converted to pmd_none. For pmds that's equivalent and pmd_none is quicker so using pmd_none is better. However (unless we delete pmd_present) we should provide an accurate pmd_present too. This will avoid the risk of code thinking the pmd is non present because it's under __split_huge_page_map, see the pmd_mknotpresent there and the comment above it. If the page has been mprotected as PROT_NONE, it would also lead to a pmd_present false negative in the same way as the race with split_huge_page. Because the PSE bit stays on at all times (both during split_huge_page and when the _PAGE_PROTNONE bit get set), we could only check for the PSE bit, but checking the PROTNONE bit too is still good to remember pmd_present must always keep PROT_NONE into account. This explains a not reproducible BUG_ON that was seldom reported on the lists. The same issue is in pmd_large, it would go wrong with both PROT_NONE and if it races with split_huge_page. Signed-off-by: Andrea Arcangeli <[email protected]> Acked-by: Rik van Riel <[email protected]> Cc: Johannes Weiner <[email protected]> Cc: Hugh Dickins <[email protected]> Cc: Mel Gorman <[email protected]> Cc: <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
static av_always_inline void dnxhd_encode_dc(DNXHDEncContext *ctx, int diff)
{
    int nbits;
    if (diff < 0) {
        nbits = av_log2_16bit(-2*diff);
        diff--;
    } else {
        nbits = av_log2_16bit(2*diff);
    }
    put_bits(&ctx->m.pb, ctx->cid_table->dc_bits[nbits] + nbits,
             (ctx->cid_table->dc_codes[nbits]<<nbits) + (diff & ((1 << nbits) - 1)));
}
0
[ "CWE-703" ]
FFmpeg
f1caaa1c61310beba705957e6366f0392a0b005b
157,431,565,134,511,270,000,000,000,000,000,000,000
12
dnxhdenc: fix mb_rc size Fixes out of array access with RC_VARIANCE set to 0 Signed-off-by: Michael Niedermayer <[email protected]>
static void
yydestruct (const char *yymsg, int yytype, YYSTYPE *yyvaluep,
            void *yyscanner, RE_LEX_ENVIRONMENT *lex_env)
{
  YYUSE (yyvaluep);
  YYUSE (yyscanner);
  YYUSE (lex_env);
  if (!yymsg)
    yymsg = "Deleting";
  YY_SYMBOL_PRINT (yymsg, yytype, yyvaluep, yylocationp);

  YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN
  switch (yytype)
    {
    case 6: /* _CLASS_ */
#line 96 "re_grammar.y" /* yacc.c:1257 */
      { yr_free(((*yyvaluep).class_vector)); }
#line 1045 "re_grammar.c" /* yacc.c:1257 */
      break;

    case 26: /* alternative */
#line 97 "re_grammar.y" /* yacc.c:1257 */
      { yr_re_node_destroy(((*yyvaluep).re_node)); }
#line 1051 "re_grammar.c" /* yacc.c:1257 */
      break;

    case 27: /* concatenation */
#line 98 "re_grammar.y" /* yacc.c:1257 */
      { yr_re_node_destroy(((*yyvaluep).re_node)); }
#line 1057 "re_grammar.c" /* yacc.c:1257 */
      break;

    case 28: /* repeat */
#line 99 "re_grammar.y" /* yacc.c:1257 */
      { yr_re_node_destroy(((*yyvaluep).re_node)); }
#line 1063 "re_grammar.c" /* yacc.c:1257 */
      break;

    case 29: /* single */
#line 100 "re_grammar.y" /* yacc.c:1257 */
      { yr_re_node_destroy(((*yyvaluep).re_node)); }
#line 1069 "re_grammar.c" /* yacc.c:1257 */
      break;

    default:
      break;
    }
1
[ "CWE-674", "CWE-787" ]
yara
925bcf3c3b0a28b5b78e25d9efda5c0bf27ae699
171,859,271,658,806,500,000,000,000,000,000,000,000
48
Fix issue #674. Move regexp limits to limits.h.
static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
{
    struct tcmsg *tcm = NLMSG_DATA(n);
    struct rtattr **tca = arg;
    struct net_device *dev;
    struct Qdisc *q = NULL;
    struct Qdisc_class_ops *cops;
    unsigned long cl = 0;
    unsigned long new_cl;
    u32 pid = tcm->tcm_parent;
    u32 clid = tcm->tcm_handle;
    u32 qid = TC_H_MAJ(clid);
    int err;

    if ((dev = __dev_get_by_index(tcm->tcm_ifindex)) == NULL)
        return -ENODEV;

    /*
       parent == TC_H_UNSPEC - unspecified parent.
       parent == TC_H_ROOT   - class is root, which has no parent.
       parent == X:0         - parent is root class.
       parent == X:Y         - parent is a node in hierarchy.
       parent == 0:Y         - parent is X:Y, where X:0 is qdisc.

       handle == 0:0         - generate handle from kernel pool.
       handle == 0:Y         - class is X:Y, where X:0 is qdisc.
       handle == X:Y         - clear.
       handle == X:0         - root class.
     */

    /* Step 1. Determine qdisc handle X:0 */

    if (pid != TC_H_ROOT) {
        u32 qid1 = TC_H_MAJ(pid);

        if (qid && qid1) {
            /* If both majors are known, they must be identical. */
            if (qid != qid1)
                return -EINVAL;
        } else if (qid1) {
            qid = qid1;
        } else if (qid == 0)
            qid = dev->qdisc_sleeping->handle;

        /* Now qid is genuine qdisc handle consistent
           both with parent and child.

           TC_H_MAJ(pid) still may be unspecified, complete it now.
         */
        if (pid)
            pid = TC_H_MAKE(qid, pid);
    } else {
        if (qid == 0)
            qid = dev->qdisc_sleeping->handle;
    }

    /* OK. Locate qdisc */
    if ((q = qdisc_lookup(dev, qid)) == NULL)
        return -ENOENT;

    /* An check that it supports classes */
    cops = q->ops->cl_ops;
    if (cops == NULL)
        return -EINVAL;

    /* Now try to get class */
    if (clid == 0) {
        if (pid == TC_H_ROOT)
            clid = qid;
    } else
        clid = TC_H_MAKE(qid, clid);

    if (clid)
        cl = cops->get(q, clid);

    if (cl == 0) {
        err = -ENOENT;
        if (n->nlmsg_type != RTM_NEWTCLASS || !(n->nlmsg_flags&NLM_F_CREATE))
            goto out;
    } else {
        switch (n->nlmsg_type) {
        case RTM_NEWTCLASS:
            err = -EEXIST;
            if (n->nlmsg_flags&NLM_F_EXCL)
                goto out;
            break;
        case RTM_DELTCLASS:
            err = cops->delete(q, cl);
            if (err == 0)
                tclass_notify(skb, n, q, cl, RTM_DELTCLASS);
            goto out;
        case RTM_GETTCLASS:
            err = tclass_notify(skb, n, q, cl, RTM_NEWTCLASS);
            goto out;
        default:
            err = -EINVAL;
            goto out;
        }
    }

    new_cl = cl;
    err = cops->change(q, clid, pid, tca, &new_cl);
    if (err == 0)
        tclass_notify(skb, n, q, new_cl, RTM_NEWTCLASS);

out:
    if (cl)
        cops->put(q, cl);

    return err;
}
0
[ "CWE-200" ]
linux-2.6
9ef1d4c7c7aca1cd436612b6ca785b726ffb8ed8
170,481,033,093,087,150,000,000,000,000,000,000,000
111
[NETLINK]: Missing initializations in dumped data Mostly missing initialization of padding fields of 1 or 2 bytes length, two instances of uninitialized nlmsgerr->msg of 16 bytes length. Signed-off-by: Patrick McHardy <[email protected]> Signed-off-by: David S. Miller <[email protected]>
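The uninitialized bytes at issue here are usually compiler-inserted padding, which member-wise assignment never touches. A small standalone illustration of the memset-first discipline (the struct is hypothetical, not a netlink structure):

#include <string.h>

struct wire_msg {
    unsigned char type;   /* 1 byte; padding bytes typically follow */
    unsigned int  value;  /* 4 bytes, usually 4-byte aligned */
};

/* Zero the whole struct before filling named fields so any padding that
 * gets copied to a receiver is 0, not leftover stack or heap contents. */
void fill_msg(struct wire_msg *m, unsigned char type, unsigned int value)
{
    memset(m, 0, sizeof(*m));
    m->type = type;
    m->value = value;
}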
enc_succ_char(char *p, long len, rb_encoding *enc)
{
    long i;
    int l;
    while (1) {
        for (i = len-1; 0 <= i && (unsigned char)p[i] == 0xff; i--)
            p[i] = '\0';
        if (i < 0) return NEIGHBOR_WRAPPED;
        ++((unsigned char*)p)[i];
        l = rb_enc_precise_mbclen(p, p+len, enc);
        if (MBCLEN_CHARFOUND_P(l)) {
            l = MBCLEN_CHARFOUND_LEN(l);
            if (l == len) {
                return NEIGHBOR_FOUND;
            }
            else {
                memset(p+l, 0xff, len-l);
            }
        }
        if (MBCLEN_INVALID_P(l) && i < len-1) {
            long len2;
            int l2;
            for (len2 = len-1; 0 < len2; len2--) {
                l2 = rb_enc_precise_mbclen(p, p+len2, enc);
                if (!MBCLEN_INVALID_P(l2))
                    break;
            }
            memset(p+len2+1, 0xff, len-(len2+1));
        }
    }
}
0
[ "CWE-119" ]
ruby
1c2ef610358af33f9ded3086aa2d70aac03dcac5
208,945,030,465,569,170,000,000,000,000,000,000,000
32
* string.c (rb_str_justify): CVE-2009-4124. Fixes a bug reported by Emmanouel Kellinis <Emmanouel.Kellinis AT kpmg.co.uk>, KPMG London; Patch by nobu. git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@26038 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
static void nhmldump_send_header(GF_NHMLDumpCtx *ctx)
{
    GF_FilterPacket *dst_pck;
    char nhml[1024];
    u32 size;
    u8 *output;
    const GF_PropertyValue *p;

    ctx->szRootName = "NHNTStream";
    if (ctx->dims) {
        ctx->szRootName = "DIMSStream";
    }

    if (!ctx->filep) {
        sprintf(nhml, "<?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n");
        gf_bs_write_data(ctx->bs_w, nhml, (u32) strlen(nhml));
    }

    /*write header*/
    sprintf(nhml, "<%s version=\"1.0\" ", ctx->szRootName);
    gf_bs_write_data(ctx->bs_w, nhml, (u32) strlen(nhml));

    NHML_PRINT_UINT(GF_PROP_PID_ID, NULL, "trackID")
    NHML_PRINT_UINT(GF_PROP_PID_TIMESCALE, NULL, "timeScale")

    p = gf_filter_pid_get_property(ctx->ipid, GF_PROP_PID_IN_IOD);
    if (p && p->value.boolean) {
        sprintf(nhml, "inRootOD=\"yes\" ");
        gf_bs_write_data(ctx->bs_w, nhml, (u32) strlen(nhml));
    }

    if (ctx->oti && (ctx->oti<GF_CODECID_LAST_MPEG4_MAPPING)) {
        sprintf(nhml, "streamType=\"%d\" objectTypeIndication=\"%d\" ", ctx->streamtype, ctx->oti);
        gf_bs_write_data(ctx->bs_w, nhml, (u32)strlen(nhml));
    } else {
        p = gf_filter_pid_get_property(ctx->ipid, GF_PROP_PID_SUBTYPE);
        if (p) {
            sprintf(nhml, "%s=\"%s\" ", "mediaType", gf_4cc_to_str(p->value.uint));
            gf_bs_write_data(ctx->bs_w, nhml, (u32) strlen(nhml));

            NHML_PRINT_4CC(GF_PROP_PID_ISOM_SUBTYPE, "mediaSubType", "mediaSubType")
        } else {
            NHML_PRINT_4CC(GF_PROP_PID_CODECID, NULL, "codecID")
        }
    }
    if (ctx->w && ctx->h) {
        //compatibility with old arch, we might want to remove this
        switch (ctx->streamtype) {
        case GF_STREAM_VISUAL:
        case GF_STREAM_SCENE:
            sprintf(nhml, "width=\"%d\" height=\"%d\" ", ctx->w, ctx->h);
            gf_bs_write_data(ctx->bs_w, nhml, (u32) strlen(nhml));
            break;
        default:
            break;
        }
    }
    else if (ctx->sr && ctx->chan) {
        sprintf(nhml, "sampleRate=\"%d\" numChannels=\"%d\" ", ctx->sr, ctx->chan);
        gf_bs_write_data(ctx->bs_w, nhml, (u32) strlen(nhml));
        sprintf(nhml, "sampleRate=\"%d\" numChannels=\"%d\" ", ctx->sr, ctx->chan);
        gf_bs_write_data(ctx->bs_w, nhml, (u32) strlen(nhml));
        p = gf_filter_pid_get_property(ctx->ipid, GF_PROP_PID_AUDIO_FORMAT);
        sprintf(nhml, "bitsPerSample=\"%d\" ", gf_audio_fmt_bit_depth(p->value.uint));
        gf_bs_write_data(ctx->bs_w, nhml, (u32) strlen(nhml));
    }

    NHML_PRINT_4CC(0, "codec_vendor", "codecVendor")
    NHML_PRINT_UINT(0, "codec_version", "codecVersion")
    NHML_PRINT_UINT(0, "codec_revision", "codecRevision")
    NHML_PRINT_STRING(0, "compressor_name", "compressorName")
    NHML_PRINT_UINT(0, "temporal_quality", "temporalQuality")
    NHML_PRINT_UINT(0, "spatial_quality", "spatialQuality")
    NHML_PRINT_UINT(0, "hres", "horizontalResolution")
    NHML_PRINT_UINT(0, "vres", "verticalResolution")
    NHML_PRINT_UINT(GF_PROP_PID_BIT_DEPTH_Y, NULL, "bitDepth")

    NHML_PRINT_STRING(0, "meta:xmlns", "xml_namespace")
    NHML_PRINT_STRING(0, "meta:schemaloc", "xml_schema_location")
    NHML_PRINT_STRING(0, "meta:mime", "mime_type")

    NHML_PRINT_STRING(0, "meta:config", "config")
    NHML_PRINT_STRING(0, "meta:aux_mimes", "aux_mime_type")

    if (ctx->codecid == GF_CODECID_DIMS) {
        if (gf_filter_pid_get_property_str(ctx->ipid, "meta:xmlns")==NULL) {
            sprintf(nhml, "xmlns=\"http://www.3gpp.org/richmedia\" ");
            gf_bs_write_data(ctx->bs_w, nhml, (u32) strlen(nhml));
        }

        NHML_PRINT_UINT(0, "dims:profile", "profile")
        NHML_PRINT_UINT(0, "dims:level", "level")
        NHML_PRINT_UINT(0, "dims:pathComponents", "pathComponents")

        p = gf_filter_pid_get_property_str(ctx->ipid, "dims:fullRequestHost");
        if (p) {
            sprintf(nhml, "useFullRequestHost=\"%s\" ", p->value.boolean ? "yes" : "no");
            gf_bs_write_data(ctx->bs_w, nhml, (u32) strlen(nhml));
        }
        p = gf_filter_pid_get_property_str(ctx->ipid, "dims:streamType");
        if (p) {
            sprintf(nhml, "stream_type=\"%s\" ", p->value.boolean ? "primary" : "secondary");
            gf_bs_write_data(ctx->bs_w, nhml, (u32) strlen(nhml));
        }
        p = gf_filter_pid_get_property_str(ctx->ipid, "dims:redundant");
        if (p) {
            sprintf(nhml, "contains_redundant=\"%s\" ", (p->value.uint==1) ? "main" : ((p->value.uint==1) ? "redundant" : "main+redundant") );
            gf_bs_write_data(ctx->bs_w, nhml, (u32) strlen(nhml));
        }
        NHML_PRINT_UINT(0, "dims:scriptTypes", "scriptTypes")
    }

    //send DCD
    if (ctx->opid_info) {
        sprintf(nhml, "specificInfoFile=\"%s\" ", gf_file_basename(ctx->info_file) );
        gf_bs_write_data(ctx->bs_w, nhml, (u32) strlen(nhml));

        dst_pck = gf_filter_pck_new_shared(ctx->opid_info, ctx->dcfg, ctx->dcfg_size, NULL);
        gf_filter_pck_set_framing(dst_pck, GF_TRUE, GF_TRUE);
        gf_filter_pck_set_readonly(dst_pck);
        gf_filter_pck_send(dst_pck);
    }

    NHML_PRINT_STRING(0, "meta:encoding", "encoding")
    NHML_PRINT_STRING(0, "meta:contentEncoding", "content_encoding")
    ctx->uncompress = GF_FALSE;
    if (p) {
        if (!strcmp(p->value.string, "deflate")) ctx->uncompress = GF_TRUE;
        else {
            GF_LOG(GF_LOG_ERROR, GF_LOG_AUTHOR, ("[NHMLMx] content_encoding %s not supported\n", p->value.string ));
        }
    }

    if (ctx->opid_mdia) {
        sprintf(nhml, "baseMediaFile=\"%s\" ", gf_file_basename(ctx->media_file) );
        gf_bs_write_data(ctx->bs_w, nhml, (u32) strlen(nhml));
    }
    sprintf(nhml, ">\n");
    gf_bs_write_data(ctx->bs_w, nhml, (u32) strlen(nhml));

    gf_bs_get_content_no_truncate(ctx->bs_w, &ctx->nhml_buffer, &size, &ctx->nhml_buffer_size);

    if (ctx->filep) {
        gf_fwrite(ctx->nhml_buffer, size, ctx->filep);
        return;
    }

    dst_pck = gf_filter_pck_new_alloc(ctx->opid_nhml, size, &output);
    memcpy(output, ctx->nhml_buffer, size);
    gf_filter_pck_set_framing(dst_pck, GF_TRUE, GF_FALSE);
    gf_filter_pck_send(dst_pck);
}
1
[ "CWE-476" ]
gpac
9eeac00b38348c664dfeae2525bba0cf1bc32349
171,851,966,591,827,380,000,000,000,000,000,000,000
154
fixed #1565
static void dn_dev_set_timer(struct net_device *dev)
{
    struct dn_dev *dn_db = rcu_dereference_raw(dev->dn_ptr);

    if (dn_db->parms.t2 > dn_db->parms.t3)
        dn_db->parms.t2 = dn_db->parms.t3;

    dn_db->timer.data = (unsigned long)dev;
    dn_db->timer.function = dn_dev_timer_func;
    dn_db->timer.expires = jiffies + (dn_db->parms.t2 * HZ);

    add_timer(&dn_db->timer);
}
0
[ "CWE-264" ]
net
90f62cf30a78721641e08737bda787552428061e
38,954,200,005,281,676,000,000,000,000,000,000,000
13
net: Use netlink_ns_capable to verify the permisions of netlink messages It is possible by passing a netlink socket to a more privileged executable and then to fool that executable into writing to the socket data that happens to be valid netlink message to do something that privileged executable did not intend to do. To keep this from happening replace bare capable and ns_capable calls with netlink_capable, netlink_net_calls and netlink_ns_capable calls. Which act the same as the previous calls except they verify that the opener of the socket had the desired permissions as well. Reported-by: Andy Lutomirski <[email protected]> Signed-off-by: "Eric W. Biederman" <[email protected]> Signed-off-by: David S. Miller <[email protected]>
static void lsr_read_gradient_units(GF_LASeRCodec *lsr, GF_Node *elt)
{
    u32 flag;
    GF_FieldInfo info;
    GF_LSR_READ_INT(lsr, flag, 1, "hasGradientUnits");
    if (flag) {
        lsr->last_error = gf_node_get_attribute_by_tag(elt, TAG_SVG_ATT_gradientUnits, 1, 0, &info);
        GF_LSR_READ_INT(lsr, *(SVG_GradientUnit*)info.far_ptr, 1, "gradientUnits");
    }
}
0
[ "CWE-190" ]
gpac
faa75edde3dfeba1e2cf6ffa48e45a50f1042096
83,288,946,498,841,630,000,000,000,000,000,000,000
10
fixed #2213
void Gfx::doFunctionShFill(GfxFunctionShading *shading) {
    double x0, y0, x1, y1;
    GfxColor colors[4];

    if (out->useShadedFills( shading->getType() ) &&
        out->functionShadedFill(state, shading)) {
        return;
    }

    shading->getDomain(&x0, &y0, &x1, &y1);
    shading->getColor(x0, y0, &colors[0]);
    shading->getColor(x0, y1, &colors[1]);
    shading->getColor(x1, y0, &colors[2]);
    shading->getColor(x1, y1, &colors[3]);
    doFunctionShFill1(shading, x0, y0, x1, y1, colors, 0);
}
0
[]
poppler
abf167af8b15e5f3b510275ce619e6fdb42edd40
52,244,014,207,018,920,000,000,000,000,000,000,000
16
Implement tiling/patterns in SplashOutputDev Fixes bug 13518
static void __net_init ipip6_fb_tunnel_init(struct net_device *dev)
{
    struct ip_tunnel *tunnel = netdev_priv(dev);
    struct iphdr *iph = &tunnel->parms.iph;
    struct net *net = dev_net(dev);
    struct sit_net *sitn = net_generic(net, sit_net_id);

    tunnel->dev = dev;
    strcpy(tunnel->parms.name, dev->name);

    iph->version = 4;
    iph->protocol = IPPROTO_IPV6;
    iph->ihl = 5;
    iph->ttl = 64;

    dev_hold(dev);
    sitn->tunnels_wc[0] = tunnel;
}
0
[]
linux-2.6
d5aa407f59f5b83d2c50ec88f5bf56d40f1f8978
32,724,894,684,568,400,000,000,000,000,000,000,000
18
tunnels: fix netns vs proto registration ordering Same stuff as in ip_gre patch: receive hook can be called before netns setup is done, oopsing in net_generic(). Signed-off-by: Alexey Dobriyan <[email protected]> Signed-off-by: David S. Miller <[email protected]>
Value ExpressionAbs::evaluateNumericArg(const Value& numericArg) const {
    BSONType type = numericArg.getType();
    if (type == NumberDouble) {
        return Value(std::abs(numericArg.getDouble()));
    } else if (type == NumberDecimal) {
        return Value(numericArg.getDecimal().toAbs());
    } else {
        long long num = numericArg.getLong();
        uassert(28680,
                "can't take $abs of long long min",
                num != std::numeric_limits<long long>::min());
        long long absVal = std::abs(num);
        return type == NumberLong ? Value(absVal) : Value::createIntOrLong(absVal);
    }
}
0
[ "CWE-835" ]
mongo
0a076417d1d7fba3632b73349a1fd29a83e68816
167,855,117,807,469,400,000,000,000,000,000,000,000
15
SERVER-38070 fix infinite loop in agg expression
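The uassert in the function above exists because |LLONG_MIN| is not representable: negating the most negative two's-complement value is undefined behaviour in C and C++. A standalone C version of the same guard (the names are illustrative, not the MongoDB code):

#include <limits.h>
#include <stdlib.h>

/* Store |x| in *out; fail for the single value whose absolute value does
 * not fit, instead of invoking undefined behaviour via llabs(). */
int checked_llabs(long long x, long long *out)
{
    if (x == LLONG_MIN)
        return -1;
    *out = llabs(x);
    return 0;
}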
R_API RSocket *r_socket_new_from_fd(int fd) {
    RSocket *s = R_NEW0 (RSocket);
    if (s) {
        s->fd = fd;
    }
    return s;
}
0
[ "CWE-78" ]
radare2
04edfa82c1f3fa2bc3621ccdad2f93bdbf00e4f9
219,215,983,276,245,930,000,000,000,000,000,000,000
7
Fix command injection on PDB download (#16966) * Fix r_sys_mkdirp with absolute path on Windows * Fix build with --with-openssl * Use RBuffer in r_socket_http_answer() * r_socket_http_answer: Fix read for big responses * Implement r_str_escape_sh() * Cleanup r_socket_connect() on Windows * Fix socket being created without a protocol * Fix socket connect with SSL ##socket * Use select() in r_socket_ready() * Fix read failing if received only protocol answer * Fix double-free * r_socket_http_get: Fail if req. SSL with no support * Follow redirects in r_socket_http_answer() * Fix r_socket_http_get result length with R2_CURL=1 * Also follow redirects * Avoid using curl for downloading PDBs * Use r_socket_http_get() on UNIXs * Use WinINet API on Windows for r_socket_http_get() * Fix command injection * Fix r_sys_cmd_str_full output for binary data * Validate GUID on PDB download * Pass depth to socket_http_get_recursive() * Remove 'r_' and '__' from static function names * Fix is_valid_guid * Fix for comments
THD::binlog_start_trans_and_stmt()
{
    binlog_cache_mngr *cache_mngr= (binlog_cache_mngr*) thd_get_ha_data(this, binlog_hton);
    DBUG_ENTER("binlog_start_trans_and_stmt");
    DBUG_PRINT("enter", ("cache_mngr: %p cache_mngr->trx_cache.get_prev_position(): %lu",
                         cache_mngr,
                         (cache_mngr ? (ulong) cache_mngr->trx_cache.get_prev_position() :
                          (ulong) 0)));

    if (cache_mngr == NULL ||
        cache_mngr->trx_cache.get_prev_position() == MY_OFF_T_UNDEF)
    {
        this->binlog_set_stmt_begin();
        if (in_multi_stmt_transaction_mode())
            trans_register_ha(this, TRUE, binlog_hton);
        trans_register_ha(this, FALSE, binlog_hton);
        /*
          Mark statement transaction as read/write. We never start
          a binary log transaction and keep it read-only,
          therefore it's best to mark the transaction read/write just
          at the same time we start it.
          Not necessary to mark the normal transaction read/write
          since the statement-level flag will be propagated automatically
          inside ha_commit_trans.
        */
        ha_data[binlog_hton->slot].ha_info[0].set_trx_read_write();
    }
    DBUG_VOID_RETURN;
}
0
[ "CWE-264" ]
mysql-server
48bd8b16fe382be302c6f0b45931be5aa6f29a0e
161,741,210,890,686,000,000,000,000,000,000,000,000
29
Bug#24388753: PRIVILEGE ESCALATION USING MYSQLD_SAFE [This is the 5.5/5.6 version of the bugfix]. The problem was that it was possible to write log files ending in .ini/.cnf that later could be parsed as an options file. This made it possible for users to specify startup options without the permissions to do so. This patch fixes the problem by disallowing general query log and slow query log to be written to files ending in .ini and .cnf.
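The fix boils down to a suffix check on user-influenced log paths. A hedged C sketch of the idea (the function names are illustrative, not the MySQL implementation):

#include <string.h>

static int has_suffix(const char *name, const char *suffix)
{
    size_t nl = strlen(name), sl = strlen(suffix);
    return nl >= sl && strcmp(name + nl - sl, suffix) == 0;
}

/* Refuse log destinations that a later startup could re-read as an
 * options file, which is the escalation path described above. */
int log_filename_allowed(const char *name)
{
    return !(has_suffix(name, ".ini") || has_suffix(name, ".cnf"));
}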
static void free_huge_page(struct page *page)
{
    /*
     * Can't pass hstate in here because it is called from the
     * compound page destructor.
     */
    struct hstate *h = page_hstate(page);
    int nid = page_to_nid(page);
    struct hugepage_subpool *spool =
        (struct hugepage_subpool *)page_private(page);

    set_page_private(page, 0);
    page->mapping = NULL;
    BUG_ON(page_count(page));
    BUG_ON(page_mapcount(page));
    INIT_LIST_HEAD(&page->lru);

    spin_lock(&hugetlb_lock);
    if (h->surplus_huge_pages_node[nid] && huge_page_order(h) < MAX_ORDER) {
        update_and_free_page(h, page);
        h->surplus_huge_pages--;
        h->surplus_huge_pages_node[nid]--;
    } else {
        enqueue_huge_page(h, page);
    }
    spin_unlock(&hugetlb_lock);
    hugepage_subpool_put_pages(spool, 1);
}
0
[ "CWE-399" ]
linux
90481622d75715bfcb68501280a917dbfe516029
133,759,679,817,792,480,000,000,000,000,000,000,000
28
hugepages: fix use after free bug in "quota" handling hugetlbfs_{get,put}_quota() are badly named. They don't interact with the general quota handling code, and they don't much resemble its behaviour. Rather than being about maintaining limits on on-disk block usage by particular users, they are instead about maintaining limits on in-memory page usage (including anonymous MAP_PRIVATE copied-on-write pages) associated with a particular hugetlbfs filesystem instance. Worse, they work by having callbacks to the hugetlbfs filesystem code from the low-level page handling code, in particular from free_huge_page(). This is a layering violation of itself, but more importantly, if the kernel does a get_user_pages() on hugepages (which can happen from KVM amongst others), then the free_huge_page() can be delayed until after the associated inode has already been freed. If an unmount occurs at the wrong time, even the hugetlbfs superblock where the "quota" limits are stored may have been freed. Andrew Barry proposed a patch to fix this by having hugepages, instead of storing a pointer to their address_space and reaching the superblock from there, had the hugepages store pointers directly to the superblock, bumping the reference count as appropriate to avoid it being freed. Andrew Morton rejected that version, however, on the grounds that it made the existing layering violation worse. This is a reworked version of Andrew's patch, which removes the extra, and some of the existing, layering violation. It works by introducing the concept of a hugepage "subpool" at the lower hugepage mm layer - that is a finite logical pool of hugepages to allocate from. hugetlbfs now creates a subpool for each filesystem instance with a page limit set, and a pointer to the subpool gets added to each allocated hugepage, instead of the address_space pointer used now. The subpool has its own lifetime and is only freed once all pages in it _and_ all other references to it (i.e. superblocks) are gone. subpools are optional - a NULL subpool pointer is taken by the code to mean that no subpool limits are in effect. Previous discussion of this bug found in: "Fix refcounting in hugetlbfs quota handling.". See: https://lkml.org/lkml/2011/8/11/28 or http://marc.info/?l=linux-mm&m=126928970510627&w=1 v2: Fixed a bug spotted by Hillf Danton, and removed the extra parameter to alloc_huge_page() - since it already takes the vma, it is not necessary. Signed-off-by: Andrew Barry <[email protected]> Signed-off-by: David Gibson <[email protected]> Cc: Hugh Dickins <[email protected]> Cc: Mel Gorman <[email protected]> Cc: Minchan Kim <[email protected]> Cc: Hillf Danton <[email protected]> Cc: Paul Mackerras <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
xps_moveto(gx_device_vector *vdev, double x0, double y0,
           double x, double y, gx_path_type_t type)
{
    gx_device_xps *xps = (gx_device_xps *)vdev;
    char line[300];

    if_debug2m('_', xps->memory, "xps_moveto %g %g\n", x, y);

    /* skip non-drawing paths for now */
    if (!drawing_path(type, xps->filltype)) {
        if_debug1m('_', xps->memory, "xps_moveto: type not supported %x\n", type);
        return 0;
    }

    gs_sprintf(line, " M %g,%g", x, y);
    write_str_to_current_page(xps, line);
    if_debug1m('_', xps->memory, "xps_moveto %s", line);
    return 0;
}
0
[]
ghostpdl
94d8955cb7725eb5f3557ddc02310c76124fdd1a
240,221,743,689,131,500,000,000,000,000,000,000,000
19
Bug 701818: better handling of error during PS/PDF image

In the xps device, if an error occurred after xps_begin_image() but before xps_image_end_image(), *if* the Postscript had called 'restore' as part of the error handling, the image enumerator would have been freed (by the restore) despite the xps device still holding a reference to it.

Simply changing to an allocator unaffected by save/restore doesn't work because the enumerator holds references to other objects (graphics state, color space, possibly others) whose lifespans are inherently controlled by save/restore.

So, add a finalize method for the XPS device's image enumerator (xps_image_enum_finalize()) which takes over cleaning up the memory it allocates and also deals with cleaning up references from the device to the enumerator and from the enumerator to the device.
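The finalize method described above essentially breaks the mutual device/enumerator references before the enumerator's memory goes away. A hedged sketch of that shape, with invented struct and field names rather than ghostpdl's real ones:

#include <stdlib.h>

struct xps_image_enum;

struct xps_device {
    struct xps_image_enum *image_enum; /* device's reference to the enumerator */
};

struct xps_image_enum {
    struct xps_device *dev; /* back-reference into the device */
    void *buffer;           /* memory owned by the enumerator */
};

static void xps_image_enum_finalize(struct xps_image_enum *pie)
{
    if (!pie)
        return;
    /* Drop the device's reference to us before we disappear, so a
     * restore-triggered free cannot leave a dangling pointer behind. */
    if (pie->dev && pie->dev->image_enum == pie)
        pie->dev->image_enum = NULL;
    pie->dev = NULL;
    free(pie->buffer);
    pie->buffer = NULL;
}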
static double mp_cbrt(_cimg_math_parser& mp) {
    return cimg::cbrt(_mp_arg(2));
}
0
[ "CWE-125" ]
CImg
10af1e8c1ad2a58a0a3342a856bae63e8f257abb
204,933,964,827,555,630,000,000,000,000,000,000,000
3
Fix other issues in 'CImg<T>::load_bmp()'.
int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
                        struct ext4_map_blocks *map, int flags)
{
    struct ext4_ext_path *path = NULL;
    struct ext4_extent newex, *ex, *ex2;
    struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
    ext4_fsblk_t newblock = 0;
    int free_on_err = 0, err = 0, depth, ret;
    unsigned int allocated = 0, offset = 0;
    unsigned int allocated_clusters = 0;
    struct ext4_allocation_request ar;
    ext4_lblk_t cluster_offset;
    bool map_from_cluster = false;

    ext_debug("blocks %u/%u requested for inode %lu\n",
              map->m_lblk, map->m_len, inode->i_ino);
    trace_ext4_ext_map_blocks_enter(inode, map->m_lblk, map->m_len, flags);

    /* find extent for this block */
    path = ext4_find_extent(inode, map->m_lblk, NULL, 0);
    if (IS_ERR(path)) {
        err = PTR_ERR(path);
        path = NULL;
        goto out2;
    }

    depth = ext_depth(inode);

    /*
     * consistent leaf must not be empty;
     * this situation is possible, though, _during_ tree modification;
     * this is why assert can't be put in ext4_find_extent()
     */
    if (unlikely(path[depth].p_ext == NULL && depth != 0)) {
        EXT4_ERROR_INODE(inode, "bad extent address "
                         "lblock: %lu, depth: %d pblock %lld",
                         (unsigned long) map->m_lblk, depth,
                         path[depth].p_block);
        err = -EFSCORRUPTED;
        goto out2;
    }

    ex = path[depth].p_ext;
    if (ex) {
        ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
        ext4_fsblk_t ee_start = ext4_ext_pblock(ex);
        unsigned short ee_len;

        /*
         * unwritten extents are treated as holes, except that
         * we split out initialized portions during a write.
         */
        ee_len = ext4_ext_get_actual_len(ex);

        trace_ext4_ext_show_extent(inode, ee_block, ee_start, ee_len);

        /* if found extent covers block, simply return it */
        if (in_range(map->m_lblk, ee_block, ee_len)) {
            newblock = map->m_lblk - ee_block + ee_start;
            /* number of remaining blocks in the extent */
            allocated = ee_len - (map->m_lblk - ee_block);
            ext_debug("%u fit into %u:%d -> %llu\n", map->m_lblk,
                      ee_block, ee_len, newblock);

            /*
             * If the extent is initialized check whether the
             * caller wants to convert it to unwritten.
             */
            if ((!ext4_ext_is_unwritten(ex)) &&
                (flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN)) {
                allocated = convert_initialized_extent(
                        handle, inode, map, &path, allocated);
                goto out2;
            } else if (!ext4_ext_is_unwritten(ex))
                goto out;

            ret = ext4_ext_handle_unwritten_extents(
                    handle, inode, map, &path, flags,
                    allocated, newblock);
            if (ret < 0)
                err = ret;
            else
                allocated = ret;
            goto out2;
        }
    }

    /*
     * requested block isn't allocated yet;
     * we couldn't try to create block if create flag is zero
     */
    if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
        ext4_lblk_t hole_start, hole_len;

        hole_start = map->m_lblk;
        hole_len = ext4_ext_determine_hole(inode, path, &hole_start);
        /*
         * put just found gap into cache to speed up
         * subsequent requests
         */
        ext4_ext_put_gap_in_cache(inode, hole_start, hole_len);

        /* Update hole_len to reflect hole size after map->m_lblk */
        if (hole_start != map->m_lblk)
            hole_len -= map->m_lblk - hole_start;
        map->m_pblk = 0;
        map->m_len = min_t(unsigned int, map->m_len, hole_len);

        goto out2;
    }

    /*
     * Okay, we need to do block allocation.
     */
    newex.ee_block = cpu_to_le32(map->m_lblk);
    cluster_offset = EXT4_LBLK_COFF(sbi, map->m_lblk);

    /*
     * If we are doing bigalloc, check to see if the extent returned
     * by ext4_find_extent() implies a cluster we can use.
     */
    if (cluster_offset && ex &&
        get_implied_cluster_alloc(inode->i_sb, map, ex, path)) {
        ar.len = allocated = map->m_len;
        newblock = map->m_pblk;
        map_from_cluster = true;
        goto got_allocated_blocks;
    }

    /* find neighbour allocated blocks */
    ar.lleft = map->m_lblk;
    err = ext4_ext_search_left(inode, path, &ar.lleft, &ar.pleft);
    if (err)
        goto out2;
    ar.lright = map->m_lblk;
    ex2 = NULL;
    err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright, &ex2);
    if (err)
        goto out2;

    /* Check if the extent after searching to the right implies a
     * cluster we can use. */
    if ((sbi->s_cluster_ratio > 1) && ex2 &&
        get_implied_cluster_alloc(inode->i_sb, map, ex2, path)) {
        ar.len = allocated = map->m_len;
        newblock = map->m_pblk;
        map_from_cluster = true;
        goto got_allocated_blocks;
    }

    /*
     * See if request is beyond maximum number of blocks we can have in
     * a single extent. For an initialized extent this limit is
     * EXT_INIT_MAX_LEN and for an unwritten extent this limit is
     * EXT_UNWRITTEN_MAX_LEN.
     */
    if (map->m_len > EXT_INIT_MAX_LEN &&
        !(flags & EXT4_GET_BLOCKS_UNWRIT_EXT))
        map->m_len = EXT_INIT_MAX_LEN;
    else if (map->m_len > EXT_UNWRITTEN_MAX_LEN &&
             (flags & EXT4_GET_BLOCKS_UNWRIT_EXT))
        map->m_len = EXT_UNWRITTEN_MAX_LEN;

    /* Check if we can really insert (m_lblk)::(m_lblk + m_len) extent */
    newex.ee_len = cpu_to_le16(map->m_len);
    err = ext4_ext_check_overlap(sbi, inode, &newex, path);
    if (err)
        allocated = ext4_ext_get_actual_len(&newex);
    else
        allocated = map->m_len;

    /* allocate new block */
    ar.inode = inode;
    ar.goal = ext4_ext_find_goal(inode, path, map->m_lblk);
    ar.logical = map->m_lblk;
    /*
     * We calculate the offset from the beginning of the cluster
     * for the logical block number, since when we allocate a
     * physical cluster, the physical block should start at the
     * same offset from the beginning of the cluster. This is
     * needed so that future calls to get_implied_cluster_alloc()
     * work correctly.
     */
    offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
    ar.len = EXT4_NUM_B2C(sbi, offset+allocated);
    ar.goal -= offset;
    ar.logical -= offset;
    if (S_ISREG(inode->i_mode))
        ar.flags = EXT4_MB_HINT_DATA;
    else
        /* disable in-core preallocation for non-regular files */
        ar.flags = 0;
    if (flags & EXT4_GET_BLOCKS_NO_NORMALIZE)
        ar.flags |= EXT4_MB_HINT_NOPREALLOC;
    if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
        ar.flags |= EXT4_MB_DELALLOC_RESERVED;
    if (flags & EXT4_GET_BLOCKS_METADATA_NOFAIL)
        ar.flags |= EXT4_MB_USE_RESERVED;
    newblock = ext4_mb_new_blocks(handle, &ar, &err);
    if (!newblock)
        goto out2;
    ext_debug("allocate new block: goal %llu, found %llu/%u\n",
              ar.goal, newblock, allocated);
    free_on_err = 1;
    allocated_clusters = ar.len;
    ar.len = EXT4_C2B(sbi, ar.len) - offset;
    if (ar.len > allocated)
        ar.len = allocated;

got_allocated_blocks:
    /* try to insert new extent into found leaf and return */
    ext4_ext_store_pblock(&newex, newblock + offset);
    newex.ee_len = cpu_to_le16(ar.len);
    /* Mark unwritten */
    if (flags & EXT4_GET_BLOCKS_UNWRIT_EXT) {
        ext4_ext_mark_unwritten(&newex);
        map->m_flags |= EXT4_MAP_UNWRITTEN;
    }

    err = 0;
    if ((flags & EXT4_GET_BLOCKS_KEEP_SIZE) == 0)
        err = check_eofblocks_fl(handle, inode, map->m_lblk,
                                 path, ar.len);
    if (!err)
        err = ext4_ext_insert_extent(handle, inode, &path,
                                     &newex, flags);

    if (err && free_on_err) {
        int fb_flags = flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE ?
            EXT4_FREE_BLOCKS_NO_QUOT_UPDATE : 0;
        /* free data blocks we just allocated */
        /* not a good idea to call discard here directly,
         * but otherwise we'd need to call it every free() */
        ext4_discard_preallocations(inode);
        ext4_free_blocks(handle, inode, NULL, newblock,
                         EXT4_C2B(sbi, allocated_clusters), fb_flags);
        goto out2;
    }

    /* previous routine could use block we allocated */
    newblock = ext4_ext_pblock(&newex);
    allocated = ext4_ext_get_actual_len(&newex);
    if (allocated > map->m_len)
        allocated = map->m_len;
    map->m_flags |= EXT4_MAP_NEW;

    /*
     * Update reserved blocks/metadata blocks after successful
     * block allocation which had been deferred till now.
     */
    if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
        unsigned int reserved_clusters;
        /*
         * Check how many clusters we had reserved this allocated range
         */
        reserved_clusters = get_reserved_cluster_alloc(inode,
                                                map->m_lblk, allocated);
        if (!map_from_cluster) {
            BUG_ON(allocated_clusters < reserved_clusters);
            if (reserved_clusters < allocated_clusters) {
                struct ext4_inode_info *ei = EXT4_I(inode);
                int reservation = allocated_clusters -
                                  reserved_clusters;
                /*
                 * It seems we claimed few clusters outside of
                 * the range of this allocation. We should give
                 * it back to the reservation pool. This can
                 * happen in the following case:
                 *
                 * * Suppose s_cluster_ratio is 4 (i.e., each
                 *   cluster has 4 blocks. Thus, the clusters
                 *   are [0-3],[4-7],[8-11]...
                 * * First comes delayed allocation write for
                 *   logical blocks 10 & 11. Since there were no
                 *   previous delayed allocated blocks in the
                 *   range [8-11], we would reserve 1 cluster
                 *   for this write.
                 * * Next comes write for logical blocks 3 to 8.
                 *   In this case, we will reserve 2 clusters
                 *   (for [0-3] and [4-7]; and not for [8-11] as
                 *   that range has a delayed allocated blocks.
                 *   Thus total reserved clusters now becomes 3.
                 * * Now, during the delayed allocation writeout
                 *   time, we will first write blocks [3-8] and
                 *   allocate 3 clusters for writing these
                 *   blocks. Also, we would claim all these
                 *   three clusters above.
                 * * Now when we come here to writeout the
                 *   blocks [10-11], we would expect to claim
                 *   the reservation of 1 cluster we had made
                 *   (and we would claim it since there are no
                 *   more delayed allocated blocks in the range
                 *   [8-11]. But our reserved cluster count had
                 *   already gone to 0.
                 *
                 * Thus, at the step 4 above when we determine
                 * that there are still some unwritten delayed
                 * allocated blocks outside of our current
                 * block range, we should increment the
                 * reserved clusters count so that when the
                 * remaining blocks finally gets written, we
                 * could claim them.
                 */
                dquot_reserve_block(inode,
                                    EXT4_C2B(sbi, reservation));
                spin_lock(&ei->i_block_reservation_lock);
                ei->i_reserved_data_blocks += reservation;
                spin_unlock(&ei->i_block_reservation_lock);
            }
            /*
             * We will claim quota for all newly allocated blocks.
             * We're updating the reserved space *after* the
             * correction above so we do not accidentally free
             * all the metadata reservation because we might
             * actually need it later on.
             */
            ext4_da_update_reserve_space(inode, allocated_clusters, 1);
        }
    }

    /*
     * Cache the extent and update transaction to commit on fdatasync only
     * when it is _not_ an unwritten extent.
     */
    if ((flags & EXT4_GET_BLOCKS_UNWRIT_EXT) == 0)
        ext4_update_inode_fsync_trans(handle, inode, 1);
    else
        ext4_update_inode_fsync_trans(handle, inode, 0);
out:
    if (allocated > map->m_len)
        allocated = map->m_len;
    ext4_ext_show_leaf(inode, path);
    map->m_flags |= EXT4_MAP_MAPPED;
    map->m_pblk = newblock;
    map->m_len = allocated;
out2:
    ext4_ext_drop_refs(path);
    kfree(path);
    trace_ext4_ext_map_blocks_exit(inode, flags, map,
                                   err ? err : allocated);
    return err ? err : allocated;
}
0
[ "CWE-125" ]
linux
bc890a60247171294acc0bd67d211fa4b88d40ba
141,350,352,748,685,620,000,000,000,000,000,000,000
346
ext4: verify the depth of extent tree in ext4_find_extent()

If there is a corrupted file system where the claimed depth of the extent tree is -1, this can cause a massive buffer overrun leading to sadness.

This addresses CVE-2018-10877.

https://bugzilla.kernel.org/show_bug.cgi?id=199417

Signed-off-by: Theodore Ts'o <[email protected]>
Cc: [email protected]
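The message implies a bounds check on the on-disk depth before it is used to index the extent path. A minimal sketch of such a check, assuming an EXT4_MAX_EXTENT_DEPTH-style limit; the real patch's exact placement and constant may differ:

/* ext4 extent trees are shallow in practice; a sane upper bound is
 * enough to reject corruption. The value here is an assumption. */
#define EXT4_MAX_EXTENT_DEPTH 5

static int check_extent_depth(int depth)
{
    /* A corrupted filesystem can claim depth == -1 (or a huge value);
     * indexing path[depth] with that would overrun the buffer. */
    if (depth < 0 || depth > EXT4_MAX_EXTENT_DEPTH)
        return -1; /* treat as corruption, e.g. -EFSCORRUPTED */
    return 0;
}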
save_dead_message_env (compose_env_t *env)
{
  if (mailvar_is_true (mailvar_name_save))
    {
      mu_stream_t dead_letter, str;
      int rc;
      time_t t;
      struct tm *tm;
      const char *name = getenv ("DEAD");
      char *sender;

      /* FIXME: Use MU_STREAM_APPEND if appenddeadletter, instead of
         the stream manipulations below */
      rc = mu_file_stream_create (&dead_letter, name,
                                  MU_STREAM_CREAT|MU_STREAM_WRITE);
      if (rc)
        {
          mu_error (_("Cannot open file %s: %s"), name, strerror (rc));
          return 1;
        }
      if (mailvar_is_true (mailvar_name_appenddeadletter))
        mu_stream_seek (dead_letter, 0, MU_SEEK_END, NULL);
      else
        mu_stream_truncate (dead_letter, 0);

      time (&t);
      tm = gmtime (&t);
      sender = mu_get_user_email (NULL);
      if (!sender)
        sender = mu_strdup ("UNKNOWN");
      mu_stream_printf (dead_letter, "From %s ", sender);
      free (sender);
      mu_c_streamftime (dead_letter, "%c%n", tm, NULL);

      if (mu_header_get_streamref (env->header, &str) == 0)
        {
          mu_stream_copy (dead_letter, str, 0, NULL);
          mu_stream_unref (str);
        }
      else
        mu_stream_write (dead_letter, "\n", 1, NULL);

      mu_stream_seek (env->compstr, 0, MU_SEEK_SET, NULL);
      mu_stream_copy (dead_letter, env->compstr, 0, NULL);
      mu_stream_write (dead_letter, "\n", 1, NULL);
      mu_stream_destroy (&dead_letter);
    }
  return 0;
}
0
[]
mailutils
4befcfd015256c568121653038accbd84820198f
85,445,495,618,501,900,000,000,000,000,000,000,000
49
mail: disable compose escapes in non-interactive mode.

* NEWS: Document changes.
* doc/texinfo/programs/mail.texi: Document changes.
* mail/send.c (mail_compose_send): Recognize escapes only in interactive mode.
void Chapter::Clear() {
  StrCpy(NULL, &id_);

  while (displays_count_ > 0) {
    Display& d = displays_[--displays_count_];
    d.Clear();
  }

  delete[] displays_;
  displays_ = NULL;

  displays_size_ = 0;
}
0
[ "CWE-20" ]
libvpx
f00890eecdf8365ea125ac16769a83aa6b68792d
9,486,336,427,931,897,000,000,000,000,000,000,000
13
update libwebm to libwebm-1.0.0.27-352-g6ab9fcf

https://chromium.googlesource.com/webm/libwebm/+log/af81f26..6ab9fcf

Change-Id: I9d56e1fbaba9b96404b4fbabefddc1a85b79c25d
struct r_bin_mdmp_obj *r_bin_mdmp_new_buf(RBuffer *buf) {
    bool fail = false;
    struct r_bin_mdmp_obj *obj = R_NEW0 (struct r_bin_mdmp_obj);
    if (!obj) {
        return NULL;
    }
    obj->kv = sdb_new0 ();
    obj->size = (ut32) r_buf_size (buf);

    fail |= (!(obj->streams.ex_threads = r_list_new ()));
    fail |= (!(obj->streams.memories = r_list_newf ((RListFree)free)));
    fail |= (!(obj->streams.memories64.memories = r_list_new ()));
    fail |= (!(obj->streams.memory_infos = r_list_newf ((RListFree)free)));
    fail |= (!(obj->streams.modules = r_list_newf ((RListFree)free)));
    fail |= (!(obj->streams.operations = r_list_newf ((RListFree)free)));
    fail |= (!(obj->streams.thread_infos = r_list_newf ((RListFree)free)));
    fail |= (!(obj->streams.token_infos = r_list_newf ((RListFree)free)));
    fail |= (!(obj->streams.threads = r_list_new ()));
    fail |= (!(obj->streams.unloaded_modules = r_list_newf ((RListFree)free)));

    fail |= (!(obj->pe32_bins = r_list_newf (r_bin_mdmp_free_pe32_bin)));
    fail |= (!(obj->pe64_bins = r_list_newf (r_bin_mdmp_free_pe64_bin)));

    if (fail) {
        r_bin_mdmp_free (obj);
        return NULL;
    }

    obj->b = r_buf_ref (buf);
    if (!r_bin_mdmp_init (obj)) {
        r_bin_mdmp_free (obj);
        return NULL;
    }

    return obj;
}
0
[ "CWE-400", "CWE-703" ]
radare2
27fe8031782d3a06c3998eaa94354867864f9f1b
193,537,887,345,113,460,000,000,000,000,000,000,000
36
Fix DoS in the minidump parser ##crash

* Reported by lazymio via huntr.dev
* Reproducer: mdmp-dos
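The message gives no detail, but a common way to harden a minidump-style parser against DoS reproducers is to sanity-check attacker-controlled counts against the file size before looping or allocating. A hedged, self-contained sketch with hypothetical names, not radare2's actual patch:

#include <stdint.h>
#include <stddef.h>

static int check_stream_count(uint32_t number_of_streams,
                              size_t entry_size, size_t file_size)
{
    /* A huge stream count in a tiny file would make the parser spin
     * or allocate unbounded memory; reject it up front. */
    if (entry_size == 0 || number_of_streams > file_size / entry_size)
        return -1;
    return 0;
}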
static int __follow_pte(struct mm_struct *mm, unsigned long address,
                        pte_t **ptepp, spinlock_t **ptlp)
{
    pgd_t *pgd;
    pud_t *pud;
    pmd_t *pmd;
    pte_t *ptep;

    pgd = pgd_offset(mm, address);
    if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
        goto out;

    pud = pud_offset(pgd, address);
    if (pud_none(*pud) || unlikely(pud_bad(*pud)))
        goto out;

    pmd = pmd_offset(pud, address);
    VM_BUG_ON(pmd_trans_huge(*pmd));
    if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
        goto out;

    /* We cannot handle huge page PFN maps. Luckily they don't exist. */
    if (pmd_huge(*pmd))
        goto out;

    ptep = pte_offset_map_lock(mm, pmd, address, ptlp);
    if (!ptep)
        goto out;
    if (!pte_present(*ptep))
        goto unlock;
    *ptepp = ptep;
    return 0;
unlock:
    pte_unmap_unlock(ptep, *ptlp);
out:
    return -EINVAL;
}
0
[ "CWE-264" ]
linux-2.6
1a5a9906d4e8d1976b701f889d8f35d54b928f25
308,076,509,738,393,900,000,000,000,000,000,000,000
37
mm: thp: fix pmd_bad() triggering in code paths holding mmap_sem read mode

In some cases it may happen that pmd_none_or_clear_bad() is called with the mmap_sem hold in read mode. In those cases the huge page faults can allocate hugepmds under pmd_none_or_clear_bad() and that can trigger a false positive from pmd_bad() that will not like to see a pmd materializing as trans huge.

It's not khugepaged causing the problem, khugepaged holds the mmap_sem in write mode (and all those sites must hold the mmap_sem in read mode to prevent pagetables to go away from under them, during code review it seems vm86 mode on 32bit kernels requires that too unless it's restricted to 1 thread per process or UP builds). The race is only with the huge pagefaults that can convert a pmd_none() into a pmd_trans_huge().

Effectively all these pmd_none_or_clear_bad() sites running with mmap_sem in read mode are somewhat speculative with the page faults, and the result is always undefined when they run simultaneously. This is probably why it wasn't common to run into this. For example if the madvise(MADV_DONTNEED) runs zap_page_range() shortly before the page fault, the hugepage will not be zapped, if the page fault runs first it will be zapped.

Altering pmd_bad() not to error out if it finds hugepmds won't be enough to fix this, because zap_pmd_range would then proceed to call zap_pte_range (which would be incorrect if the pmd become a pmd_trans_huge()).

The simplest way to fix this is to read the pmd in the local stack (regardless of what we read, no need of actual CPU barriers, only compiler barrier needed), and be sure it is not changing under the code that computes its value. Even if the real pmd is changing under the value we hold on the stack, we don't care. If we actually end up in zap_pte_range it means the pmd was not none already and it was not huge, and it can't become huge from under us (khugepaged locking explained above). All we need is to enforce that there is no way anymore that in a code path like below, pmd_trans_huge can be false, but pmd_none_or_clear_bad can run into a hugepmd. The overhead of a barrier() is just a compiler tweak and should not be measurable (I only added it for THP builds). I don't exclude different compiler versions may have prevented the race too by caching the value of *pmd on the stack (that hasn't been verified, but it wouldn't be impossible considering pmd_none_or_clear_bad, pmd_bad, pmd_trans_huge, pmd_none are all inlines and there's no external function called in between pmd_trans_huge and pmd_none_or_clear_bad).

		if (pmd_trans_huge(*pmd)) {
			if (next-addr != HPAGE_PMD_SIZE) {
				VM_BUG_ON(!rwsem_is_locked(&tlb->mm->mmap_sem));
				split_huge_page_pmd(vma->vm_mm, pmd);
			} else if (zap_huge_pmd(tlb, vma, pmd, addr))
				continue;
			/* fall through */
		}
		if (pmd_none_or_clear_bad(pmd))

Because this race condition could be exercised without special privileges this was reported in CVE-2012-1179.

The race was identified and fully explained by Ulrich who debugged it. I'm quoting his accurate explanation below, for reference.

====== start quote =======

mapcount 0 page_mapcount 1
kernel BUG at mm/huge_memory.c:1384!

At some point prior to the panic, a "bad pmd ..." message similar to the following is logged on the console:

mm/memory.c:145: bad pmd ffff8800376e1f98(80000000314000e7).

The "bad pmd ..." message is logged by pmd_clear_bad() before it clears the page's PMD table entry.

    143 void pmd_clear_bad(pmd_t *pmd)
    144 {
->  145         pmd_ERROR(*pmd);
    146         pmd_clear(pmd);
    147 }

After the PMD table entry has been cleared, there is an inconsistency between the actual number of PMD table entries that are mapping the page and the page's map count (_mapcount field in struct page). When the page is subsequently reclaimed, __split_huge_page() detects this inconsistency.

   1381         if (mapcount != page_mapcount(page))
   1382                 printk(KERN_ERR "mapcount %d page_mapcount %d\n",
   1383                        mapcount, page_mapcount(page));
-> 1384         BUG_ON(mapcount != page_mapcount(page));

The root cause of the problem is a race of two threads in a multithreaded process. Thread B incurs a page fault on a virtual address that has never been accessed (PMD entry is zero) while Thread A is executing an madvise() system call on a virtual address within the same 2 MB (huge page) range.

           virtual address space
          .---------------------.
          |                     |
          |                     |
        .-|---------------------|
        | |                     |
        | |                     |<-- B(fault)
        | |                     |
  2 MB  | |/////////////////////|-.
  huge <  |/////////////////////|  > A(range)
  page  | |/////////////////////|-'
        | |                     |
        | |                     |
        '-|---------------------|
          |                     |
          |                     |
          '---------------------'

- Thread A is executing an madvise(..., MADV_DONTNEED) system call on the virtual address range "A(range)" shown in the picture.

sys_madvise
  // Acquire the semaphore in shared mode.
  down_read(&current->mm->mmap_sem)
  ...
  madvise_vma
    switch (behavior)
    case MADV_DONTNEED:
         madvise_dontneed
           zap_page_range
             unmap_vmas
               unmap_page_range
                 zap_pud_range
                   zap_pmd_range
                     //
                     // Assume that this huge page has never been accessed.
                     // I.e. content of the PMD entry is zero (not mapped).
                     //
                     if (pmd_trans_huge(*pmd)) {
                         // We don't get here due to the above assumption.
                     }
                     //
                     // Assume that Thread B incurred a page fault and
         .---------> // sneaks in here as shown below.
         |           //
         |           if (pmd_none_or_clear_bad(pmd))
         |               {
         |                 if (unlikely(pmd_bad(*pmd)))
         |                     pmd_clear_bad
         |                     {
         |                       pmd_ERROR
         |                         // Log "bad pmd ..." message here.
         |                       pmd_clear
         |                         // Clear the page's PMD entry.
         |                         // Thread B incremented the map count
         |                         // in page_add_new_anon_rmap(), but
         |                         // now the page is no longer mapped
         |                         // by a PMD entry (-> inconsistency).
         |                     }
         |               }
         |
         v
- Thread B is handling a page fault on virtual address "B(fault)" shown in the picture.

...
do_page_fault
  __do_page_fault
    // Acquire the semaphore in shared mode.
    down_read_trylock(&mm->mmap_sem)
    ...
    handle_mm_fault
      if (pmd_none(*pmd) && transparent_hugepage_enabled(vma))
      // We get here due to the above assumption (PMD entry is zero).
      do_huge_pmd_anonymous_page
        alloc_hugepage_vma
          // Allocate a new transparent huge page here.
        ...
        __do_huge_pmd_anonymous_page
          ...
          spin_lock(&mm->page_table_lock)
          ...
          page_add_new_anon_rmap
            // Here we increment the page's map count (starts at -1).
            atomic_set(&page->_mapcount, 0)
          set_pmd_at
            // Here we set the page's PMD entry which will be cleared
            // when Thread A calls pmd_clear_bad().
          ...
          spin_unlock(&mm->page_table_lock)

The mmap_sem does not prevent the race because both threads are acquiring it in shared mode (down_read). Thread B holds the page_table_lock while the page's map count and PMD table entry are updated. However, Thread A does not synchronize on that lock.

====== end quote =======

[[email protected]: checkpatch fixes]
Reported-by: Ulrich Obergfell <[email protected]>
Signed-off-by: Andrea Arcangeli <[email protected]>
Acked-by: Johannes Weiner <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: Hugh Dickins <[email protected]>
Cc: Dave Jones <[email protected]>
Acked-by: Larry Woodman <[email protected]>
Acked-by: Rik van Riel <[email protected]>
Cc: <[email protected]> [2.6.38+]
Cc: Mark Salter <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
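The fix's core idea, per the message, is to snapshot *pmd onto the local stack with only a compiler barrier and test the snapshot, so a concurrent huge-page fault cannot change the value between the checks. A reconstruction of a helper along those lines (simplified; not the verbatim patch):

static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd)
{
    pmd_t pmdval = *pmd;   /* one read, kept on the stack */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
    barrier();             /* keep the compiler from re-reading *pmd */
#endif
    if (pmd_none(pmdval) || pmd_trans_huge(pmdval))
        return 1;          /* caller must skip or retry, not zap ptes */
    if (unlikely(pmd_bad(pmdval))) {
        pmd_clear_bad(pmd);
        return 1;
    }
    return 0;
}

Every decision is made on pmdval, so even if the real pmd mutates underneath, the function cannot see pmd_trans_huge() as false and then fall into pmd_bad() on a hugepmd.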
int MP4_ReadBoxContainerChildren( stream_t *p_stream, MP4_Box_t *p_container,
                                  uint32_t i_last_child )
{
    return MP4_ReadBoxContainerChildrenIndexed( p_stream, p_container,
                                                i_last_child, false );
}
0
[ "CWE-120", "CWE-191", "CWE-787" ]
vlc
2e7c7091a61aa5d07e7997b393d821e91f593c39
17,228,442,671,218,225,000,000,000,000,000,000,000
6
demux: mp4: fix buffer overflow in parsing of string boxes.

We ensure that p_box->i_size is never smaller than 8 to avoid an integer underflow in the third argument of the subsequent call to memcpy. We also make sure no truncation occurs when passing values derived from the 64 bit integer p_box->i_size to arguments of malloc and memcpy that may be 32 bit integers on 32 bit platforms.

Signed-off-by: Jean-Baptiste Kempf <[email protected]>
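A hedged sketch of the two checks the message describes: reject boxes smaller than the 8-byte header so the subtraction cannot underflow, and ensure the 64-bit size fits in size_t before malloc/memcpy on 32-bit platforms. The function name and shape are reconstructions, not VLC's actual code:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

static char *read_box_string(const uint8_t *payload, uint64_t i_size)
{
    if (i_size < 8)                 /* box must at least hold its header */
        return NULL;
    uint64_t len64 = i_size - 8;    /* cannot underflow now */
    if (len64 >= SIZE_MAX)          /* would truncate in a 32-bit size_t */
        return NULL;
    size_t len = (size_t)len64;
    char *psz = malloc(len + 1);
    if (!psz)
        return NULL;
    memcpy(psz, payload, len);      /* third argument is now provably sane */
    psz[len] = '\0';
    return psz;
}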
e_ews_connection_create_items_finish (EEwsConnection *cnc,
                                      GAsyncResult *result,
                                      GSList **ids,
                                      GError **error)
{
    GSimpleAsyncResult *simple;
    EwsAsyncData *async_data;

    g_return_val_if_fail (cnc != NULL, FALSE);
    g_return_val_if_fail (
        g_simple_async_result_is_valid (
        result, G_OBJECT (cnc),
        e_ews_connection_create_items),
        FALSE);

    simple = G_SIMPLE_ASYNC_RESULT (result);
    async_data = g_simple_async_result_get_op_res_gpointer (simple);

    if (g_simple_async_result_propagate_error (simple, error))
        return FALSE;

    /* if there is only one item, then check whether it's an error */
    if (async_data->items && !async_data->items->next) {
        EEwsItem *item = async_data->items->data;

        if (item && e_ews_item_get_item_type (item) == E_EWS_ITEM_TYPE_ERROR) {
            if (error)
                *error = g_error_copy (e_ews_item_get_error (item));

            g_slist_free_full (async_data->items, g_object_unref);
            async_data->items = NULL;

            return FALSE;
        }
    }

    *ids = async_data->items;

    return TRUE;
}
0
[ "CWE-295" ]
evolution-ews
915226eca9454b8b3e5adb6f2fff9698451778de
310,579,394,826,716,320,000,000,000,000,000,000,000
39
I#27 - SSL Certificates are not validated

This depends on https://gitlab.gnome.org/GNOME/evolution-data-server/commit/6672b8236139bd6ef41ecb915f4c72e2a052dba5 too.

Closes https://gitlab.gnome.org/GNOME/evolution-ews/issues/27
PJ_DEF(pj_status_t) pjmedia_sdp_neg_modify_local_offer2(
                                    pj_pool_t *pool,
                                    pjmedia_sdp_neg *neg,
                                    unsigned flags,
                                    const pjmedia_sdp_session *local)
{
    pjmedia_sdp_session *new_offer;
    pjmedia_sdp_session *old_offer;
    char media_used[PJMEDIA_MAX_SDP_MEDIA];
    unsigned oi; /* old offer media index */
    pj_status_t status;

    /* Check arguments are valid. */
    PJ_ASSERT_RETURN(pool && neg && local, PJ_EINVAL);

    /* Can only do this in STATE_DONE. */
    PJ_ASSERT_RETURN(neg->state == PJMEDIA_SDP_NEG_STATE_DONE,
                     PJMEDIA_SDPNEG_EINSTATE);

    /* Validate the new offer */
    status = pjmedia_sdp_validate(local);
    if (status != PJ_SUCCESS)
        return status;

    /* Change state to STATE_LOCAL_OFFER */
    neg->state = PJMEDIA_SDP_NEG_STATE_LOCAL_OFFER;

    /* Init vars */
    pj_bzero(media_used, sizeof(media_used));
    old_offer = neg->active_local_sdp;
    new_offer = pjmedia_sdp_session_clone(pool, local);

    /* RFC 3264 Section 8: When issuing an offer that modifies the session,
     * the "o=" line of the new SDP MUST be identical to that in the
     * previous SDP, except that the version in the origin field MUST
     * increment by one from the previous SDP.
     */
    pj_strdup(pool, &new_offer->origin.user, &old_offer->origin.user);
    new_offer->origin.id = old_offer->origin.id;
    pj_strdup(pool, &new_offer->origin.net_type, &old_offer->origin.net_type);
    pj_strdup(pool, &new_offer->origin.addr_type,&old_offer->origin.addr_type);
    pj_strdup(pool, &new_offer->origin.addr, &old_offer->origin.addr);

    if ((flags & PJMEDIA_SDP_NEG_ALLOW_MEDIA_CHANGE) == 0) {
        /* Generating the new offer, in the case media lines doesn't match the
         * active SDP (e.g. current/active SDP's have m=audio and m=video lines,
         * and the new offer only has m=audio line), the negotiator will fix
         * the new offer by reordering and adding the missing media line with
         * port number set to zero.
         */
        for (oi = 0; oi < old_offer->media_count; ++oi) {
            pjmedia_sdp_media *om;
            pjmedia_sdp_media *nm;
            unsigned ni; /* new offer media index */
            pj_bool_t found = PJ_FALSE;

            om = old_offer->media[oi];
            for (ni = oi; ni < new_offer->media_count; ++ni) {
                nm = new_offer->media[ni];
                if (pj_strcmp(&nm->desc.media, &om->desc.media) == 0) {
                    if (ni != oi) {
                        /* The same media found but the position unmatched to
                         * the old offer, so let's put this media in the right
                         * place, and keep the order of the rest.
                         */
                        pj_array_insert(
                            new_offer->media,            /* array    */
                            sizeof(new_offer->media[0]), /* elmt size*/
                            ni,                          /* count    */
                            oi,                          /* pos      */
                            &nm);                        /* new elmt */
                    }
                    found = PJ_TRUE;
                    break;
                }
            }
            if (!found) {
                pjmedia_sdp_media *m;

                m = sdp_media_clone_deactivate(pool, om, om, local);

                pj_array_insert(new_offer->media, sizeof(new_offer->media[0]),
                                new_offer->media_count++, oi, &m);
            }
        }
    } else {
        /* If media type change is allowed, the negotiator only needs to fix
         * the new offer by adding the missing media line(s) with port number
         * set to zero.
         */
        for (oi = new_offer->media_count; oi < old_offer->media_count; ++oi) {
            pjmedia_sdp_media *m;

            m = sdp_media_clone_deactivate(pool, old_offer->media[oi],
                                           old_offer->media[oi], local);

            pj_array_insert(new_offer->media, sizeof(new_offer->media[0]),
                            new_offer->media_count++, oi, &m);
        }
    }

    /* New_offer fixed */
#if PJMEDIA_SDP_NEG_COMPARE_BEFORE_INC_VERSION
    new_offer->origin.version = old_offer->origin.version;

    if (pjmedia_sdp_session_cmp(new_offer, neg->initial_sdp, 0) != PJ_SUCCESS)
    {
        ++new_offer->origin.version;
    }
#else
    new_offer->origin.version = old_offer->origin.version + 1;
#endif

    neg->initial_sdp_tmp = neg->initial_sdp;
    neg->initial_sdp = new_offer;
    neg->neg_local_sdp = pjmedia_sdp_session_clone(pool, new_offer);

    return PJ_SUCCESS;
}
1
[ "CWE-400", "CWE-200", "CWE-754" ]
pjproject
97b3d7addbaa720b7ddb0af9bf6f3e443e664365
197,312,442,692,241,200,000,000,000,000,000,000,000
121
Merge pull request from GHSA-hvq6-f89p-frvp
Error HeifContext::get_id_of_non_virtual_child_image(heif_item_id id,
                                                     heif_item_id& out) const
{
  std::string image_type = m_heif_file->get_item_type(id);
  if (image_type=="grid" ||
      image_type=="iden" ||
      image_type=="iovl") {
    auto iref_box = m_heif_file->get_iref_box();
    std::vector<heif_item_id> image_references = iref_box->get_references(id, fourcc("dimg"));

    // TODO: check whether this really can be recursive (e.g. overlay of grid images)

    if (image_references.empty()) {
      return Error(heif_error_Invalid_input,
                   heif_suberror_No_item_data,
                   "Derived image does not reference any other image items");
    }
    else {
      return get_id_of_non_virtual_child_image(image_references[0], out);
    }
  }
  else {
    out = id;
    return Error::Ok;
  }
}
1
[ "CWE-125" ]
libheif
f7399b62d7fbc596f1b2871578c1d2053bedf1dd
339,461,888,669,115,020,000,000,000,000,000,000,000
25
Handle case where referenced "iref" box doesn't exist (fixes #138).
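The fix referenced here is essentially a NULL check: a malformed file may omit the "iref" box that the function above unconditionally dereferences via iref_box->get_references(). A C-style sketch of the guard, with hypothetical stand-ins for the real accessors:

#include <stddef.h>

struct heif_file;
struct iref_box;
struct iref_box *heif_file_get_iref_box(struct heif_file *f); /* hypothetical */

static int resolve_derived_image(struct heif_file *f)
{
    struct iref_box *iref = heif_file_get_iref_box(f);
    if (iref == NULL)
        return -1; /* e.g. heif_suberror_No_iref_box in the real code */
    /* ... walk the "dimg" references as in the function above ... */
    return 0;
}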
end_write_group(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
                bool end_of_records)
{
  TABLE *table= join_tab->table;
  int idx= -1;
  DBUG_ENTER("end_write_group");

  if (!join->first_record || end_of_records ||
      (idx=test_if_group_changed(join->group_fields)) >= 0)
  {
    if (join->first_record || (end_of_records && !join->group))
    {
      if (join->procedure)
        join->procedure->end_group();
      int send_group_parts= join->send_group_parts;
      if (idx < send_group_parts)
      {
        if (!join->first_record)
        {
          /* No matching rows for group function */
          join->clear();
        }
        copy_sum_funcs(join->sum_funcs,
                       join->sum_funcs_end[send_group_parts]);
        if (!join_tab->having || join_tab->having->val_int())
        {
          int error= table->file->ha_write_tmp_row(table->record[0]);
          if (unlikely(error) &&
              create_internal_tmp_table_from_heap(join->thd, table,
                                 join_tab->tmp_table_param->start_recinfo,
                                 &join_tab->tmp_table_param->recinfo,
                                 error, 0, NULL))
            DBUG_RETURN(NESTED_LOOP_ERROR);
        }
        if (unlikely(join->rollup.state != ROLLUP::STATE_NONE))
        {
          if (unlikely(join->rollup_write_data((uint) (idx+1),
                                               join_tab->tmp_table_param,
                                               table)))
          {
            DBUG_RETURN(NESTED_LOOP_ERROR);
          }
        }
        if (end_of_records)
          goto end;
      }
    }
    else
    {
      if (end_of_records)
        goto end;
      join->first_record=1;
      (void) test_if_group_changed(join->group_fields);
    }
    if (idx < (int) join->send_group_parts)
    {
      copy_fields(join_tab->tmp_table_param);
      if (unlikely(copy_funcs(join_tab->tmp_table_param->items_to_copy,
                              join->thd)))
        DBUG_RETURN(NESTED_LOOP_ERROR);
      if (unlikely(init_sum_functions(join->sum_funcs,
                                      join->sum_funcs_end[idx+1])))
        DBUG_RETURN(NESTED_LOOP_ERROR);
      if (unlikely(join->procedure))
        join->procedure->add();
      goto end;
    }
  }
  if (unlikely(update_sum_func(join->sum_funcs)))
    DBUG_RETURN(NESTED_LOOP_ERROR);
  if (unlikely(join->procedure))
    join->procedure->add();
end:
  if (unlikely(join->thd->check_killed()))
  {
    DBUG_RETURN(NESTED_LOOP_KILLED); /* purecov: inspected */
  }
  DBUG_RETURN(NESTED_LOOP_OK);
}
0
[]
server
ff77a09bda884fe6bf3917eb29b9d3a2f53f919b
8,039,926,638,485,735,000,000,000,000,000,000,000
79
MDEV-22464 Server crash on UPDATE with nested subquery

Uninitialized ref_pointer_array[] because setup_fields() got an empty fields list. mysql_multi_update() for some reason does that by substituting the fields list with the empty total_list for the mysql_select() call (looks like a wrong merge, since total_list is not used anywhere else and is always empty).

The fix would be to return the original fields list. But this fails the update_use_source.test case:

  --error ER_BAD_FIELD_ERROR
  update v1 set t1c1=2 order by 1;

Actually, not failing the above seems to be OK. The other fix would be to keep resolve_in_select_list false (and that keeps outer context from being resolved in Item_ref::fix_fields()). This fix is more consistent with how SELECT behaves:

  --error ER_SUBQUERY_NO_1_ROW
  select a from t1 where a= (select 2 from t1 having (a = 3));

So this patch implements this fix.
cmd_http_sendhex(CMD_ARGS)
{
    struct http *hp;
    char buf[3], *q;
    uint8_t *p;
    int i, j, l;

    (void)cmd;
    (void)vl;
    CAST_OBJ_NOTNULL(hp, priv, HTTP_MAGIC);
    AN(av[1]);
    AZ(av[2]);
    l = strlen(av[1]) / 2;
    p = malloc(l);
    AN(p);
    q = av[1];
    for (i = 0; i < l; i++) {
        while (vct_issp(*q))
            q++;
        if (*q == '\0')
            break;
        memcpy(buf, q, 2);
        q += 2;
        buf[2] = '\0';
        if (!vct_ishex(buf[0]) || !vct_ishex(buf[1]))
            vtc_log(hp->vl, 0, "Illegal Hex char \"%c%c\"",
                    buf[0], buf[1]);
        p[i] = (uint8_t)strtoul(buf, NULL, 16);
    }
    vtc_hexdump(hp->vl, 4, "sendhex", (void*)p, i);
    j = write(hp->fd, p, i);
    assert(j == i);
    free(p);
}
0
[ "CWE-269" ]
Varnish-Cache
85e8468bec9416bd7e16b0d80cb820ecd2b330c3
256,516,207,942,745,900,000,000,000,000,000,000,000
35
Do not consider a CR by itself as a valid line terminator

Varnish (prior to version 4.0) was not following the standard with regard to line separator.

Spotted and analyzed by: Régis Leroy [regilero] [email protected]
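When two HTTP parsers disagree about what terminates a header line, requests can be smuggled past one of them, which is why a lone CR must not count. A hedged sketch of strict line-termination, a reconstruction rather than varnishd's actual parser:

#include <stddef.h>

/* Returns the number of bytes up to and including the terminator,
 * or 0 if the buffer does not yet contain a valid terminator. */
static size_t find_eol(const char *p, size_t len)
{
    for (size_t i = 0; i < len; i++) {
        if (p[i] == '\n')
            return i + 1;             /* bare LF terminates */
        if (p[i] == '\r') {
            if (i + 1 < len && p[i + 1] == '\n')
                return i + 2;         /* CRLF terminates */
            return 0;                 /* lone CR: not a line boundary */
        }
    }
    return 0; /* no terminator seen yet */
}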
void swoole_serialize_init(int module_number TSRMLS_DC)
{
    SWOOLE_INIT_CLASS_ENTRY(swoole_serialize_ce, "swoole_serialize", "Swoole\\Serialize", swoole_serialize_methods);
    swoole_serialize_class_entry_ptr = zend_register_internal_class(&swoole_serialize_ce TSRMLS_CC);
    SWOOLE_CLASS_ALIAS(swoole_serialize, "Swoole\\Serialize");

    // ZVAL_STRING(&swSeriaG.sleep_fname, "__sleep");
    zend_string *zstr_sleep = zend_string_init("__sleep", sizeof ("__sleep") - 1, 1);
    zend_string *zstr_weekup = zend_string_init("__weekup", sizeof ("__weekup") - 1, 1);
    ZVAL_STR(&swSeriaG.sleep_fname, zstr_sleep);
    ZVAL_STR(&swSeriaG.weekup_fname, zstr_weekup);
    // ZVAL_STRING(&swSeriaG.weekup_fname, "__weekup");

    memset(&swSeriaG.filter, 0, sizeof (swSeriaG.filter));
    memset(&mini_filter, 0, sizeof (mini_filter));

    REGISTER_LONG_CONSTANT("SWOOLE_FAST_PACK", SW_FAST_PACK, CONST_CS | CONST_PERSISTENT);
    REGISTER_LONG_CONSTANT("UNSERIALIZE_OBJECT_TO_ARRAY", UNSERIALIZE_OBJECT_TO_ARRAY, CONST_CS | CONST_PERSISTENT);
    REGISTER_LONG_CONSTANT("UNSERIALIZE_OBJECT_TO_STDCLASS", UNSERIALIZE_OBJECT_TO_STDCLASS, CONST_CS | CONST_PERSISTENT);
}
0
[ "CWE-200", "CWE-502" ]
swoole-src
4cdbce5d9bf2fe596bb6acd7d6611f9e8c253a76
119,155,685,754,960,360,000,000,000,000,000,000,000
20
add buffer end check
char *convert_query_hexchar(char *txt)
{
    int d1, d2;
    if (strlen(txt) < 3) {
        *txt = '\0';
        return txt-1;
    }
    d1 = hextoint(*(txt+1));
    d2 = hextoint(*(txt+2));
    if (d1<0 || d2<0) {
        strcpy(txt, txt+3);
        return txt-1;
    } else {
        *txt = d1 * 16 + d2;
        strcpy(txt+1, txt+3);
        return txt;
    }
}
0
[]
cgit
02a545e63454530c1639014d3239c14ced2022c6
10,782,252,576,671,145,000,000,000,000,000,000,000
18
Add support for cloning over http

This patch implements basic support for cloning over http, based on the work on git-http-backend by Shawn O. Pearce.

Signed-off-by: Lars Hjemli <[email protected]>
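For context, convert_query_hexchar() above decodes one %XX escape in place and returns where scanning should resume (one position back on failure, so the caller's increment re-examines the right character). A hedged usage sketch, assuming the function and its hextoint() helper are in scope:

#include <stdio.h>

int main(void)
{
    char q[] = "repo%2Fpath%20x";
    char *p;

    for (p = q; *p; p++)
        if (*p == '%')
            p = convert_query_hexchar(p); /* decodes "%2F" -> '/' in place */

    printf("%s\n", q); /* prints: repo/path x */
    return 0;
}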
bool Aggregator_distinct::unique_walk_function_for_count(void *element)
{
  Item_sum_count *sum= (Item_sum_count *)item_sum;
  sum->count++;
  return 0;
}
0
[ "CWE-120" ]
server
eca207c46293bc72dd8d0d5622153fab4d3fccf1
62,167,815,905,399,830,000,000,000,000,000,000,000
6
MDEV-25317 Assertion `scale <= precision' failed in decimal_bin_size And Assertion `scale >= 0 && precision > 0 && scale <= precision' failed in decimal_bin_size_inline/decimal_bin_size.

Precision should be kept below DECIMAL_MAX_SCALE for computations. It can be bigger in Item_decimal. I'd fix this too, but it changes the existing behaviour, so it is problematic to fix.
void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
                                unsigned int *size)
{
    const struct xt_target *target = t->u.kernel.target;
    struct compat_xt_entry_target *ct = (struct compat_xt_entry_target *)t;
    int off = xt_compat_target_offset(target);
    u_int16_t tsize = ct->u.user.target_size;
    char name[sizeof(t->u.user.name)];

    t = *dstptr;
    memcpy(t, ct, sizeof(*ct));
    if (target->compat_from_user)
        target->compat_from_user(t->data, ct->data);
    else
        memcpy(t->data, ct->data, tsize - sizeof(*ct));

    tsize += off;
    t->u.user.target_size = tsize;
    strlcpy(name, target->name, sizeof(name));
    module_put(target->me);
    strncpy(t->u.user.name, name, sizeof(t->u.user.name));

    *size += off;
    *dstptr += tsize;
}
0
[ "CWE-787" ]
linux
b29c457a6511435960115c0f548c4360d5f4801d
325,466,358,307,912,460,000,000,000,000,000,000,000
25
netfilter: x_tables: fix compat match/target pad out-of-bound write

xt_compat_match/target_from_user doesn't check that zeroing the area to start of next rule won't write past end of allocated ruleset blob. Remove this code and zero the entire blob beforehand.

Reported-by: [email protected]
Reported-by: Andy Nguyen <[email protected]>
Fixes: 9fa492cdc160c ("[NETFILTER]: x_tables: simplify compat API")
Signed-off-by: Florian Westphal <[email protected]>
Signed-off-by: Pablo Neira Ayuso <[email protected]>
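The remedy the message describes moves the zeroing out of the per-entry copy path: instead of each translated match/target clearing the pad up to the next rule (which can run past the blob), the whole destination blob is zeroed once up front. A hedged sketch of that allocation pattern, not the kernel's actual code:

#include <stdlib.h>
#include <string.h>

static void *alloc_translated_blob(size_t size)
{
    void *blob = malloc(size);
    if (blob != NULL)
        memset(blob, 0, size); /* every pad byte is zero by construction,
                                  so per-entry pad zeroing can be removed */
    return blob;
}

In plain C the same effect falls out of calloc(1, size); in the kernel the analogous allocators have zeroing variants.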
int nfc_activate_target(struct nfc_dev *dev, u32 target_idx, u32 protocol)
{
    int rc;
    struct nfc_target *target;

    pr_debug("dev_name=%s target_idx=%u protocol=%u\n",
             dev_name(&dev->dev), target_idx, protocol);

    device_lock(&dev->dev);

    if (!device_is_registered(&dev->dev)) {
        rc = -ENODEV;
        goto error;
    }

    if (dev->active_target) {
        rc = -EBUSY;
        goto error;
    }

    target = nfc_find_target(dev, target_idx);
    if (target == NULL) {
        rc = -ENOTCONN;
        goto error;
    }

    rc = dev->ops->activate_target(dev, target, protocol);
    if (!rc) {
        dev->active_target = target;
        dev->rf_mode = NFC_RF_INITIATOR;

        if (dev->ops->check_presence && !dev->shutting_down)
            mod_timer(&dev->check_pres_timer, jiffies +
                      msecs_to_jiffies(NFC_CHECK_PRES_FREQ_MS));
    }

error:
    device_unlock(&dev->dev);
    return rc;
}
1
[ "CWE-416" ]
linux
da5c0f119203ad9728920456a0f52a6d850c01cd
92,770,391,428,014,000,000,000,000,000,000,000,000
40
nfc: replace improper check device_is_registered() in netlink related functions

The device_is_registered() in nfc core is used to check whether nfc device is registered in netlink related functions such as nfc_fw_download(), nfc_dev_up() and so on. Although device_is_registered() is protected by device_lock, there is still a race condition between device_del() and device_is_registered(). The root cause is that kobject_del() in device_del() is not protected by device_lock.

    (cleanup task)          |     (netlink task)
                            |
nfc_unregister_device       | nfc_fw_download
 device_del                 |  device_lock
  ...                       |  if (!device_is_registered)//(1)
  kobject_del//(2)          |  ...
  ...                       |  device_unlock

The device_is_registered() returns the value of state_in_sysfs and the state_in_sysfs is set to zero in kobject_del(). If we pass check in position (1), then set zero in position (2). As a result, the check in position (1) is useless.

This patch uses bool variable instead of device_is_registered() to judge whether the nfc device is registered, which is well synchronized.

Fixes: 3e256b8f8dfa ("NFC: add nfc subsystem core")
Signed-off-by: Duoming Zhou <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
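A sketch of the replacement described in the message: a flag flipped under device_lock in the teardown path, so the registered-check and the unregistration are serialized by the same lock. The shape below reuses the shutting_down field visible in nfc_activate_target() above, but it is a reconstruction, not the verbatim patch:

/* Teardown: mark the device dead while holding the lock the
 * netlink handlers take, then tear down sysfs state. */
void nfc_unregister_device(struct nfc_dev *dev)
{
    device_lock(&dev->dev);
    dev->shutting_down = true;
    device_unlock(&dev->dev);

    device_del(&dev->dev); /* kobject_del() no longer races the check */
}

/* Netlink path: fully serialized against teardown by device_lock. */
int nfc_dev_up(struct nfc_dev *dev)
{
    int rc = 0;

    device_lock(&dev->dev);
    if (dev->shutting_down) {
        rc = -ENODEV;
        goto out;
    }
    /* ... power the device up ... */
out:
    device_unlock(&dev->dev);
    return rc;
}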
encode_SET_IP_TTL(const struct ofpact_ip_ttl *ttl,
                  enum ofp_version ofp_version, struct ofpbuf *out)
{
    if (ofp_version >= OFP11_VERSION) {
        put_OFPAT11_SET_NW_TTL(out, ttl->ttl);
    } else {
        struct mf_subfield dst = { .field = mf_from_id(MFF_IP_TTL),
                                   .ofs = 0, .n_bits = 8 };
        put_reg_load(out, &dst, ttl->ttl);
    }
}
0
[ "CWE-125" ]
ovs
9237a63c47bd314b807cda0bd2216264e82edbe8
316,686,485,532,347,930,000,000,000,000,000,000,000
11
ofp-actions: Avoid buffer overread in BUNDLE action decoding.

Reported-at: https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=9052
Signed-off-by: Ben Pfaff <[email protected]>
Acked-by: Justin Pettit <[email protected]>
static int manager_setup_signals(Manager *m) {
    struct sigaction sa = {
        .sa_handler = SIG_DFL,
        .sa_flags = SA_NOCLDSTOP|SA_RESTART,
    };
    sigset_t mask;
    int r;

    assert(m);

    assert_se(sigaction(SIGCHLD, &sa, NULL) == 0);

    /* We make liberal use of realtime signals here. On
     * Linux/glibc we have 30 of them (with the exception of Linux
     * on hppa, see below), between SIGRTMIN+0 ... SIGRTMIN+30
     * (aka SIGRTMAX). */

    assert_se(sigemptyset(&mask) == 0);
    sigset_add_many(&mask,
            SIGCHLD,     /* Child died */
            SIGTERM,     /* Reexecute daemon */
            SIGHUP,      /* Reload configuration */
            SIGUSR1,     /* systemd/upstart: reconnect to D-Bus */
            SIGUSR2,     /* systemd: dump status */
            SIGINT,      /* Kernel sends us this on control-alt-del */
            SIGWINCH,    /* Kernel sends us this on kbrequest (alt-arrowup) */
            SIGPWR,      /* Some kernel drivers and upsd send us this on power failure */

            SIGRTMIN+0,  /* systemd: start default.target */
            SIGRTMIN+1,  /* systemd: isolate rescue.target */
            SIGRTMIN+2,  /* systemd: isolate emergency.target */
            SIGRTMIN+3,  /* systemd: start halt.target */
            SIGRTMIN+4,  /* systemd: start poweroff.target */
            SIGRTMIN+5,  /* systemd: start reboot.target */
            SIGRTMIN+6,  /* systemd: start kexec.target */

            /* ... space for more special targets ... */

            SIGRTMIN+13, /* systemd: Immediate halt */
            SIGRTMIN+14, /* systemd: Immediate poweroff */
            SIGRTMIN+15, /* systemd: Immediate reboot */
            SIGRTMIN+16, /* systemd: Immediate kexec */

            /* ... space for more immediate system state changes ... */

            SIGRTMIN+20, /* systemd: enable status messages */
            SIGRTMIN+21, /* systemd: disable status messages */
            SIGRTMIN+22, /* systemd: set log level to LOG_DEBUG */
            SIGRTMIN+23, /* systemd: set log level to LOG_INFO */
            SIGRTMIN+24, /* systemd: Immediate exit (--user only) */

            /* .. one free signal here ... */
#if !defined(__hppa64__) && !defined(__hppa__)
            /* Apparently Linux on hppa has fewer RT
             * signals (SIGRTMAX is SIGRTMIN+25 there),
             * hence let's not try to make use of them
             * here. Since these commands are accessible
             * by different means and only really a safety
             * net, the missing functionality on hppa
             * shouldn't matter. */

            SIGRTMIN+26, /* systemd: set log target to journal-or-kmsg */
            SIGRTMIN+27, /* systemd: set log target to console */
            SIGRTMIN+28, /* systemd: set log target to kmsg */
            SIGRTMIN+29, /* systemd: set log target to syslog-or-kmsg (obsolete) */

            /* ... one free signal here SIGRTMIN+30 ... */
#endif
            -1);
    assert_se(sigprocmask(SIG_SETMASK, &mask, NULL) == 0);

    m->signal_fd = signalfd(-1, &mask, SFD_NONBLOCK|SFD_CLOEXEC);
    if (m->signal_fd < 0)
        return -errno;

    r = sd_event_add_io(m->event, &m->signal_event_source, m->signal_fd, EPOLLIN, manager_dispatch_signal_fd, m);
    if (r < 0)
        return r;

    (void) sd_event_source_set_description(m->signal_event_source, "manager-signal");

    /* Process signals a bit earlier than the rest of things, but later than notify_fd processing, so that the
     * notify processing can still figure out to which process/service a message belongs, before we reap the
     * process. Also, process this before handling cgroup notifications, so that we always collect child exit
     * status information before detecting that there's no process in a cgroup. */
    r = sd_event_source_set_priority(m->signal_event_source, SD_EVENT_PRIORITY_NORMAL-6);
    if (r < 0)
        return r;

    if (MANAGER_IS_SYSTEM(m))
        return enable_special_signals(m);

    return 0;
}
0
[ "CWE-20" ]
systemd
531ac2b2349da02acc9c382849758e07eb92b020
34,214,659,108,187,805,000,000,000,000,000,000,000
95
If the notification message length is 0, ignore the message (#4237)

Fixes #4234.

Signed-off-by: Jorge Niedbalski <[email protected]>
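A hedged sketch of the guard the message describes: a zero-length datagram from recvmsg() is valid but carries nothing to parse, so it should be consumed and ignored rather than fed to the assignment parsing. Simplified, not systemd's actual manager code:

#include <stdbool.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/uio.h>

static bool dispatch_notify(int fd)
{
    char buf[4096];
    struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) - 1 };
    struct msghdr msghdr = { .msg_iov = &iov, .msg_iovlen = 1 };
    ssize_t n;

    n = recvmsg(fd, &msghdr, MSG_DONTWAIT);
    if (n < 0)
        return false;           /* EAGAIN or a real error */
    if (n == 0)
        return true;            /* zero-length notification: ignore it */

    buf[n] = '\0';
    /* ... parse newline-separated VAR=VALUE assignments in buf ... */
    return true;
}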
static struct smp_ltk_info *get_ltk(GKeyFile *key_file, const char *peer,
                                    uint8_t peer_type, const char *group)
{
    struct smp_ltk_info *ltk = NULL;
    GError *gerr = NULL;
    bool master;
    char *key;
    char *rand = NULL;

    key = g_key_file_get_string(key_file, group, "Key", NULL);
    if (!key || strlen(key) < 32)
        goto failed;

    rand = g_key_file_get_string(key_file, group, "Rand", NULL);
    if (!rand)
        goto failed;

    ltk = g_new0(struct smp_ltk_info, 1);

    /* Default to assuming a master key */
    ltk->master = true;

    str2ba(peer, &ltk->bdaddr);
    ltk->bdaddr_type = peer_type;

    /*
     * Long term keys should respond to an identity address which can
     * either be a public address or a random static address. Keys
     * stored for resolvable random and unresolvable random addresses
     * are ignored.
     *
     * This is an extra sanity check for older kernel versions or older
     * daemons that might have been instructed to store long term keys
     * for these temporary addresses.
     */
    if (ltk->bdaddr_type == BDADDR_LE_RANDOM &&
        (ltk->bdaddr.b[5] & 0xc0) != 0xc0) {
        g_free(ltk);
        ltk = NULL;
        goto failed;
    }

    if (!strncmp(key, "0x", 2))
        str2buf(&key[2], ltk->val, sizeof(ltk->val));
    else
        str2buf(&key[0], ltk->val, sizeof(ltk->val));

    if (!strncmp(rand, "0x", 2)) {
        uint64_t rand_le;
        str2buf(&rand[2], (uint8_t *) &rand_le, sizeof(rand_le));
        ltk->rand = le64_to_cpu(rand_le);
    } else {
        sscanf(rand, "%" PRIu64, &ltk->rand);
    }

    ltk->authenticated = g_key_file_get_integer(key_file, group,
                                                "Authenticated", NULL);
    ltk->enc_size = g_key_file_get_integer(key_file, group, "EncSize", NULL);
    ltk->ediv = g_key_file_get_integer(key_file, group, "EDiv", NULL);

    master = g_key_file_get_boolean(key_file, group, "Master", &gerr);
    if (gerr)
        g_error_free(gerr);
    else
        ltk->master = master;

    ltk->is_blocked = is_blocked_key(HCI_BLOCKED_KEY_TYPE_LTK, ltk->val);

failed:
    g_free(key);
    g_free(rand);

    return ltk;
}
0
[ "CWE-862", "CWE-863" ]
bluez
b497b5942a8beb8f89ca1c359c54ad67ec843055
122,422,596,497,648,280,000,000,000,000,000,000,000
76
adapter: Fix storing discoverable setting

The discoverable setting shall only be stored when changed via the Discoverable property, and not when a discovery client sets it, as that is considered temporary, lasting just for the lifetime of the discovery.
getCharacters (FileInfo * nested, CharsString * characters)
{
  /*Get ruleChars string */
  CharsString token;
  if (getToken (nested, &token, "characters"))
    if (parseChars (nested, characters, &token))
      return 1;
  return 0;
}
0
[]
liblouis
dc97ef791a4fae9da11592c79f9f79e010596e0c
233,518,646,446,890,950,000,000,000,000,000,000,000
9
Merge branch 'table_resolver'
static int sctp_setsockopt_peer_primary_addr(struct sock *sk,
                                             char __user *optval,
                                             unsigned int optlen)
{
    struct net *net = sock_net(sk);
    struct sctp_sock *sp;
    struct sctp_association *asoc = NULL;
    struct sctp_setpeerprim prim;
    struct sctp_chunk *chunk;
    struct sctp_af *af;
    int err;

    sp = sctp_sk(sk);

    if (!net->sctp.addip_enable)
        return -EPERM;

    if (optlen != sizeof(struct sctp_setpeerprim))
        return -EINVAL;

    if (copy_from_user(&prim, optval, optlen))
        return -EFAULT;

    asoc = sctp_id2assoc(sk, prim.sspp_assoc_id);
    if (!asoc)
        return -EINVAL;

    if (!asoc->peer.asconf_capable)
        return -EPERM;

    if (asoc->peer.addip_disabled_mask & SCTP_PARAM_SET_PRIMARY)
        return -EPERM;

    if (!sctp_state(asoc, ESTABLISHED))
        return -ENOTCONN;

    af = sctp_get_af_specific(prim.sspp_addr.ss_family);
    if (!af)
        return -EINVAL;

    if (!af->addr_valid((union sctp_addr *)&prim.sspp_addr, sp, NULL))
        return -EADDRNOTAVAIL;

    if (!sctp_assoc_lookup_laddr(asoc, (union sctp_addr *)&prim.sspp_addr))
        return -EADDRNOTAVAIL;

    /* Create an ASCONF chunk with SET_PRIMARY parameter */
    chunk = sctp_make_asconf_set_prim(asoc,
                                      (union sctp_addr *)&prim.sspp_addr);
    if (!chunk)
        return -ENOMEM;

    err = sctp_send_asconf(asoc, chunk);

    SCTP_DEBUG_PRINTK("We set peer primary addr primitively.\n");

    return err;
}
0
[ "CWE-20" ]
linux
726bc6b092da4c093eb74d13c07184b18c1af0f1
242,212,455,796,043,400,000,000,000,000,000,000,000
57
net/sctp: Validate parameter size for SCTP_GET_ASSOC_STATS

Building sctp may fail with:

  In function ‘copy_from_user’,
      inlined from ‘sctp_getsockopt_assoc_stats’ at net/sctp/socket.c:5656:20:
  arch/x86/include/asm/uaccess_32.h:211:26: error: call to ‘copy_from_user_overflow’ declared with attribute error: copy_from_user() buffer size is not provably correct

if built with W=1 due to a missing parameter size validation before the call to copy_from_user.

Signed-off-by: Guenter Roeck <[email protected]>
Acked-by: Vlad Yasevich <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
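The validation the message asks for has the same shape as the optlen check already visible in sctp_setsockopt_peer_primary_addr() above: bound the user-supplied length before copy_from_user(). A kernel-context fragment reconstructing that shape (not the exact patch):

/* At the top of sctp_getsockopt_assoc_stats(), before any copying: */
struct sctp_assoc_stats sas;

if (len < sizeof(struct sctp_assoc_stats))
    return -EINVAL;                        /* buffer provably too small */
len = sizeof(struct sctp_assoc_stats);     /* never copy more than needed */

if (copy_from_user(&sas, optval, len))
    return -EFAULT;

Clamping len both satisfies the compile-time copy_from_user() size check and keeps the kernel from reading past the structure it actually uses.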
bool PrimaryChromaticity::Parse(IMkvReader* reader, long long read_pos,
                                long long value_size, bool is_x,
                                PrimaryChromaticity** chromaticity) {
  if (!reader)
    return false;

  if (!*chromaticity)
    *chromaticity = new PrimaryChromaticity();

  if (!*chromaticity)
    return false;

  PrimaryChromaticity* pc = *chromaticity;
  float* value = is_x ? &pc->x : &pc->y;

  double parser_value = 0;
  const long long parse_status =
      UnserializeFloat(reader, read_pos, value_size, parser_value);

  // Valid range is [0, 1]. Make sure the double is representable as a float
  // before casting.
  if (parse_status < 0 || parser_value < 0.0 || parser_value > 1.0 ||
      (parser_value > 0.0 && parser_value < FLT_MIN))
    return false;

  *value = static_cast<float>(parser_value);
  return true;
}
0
[ "CWE-20" ]
libvpx
34d54b04e98dd0bac32e9aab0fbda0bf501bc742
54,373,388,115,559,910,000,000,000,000,000,000,000
29
update libwebm to libwebm-1.0.0.27-358-gdbf1d10

changelog:
https://chromium.googlesource.com/webm/libwebm/+log/libwebm-1.0.0.27-351-g9f23fbc..libwebm-1.0.0.27-358-gdbf1d10

Change-Id: I28a6b3ae02a53fb1f2029eee11e9449afb94c8e3