func
stringlengths 0
484k
| target
int64 0
1
| cwe
sequencelengths 0
4
| project
stringclasses 799
values | commit_id
stringlengths 40
40
| hash
float64 1,215,700,430,453,689,100,000,000B
340,281,914,521,452,260,000,000,000,000B
| size
int64 1
24k
| message
stringlengths 0
13.3k
|
---|---|---|---|---|---|---|---|
/*
** fts3PoslistNearMerge -- merge two FTS3 position lists for a NEAR query.
**
** The inputs *pp1 and *pp2 are merged twice with fts3PoslistPhraseMerge():
** once in the given order using nRight, then (after rewinding both inputs)
** in the reversed order using nLeft.  Whatever either pass produced is
** written to the caller's output buffer *pp.
**
** Returns 1 if at least one pass produced output (i.e. a NEAR match
** exists), 0 otherwise.
**
** NOTE(review): nRight and nLeft carry the same parameter comment in the
** original; presumably each bounds the allowed token distance in one
** direction -- confirm against fts3PoslistPhraseMerge().
*/
static int fts3PoslistNearMerge(
char **pp, /* Output buffer */
char *aTmp, /* Temporary buffer space */
int nRight, /* Maximum difference in token positions */
int nLeft, /* Maximum difference in token positions */
char **pp1, /* IN/OUT: Left input list */
char **pp2 /* IN/OUT: Right input list */
){
/* Save the input cursors so the lists can be re-scanned for pass two. */
char *p1 = *pp1;
char *p2 = *pp2;
char *pTmp1 = aTmp;
char *pTmp2;
char *aTmp2;
int res = 1;
/* Pass 1: left/right order, distance bound nRight. */
fts3PoslistPhraseMerge(&pTmp1, nRight, 0, 0, pp1, pp2);
aTmp2 = pTmp2 = pTmp1;
/* Rewind both inputs, then pass 2: reversed order, bound nLeft. */
*pp1 = p1;
*pp2 = p2;
fts3PoslistPhraseMerge(&pTmp2, nLeft, 1, 0, pp2, pp1);
/* Combine whichever passes produced output into *pp. */
if( pTmp1!=aTmp && pTmp2!=aTmp2 ){
fts3PoslistMerge(pp, &aTmp, &aTmp2);
}else if( pTmp1!=aTmp ){
fts3PoslistCopy(pp, &aTmp);
}else if( pTmp2!=aTmp2 ){
fts3PoslistCopy(pp, &aTmp2);
}else{
/* Neither pass matched anything. */
res = 0;
}
return res;
} | 0 | [
"CWE-787"
] | sqlite | c72f2fb7feff582444b8ffdc6c900c69847ce8a9 | 293,735,948,000,029,580,000,000,000,000,000,000,000 | 33 | More improvements to shadow table corruption detection in FTS3.
FossilOrigin-Name: 51525f9c3235967bc00a090e84c70a6400698c897aa4742e817121c725b8c99d |
/*
 * Record the guest-physical address of the TSS used for real-mode
 * emulation.  A no-op when unrestricted guest mode is enabled (hardware
 * handles real mode directly); otherwise a 3-page private memslot is
 * registered at @addr and the real-mode TSS is initialized there.
 * Returns 0 on success or a negative errno.
 */
static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr)
{
int ret;
if (enable_unrestricted_guest)
return 0;
ret = x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, addr,
PAGE_SIZE * 3);
if (ret)
return ret;
/* Remember the address only after the memslot was set up. */
to_kvm_vmx(kvm)->tss_addr = addr;
return init_rmode_tss(kvm);
} | 0 | [
"CWE-284"
] | linux | 727ba748e110b4de50d142edca9d6a9b7e6111d8 | 99,703,299,601,418,060,000,000,000,000,000,000,000 | 14 | kvm: nVMX: Enforce cpl=0 for VMX instructions
VMX instructions executed inside a L1 VM will always trigger a VM exit
even when executed with cpl 3. This means we must perform the
privilege check in software.
Fixes: 70f3aac964ae("kvm: nVMX: Remove superfluous VMX instruction fault checks")
Cc: [email protected]
Signed-off-by: Felix Wilhelm <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]> |
/*
 * Write zeroes over the entire device, skipping ranges that are already
 * reported as zero by the block-status query.  Requests are clamped to
 * INT_MAX sectors per iteration.  Returns 0 on success or a negative
 * errno (also reported via error_report()) on failure.
 */
int bdrv_make_zero(BlockDriverState *bs, BdrvRequestFlags flags)
{
int64_t target_size = bdrv_getlength(bs) / BDRV_SECTOR_SIZE;
int64_t ret, nb_sectors, sector_num = 0;
int n;
for (;;) {
nb_sectors = target_size - sector_num;
if (nb_sectors <= 0) {
/* Reached (or passed) the end of the device: done. */
return 0;
}
if (nb_sectors > INT_MAX) {
/* Clamp the request size; see CVE-2014-0143 commit message. */
nb_sectors = INT_MAX;
}
ret = bdrv_get_block_status(bs, sector_num, nb_sectors, &n);
if (ret < 0) {
error_report("error getting block status at sector %" PRId64 ": %s",
sector_num, strerror(-ret));
return ret;
}
if (ret & BDRV_BLOCK_ZERO) {
/* Range already reads as zero: skip it. */
sector_num += n;
continue;
}
ret = bdrv_write_zeroes(bs, sector_num, n, flags);
if (ret < 0) {
error_report("error writing zeroes at sector %" PRId64 ": %s",
sector_num, strerror(-ret));
return ret;
}
sector_num += n;
}
} | 0 | [
"CWE-190"
] | qemu | 8f4754ede56e3f9ea3fd7207f4a7c4453e59285b | 255,461,447,774,266,740,000,000,000,000,000,000,000 | 33 | block: Limit request size (CVE-2014-0143)
Limiting the size of a single request to INT_MAX not only fixes a
direct integer overflow in bdrv_check_request() (which would only
trigger bad behaviour with ridiculously huge images, as in close to
2^64 bytes), but can also prevent overflows in all block drivers.
Signed-off-by: Kevin Wolf <[email protected]>
Reviewed-by: Max Reitz <[email protected]>
Signed-off-by: Stefan Hajnoczi <[email protected]> |
/*
 * Process a TLS ServerKeyExchange message.
 *
 * Dispatches on the key-exchange algorithm of the negotiated cipher
 * (PSK preamble, SRP, DHE, ECDHE) and, when the parameters are signed,
 * verifies the signature over client_random || server_random || params.
 * On any error an alert is sent (unless al == -1) and MSG_PROCESS_ERROR
 * is returned; otherwise MSG_PROCESS_CONTINUE_READING.
 *
 * NOTE(review): "¶ms" below is mojibake for "&params" introduced by the
 * dataset extraction -- the upstream OpenSSL source uses "&params".
 */
MSG_PROCESS_RETURN tls_process_key_exchange(SSL *s, PACKET *pkt)
{
int al = -1;
long alg_k;
EVP_PKEY *pkey = NULL;
PACKET save_param_start, signature;
alg_k = s->s3->tmp.new_cipher->algorithm_mkey;
/* Remember the start of the packet so the signed parameter range can
 * be recovered later as (start remaining - current remaining). */
save_param_start = *pkt;
#if !defined(OPENSSL_NO_EC) || !defined(OPENSSL_NO_DH)
EVP_PKEY_free(s->s3->peer_tmp);
s->s3->peer_tmp = NULL;
#endif
if (alg_k & SSL_PSK) {
if (!tls_process_ske_psk_preamble(s, pkt, &al))
goto err;
}
/* Nothing else to do for plain PSK or RSAPSK */
if (alg_k & (SSL_kPSK | SSL_kRSAPSK)) {
} else if (alg_k & SSL_kSRP) {
if (!tls_process_ske_srp(s, pkt, &pkey, &al))
goto err;
} else if (alg_k & (SSL_kDHE | SSL_kDHEPSK)) {
if (!tls_process_ske_dhe(s, pkt, &pkey, &al))
goto err;
} else if (alg_k & (SSL_kECDHE | SSL_kECDHEPSK)) {
if (!tls_process_ske_ecdhe(s, pkt, &pkey, &al))
goto err;
} else if (alg_k) {
/* Unknown/unsupported key exchange: protocol violation. */
al = SSL_AD_UNEXPECTED_MESSAGE;
SSLerr(SSL_F_TLS_PROCESS_KEY_EXCHANGE, SSL_R_UNEXPECTED_MESSAGE);
goto err;
}
/* if it was signed, check the signature */
if (pkey != NULL) {
PACKET params;
int maxsig;
const EVP_MD *md = NULL;
EVP_MD_CTX *md_ctx;
/*
 * |pkt| now points to the beginning of the signature, so the difference
 * equals the length of the parameters.
 */
if (!PACKET_get_sub_packet(&save_param_start, ¶ms,
PACKET_remaining(&save_param_start) -
PACKET_remaining(pkt))) {
al = SSL_AD_INTERNAL_ERROR;
SSLerr(SSL_F_TLS_PROCESS_KEY_EXCHANGE, ERR_R_INTERNAL_ERROR);
goto err;
}
if (SSL_USE_SIGALGS(s)) {
/* TLS 1.2+: the message carries an explicit sigalg pair. */
const unsigned char *sigalgs;
int rv;
if (!PACKET_get_bytes(pkt, &sigalgs, 2)) {
al = SSL_AD_DECODE_ERROR;
SSLerr(SSL_F_TLS_PROCESS_KEY_EXCHANGE, SSL_R_LENGTH_TOO_SHORT);
goto err;
}
rv = tls12_check_peer_sigalg(&md, s, sigalgs, pkey);
if (rv == -1) {
al = SSL_AD_INTERNAL_ERROR;
goto err;
} else if (rv == 0) {
al = SSL_AD_DECODE_ERROR;
goto err;
}
#ifdef SSL_DEBUG
fprintf(stderr, "USING TLSv1.2 HASH %s\n", EVP_MD_name(md));
#endif
} else if (EVP_PKEY_id(pkey) == EVP_PKEY_RSA) {
/* Pre-1.2 RSA signatures use the MD5+SHA1 concatenation. */
md = EVP_md5_sha1();
} else {
md = EVP_sha1();
}
if (!PACKET_get_length_prefixed_2(pkt, &signature)
|| PACKET_remaining(pkt) != 0) {
al = SSL_AD_DECODE_ERROR;
SSLerr(SSL_F_TLS_PROCESS_KEY_EXCHANGE, SSL_R_LENGTH_MISMATCH);
goto err;
}
maxsig = EVP_PKEY_size(pkey);
if (maxsig < 0) {
al = SSL_AD_INTERNAL_ERROR;
SSLerr(SSL_F_TLS_PROCESS_KEY_EXCHANGE, ERR_R_INTERNAL_ERROR);
goto err;
}
/*
 * Check signature length
 */
if (PACKET_remaining(&signature) > (size_t)maxsig) {
/* wrong packet length */
al = SSL_AD_DECODE_ERROR;
SSLerr(SSL_F_TLS_PROCESS_KEY_EXCHANGE,
SSL_R_WRONG_SIGNATURE_LENGTH);
goto err;
}
md_ctx = EVP_MD_CTX_new();
if (md_ctx == NULL) {
al = SSL_AD_INTERNAL_ERROR;
SSLerr(SSL_F_TLS_PROCESS_KEY_EXCHANGE, ERR_R_MALLOC_FAILURE);
goto err;
}
/* Signature input is client_random || server_random || params. */
if (EVP_VerifyInit_ex(md_ctx, md, NULL) <= 0
|| EVP_VerifyUpdate(md_ctx, &(s->s3->client_random[0]),
SSL3_RANDOM_SIZE) <= 0
|| EVP_VerifyUpdate(md_ctx, &(s->s3->server_random[0]),
SSL3_RANDOM_SIZE) <= 0
|| EVP_VerifyUpdate(md_ctx, PACKET_data(¶ms),
PACKET_remaining(¶ms)) <= 0) {
EVP_MD_CTX_free(md_ctx);
al = SSL_AD_INTERNAL_ERROR;
SSLerr(SSL_F_TLS_PROCESS_KEY_EXCHANGE, ERR_R_EVP_LIB);
goto err;
}
if (EVP_VerifyFinal(md_ctx, PACKET_data(&signature),
PACKET_remaining(&signature), pkey) <= 0) {
/* bad signature */
EVP_MD_CTX_free(md_ctx);
al = SSL_AD_DECRYPT_ERROR;
SSLerr(SSL_F_TLS_PROCESS_KEY_EXCHANGE, SSL_R_BAD_SIGNATURE);
goto err;
}
EVP_MD_CTX_free(md_ctx);
} else {
/* aNULL, aSRP or PSK do not need public keys */
if (!(s->s3->tmp.new_cipher->algorithm_auth & (SSL_aNULL | SSL_aSRP))
&& !(alg_k & SSL_PSK)) {
/* Might be wrong key type, check it */
if (ssl3_check_cert_and_algorithm(s)) {
/* Otherwise this shouldn't happen */
al = SSL_AD_INTERNAL_ERROR;
SSLerr(SSL_F_TLS_PROCESS_KEY_EXCHANGE, ERR_R_INTERNAL_ERROR);
} else {
al = SSL_AD_DECODE_ERROR;
}
goto err;
}
/* still data left over */
if (PACKET_remaining(pkt) != 0) {
al = SSL_AD_DECODE_ERROR;
SSLerr(SSL_F_TLS_PROCESS_KEY_EXCHANGE, SSL_R_EXTRA_DATA_IN_MESSAGE);
goto err;
}
}
return MSG_PROCESS_CONTINUE_READING;
err:
/* al == -1 means the callee already sent (or suppressed) the alert. */
if (al != -1)
ssl3_send_alert(s, SSL3_AL_FATAL, al);
ossl_statem_set_error(s);
return MSG_PROCESS_ERROR;
} | 0 | [
"CWE-476"
] | openssl | efbe126e3ebb9123ac9d058aa2bb044261342aaa | 268,495,958,352,747,300,000,000,000,000,000,000,000 | 163 | Fix missing NULL checks in CKE processing
Reviewed-by: Rich Salz <[email protected]> |
/*
 * Acquire the locks of two VMs for an SEV intra-host migration.
 *
 * Marks both VMs as "migration in progress" (the atomic flags prevent a
 * deadlock where two VMs try to migrate to/from each other concurrently)
 * and then takes both kvm->lock mutexes, dst first.  On failure all state
 * acquired so far is rolled back via the goto-cleanup chain.
 * Returns 0 on success, -EINVAL/-EBUSY/-EINTR on failure.
 */
static int sev_lock_two_vms(struct kvm *dst_kvm, struct kvm *src_kvm)
{
struct kvm_sev_info *dst_sev = &to_kvm_svm(dst_kvm)->sev_info;
struct kvm_sev_info *src_sev = &to_kvm_svm(src_kvm)->sev_info;
int r = -EBUSY;
if (dst_kvm == src_kvm)
return -EINVAL;
/*
 * Bail if these VMs are already involved in a migration to avoid
 * deadlock between two VMs trying to migrate to/from each other.
 */
if (atomic_cmpxchg_acquire(&dst_sev->migration_in_progress, 0, 1))
return -EBUSY;
if (atomic_cmpxchg_acquire(&src_sev->migration_in_progress, 0, 1))
goto release_dst;
r = -EINTR;
if (mutex_lock_killable(&dst_kvm->lock))
goto release_src;
/* Nested annotation: lockdep must allow two kvm->lock instances. */
if (mutex_lock_killable_nested(&src_kvm->lock, SINGLE_DEPTH_NESTING))
goto unlock_dst;
return 0;
unlock_dst:
mutex_unlock(&dst_kvm->lock);
release_src:
atomic_set_release(&src_sev->migration_in_progress, 0);
release_dst:
atomic_set_release(&dst_sev->migration_in_progress, 0);
return r;
} | 0 | [
"CWE-459"
] | linux | 683412ccf61294d727ead4a73d97397396e69a6b | 264,020,091,436,341,170,000,000,000,000,000,000,000 | 34 | KVM: SEV: add cache flush to solve SEV cache incoherency issues
Flush the CPU caches when memory is reclaimed from an SEV guest (where
reclaim also includes it being unmapped from KVM's memslots). Due to lack
of coherency for SEV encrypted memory, failure to flush results in silent
data corruption if userspace is malicious/broken and doesn't ensure SEV
guest memory is properly pinned and unpinned.
Cache coherency is not enforced across the VM boundary in SEV (AMD APM
vol.2 Section 15.34.7). Confidential cachelines, generated by confidential
VM guests have to be explicitly flushed on the host side. If a memory page
containing dirty confidential cachelines was released by VM and reallocated
to another user, the cachelines may corrupt the new user at a later time.
KVM takes a shortcut by assuming all confidential memory remain pinned
until the end of VM lifetime. Therefore, KVM does not flush cache at
mmu_notifier invalidation events. Because of this incorrect assumption and
the lack of cache flushing, malicous userspace can crash the host kernel:
creating a malicious VM and continuously allocates/releases unpinned
confidential memory pages when the VM is running.
Add cache flush operations to mmu_notifier operations to ensure that any
physical memory leaving the guest VM get flushed. In particular, hook
mmu_notifier_invalidate_range_start and mmu_notifier_release events and
flush cache accordingly. The hook after releasing the mmu lock to avoid
contention with other vCPUs.
Cc: [email protected]
Suggested-by: Sean Christpherson <[email protected]>
Reported-by: Mingwei Zhang <[email protected]>
Signed-off-by: Mingwei Zhang <[email protected]>
Message-Id: <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]> |
/* Return true iff this host is a v2 directory authority whose cached v2
 * networkstatus is dirty and has been dirty for longer than the
 * DIR_REGEN_SLACK_TIME grace period, i.e. a regeneration is due. */
should_generate_v2_networkstatus(void)
{
return authdir_mode_v2(get_options()) &&
the_v2_networkstatus_is_dirty &&
the_v2_networkstatus_is_dirty + DIR_REGEN_SLACK_TIME < time(NULL);
} | 0 | [
"CWE-264"
] | tor | 00fffbc1a15e2696a89c721d0c94dc333ff419ef | 268,520,867,089,384,450,000,000,000,000,000,000,000 | 6 | Don't give the Guard flag to relays without the CVE-2011-2768 fix |
// Destructor: under the cache lock, destroy every libxsmm SPMDM handle
// owned by the cached entries, then drop the entries themselves.
~TensorInfoCache() {
tensorflow::mutex_lock ml(lock);
for (auto& p : entries) {
libxsmm_spmdm_destroy(&p.second->handle);
}
entries.clear();
} | 0 | [
"CWE-125"
] | tensorflow | e6cf28c72ba2eb949ca950d834dd6d66bb01cfae | 198,943,040,737,782,320,000,000,000,000,000,000,000 | 7 | Validate that matrix dimension sizes in SparseMatMul are positive.
PiperOrigin-RevId: 401149683
Change-Id: Ib33eafc561a39c8741ece80b2edce6d4aae9a57d |
// Convenience overload: expand sStr into a local result string and
// return it, delegating to the two-argument ExpandString().
CString CIRCNetwork::ExpandString(const CString& sStr) const {
CString sRet;
return ExpandString(sStr, sRet);
} | 0 | [
"CWE-20"
] | znc | 64613bc8b6b4adf1e32231f9844d99cd512b8973 | 264,043,249,097,425,720,000,000,000,000,000,000,000 | 4 | Don't crash if user specified invalid encoding.
This is CVE-2019-9917 |
/*
** Translate a PRAGMA temp_store argument into its numeric code:
** "0".."2" map to themselves, "file" -> 1, "memory" -> 2, and any
** other value (including unrecognized text) falls back to 0 (default).
** Comparison is case-insensitive via sqlite3StrICmp().
*/
static int getTempStore(const char *z){
if( z[0]>='0' && z[0]<='2' ){
return z[0] - '0';
}else if( sqlite3StrICmp(z, "file")==0 ){
return 1;
}else if( sqlite3StrICmp(z, "memory")==0 ){
return 2;
}else{
return 0;
}
} | 0 | [
"CWE-754"
] | sqlite | ebd70eedd5d6e6a890a670b5ee874a5eae86b4dd | 168,479,086,271,050,680,000,000,000,000,000,000,000 | 11 | Fix the NOT NULL verification logic in PRAGMA integrity_check so that it
works for generated columns whose value is the result of a comparison operator.
Ticket [bd8c280671ba44a7]
FossilOrigin-Name: f3b39c71b88cb6721f443de56cdce4c08252453a5e340b00a2bd88dc10c42400 |
/*
 * Read a PE IMAGE_EXPORT_DIRECTORY structure from buffer `b` at `addr`
 * into `export_dir`, decoding each field with the endian-aware
 * PE_READ_STRUCT_FIELD macro.  The buffer's seek position is saved on
 * entry and restored before returning.  Returns the structure size on
 * success, or -1 if seeking to `addr` fails.
 *
 * NOTE(review): the r_buf_read() result is not checked; on a short read
 * the fields decode from whatever was left in `buf` -- confirm callers
 * validate the directory afterwards.
 */
static int read_image_export_directory(RBuffer *b, ut64 addr, PE_(image_export_directory) *export_dir) {
st64 o_addr = r_buf_seek (b, 0, R_BUF_CUR);
if (r_buf_seek (b, addr, R_BUF_SET) < 0) {
return -1;
}
ut8 buf[sizeof (PE_(image_export_directory))];
r_buf_read (b, buf, sizeof (buf));
PE_READ_STRUCT_FIELD (export_dir, PE_(image_export_directory), Characteristics, 32);
PE_READ_STRUCT_FIELD (export_dir, PE_(image_export_directory), TimeDateStamp, 32);
PE_READ_STRUCT_FIELD (export_dir, PE_(image_export_directory), MajorVersion, 16);
PE_READ_STRUCT_FIELD (export_dir, PE_(image_export_directory), MinorVersion, 16);
PE_READ_STRUCT_FIELD (export_dir, PE_(image_export_directory), Name, 32);
PE_READ_STRUCT_FIELD (export_dir, PE_(image_export_directory), Base, 32);
PE_READ_STRUCT_FIELD (export_dir, PE_(image_export_directory), NumberOfFunctions, 32);
PE_READ_STRUCT_FIELD (export_dir, PE_(image_export_directory), NumberOfNames, 32);
PE_READ_STRUCT_FIELD (export_dir, PE_(image_export_directory), AddressOfFunctions, 32);
PE_READ_STRUCT_FIELD (export_dir, PE_(image_export_directory), AddressOfNames, 32);
PE_READ_STRUCT_FIELD (export_dir, PE_(image_export_directory), AddressOfOrdinals, 32);
/* Restore the caller's seek position regardless of what was read. */
r_buf_seek (b, o_addr, R_BUF_SET);
return sizeof (PE_(image_export_directory));
} | 0 | [
"CWE-400",
"CWE-703"
] | radare2 | 634b886e84a5c568d243e744becc6b3223e089cf | 182,778,081,778,535,720,000,000,000,000,000,000,000 | 21 | Fix DoS in PE/QNX/DYLDCACHE/PSX parsers ##crash
* Reported by lazymio
* Reproducer: AAA4AAAAAB4= |
// Build the test's aggregation expression spec: a nested $or whose
// operands include constant literals and the field paths "$a"/"$b".
BSONObj spec() {
return BSON("$or" << BSON_ARRAY(0 << BSON("$or" << BSON_ARRAY(BSON("$or" << BSON_ARRAY(1))))
<< "$a"
<< "$b"));
} | 0 | [
"CWE-835"
] | mongo | 0a076417d1d7fba3632b73349a1fd29a83e68816 | 232,146,879,427,403,560,000,000,000,000,000,000,000 | 5 | SERVER-38070 fix infinite loop in agg expression |
/* Initialize the common fields of an RsvgNode: record its type, give it
 * an empty child array and a freshly initialized RsvgState, and install
 * the default (no-op) vtable callbacks which subclasses may override. */
_rsvg_node_init (RsvgNode * self,
RsvgNodeType type)
{
self->type = type;
self->parent = NULL;
self->children = g_ptr_array_new ();
self->state = g_new (RsvgState, 1);
rsvg_state_init (self->state);
/* Default callbacks: free, draw nothing, ignore attributes. */
self->free = _rsvg_node_free;
self->draw = _rsvg_node_draw_nothing;
self->set_atts = _rsvg_node_dont_set_atts;
} | 0 | [] | librsvg | 34c95743ca692ea0e44778e41a7c0a129363de84 | 92,303,093,882,492,550,000,000,000,000,000,000,000 | 12 | Store node type separately in RsvgNode
The node name (formerly RsvgNode:type) cannot be used to infer
the sub-type of RsvgNode that we're dealing with, since for unknown
elements we put type = node-name. This lead to a (potentially exploitable)
crash e.g. when the element name started with "fe" which tricked
the old code into considering it as a RsvgFilterPrimitive.
CVE-2011-3146
https://bugzilla.gnome.org/show_bug.cgi?id=658014 |
// Set the recovery-paused flag under recovery_lock so concurrent
// recovery work observes the pause consistently.
void pause_recovery() {
Mutex::Locker l(recovery_lock);
recovery_paused = true;
} | 0 | [
"CWE-287",
"CWE-284"
] | ceph | 5ead97120e07054d80623dada90a5cc764c28468 | 288,122,578,972,459,820,000,000,000,000,000,000,000 | 4 | auth/cephx: add authorizer challenge
Allow the accepting side of a connection to reject an initial authorizer
with a random challenge. The connecting side then has to respond with an
updated authorizer proving they are able to decrypt the service's challenge
and that the new authorizer was produced for this specific connection
instance.
The accepting side requires this challenge and response unconditionally
if the client side advertises they have the feature bit. Servers wishing
to require this improved level of authentication simply have to require
the appropriate feature.
Signed-off-by: Sage Weil <[email protected]>
(cherry picked from commit f80b848d3f830eb6dba50123e04385173fa4540b)
# Conflicts:
# src/auth/Auth.h
# src/auth/cephx/CephxProtocol.cc
# src/auth/cephx/CephxProtocol.h
# src/auth/none/AuthNoneProtocol.h
# src/msg/Dispatcher.h
# src/msg/async/AsyncConnection.cc
- const_iterator
- ::decode vs decode
- AsyncConnection ctor arg noise
- get_random_bytes(), not cct->random() |
/* Set the zlib memory level used when compressing IDAT data.  Silently
 * ignores a NULL png_ptr.  The value is stored as-is; range validation
 * is left to zlib (presumably at deflateInit2 time -- confirm). */
png_set_compression_mem_level(png_structrp png_ptr, int mem_level)
{
png_debug(1, "in png_set_compression_mem_level");
if (png_ptr == NULL)
return;
png_ptr->zlib_mem_level = mem_level;
} | 0 | [
"CWE-120"
] | libpng | 81f44665cce4cb1373f049a76f3904e981b7a766 | 201,565,904,247,605,000,000,000,000,000,000,000,000 | 9 | [libpng16] Reject attempt to write over-length PLTE chunk |
/* Return the nested-guest CR3 (nested_cr3) stored for this vCPU, used
 * as the TDP root while running an L2 guest under nested SVM. */
static unsigned long nested_svm_get_tdp_cr3(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
return svm->nested.nested_cr3;
} | 0 | [] | kvm | 854e8bb1aa06c578c2c9145fa6bfe3680ef63b23 | 85,685,546,664,129,400,000,000,000,000,000,000,000 | 6 | KVM: x86: Check non-canonical addresses upon WRMSR
Upon WRMSR, the CPU should inject #GP if a non-canonical value (address) is
written to certain MSRs. The behavior is "almost" identical for AMD and Intel
(ignoring MSRs that are not implemented in either architecture since they would
anyhow #GP). However, IA32_SYSENTER_ESP and IA32_SYSENTER_EIP cause #GP if
non-canonical address is written on Intel but not on AMD (which ignores the top
32-bits).
Accordingly, this patch injects a #GP on the MSRs which behave identically on
Intel and AMD. To eliminate the differences between the architecutres, the
value which is written to IA32_SYSENTER_ESP and IA32_SYSENTER_EIP is turned to
canonical value before writing instead of injecting a #GP.
Some references from Intel and AMD manuals:
According to Intel SDM description of WRMSR instruction #GP is expected on
WRMSR "If the source register contains a non-canonical address and ECX
specifies one of the following MSRs: IA32_DS_AREA, IA32_FS_BASE, IA32_GS_BASE,
IA32_KERNEL_GS_BASE, IA32_LSTAR, IA32_SYSENTER_EIP, IA32_SYSENTER_ESP."
According to AMD manual instruction manual:
LSTAR/CSTAR (SYSCALL): "The WRMSR instruction loads the target RIP into the
LSTAR and CSTAR registers. If an RIP written by WRMSR is not in canonical
form, a general-protection exception (#GP) occurs."
IA32_GS_BASE and IA32_FS_BASE (WRFSBASE/WRGSBASE): "The address written to the
base field must be in canonical form or a #GP fault will occur."
IA32_KERNEL_GS_BASE (SWAPGS): "The address stored in the KernelGSbase MSR must
be in canonical form."
This patch fixes CVE-2014-3610.
Cc: [email protected]
Signed-off-by: Nadav Amit <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]> |
// Overlap a block of interleaved samples at offset 'ovlPos' into the
// output, dispatching on the channel count.  With USE_MULTICH_ALWAYS
// defined, the generic multichannel routine handles every layout;
// otherwise mono and stereo get their specialized fast paths.
inline void TDStretch::overlap(SAMPLETYPE *pOutput, const SAMPLETYPE *pInput, uint ovlPos) const
{
#ifndef USE_MULTICH_ALWAYS
    switch (channels)
    {
        case 1:
            // mono sound.
            overlapMono(pOutput, pInput + ovlPos);
            return;
        case 2:
            // stereo sound
            overlapStereo(pOutput, pInput + 2 * ovlPos);
            return;
        default:
            break;
    }
#endif // USE_MULTICH_ALWAYS
    assert(channels > 0);
    overlapMulti(pOutput, pInput + channels * ovlPos);
}
| 0 | [
"CWE-617"
] | soundtouch | 107f2c5d201a4dfea1b7f15c5957ff2ac9e5f260 | 230,464,672,320,538,540,000,000,000,000,000,000,000 | 20 | Replaced illegal-number-of-channel assertions with run-time exception |
/*
 * Handle the ATA DATA SET MANAGEMENT command.  Only the TRIM feature is
 * supported: if a backing block device exists, a TRIM DMA transfer is
 * started and false is returned (command still in flight).  Any other
 * feature -- or a missing device -- aborts the command and returns true
 * (command complete).
 */
static bool cmd_data_set_management(IDEState *s, uint8_t cmd)
{
switch (s->feature) {
case DSM_TRIM:
if (s->bs) {
ide_sector_start_dma(s, IDE_DMA_TRIM);
return false;
}
break;
}
/* Unsupported feature or no backing device: abort. */
ide_abort_command(s);
return true;
} | 0 | [
"CWE-189"
] | qemu | 940973ae0b45c9b6817bab8e4cf4df99a9ef83d7 | 291,970,543,824,048,440,000,000,000,000,000,000,000 | 14 | ide: Correct improper smart self test counter reset in ide core.
The SMART self test counter was incorrectly being reset to zero,
not 1. This had the effect that on every 21st SMART EXECUTE OFFLINE:
* We would write off the beginning of a dynamically allocated buffer
* We forgot the SMART history
Fix this.
Signed-off-by: Benoit Canet <[email protected]>
Message-id: [email protected]
Reviewed-by: Markus Armbruster <[email protected]>
Cc: [email protected]
Acked-by: Kevin Wolf <[email protected]>
[PMM: tweaked commit message as per suggestions from Markus]
Signed-off-by: Peter Maydell <[email protected]> |
/*
 * Compute the intersection of two XPath node-sets: a new set holding
 * every node of nodes1 that also appears in nodes2.  Returns a freshly
 * allocated (possibly empty) node-set that the caller must free, or
 * NULL on allocation failure.  Runs in O(|nodes1| * |nodes2|) because
 * membership is tested with a linear scan.
 */
xmlXPathIntersection (xmlNodeSetPtr nodes1, xmlNodeSetPtr nodes2) {
xmlNodeSetPtr ret = xmlXPathNodeSetCreate(NULL);
int i, l1;
xmlNodePtr cur;
if (ret == NULL)
return(ret);
/* Intersection with an empty set is empty. */
if (xmlXPathNodeSetIsEmpty(nodes1))
return(ret);
if (xmlXPathNodeSetIsEmpty(nodes2))
return(ret);
l1 = xmlXPathNodeSetGetLength(nodes1);
for (i = 0; i < l1; i++) {
cur = xmlXPathNodeSetItem(nodes1, i);
if (xmlXPathNodeSetContains(nodes2, cur))
xmlXPathNodeSetAddUnique(ret, cur);
}
return(ret);
} | 0 | [
"CWE-119"
] | libxml2 | 91d19754d46acd4a639a8b9e31f50f31c78f8c9c | 79,441,984,716,653,820,000,000,000,000,000,000,000 | 21 | Fix the semantic of XPath axis for namespace/attribute context nodes
The processing of namespace and attributes nodes was not compliant
to the XPath-1.0 specification |
/*
 * Store an RDP error-info code on the connection.  For a non-success
 * code the error is logged, recorded as the context's last error, and
 * published to ErrorInfo subscribers (when a context/pubsub exists).
 * A success code simply clears the last-error state.  Always returns
 * TRUE.
 */
BOOL rdp_set_error_info(rdpRdp* rdp, UINT32 errorInfo)
{
rdp->errorInfo = errorInfo;
if (rdp->errorInfo != ERRINFO_SUCCESS)
{
rdpContext* context = rdp->context;
rdp_print_errinfo(rdp->errorInfo);
if (context)
{
freerdp_set_last_error_log(context, MAKE_FREERDP_ERROR(ERRINFO, errorInfo));
/* Notify any registered ErrorInfo event listeners. */
if (context->pubSub)
{
ErrorInfoEventArgs e;
EventArgsInit(&e, "freerdp");
e.code = rdp->errorInfo;
PubSub_OnErrorInfo(context->pubSub, context, &e);
}
}
else
WLog_ERR(TAG, "%s missing context=%p", __FUNCTION__, context);
}
else
{
freerdp_set_last_error_log(rdp->context, FREERDP_ERROR_SUCCESS);
}
return TRUE;
} | 0 | [
"CWE-125"
] | FreeRDP | 9301bfe730c66180263248b74353daa99f5a969b | 12,089,162,830,875,085,000,000,000,000,000,000,000 | 31 | Fixed #6007: Boundary checks in rdp_read_flow_control_pdu |
/*
 * Yield the current CPU to the task backing the target vCPU.  The
 * vCPU's pid is resolved under RCU and pinned with a task reference
 * before calling yield_to().  Returns 0 if the target task no longer
 * exists, otherwise the yield_to() result.
 */
int kvm_vcpu_yield_to(struct kvm_vcpu *target)
{
struct pid *pid;
struct task_struct *task = NULL;
int ret = 0;
rcu_read_lock();
pid = rcu_dereference(target->pid);
if (pid)
task = get_pid_task(pid, PIDTYPE_PID);
rcu_read_unlock();
if (!task)
return ret;
ret = yield_to(task, 1);
/* Drop the reference taken by get_pid_task(). */
put_task_struct(task);
return ret;
} | 0 | [
"CWE-416"
] | linux | 0774a964ef561b7170d8d1b1bfe6f88002b6d219 | 270,549,016,327,109,800,000,000,000,000,000,000,000 | 18 | KVM: Fix out of range accesses to memslots
Reset the LRU slot if it becomes invalid when deleting a memslot to fix
an out-of-bounds/use-after-free access when searching through memslots.
Explicitly check for there being no used slots in search_memslots(), and
in the caller of s390's approximation variant.
Fixes: 36947254e5f9 ("KVM: Dynamically size memslot array based on number of used slots")
Reported-by: Qian Cai <[email protected]>
Cc: Peter Xu <[email protected]>
Signed-off-by: Sean Christopherson <[email protected]>
Message-Id: <[email protected]>
Acked-by: Christian Borntraeger <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]> |
/*
 * Tear down the PPP state attached to an ISDN network link: remove the
 * link from its MP bundle (cleaning the bundle up if this was the last
 * member), wake or downgrade the associated ippp device, and detach the
 * slot.  Returns 0 in all cases; an out-of-range ppp_slot is logged and
 * treated as a no-op.
 */
isdn_ppp_free(isdn_net_local *lp)
{
struct ippp_struct *is;
if (lp->ppp_slot < 0 || lp->ppp_slot >= ISDN_MAX_CHANNELS) {
printk(KERN_ERR "%s: ppp_slot(%d) out of range\n",
__func__, lp->ppp_slot);
return 0;
}
#ifdef CONFIG_ISDN_MPP
spin_lock(&lp->netdev->pb->lock);
#endif
isdn_net_rm_from_bundle(lp);
#ifdef CONFIG_ISDN_MPP
if (lp->netdev->pb->ref_ct == 1) /* last link in queue? */
isdn_ppp_mp_cleanup(lp);
lp->netdev->pb->ref_ct--;
spin_unlock(&lp->netdev->pb->lock);
#endif /* CONFIG_ISDN_MPP */
/* Re-validate: the slot may have been torn down concurrently. */
if (lp->ppp_slot < 0 || lp->ppp_slot >= ISDN_MAX_CHANNELS) {
printk(KERN_ERR "%s: ppp_slot(%d) now invalid\n",
__func__, lp->ppp_slot);
return 0;
}
is = ippp_table[lp->ppp_slot];
if ((is->state & IPPP_CONNECT))
isdn_ppp_closewait(lp->ppp_slot); /* force wakeup on ippp device */
else if (is->state & IPPP_ASSIGNED)
is->state = IPPP_OPEN; /* fallback to 'OPEN but not ASSIGNED' state */
if (is->debug & 0x1)
printk(KERN_DEBUG "isdn_ppp_free %d %lx %lx\n", lp->ppp_slot, (long) lp, (long) is->lp);
is->lp = NULL; /* link is down .. set lp to NULL */
lp->ppp_slot = -1; /* is this OK ?? */
return 0;
} | 0 | [] | linux | 4ab42d78e37a294ac7bc56901d563c642e03c4ae | 212,325,776,886,574,900,000,000,000,000,000,000,000 | 40 | ppp, slip: Validate VJ compression slot parameters completely
Currently slhc_init() treats out-of-range values of rslots and tslots
as equivalent to 0, except that if tslots is too large it will
dereference a null pointer (CVE-2015-7799).
Add a range-check at the top of the function and make it return an
ERR_PTR() on error instead of NULL. Change the callers accordingly.
Compile-tested only.
Reported-by: 郭永刚 <[email protected]>
References: http://article.gmane.org/gmane.comp.security.oss.general/17908
Signed-off-by: Ben Hutchings <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
/*
 * BSD-style setgroups() wrapper: ensure primary_gid is the first entry
 * of the supplementary group list (growing a temporary copy if needed),
 * truncate the list to the kernel's NGROUPS limit, and hand it to the
 * platform setgroups implementation.  Returns the setgroups result
 * (0/-1 with errno); errno from setgroups survives the temporary
 * buffer's free().
 */
static int sys_bsd_setgroups(gid_t primary_gid, int setlen, const gid_t *gidset)
{
gid_t *new_gidset = NULL;
int max;
int ret;
/* setgroups(2) will fail with EINVAL if we pass too many groups. */
max = groups_max();
/* No group list, just make sure we are setting the efective GID. */
if (setlen == 0) {
return samba_setgroups(1, &primary_gid);
}
/* If the primary gid is not the first array element, grow the array
 * and insert it at the front.
 */
if (gidset[0] != primary_gid) {
new_gidset = SMB_MALLOC_ARRAY(gid_t, setlen + 1);
if (new_gidset == NULL) {
return -1;
}
memcpy(new_gidset + 1, gidset, (setlen * sizeof(gid_t)));
new_gidset[0] = primary_gid;
setlen++;
}
if (setlen > max) {
DEBUG(3, ("forced to truncate group list from %d to %d\n",
setlen, max));
setlen = max;
}
#if defined(HAVE_BROKEN_GETGROUPS)
ret = sys_broken_setgroups(setlen, new_gidset ? new_gidset : gidset);
#else
ret = samba_setgroups(setlen, new_gidset ? new_gidset : gidset);
#endif
if (new_gidset) {
/* Preserve errno across SAFE_FREE, which may clobber it. */
int errsav = errno;
SAFE_FREE(new_gidset);
errno = errsav;
}
return ret;
} | 0 | [
"CWE-20"
] | samba | d77a74237e660dd2ce9f1e14b02635f8a2569653 | 223,937,633,724,546,800,000,000,000,000,000,000,000 | 48 | s3: nmbd: Fix bug 10633 - nmbd denial of service
The Linux kernel has a bug in that it can give spurious
wakeups on a non-blocking UDP socket for a non-deliverable packet.
When nmbd was changed to use non-blocking sockets it
became vulnerable to a spurious wakeup from poll/epoll.
Fix sys_recvfile() to return on EWOULDBLOCK/EAGAIN.
CVE-2014-0244
https://bugzilla.samba.org/show_bug.cgi?id=10633
Signed-off-by: Jeremy Allison <[email protected]>
Reviewed-by: Andreas Schneider <[email protected]> |
static int read_into_chunked_item(conn *c) {
int total = 0;
int res;
assert(c->rcurr != c->ritem);
while (c->rlbytes > 0) {
item_chunk *ch = (item_chunk *)c->ritem;
int unused = ch->size - ch->used;
/* first check if we have leftovers in the conn_read buffer */
if (c->rbytes > 0) {
total = 0;
int tocopy = c->rbytes > c->rlbytes ? c->rlbytes : c->rbytes;
tocopy = tocopy > unused ? unused : tocopy;
if (c->ritem != c->rcurr) {
memmove(ch->data + ch->used, c->rcurr, tocopy);
}
total += tocopy;
c->rlbytes -= tocopy;
c->rcurr += tocopy;
c->rbytes -= tocopy;
ch->used += tocopy;
if (c->rlbytes == 0) {
break;
}
} else {
/* now try reading from the socket */
res = read(c->sfd, ch->data + ch->used,
(unused > c->rlbytes ? c->rlbytes : unused));
if (res > 0) {
pthread_mutex_lock(&c->thread->stats.mutex);
c->thread->stats.bytes_read += res;
pthread_mutex_unlock(&c->thread->stats.mutex);
ch->used += res;
total += res;
c->rlbytes -= res;
} else {
/* Reset total to the latest result so caller can handle it */
total = res;
break;
}
}
assert(ch->used <= ch->size);
if (ch->size == ch->used) {
if (ch->next) {
c->ritem = (char *) ch->next;
} else {
/* No space left. */
assert(c->rlbytes == 0);
break;
}
}
}
return total;
} | 0 | [
"CWE-190"
] | memcached | bd578fc34b96abe0f8d99c1409814a09f51ee71c | 202,280,152,358,877,800,000,000,000,000,000,000,000 | 55 | CVE reported by cisco talos |
// Map a horizontal glyph ID to its vertical-writing substitute by
// scanning the lookups referenced from the GSUB feature table at
// gsubFeatureTable.  Returns the first substituted GID found, or 0 if
// no lookup substitutes orgGID.  Parse errors are reported through the
// shared parsedOk flag set by getU16BE().
Guint FoFiTrueType::doMapToVertGID(Guint orgGID)
{
Guint lookupCount;
Guint lookupListIndex;
Guint i;
Guint gid = 0;
Guint pos;
// Skip the 2-byte FeatureParams offset to reach the lookup count.
pos = gsubFeatureTable+2;
lookupCount = getU16BE(pos,&parsedOk);
pos += 2;
for (i = 0;i < lookupCount;i++) {
lookupListIndex = getU16BE(pos,&parsedOk);
pos += 2;
if ((gid = scanLookupList(lookupListIndex,orgGID)) != 0) {
break;
}
}
return gid;
} | 0 | [
"CWE-125"
] | poppler | cdb7ad95f7c8fbf63ade040d8a07ec96467042fc | 155,152,549,368,704,470,000,000,000,000,000,000,000 | 20 | Fix malformed file crash in bug #85243 |
/* Emit the opening of an attribute in the scene dump: 'name ' in text
 * mode, or ' name="' in XML mode (the caller closes the quote).  A
 * missing trace output is a silent no-op. */
static void StartAttribute(GF_SceneDumper *sdump, const char *name)
{
if (!sdump->trace) return;
if (!sdump->XMLDump) {
DUMP_IND(sdump);
gf_fprintf(sdump->trace, "%s ", name);
} else {
gf_fprintf(sdump->trace, " %s=\"", name);
}
} | 0 | [
/*
 * Deep-copy a marshalled Python object (radare2 pyc parser).
 *
 * Tuples clone their element list, string-like types duplicate their
 * payload, code objects are copied field-by-field with each child
 * object deep-copied recursively, and TYPE_REF follows the referenced
 * object.  Singleton/valueless types keep data == NULL.  Returns NULL
 * if object/allocation is NULL, or if the resulting copy ended up with
 * no data (which also releases the partially built copy).
 *
 * NOTE(review): types listed in the second-to-last case (FLOAT, LONG,
 * SET, ...) are not implemented and produce a NULL result with a
 * diagnostic.
 */
static pyc_object *copy_object(pyc_object *object) {
pyc_object *copy = R_NEW0 (pyc_object);
if (!copy || !object) {
free (copy);
return NULL;
}
copy->type = object->type;
if ((int)object->type == 0) {
// do nothing
} else
switch (object->type) {
case TYPE_NULL:
break;
case TYPE_TUPLE:
case TYPE_SMALL_TUPLE:
copy->data = r_list_clone (object->data);
break;
case TYPE_INT:
case TYPE_INT64:
case TYPE_NONE:
case TYPE_TRUE:
case TYPE_FALSE:
case TYPE_STRING:
case TYPE_ASCII:
case TYPE_SHORT_ASCII:
case TYPE_ASCII_INTERNED:
case TYPE_SHORT_ASCII_INTERNED:
/* These all store their payload as a NUL-terminated string. */
copy->data = strdup (object->data);
break;
case TYPE_CODE_v0:
case TYPE_CODE_v1: {
pyc_code_object *src = object->data;
pyc_code_object *dst = R_NEW0 (pyc_code_object);
if (!dst) {
break;
}
/* Copy scalar fields, then deep-copy every child object. */
memcpy (dst, src, sizeof (*dst));
dst->code = copy_object (src->code);
dst->consts = copy_object (src->consts);
dst->names = copy_object (src->names);
dst->varnames = copy_object (src->varnames);
dst->freevars = copy_object (src->freevars);
dst->cellvars = copy_object (src->cellvars);
dst->filename = copy_object (src->filename);
dst->name = copy_object (src->name);
dst->lnotab = copy_object (src->lnotab);
copy->data = dst;
break;
}
case TYPE_REF:
copy->data = copy_object (object->data);
break;
case TYPE_ELLIPSIS:
case TYPE_STOPITER:
case TYPE_BINARY_COMPLEX:
case TYPE_BINARY_FLOAT:
case TYPE_COMPLEX:
case TYPE_STRINGREF:
case TYPE_DICT:
case TYPE_FLOAT:
case TYPE_FROZENSET:
case TYPE_INTERNED:
case TYPE_LIST:
case TYPE_LONG:
case TYPE_SET:
case TYPE_UNICODE:
case TYPE_UNKNOWN:
eprintf ("Copy not implemented for type %x\n", object->type);
break;
default:
eprintf ("Undefined type in copy_object (%x)\n", object->type);
break;
}
/* A copy without data is useless (or failed): discard it. */
if (!copy->data) {
R_FREE (copy);
}
return copy;
} | 0 | [
"CWE-416"
] | radare2 | 8525ad0b9fd596f4b251bb3d7b114e6dc7ce1ee8 | 243,379,904,924,008,400,000,000,000,000,000,000,000 | 78 | Fix bins/*/rep8 - UAF crash in pyc parser ##crash
* Reported by Cen Zhang via huntr.dev |
/* Concatenate two byte ranges into 'out': the first l1 bytes of in1
 * followed by the first l2 bytes of in2.  The caller must ensure 'out'
 * has at least l1 + l2 bytes; no terminator is written. */
concat (char *out, char *in1, char *in2, int l1, int l2)
{
while (l1--)
*out++ = *in1++;
while (l2--)
*out++ = *in2++;
} | 0 | [
"CWE-125"
] | exim | 57aa14b216432be381b6295c312065b2fd034f86 | 317,429,078,069,256,200,000,000,000,000,000,000,000 | 7 | Fix SPA authenticator, checking client-supplied data before using it. Bug 2571 |
/*
 * Emulate the SCSI MODE SENSE(6) / MODE SENSE(10) command.
 *
 * Builds the mode parameter data in @outbuf from the CDB in r->req.cmd.buf:
 * a mode parameter header (4 bytes for MODE SENSE(6), 8 for MODE SENSE(10)),
 * an optional 8-byte short block descriptor, and the requested mode page(s).
 *
 * Returns the number of bytes to transfer (capped at the allocation length
 * r->req.cmd.xfer), or -1 after queuing a CHECK CONDITION for unsupported
 * requests (saved values, or an unknown single page).
 */
static int scsi_disk_emulate_mode_sense(SCSIDiskReq *r, uint8_t *outbuf)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    uint64_t nb_sectors;
    int page, dbd, buflen, ret, page_control;
    uint8_t *p;
    uint8_t dev_specific_param;

    /* CDB byte 1 bit 3: DBD (disable block descriptors).
     * CDB byte 2: page code (low 6 bits) and page control (top 2 bits). */
    dbd = r->req.cmd.buf[1] & 0x8;
    page = r->req.cmd.buf[2] & 0x3f;
    page_control = (r->req.cmd.buf[2] & 0xc0) >> 6;
    DPRINTF("Mode Sense(%d) (page %d, xfer %zd, page_control %d)\n",
        (r->req.cmd.buf[0] == MODE_SENSE) ? 6 : 10, page, r->req.cmd.xfer, page_control);
    memset(outbuf, 0, r->req.cmd.xfer);
    p = outbuf;

    /* Device-specific parameter: bit 7 set means write-protected medium. */
    if (bdrv_is_read_only(s->bs)) {
        dev_specific_param = 0x80; /* Readonly. */
    } else {
        dev_specific_param = 0x00;
    }

    /* Mode parameter header: 4 bytes for the 6-byte CDB form, 8 bytes
     * for the 10-byte form.  Length fields are filled in at the end. */
    if (r->req.cmd.buf[0] == MODE_SENSE) {
        p[1] = 0; /* Default media type. */
        p[2] = dev_specific_param;
        p[3] = 0; /* Block descriptor length. */
        p += 4;
    } else { /* MODE_SENSE_10 */
        p[2] = 0; /* Default media type. */
        p[3] = dev_specific_param;
        p[6] = p[7] = 0; /* Block descriptor length. */
        p += 8;
    }

    bdrv_get_geometry(s->bs, &nb_sectors);
    /* Emit one short block descriptor unless DBD was set or the medium
     * reports zero capacity. */
    if (!dbd && nb_sectors) {
        if (r->req.cmd.buf[0] == MODE_SENSE) {
            outbuf[3] = 8; /* Block descriptor length */
        } else { /* MODE_SENSE_10 */
            outbuf[7] = 8; /* Block descriptor length */
        }
        nb_sectors /= s->cluster_size;
        /* The short descriptor's block count field is only 24 bits;
         * 0 means "all remaining blocks". */
        if (nb_sectors > 0xffffff)
            nb_sectors = 0;
        p[0] = 0; /* media density code */
        p[1] = (nb_sectors >> 16) & 0xff;
        p[2] = (nb_sectors >> 8) & 0xff;
        p[3] = nb_sectors & 0xff;
        p[4] = 0; /* reserved */
        p[5] = 0; /* bytes 5-7 are the sector size in bytes */
        p[6] = s->cluster_size * 2;
        p[7] = 0;
        p += 8;
    }

    /* Page control 3 requests saved values, which we do not support. */
    if (page_control == 3) {
        /* Saved Values */
        scsi_check_condition(r, SENSE_CODE(SAVING_PARAMS_NOT_SUPPORTED));
        return -1;
    }

    /* Page 0x3f means "all pages"; otherwise a single page, failing the
     * command if mode_sense_page() does not know it. */
    if (page == 0x3f) {
        for (page = 0; page <= 0x3e; page++) {
            mode_sense_page(s, page, &p, page_control);
        }
    } else {
        ret = mode_sense_page(s, page, &p, page_control);
        if (ret == -1) {
            return -1;
        }
    }

    buflen = p - outbuf;
    /*
     * The mode data length field specifies the length in bytes of the
     * following data that is available to be transferred. The mode data
     * length does not include itself.
     */
    if (r->req.cmd.buf[0] == MODE_SENSE) {
        outbuf[0] = buflen - 1;
    } else { /* MODE_SENSE_10 */
        outbuf[0] = ((buflen - 2) >> 8) & 0xff;
        outbuf[1] = (buflen - 2) & 0xff;
    }

    /* Never return more than the initiator's allocation length. */
    if (buflen > r->req.cmd.xfer)
        buflen = r->req.cmd.xfer;
    return buflen;
}
"CWE-119",
"CWE-787"
] | qemu | 103b40f51e4012b3b0ad20f615562a1806d7f49a | 257,938,227,221,607,180,000,000,000,000,000,000,000 | 88 | scsi-disk: commonize iovec creation between reads and writes
Also, consistently use qiov.size instead of iov.iov_len.
Signed-off-by: Paolo Bonzini <[email protected]>
Signed-off-by: Kevin Wolf <[email protected]> |
/*
 * Split a received SMB2 buffer (possibly a compound chain, possibly
 * encrypted) into an iovec array, four iovecs per PDU:
 *   [0] transform header (NULL/0 for plaintext PDUs),
 *   [1] the 64-byte SMB2 header,
 *   [2] the fixed-size body,
 *   [3] the variable/dynamic part up to the next chained command.
 *
 * Encrypted ranges (introduced by SMB2_TF_MAGIC) are decrypted in place
 * using the matching session's decryption key before being parsed.
 *
 * On success *piov/*pnum_iov receive the talloc'ed array (allocated on
 * mem_ctx) and its length; on malformed input the function returns
 * NT_STATUS_INVALID_NETWORK_RESPONSE and frees the array.
 */
static NTSTATUS smb2cli_inbuf_parse_compound(struct smbXcli_conn *conn,
					     uint8_t *buf,
					     size_t buflen,
					     TALLOC_CTX *mem_ctx,
					     struct iovec **piov, int *pnum_iov)
{
	struct iovec *iov;
	int num_iov = 0;
	size_t taken = 0;			/* bytes of buf consumed so far */
	uint8_t *first_hdr = buf;
	size_t verified_buflen = 0;		/* end of the decrypted region */
	uint8_t *tf = NULL;			/* current transform header, if any */
	size_t tf_len = 0;

	/* Start with a zero-length array; grown by 4 per parsed PDU below. */
	iov = talloc_array(mem_ctx, struct iovec, num_iov);
	if (iov == NULL) {
		return NT_STATUS_NO_MEMORY;
	}

	while (taken < buflen) {
		size_t len = buflen - taken;
		uint8_t *hdr = first_hdr + taken;
		struct iovec *cur;
		size_t full_size;
		size_t next_command_ofs;
		uint16_t body_size;
		struct iovec *iov_tmp;

		/*
		 * While still inside a previously decrypted region, limit
		 * parsing to it and keep the transform header association;
		 * otherwise reset the transform state for this PDU.
		 */
		if (verified_buflen > taken) {
			len = verified_buflen - taken;
		} else {
			tf = NULL;
			tf_len = 0;
		}

		/* Need at least the 4-byte protocol magic. */
		if (len < 4) {
			DEBUG(10, ("%d bytes left, expected at least %d\n",
				   (int)len, 4));
			goto inval;
		}
		if (IVAL(hdr, 0) == SMB2_TF_MAGIC) {
			struct smbXcli_session *s;
			uint64_t uid;
			struct iovec tf_iov[2];
			size_t enc_len;
			NTSTATUS status;

			if (len < SMB2_TF_HDR_SIZE) {
				DEBUG(10, ("%d bytes left, expected at least %d\n",
					   (int)len, SMB2_TF_HDR_SIZE));
				goto inval;
			}
			tf = hdr;
			tf_len = SMB2_TF_HDR_SIZE;
			taken += tf_len;

			hdr = first_hdr + taken;
			enc_len = IVAL(tf, SMB2_TF_MSG_SIZE);
			uid = BVAL(tf, SMB2_TF_SESSION_ID);

			/* The whole encrypted payload must be present. */
			if (len < SMB2_TF_HDR_SIZE + enc_len) {
				DEBUG(10, ("%d bytes left, expected at least %d\n",
					   (int)len,
					   (int)(SMB2_TF_HDR_SIZE + enc_len)));
				goto inval;
			}

			/* Find the session whose id matches the transform
			 * header, to get its decryption key. */
			s = conn->sessions;
			for (; s; s = s->next) {
				if (s->smb2->session_id != uid) {
					continue;
				}
				break;
			}

			if (s == NULL) {
				DEBUG(10, ("unknown session_id %llu\n",
					   (unsigned long long)uid));
				goto inval;
			}

			/* Decrypt in place: iov[0] = header (AAD),
			 * iov[1] = ciphertext. */
			tf_iov[0].iov_base = (void *)tf;
			tf_iov[0].iov_len = tf_len;
			tf_iov[1].iov_base = (void *)hdr;
			tf_iov[1].iov_len = enc_len;

			status = smb2_signing_decrypt_pdu(s->smb2->decryption_key,
							  conn->smb2.server.cipher,
							  tf_iov, 2);
			if (!NT_STATUS_IS_OK(status)) {
				TALLOC_FREE(iov);
				return status;
			}

			verified_buflen = taken + enc_len;
			len = enc_len;
		}

		/*
		 * We need the header plus the body length field
		 */

		if (len < SMB2_HDR_BODY + 2) {
			DEBUG(10, ("%d bytes left, expected at least %d\n",
				   (int)len, SMB2_HDR_BODY));
			goto inval;
		}
		if (IVAL(hdr, 0) != SMB2_MAGIC) {
			DEBUG(10, ("Got non-SMB2 PDU: %x\n",
				   IVAL(hdr, 0)));
			goto inval;
		}
		/* StructureSize of the SMB2 header must be 64. */
		if (SVAL(hdr, 4) != SMB2_HDR_BODY) {
			DEBUG(10, ("Got HDR len %d, expected %d\n",
				   SVAL(hdr, 4), SMB2_HDR_BODY));
			goto inval;
		}

		full_size = len;
		next_command_ofs = IVAL(hdr, SMB2_HDR_NEXT_COMMAND);
		body_size = SVAL(hdr, SMB2_HDR_BODY);

		/* A nonzero NextCommand offset bounds this PDU; it must at
		 * least cover the header plus the body length field and may
		 * not point past the available data. */
		if (next_command_ofs != 0) {
			if (next_command_ofs < (SMB2_HDR_BODY + 2)) {
				goto inval;
			}
			if (next_command_ofs > full_size) {
				goto inval;
			}
			full_size = next_command_ofs;
		}
		if (body_size < 2) {
			goto inval;
		}
		/* The low bit of StructureSize flags a dynamic part; mask it
		 * off to get the fixed body length. */
		body_size &= 0xfffe;

		if (body_size > (full_size - SMB2_HDR_BODY)) {
			goto inval;
		}

		/* Grow the result array by the 4 iovecs for this PDU. */
		iov_tmp = talloc_realloc(mem_ctx, iov, struct iovec,
					 num_iov + 4);
		if (iov_tmp == NULL) {
			TALLOC_FREE(iov);
			return NT_STATUS_NO_MEMORY;
		}
		iov = iov_tmp;
		cur = &iov[num_iov];
		num_iov += 4;

		cur[0].iov_base = tf;
		cur[0].iov_len = tf_len;
		cur[1].iov_base = hdr;
		cur[1].iov_len = SMB2_HDR_BODY;
		cur[2].iov_base = hdr + SMB2_HDR_BODY;
		cur[2].iov_len = body_size;
		cur[3].iov_base = hdr + SMB2_HDR_BODY + body_size;
		cur[3].iov_len = full_size - (SMB2_HDR_BODY + body_size);

		taken += full_size;
	}

	*piov = iov;
	*pnum_iov = num_iov;
	return NT_STATUS_OK;

inval:
	TALLOC_FREE(iov);
	return NT_STATUS_INVALID_NETWORK_RESPONSE;
}
"CWE-20"
] | samba | a819d2b440aafa3138d95ff6e8b824da885a70e9 | 37,818,249,963,966,856,000,000,000,000,000,000,000 | 170 | CVE-2015-5296: libcli/smb: make sure we require signing when we demand encryption on a session
BUG: https://bugzilla.samba.org/show_bug.cgi?id=11536
Signed-off-by: Stefan Metzmacher <[email protected]>
Reviewed-by: Jeremy Allison <[email protected]> |
bool print_addr_hint_cb(ut64 addr, const RzVector /*<const RzAnalysisAddrHintRecord>*/ *records, void *user) {
	// Wrap the per-address hint records in a fresh tree node and insert
	// it, keyed by address, into the red-black tree passed via user.
	HintNode *n = RZ_NEW0(HintNode);
	if (n == NULL) {
		return false;
	}
	n->type = HINT_NODE_ADDR;
	n->addr = addr;
	n->addr_hints = records;
	rz_rbtree_insert(user, &addr, &n->rb, hint_node_cmp, NULL);
	return true;
}
"CWE-703"
] | rizin | 6ce71d8aa3dafe3cdb52d5d72ae8f4b95916f939 | 119,462,049,925,170,840,000,000,000,000,000,000,000 | 11 | Initialize retctx,ctx before freeing the inner elements
In rz_core_analysis_type_match retctx structure was initialized on the
stack only after a "goto out_function", where a field of that structure
was freed. When the goto path is taken, the field is not properly
initialized and it cause cause a crash of Rizin or have other effects.
Fixes: CVE-2021-4022 |
/*
  Build the TYPELIB "interval" for an ENUM/SET column from interval_list.

  Each list value is converted to the column character set when needed
  (otherwise reused or copied depending on reuse_interval_list_values),
  trailing spaces are stripped, and for SET columns values containing the
  separator comma (in the column charset) are rejected.

  All memory comes from mem_root.  On success interval_list is emptied and
  false is returned; on failure interval is reset to NULL and true is
  returned (EOM or an illegal SET value).
*/
bool Column_definition::create_interval_from_interval_list(MEM_ROOT *mem_root,
                                               bool reuse_interval_list_values)
{
  DBUG_ENTER("Column_definition::create_interval_from_interval_list");
  DBUG_ASSERT(!interval);
  if (!(interval= (TYPELIB*) alloc_root(mem_root, sizeof(TYPELIB))))
    DBUG_RETURN(true); // EOM

  List_iterator<String> it(interval_list);
  StringBuffer<64> conv;
  char comma_buf[5]; /* 5 bytes for 'filename' charset */
  DBUG_ASSERT(sizeof(comma_buf) >= charset->mbmaxlen);
  /* Encode ',' in the column charset, to search for it inside SET values. */
  int comma_length= charset->cset->wc_mb(charset, ',',
                                         (uchar*) comma_buf,
                                         (uchar*) comma_buf +
                                         sizeof(comma_buf));
  DBUG_ASSERT(comma_length >= 0 && comma_length <= (int) sizeof(comma_buf));

  /* One extra slot in each array for the NULL/0 end markers. */
  if (!multi_alloc_root(mem_root,
                        &interval->type_names,
                        sizeof(char*) * (interval_list.elements + 1),
                        &interval->type_lengths,
                        sizeof(uint) * (interval_list.elements + 1),
                        NullS))
    goto err; // EOM

  interval->name= "";
  interval->count= interval_list.elements;

  for (uint i= 0; i < interval->count; i++)
  {
    uint32 dummy;
    String *tmp= it++;
    LEX_CSTRING value;
    /* Convert the value to the column charset if it differs. */
    if (String::needs_conversion(tmp->length(), tmp->charset(),
                                 charset, &dummy))
    {
      uint cnv_errs;
      conv.copy(tmp->ptr(), tmp->length(), tmp->charset(), charset, &cnv_errs);
      value.str= strmake_root(mem_root, conv.ptr(), conv.length());
      value.length= conv.length();
    }
    else
    {
      /* Same charset: either point at the list value or duplicate it. */
      value.str= reuse_interval_list_values ? tmp->ptr() :
                                              strmake_root(mem_root,
                                                           tmp->ptr(),
                                                           tmp->length());
      value.length= tmp->length();
    }
    if (!value.str)
      goto err; // EOM

    // Strip trailing spaces.
    value.length= charset->cset->lengthsp(charset, value.str, value.length);
    ((char*) value.str)[value.length]= '\0';

    if (real_field_type() == MYSQL_TYPE_SET)
    {
      /* SET values are stored comma-separated, so a value containing the
         separator itself is ambiguous and must be rejected. */
      if (charset->coll->instr(charset, value.str, value.length,
                               comma_buf, comma_length, NULL, 0))
      {
        ErrConvString err(tmp);
        my_error(ER_ILLEGAL_VALUE_FOR_TYPE, MYF(0), "set", err.ptr());
        goto err;
      }
    }
    interval->type_names[i]= value.str;
    interval->type_lengths[i]= (uint)value.length;
  }
  interval->type_names[interval->count]= 0;    // End marker
  interval->type_lengths[interval->count]= 0;
  interval_list.empty();  // Don't need interval_list anymore
  DBUG_RETURN(false);
err:
  interval= NULL;  // Avoid having both non-empty interval_list and interval
  DBUG_RETURN(true);
}
"CWE-416",
"CWE-703"
] | server | 08c7ab404f69d9c4ca6ca7a9cf7eec74c804f917 | 309,252,867,228,154,380,000,000,000,000,000,000,000 | 78 | MDEV-24176 Server crashes after insert in the table with virtual
column generated using date_format() and if()
vcol_info->expr is allocated on expr_arena at parsing stage. Since
expr item is allocated on expr_arena all its containee items must be
allocated on expr_arena too. Otherwise fix_session_expr() will
encounter prematurely freed item.
When table is reopened from cache vcol_info contains stale
expression. We refresh expression via TABLE::vcol_fix_exprs() but
first we must prepare a proper context (Vcol_expr_context) which meets
some requirements:
1. As noted above expr update must be done on expr_arena as there may
be new items created. It was a bug in fix_session_expr_for_read() and
was just not reproduced because of no second refix. Now refix is done
for more cases so it does reproduce. Tests affected: vcol.binlog
2. Also name resolution context must be narrowed to the single table.
Tested by: vcol.update main.default vcol.vcol_syntax gcol.gcol_bugfixes
3. sql_mode must be clean and not fail expr update.
sql_mode such as MODE_NO_BACKSLASH_ESCAPES, MODE_NO_ZERO_IN_DATE, etc
must not affect vcol expression update. If the table was created
successfully any further evaluation must not fail. Tests affected:
main.func_like
Reviewed by: Sergei Golubchik <[email protected]> |
/*
 * Register a bin plugin.
 *
 * Runs the plugin's init hook (if any), refuses duplicates by name, and
 * stores a heap copy of the descriptor in bin->plugins.
 *
 * Returns true when the plugin was added, false when a plugin with the
 * same name is already registered or allocation fails.
 */
R_API int r_bin_add(RBin *bin, RBinPlugin *foo) {
	RListIter *it;
	RBinPlugin *plugin;
	if (foo->init) {
		foo->init (bin->user);
	}
	r_list_foreach (bin->plugins, it, plugin) {
		if (!strcmp (plugin->name, foo->name)) {
			return false;
		}
	}
	plugin = R_NEW0 (RBinPlugin);
	if (!plugin) {
		/* Fix: the original dereferenced a NULL pointer via memcpy on OOM. */
		return false;
	}
	memcpy (plugin, foo, sizeof (RBinPlugin));
	r_list_append (bin->plugins, plugin);
	return true;
}
"CWE-125"
] | radare2 | d31c4d3cbdbe01ea3ded16a584de94149ecd31d9 | 242,290,224,286,127,880,000,000,000,000,000,000,000 | 16 | Fix #8748 - Fix oobread on string search |
static std::string WC2MB(const std::wstring& input, unsigned int code_page) {
if (input.empty()) {
return "";
}
// There do have other code pages which require the flags to be 0, e.g.,
// 50220, 50211, and so on. But they are not included in our charset
// dictionary. So, only consider 65001 (UTF-8) and 54936 (GB18030).
DWORD flags = 0;
if (code_page != 65001 && code_page != 54936) {
flags = WC_NO_BEST_FIT_CHARS | WC_COMPOSITECHECK | WC_DEFAULTCHAR;
}
int length = ::WideCharToMultiByte(code_page, flags, &input[0],
static_cast<int>(input.size()), NULL, 0,
NULL, NULL);
std::string output(length, '\0');
::WideCharToMultiByte(code_page, flags, &input[0],
static_cast<int>(input.size()), &output[0],
static_cast<int>(output.size()), NULL, NULL);
return output;
} | 0 | [
"CWE-22"
] | webcc | 55a45fd5039061d5cc62e9f1b9d1f7e97a15143f | 96,333,175,049,079,330,000,000,000,000,000,000,000 | 25 | fix static file serving security issue; fix url path encoding issue |
/* mkdir replacement for Windows: converts the UTF-8 path to UTF-16,
 * creates the directory, and applies the hidden attribute when the
 * path warrants it (per needs_hiding()). */
int mingw_mkdir(const char *path, int mode)
{
	wchar_t wpath[MAX_PATH];
	int ret;

	if (xutftowcs_path(wpath, path) < 0)
		return -1;

	ret = _wmkdir(wpath);
	if (ret)
		return ret;

	return needs_hiding(path) ? set_hidden_flag(wpath, 1) : 0;
}
"CWE-20"
] | git | 6d8684161ee9c03bed5cb69ae76dfdddb85a0003 | 277,939,125,326,652,860,000,000,000,000,000,000,000 | 11 | mingw: fix quoting of arguments
We need to be careful to follow proper quoting rules. For example, if an
argument contains spaces, we have to quote them. Double-quotes need to
be escaped. Backslashes need to be escaped, but only if they are
followed by a double-quote character.
We need to be _extra_ careful to consider the case where an argument
ends in a backslash _and_ needs to be quoted: in this case, we append a
double-quote character, i.e. the backslash now has to be escaped!
The current code, however, fails to recognize that, and therefore can
turn an argument that ends in a single backslash into a quoted argument
that now ends in an escaped double-quote character. This allows
subsequent command-line parameters to be split and part of them being
mistaken for command-line options, e.g. through a maliciously-crafted
submodule URL during a recursive clone.
Technically, we would not need to quote _all_ arguments which end in a
backslash _unless_ the argument needs to be quoted anyway. For example,
`test\` would not need to be quoted, while `test \` would need to be.
To keep the code simple, however, and therefore easier to reason about
and ensure its correctness, we now _always_ quote an argument that ends
in a backslash.
This addresses CVE-2019-1350.
Signed-off-by: Johannes Schindelin <[email protected]> |
netanalyzer_if_print(netdissect_options *ndo, const struct pcap_pkthdr *h,
                     const u_char *p)
{
	/*
	 * The Hilscher netANALYZER pseudo-header occupies the first 4
	 * bytes; bail out if the packet is too short to contain it.
	 */
	if (h->caplen < 4 || h->len < 4) {
		ND_PRINT((ndo, "[|netanalyzer]"));
		return (h->caplen);
	}

	/* Dissect the Ethernet frame that follows the pseudo-header. */
	return (4 + ether_print(ndo, p + 4, h->len - 4, h->caplen - 4, NULL, NULL));
}
"CWE-125",
"CWE-787"
] | tcpdump | 1dcd10aceabbc03bf571ea32b892c522cbe923de | 301,072,299,901,570,500,000,000,000,000,000,000,000 | 14 | CVE-2017-12897/ISO CLNS: Use ND_TTEST() for the bounds checks in isoclns_print().
This fixes a buffer over-read discovered by Kamil Frankowicz.
Don't pass the remaining caplen - that's too hard to get right, and we
were getting it wrong in at least one case; just use ND_TTEST().
Add a test using the capture file supplied by the reporter(s). |
/*
 * Process a fully reassembled Program Association Table (PAT) section.
 *
 * Creates one GF_M2TS_Program (with its PMT section filter) per program
 * entry, or the NIT filter for program number 0, and fires the
 * PAT_FOUND/PAT_UPDATE event.
 *
 * Stricter validation (cf #1269): program entries whose PMT PID is 0
 * (the PAT itself), 0x1FFF (null packets) or already mapped in ts->ess[]
 * are rejected.  The original code blindly overwrote ts->ess[pid], which
 * a crafted PAT could use to clobber an existing ES entry and leave
 * dangling references (use-after-free).
 */
static void gf_m2ts_process_pat(GF_M2TS_Demuxer *ts, GF_M2TS_SECTION_ES *ses, GF_List *sections, u8 table_id, u16 ex_table_id, u8 version_number, u8 last_section_number, u32 status)
{
	GF_M2TS_Program *prog;
	GF_M2TS_SECTION_ES *pmt;
	u32 i, nb_progs, evt_type;
	u32 nb_sections;
	u32 data_size;
	unsigned char *data;
	GF_M2TS_Section *section;

	/*wait for the last section */
	if (!(status&GF_M2TS_TABLE_END)) return;

	/*skip if already received*/
	if (status&GF_M2TS_TABLE_REPEAT) {
		if (ts->on_event) ts->on_event(ts, GF_M2TS_EVT_PAT_REPEAT, NULL);
		return;
	}

	nb_sections = gf_list_count(sections);
	if (nb_sections > 1) {
		GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("PAT on multiple sections not supported\n"));
	}

	section = (GF_M2TS_Section *)gf_list_get(sections, 0);
	if (!section) return;
	data = section->data;
	data_size = section->data_size;
	/* each program entry is 4 bytes (number + pid) */
	if (!data || (data_size < 4)) return;

	if (!(status&GF_M2TS_TABLE_UPDATE) && gf_list_count(ts->programs)) {
		if (ts->pat->demux_restarted) {
			ts->pat->demux_restarted = 0;
		} else {
			GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("Multiple different PAT on single TS found, ignoring new PAT declaration (table id %d - extended table id %d)\n", table_id, ex_table_id));
		}
		return;
	}
	nb_progs = data_size / 4;

	for (i=0; i<nb_progs; i++) {
		u16 number, pid;
		number = (data[0]<<8) | data[1];
		pid = (data[2]&0x1f)<<8 | data[3];
		data += 4;
		if (number==0) {
			if (!ts->nit) {
				ts->nit = gf_m2ts_section_filter_new(gf_m2ts_process_nit, 0);
			}
			continue;
		}
		/* be stricter in PAT processing: PID 0 is the PAT itself and
		   0x1FFF is the null-packet PID, neither can carry a PMT */
		if (!pid || (pid==0x1FFF)) {
			GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("Invalid PMT PID %d for program %d, skipping\n", pid, number));
			continue;
		}
		/* refuse to map a PMT on a PID already assigned: overwriting
		   ts->ess[pid] leaks the previous ES and leaves dangling
		   references to it */
		if (ts->ess[pid]) {
			GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("PID %d already in use, ignoring program %d\n", pid, number));
			continue;
		}
		GF_SAFEALLOC(prog, GF_M2TS_Program);
		if (!prog) {
			GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("Fail to allocate program for pid %d\n", pid));
			return;
		}
		prog->streams = gf_list_new();
		prog->pmt_pid = pid;
		prog->number = number;
		prog->ts = ts;
		gf_list_add(ts->programs, prog);
		GF_SAFEALLOC(pmt, GF_M2TS_SECTION_ES);
		if (!pmt) {
			GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("Fail to allocate pmt filter for pid %d\n", pid));
			return;
		}
		pmt->flags = GF_M2TS_ES_IS_SECTION;
		gf_list_add(prog->streams, pmt);
		pmt->pid = prog->pmt_pid;
		pmt->program = prog;
		ts->ess[pmt->pid] = (GF_M2TS_ES *)pmt;
		pmt->sec = gf_m2ts_section_filter_new(gf_m2ts_process_pmt, 0);
	}

	evt_type = (status&GF_M2TS_TABLE_UPDATE) ? GF_M2TS_EVT_PAT_UPDATE : GF_M2TS_EVT_PAT_FOUND;
	if (ts->on_event) ts->on_event(ts, evt_type, NULL);
}
"CWE-416"
] | gpac | 98b727637e32d1d4824101d8947e2dbd573d4fc8 | 256,705,383,909,547,300,000,000,000,000,000,000,000 | 75 | be stricter in PAT processing - cf #1269 |
/*
 * Fill in a CM REQ message from the caller-supplied connection parameters
 * and the primary (and optional alternate) path records.
 *
 * For hop_limit <= 1 the path stays on the local subnet, so the real
 * source/destination LIDs are used; otherwise the permissive LID is
 * substituted (work-around, see comment below).  XRC initiator QPs skip
 * the responder-resources/retry fields.  Trailing private data, if any,
 * is copied verbatim.
 */
static void cm_format_req(struct cm_req_msg *req_msg,
			  struct cm_id_private *cm_id_priv,
			  struct ib_cm_req_param *param)
{
	struct ib_sa_path_rec *pri_path = param->primary_path;
	struct ib_sa_path_rec *alt_path = param->alternate_path;

	cm_format_mad_hdr(&req_msg->hdr, CM_REQ_ATTR_ID,
			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_REQ));

	/* Identity and QP parameters. */
	req_msg->local_comm_id = cm_id_priv->id.local_id;
	req_msg->service_id = param->service_id;
	req_msg->local_ca_guid = cm_id_priv->id.device->node_guid;
	cm_req_set_local_qpn(req_msg, cpu_to_be32(param->qp_num));
	cm_req_set_init_depth(req_msg, param->initiator_depth);
	cm_req_set_remote_resp_timeout(req_msg,
				       param->remote_cm_response_timeout);
	cm_req_set_qp_type(req_msg, param->qp_type);
	cm_req_set_flow_ctrl(req_msg, param->flow_control);
	cm_req_set_starting_psn(req_msg, cpu_to_be32(param->starting_psn));
	cm_req_set_local_resp_timeout(req_msg,
				      param->local_cm_response_timeout);
	req_msg->pkey = param->primary_path->pkey;
	cm_req_set_path_mtu(req_msg, param->primary_path->mtu);
	cm_req_set_max_cm_retries(req_msg, param->max_cm_retries);

	/* These fields are not applicable to XRC initiator QPs. */
	if (param->qp_type != IB_QPT_XRC_INI) {
		cm_req_set_resp_res(req_msg, param->responder_resources);
		cm_req_set_retry_count(req_msg, param->retry_count);
		cm_req_set_rnr_retry_count(req_msg, param->rnr_retry_count);
		cm_req_set_srq(req_msg, param->srq);
	}

	/* Primary path addressing. */
	if (pri_path->hop_limit <= 1) {
		req_msg->primary_local_lid = pri_path->slid;
		req_msg->primary_remote_lid = pri_path->dlid;
	} else {
		/* Work-around until there's a way to obtain remote LID info */
		req_msg->primary_local_lid = IB_LID_PERMISSIVE;
		req_msg->primary_remote_lid = IB_LID_PERMISSIVE;
	}
	req_msg->primary_local_gid = pri_path->sgid;
	req_msg->primary_remote_gid = pri_path->dgid;
	cm_req_set_primary_flow_label(req_msg, pri_path->flow_label);
	cm_req_set_primary_packet_rate(req_msg, pri_path->rate);
	req_msg->primary_traffic_class = pri_path->traffic_class;
	req_msg->primary_hop_limit = pri_path->hop_limit;
	cm_req_set_primary_sl(req_msg, pri_path->sl);
	cm_req_set_primary_subnet_local(req_msg, (pri_path->hop_limit <= 1));
	cm_req_set_primary_local_ack_timeout(req_msg,
		cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
			       pri_path->packet_life_time));

	/* Optional alternate path, mirroring the primary-path logic. */
	if (alt_path) {
		if (alt_path->hop_limit <= 1) {
			req_msg->alt_local_lid = alt_path->slid;
			req_msg->alt_remote_lid = alt_path->dlid;
		} else {
			req_msg->alt_local_lid = IB_LID_PERMISSIVE;
			req_msg->alt_remote_lid = IB_LID_PERMISSIVE;
		}
		req_msg->alt_local_gid = alt_path->sgid;
		req_msg->alt_remote_gid = alt_path->dgid;
		cm_req_set_alt_flow_label(req_msg,
					  alt_path->flow_label);
		cm_req_set_alt_packet_rate(req_msg, alt_path->rate);
		req_msg->alt_traffic_class = alt_path->traffic_class;
		req_msg->alt_hop_limit = alt_path->hop_limit;
		cm_req_set_alt_sl(req_msg, alt_path->sl);
		cm_req_set_alt_subnet_local(req_msg, (alt_path->hop_limit <= 1));
		cm_req_set_alt_local_ack_timeout(req_msg,
			cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
				       alt_path->packet_life_time));
	}

	if (param->private_data && param->private_data_len)
		memcpy(req_msg->private_data, param->private_data,
		       param->private_data_len);
}
"CWE-20"
] | linux | b2853fd6c2d0f383dbdf7427e263eb576a633867 | 316,362,408,998,586,800,000,000,000,000,000,000,000 | 79 | IB/core: Don't resolve passive side RoCE L2 address in CMA REQ handler
The code that resolves the passive side source MAC within the rdma_cm
connection request handler was both redundant and buggy, so remove it.
It was redundant since later, when an RC QP is modified to RTR state,
the resolution will take place in the ib_core module. It was buggy
because this callback also deals with UD SIDR exchange, for which we
incorrectly looked at the REQ member of the CM event and dereferenced
a random value.
Fixes: dd5f03beb4f7 ("IB/core: Ethernet L2 attributes in verbs/cm structures")
Signed-off-by: Moni Shoua <[email protected]>
Signed-off-by: Or Gerlitz <[email protected]>
Signed-off-by: Roland Dreier <[email protected]> |
/*
 * Arch-specific KVM initialization hook.  Nothing to set up here, so it
 * unconditionally reports success.
 */
int kvm_arch_init(void *opaque)
{
	return 0;
}
"CWE-476"
] | linux | ac64115a66c18c01745bbd3c47a36b124e5fd8c0 | 51,618,142,899,456,100,000,000,000,000,000,000,000 | 4 | KVM: PPC: Fix oops when checking KVM_CAP_PPC_HTM
The following program causes a kernel oops:
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>
main()
{
int fd = open("/dev/kvm", O_RDWR);
ioctl(fd, KVM_CHECK_EXTENSION, KVM_CAP_PPC_HTM);
}
This happens because when using the global KVM fd with
KVM_CHECK_EXTENSION, kvm_vm_ioctl_check_extension() gets
called with a NULL kvm argument, which gets dereferenced
in is_kvmppc_hv_enabled(). Spotted while reading the code.
Let's use the hv_enabled fallback variable, like everywhere
else in this function.
Fixes: 23528bb21ee2 ("KVM: PPC: Introduce KVM_CAP_PPC_HTM")
Cc: [email protected] # v4.7+
Signed-off-by: Greg Kurz <[email protected]>
Reviewed-by: David Gibson <[email protected]>
Reviewed-by: Thomas Huth <[email protected]>
Signed-off-by: Paul Mackerras <[email protected]> |
static int sctp_verify_ext_param(struct net *net, union sctp_params param)
{
__u16 num_ext = ntohs(param.p->length) - sizeof(sctp_paramhdr_t);
int have_auth = 0;
int have_asconf = 0;
int i;
for (i = 0; i < num_ext; i++) {
switch (param.ext->chunks[i]) {
case SCTP_CID_AUTH:
have_auth = 1;
break;
case SCTP_CID_ASCONF:
case SCTP_CID_ASCONF_ACK:
have_asconf = 1;
break;
}
}
/* ADD-IP Security: The draft requires us to ABORT or ignore the
* INIT/INIT-ACK if ADD-IP is listed, but AUTH is not. Do this
* only if ADD-IP is turned on and we are not backward-compatible
* mode.
*/
if (net->sctp.addip_noauth)
return 1;
if (net->sctp.addip_enable && !have_auth && have_asconf)
return 0;
return 1;
} | 0 | [
"CWE-20",
"CWE-399"
] | linux | 9de7922bc709eee2f609cd01d98aaedc4cf5ea74 | 38,316,703,079,721,880,000,000,000,000,000,000,000 | 32 | net: sctp: fix skb_over_panic when receiving malformed ASCONF chunks
Commit 6f4c618ddb0 ("SCTP : Add paramters validity check for
ASCONF chunk") added basic verification of ASCONF chunks, however,
it is still possible to remotely crash a server by sending a
special crafted ASCONF chunk, even up to pre 2.6.12 kernels:
skb_over_panic: text:ffffffffa01ea1c3 len:31056 put:30768
head:ffff88011bd81800 data:ffff88011bd81800 tail:0x7950
end:0x440 dev:<NULL>
------------[ cut here ]------------
kernel BUG at net/core/skbuff.c:129!
[...]
Call Trace:
<IRQ>
[<ffffffff8144fb1c>] skb_put+0x5c/0x70
[<ffffffffa01ea1c3>] sctp_addto_chunk+0x63/0xd0 [sctp]
[<ffffffffa01eadaf>] sctp_process_asconf+0x1af/0x540 [sctp]
[<ffffffff8152d025>] ? _read_unlock_bh+0x15/0x20
[<ffffffffa01e0038>] sctp_sf_do_asconf+0x168/0x240 [sctp]
[<ffffffffa01e3751>] sctp_do_sm+0x71/0x1210 [sctp]
[<ffffffff8147645d>] ? fib_rules_lookup+0xad/0xf0
[<ffffffffa01e6b22>] ? sctp_cmp_addr_exact+0x32/0x40 [sctp]
[<ffffffffa01e8393>] sctp_assoc_bh_rcv+0xd3/0x180 [sctp]
[<ffffffffa01ee986>] sctp_inq_push+0x56/0x80 [sctp]
[<ffffffffa01fcc42>] sctp_rcv+0x982/0xa10 [sctp]
[<ffffffffa01d5123>] ? ipt_local_in_hook+0x23/0x28 [iptable_filter]
[<ffffffff8148bdc9>] ? nf_iterate+0x69/0xb0
[<ffffffff81496d10>] ? ip_local_deliver_finish+0x0/0x2d0
[<ffffffff8148bf86>] ? nf_hook_slow+0x76/0x120
[<ffffffff81496d10>] ? ip_local_deliver_finish+0x0/0x2d0
[<ffffffff81496ded>] ip_local_deliver_finish+0xdd/0x2d0
[<ffffffff81497078>] ip_local_deliver+0x98/0xa0
[<ffffffff8149653d>] ip_rcv_finish+0x12d/0x440
[<ffffffff81496ac5>] ip_rcv+0x275/0x350
[<ffffffff8145c88b>] __netif_receive_skb+0x4ab/0x750
[<ffffffff81460588>] netif_receive_skb+0x58/0x60
This can be triggered e.g., through a simple scripted nmap
connection scan injecting the chunk after the handshake, for
example, ...
-------------- INIT[ASCONF; ASCONF_ACK] ------------->
<----------- INIT-ACK[ASCONF; ASCONF_ACK] ------------
-------------------- COOKIE-ECHO -------------------->
<-------------------- COOKIE-ACK ---------------------
------------------ ASCONF; UNKNOWN ------------------>
... where ASCONF chunk of length 280 contains 2 parameters ...
1) Add IP address parameter (param length: 16)
2) Add/del IP address parameter (param length: 255)
... followed by an UNKNOWN chunk of e.g. 4 bytes. Here, the
Address Parameter in the ASCONF chunk is even missing, too.
This is just an example and similarly-crafted ASCONF chunks
could be used just as well.
The ASCONF chunk passes through sctp_verify_asconf() as all
parameters passed sanity checks, and after walking, we ended
up successfully at the chunk end boundary, and thus may invoke
sctp_process_asconf(). Parameter walking is done with
WORD_ROUND() to take padding into account.
In sctp_process_asconf()'s TLV processing, we may fail in
sctp_process_asconf_param() e.g., due to removal of the IP
address that is also the source address of the packet containing
the ASCONF chunk, and thus we need to add all TLVs after the
failure to our ASCONF response to remote via helper function
sctp_add_asconf_response(), which basically invokes a
sctp_addto_chunk() adding the error parameters to the given
skb.
When walking to the next parameter this time, we proceed
with ...
length = ntohs(asconf_param->param_hdr.length);
asconf_param = (void *)asconf_param + length;
... instead of the WORD_ROUND()'ed length, thus resulting here
in an off-by-one that leads to reading the follow-up garbage
parameter length of 12336, and thus throwing an skb_over_panic
for the reply when trying to sctp_addto_chunk() next time,
which implicitly calls the skb_put() with that length.
Fix it by using sctp_walk_params() [ which is also used in
INIT parameter processing ] macro in the verification *and*
in ASCONF processing: it will make sure we don't spill over,
that we walk parameters WORD_ROUND()'ed. Moreover, we're being
more defensive and guard against unknown parameter types and
missized addresses.
Joint work with Vlad Yasevich.
Fixes: b896b82be4ae ("[SCTP] ADDIP: Support for processing incoming ASCONF_ACK chunks.")
Signed-off-by: Daniel Borkmann <[email protected]>
Signed-off-by: Vlad Yasevich <[email protected]>
Acked-by: Neil Horman <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
/*
 * Save all extended CPU register state into @xstate during early boot.
 *
 * Boot-time only (WARNs if system_state is past SYSTEM_BOOTING).  Uses
 * XSAVES when the CPU supports it, plain XSAVE otherwise, with the
 * request mask set to all components (-1).  Saving to a kernel buffer
 * must never fault, hence the WARN on a nonzero error.
 */
static inline void copy_xregs_to_kernel_booting(struct xregs_state *xstate)
{
	u64 mask = -1;			/* request every xstate component */
	u32 lmask = mask;		/* low 32 bits of the mask (EAX) */
	u32 hmask = mask >> 32;		/* high 32 bits of the mask (EDX) */
	int err;

	WARN_ON(system_state != SYSTEM_BOOTING);

	if (boot_cpu_has(X86_FEATURE_XSAVES))
		XSTATE_OP(XSAVES, xstate, lmask, hmask, err);
	else
		XSTATE_OP(XSAVE, xstate, lmask, hmask, err);

	/* We should never fault when copying to a kernel buffer: */
	WARN_ON_FPU(err);
}
"CWE-119",
"CWE-732",
"CWE-787"
] | linux | 59c4bd853abcea95eccc167a7d7fd5f1a5f47b98 | 252,098,199,920,099,400,000,000,000,000,000,000,000 | 17 | x86/fpu: Don't cache access to fpu_fpregs_owner_ctx
The state/owner of the FPU is saved to fpu_fpregs_owner_ctx by pointing
to the context that is currently loaded. It never changed during the
lifetime of a task - it remained stable/constant.
After deferred FPU registers loading until return to userland was
implemented, the content of fpu_fpregs_owner_ctx may change during
preemption and must not be cached.
This went unnoticed for some time and was now noticed, in particular
since gcc 9 is caching that load in copy_fpstate_to_sigframe() and
reusing it in the retry loop:
copy_fpstate_to_sigframe()
load fpu_fpregs_owner_ctx and save on stack
fpregs_lock()
copy_fpregs_to_sigframe() /* failed */
fpregs_unlock()
*** PREEMPTION, another uses FPU, changes fpu_fpregs_owner_ctx ***
fault_in_pages_writeable() /* succeed, retry */
fpregs_lock()
__fpregs_load_activate()
fpregs_state_valid() /* uses fpu_fpregs_owner_ctx from stack */
copy_fpregs_to_sigframe() /* succeeds, random FPU content */
This is a comparison of the assembly produced by gcc 9, without vs with this
patch:
| # arch/x86/kernel/fpu/signal.c:173: if (!access_ok(buf, size))
| cmpq %rdx, %rax # tmp183, _4
| jb .L190 #,
|-# arch/x86/include/asm/fpu/internal.h:512: return fpu == this_cpu_read_stable(fpu_fpregs_owner_ctx) && cpu == fpu->last_cpu;
|-#APP
|-# 512 "arch/x86/include/asm/fpu/internal.h" 1
|- movq %gs:fpu_fpregs_owner_ctx,%rax #, pfo_ret__
|-# 0 "" 2
|-#NO_APP
|- movq %rax, -88(%rbp) # pfo_ret__, %sfp
…
|-# arch/x86/include/asm/fpu/internal.h:512: return fpu == this_cpu_read_stable(fpu_fpregs_owner_ctx) && cpu == fpu->last_cpu;
|- movq -88(%rbp), %rcx # %sfp, pfo_ret__
|- cmpq %rcx, -64(%rbp) # pfo_ret__, %sfp
|+# arch/x86/include/asm/fpu/internal.h:512: return fpu == this_cpu_read(fpu_fpregs_owner_ctx) && cpu == fpu->last_cpu;
|+#APP
|+# 512 "arch/x86/include/asm/fpu/internal.h" 1
|+ movq %gs:fpu_fpregs_owner_ctx(%rip),%rax # fpu_fpregs_owner_ctx, pfo_ret__
|+# 0 "" 2
|+# arch/x86/include/asm/fpu/internal.h:512: return fpu == this_cpu_read(fpu_fpregs_owner_ctx) && cpu == fpu->last_cpu;
|+#NO_APP
|+ cmpq %rax, -64(%rbp) # pfo_ret__, %sfp
Use this_cpu_read() instead this_cpu_read_stable() to avoid caching of
fpu_fpregs_owner_ctx during preemption points.
The Fixes: tag points to the commit where deferred FPU loading was
added. Since this commit, the compiler is no longer allowed to move the
load of fpu_fpregs_owner_ctx somewhere else / outside of the locked
section. A task preemption will change its value and stale content will
be observed.
[ bp: Massage. ]
Debugged-by: Austin Clements <[email protected]>
Debugged-by: David Chase <[email protected]>
Debugged-by: Ian Lance Taylor <[email protected]>
Fixes: 5f409e20b7945 ("x86/fpu: Defer FPU state load until return to userspace")
Signed-off-by: Sebastian Andrzej Siewior <[email protected]>
Signed-off-by: Borislav Petkov <[email protected]>
Reviewed-by: Rik van Riel <[email protected]>
Tested-by: Borislav Petkov <[email protected]>
Cc: Aubrey Li <[email protected]>
Cc: Austin Clements <[email protected]>
Cc: Barret Rhoden <[email protected]>
Cc: Dave Hansen <[email protected]>
Cc: David Chase <[email protected]>
Cc: "H. Peter Anvin" <[email protected]>
Cc: [email protected]
Cc: Ingo Molnar <[email protected]>
Cc: Josh Bleecher Snyder <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: x86-ml <[email protected]>
Link: https://lkml.kernel.org/r/[email protected]
Link: https://bugzilla.kernel.org/show_bug.cgi?id=205663 |
u32 parse_mpegu(char *arg_val, u32 opt)
{
pack_file = arg_val;
pack_wgt = GF_TRUE;
return 0; | 0 | [
"CWE-787"
] | gpac | 4e56ad72ac1afb4e049a10f2d99e7512d7141f9d | 162,265,608,406,098,040,000,000,000,000,000,000,000 | 6 | fixed #2216 |
R_API int r_core_search_value_in_range(RCore *core, RInterval search_itv, ut64 vmin,
ut64 vmax, int vsize, inRangeCb cb, void *cb_user) {
int i, align = core->search->align, hitctr = 0;
bool vinfun = r_config_get_i (core->config, "anal.vinfun");
bool vinfunr = r_config_get_i (core->config, "anal.vinfunrange");
bool analStrings = r_config_get_i (core->config, "anal.strings");
mycore = core;
ut8 buf[4096];
ut64 v64, value = 0, size;
ut64 from = search_itv.addr, to = r_itv_end (search_itv);
ut32 v32;
ut16 v16;
if (from >= to) {
eprintf ("Error: from must be lower than to\n");
return -1;
}
bool maybeThumb = false;
if (align && core->anal->cur && core->anal->cur->arch) {
if (!strcmp (core->anal->cur->arch, "arm") && core->anal->bits != 64) {
maybeThumb = true;
}
}
if (vmin >= vmax) {
eprintf ("Error: vmin must be lower than vmax\n");
return -1;
}
if (to == UT64_MAX) {
eprintf ("Error: Invalid destination boundary\n");
return -1;
}
r_cons_break_push (NULL, NULL);
if (!r_io_is_valid_offset (core->io, from, 0)) {
return -1;
}
while (from < to) {
size = R_MIN (to - from, sizeof (buf));
memset (buf, 0xff, sizeof (buf)); // probably unnecessary
if (r_cons_is_breaked ()) {
goto beach;
}
bool res = r_io_read_at_mapped (core->io, from, buf, size);
if (!res || !memcmp (buf, "\xff\xff\xff\xff", 4) || !memcmp (buf, "\x00\x00\x00\x00", 4)) {
if (!isValidAddress (core, from)) {
ut64 next = from;
if (!r_io_map_locate (core->io, &next, 1, 0)) {
from += sizeof (buf);
} else {
from += (next - from);
}
continue;
}
}
for (i = 0; i <= (size - vsize); i++) {
void *v = (buf + i);
ut64 addr = from + i;
if (r_cons_is_breaked ()) {
goto beach;
}
if (align && (addr) % align) {
continue;
}
int match = false;
int left = size - i;
if (vsize > left) {
break;
}
switch (vsize) {
case 1: value = *(ut8 *)v; match = (buf[i] >= vmin && buf[i] <= vmax); break;
case 2: v16 = *(uut16 *)v; match = (v16 >= vmin && v16 <= vmax); value = v16; break;
case 4: v32 = *(uut32 *)v; match = (v32 >= vmin && v32 <= vmax); value = v32; break;
case 8: v64 = *(uut64 *)v; match = (v64 >= vmin && v64 <= vmax); value = v64; break;
default: eprintf ("Unknown vsize %d\n", vsize); return -1;
}
if (match && !vinfun) {
if (vinfunr) {
if (r_anal_get_fcn_in_bounds (core->anal, addr, R_ANAL_FCN_TYPE_NULL)) {
match = false;
}
} else {
if (r_anal_get_fcn_in (core->anal, addr, R_ANAL_FCN_TYPE_NULL)) {
match = false;
}
}
}
if (match && value) {
bool isValidMatch = true;
if (align && (value % align)) {
// ignored .. unless we are analyzing arm/thumb and lower bit is 1
isValidMatch = false;
if (maybeThumb && (value & 1)) {
isValidMatch = true;
}
}
if (isValidMatch) {
cb (core, addr, value, vsize, cb_user);
if (analStrings && stringAt (core, addr)) {
add_string_ref (mycore, addr, value);
}
hitctr++;
}
}
}
if (size == to-from) {
break;
}
from += size-vsize+1;
}
beach:
r_cons_break_pop ();
return hitctr;
} | 0 | [
"CWE-416"
] | radare2 | 10517e3ff0e609697eb8cde60ec8dc999ee5ea24 | 101,542,055,282,549,000,000,000,000,000,000,000,000 | 113 | aaef on arm/thumb switches causes uaf ##crash
* Reported by peacock-doris via huntr.dev
* Reproducer: poc_uaf_r_reg_get |
static void cm_format_sidr_rep_event(struct cm_work *work)
{
struct cm_sidr_rep_msg *sidr_rep_msg;
struct ib_cm_sidr_rep_event_param *param;
sidr_rep_msg = (struct cm_sidr_rep_msg *)
work->mad_recv_wc->recv_buf.mad;
param = &work->cm_event.param.sidr_rep_rcvd;
param->status = sidr_rep_msg->status;
param->qkey = be32_to_cpu(sidr_rep_msg->qkey);
param->qpn = be32_to_cpu(cm_sidr_rep_get_qpn(sidr_rep_msg));
param->info = &sidr_rep_msg->info;
param->info_len = sidr_rep_msg->info_length;
work->cm_event.private_data = &sidr_rep_msg->private_data;
} | 0 | [
"CWE-20"
] | linux | b2853fd6c2d0f383dbdf7427e263eb576a633867 | 318,326,687,554,422,370,000,000,000,000,000,000,000 | 15 | IB/core: Don't resolve passive side RoCE L2 address in CMA REQ handler
The code that resolves the passive side source MAC within the rdma_cm
connection request handler was both redundant and buggy, so remove it.
It was redundant since later, when an RC QP is modified to RTR state,
the resolution will take place in the ib_core module. It was buggy
because this callback also deals with UD SIDR exchange, for which we
incorrectly looked at the REQ member of the CM event and dereferenced
a random value.
Fixes: dd5f03beb4f7 ("IB/core: Ethernet L2 attributes in verbs/cm structures")
Signed-off-by: Moni Shoua <[email protected]>
Signed-off-by: Or Gerlitz <[email protected]>
Signed-off-by: Roland Dreier <[email protected]> |
cmsBool Write16bitTables(cmsContext ContextID, cmsIOHANDLER* io, _cmsStageToneCurvesData* Tables)
{
int j;
cmsUInt32Number i;
cmsUInt16Number val;
int nEntries = 256;
_cmsAssert(Tables != NULL);
nEntries = Tables->TheCurves[0]->nEntries;
for (i=0; i < Tables ->nCurves; i++) {
for (j=0; j < nEntries; j++) {
val = Tables->TheCurves[i]->Table16[j];
if (!_cmsWriteUInt16Number(io, val)) return FALSE;
}
}
return TRUE;
cmsUNUSED_PARAMETER(ContextID);
} | 0 | [] | Little-CMS | 886e2f524268efe8a1c3aa838c28e446fda24486 | 201,099,817,218,170,630,000,000,000,000,000,000,000 | 23 | Fixes from coverity check |
OVS_REQUIRES(ct->ct_lock)
{
ovs_assert(conn->conn_type == CT_CONN_TYPE_DEFAULT);
conn_clean_cmn(ct, conn);
if (conn->nat_conn) {
uint32_t hash = conn_key_hash(&conn->nat_conn->key, ct->hash_basis);
cmap_remove(&ct->conns, &conn->nat_conn->cm_node, hash);
}
ovs_list_remove(&conn->exp_node);
conn->cleaned = true;
ovsrcu_postpone(delete_conn, conn);
atomic_count_dec(&ct->n_conn);
} | 0 | [
"CWE-400"
] | ovs | 79349cbab0b2a755140eedb91833ad2760520a83 | 107,874,470,479,868,290,000,000,000,000,000,000,000 | 14 | flow: Support extra padding length.
Although not required, padding can be optionally added until
the packet length is MTU bytes. A packet with extra padding
currently fails sanity checks.
Vulnerability: CVE-2020-35498
Fixes: fa8d9001a624 ("miniflow_extract: Properly handle small IP packets.")
Reported-by: Joakim Hindersson <[email protected]>
Acked-by: Ilya Maximets <[email protected]>
Signed-off-by: Flavio Leitner <[email protected]>
Signed-off-by: Ilya Maximets <[email protected]> |
size_t parse_core(const char *s, size_t n, SemanticValues &sv, Context &c,
any &dt) const override {
return parse_expression(s, n, sv, c, dt, 0);
} | 0 | [
"CWE-125"
] | cpp-peglib | b3b29ce8f3acf3a32733d930105a17d7b0ba347e | 34,334,709,192,056,847,000,000,000,000,000,000,000 | 4 | Fix #122 |
int sta_info_insert_rcu(struct sta_info *sta) __acquires(RCU)
{
struct ieee80211_local *local = sta->local;
int err;
might_sleep();
mutex_lock(&local->sta_mtx);
err = sta_info_insert_check(sta);
if (err) {
mutex_unlock(&local->sta_mtx);
rcu_read_lock();
goto out_free;
}
err = sta_info_insert_finish(sta);
if (err)
goto out_free;
return 0;
out_free:
sta_info_free(local, sta);
return err;
} | 0 | [
"CWE-287"
] | linux | 3e493173b7841259a08c5c8e5cbe90adb349da7e | 178,728,736,859,598,850,000,000,000,000,000,000,000 | 25 | mac80211: Do not send Layer 2 Update frame before authorization
The Layer 2 Update frame is used to update bridges when a station roams
to another AP even if that STA does not transmit any frames after the
reassociation. This behavior was described in IEEE Std 802.11F-2003 as
something that would happen based on MLME-ASSOCIATE.indication, i.e.,
before completing 4-way handshake. However, this IEEE trial-use
recommended practice document was published before RSN (IEEE Std
802.11i-2004) and as such, did not consider RSN use cases. Furthermore,
IEEE Std 802.11F-2003 was withdrawn in 2006 and as such, has not been
maintained amd should not be used anymore.
Sending out the Layer 2 Update frame immediately after association is
fine for open networks (and also when using SAE, FT protocol, or FILS
authentication when the station is actually authenticated by the time
association completes). However, it is not appropriate for cases where
RSN is used with PSK or EAP authentication since the station is actually
fully authenticated only once the 4-way handshake completes after
authentication and attackers might be able to use the unauthenticated
triggering of Layer 2 Update frame transmission to disrupt bridge
behavior.
Fix this by postponing transmission of the Layer 2 Update frame from
station entry addition to the point when the station entry is marked
authorized. Similarly, send out the VLAN binding update only if the STA
entry has already been authorized.
Signed-off-by: Jouni Malinen <[email protected]>
Reviewed-by: Johannes Berg <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
static int cieavalidate(i_ctx_t *i_ctx_p, ref *space, float *values, int num_comps)
{
os_ptr op = osp;
if (num_comps < 1)
return_error(gs_error_stackunderflow);
if (!r_has_type(op, t_integer) && !r_has_type(op, t_real))
return_error(gs_error_typecheck);
return 0;
} | 0 | [] | ghostpdl | b326a71659b7837d3acde954b18bda1a6f5e9498 | 257,332,610,768,514,330,000,000,000,000,000,000,000 | 12 | Bug 699655: Properly check the return value....
...when getting a value from a dictionary |
static void enum_aux_records(AvahiServer *s, AvahiInterface *i, const char *name, uint16_t type, void (*callback)(AvahiServer *s, AvahiRecord *r, int flush_cache, void* userdata), void* userdata) {
assert(s);
assert(i);
assert(name);
assert(callback);
if (type == AVAHI_DNS_TYPE_ANY) {
AvahiEntry *e;
for (e = s->entries; e; e = e->entries_next)
if (!e->dead &&
avahi_entry_is_registered(s, e, i) &&
e->record->key->clazz == AVAHI_DNS_CLASS_IN &&
avahi_domain_equal(name, e->record->key->name))
callback(s, e->record, e->flags & AVAHI_PUBLISH_UNIQUE, userdata);
} else {
AvahiEntry *e;
AvahiKey *k;
if (!(k = avahi_key_new(name, AVAHI_DNS_CLASS_IN, type)))
return; /** OOM */
for (e = avahi_hashmap_lookup(s->entries_by_key, k); e; e = e->by_key_next)
if (!e->dead && avahi_entry_is_registered(s, e, i))
callback(s, e->record, e->flags & AVAHI_PUBLISH_UNIQUE, userdata);
avahi_key_unref(k);
}
} | 0 | [
"CWE-399"
] | avahi | 3093047f1aa36bed8a37fa79004bf0ee287929f4 | 146,753,198,746,965,180,000,000,000,000,000,000,000 | 30 | Don't get confused by UDP packets with a source port that is zero
This is a fix for rhbz 475394.
Problem identified by Hugo Dias. |
static CImg<T> get_load_cimg(const char *const filename,
const unsigned int n0, const unsigned int n1,
const unsigned int x0, const unsigned int y0,
const unsigned int z0, const unsigned int c0,
const unsigned int x1, const unsigned int y1,
const unsigned int z1, const unsigned int c1,
const char axis='z', const float align=0) {
return CImg<T>().load_cimg(filename,n0,n1,x0,y0,z0,c0,x1,y1,z1,c1,axis,align);
} | 0 | [
"CWE-770"
] | cimg | 619cb58dd90b4e03ac68286c70ed98acbefd1c90 | 183,625,315,193,085,320,000,000,000,000,000,000,000 | 9 | CImg<>::load_bmp() and CImg<>::load_pandore(): Check that dimensions encoded in file does not exceed file size. |
static void io_eventfd_put(struct rcu_head *rcu)
{
struct io_ev_fd *ev_fd = container_of(rcu, struct io_ev_fd, rcu);
eventfd_ctx_put(ev_fd->cq_ev_fd);
kfree(ev_fd); | 0 | [
"CWE-416"
] | linux | e677edbcabee849bfdd43f1602bccbecf736a646 | 38,118,697,624,216,214,000,000,000,000,000,000,000 | 7 | io_uring: fix race between timeout flush and removal
io_flush_timeouts() assumes the timeout isn't in progress of triggering
or being removed/canceled, so it unconditionally removes it from the
timeout list and attempts to cancel it.
Leave it on the list and let the normal timeout cancelation take care
of it.
Cc: [email protected] # 5.5+
Signed-off-by: Jens Axboe <[email protected]> |
void gf_sm_dumper_del(GF_SceneDumper *sdump)
{
gf_list_del(sdump->dump_nodes);
while (gf_list_count(sdump->mem_def_nodes)) {
GF_Node *tmp = (GF_Node *)gf_list_get(sdump->mem_def_nodes, 0);
gf_list_rem(sdump->mem_def_nodes, 0);
gf_node_unregister(tmp, NULL);
}
gf_list_del(sdump->mem_def_nodes);
gf_list_del(sdump->inserted_routes);
if (sdump->trace != stdout) gf_fclose(sdump->trace);
if (sdump->filename) {
gf_free(sdump->filename);
sdump->filename = NULL;
}
gf_free(sdump);
} | 0 | [
"CWE-476"
] | gpac | 0102c5d4db7fdbf08b5b591b2a6264de33867a07 | 191,262,423,177,686,430,000,000,000,000,000,000,000 | 17 | fixed #2232 |
int platform_get_irq_byname(struct platform_device *dev, const char *name)
{
struct resource *r;
if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) {
int ret;
ret = of_irq_get_byname(dev->dev.of_node, name);
if (ret > 0 || ret == -EPROBE_DEFER)
return ret;
}
r = platform_get_resource_byname(dev, IORESOURCE_IRQ, name);
return r ? r->start : -ENXIO;
} | 0 | [
"CWE-362",
"CWE-284"
] | linux | 6265539776a0810b7ce6398c27866ddb9c6bd154 | 113,404,236,739,455,020,000,000,000,000,000,000,000 | 15 | driver core: platform: fix race condition with driver_override
The driver_override implementation is susceptible to race condition when
different threads are reading vs storing a different driver override.
Add locking to avoid race condition.
Fixes: 3d713e0e382e ("driver core: platform: add device binding path 'driver_override'")
Cc: [email protected]
Signed-off-by: Adrian Salido <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]> |
MagickExport Image *DisposeImages(const Image *images,ExceptionInfo *exception)
{
Image
*dispose_image,
*dispose_images;
RectangleInfo
bounds;
register Image
*image,
*next;
/*
Run the image through the animation sequence
*/
assert(images != (Image *) NULL);
assert(images->signature == MagickCoreSignature);
if (images->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
image=GetFirstImageInList(images);
dispose_image=CloneImage(image,image->page.width,image->page.height,
MagickTrue,exception);
if (dispose_image == (Image *) NULL)
return((Image *) NULL);
dispose_image->page=image->page;
dispose_image->page.x=0;
dispose_image->page.y=0;
dispose_image->dispose=NoneDispose;
dispose_image->background_color.opacity=(Quantum) TransparentOpacity;
(void) SetImageBackgroundColor(dispose_image);
dispose_images=NewImageList();
for (next=image; next != (Image *) NULL; next=GetNextImageInList(next))
{
Image
*current_image;
/*
Overlay this frame's image over the previous disposal image.
*/
current_image=CloneImage(dispose_image,0,0,MagickTrue,exception);
if (current_image == (Image *) NULL)
{
dispose_images=DestroyImageList(dispose_images);
dispose_image=DestroyImage(dispose_image);
return((Image *) NULL);
}
(void) CompositeImage(current_image,next->matte != MagickFalse ?
OverCompositeOp : CopyCompositeOp,next,next->page.x,next->page.y);
/*
Handle Background dispose: image is displayed for the delay period.
*/
if (next->dispose == BackgroundDispose)
{
bounds=next->page;
bounds.width=next->columns;
bounds.height=next->rows;
if (bounds.x < 0)
{
bounds.width+=bounds.x;
bounds.x=0;
}
if ((ssize_t) (bounds.x+bounds.width) > (ssize_t) current_image->columns)
bounds.width=current_image->columns-bounds.x;
if (bounds.y < 0)
{
bounds.height+=bounds.y;
bounds.y=0;
}
if ((ssize_t) (bounds.y+bounds.height) > (ssize_t) current_image->rows)
bounds.height=current_image->rows-bounds.y;
ClearBounds(current_image,&bounds);
}
/*
Select the appropriate previous/disposed image.
*/
if (next->dispose == PreviousDispose)
current_image=DestroyImage(current_image);
else
{
dispose_image=DestroyImage(dispose_image);
dispose_image=current_image;
current_image=(Image *) NULL;
}
/*
Save the dispose image just calculated for return.
*/
{
Image
*dispose;
dispose=CloneImage(dispose_image,0,0,MagickTrue,exception);
if (dispose == (Image *) NULL)
{
dispose_images=DestroyImageList(dispose_images);
dispose_image=DestroyImage(dispose_image);
return((Image *) NULL);
}
(void) CloneImageProfiles(dispose,next);
(void) CloneImageProperties(dispose,next);
(void) CloneImageArtifacts(dispose,next);
dispose->page.x=0;
dispose->page.y=0;
dispose->dispose=next->dispose;
AppendImageToList(&dispose_images,dispose);
}
}
dispose_image=DestroyImage(dispose_image);
return(GetFirstImageInList(dispose_images));
} | 0 | [
"CWE-369"
] | ImageMagick6 | 4f31d78716ac94c85c244efcea368fea202e2ed4 | 279,783,746,081,113,300,000,000,000,000,000,000,000 | 112 | https://github.com/ImageMagick/ImageMagick/issues/1629 |
Value ExpressionType::evaluate(const Document& root) const {
Value val(vpOperand[0]->evaluate(root));
return Value(StringData(typeName(val.getType())));
} | 0 | [
"CWE-835"
] | mongo | 0a076417d1d7fba3632b73349a1fd29a83e68816 | 54,673,674,860,028,650,000,000,000,000,000,000,000 | 4 | SERVER-38070 fix infinite loop in agg expression |
template<typename t>
CImg<T>& operator+=(const t value) {
if (is_empty()) return *this;
cimg_pragma_openmp(parallel for cimg_openmp_if(size()>=524288))
cimg_rof(*this,ptrd,T) *ptrd = (T)(*ptrd + value);
return *this; | 0 | [
"CWE-125"
] | CImg | 10af1e8c1ad2a58a0a3342a856bae63e8f257abb | 970,615,797,215,316,400,000,000,000,000,000,000 | 6 | Fix other issues in 'CImg<T>::load_bmp()'. |
int pt_setxattr(FsContext *ctx, const char *path, const char *name, void *value,
size_t size, int flags)
{
return local_setxattr_nofollow(ctx, path, name, value, size, flags);
} | 0 | [
"CWE-772"
] | qemu | 4ffcdef4277a91af15a3c09f7d16af072c29f3f2 | 15,942,573,898,607,360,000,000,000,000,000,000,000 | 5 | 9pfs: xattr: fix memory leak in v9fs_list_xattr
Free 'orig_value' in error path.
Signed-off-by: Li Qiang <[email protected]>
Signed-off-by: Greg Kurz <[email protected]> |
*/
int _php_imap_mail(char *to, char *subject, char *message, char *headers, char *cc, char *bcc, char* rpath)
{
#ifdef PHP_WIN32
int tsm_err;
#else
FILE *sendmail;
int ret;
#endif
#ifdef PHP_WIN32
char *tempMailTo;
char *tsm_errmsg = NULL;
ADDRESS *addr;
char *bufferTo = NULL, *bufferCc = NULL, *bufferBcc = NULL, *bufferHeader = NULL;
size_t offset, bufferLen = 0;
size_t bt_len;
if (headers) {
bufferLen += strlen(headers);
}
if (to) {
bufferLen += strlen(to) + 6;
}
if (cc) {
bufferLen += strlen(cc) + 6;
}
#define PHP_IMAP_CLEAN if (bufferTo) efree(bufferTo); if (bufferCc) efree(bufferCc); if (bufferBcc) efree(bufferBcc); if (bufferHeader) efree(bufferHeader);
#define PHP_IMAP_BAD_DEST PHP_IMAP_CLEAN; efree(tempMailTo); return (BAD_MSG_DESTINATION);
bufferHeader = (char *)safe_emalloc(bufferLen, 1, 1);
memset(bufferHeader, 0, bufferLen);
if (to && *to) {
strlcat(bufferHeader, "To: ", bufferLen + 1);
strlcat(bufferHeader, to, bufferLen + 1);
strlcat(bufferHeader, "\r\n", bufferLen + 1);
tempMailTo = estrdup(to);
bt_len = strlen(to);
bufferTo = (char *)safe_emalloc(bt_len, 1, 1);
bt_len++;
offset = 0;
addr = NULL;
rfc822_parse_adrlist(&addr, tempMailTo, "NO HOST");
while (addr) {
if (addr->host == NULL || strcmp(addr->host, ERRHOST) == 0) {
PHP_IMAP_BAD_DEST;
} else {
bufferTo = safe_erealloc(bufferTo, bt_len, 1, strlen(addr->mailbox));
bt_len += strlen(addr->mailbox);
bufferTo = safe_erealloc(bufferTo, bt_len, 1, strlen(addr->host));
bt_len += strlen(addr->host);
offset += slprintf(bufferTo + offset, bt_len - offset, "%s@%s,", addr->mailbox, addr->host);
}
addr = addr->next;
}
efree(tempMailTo);
if (offset>0) {
bufferTo[offset-1] = 0;
}
}
if (cc && *cc) {
strlcat(bufferHeader, "Cc: ", bufferLen + 1);
strlcat(bufferHeader, cc, bufferLen + 1);
strlcat(bufferHeader, "\r\n", bufferLen + 1);
tempMailTo = estrdup(cc);
bt_len = strlen(cc);
bufferCc = (char *)safe_emalloc(bt_len, 1, 1);
bt_len++;
offset = 0;
addr = NULL;
rfc822_parse_adrlist(&addr, tempMailTo, "NO HOST");
while (addr) {
if (addr->host == NULL || strcmp(addr->host, ERRHOST) == 0) {
PHP_IMAP_BAD_DEST;
} else {
bufferCc = safe_erealloc(bufferCc, bt_len, 1, strlen(addr->mailbox));
bt_len += strlen(addr->mailbox);
bufferCc = safe_erealloc(bufferCc, bt_len, 1, strlen(addr->host));
bt_len += strlen(addr->host);
offset += slprintf(bufferCc + offset, bt_len - offset, "%s@%s,", addr->mailbox, addr->host);
}
addr = addr->next;
}
efree(tempMailTo);
if (offset>0) {
bufferCc[offset-1] = 0;
}
}
if (bcc && *bcc) {
tempMailTo = estrdup(bcc);
bt_len = strlen(bcc);
bufferBcc = (char *)safe_emalloc(bt_len, 1, 1);
bt_len++;
offset = 0;
addr = NULL;
rfc822_parse_adrlist(&addr, tempMailTo, "NO HOST");
while (addr) {
if (addr->host == NULL || strcmp(addr->host, ERRHOST) == 0) {
PHP_IMAP_BAD_DEST;
} else {
bufferBcc = safe_erealloc(bufferBcc, bt_len, 1, strlen(addr->mailbox));
bt_len += strlen(addr->mailbox);
bufferBcc = safe_erealloc(bufferBcc, bt_len, 1, strlen(addr->host));
bt_len += strlen(addr->host);
offset += slprintf(bufferBcc + offset, bt_len - offset, "%s@%s,", addr->mailbox, addr->host);
}
addr = addr->next;
}
efree(tempMailTo);
if (offset>0) {
bufferBcc[offset-1] = 0;
}
}
if (headers && *headers) {
strlcat(bufferHeader, headers, bufferLen + 1);
}
if (TSendMail(INI_STR("SMTP"), &tsm_err, &tsm_errmsg, bufferHeader, subject, bufferTo, message, bufferCc, bufferBcc, rpath) != SUCCESS) {
if (tsm_errmsg) {
php_error_docref(NULL, E_WARNING, "%s", tsm_errmsg);
efree(tsm_errmsg);
} else {
php_error_docref(NULL, E_WARNING, "%s", GetSMErrorText(tsm_err));
}
PHP_IMAP_CLEAN;
return 0;
}
PHP_IMAP_CLEAN;
#else
if (!INI_STR("sendmail_path")) {
return 0;
}
sendmail = popen(INI_STR("sendmail_path"), "w");
if (sendmail) {
if (rpath && rpath[0]) fprintf(sendmail, "From: %s\n", rpath);
fprintf(sendmail, "To: %s\n", to);
if (cc && cc[0]) fprintf(sendmail, "Cc: %s\n", cc);
if (bcc && bcc[0]) fprintf(sendmail, "Bcc: %s\n", bcc);
fprintf(sendmail, "Subject: %s\n", subject);
if (headers != NULL) {
fprintf(sendmail, "%s\n", headers);
}
fprintf(sendmail, "\n%s\n", message);
ret = pclose(sendmail);
if (ret == -1) {
return 0;
} else {
return 1;
}
} else {
php_error_docref(NULL, E_WARNING, "Could not execute mail delivery program");
return 0;
}
#endif
return 1; | 0 | [
"CWE-88"
] | php-src | 336d2086a9189006909ae06c7e95902d7d5ff77e | 272,587,444,336,482,660,000,000,000,000,000,000,000 | 159 | Disable rsh/ssh functionality in imap by default (bug #77153) |
vhost_user_set_protocol_features(struct virtio_net **pdev,
struct VhostUserMsg *msg,
int main_fd __rte_unused)
{
struct virtio_net *dev = *pdev;
uint64_t protocol_features = msg->payload.u64;
uint64_t slave_protocol_features = 0;
if (validate_msg_fds(msg, 0) != 0)
return RTE_VHOST_MSG_RESULT_ERR;
rte_vhost_driver_get_protocol_features(dev->ifname,
&slave_protocol_features);
if (protocol_features & ~slave_protocol_features) {
RTE_LOG(ERR, VHOST_CONFIG,
"(%d) received invalid protocol features.\n",
dev->vid);
return RTE_VHOST_MSG_RESULT_ERR;
}
dev->protocol_features = protocol_features;
RTE_LOG(INFO, VHOST_CONFIG,
"negotiated Vhost-user protocol features: 0x%" PRIx64 "\n",
dev->protocol_features);
return RTE_VHOST_MSG_RESULT_OK;
} | 0 | [] | dpdk | bf472259dde6d9c4dd3ebad2c2b477a168c6e021 | 277,102,550,561,710,700,000,000,000,000,000,000,000 | 27 | vhost: fix possible denial of service by leaking FDs
A malicious Vhost-user master could send in loop hand-crafted
vhost-user messages containing more file descriptors the
vhost-user slave expects. Doing so causes the application using
the vhost-user library to run out of FDs.
This issue has been assigned CVE-2019-14818
Fixes: 8f972312b8f4 ("vhost: support vhost-user")
Signed-off-by: Maxime Coquelin <[email protected]> |
void TcpHealthCheckerImpl::TcpActiveHealthCheckSession::onData(Buffer::Instance& data) {
ENVOY_CONN_LOG(trace, "total pending buffer={}", *client_, data.length());
// TODO(lilika): The TCP health checker does generic pattern matching so we can't differentiate
// between wrong data and not enough data. We could likely do better here and figure out cases in
// which a match is not possible but that is not done now.
if (TcpHealthCheckMatcher::match(parent_.receive_bytes_, data)) {
ENVOY_CONN_LOG(trace, "healthcheck passed", *client_);
data.drain(data.length());
handleSuccess(false);
if (!parent_.reuse_connection_) {
expect_close_ = true;
client_->close(Network::ConnectionCloseType::NoFlush);
}
}
} | 0 | [
"CWE-476"
] | envoy | 9b1c3962172a972bc0359398af6daa3790bb59db | 52,056,369,487,964,060,000,000,000,000,000,000,000 | 15 | healthcheck: fix grpc inline removal crashes (#749)
Signed-off-by: Matt Klein <[email protected]>
Signed-off-by: Pradeep Rao <[email protected]> |
static inline unsigned long pages_to_mb(unsigned long npg)
{
return npg >> (20 - PAGE_SHIFT);
} | 0 | [
"CWE-119",
"CWE-787"
] | linux | 027ef6c87853b0a9df53175063028edb4950d476 | 43,710,989,938,352,840,000,000,000,000,000,000,000 | 4 | mm: thp: fix pmd_present for split_huge_page and PROT_NONE with THP
In many places !pmd_present has been converted to pmd_none. For pmds
that's equivalent and pmd_none is quicker so using pmd_none is better.
However (unless we delete pmd_present) we should provide an accurate
pmd_present too. This will avoid the risk of code thinking the pmd is non
present because it's under __split_huge_page_map, see the pmd_mknotpresent
there and the comment above it.
If the page has been mprotected as PROT_NONE, it would also lead to a
pmd_present false negative in the same way as the race with
split_huge_page.
Because the PSE bit stays on at all times (both during split_huge_page and
when the _PAGE_PROTNONE bit get set), we could only check for the PSE bit,
but checking the PROTNONE bit too is still good to remember pmd_present
must always keep PROT_NONE into account.
This explains a not reproducible BUG_ON that was seldom reported on the
lists.
The same issue is in pmd_large, it would go wrong with both PROT_NONE and
if it races with split_huge_page.
Signed-off-by: Andrea Arcangeli <[email protected]>
Acked-by: Rik van Riel <[email protected]>
Cc: Johannes Weiner <[email protected]>
Cc: Hugh Dickins <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
void tag_database_dirty(PgDatabase *db)
{
struct List *item;
PgPool *pool;
statlist_for_each(item, &pool_list) {
pool = container_of(item, PgPool, head);
if (pool->db == db)
tag_pool_dirty(pool);
}
} | 0 | [] | pgbouncer | 4b92112b820830b30cd7bc91bef3dd8f35305525 | 243,499,534,750,243,940,000,000,000,000,000,000,000 | 11 | add_database: fail gracefully if too long db name
Truncating & adding can lead to fatal() later.
It was not an issue before, but with audodb (* in [databases] section)
the database name can some from network, thus allowing remote shutdown.. |
void del_random_ready_callback(struct random_ready_callback *rdy)
{
unsigned long flags;
struct module *owner = NULL;
spin_lock_irqsave(&random_ready_list_lock, flags);
if (!list_empty(&rdy->list)) {
list_del_init(&rdy->list);
owner = rdy->owner;
}
spin_unlock_irqrestore(&random_ready_list_lock, flags);
module_put(owner);
} | 0 | [
"CWE-200",
"CWE-330"
] | linux | f227e3ec3b5cad859ad15666874405e8c1bbc1d4 | 3,948,263,076,743,021,000,000,000,000,000,000,000 | 14 | random32: update the net random state on interrupt and activity
This modifies the first 32 bits out of the 128 bits of a random CPU's
net_rand_state on interrupt or CPU activity to complicate remote
observations that could lead to guessing the network RNG's internal
state.
Note that depending on some network devices' interrupt rate moderation
or binding, this re-seeding might happen on every packet or even almost
never.
In addition, with NOHZ some CPUs might not even get timer interrupts,
leaving their local state rarely updated, while they are running
networked processes making use of the random state. For this reason, we
also perform this update in update_process_times() in order to at least
update the state when there is user or system activity, since it's the
only case we care about.
Reported-by: Amit Klein <[email protected]>
Suggested-by: Linus Torvalds <[email protected]>
Cc: Eric Dumazet <[email protected]>
Cc: "Jason A. Donenfeld" <[email protected]>
Cc: Andy Lutomirski <[email protected]>
Cc: Kees Cook <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: <[email protected]>
Signed-off-by: Willy Tarreau <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
read_tube_block (FILE *f,
gint image_ID,
guint total_len,
PSPimage *ia)
{
guint16 version;
guchar name[514];
guint32 step_size, column_count, row_count, cell_count;
guint32 placement_mode, selection_mode;
gint i;
GimpPixPipeParams params;
GimpParasite *pipe_parasite;
gchar *parasite_text;
gimp_pixpipe_params_init (¶ms);
if (fread (&version, 2, 1, f) < 1
|| fread (name, 513, 1, f) < 1
|| fread (&step_size, 4, 1, f) < 1
|| fread (&column_count, 4, 1, f) < 1
|| fread (&row_count, 4, 1, f) < 1
|| fread (&cell_count, 4, 1, f) < 1
|| fread (&placement_mode, 4, 1, f) < 1
|| fread (&selection_mode, 4, 1, f) < 1)
{
g_message ("Error reading tube data chunk");
return -1;
}
name[513] = 0;
version = GUINT16_FROM_LE (version);
params.step = GUINT32_FROM_LE (step_size);
params.cols = GUINT32_FROM_LE (column_count);
params.rows = GUINT32_FROM_LE (row_count);
params.ncells = GUINT32_FROM_LE (cell_count);
placement_mode = GUINT32_FROM_LE (placement_mode);
selection_mode = GUINT32_FROM_LE (selection_mode);
for (i = 1; i < params.cols; i++)
gimp_image_add_vguide (image_ID, (ia->width * i)/params.cols);
for (i = 1; i < params.rows; i++)
gimp_image_add_hguide (image_ID, (ia->height * i)/params.rows);
/* We use a parasite to pass in the tube (pipe) parameters in
* case we will have any use of those, for instance in the gpb
* plug-in that saves a GIMP image pipe.
*/
params.dim = 1;
params.cellwidth = ia->width / params.cols;
params.cellheight = ia->height / params.rows;
params.placement = (placement_mode == tpmRandom ? "random" :
(placement_mode == tpmConstant ? "constant" :
"default"));
params.rank[0] = params.ncells;
params.selection[0] = (selection_mode == tsmRandom ? "random" :
(selection_mode == tsmIncremental ? "incremental" :
(selection_mode == tsmAngular ? "angular" :
(selection_mode == tsmPressure ? "pressure" :
(selection_mode == tsmVelocity ? "velocity" :
"default")))));
parasite_text = gimp_pixpipe_params_build (¶ms);
IFDBG(2) g_message ("parasite: %s", parasite_text);
pipe_parasite = gimp_parasite_new ("gimp-brush-pipe-parameters",
GIMP_PARASITE_PERSISTENT,
strlen (parasite_text) + 1, parasite_text);
gimp_image_parasite_attach (image_ID, pipe_parasite);
gimp_parasite_free (pipe_parasite);
g_free (parasite_text);
return 0;
} | 0 | [
"CWE-787"
] | gimp | 48ec15890e1751dede061f6d1f469b6508c13439 | 224,309,386,365,681,360,000,000,000,000,000,000,000 | 73 | file-psp: fix for bogus input data. Fixes bug #639203 |
static int get_max_inline_xattr_value_size(struct inode *inode,
struct ext4_iloc *iloc)
{
struct ext4_xattr_ibody_header *header;
struct ext4_xattr_entry *entry;
struct ext4_inode *raw_inode;
int free, min_offs;
min_offs = EXT4_SB(inode->i_sb)->s_inode_size -
EXT4_GOOD_OLD_INODE_SIZE -
EXT4_I(inode)->i_extra_isize -
sizeof(struct ext4_xattr_ibody_header);
/*
* We need to subtract another sizeof(__u32) since an in-inode xattr
* needs an empty 4 bytes to indicate the gap between the xattr entry
* and the name/value pair.
*/
if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR))
return EXT4_XATTR_SIZE(min_offs -
EXT4_XATTR_LEN(strlen(EXT4_XATTR_SYSTEM_DATA)) -
EXT4_XATTR_ROUND - sizeof(__u32));
raw_inode = ext4_raw_inode(iloc);
header = IHDR(inode, raw_inode);
entry = IFIRST(header);
/* Compute min_offs. */
for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) {
if (!entry->e_value_inum && entry->e_value_size) {
size_t offs = le16_to_cpu(entry->e_value_offs);
if (offs < min_offs)
min_offs = offs;
}
}
free = min_offs -
((void *)entry - (void *)IFIRST(header)) - sizeof(__u32);
if (EXT4_I(inode)->i_inline_off) {
entry = (struct ext4_xattr_entry *)
((void *)raw_inode + EXT4_I(inode)->i_inline_off);
free += EXT4_XATTR_SIZE(le32_to_cpu(entry->e_value_size));
goto out;
}
free -= EXT4_XATTR_LEN(strlen(EXT4_XATTR_SYSTEM_DATA));
if (free > EXT4_XATTR_ROUND)
free = EXT4_XATTR_SIZE(free - EXT4_XATTR_ROUND);
else
free = 0;
out:
return free;
} | 0 | [
"CWE-416"
] | linux | 117166efb1ee8f13c38f9e96b258f16d4923f888 | 23,006,197,447,531,240,000,000,000,000,000,000,000 | 56 | ext4: do not allow external inodes for inline data
The inline data feature was implemented before we added support for
external inodes for xattrs. It makes no sense to support that
combination, but the problem is that there are a number of extended
attribute checks that are skipped if e_value_inum is non-zero.
Unfortunately, the inline data code is completely e_value_inum
unaware, and attempts to interpret the xattr fields as if it were an
inline xattr --- at which point, Hilarty Ensues.
This addresses CVE-2018-11412.
https://bugzilla.kernel.org/show_bug.cgi?id=199803
Reported-by: Jann Horn <[email protected]>
Reviewed-by: Andreas Dilger <[email protected]>
Signed-off-by: Theodore Ts'o <[email protected]>
Fixes: e50e5129f384 ("ext4: xattr-in-inode support")
Cc: [email protected] |
static int __hidp_send_ctrl_message(struct hidp_session *session,
unsigned char hdr, unsigned char *data,
int size)
{
struct sk_buff *skb;
BT_DBG("session %p data %p size %d", session, data, size);
if (atomic_read(&session->terminate))
return -EIO;
skb = alloc_skb(size + 1, GFP_ATOMIC);
if (!skb) {
BT_ERR("Can't allocate memory for new frame");
return -ENOMEM;
}
*skb_put(skb, 1) = hdr;
if (data && size > 0)
memcpy(skb_put(skb, size), data, size);
skb_queue_tail(&session->ctrl_transmit, skb);
return 0;
} | 0 | [
"CWE-200"
] | linux | 0a9ab9bdb3e891762553f667066190c1d22ad62b | 194,436,830,689,364,260,000,000,000,000,000,000,000 | 25 | Bluetooth: Fix incorrect strncpy() in hidp_setup_hid()
The length parameter should be sizeof(req->name) - 1 because there is no
guarantee that string provided by userspace will contain the trailing
'\0'.
Can be easily reproduced by manually setting req->name to 128 non-zero
bytes prior to ioctl(HIDPCONNADD) and checking the device name setup on
input subsystem:
$ cat /sys/devices/pnp0/00\:04/tty/ttyS0/hci0/hci0\:1/input8/name
AAAAAA[...]AAAAAAAAf0:af:f0:af:f0:af
("f0:af:f0:af:f0:af" is the device bluetooth address, taken from "phys"
field in struct hid_device due to overflow.)
Cc: [email protected]
Signed-off-by: Anderson Lizardo <[email protected]>
Acked-by: Marcel Holtmann <[email protected]>
Signed-off-by: Gustavo Padovan <[email protected]> |
static void AppLayerProtoDetectFreeProbingParsers(AppLayerProtoDetectProbingParser *pp)
{
    SCEnter();

    /* Walk the singly-linked list, saving each node's successor before
     * freeing the node itself.  A NULL head simply falls through. */
    while (pp != NULL) {
        AppLayerProtoDetectProbingParser *next = pp->next;
        AppLayerProtoDetectProbingParserFree(pp);
        pp = next;
    }

    SCReturn;
}
"CWE-20"
] | suricata | 8357ef3f8ffc7d99ef6571350724160de356158b | 318,483,309,752,697,540,000,000,000,000,000,000,000 | 18 | proto/detect: workaround dns misdetected as dcerpc
The DCERPC UDP detection would misfire on DNS with transaction
ID 0x0400. This would happen as the protocol detection engine
gives preference to pattern based detection over probing parsers for
performance reasons.
This hack/workaround fixes this specific case by still running the
probing parser if DCERPC has been detected on UDP. The probing
parser result will take precedence.
Bug #2736. |
int32_t cli_bcapi_hashset_remove(struct cli_bc_ctx *ctx, int32_t id, uint32_t key)
{
    /* Remove 'key' from the hashset identified by 'id'.  Returns -1 when
     * the id does not resolve to a valid hashset, otherwise the result of
     * cli_hashset_removekey(). */
    struct cli_hashset *set = get_hashset(ctx, id);

    return (set == NULL) ? -1 : cli_hashset_removekey(set, key);
}
"CWE-189"
] | clamav-devel | 3d664817f6ef833a17414a4ecea42004c35cc42f | 24,033,274,528,873,460,000,000,000,000,000,000,000 | 7 | fix recursion level crash (bb #3706).
Thanks to Stephane Chazelas for the analysis. |
void gdImagePaletteCopy (gdImagePtr to, gdImagePtr from)
{
	int i, x, y, p;
	int xlate[256];

	/* Palette copying only makes sense between two palette images. */
	if (to->trueColor || from->trueColor) {
		return;
	}

	/* -1 marks "closest-color mapping not yet computed" for a slot. */
	for (i = 0; i < 256; i++) {
		xlate[i] = -1;
	}

	/* Remap every pixel of 'to' onto the closest color available in
	 * 'from', memoizing each palette-index lookup in xlate[]. */
	for (y = 0; y < to->sy; y++) {
		for (x = 0; x < to->sx; x++) {
			p = gdImageGetPixel(to, x, y);
			if (xlate[p] == -1) {
				/* This ought to use HWB, but we don't have an alpha-aware version of that yet. */
				xlate[p] = gdImageColorClosestAlpha(from, to->red[p], to->green[p], to->blue[p], to->alpha[p]);
			}
			gdImageSetPixel(to, x, y, xlate[p]);
		}
	}

	/* Overwrite the destination palette with the source palette and
	 * mark the copied entries as allocated. */
	for (i = 0; i < from->colorsTotal; i++) {
		to->red[i] = from->red[i];
		to->green[i] = from->green[i];
		to->blue[i] = from->blue[i];
		to->alpha[i] = from->alpha[i];
		to->open[i] = 0;
	}
	/* Any leftover slots in the destination palette become free. */
	for (i = from->colorsTotal; i < to->colorsTotal; i++) {
		to->open[i] = 1;
	}
	to->colorsTotal = from->colorsTotal;
}
"CWE-119"
] | php-src | e7f2356665c2569191a946b6fc35b437f0ae1384 | 244,273,979,421,462,600,000,000,000,000,000,000,000 | 38 | Fix #66387: Stack overflow with imagefilltoborder
The stack overflow is caused by the recursive algorithm in combination with a
very large negative coordinate passed to gdImageFillToBorder(). As there is
already a clipping for large positive coordinates to the width and height of
the image, it seems to be consequent to clip to zero also. |
/*
 * put_params handler for the X11 display device.
 *
 * Snapshots the current device state into 'values', reads the
 * X-specific parameters (WindowID, .IsPageDevice, MaxTempPixmap,
 * MaxTempImage) from plist, delegates the remaining parameters to
 * gx_default_put_params, and then reconciles the window, backing
 * pixmap and initial matrix with any page-size or resolution change.
 * Returns 0 on success or a negative error code.
 */
gdev_x_put_params(gx_device * dev, gs_param_list * plist)
{
    gx_device_X *xdev = (gx_device_X *) dev;
    /*
     * Provide copies of values of parameters being set:
     * is_open, width, height, HWResolution, IsPageDevice, Max*.
     */
    gx_device_X values;

    int orig_MaxBitmap = xdev->space_params.MaxBitmap;
    long pwin = (long)xdev->pwin;
    bool save_is_page = xdev->IsPageDevice;
    int ecode = 0, code;
    bool clear_window = false;

    values = *xdev;

    /* Handle extra parameters */

    ecode = param_put_long(plist, "WindowID", &pwin, ecode);
    ecode = param_put_bool(plist, ".IsPageDevice", &values.IsPageDevice, ecode);
    ecode = param_put_int(plist, "MaxTempPixmap", &values.MaxTempPixmap, ecode);
    ecode = param_put_int(plist, "MaxTempImage", &values.MaxTempImage, ecode);

    if (ecode < 0)
        return ecode;

    /* Unless we specified a new window ID, */
    /* prevent gx_default_put_params from closing the device. */
    if (pwin == (long)xdev->pwin)
        dev->is_open = false;
    xdev->IsPageDevice = values.IsPageDevice;
    code = gx_default_put_params(dev, plist);
    dev->is_open = values.is_open; /* saved value */
    if (code < 0) {		/* Undo setting of .IsPageDevice */
        xdev->IsPageDevice = save_is_page;
        return code;
    }
    /* A new window ID means the device must be re-attached to it. */
    if (pwin != (long)xdev->pwin) {
        if (xdev->is_open)
            gs_closedevice(dev);
        xdev->pwin = (Window) pwin;
    }
    /* Restore the original page size if it was set by Ghostview */
    /* This gives the Ghostview user control over the /setpage entry */
    if (xdev->is_open && xdev->ghostview) {
        dev->width = values.width;
        dev->height = values.height;
        dev->x_pixels_per_inch = values.x_pixels_per_inch;
        dev->y_pixels_per_inch = values.y_pixels_per_inch;
        dev->HWResolution[0] = values.HWResolution[0];
        dev->HWResolution[1] = values.HWResolution[1];
        dev->MediaSize[0] = values.MediaSize[0];
        dev->MediaSize[1] = values.MediaSize[1];
    }
    /* If the device is open, resize the window. */
    /* Don't do this if Ghostview is active. */
    if (xdev->is_open && !xdev->ghostview &&
        (dev->width != values.width || dev->height != values.height ||
         dev->HWResolution[0] != values.HWResolution[0] ||
         dev->HWResolution[1] != values.HWResolution[1])
        ) {
        int area_width = WidthOfScreen(xdev->scr), area_height = HeightOfScreen(xdev->scr);
        int dw, dh;

        /* Get work area */
        x_get_work_area(xdev, &area_width, &area_height);

        /* Preserve screen resolution */
        dev->x_pixels_per_inch = values.x_pixels_per_inch;
        dev->y_pixels_per_inch = values.y_pixels_per_inch;
        dev->HWResolution[0] = values.HWResolution[0];
        dev->HWResolution[1] = values.HWResolution[1];

        /* Recompute window size using screen resolution and available work area size*/
        /* pixels */
        dev->width = min(dev->width, area_width);
        dev->height = min(dev->height, area_height);

        if (dev->width <= 0 || dev->height <= 0) {
            emprintf3(dev->memory, "Requested pagesize %d x %d not supported by %s device\n",
                      dev->width, dev->height, dev->dname);
            return_error(gs_error_rangecheck);
        }

        /* points */
        dev->MediaSize[0] = (float)dev->width / xdev->x_pixels_per_inch * 72;
        dev->MediaSize[1] = (float)dev->height / xdev->y_pixels_per_inch * 72;

        dw = dev->width - values.width;
        dh = dev->height - values.height;
        if (dw || dh) {
            XResizeWindow(xdev->dpy, xdev->win,
                          dev->width, dev->height);
            /* The cached backing pixmap no longer matches the new size;
               drop it so it gets re-created on demand. */
            if (xdev->bpixmap != (Pixmap) 0) {
                XFreePixmap(xdev->dpy, xdev->bpixmap);
                xdev->bpixmap = (Pixmap) 0;
            }
            xdev->dest = 0;
            clear_window = true;
        }
        /* Attempt to update the initial matrix in a sensible way. */
        /* The whole handling of the initial matrix is a hack! */
        if (xdev->initial_matrix.xy == 0) {
            if (xdev->initial_matrix.xx < 0) {	/* 180 degree rotation */
                xdev->initial_matrix.tx += dw;
            } else {		/* no rotation */
                xdev->initial_matrix.ty += dh;
            }
        } else {
            if (xdev->initial_matrix.xy < 0) {	/* 90 degree rotation */
                xdev->initial_matrix.tx += dh;
                xdev->initial_matrix.ty += dw;
            } else {		/* 270 degree rotation */
            }
        }
    }
    xdev->MaxTempPixmap = values.MaxTempPixmap;
    xdev->MaxTempImage = values.MaxTempImage;

    if (clear_window || xdev->space_params.MaxBitmap != orig_MaxBitmap) {
        if (xdev->is_open)
            gdev_x_clear_window(xdev);
    }
    return 0;
}
If a copdevice is triggered when the pdf14 compositor is the device, we make
a copy of the device, then throw an error because, by default we're only allowed
to copy the device prototype - then freeing it calls the finalize, which frees
several pointers shared with the parent.
Make a pdf14 specific finish_copydevice() which NULLs the relevant pointers,
before, possibly, throwing the same error as the default method.
This also highlighted a problem with reopening the X11 devices, where a custom
error handler could be replaced with itself, meaning it also called itself,
and infifite recursion resulted.
Keep a note of if the handler replacement has been done, and don't do it a
second time. |
/*
 * snd_ctl_create - create the control-device component of a sound card
 * @card: the card instance to attach the control device to
 *
 * Initializes card->ctl_dev, names it "controlC<card number>" and
 * registers it as a SNDRV_DEV_CONTROL device with the given ops.
 *
 * Return: zero on success, or a negative error code (-ENXIO for an
 * invalid card, or the error from snd_device_new()).
 */
int snd_ctl_create(struct snd_card *card)
{
	static const struct snd_device_ops ops = {
		.dev_free = snd_ctl_dev_free,
		.dev_register = snd_ctl_dev_register,
		.dev_disconnect = snd_ctl_dev_disconnect,
	};
	int err;

	/* Sanity checks: valid card pointer and card number in range. */
	if (snd_BUG_ON(!card))
		return -ENXIO;
	if (snd_BUG_ON(card->number < 0 || card->number >= SNDRV_CARDS))
		return -ENXIO;

	snd_device_initialize(&card->ctl_dev, card);
	dev_set_name(&card->ctl_dev, "controlC%d", card->number);

	err = snd_device_new(card, SNDRV_DEV_CONTROL, card, &ops);
	if (err < 0)
		/* Drop the reference taken by snd_device_initialize(). */
		put_device(&card->ctl_dev);
	return err;
}
"CWE-416",
"CWE-125"
] | linux | 6ab55ec0a938c7f943a4edba3d6514f775983887 | 51,154,577,933,994,680,000,000,000,000,000,000,000 | 22 | ALSA: control: Fix an out-of-bounds bug in get_ctl_id_hash()
Since the user can control the arguments provided to the kernel by the
ioctl() system call, an out-of-bounds bug occurs when the 'id->name'
provided by the user does not end with '\0'.
The following log can reveal it:
[ 10.002313] BUG: KASAN: stack-out-of-bounds in snd_ctl_find_id+0x36c/0x3a0
[ 10.002895] Read of size 1 at addr ffff888109f5fe28 by task snd/439
[ 10.004934] Call Trace:
[ 10.007140] snd_ctl_find_id+0x36c/0x3a0
[ 10.007489] snd_ctl_ioctl+0x6cf/0x10e0
Fix this by checking the bound of 'id->name' in the loop.
Fixes: c27e1efb61c5 ("ALSA: control: Use xarray for faster lookups")
Signed-off-by: Zheyu Ma <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Takashi Iwai <[email protected]> |
/* Create a copy of this Item_hex_hybrid, allocated on thd's memory root. */
Item *get_copy(THD *thd)
{ return get_item_copy<Item_hex_hybrid>(thd, this); }
"CWE-617"
] | server | 807945f2eb5fa22e6f233cc17b85a2e141efe2c8 | 271,403,700,548,027,640,000,000,000,000,000,000,000 | 2 | MDEV-26402: A SEGV in Item_field::used_tables/update_depend_map_for_order...
When doing condition pushdown from HAVING into WHERE,
Item_equal::create_pushable_equalities() calls
item->set_extraction_flag(IMMUTABLE_FL) for constant items.
Then, Item::cleanup_excluding_immutables_processor() checks for this flag
to see if it should call item->cleanup() or leave the item as-is.
The failure happens when a constant item has a non-constant one inside it,
like:
(tbl.col=0 AND impossible_cond)
item->walk(cleanup_excluding_immutables_processor) works in a bottom-up
way so it
1. will call Item_func_eq(tbl.col=0)->cleanup()
2. will not call Item_cond_and->cleanup (as the AND is constant)
This creates an item tree where a fixed Item has an un-fixed Item inside
it which eventually causes an assertion failure.
Fixed by introducing this rule: instead of just calling
item->set_extraction_flag(IMMUTABLE_FL);
we call Item::walk() to set the flag for all sub-items of the item. |
/*
 * Parse a single entry of an INDX record into indx->entries[].
 *
 * indx        - index structure being filled; entries[] must be large
 *               enough for total_entries_count entries
 * idxt        - IDXT section holding the byte offsets of the entries
 * tagx        - TAGX section describing control bytes and tag layout
 * ordt        - ORDT sections used to decode the entry label
 * buf         - buffer positioned inside the INDX record
 * curr_number - number of the entry to parse within this record
 *
 * Returns MOBI_SUCCESS, MOBI_INIT_FAILED for a NULL indx,
 * MOBI_DATA_CORRUPT for malformed input, or MOBI_MALLOC_FAILED.
 */
static MOBI_RET mobi_parse_index_entry(MOBIIndx *indx, const MOBIIdxt idxt, const MOBITagx *tagx, const MOBIOrdt *ordt, MOBIBuffer *buf, const size_t curr_number) {
    if (indx == NULL) {
        debug_print("%s", "INDX structure not initialized\n");
        return MOBI_INIT_FAILED;
    }
    /* entry_offset shifts this record's local entry numbers into the
       global entries[] array (entries from earlier records come first) */
    const size_t entry_offset = indx->entries_count;
    const size_t entry_length = idxt.offsets[curr_number + 1] - idxt.offsets[curr_number];
    mobi_buffer_setpos(buf, idxt.offsets[curr_number]);
    size_t entry_number = curr_number + entry_offset;
    if (entry_number >= indx->total_entries_count) {
        debug_print("Entry number beyond array: %zu\n", entry_number);
        return MOBI_DATA_CORRUPT;
    }
    /* save original record maxlen */
    const size_t buf_maxlen = buf->maxlen;
    if (buf->offset + entry_length >= buf_maxlen) {
        debug_print("Entry length too long: %zu\n", entry_length);
        return MOBI_DATA_CORRUPT;
    }
    /* temporarily clamp the buffer so reads cannot run past this entry */
    buf->maxlen = buf->offset + entry_length;
    size_t label_length = mobi_buffer_get8(buf);
    if (label_length > entry_length) {
        debug_print("Label length too long: %zu\n", label_length);
        return MOBI_DATA_CORRUPT;
    }
    char text[INDX_LABEL_SIZEMAX];
    /* FIXME: what is ORDT1 for? */
    if (ordt->ordt2) {
        label_length = mobi_getstring_ordt(ordt, buf, (unsigned char*) text, label_length);
    } else {
        label_length = mobi_indx_get_label((unsigned char*) text, buf, label_length, indx->ligt_entries_count);
    }
    indx->entries[entry_number].label = malloc(label_length + 1);
    if (indx->entries[entry_number].label == NULL) {
        debug_print("Memory allocation failed (%zu bytes)\n", label_length);
        return MOBI_MALLOC_FAILED;
    }
    strncpy(indx->entries[entry_number].label, text, label_length + 1);
    //debug_print("tag label[%zu]: %s\n", entry_number, indx->entries[entry_number].label);
    unsigned char *control_bytes;
    control_bytes = buf->data + buf->offset;
    mobi_buffer_seek(buf, (int) tagx->control_byte_count);
    indx->entries[entry_number].tags_count = 0;
    indx->entries[entry_number].tags = NULL;
    if (tagx->tags_count > 0) {
        /* Partially decoded tag: which TAGX tag is present in this entry
           and either how many values it has (value_count) or how many
           bytes of values follow (value_bytes). */
        typedef struct {
            uint8_t tag;
            uint8_t tag_value_count;
            uint32_t value_count;
            uint32_t value_bytes;
        } MOBIPtagx;
        MOBIPtagx *ptagx = malloc(tagx->tags_count * sizeof(MOBIPtagx));
        if (ptagx == NULL) {
            debug_print("Memory allocation failed (%zu bytes)\n", tagx->tags_count * sizeof(MOBIPtagx));
            return MOBI_MALLOC_FAILED;
        }
        uint32_t ptagx_count = 0;
        size_t len;
        size_t i = 0;
        /* First pass: read the control bytes and work out, per tag,
           whether it is present and how its values are encoded. */
        while (i < tagx->tags_count) {
            if (tagx->tags[i].control_byte == 1) {
                control_bytes++;
                i++;
                continue;
            }
            uint32_t value = control_bytes[0] & tagx->tags[i].bitmask;
            if (value != 0) {
                /* FIXME: is it safe to use MOBI_NOTSET? */
                uint32_t value_count = MOBI_NOTSET;
                uint32_t value_bytes = MOBI_NOTSET;
                /* all bits of masked value are set */
                if (value == tagx->tags[i].bitmask) {
                    /* more than 1 bit set */
                    if (mobi_bitcount(tagx->tags[i].bitmask) > 1) {
                        /* read value bytes from entry */
                        len = 0;
                        value_bytes = mobi_buffer_get_varlen(buf, &len);
                    } else {
                        value_count = 1;
                    }
                } else {
                    /* shift the masked value down to the mask's low bit */
                    uint8_t mask = tagx->tags[i].bitmask;
                    while ((mask & 1) == 0) {
                        mask >>= 1;
                        value >>= 1;
                    }
                    value_count = value;
                }
                ptagx[ptagx_count].tag = tagx->tags[i].tag;
                ptagx[ptagx_count].tag_value_count = tagx->tags[i].values_count;
                ptagx[ptagx_count].value_count = value_count;
                ptagx[ptagx_count].value_bytes = value_bytes;
                ptagx_count++;
            }
            i++;
        }
        indx->entries[entry_number].tags = malloc(tagx->tags_count * sizeof(MOBIIndexTag));
        if (indx->entries[entry_number].tags == NULL) {
            debug_print("Memory allocation failed (%zu bytes)\n", tagx->tags_count * sizeof(MOBIIndexTag));
            free(ptagx);
            return MOBI_MALLOC_FAILED;
        }
        /* Second pass: read the actual variable-length tag values. */
        i = 0;
        while (i < ptagx_count) {
            uint32_t tagvalues_count = 0;
            /* FIXME: is it safe to use MOBI_NOTSET? */
            /* value count is set */
            uint32_t tagvalues[INDX_TAGVALUES_MAX];
            if (ptagx[i].value_count != MOBI_NOTSET) {
                size_t count = ptagx[i].value_count * ptagx[i].tag_value_count;
                while (count-- && tagvalues_count < INDX_TAGVALUES_MAX) {
                    len = 0;
                    const uint32_t value_bytes = mobi_buffer_get_varlen(buf, &len);
                    tagvalues[tagvalues_count++] = value_bytes;
                }
                /* value count is not set */
            } else {
                /* read value_bytes bytes */
                len = 0;
                while (len < ptagx[i].value_bytes && tagvalues_count < INDX_TAGVALUES_MAX) {
                    const uint32_t value_bytes = mobi_buffer_get_varlen(buf, &len);
                    tagvalues[tagvalues_count++] = value_bytes;
                }
            }
            if (tagvalues_count) {
                const size_t arr_size = tagvalues_count * sizeof(*indx->entries[entry_number].tags[i].tagvalues);
                indx->entries[entry_number].tags[i].tagvalues = malloc(arr_size);
                if (indx->entries[entry_number].tags[i].tagvalues == NULL) {
                    debug_print("Memory allocation failed (%zu bytes)\n", arr_size);
                    free(ptagx);
                    return MOBI_MALLOC_FAILED;
                }
                memcpy(indx->entries[entry_number].tags[i].tagvalues, tagvalues, arr_size);
            } else {
                indx->entries[entry_number].tags[i].tagvalues = NULL;
            }
            indx->entries[entry_number].tags[i].tagid = ptagx[i].tag;
            indx->entries[entry_number].tags[i].tagvalues_count = tagvalues_count;
            indx->entries[entry_number].tags_count++;
            i++;
        }
        free(ptagx);
    }
    /* restore buffer maxlen */
    buf->maxlen = buf_maxlen;
    return MOBI_SUCCESS;
}
"CWE-476"
] | libmobi | ce0ab6586069791b1e8e2a42f44318e581c39939 | 99,473,420,116,885,710,000,000,000,000,000,000,000 | 147 | Fix issue with corrupt files with tagvalues_count = 0 that caused null pointer dereference |
/* str.isupper() implementation: return True when the string contains no
   lowercase characters and at least one uppercase character, False
   otherwise (including for the empty string). */
string_isupper(PyStringObject *self)
{
    register const unsigned char *p
        = (unsigned char *) PyString_AS_STRING(self);
    register const unsigned char *e;
    int cased;

    /* Shortcut for single character strings */
    if (PyString_GET_SIZE(self) == 1)
        return PyBool_FromLong(isupper(*p) != 0);

    /* Special case for empty strings */
    if (PyString_GET_SIZE(self) == 0)
        return PyBool_FromLong(0);

    e = p + PyString_GET_SIZE(self);
    cased = 0;
    for (; p < e; p++) {
        /* Any lowercase character disqualifies the string outright;
           otherwise remember whether we saw at least one uppercase one. */
        if (islower(*p))
            return PyBool_FromLong(0);
        else if (!cased && isupper(*p))
            cased = 1;
    }
    return PyBool_FromLong(cased);
}
"CWE-190"
] | cpython | c3c9db89273fabc62ea1b48389d9a3000c1c03ae | 168,275,164,279,571,550,000,000,000,000,000,000,000 | 25 | [2.7] bpo-30657: Check & prevent integer overflow in PyString_DecodeEscape (#2174) |
/* {{{ proto string SplObjectStorage::getHash(object obj)
 Returns the hash string that identifies the given object */
SPL_METHOD(SplObjectStorage, getHash)
{
	zval *obj;
	char *hash;

	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "o", &obj) == FAILURE) {
		return;
	}

	/* 32 hex digits plus terminating NUL written by php_spl_object_hash() */
	hash = emalloc(33);
	php_spl_object_hash(obj, hash TSRMLS_CC);

	/* dup=0: the return zval takes ownership of the emalloc'd buffer */
	RETVAL_STRING(hash, 0);

} /* }}} */
"CWE-416"
] | php-src | c2e197e4efc663ca55f393bf0e799848842286f3 | 156,620,799,453,102,560,000,000,000,000,000,000,000 | 15 | Fix bug #70168 - Use After Free Vulnerability in unserialize() with SplObjectStorage |
/* Update dst_reg's tracked bounds for a 64-bit arithmetic right shift
 * (BPF_ARSH) by a known constant amount taken from src_reg. */
static void scalar_min_max_arsh(struct bpf_reg_state *dst_reg,
				struct bpf_reg_state *src_reg)
{
	u64 umin_val = src_reg->umin_value;

	/* Upon reaching here, src_known is true and umax_val is equal
	 * to umin_val.
	 */
	/* Arithmetic shift preserves sign, so the signed bounds shift
	 * directly. */
	dst_reg->smin_value >>= umin_val;
	dst_reg->smax_value >>= umin_val;

	dst_reg->var_off = tnum_arshift(dst_reg->var_off, umin_val, 64);

	/* blow away the dst_reg umin_value/umax_value and rely on
	 * dst_reg var_off to refine the result.
	 */
	dst_reg->umin_value = 0;
	dst_reg->umax_value = U64_MAX;

	/* Its not easy to operate on alu32 bounds here because it depends
	 * on bits being shifted in from upper 32-bits. Take easy way out
	 * and mark unbounded so we can recalculate later from tnum.
	 */
	__mark_reg32_unbounded(dst_reg);
	__update_reg_bounds(dst_reg);
}
"CWE-119",
"CWE-681",
"CWE-787"
] | linux | 5b9fbeb75b6a98955f628e205ac26689bcb1383e | 157,473,989,494,475,100,000,000,000,000,000,000,000 | 26 | bpf: Fix scalar32_min_max_or bounds tracking
Simon reported an issue with the current scalar32_min_max_or() implementation.
That is, compared to the other 32 bit subreg tracking functions, the code in
scalar32_min_max_or() stands out that it's using the 64 bit registers instead
of 32 bit ones. This leads to bounds tracking issues, for example:
[...]
8: R0=map_value(id=0,off=0,ks=4,vs=48,imm=0) R10=fp0 fp-8=mmmmmmmm
8: (79) r1 = *(u64 *)(r0 +0)
R0=map_value(id=0,off=0,ks=4,vs=48,imm=0) R10=fp0 fp-8=mmmmmmmm
9: R0=map_value(id=0,off=0,ks=4,vs=48,imm=0) R1_w=inv(id=0) R10=fp0 fp-8=mmmmmmmm
9: (b7) r0 = 1
10: R0_w=inv1 R1_w=inv(id=0) R10=fp0 fp-8=mmmmmmmm
10: (18) r2 = 0x600000002
12: R0_w=inv1 R1_w=inv(id=0) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
12: (ad) if r1 < r2 goto pc+1
R0_w=inv1 R1_w=inv(id=0,umin_value=25769803778) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
13: R0_w=inv1 R1_w=inv(id=0,umin_value=25769803778) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
13: (95) exit
14: R0_w=inv1 R1_w=inv(id=0,umax_value=25769803777,var_off=(0x0; 0x7ffffffff)) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
14: (25) if r1 > 0x0 goto pc+1
R0_w=inv1 R1_w=inv(id=0,umax_value=0,var_off=(0x0; 0x7fffffff),u32_max_value=2147483647) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
15: R0_w=inv1 R1_w=inv(id=0,umax_value=0,var_off=(0x0; 0x7fffffff),u32_max_value=2147483647) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
15: (95) exit
16: R0_w=inv1 R1_w=inv(id=0,umin_value=1,umax_value=25769803777,var_off=(0x0; 0x77fffffff),u32_max_value=2147483647) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
16: (47) r1 |= 0
17: R0_w=inv1 R1_w=inv(id=0,umin_value=1,umax_value=32212254719,var_off=(0x1; 0x700000000),s32_max_value=1,u32_max_value=1) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
[...]
The bound tests on the map value force the upper unsigned bound to be 25769803777
in 64 bit (0b11000000000000000000000000000000001) and then lower one to be 1. By
using OR they are truncated and thus result in the range [1,1] for the 32 bit reg
tracker. This is incorrect given the only thing we know is that the value must be
positive and thus 2147483647 (0b1111111111111111111111111111111) at max for the
subregs. Fix it by using the {u,s}32_{min,max}_value vars instead. This also makes
sense, for example, for the case where we update dst_reg->s32_{min,max}_value in
the else branch we need to use the newly computed dst_reg->u32_{min,max}_value as
we know that these are positive. Previously, in the else branch the 64 bit values
of umin_value=1 and umax_value=32212254719 were used and latter got truncated to
be 1 as upper bound there. After the fix the subreg range is now correct:
[...]
8: R0=map_value(id=0,off=0,ks=4,vs=48,imm=0) R10=fp0 fp-8=mmmmmmmm
8: (79) r1 = *(u64 *)(r0 +0)
R0=map_value(id=0,off=0,ks=4,vs=48,imm=0) R10=fp0 fp-8=mmmmmmmm
9: R0=map_value(id=0,off=0,ks=4,vs=48,imm=0) R1_w=inv(id=0) R10=fp0 fp-8=mmmmmmmm
9: (b7) r0 = 1
10: R0_w=inv1 R1_w=inv(id=0) R10=fp0 fp-8=mmmmmmmm
10: (18) r2 = 0x600000002
12: R0_w=inv1 R1_w=inv(id=0) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
12: (ad) if r1 < r2 goto pc+1
R0_w=inv1 R1_w=inv(id=0,umin_value=25769803778) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
13: R0_w=inv1 R1_w=inv(id=0,umin_value=25769803778) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
13: (95) exit
14: R0_w=inv1 R1_w=inv(id=0,umax_value=25769803777,var_off=(0x0; 0x7ffffffff)) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
14: (25) if r1 > 0x0 goto pc+1
R0_w=inv1 R1_w=inv(id=0,umax_value=0,var_off=(0x0; 0x7fffffff),u32_max_value=2147483647) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
15: R0_w=inv1 R1_w=inv(id=0,umax_value=0,var_off=(0x0; 0x7fffffff),u32_max_value=2147483647) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
15: (95) exit
16: R0_w=inv1 R1_w=inv(id=0,umin_value=1,umax_value=25769803777,var_off=(0x0; 0x77fffffff),u32_max_value=2147483647) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
16: (47) r1 |= 0
17: R0_w=inv1 R1_w=inv(id=0,umin_value=1,umax_value=32212254719,var_off=(0x0; 0x77fffffff),u32_max_value=2147483647) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
[...]
Fixes: 3f50f132d840 ("bpf: Verifier, do explicit ALU32 bounds tracking")
Reported-by: Simon Scannell <[email protected]>
Signed-off-by: Daniel Borkmann <[email protected]>
Reviewed-by: John Fastabend <[email protected]>
Acked-by: Alexei Starovoitov <[email protected]> |
// Convert the X.509 name of |cert| (subject or issuer, selected by the
// get_name template parameter) into a JS object mapping each attribute
// type -- its short name when OpenSSL knows it, a dotted OID string
// otherwise -- to its UTF-8 value, or to an Array of values when the same
// type occurs more than once.  Returns an empty MaybeLocal on internal
// failure, and Undefined when a value cannot be converted to UTF-8.
static MaybeLocal<Value> GetX509NameObject(Environment* env, X509* cert) {
  X509_NAME* name = get_name(cert);
  CHECK_NOT_NULL(name);

  int cnt = X509_NAME_entry_count(name);
  CHECK_GE(cnt, 0);

  // A null prototype keeps attacker-controlled attribute types from
  // colliding with Object.prototype properties.
  Local<Object> result =
      Object::New(env->isolate(), Null(env->isolate()), nullptr, nullptr, 0);
  if (result.IsEmpty()) {
    return MaybeLocal<Value>();
  }

  for (int i = 0; i < cnt; i++) {
    X509_NAME_ENTRY* entry = X509_NAME_get_entry(name, i);
    CHECK_NOT_NULL(entry);

    // We intentionally ignore the value of X509_NAME_ENTRY_set because the
    // representation as an object does not allow grouping entries into sets
    // anyway, and multi-value RDNs are rare, i.e., the vast majority of
    // Relative Distinguished Names contains a single type-value pair only.
    const ASN1_OBJECT* type = X509_NAME_ENTRY_get_object(entry);
    const ASN1_STRING* value = X509_NAME_ENTRY_get_data(entry);

    // If OpenSSL knows the type, use the short name of the type as the key, and
    // the numeric representation of the type's OID otherwise.
    int type_nid = OBJ_obj2nid(type);
    char type_buf[80];
    const char* type_str;
    if (type_nid != NID_undef) {
      type_str = OBJ_nid2sn(type_nid);
      CHECK_NOT_NULL(type_str);
    } else {
      OBJ_obj2txt(type_buf, sizeof(type_buf), type, true);
      type_str = type_buf;
    }

    Local<String> v8_name;
    if (!String::NewFromUtf8(env->isolate(), type_str).ToLocal(&v8_name)) {
      return MaybeLocal<Value>();
    }

    // The previous implementation used X509_NAME_print_ex, which escapes some
    // characters in the value. The old implementation did not decode/unescape
    // values correctly though, leading to ambiguous and incorrect
    // representations. The new implementation only converts to Unicode and does
    // not escape anything.
    unsigned char* value_str;
    int value_str_size = ASN1_STRING_to_UTF8(&value_str, value);
    if (value_str_size < 0) {
      return Undefined(env->isolate());
    }

    Local<String> v8_value;
    if (!String::NewFromUtf8(env->isolate(),
                             reinterpret_cast<const char*>(value_str),
                             NewStringType::kNormal,
                             value_str_size).ToLocal(&v8_value)) {
      // value_str is OpenSSL-allocated and must be released on every path.
      OPENSSL_free(value_str);
      return MaybeLocal<Value>();
    }

    OPENSSL_free(value_str);

    // For backward compatibility, we only create arrays if multiple values
    // exist for the same key. That is not great but there is not much we can
    // change here without breaking things. Note that this creates nested data
    // structures, yet still does not allow representing Distinguished Names
    // accurately.
    if (result->HasOwnProperty(env->context(), v8_name).ToChecked()) {
      Local<Value> accum =
          result->Get(env->context(), v8_name).ToLocalChecked();
      if (!accum->IsArray()) {
        accum = Array::New(env->isolate(), &accum, 1);
        result->Set(env->context(), v8_name, accum).Check();
      }
      Local<Array> array = accum.As<Array>();
      array->Set(env->context(), array->Length(), v8_value).Check();
    } else {
      result->Set(env->context(), v8_name, v8_value).Check();
    }
  }

  return result;
}
"CWE-295"
] | node | a336444c7fb9fd1d0055481d84cdd57d7d569879 | 153,268,819,361,709,940,000,000,000,000,000,000,000 | 85 | tls: fix handling of x509 subject and issuer
When subject and issuer are represented as strings, escape special
characters (such as '+') to guarantee unambiguity. Previously, different
distinguished names could result in the same string when encoded. In
particular, inserting a '+' in a single-value Relative Distinguished
Name (e.g., L or OU) would produce a string that is indistinguishable
from a multi-value Relative Distinguished Name. Third-party code that
correctly interprets the generated string representation as a
multi-value Relative Distinguished Name could then be vulnerable to an
injection attack, e.g., when an attacker includes a single-value RDN
with type OU and value 'HR + CN=example.com', the string representation
produced by unpatched versions of Node.js would be
'OU=HR + CN=example.com', which represents a multi-value RDN.
Node.js itself is not vulnerable to this attack because the current
implementation that parses such strings into objects does not handle '+'
at all. This oversight leads to incorrect results, but at the same time
appears to prevent injection attacks (as described above).
With this change, the JavaScript objects representing the subject and
issuer Relative Distinguished Names are constructed in C++ directly,
instead of (incorrectly) encoding them as strings and then (incorrectly)
decoding the strings in JavaScript.
This addresses CVE-2021-44533.
CVE-ID: CVE-2021-44533
PR-URL: https://github.com/nodejs-private/node-private/pull/300
Reviewed-By: Michael Dawson <[email protected]>
Reviewed-By: Rich Trott <[email protected]> |
//! Solve the linear system A*X = B (where B is *this), returning X as a new image.
/**
   \param A Matrix of the linear system.
   \param use_LU When true, solve via LU decomposition instead of the default method.
   \note Works on a copy, leaving *this unchanged; delegates to the in-place solve().
**/
CImg<_cimg_Ttfloat> get_solve(const CImg<t>& A, const bool use_LU=false) const {
  typedef _cimg_Ttfloat Ttfloat;
  return CImg<Ttfloat>(*this,false).solve(A,use_LU);
}
"CWE-770"
] | cimg | 619cb58dd90b4e03ac68286c70ed98acbefd1c90 | 101,612,431,420,972,860,000,000,000,000,000,000,000 | 4 | CImg<>::load_bmp() and CImg<>::load_pandore(): Check that dimensions encoded in file does not exceed file size. |
struct btrfs_path *btrfs_alloc_path(void)
{
	/* Zero-initialized allocation from the dedicated path slab cache;
	 * may return NULL on allocation failure. */
	return kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
}
"CWE-416",
"CWE-362"
] | linux | 5f5bc6b1e2d5a6f827bc860ef2dc5b6f365d1339 | 164,282,362,144,643,340,000,000,000,000,000,000,000 | 6 | Btrfs: make xattr replace operations atomic
Replacing a xattr consists of doing a lookup for its existing value, delete
the current value from the respective leaf, release the search path and then
finally insert the new value. This leaves a time window where readers (getxattr,
listxattrs) won't see any value for the xattr. Xattrs are used to store ACLs,
so this has security implications.
This change also fixes 2 other existing issues which were:
*) Deleting the old xattr value without verifying first if the new xattr will
fit in the existing leaf item (in case multiple xattrs are packed in the
same item due to name hash collision);
*) Returning -EEXIST when the flag XATTR_CREATE is given and the xattr doesn't
exist but we have have an existing item that packs muliple xattrs with
the same name hash as the input xattr. In this case we should return ENOSPC.
A test case for xfstests follows soon.
Thanks to Alexandre Oliva for reporting the non-atomicity of the xattr replace
implementation.
Reported-by: Alexandre Oliva <[email protected]>
Signed-off-by: Filipe Manana <[email protected]>
Signed-off-by: Chris Mason <[email protected]> |
/* Operator precedence of an AST node type, used by the pretty-printer to
 * decide where parentheses are needed.  Higher values bind tighter;
 * 0 means the node is not treated as an expression. */
static int prec(enum js_AstType type)
{
	switch (type) {
	/* Primary expressions and literals: tightest binding. */
	case AST_IDENTIFIER:
	case EXP_IDENTIFIER:
	case EXP_NUMBER:
	case EXP_STRING:
	case EXP_REGEXP:
	case EXP_UNDEF:
	case EXP_NULL:
	case EXP_TRUE:
	case EXP_FALSE:
	case EXP_THIS:
	case EXP_ARRAY:
	case EXP_OBJECT:
		return 170;

	/* Member access, calls and 'new'. */
	case EXP_FUN:
	case EXP_INDEX:
	case EXP_MEMBER:
	case EXP_CALL:
	case EXP_NEW:
		return 160;

	case EXP_POSTINC:
	case EXP_POSTDEC:
		return 150;

	/* Unary prefix operators. */
	case EXP_DELETE:
	case EXP_VOID:
	case EXP_TYPEOF:
	case EXP_PREINC:
	case EXP_PREDEC:
	case EXP_POS:
	case EXP_NEG:
	case EXP_BITNOT:
	case EXP_LOGNOT:
		return 140;

	/* Binary operators, from multiplicative down to bitwise-or. */
	case EXP_MOD:
	case EXP_DIV:
	case EXP_MUL:
		return 130;

	case EXP_SUB:
	case EXP_ADD:
		return 120;

	case EXP_USHR:
	case EXP_SHR:
	case EXP_SHL:
		return 110;

	case EXP_IN:
	case EXP_INSTANCEOF:
	case EXP_GE:
	case EXP_LE:
	case EXP_GT:
	case EXP_LT:
		return 100;

	case EXP_STRICTNE:
	case EXP_STRICTEQ:
	case EXP_NE:
	case EXP_EQ:
		return 90;

	case EXP_BITAND: return 80;
	case EXP_BITXOR: return 70;
	case EXP_BITOR: return 60;
	case EXP_LOGAND: return 50;
	case EXP_LOGOR: return 40;

	case EXP_COND:
		return 30;

	/* Assignment operators. */
	case EXP_ASS:
	case EXP_ASS_MUL:
	case EXP_ASS_DIV:
	case EXP_ASS_MOD:
	case EXP_ASS_ADD:
	case EXP_ASS_SUB:
	case EXP_ASS_SHL:
	case EXP_ASS_SHR:
	case EXP_ASS_USHR:
	case EXP_ASS_BITAND:
	case EXP_ASS_BITXOR:
	case EXP_ASS_BITOR:
		return 20;

/* NOTE(review): sits between assignment (20) and the comma operator (10);
   presumably used by callers to force parens around comma expressions —
   confirm against the pretty-printer. */
#define COMMA 15

	case EXP_COMMA:
		return 10;

	default:
		return 0;
	}
}
"CWE-476"
] | mujs | f5b3c703e18725e380b83427004632e744f85a6f | 306,559,970,460,768,770,000,000,000,000,000,000,000 | 99 | Issue #161: Cope with empty programs in mujs-pp. |
static int qcow2_load_vmstate(BlockDriverState *bs, uint8_t *buf,
                              int64_t pos, int size)
{
    BDRVQcowState *s = bs->opaque;
    int saved_growable = bs->growable;
    bool saved_zero_beyond_eof = bs->zero_beyond_eof;
    int ret;

    BLKDBG_EVENT(bs->file, BLKDBG_VMSTATE_LOAD);

    /* Temporarily mark the device growable and disable zeroing beyond
     * EOF so the read can reach the vmstate area located past the
     * guest-visible image size (via qcow2_vm_state_offset()). */
    bs->growable = 1;
    bs->zero_beyond_eof = false;
    ret = bdrv_pread(bs, qcow2_vm_state_offset(s) + pos, buf, size);
    bs->growable = saved_growable;
    bs->zero_beyond_eof = saved_zero_beyond_eof;

    return ret;
}
"CWE-476"
] | qemu | 11b128f4062dd7f89b14abc8877ff20d41b28be9 | 331,692,766,298,454,040,000,000,000,000,000,000,000 | 17 | qcow2: Fix NULL dereference in qcow2_open() error path (CVE-2014-0146)
The qcow2 code assumes that s->snapshots is non-NULL if s->nb_snapshots
!= 0. By having the initialisation of both fields separated in
qcow2_open(), any error occuring in between would cause the error path
to dereference NULL in qcow2_free_snapshots() if the image had any
snapshots.
Signed-off-by: Kevin Wolf <[email protected]>
Reviewed-by: Max Reitz <[email protected]>
Signed-off-by: Stefan Hajnoczi <[email protected]> |
void rtl8xxxu_fill_iqk_matrix_a(struct rtl8xxxu_priv *priv, bool iqk_ok,
int result[][8], int candidate, bool tx_only)
{
u32 oldval, x, tx0_a, reg;
int y, tx0_c;
u32 val32;
if (!iqk_ok)
return;
val32 = rtl8xxxu_read32(priv, REG_OFDM0_XA_TX_IQ_IMBALANCE);
oldval = val32 >> 22;
x = result[candidate][0];
if ((x & 0x00000200) != 0)
x = x | 0xfffffc00;
tx0_a = (x * oldval) >> 8;
val32 = rtl8xxxu_read32(priv, REG_OFDM0_XA_TX_IQ_IMBALANCE);
val32 &= ~0x3ff;
val32 |= tx0_a;
rtl8xxxu_write32(priv, REG_OFDM0_XA_TX_IQ_IMBALANCE, val32);
val32 = rtl8xxxu_read32(priv, REG_OFDM0_ENERGY_CCA_THRES);
val32 &= ~BIT(31);
if ((x * oldval >> 7) & 0x1)
val32 |= BIT(31);
rtl8xxxu_write32(priv, REG_OFDM0_ENERGY_CCA_THRES, val32);
y = result[candidate][1];
if ((y & 0x00000200) != 0)
y = y | 0xfffffc00;
tx0_c = (y * oldval) >> 8;
val32 = rtl8xxxu_read32(priv, REG_OFDM0_XC_TX_AFE);
val32 &= ~0xf0000000;
val32 |= (((tx0_c & 0x3c0) >> 6) << 28);
rtl8xxxu_write32(priv, REG_OFDM0_XC_TX_AFE, val32);
val32 = rtl8xxxu_read32(priv, REG_OFDM0_XA_TX_IQ_IMBALANCE);
val32 &= ~0x003f0000;
val32 |= ((tx0_c & 0x3f) << 16);
rtl8xxxu_write32(priv, REG_OFDM0_XA_TX_IQ_IMBALANCE, val32);
val32 = rtl8xxxu_read32(priv, REG_OFDM0_ENERGY_CCA_THRES);
val32 &= ~BIT(29);
if ((y * oldval >> 7) & 0x1)
val32 |= BIT(29);
rtl8xxxu_write32(priv, REG_OFDM0_ENERGY_CCA_THRES, val32);
if (tx_only) {
dev_dbg(&priv->udev->dev, "%s: only TX\n", __func__);
return;
}
reg = result[candidate][2];
val32 = rtl8xxxu_read32(priv, REG_OFDM0_XA_RX_IQ_IMBALANCE);
val32 &= ~0x3ff;
val32 |= (reg & 0x3ff);
rtl8xxxu_write32(priv, REG_OFDM0_XA_RX_IQ_IMBALANCE, val32);
reg = result[candidate][3] & 0x3F;
val32 = rtl8xxxu_read32(priv, REG_OFDM0_XA_RX_IQ_IMBALANCE);
val32 &= ~0xfc00;
val32 |= ((reg << 10) & 0xfc00);
rtl8xxxu_write32(priv, REG_OFDM0_XA_RX_IQ_IMBALANCE, val32);
reg = (result[candidate][3] >> 6) & 0xF;
val32 = rtl8xxxu_read32(priv, REG_OFDM0_RX_IQ_EXT_ANTA);
val32 &= ~0xf0000000;
val32 |= (reg << 28);
rtl8xxxu_write32(priv, REG_OFDM0_RX_IQ_EXT_ANTA, val32);
} | 0 | [
"CWE-400",
"CWE-401"
] | linux | a2cdd07488e666aa93a49a3fc9c9b1299e27ef3c | 177,551,788,972,231,600,000,000,000,000,000,000,000 | 76 | rtl8xxxu: prevent leaking urb
In rtl8xxxu_submit_int_urb if usb_submit_urb fails the allocated urb
should be released.
Signed-off-by: Navid Emamdoost <[email protected]>
Reviewed-by: Chris Chiu <[email protected]>
Signed-off-by: Kalle Valo <[email protected]> |
static void sungem_update_status(SunGEMState *s, uint32_t bits, bool val)
{
uint32_t stat;
stat = s->gregs[GREG_STAT >> 2];
if (val) {
stat |= bits;
} else {
stat &= ~bits;
}
s->gregs[GREG_STAT >> 2] = stat;
sungem_eval_irq(s);
} | 0 | [
"CWE-835"
] | qemu | 8c92060d3c0248bd4d515719a35922cd2391b9b4 | 214,406,158,339,271,260,000,000,000,000,000,000,000 | 13 | sungem: switch to use qemu_receive_packet() for loopback
This patch switches to use qemu_receive_packet() which can detect
reentrancy and return early.
This is intended to address CVE-2021-3416.
Cc: Prasad J Pandit <[email protected]>
Cc: [email protected]
Reviewed-by: Mark Cave-Ayland <[email protected]>
Reviewed-by: Philippe Mathieu-Daudé <[email protected]>
Reviewed-by: Alistair Francis <[email protected]>
Signed-off-by: Jason Wang <[email protected]> |
TEST_P(ProtocolIntegrationTest, MultipleSetCookies) {
initialize();
codec_client_ = makeHttpConnection(lookupPort("http"));
Http::TestResponseHeaderMapImpl response_headers{
{":status", "200"}, {"set-cookie", "foo"}, {"set-cookie", "bar"}};
auto response = sendRequestAndWaitForResponse(default_request_headers_, 0, response_headers, 0);
ASSERT_TRUE(response->complete());
EXPECT_EQ("200", response->headers().getStatusValue());
std::vector<absl::string_view> out;
Http::HeaderUtility::getAllOfHeader(response->headers(), "set-cookie", out);
ASSERT_EQ(out.size(), 2);
ASSERT_EQ(out[0], "foo");
ASSERT_EQ(out[1], "bar");
} | 0 | [
"CWE-770"
] | envoy | 7ca28ff7d46454ae930e193d97b7d08156b1ba59 | 219,500,115,598,498,700,000,000,000,000,000,000,000 | 19 | [http1] Include request URL in request header size computation, and reject partial headers that exceed configured limits (#145)
Signed-off-by: antonio <[email protected]> |
nfsd4_encode_rename(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_rename *rename)
{
struct xdr_stream *xdr = &resp->xdr;
__be32 *p;
if (!nfserr) {
p = xdr_reserve_space(xdr, 40);
if (!p)
return nfserr_resource;
p = encode_cinfo(p, &rename->rn_sinfo);
p = encode_cinfo(p, &rename->rn_tinfo);
}
return nfserr;
} | 0 | [
"CWE-20",
"CWE-129"
] | linux | f961e3f2acae94b727380c0b74e2d3954d0edf79 | 52,067,851,523,365,120,000,000,000,000,000,000,000 | 14 | nfsd: encoders mustn't use unitialized values in error cases
In error cases, lgp->lg_layout_type may be out of bounds; so we
shouldn't be using it until after the check of nfserr.
This was seen to crash nfsd threads when the server receives a LAYOUTGET
request with a large layout type.
GETDEVICEINFO has the same problem.
Reported-by: Ari Kauppi <[email protected]>
Reviewed-by: Christoph Hellwig <[email protected]>
Cc: [email protected]
Signed-off-by: J. Bruce Fields <[email protected]> |
static int ssl_security_cert_sig(SSL *s, SSL_CTX *ctx, X509 *x, int op)
{
/* Lookup signature algorithm digest */
int secbits = -1, md_nid = NID_undef, sig_nid;
sig_nid = X509_get_signature_nid(x);
if (sig_nid && OBJ_find_sigid_algs(sig_nid, &md_nid, NULL))
{
const EVP_MD *md;
if (md_nid && (md = EVP_get_digestbynid(md_nid)))
secbits = EVP_MD_size(md) * 4;
}
if (s)
return ssl_security(s, op, secbits, md_nid, x);
else
return ssl_ctx_security(ctx, op, secbits, md_nid, x);
} | 0 | [] | openssl | 80bd7b41b30af6ee96f519e629463583318de3b0 | 101,156,393,288,636,800,000,000,000,000,000,000,000 | 16 | Fix SRP ciphersuite DoS vulnerability.
If a client attempted to use an SRP ciphersuite and it had not been
set up correctly it would crash with a null pointer read. A malicious
server could exploit this in a DoS attack.
Thanks to Joonas Kuorilehto and Riku Hietamäki from Codenomicon
for reporting this issue.
CVE-2014-2970
Reviewed-by: Tim Hudson <[email protected]> |
bgp_packet_set_marker (struct stream *s, u_char type)
{
int i;
/* Fill in marker. */
for (i = 0; i < BGP_MARKER_SIZE; i++)
stream_putc (s, 0xff);
/* Dummy total length. This field is should be filled in later on. */
stream_putw (s, 0);
/* BGP packet type. */
stream_putc (s, type);
/* Return current stream size. */
return stream_get_endp (s);
} | 0 | [
"CWE-119"
] | quagga | 5861739f8c38bc36ea9955e5cb2be2bf2f482d70 | 95,562,112,133,381,900,000,000,000,000,000,000,000 | 17 | bgpd: Open option parse errors don't NOTIFY, resulting in abort & DoS
* bgp_packet.c: (bgp_open_receive) Errors from bgp_open_option_parse are
detected, and the code will stop processing the OPEN and return. However
it does so without calling bgp_notify_send to send a NOTIFY - which means
the peer FSM doesn't get stopped, and bgp_read will be called again later.
Because it returns, it doesn't go through the code near the end of the
function that removes the current message from the peer input streaam.
Thus the next call to bgp_read will try to parse a half-parsed stream as
if it were a new BGP message, leading to an assert later in the code when
it tries to read stuff that isn't there. Add the required call to
bgp_notify_send before returning.
* bgp_open.c: (bgp_capability_as4) Be a bit stricter, check the length field
corresponds to the only value it can be, which is the amount we're going to
read off the stream. And make sure the capability flag gets set, so
callers can know this capability was read, regardless.
(peek_for_as4_capability) Let bgp_capability_as4 do the length check. |
png_read_filter_row(png_structrp pp, png_row_infop row_info, png_bytep row,
png_const_bytep prev_row, int filter)
{
/* OPTIMIZATION: DO NOT MODIFY THIS FUNCTION, instead #define
* PNG_FILTER_OPTIMIZATIONS to a function that overrides the generic
* implementations. See png_init_filter_functions above.
*/
if (filter > PNG_FILTER_VALUE_NONE && filter < PNG_FILTER_VALUE_LAST)
{
if (pp->read_filter[0] == NULL)
png_init_filter_functions(pp);
pp->read_filter[filter-1](row_info, row, prev_row);
}
} | 0 | [
"CWE-190",
"CWE-369"
] | libpng | 8a05766cb74af05c04c53e6c9d60c13fc4d59bf2 | 36,131,553,456,958,800,000,000,000,000,000,000,000 | 15 | [libpng16] Fix the calculation of row_factor in png_check_chunk_length
(Bug report by Thuan Pham, SourceForge issue #278) |
void afra_box_del(GF_Box *s)
{
GF_AdobeFragRandomAccessBox *ptr = (GF_AdobeFragRandomAccessBox *)s;
if (ptr == NULL) return;
while (gf_list_count(ptr->local_access_entries)) {
gf_free(gf_list_get(ptr->local_access_entries, 0));
gf_list_rem(ptr->local_access_entries, 0);
}
gf_list_del(ptr->local_access_entries);
while (gf_list_count(ptr->global_access_entries)) {
gf_free(gf_list_get(ptr->global_access_entries, 0));
gf_list_rem(ptr->global_access_entries, 0);
}
gf_list_del(ptr->global_access_entries);
gf_free(ptr);
} | 0 | [
"CWE-125"
] | gpac | 093283e727f396130651280609e687cd4778e0d1 | 154,441,408,509,164,640,000,000,000,000,000,000,000 | 19 | fixed #1564 |
dissect_tcpopt_rvbd_probe(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, void* data)
{
guint8 ver, type;
proto_tree *field_tree;
proto_item *pitem;
proto_item *length_item;
int offset = 0,
optlen = tvb_reported_length(tvb);
struct tcpheader *tcph = (struct tcpheader*)data;
pitem = proto_tree_add_item(tree, proto_tcp_option_rvbd_probe, tvb, offset, -1, ENC_NA);
field_tree = proto_item_add_subtree(pitem, ett_tcp_opt_rvbd_probe);
proto_tree_add_item(field_tree, hf_tcp_option_kind, tvb,
offset, 1, ENC_BIG_ENDIAN);
length_item = proto_tree_add_item(field_tree, hf_tcp_option_len, tvb,
offset + 1, 1, ENC_BIG_ENDIAN);
if (optlen < TCPOLEN_RVBD_PROBE_MIN) {
/* Bogus - option length is less than what it's supposed to be for
this option. */
expert_add_info_format(pinfo, length_item, &ei_tcp_opt_len_invalid,
"option length should be >= %u)",
TCPOLEN_RVBD_PROBE_MIN);
return tvb_captured_length(tvb);
}
rvbd_probe_decode_version_type(
tvb_get_guint8(tvb, offset + PROBE_VERSION_TYPE_OFFSET),
&ver, &type);
proto_item_append_text(pitem, ": %s", val_to_str_const(type, rvbd_probe_type_vs, "Probe Unknown"));
if (type >= PROBE_TYPE_MAX)
return tvb_captured_length(tvb);
if (ver == PROBE_VERSION_1) {
guint16 port;
proto_tree_add_item(field_tree, hf_tcp_option_rvbd_probe_type1, tvb,
offset + PROBE_VERSION_TYPE_OFFSET, 1, ENC_BIG_ENDIAN);
proto_tree_add_item(field_tree, hf_tcp_option_rvbd_probe_version1, tvb,
offset + PROBE_VERSION_TYPE_OFFSET, 1, ENC_BIG_ENDIAN);
if (type == PROBE_INTERNAL)
return offset + PROBE_VERSION_TYPE_OFFSET;
proto_tree_add_item(field_tree, hf_tcp_option_rvbd_probe_reserved, tvb, offset + PROBE_V1_RESERVED_OFFSET, 1, ENC_BIG_ENDIAN);
proto_tree_add_item(field_tree, hf_tcp_option_rvbd_probe_prober, tvb,
offset + PROBE_V1_PROBER_OFFSET, 4, ENC_BIG_ENDIAN);
switch (type) {
case PROBE_QUERY:
case PROBE_QUERY_SH:
case PROBE_TRACE:
{
rvbd_option_data* option_data;
proto_tree_add_item(field_tree, hf_tcp_option_rvbd_probe_appli_ver, tvb,
offset + PROBE_V1_APPLI_VERSION_OFFSET, 2,
ENC_BIG_ENDIAN);
proto_item_append_text(pitem, ", CSH IP: %s", tvb_ip_to_str(tvb, offset + PROBE_V1_PROBER_OFFSET));
option_data = (rvbd_option_data*)p_get_proto_data(pinfo->pool, pinfo, proto_tcp_option_rvbd_probe, pinfo->curr_layer_num);
if (option_data == NULL)
{
option_data = wmem_new0(pinfo->pool, rvbd_option_data);
p_add_proto_data(pinfo->pool, pinfo, proto_tcp_option_rvbd_probe, pinfo->curr_layer_num, option_data);
}
option_data->valid = TRUE;
option_data->type = type;
}
break;
case PROBE_RESPONSE:
proto_tree_add_item(field_tree, hf_tcp_option_rvbd_probe_proxy, tvb,
offset + PROBE_V1_PROXY_ADDR_OFFSET, 4, ENC_BIG_ENDIAN);
port = tvb_get_ntohs(tvb, offset + PROBE_V1_PROXY_PORT_OFFSET);
proto_tree_add_item(field_tree, hf_tcp_option_rvbd_probe_proxy_port, tvb,
offset + PROBE_V1_PROXY_PORT_OFFSET, 2, ENC_BIG_ENDIAN);
rvbd_probe_resp_add_info(pitem, pinfo, tvb, offset + PROBE_V1_PROXY_ADDR_OFFSET, port);
break;
case PROBE_RESPONSE_SH:
proto_tree_add_item(field_tree,
hf_tcp_option_rvbd_probe_client, tvb,
offset + PROBE_V1_SH_CLIENT_ADDR_OFFSET, 4,
ENC_BIG_ENDIAN);
proto_tree_add_item(field_tree, hf_tcp_option_rvbd_probe_proxy, tvb,
offset + PROBE_V1_SH_PROXY_ADDR_OFFSET, 4, ENC_BIG_ENDIAN);
port = tvb_get_ntohs(tvb, offset + PROBE_V1_SH_PROXY_PORT_OFFSET);
proto_tree_add_item(field_tree, hf_tcp_option_rvbd_probe_proxy_port, tvb,
offset + PROBE_V1_SH_PROXY_PORT_OFFSET, 2, ENC_BIG_ENDIAN);
rvbd_probe_resp_add_info(pitem, pinfo, tvb, offset + PROBE_V1_SH_PROXY_ADDR_OFFSET, port);
break;
}
}
else if (ver == PROBE_VERSION_2) {
proto_item *ver_pi;
proto_item *flag_pi;
proto_tree *flag_tree;
guint8 flags;
proto_tree_add_item(field_tree, hf_tcp_option_rvbd_probe_type2, tvb,
offset + PROBE_VERSION_TYPE_OFFSET, 1, ENC_BIG_ENDIAN);
proto_tree_add_uint_format_value(
field_tree, hf_tcp_option_rvbd_probe_version2, tvb,
offset + PROBE_VERSION_TYPE_OFFSET, 1, ver, "%u", ver);
/* Use version1 for filtering purposes because version2 packet
value is 0, but filtering is usually done for value 2 */
ver_pi = proto_tree_add_uint(field_tree, hf_tcp_option_rvbd_probe_version1, tvb,
offset + PROBE_VERSION_TYPE_OFFSET, 1, ver);
PROTO_ITEM_SET_HIDDEN(ver_pi);
switch (type) {
case PROBE_QUERY_INFO:
case PROBE_QUERY_INFO_SH:
case PROBE_QUERY_INFO_SID:
flags = tvb_get_guint8(tvb, offset + PROBE_V2_INFO_OFFSET);
flag_pi = proto_tree_add_uint(field_tree, hf_tcp_option_rvbd_probe_flags,
tvb, offset + PROBE_V2_INFO_OFFSET,
1, flags);
flag_tree = proto_item_add_subtree(flag_pi, ett_tcp_opt_rvbd_probe_flags);
proto_tree_add_item(flag_tree,
hf_tcp_option_rvbd_probe_flag_not_cfe,
tvb, offset + PROBE_V2_INFO_OFFSET, 1, ENC_BIG_ENDIAN);
proto_tree_add_item(flag_tree,
hf_tcp_option_rvbd_probe_flag_last_notify,
tvb, offset + PROBE_V2_INFO_OFFSET, 1, ENC_BIG_ENDIAN);
switch (type)
{
case PROBE_QUERY_INFO:
{
rvbd_option_data* option_data = (rvbd_option_data*)p_get_proto_data(pinfo->pool, pinfo, proto_tcp_option_rvbd_probe, pinfo->curr_layer_num);
if (option_data == NULL)
{
option_data = wmem_new0(pinfo->pool, rvbd_option_data);
p_add_proto_data(pinfo->pool, pinfo, proto_tcp_option_rvbd_probe, pinfo->curr_layer_num, option_data);
}
option_data->probe_flags = flags;
}
break;
case PROBE_QUERY_INFO_SH:
proto_tree_add_item(flag_tree,
hf_tcp_option_rvbd_probe_client, tvb,
offset + PROBE_V2_INFO_CLIENT_ADDR_OFFSET,
4, ENC_BIG_ENDIAN);
break;
case PROBE_QUERY_INFO_SID:
proto_tree_add_item(flag_tree,
hf_tcp_option_rvbd_probe_storeid, tvb,
offset + PROBE_V2_INFO_STOREID_OFFSET,
4, ENC_BIG_ENDIAN);
break;
}
if (type != PROBE_QUERY_INFO_SID &&
tcph != NULL &&
(tcph->th_flags & (TH_SYN|TH_ACK)) == (TH_SYN|TH_ACK) &&
(flags & RVBD_FLAGS_PROBE_LAST)) {
col_prepend_fstr(pinfo->cinfo, COL_INFO, "SA++, ");
}
break;
case PROBE_RESPONSE_INFO:
flag_pi = proto_tree_add_item(field_tree, hf_tcp_option_rvbd_probe_flags,
tvb, offset + PROBE_V2_INFO_OFFSET,
1, ENC_BIG_ENDIAN);
flag_tree = proto_item_add_subtree(flag_pi, ett_tcp_opt_rvbd_probe_flags);
proto_tree_add_item(flag_tree,
hf_tcp_option_rvbd_probe_flag_probe_cache,
tvb, offset + PROBE_V2_INFO_OFFSET, 1, ENC_BIG_ENDIAN);
proto_tree_add_item(flag_tree,
hf_tcp_option_rvbd_probe_flag_sslcert,
tvb, offset + PROBE_V2_INFO_OFFSET, 1, ENC_BIG_ENDIAN);
proto_tree_add_item(flag_tree,
hf_tcp_option_rvbd_probe_flag_server_connected,
tvb, offset + PROBE_V2_INFO_OFFSET, 1, ENC_BIG_ENDIAN);
break;
case PROBE_RST:
proto_tree_add_item(field_tree, hf_tcp_option_rvbd_probe_flags,
tvb, offset + PROBE_V2_INFO_OFFSET,
1, ENC_BIG_ENDIAN);
break;
}
}
return tvb_captured_length(tvb);
} | 0 | [
"CWE-354"
] | wireshark | 7f3fe6164a68b76d9988c4253b24d43f498f1753 | 53,817,146,921,350,480,000,000,000,000,000,000,000 | 206 | TCP: do not use an unknown status when the checksum is 0xffff
Otherwise it triggers an assert when adding the column as the field is
defined as BASE_NONE and not BASE_DEC or BASE_HEX. Thus an unknown value
(not in proto_checksum_vals[)array) cannot be represented.
Mark the checksum as bad even if we process the packet.
Closes #16816
Conflicts:
epan/dissectors/packet-tcp.c |
static char *get_local_name(struct file_list *flist, char *dest_path)
{
STRUCT_STAT st;
int statret, trailing_slash;
char *cp;
if (DEBUG_GTE(RECV, 1)) {
rprintf(FINFO, "get_local_name count=%d %s\n",
file_total, NS(dest_path));
}
if (!dest_path || list_only)
return NULL;
/* Treat an empty string as a copy into the current directory. */
if (!*dest_path)
dest_path = ".";
if (daemon_filter_list.head) {
char *slash = strrchr(dest_path, '/');
if (slash && (slash[1] == '\0' || (slash[1] == '.' && slash[2] == '\0')))
*slash = '\0';
else
slash = NULL;
if ((*dest_path != '.' || dest_path[1] != '\0')
&& (check_filter(&daemon_filter_list, FLOG, dest_path, 0) < 0
|| check_filter(&daemon_filter_list, FLOG, dest_path, 1) < 0)) {
rprintf(FERROR, "ERROR: daemon has excluded destination \"%s\"\n",
dest_path);
exit_cleanup(RERR_FILESELECT);
}
if (slash)
*slash = '/';
}
/* See what currently exists at the destination. */
statret = do_stat(dest_path, &st);
cp = strrchr(dest_path, '/');
trailing_slash = cp && !cp[1];
if (mkpath_dest_arg && statret < 0 && (cp || file_total > 1)) {
int save_errno = errno;
int ret = make_path(dest_path, file_total > 1 && !trailing_slash ? 0 : MKP_DROP_NAME);
if (ret < 0)
goto mkdir_error;
if (ret && (INFO_GTE(NAME, 1) || stdout_format_has_i)) {
if (file_total == 1 || trailing_slash)
*cp = '\0';
rprintf(FINFO, "created %d director%s for %s\n", ret, ret == 1 ? "y" : "ies", dest_path);
if (file_total == 1 || trailing_slash)
*cp = '/';
}
if (ret)
statret = do_stat(dest_path, &st);
else
errno = save_errno;
}
if (statret == 0) {
/* If the destination is a dir, enter it and use mode 1. */
if (S_ISDIR(st.st_mode)) {
if (!change_dir(dest_path, CD_NORMAL)) {
rsyserr(FERROR, errno, "change_dir#1 %s failed",
full_fname(dest_path));
exit_cleanup(RERR_FILESELECT);
}
filesystem_dev = st.st_dev; /* ensures --force works right w/-x */
return NULL;
}
if (file_total > 1) {
rprintf(FERROR,
"ERROR: destination must be a directory when"
" copying more than 1 file\n");
exit_cleanup(RERR_FILESELECT);
}
if (file_total == 1 && S_ISDIR(flist->files[0]->mode)) {
rprintf(FERROR,
"ERROR: cannot overwrite non-directory"
" with a directory\n");
exit_cleanup(RERR_FILESELECT);
}
} else if (errno != ENOENT) {
/* If we don't know what's at the destination, fail. */
rsyserr(FERROR, errno, "ERROR: cannot stat destination %s",
full_fname(dest_path));
exit_cleanup(RERR_FILESELECT);
}
/* If we need a destination directory because the transfer is not
* of a single non-directory or the user has requested one via a
* destination path ending in a slash, create one and use mode 1. */
if (file_total > 1 || trailing_slash) {
if (trailing_slash)
*cp = '\0'; /* Lop off the final slash (if any). */
if (statret == 0) {
rprintf(FERROR, "ERROR: destination path is not a directory\n");
exit_cleanup(RERR_SYNTAX);
}
if (do_mkdir(dest_path, ACCESSPERMS) != 0) {
mkdir_error:
rsyserr(FERROR, errno, "mkdir %s failed",
full_fname(dest_path));
exit_cleanup(RERR_FILEIO);
}
if (flist->high >= flist->low
&& strcmp(flist->files[flist->low]->basename, ".") == 0)
flist->files[0]->flags |= FLAG_DIR_CREATED;
if (INFO_GTE(NAME, 1) || stdout_format_has_i)
rprintf(FINFO, "created directory %s\n", dest_path);
if (dry_run) {
/* Indicate that dest dir doesn't really exist. */
dry_run++;
}
if (!change_dir(dest_path, dry_run > 1 ? CD_SKIP_CHDIR : CD_NORMAL)) {
rsyserr(FERROR, errno, "change_dir#2 %s failed",
full_fname(dest_path));
exit_cleanup(RERR_FILESELECT);
}
return NULL;
}
/* Otherwise, we are writing a single file, possibly on top of an
* existing non-directory. Change to the item's parent directory
* (if it has a path component), return the basename of the
* destination file as the local name, and use mode 2. */
if (!cp)
return dest_path;
if (cp == dest_path)
dest_path = "/";
*cp = '\0';
if (!change_dir(dest_path, CD_NORMAL)) {
rsyserr(FERROR, errno, "change_dir#3 %s failed",
full_fname(dest_path));
exit_cleanup(RERR_FILESELECT);
}
*cp = '/';
return cp + 1;
} | 0 | [] | rsync | b7231c7d02cfb65d291af74ff66e7d8c507ee871 | 323,614,097,103,203,640,000,000,000,000,000,000,000 | 148 | Some extra file-list safety checks. |
static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path,
struct extent_buffer *leaf)
{
WARN_ON(btrfs_header_generation(leaf) != trans->transid);
del_ptr(root, path, 1, path->slots[1]);
/*
* btrfs_free_extent is expensive, we want to make sure we
* aren't holding any locks when we call it
*/
btrfs_unlock_up_safe(path, 0);
root_sub_used(root, leaf->len);
extent_buffer_get(leaf);
btrfs_free_tree_block(trans, root, leaf, 0, 1);
free_extent_buffer_stale(leaf);
} | 0 | [
"CWE-416",
"CWE-362"
] | linux | 5f5bc6b1e2d5a6f827bc860ef2dc5b6f365d1339 | 216,523,924,729,097,730,000,000,000,000,000,000,000 | 20 | Btrfs: make xattr replace operations atomic
Replacing a xattr consists of doing a lookup for its existing value, delete
the current value from the respective leaf, release the search path and then
finally insert the new value. This leaves a time window where readers (getxattr,
listxattrs) won't see any value for the xattr. Xattrs are used to store ACLs,
so this has security implications.
This change also fixes 2 other existing issues which were:
*) Deleting the old xattr value without verifying first if the new xattr will
fit in the existing leaf item (in case multiple xattrs are packed in the
same item due to name hash collision);
*) Returning -EEXIST when the flag XATTR_CREATE is given and the xattr doesn't
exist but we have have an existing item that packs muliple xattrs with
the same name hash as the input xattr. In this case we should return ENOSPC.
A test case for xfstests follows soon.
Thanks to Alexandre Oliva for reporting the non-atomicity of the xattr replace
implementation.
Reported-by: Alexandre Oliva <[email protected]>
Signed-off-by: Filipe Manana <[email protected]>
Signed-off-by: Chris Mason <[email protected]> |
void testUriUserInfoHostPort3() {
// User info without ":", no port
UriParserStateA stateA;
UriUriA uriA;
stateA.uri = &uriA;
// 0 4 0 3 0 7 01 0 9
const char * const input = "http" "://" "abcdefg" "@" "localhost";
TEST_ASSERT(0 == uriParseUriA(&stateA, input));
TEST_ASSERT(uriA.userInfo.first == input + 4 + 3);
TEST_ASSERT(uriA.userInfo.afterLast == input + 4 + 3 + 7);
TEST_ASSERT(uriA.hostText.first == input + 4 + 3 + 7 + 1);
TEST_ASSERT(uriA.hostText.afterLast == input + 4 + 3 + 7 + 1 + 9);
TEST_ASSERT(uriA.portText.first == NULL);
TEST_ASSERT(uriA.portText.afterLast == NULL);
uriFreeUriMembersA(&uriA);
} | 0 | [
"CWE-787"
] | uriparser | 864f5d4c127def386dd5cc926ad96934b297f04e | 326,302,897,812,754,330,000,000,000,000,000,000,000 | 17 | UriQuery.c: Fix out-of-bounds-write in ComposeQuery and ...Ex
Reported by Google Autofuzz team |
GF_Box *csgp_box_new()
{
ISOM_DECL_BOX_ALLOC(GF_CompactSampleGroupBox, GF_ISOM_BOX_TYPE_CSGP);
return (GF_Box *)tmp; | 0 | [
"CWE-476",
"CWE-787"
] | gpac | b8f8b202d4fc23eb0ab4ce71ae96536ca6f5d3f8 | 122,412,408,677,261,770,000,000,000,000,000,000,000 | 5 | fixed #1757 |
void manager_enumerate(Manager *m) {
UnitType c;
assert(m);
/* Let's ask every type to load all units from disk/kernel
* that it might know */
for (c = 0; c < _UNIT_TYPE_MAX; c++) {
if (!unit_type_supported(c)) {
log_debug("Unit type .%s is not supported on this system.", unit_type_to_string(c));
continue;
}
if (!unit_vtable[c]->enumerate)
continue;
unit_vtable[c]->enumerate(m);
}
manager_dispatch_load_queue(m);
} | 0 | [
"CWE-20"
] | systemd | 531ac2b2349da02acc9c382849758e07eb92b020 | 240,364,942,364,572,500,000,000,000,000,000,000,000 | 21 | If the notification message length is 0, ignore the message (#4237)
Fixes #4234.
Signed-off-by: Jorge Niedbalski <[email protected]> |
ClientRequestContext::ClientRequestContext(ClientHttpRequest *anHttp) :
http(cbdataReference(anHttp)),
acl_checklist(NULL),
redirect_state(REDIRECT_NONE),
store_id_state(REDIRECT_NONE),
host_header_verify_done(false),
http_access_done(false),
adapted_http_access_done(false),
#if USE_ADAPTATION
adaptation_acl_check_done(false),
#endif
redirect_done(false),
store_id_done(false),
no_cache_done(false),
interpreted_req_hdrs(false),
toClientMarkingDone(false),
#if USE_OPENSSL
sslBumpCheckDone(false),
#endif
error(NULL),
readNextRequest(false)
{
debugs(85, 3, "ClientRequestContext constructed, this=" << this);
} | 0 | [
"CWE-116"
] | squid | 7024fb734a59409889e53df2257b3fc817809fb4 | 193,101,068,739,356,800,000,000,000,000,000,000,000 | 24 | Handle more Range requests (#790)
Also removed some effectively unused code. |
int DaemonServer::init(uint64_t gid, entity_addr_t client_addr)
{
// Initialize Messenger
std::string public_msgr_type = g_conf->ms_public_type.empty() ?
g_conf->get_val<std::string>("ms_type") : g_conf->ms_public_type;
msgr = Messenger::create(g_ceph_context, public_msgr_type,
entity_name_t::MGR(gid),
"mgr",
getpid(), 0);
msgr->set_default_policy(Messenger::Policy::stateless_server(0));
// throttle clients
msgr->set_policy_throttlers(entity_name_t::TYPE_CLIENT,
client_byte_throttler.get(),
client_msg_throttler.get());
// servers
msgr->set_policy_throttlers(entity_name_t::TYPE_OSD,
osd_byte_throttler.get(),
osd_msg_throttler.get());
msgr->set_policy_throttlers(entity_name_t::TYPE_MDS,
mds_byte_throttler.get(),
mds_msg_throttler.get());
msgr->set_policy_throttlers(entity_name_t::TYPE_MON,
mon_byte_throttler.get(),
mon_msg_throttler.get());
int r = msgr->bind(g_conf->public_addr);
if (r < 0) {
derr << "unable to bind mgr to " << g_conf->public_addr << dendl;
return r;
}
msgr->set_myname(entity_name_t::MGR(gid));
msgr->set_addr_unknowns(client_addr);
msgr->start();
msgr->add_dispatcher_tail(this);
started_at = ceph_clock_now();
return 0;
} | 0 | [
"CWE-287",
"CWE-284"
] | ceph | 5ead97120e07054d80623dada90a5cc764c28468 | 235,367,306,239,380,230,000,000,000,000,000,000,000 | 43 | auth/cephx: add authorizer challenge
Allow the accepting side of a connection to reject an initial authorizer
with a random challenge. The connecting side then has to respond with an
updated authorizer proving they are able to decrypt the service's challenge
and that the new authorizer was produced for this specific connection
instance.
The accepting side requires this challenge and response unconditionally
if the client side advertises they have the feature bit. Servers wishing
to require this improved level of authentication simply have to require
the appropriate feature.
Signed-off-by: Sage Weil <[email protected]>
(cherry picked from commit f80b848d3f830eb6dba50123e04385173fa4540b)
# Conflicts:
# src/auth/Auth.h
# src/auth/cephx/CephxProtocol.cc
# src/auth/cephx/CephxProtocol.h
# src/auth/none/AuthNoneProtocol.h
# src/msg/Dispatcher.h
# src/msg/async/AsyncConnection.cc
- const_iterator
- ::decode vs decode
- AsyncConnection ctor arg noise
- get_random_bytes(), not cct->random() |
void upstream_timeoutcb(struct ev_loop *loop, ev_timer *w, int revents) {
auto downstream = static_cast<Downstream *>(w->data);
auto upstream = downstream->get_upstream();
auto which = revents == EV_READ ? "read" : "write";
if (LOG_ENABLED(INFO)) {
DLOG(INFO, downstream) << "upstream timeout stream_id="
<< downstream->get_stream_id() << " event=" << which;
}
downstream->disable_upstream_rtimer();
downstream->disable_upstream_wtimer();
upstream->on_timeout(downstream);
} | 0 | [] | nghttp2 | 319d5ab1c6d916b6b8a0d85b2ae3f01b3ad04f2c | 202,220,446,119,303,280,000,000,000,000,000,000,000 | 16 | nghttpx: Fix request stall
Fix request stall if backend connection is reused and buffer is full. |
template<typename t>
CImg<_cimg_Tt> operator+(const CImg<t>& img) const {
return CImg<_cimg_Tt>(*this,false)+=img; | 0 | [
"CWE-125"
] | CImg | 10af1e8c1ad2a58a0a3342a856bae63e8f257abb | 164,364,661,907,671,950,000,000,000,000,000,000,000 | 3 | Fix other issues in 'CImg<T>::load_bmp()'. |
void hvcc_del(GF_Box *s)
{
GF_HEVCConfigurationBox *ptr = (GF_HEVCConfigurationBox*)s;
if (ptr->config) gf_odf_hevc_cfg_del(ptr->config);
gf_free(ptr);
} | 0 | [
"CWE-119",
"CWE-787"
] | gpac | 90dc7f853d31b0a4e9441cba97feccf36d8b69a4 | 339,227,990,912,655,120,000,000,000,000,000,000,000 | 6 | fix some exploitable overflows (#994, #997) |
Subsets and Splits