func
stringlengths 0
484k
| target
int64 0
1
| cwe
listlengths 0
4
| project
stringclasses 799
values | commit_id
stringlengths 40
40
| hash
float64 1,215,700,430,453,689,100,000,000B
340,281,914,521,452,260,000,000,000,000B
| size
int64 1
24k
| message
stringlengths 0
13.3k
|
---|---|---|---|---|---|---|---|
/* Attach a caller-owned data buffer to a Packet without copying it.
 *
 * p       - packet to update
 * pktdata - externally owned buffer; NOT copied, may be NULL
 * pktlen  - length of pktdata in bytes
 *
 * Returns 0 on success, -1 when pktdata is NULL.
 *
 * NOTE(review): the packet length is recorded before the NULL check, so
 * even a failed call leaves the packet's length set to pktlen. */
inline int PacketSetData(Packet *p, uint8_t *pktdata, uint32_t pktlen)
{
    SET_PKT_LEN(p, (size_t)pktlen);
    if (unlikely(!pktdata)) {
        return -1;
    }
    /* Zero-copy: point at the external buffer instead of duplicating it. */
    p->ext_pkt = pktdata;
    p->flags |= PKT_ZERO_COPY;
    return 0;
} | 0 | [
"CWE-20"
]
| suricata | 11f3659f64a4e42e90cb3c09fcef66894205aefe | 114,546,564,549,643,570,000,000,000,000,000,000,000 | 11 | teredo: be stricter on what to consider valid teredo
Invalid Teredo can lead to valid DNS traffic (or other UDP traffic)
being misdetected as Teredo. This leads to false negatives in the
UDP payload inspection.
Make the teredo code only consider a packet teredo if the encapsulated
data was decoded without any 'invalid' events being set.
Bug #2736. |
/* Send a message on an already-resolved socket (io_uring path).
 *
 * Copies the user-space msghdr/iovec in via sendmsg_copy_msghdr(),
 * rejects any request carrying ancillary (control) data, then forwards
 * the plain data transfer to ____sys_sendmsg().
 *
 * Returns bytes sent on success, or a negative errno. */
long __sys_sendmsg_sock(struct socket *sock, struct user_msghdr __user *umsg,
unsigned int flags)
{
    struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
    struct sockaddr_storage address;
    struct msghdr msg = { .msg_name = &address };
    ssize_t err;
    err = sendmsg_copy_msghdr(&msg, umsg, flags, &iov);
    if (err)
        return err;
    /* disallow ancillary data requests from this path */
    if (msg.msg_control || msg.msg_controllen) {
        err = -EINVAL;
        goto out;
    }
    err = ____sys_sendmsg(sock, &msg, flags, NULL, 0);
out:
    /* NOTE(review): assumes sendmsg_copy_msghdr() leaves iov either NULL
     * or heap-allocated when the on-stack fast array suffices — confirm
     * against its implementation before changing this cleanup. */
    kfree(iov);
    return err;
} | 0 | []
| linux | d69e07793f891524c6bbf1e75b9ae69db4450953 | 198,201,028,473,020,850,000,000,000,000,000,000,000 | 22 | net: disallow ancillary data for __sys_{send,recv}msg_file()
Only io_uring uses (and added) these, and we want to disallow the
use of sendmsg/recvmsg for anything but regular data transfers.
Use the newly added prep helper to split the msghdr copy out from
the core function, to check for msg_control and msg_controllen
settings. If either is set, we return -EINVAL.
Acked-by: David S. Miller <[email protected]>
Signed-off-by: Jens Axboe <[email protected]> |
/* Queue an MQTT DISCONNECT control packet on the connection
 * (header only: no flags, no payload). */
void mg_mqtt_disconnect(struct mg_connection *nc) {
    mg_send_mqtt_header(nc, MG_MQTT_CMD_DISCONNECT, 0, 0);
} | 0 | [
"CWE-119",
"CWE-284",
"CWE-787"
]
| mongoose | b3e0f780c34cea88f057a62213c012aa88fe2deb | 66,567,227,238,075,080,000,000,000,000,000,000,000 | 3 | Fix heap-based overflow in parse_mqtt
PUBLISHED_FROM=3306592896298597fff5269634df0c1a1555113b |
/* Kernel-side worker for sctp_connectx()/connect(): create an SCTP
 * association to one or more peer addresses packed back-to-back in
 * kaddrs (addrs_size bytes total).
 *
 * sk         - local SCTP socket
 * kaddrs     - kernel copy of the packed sockaddr array
 * addrs_size - total byte length of kaddrs
 * flags      - file flags (only O_NONBLOCK is consulted, for the timeout)
 * assoc_id   - optional out-parameter receiving the new association id
 *
 * Returns 0 / bytes-style result from sctp_wait_for_connect() on success,
 * or a negative errno; on any failure after association creation the
 * association is freed via the out_free path. */
static int __sctp_connect(struct sock *sk, struct sockaddr *kaddrs,
int addrs_size, int flags, sctp_assoc_t *assoc_id)
{
struct sctp_sock *sp = sctp_sk(sk);
struct sctp_endpoint *ep = sp->ep;
struct sctp_transport *transport;
struct sctp_association *asoc;
void *addr_buf = kaddrs;
union sctp_addr *daddr;
struct sctp_af *af;
int walk_size, err;
long timeo;
/* Refuse when the socket is already connected/closing (or a TCP-style
 * socket is listening). */
if (sctp_sstate(sk, ESTABLISHED) || sctp_sstate(sk, CLOSING) ||
(sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING)))
return -EISCONN;
/* First address: validate family/length and check that no association
 * to it exists yet, then create the new association. */
daddr = addr_buf;
af = sctp_get_af_specific(daddr->sa.sa_family);
if (!af || af->sockaddr_len > addrs_size)
return -EINVAL;
err = sctp_verify_addr(sk, daddr, af->sockaddr_len);
if (err)
return err;
asoc = sctp_endpoint_lookup_assoc(ep, daddr, &transport);
if (asoc)
return asoc->state >= SCTP_STATE_ESTABLISHED ? -EISCONN
: -EALREADY;
err = sctp_connect_new_asoc(ep, daddr, NULL, &transport);
if (err)
return err;
asoc = transport->asoc;
addr_buf += af->sockaddr_len;
walk_size = af->sockaddr_len;
/* Walk any remaining packed addresses, bounds-checking each entry
 * before adding it as an additional peer transport. All peers must
 * use the same port as the first address. */
while (walk_size < addrs_size) {
err = -EINVAL;
if (walk_size + sizeof(sa_family_t) > addrs_size)
goto out_free;
daddr = addr_buf;
af = sctp_get_af_specific(daddr->sa.sa_family);
if (!af || af->sockaddr_len + walk_size > addrs_size)
goto out_free;
if (asoc->peer.port != ntohs(daddr->v4.sin_port))
goto out_free;
err = sctp_connect_add_peer(asoc, daddr, af->sockaddr_len);
if (err)
goto out_free;
addr_buf += af->sockaddr_len;
walk_size += af->sockaddr_len;
}
/* In case the user of sctp_connectx() wants an association
* id back, assign one now.
*/
if (assoc_id) {
err = sctp_assoc_set_id(asoc, GFP_KERNEL);
if (err < 0)
goto out_free;
}
/* Kick off the actual association establishment. */
err = sctp_primitive_ASSOCIATE(sock_net(sk), asoc, NULL);
if (err < 0)
goto out_free;
/* Initialize sk's dport and daddr for getpeername() */
inet_sk(sk)->inet_dport = htons(asoc->peer.port);
sp->pf->to_sk_daddr(daddr, sk);
sk->sk_err = 0;
if (assoc_id)
*assoc_id = asoc->assoc_id;
/* Block (subject to the socket's send timeout) until the association
 * reaches ESTABLISHED, unless the socket is non-blocking. */
timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
return sctp_wait_for_connect(asoc, &timeo);
out_free:
pr_debug("%s: took out_free path with asoc:%p kaddrs:%p err:%d\n",
__func__, asoc, kaddrs, err);
sctp_association_free(asoc);
return err;
} | 0 | [
"CWE-362"
]
| linux | b166a20b07382b8bc1dcee2a448715c9c2c81b5b | 240,288,751,249,244,100,000,000,000,000,000,000,000 | 89 | net/sctp: fix race condition in sctp_destroy_sock
If sctp_destroy_sock is called without sock_net(sk)->sctp.addr_wq_lock
held and sp->do_auto_asconf is true, then an element is removed
from the auto_asconf_splist without any proper locking.
This can happen in the following functions:
1. In sctp_accept, if sctp_sock_migrate fails.
2. In inet_create or inet6_create, if there is a bpf program
attached to BPF_CGROUP_INET_SOCK_CREATE which denies
creation of the sctp socket.
The bug is fixed by acquiring addr_wq_lock in sctp_destroy_sock
instead of sctp_close.
This addresses CVE-2021-23133.
Reported-by: Or Cohen <[email protected]>
Reviewed-by: Xin Long <[email protected]>
Fixes: 610236587600 ("bpf: Add new cgroup attach type to enable sock modifications")
Signed-off-by: Or Cohen <[email protected]>
Acked-by: Marcelo Ricardo Leitner <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
/* Build and queue the ClientKeyExchange handshake message for whichever
 * key-exchange algorithm the negotiated cipher suite selected (RSA,
 * Kerberos/RFC 2712, DH, ECDH, GOST, SRP or PSK), deriving the session
 * master secret as a side effect.
 *
 * Returns the result of ssl_do_write() (> 0 on success) or -1 on error
 * (state is set to SSL_ST_ERR).
 *
 * NOTE(review): this dataset row is labelled vulnerable (target = 1) and
 * paired with the "Fix PSK handling" commit: in this revision the PSK
 * branch stores the identity hint taken from the shared SSL_CTX
 * (s->ctx->psk_identity_hint) into the session, and copies the identity
 * with BUF_strdup — see the commit message for the corrected behavior. */
int ssl3_send_client_key_exchange(SSL *s)
{
unsigned char *p;
int n;
unsigned long alg_k;
#ifndef OPENSSL_NO_RSA
unsigned char *q;
EVP_PKEY *pkey = NULL;
#endif
#ifndef OPENSSL_NO_KRB5
KSSL_ERR kssl_err;
#endif /* OPENSSL_NO_KRB5 */
#ifndef OPENSSL_NO_ECDH
EC_KEY *clnt_ecdh = NULL;
const EC_POINT *srvr_ecpoint = NULL;
EVP_PKEY *srvr_pub_pkey = NULL;
unsigned char *encodedPoint = NULL;
int encoded_pt_len = 0;
BN_CTX *bn_ctx = NULL;
#endif
if (s->state == SSL3_ST_CW_KEY_EXCH_A) {
/* p points at the body of the handshake message being built. */
p = ssl_handshake_start(s);
alg_k = s->s3->tmp.new_cipher->algorithm_mkey;
/* Fool emacs indentation */
if (0) {
}
/* --- RSA: encrypt a fresh random premaster secret under the
 * server's (temporary or certificate) RSA key. --- */
#ifndef OPENSSL_NO_RSA
else if (alg_k & SSL_kRSA) {
RSA *rsa;
unsigned char tmp_buf[SSL_MAX_MASTER_KEY_LENGTH];
if (s->session->sess_cert == NULL) {
/*
* We should always have a server certificate with SSL_kRSA.
*/
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,
ERR_R_INTERNAL_ERROR);
goto err;
}
if (s->session->sess_cert->peer_rsa_tmp != NULL)
rsa = s->session->sess_cert->peer_rsa_tmp;
else {
pkey =
X509_get_pubkey(s->session->
sess_cert->peer_pkeys[SSL_PKEY_RSA_ENC].
x509);
if ((pkey == NULL) || (pkey->type != EVP_PKEY_RSA)
|| (pkey->pkey.rsa == NULL)) {
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,
ERR_R_INTERNAL_ERROR);
goto err;
}
rsa = pkey->pkey.rsa;
EVP_PKEY_free(pkey);
}
/* Premaster secret = client version (2 bytes) || 46 random bytes. */
tmp_buf[0] = s->client_version >> 8;
tmp_buf[1] = s->client_version & 0xff;
if (RAND_bytes(&(tmp_buf[2]), sizeof tmp_buf - 2) <= 0)
goto err;
s->session->master_key_length = sizeof tmp_buf;
q = p;
/* Fix buf for TLS and beyond */
if (s->version > SSL3_VERSION)
p += 2;
n = RSA_public_encrypt(sizeof tmp_buf,
tmp_buf, p, rsa, RSA_PKCS1_PADDING);
# ifdef PKCS1_CHECK
if (s->options & SSL_OP_PKCS1_CHECK_1)
p[1]++;
if (s->options & SSL_OP_PKCS1_CHECK_2)
tmp_buf[0] = 0x70;
# endif
if (n <= 0) {
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,
SSL_R_BAD_RSA_ENCRYPT);
goto err;
}
/* Fix buf for TLS and beyond */
if (s->version > SSL3_VERSION) {
s2n(n, q);
n += 2;
}
s->session->master_key_length =
s->method->ssl3_enc->generate_master_secret(s,
s->
session->master_key,
tmp_buf,
sizeof tmp_buf);
OPENSSL_cleanse(tmp_buf, sizeof tmp_buf);
}
#endif
/* --- Kerberos (RFC 2712) key exchange. --- */
#ifndef OPENSSL_NO_KRB5
else if (alg_k & SSL_kKRB5) {
krb5_error_code krb5rc;
KSSL_CTX *kssl_ctx = s->kssl_ctx;
/* krb5_data krb5_ap_req; */
krb5_data *enc_ticket;
krb5_data authenticator, *authp = NULL;
EVP_CIPHER_CTX ciph_ctx;
const EVP_CIPHER *enc = NULL;
unsigned char iv[EVP_MAX_IV_LENGTH];
unsigned char tmp_buf[SSL_MAX_MASTER_KEY_LENGTH];
unsigned char epms[SSL_MAX_MASTER_KEY_LENGTH + EVP_MAX_IV_LENGTH];
int padl, outl = sizeof(epms);
EVP_CIPHER_CTX_init(&ciph_ctx);
# ifdef KSSL_DEBUG
fprintf(stderr, "ssl3_send_client_key_exchange(%lx & %lx)\n",
alg_k, SSL_kKRB5);
# endif /* KSSL_DEBUG */
authp = NULL;
# ifdef KRB5SENDAUTH
if (KRB5SENDAUTH)
authp = &authenticator;
# endif /* KRB5SENDAUTH */
krb5rc = kssl_cget_tkt(kssl_ctx, &enc_ticket, authp, &kssl_err);
enc = kssl_map_enc(kssl_ctx->enctype);
if (enc == NULL)
goto err;
# ifdef KSSL_DEBUG
{
fprintf(stderr, "kssl_cget_tkt rtn %d\n", krb5rc);
if (krb5rc && kssl_err.text)
fprintf(stderr, "kssl_cget_tkt kssl_err=%s\n",
kssl_err.text);
}
# endif /* KSSL_DEBUG */
if (krb5rc) {
ssl3_send_alert(s, SSL3_AL_FATAL, SSL_AD_HANDSHAKE_FAILURE);
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE, kssl_err.reason);
goto err;
}
/*-
* 20010406 VRS - Earlier versions used KRB5 AP_REQ
* in place of RFC 2712 KerberosWrapper, as in:
*
* Send ticket (copy to *p, set n = length)
* n = krb5_ap_req.length;
* memcpy(p, krb5_ap_req.data, krb5_ap_req.length);
* if (krb5_ap_req.data)
* kssl_krb5_free_data_contents(NULL,&krb5_ap_req);
*
* Now using real RFC 2712 KerberosWrapper
* (Thanks to Simon Wilkinson <[email protected]>)
* Note: 2712 "opaque" types are here replaced
* with a 2-byte length followed by the value.
* Example:
* KerberosWrapper= xx xx asn1ticket 0 0 xx xx encpms
* Where "xx xx" = length bytes. Shown here with
* optional authenticator omitted.
*/
/* KerberosWrapper.Ticket */
s2n(enc_ticket->length, p);
memcpy(p, enc_ticket->data, enc_ticket->length);
p += enc_ticket->length;
n = enc_ticket->length + 2;
/* KerberosWrapper.Authenticator */
if (authp && authp->length) {
s2n(authp->length, p);
memcpy(p, authp->data, authp->length);
p += authp->length;
n += authp->length + 2;
free(authp->data);
authp->data = NULL;
authp->length = 0;
} else {
s2n(0, p); /* null authenticator length */
n += 2;
}
tmp_buf[0] = s->client_version >> 8;
tmp_buf[1] = s->client_version & 0xff;
if (RAND_bytes(&(tmp_buf[2]), sizeof tmp_buf - 2) <= 0)
goto err;
/*-
* 20010420 VRS. Tried it this way; failed.
* EVP_EncryptInit_ex(&ciph_ctx,enc, NULL,NULL);
* EVP_CIPHER_CTX_set_key_length(&ciph_ctx,
* kssl_ctx->length);
* EVP_EncryptInit_ex(&ciph_ctx,NULL, key,iv);
*/
memset(iv, 0, sizeof iv); /* per RFC 1510 */
EVP_EncryptInit_ex(&ciph_ctx, enc, NULL, kssl_ctx->key, iv);
EVP_EncryptUpdate(&ciph_ctx, epms, &outl, tmp_buf,
sizeof tmp_buf);
EVP_EncryptFinal_ex(&ciph_ctx, &(epms[outl]), &padl);
outl += padl;
if (outl > (int)sizeof epms) {
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,
ERR_R_INTERNAL_ERROR);
goto err;
}
EVP_CIPHER_CTX_cleanup(&ciph_ctx);
/* KerberosWrapper.EncryptedPreMasterSecret */
s2n(outl, p);
memcpy(p, epms, outl);
p += outl;
n += outl + 2;
s->session->master_key_length =
s->method->ssl3_enc->generate_master_secret(s,
s->
session->master_key,
tmp_buf,
sizeof tmp_buf);
OPENSSL_cleanse(tmp_buf, sizeof tmp_buf);
OPENSSL_cleanse(epms, outl);
}
#endif
/* --- Diffie-Hellman (ephemeral or certificate-based). --- */
#ifndef OPENSSL_NO_DH
else if (alg_k & (SSL_kEDH | SSL_kDHr | SSL_kDHd)) {
DH *dh_srvr, *dh_clnt;
SESS_CERT *scert = s->session->sess_cert;
if (scert == NULL) {
ssl3_send_alert(s, SSL3_AL_FATAL, SSL_AD_UNEXPECTED_MESSAGE);
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,
SSL_R_UNEXPECTED_MESSAGE);
goto err;
}
if (scert->peer_dh_tmp != NULL) {
dh_srvr = scert->peer_dh_tmp;
} else {
dh_srvr = get_server_static_dh_key(scert);
if (dh_srvr == NULL)
goto err;
}
if (s->s3->flags & TLS1_FLAGS_SKIP_CERT_VERIFY) {
/* Use client certificate key */
EVP_PKEY *clkey = s->cert->key->privatekey;
dh_clnt = NULL;
if (clkey)
dh_clnt = EVP_PKEY_get1_DH(clkey);
if (dh_clnt == NULL) {
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,
ERR_R_INTERNAL_ERROR);
goto err;
}
} else {
/* generate a new random key */
if ((dh_clnt = DHparams_dup(dh_srvr)) == NULL) {
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE, ERR_R_DH_LIB);
goto err;
}
if (!DH_generate_key(dh_clnt)) {
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE, ERR_R_DH_LIB);
DH_free(dh_clnt);
goto err;
}
}
/*
* use the 'p' output buffer for the DH key, but make sure to
* clear it out afterwards
*/
n = DH_compute_key(p, dh_srvr->pub_key, dh_clnt);
if (scert->peer_dh_tmp == NULL)
DH_free(dh_srvr);
if (n <= 0) {
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE, ERR_R_DH_LIB);
DH_free(dh_clnt);
goto err;
}
/* generate master key from the result */
s->session->master_key_length =
s->method->ssl3_enc->generate_master_secret(s,
s->
session->master_key,
p, n);
/* clean up */
memset(p, 0, n);
if (s->s3->flags & TLS1_FLAGS_SKIP_CERT_VERIFY)
n = 0;
else {
/* send off the data */
n = BN_num_bytes(dh_clnt->pub_key);
s2n(n, p);
BN_bn2bin(dh_clnt->pub_key, p);
n += 2;
}
DH_free(dh_clnt);
}
#endif
/* --- Elliptic-curve Diffie-Hellman. --- */
#ifndef OPENSSL_NO_ECDH
else if (alg_k & (SSL_kEECDH | SSL_kECDHr | SSL_kECDHe)) {
const EC_GROUP *srvr_group = NULL;
EC_KEY *tkey;
int ecdh_clnt_cert = 0;
int field_size = 0;
if (s->session->sess_cert == NULL) {
ssl3_send_alert(s, SSL3_AL_FATAL, SSL_AD_UNEXPECTED_MESSAGE);
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,
SSL_R_UNEXPECTED_MESSAGE);
goto err;
}
/*
* Did we send out the client's ECDH share for use in premaster
* computation as part of client certificate? If so, set
* ecdh_clnt_cert to 1.
*/
if ((alg_k & (SSL_kECDHr | SSL_kECDHe)) && (s->cert != NULL)) {
/*-
* XXX: For now, we do not support client
* authentication using ECDH certificates.
* To add such support, one needs to add
* code that checks for appropriate
* conditions and sets ecdh_clnt_cert to 1.
* For example, the cert have an ECC
* key on the same curve as the server's
* and the key should be authorized for
* key agreement.
*
* One also needs to add code in ssl3_connect
* to skip sending the certificate verify
* message.
*
* if ((s->cert->key->privatekey != NULL) &&
* (s->cert->key->privatekey->type ==
* EVP_PKEY_EC) && ...)
* ecdh_clnt_cert = 1;
*/
}
if (s->session->sess_cert->peer_ecdh_tmp != NULL) {
tkey = s->session->sess_cert->peer_ecdh_tmp;
} else {
/* Get the Server Public Key from Cert */
srvr_pub_pkey =
X509_get_pubkey(s->session->
sess_cert->peer_pkeys[SSL_PKEY_ECC].x509);
if ((srvr_pub_pkey == NULL)
|| (srvr_pub_pkey->type != EVP_PKEY_EC)
|| (srvr_pub_pkey->pkey.ec == NULL)) {
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,
ERR_R_INTERNAL_ERROR);
goto err;
}
tkey = srvr_pub_pkey->pkey.ec;
}
srvr_group = EC_KEY_get0_group(tkey);
srvr_ecpoint = EC_KEY_get0_public_key(tkey);
if ((srvr_group == NULL) || (srvr_ecpoint == NULL)) {
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,
ERR_R_INTERNAL_ERROR);
goto err;
}
if ((clnt_ecdh = EC_KEY_new()) == NULL) {
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,
ERR_R_MALLOC_FAILURE);
goto err;
}
if (!EC_KEY_set_group(clnt_ecdh, srvr_group)) {
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE, ERR_R_EC_LIB);
goto err;
}
if (ecdh_clnt_cert) {
/*
* Reuse key info from our certificate We only need our
* private key to perform the ECDH computation.
*/
const BIGNUM *priv_key;
tkey = s->cert->key->privatekey->pkey.ec;
priv_key = EC_KEY_get0_private_key(tkey);
if (priv_key == NULL) {
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,
ERR_R_MALLOC_FAILURE);
goto err;
}
if (!EC_KEY_set_private_key(clnt_ecdh, priv_key)) {
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE, ERR_R_EC_LIB);
goto err;
}
} else {
/* Generate a new ECDH key pair */
if (!(EC_KEY_generate_key(clnt_ecdh))) {
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,
ERR_R_ECDH_LIB);
goto err;
}
}
/*
* use the 'p' output buffer for the ECDH key, but make sure to
* clear it out afterwards
*/
field_size = EC_GROUP_get_degree(srvr_group);
if (field_size <= 0) {
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE, ERR_R_ECDH_LIB);
goto err;
}
n = ECDH_compute_key(p, (field_size + 7) / 8, srvr_ecpoint,
clnt_ecdh, NULL);
if (n <= 0) {
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE, ERR_R_ECDH_LIB);
goto err;
}
/* generate master key from the result */
s->session->master_key_length =
s->method->ssl3_enc->generate_master_secret(s,
s->
session->master_key,
p, n);
memset(p, 0, n); /* clean up */
if (ecdh_clnt_cert) {
/* Send empty client key exch message */
n = 0;
} else {
/*
* First check the size of encoding and allocate memory
* accordingly.
*/
encoded_pt_len =
EC_POINT_point2oct(srvr_group,
EC_KEY_get0_public_key(clnt_ecdh),
POINT_CONVERSION_UNCOMPRESSED,
NULL, 0, NULL);
encodedPoint = (unsigned char *)
OPENSSL_malloc(encoded_pt_len * sizeof(unsigned char));
bn_ctx = BN_CTX_new();
if ((encodedPoint == NULL) || (bn_ctx == NULL)) {
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,
ERR_R_MALLOC_FAILURE);
goto err;
}
/* Encode the public key */
n = EC_POINT_point2oct(srvr_group,
EC_KEY_get0_public_key(clnt_ecdh),
POINT_CONVERSION_UNCOMPRESSED,
encodedPoint, encoded_pt_len, bn_ctx);
*p = n; /* length of encoded point */
/* Encoded point will be copied here */
p += 1;
/* copy the point */
memcpy((unsigned char *)p, encodedPoint, n);
/* increment n to account for length field */
n += 1;
}
/* Free allocated memory */
BN_CTX_free(bn_ctx);
if (encodedPoint != NULL)
OPENSSL_free(encodedPoint);
if (clnt_ecdh != NULL)
EC_KEY_free(clnt_ecdh);
EVP_PKEY_free(srvr_pub_pkey);
}
#endif /* !OPENSSL_NO_ECDH */
else if (alg_k & SSL_kGOST) {
/* GOST key exchange message creation */
EVP_PKEY_CTX *pkey_ctx;
X509 *peer_cert;
size_t msglen;
unsigned int md_len;
int keytype;
unsigned char premaster_secret[32], shared_ukm[32], tmp[256];
EVP_MD_CTX *ukm_hash;
EVP_PKEY *pub_key;
/*
* Get server sertificate PKEY and create ctx from it
*/
peer_cert =
s->session->
sess_cert->peer_pkeys[(keytype = SSL_PKEY_GOST01)].x509;
if (!peer_cert)
peer_cert =
s->session->
sess_cert->peer_pkeys[(keytype = SSL_PKEY_GOST94)].x509;
if (!peer_cert) {
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,
SSL_R_NO_GOST_CERTIFICATE_SENT_BY_PEER);
goto err;
}
pkey_ctx = EVP_PKEY_CTX_new(pub_key =
X509_get_pubkey(peer_cert), NULL);
/*
* If we have send a certificate, and certificate key
*
* * parameters match those of server certificate, use
* certificate key for key exchange
*/
/* Otherwise, generate ephemeral key pair */
EVP_PKEY_encrypt_init(pkey_ctx);
/* Generate session key */
if (RAND_bytes(premaster_secret, 32) <= 0) {
EVP_PKEY_CTX_free(pkey_ctx);
goto err;
}
/*
* If we have client certificate, use its secret as peer key
*/
if (s->s3->tmp.cert_req && s->cert->key->privatekey) {
if (EVP_PKEY_derive_set_peer
(pkey_ctx, s->cert->key->privatekey) <= 0) {
/*
* If there was an error - just ignore it. Ephemeral key
* * would be used
*/
ERR_clear_error();
}
}
/*
* Compute shared IV and store it in algorithm-specific context
* data
*/
ukm_hash = EVP_MD_CTX_create();
EVP_DigestInit(ukm_hash,
EVP_get_digestbynid(NID_id_GostR3411_94));
EVP_DigestUpdate(ukm_hash, s->s3->client_random,
SSL3_RANDOM_SIZE);
EVP_DigestUpdate(ukm_hash, s->s3->server_random,
SSL3_RANDOM_SIZE);
EVP_DigestFinal_ex(ukm_hash, shared_ukm, &md_len);
EVP_MD_CTX_destroy(ukm_hash);
if (EVP_PKEY_CTX_ctrl
(pkey_ctx, -1, EVP_PKEY_OP_ENCRYPT, EVP_PKEY_CTRL_SET_IV, 8,
shared_ukm) < 0) {
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,
SSL_R_LIBRARY_BUG);
goto err;
}
/* Make GOST keytransport blob message */
/*
* Encapsulate it into sequence
*/
*(p++) = V_ASN1_SEQUENCE | V_ASN1_CONSTRUCTED;
msglen = 255;
if (EVP_PKEY_encrypt(pkey_ctx, tmp, &msglen, premaster_secret, 32)
< 0) {
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,
SSL_R_LIBRARY_BUG);
goto err;
}
if (msglen >= 0x80) {
*(p++) = 0x81;
*(p++) = msglen & 0xff;
n = msglen + 3;
} else {
*(p++) = msglen & 0xff;
n = msglen + 2;
}
memcpy(p, tmp, msglen);
/* Check if pubkey from client certificate was used */
if (EVP_PKEY_CTX_ctrl
(pkey_ctx, -1, -1, EVP_PKEY_CTRL_PEER_KEY, 2, NULL) > 0) {
/* Set flag "skip certificate verify" */
s->s3->flags |= TLS1_FLAGS_SKIP_CERT_VERIFY;
}
EVP_PKEY_CTX_free(pkey_ctx);
s->session->master_key_length =
s->method->ssl3_enc->generate_master_secret(s,
s->
session->master_key,
premaster_secret,
32);
EVP_PKEY_free(pub_key);
}
/* --- SRP (RFC 5054): send the client public value A. --- */
#ifndef OPENSSL_NO_SRP
else if (alg_k & SSL_kSRP) {
if (s->srp_ctx.A != NULL) {
/* send off the data */
n = BN_num_bytes(s->srp_ctx.A);
s2n(n, p);
BN_bn2bin(s->srp_ctx.A, p);
n += 2;
} else {
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,
ERR_R_INTERNAL_ERROR);
goto err;
}
if (s->session->srp_username != NULL)
OPENSSL_free(s->session->srp_username);
s->session->srp_username = BUF_strdup(s->srp_ctx.login);
if (s->session->srp_username == NULL) {
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,
ERR_R_MALLOC_FAILURE);
goto err;
}
if ((s->session->master_key_length =
SRP_generate_client_master_secret(s,
s->session->master_key)) <
0) {
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,
ERR_R_INTERNAL_ERROR);
goto err;
}
}
#endif
/* --- PSK (RFC 4279): look up the pre-shared key via the client
 * callback and build the premaster secret from it. This is the
 * branch changed by the fix commit associated with this sample. --- */
#ifndef OPENSSL_NO_PSK
else if (alg_k & SSL_kPSK) {
/*
* The callback needs PSK_MAX_IDENTITY_LEN + 1 bytes to return a
* \0-terminated identity. The last byte is for us for simulating
* strnlen.
*/
char identity[PSK_MAX_IDENTITY_LEN + 2];
size_t identity_len;
unsigned char *t = NULL;
unsigned char psk_or_pre_ms[PSK_MAX_PSK_LEN * 2 + 4];
unsigned int pre_ms_len = 0, psk_len = 0;
int psk_err = 1;
n = 0;
if (s->psk_client_callback == NULL) {
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,
SSL_R_PSK_NO_CLIENT_CB);
goto err;
}
memset(identity, 0, sizeof(identity));
psk_len = s->psk_client_callback(s, s->ctx->psk_identity_hint,
identity, sizeof(identity) - 1,
psk_or_pre_ms,
sizeof(psk_or_pre_ms));
if (psk_len > PSK_MAX_PSK_LEN) {
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,
ERR_R_INTERNAL_ERROR);
goto psk_err;
} else if (psk_len == 0) {
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,
SSL_R_PSK_IDENTITY_NOT_FOUND);
goto psk_err;
}
identity[PSK_MAX_IDENTITY_LEN + 1] = '\0';
identity_len = strlen(identity);
if (identity_len > PSK_MAX_IDENTITY_LEN) {
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,
ERR_R_INTERNAL_ERROR);
goto psk_err;
}
/* create PSK pre_master_secret */
pre_ms_len = 2 + psk_len + 2 + psk_len;
t = psk_or_pre_ms;
memmove(psk_or_pre_ms + psk_len + 4, psk_or_pre_ms, psk_len);
s2n(psk_len, t);
memset(t, 0, psk_len);
t += psk_len;
s2n(psk_len, t);
if (s->session->psk_identity_hint != NULL)
OPENSSL_free(s->session->psk_identity_hint);
s->session->psk_identity_hint =
BUF_strdup(s->ctx->psk_identity_hint);
if (s->ctx->psk_identity_hint != NULL
&& s->session->psk_identity_hint == NULL) {
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,
ERR_R_MALLOC_FAILURE);
goto psk_err;
}
if (s->session->psk_identity != NULL)
OPENSSL_free(s->session->psk_identity);
s->session->psk_identity = BUF_strdup(identity);
if (s->session->psk_identity == NULL) {
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,
ERR_R_MALLOC_FAILURE);
goto psk_err;
}
s->session->master_key_length =
s->method->ssl3_enc->generate_master_secret(s,
s->
session->master_key,
psk_or_pre_ms,
pre_ms_len);
s2n(identity_len, p);
memcpy(p, identity, identity_len);
n = 2 + identity_len;
psk_err = 0;
psk_err:
OPENSSL_cleanse(identity, sizeof(identity));
OPENSSL_cleanse(psk_or_pre_ms, sizeof(psk_or_pre_ms));
if (psk_err != 0) {
ssl3_send_alert(s, SSL3_AL_FATAL, SSL_AD_HANDSHAKE_FAILURE);
goto err;
}
}
#endif
else {
ssl3_send_alert(s, SSL3_AL_FATAL, SSL_AD_HANDSHAKE_FAILURE);
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE, ERR_R_INTERNAL_ERROR);
goto err;
}
/* Finalize the handshake header with the computed body length n
 * and advance the state machine to the write phase. */
ssl_set_handshake_header(s, SSL3_MT_CLIENT_KEY_EXCHANGE, n);
s->state = SSL3_ST_CW_KEY_EXCH_B;
}
/* SSL3_ST_CW_KEY_EXCH_B */
return ssl_do_write(s);
err:
#ifndef OPENSSL_NO_ECDH
BN_CTX_free(bn_ctx);
if (encodedPoint != NULL)
OPENSSL_free(encodedPoint);
if (clnt_ecdh != NULL)
EC_KEY_free(clnt_ecdh);
EVP_PKEY_free(srvr_pub_pkey);
#endif
s->state = SSL_ST_ERR;
return (-1);
} | 1 | [
"CWE-362"
]
| openssl | 3c66a669dfc7b3792f7af0758ea26fe8502ce70c | 256,052,501,858,540,340,000,000,000,000,000,000,000 | 750 | Fix PSK handling.
The PSK identity hint should be stored in the SSL_SESSION structure
and not in the parent context (which will overwrite values used
by other SSL structures with the same SSL_CTX).
Use BUF_strndup when copying identity as it may not be null terminated.
Reviewed-by: Tim Hudson <[email protected]> |
/* ff-memless playback hook: start or stop a force-feedback effect.
 *
 * dev       - input device owning the memless FF state
 * effect_id - index into ml->states
 * value     - > 0: play the effect `value` times; 0: stop it
 *
 * Always returns 0. */
static int ml_ff_playback(struct input_dev *dev, int effect_id, int value)
{
    struct ml_device *ml = dev->ff->private;
    struct ml_effect_state *state = &ml->states[effect_id];
    if (value > 0) {
        pr_debug("initiated play\n");
        /* Schedule start/stop times from the effect's replay parameters. */
        __set_bit(FF_EFFECT_STARTED, &state->flags);
        state->count = value;
        state->play_at = jiffies +
            msecs_to_jiffies(state->effect->replay.delay);
        state->stop_at = state->play_at +
            msecs_to_jiffies(state->effect->replay.length);
        state->adj_at = state->play_at;
    } else {
        pr_debug("initiated stop\n");
        /* If currently playing, request an abort; otherwise just clear
         * the started flag so it never begins. */
        if (test_bit(FF_EFFECT_PLAYING, &state->flags))
            __set_bit(FF_EFFECT_ABORTING, &state->flags);
        else
            __clear_bit(FF_EFFECT_STARTED, &state->flags);
    }
    /* Re-evaluate and apply the combined effect state immediately. */
    ml_play_effects(ml);
    return 0;
} | 0 | [
"CWE-416"
]
| linux | fa3a5a1880c91bb92594ad42dfe9eedad7996b86 | 151,065,715,036,261,460,000,000,000,000,000,000,000 | 29 | Input: ff-memless - kill timer in destroy()
No timer must be left running when the device goes away.
Signed-off-by: Oliver Neukum <[email protected]>
Reported-and-tested-by: [email protected]
Cc: [email protected]
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Dmitry Torokhov <[email protected]> |
/* Enqueue a 1-D OpenCL kernel over worksizeGlobal work-items, splitting
 * the range into chunks of at most m_worksizeMax and advancing the
 * global offset between chunks.
 *
 * worksizeLocal == 0 lets the OpenCL runtime pick the local size (NULL
 * is passed). pEvent, if given, is handed to every enqueue call, so it
 * ends up referring to the last chunk.
 *
 * Throws OpenCLException when clEnqueueNDRangeKernel reports an error. */
void Dispatcher::enqueueKernel(cl_command_queue & clQueue, cl_kernel & clKernel, size_t worksizeGlobal, const size_t worksizeLocal, cl_event * pEvent = NULL) {
    const size_t worksizeMax = m_worksizeMax;
    size_t worksizeOffset = 0;
    while (worksizeGlobal) {
        const size_t worksizeRun = std::min(worksizeGlobal, worksizeMax);
        const size_t * const pWorksizeLocal = (worksizeLocal == 0 ? NULL : &worksizeLocal);
        const auto res = clEnqueueNDRangeKernel(clQueue, clKernel, 1, &worksizeOffset, &worksizeRun, pWorksizeLocal, 0, NULL, pEvent);
        OpenCLException::throwIfError("kernel queueing failed", res);
        worksizeGlobal -= worksizeRun;
        worksizeOffset += worksizeRun;
    }
} | 0 | [
"CWE-703"
]
| profanity | 69ff010c14ff80ec14246772db6a245aa59e6689 | 263,898,472,981,233,150,000,000,000,000,000,000,000 | 13 | [FIX] pritive key seed . |
/* Scale a truecolor image to new_width x new_height using bilinear
 * interpolation in fixed-point (gdFixed) arithmetic.
 *
 * For each destination pixel, samples the 2x2 neighbourhood of the
 * corresponding source position (via getPixelOverflowTC, bgColor 0 for
 * out-of-range reads) and blends R/G/B/A channels by the fractional
 * weights f_w1..f_w4.
 *
 * Returns a newly allocated truecolor image, or NULL on allocation
 * failure. Caller owns (and must destroy) the result. */
static gdImagePtr gdImageScaleBilinearTC(gdImagePtr im, const unsigned int new_width, const unsigned int new_height)
{
long dst_w = MAX(1, new_width);
long dst_h = MAX(1, new_height);
/* Source-step per destination pixel in each axis. */
float dx = (float)gdImageSX(im) / (float)dst_w;
float dy = (float)gdImageSY(im) / (float)dst_h;
gdFixed f_dx = gd_ftofx(dx);
gdFixed f_dy = gd_ftofx(dy);
gdFixed f_1 = gd_itofx(1);
int dst_offset_h;
int dst_offset_v = 0;
long i;
gdImagePtr new_img;
new_img = gdImageCreateTrueColor(new_width, new_height);
if (!new_img){
return NULL;
}
for (i=0; i < dst_h; i++) {
long j;
dst_offset_h = 0;
for (j=0; j < dst_w; j++) {
/* Update bitmap */
gdFixed f_i = gd_itofx(i);
gdFixed f_j = gd_itofx(j);
/* (f_b, f_a) = fixed-point source coordinates; (n, m) their
 * integer parts, (f_g, f_f) the fractional remainders. */
gdFixed f_a = gd_mulfx(f_i, f_dy);
gdFixed f_b = gd_mulfx(f_j, f_dx);
const gdFixed m = gd_fxtoi(f_a);
const gdFixed n = gd_fxtoi(f_b);
gdFixed f_f = f_a - gd_itofx(m);
gdFixed f_g = f_b - gd_itofx(n);
/* Bilinear blend weights for the four neighbours. */
const gdFixed f_w1 = gd_mulfx(f_1-f_f, f_1-f_g);
const gdFixed f_w2 = gd_mulfx(f_1-f_f, f_g);
const gdFixed f_w3 = gd_mulfx(f_f, f_1-f_g);
const gdFixed f_w4 = gd_mulfx(f_f, f_g);
unsigned int pixel1;
unsigned int pixel2;
unsigned int pixel3;
unsigned int pixel4;
register gdFixed f_r1, f_r2, f_r3, f_r4,
f_g1, f_g2, f_g3, f_g4,
f_b1, f_b2, f_b3, f_b4,
f_a1, f_a2, f_a3, f_a4;
/* 0 for bgColor, nothing gets outside anyway */
pixel1 = getPixelOverflowTC(im, n, m, 0);
pixel2 = getPixelOverflowTC(im, n + 1, m, 0);
pixel3 = getPixelOverflowTC(im, n, m + 1, 0);
pixel4 = getPixelOverflowTC(im, n + 1, m + 1, 0);
f_r1 = gd_itofx(gdTrueColorGetRed(pixel1));
f_r2 = gd_itofx(gdTrueColorGetRed(pixel2));
f_r3 = gd_itofx(gdTrueColorGetRed(pixel3));
f_r4 = gd_itofx(gdTrueColorGetRed(pixel4));
f_g1 = gd_itofx(gdTrueColorGetGreen(pixel1));
f_g2 = gd_itofx(gdTrueColorGetGreen(pixel2));
f_g3 = gd_itofx(gdTrueColorGetGreen(pixel3));
f_g4 = gd_itofx(gdTrueColorGetGreen(pixel4));
f_b1 = gd_itofx(gdTrueColorGetBlue(pixel1));
f_b2 = gd_itofx(gdTrueColorGetBlue(pixel2));
f_b3 = gd_itofx(gdTrueColorGetBlue(pixel3));
f_b4 = gd_itofx(gdTrueColorGetBlue(pixel4));
f_a1 = gd_itofx(gdTrueColorGetAlpha(pixel1));
f_a2 = gd_itofx(gdTrueColorGetAlpha(pixel2));
f_a3 = gd_itofx(gdTrueColorGetAlpha(pixel3));
f_a4 = gd_itofx(gdTrueColorGetAlpha(pixel4));
{
/* Weighted sum per channel, converted back to 8-bit. */
const unsigned char red = (unsigned char) gd_fxtoi(gd_mulfx(f_w1, f_r1) + gd_mulfx(f_w2, f_r2) + gd_mulfx(f_w3, f_r3) + gd_mulfx(f_w4, f_r4));
const unsigned char green = (unsigned char) gd_fxtoi(gd_mulfx(f_w1, f_g1) + gd_mulfx(f_w2, f_g2) + gd_mulfx(f_w3, f_g3) + gd_mulfx(f_w4, f_g4));
const unsigned char blue = (unsigned char) gd_fxtoi(gd_mulfx(f_w1, f_b1) + gd_mulfx(f_w2, f_b2) + gd_mulfx(f_w3, f_b3) + gd_mulfx(f_w4, f_b4));
const unsigned char alpha = (unsigned char) gd_fxtoi(gd_mulfx(f_w1, f_a1) + gd_mulfx(f_w2, f_a2) + gd_mulfx(f_w3, f_a3) + gd_mulfx(f_w4, f_a4));
new_img->tpixels[dst_offset_v][dst_offset_h] = gdTrueColorAlpha(red, green, blue, alpha);
}
dst_offset_h++;
}
dst_offset_v++;
}
return new_img;
} | 0 | [
"CWE-399"
]
| libgd | 4751b606fa38edc456d627140898a7ec679fcc24 | 318,586,987,082,747,200,000,000,000,000,000,000,000 | 84 | gdImageScaleTwoPass memory leak fix
Fixing memory leak in gdImageScaleTwoPass, as reported by @cmb69 and
confirmed by @vapier. This bug actually bit me in production and I'm
very thankful that it was reported with an easy fix.
Fixes #173. |
/* Write one f2fs meta-area page at blk_addr.
 *
 * Grabs (and locks) the meta page, fills it from src — or zero-fills it
 * when src is NULL — then marks it dirty and releases it. */
void update_meta_page(struct f2fs_sb_info *sbi, void *src, block_t blk_addr)
{
    struct page *page = grab_meta_page(sbi, blk_addr);
    void *dst = page_address(page);
    /* Copy caller data, or zero the page when no source is given. */
    if (src)
        memcpy(dst, src, PAGE_SIZE);
    else
        memset(dst, 0, PAGE_SIZE);
    set_page_dirty(page);
    /* Drop the reference and unlock (second arg = 1 unlocks). */
    f2fs_put_page(page, 1);
} | 0 | [
"CWE-20"
]
| linux | 638164a2718f337ea224b747cf5977ef143166a4 | 25,930,585,253,941,263,000,000,000,000,000,000,000 | 12 | f2fs: fix potential panic during fstrim
As Ju Hyung Park reported:
"When 'fstrim' is called for manual trim, a BUG() can be triggered
randomly with this patch.
I'm seeing this issue on both x86 Desktop and arm64 Android phone.
On x86 Desktop, this was caused during Ubuntu boot-up. I have a
cronjob installed which calls 'fstrim -v /' during boot. On arm64
Android, this was caused during GC looping with 1ms gc_min_sleep_time
& gc_max_sleep_time."
Root cause of this issue is that f2fs_wait_discard_bios can only be
used by f2fs_put_super, because during put_super there must be no
other referrers, so it can ignore discard entry's reference count
when removing the entry, otherwise in other caller we will hit bug_on
in __remove_discard_cmd as there may be other issuer added reference
count in discard entry.
Thread A Thread B
- issue_discard_thread
- f2fs_ioc_fitrim
- f2fs_trim_fs
- f2fs_wait_discard_bios
- __issue_discard_cmd
- __submit_discard_cmd
- __wait_discard_cmd
- dc->ref++
- __wait_one_discard_bio
- __wait_discard_cmd
- __remove_discard_cmd
- f2fs_bug_on(sbi, dc->ref)
Fixes: 969d1b180d987c2be02de890d0fff0f66a0e80de
Reported-by: Ju Hyung Park <[email protected]>
Signed-off-by: Chao Yu <[email protected]>
Signed-off-by: Jaegeuk Kim <[email protected]> |
/* Accessor into Vim's global option table: returns the variable pointer
 * (options[opt_idx].var) backing option opt_idx.
 * NOTE(review): the dataset sample omits the return-type line that
 * precedes the function name in the original source file. */
get_option_var(int opt_idx)
{
    return options[opt_idx].var;
} | 0 | [
"CWE-122"
]
| vim | b7081e135a16091c93f6f5f7525a5c58fb7ca9f9 | 156,944,685,275,752,950,000,000,000,000,000,000,000 | 4 | patch 8.2.3402: invalid memory access when using :retab with large value
Problem: Invalid memory access when using :retab with large value.
Solution: Check the number is positive. |
/* Handle a received BGP dynamic CAPABILITY message.
 *
 * Verifies that dynamic capability was negotiated and that the peer is
 * in Established state (sending a NOTIFICATION and returning BGP_Stop
 * otherwise), then hands the message body of `size` bytes to
 * bgp_capability_msg_parse().
 *
 * Returns the FSM event produced by parsing, or BGP_Stop on error. */
int bgp_capability_receive(struct peer *peer, bgp_size_t size)
{
    uint8_t *pnt;
    /* Fetch pointer. */
    pnt = stream_pnt(peer->curr);
    if (bgp_debug_neighbor_events(peer))
        zlog_debug("%s rcv CAPABILITY", peer->host);
    /* If peer does not have the capability, send notification. */
    if (!CHECK_FLAG(peer->cap, PEER_CAP_DYNAMIC_ADV)) {
        flog_err(EC_BGP_NO_CAP,
                 "%s [Error] BGP dynamic capability is not enabled",
                 peer->host);
        bgp_notify_send(peer, BGP_NOTIFY_HEADER_ERR,
                        BGP_NOTIFY_HEADER_BAD_MESTYPE);
        return BGP_Stop;
    }
    /* Status must be Established. */
    if (!peer_established(peer)) {
        flog_err(
            EC_BGP_NO_CAP,
            "%s [Error] Dynamic capability packet received under status %s",
            peer->host,
            lookup_msg(bgp_status_msg, peer->status, NULL));
        bgp_notify_send(peer, BGP_NOTIFY_FSM_ERR,
                        bgp_fsm_error_subcode(peer->status));
        return BGP_Stop;
    }
    /* Parse packet. */
    return bgp_capability_msg_parse(peer, pnt, size);
} | 0 | [
"CWE-125"
]
| frr | ff6db1027f8f36df657ff2e5ea167773752537ed | 186,119,476,263,432,260,000,000,000,000,000,000,000 | 35 | bgpd: Make sure hdr length is at a minimum of what is expected
Ensure that if the capability length specified is enough data.
Signed-off-by: Donald Sharp <[email protected]> |
/*
 * Probe the Broadcom UniMAC MDIO controller.
 *
 * Maps the register window, allocates and configures an mii_bus (taking
 * wait-function overrides from platform data when present), and registers
 * it via the device tree.  priv and the register mapping are devm-managed
 * and need no explicit cleanup; only the mii_bus allocation must be freed
 * by hand on the error path.
 */
static int unimac_mdio_probe(struct platform_device *pdev)
{
	struct unimac_mdio_pdata *pdata = pdev->dev.platform_data;
	struct unimac_mdio_priv *priv;
	struct device_node *np;
	struct mii_bus *bus;
	struct resource *r;
	int ret;

	np = pdev->dev.of_node;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r)
		return -EINVAL;

	/* Just ioremap, as this MDIO block is usually integrated into an
	 * Ethernet MAC controller register range
	 */
	priv->base = devm_ioremap(&pdev->dev, r->start, resource_size(r));
	if (!priv->base) {
		dev_err(&pdev->dev, "failed to remap register\n");
		return -ENOMEM;
	}

	priv->mii_bus = mdiobus_alloc();
	if (!priv->mii_bus)
		return -ENOMEM;

	bus = priv->mii_bus;
	bus->priv = priv;
	if (pdata) {
		/* Platform data supplies the bus name and a custom
		 * completion-wait hook. */
		bus->name = pdata->bus_name;
		priv->wait_func = pdata->wait_func;
		priv->wait_func_data = pdata->wait_func_data;
		bus->phy_mask = ~pdata->phy_mask;
	} else {
		bus->name = "unimac MII bus";
		priv->wait_func_data = priv;
		priv->wait_func = unimac_mdio_poll;
	}
	bus->parent = &pdev->dev;
	bus->read = unimac_mdio_read;
	bus->write = unimac_mdio_write;
	bus->reset = unimac_mdio_reset;
	snprintf(bus->id, MII_BUS_ID_SIZE, "%s-%d", pdev->name, pdev->id);

	ret = of_mdiobus_register(bus, np);
	if (ret) {
		dev_err(&pdev->dev, "MDIO bus registration failed\n");
		goto out_mdio_free;
	}

	platform_set_drvdata(pdev, priv);

	dev_info(&pdev->dev, "Broadcom UniMAC MDIO bus at 0x%p\n", priv->base);

	return 0;

out_mdio_free:
	/* Only the mii_bus is manually allocated; everything else is devm. */
	mdiobus_free(bus);

	return ret;
}
"CWE-476"
]
| linux | 297a6961ffb8ff4dc66c9fbf53b924bd1dda05d5 | 295,094,669,149,773,620,000,000,000,000,000,000,000 | 66 | net: phy: mdio-bcm-unimac: fix potential NULL dereference in unimac_mdio_probe()
platform_get_resource() may fail and return NULL, so we should
better check it's return value to avoid a NULL pointer dereference
a bit later in the code.
This is detected by Coccinelle semantic patch.
@@
expression pdev, res, n, t, e, e1, e2;
@@
res = platform_get_resource(pdev, t, n);
+ if (!res)
+ return -EINVAL;
... when != res == NULL
e = devm_ioremap(e1, res->start, e2);
Signed-off-by: Wei Yongjun <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
private uint64_t
file_strncmp(const char *s1, const char *s2, size_t len, size_t maxlen,
uint32_t flags)
{
/*
* Convert the source args to unsigned here so that (1) the
* compare will be unsigned as it is in strncmp() and (2) so
* the ctype functions will work correctly without extra
* casting.
*/
const unsigned char *a = RCAST(const unsigned char *, s1);
const unsigned char *b = RCAST(const unsigned char *, s2);
uint32_t ws = flags & (STRING_COMPACT_WHITESPACE |
STRING_COMPACT_OPTIONAL_WHITESPACE);
const unsigned char *eb = b + (ws ? maxlen : len);
uint64_t v;
/*
* What we want here is v = strncmp(s1, s2, len),
* but ignoring any nulls.
*/
v = 0;
if (0L == flags) { /* normal string: do it fast */
while (len-- > 0)
if ((v = *b++ - *a++) != '\0')
break;
}
else { /* combine the others */
while (len-- > 0) {
if (b >= eb) {
v = 1;
break;
}
if ((flags & STRING_IGNORE_LOWERCASE) &&
islower(*a)) {
if ((v = tolower(*b++) - *a++) != '\0')
break;
}
else if ((flags & STRING_IGNORE_UPPERCASE) &&
isupper(*a)) {
if ((v = toupper(*b++) - *a++) != '\0')
break;
}
else if ((flags & STRING_COMPACT_WHITESPACE) &&
isspace(*a)) {
/* XXX Dirty. The data and the pattern is what is causing this.
Revert _i for the next port and see if it still matters. */
uint32_t _i = 0;
a++;
if (isspace(*b++)) {
if (!isspace(*a))
while (EXPECTED(_i++ < 2048) && b < eb && isspace(*b))
b++;
}
else {
v = 1;
break;
}
}
else if ((flags & STRING_COMPACT_OPTIONAL_WHITESPACE) &&
isspace(*a)) {
a++;
while (b < eb && isspace(*b))
b++;
}
else {
if ((v = *b++ - *a++) != '\0')
break;
}
}
} | 0 | [
"CWE-787"
]
| php-src | ca6d511fa54b34d5b75bf120a86482a1b9e1e686 | 132,725,832,259,989,760,000,000,000,000,000,000,000 | 72 | Fix #81723: Memory corruption in finfo_buffer()
We need to use the same memory allocator throughout. |
/**
 * Drive a single aggregation-expression test case.
 *
 * Reads the case's spec document: "input" supplies the argument value(s);
 * each field of "expected" names an expression operator and the Value it
 * must produce for those arguments; "error" lists operator names that must
 * throw (at parse or evaluation time) for those arguments.
 */
void run() {
    intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
    const Document spec = getSpec();
    const Value args = spec["input"];
    if (!spec["expected"].missing()) {
        FieldIterator fields(spec["expected"].getDocument());
        while (fields.more()) {
            const Document::FieldPair field(fields.next());
            const Value expected = field.second;
            // Build {"$op": <args>} and parse it into an expression tree.
            const BSONObj obj = BSON(field.first << args);
            VariablesParseState vps = expCtx->variablesParseState;
            const intrusive_ptr<Expression> expr =
                Expression::parseExpression(expCtx, obj, vps);
            const Value result = expr->evaluate(Document());
            if (ValueComparator().evaluate(result != expected)) {
                string errMsg = str::stream()
                    << "for expression " << field.first.toString() << " with argument "
                    << args.toString() << " full tree: " << expr->serialize(false).toString()
                    << " expected: " << expected.toString()
                    << " but got: " << result.toString();
                FAIL(errMsg);
            }
            // TODO test optimize here
        }
    }
    if (!spec["error"].missing()) {
        const vector<Value>& asserters = spec["error"].getArray();
        size_t n = asserters.size();
        for (size_t i = 0; i < n; i++) {
            const BSONObj obj = BSON(asserters[i].getString() << args);
            VariablesParseState vps = expCtx->variablesParseState;
            ASSERT_THROWS(
                {
                    // NOTE: parse and evaluation failures are treated the
                    // same
                    const intrusive_ptr<Expression> expr =
                        Expression::parseExpression(expCtx, obj, vps);
                    expr->evaluate(Document());
                },
                AssertionException);
        }
    }
}
"CWE-835"
]
| mongo | 0a076417d1d7fba3632b73349a1fd29a83e68816 | 171,016,830,294,629,730,000,000,000,000,000,000,000 | 43 | SERVER-38070 fix infinite loop in agg expression |
ring_addr_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
		uint64_t ra, uint64_t *size)
{
	uint64_t vva;

	/* Without a vIOMMU the ring address is already a qemu VA. */
	if (!(dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)))
		return qva_to_vva(dev, ra, size);

	/* vIOMMU enabled: translate the IOVA under the IOTLB read lock. */
	vhost_user_iotlb_rd_lock(vq);
	vva = vhost_iova_to_vva(dev, vq, ra, size, VHOST_ACCESS_RW);
	vhost_user_iotlb_rd_unlock(vq);

	return vva;
}
"CWE-190"
]
| dpdk | 3ae4beb079ce242240c34376a066bbccd0c0b23e | 320,760,159,082,307,460,000,000,000,000,000,000,000 | 16 | vhost: check log mmap offset and size overflow
vhost_user_set_log_base() is a message handler that is
called to handle the VHOST_USER_SET_LOG_BASE message.
Its payload contains a 64 bit size and offset. Both are
added up and used as a size when calling mmap().
There is no integer overflow check. If an integer overflow
occurs a smaller memory map would be created than
requested. Since the returned mapping is mapped as writable
and used for logging, a memory corruption could occur.
CVE-2020-10722
Fixes: fbc4d248b198 ("vhost: fix offset while mmaping log base address")
Cc: [email protected]
Reported-by: Ilja Van Sprundel <[email protected]>
Signed-off-by: Maxime Coquelin <[email protected]>
Reviewed-by: Xiaolong Ye <[email protected]>
Reviewed-by: Ilja Van Sprundel <[email protected]> |
template<typename tc, typename t>
CImg<T>& draw_fill(const int x0, const int y0, const int z0,
const tc *const color, const float opacity,
CImg<t> ®ion,
const float tolerance = 0,
const bool is_high_connectivity = false) {
#define _draw_fill_push(x,y,z) if (N>=stack._width) stack.resize(2*N + 1,1,1,3,0); \
stack[N] = x; stack(N,1) = y; stack(N++,2) = z
#define _draw_fill_pop(x,y,z) x = stack[--N]; y = stack(N,1); z = stack(N,2)
#define _draw_fill_is_inside(x,y,z) !_region(x,y,z) && _draw_fill(x,y,z,ref,tolerance2)
if (!containsXYZC(x0,y0,z0,0)) return *this;
const float nopacity = cimg::abs((float)opacity), copacity = 1 - std::max((float)opacity,0.f);
const float tolerance2 = cimg::sqr(tolerance);
const CImg<T> ref = get_vector_at(x0,y0,z0);
CImg<uintT> stack(256,1,1,3);
CImg<ucharT> _region(_width,_height,_depth,1,0);
unsigned int N = 0;
int x, y, z;
_draw_fill_push(x0,y0,z0);
while (N>0) {
_draw_fill_pop(x,y,z);
if (!_region(x,y,z)) {
const int yp = y - 1, yn = y + 1, zp = z - 1, zn = z + 1;
int xl = x, xr = x;
// Using these booleans reduces the number of pushes drastically.
bool is_yp = false, is_yn = false, is_zp = false, is_zn = false;
for (int step = -1; step<2; step+=2) {
while (x>=0 && x<width() && _draw_fill_is_inside(x,y,z)) {
if (yp>=0 && _draw_fill_is_inside(x,yp,z)) {
if (!is_yp) { _draw_fill_push(x,yp,z); is_yp = true; }
} else is_yp = false;
if (yn<height() && _draw_fill_is_inside(x,yn,z)) {
if (!is_yn) { _draw_fill_push(x,yn,z); is_yn = true; }
} else is_yn = false;
if (depth()>1) {
if (zp>=0 && _draw_fill_is_inside(x,y,zp)) {
if (!is_zp) { _draw_fill_push(x,y,zp); is_zp = true; }
} else is_zp = false;
if (zn<depth() && _draw_fill_is_inside(x,y,zn)) {
if (!is_zn) { _draw_fill_push(x,y,zn); is_zn = true; }
} else is_zn = false;
}
if (is_high_connectivity) {
const int xp = x - 1, xn = x + 1;
if (yp>=0 && !is_yp) {
if (xp>=0 && _draw_fill_is_inside(xp,yp,z)) {
_draw_fill_push(xp,yp,z); if (step<0) is_yp = true;
}
if (xn<width() && _draw_fill_is_inside(xn,yp,z)) {
_draw_fill_push(xn,yp,z); if (step>0) is_yp = true;
}
}
if (yn<height() && !is_yn) {
if (xp>=0 && _draw_fill_is_inside(xp,yn,z)) {
_draw_fill_push(xp,yn,z); if (step<0) is_yn = true;
}
if (xn<width() && _draw_fill_is_inside(xn,yn,z)) {
_draw_fill_push(xn,yn,z); if (step>0) is_yn = true;
}
}
if (depth()>1) {
if (zp>=0 && !is_zp) {
if (xp>=0 && _draw_fill_is_inside(xp,y,zp)) {
_draw_fill_push(xp,y,zp); if (step<0) is_zp = true;
}
if (xn<width() && _draw_fill_is_inside(xn,y,zp)) {
_draw_fill_push(xn,y,zp); if (step>0) is_zp = true;
}
if (yp>=0 && !is_yp) {
if (_draw_fill_is_inside(x,yp,zp)) { _draw_fill_push(x,yp,zp); }
if (xp>=0 && _draw_fill_is_inside(xp,yp,zp)) { _draw_fill_push(xp,yp,zp); }
if (xn<width() && _draw_fill_is_inside(xn,yp,zp)) { _draw_fill_push(xn,yp,zp); }
}
if (yn<height() && !is_yn) {
if (_draw_fill_is_inside(x,yn,zp)) { _draw_fill_push(x,yn,zp); }
if (xp>=0 && _draw_fill_is_inside(xp,yn,zp)) { _draw_fill_push(xp,yn,zp); }
if (xn<width() && _draw_fill_is_inside(xn,yn,zp)) { _draw_fill_push(xn,yn,zp); }
}
}
if (zn<depth() && !is_zn) {
if (xp>=0 && _draw_fill_is_inside(xp,y,zn)) {
_draw_fill_push(xp,y,zn); if (step<0) is_zn = true;
}
if (xn<width() && _draw_fill_is_inside(xn,y,zn)) {
_draw_fill_push(xn,y,zn); if (step>0) is_zn = true;
}
if (yp>=0 && !is_yp) {
if (_draw_fill_is_inside(x,yp,zn)) { _draw_fill_push(x,yp,zn); }
if (xp>=0 && _draw_fill_is_inside(xp,yp,zn)) { _draw_fill_push(xp,yp,zn); }
if (xn<width() && _draw_fill_is_inside(xn,yp,zn)) { _draw_fill_push(xn,yp,zn); }
}
if (yn<height() && !is_yn) {
if (_draw_fill_is_inside(x,yn,zn)) { _draw_fill_push(x,yn,zn); }
if (xp>=0 && _draw_fill_is_inside(xp,yn,zn)) { _draw_fill_push(xp,yn,zn); }
if (xn<width() && _draw_fill_is_inside(xn,yn,zn)) { _draw_fill_push(xn,yn,zn); }
}
}
}
}
x+=step;
}
if (step<0) { xl = ++x; x = xr + 1; is_yp = is_yn = is_zp = is_zn = false; }
else xr = --x;
}
std::memset(_region.data(xl,y,z),1,xr - xl + 1);
if (opacity==1) {
if (sizeof(T)==1) {
const int dx = xr - xl + 1;
cimg_forC(*this,c) std::memset(data(xl,y,z,c),(int)color[c],dx);
} else cimg_forC(*this,c) {
const T val = (T)color[c];
T *ptri = data(xl,y,z,c); for (int k = xl; k<=xr; ++k) *(ptri++) = val;
}
} else cimg_forC(*this,c) {
const T val = (T)(color[c]*nopacity);
T *ptri = data(xl,y,z,c); for (int k = xl; k<=xr; ++k) { *ptri = (T)(val + *ptri*copacity); ++ptri; }
}
}
}
_region.move_to(region);
return *this; | 0 | [
"CWE-119",
"CWE-787"
]
| CImg | ac8003393569aba51048c9d67e1491559877b1d1 | 180,247,702,172,405,370,000,000,000,000,000,000,000 | 127 | . |
/* Print a printf-style error message to stderr.
 * Returns the number of characters written, or 0 when compiled with
 * R_SAFETY_NET (which suppresses the actual output). */
int _bson_errprintf( const char *format, ... ) {
    int nwritten = 0;
    va_list args;

    va_start( args, format );
#ifndef R_SAFETY_NET
    nwritten = vfprintf( stderr, format, args );
#endif
    va_end( args );
    return nwritten;
}
"CWE-190"
]
| mongo-c-driver-legacy | 1a1f5e26a4309480d88598913f9eebf9e9cba8ca | 301,676,823,880,169,970,000,000,000,000,000,000,000 | 11 | don't mix up int and size_t (first pass to fix that) |
extract_string_until (const char *original, const char *until_substring)
{
	size_t prefix_len;
	char *copy;

	/* until_substring must point into original: at or after its first
	 * character and no further than its terminating NUL. */
	g_assert ((int) strlen (original) >= until_substring - original);
	g_assert (until_substring - original >= 0);

	prefix_len = until_substring - original;
	copy = g_malloc (prefix_len + 1);
	strncpy (copy, original, prefix_len);
	copy[prefix_len] = '\0';

	return copy;
}
| nautilus | ca2fd475297946f163c32dcea897f25da892b89d | 45,415,704,462,927,310,000,000,000,000,000,000,000 | 13 | Add nautilus_file_mark_desktop_file_trusted(), this now adds a #! line if
2009-02-24 Alexander Larsson <[email protected]>
* libnautilus-private/nautilus-file-operations.c:
* libnautilus-private/nautilus-file-operations.h:
Add nautilus_file_mark_desktop_file_trusted(), this now
adds a #! line if there is none as well as makes the file
executable.
* libnautilus-private/nautilus-mime-actions.c:
Use nautilus_file_mark_desktop_file_trusted() instead of
just setting the permissions.
svn path=/trunk/; revision=15006 |
/*
 * Truncate an inode, dispatching to the on-disk-format specific helper.
 * Only regular files, directories and symlinks may be truncated; any
 * other inode type is silently ignored.
 */
void minix_truncate(struct inode * inode)
{
	if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	    S_ISLNK(inode->i_mode)) {
		if (INODE_VERSION(inode) == MINIX_V1)
			V1_minix_truncate(inode);
		else
			V2_minix_truncate(inode);
	}
}
"CWE-189"
]
| linux-2.6 | f5fb09fa3392ad43fbcfc2f4580752f383ab5996 | 55,650,007,351,896,310,000,000,000,000,000,000,000 | 9 | [PATCH] Fix for minix crash
Mounting a (corrupt) minix filesystem with zero s_zmap_blocks
gives a spectacular crash on my 2.6.17.8 system, no doubt
because minix/inode.c does an unconditional
minix_set_bit(0,sbi->s_zmap[0]->b_data);
[[email protected]: make labels consistent while we're there]
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
// Evaluate the SPACE_TO_BATCH_ND op.
//
// Resizes the output first when it is dynamic, then dispatches on the
// input tensor's element type (input/output types are already known to
// match from Prepare).  Quantized types (uint8/int8) pass the output
// zero-point as the padding value; all other types pad with 0.  The
// kernel_type template parameter selects the reference or optimized
// implementation.
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
  SpaceToBatchNDContext op_context(context, node);

  // Resize the output tensor if the output tensor is dynamic.
  if (IsDynamicTensor(op_context.output)) {
    TF_LITE_ENSURE_OK(context, ResizeOutputTensor(context, &op_context));
  }

// Invokes namespace `type`'s SpaceToBatchND for element type `scalar`,
// padding with `pad_value`.
#define TF_LITE_SPACE_TO_BATCH_ND(type, scalar, pad_value)             \
  tflite::SpaceToBatchParams op_params;                                \
  op_params.output_offset = pad_value;                                 \
  type::SpaceToBatchND(op_params, GetTensorShape(op_context.input),    \
                       GetTensorData<scalar>(op_context.input),        \
                       GetTensorShape(op_context.block_shape),         \
                       GetTensorData<int32_t>(op_context.block_shape), \
                       GetTensorShape(op_context.paddings),            \
                       GetTensorData<int32_t>(op_context.paddings),    \
                       GetTensorShape(op_context.output),              \
                       GetTensorData<scalar>(op_context.output))
  switch (op_context.input->type) {  // Already know in/out types are same.
    case kTfLiteFloat32:
      if (kernel_type == kReference) {
        TF_LITE_SPACE_TO_BATCH_ND(reference_ops, float, 0);
      } else {
        TF_LITE_SPACE_TO_BATCH_ND(optimized_ops, float, 0);
      }
      break;
    case kTfLiteUInt8:
      if (kernel_type == kReference) {
        TF_LITE_SPACE_TO_BATCH_ND(reference_ops, uint8_t,
                                  op_context.output->params.zero_point);
      } else {
        TF_LITE_SPACE_TO_BATCH_ND(optimized_ops, uint8_t,
                                  op_context.output->params.zero_point);
      }
      break;
    case kTfLiteInt8:
      if (kernel_type == kReference) {
        TF_LITE_SPACE_TO_BATCH_ND(reference_ops, int8_t,
                                  op_context.output->params.zero_point);
      } else {
        TF_LITE_SPACE_TO_BATCH_ND(optimized_ops, int8_t,
                                  op_context.output->params.zero_point);
      }
      break;
    case kTfLiteInt32:
      if (kernel_type == kReference) {
        TF_LITE_SPACE_TO_BATCH_ND(reference_ops, int32_t, 0);
      } else {
        TF_LITE_SPACE_TO_BATCH_ND(optimized_ops, int32_t, 0);
      }
      break;
    case kTfLiteInt64:
      if (kernel_type == kReference) {
        TF_LITE_SPACE_TO_BATCH_ND(reference_ops, int64_t, 0);
      } else {
        TF_LITE_SPACE_TO_BATCH_ND(optimized_ops, int64_t, 0);
      }
      break;
    default:
      // Unsupported element type.
      context->ReportError(
          context, "Type %d is currently not supported by SpaceToBatch.",
          op_context.input->type);
      return kTfLiteError;
  }
#undef TF_LITE_SPACE_TO_BATCH_ND
  return kTfLiteOk;
}
"CWE-369"
]
| tensorflow | 6d36ba65577006affb272335b7c1abd829010708 | 257,873,700,914,522,900,000,000,000,000,000,000,000 | 68 | Prevent division by 0
PiperOrigin-RevId: 370984990
Change-Id: Ib324955bbeb1cbd97c82fd5d61a00a2697c9a2de |
/* Notify the application that the NVM context of the given module has
 * changed.  Does nothing when no callback structure or handler has been
 * registered. */
static void CallNvmCtxCallback( LoRaMacNvmCtxModule_t module )
{
    if( MacCtx.MacCallbacks == NULL )
    {
        return;
    }
    if( MacCtx.MacCallbacks->NvmContextChange != NULL )
    {
        MacCtx.MacCallbacks->NvmContextChange( module );
    }
}
| 0 | [
"CWE-120",
"CWE-787"
]
| LoRaMac-node | e3063a91daa7ad8a687223efa63079f0c24568e4 | 333,942,935,992,899,160,000,000,000,000,000,000,000 | 7 | Added received buffer size checks. |
/*
 * Save the FPU/SSE state to a user-space fxsave area.
 *
 * Picks the instruction form for the build target: plain fxsave on
 * 32-bit, fxsaveq when the assembler supports it, otherwise the
 * rex64-prefixed fxsave with the pointer in a register operand.
 * Returns the user_insn() result (0 on success, -EFAULT on fault).
 */
static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
{
	if (config_enabled(CONFIG_X86_32))
		return user_insn(fxsave %[fx], [fx] "=m" (*fx), "m" (*fx));
	else if (config_enabled(CONFIG_AS_FXSAVEQ))
		return user_insn(fxsaveq %[fx], [fx] "=m" (*fx), "m" (*fx));

	/* See comment in fpu_fxsave() below. */
	return user_insn(rex64/fxsave (%[fx]), "=m" (*fx), [fx] "R" (fx));
}
"CWE-284",
"CWE-264"
]
| linux | 26bef1318adc1b3a530ecc807ef99346db2aa8b0 | 227,129,806,646,398,550,000,000,000,000,000,000,000 | 10 | x86, fpu, amd: Clear exceptions in AMD FXSAVE workaround
Before we do an EMMS in the AMD FXSAVE information leak workaround we
need to clear any pending exceptions, otherwise we trap with a
floating-point exception inside this code.
Reported-by: halfdog <[email protected]>
Tested-by: Borislav Petkov <[email protected]>
Link: http://lkml.kernel.org/r/CA%2B55aFxQnY_PCG_n4=0w-VG=YLXL-yr7oMxyy0WU2gCBAf3ydg@mail.gmail.com
Signed-off-by: H. Peter Anvin <[email protected]> |
/*
 * Build an OPT (EDNS) rdataset for `message`.
 *
 * The OPT pseudo-RR abuses the RR fields: CLASS carries the advertised
 * UDP buffer size and TTL packs the EDNS version (bits 16-23) with the
 * extended flags (low 16 bits).  `ednsopts`/`count` are serialized into
 * the RDATA as (code, length, value) triplets.  On success the rdataset
 * is returned via *rdatasetp; all temporaries are owned by the message.
 */
dns_message_buildopt(dns_message_t *message, dns_rdataset_t **rdatasetp,
		     unsigned int version, uint16_t udpsize,
		     unsigned int flags, dns_ednsopt_t *ednsopts, size_t count)
{
	dns_rdataset_t *rdataset = NULL;
	dns_rdatalist_t *rdatalist = NULL;
	dns_rdata_t *rdata = NULL;
	isc_result_t result;
	unsigned int len = 0, i;

	REQUIRE(DNS_MESSAGE_VALID(message));
	REQUIRE(rdatasetp != NULL && *rdatasetp == NULL);

	result = dns_message_gettemprdatalist(message, &rdatalist);
	if (result != ISC_R_SUCCESS)
		return (result);
	result = dns_message_gettemprdata(message, &rdata);
	if (result != ISC_R_SUCCESS)
		goto cleanup;
	result = dns_message_gettemprdataset(message, &rdataset);
	if (result != ISC_R_SUCCESS)
		goto cleanup;

	rdatalist->type = dns_rdatatype_opt;

	/*
	 * Set Maximum UDP buffer size.
	 */
	rdatalist->rdclass = udpsize;

	/*
	 * Set EXTENDED-RCODE and Z to 0.
	 */
	rdatalist->ttl = (version << 16);
	rdatalist->ttl |= (flags & 0xffff);

	/*
	 * Set EDNS options if applicable
	 */
	if (count != 0U) {
		isc_buffer_t *buf = NULL;

		/* Each option needs 4 header bytes (code + length). */
		for (i = 0; i < count; i++)
			len += ednsopts[i].length + 4;

		/* RDLENGTH is a 16-bit field; refuse anything larger. */
		if (len > 0xffffU) {
			result = ISC_R_NOSPACE;
			goto cleanup;
		}

		result = isc_buffer_allocate(message->mctx, &buf, len);
		if (result != ISC_R_SUCCESS)
			goto cleanup;

		for (i = 0; i < count; i++) {
			isc_buffer_putuint16(buf, ednsopts[i].code);
			isc_buffer_putuint16(buf, ednsopts[i].length);
			if (ednsopts[i].length != 0) {
				isc_buffer_putmem(buf, ednsopts[i].value,
						  ednsopts[i].length);
			}
		}

		rdata->data = isc_buffer_base(buf);
		rdata->length = len;
		/* The message now owns the buffer backing the rdata. */
		dns_message_takebuffer(message, &buf);
	} else {
		rdata->data = NULL;
		rdata->length = 0;
	}

	rdata->rdclass = rdatalist->rdclass;
	rdata->type = rdatalist->type;
	rdata->flags = 0;

	ISC_LIST_APPEND(rdatalist->rdata, rdata, link);
	result = dns_rdatalist_tordataset(rdatalist, rdataset);
	RUNTIME_CHECK(result == ISC_R_SUCCESS);

	*rdatasetp = rdataset;
	return (ISC_R_SUCCESS);

 cleanup:
	/* Return whichever temporaries were obtained before the failure. */
	if (rdata != NULL)
		dns_message_puttemprdata(message, &rdata);
	if (rdataset != NULL)
		dns_message_puttemprdataset(message, &rdataset);
	if (rdatalist != NULL)
		dns_message_puttemprdatalist(message, &rdatalist);
	return (result);
}
"CWE-617"
]
| bind9 | 6ed167ad0a647dff20c8cb08c944a7967df2d415 | 10,834,867,176,787,575,000,000,000,000,000,000,000 | 89 | Always keep a copy of the message
this allows it to be available even when dns_message_parse()
returns an error.
/*
 * Ensure object `o` has a singleton (meta) class, creating one if needed.
 *
 * The new singleton class's superclass depends on o's kind: for a class
 * it is the metaclass chain; for an existing singleton class it is the
 * metaclass of the wrapped class's superclass (built on demand); for a
 * plain object it is o's own class, and the singleton class itself gets
 * a singleton class recursively.  Write barriers keep the GC aware of
 * the new cross-references, and "__attached__" records the owner.
 */
prepare_singleton_class(mrb_state *mrb, struct RBasic *o)
{
  struct RClass *sc, *c;

  if (o->c->tt == MRB_TT_SCLASS) return;
  sc = (struct RClass*)mrb_obj_alloc(mrb, MRB_TT_SCLASS, mrb->class_class);
  sc->flags |= MRB_FLAG_IS_INHERITED;
  sc->mt = kh_init(mt, mrb);
  sc->iv = 0;
  if (o->tt == MRB_TT_CLASS) {
    c = (struct RClass*)o;
    if (!c->super) {
      sc->super = mrb->class_class;
    }
    else {
      sc->super = c->super->c;
    }
  }
  else if (o->tt == MRB_TT_SCLASS) {
    c = (struct RClass*)o;
    /* Skip include-wrapper classes to reach the real superclass. */
    while (c->super->tt == MRB_TT_ICLASS)
      c = c->super;
    make_metaclass(mrb, c->super);
    sc->super = c->super->c;
  }
  else {
    sc->super = o->c;
    prepare_singleton_class(mrb, (struct RBasic*)sc);
  }
  o->c = sc;
  mrb_field_write_barrier(mrb, (struct RBasic*)o, (struct RBasic*)sc);
  mrb_field_write_barrier(mrb, (struct RBasic*)sc, (struct RBasic*)o);
  mrb_obj_iv_set(mrb, (struct RObject*)sc, mrb_intern_lit(mrb, "__attached__"), mrb_obj_value(o));
}
"CWE-476",
"CWE-415"
]
| mruby | faa4eaf6803bd11669bc324b4c34e7162286bfa3 | 267,540,256,562,523,730,000,000,000,000,000,000,000 | 34 | `mrb_class_real()` did not work for `BasicObject`; fix #4037 |
// Parse audio properties from an MP4 file's atom tree.
//
// Walks moov.trak entries until it finds the one whose mdia.hdlr marks it
// as a sound track, derives the duration from trak.mdia.mdhd (64-bit
// fields for mdhd version 1, 32-bit otherwise), then reads channel count,
// sample size/rate and — when an esds descriptor is present — the bitrate
// from the mp4a entry in trak.mdia.minf.stbl.stsd.  On any missing atom
// the properties are left at their defaults and parsing stops.
MP4::Properties::Properties(File *file, MP4::Atoms *atoms, ReadStyle style)
  : AudioProperties(style)
{
  d = new PropertiesPrivate;

  MP4::Atom *moov = atoms->find("moov");
  if(!moov) {
    debug("MP4: Atom 'moov' not found");
    return;
  }

  MP4::Atom *trak = 0;
  ByteVector data;

  // Locate the first track whose handler type is "soun" (audio).
  MP4::AtomList trakList = moov->findall("trak");
  for (unsigned int i = 0; i < trakList.size(); i++) {
    trak = trakList[i];
    MP4::Atom *hdlr = trak->find("mdia", "hdlr");
    if(!hdlr) {
      debug("MP4: Atom 'trak.mdia.hdlr' not found");
      return;
    }
    file->seek(hdlr->offset);
    data = file->readBlock(hdlr->length);
    if(data.mid(16, 4) == "soun") {
      break;
    }
    trak = 0;
  }
  if (!trak) {
    debug("MP4: No audio tracks");
    return;
  }

  MP4::Atom *mdhd = trak->find("mdia", "mdhd");
  if(!mdhd) {
    debug("MP4: Atom 'trak.mdia.mdhd' not found");
    return;
  }

  file->seek(mdhd->offset);
  data = file->readBlock(mdhd->length);
  // mdhd version byte selects 32-bit vs. 64-bit timescale/duration fields.
  uint version = data[8];
  if(version == 1) {
    if (data.size() < 36 + 8) {
      debug("MP4: Atom 'trak.mdia.mdhd' is smaller than expected");
      return;
    }
    long long unit = data.mid(28, 8).toLongLong();
    long long length = data.mid(36, 8).toLongLong();
    d->length = unit ? int(length / unit) : 0;
  }
  else {
    if (data.size() < 24 + 4) {
      debug("MP4: Atom 'trak.mdia.mdhd' is smaller than expected");
      return;
    }
    unsigned int unit = data.mid(20, 4).toUInt();
    unsigned int length = data.mid(24, 4).toUInt();
    d->length = unit ? length / unit : 0;
  }

  MP4::Atom *atom = trak->find("mdia", "minf", "stbl", "stsd");
  if(!atom) {
    return;
  }

  file->seek(atom->offset);
  data = file->readBlock(atom->length);
  if(data.mid(20, 4) == "mp4a") {
    d->channels = data.mid(40, 2).toShort();
    d->bitsPerSample = data.mid(42, 2).toShort();
    d->sampleRate = data.mid(46, 4).toUInt();
    // esds descriptor: optional 0x80 0x80 0x80 extension bytes may
    // precede each tag's payload.
    if(data.mid(56, 4) == "esds" && data[64] == 0x03) {
      long pos = 65;
      if(data.mid(pos, 3) == "\x80\x80\x80") {
        pos += 3;
      }
      pos += 4;
      if(data[pos] == 0x04) {
        pos += 1;
        if(data.mid(pos, 3) == "\x80\x80\x80") {
          pos += 3;
        }
        pos += 10;
        // Average bitrate field, rounded to kbit/s.
        d->bitrate = (data.mid(pos, 4).toUInt() + 500) / 1000;
      }
    }
  }
}
| taglib | cce6ad46c912c4137131c97f67136a3d11881726 | 40,062,336,900,697,345,000,000,000,000,000,000,000 | 90 | Reverse the version check, similarly to what mp4v2 does |
/*
 * Deliver an ICMPv6 error to every matching raw IPv6 socket.
 *
 * Hashes on the next-header protocol, then walks that hash chain under
 * the raw hash read lock, calling rawv6_err() for each socket bound to
 * the embedded packet's addresses and interface.
 */
void raw6_icmp_error(struct sk_buff *skb, int nexthdr,
		u8 type, u8 code, int inner_offset, __be32 info)
{
	struct sock *sk;
	int hash;
	const struct in6_addr *saddr, *daddr;
	struct net *net;

	hash = nexthdr & (RAW_HTABLE_SIZE - 1);

	read_lock(&raw_v6_hashinfo.lock);
	sk = sk_head(&raw_v6_hashinfo.ht[hash]);
	if (sk != NULL) {
		/* Note: ipv6_hdr(skb) != skb->data */
		const struct ipv6hdr *ip6h = (const struct ipv6hdr *)skb->data;
		saddr = &ip6h->saddr;
		daddr = &ip6h->daddr;
		net = dev_net(skb->dev);

		while ((sk = __raw_v6_lookup(net, sk, nexthdr, saddr, daddr,
						IP6CB(skb)->iif))) {
			rawv6_err(sk, skb, NULL, type, code,
					inner_offset, info);
			sk = sk_next(sk);
		}
	}
	read_unlock(&raw_v6_hashinfo.lock);
}
"CWE-20"
]
| net | bceaa90240b6019ed73b49965eac7d167610be69 | 23,566,273,031,080,430,000,000,000,000,000,000,000 | 28 | inet: prevent leakage of uninitialized memory to user in recv syscalls
Only update *addr_len when we actually fill in sockaddr, otherwise we
can return uninitialized memory from the stack to the caller in the
recvfrom, recvmmsg and recvmsg syscalls. Drop the the (addr_len == NULL)
checks because we only get called with a valid addr_len pointer either
from sock_common_recvmsg or inet_recvmsg.
If a blocking read waits on a socket which is concurrently shut down we
now return zero and set msg_msgnamelen to 0.
Reported-by: mpb <[email protected]>
Suggested-by: Eric Dumazet <[email protected]>
Signed-off-by: Hannes Frederic Sowa <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
/*
 * Join an RoCE (IBoE) multicast group for the given cma id.
 *
 * Builds the ib_sa_multicast record (MGID from the socket address, rate
 * and MTU from the bound netdev) and queues iboe_mcast_work_handler() to
 * complete the join asynchronously.  Takes a reference on `mc` that the
 * work handler releases.  Returns 0 on queueing, negative errno on
 * failure (all partial allocations are unwound).
 */
static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
				   struct cma_multicast *mc)
{
	struct iboe_mcast_work *work;
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	int err;
	struct sockaddr *addr = (struct sockaddr *)&mc->addr;
	struct net_device *ndev = NULL;

	/* The wildcard address cannot name a multicast group. */
	if (cma_zero_addr((struct sockaddr *)&mc->addr))
		return -EINVAL;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	mc->multicast.ib = kzalloc(sizeof(struct ib_sa_multicast), GFP_KERNEL);
	if (!mc->multicast.ib) {
		err = -ENOMEM;
		goto out1;
	}

	cma_iboe_set_mgid(addr, &mc->multicast.ib->rec.mgid);

	mc->multicast.ib->rec.pkey = cpu_to_be16(0xffff);
	if (id_priv->id.ps == RDMA_PS_UDP)
		mc->multicast.ib->rec.qkey = cpu_to_be32(RDMA_UDP_QKEY);

	if (dev_addr->bound_dev_if)
		ndev = dev_get_by_index(&init_net, dev_addr->bound_dev_if);
	if (!ndev) {
		err = -ENODEV;
		goto out2;
	}

	/* Rate and MTU come from the underlying Ethernet device. */
	mc->multicast.ib->rec.rate = iboe_get_rate(ndev);
	mc->multicast.ib->rec.hop_limit = 1;
	mc->multicast.ib->rec.mtu = iboe_get_mtu(ndev->mtu);
	dev_put(ndev);
	if (!mc->multicast.ib->rec.mtu) {
		err = -EINVAL;
		goto out2;
	}
	rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
		    &mc->multicast.ib->rec.port_gid);
	work->id = id_priv;
	work->mc = mc;
	INIT_WORK(&work->work, iboe_mcast_work_handler);
	/* Reference dropped by the work handler when the join completes. */
	kref_get(&mc->mcref);
	queue_work(cma_wq, &work->work);

	return 0;

out2:
	kfree(mc->multicast.ib);
out1:
	kfree(work);
	return err;
}
"CWE-20"
]
| linux | b2853fd6c2d0f383dbdf7427e263eb576a633867 | 315,521,034,007,264,830,000,000,000,000,000,000,000 | 58 | IB/core: Don't resolve passive side RoCE L2 address in CMA REQ handler
The code that resolves the passive side source MAC within the rdma_cm
connection request handler was both redundant and buggy, so remove it.
It was redundant since later, when an RC QP is modified to RTR state,
the resolution will take place in the ib_core module. It was buggy
because this callback also deals with UD SIDR exchange, for which we
incorrectly looked at the REQ member of the CM event and dereferenced
a random value.
Fixes: dd5f03beb4f7 ("IB/core: Ethernet L2 attributes in verbs/cm structures")
Signed-off-by: Moni Shoua <[email protected]>
Signed-off-by: Or Gerlitz <[email protected]>
Signed-off-by: Roland Dreier <[email protected]> |
/* Emit the frr-isisd:if-state-change YANG notification for a circuit.
 * "down" selects the state enum value carried in the notification. */
void isis_notif_if_state_change(const struct isis_circuit *circuit, bool down)
{
	const char *xpath = "/frr-isisd:if-state-change";
	char state_xpath[XPATH_MAXLEN];
	struct yang_data *state_arg;
	struct list *args = yang_data_list_new();

	notif_prep_instance_hdr(xpath, circuit->area, "default", args);
	notif_prepr_iface_hdr(xpath, circuit, args);

	snprintf(state_xpath, sizeof(state_xpath), "%s/state", xpath);
	state_arg = yang_data_new_enum(state_xpath, !!down);
	listnode_add(args, state_arg);

	nb_notification_send(xpath, args);
}
"CWE-119",
"CWE-787"
]
| frr | ac3133450de12ba86c051265fc0f1b12bc57b40c | 267,626,029,302,503,160,000,000,000,000,000,000,000 | 16 | isisd: fix #10505 using base64 encoding
Using base64 instead of the raw string to encode
the binary data.
Signed-off-by: whichbug <[email protected]> |
/*
 * Base64-encode the ssize bytes at src and return the result as a
 * freshly allocated NUL-terminated string (caller frees).  Returns NULL
 * for empty or oversized input.
 *
 * The output buffer is sized exactly from the input length — 4 output
 * characters per started 3-byte group plus the terminator — instead of
 * the former fixed-size B64_BUF allocation, which overflowed (heap
 * out-of-bounds write, CWE-787) for inputs longer than ~3/4 * B64_BUF,
 * e.g. very large extended-attribute or ACL values.
 */
char* encode_base64(byte* src,size_t ssize)
{
  char* retbuf;
  size_t groups;
  size_t i;
  size_t pos;
  unsigned long triple;

  /* Exit on empty input */
  if (!ssize||src==NULL){
    log_msg(LOG_LEVEL_DEBUG,"encode base64: empty string");
    return NULL;
  }

  log_msg(LOG_LEVEL_TRACE, "encode base64: data length: %zu", ssize);

  /* Number of (possibly partial) 3-byte input groups; each produces
   * exactly 4 output characters.  Guard the size computation against
   * size_t overflow before allocating. */
  groups = ssize / 3 + (ssize % 3 != 0);
  if (groups > ((size_t)-1 - 1) / 4) {
    log_msg(LOG_LEVEL_DEBUG,"encode base64: input too large");
    return NULL;
  }
  retbuf = (char*)checked_malloc(sizeof(char)*(groups*4 + 1));

  pos = 0;
  /* Full 3-byte groups. */
  for (i = 0; i + 2 < ssize; i += 3) {
    triple = ((unsigned long)src[i] << 16)
           | ((unsigned long)src[i+1] << 8)
           |  (unsigned long)src[i+2];
    retbuf[pos++] = tob64[(triple >> 18) & 0x3f];
    retbuf[pos++] = tob64[(triple >> 12) & 0x3f];
    retbuf[pos++] = tob64[(triple >>  6) & 0x3f];
    retbuf[pos++] = tob64[ triple        & 0x3f];
  }
  /* Trailing partial group, padded with '=' per RFC 4648. */
  switch (ssize - i) {
  case 1:
    triple = (unsigned long)src[i] << 16;
    retbuf[pos++] = tob64[(triple >> 18) & 0x3f];
    retbuf[pos++] = tob64[(triple >> 12) & 0x3f];
    retbuf[pos++] = '=';
    retbuf[pos++] = '=';
    break;
  case 2:
    triple = ((unsigned long)src[i] << 16)
           | ((unsigned long)src[i+1] << 8);
    retbuf[pos++] = tob64[(triple >> 18) & 0x3f];
    retbuf[pos++] = tob64[(triple >> 12) & 0x3f];
    retbuf[pos++] = tob64[(triple >>  6) & 0x3f];
    retbuf[pos++] = '=';
    break;
  default:
    break;
  }
  retbuf[pos] = '\0';

  return retbuf;
}
"CWE-787"
]
| aide | 175d1f2626f4500b4fc5ecb7167bba9956b174bc | 237,907,025,629,516,230,000,000,000,000,000,000,000 | 83 | Precalculate buffer size in base64 functions
Aide uses a fixed size (16k bytes) for the return buffer in
encode_base64/decode_base64 functions. This results in a segfault if
aide processes a file with too large extended attribute value or ACL.
Fix this issue by precalculating the size of the return buffer depending on
the input in the encode_base64/decode_base64 functions.
This addresses CVE-2021-45417. Thanks to David Bouman for reporting this
vulnerability and reviewing this patch. |
/*
 * Parse a Java class-file AnnotationDefault attribute from `buffer`.
 *
 * Requires at least 8 bytes (6-byte attribute header plus a minimal
 * element_value); returns NULL otherwise.  The element_value parser is
 * handed only the remaining bytes, so it cannot read past `sz`.
 */
R_API RBinJavaAttrInfo *r_bin_java_annotation_default_attr_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz, ut64 buf_offset) {
	ut64 offset = 0;
	if (sz < 8) {
		return NULL;
	}
	RBinJavaAttrInfo *attr = r_bin_java_default_attr_new (bin, buffer, sz, buf_offset);
	/* Skip the 6-byte attribute_name_index/attribute_length header. */
	offset += 6;
	if (attr && sz >= offset) {
		attr->type = R_BIN_JAVA_ATTR_TYPE_ANNOTATION_DEFAULT_ATTR;
		attr->info.annotation_default_attr.default_value = r_bin_java_element_value_new (buffer + offset, sz - offset, buf_offset + offset);
		if (attr->info.annotation_default_attr.default_value) {
			offset += attr->info.annotation_default_attr.default_value->size;
		}
	}
	r_bin_java_print_annotation_default_attr_summary (attr);
	return attr;
}
"CWE-119",
"CWE-788"
]
| radare2 | 6c4428f018d385fc80a33ecddcb37becea685dd5 | 106,429,372,044,905,830,000,000,000,000,000,000,000 | 17 | Improve boundary checks to fix oobread segfaults ##crash
* Reported by Cen Zhang via huntr.dev
* Reproducer: bins/fuzzed/javaoob-havoc.class |
/*
 * Assign a drive address (controller/bus/target/unit) to a SCSI hostdev.
 *
 * Scans controllers starting at index 0 for the first free unit number
 * (bus width taken from the driver's WIDE_SCSI feature flag) and fills
 * in hostdev->info accordingly.  Always returns 0; a controller index
 * past the currently defined ones may be selected, to be created later
 * at hotplug time (see comment below).
 */
virDomainHostdevAssignAddress(virDomainXMLOptionPtr xmlopt,
                              const virDomainDef *def,
                              virDomainHostdevDefPtr hostdev)
{
    int next_unit = 0;
    int controller = 0;
    unsigned int max_unit;

    if (xmlopt->config.features & VIR_DOMAIN_DEF_FEATURE_WIDE_SCSI)
        max_unit = SCSI_WIDE_BUS_MAX_CONT_UNIT;
    else
        max_unit = SCSI_NARROW_BUS_MAX_CONT_UNIT;

    /* NB: Do not attempt calling virDomainDefMaybeAddController to
     * automagically add a "new" controller. Doing so will result in
     * qemuDomainFindOrCreateSCSIDiskController "finding" the controller
     * in the domain def list and thus not hotplugging the controller as
     * well as the hostdev in the event that there are either no SCSI
     * controllers defined or there was no space on an existing one.
     *
     * Because we cannot add a controller, then we should not walk the
     * defined controllers list in order to find empty space. Doing
     * so fails to return the valid next unit number for the 2nd
     * hostdev being added to the as yet to be created controller.
     */
    do {
        next_unit = virDomainControllerSCSINextUnit(def, max_unit, controller);
        if (next_unit < 0)
            controller++;
    } while (next_unit < 0);

    hostdev->info->type = VIR_DOMAIN_DEVICE_ADDRESS_TYPE_DRIVE;
    hostdev->info->addr.drive.controller = controller;
    hostdev->info->addr.drive.bus = 0;
    hostdev->info->addr.drive.target = 0;
    hostdev->info->addr.drive.unit = next_unit;

    return 0;
}
"CWE-212"
]
| libvirt | a5b064bf4b17a9884d7d361733737fb614ad8979 | 33,011,058,156,312,686,000,000,000,000,000,000,000 | 40 | conf: Don't format http cookies unless VIR_DOMAIN_DEF_FORMAT_SECURE is used
Starting with 3b076391befc3fe72deb0c244ac6c2b4c100b410
(v6.1.0-122-g3b076391be) we support http cookies. Since they may contain
somewhat sensitive information we should not format them into the XML
unless VIR_DOMAIN_DEF_FORMAT_SECURE is asserted.
Reported-by: Han Han <[email protected]>
Signed-off-by: Peter Krempa <[email protected]>
Reviewed-by: Erik Skultety <[email protected]> |
// Verify that streaming a HealthTransition value prints its enumerator name.
TEST(Printer, HealthTransitionPrinter) {
  std::ostringstream out_changed;
  out_changed << HealthTransition::Changed;
  EXPECT_EQ("Changed", out_changed.str());

  std::ostringstream out_unchanged;
  out_unchanged << HealthTransition::Unchanged;
  EXPECT_EQ("Unchanged", out_unchanged.str());
}
"CWE-476"
]
| envoy | 9b1c3962172a972bc0359398af6daa3790bb59db | 291,692,600,974,484,730,000,000,000,000,000,000,000 | 9 | healthcheck: fix grpc inline removal crashes (#749)
Signed-off-by: Matt Klein <[email protected]>
Signed-off-by: Pradeep Rao <[email protected]> |
static void bond_netpoll_cleanup(struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
struct list_head *iter;
struct slave *slave;
bond_for_each_slave(bond, slave, iter)
if (bond_slave_is_up(slave))
slave_disable_netpoll(slave);
} | 0 | [
"CWE-476",
"CWE-703"
]
| linux | 105cd17a866017b45f3c45901b394c711c97bf40 | 289,953,316,333,711,200,000,000,000,000,000,000,000 | 10 | bonding: fix null dereference in bond_ipsec_add_sa()
If bond doesn't have real device, bond->curr_active_slave is null.
But bond_ipsec_add_sa() dereferences bond->curr_active_slave without
null checking.
So, null-ptr-deref would occur.
Test commands:
ip link add bond0 type bond
ip link set bond0 up
ip x s add proto esp dst 14.1.1.1 src 15.1.1.1 spi \
0x07 mode transport reqid 0x07 replay-window 32 aead 'rfc4106(gcm(aes))' \
0x44434241343332312423222114131211f4f3f2f1 128 sel src 14.0.0.52/24 \
dst 14.0.0.70/24 proto tcp offload dev bond0 dir in
Splat looks like:
KASAN: null-ptr-deref in range [0x0000000000000000-0x0000000000000007]
CPU: 4 PID: 680 Comm: ip Not tainted 5.13.0-rc3+ #1168
RIP: 0010:bond_ipsec_add_sa+0xc4/0x2e0 [bonding]
Code: 85 21 02 00 00 4d 8b a6 48 0c 00 00 e8 75 58 44 ce 85 c0 0f 85 14
01 00 00 48 b8 00 00 00 00 00 fc ff df 4c 89 e2 48 c1 ea 03 <80> 3c 02
00 0f 85 fc 01 00 00 48 8d bb e0 02 00 00 4d 8b 2c 24 48
RSP: 0018:ffff88810946f508 EFLAGS: 00010246
RAX: dffffc0000000000 RBX: ffff88810b4e8040 RCX: 0000000000000001
RDX: 0000000000000000 RSI: ffffffff8fe34280 RDI: ffff888115abe100
RBP: ffff88810946f528 R08: 0000000000000003 R09: fffffbfff2287e11
R10: 0000000000000001 R11: ffff888115abe0c8 R12: 0000000000000000
R13: ffffffffc0aea9a0 R14: ffff88800d7d2000 R15: ffff88810b4e8330
FS: 00007efc5552e680(0000) GS:ffff888119c00000(0000)
knlGS:0000000000000000
CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
CR2: 000055c2530dbf40 CR3: 0000000103056004 CR4: 00000000003706e0
DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
Call Trace:
xfrm_dev_state_add+0x2a9/0x770
? memcpy+0x38/0x60
xfrm_add_sa+0x2278/0x3b10 [xfrm_user]
? xfrm_get_policy+0xaa0/0xaa0 [xfrm_user]
? register_lock_class+0x1750/0x1750
xfrm_user_rcv_msg+0x331/0x660 [xfrm_user]
? rcu_read_lock_sched_held+0x91/0xc0
? xfrm_user_state_lookup.constprop.39+0x320/0x320 [xfrm_user]
? find_held_lock+0x3a/0x1c0
? mutex_lock_io_nested+0x1210/0x1210
? sched_clock_cpu+0x18/0x170
netlink_rcv_skb+0x121/0x350
? xfrm_user_state_lookup.constprop.39+0x320/0x320 [xfrm_user]
? netlink_ack+0x9d0/0x9d0
? netlink_deliver_tap+0x17c/0xa50
xfrm_netlink_rcv+0x68/0x80 [xfrm_user]
netlink_unicast+0x41c/0x610
? netlink_attachskb+0x710/0x710
netlink_sendmsg+0x6b9/0xb70
[ ...]
Fixes: 18cb261afd7b ("bonding: support hardware encryption offload to slaves")
Signed-off-by: Taehee Yoo <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
int btrfs_run_dev_replace(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info)
{
int ret;
struct btrfs_root *dev_root = fs_info->dev_root;
struct btrfs_path *path;
struct btrfs_key key;
struct extent_buffer *eb;
struct btrfs_dev_replace_item *ptr;
struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
down_read(&dev_replace->rwsem);
if (!dev_replace->is_valid ||
!dev_replace->item_needs_writeback) {
up_read(&dev_replace->rwsem);
return 0;
}
up_read(&dev_replace->rwsem);
key.objectid = 0;
key.type = BTRFS_DEV_REPLACE_KEY;
key.offset = 0;
path = btrfs_alloc_path();
if (!path) {
ret = -ENOMEM;
goto out;
}
ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
if (ret < 0) {
btrfs_warn(fs_info,
"error %d while searching for dev_replace item!",
ret);
goto out;
}
if (ret == 0 &&
btrfs_item_size_nr(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
/*
* need to delete old one and insert a new one.
* Since no attempt is made to recover any old state, if the
* dev_replace state is 'running', the data on the target
* drive is lost.
* It would be possible to recover the state: just make sure
* that the beginning of the item is never changed and always
* contains all the essential information. Then read this
* minimal set of information and use it as a base for the
* new state.
*/
ret = btrfs_del_item(trans, dev_root, path);
if (ret != 0) {
btrfs_warn(fs_info,
"delete too small dev_replace item failed %d!",
ret);
goto out;
}
ret = 1;
}
if (ret == 1) {
/* need to insert a new item */
btrfs_release_path(path);
ret = btrfs_insert_empty_item(trans, dev_root, path,
&key, sizeof(*ptr));
if (ret < 0) {
btrfs_warn(fs_info,
"insert dev_replace item failed %d!", ret);
goto out;
}
}
eb = path->nodes[0];
ptr = btrfs_item_ptr(eb, path->slots[0],
struct btrfs_dev_replace_item);
down_write(&dev_replace->rwsem);
if (dev_replace->srcdev)
btrfs_set_dev_replace_src_devid(eb, ptr,
dev_replace->srcdev->devid);
else
btrfs_set_dev_replace_src_devid(eb, ptr, (u64)-1);
btrfs_set_dev_replace_cont_reading_from_srcdev_mode(eb, ptr,
dev_replace->cont_reading_from_srcdev_mode);
btrfs_set_dev_replace_replace_state(eb, ptr,
dev_replace->replace_state);
btrfs_set_dev_replace_time_started(eb, ptr, dev_replace->time_started);
btrfs_set_dev_replace_time_stopped(eb, ptr, dev_replace->time_stopped);
btrfs_set_dev_replace_num_write_errors(eb, ptr,
atomic64_read(&dev_replace->num_write_errors));
btrfs_set_dev_replace_num_uncorrectable_read_errors(eb, ptr,
atomic64_read(&dev_replace->num_uncorrectable_read_errors));
dev_replace->cursor_left_last_write_of_item =
dev_replace->cursor_left;
btrfs_set_dev_replace_cursor_left(eb, ptr,
dev_replace->cursor_left_last_write_of_item);
btrfs_set_dev_replace_cursor_right(eb, ptr,
dev_replace->cursor_right);
dev_replace->item_needs_writeback = 0;
up_write(&dev_replace->rwsem);
btrfs_mark_buffer_dirty(eb);
out:
btrfs_free_path(path);
return ret;
} | 0 | [
"CWE-476",
"CWE-284"
]
| linux | 09ba3bc9dd150457c506e4661380a6183af651c1 | 103,106,003,751,842,290,000,000,000,000,000,000,000 | 107 | btrfs: merge btrfs_find_device and find_device
Both btrfs_find_device() and find_device() does the same thing except
that the latter does not take the seed device onto account in the device
scanning context. We can merge them.
Signed-off-by: Anand Jain <[email protected]>
Reviewed-by: David Sterba <[email protected]>
Signed-off-by: David Sterba <[email protected]> |
static enum TIFFReadDirEntryErr TIFFReadDirEntryLong8Array(TIFF* tif, TIFFDirEntry* direntry, uint64** value)
{
enum TIFFReadDirEntryErr err;
uint32 count;
void* origdata;
uint64* data;
switch (direntry->tdir_type)
{
case TIFF_BYTE:
case TIFF_SBYTE:
case TIFF_SHORT:
case TIFF_SSHORT:
case TIFF_LONG:
case TIFF_SLONG:
case TIFF_LONG8:
case TIFF_SLONG8:
break;
default:
return(TIFFReadDirEntryErrType);
}
err=TIFFReadDirEntryArray(tif,direntry,&count,8,&origdata);
if ((err!=TIFFReadDirEntryErrOk)||(origdata==0))
{
*value=0;
return(err);
}
switch (direntry->tdir_type)
{
case TIFF_LONG8:
*value=(uint64*)origdata;
if (tif->tif_flags&TIFF_SWAB)
TIFFSwabArrayOfLong8(*value,count);
return(TIFFReadDirEntryErrOk);
case TIFF_SLONG8:
{
int64* m;
uint32 n;
m=(int64*)origdata;
for (n=0; n<count; n++)
{
if (tif->tif_flags&TIFF_SWAB)
TIFFSwabLong8((uint64*)m);
err=TIFFReadDirEntryCheckRangeLong8Slong8(*m);
if (err!=TIFFReadDirEntryErrOk)
{
_TIFFfree(origdata);
return(err);
}
m++;
}
*value=(uint64*)origdata;
return(TIFFReadDirEntryErrOk);
}
}
data=(uint64*)_TIFFmalloc(count*8);
if (data==0)
{
_TIFFfree(origdata);
return(TIFFReadDirEntryErrAlloc);
}
switch (direntry->tdir_type)
{
case TIFF_BYTE:
{
uint8* ma;
uint64* mb;
uint32 n;
ma=(uint8*)origdata;
mb=data;
for (n=0; n<count; n++)
*mb++=(uint64)(*ma++);
}
break;
case TIFF_SBYTE:
{
int8* ma;
uint64* mb;
uint32 n;
ma=(int8*)origdata;
mb=data;
for (n=0; n<count; n++)
{
err=TIFFReadDirEntryCheckRangeLong8Sbyte(*ma);
if (err!=TIFFReadDirEntryErrOk)
break;
*mb++=(uint64)(*ma++);
}
}
break;
case TIFF_SHORT:
{
uint16* ma;
uint64* mb;
uint32 n;
ma=(uint16*)origdata;
mb=data;
for (n=0; n<count; n++)
{
if (tif->tif_flags&TIFF_SWAB)
TIFFSwabShort(ma);
*mb++=(uint64)(*ma++);
}
}
break;
case TIFF_SSHORT:
{
int16* ma;
uint64* mb;
uint32 n;
ma=(int16*)origdata;
mb=data;
for (n=0; n<count; n++)
{
if (tif->tif_flags&TIFF_SWAB)
TIFFSwabShort((uint16*)ma);
err=TIFFReadDirEntryCheckRangeLong8Sshort(*ma);
if (err!=TIFFReadDirEntryErrOk)
break;
*mb++=(uint64)(*ma++);
}
}
break;
case TIFF_LONG:
{
uint32* ma;
uint64* mb;
uint32 n;
ma=(uint32*)origdata;
mb=data;
for (n=0; n<count; n++)
{
if (tif->tif_flags&TIFF_SWAB)
TIFFSwabLong(ma);
*mb++=(uint64)(*ma++);
}
}
break;
case TIFF_SLONG:
{
int32* ma;
uint64* mb;
uint32 n;
ma=(int32*)origdata;
mb=data;
for (n=0; n<count; n++)
{
if (tif->tif_flags&TIFF_SWAB)
TIFFSwabLong((uint32*)ma);
err=TIFFReadDirEntryCheckRangeLong8Slong(*ma);
if (err!=TIFFReadDirEntryErrOk)
break;
*mb++=(uint64)(*ma++);
}
}
break;
}
_TIFFfree(origdata);
if (err!=TIFFReadDirEntryErrOk)
{
_TIFFfree(data);
return(err);
}
*value=data;
return(TIFFReadDirEntryErrOk);
} | 0 | [
"CWE-125"
]
| libtiff | 9a72a69e035ee70ff5c41541c8c61cd97990d018 | 14,046,063,101,947,237,000,000,000,000,000,000,000 | 165 | * libtiff/tif_dirread.c: modify ChopUpSingleUncompressedStrip() to
instanciate compute ntrips as TIFFhowmany_32(td->td_imagelength, rowsperstrip),
instead of a logic based on the total size of data. Which is faulty is
the total size of data is not sufficient to fill the whole image, and thus
results in reading outside of the StripByCounts/StripOffsets arrays when
using TIFFReadScanline().
Reported by Agostino Sarubbo.
Fixes http://bugzilla.maptools.org/show_bug.cgi?id=2608.
* libtiff/tif_strip.c: revert the change in TIFFNumberOfStrips() done
for http://bugzilla.maptools.org/show_bug.cgi?id=2587 / CVE-2016-9273 since
the above change is a better fix that makes it unnecessary. |
on_web_service_idling (CockpitWebService *service,
gpointer data)
{
CockpitSession *session = data;
if (session->timeout_tag)
g_source_remove (session->timeout_tag);
g_debug ("session is idle");
/*
* The minimum amount of time before a request uses this new web service,
* otherwise it will just go away.
*/
session->timeout_tag = g_timeout_add_seconds (cockpit_ws_service_idle,
on_session_timeout,
session);
/*
* Also reset the timer which checks whether anything is going on in the
* entire process or not.
*/
if (session->auth->timeout_tag)
g_source_remove (session->auth->timeout_tag);
session->auth->timeout_tag = g_timeout_add_seconds (get_process_idle (),
on_process_timeout, session->auth);
} | 0 | [
"CWE-1021"
]
| cockpit | 46f6839d1af4e662648a85f3e54bba2d57f39f0e | 207,155,145,987,404,570,000,000,000,000,000,000,000 | 28 | ws: Restrict our cookie to the login host only
Mark our cookie as `SameSite: Strict` [1]. The current `None` default
will soon be moved to `Lax` by Firefox and Chromium, and recent versions
started to throw a warning about it.
[1] https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Set-Cookie/SameSite
https://bugzilla.redhat.com/show_bug.cgi?id=1891944 |
static int32_t virtio_net_flush_tx(VirtIONetQueue *q)
{
VirtIONet *n = q->n;
VirtIODevice *vdev = VIRTIO_DEVICE(n);
VirtQueueElement *elem;
int32_t num_packets = 0;
int queue_index = vq2q(virtio_get_queue_index(q->tx_vq));
if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
return num_packets;
}
if (q->async_tx.elem) {
virtio_queue_set_notification(q->tx_vq, 0);
return num_packets;
}
for (;;) {
ssize_t ret;
unsigned int out_num;
struct iovec sg[VIRTQUEUE_MAX_SIZE], sg2[VIRTQUEUE_MAX_SIZE + 1], *out_sg;
struct virtio_net_hdr_mrg_rxbuf mhdr;
elem = virtqueue_pop(q->tx_vq, sizeof(VirtQueueElement));
if (!elem) {
break;
}
out_num = elem->out_num;
out_sg = elem->out_sg;
if (out_num < 1) {
virtio_error(vdev, "virtio-net header not in first element");
virtqueue_detach_element(q->tx_vq, elem, 0);
g_free(elem);
return -EINVAL;
}
if (n->has_vnet_hdr) {
if (iov_to_buf(out_sg, out_num, 0, &mhdr, n->guest_hdr_len) <
n->guest_hdr_len) {
virtio_error(vdev, "virtio-net header incorrect");
virtqueue_detach_element(q->tx_vq, elem, 0);
g_free(elem);
return -EINVAL;
}
if (n->needs_vnet_hdr_swap) {
virtio_net_hdr_swap(vdev, (void *) &mhdr);
sg2[0].iov_base = &mhdr;
sg2[0].iov_len = n->guest_hdr_len;
out_num = iov_copy(&sg2[1], ARRAY_SIZE(sg2) - 1,
out_sg, out_num,
n->guest_hdr_len, -1);
if (out_num == VIRTQUEUE_MAX_SIZE) {
goto drop;
}
out_num += 1;
out_sg = sg2;
}
}
/*
* If host wants to see the guest header as is, we can
* pass it on unchanged. Otherwise, copy just the parts
* that host is interested in.
*/
assert(n->host_hdr_len <= n->guest_hdr_len);
if (n->host_hdr_len != n->guest_hdr_len) {
unsigned sg_num = iov_copy(sg, ARRAY_SIZE(sg),
out_sg, out_num,
0, n->host_hdr_len);
sg_num += iov_copy(sg + sg_num, ARRAY_SIZE(sg) - sg_num,
out_sg, out_num,
n->guest_hdr_len, -1);
out_num = sg_num;
out_sg = sg;
}
ret = qemu_sendv_packet_async(qemu_get_subqueue(n->nic, queue_index),
out_sg, out_num, virtio_net_tx_complete);
if (ret == 0) {
virtio_queue_set_notification(q->tx_vq, 0);
q->async_tx.elem = elem;
return -EBUSY;
}
drop:
virtqueue_push(q->tx_vq, elem, 0);
virtio_notify(vdev, q->tx_vq);
g_free(elem);
if (++num_packets >= n->tx_burst) {
break;
}
}
return num_packets;
} | 0 | [
"CWE-703"
]
| qemu | abe300d9d894f7138e1af7c8e9c88c04bfe98b37 | 196,133,472,026,686,500,000,000,000,000,000,000,000 | 94 | virtio-net: fix map leaking on error during receive
Commit bedd7e93d0196 ("virtio-net: fix use after unmap/free for sg")
tries to fix the use after free of the sg by caching the virtqueue
elements in an array and unmap them at once after receiving the
packets, But it forgot to unmap the cached elements on error which
will lead to leaking of mapping and other unexpected results.
Fixing this by detaching the cached elements on error. This addresses
CVE-2022-26353.
Reported-by: Victor Tom <[email protected]>
Cc: [email protected]
Fixes: CVE-2022-26353
Fixes: bedd7e93d0196 ("virtio-net: fix use after unmap/free for sg")
Reviewed-by: Michael S. Tsirkin <[email protected]>
Signed-off-by: Jason Wang <[email protected]> |
clear_animation_timeout (NMApplet *applet)
{
if (applet->animation_id) {
g_source_remove (applet->animation_id);
applet->animation_id = 0;
applet->animation_step = 0;
}
} | 0 | [
"CWE-200"
]
| network-manager-applet | 8627880e07c8345f69ed639325280c7f62a8f894 | 321,256,424,116,235,700,000,000,000,000,000,000,000 | 8 | editor: prevent any registration of objects on the system bus
D-Bus access-control is name-based; so requests for a specific name
are allowed/denied based on the rules in /etc/dbus-1/system.d. But
apparently apps still get a non-named service on the bus, and if we
register *any* object even though we don't have a named service,
dbus and dbus-glib will happily proxy signals. Since the connection
editor shouldn't ever expose anything having to do with connections
on any bus, make sure that's the case. |
static void mtree_print_phys_entries(int start, int end, int skip, int ptr)
{
if (start == end - 1) {
qemu_printf("\t%3d ", start);
} else {
qemu_printf("\t%3d..%-3d ", start, end - 1);
}
qemu_printf(" skip=%d ", skip);
if (ptr == PHYS_MAP_NODE_NIL) {
qemu_printf(" ptr=NIL");
} else if (!skip) {
qemu_printf(" ptr=#%d", ptr);
} else {
qemu_printf(" ptr=[%d]", ptr);
}
qemu_printf("\n");
} | 0 | [
"CWE-787"
]
| qemu | 4bfb024bc76973d40a359476dc0291f46e435442 | 145,435,223,755,056,100,000,000,000,000,000,000,000 | 17 | memory: clamp cached translation in case it points to an MMIO region
In using the address_space_translate_internal API, address_space_cache_init
forgot one piece of advice that can be found in the code for
address_space_translate_internal:
/* MMIO registers can be expected to perform full-width accesses based only
* on their address, without considering adjacent registers that could
* decode to completely different MemoryRegions. When such registers
* exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
* regions overlap wildly. For this reason we cannot clamp the accesses
* here.
*
* If the length is small (as is the case for address_space_ldl/stl),
* everything works fine. If the incoming length is large, however,
* the caller really has to do the clamping through memory_access_size.
*/
address_space_cache_init is exactly one such case where "the incoming length
is large", therefore we need to clamp the resulting length---not to
memory_access_size though, since we are not doing an access yet, but to
the size of the resulting section. This ensures that subsequent accesses
to the cached MemoryRegionSection will be in range.
With this patch, the enclosed testcase notices that the used ring does
not fit into the MSI-X table and prints a "qemu-system-x86_64: Cannot map used"
error.
Signed-off-by: Paolo Bonzini <[email protected]> |
Get a specific body section's MIME headers */
PHP_FUNCTION(imap_fetchmime)
{
zval *streamind;
zend_long msgno, flags = 0;
pils *imap_le_struct;
char *body;
zend_string *sec;
unsigned long len;
int argc = ZEND_NUM_ARGS();
if (zend_parse_parameters(argc, "rlS|l", &streamind, &msgno, &sec, &flags) == FAILURE) {
return;
}
if (flags && ((flags & ~(FT_UID|FT_PEEK|FT_INTERNAL)) != 0)) {
php_error_docref(NULL, E_WARNING, "invalid value for the options parameter");
RETURN_FALSE;
}
if ((imap_le_struct = (pils *)zend_fetch_resource(Z_RES_P(streamind), "imap", le_imap)) == NULL) {
RETURN_FALSE;
}
if (argc < 4 || !(flags & FT_UID)) {
/* only perform the check if the msgno is a message number and not a UID */
PHP_IMAP_CHECK_MSGNO(msgno);
}
body = mail_fetch_mime(imap_le_struct->imap_stream, msgno, ZSTR_VAL(sec), &len, (argc == 4 ? flags : NIL));
if (!body) {
php_error_docref(NULL, E_WARNING, "No body MIME information available");
RETURN_FALSE;
}
RETVAL_STRINGL(body, len); | 0 | [
"CWE-88"
]
| php-src | 336d2086a9189006909ae06c7e95902d7d5ff77e | 226,274,175,347,776,960,000,000,000,000,000,000,000 | 36 | Disable rsh/ssh functionality in imap by default (bug #77153) |
ATPrepSetStatistics(Relation rel, const char *colName, Node *newValue, LOCKMODE lockmode)
{
/*
* We do our own permission checking because (a) we want to allow SET
* STATISTICS on indexes (for expressional index columns), and (b) we want
* to allow SET STATISTICS on system catalogs without requiring
* allowSystemTableMods to be turned on.
*/
if (rel->rd_rel->relkind != RELKIND_RELATION &&
rel->rd_rel->relkind != RELKIND_MATVIEW &&
rel->rd_rel->relkind != RELKIND_INDEX &&
rel->rd_rel->relkind != RELKIND_FOREIGN_TABLE)
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("\"%s\" is not a table, materialized view, index, or foreign table",
RelationGetRelationName(rel))));
/* Permissions checks */
if (!pg_class_ownercheck(RelationGetRelid(rel), GetUserId()))
aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_CLASS,
RelationGetRelationName(rel));
} | 0 | [
"CWE-362"
]
| postgres | 5f173040e324f6c2eebb90d86cf1b0cdb5890f0a | 12,627,778,135,943,234,000,000,000,000,000,000,000 | 22 | Avoid repeated name lookups during table and index DDL.
If the name lookups come to different conclusions due to concurrent
activity, we might perform some parts of the DDL on a different table
than other parts. At least in the case of CREATE INDEX, this can be
used to cause the permissions checks to be performed against a
different table than the index creation, allowing for a privilege
escalation attack.
This changes the calling convention for DefineIndex, CreateTrigger,
transformIndexStmt, transformAlterTableStmt, CheckIndexCompatible
(in 9.2 and newer), and AlterTable (in 9.1 and older). In addition,
CheckRelationOwnership is removed in 9.2 and newer and the calling
convention is changed in older branches. A field has also been added
to the Constraint node (FkConstraint in 8.4). Third-party code calling
these functions or using the Constraint node will require updating.
Report by Andres Freund. Patch by Robert Haas and Andres Freund,
reviewed by Tom Lane.
Security: CVE-2014-0062 |
bind_socket(struct TCP_Server_Info *server)
{
int rc = 0;
if (server->srcaddr.ss_family != AF_UNSPEC) {
/* Bind to the specified local IP address */
struct socket *socket = server->ssocket;
rc = socket->ops->bind(socket,
(struct sockaddr *) &server->srcaddr,
sizeof(server->srcaddr));
if (rc < 0) {
struct sockaddr_in *saddr4;
struct sockaddr_in6 *saddr6;
saddr4 = (struct sockaddr_in *)&server->srcaddr;
saddr6 = (struct sockaddr_in6 *)&server->srcaddr;
if (saddr6->sin6_family == AF_INET6)
cifs_dbg(VFS, "Failed to bind to: %pI6c, error: %d\n",
&saddr6->sin6_addr, rc);
else
cifs_dbg(VFS, "Failed to bind to: %pI4, error: %d\n",
&saddr4->sin_addr.s_addr, rc);
}
}
return rc;
} | 0 | [
"CWE-703",
"CWE-189"
]
| linux | 1fc29bacedeabb278080e31bb9c1ecb49f143c3b | 132,184,379,964,997,880,000,000,000,000,000,000,000 | 24 | cifs: fix off-by-one bug in build_unc_path_to_root
commit 839db3d10a (cifs: fix up handling of prefixpath= option) changed
the code such that the vol->prepath no longer contained a leading
delimiter and then fixed up the places that accessed that field to
account for that change.
One spot in build_unc_path_to_root was missed however. When doing the
pointer addition on pos, that patch failed to account for the fact that
we had already incremented "pos" by one when adding the length of the
prepath. This caused a buffer overrun by one byte.
This patch fixes the problem by correcting the handling of "pos".
Cc: <[email protected]> # v3.8+
Reported-by: Marcus Moeller <[email protected]>
Reported-by: Ken Fallon <[email protected]>
Signed-off-by: Jeff Layton <[email protected]>
Signed-off-by: Steve French <[email protected]> |
static ut64 get_vaddr(RBinFile *bf, ut64 baddr, ut64 paddr, ut64 vaddr) {
return vaddr;
} | 0 | [
"CWE-400",
"CWE-703"
]
| radare2 | 634b886e84a5c568d243e744becc6b3223e089cf | 178,347,124,001,507,530,000,000,000,000,000,000,000 | 3 | Fix DoS in PE/QNX/DYLDCACHE/PSX parsers ##crash
* Reported by lazymio
* Reproducer: AAA4AAAAAB4= |
static double mp_median(_cimg_math_parser& mp) {
const unsigned int i_end = (unsigned int)mp.opcode[2];
switch (i_end - 3) {
case 1 : return _mp_arg(3);
case 2 : return cimg::median(_mp_arg(3),_mp_arg(4));
case 3 : return cimg::median(_mp_arg(3),_mp_arg(4),_mp_arg(5));
case 5 : return cimg::median(_mp_arg(3),_mp_arg(4),_mp_arg(5),_mp_arg(6),_mp_arg(7));
case 7 : return cimg::median(_mp_arg(3),_mp_arg(4),_mp_arg(5),_mp_arg(6),_mp_arg(7),_mp_arg(8),_mp_arg(9));
case 9 : return cimg::median(_mp_arg(3),_mp_arg(4),_mp_arg(5),_mp_arg(6),_mp_arg(7),_mp_arg(8),_mp_arg(9),
_mp_arg(10),_mp_arg(11));
case 13 : return cimg::median(_mp_arg(3),_mp_arg(4),_mp_arg(5),_mp_arg(6),_mp_arg(7),_mp_arg(8),_mp_arg(9),
_mp_arg(10),_mp_arg(11),_mp_arg(12),_mp_arg(13),_mp_arg(14),_mp_arg(15));
}
CImg<doubleT> vals(i_end - 3);
double *p = vals.data();
for (unsigned int i = 3; i<i_end; ++i) *(p++) = _mp_arg(i);
return vals.median();
} | 0 | [
"CWE-770"
]
| cimg | 619cb58dd90b4e03ac68286c70ed98acbefd1c90 | 318,809,735,649,150,640,000,000,000,000,000,000,000 | 18 | CImg<>::load_bmp() and CImg<>::load_pandore(): Check that dimensions encoded in file does not exceed file size. |
static void perf_event_addr_filters_apply(struct perf_event *event)
{
struct perf_addr_filters_head *ifh = perf_event_addr_filters(event);
struct task_struct *task = READ_ONCE(event->ctx->task);
struct perf_addr_filter *filter;
struct mm_struct *mm = NULL;
unsigned int count = 0;
unsigned long flags;
/*
* We may observe TASK_TOMBSTONE, which means that the event tear-down
* will stop on the parent's child_mutex that our caller is also holding
*/
if (task == TASK_TOMBSTONE)
return;
if (!ifh->nr_file_filters)
return;
mm = get_task_mm(event->ctx->task);
if (!mm)
goto restart;
down_read(&mm->mmap_sem);
raw_spin_lock_irqsave(&ifh->lock, flags);
list_for_each_entry(filter, &ifh->list, entry) {
event->addr_filters_offs[count] = 0;
/*
* Adjust base offset if the filter is associated to a binary
* that needs to be mapped:
*/
if (filter->inode)
event->addr_filters_offs[count] =
perf_addr_filter_apply(filter, mm);
count++;
}
event->addr_filters_gen++;
raw_spin_unlock_irqrestore(&ifh->lock, flags);
up_read(&mm->mmap_sem);
mmput(mm);
restart:
perf_event_stop(event, 1);
} | 0 | [
"CWE-190"
]
| linux | 1572e45a924f254d9570093abde46430c3172e3d | 81,925,925,197,294,100,000,000,000,000,000,000,000 | 50 | perf/core: Fix the perf_cpu_time_max_percent check
Use "proc_dointvec_minmax" instead of "proc_dointvec" to check the input
value from user-space.
If not, we can set a big value and some vars will overflow like
"sysctl_perf_event_sample_rate" which will cause a lot of unexpected
problems.
Signed-off-by: Tan Xiaojun <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Cc: <[email protected]>
Cc: <[email protected]>
Cc: Alexander Shishkin <[email protected]>
Cc: Arnaldo Carvalho de Melo <[email protected]>
Cc: Jiri Olsa <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Stephane Eranian <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: Vince Weaver <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]> |
static apr_status_t add_buffered_data(h2_stream *stream, apr_off_t requested,
apr_off_t *plen, int *peos, int *is_all,
h2_headers **pheaders)
{
apr_bucket *b, *e;
*peos = 0;
*plen = 0;
*is_all = 0;
if (pheaders) {
*pheaders = NULL;
}
H2_STREAM_OUT_LOG(APLOG_TRACE2, stream, "add_buffered_data");
b = APR_BRIGADE_FIRST(stream->out_buffer);
while (b != APR_BRIGADE_SENTINEL(stream->out_buffer)) {
e = APR_BUCKET_NEXT(b);
if (APR_BUCKET_IS_METADATA(b)) {
if (APR_BUCKET_IS_FLUSH(b)) {
APR_BUCKET_REMOVE(b);
apr_bucket_destroy(b);
}
else if (APR_BUCKET_IS_EOS(b)) {
*peos = 1;
return APR_SUCCESS;
}
else if (H2_BUCKET_IS_HEADERS(b)) {
if (*plen > 0) {
/* data before the response, can only return up to here */
return APR_SUCCESS;
}
else if (pheaders) {
*pheaders = h2_bucket_headers_get(b);
APR_BUCKET_REMOVE(b);
apr_bucket_destroy(b);
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c,
H2_STRM_MSG(stream, "prep, -> response %d"),
(*pheaders)->status);
return APR_SUCCESS;
}
else {
return APR_EAGAIN;
}
}
}
else if (b->length == 0) {
APR_BUCKET_REMOVE(b);
apr_bucket_destroy(b);
}
else {
ap_assert(b->length != (apr_size_t)-1);
*plen += b->length;
if (*plen >= requested) {
*plen = requested;
return APR_SUCCESS;
}
}
b = e;
}
*is_all = 1;
return APR_SUCCESS;
} | 0 | [
"CWE-770"
]
| mod_h2 | dd05d49abe0f67512ce9ed5ba422d7711effecfb | 51,145,852,825,175,780,000,000,000,000,000,000,000 | 62 | * fixes Timeout vs. KeepAliveTimeout behaviour, see PR 63534 (for trunk now,
mpm event backport to 2.4.x up for vote).
* Fixes stream cleanup when connection throttling is in place.
* Counts stream resets by client on streams initiated by client as cause
for connection throttling.
* Header length checks are now logged similar to HTTP/1.1 protocol handler (thanks @mkaufmann)
* Header length is checked also on the merged value from several header instances
and results in a 431 response. |
bool TABLE_LIST::prepare_view_security_context(THD *thd)
{
DBUG_ENTER("TABLE_LIST::prepare_view_security_context");
DBUG_PRINT("enter", ("table: %s", alias));
DBUG_ASSERT(!prelocking_placeholder && view);
if (view_suid)
{
DBUG_PRINT("info", ("This table is suid view => load contest"));
DBUG_ASSERT(view && view_sctx);
if (acl_getroot(view_sctx, definer.user.str, definer.host.str,
definer.host.str, thd->db))
{
if ((thd->lex->sql_command == SQLCOM_SHOW_CREATE) ||
(thd->lex->sql_command == SQLCOM_SHOW_FIELDS))
{
push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE,
ER_NO_SUCH_USER,
ER_THD(thd, ER_NO_SUCH_USER),
definer.user.str, definer.host.str);
}
else
{
if (thd->security_ctx->master_access & SUPER_ACL)
{
my_error(ER_NO_SUCH_USER, MYF(0), definer.user.str, definer.host.str);
}
else
{
if (thd->password == 2)
my_error(ER_ACCESS_DENIED_NO_PASSWORD_ERROR, MYF(0),
thd->security_ctx->priv_user,
thd->security_ctx->priv_host);
else
my_error(ER_ACCESS_DENIED_ERROR, MYF(0),
thd->security_ctx->priv_user,
thd->security_ctx->priv_host,
(thd->password ? ER_THD(thd, ER_YES) :
ER_THD(thd, ER_NO)));
}
DBUG_RETURN(TRUE);
}
}
}
DBUG_RETURN(FALSE);
} | 0 | [
"CWE-416"
]
| server | 4681b6f2d8c82b4ec5cf115e83698251963d80d5 | 140,672,365,116,198,800,000,000,000,000,000,000,000 | 48 | MDEV-26281 ASAN use-after-poison when complex conversion is involved in blob
the bug was that in_vector array in Item_func_in was allocated in the
statement arena, not in the table->expr_arena.
revert part of the 5acd391e8b2d. Instead, change the arena correctly
in fix_all_session_vcol_exprs().
Remove TABLE_ARENA, that was introduced in 5acd391e8b2d to force
item tree changes to be rolled back (because they were allocated in the
wrong arena and didn't persist. now they do) |
opj_bool pi_create_encode(opj_pi_iterator_t *pi, opj_cp_t *cp, int tileno,
int pino, int tpnum, int tppos, J2K_T2_MODE t2_mode, int cur_totnum_tp)
{
char prog[4];
int i;
int incr_top = 1, resetX = 0;
opj_tcp_t *tcps = &cp->tcps[tileno];
opj_poc_t *tcp = &tcps->pocs[pino];
pi[pino].first = 1;
pi[pino].poc.prg = tcp->prg;
switch (tcp->prg) {
case CPRL:
strncpy(prog, "CPRL", 4);
break;
case LRCP:
strncpy(prog, "LRCP", 4);
break;
case PCRL:
strncpy(prog, "PCRL", 4);
break;
case RLCP:
strncpy(prog, "RLCP", 4);
break;
case RPCL:
strncpy(prog, "RPCL", 4);
break;
case PROG_UNKNOWN:
return OPJ_TRUE;
}
if (!(cp->tp_on && ((!cp->cinema && (t2_mode == FINAL_PASS)) || cp->cinema))) {
pi[pino].poc.resno0 = tcp->resS;
pi[pino].poc.resno1 = tcp->resE;
pi[pino].poc.compno0 = tcp->compS;
pi[pino].poc.compno1 = tcp->compE;
pi[pino].poc.layno0 = tcp->layS;
pi[pino].poc.layno1 = tcp->layE;
pi[pino].poc.precno0 = tcp->prcS;
pi[pino].poc.precno1 = tcp->prcE;
pi[pino].poc.tx0 = tcp->txS;
pi[pino].poc.ty0 = tcp->tyS;
pi[pino].poc.tx1 = tcp->txE;
pi[pino].poc.ty1 = tcp->tyE;
} else {
if (tpnum < cur_totnum_tp) {
for (i = 3; i >= 0; i--) {
switch (prog[i]) {
case 'C':
if (i > tppos) {
pi[pino].poc.compno0 = tcp->compS;
pi[pino].poc.compno1 = tcp->compE;
} else {
if (tpnum == 0) {
tcp->comp_t = tcp->compS;
pi[pino].poc.compno0 = tcp->comp_t;
pi[pino].poc.compno1 = tcp->comp_t + 1;
tcp->comp_t += 1;
} else {
if (incr_top == 1) {
if (tcp->comp_t == tcp->compE) {
tcp->comp_t = tcp->compS;
pi[pino].poc.compno0 = tcp->comp_t;
pi[pino].poc.compno1 = tcp->comp_t + 1;
tcp->comp_t += 1;
incr_top = 1;
} else {
pi[pino].poc.compno0 = tcp->comp_t;
pi[pino].poc.compno1 = tcp->comp_t + 1;
tcp->comp_t += 1;
incr_top = 0;
}
} else {
pi[pino].poc.compno0 = tcp->comp_t - 1;
pi[pino].poc.compno1 = tcp->comp_t;
}
}
}
break;
case 'R':
if (i > tppos) {
pi[pino].poc.resno0 = tcp->resS;
pi[pino].poc.resno1 = tcp->resE;
} else {
if (tpnum == 0) {
tcp->res_t = tcp->resS;
pi[pino].poc.resno0 = tcp->res_t;
pi[pino].poc.resno1 = tcp->res_t + 1;
tcp->res_t += 1;
} else {
if (incr_top == 1) {
if (tcp->res_t == tcp->resE) {
tcp->res_t = tcp->resS;
pi[pino].poc.resno0 = tcp->res_t;
pi[pino].poc.resno1 = tcp->res_t + 1;
tcp->res_t += 1;
incr_top = 1;
} else {
pi[pino].poc.resno0 = tcp->res_t;
pi[pino].poc.resno1 = tcp->res_t + 1;
tcp->res_t += 1;
incr_top = 0;
}
} else {
pi[pino].poc.resno0 = tcp->res_t - 1;
pi[pino].poc.resno1 = tcp->res_t;
}
}
}
break;
case 'L':
if (i > tppos) {
pi[pino].poc.layno0 = tcp->layS;
pi[pino].poc.layno1 = tcp->layE;
} else {
if (tpnum == 0) {
tcp->lay_t = tcp->layS;
pi[pino].poc.layno0 = tcp->lay_t;
pi[pino].poc.layno1 = tcp->lay_t + 1;
tcp->lay_t += 1;
} else {
if (incr_top == 1) {
if (tcp->lay_t == tcp->layE) {
tcp->lay_t = tcp->layS;
pi[pino].poc.layno0 = tcp->lay_t;
pi[pino].poc.layno1 = tcp->lay_t + 1;
tcp->lay_t += 1;
incr_top = 1;
} else {
pi[pino].poc.layno0 = tcp->lay_t;
pi[pino].poc.layno1 = tcp->lay_t + 1;
tcp->lay_t += 1;
incr_top = 0;
}
} else {
pi[pino].poc.layno0 = tcp->lay_t - 1;
pi[pino].poc.layno1 = tcp->lay_t;
}
}
}
break;
case 'P':
switch (tcp->prg) {
case LRCP:
case RLCP:
if (i > tppos) {
pi[pino].poc.precno0 = tcp->prcS;
pi[pino].poc.precno1 = tcp->prcE;
} else {
if (tpnum == 0) {
tcp->prc_t = tcp->prcS;
pi[pino].poc.precno0 = tcp->prc_t;
pi[pino].poc.precno1 = tcp->prc_t + 1;
tcp->prc_t += 1;
} else {
if (incr_top == 1) {
if (tcp->prc_t == tcp->prcE) {
tcp->prc_t = tcp->prcS;
pi[pino].poc.precno0 = tcp->prc_t;
pi[pino].poc.precno1 = tcp->prc_t + 1;
tcp->prc_t += 1;
incr_top = 1;
} else {
pi[pino].poc.precno0 = tcp->prc_t;
pi[pino].poc.precno1 = tcp->prc_t + 1;
tcp->prc_t += 1;
incr_top = 0;
}
} else {
pi[pino].poc.precno0 = tcp->prc_t - 1;
pi[pino].poc.precno1 = tcp->prc_t;
}
}
}
break;
default:
if (i > tppos) {
pi[pino].poc.tx0 = tcp->txS;
pi[pino].poc.ty0 = tcp->tyS;
pi[pino].poc.tx1 = tcp->txE;
pi[pino].poc.ty1 = tcp->tyE;
} else {
if (tpnum == 0) {
tcp->tx0_t = tcp->txS;
tcp->ty0_t = tcp->tyS;
pi[pino].poc.tx0 = tcp->tx0_t;
pi[pino].poc.tx1 = tcp->tx0_t + tcp->dx - (tcp->tx0_t % tcp->dx);
pi[pino].poc.ty0 = tcp->ty0_t;
pi[pino].poc.ty1 = tcp->ty0_t + tcp->dy - (tcp->ty0_t % tcp->dy);
tcp->tx0_t = pi[pino].poc.tx1;
tcp->ty0_t = pi[pino].poc.ty1;
} else {
if (incr_top == 1) {
if (tcp->tx0_t >= tcp->txE) {
if (tcp->ty0_t >= tcp->tyE) {
tcp->ty0_t = tcp->tyS;
pi[pino].poc.ty0 = tcp->ty0_t;
pi[pino].poc.ty1 = tcp->ty0_t + tcp->dy - (tcp->ty0_t % tcp->dy);
tcp->ty0_t = pi[pino].poc.ty1;
incr_top = 1;
resetX = 1;
} else {
pi[pino].poc.ty0 = tcp->ty0_t;
pi[pino].poc.ty1 = tcp->ty0_t + tcp->dy - (tcp->ty0_t % tcp->dy);
tcp->ty0_t = pi[pino].poc.ty1;
incr_top = 0;
resetX = 1;
}
if (resetX == 1) {
tcp->tx0_t = tcp->txS;
pi[pino].poc.tx0 = tcp->tx0_t;
pi[pino].poc.tx1 = tcp->tx0_t + tcp->dx - (tcp->tx0_t % tcp->dx);
tcp->tx0_t = pi[pino].poc.tx1;
}
} else {
pi[pino].poc.tx0 = tcp->tx0_t;
pi[pino].poc.tx1 = tcp->tx0_t + tcp->dx - (tcp->tx0_t % tcp->dx);
tcp->tx0_t = pi[pino].poc.tx1;
pi[pino].poc.ty0 = tcp->ty0_t - tcp->dy - (tcp->ty0_t % tcp->dy);
pi[pino].poc.ty1 = tcp->ty0_t ;
incr_top = 0;
}
} else {
pi[pino].poc.tx0 = tcp->tx0_t - tcp->dx - (tcp->tx0_t % tcp->dx);
pi[pino].poc.tx1 = tcp->tx0_t ;
pi[pino].poc.ty0 = tcp->ty0_t - tcp->dy - (tcp->ty0_t % tcp->dy);
pi[pino].poc.ty1 = tcp->ty0_t ;
}
}
}
break;
}
break;
}
}
}
}
return OPJ_FALSE;
} | 0 | [
"CWE-369"
]
| openjpeg | c5bd64ea146162967c29bd2af0cbb845ba3eaaaf | 113,814,955,960,343,740,000,000,000,000,000,000,000 | 243 | [MJ2] To avoid divisions by zero / undefined behaviour on shift
Signed-off-by: Young_X <[email protected]> |
virtual int get_decrypt_filter(std::unique_ptr<RGWGetObj_Filter>* filter, RGWGetObj_Filter* cb, bufferlist* manifest_bl) {
*filter = nullptr;
return 0;
} | 0 | [
"CWE-770"
]
| ceph | ab29bed2fc9f961fe895de1086a8208e21ddaddc | 45,109,217,561,300,890,000,000,000,000,000,000,000 | 4 | rgw: fix issues with 'enforce bounds' patch
The patch to enforce bounds on max-keys/max-uploads/max-parts had a few
issues that would prevent us from compiling it. Instead of changing the
code provided by the submitter, we're addressing them in a separate
commit to maintain the DCO.
Signed-off-by: Joao Eduardo Luis <[email protected]>
Signed-off-by: Abhishek Lekshmanan <[email protected]>
(cherry picked from commit 29bc434a6a81a2e5c5b8cfc4c8d5c82ca5bf538a)
mimic specific fixes:
As the largeish change from master g_conf() isn't in mimic yet, use the g_conf
global structure, also make rgw_op use the value from req_info ceph context as
we do for all the requests |
/*
 * Drain the pending device-to-host completion rings and, for every flow
 * ring whose tx-status work finished, decide whether to reschedule TX
 * work.  Always returns 0.
 */
int brcmf_proto_msgbuf_rx_trigger(struct device *dev)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_msgbuf *msgbuf;
	struct brcmf_commonring *ring;
	u32 flowid;

	msgbuf = (struct brcmf_msgbuf *)bus_if->drvr->proto->pd;

	/* Process the three D2H completion rings in turn. */
	brcmf_msgbuf_process_rx(msgbuf,
				msgbuf->commonrings[BRCMF_D2H_MSGRING_RX_COMPLETE]);
	brcmf_msgbuf_process_rx(msgbuf,
				msgbuf->commonrings[BRCMF_D2H_MSGRING_TX_COMPLETE]);
	brcmf_msgbuf_process_rx(msgbuf,
				msgbuf->commonrings[BRCMF_D2H_MSGRING_CONTROL_COMPLETE]);

	for_each_set_bit(flowid, msgbuf->txstatus_done_map,
			 msgbuf->max_flowrings) {
		int qlen;

		clear_bit(flowid, msgbuf->txstatus_done_map);
		ring = msgbuf->flowrings[flowid];
		qlen = brcmf_flowring_qlen(msgbuf->flow, flowid);
		/* Kick TX when the queue is long, or when anything is queued
		 * and only few transmissions are still outstanding.
		 */
		if (qlen > BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS ||
		    (qlen && atomic_read(&ring->outstanding_tx) <
			     BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS))
			brcmf_msgbuf_schedule_txdata(msgbuf, flowid, true);
	}

	return 0;
}
"CWE-20"
]
| linux | a4176ec356c73a46c07c181c6d04039fafa34a9f | 45,538,784,885,332,210,000,000,000,000,000,000,000 | 30 | brcmfmac: add subtype check for event handling in data path
For USB there is no separate channel being used to pass events
from firmware to the host driver and as such are passed over the
data path. In order to detect mock event messages an additional
check is needed on event subtype. This check is added conditionally
using unlikely() keyword.
Reviewed-by: Hante Meuleman <[email protected]>
Reviewed-by: Pieter-Paul Giesberts <[email protected]>
Reviewed-by: Franky Lin <[email protected]>
Signed-off-by: Arend van Spriel <[email protected]>
Signed-off-by: Kalle Valo <[email protected]> |
win_free_mem(
    win_T	*win,
    int		*dirp,		// set to 'v' or 'h' for direction if 'ea'
    tabpage_T	*tp)		// tab page "win" is in, NULL for current
{
    frame_T	*frame = win->w_frame;
    tabpage_T	*owner_tp = (tp == NULL) ? curtab : tp;
    win_T	*next_wp;

    // Detach the window and its frame from the tree of frames, then free
    // the memory of both.
    next_wp = winframe_remove(win, dirp, tp);
    vim_free(frame);
    win_free(win, tp);

    // If the tab page's current window was just removed, make the window
    // returned by winframe_remove() the new current one.
    if (owner_tp->tp_curwin == win)
	owner_tp->tp_curwin = next_wp;

    return next_wp;
}
| vim | 0f6e28f686dbb59ab3b562408ab9b2234797b9b1 | 134,399,941,303,225,380,000,000,000,000,000,000,000 | 22 | patch 8.2.4428: crash when switching tabpage while in the cmdline window
Problem: Crash when switching tabpage while in the cmdline window.
Solution: Disallow switching tabpage when in the cmdline window. |
// Reference CPU implementation of the gradient of morphological dilation
// with respect to the input.
//
// For every output element the forward op selected the maximum of
// input(h_in, w_in, c) + filter(h, w, c) over the (dilated) filter window;
// here the incoming gradient out_backprop(b, h_out, w_out, c) is routed back
// to that argmax input position.  When several positions tie, the last one
// visited (largest h * filter_cols + w) wins, matching the max-pooling
// backward routines.
//
// NOTE(review): this functor indexes all tensors with values derived from
// the given strides/rates/padding and performs no bounds validation of its
// own; callers must validate that the argument shapes are mutually
// consistent before invoking it (inconsistent shapes would cause
// out-of-bounds reads/writes here).
void operator()(const CPUDevice& d, typename TTypes<T, 4>::ConstTensor input,
                typename TTypes<T, 3>::ConstTensor filter,
                typename TTypes<T, 4>::ConstTensor out_backprop,
                int stride_rows, int stride_cols, int rate_rows,
                int rate_cols, int pad_top, int pad_left,
                typename TTypes<T, 4>::Tensor in_backprop) {
  const int batch = input.dimension(0);
  const int input_rows = input.dimension(1);
  const int input_cols = input.dimension(2);
  const int depth = input.dimension(3);

  const int filter_rows = filter.dimension(0);
  const int filter_cols = filter.dimension(1);

  const int output_rows = out_backprop.dimension(1);
  const int output_cols = out_backprop.dimension(2);

  // Initialize gradient with all zeros.
  in_backprop.setZero();

  // This is a reference implementation, likely to be slow.
  // TODO(gpapan): Write multi-threaded implementation.
  // In the case of multiple argmax branches, we only back-propagate along
  // the last branch, i.e., the one with largest value of
  // `h * filter_cols + w`, similarly to the max-pooling backward routines.
  for (int b = 0; b < batch; ++b) {
    for (int h_out = 0; h_out < output_rows; ++h_out) {
      int h_beg = h_out * stride_rows - pad_top;
      for (int w_out = 0; w_out < output_cols; ++w_out) {
        int w_beg = w_out * stride_cols - pad_left;
        // The channel loop variable is named `c` so it does not shadow the
        // CPUDevice parameter `d` (previously a -Wshadow violation).
        for (int c = 0; c < depth; ++c) {
          // Recompute the forward argmax for this output element.
          T cur_val = Eigen::NumTraits<T>::lowest();
          int h_in_max = (h_beg < 0) ? 0 : h_beg;
          int w_in_max = (w_beg < 0) ? 0 : w_beg;
          for (int h = 0; h < filter_rows; ++h) {
            const int h_in = h_beg + h * rate_rows;
            if (h_in >= 0 && h_in < input_rows) {
              for (int w = 0; w < filter_cols; ++w) {
                const int w_in = w_beg + w * rate_cols;
                if (w_in >= 0 && w_in < input_cols) {
                  const T val = input(b, h_in, w_in, c) + filter(h, w, c);
                  if (val > cur_val) {
                    cur_val = val;
                    h_in_max = h_in;
                    w_in_max = w_in;
                  }
                }
              }
            }
          }
          // Route the gradient to the winning input position.
          in_backprop(b, h_in_max, w_in_max, c) +=
              out_backprop(b, h_out, w_out, c);
        }
      }
    }
  }
}
"CWE-787"
]
| tensorflow | 3f6fe4dfef6f57e768260b48166c27d148f3015f | 108,868,301,561,641,880,000,000,000,000,000,000,000 | 57 | Add missing validations in dillation ops.
PiperOrigin-RevId: 372037158
Change-Id: I4ee304c84a02550c030288a6534000b934fc1599 |
/* Look up a connected server by chat-network name (case-insensitive
 * compare against connrec->chatnet).  Returns NULL when the name is
 * empty or no server matches. */
SERVER_REC *server_find_chatnet(const char *chatnet)
{
	GSList *node;

	g_return_val_if_fail(chatnet != NULL, NULL);
	if (*chatnet == '\0')
		return NULL;

	for (node = servers; node != NULL; node = node->next) {
		SERVER_REC *rec = node->data;

		if (rec->connrec->chatnet != NULL &&
		    g_strcasecmp(rec->connrec->chatnet, chatnet) == 0)
			return rec;
	}

	return NULL;
}
"CWE-20"
]
| irssi-proxy | 85bbc05b21678e80423815d2ef1dfe26208491ab | 4,726,573,373,623,835,000,000,000,000,000,000,000 | 17 | Check if an SSL certificate matches the hostname of the server we are connecting to
git-svn-id: http://svn.irssi.org/repos/irssi/trunk@5104 dbcabf3a-b0e7-0310-adc4-f8d773084564 |
/*
 * Probe callback for the ATI Remote Wonder II.
 *
 * The driver hard-codes a layout of two interfaces with (at least) one
 * endpoint each.  A malicious or broken device may present fewer, in
 * which case dereferencing alt->endpoint[0] or the result of
 * usb_ifnum_to_if(udev, 1) accesses invalid memory.  The descriptor is
 * therefore validated before any of it is used (CVE-2016-2185 class of
 * crash).
 */
static int ati_remote2_probe(struct usb_interface *interface, const struct usb_device_id *id)
{
	struct usb_device *udev = interface_to_usbdev(interface);
	struct usb_host_interface *alt = interface->cur_altsetting;
	struct ati_remote2 *ar2;
	int r;

	if (alt->desc.bInterfaceNumber)
		return -ENODEV;

	/* Reject descriptors without the endpoint we are about to use. */
	if (alt->desc.bNumEndpoints < 1)
		return -ENODEV;

	ar2 = kzalloc(sizeof (struct ati_remote2), GFP_KERNEL);
	if (!ar2)
		return -ENOMEM;

	ar2->udev = udev;

	ar2->intf[0] = interface;
	ar2->ep[0] = &alt->endpoint[0].desc;

	/* The second interface may be absent or lack endpoints entirely;
	 * bail out instead of dereferencing a NULL interface or an empty
	 * endpoint array below.
	 */
	ar2->intf[1] = usb_ifnum_to_if(udev, 1);
	if (!ar2->intf[1] ||
	    ar2->intf[1]->cur_altsetting->desc.bNumEndpoints < 1) {
		r = -ENODEV;
		goto fail1;
	}
	r = usb_driver_claim_interface(&ati_remote2_driver, ar2->intf[1], ar2);
	if (r)
		goto fail1;
	alt = ar2->intf[1]->cur_altsetting;
	ar2->ep[1] = &alt->endpoint[0].desc;

	r = ati_remote2_urb_init(ar2);
	if (r)
		goto fail2;

	ar2->channel_mask = channel_mask;
	ar2->mode_mask = mode_mask;

	r = ati_remote2_setup(ar2, ar2->channel_mask);
	if (r)
		goto fail2;

	usb_make_path(udev, ar2->phys, sizeof(ar2->phys));
	strlcat(ar2->phys, "/input0", sizeof(ar2->phys));

	strlcat(ar2->name, "ATI Remote Wonder II", sizeof(ar2->name));

	r = sysfs_create_group(&udev->dev.kobj, &ati_remote2_attr_group);
	if (r)
		goto fail2;

	r = ati_remote2_input_init(ar2);
	if (r)
		goto fail3;

	usb_set_intfdata(interface, ar2);

	interface->needs_remote_wakeup = 1;

	return 0;

 fail3:
	sysfs_remove_group(&udev->dev.kobj, &ati_remote2_attr_group);
 fail2:
	ati_remote2_urb_cleanup(ar2);
	usb_driver_release_interface(&ati_remote2_driver, ar2->intf[1]);
 fail1:
	kfree(ar2);

	return r;
}
"CWE-703"
]
| linux | 950336ba3e4a1ffd2ca60d29f6ef386dd2c7351d | 334,891,867,116,156,980,000,000,000,000,000,000,000 | 66 | Input: ati_remote2 - fix crashes on detecting device with invalid descriptor
The ati_remote2 driver expects at least two interfaces with one
endpoint each. If given malicious descriptor that specify one
interface or no endpoints, it will crash in the probe function.
Ensure there is at least two interfaces and one endpoint for each
interface before using it.
The full disclosure: http://seclists.org/bugtraq/2016/Mar/90
Reported-by: Ralf Spenneberg <[email protected]>
Signed-off-by: Vladis Dronov <[email protected]>
Cc: [email protected]
Signed-off-by: Dmitry Torokhov <[email protected]> |
/*
 * Dispatch a byte write to a BLT-engine MMIO register by forwarding it to
 * the matching VGA graphics-controller (GR) register.  The lookup table
 * below encodes exactly the mapping of the original switch statement; the
 * address CIRRUS_MMIO_BLTDESTADDR + 3 is accepted but discarded, and
 * unknown addresses are silently dropped (traced under DEBUG_CIRRUS).
 */
static void cirrus_mmio_blt_write(CirrusVGAState * s, unsigned address,
				  uint8_t value)
{
    static const struct {
	unsigned addr;
	int gr_index;		/* -1: address is valid but write is ignored */
    } blt_reg_map[] = {
	{ CIRRUS_MMIO_BLTBGCOLOR + 0, 0x00 },
	{ CIRRUS_MMIO_BLTBGCOLOR + 1, 0x10 },
	{ CIRRUS_MMIO_BLTBGCOLOR + 2, 0x12 },
	{ CIRRUS_MMIO_BLTBGCOLOR + 3, 0x14 },
	{ CIRRUS_MMIO_BLTFGCOLOR + 0, 0x01 },
	{ CIRRUS_MMIO_BLTFGCOLOR + 1, 0x11 },
	{ CIRRUS_MMIO_BLTFGCOLOR + 2, 0x13 },
	{ CIRRUS_MMIO_BLTFGCOLOR + 3, 0x15 },
	{ CIRRUS_MMIO_BLTWIDTH + 0, 0x20 },
	{ CIRRUS_MMIO_BLTWIDTH + 1, 0x21 },
	{ CIRRUS_MMIO_BLTHEIGHT + 0, 0x22 },
	{ CIRRUS_MMIO_BLTHEIGHT + 1, 0x23 },
	{ CIRRUS_MMIO_BLTDESTPITCH + 0, 0x24 },
	{ CIRRUS_MMIO_BLTDESTPITCH + 1, 0x25 },
	{ CIRRUS_MMIO_BLTSRCPITCH + 0, 0x26 },
	{ CIRRUS_MMIO_BLTSRCPITCH + 1, 0x27 },
	{ CIRRUS_MMIO_BLTDESTADDR + 0, 0x28 },
	{ CIRRUS_MMIO_BLTDESTADDR + 1, 0x29 },
	{ CIRRUS_MMIO_BLTDESTADDR + 2, 0x2a },
	{ CIRRUS_MMIO_BLTDESTADDR + 3, -1 },
	{ CIRRUS_MMIO_BLTSRCADDR + 0, 0x2c },
	{ CIRRUS_MMIO_BLTSRCADDR + 1, 0x2d },
	{ CIRRUS_MMIO_BLTSRCADDR + 2, 0x2e },
	{ CIRRUS_MMIO_BLTWRITEMASK, 0x2f },
	{ CIRRUS_MMIO_BLTMODE, 0x30 },
	{ CIRRUS_MMIO_BLTROP, 0x32 },
	{ CIRRUS_MMIO_BLTMODEEXT, 0x33 },
	{ CIRRUS_MMIO_BLTTRANSPARENTCOLOR + 0, 0x34 },
	{ CIRRUS_MMIO_BLTTRANSPARENTCOLOR + 1, 0x35 },
	{ CIRRUS_MMIO_BLTTRANSPARENTCOLORMASK + 0, 0x38 },
	{ CIRRUS_MMIO_BLTTRANSPARENTCOLORMASK + 1, 0x39 },
	{ CIRRUS_MMIO_BLTSTATUS, 0x31 },
    };
    unsigned i;

    for (i = 0; i < sizeof(blt_reg_map) / sizeof(blt_reg_map[0]); i++) {
	if (blt_reg_map[i].addr == address) {
	    if (blt_reg_map[i].gr_index >= 0)
		cirrus_hook_write_gr(s, blt_reg_map[i].gr_index, value);
	    return;
	}
    }

#ifdef DEBUG_CIRRUS
    printf("cirrus: mmio write - addr 0x%04x val 0x%02x (ignored)\n",
	   address, value);
#endif
}
"CWE-787"
]
| qemu | b2eb849d4b1fdb6f35d5c46958c7f703cf64cfef | 247,476,223,324,605,400,000,000,000,000,000,000,000 | 108 | CVE-2007-1320 - Cirrus LGD-54XX "bitblt" heap overflow
I have just noticed that patch for CVE-2007-1320 has never been applied
to the QEMU CVS. Please find it below.
| Multiple heap-based buffer overflows in the cirrus_invalidate_region
| function in the Cirrus VGA extension in QEMU 0.8.2, as used in Xen and
| possibly other products, might allow local users to execute arbitrary
| code via unspecified vectors related to "attempting to mark
| non-existent regions as dirty," aka the "bitblt" heap overflow.
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@4340 c046a42c-6fe2-441c-8c8c-71466251a162 |
/*
 * Filter lines "line1" to "line2" of the current buffer through the shell
 * command "cmd".
 *
 * "do_in" selects whether the lines are fed to the command, "do_out"
 * whether the command's output is read back:
 *   do_in && do_out:   ":range!cmd"  (filter the lines through cmd)
 *   !do_in && do_out:  ":r !cmd"     (insert the command's output)
 *   do_in && !do_out:  ":w !cmd"     (write the lines to the command)
 * "eap" supplies forced 'ff'/'fenc' for the temp-file reads/writes.
 *
 * Data is exchanged via temp files, or via pipes when FEAT_FILTERPIPE is
 * available and 'shelltemp' allows it.  Marks, folds and the '[ / ']
 * registers are adjusted afterwards; cleanup of temp files happens at the
 * "filterend" label on every path.
 */
do_filter(
    linenr_T line1,
    linenr_T line2,
    exarg_T *eap, // for forced 'ff' and 'fenc'
    char_u *cmd,
    int do_in,
    int do_out)
{
    char_u *itmp = NULL;
    char_u *otmp = NULL;
    linenr_T linecount;
    linenr_T read_linecount;
    pos_T cursor_save;
    char_u *cmd_buf;
    buf_T *old_curbuf = curbuf;
    int shell_flags = 0;
    pos_T orig_start = curbuf->b_op_start;
    pos_T orig_end = curbuf->b_op_end;
    int save_cmod_flags = cmdmod.cmod_flags;
#ifdef FEAT_FILTERPIPE
    int stmp = p_stmp;
#endif
    if (*cmd == NUL) // no filter command
        return;
    // Temporarily disable lockmarks since that's needed to propagate changed
    // regions of the buffer for foldUpdate(), linecount, etc.
    cmdmod.cmod_flags &= ~CMOD_LOCKMARKS;
    cursor_save = curwin->w_cursor;
    linecount = line2 - line1 + 1;
    curwin->w_cursor.lnum = line1;
    curwin->w_cursor.col = 0;
    changed_line_abv_curs();
    invalidate_botline();
    /*
     * When using temp files:
     * 1. * Form temp file names
     * 2. * Write the lines to a temp file
     * 3. Run the filter command on the temp file
     * 4. * Read the output of the command into the buffer
     * 5. * Delete the original lines to be filtered
     * 6. * Remove the temp files
     *
     * When writing the input with a pipe or when catching the output with a
     * pipe only need to do 3.
     */
    if (do_out)
        shell_flags |= SHELL_DOOUT;
#ifdef FEAT_FILTERPIPE
# ifdef VIMDLL
    if (!gui.in_use && !gui.starting)
        stmp = 1; // Console mode doesn't support filterpipe.
# endif
    if (!do_in && do_out && !stmp)
    {
        // Use a pipe to fetch stdout of the command, do not use a temp file.
        shell_flags |= SHELL_READ;
        curwin->w_cursor.lnum = line2;
    }
    else if (do_in && !do_out && !stmp)
    {
        // Use a pipe to write stdin of the command, do not use a temp file.
        shell_flags |= SHELL_WRITE;
        curbuf->b_op_start.lnum = line1;
        curbuf->b_op_end.lnum = line2;
    }
    else if (do_in && do_out && !stmp)
    {
        // Use a pipe to write stdin and fetch stdout of the command, do not
        // use a temp file.
        shell_flags |= SHELL_READ|SHELL_WRITE;
        curbuf->b_op_start.lnum = line1;
        curbuf->b_op_end.lnum = line2;
        curwin->w_cursor.lnum = line2;
    }
    else
#endif
        if ((do_in && (itmp = vim_tempname('i', FALSE)) == NULL)
                || (do_out && (otmp = vim_tempname('o', FALSE)) == NULL))
        {
            emsg(_(e_cant_get_temp_file_name));
            goto filterend;
        }
    /*
     * The writing and reading of temp files will not be shown.
     * Vi also doesn't do this and the messages are not very informative.
     */
    ++no_wait_return; // don't call wait_return() while busy
    if (itmp != NULL && buf_write(curbuf, itmp, NULL, line1, line2, eap,
            FALSE, FALSE, FALSE, TRUE) == FAIL)
    {
        msg_putchar('\n'); // keep message from buf_write()
        --no_wait_return;
#if defined(FEAT_EVAL)
        if (!aborting())
#endif
            (void)semsg(_(e_cant_create_file_str), itmp); // will call wait_return
        goto filterend;
    }
    if (curbuf != old_curbuf)
        goto filterend;
    if (!do_out)
        msg_putchar('\n');
    // Create the shell command in allocated memory.
    cmd_buf = make_filter_cmd(cmd, itmp, otmp);
    if (cmd_buf == NULL)
        goto filterend;
    windgoto((int)Rows - 1, 0);
    cursor_on();
    /*
     * When not redirecting the output the command can write anything to the
     * screen. If 'shellredir' is equal to ">", screen may be messed up by
     * stderr output of external command. Clear the screen later.
     * If do_in is FALSE, this could be something like ":r !cat", which may
     * also mess up the screen, clear it later.
     */
    if (!do_out || STRCMP(p_srr, ">") == 0 || !do_in)
        redraw_later_clear();
    if (do_out)
    {
        if (u_save((linenr_T)(line2), (linenr_T)(line2 + 1)) == FAIL)
        {
            vim_free(cmd_buf);
            goto error;
        }
        redraw_curbuf_later(VALID);
    }
    read_linecount = curbuf->b_ml.ml_line_count;
    /*
     * When call_shell() fails wait_return() is called to give the user a
     * chance to read the error messages. Otherwise errors are ignored, so you
     * can see the error messages from the command that appear on stdout; use
     * 'u' to fix the text
     * Switch to cooked mode when not redirecting stdin, avoids that something
     * like ":r !cat" hangs.
     * Pass on the SHELL_DOOUT flag when the output is being redirected.
     */
    if (call_shell(cmd_buf, SHELL_FILTER | SHELL_COOKED | shell_flags))
    {
        redraw_later_clear();
        wait_return(FALSE);
    }
    vim_free(cmd_buf);
    did_check_timestamps = FALSE;
    need_check_timestamps = TRUE;
    // When interrupting the shell command, it may still have produced some
    // useful output. Reset got_int here, so that readfile() won't cancel
    // reading.
    ui_breakcheck();
    got_int = FALSE;
    if (do_out)
    {
        if (otmp != NULL)
        {
            if (readfile(otmp, NULL, line2, (linenr_T)0, (linenr_T)MAXLNUM,
                    eap, READ_FILTER) != OK)
            {
#if defined(FEAT_EVAL)
                if (!aborting())
#endif
                {
                    msg_putchar('\n');
                    semsg(_(e_cant_read_file_str), otmp);
                }
                goto error;
            }
            if (curbuf != old_curbuf)
                goto filterend;
        }
        read_linecount = curbuf->b_ml.ml_line_count - read_linecount;
        if (shell_flags & SHELL_READ)
        {
            curbuf->b_op_start.lnum = line2 + 1;
            curbuf->b_op_end.lnum = curwin->w_cursor.lnum;
            appended_lines_mark(line2, read_linecount);
        }
        if (do_in)
        {
            if ((cmdmod.cmod_flags & CMOD_KEEPMARKS)
                    || vim_strchr(p_cpo, CPO_REMMARK) == NULL)
            {
                if (read_linecount >= linecount)
                    // move all marks from old lines to new lines
                    mark_adjust(line1, line2, linecount, 0L);
                else if (save_cmod_flags & CMOD_LOCKMARKS)
                {
                    // Move marks from the lines below the new lines down by
                    // the number of lines lost.
                    // Move marks from the lines that will be deleted to the
                    // new lines and below.
                    mark_adjust(line2 + 1, (linenr_T)MAXLNUM,
                            linecount - read_linecount, 0L);
                    mark_adjust(line1, line2, linecount, 0L);
                }
                else
                {
                    // move marks from old lines to new lines, delete marks
                    // that are in deleted lines
                    mark_adjust(line1, line1 + read_linecount - 1,
                            linecount, 0L);
                    mark_adjust(line1 + read_linecount, line2, MAXLNUM, 0L);
                }
            }
            /*
             * Put cursor on first filtered line for ":range!cmd".
             * Adjust '[ and '] (set by buf_write()).
             */
            curwin->w_cursor.lnum = line1;
            del_lines(linecount, TRUE);
            curbuf->b_op_start.lnum -= linecount; // adjust '[
            curbuf->b_op_end.lnum -= linecount; // adjust ']
            write_lnum_adjust(-linecount); // adjust last line
            // for next write
#ifdef FEAT_FOLDING
            foldUpdate(curwin, curbuf->b_op_start.lnum, curbuf->b_op_end.lnum);
#endif
        }
        else
        {
            /*
             * Put cursor on last new line for ":r !cmd".
             */
            linecount = curbuf->b_op_end.lnum - curbuf->b_op_start.lnum + 1;
            curwin->w_cursor.lnum = curbuf->b_op_end.lnum;
        }
        beginline(BL_WHITE | BL_FIX); // cursor on first non-blank
        --no_wait_return;
        if (linecount > p_report)
        {
            if (do_in)
            {
                vim_snprintf(msg_buf, sizeof(msg_buf),
                        _("%ld lines filtered"), (long)linecount);
                if (msg(msg_buf) && !msg_scroll)
                    // save message to display it after redraw
                    set_keep_msg((char_u *)msg_buf, 0);
            }
            else
                msgmore((long)linecount);
        }
    }
    else
    {
error:
        // put cursor back in same position for ":w !cmd"
        curwin->w_cursor = cursor_save;
        --no_wait_return;
        wait_return(FALSE);
    }
filterend:
    // Common cleanup, reached on both success and failure paths.
    cmdmod.cmod_flags = save_cmod_flags;
    if (curbuf != old_curbuf)
    {
        --no_wait_return;
        emsg(_(e_filter_autocommands_must_not_change_current_buffer));
    }
    else if (cmdmod.cmod_flags & CMOD_LOCKMARKS)
    {
        curbuf->b_op_start = orig_start;
        curbuf->b_op_end = orig_end;
    }
    if (itmp != NULL)
        mch_remove(itmp);
    if (otmp != NULL)
        mch_remove(otmp);
    vim_free(itmp);
    vim_free(otmp);
}
"CWE-122",
"CWE-787"
]
| vim | dc5490e2cbc8c16022a23b449b48c1bd0083f366 | 241,131,338,517,669,200,000,000,000,000,000,000,000 | 293 | patch 8.2.4215: illegal memory access when copying lines in Visual mode
Problem: Illegal memory access when copying lines in Visual mode.
Solution: Adjust the Visual position after copying lines. |
/*
 * Adjust this Item_field after the query block it belonged to has been
 * pulled out into the SELECT "new_parent".
 *
 * new_parent - the SELECT_LEX the item has been moved into
 * ref        - reference to this item in the parent; unused in this
 *              implementation (presumably kept to match the common
 *              fix_after_pullout() signature -- confirm against the
 *              Item base class)
 * merge      - true when the pullout merges the inner block into the
 *              parent; false for a transformation without merge (then the
 *              item simply switches to the parent's context)
 */
void Item_field::fix_after_pullout(st_select_lex *new_parent, Item **ref,
                                   bool merge)
{
  if (new_parent == get_depended_from())
    depended_from= NULL;
  if (context)
  {
    bool need_change= false;
    /*
      Suppose there are nested selects:

       select_id=1
         select_id=2
           select_id=3  <----+
             select_id=4    -+
               select_id=5 --+

      Suppose, pullout operation has moved anything that had select_id=4 or 5
      in to select_id=3.

      If this Item_field had a name resolution context pointing into select_lex
      with id=4 or id=5, it needs a new name resolution context.

      However, it could also be that this object is a part of outer reference:
      Item_ref(Item_field(field in select with select_id=1))).
      - The Item_ref object has a context with select_id=5, and so needs a new
        name resolution context.
      - The Item_field object has a context with select_id=1, and doesn't need
        a new name resolution context.

      So, the following loop walks from Item_field's current context upwards.
      If we find that the select we've been pulled out to is up there, we
      create the new name resolution context. Otherwise, we don't.
    */
    for (Name_resolution_context *ct= context; ct; ct= ct->outer_context)
    {
      if (new_parent == ct->select_lex)
      {
        need_change= true;
        break;
      }
    }
    if (!need_change)
      return;

    if (!merge)
    {
      /*
        It is transformation without merge.
        This field was "outer" for the inner SELECT where it was taken and
        moved up.
        "Outer" fields uses normal SELECT_LEX context of upper SELECTs for
        name resolution, so we can switch everything to it safely.
      */
      this->context= &new_parent->context;
      return;
    }

    Name_resolution_context *ctx= new Name_resolution_context();
    if (!ctx)
      return;                             // Fatal error set
    if (context->select_lex == new_parent)
    {
      /*
        This field was pushed in then pulled out
        (for example left part of IN)
      */
      ctx->outer_context= context->outer_context;
    }
    else if (context->outer_context)
    {
      /* just pull to the upper context */
      ctx->outer_context= context->outer_context->outer_context;
    }
    else
    {
      /* No upper context (merging Derived/VIEW where context chain ends) */
      ctx->outer_context= NULL;
    }
    // Clone the remaining resolution state from the old context.
    ctx->table_list= context->first_name_resolution_table;
    ctx->select_lex= new_parent;
    if (context->select_lex == NULL)
      ctx->select_lex= NULL;
    ctx->first_name_resolution_table= context->first_name_resolution_table;
    ctx->last_name_resolution_table=  context->last_name_resolution_table;
    ctx->error_processor=             context->error_processor;
    ctx->error_processor_data=        context->error_processor_data;
    ctx->resolve_in_select_list=      context->resolve_in_select_list;
    ctx->security_ctx=                context->security_ctx;
    this->context=ctx;
  }
}
"CWE-416"
]
| server | c02ebf3510850ba78a106be9974c94c3b97d8585 | 141,583,347,632,714,070,000,000,000,000,000,000,000 | 92 | MDEV-24176 Preparations
1. moved fix_vcol_exprs() call to open_table()
mysql_alter_table() doesn't do lock_tables() so it cannot win from
fix_vcol_exprs() from there. Tests affected: main.default_session
2. Vanilla cleanups and comments. |
/*
 * Write the index out as a tree object, storing the resulting tree id in
 * "oid".
 *
 * Fails with GIT_EUNMERGED when the index still contains conflict
 * entries.  If the index carries a valid tree cache, its root id is
 * returned without writing anything.  A case-insensitive index is
 * temporarily switched to case-sensitive mode while the tree is written,
 * then restored.
 */
int git_tree__write_index(
	git_oid *oid, git_index *index, git_repository *repo)
{
	bool was_ignoring_case;
	int error;

	assert(oid && index && repo);

	if (git_index_has_conflicts(index)) {
		giterr_set(GITERR_INDEX,
			"Cannot create a tree from a not fully merged index.");
		return GIT_EUNMERGED;
	}

	/* A valid tree cache means the tree already exists in the odb. */
	if (index->tree != NULL && index->tree->entries >= 0) {
		git_oid_cpy(oid, &index->tree->oid);
		return 0;
	}

	was_ignoring_case = (index->ignore_case != 0);
	if (was_ignoring_case)
		git_index__set_ignore_case(index, false);

	error = write_tree(oid, repo, index, "", 0);

	if (was_ignoring_case)
		git_index__set_ignore_case(index, true);

	return error < 0 ? error : 0;
}
"CWE-20"
]
| libgit2 | 928429c5c96a701bcbcafacb2421a82602b36915 | 58,325,371,878,581,040,000,000,000,000,000,000,000 | 36 | tree: Check for `.git` with case insensitivy |
/*
 * Register a CAPI controller in the first free slot of capi_controller[].
 *
 * On success the controller number ctr->cnr (1-based slot index) is
 * assigned, the packet counters are reset, a /proc entry is created and 0
 * is returned.  Returns -EBUSY when all CAPI_MAXCONTR slots are in use.
 */
int attach_capi_ctr(struct capi_ctr *ctr)
{
	int slot;

	mutex_lock(&capi_controller_lock);

	/* Find the first unused slot. */
	for (slot = 0; slot < CAPI_MAXCONTR; slot++)
		if (capi_controller[slot] == NULL)
			break;

	if (slot == CAPI_MAXCONTR) {
		mutex_unlock(&capi_controller_lock);
		printk(KERN_ERR "kcapi: out of controller slots\n");
		return -EBUSY;
	}

	capi_controller[slot] = ctr;

	ctr->nrecvctlpkt = 0;
	ctr->nrecvdatapkt = 0;
	ctr->nsentctlpkt = 0;
	ctr->nsentdatapkt = 0;
	ctr->cnr = slot + 1;
	ctr->state = CAPI_CTR_DETECTED;
	ctr->blocked = 0;
	ctr->traceflag = showcapimsgs;

	sprintf(ctr->procfn, "capi/controllers/%d", ctr->cnr);
	ctr->procent = proc_create_single_data(ctr->procfn, 0, NULL,
					       ctr->proc_show, ctr);

	ncontrollers++;

	mutex_unlock(&capi_controller_lock);

	printk(KERN_NOTICE "kcapi: controller [%03d]: %s attached\n",
	       ctr->cnr, ctr->name);
	return 0;
}
"CWE-125"
]
| linux | 1f3e2e97c003f80c4b087092b225c8787ff91e4d | 141,353,178,423,550,170,000,000,000,000,000,000,000 | 38 | isdn: cpai: check ctr->cnr to avoid array index out of bound
The cmtp_add_connection() would add a cmtp session to a controller
and run a kernel thread to process cmtp.
__module_get(THIS_MODULE);
session->task = kthread_run(cmtp_session, session, "kcmtpd_ctr_%d",
session->num);
During this process, the kernel thread would call detach_capi_ctr()
to detach a register controller. if the controller
was not attached yet, detach_capi_ctr() would
trigger an array-index-out-bounds bug.
[ 46.866069][ T6479] UBSAN: array-index-out-of-bounds in
drivers/isdn/capi/kcapi.c:483:21
[ 46.867196][ T6479] index -1 is out of range for type 'capi_ctr *[32]'
[ 46.867982][ T6479] CPU: 1 PID: 6479 Comm: kcmtpd_ctr_0 Not tainted
5.15.0-rc2+ #8
[ 46.869002][ T6479] Hardware name: QEMU Standard PC (i440FX + PIIX,
1996), BIOS 1.14.0-2 04/01/2014
[ 46.870107][ T6479] Call Trace:
[ 46.870473][ T6479] dump_stack_lvl+0x57/0x7d
[ 46.870974][ T6479] ubsan_epilogue+0x5/0x40
[ 46.871458][ T6479] __ubsan_handle_out_of_bounds.cold+0x43/0x48
[ 46.872135][ T6479] detach_capi_ctr+0x64/0xc0
[ 46.872639][ T6479] cmtp_session+0x5c8/0x5d0
[ 46.873131][ T6479] ? __init_waitqueue_head+0x60/0x60
[ 46.873712][ T6479] ? cmtp_add_msgpart+0x120/0x120
[ 46.874256][ T6479] kthread+0x147/0x170
[ 46.874709][ T6479] ? set_kthread_struct+0x40/0x40
[ 46.875248][ T6479] ret_from_fork+0x1f/0x30
[ 46.875773][ T6479]
Signed-off-by: Xiaolong Huang <[email protected]>
Acked-by: Arnd Bergmann <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Jakub Kicinski <[email protected]> |
var_check_lock(int flags, char_u *name, int use_gettext)
{
    // Nothing to report when the variable is not locked.
    if ((flags & DI_FLAGS_LOCK) == 0)
	return FALSE;

    // Locked: give an error, translating the name when requested.
    semsg(_(e_variable_is_locked_str),
			       use_gettext ? (char_u *)_(name) : name);
    return TRUE;
}
"CWE-476"
]
| vim | 0f6e28f686dbb59ab3b562408ab9b2234797b9b1 | 29,025,884,139,559,936,000,000,000,000,000,000,000 | 10 | patch 8.2.4428: crash when switching tabpage while in the cmdline window
Problem: Crash when switching tabpage while in the cmdline window.
Solution: Disallow switching tabpage when in the cmdline window. |
/*
 * mac80211 start callback: power up the RF side, pre-allocate the pool of
 * TX URB wrappers, submit the RX (and optional interrupt) URBs, and open
 * the receive filters.
 *
 * Returns 0 on success; on the error path the TX resources are released
 * and the receive filters are closed before returning.
 */
static int rtl8xxxu_start(struct ieee80211_hw *hw)
{
	struct rtl8xxxu_priv *priv = hw->priv;
	struct rtl8xxxu_rx_urb *rx_urb;
	struct rtl8xxxu_tx_urb *tx_urb;
	unsigned long flags;
	int ret, i;

	ret = 0;

	init_usb_anchor(&priv->rx_anchor);
	init_usb_anchor(&priv->tx_anchor);
	init_usb_anchor(&priv->int_anchor);

	priv->fops->enable_rf(priv);
	if (priv->usb_interrupts) {
		ret = rtl8xxxu_submit_int_urb(hw);
		if (ret)
			goto exit;
	}

	// Build the free pool of TX URB wrappers.
	for (i = 0; i < RTL8XXXU_TX_URBS; i++) {
		tx_urb = kmalloc(sizeof(struct rtl8xxxu_tx_urb), GFP_KERNEL);
		if (!tx_urb) {
			// NOTE(review): -ENOMEM is only set when the very
			// first allocation fails; with i > 0 we jump to
			// error_out while ret may still be 0, so the caller
			// could see success after resources were freed --
			// verify this is intended.
			if (!i)
				ret = -ENOMEM;

			goto error_out;
		}
		usb_init_urb(&tx_urb->urb);
		INIT_LIST_HEAD(&tx_urb->list);
		tx_urb->hw = hw;
		list_add(&tx_urb->list, &priv->tx_urb_free_list);
		priv->tx_urb_free_count++;
	}

	priv->tx_stopped = false;

	spin_lock_irqsave(&priv->rx_urb_lock, flags);
	priv->shutdown = false;
	spin_unlock_irqrestore(&priv->rx_urb_lock, flags);

	// Allocate and submit the RX URBs.
	for (i = 0; i < RTL8XXXU_RX_URBS; i++) {
		rx_urb = kmalloc(sizeof(struct rtl8xxxu_rx_urb), GFP_KERNEL);
		if (!rx_urb) {
			if (!i)
				ret = -ENOMEM;

			goto error_out;
		}
		usb_init_urb(&rx_urb->urb);
		INIT_LIST_HEAD(&rx_urb->list);
		rx_urb->hw = hw;

		// NOTE(review): ret is overwritten on every iteration and a
		// submit failure does not stop the loop; only the last
		// submission's status is reported -- confirm intent.
		ret = rtl8xxxu_submit_rx_urb(priv, rx_urb);
	}
exit:
	/*
	 * Accept all data and mgmt frames
	 */
	rtl8xxxu_write16(priv, REG_RXFLTMAP2, 0xffff);
	rtl8xxxu_write16(priv, REG_RXFLTMAP0, 0xffff);

	rtl8xxxu_write32(priv, REG_OFDM0_XA_AGC_CORE1, 0x6954341e);

	return ret;

error_out:
	rtl8xxxu_free_tx_resources(priv);
	/*
	 * Disable all data and mgmt frames
	 */
	rtl8xxxu_write16(priv, REG_RXFLTMAP2, 0x0000);
	rtl8xxxu_write16(priv, REG_RXFLTMAP0, 0x0000);

	return ret;
}
"CWE-400",
"CWE-401"
]
| linux | a2cdd07488e666aa93a49a3fc9c9b1299e27ef3c | 124,811,113,378,293,610,000,000,000,000,000,000,000 | 77 | rtl8xxxu: prevent leaking urb
In rtl8xxxu_submit_int_urb if usb_submit_urb fails the allocated urb
should be released.
Signed-off-by: Navid Emamdoost <[email protected]>
Reviewed-by: Chris Chiu <[email protected]>
Signed-off-by: Kalle Valo <[email protected]> |
/* Parse an ELLIPSE2 drawing order from the stream and render it.
 * Each field is only read from the stream when its bit is set in
 * "present"; absent fields keep their previously cached value in "os".
 */
process_ellipse2(STREAM s, ELLIPSE2_ORDER * os, uint32 present, RD_BOOL delta)
{
	BRUSH ui_brush;
	int cx, cy;

	/* Bounding box (delta-encoded coordinates when "delta" is set). */
	if (present & 0x0001)
		rdp_in_coord(s, &os->left, delta);
	if (present & 0x0002)
		rdp_in_coord(s, &os->top, delta);
	if (present & 0x0004)
		rdp_in_coord(s, &os->right, delta);
	if (present & 0x0008)
		rdp_in_coord(s, &os->bottom, delta);

	/* Raster operation, fill mode and colours. */
	if (present & 0x0010)
		in_uint8(s, os->opcode);
	if (present & 0x0020)
		in_uint8(s, os->fillmode);
	if (present & 0x0040)
		rdp_in_colour(s, &os->bgcolour);
	if (present & 0x0080)
		rdp_in_colour(s, &os->fgcolour);

	/* The brush fields live in the upper bits of the present mask. */
	rdp_parse_brush(s, &os->brush, present >> 8);

	logger(Graphics, Debug,
	       "process_ellipse2(), l=%d, t=%d, r=%d, b=%d, op=0x%x, fm=%d, bs=%d, bg=0x%x, fg=0x%x",
	       os->left, os->top, os->right, os->bottom, os->opcode, os->fillmode, os->brush.style,
	       os->bgcolour, os->fgcolour);

	setup_brush(&ui_brush, &os->brush);

	cx = os->right - os->left;
	cy = os->bottom - os->top;
	ui_ellipse(os->opcode - 1, os->fillmode, os->left, os->top, cx, cy,
		   &ui_brush, os->bgcolour, os->fgcolour);
}
"CWE-119",
"CWE-125",
"CWE-703",
"CWE-787"
]
| rdesktop | 4dca546d04321a610c1835010b5dad85163b65e1 | 153,560,087,137,421,940,000,000,000,000,000,000,000 | 40 | Malicious RDP server security fixes
This commit includes fixes for a set of 21 vulnerabilities in
rdesktop when a malicious RDP server is used.
All vulnerabilities was identified and reported by Eyal Itkin.
* Add rdp_protocol_error function that is used in several fixes
* Refactor of process_bitmap_updates
* Fix possible integer overflow in s_check_rem() on 32bit arch
* Fix memory corruption in process_bitmap_data - CVE-2018-8794
* Fix remote code execution in process_bitmap_data - CVE-2018-8795
* Fix remote code execution in process_plane - CVE-2018-8797
* Fix Denial of Service in mcs_recv_connect_response - CVE-2018-20175
* Fix Denial of Service in mcs_parse_domain_params - CVE-2018-20175
* Fix Denial of Service in sec_parse_crypt_info - CVE-2018-20176
* Fix Denial of Service in sec_recv - CVE-2018-20176
* Fix minor information leak in rdpdr_process - CVE-2018-8791
* Fix Denial of Service in cssp_read_tsrequest - CVE-2018-8792
* Fix remote code execution in cssp_read_tsrequest - CVE-2018-8793
* Fix Denial of Service in process_bitmap_data - CVE-2018-8796
* Fix minor information leak in rdpsnd_process_ping - CVE-2018-8798
* Fix Denial of Service in process_secondary_order - CVE-2018-8799
* Fix remote code execution in in ui_clip_handle_data - CVE-2018-8800
* Fix major information leak in ui_clip_handle_data - CVE-2018-20174
* Fix memory corruption in rdp_in_unistr - CVE-2018-20177
* Fix Denial of Service in process_demand_active - CVE-2018-20178
* Fix remote code execution in lspci_process - CVE-2018-20179
* Fix remote code execution in rdpsnddbg_process - CVE-2018-20180
* Fix remote code execution in seamless_process - CVE-2018-20181
* Fix remote code execution in seamless_process_line - CVE-2018-20182 |
/*
 * Fill in the common fields of a freshly parsed mapping table, record it
 * on the pinctrl handle's DT map list and register it globally.
 *
 * On allocation failure the mapping table (including any duplicated
 * device-name strings) is released via dt_free_map() and -ENOMEM is
 * returned.
 */
static int dt_remember_or_free_map(struct pinctrl *p, const char *statename,
				   struct pinctrl_dev *pctldev,
				   struct pinctrl_map *map, unsigned num_maps)
{
	struct pinctrl_dt_map *entry;
	unsigned int i;

	/* Initialize common mapping table entry fields */
	for (i = 0; i < num_maps; i++) {
		/* Duplicate the name: the device's own string may be
		 * reallocated by dev_set_name() later on.
		 */
		const char *devname = kstrdup_const(dev_name(p->dev),
						    GFP_KERNEL);

		if (!devname) {
			dt_free_map(pctldev, map, num_maps);
			return -ENOMEM;
		}

		map[i].dev_name = devname;
		map[i].name = statename;
		if (pctldev)
			map[i].ctrl_dev_name = dev_name(pctldev->dev);
	}

	/* Remember the converted mapping table entries */
	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		dt_free_map(pctldev, map, num_maps);
		return -ENOMEM;
	}

	entry->pctldev = pctldev;
	entry->map = map;
	entry->num_maps = num_maps;
	list_add_tail(&entry->node, &p->dt_maps);

	return pinctrl_register_map(map, num_maps, false);
}
"CWE-125"
]
| linux | be4c60b563edee3712d392aaeb0943a768df7023 | 91,943,789,850,069,550,000,000,000,000,000,000,000 | 37 | pinctrl: devicetree: Avoid taking direct reference to device name string
When populating the pinctrl mapping table entries for a device, the
'dev_name' field for each entry is initialised to point directly at the
string returned by 'dev_name()' for the device and subsequently used by
'create_pinctrl()' when looking up the mappings for the device being
probed.
This is unreliable in the presence of calls to 'dev_set_name()', which may
reallocate the device name string leaving the pinctrl mappings with a
dangling reference. This then leads to a use-after-free every time the
name is dereferenced by a device probe:
| BUG: KASAN: invalid-access in strcmp+0x20/0x64
| Read of size 1 at addr 13ffffc153494b00 by task modprobe/590
| Pointer tag: [13], memory tag: [fe]
|
| Call trace:
| __kasan_report+0x16c/0x1dc
| kasan_report+0x10/0x18
| check_memory_region
| __hwasan_load1_noabort+0x4c/0x54
| strcmp+0x20/0x64
| create_pinctrl+0x18c/0x7f4
| pinctrl_get+0x90/0x114
| devm_pinctrl_get+0x44/0x98
| pinctrl_bind_pins+0x5c/0x450
| really_probe+0x1c8/0x9a4
| driver_probe_device+0x120/0x1d8
Follow the example of sysfs, and duplicate the device name string before
stashing it away in the pinctrl mapping entries.
Cc: Linus Walleij <[email protected]>
Reported-by: Elena Petrova <[email protected]>
Tested-by: Elena Petrova <[email protected]>
Signed-off-by: Will Deacon <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Linus Walleij <[email protected]> |
static int complete_sev_es_emulated_mmio(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	struct kvm_mmio_fragment *cur;
	unsigned int chunk;

	/* This completion callback must only run with an MMIO exit pending. */
	BUG_ON(!vcpu->mmio_needed);

	/*
	 * Finish the piece that userspace just serviced; at most 8 bytes are
	 * transferred per KVM_EXIT_MMIO round trip.
	 */
	cur = &vcpu->mmio_fragments[vcpu->mmio_cur_fragment];
	chunk = min(8u, cur->len);
	if (!vcpu->mmio_is_write)
		memcpy(cur->data, run->mmio.data, chunk);

	if (cur->len > 8) {
		/* Large fragment: consume one 8-byte piece and stay on it. */
		cur->data += chunk;
		cur->gpa += chunk;
		cur->len -= chunk;
	} else {
		/* Fragment fully serviced; advance to the next one. */
		cur++;
		vcpu->mmio_cur_fragment++;
	}

	if (vcpu->mmio_cur_fragment >= vcpu->mmio_nr_fragments) {
		vcpu->mmio_needed = 0;

		// VMG change, at this point, we're always done
		// RIP has already been advanced
		return 1;
	}

	/* More MMIO is needed: hand the next (up to 8-byte) piece to userspace. */
	run->mmio.phys_addr = cur->gpa;
	run->mmio.len = min(8u, cur->len);
	run->mmio.is_write = vcpu->mmio_is_write;
	if (run->mmio.is_write)
		memcpy(run->mmio.data, cur->data, min(8u, cur->len));
	run->exit_reason = KVM_EXIT_MMIO;

	vcpu->arch.complete_userspace_io = complete_sev_es_emulated_mmio;

	return 0;
}
"CWE-476"
]
| linux | 55749769fe608fa3f4a075e42e89d237c8e37637 | 11,654,910,649,534,968,000,000,000,000,000,000,000 | 45 | KVM: x86: Fix wall clock writes in Xen shared_info not to mark page dirty
When dirty ring logging is enabled, any dirty logging without an active
vCPU context will cause a kernel oops. But we've already declared that
the shared_info page doesn't get dirty tracking anyway, since it would
be kind of insane to mark it dirty every time we deliver an event channel
interrupt. Userspace is supposed to just assume it's always dirty any
time a vCPU can run or event channels are routed.
So stop using the generic kvm_write_wall_clock() and just write directly
through the gfn_to_pfn_cache that we already have set up.
We can make kvm_write_wall_clock() static in x86.c again now, but let's
not remove the 'sec_hi_ofs' argument even though it's not used yet. At
some point we *will* want to use that for KVM guests too.
Fixes: 629b5348841a ("KVM: x86/xen: update wallclock region")
Reported-by: butt3rflyh4ck <[email protected]>
Signed-off-by: David Woodhouse <[email protected]>
Message-Id: <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]> |
/*
 * Process the server's Certificate handshake message: parse the 3-byte
 * length-prefixed certificate list, verify the chain, and stash the peer
 * certificate and public key into the session's SESS_CERT.
 * Returns 1 on success, <= 0 on error (a fatal alert is sent via f_err).
 */
int ssl3_get_server_certificate(SSL *s)
	{
	int al,i,ok,ret= -1;
	unsigned long n,nc,llen,l;
	X509 *x=NULL;
	const unsigned char *q,*p;
	unsigned char *d;
	STACK_OF(X509) *sk=NULL;
	SESS_CERT *sc;
	EVP_PKEY *pkey=NULL;
	int need_cert = 1; /* VRS: 0=> will allow null cert if auth == KRB5 */

	n=s->method->ssl_get_message(s,
		SSL3_ST_CR_CERT_A,
		SSL3_ST_CR_CERT_B,
		-1,
		s->max_cert_list,
		&ok);

	if (!ok) return((int)n);

	/* A ServerKeyExchange here means the server sent no Certificate
	 * message; push the message back for the state machine to re-read. */
	if (s->s3->tmp.message_type == SSL3_MT_SERVER_KEY_EXCHANGE)
		{
		s->s3->tmp.reuse_message=1;
		return(1);
		}

	if (s->s3->tmp.message_type != SSL3_MT_CERTIFICATE)
		{
		al=SSL_AD_UNEXPECTED_MESSAGE;
		SSLerr(SSL_F_SSL3_GET_SERVER_CERTIFICATE,SSL_R_BAD_MESSAGE_TYPE);
		goto f_err;
		}
	p=d=(unsigned char *)s->init_msg;

	if ((sk=sk_X509_new_null()) == NULL)
		{
		SSLerr(SSL_F_SSL3_GET_SERVER_CERTIFICATE,ERR_R_MALLOC_FAILURE);
		goto err;
		}

	/* The outer 3-byte length must cover the whole message body. */
	n2l3(p,llen);
	if (llen+3 != n)
		{
		al=SSL_AD_DECODE_ERROR;
		SSLerr(SSL_F_SSL3_GET_SERVER_CERTIFICATE,SSL_R_LENGTH_MISMATCH);
		goto f_err;
		}
	/* Parse each 3-byte length-prefixed DER certificate in the list. */
	for (nc=0; nc<llen; )
		{
		n2l3(p,l);
		if ((l+nc+3) > llen)
			{
			al=SSL_AD_DECODE_ERROR;
			SSLerr(SSL_F_SSL3_GET_SERVER_CERTIFICATE,SSL_R_CERT_LENGTH_MISMATCH);
			goto f_err;
			}

		q=p;
		x=d2i_X509(NULL,&q,l);
		if (x == NULL)
			{
			al=SSL_AD_BAD_CERTIFICATE;
			SSLerr(SSL_F_SSL3_GET_SERVER_CERTIFICATE,ERR_R_ASN1_LIB);
			goto f_err;
			}
		/* d2i_X509 must have consumed exactly the advertised length. */
		if (q != (p+l))
			{
			al=SSL_AD_DECODE_ERROR;
			SSLerr(SSL_F_SSL3_GET_SERVER_CERTIFICATE,SSL_R_CERT_LENGTH_MISMATCH);
			goto f_err;
			}
		if (!sk_X509_push(sk,x))
			{
			SSLerr(SSL_F_SSL3_GET_SERVER_CERTIFICATE,ERR_R_MALLOC_FAILURE);
			goto err;
			}
		x=NULL;		/* ownership transferred to sk */
		nc+=l+3;
		p=q;
		}

	i=ssl_verify_cert_chain(s,sk);
	/* Chain verification failure is fatal unless verification is off,
	 * with a KRB5 ciphersuite exemption (KRB5 needs no certificate). */
	if ((s->verify_mode != SSL_VERIFY_NONE) && (!i)
#ifndef OPENSSL_NO_KRB5
	    && (s->s3->tmp.new_cipher->algorithms & (SSL_MKEY_MASK|SSL_AUTH_MASK))
	    != (SSL_aKRB5|SSL_kKRB5)
#endif /* OPENSSL_NO_KRB5 */
	    )
		{
		al=ssl_verify_alarm_type(s->verify_result);
		SSLerr(SSL_F_SSL3_GET_SERVER_CERTIFICATE,SSL_R_CERTIFICATE_VERIFY_FAILED);
		goto f_err;
		}
	ERR_clear_error(); /* but we keep s->verify_result */

	sc=ssl_sess_cert_new();
	if (sc == NULL) goto err;

	/* Replace any certificate info left over from a previous handshake. */
	if (s->session->sess_cert) ssl_sess_cert_free(s->session->sess_cert);
	s->session->sess_cert=sc;

	sc->cert_chain=sk;
	/* Inconsistency alert: cert_chain does include the peer's
	 * certificate, which we don't include in s3_srvr.c */
	x=sk_X509_value(sk,0);
	sk=NULL;
	/* VRS 19990621: possible memory leak; sk=null ==> !sk_pop_free() @end*/

	pkey=X509_get_pubkey(x);

	/* VRS: allow null cert if auth == KRB5 */
	need_cert =	((s->s3->tmp.new_cipher->algorithms
	                 & (SSL_MKEY_MASK|SSL_AUTH_MASK))
	                == (SSL_aKRB5|SSL_kKRB5))? 0: 1;

#ifdef KSSL_DEBUG
	printf("pkey,x = %p, %p\n", pkey,x);
	printf("ssl_cert_type(x,pkey) = %d\n", ssl_cert_type(x,pkey));
	printf("cipher, alg, nc = %s, %lx, %d\n", s->s3->tmp.new_cipher->name,
		s->s3->tmp.new_cipher->algorithms, need_cert);
#endif	/* KSSL_DEBUG */

	/* A certificate without a usable public key is fatal when needed. */
	if (need_cert && ((pkey == NULL) || EVP_PKEY_missing_parameters(pkey)))
		{
		x=NULL;
		al=SSL3_AL_FATAL;
		SSLerr(SSL_F_SSL3_GET_SERVER_CERTIFICATE,
			SSL_R_UNABLE_TO_FIND_PUBLIC_KEY_PARAMETERS);
		goto f_err;
		}

	i=ssl_cert_type(x,pkey);
	if (need_cert && i < 0)
		{
		x=NULL;
		al=SSL3_AL_FATAL;
		SSLerr(SSL_F_SSL3_GET_SERVER_CERTIFICATE,
			SSL_R_UNKNOWN_CERTIFICATE_TYPE);
		goto f_err;
		}

	if (need_cert)
		{
		/* Record the peer cert by type and as the session peer; each
		 * pointer stored takes its own X509 reference. */
		sc->peer_cert_type=i;
		CRYPTO_add(&x->references,1,CRYPTO_LOCK_X509);
		/* Why would the following ever happen?
		 * We just created sc a couple of lines ago. */
		if (sc->peer_pkeys[i].x509 != NULL)
			X509_free(sc->peer_pkeys[i].x509);
		sc->peer_pkeys[i].x509=x;
		sc->peer_key= &(sc->peer_pkeys[i]);

		if (s->session->peer != NULL)
			X509_free(s->session->peer);
		CRYPTO_add(&x->references,1,CRYPTO_LOCK_X509);
		s->session->peer=x;
		}
	else
		{
		sc->peer_cert_type=i;
		sc->peer_key= NULL;

		if (s->session->peer != NULL)
			X509_free(s->session->peer);
		s->session->peer=NULL;
		}
	s->session->verify_result = s->verify_result;

	x=NULL;
	ret=1;

	if (0)
		{
f_err:
		ssl3_send_alert(s,SSL3_AL_FATAL,al);
		}
err:
	EVP_PKEY_free(pkey);
	X509_free(x);
	sk_X509_pop_free(sk,X509_free);
	return(ret);
	}
| openssl | 36ca4ba63d083da6f9d4598f18f17a8c32c8eca2 | 130,991,937,724,056,270,000,000,000,000,000,000,000 | 183 | Implement the Supported Point Formats Extension for ECC ciphersuites
Submitted by: Douglas Stebila |
static xmlEntityPtr SVGGetEntity(void *context,const xmlChar *name)
{
  SVGInfo
    *svg_info = (SVGInfo *) context;

  /*
    SAX callback: resolve a named entity against the SVG document bound to
    this parser context.
  */
  (void) LogMagickEvent(CoderEvent,GetMagickModule()," SAX.SVGGetEntity(%s)",
    name);
  return(xmlGetDocEntity(svg_info->document,name));
}
"CWE-125"
]
| ImageMagick6 | a5db4873626f702d2ddd8bc293573493e0a412c0 | 268,167,461,846,971,800,000,000,000,000,000,000,000 | 13 | https://github.com/ImageMagick/ImageMagick/issues/1336 |
// Report whether this OSD currently considers itself at or beyond the FULL
// utilization threshold.
bool OSDService::is_full() const
{
  // cur_state is written elsewhere under full_status_lock; take the lock so
  // we read a consistent value.
  Mutex::Locker l(full_status_lock);
  return cur_state >= FULL;
}
"CWE-287",
"CWE-284"
]
| ceph | 5ead97120e07054d80623dada90a5cc764c28468 | 237,568,085,672,800,600,000,000,000,000,000,000,000 | 5 | auth/cephx: add authorizer challenge
Allow the accepting side of a connection to reject an initial authorizer
with a random challenge. The connecting side then has to respond with an
updated authorizer proving they are able to decrypt the service's challenge
and that the new authorizer was produced for this specific connection
instance.
The accepting side requires this challenge and response unconditionally
if the client side advertises they have the feature bit. Servers wishing
to require this improved level of authentication simply have to require
the appropriate feature.
Signed-off-by: Sage Weil <[email protected]>
(cherry picked from commit f80b848d3f830eb6dba50123e04385173fa4540b)
# Conflicts:
# src/auth/Auth.h
# src/auth/cephx/CephxProtocol.cc
# src/auth/cephx/CephxProtocol.h
# src/auth/none/AuthNoneProtocol.h
# src/msg/Dispatcher.h
# src/msg/async/AsyncConnection.cc
- const_iterator
- ::decode vs decode
- AsyncConnection ctor arg noise
- get_random_bytes(), not cct->random() |
/*
 * Ensure one->sha1 describes the filespec: null sha1 for an invalid side,
 * the cached value when already valid, otherwise hash the working-tree file.
 */
static void diff_fill_sha1_info(struct diff_filespec *one)
{
	struct stat st;

	/* An absent/deleted side gets the null sha1. */
	if (!DIFF_FILE_VALID(one)) {
		hashclr(one->sha1);
		return;
	}
	/* Already computed earlier; nothing to do. */
	if (one->sha1_valid)
		return;
	/* "-" stands for stdin and has no stable content to hash. */
	if (!strcmp(one->path, "-")) {
		hashcpy(one->sha1, null_sha1);
		return;
	}
	if (lstat(one->path, &st) < 0)
		die("stat %s", one->path);
	if (index_path(one->sha1, one->path, &st, 0))
		die("cannot hash %s\n", one->path);
}
"CWE-119"
]
| git | fd55a19eb1d49ae54008d932a65f79cd6fda45c9 | 75,565,546,424,763,610,000,000,000,000,000,000,000 | 18 | Fix buffer overflow in git diff
If PATH_MAX on your system is smaller than a path stored, it may cause
buffer overflow and stack corruption in diff_addremove() and diff_change()
functions when running git-diff
Signed-off-by: Dmitry Potapov <[email protected]>
Signed-off-by: Junio C Hamano <[email protected]> |
// Convenience overload: forwards to the general AveragePool implementation
// using the same stride for both the width and the height dimension.
// NOTE(review): 'Ac' is a template parameter (fused activation) declared on a
// template<> line immediately above this snippet -- not visible here.
bool AveragePool(const uint8* input_data, const Dims<4>& input_dims, int stride,
                 int pad_width, int pad_height, int filter_width,
                 int filter_height, int32 output_activation_min,
                 int32 output_activation_max, uint8* output_data,
                 const Dims<4>& output_dims) {
  // Delegate with 'stride' applied to both spatial axes.
  return AveragePool<Ac>(input_data, input_dims, stride, stride, pad_width,
                         pad_height, filter_width, filter_height,
                         output_activation_min, output_activation_max,
                         output_data, output_dims);
}
"CWE-703",
"CWE-835"
]
| tensorflow | dfa22b348b70bb89d6d6ec0ff53973bacb4f4695 | 6,634,865,541,601,304,000,000,000,000,000,000,000 | 10 | Prevent a division by 0 in average ops.
PiperOrigin-RevId: 385184660
Change-Id: I7affd4554f9b336fca29ac68f633232c094d0bd3 |
/*
 * Map a lock id to a printable name: negative ids are dynamic locks,
 * ids below CRYPTO_NUM_LOCKS come from the static table, and anything
 * beyond that indexes the application-registered names in app_locks.
 * Returns "ERROR" for an id outside the registered range.
 */
const char *CRYPTO_get_lock_name(int type)
	{
	if (type < 0)
		return("dynamic");
	else if (type < CRYPTO_NUM_LOCKS)
		return(lock_names[type]);
	/* Valid app_locks indices are 0 .. num-1, so an index equal to the
	 * stack size is out of range too; the previous '>' comparison let
	 * it through and sk_OPENSSL_STRING_value() then returned NULL
	 * instead of "ERROR". */
	else if (type-CRYPTO_NUM_LOCKS >= sk_OPENSSL_STRING_num(app_locks))
		return("ERROR");
	else
		return(sk_OPENSSL_STRING_value(app_locks,type-CRYPTO_NUM_LOCKS));
	}
"CWE-310"
]
| openssl | 9c00a950604aca819cee977f1dcb4b45f2af3aa6 | 267,945,050,441,042,700,000,000,000,000,000,000,000 | 11 | Add and use a constant-time memcmp.
This change adds CRYPTO_memcmp, which compares two vectors of bytes in
an amount of time that's independent of their contents. It also changes
several MAC compares in the code to use this over the standard memcmp,
which may leak information about the size of a matching prefix.
(cherry picked from commit 2ee798880a246d648ecddadc5b91367bee4a5d98)
Conflicts:
crypto/crypto.h
ssl/t1_lib.c |
xmlKeepBlanksDefault(int val) {
    int previous = xmlKeepBlanksDefaultValue;

    /*
     * Set the global "keep blank nodes" default and return the old setting.
     * Disabling blank preservation also switches serialization to
     * indented output.
     */
    xmlKeepBlanksDefaultValue = val;
    if (val == 0)
        xmlIndentTreeOutput = 1;
    return(previous);
}
"CWE-119"
]
| libxml2 | 23f05e0c33987d6605387b300c4be5da2120a7ab | 313,619,572,682,406,000,000,000,000,000,000,000,000 | 7 | Detect excessive entities expansion upon replacement
If entities expansion in the XML parser is asked for,
it is possble to craft relatively small input document leading
to excessive on-the-fly content generation.
This patch accounts for those replacement and stop parsing
after a given threshold. it can be bypassed as usual with the
HUGE parser option. |
/*
 * Validate the "target_link_uri" of a 3rd-party initiated login against the
 * module configuration: the target must live on a host (and under a cookie
 * path) that the session cookie set by this module will actually cover, so
 * the redirect URI handler cannot be abused as an open redirector.
 * Returns TRUE when the target is acceptable, FALSE otherwise.
 */
static int oidc_target_link_uri_matches_configuration(request_rec *r,
		oidc_cfg *cfg, const char *target_link_uri) {

	apr_uri_t o_uri;
	apr_uri_parse(r->pool, target_link_uri, &o_uri);
	if (o_uri.hostname == NULL) {
		oidc_error(r,
				"could not parse the \"target_link_uri\" (%s) in to a valid URL: aborting.",
				target_link_uri);
		return FALSE;
	}

	apr_uri_t r_uri;
	apr_uri_parse(r->pool, cfg->redirect_uri, &r_uri);

	if (cfg->cookie_domain == NULL) {
		/* no cookie_domain configured: the session cookie will be set
		 * host-wide, so the target_link_uri host must match the
		 * redirect_uri host */
		if (apr_strnatcmp(o_uri.hostname, r_uri.hostname) != 0) {
			char *p = strstr(o_uri.hostname, r_uri.hostname);
			/* NOTE(review): this is a plain suffix match with no "."
			 * boundary requirement, so e.g. "evilexample.com" would pass
			 * for a redirect_uri host of "example.com" -- confirm whether
			 * additional hardening is needed. */
			if ((p == NULL) || (apr_strnatcmp(r_uri.hostname, p) != 0)) {
				oidc_error(r,
						"the URL hostname (%s) of the configured OIDCRedirectURI does not match the URL hostname of the \"target_link_uri\" (%s): aborting to prevent an open redirect.",
						r_uri.hostname, o_uri.hostname);
				return FALSE;
			}
		}
	} else {
		/* cookie_domain set: see if the target_link_uri is within the cookie_domain */
		char *p = strstr(o_uri.hostname, cfg->cookie_domain);
		if ((p == NULL) || (apr_strnatcmp(cfg->cookie_domain, p) != 0)) {
			oidc_error(r,
					"the domain (%s) configured in OIDCCookieDomain does not match the URL hostname (%s) of the \"target_link_uri\" (%s): aborting to prevent an open redirect.",
					cfg->cookie_domain, o_uri.hostname, target_link_uri);
			return FALSE;
		}
	}

	/* see if the cookie_path setting matches the target_link_uri path */
	char *cookie_path = oidc_cfg_dir_cookie_path(r);
	if (cookie_path != NULL) {
		char *p = (o_uri.path != NULL) ? strstr(o_uri.path, cookie_path) : NULL;
		if ((p == NULL) || (p != o_uri.path)) {
			/* bugfix: log the configured cookie path, not the cookie domain */
			oidc_error(r,
					"the path (%s) configured in OIDCCookiePath does not match the URL path (%s) of the \"target_link_uri\" (%s): aborting to prevent an open redirect.",
					cookie_path, o_uri.path, target_link_uri);
			return FALSE;
		} else if (strlen(o_uri.path) > strlen(cookie_path)) {
			/* the target path extends beyond the cookie path: require the
			 * next character to be a "/" so that e.g. a cookie path of
			 * "/app" does not accept "/approot" */
			int n = strlen(cookie_path);
			if (cookie_path[n - 1] == '/')
				n--;
			if (o_uri.path[n] != '/') {
				/* bugfix: log the configured cookie path, not the cookie domain */
				oidc_error(r,
						"the path (%s) configured in OIDCCookiePath does not match the URL path (%s) of the \"target_link_uri\" (%s): aborting to prevent an open redirect.",
						cookie_path, o_uri.path, target_link_uri);
				return FALSE;
			}
		}
	}
	return TRUE;
}
"CWE-20"
]
| mod_auth_openidc | 612e309bfffd6f9b8ad7cdccda3019fc0865f3b4 | 73,407,107,116,700,030,000,000,000,000,000,000,000 | 60 | don't echo query params on invalid requests to redirect URI; closes #212
thanks @LukasReschke; I'm sure there's some OWASP guideline that warns
against this |
// Per-request initialization for the S3 website handler: capture the raw
// object name before it gets rewritten, then run the base S3 init.
int RGWHandler_REST_S3Website::init(rgw::sal::RGWRadosStore *store, req_state *s,
                                    rgw::io::BasicClient* cio)
{
  // save the original object name before retarget() replaces it with the
  // result of get_effective_key(). the error_handler() needs the original
  // object name for redirect handling
  original_object_name = s->object.name;
  return RGWHandler_REST_S3::init(store, s, cio);
}
"CWE-79"
]
| ceph | 8f90658c731499722d5f4393c8ad70b971d05f77 | 101,151,140,380,066,750,000,000,000,000,000,000,000 | 10 | rgw: reject unauthenticated response-header actions
Signed-off-by: Matt Benjamin <[email protected]>
Reviewed-by: Casey Bodley <[email protected]>
(cherry picked from commit d8dd5e513c0c62bbd7d3044d7e2eddcd897bd400) |
/*
 * Schedule a rebuild of the window title and (when tab pages are available)
 * a redraw of the tab pages line; the actual work happens later in the main
 * loop.
 */
static void redraw_titles(void)
{
    need_maketitle = TRUE;	/* rebuild the title string */
# ifdef FEAT_WINDOWS
    redraw_tabline = TRUE;	/* the tabline also displays titles */
# endif
}
"CWE-20"
]
| vim | d0b5138ba4bccff8a744c99836041ef6322ed39a | 213,396,147,209,318,500,000,000,000,000,000,000,000 | 7 | patch 8.0.0056
Problem: When setting 'filetype' there is no check for a valid name.
Solution: Only allow valid characters in 'filetype', 'syntax' and 'keymap'. |
proc_setgroups_write (const long child_pid, const char * const str)
{
  /* Write STR into /proc/<child_pid>/setgroups; mark the test unsupported
     when the kernel does not provide that file.  */
  char path[sizeof ("/proc//setgroups") + INT_STRLEN_BOUND (long)];
  snprintf (path, sizeof (path), "/proc/%ld/setgroups", child_pid);

  int fd = open (path, O_WRONLY);
  if (fd < 0)
    {
      /* Only ENOENT (kernels without the setgroups file) is tolerated;
         any other failure is a real test error.  */
      TEST_VERIFY_EXIT (errno == ENOENT);
      FAIL_UNSUPPORTED ("/proc/%ld/setgroups not found\n", child_pid);
    }

  xwrite (fd, str, strlen (str));
  xclose (fd);
}
"CWE-284"
]
| glibc | 23e0e8f5f1fb5ed150253d986ecccdc90c2dcd5e | 29,255,024,768,277,966,000,000,000,000,000,000,000 | 20 | getcwd: Set errno to ERANGE for size == 1 (CVE-2021-3999)
No valid path returned by getcwd would fit into 1 byte, so reject the
size early and return NULL with errno set to ERANGE. This change is
prompted by CVE-2021-3999, which describes a single byte buffer
underflow and overflow when all of the following conditions are met:
- The buffer size (i.e. the second argument of getcwd) is 1 byte
- The current working directory is too long
- '/' is also mounted on the current working directory
Sequence of events:
- In sysdeps/unix/sysv/linux/getcwd.c, the syscall returns ENAMETOOLONG
because the linux kernel checks for name length before it checks
buffer size
- The code falls back to the generic getcwd in sysdeps/posix
- In the generic func, the buf[0] is set to '\0' on line 250
- this while loop on line 262 is bypassed:
while (!(thisdev == rootdev && thisino == rootino))
since the rootfs (/) is bind mounted onto the directory and the flow
goes on to line 449, where it puts a '/' in the byte before the
buffer.
- Finally on line 458, it moves 2 bytes (the underflowed byte and the
'\0') to the buf[0] and buf[1], resulting in a 1 byte buffer overflow.
- buf is returned on line 469 and errno is not set.
This resolves BZ #28769.
Reviewed-by: Andreas Schwab <[email protected]>
Reviewed-by: Adhemerval Zanella <[email protected]>
Signed-off-by: Qualys Security Advisory <[email protected]>
Signed-off-by: Siddhesh Poyarekar <[email protected]> |
uint8_t* cli_bcapi_map_getvalue(struct cli_bc_ctx *ctx , int32_t id, int32_t valuesize)
{
    struct cli_map *map = get_hashtab(ctx, id);

    /* Unknown map id, or a caller-declared value size that disagrees with
     * the map's actual value size, both yield NULL. */
    if (map == NULL || cli_map_getvalue_size(map) != valuesize)
        return NULL;
    return cli_map_getvalue(map);
}
"CWE-189"
]
| clamav-devel | 3d664817f6ef833a17414a4ecea42004c35cc42f | 175,797,391,648,925,000,000,000,000,000,000,000,000 | 9 | fix recursion level crash (bb #3706).
Thanks to Stephane Chazelas for the analysis. |
/* Compile a function-call expression: emit code that pushes the callee and
 * its 'this' value, then the arguments, and finally OP_CALL with the
 * argument count. */
static void ccall(JF, js_Ast *fun, js_Ast *args)
{
	int n;
	switch (fun->type) {
	case EXP_INDEX:
		/* obj[expr](...): 'this' is obj; look the property up dynamically. */
		cexp(J, F, fun->a);
		emit(J, F, OP_DUP);
		cexp(J, F, fun->b);
		emit(J, F, OP_GETPROP);
		emit(J, F, OP_ROT2);	/* leave: function, this(obj) */
		break;
	case EXP_MEMBER:
		/* obj.name(...): 'this' is obj; fetch the named property. */
		cexp(J, F, fun->a);
		emit(J, F, OP_DUP);
		emitstring(J, F, OP_GETPROP_S, fun->b->string);
		emit(J, F, OP_ROT2);	/* leave: function, this(obj) */
		break;
	case EXP_IDENTIFIER:
		/* A direct call to eval gets dedicated compilation. */
		if (!strcmp(fun->string, "eval")) {
			ceval(J, F, fun, args);
			return;
		}
		/* fall through */
	default:
		/* Plain call: 'this' is undefined in strict mode, else the
		 * global object. */
		cexp(J, F, fun);
		emit(J, F, J->strict ? OP_UNDEF : OP_GLOBAL);
		break;
	}
	n = cargs(J, F, args);
	emit(J, F, OP_CALL);
	emitraw(J, F, n);
}
"CWE-476"
]
| mujs | 5008105780c0b0182ea6eda83ad5598f225be3ee | 97,166,370,020,126,900,000,000,000,000,000,000,000 | 32 | Fix 697172: degenerate labeled break/continue statement.
A labeled break statement will look for a matching label through
its chain of parent statements. We start looking at the break statement
though, so if the label is attached to the break, we'll return the break
statement itself as a break target.
Start looking for targets one level up instead. |
/*
  Undo the effect of no_rows_in_result(): restore the saved null flags of
  the aggregated MIN/MAX value.  was_values acts as a one-shot guard so the
  restore only happens if the values were actually cleared.
*/
void Item_sum_hybrid::restore_to_before_no_rows_in_result()
{
  if (!was_values)
  {
    was_values= TRUE;
    null_value= value->null_value= was_null_value;
  }
}
"CWE-120"
]
| server | eca207c46293bc72dd8d0d5622153fab4d3fccf1 | 250,469,625,300,237,770,000,000,000,000,000,000,000 | 8 | MDEV-25317 Assertion `scale <= precision' failed in decimal_bin_size And Assertion `scale >= 0 && precision > 0 && scale <= precision' failed in decimal_bin_size_inline/decimal_bin_size.
Precision should be kept below DECIMAL_MAX_SCALE for computations.
It can be bigger in Item_decimal. I'd fix this too but it changes the
existing behaviour so problemmatic to ix. |
/*
 * Read an RPM header blob (index entry table + data segment) from fd.
 * @magic	when non-zero, an 8-byte header magic precedes il/dl and is
 *		verified against rpm_header_magic
 * @exact_size	passed through to hdrblobInit() for strict size checking
 * @regionTag	RPMTAG_HEADERSIGNATURES or the main header region tag;
 *		signature headers are additionally padded to 8 bytes on disk
 * @blob	receives the parsed blob on success
 * @emsg	on failure, receives a malloc'd error message
 * Returns RPMRC_OK on success, RPMRC_FAIL otherwise.
 */
rpmRC hdrblobRead(FD_t fd, int magic, int exact_size, rpmTagVal regionTag, hdrblob blob, char **emsg)
{
    int32_t block[4];
    /* Without a magic, only the two il/dl words are read from the file. */
    int32_t *bs = (magic != 0) ? &block[0] : &block[2];
    int blen = (magic != 0) ? sizeof(block) : sizeof(block) / 2;
    int32_t il;		/* number of index entries (network order on disk) */
    int32_t dl;		/* length of the data segment */
    int32_t * ei = NULL;	/* full blob: il, dl, entries, data */
    size_t uc;
    size_t nb;
    rpmRC rc = RPMRC_FAIL;	/* assume failure */
    int xx;

    memset(block, 0, sizeof(block));
    if ((xx = Freadall(fd, bs, blen)) != blen) {
	rasprintf(emsg,
		_("hdr size(%d): BAD, read returned %d"), blen, xx);
	goto exit;
    }
    if (magic && memcmp(block, rpm_header_magic, sizeof(rpm_header_magic))) {
	rasprintf(emsg, _("hdr magic: BAD"));
	goto exit;
    }
    il = ntohl(block[2]);
    dl = ntohl(block[3]);
    /* Sanity-check the advertised counts before allocating anything. */
    if (hdrblobVerifyLengths(regionTag, il, dl, emsg))
	goto exit;

    nb = (il * sizeof(struct entryInfo_s)) + dl;
    uc = sizeof(il) + sizeof(dl) + nb;
    ei = xmalloc(uc);
    /* il/dl stay in network byte order inside the stored blob. */
    ei[0] = block[2];
    ei[1] = block[3];
    if ((xx = Freadall(fd, (char *)&ei[2], nb)) != nb) {
	rasprintf(emsg, _("hdr blob(%zd): BAD, read returned %d"), nb, xx);
	goto exit;
    }

    if (regionTag == RPMTAG_HEADERSIGNATURES) {
	/* Signature headers are padded to an 8-byte boundary on disk;
	 * read and discard the pad bytes. */
	size_t sigSize = uc + sizeof(rpm_header_magic);
	size_t pad = (8 - (sigSize % 8)) % 8;
	size_t trc;
	if (pad && (trc = Freadall(fd, block, pad)) != pad) {
	    rasprintf(emsg, _("sigh pad(%zd): BAD, read %zd bytes"), pad, trc);
	    goto exit;
	}
    }

    rc = hdrblobInit(ei, uc, regionTag, exact_size, blob, emsg);

exit:
    if (rc != RPMRC_OK) {
	free(ei);
	blob->ei = NULL;
	if (emsg && *emsg && regionTag == RPMTAG_HEADERSIGNATURES) {
	    /* rstrscat() cannot handle overlap even if it claims so */
	    char *tmp = rstrscat(NULL, _("signature "), *emsg, NULL);
	    free(*emsg);
	    *emsg = tmp;
	}
    }

    return rc;
}
"CWE-125"
]
| rpm | 8f4b3c3cab8922a2022b9e47c71f1ecf906077ef | 300,748,354,672,005,900,000,000,000,000,000,000,000 | 64 | hdrblobInit() needs bounds checks too
Users can pass untrusted data to hdrblobInit() and it must be robust
against this. |
// Handle a synchronous IPC message from a renderer frame: relay it to the
// JavaScript side by emitting "-ipc-message-sync" on this WebContents,
// forwarding the reply callback so JS can answer the blocked sender.
void WebContents::MessageSync(bool internal,
                              const std::string& channel,
                              blink::CloneableMessage arguments,
                              MessageSyncCallback callback) {
  TRACE_EVENT1("electron", "WebContents::MessageSync", "channel", channel);
  // webContents.emit('-ipc-message-sync', new Event(sender, message), internal,
  // channel, arguments);
  // receivers_.current_context() identifies the frame the message came from.
  EmitWithSender("-ipc-message-sync", receivers_.current_context(),
                 std::move(callback), internal, channel, std::move(arguments));
}
"CWE-284",
"CWE-693"
]
| electron | 18613925610ba319da7f497b6deed85ad712c59b | 126,500,861,410,618,800,000,000,000,000,000,000,000 | 10 | refactor: wire will-navigate up to a navigation throttle instead of OpenURL (#25108)
* refactor: wire will-navigate up to a navigation throttle instead of OpenURL (#25065)
* refactor: wire will-navigate up to a navigation throttle instead of OpenURL
* spec: add test for x-site _top navigation
* chore: old code be old |
ex_behave(exarg_T *eap)
{
    /* ":behave {mswin|xterm}": preset the group of selection-related
     * options to the requested behavior in one go. */
    static const char *names[4]
	= {"selection", "selectmode", "mousemodel", "keymodel"};
    const char	*values[4];
    int		i;

    if (STRCMP(eap->arg, "mswin") == 0)
    {
	values[0] = "exclusive";
	values[1] = "mouse,key";
	values[2] = "popup";
	values[3] = "startsel,stopsel";
    }
    else if (STRCMP(eap->arg, "xterm") == 0)
    {
	values[0] = "inclusive";
	values[1] = "";
	values[2] = "extend";
	values[3] = "";
    }
    else
    {
	semsg(_(e_invarg2), eap->arg);
	return;
    }

    for (i = 0; i < 4; ++i)
	set_option_value((char_u *)names[i], 0L, (char_u *)values[i], 0);
}
"CWE-78"
]
| vim | 8c62a08faf89663e5633dc5036cd8695c80f1075 | 257,594,762,978,900,600,000,000,000,000,000,000,000 | 20 | patch 8.1.0881: can execute shell commands in rvim through interfaces
Problem: Can execute shell commands in rvim through interfaces.
Solution: Disable using interfaces in restricted mode. Allow for writing
file with writefile(), histadd() and a few others. |
close_log_file(void)
{
	if (!log_file)
		return;

	/* Release the handle and clear the global so a later reopen
	 * starts from a clean state. */
	fclose(log_file);
	log_file = NULL;
}
"CWE-59",
"CWE-61"
]
| keepalived | 04f2d32871bb3b11d7dc024039952f2fe2750306 | 164,543,623,863,183,690,000,000,000,000,000,000,000 | 7 | When opening files for write, ensure they aren't symbolic links
Issue #1048 identified that if, for example, a non privileged user
created a symbolic link from /etc/keepalvied.data to /etc/passwd,
writing to /etc/keepalived.data (which could be invoked via DBus)
would cause /etc/passwd to be overwritten.
This commit stops keepalived writing to pathnames where the ultimate
component is a symbolic link, by setting O_NOFOLLOW whenever opening
a file for writing.
This might break some setups, where, for example, /etc/keepalived.data
was a symbolic link to /home/fred/keepalived.data. If this was the case,
instead create a symbolic link from /home/fred/keepalived.data to
/tmp/keepalived.data, so that the file is still accessible via
/home/fred/keepalived.data.
There doesn't appear to be a way around this backward incompatibility,
since even checking if the pathname is a symbolic link prior to opening
for writing would create a race condition.
Signed-off-by: Quentin Armitage <[email protected]> |
ipvs_start(void)
{
	log_message(LOG_DEBUG, "Initializing ipvs");

	/* First attempt: the ipvs kernel module may already be loaded. */
	if (ipvs_init() == 0)
		return IPVS_SUCCESS;

	/* Fall back to loading the module ourselves, then retry once. */
	if (modprobe_ipvs() == 0 && ipvs_init() == 0)
		return IPVS_SUCCESS;

	log_message(LOG_INFO, "IPVS: Can't initialize ipvs: %s",
		    ipvs_strerror(errno));
	no_ipvs = true;
	return IPVS_ERROR;
}
"CWE-200"
]
| keepalived | 26c8d6374db33bcfcdcd758b1282f12ceef4b94f | 277,596,586,328,672,000,000,000,000,000,000,000,000 | 15 | Disable fopen_safe() append mode by default
If a non privileged user creates /tmp/keepalived.log and has it open
for read (e.g. tail -f), then even though keepalived will change the
owner to root and remove all read/write permissions from non owners,
the application which already has the file open will be able to read
the added log entries.
Accordingly, opening a file in append mode is disabled by default, and
only enabled if --enable-smtp-alert-debug or --enable-log-file (which
are debugging options and unset by default) are enabled.
This should further alleviate security concerns related to CVE-2018-19046.
Signed-off-by: Quentin Armitage <[email protected]> |
/*
  Check whether a command's outcome matches what the test script expected.

  Exposes $sys_errno and $errno to the script.  Behavior:
  - abort_on_error set: any non-zero error is reported via report_or_die.
  - error matches one of the expected errors: accepted, properties reverted.
  - error does not match but errors were expected: reported as wrong error.
  - error == 0 while a non-zero errno was expected: reported as an
    unexpected success.
*/
void handle_command_error(struct st_command *command, uint error,
                          int sys_errno)
{
  DBUG_ENTER("handle_command_error");
  DBUG_PRINT("enter", ("error: %d", error));
  var_set_int("$sys_errno",sys_errno);
  var_set_int("$errno",error);
  if (error != 0)
  {
    int i;

    if (command->abort_on_error)
    {
      report_or_die("command \"%.*s\" failed with error: %u my_errno: %d "
                    "errno: %d",
                    command->first_word_len, command->query, error, my_errno,
                    sys_errno);
      DBUG_VOID_RETURN;
    }

    /* Index of the matching expected error, or -1 when none matches. */
    i= match_expected_error(command, error, NULL);

    if (i >= 0)
    {
      DBUG_PRINT("info", ("command \"%.*s\" failed with expected error: %u, errno: %d",
                          command->first_word_len, command->query, error,
                          sys_errno));
      revert_properties();
      DBUG_VOID_RETURN;
    }
    if (command->expected_errors.count > 0)
      report_or_die("command \"%.*s\" failed with wrong error: %u "
                    "my_errno: %d errno: %d",
                    command->first_word_len, command->query, error, my_errno,
                    sys_errno);
  }
  else if (command->expected_errors.err[0].type == ERR_ERRNO &&
           command->expected_errors.err[0].code.errnum != 0)
  {
    /* Error code we wanted was != 0, i.e. not an expected success */
    report_or_die("command \"%.*s\" succeeded - should have failed with "
                  "errno %d...",
                  command->first_word_len, command->query,
                  command->expected_errors.err[0].code.errnum);
  }

  revert_properties();
  DBUG_VOID_RETURN;
}
| server | 01b39b7b0730102b88d8ea43ec719a75e9316a1e | 1,688,087,666,605,677,800,000,000,000,000,000,000 | 48 | mysqltest: don't eat new lines in --exec
pass them through as is |
/*
** Copy the current call's extra (vararg) arguments to 'where', producing
** exactly 'wanted' values; 'wanted' < 0 means "all extras available".
** Positions for which no extra argument exists are filled with nil.
*/
void luaT_getvarargs (lua_State *L, CallInfo *ci, StkId where, int wanted) {
  int i;
  int nextra = ci->u.l.nextraargs;  /* extras were saved below ci->func */
  if (wanted < 0) {
    wanted = nextra;  /* get all extra arguments available */
    checkstackp(L, nextra, where);  /* ensure stack space */
    /* NOTE(review): 'checkstackp' can trigger a GC step after resizing the
    ** stack; per the upstream fix ("Fixed bugs of stack reallocation x GC",
    ** which renames this to 'checkstackGCp'), that GC step may shrink the
    ** stack again and undo the resize -- confirm this call site is updated
    ** together with that macro. */
    L->top = where + nextra;  /* next instruction will need top */
  }
  for (i = 0; i < wanted && i < nextra; i++)
    setobjs2s(L, where + i, ci->func - nextra + i);  /* copy saved extras */
  for (; i < wanted; i++)   /* complete required results with nil */
    setnilvalue(s2v(where + i));
}
"CWE-416",
"CWE-125",
"CWE-787"
]
| lua | eb41999461b6f428186c55abd95f4ce1a76217d5 | 218,081,626,098,464,850,000,000,000,000,000,000,000 | 13 | Fixed bugs of stack reallocation x GC
Macro 'checkstackGC' was doing a GC step after resizing the stack;
the GC could shrink the stack and undo the resize. Moreover, macro
'checkstackp' also does a GC step, which could remove the preallocated
CallInfo when calling a function. (Its name has been changed to
'checkstackGCp' to emphasize that it calls the GC.) |
ippWrite(http_t *http,			/* I - HTTP connection */
         ipp_t  *ipp)			/* I - IPP data */
{
  DEBUG_printf(("ippWrite(http=%p, ipp=%p)", (void *)http, (void *)ipp));

 /*
  * A NULL connection cannot be written to; otherwise delegate to the
  * generic writer with httpWrite2 as the output callback.
  */

  return (http == NULL ? IPP_STATE_ERROR
                       : ippWriteIO(http, (ipp_iocb_t)httpWrite2,
                                    http->blocking, NULL, ipp));
}
"CWE-120"
]
| cups | f24e6cf6a39300ad0c3726a41a4aab51ad54c109 | 226,973,189,573,441,950,000,000,000,000,000,000,000 | 10 | Fix multiple security/disclosure issues:
- CVE-2019-8696 and CVE-2019-8675: Fixed SNMP buffer overflows (rdar://51685251)
- Fixed IPP buffer overflow (rdar://50035411)
- Fixed memory disclosure issue in the scheduler (rdar://51373853)
- Fixed DoS issues in the scheduler (rdar://51373929) |
static void fq_reset(struct fq *fq,
		     fq_skb_free_t free_func)
{
	int idx;

	/* Drain every per-flow queue, then release the flow array itself. */
	for (idx = 0; idx < fq->flows_cnt; idx++)
		fq_flow_reset(fq, fq->flows + idx, free_func);

	kfree(fq->flows);
	fq->flows = NULL;
}
"CWE-330"
]
| linux | 55667441c84fa5e0911a0aac44fb059c15ba6da2 | 253,030,601,764,580,960,000,000,000,000,000,000,000 | 11 | net/flow_dissector: switch to siphash
UDP IPv6 packets auto flowlabels are using a 32bit secret
(static u32 hashrnd in net/core/flow_dissector.c) and
apply jhash() over fields known by the receivers.
Attackers can easily infer the 32bit secret and use this information
to identify a device and/or user, since this 32bit secret is only
set at boot time.
Really, using jhash() to generate cookies sent on the wire
is a serious security concern.
Trying to change the rol32(hash, 16) in ip6_make_flowlabel() would be
a dead end. Trying to periodically change the secret (like in sch_sfq.c)
could change paths taken in the network for long lived flows.
Let's switch to siphash, as we did in commit df453700e8d8
("inet: switch IP ID generator to siphash")
Using a cryptographically strong pseudo random function will solve this
privacy issue and more generally remove other weak points in the stack.
Packet schedulers using skb_get_hash_perturb() benefit from this change.
Fixes: b56774163f99 ("ipv6: Enable auto flow labels by default")
Fixes: 42240901f7c4 ("ipv6: Implement different admin modes for automatic flow labels")
Fixes: 67800f9b1f4e ("ipv6: Call skb_get_hash_flowi6 to get skb->hash in ip6_make_flowlabel")
Fixes: cb1ce2ef387b ("ipv6: Implement automatic flow label generation on transmit")
Signed-off-by: Eric Dumazet <[email protected]>
Reported-by: Jonathan Berger <[email protected]>
Reported-by: Amit Klein <[email protected]>
Reported-by: Benny Pinkas <[email protected]>
Cc: Tom Herbert <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
// Fetch up to 'limit' backlog messages for 'bufferId' (which must belong to
// 'user'), optionally restricted by the MsgId bounds 'first'/'last'; -1
// disables the corresponding bound and limit == -1 means all messages.
// Runs inside a read-only transaction; returns an empty list on any failure.
QList<Message> PostgreSqlStorage::requestMsgs(UserId user, BufferId bufferId, MsgId first, MsgId last, int limit)
{
    QList<Message> messagelist;

    QSqlDatabase db = logDb();
    if (!beginReadOnlyTransaction(db)) {
        qWarning() << "PostgreSqlStorage::requestMsgs(): cannot start read only transaction!";
        qWarning() << " -" << qPrintable(db.lastError().text());
        return messagelist;
    }

    // Ownership check: bail out if the buffer is not valid for this user.
    BufferInfo bufferInfo = getBufferInfo(user, bufferId);
    if (!bufferInfo.isValid()) {
        db.rollback();
        return messagelist;
    }

    // Pick the prepared query matching the requested MsgId bounds.
    QString queryName;
    QVariantList params;
    if (last == -1 && first == -1) {
        queryName = "select_messages";
    }
    else if (last == -1) {
        queryName = "select_messagesNewerThan";
        params << first.toInt();
    }
    else {
        queryName = "select_messagesRange";
        params << first.toInt();
        params << last.toInt();
    }
    params << bufferId.toInt();
    // The SQL LIMIT clause accepts the keyword "ALL" for an unbounded fetch.
    if (limit != -1)
        params << limit;
    else
        params << "ALL";

    QSqlQuery query = executePreparedQuery(queryName, params, db);

    if (!watchQuery(query)) {
        qDebug() << "select_messages failed";
        db.rollback();
        return messagelist;
    }

    QDateTime timestamp;
    while (query.next()) {
        // Stored timestamps are UTC; without setTimeSpec Qt would interpret
        // them as local time.
        timestamp = query.value(1).toDateTime();
        timestamp.setTimeSpec(Qt::UTC);
        Message msg(timestamp,
                    bufferInfo,
                    (Message::Type)query.value(2).toUInt(),
                    query.value(5).toString(),
                    query.value(4).toString(),
                    (Message::Flags)query.value(3).toUInt());
        msg.setMsgId(query.value(0).toInt());
        messagelist << msg;
    }
    db.commit();
    return messagelist;
}
"CWE-89"
]
| quassel | aa1008be162cb27da938cce93ba533f54d228869 | 327,152,575,402,206,350,000,000,000,000,000,000,000 | 62 | Fixing security vulnerability with Qt 4.8.5+ and PostgreSQL.
Properly detects whether Qt performs slash escaping in SQL queries or
not, and then configures PostgreSQL accordingly. This bug was a
introduced due to a bugfix in Qt 4.8.5 disables slash escaping when
binding queries: https://bugreports.qt-project.org/browse/QTBUG-30076
Thanks to brot and Tucos.
[Fixes #1244] |
static struct calipso_doi *calipso_doi_search(u32 doi)
{
struct calipso_doi *iter;
list_for_each_entry_rcu(iter, &calipso_doi_list, list)
if (iter->doi == doi && refcount_read(&iter->refcount))
return iter;
return NULL;
} | 0 | [
"CWE-416"
]
| linux | ad5d07f4a9cd671233ae20983848874731102c08 | 241,290,205,230,618,430,000,000,000,000,000,000,000 | 9 | cipso,calipso: resolve a number of problems with the DOI refcounts
The current CIPSO and CALIPSO refcounting scheme for the DOI
definitions is a bit flawed in that we:
1. Don't correctly match gets/puts in netlbl_cipsov4_list().
2. Decrement the refcount on each attempt to remove the DOI from the
DOI list, only removing it from the list once the refcount drops
to zero.
This patch fixes these problems by adding the missing "puts" to
netlbl_cipsov4_list() and introduces a more conventional, i.e.
not-buggy, refcounting mechanism to the DOI definitions. Upon the
addition of a DOI to the DOI list, it is initialized with a refcount
of one, removing a DOI from the list removes it from the list and
drops the refcount by one; "gets" and "puts" behave as expected with
respect to refcounts, increasing and decreasing the DOI's refcount by
one.
Fixes: b1edeb102397 ("netlabel: Replace protocol/NetLabel linking with refrerence counts")
Fixes: d7cce01504a0 ("netlabel: Add support for removing a CALIPSO DOI.")
Reported-by: [email protected]
Signed-off-by: Paul Moore <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
int main(int argc, char *argv[])
{
opj_dinfo_t* dinfo;
opj_event_mgr_t event_mgr; /* event manager */
int tnum;
unsigned int snum;
opj_mj2_t *movie;
mj2_tk_t *track;
mj2_sample_t *sample;
unsigned char* frame_codestream;
FILE *file, *outfile;
char outfilename[50];
mj2_dparameters_t parameters;
if (argc != 3) {
printf("Usage: %s mj2filename output_location\n", argv[0]);
printf("Example: %s foreman.mj2 output/foreman\n", argv[0]);
return 1;
}
file = fopen(argv[1], "rb");
if (!file) {
fprintf(stderr, "failed to open %s for reading\n", argv[1]);
return 1;
}
/*
configure the event callbacks (not required)
setting of each callback is optional
*/
memset(&event_mgr, 0, sizeof(opj_event_mgr_t));
event_mgr.error_handler = error_callback;
event_mgr.warning_handler = warning_callback;
event_mgr.info_handler = info_callback;
/* get a MJ2 decompressor handle */
dinfo = mj2_create_decompress();
/* catch events using our callbacks and give a local context */
opj_set_event_mgr((opj_common_ptr)dinfo, &event_mgr, stderr);
/* setup the decoder decoding parameters using user parameters */
memset(¶meters, 0, sizeof(mj2_dparameters_t));
movie = (opj_mj2_t*) dinfo->mj2_handle;
mj2_setup_decoder(movie, ¶meters);
if (mj2_read_struct(file, movie)) { /* Creating the movie structure*/
return 1;
}
/* Decode first video track */
tnum = 0;
while (movie->tk[tnum].track_type != 0) {
tnum ++;
}
track = &movie->tk[tnum];
fprintf(stdout, "Extracting %d frames from file...\n", track->num_samples);
for (snum = 0; snum < track->num_samples; snum++) {
sample = &track->sample[snum];
frame_codestream = (unsigned char*) malloc(sample->sample_size -
8); /* Skipping JP2C marker*/
fseek(file, sample->offset + 8, SEEK_SET);
fread(frame_codestream, sample->sample_size - 8, 1,
file); /* Assuming that jp and ftyp markers size do*/
sprintf(outfilename, "%s_%05d.j2k", argv[2], snum);
outfile = fopen(outfilename, "wb");
if (!outfile) {
fprintf(stderr, "failed to open %s for writing\n", outfilename);
return 1;
}
fwrite(frame_codestream, sample->sample_size - 8, 1, outfile);
fclose(outfile);
free(frame_codestream);
}
fclose(file);
fprintf(stdout, "%d frames correctly extracted\n", snum);
/* free remaining structures */
if (dinfo) {
mj2_destroy_decompress((opj_mj2_t*)dinfo->mj2_handle);
}
return 0;
} | 1 | [
"CWE-119",
"CWE-787"
]
| openjpeg | cc3824767bde397fedb8a1ae4786a222ba860c8d | 175,493,277,472,519,040,000,000,000,000,000,000,000 | 89 | opj_mj2_extract: Check provided output prefix for length
This uses snprintf() with correct buffer length instead of sprintf(), which
prevents a buffer overflow when providing a long output prefix. Furthermore
the program exits with an error when the provided output prefix is too long.
Fixes #1088. |
stream_buffer_finalize (GObject *object)
{
CamelStreamBufferPrivate *priv;
priv = CAMEL_STREAM_BUFFER (object)->priv;
g_free (priv->buf);
g_free (priv->linebuf);
/* Chain up to parent's finalize() method. */
G_OBJECT_CLASS (camel_stream_buffer_parent_class)->finalize (object);
} | 0 | [
"CWE-74"
]
| evolution-data-server | ba82be72cfd427b5d72ff21f929b3a6d8529c4df | 169,393,364,628,306,440,000,000,000,000,000,000,000 | 12 | I#226 - CVE-2020-14928: Response Injection via STARTTLS in SMTP and POP3
Closes https://gitlab.gnome.org/GNOME/evolution-data-server/-/issues/226 |
static int dynamicGetbuf(gdIOCtxPtr ctx, void *buf, int len)
{
int rlen, remain;
dpIOCtxPtr dctx;
dynamicPtr *dp;
dctx = (dpIOCtxPtr) ctx;
dp = dctx->dp;
remain = dp->logicalSize - dp->pos;
if(remain >= len) {
rlen = len;
} else {
if(remain <= 0) {
/* 2.0.34: EOF is incorrect. We use 0 for
* errors and EOF, just like fileGetbuf,
* which is a simple fread() wrapper.
* TBB. Original bug report: Daniel Cowgill. */
return 0; /* NOT EOF */
}
rlen = remain;
}
memcpy(buf, (void *) ((char *)dp->data + dp->pos), rlen);
dp->pos += rlen;
return rlen;
} | 1 | [
"CWE-125"
]
| libgd | 4859d69e07504d4b0a4bdf9bcb4d9e3769ca35ae | 243,428,089,281,886,900,000,000,000,000,000,000,000 | 29 | Fix invalid read in gdImageCreateFromTiffPtr()
tiff_invalid_read.tiff is corrupt, and causes an invalid read in
gdImageCreateFromTiffPtr(), but not in gdImageCreateFromTiff(). The culprit
is dynamicGetbuf(), which doesn't check for out-of-bound reads. In this case,
dynamicGetbuf() is called with a negative dp->pos, but also positive buffer
overflows have to be handled, in which case 0 has to be returned (cf. commit
75e29a9).
Fixing dynamicGetbuf() exhibits that the corrupt TIFF would still create
the image, because the return value of TIFFReadRGBAImage() is not checked.
We do that, and let createFromTiffRgba() fail if TIFFReadRGBAImage() fails.
This issue had been reported by Ibrahim El-Sayed to [email protected].
CVE-2016-6911 |
fr_window_present_dialog_if_created (FrWindow *window,
const char *dialog_name)
{
GtkWidget *dialog;
dialog = g_hash_table_lookup (window->priv->named_dialogs, dialog_name);
if (dialog != NULL) {
gtk_window_present (GTK_WINDOW (dialog));
return TRUE;
}
return FALSE;
} | 0 | [
"CWE-22"
]
| file-roller | b147281293a8307808475e102a14857055f81631 | 227,542,409,154,220,860,000,000,000,000,000,000,000 | 13 | libarchive: sanitize filenames before extracting |
get_cmdline_screen_pos(void)
{
cmdline_info_T *p = get_ccline_ptr();
if (p == NULL)
return -1;
return p->cmdspos;
} | 0 | [
"CWE-674",
"CWE-787"
]
| vim | 51f0bfb88a3554ca2dde777d78a59880d1ee37a8 | 32,782,428,964,099,900,000,000,000,000,000,000,000 | 8 | patch 8.2.4975: recursive command line loop may cause a crash
Problem: Recursive command line loop may cause a crash.
Solution: Limit recursion of getcmdline(). |
mswin_printer_fopen(gx_io_device * iodev, const char *fname, const char *access,
gp_file ** pfile, char *rfname, uint rnamelen, gs_memory_t *mem)
{
DWORD version = GetVersion();
HANDLE hprinter;
int pipeh[2];
uintptr_t tid;
HANDLE hthread;
char pname[gp_file_name_sizeof];
uintptr_t *ptid = &((tid_t *)(iodev->state))->tid;
gs_lib_ctx_t *ctx = mem->gs_lib_ctx;
gs_fs_list_t *fs = ctx->core->fs;
const size_t preflen = strlen(iodev->dname);
const size_t nlen = strlen(fname);
if (preflen + nlen >= gp_file_name_sizeof)
return_error(gs_error_invalidaccess);
memcpy(pname, iodev->dname, preflen);
memcpy(pname + preflen, fname, nlen + 1);
if (gp_validate_path(mem, pname, access) != 0)
return gs_error_invalidfileaccess;
/* First we try the open_printer method. */
/* Note that the loop condition here ensures we don't
* trigger on the last registered fs entry (our standard
* 'file' one). */
if (access[0] == 'w' || access[0] == 'a')
{
*pfile = NULL;
for (fs = ctx->core->fs; fs != NULL && fs->next != NULL; fs = fs->next)
{
int code = 0;
if (fs->fs.open_printer)
code = fs->fs.open_printer(mem, fs->secret, fname, 1, pfile);
if (code < 0)
return code;
if (*pfile != NULL)
return code;
}
} else
return gs_error_invalidfileaccess;
/* If nothing claimed that, then continue with the
* standard MS way of working. */
/* Win32s supports neither pipes nor Win32 printers. */
if (((HIWORD(version) & 0x8000) != 0) &&
((HIWORD(version) & 0x4000) == 0))
return_error(gs_error_invalidfileaccess);
/* Make sure that printer exists. */
if (!gp_OpenPrinter((LPTSTR)fname, &hprinter))
return_error(gs_error_invalidfileaccess);
ClosePrinter(hprinter);
*pfile = gp_file_FILE_alloc(mem);
if (*pfile == NULL)
return_error(gs_error_VMerror);
/* Create a pipe to connect a FILE pointer to a Windows printer. */
if (_pipe(pipeh, 4096, _O_BINARY) != 0) {
gp_file_dealloc(*pfile);
*pfile = NULL;
return_error(gs_fopen_errno_to_code(errno));
}
if (gp_file_FILE_set(*pfile, fdopen(pipeh[1], (char *)access), NULL)) {
*pfile = NULL;
close(pipeh[0]);
close(pipeh[1]);
return_error(gs_fopen_errno_to_code(errno));
}
/* start a thread to read the pipe */
tid = _beginthread(&mswin_printer_thread, 32768, (void *)(intptr_t)(pipeh[0]));
if (tid == -1) {
gp_fclose(*pfile);
*pfile = NULL;
close(pipeh[0]);
return_error(gs_error_invalidfileaccess);
}
/* Duplicate thread handle so we can wait on it
* even if original handle is closed by CRTL
* when the thread finishes.
*/
if (!DuplicateHandle(GetCurrentProcess(), (HANDLE)tid,
GetCurrentProcess(), &hthread,
0, FALSE, DUPLICATE_SAME_ACCESS)) {
gp_fclose(*pfile);
*pfile = NULL;
return_error(gs_error_invalidfileaccess);
}
*ptid = (uintptr_t)hthread;
/* Give the name of the printer to the thread by writing
* it to the pipe. This is avoids elaborate thread
* synchronisation code.
*/
strncpy(pname, fname, sizeof(pname));
gp_fwrite(pname, 1, sizeof(pname), *pfile);
return 0;
} | 0 | [
"CWE-20"
]
| ghostpdl | a9bd3dec9fde03327a4a2c69dad1036bf9632e20 | 306,490,172,750,432,350,000,000,000,000,000,000,000 | 105 | Bug 704342: Include device specifier strings in access validation
for the "%pipe%", %handle%" and %printer% io devices.
We previously validated only the part after the "%pipe%" Postscript device
specifier, but this proved insufficient.
This rebuilds the original file name string, and validates it complete. The
slight complication for "%pipe%" is it can be reached implicitly using
"|" so we have to check both prefixes.
Addresses CVE-2021-3781 |
void tcp_enter_loss(struct sock *sk, int how)
{
const struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *skb;
/* Reduce ssthresh if it has not yet been made inside this window. */
if (icsk->icsk_ca_state <= TCP_CA_Disorder || tp->snd_una == tp->high_seq ||
(icsk->icsk_ca_state == TCP_CA_Loss && !icsk->icsk_retransmits)) {
tp->prior_ssthresh = tcp_current_ssthresh(sk);
tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
tcp_ca_event(sk, CA_EVENT_LOSS);
}
tp->snd_cwnd = 1;
tp->snd_cwnd_cnt = 0;
tp->snd_cwnd_stamp = tcp_time_stamp;
tp->bytes_acked = 0;
tcp_clear_retrans_partial(tp);
if (tcp_is_reno(tp))
tcp_reset_reno_sack(tp);
if (!how) {
/* Push undo marker, if it was plain RTO and nothing
* was retransmitted. */
tp->undo_marker = tp->snd_una;
} else {
tp->sacked_out = 0;
tp->fackets_out = 0;
}
tcp_clear_all_retrans_hints(tp);
tcp_for_write_queue(skb, sk) {
if (skb == tcp_send_head(sk))
break;
if (TCP_SKB_CB(skb)->sacked & TCPCB_RETRANS)
tp->undo_marker = 0;
TCP_SKB_CB(skb)->sacked &= (~TCPCB_TAGBITS)|TCPCB_SACKED_ACKED;
if (!(TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_ACKED) || how) {
TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_ACKED;
TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
tp->lost_out += tcp_skb_pcount(skb);
tp->retransmit_high = TCP_SKB_CB(skb)->end_seq;
}
}
tcp_verify_left_out(tp);
tp->reordering = min_t(unsigned int, tp->reordering,
sysctl_tcp_reordering);
tcp_set_ca_state(sk, TCP_CA_Loss);
tp->high_seq = tp->snd_nxt;
TCP_ECN_queue_cwr(tp);
/* Abort F-RTO algorithm if one is in progress */
tp->frto_counter = 0;
} | 0 | []
| net-next | fdf5af0daf8019cec2396cdef8fb042d80fe71fa | 105,040,551,570,075,170,000,000,000,000,000,000,000 | 57 | tcp: drop SYN+FIN messages
Denys Fedoryshchenko reported that SYN+FIN attacks were bringing his
linux machines to their limits.
Dont call conn_request() if the TCP flags includes SYN flag
Reported-by: Denys Fedoryshchenko <[email protected]>
Signed-off-by: Eric Dumazet <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
static int php_stream_memory_stat(php_stream *stream, php_stream_statbuf *ssb TSRMLS_DC) /* {{{ */
{
time_t timestamp = 0;
php_stream_memory_data *ms = (php_stream_memory_data*)stream->abstract;
assert(ms != NULL);
memset(ssb, 0, sizeof(php_stream_statbuf));
/* read-only across the board */
ssb->sb.st_mode = ms->mode & TEMP_STREAM_READONLY ? 0444 : 0666;
ssb->sb.st_size = ms->fsize;
ssb->sb.st_mode |= S_IFREG; /* regular file */
#ifdef NETWARE
ssb->sb.st_mtime.tv_sec = timestamp;
ssb->sb.st_atime.tv_sec = timestamp;
ssb->sb.st_ctime.tv_sec = timestamp;
#else
ssb->sb.st_mtime = timestamp;
ssb->sb.st_atime = timestamp;
ssb->sb.st_ctime = timestamp;
#endif
ssb->sb.st_nlink = 1;
ssb->sb.st_rdev = -1;
/* this is only for APC, so use /dev/null device - no chance of conflict there! */
ssb->sb.st_dev = 0xC;
/* generate unique inode number for alias/filename, so no phars will conflict */
ssb->sb.st_ino = 0;
#ifndef PHP_WIN32
ssb->sb.st_blksize = -1;
#endif
#if !defined(PHP_WIN32) && !defined(__BEOS__)
ssb->sb.st_blocks = -1;
#endif
return 0;
} | 1 | [
"CWE-20"
]
| php-src | 6297a117d77fa3a0df2e21ca926a92c231819cd5 | 116,178,345,785,508,770,000,000,000,000,000,000,000 | 41 | Fixed bug #71323 - Output of stream_get_meta_data can be falsified by its input |
R_API void r_bin_java_classes_free(void /*RBinClass*/ *k) {
RBinClass *klass = k;
if (klass) {
r_list_free (klass->methods);
r_list_free (klass->fields);
free (klass->name);
free (klass->super);
free (klass->visibility_str);
free (klass);
}
} | 0 | [
"CWE-119",
"CWE-788"
]
| radare2 | 6c4428f018d385fc80a33ecddcb37becea685dd5 | 90,092,289,332,957,790,000,000,000,000,000,000,000 | 11 | Improve boundary checks to fix oobread segfaults ##crash
* Reported by Cen Zhang via huntr.dev
* Reproducer: bins/fuzzed/javaoob-havoc.class |
static PolygonInfo **AcquirePolygonThreadSet(
const PrimitiveInfo *primitive_info,ExceptionInfo *exception)
{
PathInfo
*magick_restrict path_info;
PolygonInfo
**polygon_info;
ssize_t
i;
size_t
number_threads;
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
polygon_info=(PolygonInfo **) AcquireQuantumMemory(number_threads,
sizeof(*polygon_info));
if (polygon_info == (PolygonInfo **) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
return((PolygonInfo **) NULL);
}
(void) memset(polygon_info,0,number_threads*sizeof(*polygon_info));
path_info=ConvertPrimitiveToPath(primitive_info,exception);
if (path_info == (PathInfo *) NULL)
return(DestroyPolygonThreadSet(polygon_info));
polygon_info[0]=ConvertPathToPolygon(path_info,exception);
if (polygon_info[0] == (PolygonInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
return(DestroyPolygonThreadSet(polygon_info));
}
for (i=1; i < (ssize_t) number_threads; i++)
{
EdgeInfo
*edge_info;
ssize_t
j;
polygon_info[i]=(PolygonInfo *) AcquireMagickMemory(
sizeof(*polygon_info[i]));
if (polygon_info[i] == (PolygonInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
return(DestroyPolygonThreadSet(polygon_info));
}
polygon_info[i]->number_edges=0;
edge_info=polygon_info[0]->edges;
polygon_info[i]->edges=(EdgeInfo *) AcquireQuantumMemory(
polygon_info[0]->number_edges,sizeof(*edge_info));
if (polygon_info[i]->edges == (EdgeInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
return(DestroyPolygonThreadSet(polygon_info));
}
(void) memcpy(polygon_info[i]->edges,edge_info,
polygon_info[0]->number_edges*sizeof(*edge_info));
for (j=0; j < (ssize_t) polygon_info[i]->number_edges; j++)
polygon_info[i]->edges[j].points=(PointInfo *) NULL;
polygon_info[i]->number_edges=polygon_info[0]->number_edges;
for (j=0; j < (ssize_t) polygon_info[i]->number_edges; j++)
{
edge_info=polygon_info[0]->edges+j;
polygon_info[i]->edges[j].points=(PointInfo *) AcquireQuantumMemory(
edge_info->number_points,sizeof(*edge_info));
if (polygon_info[i]->edges[j].points == (PointInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
return(DestroyPolygonThreadSet(polygon_info));
}
(void) memcpy(polygon_info[i]->edges[j].points,edge_info->points,
edge_info->number_points*sizeof(*edge_info->points));
}
}
path_info=(PathInfo *) RelinquishMagickMemory(path_info);
return(polygon_info);
} | 0 | []
| ImageMagick | f4cdb3f3aab28273960ffacf1d356312b56ffd27 | 233,196,379,089,361,340,000,000,000,000,000,000,000 | 84 | https://github.com/ImageMagick/ImageMagick/issues/3338 |
static MagickBooleanType AcquireQuantumPixels(QuantumInfo *quantum_info,
const size_t extent)
{
register ssize_t
i;
assert(quantum_info != (QuantumInfo *) NULL);
assert(quantum_info->signature == MagickCoreSignature);
quantum_info->number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
quantum_info->pixels=(unsigned char **) AcquireQuantumMemory(
quantum_info->number_threads,sizeof(*quantum_info->pixels));
if (quantum_info->pixels == (unsigned char **) NULL)
return(MagickFalse);
quantum_info->extent=extent;
(void) ResetMagickMemory(quantum_info->pixels,0,quantum_info->number_threads*
sizeof(*quantum_info->pixels));
for (i=0; i < (ssize_t) quantum_info->number_threads; i++)
{
quantum_info->pixels[i]=(unsigned char *) AcquireQuantumMemory(extent+1,
sizeof(**quantum_info->pixels));
if (quantum_info->pixels[i] == (unsigned char *) NULL)
{
while (--i >= 0)
quantum_info->pixels[i]=(unsigned char *) RelinquishMagickMemory(
quantum_info->pixels[i]);
return(MagickFalse);
}
(void) ResetMagickMemory(quantum_info->pixels[i],0,(extent+1)*
sizeof(**quantum_info->pixels));
quantum_info->pixels[i][extent]=QuantumSignature;
}
return(MagickTrue);
} | 0 | [
"CWE-369"
]
| ImageMagick | c4e63ad30bc42da691f2b5f82a24516dd6b4dc70 | 175,626,876,598,634,630,000,000,000,000,000,000,000 | 33 | https://github.com/ImageMagick/ImageMagick/issues/105 |
Subsets and Splits