func (string, lengths 0–484k) | target (int64, 0–1) | cwe (sequence, lengths 0–4) | project (string, 799 classes) | commit_id (string, length 40) | hash (float64, ~1.2157e24 – 3.4028e29) | size (int64, 1–24k) | message (string, lengths 0–13.3k) |
---|---|---|---|---|---|---|---|
check_fmt(struct magic_set *ms, struct magic *m)
{
pcre *pce;
int re_options;
pcre_extra *re_extra;
TSRMLS_FETCH();
if (strchr(m->desc, '%') == NULL) {
return 0;
}
if ((pce = pcre_get_compiled_regex("~%[-0-9.]*s~", &re_extra, &re_options TSRMLS_CC)) == NULL) {
return -1;
} else {
return !pcre_exec(pce, re_extra, m->desc, strlen(m->desc), 0, re_options, NULL, 0);
}
} | 0 | [
"CWE-20"
] | php-src | 74555e7c26b2c61bb8e67b7d6a6f4d2b8eb3a5f3 | 654,784,013,407,664,500,000,000,000,000,000,000 | 17 | Fixed bug #64830 mimetype detection segfaults on mp3 file |
static int mov_skip_multiple_stsd(MOVContext *c, AVIOContext *pb,
int codec_tag, int format,
int64_t size)
{
int video_codec_id = ff_codec_get_id(ff_codec_movvideo_tags, format);
if (codec_tag &&
(codec_tag != format &&
// AVID 1:1 samples with differing data format and codec tag exist
(codec_tag != AV_RL32("AV1x") || format != AV_RL32("AVup")) &&
// prores is allowed to have differing data format and codec tag
codec_tag != AV_RL32("apcn") && codec_tag != AV_RL32("apch") &&
// so is dv (sigh)
codec_tag != AV_RL32("dvpp") && codec_tag != AV_RL32("dvcp") &&
(c->fc->video_codec_id ? video_codec_id != c->fc->video_codec_id
: codec_tag != MKTAG('j','p','e','g')))) {
/* Multiple fourcc, we skip JPEG. This is not correct, we should
* export it as a separate AVStream but this needs a few changes
* in the MOV demuxer, patch welcome. */
av_log(c->fc, AV_LOG_WARNING, "multiple fourcc not supported\n");
avio_skip(pb, size);
return 1;
}
return 0;
} | 0 | [
"CWE-399",
"CWE-834"
] | FFmpeg | 9cb4eb772839c5e1de2855d126bf74ff16d13382 | 71,476,639,376,515,990,000,000,000,000,000,000,000 | 27 | avformat/mov: Fix DoS in read_tfra()
Fixes: Missing EOF check in loop
No testcase
Found-by: Xiaohei and Wangchu from Alibaba Security Team
Signed-off-by: Michael Niedermayer <[email protected]> |
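The read_tfra() fix described above amounts to checking for EOF inside a count-driven read loop, so a huge declared entry count can no longer spin on failed reads. A minimal, generic C sketch of that pattern (illustrative only, not the FFmpeg code; names are hypothetical):

```c
#include <stdio.h>

/* Read `count` fixed-size entries, aborting on EOF/short read instead of
 * trusting the entry count declared in the file header. */
static int read_entries(FILE *f, unsigned long count)
{
    unsigned char entry[8];
    for (unsigned long i = 0; i < count; i++) {
        if (fread(entry, 1, sizeof entry, f) != sizeof entry)
            return -1;  /* EOF check inside the loop: stop, don't keep spinning */
        /* ... process entry ... */
    }
    return 0;
}
```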
close_transient_session (GdmManager *self,
GdmSession *session)
{
GPid pid;
pid = GPOINTER_TO_UINT (g_object_get_data (G_OBJECT (session), "caller-pid"));
gdm_session_close (session);
g_hash_table_remove (self->priv->transient_sessions,
GUINT_TO_POINTER (pid));
} | 0 | [] | gdm | ff98b2817014684ae1acec78ff06f0f461a56a9f | 300,979,474,162,012,700,000,000,000,000,000,000,000 | 9 | manager: if falling back to X11 retry autologin
Right now, we get one shot to autologin. If it fails, we fall back to
the greeter. We should give it another go if the reason for the failure
was wayland fallback to X.
https://bugzilla.gnome.org/show_bug.cgi?id=780520 |
rdp_send_control(uint16 action)
{
STREAM s;
s = rdp_init_data(8);
out_uint16_le(s, action);
out_uint16(s, 0); /* userid */
out_uint32(s, 0); /* control id */
s_mark_end(s);
rdp_send_data(s, RDP_DATA_PDU_CONTROL);
} | 0 | [
"CWE-787"
] | rdesktop | 766ebcf6f23ccfe8323ac10242ae6e127d4505d2 | 10,564,921,798,192,526,000,000,000,000,000,000,000 | 13 | Malicious RDP server security fixes
This commit includes fixes for a set of 21 vulnerabilities in
rdesktop when a malicious RDP server is used.
All vulnerabilities was identified and reported by Eyal Itkin.
* Add rdp_protocol_error function that is used in several fixes
* Refactor of process_bitmap_updates
* Fix possible integer overflow in s_check_rem() on 32bit arch
* Fix memory corruption in process_bitmap_data - CVE-2018-8794
* Fix remote code execution in process_bitmap_data - CVE-2018-8795
* Fix remote code execution in process_plane - CVE-2018-8797
* Fix Denial of Service in mcs_recv_connect_response - CVE-2018-20175
* Fix Denial of Service in mcs_parse_domain_params - CVE-2018-20175
* Fix Denial of Service in sec_parse_crypt_info - CVE-2018-20176
* Fix Denial of Service in sec_recv - CVE-2018-20176
* Fix minor information leak in rdpdr_process - CVE-2018-8791
* Fix Denial of Service in cssp_read_tsrequest - CVE-2018-8792
* Fix remote code execution in cssp_read_tsrequest - CVE-2018-8793
* Fix Denial of Service in process_bitmap_data - CVE-2018-8796
* Fix minor information leak in rdpsnd_process_ping - CVE-2018-8798
* Fix Denial of Service in process_secondary_order - CVE-2018-8799
* Fix remote code execution in in ui_clip_handle_data - CVE-2018-8800
* Fix major information leak in ui_clip_handle_data - CVE-2018-20174
* Fix memory corruption in rdp_in_unistr - CVE-2018-20177
* Fix Denial of Service in process_demand_active - CVE-2018-20178
* Fix remote code execution in lspci_process - CVE-2018-20179
* Fix remote code execution in rdpsnddbg_process - CVE-2018-20180
* Fix remote code execution in seamless_process - CVE-2018-20181
* Fix remote code execution in seamless_process_line - CVE-2018-20182 |
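Among the fixes listed above, the s_check_rem() integer overflow on 32-bit is a classic pattern: a check of the form `p + n <= end` can wrap, while comparing against the remaining length cannot. A hedged sketch of the safe form (hypothetical helper, not the rdesktop source):

```c
#include <stddef.h>
#include <stdint.h>

/* Safe remaining-bytes check: assumes p <= end as an invariant, so
 * end - p never underflows and no pointer addition can overflow. */
static int check_rem(const uint8_t *p, const uint8_t *end, size_t n)
{
    return (size_t)(end - p) >= n;
}
```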
static inline ut32 r_read_at_le32(const void *src, size_t offset) {
const ut8 *s = (const ut8*)src + offset;
return r_read_le32 (s);
} | 1 | [
"CWE-476"
] | radare2 | 1ea23bd6040441a21fbcfba69dce9a01af03f989 | 18,761,499,229,168,303,000,000,000,000,000,000,000 | 4 | Fix #6816 - null deref in r_read_* |
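The r_read_at_le32() sample above is labeled vulnerable (target = 1): `src` is dereferenced with no NULL guard. A plausible shape of the fix (hypothetical sketch, not the actual radare2 patch) adds the guard and returns a sentinel:

```c
#include <stdint.h>
#include <stddef.h>

static uint32_t read_at_le32_safe(const void *src, size_t offset)
{
    const uint8_t *s;
    if (!src)
        return UINT32_MAX;  /* NULL guard: sentinel instead of a crash */
    s = (const uint8_t *)src + offset;
    return (uint32_t)s[0] | ((uint32_t)s[1] << 8) |
           ((uint32_t)s[2] << 16) | ((uint32_t)s[3] << 24);
}
```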
const T& _atXY(const int x, const int y, const int z=0, const int c=0) const {
return (*this)(cimg::cut(x,0,width() - 1),
cimg::cut(y,0,height() - 1),z,c);
} | 0 | [
"CWE-770"
] | cimg | 619cb58dd90b4e03ac68286c70ed98acbefd1c90 | 53,106,724,551,707,400,000,000,000,000,000,000,000 | 4 | CImg<>::load_bmp() and CImg<>::load_pandore(): Check that dimensions encoded in file does not exceed file size. |
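The CImg fix above validates header-declared dimensions against the file size before allocating (CWE-770). A generic, overflow-aware sketch of that check (hypothetical helper, not the CImg code):

```c
#include <stdint.h>

/* Reject images whose declared pixel data could not fit in the file,
 * guarding the multiplications against overflow first. */
static int dims_fit_file(uint64_t w, uint64_t h, uint64_t bytes_per_pixel,
                         uint64_t file_size)
{
    if (!w || !h || !bytes_per_pixel)
        return 0;
    if (w > UINT64_MAX / h || w * h > UINT64_MAX / bytes_per_pixel)
        return 0;  /* product would overflow */
    return w * h * bytes_per_pixel <= file_size;
}
```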
static struct sock *udp6_lib_lookup2(struct net *net,
const struct in6_addr *saddr, __be16 sport,
const struct in6_addr *daddr, unsigned int hnum, int dif,
struct udp_hslot *hslot2, unsigned int slot2)
{
struct sock *sk, *result;
struct hlist_nulls_node *node;
int score, badness, matches = 0, reuseport = 0;
u32 hash = 0;
begin:
result = NULL;
badness = -1;
udp_portaddr_for_each_entry_rcu(sk, node, &hslot2->head) {
score = compute_score2(sk, net, saddr, sport,
daddr, hnum, dif);
if (score > badness) {
result = sk;
badness = score;
reuseport = sk->sk_reuseport;
if (reuseport) {
hash = inet6_ehashfn(net, daddr, hnum,
saddr, sport);
matches = 1;
} else if (score == SCORE2_MAX)
goto exact_match;
} else if (score == badness && reuseport) {
matches++;
if (((u64)hash * matches) >> 32 == 0)
result = sk;
hash = next_pseudo_random32(hash);
}
}
/*
* if the nulls value we got at the end of this lookup is
* not the expected one, we must restart lookup.
* We probably met an item that was moved to another chain.
*/
if (get_nulls_value(node) != slot2)
goto begin;
if (result) {
exact_match:
if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2)))
result = NULL;
else if (unlikely(compute_score2(result, net, saddr, sport,
daddr, hnum, dif) < badness)) {
sock_put(result);
goto begin;
}
}
return result;
} | 0 | [
"CWE-399",
"CWE-703"
] | linux | 8822b64a0fa64a5dd1dfcf837c5b0be83f8c05d1 | 39,267,816,809,914,405,000,000,000,000,000,000,000 | 53 | ipv6: call udp_push_pending_frames when uncorking a socket with AF_INET pending data
We accidentally call down to ip6_push_pending_frames when uncorking
pending AF_INET data on a ipv6 socket. This results in the following
splat (from Dave Jones):
skbuff: skb_under_panic: text:ffffffff816765f6 len:48 put:40 head:ffff88013deb6df0 data:ffff88013deb6dec tail:0x2c end:0xc0 dev:<NULL>
------------[ cut here ]------------
kernel BUG at net/core/skbuff.c:126!
invalid opcode: 0000 [#1] PREEMPT SMP DEBUG_PAGEALLOC
Modules linked in: dccp_ipv4 dccp 8021q garp bridge stp dlci mpoa snd_seq_dummy sctp fuse hidp tun bnep nfnetlink scsi_transport_iscsi rfcomm can_raw can_bcm af_802154 appletalk caif_socket can caif ipt_ULOG x25 rose af_key pppoe pppox ipx phonet irda llc2 ppp_generic slhc p8023 psnap p8022 llc crc_ccitt atm bluetooth
+netrom ax25 nfc rfkill rds af_rxrpc coretemp hwmon kvm_intel kvm crc32c_intel snd_hda_codec_realtek ghash_clmulni_intel microcode pcspkr snd_hda_codec_hdmi snd_hda_intel snd_hda_codec snd_hwdep usb_debug snd_seq snd_seq_device snd_pcm e1000e snd_page_alloc snd_timer ptp snd pps_core soundcore xfs libcrc32c
CPU: 2 PID: 8095 Comm: trinity-child2 Not tainted 3.10.0-rc7+ #37
task: ffff8801f52c2520 ti: ffff8801e6430000 task.ti: ffff8801e6430000
RIP: 0010:[<ffffffff816e759c>] [<ffffffff816e759c>] skb_panic+0x63/0x65
RSP: 0018:ffff8801e6431de8 EFLAGS: 00010282
RAX: 0000000000000086 RBX: ffff8802353d3cc0 RCX: 0000000000000006
RDX: 0000000000003b90 RSI: ffff8801f52c2ca0 RDI: ffff8801f52c2520
RBP: ffff8801e6431e08 R08: 0000000000000000 R09: 0000000000000000
R10: 0000000000000001 R11: 0000000000000001 R12: ffff88022ea0c800
R13: ffff88022ea0cdf8 R14: ffff8802353ecb40 R15: ffffffff81cc7800
FS: 00007f5720a10740(0000) GS:ffff880244c00000(0000) knlGS:0000000000000000
CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
CR2: 0000000005862000 CR3: 000000022843c000 CR4: 00000000001407e0
DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000600
Stack:
ffff88013deb6dec 000000000000002c 00000000000000c0 ffffffff81a3f6e4
ffff8801e6431e18 ffffffff8159a9aa ffff8801e6431e90 ffffffff816765f6
ffffffff810b756b 0000000700000002 ffff8801e6431e40 0000fea9292aa8c0
Call Trace:
[<ffffffff8159a9aa>] skb_push+0x3a/0x40
[<ffffffff816765f6>] ip6_push_pending_frames+0x1f6/0x4d0
[<ffffffff810b756b>] ? mark_held_locks+0xbb/0x140
[<ffffffff81694919>] udp_v6_push_pending_frames+0x2b9/0x3d0
[<ffffffff81694660>] ? udplite_getfrag+0x20/0x20
[<ffffffff8162092a>] udp_lib_setsockopt+0x1aa/0x1f0
[<ffffffff811cc5e7>] ? fget_light+0x387/0x4f0
[<ffffffff816958a4>] udpv6_setsockopt+0x34/0x40
[<ffffffff815949f4>] sock_common_setsockopt+0x14/0x20
[<ffffffff81593c31>] SyS_setsockopt+0x71/0xd0
[<ffffffff816f5d54>] tracesys+0xdd/0xe2
Code: 00 00 48 89 44 24 10 8b 87 d8 00 00 00 48 89 44 24 08 48 8b 87 e8 00 00 00 48 c7 c7 c0 04 aa 81 48 89 04 24 31 c0 e8 e1 7e ff ff <0f> 0b 55 48 89 e5 0f 0b 55 48 89 e5 0f 0b 55 48 89 e5 0f 0b 55
RIP [<ffffffff816e759c>] skb_panic+0x63/0x65
RSP <ffff8801e6431de8>
This patch adds a check if the pending data is of address family AF_INET
and directly calls udp_push_pending_frames from udp_v6_push_pending_frames
if that is the case.
This bug was found by Dave Jones with trinity.
(Also move the initialization of fl6 below the AF_INET check, even if
not strictly necessary.)
Cc: Dave Jones <[email protected]>
Cc: YOSHIFUJI Hideaki <[email protected]>
Signed-off-by: Hannes Frederic Sowa <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
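The essence of the fix the message describes, paraphrased (a non-buildable sketch of the commit's idea, not a kernel excerpt; surrounding context omitted):

```c
/* In udp_v6_push_pending_frames(): corked data queued via AF_INET on an
 * IPv6 socket must go down the IPv4 push path, never
 * ip6_push_pending_frames(). */
if (up->pending == AF_INET)
    return udp_push_pending_frames(sk);
/* ... otherwise set up fl6 and continue with the IPv6 push path ... */
```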
TEST_F(AuthorizationManagerTest, testLocalX509AuthorizationInvalidUser) {
ServiceContextNoop serviceContext;
transport::TransportLayerMock transportLayer{};
transport::SessionHandle session = transportLayer.createSession();
setX509PeerInfo(session,
SSLPeerInfo(buildX509Name(),
boost::none,
{RoleName("read", "test"), RoleName("write", "test")}));
ServiceContext::UniqueClient client = serviceContext.makeClient("testClient", session);
ServiceContext::UniqueOperationContext opCtx = client->makeOperationContext();
User* x509User;
ASSERT_NOT_OK(
authzManager->acquireUser(opCtx.get(), UserName("CN=10gen.com", "$external"), &x509User));
} | 0 | [
"CWE-863"
] | mongo | a93cfd354467981c9cf944a4ada748d0226fdfb0 | 184,855,794,315,660,940,000,000,000,000,000,000,000 | 15 | SERVER-45472 Ensure RoleGraph can serialize authentication restrictions to BSON
(cherry picked from commit 521e56b407ac72bc69a97a24d1253f51a5b6e81b)
(cherry picked from commit a10d0a22d5d009d27664967181042933ec1bef36)
(cherry picked from commit fb87cc88ecb5d300f14cda7bc238d7d5132118f5) |
int wc_ecc_make_key_ex(WC_RNG* rng, int keysize, ecc_key* key, int curve_id)
{
int err;
#if !defined(WOLFSSL_ATECC508A) && !defined(WOLFSSL_CRYPTOCELL)
#ifndef WOLFSSL_SP_MATH
DECLARE_CURVE_SPECS(curve, ECC_CURVE_FIELD_COUNT);
#endif
#endif /* !WOLFSSL_ATECC508A */
#if defined(WOLFSSL_CRYPTOCELL) && !defined(WOLFSSL_ATECC508A)
const CRYS_ECPKI_Domain_t* pDomain;
CRYS_ECPKI_KG_TempData_t tempBuff;
CRYS_ECPKI_KG_FipsContext_t fipsCtx;
byte ucompressed_key[ECC_MAX_CRYPTO_HW_SIZE*2 + 1];
word32 raw_size = 0;
#endif
if (key == NULL || rng == NULL) {
return BAD_FUNC_ARG;
}
/* make sure required variables are reset */
wc_ecc_reset(key);
err = wc_ecc_set_curve(key, keysize, curve_id);
if (err != 0) {
return err;
}
#ifdef WOLF_CRYPTO_CB
if (key->devId != INVALID_DEVID) {
err = wc_CryptoCb_MakeEccKey(rng, keysize, key, curve_id);
if (err != CRYPTOCB_UNAVAILABLE)
return err;
/* fall-through when unavailable */
}
#endif
#if defined(WOLFSSL_ASYNC_CRYPT) && defined(WC_ASYNC_ENABLE_ECC)
if (key->asyncDev.marker == WOLFSSL_ASYNC_MARKER_ECC) {
#ifdef HAVE_CAVIUM
/* TODO: Not implemented */
#elif defined(HAVE_INTEL_QA)
/* TODO: Not implemented */
#else
if (wc_AsyncTestInit(&key->asyncDev, ASYNC_TEST_ECC_MAKE)) {
WC_ASYNC_TEST* testDev = &key->asyncDev.test;
testDev->eccMake.rng = rng;
testDev->eccMake.key = key;
testDev->eccMake.size = keysize;
testDev->eccMake.curve_id = curve_id;
return WC_PENDING_E;
}
#endif
}
#endif /* WOLFSSL_ASYNC_CRYPT && WC_ASYNC_ENABLE_ECC */
#ifdef WOLFSSL_ATECC508A
if (key->dp->id == ECC_SECP256R1) {
key->type = ECC_PRIVATEKEY;
key->slot = atmel_ecc_alloc(ATMEL_SLOT_ECDHE);
err = atmel_ecc_create_key(key->slot, key->pubkey_raw);
/* populate key->pubkey */
if (err == 0
#ifdef ALT_ECC_SIZE
&& key->pubkey.x
#endif
) {
err = mp_read_unsigned_bin(key->pubkey.x, key->pubkey_raw,
ECC_MAX_CRYPTO_HW_SIZE);
}
if (err == 0
#ifdef ALT_ECC_SIZE
&& key->pubkey.y
#endif
) {
err = mp_read_unsigned_bin(key->pubkey.y,
key->pubkey_raw + ECC_MAX_CRYPTO_HW_SIZE,
ECC_MAX_CRYPTO_HW_SIZE);
}
}
else {
err = NOT_COMPILED_IN;
}
#elif defined(WOLFSSL_CRYPTOCELL)
pDomain = CRYS_ECPKI_GetEcDomain(cc310_mapCurve(curve_id));
raw_size = (word32)(key->dp->size)*2 + 1;
/* generate first key pair */
err = CRYS_ECPKI_GenKeyPair(&wc_rndState,
wc_rndGenVectFunc,
pDomain,
&key->ctx.privKey,
&key->ctx.pubKey,
&tempBuff,
&fipsCtx);
if (err != SA_SILIB_RET_OK){
WOLFSSL_MSG("CRYS_ECPKI_GenKeyPair for key pair failed");
return err;
}
key->type = ECC_PRIVATEKEY;
err = CRYS_ECPKI_ExportPublKey(&key->ctx.pubKey,
CRYS_EC_PointUncompressed,
&ucompressed_key[0],
&raw_size);
if (err == SA_SILIB_RET_OK && key->pubkey.x && key->pubkey.y) {
err = mp_read_unsigned_bin(key->pubkey.x,
&ucompressed_key[1], key->dp->size);
if (err == MP_OKAY) {
err = mp_read_unsigned_bin(key->pubkey.y,
&ucompressed_key[1+key->dp->size],key->dp->size);
}
}
raw_size = key->dp->size;
if (err == MP_OKAY) {
err = CRYS_ECPKI_ExportPrivKey(&key->ctx.privKey,
ucompressed_key,
&raw_size);
}
if (err == SA_SILIB_RET_OK) {
err = mp_read_unsigned_bin(&key->k, ucompressed_key, raw_size);
}
#else
#ifdef WOLFSSL_HAVE_SP_ECC
#ifndef WOLFSSL_SP_NO_256
if (key->idx != ECC_CUSTOM_IDX && ecc_sets[key->idx].id == ECC_SECP256R1) {
err = sp_ecc_make_key_256(rng, &key->k, &key->pubkey, key->heap);
if (err == MP_OKAY) {
key->type = ECC_PRIVATEKEY;
}
}
else
#endif
#ifdef WOLFSSL_SP_384
if (key->idx != ECC_CUSTOM_IDX && ecc_sets[key->idx].id == ECC_SECP384R1) {
err = sp_ecc_make_key_384(rng, &key->k, &key->pubkey, key->heap);
if (err == MP_OKAY) {
key->type = ECC_PRIVATEKEY;
}
}
else
#endif
#endif /* WOLFSSL_HAVE_SP_ECC */
{ /* software key gen */
#ifdef WOLFSSL_SP_MATH
err = WC_KEY_SIZE_E;
#else
/* setup the key variables */
err = mp_init(&key->k);
/* load curve info */
if (err == MP_OKAY) {
ALLOC_CURVE_SPECS(ECC_CURVE_FIELD_COUNT);
err = wc_ecc_curve_load(key->dp, &curve, ECC_CURVE_FIELD_ALL);
}
/* generate k */
if (err == MP_OKAY)
err = wc_ecc_gen_k(rng, key->dp->size, &key->k, curve->order);
/* generate public key from k */
if (err == MP_OKAY)
err = wc_ecc_make_pub_ex(key, curve, NULL);
if (err == MP_OKAY)
key->type = ECC_PRIVATEKEY;
/* cleanup these on failure case only */
if (err != MP_OKAY) {
/* clean up */
mp_forcezero(&key->k);
}
/* cleanup allocations */
wc_ecc_curve_free(curve);
FREE_CURVE_SPECS();
#endif /* WOLFSSL_SP_MATH */
}
#ifdef HAVE_WOLF_BIGINT
if (err == MP_OKAY)
err = wc_mp_to_bigint(&key->k, &key->k.raw);
if (err == MP_OKAY)
err = wc_mp_to_bigint(key->pubkey.x, &key->pubkey.x->raw);
if (err == MP_OKAY)
err = wc_mp_to_bigint(key->pubkey.y, &key->pubkey.y->raw);
if (err == MP_OKAY)
err = wc_mp_to_bigint(key->pubkey.z, &key->pubkey.z->raw);
#endif
#endif /* WOLFSSL_ATECC508A */
return err;
} | 0 | [
"CWE-326",
"CWE-203"
] | wolfssl | 1de07da61f0c8e9926dcbd68119f73230dae283f | 28,779,034,731,455,457,000,000,000,000,000,000,000 | 202 | Constant time EC map to affine for private operations
For fast math, use a constant time modular inverse when mapping to
affine when the operation involves a private key - key gen, calc shared
secret, sign. |
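Constant-time code in the sense of this commit means no data-dependent branches or memory indices on secret values (the paired CWE-203 is the timing side channel). As a tiny illustration of the style, here is a different, simpler primitive: a branch-free word select (generic C, not wolfSSL code):

```c
#include <stdint.h>

/* Returns a if cond == 1, b if cond == 0, with no secret-dependent branch. */
static uint32_t ct_select(uint32_t cond, uint32_t a, uint32_t b)
{
    uint32_t mask = (uint32_t)0 - cond;  /* 0x00000000 or 0xFFFFFFFF */
    return (a & mask) | (b & ~mask);
}
```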
void setup(bool ssl, const std::string& server_name, bool tracing = true, bool use_srds = false) {
use_srds_ = use_srds;
if (ssl) {
ssl_connection_ = std::make_shared<Ssl::MockConnectionInfo>();
}
server_name_ = server_name;
ON_CALL(filter_callbacks_.connection_, ssl()).WillByDefault(Return(ssl_connection_));
ON_CALL(Const(filter_callbacks_.connection_), ssl()).WillByDefault(Return(ssl_connection_));
filter_callbacks_.connection_.local_address_ =
std::make_shared<Network::Address::Ipv4Instance>("127.0.0.1", 443);
filter_callbacks_.connection_.remote_address_ =
std::make_shared<Network::Address::Ipv4Instance>("0.0.0.0");
conn_manager_ = std::make_unique<ConnectionManagerImpl>(
*this, drain_close_, random_, http_context_, runtime_, local_info_, cluster_manager_,
&overload_manager_, test_time_.timeSystem());
conn_manager_->initializeReadFilterCallbacks(filter_callbacks_);
if (tracing) {
envoy::type::v3::FractionalPercent percent1;
percent1.set_numerator(100);
envoy::type::v3::FractionalPercent percent2;
percent2.set_numerator(10000);
percent2.set_denominator(envoy::type::v3::FractionalPercent::TEN_THOUSAND);
tracing_config_ = std::make_unique<TracingConnectionManagerConfig>(
TracingConnectionManagerConfig{Tracing::OperationName::Ingress,
{{":method", requestHeaderCustomTag(":method")}},
percent1,
percent2,
percent1,
false,
256});
}
} | 0 | [
"CWE-400"
] | envoy | 0e49a495826ea9e29134c1bd54fdeb31a034f40c | 101,820,783,244,286,100,000,000,000,000,000,000,000 | 34 | http/2: add stats and stream flush timeout (#139)
This commit adds a new stream flush timeout to guard against a
remote server that does not open window once an entire stream has
been buffered for flushing. Additional stats have also been added
to better understand the codecs view of active streams as well as
amount of data buffered.
Signed-off-by: Matt Klein <[email protected]> |
SSL *SSL_dup(SSL *s)
{
STACK_OF(X509_NAME) *sk;
X509_NAME *xn;
SSL *ret;
int i;
if ((ret=SSL_new(SSL_get_SSL_CTX(s))) == NULL)
return(NULL);
ret->version = s->version;
ret->type = s->type;
ret->method = s->method;
if (s->session != NULL)
{
/* This copies session-id, SSL_METHOD, sid_ctx, and 'cert' */
SSL_copy_session_id(ret,s);
}
else
{
/* No session has been established yet, so we have to expect
* that s->cert or ret->cert will be changed later --
* they should not both point to the same object,
* and thus we can't use SSL_copy_session_id. */
ret->method->ssl_free(ret);
ret->method = s->method;
ret->method->ssl_new(ret);
if (s->cert != NULL)
{
if (ret->cert != NULL)
{
ssl_cert_free(ret->cert);
}
ret->cert = ssl_cert_dup(s->cert);
if (ret->cert == NULL)
goto err;
}
SSL_set_session_id_context(ret,
s->sid_ctx, s->sid_ctx_length);
}
ret->options=s->options;
ret->mode=s->mode;
SSL_set_max_cert_list(ret,SSL_get_max_cert_list(s));
SSL_set_read_ahead(ret,SSL_get_read_ahead(s));
ret->msg_callback = s->msg_callback;
ret->msg_callback_arg = s->msg_callback_arg;
SSL_set_verify(ret,SSL_get_verify_mode(s),
SSL_get_verify_callback(s));
SSL_set_verify_depth(ret,SSL_get_verify_depth(s));
ret->generate_session_id = s->generate_session_id;
SSL_set_info_callback(ret,SSL_get_info_callback(s));
ret->debug=s->debug;
/* copy app data, a little dangerous perhaps */
if (!CRYPTO_dup_ex_data(CRYPTO_EX_INDEX_SSL, &ret->ex_data, &s->ex_data))
goto err;
/* setup rbio, and wbio */
if (s->rbio != NULL)
{
if (!BIO_dup_state(s->rbio,(char *)&ret->rbio))
goto err;
}
if (s->wbio != NULL)
{
if (s->wbio != s->rbio)
{
if (!BIO_dup_state(s->wbio,(char *)&ret->wbio))
goto err;
}
else
ret->wbio=ret->rbio;
}
ret->rwstate = s->rwstate;
ret->in_handshake = s->in_handshake;
ret->handshake_func = s->handshake_func;
ret->server = s->server;
ret->new_session = s->new_session;
ret->quiet_shutdown = s->quiet_shutdown;
ret->shutdown=s->shutdown;
ret->state=s->state; /* SSL_dup does not really work at any state, though */
ret->rstate=s->rstate;
ret->init_num = 0; /* would have to copy ret->init_buf, ret->init_msg, ret->init_num, ret->init_off */
ret->hit=s->hit;
X509_VERIFY_PARAM_inherit(ret->param, s->param);
/* dup the cipher_list and cipher_list_by_id stacks */
if (s->cipher_list != NULL)
{
if ((ret->cipher_list=sk_SSL_CIPHER_dup(s->cipher_list)) == NULL)
goto err;
}
if (s->cipher_list_by_id != NULL)
if ((ret->cipher_list_by_id=sk_SSL_CIPHER_dup(s->cipher_list_by_id))
== NULL)
goto err;
/* Dup the client_CA list */
if (s->client_CA != NULL)
{
if ((sk=sk_X509_NAME_dup(s->client_CA)) == NULL) goto err;
ret->client_CA=sk;
for (i=0; i<sk_X509_NAME_num(sk); i++)
{
xn=sk_X509_NAME_value(sk,i);
if (sk_X509_NAME_set(sk,i,X509_NAME_dup(xn)) == NULL)
{
X509_NAME_free(xn);
goto err;
}
}
}
if (0)
{
err:
if (ret != NULL) SSL_free(ret);
ret=NULL;
}
return(ret);
} | 0 | [
"CWE-310"
] | openssl | c6a876473cbff0fd323c8abcaace98ee2d21863d | 44,779,357,842,342,200,000,000,000,000,000,000,000 | 129 | Support TLS_FALLBACK_SCSV.
Reviewed-by: Stephen Henson <[email protected]> |
void gf_bs_write_float(GF_BitStream *bs, Float value)
{
u32 i;
union
{ float f;
char sz [4];
} float_value;
float_value.f = value;
for (i = 0; i < 32; i++)
BS_WriteBit(bs, (float_value.sz [3 - i / 8] & 1 << (7 - i % 8)) != 0);
} | 0 | [
"CWE-617",
"CWE-703"
] | gpac | 9ea93a2ec8f555ceed1ee27294cf94822f14f10f | 199,675,253,393,277,800,000,000,000,000,000,000,000 | 13 | fixed #2165 |
static uint16_t dummy_section(PhysPageMap *map, FlatView *fv, MemoryRegion *mr)
{
assert(fv);
MemoryRegionSection section = {
.fv = fv,
.mr = mr,
.offset_within_address_space = 0,
.offset_within_region = 0,
.size = int128_2_64(),
};
return phys_section_add(map, §ion);
} | 0 | [
"CWE-787"
] | qemu | 4bfb024bc76973d40a359476dc0291f46e435442 | 300,733,772,097,769,400,000,000,000,000,000,000,000 | 13 | memory: clamp cached translation in case it points to an MMIO region
In using the address_space_translate_internal API, address_space_cache_init
forgot one piece of advice that can be found in the code for
address_space_translate_internal:
/* MMIO registers can be expected to perform full-width accesses based only
* on their address, without considering adjacent registers that could
* decode to completely different MemoryRegions. When such registers
* exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
* regions overlap wildly. For this reason we cannot clamp the accesses
* here.
*
* If the length is small (as is the case for address_space_ldl/stl),
* everything works fine. If the incoming length is large, however,
* the caller really has to do the clamping through memory_access_size.
*/
address_space_cache_init is exactly one such case where "the incoming length
is large", therefore we need to clamp the resulting length---not to
memory_access_size though, since we are not doing an access yet, but to
the size of the resulting section. This ensures that subsequent accesses
to the cached MemoryRegionSection will be in range.
With this patch, the enclosed testcase notices that the used ring does
not fit into the MSI-X table and prints a "qemu-system-x86_64: Cannot map used"
error.
Signed-off-by: Paolo Bonzini <[email protected]> |
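The clamping the commit message calls for can be expressed in a few lines: after translation, the cached length is limited to what remains of the resulting region. A generic sketch (hypothetical helper, not the QEMU code):

```c
#include <stdint.h>

/* Clamp a requested access length to the bytes remaining in the region,
 * so cached accesses can never run past the section. Assumes
 * offset_in_region <= region_size. */
static uint64_t clamp_to_region(uint64_t offset_in_region,
                                uint64_t region_size,
                                uint64_t requested_len)
{
    uint64_t remaining = region_size - offset_in_region;
    return requested_len < remaining ? requested_len : remaining;
}
```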
GF_Err ssix_Size(GF_Box *s)
{
u32 i;
GF_SubsegmentIndexBox *ptr = (GF_SubsegmentIndexBox*)s;
ptr->size += 4;
for (i = 0; i < ptr->subsegment_count; i++) {
ptr->size += 4 + 4 * ptr->subsegments[i].range_count;
}
return GF_OK;
} | 0 | [
"CWE-400",
"CWE-401"
] | gpac | d2371b4b204f0a3c0af51ad4e9b491144dd1225c | 161,680,063,273,838,840,000,000,000,000,000,000,000 | 11 | prevent dref memleak on invalid input (#1183) |
void AddNetwork(const CString& sLine) {
CString sUser = sLine.Token(1);
CString sNetwork = sLine.Token(2);
CUser* pUser = GetUser();
if (sNetwork.empty()) {
sNetwork = sUser;
} else {
pUser = FindUser(sUser);
if (!pUser) {
return;
}
}
if (sNetwork.empty()) {
PutModule(t_s("Usage: AddNetwork [user] network"));
return;
}
if (!GetUser()->IsAdmin() && !pUser->HasSpaceForNewNetwork()) {
PutStatus(
t_s("Network number limit reached. Ask an admin to increase "
"the limit for you, or delete unneeded networks using /znc "
"DelNetwork <name>"));
return;
}
if (pUser->FindNetwork(sNetwork)) {
PutModule(
t_f("Error: User {1} already has a network with the name {2}")(
pUser->GetUserName(), sNetwork));
return;
}
CString sNetworkAddError;
if (pUser->AddNetwork(sNetwork, sNetworkAddError)) {
PutModule(t_f("Network {1} added to user {2}.")(
sNetwork, pUser->GetUserName()));
} else {
PutModule(t_f(
"Error: Network [{1}] could not be added for user {2}: {3}")(
sNetwork, pUser->GetUserName(), sNetworkAddError));
}
} | 0 | [
"CWE-20"
] | znc | 64613bc8b6b4adf1e32231f9844d99cd512b8973 | 42,007,512,201,726,770,000,000,000,000,000,000,000 | 44 | Don't crash if user specified invalid encoding.
This is CVE-2019-9917 |
SPL_METHOD(Array, seek)
{
long opos, position;
zval *object = getThis();
spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC);
HashTable *aht = spl_array_get_hash_table(intern, 0 TSRMLS_CC);
int result;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "l", &position) == FAILURE) {
return;
}
if (!aht) {
php_error_docref(NULL TSRMLS_CC, E_NOTICE, "Array was modified outside object and is no longer an array");
return;
}
opos = position;
if (position >= 0) { /* negative values are not supported */
spl_array_rewind(intern TSRMLS_CC);
result = SUCCESS;
while (position-- > 0 && (result = spl_array_next(intern TSRMLS_CC)) == SUCCESS);
if (result == SUCCESS && zend_hash_has_more_elements_ex(aht, &intern->pos) == SUCCESS) {
return; /* ok */
}
}
zend_throw_exception_ex(spl_ce_OutOfBoundsException, 0 TSRMLS_CC, "Seek position %ld is out of range", opos);
} /* }}} */ | 1 | [] | php-src | b7fa67742cd8d2b0ca0c0273b157f6ffee9ad6e2 | 174,856,175,280,677,750,000,000,000,000,000,000,000 | 31 | Fix bug #70068 (Dangling pointer in the unserialization of ArrayObject items) |
static void de_run_pict(deark *c, de_module_params *mparams)
{
lctx *d = NULL;
i64 pos = 0;
i64 picsize;
struct pict_rect framerect;
d = de_malloc(c, sizeof(lctx));
do_detect_version(c, &d->dti, 1);
if(d->dti.file_version>0) {
de_declare_fmtf(c, "PICT v%d%s", d->dti.file_version,
d->dti.has_fileheader?"":", without file header");
}
d->version = 1;
if(d->dti.has_fileheader) {
pos += 512;
}
picsize = de_getu16be(pos);
de_dbg(c, "picSize: %d", (int)picsize);
pos+=2;
pict_read_rect(c->infile, pos, &framerect, "picFrame");
pos+=8;
do_read_items(c, d, pos);
dbuf_close(d->iccprofile_file);
de_free(c, d);
} | 0 | [
"CWE-476"
] | deark | 287f5ac31dfdc074669182f51ece637706070eeb | 206,877,671,251,070,700,000,000,000,000,000,000,000 | 32 | pict: Fixed a bug with ICC profile extraction
Could cause a NULL pointer dereference.
Found by F. Çelik. |
static int tun_xdp_set(struct net_device *dev, struct bpf_prog *prog,
struct netlink_ext_ack *extack)
{
struct tun_struct *tun = netdev_priv(dev);
struct bpf_prog *old_prog;
old_prog = rtnl_dereference(tun->xdp_prog);
rcu_assign_pointer(tun->xdp_prog, prog);
if (old_prog)
bpf_prog_put(old_prog);
return 0;
} | 0 | [
"CWE-476"
] | linux | 0ad646c81b2182f7fa67ec0c8c825e0ee165696d | 194,136,306,987,698,800,000,000,000,000,000,000,000 | 13 | tun: call dev_get_valid_name() before register_netdevice()
register_netdevice() could fail early when we have an invalid
dev name, in which case ->ndo_uninit() is not called. For tun
device, this is a problem because a timer etc. are already
initialized and it expects ->ndo_uninit() to clean them up.
We could move these initializations into a ->ndo_init() so
that register_netdevice() knows better, however this is still
complicated due to the logic in tun_detach().
Therefore, I choose to just call dev_get_valid_name() before
register_netdevice(), which is quicker and much easier to audit.
And for this specific case, it is already enough.
Fixes: 96442e42429e ("tuntap: choose the txq based on rxq")
Reported-by: Dmitry Alexeev <[email protected]>
Cc: Jason Wang <[email protected]>
Cc: "Michael S. Tsirkin" <[email protected]>
Signed-off-by: Cong Wang <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
!vcpu->arch.apf.halted)
|| !list_empty_careful(&vcpu->async_pf.done)
|| vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED
|| atomic_read(&vcpu->arch.nmi_queued) ||
(kvm_arch_interrupt_allowed(vcpu) &&
kvm_cpu_has_interrupt(vcpu));
} | 0 | [] | kvm | 0769c5de24621141c953fbe1f943582d37cb4244 | 56,772,414,448,442,700,000,000,000,000,000,000,000 | 10 | KVM: x86: extend "struct x86_emulate_ops" with "get_cpuid"
In order to be able to proceed checks on CPU-specific properties
within the emulator, function "get_cpuid" is introduced.
With "get_cpuid" it is possible to virtually call the guests
"cpuid"-opcode without changing the VM's context.
[mtosatti: cleanup/beautify code]
Signed-off-by: Stephan Baerwolf <[email protected]>
Signed-off-by: Marcelo Tosatti <[email protected]> |
static s32 brcmf_update_bss_info(struct brcmf_cfg80211_info *cfg,
struct brcmf_if *ifp)
{
struct brcmf_bss_info_le *bi;
const struct brcmf_tlv *tim;
u16 beacon_interval;
u8 dtim_period;
size_t ie_len;
u8 *ie;
s32 err = 0;
brcmf_dbg(TRACE, "Enter\n");
if (brcmf_is_ibssmode(ifp->vif))
return err;
*(__le32 *)cfg->extra_buf = cpu_to_le32(WL_EXTRA_BUF_MAX);
err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_BSS_INFO,
cfg->extra_buf, WL_EXTRA_BUF_MAX);
if (err) {
brcmf_err("Could not get bss info %d\n", err);
goto update_bss_info_out;
}
bi = (struct brcmf_bss_info_le *)(cfg->extra_buf + 4);
err = brcmf_inform_single_bss(cfg, bi);
if (err)
goto update_bss_info_out;
ie = ((u8 *)bi) + le16_to_cpu(bi->ie_offset);
ie_len = le32_to_cpu(bi->ie_length);
beacon_interval = le16_to_cpu(bi->beacon_period);
tim = brcmf_parse_tlvs(ie, ie_len, WLAN_EID_TIM);
if (tim)
dtim_period = tim->data[1];
else {
/*
* active scan was done so we could not get dtim
* information out of probe response.
* so we specifically query dtim information from the dongle.
*/
u32 var;
err = brcmf_fil_iovar_int_get(ifp, "dtim_assoc", &var);
if (err) {
brcmf_err("wl dtim_assoc failed (%d)\n", err);
goto update_bss_info_out;
}
dtim_period = (u8)var;
}
update_bss_info_out:
brcmf_dbg(TRACE, "Exit");
return err;
} | 0 | [
"CWE-119",
"CWE-703"
] | linux | ded89912156b1a47d940a0c954c43afbabd0c42c | 131,174,160,848,841,670,000,000,000,000,000,000,000 | 54 | brcmfmac: avoid potential stack overflow in brcmf_cfg80211_start_ap()
User-space can choose to omit NL80211_ATTR_SSID and only provide raw
IE TLV data. When doing so it can provide SSID IE with length exceeding
the allowed size. The driver further processes this IE copying it
into a local variable without checking the length. Hence stack can be
corrupted and used as exploit.
Cc: [email protected] # v4.7
Reported-by: Daxing Guo <[email protected]>
Reviewed-by: Hante Meuleman <[email protected]>
Reviewed-by: Pieter-Paul Giesberts <[email protected]>
Reviewed-by: Franky Lin <[email protected]>
Signed-off-by: Arend van Spriel <[email protected]>
Signed-off-by: Kalle Valo <[email protected]> |
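The overflow described above is the canonical TLV bug: a length byte from the air is trusted when copying into a fixed-size stack buffer. A generic sketch of the clamped copy (illustrative, not the driver source):

```c
#include <stdint.h>
#include <string.h>

#define MAX_SSID_LEN 32  /* 802.11 SSIDs are at most 32 bytes */

/* Copy an SSID IE only after validating its attacker-controlled length. */
static int copy_ssid(uint8_t dst[MAX_SSID_LEN], const uint8_t *ie, uint8_t ie_len)
{
    if (ie_len > MAX_SSID_LEN)
        return -1;  /* reject instead of smashing the stack */
    memcpy(dst, ie, ie_len);
    return ie_len;
}
```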
static void BuildAndStoreBlockSplitCode(const uint8_t* types,
const uint32_t* lengths,
const size_t num_blocks,
const size_t num_types,
HuffmanTree* tree,
BlockSplitCode* code,
size_t* storage_ix,
uint8_t* storage) {
uint32_t type_histo[BROTLI_MAX_BLOCK_TYPE_SYMBOLS];
uint32_t length_histo[BROTLI_NUM_BLOCK_LEN_SYMBOLS];
size_t i;
BlockTypeCodeCalculator type_code_calculator;
memset(type_histo, 0, (num_types + 2) * sizeof(type_histo[0]));
memset(length_histo, 0, sizeof(length_histo));
InitBlockTypeCodeCalculator(&type_code_calculator);
for (i = 0; i < num_blocks; ++i) {
size_t type_code = NextBlockTypeCode(&type_code_calculator, types[i]);
if (i != 0) ++type_histo[type_code];
++length_histo[BlockLengthPrefixCode(lengths[i])];
}
StoreVarLenUint8(num_types - 1, storage_ix, storage);
if (num_types > 1) { /* TODO: else? could StoreBlockSwitch occur? */
BuildAndStoreHuffmanTree(&type_histo[0], num_types + 2, num_types + 2, tree,
&code->type_depths[0], &code->type_bits[0],
storage_ix, storage);
BuildAndStoreHuffmanTree(&length_histo[0], BROTLI_NUM_BLOCK_LEN_SYMBOLS,
BROTLI_NUM_BLOCK_LEN_SYMBOLS,
tree, &code->length_depths[0],
&code->length_bits[0], storage_ix, storage);
StoreBlockSwitch(code, lengths[0], types[0], 1, storage_ix, storage);
}
} | 0 | [
"CWE-120"
] | brotli | 223d80cfbec8fd346e32906c732c8ede21f0cea6 | 261,126,439,872,002,230,000,000,000,000,000,000,000 | 32 | Update (#826)
* IMPORTANT: decoder: fix potential overflow when input chunk is >2GiB
* simplify max Huffman table size calculation
* eliminate symbol duplicates (static arrays in .h files)
* minor combing in research/ code |
xmlRelaxNGValidatePopElement(xmlRelaxNGValidCtxtPtr ctxt,
xmlDocPtr doc ATTRIBUTE_UNUSED,
xmlNodePtr elem)
{
int ret;
xmlRegExecCtxtPtr exec;
if ((ctxt == NULL) || (ctxt->elem == NULL) || (elem == NULL))
return (-1);
#ifdef DEBUG_PROGRESSIVE
xmlGenericError(xmlGenericErrorContext, "PopElem %s\n", elem->name);
#endif
/*
* verify that we reached a terminal state of the content model.
*/
exec = xmlRelaxNGElemPop(ctxt);
ret = xmlRegExecPushString(exec, NULL, NULL);
if (ret == 0) {
/*
* TODO: get some of the names needed to exit the current state of exec
*/
VALID_ERR2(XML_RELAXNG_ERR_NOELEM, BAD_CAST "");
ret = -1;
} else if (ret < 0) {
ret = -1;
} else {
ret = 1;
}
xmlRegFreeExecCtxt(exec);
#ifdef DEBUG_PROGRESSIVE
if (ret < 0)
xmlGenericError(xmlGenericErrorContext, "PopElem %s failed\n",
elem->name);
#endif
return (ret);
} | 0 | [
"CWE-134"
] | libxml2 | 502f6a6d08b08c04b3ddfb1cd21b2f699c1b7f5b | 115,530,971,474,686,350,000,000,000,000,000,000,000 | 36 | More format string warnings with possible format string vulnerability
For https://bugzilla.gnome.org/show_bug.cgi?id=761029
adds a new xmlEscapeFormatString() function to escape composed format
strings |
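The bug class behind this fix (CWE-134) is worth a two-line illustration: untrusted text must never be the format argument itself.

```c
#include <stdio.h>

void report(const char *untrusted)
{
    /* printf(untrusted); */   /* BAD: '%' directives in the data drive printf */
    printf("%s", untrusted);   /* good: constant format, data stays data */
}
```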
TfLiteRegistration* Register_RNN() {
static TfLiteRegistration r = {rnn::Init, rnn::Free, rnn::Prepare, rnn::Eval};
return &r;
} | 0 | [
"CWE-125",
"CWE-787"
] | tensorflow | 1970c2158b1ffa416d159d03c3370b9a462aee35 | 319,228,460,189,408,700,000,000,000,000,000,000,000 | 4 | [tflite]: Insert `nullptr` checks when obtaining tensors.
As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages.
We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`).
PiperOrigin-RevId: 332521299
Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56 |
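The TFLite change makes tensor accessors fallible, so every call site needs a NULL check. A generic C sketch of the call-site pattern (hypothetical types and names, not the TensorFlow API):

```c
#include <stddef.h>

typedef struct { int bytes; } tensor_t;

/* Treat the lookup as fallible: check before dereferencing, and
 * propagate an error code instead of crashing. */
static int eval_first_input(tensor_t *(*get_input)(int idx))
{
    tensor_t *in = get_input(0);
    if (in == NULL)
        return -1;
    return in->bytes;
}
```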
vips_tracked_free( void *s )
{
/* Keep the size of the alloc in the previous 16 bytes. Ensures
* alignment rules are kept.
*/
void *start = (void *) ((char *) s - 16);
size_t size = *((size_t *) start);
g_mutex_lock( vips_tracked_mutex );
#ifdef DEBUG_VERBOSE
printf( "vips_tracked_free: %p, %zd bytes\n", s, size );
#endif /*DEBUG_VERBOSE*/
if( vips_tracked_allocs <= 0 )
g_warning( "%s", _( "vips_free: too many frees" ) );
if( vips_tracked_mem < size )
g_warning( "%s", _( "vips_free: too much free" ) );
vips_tracked_mem -= size;
vips_tracked_allocs -= 1;
g_mutex_unlock( vips_tracked_mutex );
g_free( start );
VIPS_GATE_FREE( size );
} | 0 | [
"CWE-200",
"CWE-908"
] | libvips | 00622428bda8d7521db8d74260b519fa41d69d0a | 175,531,391,602,005,800,000,000,000,000,000,000,000 | 28 | zero memory on malloc
to prevent write of uninit memory under some error conditions
thanks Balint |
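The fix is simply to hand out zeroed allocations, so error paths that read a partially written buffer cannot leak heap contents. A sketch of the idea (hypothetical wrapper, not the libvips code):

```c
#include <stdlib.h>

/* calloc() zeroes the block, so uninitialized bytes can never escape
 * through error paths that read before every field is written. */
static void *xmalloc_zeroed(size_t size)
{
    return calloc(1, size);
}
```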
int linuxOvercommitMemoryValue(void) {
FILE *fp = fopen("/proc/sys/vm/overcommit_memory","r");
char buf[64];
if (!fp) return -1;
if (fgets(buf,64,fp) == NULL) {
fclose(fp);
return -1;
}
fclose(fp);
return atoi(buf);
} | 0 | [
"CWE-20"
] | redis | 697af434fbeb2e3ba2ba9687cd283ed1a2734fa5 | 263,654,751,013,523,000,000,000,000,000,000,000,000 | 13 | initial changes needed to turn the current VM code into a cache system. Tons of work to do still. |
static int __vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
union vmx_exit_reason exit_reason = vmx->exit_reason;
u32 vectoring_info = vmx->idt_vectoring_info;
u16 exit_handler_index;
/*
* Flush logged GPAs PML buffer, this will make dirty_bitmap more
* updated. Another good is, in kvm_vm_ioctl_get_dirty_log, before
* querying dirty_bitmap, we only need to kick all vcpus out of guest
* mode as if vcpus is in root mode, the PML buffer must has been
* flushed already. Note, PML is never enabled in hardware while
* running L2.
*/
if (enable_pml && !is_guest_mode(vcpu))
vmx_flush_pml_buffer(vcpu);
/*
* We should never reach this point with a pending nested VM-Enter, and
* more specifically emulation of L2 due to invalid guest state (see
* below) should never happen as that means we incorrectly allowed a
* nested VM-Enter with an invalid vmcs12.
*/
WARN_ON_ONCE(vmx->nested.nested_run_pending);
/* If guest state is invalid, start emulating */
if (vmx->emulation_required)
return handle_invalid_guest_state(vcpu);
if (is_guest_mode(vcpu)) {
/*
* PML is never enabled when running L2, bail immediately if a
* PML full exit occurs as something is horribly wrong.
*/
if (exit_reason.basic == EXIT_REASON_PML_FULL)
goto unexpected_vmexit;
/*
* The host physical addresses of some pages of guest memory
* are loaded into the vmcs02 (e.g. vmcs12's Virtual APIC
* Page). The CPU may write to these pages via their host
* physical address while L2 is running, bypassing any
* address-translation-based dirty tracking (e.g. EPT write
* protection).
*
* Mark them dirty on every exit from L2 to prevent them from
* getting out of sync with dirty tracking.
*/
nested_mark_vmcs12_pages_dirty(vcpu);
if (nested_vmx_reflect_vmexit(vcpu))
return 1;
}
if (exit_reason.failed_vmentry) {
dump_vmcs();
vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY;
vcpu->run->fail_entry.hardware_entry_failure_reason
= exit_reason.full;
vcpu->run->fail_entry.cpu = vcpu->arch.last_vmentry_cpu;
return 0;
}
if (unlikely(vmx->fail)) {
dump_vmcs();
vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY;
vcpu->run->fail_entry.hardware_entry_failure_reason
= vmcs_read32(VM_INSTRUCTION_ERROR);
vcpu->run->fail_entry.cpu = vcpu->arch.last_vmentry_cpu;
return 0;
}
/*
* Note:
* Do not try to fix EXIT_REASON_EPT_MISCONFIG if it caused by
* delivery event since it indicates guest is accessing MMIO.
* The vm-exit can be triggered again after return to guest that
* will cause infinite loop.
*/
if ((vectoring_info & VECTORING_INFO_VALID_MASK) &&
(exit_reason.basic != EXIT_REASON_EXCEPTION_NMI &&
exit_reason.basic != EXIT_REASON_EPT_VIOLATION &&
exit_reason.basic != EXIT_REASON_PML_FULL &&
exit_reason.basic != EXIT_REASON_APIC_ACCESS &&
exit_reason.basic != EXIT_REASON_TASK_SWITCH)) {
int ndata = 3;
vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_DELIVERY_EV;
vcpu->run->internal.data[0] = vectoring_info;
vcpu->run->internal.data[1] = exit_reason.full;
vcpu->run->internal.data[2] = vcpu->arch.exit_qualification;
if (exit_reason.basic == EXIT_REASON_EPT_MISCONFIG) {
vcpu->run->internal.data[ndata++] =
vmcs_read64(GUEST_PHYSICAL_ADDRESS);
}
vcpu->run->internal.data[ndata++] = vcpu->arch.last_vmentry_cpu;
vcpu->run->internal.ndata = ndata;
return 0;
}
if (unlikely(!enable_vnmi &&
vmx->loaded_vmcs->soft_vnmi_blocked)) {
if (!vmx_interrupt_blocked(vcpu)) {
vmx->loaded_vmcs->soft_vnmi_blocked = 0;
} else if (vmx->loaded_vmcs->vnmi_blocked_time > 1000000000LL &&
vcpu->arch.nmi_pending) {
/*
* This CPU don't support us in finding the end of an
* NMI-blocked window if the guest runs with IRQs
* disabled. So we pull the trigger after 1 s of
* futile waiting, but inform the user about this.
*/
printk(KERN_WARNING "%s: Breaking out of NMI-blocked "
"state on VCPU %d after 1 s timeout\n",
__func__, vcpu->vcpu_id);
vmx->loaded_vmcs->soft_vnmi_blocked = 0;
}
}
if (exit_fastpath != EXIT_FASTPATH_NONE)
return 1;
if (exit_reason.basic >= kvm_vmx_max_exit_handlers)
goto unexpected_vmexit;
#ifdef CONFIG_RETPOLINE
if (exit_reason.basic == EXIT_REASON_MSR_WRITE)
return kvm_emulate_wrmsr(vcpu);
else if (exit_reason.basic == EXIT_REASON_PREEMPTION_TIMER)
return handle_preemption_timer(vcpu);
else if (exit_reason.basic == EXIT_REASON_INTERRUPT_WINDOW)
return handle_interrupt_window(vcpu);
else if (exit_reason.basic == EXIT_REASON_EXTERNAL_INTERRUPT)
return handle_external_interrupt(vcpu);
else if (exit_reason.basic == EXIT_REASON_HLT)
return kvm_emulate_halt(vcpu);
else if (exit_reason.basic == EXIT_REASON_EPT_MISCONFIG)
return handle_ept_misconfig(vcpu);
#endif
exit_handler_index = array_index_nospec((u16)exit_reason.basic,
kvm_vmx_max_exit_handlers);
if (!kvm_vmx_exit_handlers[exit_handler_index])
goto unexpected_vmexit;
return kvm_vmx_exit_handlers[exit_handler_index](vcpu);
unexpected_vmexit:
vcpu_unimpl(vcpu, "vmx: unexpected exit reason 0x%x\n",
exit_reason.full);
dump_vmcs();
vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
vcpu->run->internal.suberror =
KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON;
vcpu->run->internal.ndata = 2;
vcpu->run->internal.data[0] = exit_reason.full;
vcpu->run->internal.data[1] = vcpu->arch.last_vmentry_cpu;
return 0;
} | 0 | [
"CWE-787"
] | linux | 04c4f2ee3f68c9a4bf1653d15f1a9a435ae33f7a | 135,200,381,689,706,430,000,000,000,000,000,000,000 | 160 | KVM: VMX: Don't use vcpu->run->internal.ndata as an array index
__vmx_handle_exit() uses vcpu->run->internal.ndata as an index for
an array access. Since vcpu->run is (can be) mapped to a user address
space with a writer permission, the 'ndata' could be updated by the
user process at anytime (the user process can set it to outside the
bounds of the array).
So, it is not safe that __vmx_handle_exit() uses the 'ndata' that way.
Fixes: 1aa561b1a4c0 ("kvm: x86: Add "last CPU" to some KVM_EXIT information")
Signed-off-by: Reiji Watanabe <[email protected]>
Reviewed-by: Jim Mattson <[email protected]>
Message-Id: <[email protected]>
Cc: [email protected]
Signed-off-by: Paolo Bonzini <[email protected]> |
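The fix pattern here: never index an array with a counter that lives in memory the other side (here, userspace via the shared vcpu->run page) can rewrite. Keep a local count, bound it by the array size, and publish it once at the end. A generic sketch (hypothetical names):

```c
#include <stdint.h>

#define NDATA_MAX 16

/* Fill shared exit-info data using a local index, not the shared field. */
static void fill_exit_info(uint64_t data[NDATA_MAX], uint32_t *shared_ndata,
                           uint64_t a, uint64_t b, uint64_t c)
{
    uint32_t n = 0;          /* local, cannot be raced by the other side */
    data[n++] = a;
    data[n++] = b;
    data[n++] = c;
    *shared_ndata = n;       /* publish only the final count */
}
```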
static int process_system_preds(struct trace_subsystem_dir *dir,
struct trace_array *tr,
struct filter_parse_error *pe,
char *filter_string)
{
struct trace_event_file *file;
struct filter_list *filter_item;
struct event_filter *filter = NULL;
struct filter_list *tmp;
LIST_HEAD(filter_list);
bool fail = true;
int err;
list_for_each_entry(file, &tr->events, list) {
if (file->system != dir)
continue;
filter = kzalloc(sizeof(*filter), GFP_KERNEL);
if (!filter)
goto fail_mem;
filter->filter_string = kstrdup(filter_string, GFP_KERNEL);
if (!filter->filter_string)
goto fail_mem;
err = process_preds(file->event_call, filter_string, filter, pe);
if (err) {
filter_disable(file);
parse_error(pe, FILT_ERR_BAD_SUBSYS_FILTER, 0);
append_filter_err(tr, pe, filter);
} else
event_set_filtered_flag(file);
filter_item = kzalloc(sizeof(*filter_item), GFP_KERNEL);
if (!filter_item)
goto fail_mem;
list_add_tail(&filter_item->list, &filter_list);
/*
* Regardless of if this returned an error, we still
* replace the filter for the call.
*/
filter_item->filter = event_filter(file);
event_set_filter(file, filter);
filter = NULL;
fail = false;
}
if (fail)
goto fail;
/*
* The calls can still be using the old filters.
* Do a synchronize_rcu() to ensure all calls are
* done with them before we free them.
*/
tracepoint_synchronize_unregister();
list_for_each_entry_safe(filter_item, tmp, &filter_list, list) {
__free_filter(filter_item->filter);
list_del(&filter_item->list);
kfree(filter_item);
}
return 0;
fail:
/* No call succeeded */
list_for_each_entry_safe(filter_item, tmp, &filter_list, list) {
list_del(&filter_item->list);
kfree(filter_item);
}
parse_error(pe, FILT_ERR_BAD_SUBSYS_FILTER, 0);
return -EINVAL;
fail_mem:
kfree(filter);
/* If any call succeeded, we still need to sync */
if (!fail)
tracepoint_synchronize_unregister();
list_for_each_entry_safe(filter_item, tmp, &filter_list, list) {
__free_filter(filter_item->filter);
list_del(&filter_item->list);
kfree(filter_item);
}
return -ENOMEM;
} | 0 | [
"CWE-400",
"CWE-284",
"CWE-401"
] | linux | 96c5c6e6a5b6db592acae039fed54b5c8844cd35 | 141,091,671,690,996,570,000,000,000,000,000,000,000 | 86 | tracing: Have error path in predicate_parse() free its allocated memory
In predicate_parse, there is an error path that is not going to
out_free instead it returns directly which leads to a memory leak.
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Navid Emamdoost <[email protected]>
Signed-off-by: Steven Rostedt (VMware) <[email protected]> |
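The leak described above is the standard goto-cleanup slip: one error path returns directly instead of jumping to the label that frees earlier allocations. A minimal generic sketch:

```c
#include <stdlib.h>

static int parse(int parse_fails)
{
    int ret = 0;
    char *buf = malloc(64);
    if (!buf)
        return -1;
    if (parse_fails) {
        ret = -1;
        goto out_free;   /* was: return -1; which leaked buf */
    }
    /* ... use buf ... */
out_free:
    free(buf);
    return ret;
}
```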
format_DEC_MPLS_TTL(const struct ofpact_null *a OVS_UNUSED, struct ds *s)
{
ds_put_format(s, "%sdec_mpls_ttl%s", colors.value, colors.end);
} | 0 | [
"CWE-125"
] | ovs | 9237a63c47bd314b807cda0bd2216264e82edbe8 | 165,927,440,137,463,920,000,000,000,000,000,000,000 | 4 | ofp-actions: Avoid buffer overread in BUNDLE action decoding.
Reported-at: https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=9052
Signed-off-by: Ben Pfaff <[email protected]>
Acked-by: Justin Pettit <[email protected]> |
static bool arg_type_is_alloc_mem_ptr(enum bpf_arg_type type)
{
return type == ARG_PTR_TO_ALLOC_MEM ||
type == ARG_PTR_TO_ALLOC_MEM_OR_NULL;
} | 0 | [
"CWE-119",
"CWE-681",
"CWE-787"
] | linux | 5b9fbeb75b6a98955f628e205ac26689bcb1383e | 235,621,210,262,888,050,000,000,000,000,000,000,000 | 5 | bpf: Fix scalar32_min_max_or bounds tracking
Simon reported an issue with the current scalar32_min_max_or() implementation.
That is, compared to the other 32 bit subreg tracking functions, the code in
scalar32_min_max_or() stands out that it's using the 64 bit registers instead
of 32 bit ones. This leads to bounds tracking issues, for example:
[...]
8: R0=map_value(id=0,off=0,ks=4,vs=48,imm=0) R10=fp0 fp-8=mmmmmmmm
8: (79) r1 = *(u64 *)(r0 +0)
R0=map_value(id=0,off=0,ks=4,vs=48,imm=0) R10=fp0 fp-8=mmmmmmmm
9: R0=map_value(id=0,off=0,ks=4,vs=48,imm=0) R1_w=inv(id=0) R10=fp0 fp-8=mmmmmmmm
9: (b7) r0 = 1
10: R0_w=inv1 R1_w=inv(id=0) R10=fp0 fp-8=mmmmmmmm
10: (18) r2 = 0x600000002
12: R0_w=inv1 R1_w=inv(id=0) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
12: (ad) if r1 < r2 goto pc+1
R0_w=inv1 R1_w=inv(id=0,umin_value=25769803778) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
13: R0_w=inv1 R1_w=inv(id=0,umin_value=25769803778) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
13: (95) exit
14: R0_w=inv1 R1_w=inv(id=0,umax_value=25769803777,var_off=(0x0; 0x7ffffffff)) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
14: (25) if r1 > 0x0 goto pc+1
R0_w=inv1 R1_w=inv(id=0,umax_value=0,var_off=(0x0; 0x7fffffff),u32_max_value=2147483647) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
15: R0_w=inv1 R1_w=inv(id=0,umax_value=0,var_off=(0x0; 0x7fffffff),u32_max_value=2147483647) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
15: (95) exit
16: R0_w=inv1 R1_w=inv(id=0,umin_value=1,umax_value=25769803777,var_off=(0x0; 0x77fffffff),u32_max_value=2147483647) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
16: (47) r1 |= 0
17: R0_w=inv1 R1_w=inv(id=0,umin_value=1,umax_value=32212254719,var_off=(0x1; 0x700000000),s32_max_value=1,u32_max_value=1) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
[...]
The bound tests on the map value force the upper unsigned bound to be 25769803777
in 64 bit (0b11000000000000000000000000000000001) and then lower one to be 1. By
using OR they are truncated and thus result in the range [1,1] for the 32 bit reg
tracker. This is incorrect given the only thing we know is that the value must be
positive and thus 2147483647 (0b1111111111111111111111111111111) at max for the
subregs. Fix it by using the {u,s}32_{min,max}_value vars instead. This also makes
sense, for example, for the case where we update dst_reg->s32_{min,max}_value in
the else branch we need to use the newly computed dst_reg->u32_{min,max}_value as
we know that these are positive. Previously, in the else branch the 64 bit values
of umin_value=1 and umax_value=32212254719 were used and latter got truncated to
be 1 as upper bound there. After the fix the subreg range is now correct:
[...]
8: R0=map_value(id=0,off=0,ks=4,vs=48,imm=0) R10=fp0 fp-8=mmmmmmmm
8: (79) r1 = *(u64 *)(r0 +0)
R0=map_value(id=0,off=0,ks=4,vs=48,imm=0) R10=fp0 fp-8=mmmmmmmm
9: R0=map_value(id=0,off=0,ks=4,vs=48,imm=0) R1_w=inv(id=0) R10=fp0 fp-8=mmmmmmmm
9: (b7) r0 = 1
10: R0_w=inv1 R1_w=inv(id=0) R10=fp0 fp-8=mmmmmmmm
10: (18) r2 = 0x600000002
12: R0_w=inv1 R1_w=inv(id=0) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
12: (ad) if r1 < r2 goto pc+1
R0_w=inv1 R1_w=inv(id=0,umin_value=25769803778) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
13: R0_w=inv1 R1_w=inv(id=0,umin_value=25769803778) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
13: (95) exit
14: R0_w=inv1 R1_w=inv(id=0,umax_value=25769803777,var_off=(0x0; 0x7ffffffff)) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
14: (25) if r1 > 0x0 goto pc+1
R0_w=inv1 R1_w=inv(id=0,umax_value=0,var_off=(0x0; 0x7fffffff),u32_max_value=2147483647) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
15: R0_w=inv1 R1_w=inv(id=0,umax_value=0,var_off=(0x0; 0x7fffffff),u32_max_value=2147483647) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
15: (95) exit
16: R0_w=inv1 R1_w=inv(id=0,umin_value=1,umax_value=25769803777,var_off=(0x0; 0x77fffffff),u32_max_value=2147483647) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
16: (47) r1 |= 0
17: R0_w=inv1 R1_w=inv(id=0,umin_value=1,umax_value=32212254719,var_off=(0x0; 0x77fffffff),u32_max_value=2147483647) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
[...]
Fixes: 3f50f132d840 ("bpf: Verifier, do explicit ALU32 bounds tracking")
Reported-by: Simon Scannell <[email protected]>
Signed-off-by: Daniel Borkmann <[email protected]>
Reviewed-by: John Fastabend <[email protected]>
Acked-by: Alexei Starovoitov <[email protected]> |
static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
{
struct inet_sock *inet = inet_sk(sp);
__be32 dest = inet->inet_daddr,
src = inet->inet_rcv_saddr;
__u16 destp = 0,
srcp = inet->inet_num;
seq_printf(seq, "%4d: %08X:%04X %08X:%04X"
" %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %d\n",
i, src, srcp, dest, destp, sp->sk_state,
sk_wmem_alloc_get(sp),
sk_rmem_alloc_get(sp),
0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
} | 0 | [
"CWE-362"
] | linux-2.6 | f6d8bd051c391c1c0458a30b2a7abcd939329259 | 10,906,749,974,912,215,000,000,000,000,000,000,000 | 16 | inet: add RCU protection to inet->opt
We lack proper synchronization to manipulate inet->opt ip_options
Problem is ip_make_skb() calls ip_setup_cork() and
ip_setup_cork() possibly makes a copy of ipc->opt (struct ip_options),
without any protection against another thread manipulating inet->opt.
Another thread can change inet->opt pointer and free old one under us.
Use RCU to protect inet->opt (changed to inet->inet_opt).
Instead of handling atomic refcounts, just copy ip_options when
necessary, to avoid cache line dirtying.
We cant insert an rcu_head in struct ip_options since its included in
skb->cb[], so this patch is large because I had to introduce a new
ip_options_rcu structure.
Signed-off-by: Eric Dumazet <[email protected]>
Cc: Herbert Xu <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
static struct ns_common *get_net_ns(struct ns_common *ns)
{
return &get_net(container_of(ns, struct net, ns))->ns;
} | 0 | [
"CWE-703",
"CWE-125"
] | linux | 8605330aac5a5785630aec8f64378a54891937cc | 39,412,717,404,203,620,000,000,000,000,000,000,000 | 4 | tcp: fix SCM_TIMESTAMPING_OPT_STATS for normal skbs
__sock_recv_timestamp can be called for both normal skbs (for
receive timestamps) and for skbs on the error queue (for transmit
timestamps).
Commit 1c885808e456
(tcp: SOF_TIMESTAMPING_OPT_STATS option for SO_TIMESTAMPING)
assumes any skb passed to __sock_recv_timestamp are from
the error queue, containing OPT_STATS in the content of the skb.
This results in accessing invalid memory or generating junk
data.
To fix this, set skb->pkt_type to PACKET_OUTGOING for packets
on the error queue. This is safe because on the receive path
on local sockets skb->pkt_type is never set to PACKET_OUTGOING.
With that, copy OPT_STATS from a packet, only if its pkt_type
is PACKET_OUTGOING.
Fixes: 1c885808e456 ("tcp: SOF_TIMESTAMPING_OPT_STATS option for SO_TIMESTAMPING")
Reported-by: JongHwan Kim <[email protected]>
Signed-off-by: Soheil Hassas Yeganeh <[email protected]>
Signed-off-by: Eric Dumazet <[email protected]>
Signed-off-by: Willem de Bruijn <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
static int atalk_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
int size, int flags)
{
struct sock *sk = sock->sk;
struct sockaddr_at *sat = (struct sockaddr_at *)msg->msg_name;
struct ddpehdr *ddp;
int copied = 0;
int err = 0;
struct ddpebits ddphv;
struct sk_buff *skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
flags & MSG_DONTWAIT, &err);
if (!skb)
return err;
/* FIXME: use skb->cb to be able to use shared skbs */
ddp = ddp_hdr(skb);
*((__u16 *)&ddphv) = ntohs(*((__u16 *)ddp));
if (sk->sk_type == SOCK_RAW) {
copied = ddphv.deh_len;
if (copied > size) {
copied = size;
msg->msg_flags |= MSG_TRUNC;
}
err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
} else {
copied = ddphv.deh_len - sizeof(*ddp);
if (copied > size) {
copied = size;
msg->msg_flags |= MSG_TRUNC;
}
err = skb_copy_datagram_iovec(skb, sizeof(*ddp),
msg->msg_iov, copied);
}
if (!err) {
if (sat) {
sat->sat_family = AF_APPLETALK;
sat->sat_port = ddp->deh_sport;
sat->sat_addr.s_node = ddp->deh_snode;
sat->sat_addr.s_net = ddp->deh_snet;
}
msg->msg_namelen = sizeof(*sat);
}
skb_free_datagram(sk, skb); /* Free the datagram. */
return err ? : copied;
} | 0 | [] | history | 7ab442d7e0a76402c12553ee256f756097cae2d2 | 162,114,781,842,207,150,000,000,000,000,000,000,000 | 49 | [DDP]: Convert to new protocol interface.
Convert ddp to the new protocol interface which means it has to
handle fragmented skb's. The only big change is in the checksum
routine which has to do more work (like skb_checksum).
Minor speedup is folding the carry to avoid a branch.
Tested against a 2.4 system and by running both code over
a range of packets. |
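"Folding the carry to avoid a branch", as the message puts it, is the classic ones'-complement checksum trick: accumulate in a wider word and fold the high half back in, instead of branching on overflow after each addition. A standalone sketch:

```c
#include <stdint.h>

/* Fold a 32-bit ones'-complement accumulator down to 16 bits.
 * Two folds are enough: the second absorbs the carry from the first. */
static uint16_t csum_fold(uint32_t sum)
{
    sum = (sum & 0xffff) + (sum >> 16);
    sum = (sum & 0xffff) + (sum >> 16);
    return (uint16_t)sum;
}
```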
Field *Item::make_string_field(TABLE *table)
{
Field *field;
DBUG_ASSERT(collation.collation);
/*
Note: the following check is repeated in
subquery_types_allow_materialization():
*/
if (too_big_for_varchar())
field= new Field_blob(max_length, maybe_null, name,
collation.collation, TRUE);
/* Item_type_holder holds the exact type, do not change it */
else if (max_length > 0 &&
(type() != Item::TYPE_HOLDER || field_type() != MYSQL_TYPE_STRING))
field= new Field_varstring(max_length, maybe_null, name, table->s,
collation.collation);
else
field= new Field_string(max_length, maybe_null, name,
collation.collation);
if (field)
field->init(table);
return field;
} | 0 | [] | server | b000e169562697aa072600695d4f0c0412f94f4f | 230,635,845,533,857,700,000,000,000,000,000,000,000 | 23 | Bug#26361149 MYSQL SERVER CRASHES AT: COL IN(IFNULL(CONST, COL), NAME_CONST('NAME', NULL))
based on:
commit f7316aa0c9a
Author: Ajo Robert <[email protected]>
Date: Thu Aug 24 17:03:21 2017 +0530
Bug#26361149 MYSQL SERVER CRASHES AT: COL IN(IFNULL(CONST,
COL), NAME_CONST('NAME', NULL))
Backport of Bug#19143243 fix.
NAME_CONST item can return NULL_ITEM type in case of incorrect arguments.
NULL_ITEM has special processing in Item_func_in function.
In Item_func_in::fix_length_and_dec an array of possible comparators is
created. Since NAME_CONST function has NULL_ITEM type, corresponding
array element is empty. Then NAME_CONST is wrapped to ITEM_CACHE.
ITEM_CACHE can not return proper type(NULL_ITEM) in Item_func_in::val_int(),
so the NULL_ITEM is attempted compared with an empty comparator.
The fix is to disable the caching of Item_name_const item. |
void ldbInit(void) {
ldb.conn = NULL;
ldb.active = 0;
ldb.logs = listCreate();
listSetFreeMethod(ldb.logs,(void (*)(void*))sdsfree);
ldb.children = listCreate();
ldb.src = NULL;
ldb.lines = 0;
ldb.cbuf = sdsempty();
} | 0 | [
"CWE-703",
"CWE-125"
] | redis | 6ac3c0b7abd35f37201ed2d6298ecef4ea1ae1dd | 175,922,077,575,838,000,000,000,000,000,000,000,000 | 10 | Fix protocol parsing on 'ldbReplParseCommand' (CVE-2021-32672)
The protocol parsing in 'ldbReplParseCommand' (Lua debugging)
assumed protocol correctness. This means that if the following
is given:
*1
$100
test
The parser will try to read an additional 94 unallocated bytes past
the client buffer.
This commit fixes the issue by validating that there are actually enough
bytes to read. It also limits the amount of data that can be sent by
the debugger client to 1 MB, so the client will not be able to blow up
the memory. |
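A minimal sketch of the kind of bounds check the fix introduces
(illustrative names; the real code lives in ldbReplParseCommand()):

#include <stddef.h>

/* Never trust the declared $<len> bulk-length prefix on its own:
 * only read 'needed' bytes at 'offset' if they are really present
 * in the buffer that was received. */
static int ldb_have_bytes_sketch(size_t buflen, size_t offset, size_t needed)
{
    return needed <= buflen && offset <= buflen - needed;
}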
int pkey_ctrl_string(EVP_PKEY_CTX *ctx, char *value)
{
int rv;
char *stmp, *vtmp = NULL;
stmp = BUF_strdup(value);
if (!stmp)
return -1;
vtmp = strchr(stmp, ':');
if (vtmp)
{
*vtmp = 0;
vtmp++;
}
rv = EVP_PKEY_CTX_ctrl_str(ctx, stmp, vtmp);
OPENSSL_free(stmp);
return rv;
} | 0 | [] | openssl | a70da5b3ecc3160368529677006801c58cb369db | 54,012,113,136,485,395,000,000,000,000,000,000,000 | 17 | New functions to check a hostname email or IP address against a
certificate. Add options to s_client, s_server and x509 utilities
to print results of checks. |
static void sas_unregister_devs_sas_addr(struct domain_device *parent,
int phy_id, bool last)
{
struct expander_device *ex_dev = &parent->ex_dev;
struct ex_phy *phy = &ex_dev->ex_phy[phy_id];
struct domain_device *child, *n, *found = NULL;
if (last) {
list_for_each_entry_safe(child, n,
&ex_dev->children, siblings) {
if (SAS_ADDR(child->sas_addr) ==
SAS_ADDR(phy->attached_sas_addr)) {
set_bit(SAS_DEV_GONE, &child->state);
if (child->dev_type == SAS_EDGE_EXPANDER_DEVICE ||
child->dev_type == SAS_FANOUT_EXPANDER_DEVICE)
sas_unregister_ex_tree(parent->port, child);
else
sas_unregister_dev(parent->port, child);
found = child;
break;
}
}
sas_disable_routing(parent, phy->attached_sas_addr);
}
memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
if (phy->port) {
sas_port_delete_phy(phy->port, phy->phy);
sas_device_set_phy(found, phy->port);
if (phy->port->num_phys == 0)
sas_port_delete(phy->port);
phy->port = NULL;
}
} | 1 | [
"CWE-284"
] | linux | 0558f33c06bb910e2879e355192227a8e8f0219d | 112,811,908,323,419,610,000,000,000,000,000,000,000 | 32 | scsi: libsas: direct call probe and destruct
In commit 87c8331fcf72 ("[SCSI] libsas: prevent domain rediscovery
competing with ata error handling") introduced disco mutex to prevent
rediscovery competing with ata error handling and put the whole
revalidation in the mutex. But the rphy add/remove needs to wait for the
error handling which also grabs the disco mutex. This may leads to dead
lock. So the probe and destruct events were introduced to do the rphy
add/remove asynchronously and out of the lock.
The asynchronously processed workers make the whole discovery process
non-atomic, so other events may interrupt the process. For example,
if a loss-of-signal event is inserted before the probe event, the
sas_deform_port() is called and the port will be deleted.
And sas_port_delete() may run before the destruct event, but the
port-x:x is the top parent of end device or expander. This leads to
a kernel WARNING such as:
[ 82.042979] sysfs group 'power' not found for kobject 'phy-1:0:22'
[ 82.042983] ------------[ cut here ]------------
[ 82.042986] WARNING: CPU: 54 PID: 1714 at fs/sysfs/group.c:237
sysfs_remove_group+0x94/0xa0
[ 82.043059] Call trace:
[ 82.043082] [<ffff0000082e7624>] sysfs_remove_group+0x94/0xa0
[ 82.043085] [<ffff00000864e320>] dpm_sysfs_remove+0x60/0x70
[ 82.043086] [<ffff00000863ee10>] device_del+0x138/0x308
[ 82.043089] [<ffff00000869a2d0>] sas_phy_delete+0x38/0x60
[ 82.043091] [<ffff00000869a86c>] do_sas_phy_delete+0x6c/0x80
[ 82.043093] [<ffff00000863dc20>] device_for_each_child+0x58/0xa0
[ 82.043095] [<ffff000008696f80>] sas_remove_children+0x40/0x50
[ 82.043100] [<ffff00000869d1bc>] sas_destruct_devices+0x64/0xa0
[ 82.043102] [<ffff0000080e93bc>] process_one_work+0x1fc/0x4b0
[ 82.043104] [<ffff0000080e96c0>] worker_thread+0x50/0x490
[ 82.043105] [<ffff0000080f0364>] kthread+0xfc/0x128
[ 82.043107] [<ffff0000080836c0>] ret_from_fork+0x10/0x50
Make probe and destruct a direct call in the disco and revalidate function,
but put them outside the lock. The whole discovery or revalidate won't
be interrupted by other events. And the DISCE_PROBE and DISCE_DESTRUCT
event are deleted as a result of the direct call.
Introduce a new list to destruct the sas_port and put the port delete after
the destruct. This makes sure the right order of destroying the sysfs
kobject and fix the warning above.
sas_ex_revalidate_domain() has a loop to find all broadcasted
devices, and sometimes we have a chance to find the same expander twice.
Because the sas_port will be deleted at the end of the whole revalidate
process, a sas_port with the same name cannot be added before then.
Otherwise sysfs will complain about creating a duplicate filename. Since
the LLDD will send a broadcast for every device change, we can only
process one expander's revalidation.
[mkp: kbuild test robot warning]
Signed-off-by: Jason Yan <[email protected]>
CC: John Garry <[email protected]>
CC: Johannes Thumshirn <[email protected]>
CC: Ewan Milne <[email protected]>
CC: Christoph Hellwig <[email protected]>
CC: Tomas Henzl <[email protected]>
CC: Dan Williams <[email protected]>
Reviewed-by: Hannes Reinecke <[email protected]>
Signed-off-by: Martin K. Petersen <[email protected]> |
get_number(int *numptr, int low, const char *names[], int ch, FILE * file,
const char *terms) {
char temp[MAX_TEMPSTR], *pc;
int len, i;
pc = temp;
len = 0;
/* first look for a number */
while (isdigit((unsigned char) ch)) {
if (++len >= MAX_TEMPSTR)
goto bad;
*pc++ = (char)ch;
ch = get_char(file);
}
*pc = '\0';
if (len != 0) {
/* got a number, check for valid terminator */
if (!strchr(terms, ch))
goto bad;
*numptr = atoi(temp);
return (ch);
}
/* no numbers, look for a string if we have any */
if (names) {
while (isalpha((unsigned char) ch)) {
if (++len >= MAX_TEMPSTR)
goto bad;
*pc++ = (char)ch;
ch = get_char(file);
}
*pc = '\0';
if (len != 0 && strchr(terms, ch)) {
for (i = 0; names[i] != NULL; i++) {
Debug(DPARS | DEXT,
("get_num, compare(%s,%s)\n", names[i], temp));
if (!strcasecmp(names[i], temp)) {
*numptr = i + low;
return (ch);
}
}
}
}
bad:
unget_char(ch, file);
return (EOF);
} | 0 | [
"CWE-476"
] | cronie | a6576769f01325303b11edc3e0cfb05ef382ce56 | 243,595,978,605,829,140,000,000,000,000,000,000,000 | 49 | Fix CVE-2019-9704 and CVE-2019-9705
Users can cause a DoS of crond by loading huge crontab files.
We now allow a maximum of 1000 environment variables and 1000 crontab entries.
Also the comments and whitespace between the entries and variables
are now limited to 32768 characters. |
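A sketch of the kind of hard limit described above (illustrative
constants and names, not the actual cronie identifiers):

#define MAX_CRONTAB_ENTRIES_SKETCH 1000

/* Abort parsing once the entry budget is exhausted instead of
 * letting an attacker-sized crontab consume unbounded memory. */
static int entry_budget_ok_sketch(int nentries)
{
    return nentries < MAX_CRONTAB_ENTRIES_SKETCH;
}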
static int nfs4_xdr_enc_rename(struct rpc_rqst *req, __be32 *p, const struct nfs4_rename_arg *args)
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
.nops = 7,
};
int status;
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
encode_compound_hdr(&xdr, &hdr);
if ((status = encode_putfh(&xdr, args->old_dir)) != 0)
goto out;
if ((status = encode_savefh(&xdr)) != 0)
goto out;
if ((status = encode_putfh(&xdr, args->new_dir)) != 0)
goto out;
if ((status = encode_rename(&xdr, args->old_name, args->new_name)) != 0)
goto out;
if ((status = encode_getfattr(&xdr, args->bitmask)) != 0)
goto out;
if ((status = encode_restorefh(&xdr)) != 0)
goto out;
status = encode_getfattr(&xdr, args->bitmask);
out:
return status;
} | 0 | [
"CWE-703"
] | linux | dc0b027dfadfcb8a5504f7d8052754bf8d501ab9 | 106,254,837,113,129,600,000,000,000,000,000,000,000 | 26 | NFSv4: Convert the open and close ops to use fmode
Signed-off-by: Trond Myklebust <[email protected]> |
static void init_qxl_rom(PCIQXLDevice *d)
{
QXLRom *rom = memory_region_get_ram_ptr(&d->rom_bar);
QXLModes *modes = (QXLModes *)(rom + 1);
uint32_t ram_header_size;
uint32_t surface0_area_size;
uint32_t num_pages;
uint32_t fb;
int i, n;
memset(rom, 0, d->rom_size);
rom->magic = cpu_to_le32(QXL_ROM_MAGIC);
rom->id = cpu_to_le32(d->id);
rom->log_level = cpu_to_le32(d->guestdebug);
rom->modes_offset = cpu_to_le32(sizeof(QXLRom));
rom->slot_gen_bits = MEMSLOT_GENERATION_BITS;
rom->slot_id_bits = MEMSLOT_SLOT_BITS;
rom->slots_start = 1;
rom->slots_end = NUM_MEMSLOTS - 1;
rom->n_surfaces = cpu_to_le32(d->ssd.num_surfaces);
for (i = 0, n = 0; i < ARRAY_SIZE(qxl_modes); i++) {
fb = qxl_modes[i].y_res * qxl_modes[i].stride;
if (fb > d->vgamem_size) {
continue;
}
modes->modes[n].id = cpu_to_le32(i);
modes->modes[n].x_res = cpu_to_le32(qxl_modes[i].x_res);
modes->modes[n].y_res = cpu_to_le32(qxl_modes[i].y_res);
modes->modes[n].bits = cpu_to_le32(qxl_modes[i].bits);
modes->modes[n].stride = cpu_to_le32(qxl_modes[i].stride);
modes->modes[n].x_mili = cpu_to_le32(qxl_modes[i].x_mili);
modes->modes[n].y_mili = cpu_to_le32(qxl_modes[i].y_mili);
modes->modes[n].orientation = cpu_to_le32(qxl_modes[i].orientation);
n++;
}
modes->n_modes = cpu_to_le32(n);
ram_header_size = ALIGN(sizeof(QXLRam), 4096);
surface0_area_size = ALIGN(d->vgamem_size, 4096);
num_pages = d->vga.vram_size;
num_pages -= ram_header_size;
num_pages -= surface0_area_size;
num_pages = num_pages / QXL_PAGE_SIZE;
assert(ram_header_size + surface0_area_size <= d->vga.vram_size);
rom->draw_area_offset = cpu_to_le32(0);
rom->surface0_area_size = cpu_to_le32(surface0_area_size);
rom->pages_offset = cpu_to_le32(surface0_area_size);
rom->num_pages = cpu_to_le32(num_pages);
rom->ram_header_offset = cpu_to_le32(d->vga.vram_size - ram_header_size);
if (d->xres && d->yres) {
/* needs linux kernel 4.12+ to work */
rom->client_monitors_config.count = 1;
rom->client_monitors_config.heads[0].left = 0;
rom->client_monitors_config.heads[0].top = 0;
rom->client_monitors_config.heads[0].right = cpu_to_le32(d->xres);
rom->client_monitors_config.heads[0].bottom = cpu_to_le32(d->yres);
rom->client_monitors_config_crc = qxl_crc32(
(const uint8_t *)&rom->client_monitors_config,
sizeof(rom->client_monitors_config));
}
d->shadow_rom = *rom;
d->rom = rom;
d->modes = modes;
} | 0 | [
"CWE-476"
] | qemu | d52680fc932efb8a2f334cc6993e705ed1e31e99 | 34,974,686,305,094,583,000,000,000,000,000,000,000 | 71 | qxl: check release info object
When releasing spice resources in the release_resource() routine,
if the release info object 'ext.info' is null, it leads to a null
pointer dereference. Add a check to avoid it.
Reported-by: Bugs SysSec <[email protected]>
Signed-off-by: Prasad J Pandit <[email protected]>
Message-id: [email protected]
Signed-off-by: Gerd Hoffmann <[email protected]> |
int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen)
{
if (level != SOL_TCP)
return inet_csk_compat_getsockopt(sk, level, optname,
optval, optlen);
return do_tcp_getsockopt(sk, level, optname, optval, optlen);
} | 0 | [
"CWE-399",
"CWE-835"
] | linux | ccf7abb93af09ad0868ae9033d1ca8108bdaec82 | 315,280,350,685,908,560,000,000,000,000,000,000,000 | 8 | tcp: avoid infinite loop in tcp_splice_read()
Splicing from TCP socket is vulnerable when a packet with URG flag is
received and stored into receive queue.
__tcp_splice_read() returns 0, and sk_wait_data() immediately
returns since the problematic skb is in the queue.
This is a nice way to burn cpu (aka infinite loop) and trigger
soft lockups.
Again, this gem was found by syzkaller tool.
Fixes: 9c55e01c0cc8 ("[TCP]: Splice receive support.")
Signed-off-by: Eric Dumazet <[email protected]>
Reported-by: Dmitry Vyukov <[email protected]>
Cc: Willy Tarreau <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
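The shape of the fix can be sketched as a loop that bails out on zero
progress instead of re-waiting on data that can never be consumed
(hypothetical stand-in types; not the actual tcp_splice_read()):

#include <stddef.h>
#include <sys/types.h>

struct sock_sketch;  /* stand-in for struct sock */
ssize_t splice_once_sketch(struct sock_sketch *sk, size_t want); /* hypothetical */

ssize_t splice_read_sketch(struct sock_sketch *sk, size_t want)
{
    ssize_t copied = 0;
    while ((size_t)copied < want) {
        ssize_t n = splice_once_sketch(sk, want - copied);
        if (n <= 0)
            break;  /* n == 0 means no progress (e.g. an URG byte at
                     * the queue head): stop instead of spinning forever */
        copied += n;
    }
    return copied;
}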
static int __init loop_init(void)
{
int i, nr;
unsigned long range;
struct loop_device *lo;
int err;
part_shift = 0;
if (max_part > 0) {
part_shift = fls(max_part);
/*
* Adjust max_part according to part_shift as it is exported
* to user space so that user can decide correct minor number
* if [s]he want to create more devices.
*
* Note that -1 is required because partition 0 is reserved
* for the whole disk.
*/
max_part = (1UL << part_shift) - 1;
}
if ((1UL << part_shift) > DISK_MAX_PARTS) {
err = -EINVAL;
goto err_out;
}
if (max_loop > 1UL << (MINORBITS - part_shift)) {
err = -EINVAL;
goto err_out;
}
/*
* If max_loop is specified, create that many devices upfront.
* This also becomes a hard limit. If max_loop is not specified,
* create CONFIG_BLK_DEV_LOOP_MIN_COUNT loop devices at module
* init time. Loop devices can be requested on-demand with the
* /dev/loop-control interface, or be instantiated by accessing
* a 'dead' device node.
*/
if (max_loop) {
nr = max_loop;
range = max_loop << part_shift;
} else {
nr = CONFIG_BLK_DEV_LOOP_MIN_COUNT;
range = 1UL << MINORBITS;
}
err = misc_register(&loop_misc);
if (err < 0)
goto err_out;
if (register_blkdev(LOOP_MAJOR, "loop")) {
err = -EIO;
goto misc_out;
}
blk_register_region(MKDEV(LOOP_MAJOR, 0), range,
THIS_MODULE, loop_probe, NULL, NULL);
/* pre-create number of devices given by config or max_loop */
mutex_lock(&loop_index_mutex);
for (i = 0; i < nr; i++)
loop_add(&lo, i);
mutex_unlock(&loop_index_mutex);
printk(KERN_INFO "loop: module loaded\n");
return 0;
misc_out:
misc_deregister(&loop_misc);
err_out:
return err;
} | 0 | [
"CWE-416",
"CWE-362"
] | linux | ae6650163c66a7eff1acd6eb8b0f752dcfa8eba5 | 318,205,162,902,768,870,000,000,000,000,000,000,000 | 75 | loop: fix concurrent lo_open/lo_release
范龙飞 reports that KASAN can report a use-after-free in __lock_acquire.
The reason is due to insufficient serialization in lo_release(), which
will continue to use the loop device even after it has decremented the
lo_refcnt to zero.
In the meantime, another process can come in, open the loop device
again as it is being shut down. Confusion ensues.
Reported-by: 范龙飞 <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
Signed-off-by: Jens Axboe <[email protected]> |
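A userspace sketch of the serialization idea (a pthread mutex standing
in for the driver's lock; names are hypothetical):

#include <pthread.h>

static pthread_mutex_t lo_lock_sketch = PTHREAD_MUTEX_INITIALIZER;
static int lo_refcnt_sketch;

/* Last-close teardown happens under the same lock that guards open,
 * so an open cannot slip in between the count reaching zero and the
 * device being torn down. */
static void lo_release_sketch(void)
{
    pthread_mutex_lock(&lo_lock_sketch);
    if (--lo_refcnt_sketch == 0) {
        /* tear down the device while still holding the lock */
    }
    pthread_mutex_unlock(&lo_lock_sketch);
}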
static unsigned int selinux_ip_output(struct sk_buff *skb,
u16 family)
{
u32 sid;
if (!netlbl_enabled())
return NF_ACCEPT;
/* we do this in the LOCAL_OUT path and not the POST_ROUTING path
* because we want to make sure we apply the necessary labeling
* before IPsec is applied so we can leverage AH protection */
if (skb->sk) {
struct sk_security_struct *sksec = skb->sk->sk_security;
sid = sksec->sid;
} else
sid = SECINITSID_KERNEL;
if (selinux_netlbl_skbuff_setsid(skb, family, sid) != 0)
return NF_DROP;
return NF_ACCEPT;
} | 0 | [] | linux-2.6 | ee18d64c1f632043a02e6f5ba5e045bb26a5465f | 74,074,056,862,139,260,000,000,000,000,000,000,000 | 21 | KEYS: Add a keyctl to install a process's session keyring on its parent [try #6]
Add a keyctl to install a process's session keyring onto its parent. This
replaces the parent's session keyring. Because the COW credential code does
not permit one process to change another process's credentials directly, the
change is deferred until userspace next starts executing again. Normally this
will be after a wait*() syscall.
To support this, three new security hooks have been provided:
cred_alloc_blank() to allocate unset security creds, cred_transfer() to fill in
the blank security creds and key_session_to_parent() - which asks the LSM if
the process may replace its parent's session keyring.
The replacement may only happen if the process has the same ownership details
as its parent, and the process has LINK permission on the session keyring, and
the session keyring is owned by the process, and the LSM permits it.
Note that this requires alteration to each architecture's notify_resume path.
This has been done for all arches barring blackfin, m68k* and xtensa, all of
which need assembly alteration to support TIF_NOTIFY_RESUME. This allows the
replacement to be performed at the point the parent process resumes userspace
execution.
This allows the userspace AFS pioctl emulation to fully emulate newpag() and
the VIOCSETTOK and VIOCSETTOK2 pioctls, all of which require the ability to
alter the parent process's PAG membership. However, since kAFS doesn't use
PAGs per se, but rather dumps the keys into the session keyring, the session
keyring of the parent must be replaced if, for example, VIOCSETTOK is passed
the newpag flag.
This can be tested with the following program:
#include <stdio.h>
#include <stdlib.h>
#include <keyutils.h>
#define KEYCTL_SESSION_TO_PARENT 18
#define OSERROR(X, S) do { if ((long)(X) == -1) { perror(S); exit(1); } } while(0)
int main(int argc, char **argv)
{
key_serial_t keyring, key;
long ret;
keyring = keyctl_join_session_keyring(argv[1]);
OSERROR(keyring, "keyctl_join_session_keyring");
key = add_key("user", "a", "b", 1, keyring);
OSERROR(key, "add_key");
ret = keyctl(KEYCTL_SESSION_TO_PARENT);
OSERROR(ret, "KEYCTL_SESSION_TO_PARENT");
return 0;
}
Compiled and linked with -lkeyutils, you should see something like:
[dhowells@andromeda ~]$ keyctl show
Session Keyring
-3 --alswrv 4043 4043 keyring: _ses
355907932 --alswrv 4043 -1 \_ keyring: _uid.4043
[dhowells@andromeda ~]$ /tmp/newpag
[dhowells@andromeda ~]$ keyctl show
Session Keyring
-3 --alswrv 4043 4043 keyring: _ses
1055658746 --alswrv 4043 4043 \_ user: a
[dhowells@andromeda ~]$ /tmp/newpag hello
[dhowells@andromeda ~]$ keyctl show
Session Keyring
-3 --alswrv 4043 4043 keyring: hello
340417692 --alswrv 4043 4043 \_ user: a
Where the test program creates a new session keyring, sticks a user key named
'a' into it and then installs it on its parent.
Signed-off-by: David Howells <[email protected]>
Signed-off-by: James Morris <[email protected]> |
static int sctp_getsockopt_peer_addr_info(struct sock *sk, int len,
char __user *optval,
int __user *optlen)
{
struct sctp_paddrinfo pinfo;
struct sctp_transport *transport;
int retval = 0;
if (len < sizeof(pinfo)) {
retval = -EINVAL;
goto out;
}
len = sizeof(pinfo);
if (copy_from_user(&pinfo, optval, len)) {
retval = -EFAULT;
goto out;
}
transport = sctp_addr_id2transport(sk, &pinfo.spinfo_address,
pinfo.spinfo_assoc_id);
if (!transport)
return -EINVAL;
pinfo.spinfo_assoc_id = sctp_assoc2id(transport->asoc);
pinfo.spinfo_state = transport->state;
pinfo.spinfo_cwnd = transport->cwnd;
pinfo.spinfo_srtt = transport->srtt;
pinfo.spinfo_rto = jiffies_to_msecs(transport->rto);
pinfo.spinfo_mtu = transport->pathmtu;
if (pinfo.spinfo_state == SCTP_UNKNOWN)
pinfo.spinfo_state = SCTP_ACTIVE;
if (put_user(len, optlen)) {
retval = -EFAULT;
goto out;
}
if (copy_to_user(optval, &pinfo, len)) {
retval = -EFAULT;
goto out;
}
out:
return retval;
} | 0 | [
"CWE-20"
] | linux | 726bc6b092da4c093eb74d13c07184b18c1af0f1 | 229,541,472,853,529,060,000,000,000,000,000,000,000 | 47 | net/sctp: Validate parameter size for SCTP_GET_ASSOC_STATS
Building sctp may fail with:
In function ‘copy_from_user’,
inlined from ‘sctp_getsockopt_assoc_stats’ at
net/sctp/socket.c:5656:20:
arch/x86/include/asm/uaccess_32.h:211:26: error: call to
‘copy_from_user_overflow’ declared with attribute error: copy_from_user()
buffer size is not provably correct
if built with W=1 due to a missing parameter size validation
before the call to copy_from_user.
Signed-off-by: Guenter Roeck <[email protected]>
Acked-by: Vlad Yasevich <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
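A sketch of the validation pattern the fix adds (memcpy stands in for
copy_from_user; the struct is illustrative):

#include <errno.h>
#include <string.h>

struct assoc_stats_sketch { int assoc_id; /* ... */ };

/* Validate the caller-declared option length before copying:
 * copy_from_user() with an unchecked size is what triggered the
 * compile-time buffer-size warning. */
static int getsockopt_stats_sketch(struct assoc_stats_sketch *dst,
                                   const void *user, int len)
{
    if (len < (int)sizeof(*dst))
        return -EINVAL;
    memcpy(dst, user, sizeof(*dst));
    return 0;
}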
QUtil::uint_to_string(unsigned long long num, int length)
{
return int_to_string_base(num, 10, length);
} | 1 | [
"CWE-787"
] | qpdf | d71f05ca07eb5c7cfa4d6d23e5c1f2a800f52e8e | 113,034,255,050,892,140,000,000,000,000,000,000,000 | 4 | Fix sign and conversion warnings (major)
This makes all integer type conversions that have potential data loss
explicit with calls that do range checks and raise an exception. After
this commit, qpdf builds with no warnings when -Wsign-conversion
-Wconversion is used with gcc or clang or when -W3 -Wd4800 is used
with MSVC. This significantly reduces the likelihood of potential
crashes from bogus integer values.
There are some parts of the code that take int when they should take
size_t or an offset. Such places would make qpdf not support files
with more than 2^31 of something that usually wouldn't be so large. In
the event that such a file shows up and is valid, at least qpdf would
raise an error in the right spot so the issue could be legitimately
addressed rather than failing in some weird way because of a silent
overflow condition. |
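A plain-C sketch of a range-checked narrowing conversion of the kind
described (qpdf's real helpers are C++ and throw an exception instead
of aborting):

#include <limits.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

static int size_to_int_checked_sketch(size_t v)
{
    if (v > (size_t)INT_MAX) {
        fprintf(stderr, "size_t value out of int range\n");
        abort();
    }
    return (int)v;
}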
static int usb_host_handle_data(USBHostDevice *s, USBPacket *p)
{
struct usbdevfs_urb *urb;
AsyncURB *aurb;
int ret;
aurb = async_alloc();
aurb->hdev = s;
aurb->packet = p;
urb = &aurb->urb;
if (p->pid == USB_TOKEN_IN)
urb->endpoint = p->devep | 0x80;
else
urb->endpoint = p->devep;
if (is_halted(s, p->devep)) {
ret = ioctl(s->fd, USBDEVFS_CLEAR_HALT, &urb->endpoint);
if (ret < 0) {
dprintf("husb: failed to clear halt. ep 0x%x errno %d\n",
urb->endpoint, errno);
return USB_RET_NAK;
}
clear_halt(s, p->devep);
}
urb->buffer = p->data;
urb->buffer_length = p->len;
if (is_isoc(s, p->devep)) {
/* Setup ISOC transfer */
urb->type = USBDEVFS_URB_TYPE_ISO;
urb->flags = USBDEVFS_URB_ISO_ASAP;
urb->number_of_packets = 1;
urb->iso_frame_desc[0].length = p->len;
} else {
/* Setup bulk transfer */
urb->type = USBDEVFS_URB_TYPE_BULK;
}
urb->usercontext = s;
ret = ioctl(s->fd, USBDEVFS_SUBMITURB, urb);
dprintf("husb: data submit. ep 0x%x len %u aurb %p\n", urb->endpoint, p->len, aurb);
if (ret < 0) {
dprintf("husb: submit failed. errno %d\n", errno);
async_free(aurb);
switch(errno) {
case ETIMEDOUT:
return USB_RET_NAK;
case EPIPE:
default:
return USB_RET_STALL;
}
}
usb_defer_packet(p, async_cancel, aurb);
return USB_RET_ASYNC;
} | 0 | [
"CWE-119"
] | qemu | babd03fde68093482528010a5435c14ce9128e3f | 260,559,233,680,984,200,000,000,000,000,000,000,000 | 63 | usb-linux.c: fix buffer overflow
In usb-linux.c:usb_host_handle_control, we pass a 1024-byte buffer and
length to the kernel. However, the length was provided by the caller
of dev->handle_packet, and is not checked, so the kernel might provide
too much data and overflow our buffer.
For example, hw/usb-uhci.c could set the length to 2047.
hw/usb-ohci.c looks like it might go up to 4096 or 8192.
This causes a qemu crash, as reported here:
http://www.mail-archive.com/[email protected]/msg18447.html
This patch increases the usb-linux.c buffer size to 2048 to fix the
specific device reported, and adds a check to avoid the overflow in
any case.
Signed-off-by: Jim Paris <[email protected]>
Signed-off-by: Anthony Liguori <[email protected]> |
inline void SoftmaxImpl(const SoftmaxParams& params,
const RuntimeShape& input_shape,
const float* input_data,
const RuntimeShape& output_shape, float* output_data,
int start_batch, int end_batch) {
ruy::profiler::ScopeLabel label("Softmax/Impl");
MatchingFlatSize(input_shape, output_shape);
const int logit_size = input_shape.Dims(input_shape.DimensionsCount() - 1);
const MatrixMap<const float> in_mat(input_data + logit_size * start_batch,
logit_size, end_batch - start_batch);
MatrixMap<float> out_mat(output_data + logit_size * start_batch, logit_size,
end_batch - start_batch);
// Compute the exponential first, removing the max coefficient for numerical
// stability.
out_mat =
(in_mat.rowwise() - in_mat.colwise().maxCoeff()).array() * params.beta;
// We are separating out the exp function so that exp can be vectorized.
out_mat = out_mat.array().exp();
// Normalize to get the activations.
Eigen::Array<float, 1, Eigen::Dynamic> scale =
out_mat.array().colwise().sum().inverse();
out_mat.array().rowwise() *= scale;
} | 0 | [
"CWE-476",
"CWE-369"
] | tensorflow | 15691e456c7dc9bd6be203b09765b063bf4a380c | 202,161,155,281,028,170,000,000,000,000,000,000,000 | 24 | Prevent dereferencing of null pointers in TFLite's `add.cc`.
PiperOrigin-RevId: 387244946
Change-Id: I56094233327fbd8439b92e1dbb1262176e00eeb9 |
tabpage_close_other(tabpage_T *tp, int forceit)
{
int done = 0;
win_T *wp;
int h = tabline_height();
// Limit to 1000 windows, autocommands may add a window while we close
// one. OK, so I'm paranoid...
while (++done < 1000)
{
wp = tp->tp_firstwin;
ex_win_close(forceit, wp, tp);
// Autocommands may delete the tab page under our fingers and we may
// fail to close a window with a modified buffer.
if (!valid_tabpage(tp) || tp->tp_firstwin == wp)
break;
}
apply_autocmds(EVENT_TABCLOSED, NULL, NULL, FALSE, curbuf);
redraw_tabline = TRUE;
if (h != tabline_height())
shell_new_rows();
} | 0 | [
"CWE-122"
] | vim | 35a319b77f897744eec1155b736e9372c9c5575f | 187,323,049,726,467,170,000,000,000,000,000,000,000 | 25 | patch 8.2.3489: ml_get error after search with range
Problem: ml_get error after search with range.
Solution: Limit the line number to the buffer line count. |
gdk_pixbuf__tiff_image_load_increment (gpointer data, const guchar *buf,
guint size, GError **error)
{
TiffContext *context = (TiffContext *) data;
g_return_val_if_fail (data != NULL, FALSE);
tiff_set_handlers ();
if (!make_available_at_least (context, size)) {
g_set_error_literal (error,
GDK_PIXBUF_ERROR,
GDK_PIXBUF_ERROR_INSUFFICIENT_MEMORY,
_("Insufficient memory to open TIFF file"));
return FALSE;
}
memcpy (context->buffer + context->used, buf, size);
context->used += size;
return TRUE;
} | 0 | [] | gdk-pixbuf | 31a6cff3dfc6944aad4612a9668b8ad39122e48b | 116,273,658,511,003,670,000,000,000,000,000,000,000 | 21 | tiff: Check for integer overflows in multiplication
The checks currently in use are not sufficient, because they depend on
undefined behaviour:
rowstride = width * 4;
if (rowstride / 4 != width) { /* overflow */
If the multiplication has already overflowed, the compiler may decide
to optimize the if out and thus we do not handle the erroneous case.
Rearrange the checks to avoid the undefined behaviour.
Note that gcc doesn't seem to be impacted, though a defined behaviour is
obviously preferred.
CVE-2017-2870
https://bugzilla.gnome.org/show_bug.cgi?id=780269 |
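The rearrangement amounts to testing the operands before multiplying;
a minimal sketch:

#include <limits.h>

/* Signed overflow is undefined behaviour, so 'rowstride / 4 != width'
 * may be optimized away once 'width * 4' has already overflowed.
 * Checking the bound first keeps the guard effective. */
static int rowstride_sketch(int width, int *rowstride)
{
    if (width < 0 || width > INT_MAX / 4)
        return 0;  /* would overflow */
    *rowstride = width * 4;
    return 1;
}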
int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
unsigned long start, int len, int write, int force,
struct page **pages, struct vm_area_struct **vmas)
{
int i;
unsigned int vm_flags;
if (len <= 0)
return 0;
/*
* Require read or write permissions.
* If 'force' is set, we only require the "MAY" flags.
*/
vm_flags = write ? (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
vm_flags &= force ? (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
i = 0;
do {
struct vm_area_struct *vma;
unsigned int foll_flags;
vma = find_extend_vma(mm, start);
if (!vma && in_gate_area(tsk, start)) {
unsigned long pg = start & PAGE_MASK;
struct vm_area_struct *gate_vma = get_gate_vma(tsk);
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
if (write) /* user gate pages are read-only */
return i ? : -EFAULT;
if (pg > TASK_SIZE)
pgd = pgd_offset_k(pg);
else
pgd = pgd_offset_gate(mm, pg);
BUG_ON(pgd_none(*pgd));
pud = pud_offset(pgd, pg);
BUG_ON(pud_none(*pud));
pmd = pmd_offset(pud, pg);
if (pmd_none(*pmd))
return i ? : -EFAULT;
pte = pte_offset_map(pmd, pg);
if (pte_none(*pte)) {
pte_unmap(pte);
return i ? : -EFAULT;
}
if (pages) {
struct page *page = vm_normal_page(gate_vma, start, *pte);
pages[i] = page;
if (page)
get_page(page);
}
pte_unmap(pte);
if (vmas)
vmas[i] = gate_vma;
i++;
start += PAGE_SIZE;
len--;
continue;
}
if (!vma || (vma->vm_flags & (VM_IO | VM_PFNMAP))
|| !(vm_flags & vma->vm_flags))
return i ? : -EFAULT;
if (is_vm_hugetlb_page(vma)) {
i = follow_hugetlb_page(mm, vma, pages, vmas,
&start, &len, i, write);
continue;
}
foll_flags = FOLL_TOUCH;
if (pages)
foll_flags |= FOLL_GET;
if (!write && !(vma->vm_flags & VM_LOCKED) &&
(!vma->vm_ops || !vma->vm_ops->fault))
foll_flags |= FOLL_ANON;
do {
struct page *page;
/*
* If tsk is ooming, cut off its access to large memory
* allocations. It has a pending SIGKILL, but it can't
* be processed until returning to user space.
*/
if (unlikely(test_tsk_thread_flag(tsk, TIF_MEMDIE)))
return -ENOMEM;
if (write)
foll_flags |= FOLL_WRITE;
cond_resched();
while (!(page = follow_page(vma, start, foll_flags))) {
int ret;
ret = handle_mm_fault(mm, vma, start,
foll_flags & FOLL_WRITE);
if (ret & VM_FAULT_ERROR) {
if (ret & VM_FAULT_OOM)
return i ? i : -ENOMEM;
else if (ret & VM_FAULT_SIGBUS)
return i ? i : -EFAULT;
BUG();
}
if (ret & VM_FAULT_MAJOR)
tsk->maj_flt++;
else
tsk->min_flt++;
/*
* The VM_FAULT_WRITE bit tells us that
* do_wp_page has broken COW when necessary,
* even if maybe_mkwrite decided not to set
* pte_write. We can thus safely do subsequent
* page lookups as if they were reads.
*/
if (ret & VM_FAULT_WRITE)
foll_flags &= ~FOLL_WRITE;
cond_resched();
}
if (IS_ERR(page))
return i ? i : PTR_ERR(page);
if (pages) {
pages[i] = page;
flush_anon_page(vma, page, start);
flush_dcache_page(page);
}
if (vmas)
vmas[i] = vma;
i++;
start += PAGE_SIZE;
len--;
} while (len && start < vma->vm_end);
} while (len);
return i;
} | 1 | [
"CWE-20"
] | linux-2.6 | 672ca28e300c17bf8d792a2a7a8631193e580c74 | 139,010,725,753,363,210,000,000,000,000,000,000,000 | 138 | Fix ZERO_PAGE breakage with vmware
Commit 89f5b7da2a6bad2e84670422ab8192382a5aeb9f ("Reinstate ZERO_PAGE
optimization in 'get_user_pages()' and fix XIP") broke vmware, as
reported by Jeff Chua:
"This broke vmware 6.0.4.
Jun 22 14:53:03.845: vmx| NOT_IMPLEMENTED
/build/mts/release/bora-93057/bora/vmx/main/vmmonPosix.c:774"
and the reason seems to be that there's an old bug in how we handle
FOLL_ANON on VM_SHARED areas in get_user_pages(), but since it only
triggered if the whole page table was missing, nobody had apparently hit
it before.
The recent changes to 'follow_page()' made the FOLL_ANON logic trigger
not just for whole missing page tables, but for individual pages as
well, and exposed this problem.
This fixes it by making the test for when FOLL_ANON is used more
careful, and also makes the code easier to read and understand by moving
the logic to a separate inline function.
Reported-and-tested-by: Jeff Chua <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
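The separate inline function referred to can be sketched roughly like
this (an approximation of the tightened predicate, not a verbatim copy
of the patch; stand-in types and flag values):

/* The zero-page / FOLL_ANON fast path is only safe for private,
 * unlocked mappings with no fault handler; VM_SHARED areas must go
 * through the real fault path. */
struct vma_sketch {
    unsigned long vm_flags;
    const struct vm_ops_sketch { int (*fault)(void); } *vm_ops;
};

#define VM_LOCKED_SKETCH 0x2000
#define VM_SHARED_SKETCH 0x0008

static int use_zero_page_sketch(const struct vma_sketch *vma)
{
    if (vma->vm_flags & (VM_LOCKED_SKETCH | VM_SHARED_SKETCH))
        return 0;
    return !vma->vm_ops || !vma->vm_ops->fault;
}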
start_job(cupsd_job_t *job, /* I - Job ID */
cupsd_printer_t *printer) /* I - Printer to print job */
{
const char *filename; /* Support filename */
ipp_attribute_t *cancel_after = ippFindAttribute(job->attrs,
"job-cancel-after",
IPP_TAG_INTEGER);
/* job-cancel-after attribute */
cupsdLogMessage(CUPSD_LOG_DEBUG2, "start_job(job=%p(%d), printer=%p(%s))",
job, job->id, printer, printer->name);
/*
* Make sure we have some files around before we try to print...
*/
if (job->num_files == 0)
{
ippSetString(job->attrs, &job->reasons, 0, "aborted-by-system");
cupsdSetJobState(job, IPP_JOB_ABORTED, CUPSD_JOB_DEFAULT,
"Aborting job because it has no files.");
return;
}
/*
* Update the printer and job state to "processing"...
*/
if (!cupsdLoadJob(job))
return;
if (!job->printer_message)
job->printer_message = ippFindAttribute(job->attrs,
"job-printer-state-message",
IPP_TAG_TEXT);
if (job->printer_message)
ippSetString(job->attrs, &job->printer_message, 0, "");
ippSetString(job->attrs, &job->reasons, 0, "job-printing");
cupsdSetJobState(job, IPP_JOB_PROCESSING, CUPSD_JOB_DEFAULT, NULL);
cupsdSetPrinterState(printer, IPP_PRINTER_PROCESSING, 0);
cupsdSetPrinterReasons(printer, "-cups-remote-pending,"
"cups-remote-pending-held,"
"cups-remote-processing,"
"cups-remote-stopped,"
"cups-remote-canceled,"
"cups-remote-aborted,"
"cups-remote-completed");
job->cost = 0;
job->current_file = 0;
job->file_time = 0;
job->history_time = 0;
job->progress = 0;
job->printer = printer;
printer->job = job;
if (cancel_after)
job->cancel_time = time(NULL) + ippGetInteger(cancel_after, 0);
else if (MaxJobTime > 0)
job->cancel_time = time(NULL) + MaxJobTime;
else
job->cancel_time = 0;
/*
* Check for support files...
*/
cupsdSetPrinterReasons(job->printer, "-cups-missing-filter-warning,"
"cups-insecure-filter-warning");
if (printer->pc)
{
for (filename = (const char *)cupsArrayFirst(printer->pc->support_files);
filename;
filename = (const char *)cupsArrayNext(printer->pc->support_files))
{
if (_cupsFileCheck(filename, _CUPS_FILE_CHECK_FILE, !RunUser,
cupsdLogFCMessage, printer))
break;
}
}
/*
* Setup the last exit status and security profiles...
*/
job->status = 0;
job->profile = cupsdCreateProfile(job->id, 0);
job->bprofile = cupsdCreateProfile(job->id, 1);
/*
* Create the status pipes and buffer...
*/
if (cupsdOpenPipe(job->status_pipes))
{
cupsdLogJob(job, CUPSD_LOG_DEBUG,
"Unable to create job status pipes - %s.", strerror(errno));
cupsdSetJobState(job, IPP_JOB_STOPPED, CUPSD_JOB_DEFAULT,
"Job stopped because the scheduler could not create the "
"job status pipes.");
cupsdDestroyProfile(job->profile);
job->profile = NULL;
cupsdDestroyProfile(job->bprofile);
job->bprofile = NULL;
return;
}
job->status_buffer = cupsdStatBufNew(job->status_pipes[0], NULL);
job->status_level = CUPSD_LOG_INFO;
/*
* Create the backchannel pipes and make them non-blocking...
*/
if (cupsdOpenPipe(job->back_pipes))
{
cupsdLogJob(job, CUPSD_LOG_DEBUG,
"Unable to create back-channel pipes - %s.", strerror(errno));
cupsdSetJobState(job, IPP_JOB_STOPPED, CUPSD_JOB_DEFAULT,
"Job stopped because the scheduler could not create the "
"back-channel pipes.");
cupsdClosePipe(job->status_pipes);
cupsdStatBufDelete(job->status_buffer);
job->status_buffer = NULL;
cupsdDestroyProfile(job->profile);
job->profile = NULL;
cupsdDestroyProfile(job->bprofile);
job->bprofile = NULL;
return;
}
fcntl(job->back_pipes[0], F_SETFL,
fcntl(job->back_pipes[0], F_GETFL) | O_NONBLOCK);
fcntl(job->back_pipes[1], F_SETFL,
fcntl(job->back_pipes[1], F_GETFL) | O_NONBLOCK);
/*
* Create the side-channel pipes and make them non-blocking...
*/
if (socketpair(AF_LOCAL, SOCK_STREAM, 0, job->side_pipes))
{
cupsdLogJob(job, CUPSD_LOG_DEBUG,
"Unable to create side-channel pipes - %s.", strerror(errno));
cupsdSetJobState(job, IPP_JOB_STOPPED, CUPSD_JOB_DEFAULT,
"Job stopped because the scheduler could not create the "
"side-channel pipes.");
cupsdClosePipe(job->back_pipes);
cupsdClosePipe(job->status_pipes);
cupsdStatBufDelete(job->status_buffer);
job->status_buffer = NULL;
cupsdDestroyProfile(job->profile);
job->profile = NULL;
cupsdDestroyProfile(job->bprofile);
job->bprofile = NULL;
return;
}
fcntl(job->side_pipes[0], F_SETFL,
fcntl(job->side_pipes[0], F_GETFL) | O_NONBLOCK);
fcntl(job->side_pipes[1], F_SETFL,
fcntl(job->side_pipes[1], F_GETFL) | O_NONBLOCK);
fcntl(job->side_pipes[0], F_SETFD,
fcntl(job->side_pipes[0], F_GETFD) | FD_CLOEXEC);
fcntl(job->side_pipes[1], F_SETFD,
fcntl(job->side_pipes[1], F_GETFD) | FD_CLOEXEC);
/*
* Now start the first file in the job...
*/
cupsdContinueJob(job);
} | 1 | [] | cups | d47f6aec436e0e9df6554436e391471097686ecc | 174,379,712,304,334,240,000,000,000,000,000,000,000 | 186 | Fix local privilege escalation to root and sandbox bypasses in scheduler
(rdar://37836779, rdar://37836995, rdar://37837252, rdar://37837581) |
uchar *getMSG(msg_t *pM)
{
uchar *ret;
if(pM == NULL)
ret = UCHAR_CONSTANT("");
else {
if(pM->iLenMSG == 0)
ret = UCHAR_CONSTANT("");
else
ret = pM->pszRawMsg + pM->offMSG;
}
return ret;
} | 0 | [
"CWE-772"
] | rsyslog | 8083bd1433449fd2b1b79bf759f782e0f64c0cd2 | 291,579,656,889,982,140,000,000,000,000,000,000,000 | 13 | backporting abort condition fix from 5.7.7 |
static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index)
{
vhost_poll_flush(&vs->vqs[index].vq.poll);
} | 0 | [
"CWE-200",
"CWE-119"
] | linux | 59c816c1f24df0204e01851431d3bab3eb76719c | 232,279,204,207,595,920,000,000,000,000,000,000,000 | 4 | vhost/scsi: potential memory corruption
This code in vhost_scsi_make_tpg() is confusing because we limit "tpgt"
to UINT_MAX but the data type of "tpg->tport_tpgt" and that is a u16.
I looked at the context and it turns out that in
vhost_scsi_set_endpoint(), "tpg->tport_tpgt" is used as an offset into
the vs_tpg[] array which has VHOST_SCSI_MAX_TARGET (256) elements so
anything higher than 255 then it is invalid. I have made that the limit
now.
In vhost_scsi_send_evt() we mask away values higher than 255, but now
that the limit has changed, we don't need the mask.
Signed-off-by: Dan Carpenter <[email protected]>
Signed-off-by: Nicholas Bellinger <[email protected]> |
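The tightened bound fits in one check; a sketch (VHOST_SCSI_MAX_TARGET
is 256 in the driver):

#define VHOST_SCSI_MAX_TARGET_SKETCH 256

/* tpgt is later used as an index into vs_tpg[VHOST_SCSI_MAX_TARGET],
 * so reject anything that does not fit the index range up front. */
static int tpgt_valid_sketch(unsigned long tpgt)
{
    return tpgt < VHOST_SCSI_MAX_TARGET_SKETCH;
}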
/* Must be called in process context */
void netif_napi_del(struct napi_struct *napi)
{
might_sleep();
if (napi_hash_del(napi))
synchronize_net();
list_del_init(&napi->dev_list);
napi_free_frags(napi);
kfree_skb_list(napi->gro_list);
napi->gro_list = NULL;
	napi->gro_count = 0;
} | 0 | [
"CWE-400",
"CWE-703"
] | linux | fac8e0f579695a3ecbc4d3cac369139d7f819971 | 250,407,675,933,168,700,000,000,000,000,000,000,000 | 12 | tunnels: Don't apply GRO to multiple layers of encapsulation.
When drivers express support for TSO of encapsulated packets, they
only mean that they can do it for one layer of encapsulation.
Supporting additional levels would mean updating, at a minimum,
more IP length fields and they are unaware of this.
No encapsulation device expresses support for handling offloaded
encapsulated packets, so we won't generate these types of frames
in the transmit path. However, GRO doesn't have a check for
multiple levels of encapsulation and will attempt to build them.
UDP tunnel GRO actually does prevent this situation but it only
handles multiple UDP tunnels stacked on top of each other. This
generalizes that solution to prevent any kind of tunnel stacking
that would cause problems.
Fixes: bf5a755f ("net-gre-gro: Add GRE support to the GRO stack")
Signed-off-by: Jesse Gross <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
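If memory serves, the generalized guard works by marking the skb the
first time a tunnel GRO handler runs and refusing to aggregate a second
layer; a hedged sketch (the field name is assumed from the upstream fix):

/* One bit of per-skb GRO state: set on entering the first tunnel
 * layer; if already set, flush rather than aggregate. */
struct gro_cb_sketch { unsigned int encap_mark : 1; };

static int tunnel_gro_enter_sketch(struct gro_cb_sketch *cb)
{
    if (cb->encap_mark)
        return 0;   /* second encapsulation layer: don't aggregate */
    cb->encap_mark = 1;
    return 1;
}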
static struct task_struct *dup_task_struct(struct task_struct *orig)
{
struct task_struct *tsk;
struct thread_info *ti;
unsigned long *stackend;
int err;
prepare_to_copy(orig);
tsk = alloc_task_struct();
if (!tsk)
return NULL;
ti = alloc_thread_info(tsk);
if (!ti) {
free_task_struct(tsk);
return NULL;
}
err = arch_dup_task_struct(tsk, orig);
if (err)
goto out;
tsk->stack = ti;
err = prop_local_init_single(&tsk->dirties);
if (err)
goto out;
setup_thread_stack(tsk, orig);
stackend = end_of_stack(tsk);
*stackend = STACK_END_MAGIC; /* for overflow detection */
#ifdef CONFIG_CC_STACKPROTECTOR
tsk->stack_canary = get_random_int();
#endif
/* One for us, one for whoever does the "release_task()" (usually parent) */
atomic_set(&tsk->usage,2);
atomic_set(&tsk->fs_excl, 0);
#ifdef CONFIG_BLK_DEV_IO_TRACE
tsk->btrace_seq = 0;
#endif
tsk->splice_pipe = NULL;
account_kernel_stack(ti, 1);
return tsk;
out:
free_thread_info(ti);
free_task_struct(tsk);
return NULL;
} | 0 | [
"CWE-20",
"CWE-703",
"CWE-400"
] | linux | b69f2292063d2caf37ca9aec7d63ded203701bf3 | 126,550,590,993,255,630,000,000,000,000,000,000,000 | 55 | block: Fix io_context leak after failure of clone with CLONE_IO
With CLONE_IO, parent's io_context->nr_tasks is incremented, but never
decremented whenever copy_process() fails afterwards, which prevents
exit_io_context() from calling IO schedulers exit functions.
Give a task_struct to exit_io_context(), and call exit_io_context() instead of
put_io_context() in copy_process() cleanup path.
Signed-off-by: Louis Rilling <[email protected]>
Signed-off-by: Jens Axboe <[email protected]> |
PHP_FUNCTION(utf8_decode)
{
char *arg;
XML_Char *decoded;
int arg_len, len;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s", &arg, &arg_len) == FAILURE) {
return;
}
decoded = xml_utf8_decode(arg, arg_len, &len, "ISO-8859-1");
if (decoded == NULL) {
RETURN_FALSE;
}
RETVAL_STRINGL(decoded, len, 0);
} | 0 | [
"CWE-787"
] | php-src | 7d163e8a0880ae8af2dd869071393e5dc07ef271 | 301,234,475,340,056,500,000,000,000,000,000,000,000 | 16 | truncate results at depth of 255 to prevent corruption |
webSocketsEncode(rfbClientPtr cl, const char *src, int len, char **dst)
{
return ((ws_ctx_t *)cl->wsctx)->encode(cl, src, len, dst);
} | 0 | [
"CWE-787"
] | libvncserver | aac95a9dcf4bbba87b76c72706c3221a842ca433 | 209,522,254,436,946,570,000,000,000,000,000,000,000 | 4 | fix overflow and refactor websockets decode (Hybi)
fix critical heap-based buffer overflow which allowed easy modification
of a return address via an overwritten function pointer
fix bug causing connections to fail due to a "one websocket frame = one
ws_read" assumption, which failed with LibVNCServer-0.9.11
refactor websocket Hybi decode to use a simple state machine for
decoding of websocket frames |
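A compact illustration of the state-machine approach (only the 7-bit
length form is handled here; real Hybi decoding also covers 16/64-bit
lengths, opcodes and unmasking):

#include <stddef.h>
#include <stdint.h>

enum ws_state_sketch { WS_OPCODE, WS_LEN, WS_MASK, WS_PAYLOAD };

struct ws_dec_sketch {
    enum ws_state_sketch state;
    uint64_t payload_len;
    uint8_t  mask[4];
    unsigned mask_got;
};

/* Consume header bytes one at a time; because all progress lives in
 * the struct, a frame may arrive split across any number of reads. */
static size_t ws_feed_sketch(struct ws_dec_sketch *d,
                             const uint8_t *buf, size_t len)
{
    size_t i = 0;
    while (i < len && d->state != WS_PAYLOAD) {
        uint8_t b = buf[i++];
        switch (d->state) {
        case WS_OPCODE:
            d->state = WS_LEN;
            break;
        case WS_LEN:
            d->payload_len = b & 0x7f;  /* 126/127 select longer forms */
            d->mask_got = 0;
            d->state = (b & 0x80) ? WS_MASK : WS_PAYLOAD;
            break;
        case WS_MASK:
            d->mask[d->mask_got++] = b;
            if (d->mask_got == 4)
                d->state = WS_PAYLOAD;
            break;
        default:
            break;
        }
    }
    return i;  /* bytes consumed; payload handling follows separately */
}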
static int af9005_pid_filter(struct dvb_usb_adapter *adap, int index,
u16 pid, int onoff)
{
u8 cmd = index & 0x1f;
int ret;
deb_info("set pid filter, index %d, pid %x, onoff %d\n", index,
pid, onoff);
if (onoff) {
/* cannot use it as pid_filter_ctrl since it has to be done
before setting the first pid */
if (adap->feedcount == 1) {
deb_info("first pid set, enable pid table\n");
ret = af9005_pid_filter_control(adap, onoff);
if (ret)
return ret;
}
ret =
af9005_write_ofdm_register(adap->dev,
XD_MP2IF_PID_DATA_L,
(u8) (pid & 0xff));
if (ret)
return ret;
ret =
af9005_write_ofdm_register(adap->dev,
XD_MP2IF_PID_DATA_H,
(u8) (pid >> 8));
if (ret)
return ret;
cmd |= 0x20 | 0x40;
} else {
if (adap->feedcount == 0) {
deb_info("last pid unset, disable pid table\n");
ret = af9005_pid_filter_control(adap, onoff);
if (ret)
return ret;
}
}
ret = af9005_write_ofdm_register(adap->dev, XD_MP2IF_PID_IDX, cmd);
if (ret)
return ret;
deb_info("set pid ok\n");
return 0;
} | 0 | [
"CWE-400",
"CWE-401"
] | linux | 2289adbfa559050d2a38bcd9caac1c18b800e928 | 9,946,306,358,723,396,000,000,000,000,000,000,000 | 43 | media: usb: fix memory leak in af9005_identify_state
In af9005_identify_state, when returning -EIO the allocated buffer should
be released. Replace the "return -EIO" with an assignment into ret and move
deb_info() under a check.
Fixes: af4e067e1dcf ("V4L/DVB (5625): Add support for the AF9005 demodulator from Afatech")
Signed-off-by: Navid Emamdoost <[email protected]>
Signed-off-by: Hans Verkuil <[email protected]>
Signed-off-by: Mauro Carvalho Chehab <[email protected]> |
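The leak-and-fix pattern in miniature (malloc/free stand in for the
kernel allocators; probe_hw_sketch is hypothetical):

#include <errno.h>
#include <stdlib.h>

int probe_hw_sketch(unsigned char *buf);  /* hypothetical */

static int identify_state_sketch(void)
{
    int ret;
    unsigned char *buf = malloc(16);

    if (!buf)
        return -ENOMEM;
    if (probe_hw_sketch(buf) != 0) {
        ret = -EIO;
        goto out;      /* the bug was a bare 'return -EIO' here */
    }
    ret = 0;
out:
    free(buf);         /* released on every path, fixing the leak */
    return ret;
}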
int iwl_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta,
int tid)
{
unsigned long flags;
int sta_id;
struct iwl_addsta_cmd sta_cmd;
lockdep_assert_held(&priv->shrd->mutex);
sta_id = iwl_sta_id(sta);
if (sta_id == IWL_INVALID_STATION) {
IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
return -ENXIO;
}
spin_lock_irqsave(&priv->shrd->sta_lock, flags);
priv->stations[sta_id].sta.station_flags_msk = 0;
priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_DELBA_TID_MSK;
priv->stations[sta_id].sta.remove_immediate_ba_tid = (u8)tid;
priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
} | 0 | [
"CWE-119",
"CWE-787"
] | linux | 2da424b0773cea3db47e1e81db71eeebde8269d4 | 216,617,917,280,097,760,000,000,000,000,000,000,000 | 25 | iwlwifi: Sanity check for sta_id
In my testing, I saw some strange behavior
[ 421.739708] iwlwifi 0000:01:00.0: ACTIVATE a non DRIVER active station id 148 addr 00:00:00:00:00:00
[ 421.739719] iwlwifi 0000:01:00.0: iwl_sta_ucode_activate Added STA id 148 addr 00:00:00:00:00:00 to uCode
Not sure how it happened, but add the sanity check to prevent memory
corruption.
Signed-off-by: Wey-Yi Guy <[email protected]>
Signed-off-by: John W. Linville <[email protected]> |
static void complete_update_bin(conn *c) {
protocol_binary_response_status eno = PROTOCOL_BINARY_RESPONSE_EINVAL;
enum store_item_type ret = NOT_STORED;
assert(c != NULL);
item *it = c->item;
pthread_mutex_lock(&c->thread->stats.mutex);
c->thread->stats.slab_stats[ITEM_clsid(it)].set_cmds++;
pthread_mutex_unlock(&c->thread->stats.mutex);
/* We don't actually receive the trailing two characters in the bin
* protocol, so we're going to just set them here */
if ((it->it_flags & ITEM_CHUNKED) == 0) {
*(ITEM_data(it) + it->nbytes - 2) = '\r';
*(ITEM_data(it) + it->nbytes - 1) = '\n';
} else {
assert(c->ritem);
item_chunk *ch = (item_chunk *) c->ritem;
if (ch->size == ch->used)
ch = ch->next;
if (ch->size - ch->used > 1) {
ch->data[ch->used + 1] = '\r';
ch->data[ch->used + 2] = '\n';
ch->used += 2;
} else {
ch->data[ch->used + 1] = '\r';
ch->next->data[0] = '\n';
ch->used++;
ch->next->used++;
assert(ch->size == ch->used);
}
}
ret = store_item(it, c->cmd, c);
#ifdef ENABLE_DTRACE
uint64_t cas = ITEM_get_cas(it);
switch (c->cmd) {
case NREAD_ADD:
MEMCACHED_COMMAND_ADD(c->sfd, ITEM_key(it), it->nkey,
(ret == STORED) ? it->nbytes : -1, cas);
break;
case NREAD_REPLACE:
MEMCACHED_COMMAND_REPLACE(c->sfd, ITEM_key(it), it->nkey,
(ret == STORED) ? it->nbytes : -1, cas);
break;
case NREAD_APPEND:
MEMCACHED_COMMAND_APPEND(c->sfd, ITEM_key(it), it->nkey,
(ret == STORED) ? it->nbytes : -1, cas);
break;
case NREAD_PREPEND:
MEMCACHED_COMMAND_PREPEND(c->sfd, ITEM_key(it), it->nkey,
(ret == STORED) ? it->nbytes : -1, cas);
break;
case NREAD_SET:
MEMCACHED_COMMAND_SET(c->sfd, ITEM_key(it), it->nkey,
(ret == STORED) ? it->nbytes : -1, cas);
break;
}
#endif
switch (ret) {
case STORED:
/* Stored */
write_bin_response(c, NULL, 0, 0, 0);
break;
case EXISTS:
write_bin_error(c, PROTOCOL_BINARY_RESPONSE_KEY_EEXISTS, NULL, 0);
break;
case NOT_FOUND:
write_bin_error(c, PROTOCOL_BINARY_RESPONSE_KEY_ENOENT, NULL, 0);
break;
case NOT_STORED:
case TOO_LARGE:
case NO_MEMORY:
if (c->cmd == NREAD_ADD) {
eno = PROTOCOL_BINARY_RESPONSE_KEY_EEXISTS;
} else if(c->cmd == NREAD_REPLACE) {
eno = PROTOCOL_BINARY_RESPONSE_KEY_ENOENT;
} else {
eno = PROTOCOL_BINARY_RESPONSE_NOT_STORED;
}
write_bin_error(c, eno, NULL, 0);
}
item_remove(c->item); /* release the c->item reference */
c->item = 0;
} | 0 | [
"CWE-190"
] | memcached | bd578fc34b96abe0f8d99c1409814a09f51ee71c | 267,468,753,293,904,760,000,000,000,000,000,000,000 | 89 | CVE reported by cisco talos |
unsigned int inet6_hash_frag(__be32 id, const struct in6_addr *saddr,
const struct in6_addr *daddr, u32 rnd)
{
u32 c;
c = jhash_3words((__force u32)saddr->s6_addr32[0],
(__force u32)saddr->s6_addr32[1],
(__force u32)saddr->s6_addr32[2],
rnd);
c = jhash_3words((__force u32)saddr->s6_addr32[3],
(__force u32)daddr->s6_addr32[0],
(__force u32)daddr->s6_addr32[1],
c);
c = jhash_3words((__force u32)daddr->s6_addr32[2],
(__force u32)daddr->s6_addr32[3],
(__force u32)id,
c);
return c & (INETFRAGS_HASHSZ - 1);
} | 0 | [] | linux | 3ef0eb0db4bf92c6d2510fe5c4dc51852746f206 | 82,045,790,611,138,970,000,000,000,000,000,000,000 | 22 | net: frag, move LRU list maintenance outside of rwlock
Updating the fragmentation queues LRU (Least-Recently-Used) list,
required taking the hash writer lock. However, the LRU list isn't
tied to the hash at all, so we can use a separate lock for it.
Original-idea-by: Florian Westphal <[email protected]>
Signed-off-by: Jesper Dangaard Brouer <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
TEST(FloatPoolingOpTest, MaxPool) {
FloatPoolingOpModel m(BuiltinOperator_MAX_POOL_2D,
/*input=*/{TensorType_FLOAT32, {1, 2, 4, 1}},
/*filter_width=*/2, /*filter_height=*/2,
/*output=*/{TensorType_FLOAT32, {}});
m.SetInput({
0, 6, 2, 4, //
3, 2, 10, 7, //
});
m.Invoke();
EXPECT_THAT(m.GetOutput(), ElementsAreArray({6, 10}));
} | 0 | [
"CWE-369"
] | tensorflow | 5f7975d09eac0f10ed8a17dbb6f5964977725adc | 67,807,156,795,252,740,000,000,000,000,000,000,000 | 12 | Prevent another div by 0 in optimized pooling implementations TFLite
PiperOrigin-RevId: 370800091
Change-Id: I2119352f57fb5ca4f2051e0e2d749403304a979b |
inline T_CALC MulOffset(T a, T b, T_SCALE c) {
return (static_cast<T_CALC>(a) - static_cast<T_CALC>(b)) *
static_cast<T_CALC>(c);
} | 0 | [
"CWE-787"
] | tensorflow | f6c40f0c6cbf00d46c7717a26419f2062f2f8694 | 53,718,137,099,821,900,000,000,000,000,000,000,000 | 4 | Validate min and max arguments to `QuantizedResizeBilinear`.
PiperOrigin-RevId: 369765091
Change-Id: I33be8b78273ab7d08b97541692fe05cb7f94963a |
wav_close (SF_PRIVATE *psf)
{
if (psf->file.mode == SFM_WRITE || psf->file.mode == SFM_RDWR)
{ wav_write_tailer (psf) ;
if (psf->file.mode == SFM_RDWR)
{ sf_count_t current = psf_ftell (psf) ;
/*
** If the mode is RDWR and the current position is less than the
** filelength, truncate the file.
*/
if (current < psf->filelength)
{ psf_ftruncate (psf, current) ;
psf->filelength = current ;
} ;
} ;
psf->write_header (psf, SF_TRUE) ;
} ;
return 0 ;
} /* wav_close */ | 0 | [
"CWE-476"
] | libsndfile | 6f3266277bed16525f0ac2f0f03ff4626f1923e5 | 213,602,587,683,351,960,000,000,000,000,000,000,000 | 24 | Fix max channel count bug
The code was allowing files to be written with a channel count of exactly
`SF_MAX_CHANNELS` but was failing to read some file formats with the same
channel count. |
tape_buffered_read (char *in_buf, int in_des, off_t num_bytes)
{
off_t bytes_left = num_bytes; /* Bytes needing to be copied. */
off_t space_left; /* Bytes to copy from input buffer. */
while (bytes_left > 0)
{
if (input_size == 0)
tape_fill_input_buffer (in_des, io_block_size);
if (bytes_left < input_size)
space_left = bytes_left;
else
space_left = input_size;
memcpy (in_buf, in_buff, (unsigned) space_left);
in_buff += space_left;
in_buf += space_left;
input_size -= space_left;
bytes_left -= space_left;
}
} | 0 | [
"CWE-190"
] | cpio | dd96882877721703e19272fe25034560b794061b | 24,835,193,342,354,740,000,000,000,000,000,000,000 | 20 | Rewrite dynamic string support.
* src/dstring.c (ds_init): Take a single argument.
(ds_free): New function.
(ds_resize): Take a single argument. Use x2nrealloc to expand
the storage.
(ds_reset,ds_append,ds_concat,ds_endswith): New function.
(ds_fgetstr): Rewrite. In particular, this fixes integer overflow.
* src/dstring.h (dynamic_string): Keep both the allocated length
(ds_size) and index of the next free byte in the string (ds_idx).
(ds_init,ds_resize): Change signature.
(ds_len): New macro.
(ds_free,ds_reset,ds_append,ds_concat,ds_endswith): New protos.
* src/copyin.c: Use new ds_ functions.
* src/copyout.c: Likewise.
* src/copypass.c: Likewise.
* src/util.c: Likewise. |
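The overflow-safe growth that x2nrealloc provides can be sketched like
this (a simplified stand-in, not gnulib's implementation):

#include <stddef.h>
#include <stdlib.h>

/* Geometric growth with a wraparound check: '*n * 2' on an unsigned
 * size can silently wrap, which is exactly the integer overflow the
 * old ds_fgetstr() suffered from. */
static void *grow_sketch(void *p, size_t *n)
{
    size_t newn = *n ? *n * 2 : 64;
    if (newn < *n || !(p = realloc(p, newn)))
        abort();
    *n = newn;
    return p;
}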
int snd_usb_init_sample_rate(struct snd_usb_audio *chip, int iface,
struct usb_host_interface *alts,
struct audioformat *fmt, int rate)
{
switch (fmt->protocol) {
case UAC_VERSION_1:
default:
return set_sample_rate_v1(chip, iface, alts, fmt, rate);
case UAC_VERSION_2:
return set_sample_rate_v2(chip, iface, alts, fmt, rate);
}
} | 0 | [] | sound | 447d6275f0c21f6cc97a88b3a0c601436a4cdf2a | 264,298,893,021,198,640,000,000,000,000,000,000,000 | 13 | ALSA: usb-audio: Add sanity checks for endpoint accesses
Add some sanity checks before actually accessing the endpoint via
get_endpoint() in order to avoid invalid accesses through a
malformed USB descriptor. Mostly just checking bNumEndpoints, but in
one place (snd_microii_spdif_default_get()), the validity of iface and
altsetting index is checked as well.
Bugzilla: https://bugzilla.suse.com/show_bug.cgi?id=971125
Cc: <[email protected]>
Signed-off-by: Takashi Iwai <[email protected]> |
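The core of the added checks fits in a line or two; a sketch with a
stand-in descriptor type:

struct alts_sketch { unsigned bNumEndpoints; /* ... */ };

/* A malformed descriptor may declare zero endpoints, so verify the
 * index before anything like get_endpoint(alts, ep) dereferences it. */
static int endpoint_index_ok_sketch(const struct alts_sketch *alts,
                                    unsigned ep)
{
    return alts && ep < alts->bNumEndpoints;
}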
GF_Err ilst_dump(GF_Box *a, FILE * trace)
{
u32 i;
GF_Box *tag;
GF_Err e;
GF_ItemListBox *ptr;
ptr = (GF_ItemListBox *)a;
gf_isom_box_dump_start(a, "ItemListBox", trace);
fprintf(trace, ">\n");
i=0;
while ( (tag = (GF_Box*)gf_list_enum(ptr->other_boxes, &i))) {
e = ilst_item_dump(tag, trace);
if(e) return e;
}
gf_isom_box_dump_done("ItemListBox", NULL, trace);
return GF_OK;
} | 0 | [
"CWE-125"
] | gpac | bceb03fd2be95097a7b409ea59914f332fb6bc86 | 154,838,889,070,897,350,000,000,000,000,000,000,000 | 18 | fixed 2 possible heap overflows (inc. #1088) |
QPDF::addPageAt(QPDFObjectHandle newpage, bool before,
QPDFObjectHandle refpage)
{
int refpos = findPage(refpage);
if (! before)
{
++refpos;
}
insertPage(newpage, refpos);
} | 0 | [
"CWE-787"
] | qpdf | d71f05ca07eb5c7cfa4d6d23e5c1f2a800f52e8e | 269,007,090,546,389,200,000,000,000,000,000,000,000 | 10 | Fix sign and conversion warnings (major)
This makes all integer type conversions that have potential data loss
explicit with calls that do range checks and raise an exception. After
this commit, qpdf builds with no warnings when -Wsign-conversion
-Wconversion is used with gcc or clang or when -W3 -Wd4800 is used
with MSVC. This significantly reduces the likelihood of potential
crashes from bogus integer values.
There are some parts of the code that take int when they should take
size_t or an offset. Such places would make qpdf not support files
with more than 2^31 of something that usually wouldn't be so large. In
the event that such a file shows up and is valid, at least qpdf would
raise an error in the right spot so the issue could be legitimately
addressed rather than failing in some weird way because of a silent
overflow condition. |
HttpStateData::~HttpStateData()
{
/*
* don't forget that ~Client() gets called automatically
*/
if (httpChunkDecoder)
delete httpChunkDecoder;
cbdataReferenceDone(_peer);
debugs(11,5, HERE << "HttpStateData " << this << " destroyed; " << serverConnection);
} | 0 | [
"CWE-444"
] | squid | fd68382860633aca92065e6c343cfd1b12b126e7 | 205,898,878,641,670,400,000,000,000,000,000,000,000 | 13 | Improve Transfer-Encoding handling (#702)
Reject messages containing a Transfer-Encoding header with a coding other
than chunked or identity. Squid does not support other codings.
For simplicity and security sake, also reject messages where
Transfer-Encoding contains unnecessary complex values that are
technically equivalent to "chunked" or "identity" (e.g., ",,chunked" or
"identity, chunked").
RFC 7230 formally deprecated and removed identity coding, but it is
still used by some agents. |
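A sketch of the stricter policy on a single already-extracted token
(Squid itself is C++ and parses the whole header list; this only shows
the accept set):

#include <string.h>

/* Accept exactly "chunked" or "identity"; unknown codings and the
 * technically-equivalent complex forms (",,chunked",
 * "identity, chunked") are rejected upstream of this check. */
static int te_token_ok_sketch(const char *token)
{
    return strcmp(token, "chunked") == 0 ||
           strcmp(token, "identity") == 0;
}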
static void sysctl_head_finish(struct ctl_table_header *head)
{
if (!head)
return;
spin_lock(&sysctl_lock);
unuse_table(head);
spin_unlock(&sysctl_lock);
} | 0 | [
"CWE-20",
"CWE-399"
] | linux | 93362fa47fe98b62e4a34ab408c4a418432e7939 | 134,940,469,098,799,600,000,000,000,000,000,000,000 | 8 | sysctl: Drop reference added by grab_header in proc_sys_readdir
Fixes CVE-2016-9191: proc_sys_readdir doesn't drop the reference
added by grab_header when returning from the !dir_emit_dots path.
It can cause any path that calls unregister_sysctl_table to
wait forever.
The calltrace of CVE-2016-9191:
[ 5535.960522] Call Trace:
[ 5535.963265] [<ffffffff817cdaaf>] schedule+0x3f/0xa0
[ 5535.968817] [<ffffffff817d33fb>] schedule_timeout+0x3db/0x6f0
[ 5535.975346] [<ffffffff817cf055>] ? wait_for_completion+0x45/0x130
[ 5535.982256] [<ffffffff817cf0d3>] wait_for_completion+0xc3/0x130
[ 5535.988972] [<ffffffff810d1fd0>] ? wake_up_q+0x80/0x80
[ 5535.994804] [<ffffffff8130de64>] drop_sysctl_table+0xc4/0xe0
[ 5536.001227] [<ffffffff8130de17>] drop_sysctl_table+0x77/0xe0
[ 5536.007648] [<ffffffff8130decd>] unregister_sysctl_table+0x4d/0xa0
[ 5536.014654] [<ffffffff8130deff>] unregister_sysctl_table+0x7f/0xa0
[ 5536.021657] [<ffffffff810f57f5>] unregister_sched_domain_sysctl+0x15/0x40
[ 5536.029344] [<ffffffff810d7704>] partition_sched_domains+0x44/0x450
[ 5536.036447] [<ffffffff817d0761>] ? __mutex_unlock_slowpath+0x111/0x1f0
[ 5536.043844] [<ffffffff81167684>] rebuild_sched_domains_locked+0x64/0xb0
[ 5536.051336] [<ffffffff8116789d>] update_flag+0x11d/0x210
[ 5536.057373] [<ffffffff817cf61f>] ? mutex_lock_nested+0x2df/0x450
[ 5536.064186] [<ffffffff81167acb>] ? cpuset_css_offline+0x1b/0x60
[ 5536.070899] [<ffffffff810fce3d>] ? trace_hardirqs_on+0xd/0x10
[ 5536.077420] [<ffffffff817cf61f>] ? mutex_lock_nested+0x2df/0x450
[ 5536.084234] [<ffffffff8115a9f5>] ? css_killed_work_fn+0x25/0x220
[ 5536.091049] [<ffffffff81167ae5>] cpuset_css_offline+0x35/0x60
[ 5536.097571] [<ffffffff8115aa2c>] css_killed_work_fn+0x5c/0x220
[ 5536.104207] [<ffffffff810bc83f>] process_one_work+0x1df/0x710
[ 5536.110736] [<ffffffff810bc7c0>] ? process_one_work+0x160/0x710
[ 5536.117461] [<ffffffff810bce9b>] worker_thread+0x12b/0x4a0
[ 5536.123697] [<ffffffff810bcd70>] ? process_one_work+0x710/0x710
[ 5536.130426] [<ffffffff810c3f7e>] kthread+0xfe/0x120
[ 5536.135991] [<ffffffff817d4baf>] ret_from_fork+0x1f/0x40
[ 5536.142041] [<ffffffff810c3e80>] ? kthread_create_on_node+0x230/0x230
One cgroup maintainer mentioned that "cgroup is trying to offline
a cpuset css, which takes place under cgroup_mutex. The offlining
ends up trying to drain active usages of a sysctl table which apparently
is not happening."
The real reason is that proc_sys_readdir doesn't drop reference added
by grab_header when return from !dir_emit_dots path. So this cpuset
offline path will wait here forever.
See here for details: http://www.openwall.com/lists/oss-security/2016/11/04/13
Fixes: f0c3b5093add ("[readdir] convert procfs")
Cc: [email protected]
Reported-by: CAI Qian <[email protected]>
Tested-by: Yang Shukui <[email protected]>
Signed-off-by: Zhou Chengming <[email protected]>
Acked-by: Al Viro <[email protected]>
Signed-off-by: Eric W. Biederman <[email protected]> |
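The pairing rule the fix restores, in miniature (illustrative names):

struct head_sketch { int refs; };

static void grab_header_sketch(struct head_sketch *h) { h->refs++; }
static void head_finish_sketch(struct head_sketch *h) { h->refs--; }

/* Every early return between grab and the normal exit must drop the
 * reference -- the !dir_emit_dots path was the one that didn't. */
static int readdir_sketch(struct head_sketch *h, int dots_emitted)
{
    grab_header_sketch(h);
    if (!dots_emitted) {
        head_finish_sketch(h);  /* this drop was missing */
        return 0;
    }
    /* ... emit directory entries ... */
    head_finish_sketch(h);
    return 0;
}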
methodHandle LinkResolver::linktime_resolve_static_method(const LinkInfo& link_info, TRAPS) {
Klass* resolved_klass = link_info.resolved_klass();
methodHandle resolved_method;
if (!resolved_klass->is_interface()) {
resolved_method = resolve_method(link_info, Bytecodes::_invokestatic, CHECK_NULL);
} else {
resolved_method = resolve_interface_method(link_info, Bytecodes::_invokestatic, CHECK_NULL);
}
assert(resolved_method->name() != vmSymbols::class_initializer_name(), "should have been checked in verifier");
// check if static
if (!resolved_method->is_static()) {
ResourceMark rm(THREAD);
stringStream ss;
ss.print("Expected static method '");
resolved_method()->print_external_name(&ss);
ss.print("'");
THROW_MSG_NULL(vmSymbols::java_lang_IncompatibleClassChangeError(), ss.as_string());
}
return resolved_method;
} | 0 | [] | jdk11u | 132745902a4601dc64b2c8ca112ca30292feccb4 | 250,851,980,832,086,000,000,000,000,000,000,000,000 | 22 | 8281866: Enhance MethodHandle invocations
Reviewed-by: mbaesken
Backport-of: d974d9da365f787f67971d88c79371c8b0769f75 |
static int vfio_pci_get_devs(struct pci_dev *pdev, void *data)
{
struct vfio_devices *devs = data;
struct vfio_device *device;
if (devs->cur_index == devs->max_index)
return -ENOSPC;
device = vfio_device_get_from_dev(&pdev->dev);
if (!device)
return -EINVAL;
if (pci_dev_driver(pdev) != &vfio_pci_driver) {
vfio_device_put(device);
return -EBUSY;
}
devs->devices[devs->cur_index++] = device;
return 0;
} | 0 | [
"CWE-399",
"CWE-190"
] | linux | 05692d7005a364add85c6e25a6c4447ce08f913a | 307,734,523,097,462,950,000,000,000,000,000,000,000 | 20 | vfio/pci: Fix integer overflows, bitmask check
The VFIO_DEVICE_SET_IRQS ioctl did not sufficiently sanitize
user-supplied integers, potentially allowing memory corruption. This
patch adds appropriate integer overflow checks, checks the range bounds
for VFIO_IRQ_SET_DATA_NONE, and also verifies that only single element
in the VFIO_IRQ_SET_DATA_TYPE_MASK bitmask is set.
VFIO_IRQ_SET_ACTION_TYPE_MASK is already correctly checked later in
vfio_pci_set_irqs_ioctl().
Furthermore, a kzalloc is changed to a kcalloc because the use of a
kzalloc with an integer multiplication allowed an integer overflow
condition to be reached without this patch. kcalloc checks for overflow
and should prevent a similar occurrence.
Signed-off-by: Vlad Tsyrklevich <[email protected]>
Signed-off-by: Alex Williamson <[email protected]> |
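A minimal sketch of the kind of sanitization this message describes — checking that a user-supplied (start, count) pair neither overflows nor exceeds the backing range, and that exactly one data-type bit is set. The constant and function names are illustrative assumptions, not the real vfio interface.

#include <stdint.h>
#include <errno.h>

#define DATA_TYPE_MASK 0x7u   /* assumed three data-type flag bits */

/* Returns 0 only if exactly one data-type flag is set and
 * [start, start + count) fits in max_index without wrapping. */
static int validate_irq_set(uint32_t start, uint32_t count,
                            uint32_t max_index, uint32_t flags)
{
    uint32_t dtype = flags & DATA_TYPE_MASK;

    if (dtype == 0 || (dtype & (dtype - 1)) != 0)
        return -EINVAL;                    /* zero or multiple bits set */
    if (start >= max_index || count > max_index - start)
        return -EINVAL;                    /* out of range or overflow */
    return 0;
}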
static void kvm_resume(void)
{
if (kvm_usage_count) {
lockdep_assert_not_held(&kvm_count_lock);
hardware_enable_nolock(NULL);
    }
} | 0 | [
"CWE-459"
] | linux | 683412ccf61294d727ead4a73d97397396e69a6b | 121,231,861,138,254,350,000,000,000,000,000,000,000 | 7 | KVM: SEV: add cache flush to solve SEV cache incoherency issues
Flush the CPU caches when memory is reclaimed from an SEV guest (where
reclaim also includes it being unmapped from KVM's memslots). Due to lack
of coherency for SEV encrypted memory, failure to flush results in silent
data corruption if userspace is malicious/broken and doesn't ensure SEV
guest memory is properly pinned and unpinned.
Cache coherency is not enforced across the VM boundary in SEV (AMD APM
vol.2 Section 15.34.7). Confidential cachelines generated by confidential
VM guests have to be explicitly flushed on the host side. If a memory page
containing dirty confidential cachelines was released by the VM and reallocated
to another user, the cachelines may corrupt the new user at a later time.
KVM takes a shortcut by assuming all confidential memory remains pinned
until the end of the VM's lifetime. Therefore, KVM does not flush the cache at
mmu_notifier invalidation events. Because of this incorrect assumption and
the lack of cache flushing, malicious userspace can crash the host kernel
by creating a malicious VM and continuously allocating/releasing unpinned
confidential memory pages while the VM is running.
Add cache flush operations to mmu_notifier operations to ensure that any
physical memory leaving the guest VM get flushed. In particular, hook
mmu_notifier_invalidate_range_start and mmu_notifier_release events and
flush the cache accordingly. The hooks run after releasing the mmu lock to avoid
contention with other vCPUs.
Cc: [email protected]
Suggested-by: Sean Christpherson <[email protected]>
Reported-by: Mingwei Zhang <[email protected]>
Signed-off-by: Mingwei Zhang <[email protected]>
Message-Id: <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]> |
TTF_Font* TTF_OpenFontDPIRW( SDL_RWops *src, int freesrc, int ptsize, unsigned int hdpi, unsigned int vdpi )
{
    return TTF_OpenFontIndexDPIRW(src, freesrc, ptsize, 0, hdpi, vdpi);
} | 0 | [
"CWE-190",
"CWE-787"
] | SDL_ttf | db1b41ab8bde6723c24b866e466cad78c2fa0448 | 160,811,257,330,060,600,000,000,000,000,000,000,000 | 4 | More integer overflow (see bug #187)
Make sure that 'width + alignment' doesn't overflow; otherwise
it could create an SDL_Surface of 'width' but with the wrong 'pitch' |
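The guard this message asks for can be written as below; it assumes 'alignment' is a power-of-two-minus-one mask (e.g. 3 for 4-byte alignment), which is an illustrative assumption rather than SDL_ttf's actual code.

#include <limits.h>

/* Fail before 'width + alignment' can wrap, instead of producing a
 * surface whose pitch disagrees with its width. */
static int checked_pitch(int width, int alignment, int *pitch_out)
{
    if (width < 0 || alignment < 0 || width > INT_MAX - alignment)
        return -1;                         /* the sum would overflow */
    *pitch_out = (width + alignment) & ~alignment;
    return 0;
}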
static float expandFloats(unsigned char * dst, int tileWidth, int bytesps) {
float max = 0.f;
if (bytesps == 2) {
uint16_t * dst16 = (ushort *) dst;
uint32_t * dst32 = (unsigned int *) dst;
float *f32 = (float*) dst;
for (int index = tileWidth - 1; index >= 0; --index) {
dst32[index] = __DNG_HalfToFloat(dst16[index]);
max = MAX(max,f32[index]);
}
}
else if (bytesps == 3)
{
uint8_t * dst8 = ((unsigned char *) dst) + (tileWidth - 1) * 3;
uint32_t * dst32 = (unsigned int *) dst;
float *f32 = (float*) dst;
for (int index = tileWidth - 1; index >= 0; --index, dst8 -= 3) {
dst32[index] = __DNG_FP24ToFloat(dst8);
max = MAX(max,f32[index]);
}
}
else if (bytesps==4)
{
float *f32 = (float*) dst;
for (int index = 0; index < tileWidth; index++)
max = MAX(max,f32[index]);
}
return max;
} | 0 | [
"CWE-787"
] | LibRaw | 8682ad204392b914ab1cc6ebcca9c27c19c1a4b4 | 30,637,818,286,940,185,000,000,000,000,000,000,000 | 29 | 0.18.17 |
tiff_document_get_page_label (EvDocument *document,
EvPage *page)
{
TiffDocument *tiff_document = TIFF_DOCUMENT (document);
static gchar *label;
if (TIFFGetField (tiff_document->tiff, TIFFTAG_PAGENAME, &label) &&
g_utf8_validate (label, -1, NULL)) {
return g_strdup (label);
}
return NULL;
} | 0 | [
"CWE-754"
] | evince | 234f034a4d15cd46dd556f4945f99fbd57ef5f15 | 157,885,279,504,006,120,000,000,000,000,000,000,000 | 13 | tiff: Handle failure from TIFFReadRGBAImageOriented
The TIFFReadRGBAImageOriented function returns zero if it was unable to
read the image. Return NULL in this case instead of displaying
uninitialized memory.
Fixes #1129 |
void ListenerImpl::debugLog(const std::string& message) {
UNREFERENCED_PARAMETER(message);
ENVOY_LOG(debug, "{}: name={}, hash={}, address={}", message, name_, hash_, address_->asString());
} | 0 | [
"CWE-400"
] | envoy | dfddb529e914d794ac552e906b13d71233609bf7 | 101,820,743,367,000,280,000,000,000,000,000,000,000 | 4 | listener: Add configurable accepted connection limits (#153)
Add support for per-listener limits on accepted connections.
Signed-off-by: Tony Allen <[email protected]> |
int bind_evtchn_to_irqhandler_lateeoi(evtchn_port_t evtchn,
irq_handler_t handler,
unsigned long irqflags,
const char *devname, void *dev_id)
{
return bind_evtchn_to_irqhandler_chip(evtchn, handler, irqflags,
devname, dev_id,
&xen_lateeoi_chip);
} | 0 | [
"CWE-400",
"CWE-703"
] | linux | e99502f76271d6bc4e374fe368c50c67a1fd3070 | 289,521,858,280,065,070,000,000,000,000,000,000,000 | 9 | xen/events: defer eoi in case of excessive number of events
In case rogue guests are sending events at high frequency, it might
happen that xen_evtchn_do_upcall() won't stop processing events in
dom0. As this is done in irq handling, a crash might be the result.
In order to avoid that, delay further inter-domain events after some
time in xen_evtchn_do_upcall() by forcing eoi processing into a
worker on the same cpu, thus inhibiting new events coming in.
The time after which eoi processing is to be delayed is configurable
via a new module parameter "event_loop_timeout" which specifies the
maximum event loop time in jiffies (default: 2, the value was chosen
after some tests showing that a value of 2 was the lowest with an
only slight drop of dom0 network throughput while multiple guests
performed an event storm).
How long eoi processing will be delayed can be specified via another
parameter "event_eoi_delay" (again in jiffies, default 10, again the
value was chosen after testing with different delay values).
This is part of XSA-332.
Cc: [email protected]
Reported-by: Julien Grall <[email protected]>
Signed-off-by: Juergen Gross <[email protected]>
Reviewed-by: Stefano Stabellini <[email protected]>
Reviewed-by: Wei Liu <[email protected]> |
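The bounded-loop idea in this message — stop inline event processing after a time budget and let a worker resume later — can be sketched in portable C; the callback-based shape below is an assumption for illustration, not the Xen code.

#include <stdbool.h>
#include <time.h>

/* Process events until drained or until max_seconds has elapsed;
 * on timeout, hand the remainder to a deferred worker so a rogue
 * event source cannot pin this context forever. */
static bool process_events_bounded(bool (*pending)(void),
                                   void (*handle_one)(void),
                                   void (*defer_rest)(void),
                                   double max_seconds)
{
    clock_t start = clock();

    while (pending()) {
        double elapsed = (double)(clock() - start) / CLOCKS_PER_SEC;
        if (elapsed >= max_seconds) {
            defer_rest();      /* eoi handled later by the worker */
            return false;      /* not drained inline */
        }
        handle_one();
    }
    return true;
}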
int luaV_tointegerns (const TValue *obj, lua_Integer *p, F2Imod mode) {
if (ttisfloat(obj))
return luaV_flttointeger(fltvalue(obj), p, mode);
else if (ttisinteger(obj)) {
*p = ivalue(obj);
return 1;
}
else
return 0;
} | 0 | [
"CWE-416",
"CWE-125",
"CWE-787"
] | lua | eb41999461b6f428186c55abd95f4ce1a76217d5 | 332,777,130,960,572,420,000,000,000,000,000,000,000 | 10 | Fixed bugs of stack reallocation x GC
Macro 'checkstackGC' was doing a GC step after resizing the stack;
the GC could shrink the stack and undo the resize. Moreover, macro
'checkstackp' also does a GC step, which could remove the preallocated
CallInfo when calling a function. (Its name has been changed to
'checkstackGCp' to emphasize that it calls the GC.) |
static int have_same_root(const char *path1, const char *path2)
{
int is_abs1, is_abs2;
is_abs1 = is_absolute_path(path1);
is_abs2 = is_absolute_path(path2);
return (is_abs1 && is_abs2 && tolower(path1[0]) == tolower(path2[0])) ||
(!is_abs1 && !is_abs2);
} | 0 | [
"CWE-125"
] | git | 11a9f4d807a0d71dc6eff51bb87baf4ca2cccf1d | 155,962,297,001,394,540,000,000,000,000,000,000,000 | 9 | is_ntfs_dotgit: use a size_t for traversing string
We walk through the "name" string using an int, which can
wrap to a negative value and cause us to read random memory
before our array (e.g., by creating a tree with a name >2GB,
since "int" is still 32 bits even on most 64-bit platforms).
Worse, this is easy to trigger during the fsck_tree() check,
which is supposed to be protecting us from malicious
garbage.
Note one bit of trickiness in the existing code: we
sometimes assign -1 to "len" at the end of the loop, and
then rely on the "len++" in the for-loop's increment to take
it back to 0. This is still legal with a size_t, since
assigning -1 will turn into SIZE_MAX, which then wraps
around to 0 on increment.
Signed-off-by: Jeff King <[email protected]> |
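The core of the fix is the index type; a sketch of the safe form, with a hypothetical helper rather than git's actual function:

#include <stddef.h>

/* Index with size_t, not int: a name longer than INT_MAX can make an
 * int index wrap negative and read memory before the array. */
static int only_spaces_and_dots(const char *name, size_t len)
{
    for (size_t i = 0; i < len; i++) {
        if (name[i] != ' ' && name[i] != '.')
            return 0;
    }
    return 1;
}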
void quicklistPush(quicklist *quicklist, void *value, const size_t sz,
int where) {
if (where == QUICKLIST_HEAD) {
quicklistPushHead(quicklist, value, sz);
} else if (where == QUICKLIST_TAIL) {
quicklistPushTail(quicklist, value, sz);
}
} | 0 | [
"CWE-190"
] | redis | f6a40570fa63d5afdd596c78083d754081d80ae3 | 21,351,405,856,108,910,000,000,000,000,000,000,000 | 8 | Fix ziplist and listpack overflows and truncations (CVE-2021-32627, CVE-2021-32628)
- fix possible heap corruption in ziplist and listpack resulting by trying to
allocate more than the maximum size of 4GB.
- prevent ziplist (hash and zset) from reaching a size above 1GB; it will be
converted to HT encoding, since that's not a useful size.
- prevent listpack (stream) from reaching a size above 1GB.
- XADD will start a new listpack if the new record may cause the previous
listpack to grow over 1GB.
- XADD will respond with an error if a single stream record is over 1GB
- List type (ziplist in quicklist) was truncating strings that were over 4GB;
now it'll respond with an error. |
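The pre-growth guard described above can be sketched as follows; the 1GB constant matches the message, while the function name is hypothetical.

#include <stddef.h>

#define LIST_MAX_SAFETY ((size_t)1 << 30)   /* 1GB cap per the message */

/* Refuse a grow that would cross the cap (or wrap size_t), so the
 * caller starts a new node / converts encodings instead. */
static int safe_to_grow(size_t cur_bytes, size_t add_bytes)
{
    if (cur_bytes > LIST_MAX_SAFETY)
        return 0;
    return add_bytes <= LIST_MAX_SAFETY - cur_bytes;   /* no wraparound */
}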
Continue(int lineno, int col_offset, int end_lineno, int end_col_offset,
PyArena *arena)
{
stmt_ty p;
p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p));
if (!p)
return NULL;
p->kind = Continue_kind;
p->lineno = lineno;
p->col_offset = col_offset;
p->end_lineno = end_lineno;
p->end_col_offset = end_col_offset;
return p;
} | 0 | [
"CWE-125"
] | cpython | dcfcd146f8e6fc5c2fc16a4c192a0c5f5ca8c53c | 251,712,637,691,102,970,000,000,000,000,000,000,000 | 14 | bpo-35766: Merge typed_ast back into CPython (GH-11645) |
static inline void tcp_advance_send_head(struct sock *sk, const struct sk_buff *skb)
{
if (tcp_skb_is_last(sk, skb))
sk->sk_send_head = NULL;
else
sk->sk_send_head = tcp_write_queue_next(sk, skb);
} | 0 | [
"CWE-416",
"CWE-269"
] | linux | bb1fceca22492109be12640d49f5ea5a544c6bb4 | 116,061,665,956,012,600,000,000,000,000,000,000,000 | 7 | tcp: fix use after free in tcp_xmit_retransmit_queue()
When tcp_sendmsg() allocates a fresh and empty skb, it puts it at the
tail of the write queue using tcp_add_write_queue_tail()
Then it attempts to copy user data into this fresh skb.
If the copy fails, we undo the work and remove the fresh skb.
Unfortunately, this undo lacks the change done to tp->highest_sack and
we can leave a dangling pointer (to a freed skb)
Later, tcp_xmit_retransmit_queue() can dereference this pointer and
access freed memory. For regular kernels where memory is not unmapped,
this might cause SACK bugs because tcp_highest_sack_seq() is buggy,
returning garbage instead of tp->snd_nxt, but with various debug
features like CONFIG_DEBUG_PAGEALLOC, this can crash the kernel.
This bug was found by Marco Grassi thanks to syzkaller.
Fixes: 6859d49475d4 ("[TCP]: Abstract tp->highest_sack accessing & point to next skb")
Reported-by: Marco Grassi <[email protected]>
Signed-off-by: Eric Dumazet <[email protected]>
Cc: Ilpo Järvinen <[email protected]>
Cc: Yuchung Cheng <[email protected]>
Cc: Neal Cardwell <[email protected]>
Acked-by: Neal Cardwell <[email protected]>
Reviewed-by: Cong Wang <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
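The general shape of the bug — a cached pointer into a list that the unlink/undo path forgets to reset — is easy to reproduce in miniature; this generic sketch is illustrative, not the TCP code.

#include <stddef.h>

struct node { struct node *next; };

struct queue {
    struct node *head;
    struct node *cached;   /* plays the role of tp->highest_sack */
};

/* When removing a node, any cached pointer to it must be advanced or
 * cleared first, or a later user dereferences freed memory. */
static struct node *unlink_head(struct queue *q)
{
    struct node *n = q->head;

    if (n == NULL)
        return NULL;
    if (q->cached == n)
        q->cached = n->next;   /* the step the buggy undo path skipped */
    q->head = n->next;
    return n;                  /* caller frees */
}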
PHP_FUNCTION(imagecreatefromjpeg)
{
_php_image_create_from(INTERNAL_FUNCTION_PARAM_PASSTHRU, PHP_GDIMG_TYPE_JPG, "JPEG", gdImageCreateFromJpeg, gdImageCreateFromJpegCtx);
} | 0 | [
"CWE-703",
"CWE-189"
] | php-src | 2938329ce19cb8c4197dec146c3ec887c6f61d01 | 243,720,637,148,851,100,000,000,000,000,000,000,000 | 4 | Fixed bug #66356 (Heap Overflow Vulnerability in imagecrop())
And also fixed the bug: arguments are altered after some calls |
static int upload_dsp_code(struct snd_card *card)
{
struct snd_msnd *chip = card->private_data;
const struct firmware *init_fw = NULL, *perm_fw = NULL;
int err;
outb(HPBLKSEL_0, chip->io + HP_BLKS);
err = request_firmware(&init_fw, INITCODEFILE, card->dev);
if (err < 0) {
printk(KERN_ERR LOGNAME ": Error loading " INITCODEFILE);
goto cleanup1;
}
err = request_firmware(&perm_fw, PERMCODEFILE, card->dev);
if (err < 0) {
printk(KERN_ERR LOGNAME ": Error loading " PERMCODEFILE);
goto cleanup;
}
memcpy_toio(chip->mappedbase, perm_fw->data, perm_fw->size);
if (snd_msnd_upload_host(chip, init_fw->data, init_fw->size) < 0) {
printk(KERN_WARNING LOGNAME ": Error uploading to DSP\n");
err = -ENODEV;
goto cleanup;
}
printk(KERN_INFO LOGNAME ": DSP firmware uploaded\n");
err = 0;
cleanup:
release_firmware(perm_fw);
cleanup1:
release_firmware(init_fw);
return err;
} | 0 | [
"CWE-125",
"CWE-401"
] | linux | 20e2b791796bd68816fa115f12be5320de2b8021 | 46,026,381,279,548,960,000,000,000,000,000,000,000 | 34 | ALSA: msnd: Optimize / harden DSP and MIDI loops
The ISA msnd drivers have loops that re-fetch the ring-buffer head, tail
and size values on every iteration. Such code is inefficient and
fragile.
This patch optimizes it and also adds a sanity check to avoid
endless loops.
Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=196131
Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=196133
Signed-off-by: Takashi Iwai <[email protected]> |
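A sketch of the hardened loop shape: snapshot the shared ring indices once and cap the iteration count, so corrupt values cannot spin forever. The parameter layout is an illustrative assumption, not the msnd driver's interface.

#include <stdint.h>

/* head/tail live in memory the device (or another side) can change;
 * read them once and bound the loop by the ring size. */
static int drain_ring(volatile uint16_t *head_p, volatile uint16_t *tail_p,
                      uint16_t size, void (*consume)(uint16_t idx))
{
    uint16_t head, tail;
    unsigned int budget;

    if (size == 0)
        return -1;
    head = *head_p;            /* snapshot, not re-read per iteration */
    tail = *tail_p;
    budget = size;             /* hard upper bound on iterations */

    while (tail != head && budget-- > 0) {
        consume(tail);
        tail = (uint16_t)((tail + 1) % size);
    }
    *tail_p = tail;
    return (tail == head) ? 0 : -1;   /* -1: gave up, indices corrupt */
}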
parser_module_handle_module_specifier (parser_context_t *context_p, /**< parser context */
ecma_module_node_t **node_list_p) /**< target node list */
{
if (context_p->token.type != LEXER_LITERAL
|| context_p->token.lit_location.type != LEXER_STRING_LITERAL
|| context_p->token.lit_location.length == 0)
{
parser_raise_error (context_p, PARSER_ERR_STRING_EXPECTED);
}
lexer_construct_literal_object (context_p, &context_p->token.lit_location, LEXER_STRING_LITERAL);
lexer_literal_t *path_p = context_p->lit_object.literal_p;
lexer_next_token (context_p);
/* The lexer_next_token may throw an error, so the path is constructed after its call. */
ecma_string_t *path_string_p = ecma_new_ecma_string_from_utf8 (path_p->u.char_p, path_p->prop.length);
ecma_module_node_t *node_p = JERRY_CONTEXT (module_current_p)->imports_p;
ecma_module_node_t *last_node_p = NULL;
/* Check if we have an import node with the same module request. */
while (node_p != NULL)
{
if (ecma_compare_ecma_strings (ecma_get_string_from_value (node_p->u.path_or_module), path_string_p))
{
ecma_deref_ecma_string (path_string_p);
break;
}
last_node_p = node_p;
node_p = node_p->next_p;
}
if (node_p == NULL)
{
node_p = (ecma_module_node_t *) jmem_heap_alloc_block_null_on_error (sizeof (ecma_module_node_t));
if (node_p == NULL)
{
ecma_deref_ecma_string (path_string_p);
parser_raise_error (context_p, PARSER_ERR_OUT_OF_MEMORY);
}
if (last_node_p == NULL)
{
JERRY_CONTEXT (module_current_p)->imports_p = node_p;
}
else
{
last_node_p->next_p = node_p;
}
node_p->next_p = NULL;
node_p->module_names_p = NULL;
node_p->u.path_or_module = ecma_make_string_value (path_string_p);
}
/* Append to imports. */
if (node_list_p == NULL)
{
parser_module_append_names (context_p, &node_p->module_names_p);
return;
}
ecma_value_t *module_object_p = &node_p->u.path_or_module;
node_p = *node_list_p;
last_node_p = NULL;
while (node_p != NULL)
{
if (node_p->u.module_object_p == module_object_p)
{
parser_module_append_names (context_p, &node_p->module_names_p);
return;
}
last_node_p = node_p;
node_p = node_p->next_p;
}
node_p = (ecma_module_node_t *) parser_malloc (context_p, sizeof (ecma_module_node_t));
if (last_node_p == NULL)
{
*node_list_p = node_p;
}
else
{
last_node_p->next_p = node_p;
}
node_p->next_p = NULL;
node_p->module_names_p = context_p->module_names_p;
node_p->u.module_object_p = module_object_p;
context_p->module_names_p = NULL;
} /* parser_module_handle_module_specifier */ | 1 | [
"CWE-416"
] | jerryscript | 3bcd48f72d4af01d1304b754ef19fe1a02c96049 | 263,449,273,218,103,500,000,000,000,000,000,000,000 | 101 | Improve parse_identifier (#4691)
ASCII string length is no longer computed during string allocation.
JerryScript-DCO-1.0-Signed-off-by: Daniel Batiz [email protected] |
cmd_boolean (const char *com, const char *val, void *place)
{
bool value;
if (CMP2 (val, 'o', 'n') || CMP3 (val, 'y', 'e', 's') || CMP1 (val, '1'))
/* "on", "yes" and "1" mean true. */
value = true;
else if (CMP3 (val, 'o', 'f', 'f') || CMP2 (val, 'n', 'o') || CMP1 (val, '0'))
/* "off", "no" and "0" mean false. */
value = false;
else
{
fprintf (stderr,
_("%s: %s: Invalid boolean %s; use `on' or `off'.\n"),
exec_name, com, quote (val));
return false;
}
*(bool *) place = value;
return true;
} | 0 | [
"CWE-22"
] | wget | 18b0979357ed7dc4e11d4f2b1d7e0f5932d82aa7 | 30,386,328,771,278,634,000,000,000,000,000,000,000 | 21 | CVE-2014-4877: Arbitrary Symlink Access
Wget was susceptible to a symlink attack which could create arbitrary
files, directories or symbolic links and set their permissions when
retrieving a directory recursively through FTP. This commit changes the
default settings in Wget such that Wget no longer creates local symbolic
links, but rather traverses them and retrieves the pointed-to file in
such a retrieval.
The old behaviour can be attained by passing the --retr-symlinks=no
option to the Wget invocation command.
static struct qeth_reply *qeth_alloc_reply(struct qeth_card *card)
{
struct qeth_reply *reply;
reply = kzalloc(sizeof(struct qeth_reply), GFP_ATOMIC);
if (reply) {
atomic_set(&reply->refcnt, 1);
atomic_set(&reply->received, 0);
reply->card = card;
}
return reply;
} | 0 | [
"CWE-200",
"CWE-119"
] | linux | 6fb392b1a63ae36c31f62bc3fc8630b49d602b62 | 66,853,545,494,725,320,000,000,000,000,000,000,000 | 12 | qeth: avoid buffer overflow in snmp ioctl
Check the user-defined length in the snmp ioctl request and allow the request
only if it fits into a qeth command buffer.
Signed-off-by: Ursula Braun <[email protected]>
Signed-off-by: Frank Blaschka <[email protected]>
Reviewed-by: Heiko Carstens <[email protected]>
Reported-by: Nico Golde <[email protected]>
Reported-by: Fabian Yamaguchi <[email protected]>
Cc: <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
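The length check this message describes reduces to validating a user-declared payload size against a fixed command buffer before any copy; the signature below is a hypothetical reduction of the qeth case.

#include <stddef.h>
#include <string.h>

/* Returns 0 and copies only when header + user payload fit in the
 * fixed buffer; ordering the checks this way avoids size_t underflow. */
static int copy_snmp_req(void *cmd_buf, size_t buf_len, size_t hdr_len,
                         const void *user_data, size_t user_len)
{
    if (hdr_len > buf_len || user_len > buf_len - hdr_len)
        return -1;                     /* request larger than buffer */
    memcpy((char *)cmd_buf + hdr_len, user_data, user_len);
    return 0;
}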
static void
malloc_printerr (const char *str)
{
__libc_message (do_abort, "%s\n", str);
  __builtin_unreachable ();
} | 0 | [
"CWE-787"
] | glibc | d6db68e66dff25d12c3bc5641b60cbd7fb6ab44f | 32,750,919,066,381,863,000,000,000,000,000,000,000 | 5 | malloc: Mitigate null-byte overflow attacks
* malloc/malloc.c (_int_free): Check for corrupt prev_size vs size.
(malloc_consolidate): Likewise. |
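The prev_size-vs-size consistency check can be illustrated on a simplified chunk header (real glibc chunks carry flag bits in the size field, which this sketch ignores):

#include <stddef.h>

struct chunk_hdr { size_t prev_size; size_t size; };

/* A single-NUL overflow that shrinks prev_size no longer lines up
 * with the previous chunk's own size field, so corruption is caught
 * before consolidation instead of after. */
static int prev_chunk_consistent(const struct chunk_hdr *p)
{
    const struct chunk_hdr *prev =
        (const struct chunk_hdr *)((const char *)p - p->prev_size);
    return prev->size == p->prev_size;
}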
e1000e_autoneg_timer(void *opaque)
{
E1000ECore *core = opaque;
if (!qemu_get_queue(core->owner_nic)->link_down) {
e1000x_update_regs_on_autoneg_done(core->mac, core->phy[0]);
e1000e_start_recv(core);
e1000e_update_flowctl_status(core);
/* signal link status change to the guest */
e1000e_set_interrupt_cause(core, E1000_ICR_LSC);
}
} | 0 | [
"CWE-835"
] | qemu | 4154c7e03fa55b4cf52509a83d50d6c09d743b77 | 281,138,791,415,921,700,000,000,000,000,000,000,000 | 12 | net: e1000e: fix an infinite loop issue
This issue is like the issue in the e1000 network card addressed in
this commit:
e1000: eliminate infinite loops on out-of-bounds transfer start.
Signed-off-by: Li Qiang <[email protected]>
Reviewed-by: Dmitry Fleytman <[email protected]>
Signed-off-by: Jason Wang <[email protected]> |
static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
{
int i;
struct msr_autoload *m = &vmx->msr_autoload;
switch (msr) {
case MSR_EFER:
if (cpu_has_load_ia32_efer()) {
clear_atomic_switch_msr_special(vmx,
VM_ENTRY_LOAD_IA32_EFER,
VM_EXIT_LOAD_IA32_EFER);
return;
}
break;
case MSR_CORE_PERF_GLOBAL_CTRL:
if (cpu_has_load_perf_global_ctrl()) {
clear_atomic_switch_msr_special(vmx,
VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL);
return;
}
break;
}
i = vmx_find_loadstore_msr_slot(&m->guest, msr);
if (i < 0)
goto skip_guest;
--m->guest.nr;
m->guest.val[i] = m->guest.val[m->guest.nr];
vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr);
skip_guest:
i = vmx_find_loadstore_msr_slot(&m->host, msr);
if (i < 0)
return;
--m->host.nr;
m->host.val[i] = m->host.val[m->host.nr];
vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr);
} | 0 | [
"CWE-787"
] | linux | 04c4f2ee3f68c9a4bf1653d15f1a9a435ae33f7a | 130,632,686,056,716,180,000,000,000,000,000,000,000 | 39 | KVM: VMX: Don't use vcpu->run->internal.ndata as an array index
__vmx_handle_exit() uses vcpu->run->internal.ndata as an index for
an array access. Since vcpu->run is (can be) mapped to a user address
space with a writer permission, the 'ndata' could be updated by the
user process at anytime (the user process can set it to outside the
bounds of the array).
So, it is not safe that __vmx_handle_exit() uses the 'ndata' that way.
Fixes: 1aa561b1a4c0 ("kvm: x86: Add "last CPU" to some KVM_EXIT information")
Signed-off-by: Reiji Watanabe <[email protected]>
Reviewed-by: Jim Mattson <[email protected]>
Message-Id: <[email protected]>
Cc: [email protected]
Signed-off-by: Paolo Bonzini <[email protected]> |
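The safe pattern for an index living in user-mapped memory is to read it once into a local and bound it before any array access; a minimal sketch with an assumed array size:

#include <stdint.h>

#define NDATA_MAX 16u   /* assumed capacity of the exit-info array */

/* 'shared_ndata' can be rewritten by userspace at any time; never use
 * it directly as an index. */
static int append_exit_info(volatile uint32_t *shared_ndata,
                            uint64_t data[NDATA_MAX], uint64_t value)
{
    uint32_t n = *shared_ndata;    /* single read into a local copy */

    if (n >= NDATA_MAX)
        return -1;                 /* would index past the array */
    data[n] = value;
    *shared_ndata = n + 1;
    return 0;
}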
static bool io_wq_for_each_worker(struct io_wqe *wqe,
bool (*func)(struct io_worker *, void *),
void *data)
{
struct io_worker *worker;
bool ret = false;
list_for_each_entry_rcu(worker, &wqe->all_list, all_list) {
if (io_worker_get(worker)) {
/* no task if node is/was offline */
if (worker->task)
ret = func(worker, data);
io_worker_release(worker);
if (ret)
break;
}
}
return ret;
} | 0 | [
"CWE-200"
] | linux | 713b9825a4c47897f66ad69409581e7734a8728e | 269,685,787,186,402,200,000,000,000,000,000,000,000 | 20 | io-wq: fix cancellation on create-worker failure
WARNING: CPU: 0 PID: 10392 at fs/io_uring.c:1151 req_ref_put_and_test
fs/io_uring.c:1151 [inline]
WARNING: CPU: 0 PID: 10392 at fs/io_uring.c:1151 req_ref_put_and_test
fs/io_uring.c:1146 [inline]
WARNING: CPU: 0 PID: 10392 at fs/io_uring.c:1151
io_req_complete_post+0xf5b/0x1190 fs/io_uring.c:1794
Modules linked in:
Call Trace:
tctx_task_work+0x1e5/0x570 fs/io_uring.c:2158
task_work_run+0xe0/0x1a0 kernel/task_work.c:164
tracehook_notify_signal include/linux/tracehook.h:212 [inline]
handle_signal_work kernel/entry/common.c:146 [inline]
exit_to_user_mode_loop kernel/entry/common.c:172 [inline]
exit_to_user_mode_prepare+0x232/0x2a0 kernel/entry/common.c:209
__syscall_exit_to_user_mode_work kernel/entry/common.c:291 [inline]
syscall_exit_to_user_mode+0x19/0x60 kernel/entry/common.c:302
do_syscall_64+0x42/0xb0 arch/x86/entry/common.c:86
entry_SYSCALL_64_after_hwframe+0x44/0xae
When io_wqe_enqueue() -> io_wqe_create_worker() fails, we can't just
call io_run_cancel() to clean up the request; it's already enqueued via
io_wqe_insert_work() and will be executed either by some other worker
during cancellation (e.g. in io_wq_put_and_exit()).
Reported-by: Hao Sun <[email protected]>
Fixes: 3146cba99aa28 ("io-wq: make worker creation resilient against signals")
Signed-off-by: Pavel Begunkov <[email protected]>
Link: https://lore.kernel.org/r/93b9de0fcf657affab0acfd675d4abcd273ee863.1631092071.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <[email protected]> |
static void steal_suitable_fallback(struct zone *zone, struct page *page,
int start_type, bool whole_block)
{
unsigned int current_order = page_order(page);
struct free_area *area;
int free_pages, movable_pages, alike_pages;
int old_block_type;
old_block_type = get_pageblock_migratetype(page);
/*
* This can happen due to races and we want to prevent broken
* highatomic accounting.
*/
if (is_migrate_highatomic(old_block_type))
goto single_page;
/* Take ownership for orders >= pageblock_order */
if (current_order >= pageblock_order) {
change_pageblock_range(page, current_order, start_type);
goto single_page;
}
/* We are not allowed to try stealing from the whole block */
if (!whole_block)
goto single_page;
free_pages = move_freepages_block(zone, page, start_type,
&movable_pages);
/*
* Determine how many pages are compatible with our allocation.
* For movable allocation, it's the number of movable pages which
* we just obtained. For other types it's a bit more tricky.
*/
if (start_type == MIGRATE_MOVABLE) {
alike_pages = movable_pages;
} else {
/*
* If we are falling back a RECLAIMABLE or UNMOVABLE allocation
* to MOVABLE pageblock, consider all non-movable pages as
* compatible. If it's UNMOVABLE falling back to RECLAIMABLE or
* vice versa, be conservative since we can't distinguish the
* exact migratetype of non-movable pages.
*/
if (old_block_type == MIGRATE_MOVABLE)
alike_pages = pageblock_nr_pages
- (free_pages + movable_pages);
else
alike_pages = 0;
}
/* moving whole block can fail due to zone boundary conditions */
if (!free_pages)
goto single_page;
/*
* If a sufficient number of pages in the block are either free or of
* comparable migratability as our allocation, claim the whole block.
*/
if (free_pages + alike_pages >= (1 << (pageblock_order-1)) ||
page_group_by_mobility_disabled)
set_pageblock_migratetype(page, start_type);
return;
single_page:
area = &zone->free_area[current_order];
list_move(&page->lru, &area->free_list[start_type]);
} | 0 | [] | linux | 400e22499dd92613821374c8c6c88c7225359980 | 286,115,125,533,527,540,000,000,000,000,000,000,000 | 69 | mm: don't warn about allocations which stall for too long
Commit 63f53dea0c98 ("mm: warn about allocations which stall for too
long") was a great step for reducing possibility of silent hang up
problem caused by memory allocation stalls. But this commit reverts it,
for it is possible to trigger OOM lockup and/or soft lockups when many
threads concurrently called warn_alloc() (in order to warn about memory
allocation stalls) due to current implementation of printk(), and it is
difficult to obtain useful information due to limitation of synchronous
warning approach.
Current printk() implementation flushes all pending logs using the
context of a thread which called console_unlock(). printk() should be
able to flush all pending logs eventually unless somebody continues
appending to printk() buffer.
Since warn_alloc() started appending to printk() buffer while waiting
for oom_kill_process() to make forward progress when oom_kill_process()
is processing pending logs, it became possible for warn_alloc() to force
oom_kill_process() loop inside printk(). As a result, warn_alloc()
significantly increased possibility of preventing oom_kill_process()
from making forward progress.
---------- Pseudo code start ----------
Before warn_alloc() was introduced:
retry:
if (mutex_trylock(&oom_lock)) {
while (atomic_read(&printk_pending_logs) > 0) {
atomic_dec(&printk_pending_logs);
print_one_log();
}
// Send SIGKILL here.
mutex_unlock(&oom_lock)
}
goto retry;
After warn_alloc() was introduced:
retry:
if (mutex_trylock(&oom_lock)) {
while (atomic_read(&printk_pending_logs) > 0) {
atomic_dec(&printk_pending_logs);
print_one_log();
}
// Send SIGKILL here.
mutex_unlock(&oom_lock)
} else if (waited_for_10seconds()) {
atomic_inc(&printk_pending_logs);
}
goto retry;
---------- Pseudo code end ----------
Although waited_for_10seconds() becomes true once per 10 seconds,
unbounded number of threads can call waited_for_10seconds() at the same
time. Also, since threads doing waited_for_10seconds() keep doing
almost busy loop, the thread doing print_one_log() can use little CPU
resource. Therefore, this situation can be simplified like
---------- Pseudo code start ----------
retry:
if (mutex_trylock(&oom_lock)) {
while (atomic_read(&printk_pending_logs) > 0) {
atomic_dec(&printk_pending_logs);
print_one_log();
}
// Send SIGKILL here.
mutex_unlock(&oom_lock)
} else {
atomic_inc(&printk_pending_logs);
}
goto retry;
---------- Pseudo code end ----------
when printk() is called faster than print_one_log() can process a log.
One of possible mitigation would be to introduce a new lock in order to
make sure that no other series of printk() (either oom_kill_process() or
warn_alloc()) can append to printk() buffer when one series of printk()
(either oom_kill_process() or warn_alloc()) is already in progress.
Such serialization will also help obtaining kernel messages in readable
form.
---------- Pseudo code start ----------
retry:
if (mutex_trylock(&oom_lock)) {
mutex_lock(&oom_printk_lock);
while (atomic_read(&printk_pending_logs) > 0) {
atomic_dec(&printk_pending_logs);
print_one_log();
}
// Send SIGKILL here.
mutex_unlock(&oom_printk_lock);
mutex_unlock(&oom_lock)
} else {
if (mutex_trylock(&oom_printk_lock)) {
atomic_inc(&printk_pending_logs);
mutex_unlock(&oom_printk_lock);
}
}
goto retry;
---------- Pseudo code end ----------
But this commit does not go that direction, for we don't want to
introduce a new lock dependency, and we are unlikely to be able to obtain
useful information even if we serialized oom_kill_process() and
warn_alloc().
A synchronous approach is prone to unexpected results (e.g. too late [1],
too frequent [2], overlooked [3]). As far as I know, warn_alloc() never
helped with providing information other than "something is going wrong".
I want to consider an asynchronous approach which can obtain information
during stalls with possibly relevant threads (e.g. the owner of
oom_lock and kswapd-like threads) and serve as a trigger for actions
(e.g. turn on/off tracepoints, ask libvirt daemon to take a memory dump
of a stalling KVM guest for diagnostic purposes).
This commit temporarily loses the ability to report e.g. an OOM lockup caused
by being unable to invoke the OOM killer for a !__GFP_FS allocation request.
But an asynchronous approach will be able to detect such situations and emit
a warning. Thus, let's remove warn_alloc().
[1] https://bugzilla.kernel.org/show_bug.cgi?id=192981
[2] http://lkml.kernel.org/r/CAM_iQpWuPVGc2ky8M-9yukECtS+zKjiDasNymX7rMcBjBFyM_A@mail.gmail.com
[3] commit db73ee0d46379922 ("mm, vmscan: do not loop on too_many_isolated for ever"))
Link: http://lkml.kernel.org/r/1509017339-4802-1-git-send-email-penguin-kernel@I-love.SAKURA.ne.jp
Signed-off-by: Tetsuo Handa <[email protected]>
Reported-by: Cong Wang <[email protected]>
Reported-by: yuwang.yuwang <[email protected]>
Reported-by: Johannes Weiner <[email protected]>
Acked-by: Michal Hocko <[email protected]>
Acked-by: Johannes Weiner <[email protected]>
Cc: Vlastimil Babka <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: Dave Hansen <[email protected]>
Cc: Sergey Senozhatsky <[email protected]>
Cc: Petr Mladek <[email protected]>
Cc: Steven Rostedt <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
CImg<T> get_mirror(const char axis) const {
return (+*this).mirror(axis);
} | 0 | [
"CWE-770"
] | cimg | 619cb58dd90b4e03ac68286c70ed98acbefd1c90 | 158,136,766,676,729,580,000,000,000,000,000,000,000 | 3 | CImg<>::load_bmp() and CImg<>::load_pandore(): Check that dimensions encoded in file do not exceed file size. |
check_acl(const Acl *acl)
{
if (ARR_ELEMTYPE(acl) != ACLITEMOID)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("ACL array contains wrong data type")));
if (ARR_NDIM(acl) != 1)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("ACL arrays must be one-dimensional")));
if (ARR_HASNULL(acl))
ereport(ERROR,
(errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED),
errmsg("ACL arrays must not contain null values")));
} | 0 | [
"CWE-264"
] | postgres | fea164a72a7bfd50d77ba5fb418d357f8f2bb7d0 | 203,441,149,427,818,570,000,000,000,000,000,000,000 | 15 | Shore up ADMIN OPTION restrictions.
Granting a role without ADMIN OPTION is supposed to prevent the grantee
from adding or removing members from the granted role. Issuing SET ROLE
before the GRANT bypassed that, because the role itself had an implicit
right to add or remove members. Plug that hole by recognizing that
implicit right only when the session user matches the current role.
Additionally, do not recognize it during a security-restricted operation
or during execution of a SECURITY DEFINER function. The restriction on
SECURITY DEFINER is not security-critical. However, it seems best for a
user testing his own SECURITY DEFINER function to see the same behavior
others will see. Back-patch to 8.4 (all supported versions).
The SQL standards do not conflate roles and users as PostgreSQL does;
only SQL roles have members, and only SQL users initiate sessions. An
application using PostgreSQL users and roles as SQL users and roles will
never attempt to grant membership in the role that is the session user,
so the implicit right to add or remove members will never arise.
The security impact was mostly that a role member could revoke access
from others, contrary to the wishes of his own grantor. Unapproved role
member additions are less notable, because the member can still largely
achieve that by creating a view or a SECURITY DEFINER function.
Reviewed by Andres Freund and Tom Lane. Reported, independently, by
Jonas Sundman and Noah Misch.
Security: CVE-2014-0060 |
static void whereLoopInit(WhereLoop *p){
p->aLTerm = p->aLTermSpace;
p->nLTerm = 0;
p->nLSlot = ArraySize(p->aLTermSpace);
p->wsFlags = 0;
} | 0 | [
"CWE-129"
] | sqlite | effc07ec9c6e08d3bd17665f8800054770f8c643 | 21,185,384,153,131,715,000,000,000,000,000,000,000 | 6 | Fix the whereKeyStats() routine (part of STAT4 processing only) so that it
is able to cope with row-value comparisons against the primary key index
of a WITHOUT ROWID table.
[forum:/forumpost/3607259d3c|Forum post 3607259d3c].
FossilOrigin-Name: 2a6f761864a462de5c2d5bc666b82fb0b7e124a03443cd1482620dde344b34bb |
static int DecodeExtKeyUsage(const byte* input, int sz, DecodedCert* cert)
{
word32 idx = 0, oid;
int length, ret;
WOLFSSL_MSG("DecodeExtKeyUsage");
if (GetSequence(input, &idx, &length, sz) < 0) {
WOLFSSL_MSG("\tfail: should be a SEQUENCE");
return ASN_PARSE_E;
}
#if defined(OPENSSL_EXTRA) || defined(OPENSSL_EXTRA_X509_SMALL)
cert->extExtKeyUsageSrc = input + idx;
cert->extExtKeyUsageSz = length;
#endif
while (idx < (word32)sz) {
ret = GetObjectId(input, &idx, &oid, oidCertKeyUseType, sz);
if (ret == ASN_UNKNOWN_OID_E)
continue;
else if (ret < 0)
return ret;
switch (oid) {
case EKU_ANY_OID:
cert->extExtKeyUsage |= EXTKEYUSE_ANY;
break;
case EKU_SERVER_AUTH_OID:
cert->extExtKeyUsage |= EXTKEYUSE_SERVER_AUTH;
break;
case EKU_CLIENT_AUTH_OID:
cert->extExtKeyUsage |= EXTKEYUSE_CLIENT_AUTH;
break;
case EKU_CODESIGNING_OID:
cert->extExtKeyUsage |= EXTKEYUSE_CODESIGN;
break;
case EKU_EMAILPROTECT_OID:
cert->extExtKeyUsage |= EXTKEYUSE_EMAILPROT;
break;
case EKU_TIMESTAMP_OID:
cert->extExtKeyUsage |= EXTKEYUSE_TIMESTAMP;
break;
case EKU_OCSP_SIGN_OID:
cert->extExtKeyUsage |= EXTKEYUSE_OCSP_SIGN;
break;
default:
break;
}
#if defined(OPENSSL_EXTRA) || defined(OPENSSL_EXTRA_X509_SMALL)
cert->extExtKeyUsageCount++;
#endif
}
return 0;
} | 0 | [
"CWE-125",
"CWE-345"
] | wolfssl | f93083be72a3b3d956b52a7ec13f307a27b6e093 | 169,492,903,258,489,750,000,000,000,000,000,000,000 | 57 | OCSP: improve handling of OCSP no check extension |
static __u8 *nci_extract_rf_params_nfca_passive_poll(struct nci_dev *ndev,
struct rf_tech_specific_params_nfca_poll *nfca_poll,
__u8 *data)
{
nfca_poll->sens_res = __le16_to_cpu(*((__u16 *)data));
data += 2;
nfca_poll->nfcid1_len = *data++;
pr_debug("sens_res 0x%x, nfcid1_len %d\n",
nfca_poll->sens_res, nfca_poll->nfcid1_len);
memcpy(nfca_poll->nfcid1, data, nfca_poll->nfcid1_len);
data += nfca_poll->nfcid1_len;
nfca_poll->sel_res_len = *data++;
if (nfca_poll->sel_res_len != 0)
nfca_poll->sel_res = *data++;
pr_debug("sel_res_len %d, sel_res 0x%x\n",
nfca_poll->sel_res_len,
nfca_poll->sel_res);
return data;
} | 1 | [
"CWE-119",
"CWE-787"
] | linux | 67de956ff5dc1d4f321e16cfbd63f5be3b691b43 | 147,695,216,107,880,760,000,000,000,000,000,000,000 | 26 | NFC: Prevent multiple buffer overflows in NCI
Fix multiple remotely-exploitable stack-based buffer overflows due to
the NCI code pulling length fields directly from incoming frames and
copying too much data into statically-sized arrays.
Signed-off-by: Dan Rosenberg <[email protected]>
Cc: [email protected]
Cc: [email protected]
Cc: Lauro Ramos Venancio <[email protected]>
Cc: Aloisio Almeida Jr <[email protected]>
Cc: Samuel Ortiz <[email protected]>
Cc: David S. Miller <[email protected]>
Acked-by: Ilan Elias <[email protected]>
Signed-off-by: Samuel Ortiz <[email protected]> |
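The fix pattern — bound an attacker-controlled length byte by both the destination array and the remaining frame before copying — can be sketched like this; NFCID1_MAX is an assumed destination size, not necessarily the driver's constant.

#include <stddef.h>
#include <string.h>

#define NFCID1_MAX 10u   /* assumed size of the nfcid1 array */

/* Returns the advanced cursor, or NULL if the frame is truncated or
 * the declared length exceeds the destination. */
static const unsigned char *read_nfcid(unsigned char dst[NFCID1_MAX],
                                       const unsigned char *data,
                                       const unsigned char *end)
{
    size_t len;

    if (data >= end)
        return NULL;
    len = *data++;
    if (len > NFCID1_MAX || (size_t)(end - data) < len)
        return NULL;               /* oversized or truncated input */
    memcpy(dst, data, len);
    return data + len;
}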
STATIC regnode *
S_construct_ahocorasick_from_trie(pTHX_ RExC_state_t *pRExC_state, regnode *source, U32 depth)
{
/* The Trie is constructed and compressed now so we can build a fail array if
* it's needed
This is basically the Aho-Corasick algorithm. It's from exercises 3.31 and
3.32 in the
"Red Dragon" -- Compilers, principles, techniques, and tools. Aho, Sethi,
Ullman 1985/88
ISBN 0-201-10088-6
We find the fail state for each state in the trie, this state is the longest
proper suffix of the current state's 'word' that is also a proper prefix of
another word in our trie. State 1 represents the word '' and is thus the
default fail state. This allows the DFA not to have to restart after its
tried and failed a word at a given point, it simply continues as though it
had been matching the other word in the first place.
Consider
'abcdgu'=~/abcdefg|cdgu/
When we get to 'd' we are still matching the first word, we would encounter
'g' which would fail, which would bring us to the state representing 'd' in
the second word where we would try 'g' and succeed, proceeding to match
'cdgu'.
*/
/* add a fail transition */
const U32 trie_offset = ARG(source);
reg_trie_data *trie=(reg_trie_data *)RExC_rxi->data->data[trie_offset];
U32 *q;
const U32 ucharcount = trie->uniquecharcount;
const U32 numstates = trie->statecount;
const U32 ubound = trie->lasttrans + ucharcount;
U32 q_read = 0;
U32 q_write = 0;
U32 charid;
U32 base = trie->states[ 1 ].trans.base;
U32 *fail;
reg_ac_data *aho;
const U32 data_slot = add_data( pRExC_state, STR_WITH_LEN("T"));
regnode *stclass;
GET_RE_DEBUG_FLAGS_DECL;
PERL_ARGS_ASSERT_CONSTRUCT_AHOCORASICK_FROM_TRIE;
PERL_UNUSED_CONTEXT;
#ifndef DEBUGGING
PERL_UNUSED_ARG(depth);
#endif
if ( OP(source) == TRIE ) {
struct regnode_1 *op = (struct regnode_1 *)
PerlMemShared_calloc(1, sizeof(struct regnode_1));
StructCopy(source, op, struct regnode_1);
stclass = (regnode *)op;
} else {
struct regnode_charclass *op = (struct regnode_charclass *)
PerlMemShared_calloc(1, sizeof(struct regnode_charclass));
StructCopy(source, op, struct regnode_charclass);
stclass = (regnode *)op;
}
OP(stclass)+=2; /* convert the TRIE type to its AHO-CORASICK equivalent */
ARG_SET( stclass, data_slot );
aho = (reg_ac_data *) PerlMemShared_calloc( 1, sizeof(reg_ac_data) );
RExC_rxi->data->data[ data_slot ] = (void*)aho;
aho->trie=trie_offset;
aho->states=(reg_trie_state *)PerlMemShared_malloc( numstates * sizeof(reg_trie_state) );
Copy( trie->states, aho->states, numstates, reg_trie_state );
Newx( q, numstates, U32);
aho->fail = (U32 *) PerlMemShared_calloc( numstates, sizeof(U32) );
aho->refcount = 1;
fail = aho->fail;
/* initialize fail[0..1] to be 1 so that we always have
a valid final fail state */
fail[ 0 ] = fail[ 1 ] = 1;
for ( charid = 0; charid < ucharcount ; charid++ ) {
const U32 newstate = TRIE_TRANS_STATE( 1, base, ucharcount, charid, 0 );
if ( newstate ) {
q[ q_write ] = newstate;
/* set to point at the root */
fail[ q[ q_write++ ] ]=1;
}
}
while ( q_read < q_write) {
const U32 cur = q[ q_read++ % numstates ];
base = trie->states[ cur ].trans.base;
for ( charid = 0 ; charid < ucharcount ; charid++ ) {
const U32 ch_state = TRIE_TRANS_STATE( cur, base, ucharcount, charid, 1 );
if (ch_state) {
U32 fail_state = cur;
U32 fail_base;
do {
fail_state = fail[ fail_state ];
fail_base = aho->states[ fail_state ].trans.base;
} while ( !TRIE_TRANS_STATE( fail_state, fail_base, ucharcount, charid, 1 ) );
fail_state = TRIE_TRANS_STATE( fail_state, fail_base, ucharcount, charid, 1 );
fail[ ch_state ] = fail_state;
if ( !aho->states[ ch_state ].wordnum && aho->states[ fail_state ].wordnum )
{
aho->states[ ch_state ].wordnum = aho->states[ fail_state ].wordnum;
}
q[ q_write++ % numstates] = ch_state;
}
}
}
/* restore fail[0..1] to 0 so that we "fall out" of the AC loop
when we fail in state 1, this allows us to use the
charclass scan to find a valid start char. This is based on the principle
that there's a good chance the string being searched contains lots of stuff
that can't be a start char.
*/
fail[ 0 ] = fail[ 1 ] = 0;
DEBUG_TRIE_COMPILE_r({
Perl_re_indentf( aTHX_ "Stclass Failtable (%" UVuf " states): 0",
depth, (UV)numstates
);
for( q_read=1; q_read<numstates; q_read++ ) {
Perl_re_printf( aTHX_ ", %" UVuf, (UV)fail[q_read]);
}
Perl_re_printf( aTHX_ "\n");
});
Safefree(q);
/*RExC_seen |= REG_TRIEDFA_SEEN;*/
    return stclass;
} | 0 | [
"CWE-190",
"CWE-787"
] | perl5 | 897d1f7fd515b828e4b198d8b8bef76c6faf03ed | 308,349,771,926,812,440,000,000,000,000,000,000,000 | 126 | regcomp.c: Prevent integer overflow from nested regex quantifiers.
(CVE-2020-10543) On 32bit systems the size calculations for nested regular
expression quantifiers could overflow causing heap memory corruption.
Fixes: Perl/perl5-security#125
(cherry picked from commit bfd31397db5dc1a5c5d3e0a1f753a4f89a736e71) |
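The 32-bit overflow described is avoided by widening before multiplying; a sketch of the check, with hypothetical naming rather than regcomp.c's actual helpers:

#include <stdint.h>

/* Combine two quantifier counts without letting the product wrap on
 * 32-bit; reject anything that exceeds the representable size. */
static int mul_counts_checked(uint32_t a, uint32_t b, uint32_t *out)
{
    uint64_t prod = (uint64_t)a * (uint64_t)b;

    if (prod > UINT32_MAX)
        return -1;                 /* would overflow the 32-bit size */
    *out = (uint32_t)prod;
    return 0;
}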
static void mod_wstunnel_merge_config(plugin_config * const pconf, const config_plugin_value_t *cpv) {
do {
mod_wstunnel_merge_config_cpv(pconf, cpv);
} while ((++cpv)->k_id != -1);
} | 0 | [
"CWE-476"
] | lighttpd1.4 | 971773f1fae600074b46ef64f3ca1f76c227985f | 193,798,236,633,382,640,000,000,000,000,000,000,000 | 5 | [mod_wstunnel] fix crash with bad hybivers (fixes #3165)
(thx Michał Dardas)
x-ref:
"mod_wstunnel null pointer dereference"
https://redmine.lighttpd.net/issues/3165 |
static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *state)
{
struct nfs4_state *newstate;
int ret;
/* memory barrier prior to reading state->n_* */
clear_bit(NFS_DELEGATED_STATE, &state->flags);
smp_rmb();
if (state->n_rdwr != 0) {
clear_bit(NFS_O_RDWR_STATE, &state->flags);
ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE, &newstate);
if (ret != 0)
return ret;
if (newstate != state)
return -ESTALE;
}
if (state->n_wronly != 0) {
clear_bit(NFS_O_WRONLY_STATE, &state->flags);
ret = nfs4_open_recover_helper(opendata, FMODE_WRITE, &newstate);
if (ret != 0)
return ret;
if (newstate != state)
return -ESTALE;
}
if (state->n_rdonly != 0) {
clear_bit(NFS_O_RDONLY_STATE, &state->flags);
ret = nfs4_open_recover_helper(opendata, FMODE_READ, &newstate);
if (ret != 0)
return ret;
if (newstate != state)
return -ESTALE;
}
/*
* We may have performed cached opens for all three recoveries.
* Check if we need to update the current stateid.
*/
if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0 &&
memcmp(state->stateid.data, state->open_stateid.data, sizeof(state->stateid.data)) != 0) {
write_seqlock(&state->seqlock);
if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
memcpy(state->stateid.data, state->open_stateid.data, sizeof(state->stateid.data));
write_sequnlock(&state->seqlock);
}
return 0;
} | 0 | [
"CWE-703",
"CWE-189"
] | linux | bf118a342f10dafe44b14451a1392c3254629a1f | 335,892,058,548,152,950,000,000,000,000,000,000,000 | 45 | NFSv4: include bitmap in nfsv4 get acl data
The NFSv4 bitmap size is unbounded: a server can return an arbitrary
sized bitmap in an FATTR4_WORD0_ACL request. Replace using the
nfs4_fattr_bitmap_maxsz as a guess to the maximum bitmask returned by a server
with the inclusion of the bitmap (xdr length plus bitmasks) and the acl data
xdr length to the (cached) acl page data.
This is a general solution to commit e5012d1f "NFSv4.1: update
nfs4_fattr_bitmap_maxsz" and fixes hitting a BUG_ON in xdr_shrink_bufhead
when getting ACLs.
Fix a bug in decode_getacl that returned -EINVAL on ACLs > page when getxattr
was called with a NULL buffer, preventing ACL > PAGE_SIZE from being retrieved.
Cc: [email protected]
Signed-off-by: Andy Adamson <[email protected]>
Signed-off-by: Trond Myklebust <[email protected]> |