func
stringlengths 0
484k
| target
int64 0
1
| cwe
listlengths 0
4
| project
stringclasses 799
values | commit_id
stringlengths 40
40
| hash
float64 1,215,700,430,453,689,100,000,000B
340,281,914,521,452,260,000,000,000,000B
| size
int64 1
24k
| message
stringlengths 0
13.3k
|
---|---|---|---|---|---|---|---|
static int af9005_frontend_attach(struct dvb_usb_adapter *adap)
{
	struct usb_device *udev = adap->dev->udev;
	u8 eeprom_block[8];
	int offset;

	/*
	 * Clear the bulk endpoints first: without these calls the first
	 * commands issued after downloading the firmware fail.  This
	 * simulates what dvb-usb-init.c does.
	 */
	usb_clear_halt(udev, usb_sndbulkpipe(udev, 2));
	usb_clear_halt(udev, usb_rcvbulkpipe(udev, 1));

	/* Optionally dump the EEPROM contents, 8 bytes at a time. */
	if (dvb_usb_af9005_dump_eeprom) {
		printk("EEPROM DUMP\n");
		for (offset = 0; offset < 255; offset += 8) {
			af9005_read_eeprom(adap->dev, offset, eeprom_block, 8);
			debug_dump(eeprom_block, 8, printk);
		}
	}

	adap->fe_adap[0].fe = af9005_fe_attach(adap->dev);
	return 0;
}
| 0 |
[
"CWE-400",
"CWE-401"
] |
linux
|
2289adbfa559050d2a38bcd9caac1c18b800e928
| 286,592,619,411,861,300,000,000,000,000,000,000,000 | 22 |
media: usb: fix memory leak in af9005_identify_state
In af9005_identify_state when returning -EIO the allocated buffer should
be released. Replace the "return -EIO" with assignment into ret and move
deb_info() under a check.
Fixes: af4e067e1dcf ("V4L/DVB (5625): Add support for the AF9005 demodulator from Afatech")
Signed-off-by: Navid Emamdoost <[email protected]>
Signed-off-by: Hans Verkuil <[email protected]>
Signed-off-by: Mauro Carvalho Chehab <[email protected]>
|
/*
 * creat(2): legacy interface, equivalent to
 * open(pathname, O_CREAT | O_WRONLY | O_TRUNC, mode).
 */
SYSCALL_DEFINE2(creat, const char __user *, pathname, umode_t, mode)
{
	return sys_open(pathname, O_CREAT | O_WRONLY | O_TRUNC, mode);
}
| 0 |
[
"CWE-17"
] |
linux
|
eee5cc2702929fd41cce28058dc6d6717f723f87
| 260,199,394,561,618,700,000,000,000,000,000,000,000 | 4 |
get rid of s_files and files_lock
The only thing we need it for is alt-sysrq-r (emergency remount r/o)
and these days we can do just as well without going through the
list of files.
Signed-off-by: Al Viro <[email protected]>
|
/*
 * Build and send a DHCPOFFER in reply to a client's DHCPDISCOVER.
 *
 * IP selection precedence:
 *   1. static lease for the client's chaddr (static_lease_nip != 0)
 *   2. existing dynamic lease for the chaddr (even if expired)
 *   3. the client's requested IP, if in range and free/expired
 *   4. any free or expired IP from the pool
 *
 * If no IP can be found or reserved, the OFFER is silently abandoned
 * (only an error message is logged).
 */
static NOINLINE void send_offer(struct dhcp_packet *oldpacket,
		uint32_t static_lease_nip,
		struct dyn_lease *lease,
		uint8_t *requested_ip_opt,
		unsigned arpping_ms)
{
	struct dhcp_packet packet;
	uint32_t lease_time_sec;
	struct in_addr addr;

	init_packet(&packet, oldpacket, DHCPOFFER);

	/* If it is a static lease, use its IP */
	packet.yiaddr = static_lease_nip;
	/* Else: */
	if (!static_lease_nip) {
		/* We have no static lease for client's chaddr */
		uint32_t req_nip;
		const char *p_host_name;

		if (lease) {
			/* We have a dynamic lease for client's chaddr.
			 * Reuse its IP (even if lease is expired).
			 * Note that we ignore requested IP in this case.
			 */
			packet.yiaddr = lease->lease_nip;
		}
		/* Or: if client has requested an IP */
		else if (requested_ip_opt != NULL
		 /* (read IP) */
		 && (move_from_unaligned32(req_nip, requested_ip_opt), 1)
		 /* and the IP is in the lease range */
		 && ntohl(req_nip) >= server_config.start_ip
		 && ntohl(req_nip) <= server_config.end_ip
		 /* and */
		 && (  !(lease = find_lease_by_nip(req_nip)) /* is not already taken */
		    || is_expired_lease(lease) /* or is taken, but expired */
		    )
		) {
			packet.yiaddr = req_nip;
		}
		else {
			/* Otherwise, find a free IP */
			packet.yiaddr = find_free_or_expired_nip(oldpacket->chaddr, arpping_ms);
		}

		if (!packet.yiaddr) {
			bb_error_msg("no free IP addresses. OFFER abandoned");
			return;
		}
		/* Reserve the IP for a short time hoping to get DHCPREQUEST soon */
		p_host_name = (const char*) udhcp_get_option(oldpacket, DHCP_HOST_NAME);
		/* NOTE(review): p_host_name[OPT_LEN - OPT_DATA] reads the option's
		 * length byte stored just before the data pointer returned by
		 * udhcp_get_option — presumably OPT_LEN - OPT_DATA is the (negative)
		 * offset from data to the length field; confirm against dhcp.h. */
		lease = add_lease(packet.chaddr, packet.yiaddr,
				server_config.offer_time,
				p_host_name,
				p_host_name ? (unsigned char)p_host_name[OPT_LEN - OPT_DATA] : 0
		);
		if (!lease) {
			bb_error_msg("no free IP addresses. OFFER abandoned");
			return;
		}
	}

	lease_time_sec = select_lease_time(oldpacket);
	udhcp_add_simple_option(&packet, DHCP_LEASE_TIME, htonl(lease_time_sec));
	add_server_options(&packet);

	addr.s_addr = packet.yiaddr;
	bb_error_msg("sending OFFER of %s", inet_ntoa(addr));
	/* send_packet emits error message itself if it detects failure */
	send_packet(&packet, /*force_bcast:*/ 0);
}
| 0 |
[
"CWE-125"
] |
busybox
|
6d3b4bb24da9a07c263f3c1acf8df85382ff562c
| 27,009,699,364,614,690,000,000,000,000,000,000,000 | 72 |
udhcpc: check that 4-byte options are indeed 4-byte, closes 11506
function old new delta
udhcp_get_option32 - 27 +27
udhcp_get_option 231 248 +17
------------------------------------------------------------------------------
(add/remove: 1/0 grow/shrink: 1/0 up/down: 44/0) Total: 44 bytes
Signed-off-by: Denys Vlasenko <[email protected]>
|
// Construct a ciEnv on an externally supplied arena (non-compile-task use).
// Registers itself as the current thread's environment before any other
// initialization, since ciObjectFactory and friends look it up via
// ciEnv::current().  Most fields are NULL/false-initialized; exception
// instance caches stay NULL and are populated lazily.
ciEnv::ciEnv(Arena* arena) : _ciEnv_arena(mtCompiler) {
  ASSERT_IN_VM;

  // Set up ciEnv::current immediately, for the sake of ciObjectFactory, etc.
  CompilerThread* current_thread = CompilerThread::current();
  assert(current_thread->env() == NULL, "must be");
  current_thread->set_env(this);
  assert(ciEnv::current() == this, "sanity");

  _oop_recorder = NULL;
  _debug_info = NULL;
  _dependencies = NULL;
  _failure_reason = NULL;
  // This constructor is for non-compilation use, so mark the (absent)
  // method as never compilable.
  _compilable = MethodCompilable_never;
  _break_at_compile = false;
  _compiler_data = NULL;
#ifndef PRODUCT
  // In debug builds only one such environment may ever be created.
  assert(firstEnv, "must be first");
  firstEnv = false;
#endif /* !PRODUCT */

  _system_dictionary_modification_counter = 0;
  _num_inlined_bytecodes = 0;
  _task = NULL;
  _log = NULL;

  // Temporary buffer for creating symbols and such.
  _name_buffer = NULL;
  _name_buffer_len = 0;

  _arena = arena;
  _factory = new (_arena) ciObjectFactory(_arena, 128);

  // Preload commonly referenced system ciObjects.

  // During VM initialization, these instances have not yet been created.
  // Assertions ensure that these instances are not accessed before
  // their initialization.

  assert(Universe::is_fully_initialized(), "must be");

  _NullPointerException_instance = NULL;
  _ArithmeticException_instance = NULL;
  _ArrayIndexOutOfBoundsException_instance = NULL;
  _ArrayStoreException_instance = NULL;
  _ClassCastException_instance = NULL;
  _the_null_string = NULL;
  _the_min_jint_string = NULL;

  _jvmti_can_hotswap_or_post_breakpoint = false;
  _jvmti_can_access_local_variables = false;
  _jvmti_can_post_on_exceptions = false;
  _jvmti_can_pop_frame = false;
}
| 0 |
[] |
jdk8u
|
1dafef08cc922ee85a8e216387100dc681a5484d
| 169,185,592,186,403,350,000,000,000,000,000,000,000 | 54 |
8281859: Improve class compilation
Reviewed-by: andrew
Backport-of: 3ac62a66efd05d0842076dd4cfbea0e53b12630f
|
nautilus_file_cancel_call_when_ready (NautilusFile *file,
				      NautilusFileCallback callback,
				      gpointer callback_data)
{
	/* Cancel a pending nautilus_file_call_when_ready request.
	 * A NULL file is tolerated (no-op); a NULL callback is a
	 * programming error.  The actual cancellation is delegated to
	 * the NautilusFile subclass via the EEL method-dispatch macro.
	 */
	g_return_if_fail (callback != NULL);

	if (file == NULL) {
		return;
	}

	g_return_if_fail (NAUTILUS_IS_FILE (file));

	EEL_CALL_METHOD
		(NAUTILUS_FILE_CLASS, file,
		 cancel_call_when_ready, (file, callback, callback_data));
}
| 0 |
[] |
nautilus
|
7632a3e13874a2c5e8988428ca913620a25df983
| 298,812,377,298,609,230,000,000,000,000,000,000,000 | 16 |
Check for trusted desktop file launchers.
2009-02-24 Alexander Larsson <[email protected]>
* libnautilus-private/nautilus-directory-async.c:
Check for trusted desktop file launchers.
* libnautilus-private/nautilus-file-private.h:
* libnautilus-private/nautilus-file.c:
* libnautilus-private/nautilus-file.h:
Add nautilus_file_is_trusted_link.
Allow unsetting of custom display name.
* libnautilus-private/nautilus-mime-actions.c:
Display dialog when trying to launch a non-trusted desktop file.
svn path=/trunk/; revision=15003
|
static void GetNonpeakPixelList(PixelList *pixel_list,MagickPixelPacket *pixel)
{
  SkipList
    *list;

  ssize_t
    channel;

  size_t
    color,
    next,
    previous;

  ssize_t
    count;

  unsigned short
    channels[5];

  /*
    Finds the non peak value for each of the colors.
  */
  /* One skip list per channel: red, green, blue, opacity, index. */
  for (channel=0; channel < 5; channel++)
  {
    list=pixel_list->lists+channel;
    /* 65536 is the list's sentinel head node (valid colors are 0..65535). */
    color=65536L;
    next=list->nodes[color].next[0];
    count=0;
    /* Walk the level-0 links until the running count passes the median. */
    do
    {
      previous=color;
      color=next;
      next=list->nodes[color].next[0];
      count+=list->nodes[color].count;
    } while (count <= (ssize_t) (pixel_list->length >> 1));
    /* If the median landed on a list endpoint, step off it toward the
       interior so a non-peak (non-extreme) value is selected. */
    if ((previous == 65536UL) && (next != 65536UL))
      color=next;
    else
      if ((previous != 65536UL) && (next == 65536UL))
        color=previous;
    channels[channel]=(unsigned short) color;
  }
  /* Scale the selected 16-bit values back to quantum range. */
  pixel->red=(MagickRealType) ScaleShortToQuantum(channels[0]);
  pixel->green=(MagickRealType) ScaleShortToQuantum(channels[1]);
  pixel->blue=(MagickRealType) ScaleShortToQuantum(channels[2]);
  pixel->opacity=(MagickRealType) ScaleShortToQuantum(channels[3]);
  pixel->index=(MagickRealType) ScaleShortToQuantum(channels[4]);
}
| 0 |
[] |
ImageMagick6
|
072d7b10dbe74d1cf4ec0d008990c1a28c076f9e
| 32,288,800,708,837,475,000,000,000,000,000,000,000 | 48 |
https://github.com/ImageMagick/ImageMagick/issues/3332
|
/*
 * Handle the \P (pager) client command.
 *
 * With an argument: set both the current and default pager to the given
 * command (trailing whitespace/control characters stripped).
 * Without an argument: fall back to the previously set default pager, or
 * to stdout if none was ever configured.
 * No-op in batch mode.  Always returns 0.
 */
com_pager(String *buffer MY_ATTRIBUTE((unused)),
          char *line MY_ATTRIBUTE((unused)))
{
  char pager_name[FN_REFLEN], *end, *param;

  if (status.batch)
    return 0;
  /* Skip spaces in front of the pager command */
  while (my_isspace(charset_info, *line))
    line++;
  /* Skip the pager command */
  param= strchr(line, ' ');
  /* Skip the spaces between the command and the argument */
  while (param && my_isspace(charset_info, *param))
    param++;
  if (!param || !strlen(param)) // if pager was not given, use the default
  {
    if (!default_pager_set)
    {
      tee_fprintf(stdout, "Default pager wasn't set, using stdout.\n");
      opt_nopager=1;
      strmov(pager, "stdout");
      PAGER= stdout;
      return 0;
    }
    strmov(pager, default_pager);
  }
  else
  {
    /* Copy the argument, then trim trailing space/control characters. */
    end= strmake(pager_name, param, sizeof(pager_name)-1);
    while (end > pager_name && (my_isspace(charset_info,end[-1]) ||
                                my_iscntrl(charset_info,end[-1])))
      end--;
    end[0]=0;
    strmov(pager, pager_name);
    strmov(default_pager, pager_name);
  }
  opt_nopager=0;
  tee_fprintf(stdout, "PAGER set to '%s'\n", pager);
  return 0;
}
| 0 |
[
"CWE-319"
] |
mysql-server
|
0002e1380d5f8c113b6bce91f2cf3f75136fd7c7
| 191,983,372,340,968,000,000,000,000,000,000,000,000 | 41 |
BUG#25575605: SETTING --SSL-MODE=REQUIRED SENDS CREDENTIALS BEFORE VERIFYING SSL CONNECTION
MYSQL_OPT_SSL_MODE option introduced.
It is set in case of --ssl-mode=REQUIRED and permits only SSL connection.
(cherry picked from commit f91b941842d240b8a62645e507f5554e8be76aec)
|
/*
 * Benchmark a cipher's TLS 1.1 multiblock encryption path.
 *
 * For each buffer size in mblengths[], repeatedly encrypts for the timing
 * interval using EVP_CTRL_TLS1_1_MULTIBLOCK_* when the cipher supports it,
 * falling back to the one-record EVP_CTRL_AEAD_TLS1_AAD + EVP_Cipher path
 * otherwise, then prints throughput (machine-readable if 'mr' is set).
 */
static void multiblock_speed(const EVP_CIPHER *evp_cipher)
{
    static int mblengths[] =
        { 8 * 1024, 2 * 8 * 1024, 4 * 8 * 1024, 8 * 8 * 1024, 8 * 16 * 1024 };
    /* NOTE(review): num is derived from the global lengths[] array, not
     * from mblengths[]; this presumably relies on both having the same
     * element count — confirm before changing either table. */
    int j, count, num = sizeof(lengths) / sizeof(lengths[0]);
    const char *alg_name;
    unsigned char *inp, *out, no_key[32], no_iv[16];
    EVP_CIPHER_CTX ctx;
    double d = 0.0;

    inp = OPENSSL_malloc(mblengths[num - 1]);
    out = OPENSSL_malloc(mblengths[num - 1] + 1024);
    if (!inp || !out) {
        BIO_printf(bio_err,"Out of memory\n");
        goto end;
    }

    /* Key/IV contents are irrelevant for a throughput benchmark. */
    EVP_CIPHER_CTX_init(&ctx);
    EVP_EncryptInit_ex(&ctx, evp_cipher, NULL, no_key, no_iv);
    EVP_CIPHER_CTX_ctrl(&ctx, EVP_CTRL_AEAD_SET_MAC_KEY, sizeof(no_key),
                        no_key);
    alg_name = OBJ_nid2ln(evp_cipher->nid);

    for (j = 0; j < num; j++) {
        print_message(alg_name, 0, mblengths[j]);
        Time_F(START);
        for (count = 0, run = 1; run && count < 0x7fffffff; count++) {
            unsigned char aad[EVP_AEAD_TLS1_AAD_LEN];
            EVP_CTRL_TLS1_1_MULTIBLOCK_PARAM mb_param;
            size_t len = mblengths[j];
            int packlen;

            /* Build a fake 13-byte TLS AAD: seq(8) | type | version | length. */
            memset(aad, 0, 8);  /* avoid uninitialized values */
            aad[8] = 23;        /* SSL3_RT_APPLICATION_DATA */
            aad[9] = 3;         /* version */
            aad[10] = 2;
            aad[11] = 0;        /* length */
            aad[12] = 0;
            mb_param.out = NULL;
            mb_param.inp = aad;
            mb_param.len = len;
            mb_param.interleave = 8;

            /* packlen > 0 means the cipher supports the multiblock path. */
            packlen = EVP_CIPHER_CTX_ctrl(&ctx,
                                          EVP_CTRL_TLS1_1_MULTIBLOCK_AAD,
                                          sizeof(mb_param), &mb_param);

            if (packlen > 0) {
                mb_param.out = out;
                mb_param.inp = inp;
                mb_param.len = len;
                EVP_CIPHER_CTX_ctrl(&ctx,
                                    EVP_CTRL_TLS1_1_MULTIBLOCK_ENCRYPT,
                                    sizeof(mb_param), &mb_param);
            } else {
                /* Fallback: encrypt a single TLS record (explicit IV + data). */
                int pad;

                RAND_bytes(out, 16);
                len += 16;
                aad[11] = len >> 8;
                aad[12] = len;
                pad = EVP_CIPHER_CTX_ctrl(&ctx,
                                          EVP_CTRL_AEAD_TLS1_AAD,
                                          EVP_AEAD_TLS1_AAD_LEN, aad);
                EVP_Cipher(&ctx, out, inp, len + pad);
            }
        }
        d = Time_F(STOP);
        BIO_printf(bio_err,
                   mr ? "+R:%d:%s:%f\n"
                   : "%d %s's in %.2fs\n", count, "evp", d);
        results[D_EVP][j] = ((double)count) / d * mblengths[j];
    }

    /* Report: machine-readable (+H/+F) or human-readable table. */
    if (mr) {
        fprintf(stdout, "+H");
        for (j = 0; j < num; j++)
            fprintf(stdout, ":%d", mblengths[j]);
        fprintf(stdout, "\n");
        fprintf(stdout, "+F:%d:%s", D_EVP, alg_name);
        for (j = 0; j < num; j++)
            fprintf(stdout, ":%.2f", results[D_EVP][j]);
        fprintf(stdout, "\n");
    } else {
        fprintf(stdout,
                "The 'numbers' are in 1000s of bytes per second processed.\n");
        fprintf(stdout, "type        ");
        for (j = 0; j < num; j++)
            fprintf(stdout, "%7d bytes", mblengths[j]);
        fprintf(stdout, "\n");
        fprintf(stdout, "%-24s", alg_name);
        for (j = 0; j < num; j++) {
            if (results[D_EVP][j] > 10000)
                fprintf(stdout, " %11.2fk", results[D_EVP][j] / 1e3);
            else
                fprintf(stdout, " %11.2f ", results[D_EVP][j]);
        }
        fprintf(stdout, "\n");
    }

 end:
    if (inp)
        OPENSSL_free(inp);
    if (out)
        OPENSSL_free(out);
}
| 0 |
[] |
openssl
|
1a3701f4fe0530a40ec073cd78d02cfcc26c0f8e
| 325,837,349,564,108,000,000,000,000,000,000,000,000 | 108 |
Sanity check EVP_CTRL_AEAD_TLS_AAD
The various implementations of EVP_CTRL_AEAD_TLS_AAD expect a buffer of at
least 13 bytes long. Add sanity checks to ensure that the length is at
least that. Also add a new constant (EVP_AEAD_TLS1_AAD_LEN) to evp.h to
represent this length. Thanks to Kevin Wojtysiak (Int3 Solutions) and
Paramjot Oberoi (Int3 Solutions) for reporting this issue.
Reviewed-by: Andy Polyakov <[email protected]>
(cherry picked from commit c8269881093324b881b81472be037055571f73f3)
Conflicts:
ssl/record/ssl3_record.c
|
/*
 * Decode an absolute (moffset-style) effective address of the size given
 * by the instruction's address-size attribute into c->modrm_ea.
 *
 * NOTE(review): insn_fetch is a macro that presumably sets rc and jumps
 * to the local 'done' label on fetch failure — confirm in the emulator
 * source; the label and rc exist solely for that macro.  An ad_bytes
 * value other than 2/4/8 leaves modrm_ea untouched.
 */
static int decode_abs(struct x86_emulate_ctxt *ctxt,
		      struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	int rc = 0;

	switch (c->ad_bytes) {
	case 2:
		c->modrm_ea = insn_fetch(u16, 2, c->eip);
		break;
	case 4:
		c->modrm_ea = insn_fetch(u32, 4, c->eip);
		break;
	case 8:
		c->modrm_ea = insn_fetch(u64, 8, c->eip);
		break;
	}
done:
	return rc;
}
| 0 |
[
"CWE-20"
] |
kvm
|
e42d9b8141d1f54ff72ad3850bb110c95a5f3b88
| 130,425,372,610,205,300,000,000,000,000,000,000,000 | 20 |
KVM: x86 emulator: limit instructions to 15 bytes
While we are never normally passed an instruction that exceeds 15 bytes,
smp games can cause us to attempt to interpret one, which will cause
large latencies in non-preempt hosts.
Cc: [email protected]
Signed-off-by: Avi Kivity <[email protected]>
|
/*
 * getsockname()/getpeername() for AF_INET sockets.
 *
 * peer == 0 reports the local address (bound address, falling back to
 * the source address); peer != 0 reports the remote address and fails
 * with -ENOTCONN when the socket is not connected (peer == 1 also
 * rejects CLOSE/SYN_SENT states).
 */
int inet_getname(struct socket *sock, struct sockaddr *uaddr,
		 int *uaddr_len, int peer)
{
	struct sock *sk = sock->sk;
	struct inet_sock *inet = inet_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_in *, sin, uaddr);

	sin->sin_family = AF_INET;

	if (!peer) {
		/* Local end: prefer the bound address, else the source address. */
		__be32 src = inet->inet_rcv_saddr;

		if (!src)
			src = inet->inet_saddr;
		sin->sin_port = inet->inet_sport;
		sin->sin_addr.s_addr = src;
	} else {
		int not_connected = !inet->inet_dport ||
			(peer == 1 &&
			 ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_SYN_SENT)));

		if (not_connected)
			return -ENOTCONN;
		sin->sin_port = inet->inet_dport;
		sin->sin_addr.s_addr = inet->inet_daddr;
	}

	memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
	*uaddr_len = sizeof(*sin);
	return 0;
}
| 0 |
[
"CWE-362"
] |
linux-2.6
|
f6d8bd051c391c1c0458a30b2a7abcd939329259
| 260,900,025,946,534,460,000,000,000,000,000,000,000 | 26 |
inet: add RCU protection to inet->opt
We lack proper synchronization to manipulate inet->opt ip_options
Problem is ip_make_skb() calls ip_setup_cork() and
ip_setup_cork() possibly makes a copy of ipc->opt (struct ip_options),
without any protection against another thread manipulating inet->opt.
Another thread can change inet->opt pointer and free old one under us.
Use RCU to protect inet->opt (changed to inet->inet_opt).
Instead of handling atomic refcounts, just copy ip_options when
necessary, to avoid cache line dirtying.
We cant insert an rcu_head in struct ip_options since its included in
skb->cb[], so this patch is large because I had to introduce a new
ip_options_rcu structure.
Signed-off-by: Eric Dumazet <[email protected]>
Cc: Herbert Xu <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
SMB2_select_sec(struct cifs_ses *ses, struct SMB2_sess_data *sess_data)
{
	/*
	 * Select the session-setup handler for the session's security type.
	 * Any type other than Kerberos is first coerced to raw NTLMSSP.
	 * Returns 0 on success, -EOPNOTSUPP for an unsupported type.
	 */
	if (ses->sectype != Kerberos && ses->sectype != RawNTLMSSP)
		ses->sectype = RawNTLMSSP;

	if (ses->sectype == Kerberos) {
		sess_data->func = SMB2_auth_kerberos;
		return 0;
	}
	if (ses->sectype == RawNTLMSSP) {
		sess_data->func = SMB2_sess_auth_rawntlmssp_negotiate;
		return 0;
	}

	cifs_dbg(VFS, "secType %d not supported!\n", ses->sectype);
	return -EOPNOTSUPP;
}
| 0 |
[
"CWE-476"
] |
linux
|
cabfb3680f78981d26c078a26e5c748531257ebb
| 55,272,981,903,698,780,000,000,000,000,000,000,000 | 19 |
CIFS: Enable encryption during session setup phase
In order to allow encryption on SMB connection we need to exchange
a session key and generate encryption and decryption keys.
Signed-off-by: Pavel Shilovsky <[email protected]>
|
bool Item_cache_real::cache_value()
{
if (!example)
return FALSE;
value_cached= TRUE;
value= example->val_result();
null_value_inside= null_value= example->null_value;
return TRUE;
}
| 0 |
[
"CWE-416"
] |
server
|
c02ebf3510850ba78a106be9974c94c3b97d8585
| 117,608,070,855,356,520,000,000,000,000,000,000,000 | 9 |
MDEV-24176 Preparations
1. moved fix_vcol_exprs() call to open_table()
mysql_alter_table() doesn't do lock_tables() so it cannot win from
fix_vcol_exprs() from there. Tests affected: main.default_session
2. Vanilla cleanups and comments.
|
/*
 * Side-channel-silent RSA private-key operation with blinding and
 * fault-detection (the "tr" = timing-resistant variant).
 *
 * Computes x = m^d mod n on the mn-limb input m: blinds m, computes the
 * root via CRT, verifies the result against the public key, unblinds,
 * and zeroizes x on verification failure.  Returns 1 on success, 0 on
 * failure (invalid key or result check failed).
 *
 * NOTE(review): per the upstream commit message, this interface was later
 * changed to take a fixed input size because the variable mn has a
 * zero-input bug — confirm callers never pass mn == 0 (only mn <= n's
 * limb size is asserted here).
 */
_rsa_sec_compute_root_tr(const struct rsa_public_key *pub,
			 const struct rsa_private_key *key,
			 void *random_ctx, nettle_random_func *random,
			 mp_limb_t *x, const mp_limb_t *m, size_t mn)
{
  TMP_GMP_DECL (c, mp_limb_t);
  TMP_GMP_DECL (ri, mp_limb_t);
  TMP_GMP_DECL (scratch, mp_limb_t);
  size_t key_limb_size;
  int ret;

  key_limb_size = NETTLE_OCTET_SIZE_TO_LIMB_SIZE(key->size);

  /* mpz_powm_sec handles only odd moduli. If p, q or n is even, the
     key is invalid and rejected by rsa_private_key_prepare. However,
     some applications, notably gnutls, don't use this function, and
     we don't want an invalid key to lead to a crash down inside
     mpz_powm_sec. So do an additional check here. */
  if (mpz_even_p (pub->n) || mpz_even_p (key->p) || mpz_even_p (key->q))
    {
      /* Invalid key: zero the output and fail without leaking timing. */
      mpn_zero(x, key_limb_size);
      return 0;
    }

  assert(mpz_size(pub->n) == key_limb_size);
  assert(mn <= key_limb_size);

  TMP_GMP_ALLOC (c, key_limb_size);
  TMP_GMP_ALLOC (ri, key_limb_size);
  TMP_GMP_ALLOC (scratch, _rsa_sec_compute_root_itch(key));

  /* Blind m with a random r, compute the root, then check the result
     against the public exponent to detect faults before unblinding. */
  rsa_sec_blind (pub, random_ctx, random, x, ri, m, mn);

  _rsa_sec_compute_root(key, c, x, scratch);

  ret = rsa_sec_check_root(pub, c, x);

  rsa_sec_unblind(pub, x, ri, c);

  /* On check failure, clear x in constant time (no data-dependent branch). */
  cnd_mpn_zero(1 - ret, x, key_limb_size);

  TMP_GMP_FREE (scratch);
  TMP_GMP_FREE (ri);
  TMP_GMP_FREE (c);
  return ret;
}
| 1 |
[
"CWE-20"
] |
nettle
|
485b5e2820a057e873b1ba812fdb39cae4adf98c
| 65,543,261,521,215,600,000,000,000,000,000,000,000 | 46 |
Change _rsa_sec_compute_root_tr to take a fix input size.
Improves consistency with _rsa_sec_compute_root, and fixes zero-input bug.
|
/*
 * Parse a HEVC slice segment header (ITU-T H.265 7.3.6) from the current
 * NAL unit into s->sh, activating the referenced PPS/SPS as needed.
 *
 * Returns 0 on success, 1 for a duplicate "first slice in picture" that
 * the caller must skip without corrupting state, or a negative AVERROR
 * code on invalid bitstream data.
 */
static int hls_slice_header(HEVCContext *s)
{
    GetBitContext *gb = &s->HEVClc->gb;
    SliceHeader *sh = &s->sh;
    int i, ret;

    // Coded parameters
    sh->first_slice_in_pic_flag = get_bits1(gb);
    if (s->ref && sh->first_slice_in_pic_flag) {
        av_log(s->avctx, AV_LOG_ERROR, "Two slices reporting being the first in the same frame.\n");
        return 1; // This slice will be skipped later, do not corrupt state
    }

    if ((IS_IDR(s) || IS_BLA(s)) && sh->first_slice_in_pic_flag) {
        // Random-access point: start a new decode sequence.
        s->seq_decode = (s->seq_decode + 1) & 0xff;
        s->max_ra = INT_MAX;
        if (IS_IDR(s))
            ff_hevc_clear_refs(s);
    }
    sh->no_output_of_prior_pics_flag = 0;
    if (IS_IRAP(s))
        sh->no_output_of_prior_pics_flag = get_bits1(gb);

    // Activate the PPS referenced by this slice.
    sh->pps_id = get_ue_golomb_long(gb);
    if (sh->pps_id >= HEVC_MAX_PPS_COUNT || !s->ps.pps_list[sh->pps_id]) {
        av_log(s->avctx, AV_LOG_ERROR, "PPS id out of range: %d\n", sh->pps_id);
        return AVERROR_INVALIDDATA;
    }
    if (!sh->first_slice_in_pic_flag &&
        s->ps.pps != (HEVCPPS*)s->ps.pps_list[sh->pps_id]->data) {
        av_log(s->avctx, AV_LOG_ERROR, "PPS changed between slices.\n");
        return AVERROR_INVALIDDATA;
    }
    s->ps.pps = (HEVCPPS*)s->ps.pps_list[sh->pps_id]->data;
    if (s->nal_unit_type == HEVC_NAL_CRA_NUT && s->last_eos == 1)
        sh->no_output_of_prior_pics_flag = 1;

    // SPS changed: reinitialize decoder state for the new sequence.
    if (s->ps.sps != (HEVCSPS*)s->ps.sps_list[s->ps.pps->sps_id]->data) {
        const HEVCSPS *sps = (HEVCSPS*)s->ps.sps_list[s->ps.pps->sps_id]->data;
        const HEVCSPS *last_sps = s->ps.sps;
        enum AVPixelFormat pix_fmt;

        if (last_sps && IS_IRAP(s) && s->nal_unit_type != HEVC_NAL_CRA_NUT) {
            if (sps->width != last_sps->width || sps->height != last_sps->height ||
                sps->temporal_layer[sps->max_sub_layers - 1].max_dec_pic_buffering !=
                last_sps->temporal_layer[last_sps->max_sub_layers - 1].max_dec_pic_buffering)
                sh->no_output_of_prior_pics_flag = 0;
        }
        ff_hevc_clear_refs(s);

        ret = set_sps(s, sps, sps->pix_fmt);
        if (ret < 0)
            return ret;

        pix_fmt = get_format(s, sps);
        if (pix_fmt < 0)
            return pix_fmt;
        s->avctx->pix_fmt = pix_fmt;

        s->seq_decode = (s->seq_decode + 1) & 0xff;
        s->max_ra = INT_MAX;
    }

    // Slice segment address (for non-first segments).
    sh->dependent_slice_segment_flag = 0;
    if (!sh->first_slice_in_pic_flag) {
        int slice_address_length;

        if (s->ps.pps->dependent_slice_segments_enabled_flag)
            sh->dependent_slice_segment_flag = get_bits1(gb);

        slice_address_length = av_ceil_log2(s->ps.sps->ctb_width *
                                            s->ps.sps->ctb_height);
        sh->slice_segment_addr = get_bitsz(gb, slice_address_length);
        if (sh->slice_segment_addr >= s->ps.sps->ctb_width * s->ps.sps->ctb_height) {
            av_log(s->avctx, AV_LOG_ERROR,
                   "Invalid slice segment address: %u.\n",
                   sh->slice_segment_addr);
            return AVERROR_INVALIDDATA;
        }

        if (!sh->dependent_slice_segment_flag) {
            sh->slice_addr = sh->slice_segment_addr;
            s->slice_idx++;
        }
    } else {
        sh->slice_segment_addr = sh->slice_addr = 0;
        s->slice_idx = 0;
        s->slice_initialized = 0;
    }

    // Independent slice segment: parse the full slice header.
    if (!sh->dependent_slice_segment_flag) {
        s->slice_initialized = 0;

        for (i = 0; i < s->ps.pps->num_extra_slice_header_bits; i++)
            skip_bits(gb, 1); // slice_reserved_undetermined_flag[]

        sh->slice_type = get_ue_golomb_long(gb);
        if (!(sh->slice_type == HEVC_SLICE_I ||
              sh->slice_type == HEVC_SLICE_P ||
              sh->slice_type == HEVC_SLICE_B)) {
            av_log(s->avctx, AV_LOG_ERROR, "Unknown slice type: %d.\n",
                   sh->slice_type);
            return AVERROR_INVALIDDATA;
        }
        if (IS_IRAP(s) && sh->slice_type != HEVC_SLICE_I) {
            av_log(s->avctx, AV_LOG_ERROR, "Inter slices in an IRAP frame.\n");
            return AVERROR_INVALIDDATA;
        }

        // when flag is not present, picture is inferred to be output
        sh->pic_output_flag = 1;
        if (s->ps.pps->output_flag_present_flag)
            sh->pic_output_flag = get_bits1(gb);

        if (s->ps.sps->separate_colour_plane_flag)
            sh->colour_plane_id = get_bits(gb, 2);

        // POC and reference picture sets (not present for IDR).
        if (!IS_IDR(s)) {
            int poc, pos;

            sh->pic_order_cnt_lsb = get_bits(gb, s->ps.sps->log2_max_poc_lsb);
            poc = ff_hevc_compute_poc(s->ps.sps, s->pocTid0, sh->pic_order_cnt_lsb, s->nal_unit_type);
            if (!sh->first_slice_in_pic_flag && poc != s->poc) {
                av_log(s->avctx, AV_LOG_WARNING,
                       "Ignoring POC change between slices: %d -> %d\n", s->poc, poc);
                if (s->avctx->err_recognition & AV_EF_EXPLODE)
                    return AVERROR_INVALIDDATA;
                poc = s->poc;
            }
            s->poc = poc;

            sh->short_term_ref_pic_set_sps_flag = get_bits1(gb);
            pos = get_bits_left(gb);
            if (!sh->short_term_ref_pic_set_sps_flag) {
                // RPS coded explicitly in the slice header.
                ret = ff_hevc_decode_short_term_rps(gb, s->avctx, &sh->slice_rps, s->ps.sps, 1);
                if (ret < 0)
                    return ret;

                sh->short_term_rps = &sh->slice_rps;
            } else {
                // RPS selected by index from the SPS-coded sets.
                int numbits, rps_idx;

                if (!s->ps.sps->nb_st_rps) {
                    av_log(s->avctx, AV_LOG_ERROR, "No ref lists in the SPS.\n");
                    return AVERROR_INVALIDDATA;
                }

                numbits = av_ceil_log2(s->ps.sps->nb_st_rps);
                rps_idx = numbits > 0 ? get_bits(gb, numbits) : 0;
                sh->short_term_rps = &s->ps.sps->st_rps[rps_idx];
            }
            sh->short_term_ref_pic_set_size = pos - get_bits_left(gb);

            pos = get_bits_left(gb);
            ret = decode_lt_rps(s, &sh->long_term_rps, gb);
            if (ret < 0) {
                av_log(s->avctx, AV_LOG_WARNING, "Invalid long term RPS.\n");
                if (s->avctx->err_recognition & AV_EF_EXPLODE)
                    return AVERROR_INVALIDDATA;
            }
            sh->long_term_ref_pic_set_size = pos - get_bits_left(gb);

            if (s->ps.sps->sps_temporal_mvp_enabled_flag)
                sh->slice_temporal_mvp_enabled_flag = get_bits1(gb);
            else
                sh->slice_temporal_mvp_enabled_flag = 0;
        } else {
            s->sh.short_term_rps = NULL;
            s->poc = 0;
        }

        /* 8.3.1 */
        if (sh->first_slice_in_pic_flag && s->temporal_id == 0 &&
            s->nal_unit_type != HEVC_NAL_TRAIL_N &&
            s->nal_unit_type != HEVC_NAL_TSA_N &&
            s->nal_unit_type != HEVC_NAL_STSA_N &&
            s->nal_unit_type != HEVC_NAL_RADL_N &&
            s->nal_unit_type != HEVC_NAL_RADL_R &&
            s->nal_unit_type != HEVC_NAL_RASL_N &&
            s->nal_unit_type != HEVC_NAL_RASL_R)
            s->pocTid0 = s->poc;

        // SAO flags (chroma flags share a single coded bit).
        if (s->ps.sps->sao_enabled) {
            sh->slice_sample_adaptive_offset_flag[0] = get_bits1(gb);
            if (s->ps.sps->chroma_format_idc) {
                sh->slice_sample_adaptive_offset_flag[1] =
                sh->slice_sample_adaptive_offset_flag[2] = get_bits1(gb);
            }
        } else {
            sh->slice_sample_adaptive_offset_flag[0] = 0;
            sh->slice_sample_adaptive_offset_flag[1] = 0;
            sh->slice_sample_adaptive_offset_flag[2] = 0;
        }

        // Reference list sizes and modifications (P/B slices only).
        sh->nb_refs[L0] = sh->nb_refs[L1] = 0;
        if (sh->slice_type == HEVC_SLICE_P || sh->slice_type == HEVC_SLICE_B) {
            int nb_refs;

            sh->nb_refs[L0] = s->ps.pps->num_ref_idx_l0_default_active;
            if (sh->slice_type == HEVC_SLICE_B)
                sh->nb_refs[L1] = s->ps.pps->num_ref_idx_l1_default_active;

            if (get_bits1(gb)) { // num_ref_idx_active_override_flag
                sh->nb_refs[L0] = get_ue_golomb_long(gb) + 1;
                if (sh->slice_type == HEVC_SLICE_B)
                    sh->nb_refs[L1] = get_ue_golomb_long(gb) + 1;
            }
            if (sh->nb_refs[L0] > HEVC_MAX_REFS || sh->nb_refs[L1] > HEVC_MAX_REFS) {
                av_log(s->avctx, AV_LOG_ERROR, "Too many refs: %d/%d.\n",
                       sh->nb_refs[L0], sh->nb_refs[L1]);
                return AVERROR_INVALIDDATA;
            }

            sh->rpl_modification_flag[0] = 0;
            sh->rpl_modification_flag[1] = 0;
            nb_refs = ff_hevc_frame_nb_refs(s);
            if (!nb_refs) {
                av_log(s->avctx, AV_LOG_ERROR, "Zero refs for a frame with P or B slices.\n");
                return AVERROR_INVALIDDATA;
            }

            if (s->ps.pps->lists_modification_present_flag && nb_refs > 1) {
                sh->rpl_modification_flag[0] = get_bits1(gb);
                if (sh->rpl_modification_flag[0]) {
                    for (i = 0; i < sh->nb_refs[L0]; i++)
                        sh->list_entry_lx[0][i] = get_bits(gb, av_ceil_log2(nb_refs));
                }

                if (sh->slice_type == HEVC_SLICE_B) {
                    sh->rpl_modification_flag[1] = get_bits1(gb);
                    if (sh->rpl_modification_flag[1] == 1)
                        for (i = 0; i < sh->nb_refs[L1]; i++)
                            sh->list_entry_lx[1][i] = get_bits(gb, av_ceil_log2(nb_refs));
                }
            }

            if (sh->slice_type == HEVC_SLICE_B)
                sh->mvd_l1_zero_flag = get_bits1(gb);

            if (s->ps.pps->cabac_init_present_flag)
                sh->cabac_init_flag = get_bits1(gb);
            else
                sh->cabac_init_flag = 0;

            // Collocated picture for temporal MV prediction.
            sh->collocated_ref_idx = 0;
            if (sh->slice_temporal_mvp_enabled_flag) {
                sh->collocated_list = L0;
                if (sh->slice_type == HEVC_SLICE_B)
                    sh->collocated_list = !get_bits1(gb);

                if (sh->nb_refs[sh->collocated_list] > 1) {
                    sh->collocated_ref_idx = get_ue_golomb_long(gb);
                    if (sh->collocated_ref_idx >= sh->nb_refs[sh->collocated_list]) {
                        av_log(s->avctx, AV_LOG_ERROR,
                               "Invalid collocated_ref_idx: %d.\n",
                               sh->collocated_ref_idx);
                        return AVERROR_INVALIDDATA;
                    }
                }
            }

            if ((s->ps.pps->weighted_pred_flag && sh->slice_type == HEVC_SLICE_P) ||
                (s->ps.pps->weighted_bipred_flag && sh->slice_type == HEVC_SLICE_B)) {
                int ret = pred_weight_table(s, gb);
                if (ret < 0)
                    return ret;
            }

            sh->max_num_merge_cand = 5 - get_ue_golomb_long(gb);
            if (sh->max_num_merge_cand < 1 || sh->max_num_merge_cand > 5) {
                av_log(s->avctx, AV_LOG_ERROR,
                       "Invalid number of merging MVP candidates: %d.\n",
                       sh->max_num_merge_cand);
                return AVERROR_INVALIDDATA;
            }
        }

        // Slice-level QP deltas.
        sh->slice_qp_delta = get_se_golomb(gb);

        if (s->ps.pps->pic_slice_level_chroma_qp_offsets_present_flag) {
            sh->slice_cb_qp_offset = get_se_golomb(gb);
            sh->slice_cr_qp_offset = get_se_golomb(gb);
        } else {
            sh->slice_cb_qp_offset = 0;
            sh->slice_cr_qp_offset = 0;
        }

        if (s->ps.pps->chroma_qp_offset_list_enabled_flag)
            sh->cu_chroma_qp_offset_enabled_flag = get_bits1(gb);
        else
            sh->cu_chroma_qp_offset_enabled_flag = 0;

        // Deblocking filter parameters (slice override or PPS defaults).
        if (s->ps.pps->deblocking_filter_control_present_flag) {
            int deblocking_filter_override_flag = 0;

            if (s->ps.pps->deblocking_filter_override_enabled_flag)
                deblocking_filter_override_flag = get_bits1(gb);

            if (deblocking_filter_override_flag) {
                sh->disable_deblocking_filter_flag = get_bits1(gb);
                if (!sh->disable_deblocking_filter_flag) {
                    int beta_offset_div2 = get_se_golomb(gb);
                    int tc_offset_div2 = get_se_golomb(gb) ;
                    if (beta_offset_div2 < -6 || beta_offset_div2 > 6 ||
                        tc_offset_div2 < -6 || tc_offset_div2 > 6) {
                        av_log(s->avctx, AV_LOG_ERROR,
                               "Invalid deblock filter offsets: %d, %d\n",
                               beta_offset_div2, tc_offset_div2);
                        return AVERROR_INVALIDDATA;
                    }
                    sh->beta_offset = beta_offset_div2 * 2;
                    sh->tc_offset = tc_offset_div2 * 2;
                }
            } else {
                sh->disable_deblocking_filter_flag = s->ps.pps->disable_dbf;
                sh->beta_offset = s->ps.pps->beta_offset;
                sh->tc_offset = s->ps.pps->tc_offset;
            }
        } else {
            sh->disable_deblocking_filter_flag = 0;
            sh->beta_offset = 0;
            sh->tc_offset = 0;
        }

        if (s->ps.pps->seq_loop_filter_across_slices_enabled_flag &&
            (sh->slice_sample_adaptive_offset_flag[0] ||
             sh->slice_sample_adaptive_offset_flag[1] ||
             !sh->disable_deblocking_filter_flag)) {
            sh->slice_loop_filter_across_slices_enabled_flag = get_bits1(gb);
        } else {
            sh->slice_loop_filter_across_slices_enabled_flag = s->ps.pps->seq_loop_filter_across_slices_enabled_flag;
        }
    } else if (!s->slice_initialized) {
        av_log(s->avctx, AV_LOG_ERROR, "Independent slice segment missing.\n");
        return AVERROR_INVALIDDATA;
    }

    // Entry point offsets (tiles / wavefront parallel processing).
    sh->num_entry_point_offsets = 0;
    if (s->ps.pps->tiles_enabled_flag || s->ps.pps->entropy_coding_sync_enabled_flag) {
        unsigned num_entry_point_offsets = get_ue_golomb_long(gb);
        // It would be possible to bound this tighter but this here is simpler
        if (num_entry_point_offsets > get_bits_left(gb)) {
            av_log(s->avctx, AV_LOG_ERROR, "num_entry_point_offsets %d is invalid\n", num_entry_point_offsets);
            return AVERROR_INVALIDDATA;
        }

        sh->num_entry_point_offsets = num_entry_point_offsets;
        if (sh->num_entry_point_offsets > 0) {
            int offset_len = get_ue_golomb_long(gb) + 1;

            if (offset_len < 1 || offset_len > 32) {
                sh->num_entry_point_offsets = 0;
                av_log(s->avctx, AV_LOG_ERROR, "offset_len %d is invalid\n", offset_len);
                return AVERROR_INVALIDDATA;
            }

            av_freep(&sh->entry_point_offset);
            av_freep(&sh->offset);
            av_freep(&sh->size);
            sh->entry_point_offset = av_malloc_array(sh->num_entry_point_offsets, sizeof(unsigned));
            sh->offset = av_malloc_array(sh->num_entry_point_offsets, sizeof(int));
            sh->size = av_malloc_array(sh->num_entry_point_offsets, sizeof(int));
            if (!sh->entry_point_offset || !sh->offset || !sh->size) {
                sh->num_entry_point_offsets = 0;
                av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate memory\n");
                return AVERROR(ENOMEM);
            }
            for (i = 0; i < sh->num_entry_point_offsets; i++) {
                unsigned val = get_bits_long(gb, offset_len);
                sh->entry_point_offset[i] = val + 1; // +1; // +1 to get the size
            }
            if (s->threads_number > 1 && (s->ps.pps->num_tile_rows > 1 || s->ps.pps->num_tile_columns > 1)) {
                s->enable_parallel_tiles = 0; // TODO: you can enable tiles in parallel here
                s->threads_number = 1;
            } else
                s->enable_parallel_tiles = 0;
        } else
            s->enable_parallel_tiles = 0;
    }

    if (s->ps.pps->slice_header_extension_present_flag) {
        unsigned int length = get_ue_golomb_long(gb);
        if (length*8LL > get_bits_left(gb)) {
            av_log(s->avctx, AV_LOG_ERROR, "too many slice_header_extension_data_bytes\n");
            return AVERROR_INVALIDDATA;
        }
        for (i = 0; i < length; i++)
            skip_bits(gb, 8);  // slice_header_extension_data_byte
    }

    // Inferred parameters
    sh->slice_qp = 26U + s->ps.pps->pic_init_qp_minus26 + sh->slice_qp_delta;
    if (sh->slice_qp > 51 ||
        sh->slice_qp < -s->ps.sps->qp_bd_offset) {
        av_log(s->avctx, AV_LOG_ERROR,
               "The slice_qp %d is outside the valid range "
               "[%d, 51].\n",
               sh->slice_qp,
               -s->ps.sps->qp_bd_offset);
        return AVERROR_INVALIDDATA;
    }

    sh->slice_ctb_addr_rs = sh->slice_segment_addr;

    if (!s->sh.slice_ctb_addr_rs && s->sh.dependent_slice_segment_flag) {
        av_log(s->avctx, AV_LOG_ERROR, "Impossible slice segment.\n");
        return AVERROR_INVALIDDATA;
    }

    if (get_bits_left(gb) < 0) {
        av_log(s->avctx, AV_LOG_ERROR,
               "Overread slice header by %d bits\n", -get_bits_left(gb));
        return AVERROR_INVALIDDATA;
    }

    s->HEVClc->first_qp_group = !s->sh.dependent_slice_segment_flag;

    if (!s->ps.pps->cu_qp_delta_enabled_flag)
        s->HEVClc->qp_y = s->sh.slice_qp;

    s->slice_initialized = 1;
    s->HEVClc->tu.cu_qp_offset_cb = 0;
    s->HEVClc->tu.cu_qp_offset_cr = 0;

    return 0;
}
| 0 |
[
"CWE-476"
] |
FFmpeg
|
54655623a82632e7624714d7b2a3e039dc5faa7e
| 267,234,351,156,159,380,000,000,000,000,000,000,000 | 426 |
avcodec/hevcdec: Avoid only partly skiping duplicate first slices
Fixes: NULL pointer dereference and out of array access
Fixes: 13871/clusterfuzz-testcase-minimized-ffmpeg_AV_CODEC_ID_HEVC_fuzzer-5746167087890432
Fixes: 13845/clusterfuzz-testcase-minimized-ffmpeg_AV_CODEC_ID_HEVC_fuzzer-5650370728034304
This also fixes the return code for explode mode
Found-by: continuous fuzzing process https://github.com/google/oss-fuzz/tree/master/projects/ffmpeg
Reviewed-by: James Almer <[email protected]>
Signed-off-by: Michael Niedermayer <[email protected]>
|
/*
 * Resolve @name to a datum of symbol-table kind @sym_index by walking up
 * the AST from @node toward the root, consulting each enclosing scope
 * (blocks, macros) and following blockinherit/call indirections.
 *
 * Returns SEPOL_OK with *datum set on success; SEPOL_ERR (or a helper's
 * error code) when the name is not found before reaching CIL_ROOT.
 */
static int __cil_resolve_name_with_parents(struct cil_tree_node *node, char *name, enum cil_sym_index sym_index, struct cil_symtab_datum **datum)
{
	int rc = SEPOL_ERR;
	symtab_t *symtab = NULL;
	/* Walk upward until the name resolves or the root is reached. */
	while (node != NULL && rc != SEPOL_OK) {
		switch (node->flavor) {
		case CIL_ROOT:
			goto exit;
			break;
		case CIL_BLOCK:
			symtab = &((struct cil_block*)node->data)->symtab[sym_index];
			rc = cil_symtab_get_datum(symtab, name, datum);
			break;
		case CIL_BLOCKINHERIT: {
			struct cil_blockinherit *inherit = node->data;
			/* First try the scopes above the blockinherit itself. */
			rc = __cil_resolve_name_with_parents(node->parent, name, sym_index, datum);
			if (rc != SEPOL_OK) {
				/* Continue search in original block's parent */
				rc = __cil_resolve_name_with_parents(NODE(inherit->block), name, sym_index, datum);
				goto exit;
			}
		}
			break;
		case CIL_MACRO: {
			struct cil_macro *macro = node->data;
			symtab = &macro->symtab[sym_index];
			rc = cil_symtab_get_datum(symtab, name, datum);
		}
			break;
		case CIL_CALL: {
			struct cil_call *call = node->data;
			/* Names used inside a macro body may refer to call arguments. */
			rc = cil_resolve_name_call_args(call, name, sym_index, datum);
			if (rc != SEPOL_OK) {
				/* Continue search in macro's parent */
				rc = __cil_resolve_name_with_parents(NODE(call->macro)->parent, name, sym_index, datum);
			}
		}
			break;
		case CIL_IN:
			/* In block symtabs only exist before resolving the AST */
		case CIL_CONDBLOCK:
			/* Cond block symtabs only exist before resolving the AST */
		default:
			break;
		}
		node = node->parent;
	}
exit:
	return rc;
}
| 0 |
[
"CWE-125"
] |
selinux
|
340f0eb7f3673e8aacaf0a96cbfcd4d12a405521
| 262,423,554,588,427,750,000,000,000,000,000,000,000 | 53 |
libsepol/cil: Check for statements not allowed in optional blocks
While there are some checks for invalid statements in an optional
block when resolving the AST, there are no checks when building the
AST.
OSS-Fuzz found the following policy which caused a null dereference
in cil_tree_get_next_path().
(blockinherit b3)
(sid SID)
(sidorder(SID))
(optional o
(ibpkeycon :(1 0)s)
(block b3
(filecon""block())
(filecon""block())))
The problem is that the blockinherit copies block b3 before
the optional block is disabled. When the optional is disabled,
block b3 is deleted along with everything else in the optional.
Later, when filecon statements with the same path are found an
error message is produced and in trying to find out where the block
was copied from, the reference to the deleted block is used. The
error handling code assumes (rightly) that if something was copied
from a block then that block should still exist.
It is clear that in-statements, blocks, and macros cannot be in an
optional, because that allows nodes to be copied from the optional
block to somewhere outside even though the optional could be disabled
later. When optionals are disabled the AST is reset and the
resolution is restarted at the point of resolving macro calls, so
anything resolved before macro calls will never be re-resolved.
This includes tunableifs, in-statements, blockinherits,
blockabstracts, and macro definitions. Tunable declarations also
cannot be in an optional block because they are needed to resolve
tunableifs. It should be fine to allow blockinherit statements in
an optional, because that is copying nodes from outside the optional
to the optional and if the optional is later disabled, everything
will be deleted anyway.
Check and quit with an error if a tunable declaration, in-statement,
block, blockabstract, or macro definition is found within an
optional when either building or resolving the AST.
Signed-off-by: James Carter <[email protected]>
|
format_REG_MOVE(const struct ofpact_reg_move *rm,
                const struct ofpact_format_params *fp)
{
    /* Formatting of a register-move action is delegated entirely to the
     * shared NXM helper, which appends to the caller-supplied output. */
    nxm_format_reg_move(rm, fp->s);
}
| 0 |
[
"CWE-416"
] |
ovs
|
77cccc74deede443e8b9102299efc869a52b65b2
| 136,868,024,792,913,340,000,000,000,000,000,000,000 | 5 |
ofp-actions: Fix use-after-free while decoding RAW_ENCAP.
While decoding RAW_ENCAP action, decode_ed_prop() might re-allocate
ofpbuf if there is no enough space left. However, function
'decode_NXAST_RAW_ENCAP' continues to use old pointer to 'encap'
structure leading to write-after-free and incorrect decoding.
==3549105==ERROR: AddressSanitizer: heap-use-after-free on address
0x60600000011a at pc 0x0000005f6cc6 bp 0x7ffc3a2d4410 sp 0x7ffc3a2d4408
WRITE of size 2 at 0x60600000011a thread T0
#0 0x5f6cc5 in decode_NXAST_RAW_ENCAP lib/ofp-actions.c:4461:20
#1 0x5f0551 in ofpact_decode ./lib/ofp-actions.inc2:4777:16
#2 0x5ed17c in ofpacts_decode lib/ofp-actions.c:7752:21
#3 0x5eba9a in ofpacts_pull_openflow_actions__ lib/ofp-actions.c:7791:13
#4 0x5eb9fc in ofpacts_pull_openflow_actions lib/ofp-actions.c:7835:12
#5 0x64bb8b in ofputil_decode_packet_out lib/ofp-packet.c:1113:17
#6 0x65b6f4 in ofp_print_packet_out lib/ofp-print.c:148:13
#7 0x659e3f in ofp_to_string__ lib/ofp-print.c:1029:16
#8 0x659b24 in ofp_to_string lib/ofp-print.c:1244:21
#9 0x65a28c in ofp_print lib/ofp-print.c:1288:28
#10 0x540d11 in ofctl_ofp_parse utilities/ovs-ofctl.c:2814:9
#11 0x564228 in ovs_cmdl_run_command__ lib/command-line.c:247:17
#12 0x56408a in ovs_cmdl_run_command lib/command-line.c:278:5
#13 0x5391ae in main utilities/ovs-ofctl.c:179:9
#14 0x7f6911ce9081 in __libc_start_main (/lib64/libc.so.6+0x27081)
#15 0x461fed in _start (utilities/ovs-ofctl+0x461fed)
Fix that by getting a new pointer before using.
Credit to OSS-Fuzz.
Fuzzer regression test will fail only with AddressSanitizer enabled.
Reported-at: https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=27851
Fixes: f839892a206a ("OF support and translation of generic encap and decap")
Acked-by: William Tu <[email protected]>
Signed-off-by: Ilya Maximets <[email protected]>
|
mysql_shutdown(MYSQL *mysql, enum mysql_enum_shutdown_level shutdown_level)
{
  /*
    Ask the server to shut down.  The requested severity travels as a
    single-byte payload of a COM_SHUTDOWN command; simple_command()
    handles the usual OK/error reply.
  */
  uchar payload[1];
  DBUG_ENTER("mysql_shutdown");
  payload[0]= (uchar) shutdown_level;
  DBUG_RETURN(simple_command(mysql, COM_SHUTDOWN, payload, 1, 0));
}
| 0 |
[] |
mysql-server
|
3d8134d2c9b74bc8883ffe2ef59c168361223837
| 24,592,990,793,962,220,000,000,000,000,000,000,000 | 7 |
Bug#25988681: USE-AFTER-FREE IN MYSQL_STMT_CLOSE()
Description: If mysql_stmt_close() encountered error,
it recorded error in prepared statement
but then frees memory assigned to prepared
statement. If mysql_stmt_error() is used
to get error information, it will result
into use after free.
In all cases where mysql_stmt_close() can
fail, error would have been set by
cli_advanced_command in MYSQL structure.
Solution: Don't copy error from MYSQL using set_stmt_errmsg.
There is no automated way to test the fix since
it is in mysql_stmt_close() which does not expect
any reply from server.
Reviewed-By: Georgi Kodinov <[email protected]>
Reviewed-By: Ramil Kalimullin <[email protected]>
|
static loff_t max_file_blocks(void)
{
	/*
	 * A file addresses its data blocks through:
	 *  - the direct pointers embedded in the inode itself (minus the
	 *    slots reserved for inline xattrs),
	 *  - two direct node blocks,
	 *  - two indirect node blocks,
	 *  - one double-indirect node block.
	 */
	loff_t in_inode = DEF_ADDRS_PER_INODE - F2FS_INLINE_XATTR_ADDRS;
	loff_t per_direct = ADDRS_PER_BLOCK;
	loff_t per_indirect = per_direct * NIDS_PER_BLOCK;
	loff_t per_dindirect = per_indirect * NIDS_PER_BLOCK;

	return in_inode +
	       2 * per_direct +
	       2 * per_indirect +
	       per_dindirect;
}
| 0 |
[
"CWE-284"
] |
linux
|
b9dd46188edc2f0d1f37328637860bb65a771124
| 280,192,901,278,989,970,000,000,000,000,000,000,000 | 18 |
f2fs: sanity check segment count
F2FS uses 4 bytes to represent block address. As a result, supported
size of disk is 16 TB and it equals to 16 * 1024 * 1024 / 2 segments.
Signed-off-by: Jin Qian <[email protected]>
Signed-off-by: Jaegeuk Kim <[email protected]>
|
void audio_sample_entry_del(GF_Box *s)
{
	GF_MPEGAudioSampleEntryBox *entry = (GF_MPEGAudioSampleEntryBox *)s;
	if (!entry) return;
	/* Release the common sample-entry state first, then whichever
	 * codec-specific child objects this entry happens to own. */
	gf_isom_sample_entry_predestroy((GF_SampleEntryBox *)s);
	if (entry->esd) gf_isom_box_del((GF_Box *)entry->esd);
	if (entry->slc) gf_odf_desc_del((GF_Descriptor *)entry->slc);
	if (entry->cfg_ac3) gf_isom_box_del((GF_Box *)entry->cfg_ac3);
	if (entry->cfg_3gpp) gf_isom_box_del((GF_Box *)entry->cfg_3gpp);
	gf_free(entry);
}
| 0 |
[
"CWE-400",
"CWE-401"
] |
gpac
|
d2371b4b204f0a3c0af51ad4e9b491144dd1225c
| 327,676,299,050,474,860,000,000,000,000,000,000,000 | 12 |
prevent dref memleak on invalid input (#1183)
|
/*
 * Grow the state->memtuples array, if possible, within the memory budget.
 *
 * Returns true if the array was enlarged (state->memtupsize and
 * state->memtuples updated, FREEMEM/USEMEM accounting adjusted); false
 * when no further growth is possible, in which case state->growmemtuples
 * is cleared so we never try again.
 */
grow_memtuples(Tuplesortstate *state)
{
	int			newmemtupsize;
	int			memtupsize = state->memtupsize;
	int64		memNowUsed = state->allowedMem - state->availMem;
	/* Forget it if we've already maxed out memtuples, per comment above */
	if (!state->growmemtuples)
		return false;
	/* Select new value of memtupsize */
	if (memNowUsed <= state->availMem)
	{
		/*
		 * We've used no more than half of allowedMem; double our usage,
		 * clamping at INT_MAX tuples.
		 */
		if (memtupsize < INT_MAX / 2)
			newmemtupsize = memtupsize * 2;
		else
		{
			newmemtupsize = INT_MAX;
			state->growmemtuples = false;
		}
	}
	else
	{
		/*
		 * This will be the last increment of memtupsize. Abandon doubling
		 * strategy and instead increase as much as we safely can.
		 *
		 * To stay within allowedMem, we can't increase memtupsize by more
		 * than availMem / sizeof(SortTuple) elements. In practice, we want
		 * to increase it by considerably less, because we need to leave some
		 * space for the tuples to which the new array slots will refer. We
		 * assume the new tuples will be about the same size as the tuples
		 * we've already seen, and thus we can extrapolate from the space
		 * consumption so far to estimate an appropriate new size for the
		 * memtuples array. The optimal value might be higher or lower than
		 * this estimate, but it's hard to know that in advance. We again
		 * clamp at INT_MAX tuples.
		 *
		 * This calculation is safe against enlarging the array so much that
		 * LACKMEM becomes true, because the memory currently used includes
		 * the present array; thus, there would be enough allowedMem for the
		 * new array elements even if no other memory were currently used.
		 *
		 * We do the arithmetic in float8, because otherwise the product of
		 * memtupsize and allowedMem could overflow. Any inaccuracy in the
		 * result should be insignificant; but even if we computed a
		 * completely insane result, the checks below will prevent anything
		 * really bad from happening.
		 */
		double		grow_ratio;
		grow_ratio = (double) state->allowedMem / (double) memNowUsed;
		if (memtupsize * grow_ratio < INT_MAX)
			newmemtupsize = (int) (memtupsize * grow_ratio);
		else
			newmemtupsize = INT_MAX;
		/* We won't make any further enlargement attempts */
		state->growmemtuples = false;
	}
	/* Must enlarge array by at least one element, else report failure */
	if (newmemtupsize <= memtupsize)
		goto noalloc;
	/*
	 * On a 32-bit machine, allowedMem could exceed MaxAllocHugeSize. Clamp
	 * to ensure our request won't be rejected. Note that we can easily
	 * exhaust address space before facing this outcome. (This is presently
	 * impossible due to guc.c's MAX_KILOBYTES limitation on work_mem, but
	 * don't rely on that at this distance.)
	 */
	if ((Size) newmemtupsize >= MaxAllocHugeSize / sizeof(SortTuple))
	{
		newmemtupsize = (int) (MaxAllocHugeSize / sizeof(SortTuple));
		state->growmemtuples = false; /* can't grow any more */
	}
	/*
	 * We need to be sure that we do not cause LACKMEM to become true, else
	 * the space management algorithm will go nuts. The code above should
	 * never generate a dangerous request, but to be safe, check explicitly
	 * that the array growth fits within availMem. (We could still cause
	 * LACKMEM if the memory chunk overhead associated with the memtuples
	 * array were to increase. That shouldn't happen with any sane value of
	 * allowedMem, because at any array size large enough to risk LACKMEM,
	 * palloc would be treating both old and new arrays as separate chunks.
	 * But we'll check LACKMEM explicitly below just in case.)
	 */
	if (state->availMem < (int64) ((newmemtupsize - memtupsize) * sizeof(SortTuple)))
		goto noalloc;
	/* OK, do it */
	FREEMEM(state, GetMemoryChunkSpace(state->memtuples));
	state->memtupsize = newmemtupsize;
	state->memtuples = (SortTuple *)
		repalloc_huge(state->memtuples,
					  state->memtupsize * sizeof(SortTuple));
	USEMEM(state, GetMemoryChunkSpace(state->memtuples));
	if (LACKMEM(state))
		elog(ERROR, "unexpected out-of-memory situation during sort");
	return true;
noalloc:
	/* If for any reason we didn't realloc, shut off future attempts */
	state->growmemtuples = false;
	return false;
}
| 0 |
[
"CWE-209"
] |
postgres
|
804b6b6db4dcfc590a468e7be390738f9f7755fb
| 222,440,715,899,160,960,000,000,000,000,000,000,000 | 112 |
Fix column-privilege leak in error-message paths
While building error messages to return to the user,
BuildIndexValueDescription, ExecBuildSlotValueDescription and
ri_ReportViolation would happily include the entire key or entire row in
the result returned to the user, even if the user didn't have access to
view all of the columns being included.
Instead, include only those columns which the user is providing or which
the user has select rights on. If the user does not have any rights
to view the table or any of the columns involved then no detail is
provided and a NULL value is returned from BuildIndexValueDescription
and ExecBuildSlotValueDescription. Note that, for key cases, the user
must have access to all of the columns for the key to be shown; a
partial key will not be returned.
Further, in master only, do not return any data for cases where row
security is enabled on the relation and row security should be applied
for the user. This required a bit of refactoring and moving of things
around related to RLS- note the addition of utils/misc/rls.c.
Back-patch all the way, as column-level privileges are now in all
supported versions.
This has been assigned CVE-2014-8161, but since the issue and the patch
have already been publicized on pgsql-hackers, there's no point in trying
to hide this commit.
|
static void bmdma_reset(IDEDMA *dma)
{
    BMDMAState *s = DO_UPCAST(BMDMAState, dma, dma);

#ifdef DEBUG_IDE
    printf("ide: dma_reset\n");
#endif
    /* Stop any transfer that is still in flight, then clear every
     * register and internal bookkeeping field back to its reset value. */
    bmdma_cancel(s);
    s->cmd = 0;
    s->status = 0;
    s->addr = 0;
    s->cur_addr = 0;
    s->cur_prd_last = 0;
    s->cur_prd_addr = 0;
    s->cur_prd_len = 0;
    s->sector_num = 0;
    s->nsector = 0;
}
| 0 |
[
"CWE-399"
] |
qemu
|
3251bdcf1c67427d964517053c3d185b46e618e8
| 128,056,152,915,016,070,000,000,000,000,000,000,000 | 18 |
ide: Correct handling of malformed/short PRDTs
This impacts both BMDMA and AHCI HBA interfaces for IDE.
Currently, we confuse the difference between a PRDT having
"0 bytes" and a PRDT having "0 complete sectors."
When we receive an incomplete sector, inconsistent error checking
leads to an infinite loop wherein the call succeeds, but it
didn't give us enough bytes -- leading us to re-call the
DMA chain over and over again. This leads to, in the BMDMA case,
leaked memory for short PRDTs, and infinite loops and resource
usage in the AHCI case.
The .prepare_buf() callback is reworked to return the number of
bytes that it successfully prepared. 0 is a valid, non-error
answer that means the table was empty and described no bytes.
-1 indicates an error.
Our current implementation uses the io_buffer in IDEState to
ultimately describe the size of a prepared scatter-gather list.
Even though the AHCI PRDT/SGList can be as large as 256GiB, the
AHCI command header limits transactions to just 4GiB. ATA8-ACS3,
however, defines the largest transaction to be an LBA48 command
that transfers 65,536 sectors. With a 512 byte sector size, this
is just 32MiB.
Since our current state structures use the int type to describe
the size of the buffer, and this state is migrated as int32, we
are limited to describing 2GiB buffer sizes unless we change the
migration protocol.
For this reason, this patch begins to unify the assertions in the
IDE pathways that the scatter-gather list provided by either the
AHCI PRDT or the PCI BMDMA PRDs can only describe, at a maximum,
2GiB. This should be resilient enough unless we need a sector
size that exceeds 32KiB.
Further, the likelihood of any guest operating system actually
attempting to transfer this much data in a single operation is
very slim.
To this end, the IDEState variables have been updated to more
explicitly clarify our maximum supported size. Callers to the
prepare_buf callback have been reworked to understand the new
return code, and all versions of the prepare_buf callback have
been adjusted accordingly.
Lastly, the ahci_populate_sglist helper, relied upon by the
AHCI implementation of .prepare_buf() as well as the PCI
implementation of the callback have had overflow assertions
added to help make clear the reasonings behind the various
type changes.
[Added %d -> %"PRId64" fix John sent because off_pos changed from int to
int64_t.
--Stefan]
Signed-off-by: John Snow <[email protected]>
Reviewed-by: Paolo Bonzini <[email protected]>
Message-id: [email protected]
Signed-off-by: Stefan Hajnoczi <[email protected]>
|
static int DecodeSubjKeyId(const byte* input, int sz, DecodedCert* cert)
{
    word32 inOutIdx = 0;
    int len = 0;
    int ret;

    WOLFSSL_ENTER("DecodeSubjKeyId");

    if (sz <= 0)
        return ASN_PARSE_E;

    /* The extension payload is an OCTET STRING holding the key id. */
    ret = GetOctetString(input, &inOutIdx, &len, sz);
    if (ret < 0)
        return ret;

#if defined(OPENSSL_EXTRA) || defined(OPENSSL_EXTRA_X509_SMALL)
    /* Remember where the raw id lives for later X509 API access. */
    cert->extSubjKeyIdSrc = &input[inOutIdx];
    cert->extSubjKeyIdSz = len;
#endif /* OPENSSL_EXTRA */

    /* Store the id directly when it already has the expected size;
     * otherwise keep a fixed-size hash of it instead. */
    if (len != KEYID_SIZE) {
        ret = CalcHashId(input + inOutIdx, len, cert->extSubjKeyId);
    }
    else {
        XMEMCPY(cert->extSubjKeyId, input + inOutIdx, len);
    }

    return ret;
}
| 0 |
[
"CWE-125",
"CWE-345"
] |
wolfssl
|
f93083be72a3b3d956b52a7ec13f307a27b6e093
| 41,921,011,292,867,740,000,000,000,000,000,000,000 | 27 |
OCSP: improve handling of OCSP no check extension
|
static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
			    int addr_len)
{
	struct sock *sk = sock->sk;
	char name[sizeof(uaddr->sa_data) + 1];
	struct net_device *dev;
	int err = -ENODEV;

	/*
	 *	Check legality
	 */

	if (addr_len != sizeof(struct sockaddr))
		return -EINVAL;
	/*
	 * uaddr->sa_data comes from user space and is not guaranteed to be
	 * NUL-terminated.  strlcpy() would keep reading the source until it
	 * finds a NUL, i.e. potentially past the 14-byte sa_data field into
	 * uninitialized kernel stack.  Copy exactly the field and terminate
	 * explicitly instead.
	 */
	memcpy(name, uaddr->sa_data, sizeof(uaddr->sa_data));
	name[sizeof(uaddr->sa_data)] = 0;

	dev = dev_get_by_name(sock_net(sk), name);
	if (dev) {
		err = packet_do_bind(sk, dev, pkt_sk(sk)->num);
		dev_put(dev);	/* dev_get_by_name() took a reference */
	}
	return err;
}
| 0 |
[
"CWE-909"
] |
linux-2.6
|
67286640f638f5ad41a946b9a3dc75327950248f
| 93,856,861,274,840,790,000,000,000,000,000,000,000 | 23 |
net: packet: fix information leak to userland
packet_getname_spkt() doesn't initialize all members of sa_data field of
sockaddr struct if strlen(dev->name) < 13. This structure is then copied
to userland. It leads to leaking of contents of kernel stack memory.
We have to fully fill sa_data with strncpy() instead of strlcpy().
The same with packet_getname(): it doesn't initialize sll_pkttype field of
sockaddr_ll. Set it to zero.
Signed-off-by: Vasiliy Kulikov <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
/*
  Check and/or repair a single MyISAM table.

  param     check/repair settings and accumulated statistics; the T_*
            bits in param->testflag select which operations to perform
  filename  path of the table to process (used in all messages)

  Returns 0 on success, non-zero on error.  May recreate the table,
  rewrite its data/index files, and rename temporary files over the
  originals when a repair produced new ones.
*/
static int myisamchk(MI_CHECK *param, char * filename)
{
  int error,lock_type,recreate;
  int rep_quick= param->testflag & (T_QUICK | T_FORCE_UNIQUENESS);
  MI_INFO *info;
  File datafile;
  char llbuff[22],llbuff2[22];
  my_bool state_updated=0;
  MYISAM_SHARE *share;
  DBUG_ENTER("myisamchk");
  param->out_flag=error=param->warning_printed=param->error_printed=
    recreate=0;
  datafile=0;
  param->isam_file_name=filename; /* For error messages */
  if (!(info=mi_open(filename,
                     (param->testflag & (T_DESCRIPT | T_READONLY)) ?
                     O_RDONLY : O_RDWR,
                     HA_OPEN_FOR_REPAIR |
                     ((param->testflag & T_WAIT_FOREVER) ?
                      HA_OPEN_WAIT_IF_LOCKED :
                      (param->testflag & T_DESCRIPT) ?
                      HA_OPEN_IGNORE_IF_LOCKED : HA_OPEN_ABORT_IF_LOCKED))))
  {
    /* Avoid twice printing of isam file name */
    param->error_printed=1;
    /* Map the open failure to a user-level diagnostic. */
    switch (my_errno) {
    case HA_ERR_CRASHED:
      mi_check_print_error(param,"'%s' doesn't have a correct index definition. You need to recreate it before you can do a repair",filename);
      break;
    case HA_ERR_NOT_A_TABLE:
      mi_check_print_error(param,"'%s' is not a MyISAM-table",filename);
      break;
    case HA_ERR_CRASHED_ON_USAGE:
      mi_check_print_error(param,"'%s' is marked as crashed",filename);
      break;
    case HA_ERR_CRASHED_ON_REPAIR:
      mi_check_print_error(param,"'%s' is marked as crashed after last repair",filename);
      break;
    case HA_ERR_OLD_FILE:
      mi_check_print_error(param,"'%s' is an old type of MyISAM-table", filename);
      break;
    case HA_ERR_END_OF_FILE:
      mi_check_print_error(param,"Couldn't read complete header from '%s'", filename);
      break;
    case EAGAIN:
      mi_check_print_error(param,"'%s' is locked. Use -w to wait until unlocked",filename);
      break;
    case ENOENT:
      mi_check_print_error(param,"File '%s' doesn't exist",filename);
      break;
    case EACCES:
      mi_check_print_error(param,"You don't have permission to use '%s'",filename);
      break;
    default:
      mi_check_print_error(param,"%d when opening MyISAM-table '%s'",
                           my_errno,filename);
      break;
    }
    DBUG_RETURN(1);
  }
  share=info->s;
  share->options&= ~HA_OPTION_READ_ONLY_DATA; /* We are modifying it */
  share->tot_locks-= share->r_locks;
  share->r_locks=0;
  /*
    Skip the checking of the file if:
    We are using --fast and the table is closed properly
    We are using --check-only-changed-tables and the table hasn't changed
  */
  if (param->testflag & (T_FAST | T_CHECK_ONLY_CHANGED))
  {
    my_bool need_to_check= mi_is_crashed(info) || share->state.open_count != 0;
    if ((param->testflag & (T_REP_ANY | T_SORT_RECORDS)) &&
        ((share->state.changed & (STATE_CHANGED | STATE_CRASHED |
                                  STATE_CRASHED_ON_REPAIR) ||
          !(param->testflag & T_CHECK_ONLY_CHANGED))))
      need_to_check=1;
    if (info->s->base.keys && info->state->records)
    {
      if ((param->testflag & T_STATISTICS) &&
          (share->state.changed & STATE_NOT_ANALYZED))
        need_to_check=1;
      if ((param->testflag & T_SORT_INDEX) &&
          (share->state.changed & STATE_NOT_SORTED_PAGES))
        need_to_check=1;
      if ((param->testflag & T_REP_BY_SORT) &&
          (share->state.changed & STATE_NOT_OPTIMIZED_KEYS))
        need_to_check=1;
    }
    if ((param->testflag & T_CHECK_ONLY_CHANGED) &&
        (share->state.changed & (STATE_CHANGED | STATE_CRASHED |
                                 STATE_CRASHED_ON_REPAIR)))
      need_to_check=1;
    if (!need_to_check)
    {
      if (!(param->testflag & T_SILENT) || param->testflag & T_INFO)
        printf("MyISAM file: %s is already checked\n",filename);
      if (mi_close(info))
      {
        mi_check_print_error(param,"%d when closing MyISAM-table '%s'",
                             my_errno,filename);
        DBUG_RETURN(1);
      }
      DBUG_RETURN(0);
    }
  }
  /*
    Recreate the table when its on-disk format is incompatible with the
    current build/settings: old header sizes or file version, a different
    collation or key-block size, disabled keys, unpacking of a compressed
    table requested, or the file being almost full.
  */
  if ((param->testflag & (T_REP_ANY | T_STATISTICS |
                          T_SORT_RECORDS | T_SORT_INDEX)) &&
      (((param->testflag & T_UNPACK) &&
        share->data_file_type == COMPRESSED_RECORD) ||
       mi_uint2korr(share->state.header.state_info_length) !=
       MI_STATE_INFO_SIZE ||
       mi_uint2korr(share->state.header.base_info_length) !=
       MI_BASE_INFO_SIZE ||
       mi_is_any_intersect_keys_active(param->keys_in_use, share->base.keys,
                                       ~share->state.key_map) ||
       test_if_almost_full(info) ||
       info->s->state.header.file_version[3] != myisam_file_magic[3] ||
       (set_collation &&
        set_collation->number != share->state.header.language) ||
       myisam_block_size != MI_KEY_BLOCK_LENGTH))
  {
    if (set_collation)
      param->language= set_collation->number;
    if (recreate_table(param, &info,filename))
    {
      (void) fprintf(stderr,
                     "MyISAM-table '%s' is not fixed because of errors\n",
                     filename);
      return(-1);
    }
    recreate=1;
    if (!(param->testflag & T_REP_ANY))
    {
      param->testflag|=T_REP_BY_SORT; /* if only STATISTICS */
      if (!(param->testflag & T_SILENT))
        printf("- '%s' has old table-format. Recreating index\n",filename);
      rep_quick|=T_QUICK;
    }
    share=info->s;
    share->tot_locks-= share->r_locks;
    share->r_locks=0;
  }
  /* Describe-only mode: accumulate totals and print the description. */
  if (param->testflag & T_DESCRIPT)
  {
    param->total_files++;
    param->total_records+=info->state->records;
    param->total_deleted+=info->state->del;
    descript(param, info, filename);
  }
  else
  {
    if (!stopwords_inited++)
      ft_init_stopwords();
    if (!(param->testflag & T_READONLY))
      lock_type = F_WRLCK; /* table is changed */
    else
      lock_type= F_RDLCK;
    if (info->lock_type == F_RDLCK)
      info->lock_type=F_UNLCK; /* Read only table */
    if (_mi_readinfo(info,lock_type,0))
    {
      mi_check_print_error(param,"Can't lock indexfile of '%s', error: %d",
                           filename,my_errno);
      param->error_printed=0;
      goto end2;
    }
    /*
      _mi_readinfo() has locked the table.
      We mark the table as locked (without doing file locks) to be able to
      use functions that only works on locked tables (like row caching).
    */
    mi_lock_database(info, F_EXTRA_LCK);
    datafile=info->dfile;
    /* Repair / record-sort / index-sort operations. */
    if (param->testflag & (T_REP_ANY | T_SORT_RECORDS | T_SORT_INDEX))
    {
      if (param->testflag & T_REP_ANY)
      {
        ulonglong tmp=share->state.key_map;
        mi_copy_keys_active(share->state.key_map, share->base.keys,
                            param->keys_in_use);
        if (tmp != share->state.key_map)
          info->update|=HA_STATE_CHANGED;
      }
      if (rep_quick && chk_del(param, info, param->testflag & ~T_VERBOSE))
      {
        if (param->testflag & T_FORCE_CREATE)
        {
          rep_quick=0;
          mi_check_print_info(param,"Creating new data file\n");
        }
        else
        {
          error=1;
          mi_check_print_error(param,
                               "Quick-recover aborted; Run recovery without switch 'q'");
        }
      }
      if (!error)
      {
        /* Prefer the (faster) sort-based or parallel repair when usable. */
        if ((param->testflag & (T_REP_BY_SORT | T_REP_PARALLEL)) &&
            (mi_is_any_key_active(share->state.key_map) ||
             (rep_quick && !param->keys_in_use && !recreate)) &&
            mi_test_if_sort_rep(info, info->state->records,
                                info->s->state.key_map,
                                param->force_sort))
        {
          if (param->testflag & T_REP_BY_SORT)
            error=mi_repair_by_sort(param,info,filename,rep_quick);
          else
            error=mi_repair_parallel(param,info,filename,rep_quick);
          state_updated=1;
        }
        else if (param->testflag & T_REP_ANY)
          error=mi_repair(param, info,filename,rep_quick);
      }
      if (!error && param->testflag & T_SORT_RECORDS)
      {
        /*
          The data file is nowadays reopened in the repair code so we should
          soon remove the following reopen-code
        */
#ifndef TO_BE_REMOVED
        if (param->out_flag & O_NEW_DATA)
        { /* Change temp file to org file */
          (void) my_close(info->dfile,MYF(MY_WME)); /* Close new file */
          error|=change_to_newfile(filename, MI_NAME_DEXT, DATA_TMP_EXT, MYF(0));
          if (mi_open_datafile(info,info->s, NULL, -1))
            error=1;
          param->out_flag&= ~O_NEW_DATA; /* We are using new datafile */
          param->read_cache.file=info->dfile;
        }
#endif
        if (! error)
        {
          uint key;
          /*
            We can't update the index in mi_sort_records if we have a
            prefix compressed or fulltext index
          */
          my_bool update_index=1;
          for (key=0 ; key < share->base.keys; key++)
            if (share->keyinfo[key].flag & (HA_BINARY_PACK_KEY|HA_FULLTEXT))
              update_index=0;
          error=mi_sort_records(param,info,filename,param->opt_sort_key,
                                /* what is the following parameter for ? */
                                (my_bool) !(param->testflag & T_REP),
                                update_index);
          datafile=info->dfile; /* This is now locked */
          if (!error && !update_index)
          {
            if (param->verbose)
              puts("Table had a compressed index; We must now recreate the index");
            error=mi_repair_by_sort(param,info,filename,1);
          }
        }
      }
      if (!error && param->testflag & T_SORT_INDEX)
        error=mi_sort_index(param,info,filename);
      if (!error)
        share->state.changed&= ~(STATE_CHANGED | STATE_CRASHED |
                                 STATE_CRASHED_ON_REPAIR);
      else
        mi_mark_crashed(info);
    }
    /* Plain check (no repair requested). */
    else if ((param->testflag & T_CHECK) || !(param->testflag & T_AUTO_INC))
    {
      if (!(param->testflag & T_SILENT) || param->testflag & T_INFO)
        printf("Checking MyISAM file: %s\n",filename);
      if (!(param->testflag & T_SILENT))
        printf("Data records: %7s Deleted blocks: %7s\n",
               llstr(info->state->records,llbuff),
               llstr(info->state->del,llbuff2));
      error =chk_status(param,info);
      mi_intersect_keys_active(share->state.key_map, param->keys_in_use);
      error =chk_size(param,info);
      if (!error || !(param->testflag & (T_FAST | T_FORCE_CREATE)))
        error|=chk_del(param, info,param->testflag);
      if ((!error || (!(param->testflag & (T_FAST | T_FORCE_CREATE)) &&
                      !param->start_check_pos)))
      {
        error|=chk_key(param, info);
        if (!error && (param->testflag & (T_STATISTICS | T_AUTO_INC)))
          error=update_state_info(param, info,
                                  ((param->testflag & T_STATISTICS) ?
                                   UPDATE_STAT : 0) |
                                  ((param->testflag & T_AUTO_INC) ?
                                   UPDATE_AUTO_INC : 0));
      }
      if ((!rep_quick && !error) ||
          !(param->testflag & (T_FAST | T_FORCE_CREATE)))
      {
        if (param->testflag & (T_EXTEND | T_MEDIUM))
          (void) init_key_cache(dflt_key_cache,opt_key_cache_block_size,
                                param->use_buffers, 0, 0);
        (void) init_io_cache(&param->read_cache,datafile,
                             (uint) param->read_buffer_length,
                             READ_CACHE,
                             (param->start_check_pos ?
                              param->start_check_pos :
                              share->pack.header_length),
                             1,
                             MYF(MY_WME));
        lock_memory(param);
        if ((info->s->options & (HA_OPTION_PACK_RECORD |
                                 HA_OPTION_COMPRESS_RECORD)) ||
            (param->testflag & (T_EXTEND | T_MEDIUM)))
          error|=chk_data_link(param, info, param->testflag & T_EXTEND);
        error|=flush_blocks(param, share->key_cache, share->kfile);
        (void) end_io_cache(&param->read_cache);
      }
      if (!error)
      {
        if ((share->state.changed & STATE_CHANGED) &&
            (param->testflag & T_UPDATE_STATE))
          info->update|=HA_STATE_CHANGED | HA_STATE_ROW_CHANGED;
        share->state.changed&= ~(STATE_CHANGED | STATE_CRASHED |
                                 STATE_CRASHED_ON_REPAIR);
      }
      else if (!mi_is_crashed(info) &&
               (param->testflag & T_UPDATE_STATE))
      { /* Mark crashed */
        mi_mark_crashed(info);
        info->update|=HA_STATE_CHANGED | HA_STATE_ROW_CHANGED;
      }
    }
  }
  if ((param->testflag & T_AUTO_INC) ||
      ((param->testflag & T_REP_ANY) && info->s->base.auto_key))
    update_auto_increment_key(param, info,
                              (my_bool) !test(param->testflag & T_AUTO_INC));
  /* Flush updated state back to the index file and release locks. */
  if (!(param->testflag & T_DESCRIPT))
  {
    if (info->update & HA_STATE_CHANGED && ! (param->testflag & T_READONLY))
      error|=update_state_info(param, info,
                               UPDATE_OPEN_COUNT |
                               (((param->testflag & T_REP_ANY) ?
                                 UPDATE_TIME : 0) |
                                (state_updated ? UPDATE_STAT : 0) |
                                ((param->testflag & T_SORT_RECORDS) ?
                                 UPDATE_SORT : 0)));
    (void) lock_file(param, share->kfile,0L,F_UNLCK,"indexfile",filename);
    info->update&= ~HA_STATE_CHANGED;
  }
  mi_lock_database(info, F_UNLCK);
end2:
  if (mi_close(info))
  {
    mi_check_print_error(param,"%d when closing MyISAM-table '%s'",my_errno,filename);
    DBUG_RETURN(1);
  }
  /* On success, move temporary repair output over the original files. */
  if (error == 0)
  {
    if (param->out_flag & O_NEW_DATA)
      error|=change_to_newfile(filename,MI_NAME_DEXT,DATA_TMP_EXT,
                               ((param->testflag & T_BACKUP_DATA) ?
                                MYF(MY_REDEL_MAKE_BACKUP) : MYF(0)));
    if (param->out_flag & O_NEW_INDEX)
      error|=change_to_newfile(filename, MI_NAME_IEXT, INDEX_TMP_EXT, MYF(0));
  }
  (void) fflush(stdout); (void) fflush(stderr);
  if (param->error_printed)
  {
    if (param->testflag & (T_REP_ANY | T_SORT_RECORDS | T_SORT_INDEX))
    {
      (void) fprintf(stderr,
                     "MyISAM-table '%s' is not fixed because of errors\n",
                     filename);
      if (param->testflag & T_REP_ANY)
        (void) fprintf(stderr,
                       "Try fixing it by using the --safe-recover (-o), the --force (-f) option or by not using the --quick (-q) flag\n");
    }
    else if (!(param->error_printed & 2) &&
             !(param->testflag & T_FORCE_CREATE))
      (void) fprintf(stderr,
                     "MyISAM-table '%s' is corrupted\nFix it using switch \"-r\" or \"-o\"\n",
                     filename);
  }
  else if (param->warning_printed &&
           ! (param->testflag & (T_REP_ANY | T_SORT_RECORDS | T_SORT_INDEX |
                                 T_FORCE_CREATE)))
    (void) fprintf(stderr, "MyISAM-table '%s' is usable but should be fixed\n",
                   filename);
  (void) fflush(stderr);
  DBUG_RETURN(error);
} /* myisamchk */
| 1 |
[
"CWE-362"
] |
mysql-server
|
4e5473862e6852b0f3802b0cd0c6fa10b5253291
| 89,744,924,425,786,040,000,000,000,000,000,000,000 | 395 |
Bug#24388746: PRIVILEGE ESCALATION AND RACE CONDITION USING CREATE TABLE
During REPAIR TABLE of a MyISAM table, a temporary data file (.TMD)
is created. When repair finishes, this file is renamed to the original
.MYD file. The problem was that during this rename, we copied the
stats from the old file to the new file with chmod/chown. If a user
managed to replace the temporary file before chmod/chown was executed,
it was possible to get an arbitrary file with the privileges of the
mysql user.
This patch fixes the problem by not copying stats from the old
file to the new file. This is not needed as the new file was
created with the correct stats. This fix only changes server
behavior - external utilities such as myisamchk still does
chmod/chown.
No test case provided since the problem involves synchronization
with file system operations.
|
static struct parsed_partitions *allocate_partitions(struct gendisk *hd)
{
	struct parsed_partitions *state;
	int max_parts = disk_max_parts(hd);

	/* The state header itself is small; the per-partition array can be
	 * large, so it is taken from vmalloc space instead. */
	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	state->parts = vzalloc(array_size(max_parts, sizeof(state->parts[0])));
	if (!state->parts) {
		kfree(state);
		return NULL;
	}
	state->limit = max_parts;

	return state;
}
| 0 |
[
"CWE-416"
] |
linux
|
9fbfabfda25d8774c5a08634fdd2da000a924890
| 13,266,634,885,182,880,000,000,000,000,000,000,000 | 20 |
block: fix incorrect references to disk objects
When adding partitions to the disk, the reference count of the disk
object is increased. then alloc partition device and called
device_add(), if the device_add() return error, the reference
count of the disk object will be reduced twice, at put_device(pdev)
and put_disk(disk). this leads to the end of the object's life cycle
prematurely, and trigger following calltrace.
__init_work+0x2d/0x50 kernel/workqueue.c:519
synchronize_rcu_expedited+0x3af/0x650 kernel/rcu/tree_exp.h:847
bdi_remove_from_list mm/backing-dev.c:938 [inline]
bdi_unregister+0x17f/0x5c0 mm/backing-dev.c:946
release_bdi+0xa1/0xc0 mm/backing-dev.c:968
kref_put include/linux/kref.h:65 [inline]
bdi_put+0x72/0xa0 mm/backing-dev.c:976
bdev_free_inode+0x11e/0x220 block/bdev.c:408
i_callback+0x3f/0x70 fs/inode.c:226
rcu_do_batch kernel/rcu/tree.c:2508 [inline]
rcu_core+0x76d/0x16c0 kernel/rcu/tree.c:2743
__do_softirq+0x1d7/0x93b kernel/softirq.c:558
invoke_softirq kernel/softirq.c:432 [inline]
__irq_exit_rcu kernel/softirq.c:636 [inline]
irq_exit_rcu+0xf2/0x130 kernel/softirq.c:648
sysvec_apic_timer_interrupt+0x93/0xc0
making disk is NULL when calling put_disk().
Reported-by: Hao Sun <[email protected]>
Signed-off-by: Zqiang <[email protected]>
Reviewed-by: Christoph Hellwig <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Jens Axboe <[email protected]>
|
/*
 * Print a summary table of all RemoteFX profiler counters for this
 * context: first the decode pipeline stages, then the encode pipeline
 * stages.  Each PROFILER_PRINT emits one row between the header and
 * footer.
 *
 * NOTE(review): the PROFILER_* macros presumably compile to no-ops when
 * profiling support is disabled at build time -- confirm against their
 * definitions.
 */
static void rfx_profiler_print(RFX_CONTEXT* context)
{
	PROFILER_PRINT_HEADER
	/* decode pipeline */
	PROFILER_PRINT(context->priv->prof_rfx_decode_rgb)
	PROFILER_PRINT(context->priv->prof_rfx_decode_component)
	PROFILER_PRINT(context->priv->prof_rfx_rlgr_decode)
	PROFILER_PRINT(context->priv->prof_rfx_differential_decode)
	PROFILER_PRINT(context->priv->prof_rfx_quantization_decode)
	PROFILER_PRINT(context->priv->prof_rfx_dwt_2d_decode)
	PROFILER_PRINT(context->priv->prof_rfx_ycbcr_to_rgb)
	/* encode pipeline */
	PROFILER_PRINT(context->priv->prof_rfx_encode_rgb)
	PROFILER_PRINT(context->priv->prof_rfx_encode_component)
	PROFILER_PRINT(context->priv->prof_rfx_rlgr_encode)
	PROFILER_PRINT(context->priv->prof_rfx_differential_encode)
	PROFILER_PRINT(context->priv->prof_rfx_quantization_encode)
	PROFILER_PRINT(context->priv->prof_rfx_dwt_2d_encode)
	PROFILER_PRINT(context->priv->prof_rfx_rgb_to_ycbcr)
	PROFILER_PRINT(context->priv->prof_rfx_encode_format_rgb)
	PROFILER_PRINT_FOOTER
}
| 0 |
[
"CWE-125"
] |
FreeRDP
|
3a06ce058f690b7fc1edad2f352c453376c2ebfe
| 150,352,634,212,748,120,000,000,000,000,000,000,000 | 20 |
Fixed oob read in rfx_process_message_tileset
Check input data length
Thanks to hac425 CVE-2020-11043
|
/*
 * Allocate a synchronous AES-CBC skcipher transform for libceph's
 * secret-key crypto (CRYPTO_ALG_ASYNC masked out, so only sync
 * implementations are selected).
 *
 * NOTE(review): per the kernel crypto API convention this presumably
 * returns an ERR_PTR()-encoded error rather than NULL on failure --
 * confirm that callers check with IS_ERR().
 */
static struct crypto_skcipher *ceph_crypto_alloc_cipher(void)
{
	return crypto_alloc_skcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
}
| 0 |
[
"CWE-399"
] |
linux
|
a45f795c65b479b4ba107b6ccde29b896d51ee98
| 10,834,127,415,681,272,000,000,000,000,000,000,000 | 4 |
libceph: introduce ceph_crypt() for in-place en/decryption
Starting with 4.9, kernel stacks may be vmalloced and therefore not
guaranteed to be physically contiguous; the new CONFIG_VMAP_STACK
option is enabled by default on x86. This makes it invalid to use
on-stack buffers with the crypto scatterlist API, as sg_set_buf()
expects a logical address and won't work with vmalloced addresses.
There isn't a different (e.g. kvec-based) crypto API we could switch
net/ceph/crypto.c to and the current scatterlist.h API isn't getting
updated to accommodate this use case. Allocating a new header and
padding for each operation is a non-starter, so do the en/decryption
in-place on a single pre-assembled (header + data + padding) heap
buffer. This is explicitly supported by the crypto API:
"... the caller may provide the same scatter/gather list for the
plaintext and cipher text. After the completion of the cipher
operation, the plaintext data is replaced with the ciphertext data
in case of an encryption and vice versa for a decryption."
Signed-off-by: Ilya Dryomov <[email protected]>
Reviewed-by: Sage Weil <[email protected]>
|
/*
 * Store one bytecode byte @i at position @pc of the scope's instruction
 * sequence, growing the iseq buffer (and the parallel line-number table,
 * when present) as needed.  Capacity normally doubles; near the 32-bit
 * limit it is clamped to UINT32_MAX, and pc == UINT32_MAX is a hard
 * "too big code block" error.
 *
 * Fix: the clamp branch previously assigned UINT32_MAX to the *index*
 * `pc` instead of to the capacity `s->icapa`.  That left the buffers at
 * their old size while the trailing stores then wrote at index
 * UINT32_MAX -- a massive out-of-bounds write.  The clamp must grow the
 * capacity, not move the write position.
 */
emit_B(codegen_scope *s, uint32_t pc, uint8_t i)
{
  if (pc >= s->icapa) {
    if (pc == UINT32_MAX) {
      codegen_error(s, "too big code block");
    }
    if (pc >= UINT32_MAX / 2) {
      /* doubling would overflow; clamp capacity at the maximum */
      s->icapa = UINT32_MAX;
    }
    else {
      s->icapa *= 2;
    }
    s->iseq = (mrb_code *)codegen_realloc(s, s->iseq, sizeof(mrb_code)*s->icapa);
    if (s->lines) {
      s->lines = (uint16_t*)codegen_realloc(s, s->lines, sizeof(uint16_t)*s->icapa);
    }
  }
  if (s->lines) {
    /* propagate the previous line number when none is set for this pc */
    if (s->lineno > 0 || pc == 0)
      s->lines[pc] = s->lineno;
    else
      s->lines[pc] = s->lines[pc-1];
  }
  s->iseq[pc] = i;
}
| 0 |
[
"CWE-415",
"CWE-122"
] |
mruby
|
38b164ace7d6ae1c367883a3d67d7f559783faad
| 28,306,717,748,894,070,000,000,000,000,000,000,000 | 25 |
codegen.c: fix a bug in `gen_values()`.
- Fix limit handling that fails 15 arguments method calls.
- Fix too early argument packing in arrays.
|
static void io_file_put_work(struct work_struct *work)
{
struct io_ring_ctx *ctx;
struct llist_node *node;
ctx = container_of(work, struct io_ring_ctx, file_put_work.work);
node = llist_del_all(&ctx->file_put_llist);
while (node) {
struct fixed_file_ref_node *ref_node;
struct llist_node *next = node->next;
ref_node = llist_entry(node, struct fixed_file_ref_node, llist);
__io_file_put_work(ref_node);
node = next;
}
}
| 0 |
[] |
linux
|
0f2122045b946241a9e549c2a76cea54fa58a7ff
| 14,910,836,671,687,191,000,000,000,000,000,000,000 | 17 |
io_uring: don't rely on weak ->files references
Grab actual references to the files_struct. To avoid circular references
issues due to this, we add a per-task note that keeps track of what
io_uring contexts a task has used. When the tasks execs or exits its
assigned files, we cancel requests based on this tracking.
With that, we can grab proper references to the files table, and no
longer need to rely on stashing away ring_fd and ring_file to check
if the ring_fd may have been closed.
Cc: [email protected] # v5.5+
Reviewed-by: Pavel Begunkov <[email protected]>
Signed-off-by: Jens Axboe <[email protected]>
|
/*
 * Dispatch one client-to-server RFB message for connection @cl.
 *
 * The one-byte message type is read first, then the remainder of the
 * message per-type.  Protocol violations close the connection via
 * rfbCloseClient().
 *
 * NOTE(review): the READ()/SKIP() macros appear to return from this
 * function on a short or failed read -- confirm against their
 * definitions before relying on fall-through behavior.
 *
 * Fixes in this revision:
 *  - rfbFence: the (up to 255-byte) fence payload was read into the
 *    64-byte stack buffer *before* the size check, allowing a malicious
 *    client to overflow the stack.  The check now precedes the read and
 *    oversized payloads are skipped.
 *  - rfbGIIDeviceDestroy: the malformed-message log line said "device
 *    create" instead of "device destroy".
 */
static void rfbProcessClientNormalMessage(rfbClientPtr cl)
{
  int n;
  rfbClientToServerMsg msg;
  char *str;

  READ((char *)&msg, 1)

  switch (msg.type) {

    case rfbSetPixelFormat:
      /* Adopt the client's pixel format and rebuild the pixel
         translation function accordingly. */
      READ(((char *)&msg) + 1, sz_rfbSetPixelFormatMsg - 1)
      cl->format.bitsPerPixel = msg.spf.format.bitsPerPixel;
      cl->format.depth = msg.spf.format.depth;
      cl->format.bigEndian = (msg.spf.format.bigEndian ? 1 : 0);
      cl->format.trueColour = (msg.spf.format.trueColour ? 1 : 0);
      cl->format.redMax = Swap16IfLE(msg.spf.format.redMax);
      cl->format.greenMax = Swap16IfLE(msg.spf.format.greenMax);
      cl->format.blueMax = Swap16IfLE(msg.spf.format.blueMax);
      cl->format.redShift = msg.spf.format.redShift;
      cl->format.greenShift = msg.spf.format.greenShift;
      cl->format.blueShift = msg.spf.format.blueShift;
      cl->readyForSetColourMapEntries = TRUE;
      rfbSetTranslateFunction(cl);
      return;

    case rfbFixColourMapEntries:
      /* Obsolete message; not supported -- drop the client. */
      READ(((char *)&msg) + 1, sz_rfbFixColourMapEntriesMsg - 1)
      rfbLog("rfbProcessClientNormalMessage: FixColourMapEntries unsupported\n");
      rfbCloseClient(cl);
      return;

    case rfbSetEncodings:
    {
      int i;
      CARD32 enc;
      Bool firstFence = !cl->enableFence;
      Bool firstCU = !cl->enableCU;
      Bool firstGII = !cl->enableGII;
      Bool logTightCompressLevel = FALSE;

      READ(((char *)&msg) + 1, sz_rfbSetEncodingsMsg - 1)
      msg.se.nEncodings = Swap16IfLE(msg.se.nEncodings);

      /* Reset all encoding-related state before re-negotiating. */
      cl->preferredEncoding = -1;
      cl->useCopyRect = FALSE;
      cl->enableCursorShapeUpdates = FALSE;
      cl->enableCursorPosUpdates = FALSE;
      cl->enableLastRectEncoding = FALSE;
      cl->tightCompressLevel = TIGHT_DEFAULT_COMPRESSION;
      cl->tightSubsampLevel = TIGHT_DEFAULT_SUBSAMP;
      cl->tightQualityLevel = -1;
      cl->imageQualityLevel = -1;

      for (i = 0; i < msg.se.nEncodings; i++) {
        READ((char *)&enc, 4)
        enc = Swap32IfLE(enc);

        switch (enc) {

          case rfbEncodingCopyRect:
            cl->useCopyRect = TRUE;
            break;
          case rfbEncodingRaw:
            /* The first recognized frame encoding wins. */
            if (cl->preferredEncoding == -1) {
              cl->preferredEncoding = enc;
              rfbLog("Using raw encoding for client %s\n", cl->host);
            }
            break;
          case rfbEncodingRRE:
            if (cl->preferredEncoding == -1) {
              cl->preferredEncoding = enc;
              rfbLog("Using rre encoding for client %s\n", cl->host);
            }
            break;
          case rfbEncodingCoRRE:
            if (cl->preferredEncoding == -1) {
              cl->preferredEncoding = enc;
              rfbLog("Using CoRRE encoding for client %s\n", cl->host);
            }
            break;
          case rfbEncodingHextile:
            if (cl->preferredEncoding == -1) {
              cl->preferredEncoding = enc;
              rfbLog("Using hextile encoding for client %s\n", cl->host);
            }
            break;
          case rfbEncodingZlib:
            if (cl->preferredEncoding == -1) {
              cl->preferredEncoding = enc;
              rfbLog("Using zlib encoding for client %s\n", cl->host);
            }
            break;
          case rfbEncodingZRLE:
            if (cl->preferredEncoding == -1) {
              cl->preferredEncoding = enc;
              rfbLog("Using ZRLE encoding for client %s\n", cl->host);
            }
            break;
          case rfbEncodingZYWRLE:
            if (cl->preferredEncoding == -1) {
              cl->preferredEncoding = enc;
              rfbLog("Using ZYWRLE encoding for client %s\n", cl->host);
            }
            break;
          case rfbEncodingTight:
            if (cl->preferredEncoding == -1) {
              cl->preferredEncoding = enc;
              rfbLog("Using tight encoding for client %s\n", cl->host);
            }
            break;

          /* Pseudo-encodings: client capability advertisements. */
          case rfbEncodingXCursor:
            if (!cl->enableCursorShapeUpdates) {
              rfbLog("Enabling X-style cursor updates for client %s\n",
                     cl->host);
              cl->enableCursorShapeUpdates = TRUE;
              cl->useRichCursorEncoding = FALSE;
              cl->cursorWasChanged = TRUE;
            }
            break;
          case rfbEncodingRichCursor:
            if (!cl->enableCursorShapeUpdates) {
              rfbLog("Enabling full-color cursor updates for client %s\n",
                     cl->host);
              cl->enableCursorShapeUpdates = TRUE;
              cl->useRichCursorEncoding = TRUE;
              cl->cursorWasChanged = TRUE;
            }
            break;
          case rfbEncodingPointerPos:
            if (!cl->enableCursorPosUpdates) {
              rfbLog("Enabling cursor position updates for client %s\n",
                     cl->host);
              cl->enableCursorPosUpdates = TRUE;
              cl->cursorWasMoved = TRUE;
              cl->cursorX = -1;
              cl->cursorY = -1;
            }
            break;
          case rfbEncodingLastRect:
            if (!cl->enableLastRectEncoding) {
              rfbLog("Enabling LastRect protocol extension for client %s\n",
                     cl->host);
              cl->enableLastRectEncoding = TRUE;
            }
            break;
          case rfbEncodingFence:
            if (!cl->enableFence) {
              rfbLog("Enabling Fence protocol extension for client %s\n",
                     cl->host);
              cl->enableFence = TRUE;
            }
            break;
          case rfbEncodingContinuousUpdates:
            if (!cl->enableCU) {
              rfbLog("Enabling Continuous Updates protocol extension for client %s\n",
                     cl->host);
              cl->enableCU = TRUE;
            }
            break;
          case rfbEncodingNewFBSize:
            if (!cl->enableDesktopSize) {
              if (!rfbAuthDisableRemoteResize) {
                rfbLog("Enabling Desktop Size protocol extension for client %s\n",
                       cl->host);
                cl->enableDesktopSize = TRUE;
              } else
                rfbLog("WARNING: Remote desktop resizing disabled per system policy.\n");
            }
            break;
          case rfbEncodingExtendedDesktopSize:
            if (!cl->enableExtDesktopSize) {
              if (!rfbAuthDisableRemoteResize) {
                rfbLog("Enabling Extended Desktop Size protocol extension for client %s\n",
                       cl->host);
                cl->enableExtDesktopSize = TRUE;
              } else
                rfbLog("WARNING: Remote desktop resizing disabled per system policy.\n");
            }
            break;
          case rfbEncodingGII:
            if (!cl->enableGII) {
              rfbLog("Enabling GII extension for client %s\n", cl->host);
              cl->enableGII = TRUE;
            }
            break;

          default:
            /* Parameterized pseudo-encodings carry their value in the
               low bits of the code point. */
            if (enc >= (CARD32)rfbEncodingCompressLevel0 &&
                enc <= (CARD32)rfbEncodingCompressLevel9) {
              cl->zlibCompressLevel = enc & 0x0F;
              cl->tightCompressLevel = enc & 0x0F;
              if (cl->preferredEncoding == rfbEncodingTight)
                logTightCompressLevel = TRUE;
              else
                rfbLog("Using compression level %d for client %s\n",
                       cl->tightCompressLevel, cl->host);
              if (rfbInterframe == -1) {
                /* Auto interframe comparison: on for high compression. */
                if (cl->tightCompressLevel >= 5) {
                  if (!InterframeOn(cl)) {
                    rfbCloseClient(cl);
                    return;
                  }
                } else
                  InterframeOff(cl);
              }
            } else if (enc >= (CARD32)rfbEncodingSubsamp1X &&
                       enc <= (CARD32)rfbEncodingSubsampGray) {
              cl->tightSubsampLevel = enc & 0xFF;
              rfbLog("Using JPEG subsampling %d for client %s\n",
                     cl->tightSubsampLevel, cl->host);
            } else if (enc >= (CARD32)rfbEncodingQualityLevel0 &&
                       enc <= (CARD32)rfbEncodingQualityLevel9) {
              cl->tightQualityLevel = JPEG_QUAL[enc & 0x0F];
              cl->tightSubsampLevel = JPEG_SUBSAMP[enc & 0x0F];
              cl->imageQualityLevel = enc & 0x0F;
              if (cl->preferredEncoding == rfbEncodingTight)
                rfbLog("Using JPEG subsampling %d, Q%d for client %s\n",
                       cl->tightSubsampLevel, cl->tightQualityLevel, cl->host);
              else
                rfbLog("Using image quality level %d for client %s\n",
                       cl->imageQualityLevel, cl->host);
            } else if (enc >= (CARD32)rfbEncodingFineQualityLevel0 + 1 &&
                       enc <= (CARD32)rfbEncodingFineQualityLevel100) {
              cl->tightQualityLevel = enc & 0xFF;
              rfbLog("Using JPEG quality %d for client %s\n",
                     cl->tightQualityLevel, cl->host);
            } else {
              rfbLog("rfbProcessClientNormalMessage: ignoring unknown encoding %d (%x)\n",
                     (int)enc, (int)enc);
            }
        }  /* switch (enc) */
      }  /* for (i = 0; i < msg.se.nEncodings; i++) */

      if (cl->preferredEncoding == -1)
        cl->preferredEncoding = rfbEncodingTight;

      if (cl->preferredEncoding == rfbEncodingTight && logTightCompressLevel)
        rfbLog("Using Tight compression level %d for client %s\n",
               rfbTightCompressLevel(cl), cl->host);

      /* Cursor position updates are meaningless without shape updates. */
      if (cl->enableCursorPosUpdates && !cl->enableCursorShapeUpdates) {
        rfbLog("Disabling cursor position updates for client %s\n", cl->host);
        cl->enableCursorPosUpdates = FALSE;
      }

      /* Kick off the extension handshakes the first time each is
         enabled. */
      if (cl->enableFence && firstFence) {
        if (!rfbSendFence(cl, rfbFenceFlagRequest, 0, NULL))
          return;
      }

      if (cl->enableCU && cl->enableFence && firstCU) {
        if (!rfbSendEndOfCU(cl))
          return;
      }

      if (cl->enableGII && firstGII) {
        /* Send GII server version message to all clients */
        /* (local declaration shadows the outer `msg` on purpose) */
        rfbGIIServerVersionMsg msg;

        msg.type = rfbGIIServer;
        /* We always send as big endian to make things easier on the Java
           viewer. */
        msg.endianAndSubType = rfbGIIVersion | rfbGIIBE;
        msg.length = Swap16IfLE(sz_rfbGIIServerVersionMsg - 4);
        msg.maximumVersion = msg.minimumVersion = Swap16IfLE(1);

        if (WriteExact(cl, (char *)&msg, sz_rfbGIIServerVersionMsg) < 0) {
          rfbLogPerror("rfbProcessClientNormalMessage: write");
          rfbCloseClient(cl);
          return;
        }
      }

      return;
    }  /* rfbSetEncodings */

    case rfbFramebufferUpdateRequest:
    {
      RegionRec tmpRegion;
      BoxRec box;

      READ(((char *)&msg) + 1, sz_rfbFramebufferUpdateRequestMsg - 1)

      box.x1 = Swap16IfLE(msg.fur.x);
      box.y1 = Swap16IfLE(msg.fur.y);
      box.x2 = box.x1 + Swap16IfLE(msg.fur.w);
      box.y2 = box.y1 + Swap16IfLE(msg.fur.h);
      SAFE_REGION_INIT(pScreen, &tmpRegion, &box, 0);

      /* With continuous updates active, incremental requests are
         redundant and ignored. */
      if (!msg.fur.incremental || !cl->continuousUpdates)
        REGION_UNION(pScreen, &cl->requestedRegion, &cl->requestedRegion,
                     &tmpRegion);

      if (!cl->readyForSetColourMapEntries) {
        /* client hasn't sent a SetPixelFormat so is using server's */
        cl->readyForSetColourMapEntries = TRUE;
        if (!cl->format.trueColour) {
          if (!rfbSetClientColourMap(cl, 0, 0)) {
            REGION_UNINIT(pScreen, &tmpRegion);
            return;
          }
        }
      }

      if (!msg.fur.incremental) {
        /* Non-incremental: force a full repaint of the requested area. */
        REGION_UNION(pScreen, &cl->modifiedRegion, &cl->modifiedRegion,
                     &tmpRegion);
        REGION_SUBTRACT(pScreen, &cl->copyRegion, &cl->copyRegion, &tmpRegion);
        REGION_UNION(pScreen, &cl->ifRegion, &cl->ifRegion, &tmpRegion);
        cl->pendingExtDesktopResize = TRUE;
      }

      if (FB_UPDATE_PENDING(cl) &&
          (!cl->deferredUpdateScheduled || rfbDeferUpdateTime == 0 ||
           gettime() - cl->deferredUpdateStart >=
           (double)rfbDeferUpdateTime)) {
        if (rfbSendFramebufferUpdate(cl))
          cl->deferredUpdateScheduled = FALSE;
      }

      REGION_UNINIT(pScreen, &tmpRegion);
      return;
    }

    case rfbKeyEvent:
      cl->rfbKeyEventsRcvd++;
      READ(((char *)&msg) + 1, sz_rfbKeyEventMsg - 1)
      if (!rfbViewOnly && !cl->viewOnly)
        KeyEvent((KeySym)Swap32IfLE(msg.ke.key), msg.ke.down);
      return;

    case rfbPointerEvent:
      cl->rfbPointerEventsRcvd++;
      READ(((char *)&msg) + 1, sz_rfbPointerEventMsg - 1)
      /* Only one client at a time may hold a button down. */
      if (pointerClient && (pointerClient != cl))
        return;
      if (msg.pe.buttonMask == 0)
        pointerClient = NULL;
      else
        pointerClient = cl;
      if (!rfbViewOnly && !cl->viewOnly) {
        cl->cursorX = (int)Swap16IfLE(msg.pe.x);
        cl->cursorY = (int)Swap16IfLE(msg.pe.y);
        PtrAddEvent(msg.pe.buttonMask, cl->cursorX, cl->cursorY, cl);
      }
      return;

    case rfbClientCutText:
    {
      int ignoredBytes = 0;

      READ(((char *)&msg) + 1, sz_rfbClientCutTextMsg - 1)
      msg.cct.length = Swap32IfLE(msg.cct.length);
      /* Cap clipboard size; excess payload is drained and discarded. */
      if (msg.cct.length > rfbMaxClipboard) {
        rfbLog("Truncating %d-byte clipboard update to %d bytes.\n",
               msg.cct.length, rfbMaxClipboard);
        ignoredBytes = msg.cct.length - rfbMaxClipboard;
        msg.cct.length = rfbMaxClipboard;
      }
      if (msg.cct.length <= 0) return;
      str = (char *)malloc(msg.cct.length);
      if (str == NULL) {
        rfbLogPerror("rfbProcessClientNormalMessage: rfbClientCutText out of memory");
        rfbCloseClient(cl);
        return;
      }
      if ((n = ReadExact(cl, str, msg.cct.length)) <= 0) {
        if (n != 0)
          rfbLogPerror("rfbProcessClientNormalMessage: read");
        free(str);
        rfbCloseClient(cl);
        return;
      }
      if (ignoredBytes > 0) {
        if ((n = SkipExact(cl, ignoredBytes)) <= 0) {
          if (n != 0)
            rfbLogPerror("rfbProcessClientNormalMessage: read");
          free(str);
          rfbCloseClient(cl);
          return;
        }
      }
      /* NOTE: We do not accept cut text from a view-only client */
      if (!rfbViewOnly && !cl->viewOnly && !rfbAuthDisableCBRecv) {
        vncClientCutText(str, msg.cct.length);
        if (rfbSyncCutBuffer) rfbSetXCutText(str, msg.cct.length);
      }
      free(str);
      return;
    }

    case rfbEnableContinuousUpdates:
    {
      BoxRec box;

      READ(((char *)&msg) + 1, sz_rfbEnableContinuousUpdatesMsg - 1)

      if (!cl->enableFence || !cl->enableCU) {
        rfbLog("Ignoring request to enable continuous updates because the client does not\n");
        rfbLog("support the flow control extensions.\n");
        return;
      }

      box.x1 = Swap16IfLE(msg.ecu.x);
      box.y1 = Swap16IfLE(msg.ecu.y);
      box.x2 = box.x1 + Swap16IfLE(msg.ecu.w);
      box.y2 = box.y1 + Swap16IfLE(msg.ecu.h);
      SAFE_REGION_INIT(pScreen, &cl->cuRegion, &box, 0);

      cl->continuousUpdates = msg.ecu.enable;
      if (cl->continuousUpdates) {
        REGION_EMPTY(pScreen, &cl->requestedRegion);
        if (!rfbSendFramebufferUpdate(cl))
          return;
      } else {
        if (!rfbSendEndOfCU(cl))
          return;
      }

      rfbLog("Continuous updates %s\n",
             cl->continuousUpdates ? "enabled" : "disabled");
      return;
    }

    case rfbFence:
    {
      CARD32 flags;
      char data[64];

      READ(((char *)&msg) + 1, sz_rfbFenceMsg - 1)

      flags = Swap32IfLE(msg.f.flags);

      /* SECURITY FIX: validate the payload length *before* reading it.
         msg.f.length can be up to 255, but data[] only holds 64 bytes;
         the old code read first and checked afterward, allowing a
         malicious client to smash the stack.  Oversized payloads are
         now drained and ignored. */
      if (msg.f.length > sizeof(data)) {
        rfbLog("Ignoring fence. Payload of %d bytes is too large.\n",
               msg.f.length);
        SKIP(msg.f.length)
      } else {
        READ(data, msg.f.length)
        HandleFence(cl, flags, msg.f.length, data);
      }

      return;
    }

#define EDSERROR(format, args...) {  \
  if (!strlen(errMsg))  \
    snprintf(errMsg, 256, "Desktop resize ERROR: "format"\n", args);  \
  result = rfbEDSResultInvalid;  \
}

    case rfbSetDesktopSize:
    {
      int i;
      struct xorg_list newScreens;
      rfbClientPtr cl2;
      int result = rfbEDSResultSuccess;
      char errMsg[256] = "\0";
      ScreenPtr pScreen = screenInfo.screens[0];

      READ(((char *)&msg) + 1, sz_rfbSetDesktopSizeMsg - 1)

      if (msg.sds.numScreens < 1)
        EDSERROR("Requested number of screens %d is invalid",
                 msg.sds.numScreens);

      msg.sds.w = Swap16IfLE(msg.sds.w);
      msg.sds.h = Swap16IfLE(msg.sds.h);
      if (msg.sds.w < 1 || msg.sds.h < 1)
        EDSERROR("Requested framebuffer dimensions %dx%d are invalid",
                 msg.sds.w, msg.sds.h);

      /* Read and validate every requested screen before resizing. */
      xorg_list_init(&newScreens);
      for (i = 0; i < msg.sds.numScreens; i++) {
        rfbScreenInfo *screen = rfbNewScreen(0, 0, 0, 0, 0, 0);

        READ((char *)&screen->s, sizeof(rfbScreenDesc))
        screen->s.id = Swap32IfLE(screen->s.id);
        screen->s.x = Swap16IfLE(screen->s.x);
        screen->s.y = Swap16IfLE(screen->s.y);
        screen->s.w = Swap16IfLE(screen->s.w);
        screen->s.h = Swap16IfLE(screen->s.h);
        screen->s.flags = Swap32IfLE(screen->s.flags);
        if (screen->s.w < 1 || screen->s.h < 1)
          EDSERROR("Screen 0x%.8x requested dimensions %dx%d are invalid",
                   (unsigned int)screen->s.id, screen->s.w, screen->s.h);
        if (screen->s.x >= msg.sds.w || screen->s.y >= msg.sds.h ||
            screen->s.x + screen->s.w > msg.sds.w ||
            screen->s.y + screen->s.h > msg.sds.h)
          EDSERROR("Screen 0x%.8x requested geometry %dx%d+%d+%d exceeds requested framebuffer dimensions",
                   (unsigned int)screen->s.id, screen->s.w, screen->s.h,
                   screen->s.x, screen->s.y);
        if (rfbFindScreenID(&newScreens, screen->s.id)) {
          EDSERROR("Screen 0x%.8x duplicate ID", (unsigned int)screen->s.id);
          free(screen);
        } else
          rfbAddScreen(&newScreens, screen);
      }

      if (cl->viewOnly) {
        rfbLog("NOTICE: Ignoring remote desktop resize request from a view-only client.\n");
        result = rfbEDSResultProhibited;
      } else if (result == rfbEDSResultSuccess) {
        result = ResizeDesktop(pScreen, cl, msg.sds.w, msg.sds.h, &newScreens);
        if (result == rfbEDSResultSuccess)
          return;
      } else
        rfbLog(errMsg);

      rfbRemoveScreens(&newScreens);

      /* Send back the error only to the requesting client.  This loop is
         necessary because the client may have been shut down as a result of
         an error in ResizeDesktop(). */
      for (cl2 = rfbClientHead; cl2; cl2 = cl2->next) {
        if (cl2 == cl) {
          cl2->pendingExtDesktopResize = TRUE;
          cl2->reason = rfbEDSReasonClient;
          cl2->result = result;
          rfbSendFramebufferUpdate(cl2);
          break;
        }
      }

      return;
    }

    case rfbGIIClient:
    {
      CARD8 endianAndSubType, littleEndian, subType;

      READ((char *)&endianAndSubType, 1);
      /* GII messages carry their own endianness flag. */
      littleEndian = (endianAndSubType & rfbGIIBE) ? 0 : 1;
      subType = endianAndSubType & ~rfbGIIBE;

      switch (subType) {

        case rfbGIIVersion:

          READ((char *)&msg.giicv.length, sz_rfbGIIClientVersionMsg - 2);
          if (littleEndian != *(const char *)&rfbEndianTest) {
            msg.giicv.length = Swap16(msg.giicv.length);
            msg.giicv.version = Swap16(msg.giicv.version);
          }
          if (msg.giicv.length != sz_rfbGIIClientVersionMsg - 4 ||
              msg.giicv.version < 1) {
            rfbLog("ERROR: Malformed GII client version message\n");
            rfbCloseClient(cl);
            return;
          }
          rfbLog("Client supports GII version %d\n", msg.giicv.version);
          break;

        case rfbGIIDeviceCreate:
        {
          int i;
          rfbDevInfo dev;
          rfbGIIDeviceCreatedMsg dcmsg;

          memset(&dev, 0, sizeof(dev));
          dcmsg.deviceOrigin = 0;

          READ((char *)&msg.giidc.length, sz_rfbGIIDeviceCreateMsg - 2);
          if (littleEndian != *(const char *)&rfbEndianTest) {
            msg.giidc.length = Swap16(msg.giidc.length);
            msg.giidc.vendorID = Swap32(msg.giidc.vendorID);
            msg.giidc.productID = Swap32(msg.giidc.productID);
            msg.giidc.canGenerate = Swap32(msg.giidc.canGenerate);
            msg.giidc.numRegisters = Swap32(msg.giidc.numRegisters);
            msg.giidc.numValuators = Swap32(msg.giidc.numValuators);
            msg.giidc.numButtons = Swap32(msg.giidc.numButtons);
          }

          rfbLog("GII Device Create: %s\n", msg.giidc.deviceName);
#ifdef GII_DEBUG
          rfbLog("    Vendor ID: %d\n", msg.giidc.vendorID);
          rfbLog("    Product ID: %d\n", msg.giidc.productID);
          rfbLog("    Event mask: %.8x\n", msg.giidc.canGenerate);
          rfbLog("    Registers: %d\n", msg.giidc.numRegisters);
          rfbLog("    Valuators: %d\n", msg.giidc.numValuators);
          rfbLog("    Buttons: %d\n", msg.giidc.numButtons);
#endif

          if (msg.giidc.length != sz_rfbGIIDeviceCreateMsg - 4 +
              msg.giidc.numValuators * sz_rfbGIIValuator) {
            rfbLog("ERROR: Malformed GII device create message\n");
            rfbCloseClient(cl);
            return;
          }

          /* On validation failure: drain the valuator descriptors so
             the stream stays in sync, then report failure (deviceOrigin
             stays 0). */
          if (msg.giidc.numButtons > MAX_BUTTONS) {
            rfbLog("GII device create ERROR: %d buttons exceeds max of %d\n",
                   msg.giidc.numButtons, MAX_BUTTONS);
            SKIP(msg.giidc.numValuators * sz_rfbGIIValuator);
            goto sendMessage;
          }

          if (msg.giidc.numValuators > MAX_VALUATORS) {
            rfbLog("GII device create ERROR: %d valuators exceeds max of %d\n",
                   msg.giidc.numValuators, MAX_VALUATORS);
            SKIP(msg.giidc.numValuators * sz_rfbGIIValuator);
            goto sendMessage;
          }

          memcpy(&dev.name, msg.giidc.deviceName, 32);
          dev.numButtons = msg.giidc.numButtons;
          dev.numValuators = msg.giidc.numValuators;
          dev.eventMask = msg.giidc.canGenerate;
          dev.mode =
            (dev.eventMask & rfbGIIValuatorAbsoluteMask) ? Absolute : Relative;
          dev.productID = msg.giidc.productID;

          if (dev.mode == Relative) {
            rfbLog("GII device create ERROR: relative valuators not supported (yet)\n");
            SKIP(msg.giidc.numValuators * sz_rfbGIIValuator);
            goto sendMessage;
          }

          for (i = 0; i < dev.numValuators; i++) {
            rfbGIIValuator *v = &dev.valuators[i];

            READ((char *)v, sz_rfbGIIValuator);
            if (littleEndian != *(const char *)&rfbEndianTest) {
              v->index = Swap32(v->index);
              v->rangeMin = Swap32((CARD32)v->rangeMin);
              v->rangeCenter = Swap32((CARD32)v->rangeCenter);
              v->rangeMax = Swap32((CARD32)v->rangeMax);
              v->siUnit = Swap32(v->siUnit);
              v->siAdd = Swap32((CARD32)v->siAdd);
              v->siMul = Swap32((CARD32)v->siMul);
              v->siDiv = Swap32((CARD32)v->siDiv);
              v->siShift = Swap32((CARD32)v->siShift);
            }

#ifdef GII_DEBUG
            rfbLog("    Valuator: %s (%s)\n", v->longName, v->shortName);
            rfbLog("        Index: %d\n", v->index);
            rfbLog("        Range: min = %d, center = %d, max = %d\n",
                   v->rangeMin, v->rangeCenter, v->rangeMax);
            rfbLog("        SI unit: %d\n", v->siUnit);
            rfbLog("        SI add: %d\n", v->siAdd);
            rfbLog("        SI multiply: %d\n", v->siMul);
            rfbLog("        SI divide: %d\n", v->siDiv);
            rfbLog("        SI shift: %d\n", v->siShift);
#endif
          }

          /* Re-use an existing device with the same name, if any. */
          for (i = 0; i < cl->numDevices; i++) {
            if (!strcmp(dev.name, cl->devices[i].name)) {
              rfbLog("Device \'%s\' already exists with GII device ID %d\n",
                     dev.name, i + 1);
              dcmsg.deviceOrigin = Swap32IfLE(i + 1);
              goto sendMessage;
            }
          }

          if (rfbVirtualTablet || AddExtInputDevice(&dev)) {
            memcpy(&cl->devices[cl->numDevices], &dev, sizeof(dev));
            cl->numDevices++;
            dcmsg.deviceOrigin = Swap32IfLE(cl->numDevices);
          }
          rfbLog("GII device ID = %d\n", cl->numDevices);

          sendMessage:
          /* Send back a GII device created message */
          dcmsg.type = rfbGIIServer;
          /* We always send as big endian to make things easier on the Java
             viewer. */
          dcmsg.endianAndSubType = rfbGIIDeviceCreate | rfbGIIBE;
          dcmsg.length = Swap16IfLE(sz_rfbGIIDeviceCreatedMsg - 4);

          if (WriteExact(cl, (char *)&dcmsg, sz_rfbGIIDeviceCreatedMsg) < 0) {
            rfbLogPerror("rfbProcessClientNormalMessage: write");
            rfbCloseClient(cl);
            return;
          }

          break;
        }

        case rfbGIIDeviceDestroy:

          READ((char *)&msg.giidd.length, sz_rfbGIIDeviceDestroyMsg - 2);
          if (littleEndian != *(const char *)&rfbEndianTest) {
            msg.giidd.length = Swap16(msg.giidd.length);
            msg.giidd.deviceOrigin = Swap32(msg.giidd.deviceOrigin);
          }
          if (msg.giidd.length != sz_rfbGIIDeviceDestroyMsg - 4) {
            /* (message text fixed: this is the destroy handler) */
            rfbLog("ERROR: Malformed GII device destroy message\n");
            rfbCloseClient(cl);
            return;
          }

          RemoveExtInputDevice(cl, msg.giidd.deviceOrigin - 1);

          break;

        case rfbGIIEvent:
        {
          CARD16 length;

          READ((char *)&length, sizeof(CARD16));
          if (littleEndian != *(const char *)&rfbEndianTest)
            length = Swap16(length);

          /* A GII event message may batch several events; `length`
             counts the remaining payload bytes. */
          while (length > 0) {
            CARD8 eventSize, eventType;

            READ((char *)&eventSize, 1);
            READ((char *)&eventType, 1);

            switch (eventType) {

              case rfbGIIButtonPress:
              case rfbGIIButtonRelease:
              {
                rfbGIIButtonEvent b;
                rfbDevInfo *dev;

                READ((char *)&b.pad, sz_rfbGIIButtonEvent - 2);
                if (littleEndian != *(const char *)&rfbEndianTest) {
                  b.deviceOrigin = Swap32(b.deviceOrigin);
                  b.buttonNumber = Swap32(b.buttonNumber);
                }
                if (eventSize != sz_rfbGIIButtonEvent || b.deviceOrigin <= 0 ||
                    b.buttonNumber < 1) {
                  rfbLog("ERROR: Malformed GII button event\n");
                  rfbCloseClient(cl);
                  return;
                }
                if (eventSize > length) {
                  rfbLog("ERROR: Malformed GII event message\n");
                  rfbCloseClient(cl);
                  return;
                }
                length -= eventSize;
                if (b.deviceOrigin < 1 || b.deviceOrigin > cl->numDevices) {
                  rfbLog("ERROR: GII button event from non-existent device %d\n",
                         b.deviceOrigin);
                  rfbCloseClient(cl);
                  return;
                }
                dev = &cl->devices[b.deviceOrigin - 1];
                if ((eventType == rfbGIIButtonPress &&
                     (dev->eventMask & rfbGIIButtonPressMask) == 0) ||
                    (eventType == rfbGIIButtonRelease &&
                     (dev->eventMask & rfbGIIButtonReleaseMask) == 0)) {
                  rfbLog("ERROR: Device %d can't generate GII button events\n",
                         b.deviceOrigin);
                  rfbCloseClient(cl);
                  return;
                }
                if (b.buttonNumber > dev->numButtons) {
                  rfbLog("ERROR: GII button %d event for device %d exceeds button count (%d)\n",
                         b.buttonNumber, b.deviceOrigin, dev->numButtons);
                  rfbCloseClient(cl);
                  return;
                }
#ifdef GII_DEBUG
                rfbLog("Device %d button %d %s\n", b.deviceOrigin,
                       b.buttonNumber,
                       eventType == rfbGIIButtonPress ? "PRESS" : "release");
                fflush(stderr);
#endif
                ExtInputAddEvent(dev, eventType == rfbGIIButtonPress ?
                                 ButtonPress : ButtonRelease, b.buttonNumber);
                break;
              }

              case rfbGIIValuatorRelative:
              case rfbGIIValuatorAbsolute:
              {
                rfbGIIValuatorEvent v;
                int i;
                rfbDevInfo *dev;

                READ((char *)&v.pad, sz_rfbGIIValuatorEvent - 2);
                if (littleEndian != *(const char *)&rfbEndianTest) {
                  v.deviceOrigin = Swap32(v.deviceOrigin);
                  v.first = Swap32(v.first);
                  v.count = Swap32(v.count);
                }
                if (eventSize !=
                    sz_rfbGIIValuatorEvent + sizeof(int) * v.count) {
                  rfbLog("ERROR: Malformed GII valuator event\n");
                  rfbCloseClient(cl);
                  return;
                }
                if (eventSize > length) {
                  rfbLog("ERROR: Malformed GII event message\n");
                  rfbCloseClient(cl);
                  return;
                }
                length -= eventSize;
                if (v.deviceOrigin < 1 || v.deviceOrigin > cl->numDevices) {
                  rfbLog("ERROR: GII valuator event from non-existent device %d\n",
                         v.deviceOrigin);
                  rfbCloseClient(cl);
                  return;
                }
                dev = &cl->devices[v.deviceOrigin - 1];
                if ((eventType == rfbGIIValuatorRelative &&
                     (dev->eventMask & rfbGIIValuatorRelativeMask) == 0) ||
                    (eventType == rfbGIIValuatorAbsolute &&
                     (dev->eventMask & rfbGIIValuatorAbsoluteMask) == 0)) {
                  rfbLog("ERROR: Device %d cannot generate GII valuator events\n",
                         v.deviceOrigin);
                  rfbCloseClient(cl);
                  return;
                }
                if (v.first + v.count > dev->numValuators) {
                  rfbLog("ERROR: GII valuator event for device %d exceeds valuator count (%d)\n",
                         v.deviceOrigin, dev->numValuators);
                  rfbCloseClient(cl);
                  return;
                }
#ifdef GII_DEBUG
                rfbLog("Device %d Valuator %s first=%d count=%d:\n",
                       v.deviceOrigin,
                       eventType == rfbGIIValuatorRelative ? "rel" : "ABS",
                       v.first, v.count);
#endif
                for (i = v.first; i < v.first + v.count; i++) {
                  READ((char *)&dev->values[i], sizeof(int));
                  if (littleEndian != *(const char *)&rfbEndianTest)
                    dev->values[i] = Swap32((CARD32)dev->values[i]);
#ifdef GII_DEBUG
                  fprintf(stderr, "v[%d]=%d ", i, dev->values[i]);
#endif
                }
#ifdef GII_DEBUG
                fprintf(stderr, "\n");
#endif
                if (v.count > 0) {
                  dev->valFirst = v.first;
                  dev->valCount = v.count;
                  dev->mode = eventType == rfbGIIValuatorAbsolute ?
                              Absolute : Relative;
                  ExtInputAddEvent(dev, MotionNotify, 0);
                }
                break;
              }
              default:
                rfbLog("ERROR: This server cannot handle GII event type %d\n",
                       eventType);
                rfbCloseClient(cl);
                return;
            }  /* switch (eventType) */
          }  /* while (length > 0) */

          if (length != 0) {
            rfbLog("ERROR: Malformed GII event message\n");
            rfbCloseClient(cl);
            return;
          }

          break;
        }  /* rfbGIIEvent */
      }  /* switch (subType) */
      return;
    }  /* rfbGIIClient */

    default:
      rfbLog("rfbProcessClientNormalMessage: unknown message type %d\n",
             msg.type);
      rfbLog(" ... closing connection\n");
      rfbCloseClient(cl);
      return;
  }  /* switch (msg.type) */
}
| 1 |
[
"CWE-787"
] |
turbovnc
|
cea98166008301e614e0d36776bf9435a536136e
| 299,527,984,026,747,930,000,000,000,000,000,000,000 | 885 |
Server: Fix two issues identified by ASan
1. If the TLSPlain and X509Plain security types were both disabled, then
rfbOptPamAuth() would overflow the name field in the secTypes
structure when testing the "none" security type, since the name of
that security type has less than five characters. This issue was
innocuous, since the overflow was fully contained within the secTypes
structure, but the ASan error caused Xvnc to abort, which made it
difficult to detect other errors.
2. If an ill-behaved RFB client sent the TurboVNC Server a fence
message with more than 64 bytes, then the TurboVNC Server would
try to read that message and subsequently overflow the stack before
it detected that the payload was too large. This could never have
occurred with any of the VNC viewers that currently support the RFB
flow control extensions (TigerVNC and TurboVNC, namely.) This issue
was also innocuous, since the stack overflow affected two variables
(newScreens and errMsg) that were never accessed before the function
returned.
|
/*
 * Destructor callback for a connection's MQTT protocol data: releases
 * the block with MG_FREE().
 *
 * NOTE(review): presumably MG_FREE() follows free() semantics and is a
 * no-op on NULL -- confirm against its definition.
 */
static void mg_mqtt_proto_data_destructor(void *proto_data) {
  MG_FREE(proto_data);
}
| 0 |
[
"CWE-119",
"CWE-284",
"CWE-787"
] |
mongoose
|
b3e0f780c34cea88f057a62213c012aa88fe2deb
| 164,809,823,868,443,700,000,000,000,000,000,000,000 | 3 |
Fix heap-based overflow in parse_mqtt
PUBLISHED_FROM=3306592896298597fff5269634df0c1a1555113b
|
/*
 * CLI command "no neighbor <peer> next-hop-self": clears the
 * PEER_FLAG_NEXTHOP_SELF per-AF flag for the given peer, using the
 * AFI/SAFI of the current bgpd configuration node.  argv[0] is the
 * peer address/tag from the NEIGHBOR_CMD2 token.
 */
DEFUN (no_neighbor_nexthop_self,
       no_neighbor_nexthop_self_cmd,
       NO_NEIGHBOR_CMD2 "next-hop-self",
       NO_STR
       NEIGHBOR_STR
       NEIGHBOR_ADDR_STR2
       "Disable the next hop calculation for this neighbor\n")
{
  return peer_af_flag_unset_vty (vty, argv[0], bgp_node_afi (vty),
				 bgp_node_safi (vty), PEER_FLAG_NEXTHOP_SELF);
}
| 0 |
[
"CWE-125"
] |
frr
|
6d58272b4cf96f0daa846210dd2104877900f921
| 202,811,677,927,740,050,000,000,000,000,000,000,000 | 11 |
[bgpd] cleanup, compact and consolidate capability parsing code
2007-07-26 Paul Jakma <[email protected]>
* (general) Clean up and compact capability parsing slightly.
Consolidate validation of length and logging of generic TLV, and
memcpy of capability data, thus removing such from cap specifc
code (not always present or correct).
* bgp_open.h: Add structures for the generic capability TLV header
and for the data formats of the various specific capabilities we
support. Hence remove the badly named, or else misdefined, struct
capability.
* bgp_open.c: (bgp_capability_vty_out) Use struct capability_mp_data.
Do the length checks *before* memcpy()'ing based on that length
(stored capability - should have been validated anyway on input,
but..).
(bgp_afi_safi_valid_indices) new function to validate (afi,safi)
which is about to be used as index into arrays, consolidates
several instances of same, at least one of which appeared to be
incomplete..
(bgp_capability_mp) Much condensed.
(bgp_capability_orf_entry) New, process one ORF entry
(bgp_capability_orf) Condensed. Fixed to process all ORF entries.
(bgp_capability_restart) Condensed, and fixed to use a
cap-specific type, rather than abusing capability_mp.
(struct message capcode_str) added to aid generic logging.
(size_t cap_minsizes[]) added to aid generic validation of
capability length field.
(bgp_capability_parse) Generic logging and validation of TLV
consolidated here. Code compacted as much as possible.
* bgp_packet.c: (bgp_open_receive) Capability parsers now use
streams, so no more need here to manually fudge the input stream
getp.
(bgp_capability_msg_parse) use struct capability_mp_data. Validate
lengths /before/ memcpy. Use bgp_afi_safi_valid_indices.
(bgp_capability_receive) Exported for use by test harness.
* bgp_vty.c: (bgp_show_summary) fix conversion warning
(bgp_show_peer) ditto
* bgp_debug.h: Fix storage 'extern' after type 'const'.
* lib/log.c: (mes_lookup) warning about code not being in
same-number array slot should be debug, not warning. E.g. BGP
has several discontigious number spaces, allocating from
different parts of a space is not uncommon (e.g. IANA
assigned versus vendor-assigned code points in some number
space).
|
/*
 * Return nonzero when the VM-exit interruption info field describes a
 * valid hardware exception whose vector is #PF (page fault).
 */
static inline int is_page_fault(u32 intr_info)
{
	u32 mask = INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
		   INTR_INFO_VALID_MASK;
	u32 want = INTR_TYPE_HARD_EXCEPTION | PF_VECTOR | INTR_INFO_VALID_MASK;

	return (intr_info & mask) == want;
}
| 0 |
[
"CWE-20"
] |
linux-2.6
|
16175a796d061833aacfbd9672235f2d2725df65
| 243,363,506,933,002,380,000,000,000,000,000,000,000 | 6 |
KVM: VMX: Don't allow uninhibited access to EFER on i386
vmx_set_msr() does not allow i386 guests to touch EFER, but they can still
do so through the default: label in the switch. If they set EFER_LME, they
can oops the host.
Fix by having EFER access through the normal channel (which will check for
EFER_LME) even on i386.
Reported-and-tested-by: Benjamin Gilbert <[email protected]>
Cc: [email protected]
Signed-off-by: Avi Kivity <[email protected]>
|
/*
 * Emit string 's' through the Type1 output path: ensure it resides in
 * the global t1_line_array, point t1_line_ptr at its terminating NUL,
 * and write the line out via t1_putline().
 *
 * NOTE(review): when 's' is not already t1_line_array it is strcpy()'d
 * in with no length check — this assumes 's' always fits in
 * t1_line_array; confirm all callers bound their input.
 */
static void t1_puts(PDF pdf, const char *s)
{
    if (s != t1_line_array)
        strcpy(t1_line_array, s);
    t1_line_ptr = strend(t1_line_array);
    t1_putline(pdf);
}
| 0 |
[
"CWE-119"
] |
texlive-source
|
6ed0077520e2b0da1fd060c7f88db7b2e6068e4c
| 263,636,446,665,269,330,000,000,000,000,000,000,000 | 7 |
writet1 protection against buffer overflow
git-svn-id: svn://tug.org/texlive/trunk/Build/source@48697 c570f23f-e606-0410-a88d-b1316a301751
|
// Fetch the next argument under automatic (sequential) indexing.
// If manual indexing was already used (next_arg_index_ < 0), report the
// mixing error through *error* and return an empty Arg.
Arg next_arg(const char *&error) {
  if (next_arg_index_ < 0) {
    error = "cannot switch from manual to automatic argument indexing";
    return Arg();
  }
  return do_get_arg(internal::to_unsigned(next_arg_index_++), error);
}
| 0 |
[
"CWE-134",
"CWE-119",
"CWE-787"
] |
fmt
|
8cf30aa2be256eba07bb1cefb998c52326e846e7
| 237,063,547,167,684,020,000,000,000,000,000,000,000 | 6 |
Fix segfault on complex pointer formatting (#642)
|
// TScript input callback: forward the prompt to the console Input()
// routine and return the user's answer in Str.  The script instance is
// intentionally unused.  The two boolean flags are both true here —
// presumably echo/interactive behavior; confirm against Input()'s
// declaration.
void __fastcall TConsoleRunner::ScriptInput(TScript * /*Script*/,
  const UnicodeString Prompt, UnicodeString & Str)
{
  Input(Prompt, Str, true, true);
}
| 0 |
[
"CWE-787"
] |
winscp
|
faa96e8144e6925a380f94a97aa382c9427f688d
| 26,155,378,590,581,350,000,000,000,000,000,000,000 | 5 |
Bug 1943: Prevent loading session settings that can lead to remote code execution from handled URLs
https://winscp.net/tracker/1943
(cherry picked from commit ec584f5189a856cd79509f754722a6898045c5e0)
Source commit: 0f4be408b3f01132b00682da72d925d6c4ee649b
|
/*
 * Initialize a kthread_worker: zero the whole structure, set up its
 * spinlock with the caller-supplied lockdep class key/name (so lockdep
 * can distinguish workers initialized from different call sites), and
 * initialize both the immediate and the delayed work lists.
 */
void __kthread_init_worker(struct kthread_worker *worker,
				const char *name,
				struct lock_class_key *key)
{
	memset(worker, 0, sizeof(struct kthread_worker));
	spin_lock_init(&worker->lock);
	lockdep_set_class_and_name(&worker->lock, key, name);
	INIT_LIST_HEAD(&worker->work_list);
	INIT_LIST_HEAD(&worker->delayed_work_list);
}
| 0 |
[
"CWE-200"
] |
tip
|
dfb4357da6ddbdf57d583ba64361c9d792b0e0b1
| 99,903,942,849,823,650,000,000,000,000,000,000,000 | 10 |
time: Remove CONFIG_TIMER_STATS
Currently CONFIG_TIMER_STATS exposes process information across namespaces:
kernel/time/timer_list.c print_timer():
SEQ_printf(m, ", %s/%d", tmp, timer->start_pid);
/proc/timer_list:
#11: <0000000000000000>, hrtimer_wakeup, S:01, do_nanosleep, cron/2570
Given that the tracer can give the same information, this patch entirely
removes CONFIG_TIMER_STATS.
Suggested-by: Thomas Gleixner <[email protected]>
Signed-off-by: Kees Cook <[email protected]>
Acked-by: John Stultz <[email protected]>
Cc: Nicolas Pitre <[email protected]>
Cc: [email protected]
Cc: Lai Jiangshan <[email protected]>
Cc: Shuah Khan <[email protected]>
Cc: Xing Gao <[email protected]>
Cc: Jonathan Corbet <[email protected]>
Cc: Jessica Frazelle <[email protected]>
Cc: [email protected]
Cc: Nicolas Iooss <[email protected]>
Cc: "Paul E. McKenney" <[email protected]>
Cc: Petr Mladek <[email protected]>
Cc: Richard Cochran <[email protected]>
Cc: Tejun Heo <[email protected]>
Cc: Michal Marek <[email protected]>
Cc: Josh Poimboeuf <[email protected]>
Cc: Dmitry Vyukov <[email protected]>
Cc: Oleg Nesterov <[email protected]>
Cc: "Eric W. Biederman" <[email protected]>
Cc: Olof Johansson <[email protected]>
Cc: Andrew Morton <[email protected]>
Cc: [email protected]
Cc: Arjan van de Ven <[email protected]>
Link: http://lkml.kernel.org/r/20170208192659.GA32582@beast
Signed-off-by: Thomas Gleixner <[email protected]>
|
/*
 * Send an active RST to abort the connection.  A fresh skb is built
 * here (no options, never retransmitted); allocation or transmit
 * failure only bumps LINUX_MIB_TCPABORTFAILED — the abort proceeds
 * regardless.  'priority' is the allocation gfp mask of the caller's
 * context.
 */
void tcp_send_active_reset(struct sock *sk, gfp_t priority)
{
	struct sk_buff *skb;
	TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTRSTS);
	/* NOTE: No TCP options attached and we never retransmit this. */
	skb = alloc_skb(MAX_TCP_HEADER, priority);
	if (!skb) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
		return;
	}
	/* Reserve space for headers and prepare control bits. */
	skb_reserve(skb, MAX_TCP_HEADER);
	tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk),
			     TCPHDR_ACK | TCPHDR_RST);
	tcp_mstamp_refresh(tcp_sk(sk));
	/* Send it off. */
	if (tcp_transmit_skb(sk, skb, 0, priority))
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
	/* skb of trace_tcp_send_reset() keeps the skb that caused RST,
	 * skb here is different to the troublesome skb, so use NULL
	 */
	trace_tcp_send_reset(sk, NULL);
}
| 0 |
[
"CWE-190"
] |
net
|
3b4929f65b0d8249f19a50245cd88ed1a2f78cff
| 336,974,501,922,018,640,000,000,000,000,000,000,000 | 27 |
tcp: limit payload size of sacked skbs
Jonathan Looney reported that TCP can trigger the following crash
in tcp_shifted_skb() :
BUG_ON(tcp_skb_pcount(skb) < pcount);
This can happen if the remote peer has advertized the smallest
MSS that linux TCP accepts : 48
An skb can hold 17 fragments, and each fragment can hold 32KB
on x86, or 64KB on PowerPC.
This means that the 16bit witdh of TCP_SKB_CB(skb)->tcp_gso_segs
can overflow.
Note that tcp_sendmsg() builds skbs with less than 64KB
of payload, so this problem needs SACK to be enabled.
SACK blocks allow TCP to coalesce multiple skbs in the retransmit
queue, thus filling the 17 fragments to maximal capacity.
CVE-2019-11477 -- u16 overflow of TCP_SKB_CB(skb)->tcp_gso_segs
Fixes: 832d11c5cd07 ("tcp: Try to restore large SKBs while SACK processing")
Signed-off-by: Eric Dumazet <[email protected]>
Reported-by: Jonathan Looney <[email protected]>
Acked-by: Neal Cardwell <[email protected]>
Reviewed-by: Tyler Hicks <[email protected]>
Cc: Yuchung Cheng <[email protected]>
Cc: Bruce Curtis <[email protected]>
Cc: Jonathan Lemon <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
// Build the decompression loader for 32-bit little-endian MIPS.
// Shared libraries (xct_off != 0) get only the shlib init stub;
// ordinary executables get the entry stub plus the fold stub.
PackLinuxElf32mipsel::buildLoader(Filter const *ft)
{
    bool const is_shlib = (xct_off != 0);
    if (is_shlib) {
        buildLinuxLoader(stub_mipsel_r3000_linux_shlib_init,
                         sizeof(stub_mipsel_r3000_linux_shlib_init),
                         nullptr, 0, ft);
    }
    else {
        buildLinuxLoader(stub_mipsel_r3000_linux_elf_entry,
                         sizeof(stub_mipsel_r3000_linux_elf_entry),
                         stub_mipsel_r3000_linux_elf_fold,
                         sizeof(stub_mipsel_r3000_linux_elf_fold), ft);
    }
}
| 0 |
[
"CWE-476",
"CWE-415"
] |
upx
|
90279abdfcd235172eab99651043051188938dcc
| 244,524,025,715,207,460,000,000,000,000,000,000,000 | 12 |
PackLinuxElf::canUnpack must checkEhdr() for ELF input
https://github.com/upx/upx/issues/485
modified: p_lx_elf.cpp
|
/*
 * Open FNAME for reading and return a new input iobuf, or NULL on
 * error.  NULL or "-" selects stdin (shown as "[stdin]" and marked
 * print_only so the pseudo name is never treated as a real path);
 * special descriptor names are delegated to iobuf_fdopen(); regular
 * files go through the fd cache.  The file_filter context keeps its
 * own copy of the name for diagnostics.
 */
iobuf_open (const char *fname)
{
  iobuf_t a;
  gnupg_fd_t fp;
  file_filter_ctx_t *fcx;
  size_t len = 0;
  int print_only = 0;
  int fd;
  /* NULL or "-" means: read from stdin.  */
  if (!fname || (*fname == '-' && !fname[1]))
    {
      fp = FD_FOR_STDIN;
      fname = "[stdin]";
      print_only = 1;
    }
  else if ((fd = check_special_filename (fname)) != -1)
    return iobuf_fdopen (translate_file_handle (fd, 0), "rb");
  else if ((fp = fd_cache_open (fname, "rb")) == GNUPG_INVALID_FD)
    return NULL;
  a = iobuf_alloc (1, IOBUF_BUFFER_SIZE);
  /* fcx is allocated with room for the name appended in-place.  */
  fcx = xmalloc (sizeof *fcx + strlen (fname));
  fcx->fp = fp;
  fcx->print_only_name = print_only;
  strcpy (fcx->fname, fname);
  if (!print_only)
    a->real_fname = xstrdup (fname);
  a->filter = file_filter;
  a->filter_ov = fcx;
  /* Let the filter describe itself and initialize its state.  */
  file_filter (fcx, IOBUFCTRL_DESC, NULL, (byte *) & a->desc, &len);
  file_filter (fcx, IOBUFCTRL_INIT, NULL, NULL, &len);
  if (DBG_IOBUF)
    log_debug ("iobuf-%d.%d: open '%s' fd=%d\n",
	       a->no, a->subno, fname, FD2INT (fcx->fp));
  return a;
}
| 0 |
[
"CWE-20"
] |
gnupg
|
2183683bd633818dd031b090b5530951de76f392
| 146,547,677,534,703,780,000,000,000,000,000,000,000 | 36 |
Use inline functions to convert buffer data to scalars.
* common/host2net.h (buf16_to_ulong, buf16_to_uint): New.
(buf16_to_ushort, buf16_to_u16): New.
(buf32_to_size_t, buf32_to_ulong, buf32_to_uint, buf32_to_u32): New.
--
Commit 91b826a38880fd8a989318585eb502582636ddd8 was not enough to
avoid all sign extension on shift problems. Hanno Böck found a case
with an invalid read due to this problem. To fix that once and for
all almost all uses of "<< 24" and "<< 8" are changed by this patch to
use an inline function from host2net.h.
Signed-off-by: Werner Koch <[email protected]>
|
/*
 * Parse clipboard selection text into an FrClipboardData.
 *
 * The payload is "\r\n"-separated lines: [0] archive uri, [1] password,
 * [2] operation ("copy"/"cut"), [3] base dir, [4..] file names.
 * Clipboard content is externally controlled, so the four header fields
 * are validated before being indexed; on a malformed payload NULL is
 * returned (callers must check) instead of reading past the end of the
 * NULL-terminated vector returned by g_strsplit().
 */
get_clipboard_data_from_selection_data (FrWindow   *window,
					const char *data)
{
	FrClipboardData *clipboard_data;
	char           **uris;
	int              i;

	uris = g_strsplit (data, "\r\n", -1);
	for (i = 0; i < 4; i++) {
		if (uris[i] == NULL) {
			/* Fewer than the 4 mandatory header lines: reject. */
			g_strfreev (uris);
			return NULL;
		}
	}

	clipboard_data = fr_clipboard_data_new ();
	clipboard_data->file = g_file_new_for_uri (uris[0]);
	if (window->priv->second_password != NULL)
		clipboard_data->password = g_strdup (window->priv->second_password);
	else if (strcmp (uris[1], "") != 0)
		clipboard_data->password = g_strdup (uris[1]);
	clipboard_data->op = (strcmp (uris[2], "copy") == 0) ? FR_CLIPBOARD_OP_COPY : FR_CLIPBOARD_OP_CUT;
	clipboard_data->base_dir = g_strdup (uris[3]);
	/* Remaining non-empty lines are the file names to operate on. */
	for (i = 4; uris[i] != NULL; i++)
		if (uris[i][0] != '\0')
			clipboard_data->files = g_list_prepend (clipboard_data->files, g_strdup (uris[i]));
	clipboard_data->files = g_list_reverse (clipboard_data->files);

	g_strfreev (uris);

	return clipboard_data;
}
| 0 |
[
"CWE-22"
] |
file-roller
|
b147281293a8307808475e102a14857055f81631
| 20,129,527,985,243,663,000,000,000,000,000,000,000 | 27 |
libarchive: sanitize filenames before extracting
|
/**
 * xmlSchemaTypeFinalContains:
 * @type:  the schema type (may be NULL)
 * @final:  the "final" bit(s) to test for
 *
 * Returns 1 if any of the requested final bits are set in the type's
 * flags, 0 otherwise (a NULL @type never contains anything).
 */
xmlSchemaTypeFinalContains(xmlSchemaTypePtr type, int final)
{
    return ((type != NULL) && (type->flags & final)) ? 1 : 0;
}
| 0 |
[
"CWE-134"
] |
libxml2
|
4472c3a5a5b516aaf59b89be602fbce52756c3e9
| 272,763,631,373,861,770,000,000,000,000,000,000,000 | 9 |
Fix some format string warnings with possible format string vulnerability
For https://bugzilla.gnome.org/show_bug.cgi?id=761029
Decorate every method in libxml2 with the appropriate
LIBXML_ATTR_FORMAT(fmt,args) macro and add some cleanups
following the reports.
|
/*
 * Allocate and pre-initialize a child for pool 'wp': stdio pipes plus a
 * scoreboard slot.  Returns the new child, or 0 after logging and
 * releasing whatever was already acquired.
 */
static struct fpm_child_s *fpm_resources_prepare(struct fpm_worker_pool_s *wp) /* {{{ */
{
	struct fpm_child_s *c = fpm_child_alloc();

	if (!c) {
		zlog(ZLOG_ERROR, "[pool %s] unable to malloc new child", wp->config->name);
		return 0;
	}

	c->wp = wp;
	c->fd_stdout = -1;
	c->fd_stderr = -1;

	if (fpm_stdio_prepare_pipes(c) < 0)
		goto fail_free_child;

	if (fpm_scoreboard_proc_alloc(c) < 0)
		goto fail_discard_pipes;

	return c;

fail_discard_pipes:
	fpm_stdio_discard_pipes(c);
fail_free_child:
	fpm_child_free(c);
	return 0;
}
| 0 |
[
"CWE-787"
] |
php-src
|
fadb1f8c1d08ae62b4f0a16917040fde57a3b93b
| 241,613,101,870,374,360,000,000,000,000,000,000,000 | 27 |
Fix bug #81026 (PHP-FPM oob R/W in root process leading to priv escalation)
The main change is to store scoreboard procs directly to the variable sized
array rather than indirectly through the pointer.
Signed-off-by: Stanislav Malyshev <[email protected]>
|
/*
 * Execute a "for NAME in WORDS" loop node: expand the word list, then
 * run the body once per word with NAME set to that word.  break is
 * honored via skiploop() (continue — SKIPCONT — only restarts the
 * loop); returns the status of the last body executed, or 0 for an
 * empty list.
 */
evalfor(union node *n, int flags)
{
	struct arglist arglist;
	union node *argp;
	struct strlist *sp;
	int status;

	/* Track the source line for error reporting (relative to the
	 * enclosing function if we are inside one). */
	errlinno = lineno = n->nfor.linno;
	if (funcline)
		lineno -= funcline - 1;
	arglist.lastp = &arglist.list;
	for (argp = n->nfor.args ; argp ; argp = argp->narg.next) {
		expandarg(argp, &arglist, EXP_FULL | EXP_TILDE);
	}
	*arglist.lastp = NULL;
	status = 0;
	loopnest++;
	/* Only EV_TESTED is propagated to the body evaluations. */
	flags &= EV_TESTED;
	for (sp = arglist.list ; sp ; sp = sp->next) {
		setvar(n->nfor.var, sp->text, 0);
		status = evaltree(n->nfor.body, flags);
		if (skiploop() & ~SKIPCONT)
			break;
	}
	loopnest--;
	return status;
}
| 0 |
[] |
dash
|
29d6f2148f10213de4e904d515e792d2cf8c968e
| 195,950,389,299,837,680,000,000,000,000,000,000,000 | 30 |
eval: Check nflag in evaltree instead of cmdloop
This patch moves the nflag check from cmdloop into evaltree. This
is so that nflag will be in force even if we enter the shell via a
path other than cmdloop, e.g., through sh -c.
Reported-by: Joey Hess <[email protected]>
Signed-off-by: Herbert Xu <[email protected]>
|
/* GCancellable "cancelled" handler: abort all messages pending on the
 * autodiscover SoupSession so the operation unwinds promptly. */
ews_client_autodiscover_cancelled_cb (GCancellable *cancellable, gpointer user_data)
{
  AutodiscoverData *data;

  data = (AutodiscoverData *) user_data;
  soup_session_abort (data->session);
}
| 0 |
[
"CWE-310"
] |
gnome-online-accounts
|
edde7c63326242a60a075341d3fea0be0bc4d80e
| 9,363,714,145,098,892,000,000,000,000,000,000,000 | 5 |
Guard against invalid SSL certificates
None of the branded providers (eg., Google, Facebook and Windows Live)
should ever have an invalid certificate. So set "ssl-strict" on the
SoupSession object being used by GoaWebView.
Providers like ownCloud and Exchange might have to deal with
certificates that are not up to the mark. eg., self-signed
certificates. For those, show a warning when the account is being
created, and only proceed if the user decides to ignore it. In any
case, save the status of the certificate that was used to create the
account. So an account created with a valid certificate will never
work with an invalid one, and one created with an invalid certificate
will not throw any further warnings.
Fixes: CVE-2013-0240
|
/**
 * \brief Get every section of a bin object.
 * \param obj  the bin object (must not be NULL)
 * \return the object's internal section list (borrowed, const — the
 *         caller must not modify or free it), or NULL when \p obj is
 *         NULL.
 */
RZ_API const RzList /*<RzBinSection *>*/ *rz_bin_object_get_sections_all(RZ_NONNULL RzBinObject *obj) {
	rz_return_val_if_fail(obj, NULL);
	return obj->sections;
}
| 0 |
[
"CWE-200",
"CWE-787"
] |
rizin
|
07b43bc8aa1ffebd9b68d60624c9610cf7e460c7
| 21,409,072,802,830,950,000,000,000,000,000,000,000 | 4 |
fix oob read on luac
|
/*
 * tmpfs ->setxattr: requests in the "system.*" namespace are resolved
 * through the generic infrastructure (sb->s_xattr handlers); all other
 * names must first pass shmem_xattr_validate() and are then stored in
 * the inode's in-memory simple_xattr list.
 */
static int shmem_setxattr(struct dentry *dentry, const char *name,
			  const void *value, size_t size, int flags)
{
	struct shmem_inode_info *info = SHMEM_I(dentry->d_inode);
	int err;
	/*
	 * If this is a request for a synthetic attribute in the system.*
	 * namespace use the generic infrastructure to resolve a handler
	 * for it via sb->s_xattr.
	 */
	if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
		return generic_setxattr(dentry, name, value, size, flags);
	err = shmem_xattr_validate(name);
	if (err)
		return err;
	return simple_xattr_set(&info->xattrs, name, value, size, flags);
}
| 0 |
[
"CWE-399"
] |
linux
|
5f00110f7273f9ff04ac69a5f85bb535a4fd0987
| 321,743,553,395,429,700,000,000,000,000,000,000,000 | 20 |
tmpfs: fix use-after-free of mempolicy object
The tmpfs remount logic preserves filesystem mempolicy if the mpol=M
option is not specified in the remount request. A new policy can be
specified if mpol=M is given.
Before this patch remounting an mpol bound tmpfs without specifying
mpol= mount option in the remount request would set the filesystem's
mempolicy object to a freed mempolicy object.
To reproduce the problem boot a DEBUG_PAGEALLOC kernel and run:
# mkdir /tmp/x
# mount -t tmpfs -o size=100M,mpol=interleave nodev /tmp/x
# grep /tmp/x /proc/mounts
nodev /tmp/x tmpfs rw,relatime,size=102400k,mpol=interleave:0-3 0 0
# mount -o remount,size=200M nodev /tmp/x
# grep /tmp/x /proc/mounts
nodev /tmp/x tmpfs rw,relatime,size=204800k,mpol=??? 0 0
# note ? garbage in mpol=... output above
# dd if=/dev/zero of=/tmp/x/f count=1
# panic here
Panic:
BUG: unable to handle kernel NULL pointer dereference at (null)
IP: [< (null)>] (null)
[...]
Oops: 0010 [#1] SMP DEBUG_PAGEALLOC
Call Trace:
mpol_shared_policy_init+0xa5/0x160
shmem_get_inode+0x209/0x270
shmem_mknod+0x3e/0xf0
shmem_create+0x18/0x20
vfs_create+0xb5/0x130
do_last+0x9a1/0xea0
path_openat+0xb3/0x4d0
do_filp_open+0x42/0xa0
do_sys_open+0xfe/0x1e0
compat_sys_open+0x1b/0x20
cstar_dispatch+0x7/0x1f
Non-debug kernels will not crash immediately because referencing the
dangling mpol will not cause a fault. Instead the filesystem will
reference a freed mempolicy object, which will cause unpredictable
behavior.
The problem boils down to a dropped mpol reference below if
shmem_parse_options() does not allocate a new mpol:
config = *sbinfo
shmem_parse_options(data, &config, true)
mpol_put(sbinfo->mpol)
sbinfo->mpol = config.mpol /* BUG: saves unreferenced mpol */
This patch avoids the crash by not releasing the mempolicy if
shmem_parse_options() doesn't create a new mpol.
How far back does this issue go? I see it in both 2.6.36 and 3.3. I did
not look back further.
Signed-off-by: Greg Thelen <[email protected]>
Acked-by: Hugh Dickins <[email protected]>
Cc: <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
// Cumulative sum along `axis` of `input_data` (layout described by
// `shape`), written to `output_data`.  The tensor must have at least
// one dimension (debug-checked).  `exclusive` and `reverse` are
// forwarded unchanged to CumsumImpl, which performs the actual work —
// see its definition for the precise semantics of those flags.
void CumSum(const T* input_data, const RuntimeShape& shape, int axis,
            bool exclusive, bool reverse, T* output_data) {
  const int dim = shape.DimensionsCount();
  TFLITE_DCHECK_GE(dim, 1);
  CumsumImpl<T>(input_data, shape, axis, exclusive, reverse, output_data);
}
| 0 |
[
"CWE-476",
"CWE-369"
] |
tensorflow
|
15691e456c7dc9bd6be203b09765b063bf4a380c
| 47,898,015,511,649,740,000,000,000,000,000,000,000 | 6 |
Prevent dereferencing of null pointers in TFLite's `add.cc`.
PiperOrigin-RevId: 387244946
Change-Id: I56094233327fbd8439b92e1dbb1262176e00eeb9
|
/*
 * Convert a UCS-4 label INPUT (INLEN code points, not necessarily
 * zero-terminated) to its ASCII (ACE) form and copy the result into
 * OUTPUT, which must have room for 63 characters plus the terminating
 * zero.  A NULL INPUT yields an empty OUTPUT and IDN2_OK.  Returns an
 * IDN2_* error code on failure; results longer than 63 octets give
 * IDN2_TOO_BIG_DOMAIN.  All intermediate buffers are freed on every
 * path.
 */
idn2_to_ascii_4i (const uint32_t * input, size_t inlen, char * output, int flags)
{
  uint32_t *input_u32;
  uint8_t *input_u8, *output_u8;
  size_t length;
  int rc;
  if (!input)
    {
      if (output)
	*output = 0;
      return IDN2_OK;
    }
  /* Make a zero-terminated copy, then transcode UCS-4 -> UTF-8.  */
  input_u32 = (uint32_t *) malloc ((inlen + 1) * sizeof(uint32_t));
  if (!input_u32)
    return IDN2_MALLOC;
  u32_cpy (input_u32, input, inlen);
  input_u32[inlen] = 0;
  input_u8 = u32_to_u8 (input_u32, inlen + 1, NULL, &length);
  free (input_u32);
  if (!input_u8)
    {
      if (errno == ENOMEM)
	return IDN2_MALLOC;
      return IDN2_ENCODING_ERROR;
    }
  rc = idn2_lookup_u8 (input_u8, &output_u8, flags);
  free (input_u8);
  if (rc == IDN2_OK)
    {
      /* wow, this is ugly, but libidn manpage states:
       * char * out output zero terminated string that must have room for at
       * least 63 characters plus the terminating zero.
       */
      size_t len = strlen ((char *) output_u8);
      if (len > 63)
	{
	  free (output_u8);
	  return IDN2_TOO_BIG_DOMAIN;
	}
      if (output)
	strcpy (output, (char *) output_u8);
      free (output_u8);
    }
  return rc;
}
| 0 |
[
"CWE-787"
] |
libidn2
|
e4d1558aa2c1c04a05066ee8600f37603890ba8c
| 21,245,970,559,885,027,000,000,000,000,000,000,000 | 55 |
idn2_to_ascii_4i(): Restrict output length to 63
|
/*
 * Fetch a strip/tile offset or bytecount array from directory entry
 * 'dir' into a freshly allocated uint64 array of exactly 'nstrips'
 * elements, returned via *lpp (caller frees with _TIFFfree).  If the
 * tag holds a different count than requested, the data is copied into
 * a right-sized array: truncated when longer, zero-padded when
 * shorter.  Returns 1 on success, 0 on read/allocation error (after
 * reporting it).
 */
TIFFFetchStripThing(TIFF* tif, TIFFDirEntry* dir, uint32 nstrips, uint64** lpp)
{
	static const char module[] = "TIFFFetchStripThing";
	enum TIFFReadDirEntryErr err;
	uint64* data;
	err=TIFFReadDirEntryLong8Array(tif,dir,&data);
	if (err!=TIFFReadDirEntryErrOk)
	{
		const TIFFField* fip = TIFFFieldWithTag(tif,dir->tdir_tag);
		TIFFReadDirEntryOutputErr(tif,err,module,fip ? fip->field_name : "unknown tagname",0);
		return(0);
	}
	if (dir->tdir_count!=(uint64)nstrips)
	{
		/* Count mismatch: re-shape the array to exactly nstrips. */
		uint64* resizeddata;
		resizeddata=(uint64*)_TIFFCheckMalloc(tif,nstrips,sizeof(uint64),"for strip array");
		if (resizeddata==0) {
			_TIFFfree(data);
			return(0);
		}
		if (dir->tdir_count<(uint64)nstrips)
		{
			/* Shorter than expected: copy what exists, zero the rest. */
			_TIFFmemcpy(resizeddata,data,(uint32)dir->tdir_count*sizeof(uint64));
			_TIFFmemset(resizeddata+(uint32)dir->tdir_count,0,(nstrips-(uint32)dir->tdir_count)*sizeof(uint64));
		}
		else
			_TIFFmemcpy(resizeddata,data,nstrips*sizeof(uint64));
		_TIFFfree(data);
		data=resizeddata;
	}
	*lpp=data;
	return(1);
}
| 0 |
[
"CWE-125"
] |
libtiff
|
9a72a69e035ee70ff5c41541c8c61cd97990d018
| 249,411,317,751,720,400,000,000,000,000,000,000,000 | 33 |
* libtiff/tif_dirread.c: modify ChopUpSingleUncompressedStrip() to
instanciate compute ntrips as TIFFhowmany_32(td->td_imagelength, rowsperstrip),
instead of a logic based on the total size of data. Which is faulty is
the total size of data is not sufficient to fill the whole image, and thus
results in reading outside of the StripByCounts/StripOffsets arrays when
using TIFFReadScanline().
Reported by Agostino Sarubbo.
Fixes http://bugzilla.maptools.org/show_bug.cgi?id=2608.
* libtiff/tif_strip.c: revert the change in TIFFNumberOfStrips() done
for http://bugzilla.maptools.org/show_bug.cgi?id=2587 / CVE-2016-9273 since
the above change is a better fix that makes it unnecessary.
|
// Append `value` to `target` as a base-128 varint without a field tag,
// returning the position one past the last byte written.  The caller
// must guarantee sufficient space (a 64-bit varint occupies at most
// 10 bytes).
inline uint8_t* WireFormatLite::WriteUInt64NoTagToArray(uint64_t value,
                                                        uint8_t* target) {
  return io::CodedOutputStream::WriteVarint64ToArray(value, target);
}
| 0 |
[
"CWE-703"
] |
protobuf
|
d1635e1496f51e0d5653d856211e8821bc47adc4
| 25,985,038,224,339,920,000,000,000,000,000,000,000 | 4 |
Apply patch
|
/*
 * Build a file_struct for 'fname' (via make_file), optionally send its
 * entry over fd 'f' (f < 0 means "build only"), and append it to
 * 'flist'.  When sending, this also handles: zero-length-symlink
 * rejection, optional iconv conversion of the name and symlink target,
 * and optional ACL/xattr collection and transmission.  Returns the new
 * file_struct, or NULL on error (io_error is updated and an error has
 * been printed).
 */
static struct file_struct *send_file_name(int f, struct file_list *flist,
					  const char *fname, STRUCT_STAT *stp,
					  int flags, int filter_level)
{
	struct file_struct *file;
	file = make_file(fname, flist, stp, flags, filter_level);
	if (!file)
		return NULL;
	/* Apply any --chmod tweaks (never to symlinks or mode 0). */
	if (chmod_modes && !S_ISLNK(file->mode) && file->mode)
		file->mode = tweak_mode(file->mode, chmod_modes);
	if (f >= 0) {
		char fbuf[MAXPATHLEN];
#ifdef SUPPORT_LINKS
		const char *symlink_name;
		int symlink_len;
#ifdef ICONV_OPTION
		char symlink_buf[MAXPATHLEN];
#endif
#endif
#if defined SUPPORT_ACLS || defined SUPPORT_XATTRS
		stat_x sx;
		init_stat_x(&sx);
#endif
#ifdef SUPPORT_LINKS
		/* Symlinks with an empty target are rejected outright. */
		if (preserve_links && S_ISLNK(file->mode)) {
			symlink_name = F_SYMLINK(file);
			symlink_len = strlen(symlink_name);
			if (symlink_len == 0) {
				io_error |= IOERR_GENERAL;
				f_name(file, fbuf);
				rprintf(FERROR_XFER,
					"skipping symlink with 0-length value: %s\n",
					full_fname(fbuf));
				return NULL;
			}
		} else {
			symlink_name = NULL;
			symlink_len = 0;
		}
#endif
#ifdef ICONV_OPTION
		/* Convert dirname + basename (and possibly the symlink
		 * target) through the charset converter into fbuf. */
		if (ic_send != (iconv_t)-1) {
			xbuf outbuf, inbuf;
			INIT_CONST_XBUF(outbuf, fbuf);
			if (file->dirname) {
				INIT_XBUF_STRLEN(inbuf, (char*)file->dirname);
				outbuf.size -= 2; /* Reserve room for '/' & 1 more char. */
				if (iconvbufs(ic_send, &inbuf, &outbuf, ICB_INIT) < 0)
					goto convert_error;
				outbuf.size += 2;
				fbuf[outbuf.len++] = '/';
			}
			INIT_XBUF_STRLEN(inbuf, (char*)file->basename);
			if (iconvbufs(ic_send, &inbuf, &outbuf, ICB_INIT) < 0) {
			  convert_error:
				io_error |= IOERR_GENERAL;
				rprintf(FERROR_XFER,
					"[%s] cannot convert filename: %s (%s)\n",
					who_am_i(), f_name(file, fbuf), strerror(errno));
				return NULL;
			}
			fbuf[outbuf.len] = '\0';
#ifdef SUPPORT_LINKS
			if (symlink_len && sender_symlink_iconv) {
				INIT_XBUF(inbuf, (char*)symlink_name, symlink_len, (size_t)-1);
				INIT_CONST_XBUF(outbuf, symlink_buf);
				if (iconvbufs(ic_send, &inbuf, &outbuf, ICB_INIT) < 0) {
					io_error |= IOERR_GENERAL;
					f_name(file, fbuf);
					rprintf(FERROR_XFER,
						"[%s] cannot convert symlink data for: %s (%s)\n",
						who_am_i(), full_fname(fbuf), strerror(errno));
					return NULL;
				}
				symlink_buf[outbuf.len] = '\0';
				symlink_name = symlink_buf;
				symlink_len = outbuf.len;
			}
#endif
		} else
#endif
			f_name(file, fbuf);
#ifdef SUPPORT_ACLS
		if (preserve_acls && !S_ISLNK(file->mode)) {
			sx.st.st_mode = file->mode;
			if (get_acl(fname, &sx) < 0) {
				io_error |= IOERR_GENERAL;
				return NULL;
			}
		}
#endif
#ifdef SUPPORT_XATTRS
		if (preserve_xattrs) {
			sx.st.st_mode = file->mode;
			if (get_xattr(fname, &sx) < 0) {
				io_error |= IOERR_GENERAL;
				return NULL;
			}
		}
#endif
		send_file_entry(f, fbuf, file,
#ifdef SUPPORT_LINKS
				symlink_name, symlink_len,
#endif
				flist->used, flist->ndx_start);
#ifdef SUPPORT_ACLS
		if (preserve_acls && !S_ISLNK(file->mode)) {
			send_acl(f, &sx);
			free_acl(&sx);
		}
#endif
#ifdef SUPPORT_XATTRS
		if (preserve_xattrs) {
			F_XATTR(file) = send_xattr(f, &sx);
			free_xattr(&sx);
		}
#endif
	}
	maybe_emit_filelist_progress(flist->used + flist_count_offset);
	flist_expand(flist, 1);
	flist->files[flist->used++] = file;
	return file;
}
| 0 |
[
"CWE-59"
] |
rsync
|
962f8b90045ab331fc04c9e65f80f1a53e68243b
| 20,107,479,649,148,022,000,000,000,000,000,000,000 | 139 |
Complain if an inc-recursive path is not right for its dir.
This ensures that a malicious sender can't use a just-sent
symlink as a transfer path.
|
/* Seek callback for file-backed JasPer streams: delegate to lseek() on
 * the underlying descriptor and return the resulting offset (or -1 on
 * failure, per lseek semantics). */
static long file_seek(jas_stream_obj_t *obj, long offset, int origin)
{
	jas_stream_fileobj_t *fileobj = JAS_CAST(jas_stream_fileobj_t *, obj);

	JAS_DBGLOG(100, ("file_seek(%p, %ld, %d)\n", obj, offset, origin));
	return lseek(fileobj->fd, offset, origin);
}
| 0 |
[
"CWE-415",
"CWE-190",
"CWE-369"
] |
jasper
|
634ce8e8a5accc0fa05dd2c20d42b4749d4b2735
| 244,801,922,335,782,620,000,000,000,000,000,000,000 | 7 |
Made some changes to the I/O stream library for memory streams.
There were a number of potential problems due to the possibility
of integer overflow.
Changed some integral types to the larger types size_t or ssize_t.
For example, the function mem_resize now takes the buffer size parameter
as a size_t.
Added a new function jas_stream_memopen2, which takes a
buffer size specified as a size_t instead of an int.
This can be used in jas_image_cmpt_create to avoid potential
overflow problems.
Added a new function jas_deprecated to warn about reliance on
deprecated library behavior.
|
/*
 * vm86 syscall entry (vm86plus interface).  IRQ-related subfunctions
 * are dispatched to do_vm86_irq_handling(); VM86_PLUS_INSTALL_CHECK
 * just reports success.  Anything else is treated as VM86_ENTER /
 * VM86_ENTER_NO_BYPASS: refuse if this task is already in vm86 mode
 * (saved_sp0 set), copy the user's register image in, then switch to
 * vm86 mode via do_sys_vm86() — which does not return here.
 */
int sys_vm86(unsigned long cmd, unsigned long arg, struct pt_regs *regs)
{
	struct kernel_vm86_struct info; /* declare this _on top_,
					 * this avoids wasting of stack space.
					 * This remains on the stack until we
					 * return to 32 bit user space.
					 */
	struct task_struct *tsk;
	int tmp, ret;
	struct vm86plus_struct __user *v86;
	tsk = current;
	switch (cmd) {
	case VM86_REQUEST_IRQ:
	case VM86_FREE_IRQ:
	case VM86_GET_IRQ_BITS:
	case VM86_GET_AND_RESET_IRQ:
		ret = do_vm86_irq_handling(cmd, (int)arg);
		goto out;
	case VM86_PLUS_INSTALL_CHECK:
		/*
		 * NOTE: on old vm86 stuff this will return the error
		 * from access_ok(), because the subfunction is
		 * interpreted as (invalid) address to vm86_struct.
		 * So the installation check works.
		 */
		ret = 0;
		goto out;
	}
	/* we come here only for functions VM86_ENTER, VM86_ENTER_NO_BYPASS */
	ret = -EPERM;
	if (tsk->thread.saved_sp0)
		goto out;
	v86 = (struct vm86plus_struct __user *)arg;
	/* Copy only the register portion of the user structure. */
	tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
				       offsetof(struct kernel_vm86_struct, regs32) -
				       sizeof(info.regs));
	ret = -EFAULT;
	if (tmp)
		goto out;
	info.regs32 = regs;
	info.vm86plus.is_vm86pus = 1;
	tsk->thread.vm86_info = (struct vm86_struct __user *)v86;
	do_sys_vm86(&info, tsk);
	ret = 0;	/* we never return here */
out:
	return ret;
}
| 0 |
[
"CWE-264"
] |
linux-2.6
|
1a5a9906d4e8d1976b701f889d8f35d54b928f25
| 295,306,184,344,282,870,000,000,000,000,000,000,000 | 49 |
mm: thp: fix pmd_bad() triggering in code paths holding mmap_sem read mode
In some cases it may happen that pmd_none_or_clear_bad() is called with
the mmap_sem hold in read mode. In those cases the huge page faults can
allocate hugepmds under pmd_none_or_clear_bad() and that can trigger a
false positive from pmd_bad() that will not like to see a pmd
materializing as trans huge.
It's not khugepaged causing the problem, khugepaged holds the mmap_sem
in write mode (and all those sites must hold the mmap_sem in read mode
to prevent pagetables to go away from under them, during code review it
seems vm86 mode on 32bit kernels requires that too unless it's
restricted to 1 thread per process or UP builds). The race is only with
the huge pagefaults that can convert a pmd_none() into a
pmd_trans_huge().
Effectively all these pmd_none_or_clear_bad() sites running with
mmap_sem in read mode are somewhat speculative with the page faults, and
the result is always undefined when they run simultaneously. This is
probably why it wasn't common to run into this. For example if the
madvise(MADV_DONTNEED) runs zap_page_range() shortly before the page
fault, the hugepage will not be zapped, if the page fault runs first it
will be zapped.
Altering pmd_bad() not to error out if it finds hugepmds won't be enough
to fix this, because zap_pmd_range would then proceed to call
zap_pte_range (which would be incorrect if the pmd become a
pmd_trans_huge()).
The simplest way to fix this is to read the pmd in the local stack
(regardless of what we read, no need of actual CPU barriers, only
compiler barrier needed), and be sure it is not changing under the code
that computes its value. Even if the real pmd is changing under the
value we hold on the stack, we don't care. If we actually end up in
zap_pte_range it means the pmd was not none already and it was not huge,
and it can't become huge from under us (khugepaged locking explained
above).
All we need is to enforce that there is no way anymore that in a code
path like below, pmd_trans_huge can be false, but pmd_none_or_clear_bad
can run into a hugepmd. The overhead of a barrier() is just a compiler
tweak and should not be measurable (I only added it for THP builds). I
don't exclude different compiler versions may have prevented the race
too by caching the value of *pmd on the stack (that hasn't been
verified, but it wouldn't be impossible considering
pmd_none_or_clear_bad, pmd_bad, pmd_trans_huge, pmd_none are all inlines
and there's no external function called in between pmd_trans_huge and
pmd_none_or_clear_bad).
if (pmd_trans_huge(*pmd)) {
if (next-addr != HPAGE_PMD_SIZE) {
VM_BUG_ON(!rwsem_is_locked(&tlb->mm->mmap_sem));
split_huge_page_pmd(vma->vm_mm, pmd);
} else if (zap_huge_pmd(tlb, vma, pmd, addr))
continue;
/* fall through */
}
if (pmd_none_or_clear_bad(pmd))
Because this race condition could be exercised without special
privileges this was reported in CVE-2012-1179.
The race was identified and fully explained by Ulrich who debugged it.
I'm quoting his accurate explanation below, for reference.
====== start quote =======
mapcount 0 page_mapcount 1
kernel BUG at mm/huge_memory.c:1384!
At some point prior to the panic, a "bad pmd ..." message similar to the
following is logged on the console:
mm/memory.c:145: bad pmd ffff8800376e1f98(80000000314000e7).
The "bad pmd ..." message is logged by pmd_clear_bad() before it clears
the page's PMD table entry.
143 void pmd_clear_bad(pmd_t *pmd)
144 {
-> 145 pmd_ERROR(*pmd);
146 pmd_clear(pmd);
147 }
After the PMD table entry has been cleared, there is an inconsistency
between the actual number of PMD table entries that are mapping the page
and the page's map count (_mapcount field in struct page). When the page
is subsequently reclaimed, __split_huge_page() detects this inconsistency.
1381 if (mapcount != page_mapcount(page))
1382 printk(KERN_ERR "mapcount %d page_mapcount %d\n",
1383 mapcount, page_mapcount(page));
-> 1384 BUG_ON(mapcount != page_mapcount(page));
The root cause of the problem is a race of two threads in a multithreaded
process. Thread B incurs a page fault on a virtual address that has never
been accessed (PMD entry is zero) while Thread A is executing an madvise()
system call on a virtual address within the same 2 MB (huge page) range.
virtual address space
.---------------------.
| |
| |
.-|---------------------|
| | |
| | |<-- B(fault)
| | |
2 MB | |/////////////////////|-.
huge < |/////////////////////| > A(range)
page | |/////////////////////|-'
| | |
| | |
'-|---------------------|
| |
| |
'---------------------'
- Thread A is executing an madvise(..., MADV_DONTNEED) system call
on the virtual address range "A(range)" shown in the picture.
sys_madvise
// Acquire the semaphore in shared mode.
down_read(¤t->mm->mmap_sem)
...
madvise_vma
switch (behavior)
case MADV_DONTNEED:
madvise_dontneed
zap_page_range
unmap_vmas
unmap_page_range
zap_pud_range
zap_pmd_range
//
// Assume that this huge page has never been accessed.
// I.e. content of the PMD entry is zero (not mapped).
//
if (pmd_trans_huge(*pmd)) {
// We don't get here due to the above assumption.
}
//
// Assume that Thread B incurred a page fault and
.---------> // sneaks in here as shown below.
| //
| if (pmd_none_or_clear_bad(pmd))
| {
| if (unlikely(pmd_bad(*pmd)))
| pmd_clear_bad
| {
| pmd_ERROR
| // Log "bad pmd ..." message here.
| pmd_clear
| // Clear the page's PMD entry.
| // Thread B incremented the map count
| // in page_add_new_anon_rmap(), but
| // now the page is no longer mapped
| // by a PMD entry (-> inconsistency).
| }
| }
|
v
- Thread B is handling a page fault on virtual address "B(fault)" shown
in the picture.
...
do_page_fault
__do_page_fault
// Acquire the semaphore in shared mode.
down_read_trylock(&mm->mmap_sem)
...
handle_mm_fault
if (pmd_none(*pmd) && transparent_hugepage_enabled(vma))
// We get here due to the above assumption (PMD entry is zero).
do_huge_pmd_anonymous_page
alloc_hugepage_vma
// Allocate a new transparent huge page here.
...
__do_huge_pmd_anonymous_page
...
spin_lock(&mm->page_table_lock)
...
page_add_new_anon_rmap
// Here we increment the page's map count (starts at -1).
atomic_set(&page->_mapcount, 0)
set_pmd_at
// Here we set the page's PMD entry which will be cleared
// when Thread A calls pmd_clear_bad().
...
spin_unlock(&mm->page_table_lock)
The mmap_sem does not prevent the race because both threads are acquiring
it in shared mode (down_read). Thread B holds the page_table_lock while
the page's map count and PMD table entry are updated. However, Thread A
does not synchronize on that lock.
====== end quote =======
[[email protected]: checkpatch fixes]
Reported-by: Ulrich Obergfell <[email protected]>
Signed-off-by: Andrea Arcangeli <[email protected]>
Acked-by: Johannes Weiner <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: Hugh Dickins <[email protected]>
Cc: Dave Jones <[email protected]>
Acked-by: Larry Woodman <[email protected]>
Acked-by: Rik van Riel <[email protected]>
Cc: <[email protected]> [2.6.38+]
Cc: Mark Salter <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
RequestEncoder& ClientConnectionImpl::newStream(ResponseDecoder& decoder) {
  // Allocate the client-side stream state for this codec connection.
  ClientStreamImplPtr new_stream(new ClientStreamImpl(*this, per_stream_buffer_limit_, decoder));
  // A freshly created stream cannot discover on its own that the connection is
  // already above the high watermark, so propagate that state explicitly here.
  const bool above_watermark = connection_.aboveHighWatermark();
  if (above_watermark) {
    new_stream->runHighWatermarkCallbacks();
  }
  // Grab a reference before ownership moves into the active-streams list.
  ClientStreamImpl& result = *new_stream;
  new_stream->moveIntoList(std::move(new_stream), active_streams_);
  return result;
}
| 0 |
[
"CWE-400"
] |
envoy
|
0e49a495826ea9e29134c1bd54fdeb31a034f40c
| 155,982,453,928,524,930,000,000,000,000,000,000,000 | 12 |
http/2: add stats and stream flush timeout (#139)
This commit adds a new stream flush timeout to guard against a
remote server that does not open window once an entire stream has
been buffered for flushing. Additional stats have also been added
to better understand the codecs view of active streams as well as
amount of data buffered.
Signed-off-by: Matt Klein <[email protected]>
|
unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len)
{
	/* Pulling more bytes than the skb holds would corrupt its layout. */
	BUG_ON(len > skb->len);
	skb->len -= len;
	/* The remaining length must still cover all non-linear (paged) data. */
	BUG_ON(skb->len < skb->data_len);
	/* Fold the pulled bytes out of the software receive checksum. */
	skb_postpull_rcsum(skb, skb->data, len);
	skb->data += len;
	return skb->data;
}
| 0 |
[
"CWE-416"
] |
net
|
36d5fe6a000790f56039afe26834265db0a3ad4c
| 189,834,289,445,046,200,000,000,000,000,000,000,000 | 8 |
core, nfqueue, openvswitch: Orphan frags in skb_zerocopy and handle errors
skb_zerocopy can copy elements of the frags array between skbs, but it doesn't
orphan them. Also, it doesn't handle errors, so this patch takes care of that
as well, and modify the callers accordingly. skb_tx_error() is also added to
the callers so they will signal the failed delivery towards the creator of the
skb.
Signed-off-by: Zoltan Kiss <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
PJ_DEF(pj_status_t) pjsip_tpmgr_set_state_cb(pjsip_tpmgr *mgr,
					     pjsip_tp_state_callback cb)
{
    /* A transport manager instance is mandatory. */
    PJ_ASSERT_RETURN(mgr != NULL, PJ_EINVAL);

    /* Install (or clear, when cb is NULL) the transport state callback. */
    mgr->tp_state_cb = cb;

    return PJ_SUCCESS;
}
| 0 |
[
"CWE-297",
"CWE-295"
] |
pjproject
|
67e46c1ac45ad784db5b9080f5ed8b133c122872
| 336,450,193,524,131,250,000,000,000,000,000,000,000 | 9 |
Merge pull request from GHSA-8hcp-hm38-mfph
* Check hostname during TLS transport selection
* revision based on feedback
* remove the code in create_request that has been moved
|
/* Parse a TLS ClientHello handshake message of `datalen` bytes at `data`,
 * negotiating protocol version, session resumption, cipher suite and
 * compression method for `session`. Returns 0 on success or a negative
 * GnuTLS error code. NOTE(review): DECR_LEN presumably decrements `len`
 * and returns an error on underflow, bounding each read -- confirm the
 * macro's definition before relying on that here. */
_gnutls_read_client_hello (gnutls_session_t session, opaque * data,
                           int datalen)
{
  uint8_t session_id_len;
  int pos = 0, ret;
  uint16_t suite_size, comp_size;
  gnutls_protocol_t adv_version;
  int neg_version;
  int len = datalen;
  opaque rnd[TLS_RANDOM_SIZE], *suite_ptr, *comp_ptr;

  /* SSLv2-format hellos are handled by a dedicated parser. */
  if (session->internals.v2_hello != 0)
    {                           /* version 2.0 */
      return _gnutls_read_client_hello_v2 (session, data, datalen);
    }

  /* Client's advertised protocol version (2 bytes). */
  DECR_LEN (len, 2);
  _gnutls_handshake_log ("HSK[%x]: Client's version: %d.%d\n", session,
                         data[pos], data[pos + 1]);
  adv_version = _gnutls_version_get (data[pos], data[pos + 1]);
  set_adv_version (session, data[pos], data[pos + 1]);
  pos += 2;

  neg_version = _gnutls_negotiate_version( session, adv_version);
  if (neg_version < 0)
    {
      gnutls_assert();
      return neg_version;
    }

  /* Read client random value.
   */
  DECR_LEN (len, TLS_RANDOM_SIZE);
  _gnutls_set_client_random (session, &data[pos]);
  pos += TLS_RANDOM_SIZE;

  /* Generate and record our own (server) random value. */
  _gnutls_tls_create_random (rnd);
  _gnutls_set_server_random (session, rnd);

  session->security_parameters.timestamp = time (NULL);

  /* Session ID length (1 byte), then the session ID itself. */
  DECR_LEN (len, 1);
  session_id_len = data[pos++];

  /* RESUME SESSION
   */
  if (session_id_len > TLS_MAX_SESSION_ID_SIZE)
    {
      gnutls_assert ();
      return GNUTLS_E_UNEXPECTED_PACKET_LENGTH;
    }
  DECR_LEN (len, session_id_len);

  /* Try to resume a previously stored session with this ID. */
  ret = _gnutls_server_restore_session (session, &data[pos], session_id_len);
  pos += session_id_len;

  if (ret == 0)
    {                           /* resumed! */
      resume_copy_required_values (session);
      session->internals.resumed = RESUME_TRUE;
      return _gnutls_user_hello_func( session, adv_version);
    }
  else
    {
      /* No resumable session: mint a fresh session ID for a full handshake. */
      _gnutls_generate_session_id (session->security_parameters.
                                   session_id,
                                   &session->security_parameters.
                                   session_id_size);

      session->internals.resumed = RESUME_FALSE;
    }

  /* Remember ciphersuites for later
   */
  DECR_LEN (len, 2);
  suite_size = _gnutls_read_uint16 (&data[pos]);
  pos += 2;

  DECR_LEN (len, suite_size);
  suite_ptr = &data[pos];
  pos += suite_size;

  /* Point to the compression methods
   */
  DECR_LEN (len, 1);
  comp_size = data[pos++];      /* z is the number of compression methods */

  DECR_LEN (len, comp_size);
  comp_ptr = &data[pos];
  pos += comp_size;

  /* Parse the extensions (if any)
   */
  if (neg_version >= GNUTLS_TLS1)
    {
      /* First pass: application-level extensions, before the user hook runs. */
      ret = _gnutls_parse_extensions (session, EXTENSION_APPLICATION, &data[pos], len);    /* len is the rest of the parsed length */
      if (ret < 0)
        {
          gnutls_assert ();
          return ret;
        }
    }

  /* Give the application a chance to inspect/abort the handshake. */
  ret = _gnutls_user_hello_func( session, adv_version);
  if (ret < 0)
    {
      gnutls_assert();
      return ret;
    }

  if (neg_version >= GNUTLS_TLS1)
    {
      /* Second pass: TLS-protocol extensions, after the user hook. */
      ret = _gnutls_parse_extensions (session, EXTENSION_TLS, &data[pos], len);    /* len is the rest of the parsed length */
      if (ret < 0)
        {
          gnutls_assert ();
          return ret;
        }
    }

  /* select an appropriate cipher suite
   */
  ret = _gnutls_server_select_suite (session, suite_ptr, suite_size);
  if (ret < 0)
    {
      gnutls_assert ();
      return ret;
    }

  /* select appropriate compression method */
  ret = _gnutls_server_select_comp_method (session, comp_ptr, comp_size);
  if (ret < 0)
    {
      gnutls_assert ();
      return ret;
    }

  return 0;
}
| 0 |
[
"CWE-189"
] |
gnutls
|
bc8102405fda11ea00ca3b42acc4f4bce9d6e97b
| 39,306,871,543,120,950,000,000,000,000,000,000,000 | 140 |
Fix GNUTLS-SA-2008-1 security vulnerabilities.
See http://www.gnu.org/software/gnutls/security.html for updates.
|
cmd_http_send(CMD_ARGS)
{
	struct http *hp;
	size_t len;
	ssize_t i;

	(void)cmd;
	(void)vl;
	CAST_OBJ_NOTNULL(hp, priv, HTTP_MAGIC);
	AN(av[1]);
	AZ(av[2]);
	vtc_dump(hp->vl, 4, "send", av[1], -1);
	/* Compute the length once; write() returns ssize_t, so keep the
	 * result in a signed type wide enough to hold -1 without the
	 * previous int truncation / signed-vs-unsigned comparison. */
	len = strlen(av[1]);
	i = write(hp->fd, av[1], len);
	/* Report both outright failures (i < 0) and short writes. */
	if (i < 0 || (size_t)i != len)
		vtc_log(hp->vl, hp->fatal, "Write error in http_send(): %s",
		    strerror(errno));
}
| 0 |
[
"CWE-269"
] |
Varnish-Cache
|
85e8468bec9416bd7e16b0d80cb820ecd2b330c3
| 274,292,693,684,946,420,000,000,000,000,000,000,000 | 16 |
Do not consider a CR by itself as a valid line terminator
Varnish (prior to version 4.0) was not following the standard with
regard to line separator.
Spotted and analyzed by: Régis Leroy [regilero] [email protected]
|
static void term_exit(void)
{
    const int fd = 0; /* stdin */

    /* Put the terminal back the way we found it at startup. */
    tcsetattr(fd, TCSANOW, &oldtty);
    fcntl(fd, F_SETFL, old_fd0_flags);
}
| 0 |
[
"CWE-416"
] |
qemu
|
a4afa548fc6dd9842ed86639b4d37d4d1c4ad480
| 48,182,043,716,967,400,000,000,000,000,000,000,000 | 5 |
char: move front end handlers in CharBackend
Since the hanlders are associated with a CharBackend, rather than the
CharDriverState, it is more appropriate to store in CharBackend. This
avoids the handler copy dance in qemu_chr_fe_set_handlers() then
mux_chr_update_read_handler(), by storing the CharBackend pointer
directly.
Also a mux CharDriver should go through mux->backends[focused], since
chr->be will stay NULL. Before that, it was possible to call
chr->handler by mistake with surprising results, for ex through
qemu_chr_be_can_write(), which would result in calling the last set
handler front end, not the one with focus.
Signed-off-by: Marc-André Lureau <[email protected]>
Message-Id: <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
|
CImgDisplay& paint(const bool wait_expose=true) {
  // An empty display has nothing to repaint; skip locking entirely.
  if (!is_empty()) {
    cimg_lock_display();
    _paint(wait_expose);
    cimg_unlock_display();
  }
  return *this;
}
| 0 |
[
"CWE-770"
] |
cimg
|
619cb58dd90b4e03ac68286c70ed98acbefd1c90
| 264,216,496,283,129,900,000,000,000,000,000,000,000 | 7 |
CImg<>::load_bmp() and CImg<>::load_pandore(): Check that dimensions encoded in file does not exceed file size.
|
static int get_dma_id(struct dma_device *device)
{
	int id;

	/* The IDR is shared between allocators; serialize access to it. */
	mutex_lock(&dma_list_mutex);
	id = idr_alloc(&dma_idr, NULL, 0, 0, GFP_KERNEL);
	if (id >= 0)
		device->dev_id = id;
	mutex_unlock(&dma_list_mutex);

	/* Propagate the allocation error; success always reports 0. */
	if (id < 0)
		return id;
	return 0;
}
| 0 |
[] |
linux
|
7bced397510ab569d31de4c70b39e13355046387
| 161,799,849,410,690,280,000,000,000,000,000,000,000 | 13 |
net_dma: simple removal
Per commit "77873803363c net_dma: mark broken" net_dma is no longer used
and there is no plan to fix it.
This is the mechanical removal of bits in CONFIG_NET_DMA ifdef guards.
Reverting the remainder of the net_dma induced changes is deferred to
subsequent patches.
Marked for stable due to Roman's report of a memory leak in
dma_pin_iovec_pages():
https://lkml.org/lkml/2014/9/3/177
Cc: Dave Jiang <[email protected]>
Cc: Vinod Koul <[email protected]>
Cc: David Whipple <[email protected]>
Cc: Alexander Duyck <[email protected]>
Cc: <[email protected]>
Reported-by: Roman Gushchin <[email protected]>
Acked-by: David S. Miller <[email protected]>
Signed-off-by: Dan Williams <[email protected]>
|
/* Read and discard `size` bytes from `ioc` during NBD negotiation, in
 * chunks of at most 64 KiB. Returns 0 on success or the negative value
 * returned by nbd_negotiate_read() on failure. */
static int nbd_negotiate_drop_sync(QIOChannel *ioc, size_t size)
{
    ssize_t ret = 0;
    uint8_t *buffer;

    /* Nothing to drop: avoid a pointless zero-sized allocation. */
    if (size == 0) {
        return 0;
    }

    /* One scratch buffer serves all chunks; each chunk is <= its size. */
    buffer = g_malloc(MIN(65536, size));
    while (size > 0) {
        size_t count = MIN(65536, size);

        ret = nbd_negotiate_read(ioc, buffer, count);
        if (ret < 0) {
            break; /* single cleanup path below frees the buffer */
        }
        size -= count;
    }
    g_free(buffer);
    return ret < 0 ? ret : 0;
}
| 1 |
[
"CWE-20"
] |
qemu
|
2b0bbc4f8809c972bad134bc1a2570dbb01dea0b
| 327,239,942,468,809,270,000,000,000,000,000,000,000 | 19 |
nbd/server: get rid of nbd_negotiate_read and friends
Functions nbd_negotiate_{read,write,drop_sync} were introduced in
1a6245a5b, when nbd_rwv (was nbd_wr_sync) was working through
qemu_co_sendv_recvv (the path is nbd_wr_sync -> qemu_co_{recv/send} ->
qemu_co_send_recv -> qemu_co_sendv_recvv), which just yields, without
setting any handlers. But starting from ff82911cd nbd_rwv (was
nbd_wr_syncv) works through qio_channel_yield() which sets handlers, so
watchers are redundant in nbd_negotiate_{read,write,drop_sync}, then,
let's just use nbd_{read,write,drop} functions.
Functions nbd_{read,write,drop} has errp parameter, which is unused in
this patch. This will be fixed later.
Signed-off-by: Vladimir Sementsov-Ogievskiy <[email protected]>
Reviewed-by: Eric Blake <[email protected]>
Message-Id: <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
|
/* Parse a Cache Brush secondary order from stream `s` into a freshly
 * allocated CACHE_BRUSH_ORDER. Returns the order on success, or NULL on
 * malformed/truncated input or allocation failure (the partially filled
 * order is freed on the fail path). `flags` is currently unused here. */
static CACHE_BRUSH_ORDER* update_read_cache_brush_order(rdpUpdate* update, wStream* s, UINT16 flags)
{
	int i;
	BYTE iBitmapFormat;
	BOOL compressed = FALSE;
	/* calloc so all fields (including data[]) start zeroed. */
	CACHE_BRUSH_ORDER* cache_brush = calloc(1, sizeof(CACHE_BRUSH_ORDER));

	if (!cache_brush)
		goto fail;

	/* Fixed header: index, format, cx, cy, style, length = 6 bytes. */
	if (Stream_GetRemainingLength(s) < 6)
		goto fail;

	Stream_Read_UINT8(s, cache_brush->index);    /* cacheEntry (1 byte) */
	Stream_Read_UINT8(s, iBitmapFormat);         /* iBitmapFormat (1 byte) */

	/* Reject format codes outside the BMF_BPP lookup table. */
	if (iBitmapFormat >= ARRAYSIZE(BMF_BPP))
		goto fail;

	cache_brush->bpp = BMF_BPP[iBitmapFormat];
	Stream_Read_UINT8(s, cache_brush->cx);       /* cx (1 byte) */
	Stream_Read_UINT8(s, cache_brush->cy);       /* cy (1 byte) */
	Stream_Read_UINT8(s, cache_brush->style);    /* style (1 byte) */
	Stream_Read_UINT8(s, cache_brush->length);   /* iBytes (1 byte) */

	/* Only 8x8 brushes carry pixel data; anything else keeps data[] zero. */
	if ((cache_brush->cx == 8) && (cache_brush->cy == 8))
	{
		if (cache_brush->bpp == 1)
		{
			/* Monochrome brush: exactly one byte per row. */
			if (cache_brush->length != 8)
			{
				WLog_Print(update->log, WLOG_ERROR, "incompatible 1bpp brush of length:%" PRIu32 "",
				           cache_brush->length);
				goto fail;
			}

			/* rows are encoded in reverse order */
			if (Stream_GetRemainingLength(s) < 8)
				goto fail;

			for (i = 7; i >= 0; i--)
			{
				Stream_Read_UINT8(s, cache_brush->data[i]);
			}
		}
		else
		{
			/* Specific (format, length) pairs indicate a compressed payload. */
			if ((iBitmapFormat == BMF_8BPP) && (cache_brush->length == 20))
				compressed = TRUE;
			else if ((iBitmapFormat == BMF_16BPP) && (cache_brush->length == 24))
				compressed = TRUE;
			else if ((iBitmapFormat == BMF_32BPP) && (cache_brush->length == 32))
				compressed = TRUE;

			if (compressed != FALSE)
			{
				/* compressed brush: decoder is given sizeof(data) as its bound */
				if (!update_decompress_brush(s, cache_brush->data, sizeof(cache_brush->data),
				                             cache_brush->bpp))
					goto fail;
			}
			else
			{
				/* uncompressed brush: 8 rows of `scanline` bytes, reversed.
				 * NOTE(review): for bpp up to 32 this writes up to
				 * 8 * 32 = 256 bytes into cache_brush->data -- confirm
				 * data[] is declared at least that large. */
				UINT32 scanline = (cache_brush->bpp / 8) * 8;

				if (Stream_GetRemainingLength(s) < scanline * 8)
					goto fail;

				for (i = 7; i >= 0; i--)
				{
					Stream_Read(s, &cache_brush->data[i * scanline], scanline);
				}
			}
		}
	}

	return cache_brush;
fail:
	/* Safe with a NULL order (calloc failure path). */
	free_cache_brush_order(update->context, cache_brush);
	return NULL;
}
| 1 |
[
"CWE-125"
] |
FreeRDP
|
b8beb55913471952f92770c90c372139d78c16c0
| 153,820,175,371,952,620,000,000,000,000,000,000,000 | 82 |
Fixed OOB read in update_read_cache_bitmap_v3_order
CVE-2020-11096 thanks @antonio-morales for finding this.
|
CudnnSupport::createRnnStateTensorDescriptor(int num_layer, int batch_size,
                                             int data_size,
                                             dnn::DataType data_type) {
  // Translate the generic dnn data type to its cuDNN equivalent and wrap the
  // cuDNN-specific descriptor behind the generic interface type.
  auto* descriptor = new CudnnRnnStateTensorDescriptor(
      parent_, num_layer, batch_size, data_size, ToCudnnDataType(data_type));
  return std::unique_ptr<dnn::RnnStateTensorDescriptor>(descriptor);
}
| 0 |
[
"CWE-20"
] |
tensorflow
|
14755416e364f17fb1870882fa778c7fec7f16e3
| 141,861,080,732,194,740,000,000,000,000,000,000,000 | 7 |
Prevent CHECK-fail in LSTM/GRU with zero-length input.
PiperOrigin-RevId: 346239181
Change-Id: I5f233dbc076aab7bb4e31ba24f5abd4eaf99ea4f
|
/* Scan eap->arg for the end of the current Ex command: a '|' or '\n'
 * command separator or the start of a trailing comment. When found,
 * terminate the argument with NUL and stash the rest in eap->nextcmd.
 * The scan is destructive: CTRL-V escapes and backslashes before '|'
 * may be removed in place via STRMOVE. */
separate_nextcmd(exarg_T *eap)
{
    char_u	*p;

#ifdef FEAT_QUICKFIX
    /* :grep-like commands embed a pattern that must not be scanned. */
    p = skip_grep_pat(eap);
#else
    p = eap->arg;
#endif

    /* MB_PTR_ADV advances over whole multi-byte characters. */
    for ( ; *p; MB_PTR_ADV(p))
    {
	if (*p == Ctrl_V)
	{
	    if (eap->argt & (EX_CTRLV | EX_XFILE))
		++p;		// skip CTRL-V and next char
	    else
		// remove CTRL-V and skip next char
		STRMOVE(p, p + 1);
	    if (*p == NUL)		// stop at NUL after CTRL-V
		break;
	}

#ifdef FEAT_EVAL
	// Skip over `=expr` when wildcards are expanded.
	else if (p[0] == '`' && p[1] == '=' && (eap->argt & EX_XFILE))
	{
	    p += 2;
	    (void)skip_expr(&p, NULL);
	    if (*p == NUL)		// stop at NUL after CTRL-V
		break;
	}
#endif

	// Check for '"': start of comment or '|': next command
	// :@" and :*" do not start a comment!
	// :redir @" doesn't either.
	else if ((*p == '"'
#ifdef FEAT_EVAL
		    && !in_vim9script()
#endif
		    && !(eap->argt & EX_NOTRLCOM)
		    && ((eap->cmdidx != CMD_at && eap->cmdidx != CMD_star)
			|| p != eap->arg)
		    && (eap->cmdidx != CMD_redir
			|| p != eap->arg + 1 || p[-1] != '@'))
#ifdef FEAT_EVAL
		// In Vim9 script '#' preceded by whitespace starts a comment.
		|| (*p == '#'
		    && in_vim9script()
		    && !(eap->argt & EX_NOTRLCOM)
		    && p > eap->cmd && VIM_ISWHITE(p[-1]))
#endif
		|| *p == '|' || *p == '\n')
	{
	    /*
	     * We remove the '\' before the '|', unless EX_CTRLV is used
	     * AND 'b' is present in 'cpoptions'.
	     */
	    if ((vim_strchr(p_cpo, CPO_BAR) == NULL
		      || !(eap->argt & EX_CTRLV)) && *(p - 1) == '\\')
	    {
		STRMOVE(p - 1, p);	// remove the '\'
		--p;
	    }
	    else
	    {
		// Separator found: record the remainder and cut it off here.
		eap->nextcmd = check_nextcmd(p);
		*p = NUL;
		break;
	    }
	}
    }

    if (!(eap->argt & EX_NOTRLCOM))	// remove trailing spaces
	del_trailing_spaces(eap->arg);
}
| 0 |
[
"CWE-122"
] |
vim
|
35a319b77f897744eec1155b736e9372c9c5575f
| 155,473,970,087,354,470,000,000,000,000,000,000,000 | 76 |
patch 8.2.3489: ml_get error after search with range
Problem: ml_get error after search with range.
Solution: Limit the line number to the buffer line count.
|
gdata_service_finalize (GObject *object)
{
	GDataService *self = GDATA_SERVICE (object);

	/* Release memory owned by this instance. */
	g_free (self->priv->locale);

	/* Chain up so the parent class can release its own state. */
	G_OBJECT_CLASS (gdata_service_parent_class)->finalize (object);
}
| 0 |
[
"CWE-20"
] |
libgdata
|
6799f2c525a584dc998821a6ce897e463dad7840
| 136,782,286,273,108,340,000,000,000,000,000,000,000 | 9 |
core: Validate SSL certificates for all connections
This prevents MitM attacks which use spoofed SSL certificates.
Note that this bumps our libsoup requirement to 2.37.91.
Closes: https://bugzilla.gnome.org/show_bug.cgi?id=671535
|
/* One-time initialisation of all caches and hashes used when interning
 * BGP path attributes. NOTE(review): kept in this order in case a later
 * init depends on an earlier one -- confirm before reordering. */
bgp_attr_init (void)
{
  aspath_init ();       /* AS-path cache */
  attrhash_init ();     /* interned attribute hash */
  community_init ();    /* community attribute cache */
  ecommunity_init ();   /* extended-community cache */
  cluster_init ();      /* route-reflector cluster-list cache */
  transit_init ();      /* unrecognised transitive attribute cache */
}
| 0 |
[] |
quagga
|
8794e8d229dc9fe29ea31424883433d4880ef408
| 223,257,243,046,107,340,000,000,000,000,000,000,000 | 9 |
bgpd: Fix regression in args consolidation, total should be inited from args
* bgp_attr.c: (bgp_attr_unknown) total should be initialised from the args.
|
// Default-construct the command-line option set. Every member is given an
// explicit default here; entries must stay in member declaration order.
Options() :
    // Input decryption / password handling
    password(0),
    linearize(false),
    decrypt(false),
    split_pages(0),
    verbose(false),
    progress(false),
    suppress_warnings(false),
    copy_encryption(false),
    encryption_file(0),
    encryption_file_password(0),
    encrypt(false),
    password_is_hex_key(false),
    suppress_password_recovery(false),
    password_mode(pm_auto),
    keylen(0),
    // R2 (40-bit) permission flags: everything allowed by default
    r2_print(true),
    r2_modify(true),
    r2_extract(true),
    r2_annotate(true),
    // R3+ permission flags: everything allowed by default
    r3_accessibility(true),
    r3_extract(true),
    r3_assemble(true),
    r3_annotate_and_form(true),
    r3_form_filling(true),
    r3_modify_other(true),
    r3_print(qpdf_r3p_full),
    force_V4(false),
    force_R5(false),
    cleartext_metadata(false),
    use_aes(false),
    // Stream data / decoding behavior
    stream_data_set(false),
    stream_data_mode(qpdf_s_compress),
    compress_streams(true),
    compress_streams_set(false),
    decode_level(qpdf_dl_generalized),
    decode_level_set(false),
    normalize_set(false),
    normalize(false),
    suppress_recovery(false),
    object_stream_set(false),
    object_stream_mode(qpdf_o_preserve),
    ignore_xref_streams(false),
    qdf_mode(false),
    preserve_unreferenced_objects(false),
    preserve_unreferenced_page_resources(false),
    keep_files_open(true),
    keep_files_open_set(false),
    keep_files_open_threshold(200), // default known in help and docs
    newline_before_endstream(false),
    // Page/annotation transformations
    coalesce_contents(false),
    flatten_annotations(false),
    flatten_annotations_required(0),
    flatten_annotations_forbidden(an_invisible | an_hidden),
    generate_appearances(false),
    show_npages(false),
    // Document-ID generation
    deterministic_id(false),
    static_id(false),
    static_aes_iv(false),
    suppress_original_object_id(false),
    // Inspection / diagnostic output
    show_encryption(false),
    show_encryption_key(false),
    check_linearization(false),
    show_linearization(false),
    show_xref(false),
    show_trailer(false),
    show_obj(0),
    show_gen(0),
    show_raw_stream_data(false),
    show_filtered_stream_data(false),
    show_pages(false),
    show_page_images(false),
    collate(false),
    json(false),
    check(false),
    // Image optimization
    optimize_images(false),
    externalize_inline_images(false),
    keep_inline_images(false),
    remove_page_labels(false),
    oi_min_width(128),    // Default values for these
    oi_min_height(128),   // oi flags are in --help
    oi_min_area(16384),   // and in the manual.
    ii_min_bytes(1024),   //
    // Overlay/underlay and file arguments
    underlay("underlay"),
    overlay("overlay"),
    under_overlay(0),
    require_outfile(true),
    infilename(0),
    outfilename(0)
{
}
| 0 |
[
"CWE-787"
] |
qpdf
|
d71f05ca07eb5c7cfa4d6d23e5c1f2a800f52e8e
| 294,520,049,259,630,860,000,000,000,000,000,000,000 | 91 |
Fix sign and conversion warnings (major)
This makes all integer type conversions that have potential data loss
explicit with calls that do range checks and raise an exception. After
this commit, qpdf builds with no warnings when -Wsign-conversion
-Wconversion is used with gcc or clang or when -W3 -Wd4800 is used
with MSVC. This significantly reduces the likelihood of potential
crashes from bogus integer values.
There are some parts of the code that take int when they should take
size_t or an offset. Such places would make qpdf not support files
with more than 2^31 of something that usually wouldn't be so large. In
the event that such a file shows up and is valid, at least qpdf would
raise an error in the right spot so the issue could be legitimately
addressed rather than failing in some weird way because of a silent
overflow condition.
|
int Field_year::store_time_dec(const MYSQL_TIME *ltime, uint dec_arg)
{
  // Only the year component survives; keep a converter around for warnings.
  ErrConvTime str(ltime);

  if (Field_year::store(ltime->year, 0) != 0)
    return 1;

  // The date/time part beyond the year is discarded: warn about truncation.
  set_datetime_warning(WARN_DATA_TRUNCATED, &str, ltime->time_type, 1);
  return 0;
}
| 0 |
[
"CWE-416",
"CWE-703"
] |
server
|
08c7ab404f69d9c4ca6ca7a9cf7eec74c804f917
| 155,941,546,617,711,410,000,000,000,000,000,000,000 | 9 |
MDEV-24176 Server crashes after insert in the table with virtual
column generated using date_format() and if()
vcol_info->expr is allocated on expr_arena at parsing stage. Since
expr item is allocated on expr_arena all its containee items must be
allocated on expr_arena too. Otherwise fix_session_expr() will
encounter prematurely freed item.
When table is reopened from cache vcol_info contains stale
expression. We refresh expression via TABLE::vcol_fix_exprs() but
first we must prepare a proper context (Vcol_expr_context) which meets
some requirements:
1. As noted above expr update must be done on expr_arena as there may
be new items created. It was a bug in fix_session_expr_for_read() and
was just not reproduced because of no second refix. Now refix is done
for more cases so it does reproduce. Tests affected: vcol.binlog
2. Also name resolution context must be narrowed to the single table.
Tested by: vcol.update main.default vcol.vcol_syntax gcol.gcol_bugfixes
3. sql_mode must be clean and not fail expr update.
sql_mode such as MODE_NO_BACKSLASH_ESCAPES, MODE_NO_ZERO_IN_DATE, etc
must not affect vcol expression update. If the table was created
successfully any further evaluation must not fail. Tests affected:
main.func_like
Reviewed by: Sergei Golubchik <[email protected]>
|
/* Run one URI test case: feed each line of `filename` through handleURI()
 * (resolved against `base` if non-NULL), writing output to a temp file.
 * Compare the output against `result` and accumulated errors against `err`
 * when those are non-NULL. Returns 0 on pass, 1 on mismatch, -1 on I/O
 * setup failure. */
uriCommonTest(const char *filename,
              const char *result,
              const char *err,
              const char *base) {
    char *temp;
    FILE *o, *f;
    char str[1024];
    int res = 0, ret;
    size_t len;

    temp = resultFilename(filename, "", ".res");
    if (temp == NULL) {
        fprintf(stderr, "Out of memory\n");
        fatalError();
    }
    /* After this point temp is guaranteed non-NULL (fatalError does not
     * return), so later NULL guards on it were redundant and are gone. */
    o = fopen(temp, "wb");
    if (o == NULL) {
        fprintf(stderr, "failed to open output file %s\n", temp);
        free(temp);
        return(-1);
    }
    f = fopen(filename, "rb");
    if (f == NULL) {
        fprintf(stderr, "failed to open input file %s\n", filename);
        fclose(o);
        unlink(temp);
        free(temp);
        return(-1);
    }

    while (1) {
        /*
         * read one line in string buffer. fgets already reserves room for
         * the terminating NUL, so pass the full buffer size.
         */
        if (fgets(&str[0], sizeof(str), f) == NULL)
            break;

        /*
         * remove the ending spaces (strlen returns size_t; keep it unsigned)
         */
        len = strlen(str);
        while ((len > 0) &&
               ((str[len - 1] == '\n') || (str[len - 1] == '\r') ||
                (str[len - 1] == ' ') || (str[len - 1] == '\t'))) {
            len--;
            str[len] = 0;
        }
        nb_tests++;
        handleURI(str, base, o);
    }

    fclose(f);
    fclose(o);

    if (result != NULL) {
        ret = compareFiles(temp, result);
        if (ret) {
            fprintf(stderr, "Result for %s failed in %s\n", filename, result);
            res = 1;
        }
    }
    if (err != NULL) {
        ret = compareFileMem(err, testErrors, testErrorsSize);
        if (ret != 0) {
            fprintf(stderr, "Error for %s failed\n", filename);
            res = 1;
        }
    }

    unlink(temp);
    free(temp);
    return(res);
}
| 0 |
[
"CWE-125"
] |
libxml2
|
a820dbeac29d330bae4be05d9ecd939ad6b4aa33
| 83,260,858,238,512,690,000,000,000,000,000,000,000 | 76 |
Bug 758605: Heap-based buffer overread in xmlDictAddString <https://bugzilla.gnome.org/show_bug.cgi?id=758605>
Reviewed by David Kilzer.
* HTMLparser.c:
(htmlParseName): Add bounds check.
(htmlParseNameComplex): Ditto.
* result/HTML/758605.html: Added.
* result/HTML/758605.html.err: Added.
* result/HTML/758605.html.sax: Added.
* runtest.c:
(pushParseTest): The input for the new test case was so small
(4 bytes) that htmlParseChunk() was never called after
htmlCreatePushParserCtxt(), thereby creating a false positive
test failure. Fixed by using a do-while loop so we always call
htmlParseChunk() at least once.
* test/HTML/758605.html: Added.
|
/* Receive path for UDPv6 (and UDP-Lite when proto says so): validate the
 * header and length, fix up the checksum state, then deliver to a matching
 * socket (multicast or unicast) or answer with ICMPv6 port-unreachable.
 * Always consumes the skb; returns 0, or >0 to ask for resubmission. */
int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
		   int proto)
{
	const struct in6_addr *saddr, *daddr;
	struct net *net = dev_net(skb->dev);
	struct udphdr *uh;
	struct sock *sk;
	u32 ulen = 0;

	/* Make sure the UDP header is in the linear area. */
	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
		goto discard;

	saddr = &ipv6_hdr(skb)->saddr;
	daddr = &ipv6_hdr(skb)->daddr;
	uh = udp_hdr(skb);

	ulen = ntohs(uh->len);
	if (ulen > skb->len)
		goto short_packet;

	if (proto == IPPROTO_UDP) {
		/* UDP validates ulen. */

		/* Check for jumbo payload */
		if (ulen == 0)
			ulen = skb->len;

		if (ulen < sizeof(*uh))
			goto short_packet;

		if (ulen < skb->len) {
			if (pskb_trim_rcsum(skb, ulen))
				goto short_packet;
			/* Trimming may have reallocated the header block:
			 * re-derive all header pointers. */
			saddr = &ipv6_hdr(skb)->saddr;
			daddr = &ipv6_hdr(skb)->daddr;
			uh = udp_hdr(skb);
		}
	}

	if (udp6_csum_init(skb, uh, proto))
		goto csum_error;

	/*
	 *	Multicast receive code
	 */
	if (ipv6_addr_is_multicast(daddr))
		return __udp6_lib_mcast_deliver(net, skb,
				saddr, daddr, udptable, proto);

	/* Unicast */

	/*
	 * check socket cache ... must talk to Alan about his plans
	 * for sock caches... i'll skip this for now.
	 */
	sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
	if (sk) {
		int ret;

		/* A zero UDP checksum is only acceptable if the socket
		 * opted in via no_check6_rx. */
		if (!uh->check && !udp_sk(sk)->no_check6_rx) {
			udp6_csum_zero_error(skb);
			goto csum_error;
		}

		if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
			skb_checksum_try_convert(skb, IPPROTO_UDP, uh->check,
						 ip6_compute_pseudo);

		ret = udpv6_queue_rcv_skb(sk, skb);

		/* a return value > 0 means to resubmit the input */
		if (ret > 0)
			return ret;
		return 0;
	}

	/* No matching socket: a zero checksum is never valid here. */
	if (!uh->check) {
		udp6_csum_zero_error(skb);
		goto csum_error;
	}

	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard;

	if (udp_lib_checksum_complete(skb))
		goto csum_error;

	/* Nobody listening on this port: count it and send port-unreachable. */
	__UDP6_INC_STATS(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
	icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);

	kfree_skb(skb);
	return 0;

short_packet:
	net_dbg_ratelimited("UDP%sv6: short packet: From [%pI6c]:%u %d/%d to [%pI6c]:%u\n",
			    proto == IPPROTO_UDPLITE ? "-Lite" : "",
			    saddr, ntohs(uh->source),
			    ulen, skb->len,
			    daddr, ntohs(uh->dest));
	goto discard;

csum_error:
	__UDP6_INC_STATS(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE);
discard:
	__UDP6_INC_STATS(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
	kfree_skb(skb);
	return 0;
}
| 0 |
[] |
linux
|
a612769774a30e4fc143c4cb6395c12573415660
| 18,835,909,205,124,114,000,000,000,000,000,000,000 | 108 |
udp: prevent bugcheck if filter truncates packet too much
If socket filter truncates an udp packet below the length of UDP header
in udpv6_queue_rcv_skb() or udp_queue_rcv_skb(), it will trigger a
BUG_ON in skb_pull_rcsum(). This BUG_ON (and therefore a system crash if
kernel is configured that way) can be easily enforced by an unprivileged
user which was reported as CVE-2016-6162. For a reproducer, see
http://seclists.org/oss-sec/2016/q3/8
Fixes: e6afc8ace6dd ("udp: remove headers from UDP packets before queueing")
Reported-by: Marco Grassi <[email protected]>
Signed-off-by: Michal Kubecek <[email protected]>
Acked-by: Eric Dumazet <[email protected]>
Acked-by: Willem de Bruijn <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
/* Swap in a new framebuffer (and possibly new geometry / pixel format)
 * for a running server. Updates the screenInfo, clamps the cursor, and
 * for every connected client re-installs colour translation if the pixel
 * format changed, marks the whole screen dirty, and schedules a
 * NewFBSize message where the client supports it. */
void rfbNewFramebuffer(rfbScreenInfoPtr screen, char *framebuffer,
                       int width, int height,
                       int bitsPerSample, int samplesPerPixel,
                       int bytesPerPixel)
{
  rfbPixelFormat old_format;
  rfbBool format_changed = FALSE;
  rfbClientIteratorPtr iterator;
  rfbClientPtr cl;

  /* Update information in the screenInfo structure */

  old_format = screen->serverFormat;

  if (width & 3)
    rfbErr("WARNING: New width (%d) is not a multiple of 4.\n", width);

  screen->width = width;
  screen->height = height;
  screen->bitsPerPixel = screen->depth = 8*bytesPerPixel;
  screen->paddedWidthInBytes = width*bytesPerPixel;

  rfbInitServerFormat(screen, bitsPerSample);

  /* Detect whether the server pixel format actually changed. */
  if (memcmp(&screen->serverFormat, &old_format,
             sizeof(rfbPixelFormat)) != 0) {
    format_changed = TRUE;
  }

  screen->frameBuffer = framebuffer;

  /* Adjust pointer position if necessary */
  if (screen->cursorX >= width)
    screen->cursorX = width - 1;
  if (screen->cursorY >= height)
    screen->cursorY = height - 1;

  /* For each client: */
  iterator = rfbGetClientIterator(screen);
  while ((cl = rfbClientIteratorNext(iterator)) != NULL) {

    /* Re-install color translation tables if necessary */

    if (format_changed)
      screen->setTranslateFunction(cl);

    /* Mark the screen contents as changed, and schedule sending
       NewFBSize message if supported by this client.
       updateMutex protects the client's region state against the
       concurrent update-sending thread. */

    LOCK(cl->updateMutex);
    sraRgnDestroy(cl->modifiedRegion);
    cl->modifiedRegion = sraRgnCreateRect(0, 0, width, height);
    sraRgnMakeEmpty(cl->copyRegion);
    cl->copyDX = 0;
    cl->copyDY = 0;

    if (cl->useNewFBSize)
      cl->newFBSizePending = TRUE;

    TSIGNAL(cl->updateCond);
    UNLOCK(cl->updateMutex);
  }
  rfbReleaseClientIterator(iterator);
}
| 0 |
[] |
libvncserver
|
804335f9d296440bb708ca844f5d89b58b50b0c6
| 282,762,357,010,111,100,000,000,000,000,000,000,000 | 65 |
Thread safety for zrle, zlib, tight.
Proposed tight security type fix for debian bug 517422.
|
/* No-op stub: reserving a contiguous memory area does nothing in this
 * configuration. NOTE(review): presumably the !CONFIG_DMA_CMA variant --
 * confirm against the surrounding #ifdef in the header. */
static inline void dma_contiguous_reserve(phys_addr_t limit) { }
| 0 |
[
"CWE-682"
] |
linux
|
67a2e213e7e937c41c52ab5bc46bf3f4de469f6e
| 88,070,187,271,015,980,000,000,000,000,000,000,000 | 1 |
mm: cma: fix incorrect type conversion for size during dma allocation
This was found during userspace fuzzing test when a large size dma cma
allocation is made by driver(like ion) through userspace.
show_stack+0x10/0x1c
dump_stack+0x74/0xc8
kasan_report_error+0x2b0/0x408
kasan_report+0x34/0x40
__asan_storeN+0x15c/0x168
memset+0x20/0x44
__dma_alloc_coherent+0x114/0x18c
Signed-off-by: Rohit Vaswani <[email protected]>
Acked-by: Greg Kroah-Hartman <[email protected]>
Cc: Marek Szyprowski <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
int zend_shared_memdup_size(void *source, size_t size)
{
	void **existing;

	/* A previous call already registered this pointer: no extra space
	 * is needed for a second copy. */
	if (zend_hash_index_find(&xlat_table, (ulong)source, (void **)&existing) == SUCCESS) {
		return 0;
	}

	/* Record the pointer so subsequent calls see it as duplicated, and
	 * report the aligned amount of shared memory its copy will need. */
	zend_shared_alloc_register_xlat_entry(source, source);
	return ZEND_ALIGNED_SIZE(size);
}
| 0 |
[
"CWE-416"
] |
php-src
|
0a8f28b43212cc2ddbc1f2df710e37b1bec0addd
| 248,293,696,941,250,770,000,000,000,000,000,000,000 | 11 |
Fixed bug #68677 (Use After Free in OPcache)
(cherry picked from commit 777c39f4042327eac4b63c7ee87dc1c7a09a3115)
|
/* tevent completion callback: after the child finished saving the ccache
 * name, optionally cache the user's password for offline auth, set the
 * PAM status/DP error on the request state, and complete `req` (done on
 * EOK, error otherwise). Password-caching failures are deliberately
 * non-fatal. */
static void krb5_save_ccname_done(struct tevent_req *req)
{
    struct krb5_auth_state *state = tevent_req_data(req, struct krb5_auth_state);
    struct krb5child_req *kr = state->kr;
    struct pam_data *pd = state->pd;
    int ret;
    char *password = NULL;

    if (kr->is_offline) {
        /* Offline: either fall back to cached-credential auth, or tell the
         * caller to retry when the backend comes back online. */
        if (dp_opt_get_bool(kr->krb5_ctx->opts,KRB5_STORE_PASSWORD_IF_OFFLINE)) {
            krb5_pam_handler_cache_auth_step(req);
            return;
        }

        DEBUG(4, ("Backend is marked offline, retry later!\n"));
        state->pam_status = PAM_AUTHINFO_UNAVAIL;
        state->dp_err = DP_ERR_OFFLINE;
        ret = EOK;
        goto done;
    }

    if (state->be_ctx->domain->cache_credentials == TRUE) {

        /* password caching failures are not fatal errors */
        state->pam_status = PAM_SUCCESS;
        state->dp_err = DP_ERR_OK;

        /* Pick which token (if any) should be cached for this PAM command. */
        switch(pd->cmd) {
            case SSS_CMD_RENEW:
                /* The authtok is set to the credential cache
                 * during renewal. We don't want to save this
                 * as the cached password.
                 */
                break;
            case SSS_PAM_AUTHENTICATE:
            case SSS_PAM_CHAUTHTOK_PRELIM:
                /* Copy the authtok into a NUL-terminated buffer. */
                password = talloc_size(state, pd->authtok_size + 1);
                if (password != NULL) {
                    memcpy(password, pd->authtok, pd->authtok_size);
                    password[pd->authtok_size] = '\0';
                }
                break;
            case SSS_PAM_CHAUTHTOK:
                /* After a password change, cache the *new* password. */
                password = talloc_size(state, pd->newauthtok_size + 1);
                if (password != NULL) {
                    memcpy(password, pd->newauthtok, pd->newauthtok_size);
                    password[pd->newauthtok_size] = '\0';
                }
                break;
            default:
                DEBUG(0, ("unsupported PAM command [%d].\n", pd->cmd));
        }

        if (password == NULL) {
            if (pd->cmd != SSS_CMD_RENEW) {
                DEBUG(0, ("password not available, offline auth may not work.\n"));
                /* password caching failures are not fatal errors */
            }
            ret = EOK;
            goto done;
        }

        /* NOTE(review): the destructor presumably zeroes the password
         * buffer on free -- confirm password_destructor's definition. */
        talloc_set_destructor((TALLOC_CTX *)password, password_destructor);

        ret = sysdb_cache_password(state, state->be_ctx->sysdb,
                                   state->be_ctx->domain, pd->user,
                                   password);
        if (ret) {
            DEBUG(2, ("Failed to cache password, offline auth may not work."
                      " (%d)[%s]!?\n", ret, strerror(ret)));
            /* password caching failures are not fatal errors */
        }
    }

    state->pam_status = PAM_SUCCESS;
    state->dp_err = DP_ERR_OK;
    ret = EOK;

done:
    /* Complete the tevent request exactly once. */
    if (ret == EOK) {
        tevent_req_done(req);
    } else {
        tevent_req_error(req, ret);
    }
}
| 0 |
[
"CWE-287"
] |
sssd
|
fffdae81651b460f3d2c119c56d5caa09b4de42a
| 173,914,808,548,093,200,000,000,000,000,000,000,000 | 86 |
Fix bad password caching when using automatic TGT renewal
Fixes CVE-2011-1758, https://fedorahosted.org/sssd/ticket/856
|
/*
 * Stub for the case where mandatory-locking support is not built in --
 * presumably compiled when the corresponding kernel config option is
 * disabled; TODO confirm which option guards this.  Logs a warning and
 * reports that the "mand" mount option may not be used.
 */
static inline bool may_mandlock(void)
{
	pr_warn("VFS: \"mand\" mount option not supported");
	return false;
}
| 0 |
[
"CWE-200"
] |
linux
|
427215d85e8d1476da1a86b8d67aceb485eb3631
| 274,582,650,618,878,170,000,000,000,000,000,000,000 | 5 |
ovl: prevent private clone if bind mount is not allowed
Add the following checks from __do_loopback() to clone_private_mount() as
well:
- verify that the mount is in the current namespace
- verify that there are no locked children
Reported-by: Alois Wohlschlager <[email protected]>
Fixes: c771d683a62e ("vfs: introduce clone_private_mount()")
Cc: <[email protected]> # v3.18
Signed-off-by: Miklos Szeredi <[email protected]>
|
/*
 * 'read_cupsd_conf()' - Read directives from an open cupsd.conf stream.
 *
 * Returns 1 on success, 0 when a fatal configuration error is found and
 * the FatalErrors policy requires aborting.  Parsed values are stored in
 * the scheduler's global configuration variables.
 */

read_cupsd_conf(cups_file_t *fp)	/* I - File to read from */
{
  int		linenum;		/* Current line number */
  char		line[HTTP_MAX_BUFFER],	/* Line from file */
		temp[HTTP_MAX_BUFFER],	/* Temporary buffer for value */
		*value;			/* Pointer to value */
  int		valuelen;		/* Length of value */
  http_addrlist_t *addrlist,		/* Address list */
		*addr;			/* Current address */


 /*
  * Loop through each line in the file...
  */

  linenum = 0;

  while (cupsFileGetConf(fp, line, sizeof(line), &value, &linenum))
  {
   /*
    * Decode the directive...
    */

    if (!_cups_strcasecmp(line, "<Location") && value)
    {
     /*
      * <Location path>
      */

      linenum = read_location(fp, value, linenum);
      if (linenum == 0)
	return (0);
    }
    else if (!_cups_strcasecmp(line, "<Policy") && value)
    {
     /*
      * <Policy name>
      */

      linenum = read_policy(fp, value, linenum);
      if (linenum == 0)
	return (0);
    }
    else if (!_cups_strcasecmp(line, "FaxRetryInterval") && value)
    {
      /* Deprecated alias for JobRetryInterval; still honored. */
      JobRetryInterval = atoi(value);
      cupsdLogMessage(CUPSD_LOG_WARN,
		      "FaxRetryInterval is deprecated; use "
		      "JobRetryInterval on line %d of %s.", linenum, ConfigurationFile);
    }
    else if (!_cups_strcasecmp(line, "FaxRetryLimit") && value)
    {
      /* Deprecated alias for JobRetryLimit; still honored. */
      JobRetryLimit = atoi(value);
      cupsdLogMessage(CUPSD_LOG_WARN,
		      "FaxRetryLimit is deprecated; use "
		      "JobRetryLimit on line %d of %s.", linenum, ConfigurationFile);
    }
#ifdef HAVE_SSL
    else if (!_cups_strcasecmp(line, "SSLOptions"))
    {
     /*
      * SSLOptions [AllowRC4] [AllowSSL3] [AllowDH] [DenyCBC] [DenyTLS1.0] [None]
      *
      * Each whitespace-separated keyword either toggles an option bit or
      * adjusts the allowed min/max TLS protocol version.
      */

      int	options = _HTTP_TLS_NONE,/* SSL/TLS options */
		min_version = _HTTP_TLS_1_0,
		max_version = _HTTP_TLS_MAX;

      if (value)
      {
        char	*start,			/* Start of option */
		*end;			/* End of option */

	for (start = value; *start; start = end)
	{
	 /*
	  * Find end of keyword...
	  */

	  end = start;
	  while (*end && !_cups_isspace(*end))
	    end ++;

	  if (*end)
	    *end++ = '\0';

	 /*
	  * Compare...
	  */

	  if (!_cups_strcasecmp(start, "AllowRC4"))
	    options |= _HTTP_TLS_ALLOW_RC4;
	  else if (!_cups_strcasecmp(start, "AllowSSL3"))
	    min_version = _HTTP_TLS_SSL3;
	  else if (!_cups_strcasecmp(start, "AllowDH"))
	    options |= _HTTP_TLS_ALLOW_DH;
	  else if (!_cups_strcasecmp(start, "DenyCBC"))
	    options |= _HTTP_TLS_DENY_CBC;
	  else if (!_cups_strcasecmp(start, "DenyTLS1.0"))
	    min_version = _HTTP_TLS_1_1;
	  else if (!_cups_strcasecmp(start, "MaxTLS1.0"))
	    max_version = _HTTP_TLS_1_0;
	  else if (!_cups_strcasecmp(start, "MaxTLS1.1"))
	    max_version = _HTTP_TLS_1_1;
	  else if (!_cups_strcasecmp(start, "MaxTLS1.2"))
	    max_version = _HTTP_TLS_1_2;
	  else if (!_cups_strcasecmp(start, "MaxTLS1.3"))
	    max_version = _HTTP_TLS_1_3;
	  else if (!_cups_strcasecmp(start, "MinTLS1.0"))
	    min_version = _HTTP_TLS_1_0;
	  else if (!_cups_strcasecmp(start, "MinTLS1.1"))
	    min_version = _HTTP_TLS_1_1;
	  else if (!_cups_strcasecmp(start, "MinTLS1.2"))
	    min_version = _HTTP_TLS_1_2;
	  else if (!_cups_strcasecmp(start, "MinTLS1.3"))
	    min_version = _HTTP_TLS_1_3;
	  else if (!_cups_strcasecmp(start, "None"))
	    options = _HTTP_TLS_NONE;
	  else if (_cups_strcasecmp(start, "NoEmptyFragments"))
	    /* "NoEmptyFragments" is silently accepted; everything else warns. */
	    cupsdLogMessage(CUPSD_LOG_WARN, "Unknown SSL option %s at line %d.", start, linenum);
	}
      }

      _httpTLSSetOptions(options, min_version, max_version);
    }
#endif /* HAVE_SSL */
    else if ((!_cups_strcasecmp(line, "Port") || !_cups_strcasecmp(line, "Listen")
#ifdef HAVE_SSL
             || !_cups_strcasecmp(line, "SSLPort") || !_cups_strcasecmp(line, "SSLListen")
#endif /* HAVE_SSL */
	     ) && value)
    {
     /*
      * Add listening address(es) to the list...
      */

      cupsd_listener_t	*lis;		/* New listeners array */

     /*
      * Get the address list...
      */

      addrlist = get_address(value, IPP_PORT);

      if (!addrlist)
      {
        cupsdLogMessage(CUPSD_LOG_ERROR, "Bad %s address %s at line %d.", line,
	                value, linenum);
        continue;
      }

     /*
      * Add each address...
      */

      for (addr = addrlist; addr; addr = addr->next)
      {
       /*
        * See if this address is already present...
	*/

        for (lis = (cupsd_listener_t *)cupsArrayFirst(Listeners);
	     lis;
	     lis = (cupsd_listener_t *)cupsArrayNext(Listeners))
          if (httpAddrEqual(&(addr->addr), &(lis->address)) &&
	      httpAddrPort(&(addr->addr)) == httpAddrPort(&(lis->address)))
	    break;

        if (lis)
	{
	  /* Duplicate address: warn unless it came from socket activation
	   * (on-demand listeners are pre-populated elsewhere). */
#ifdef HAVE_ONDEMAND
	  if (!lis->on_demand)
#endif /* HAVE_ONDEMAND */
	  {
	    httpAddrString(&lis->address, temp, sizeof(temp));
	    cupsdLogMessage(CUPSD_LOG_WARN,
			    "Duplicate listen address \"%s\" ignored.", temp);
	  }

          continue;
	}

       /*
        * Allocate another listener...
	*/

        if (!Listeners)
	  Listeners = cupsArrayNew(NULL, NULL);

	if (!Listeners)
	{
          cupsdLogMessage(CUPSD_LOG_ERROR,
	                  "Unable to allocate %s at line %d - %s.",
	                  line, linenum, strerror(errno));
          break;
	}

        if ((lis = calloc(1, sizeof(cupsd_listener_t))) == NULL)
	{
          cupsdLogMessage(CUPSD_LOG_ERROR,
	                  "Unable to allocate %s at line %d - %s.",
	                  line, linenum, strerror(errno));
          break;
	}

        cupsArrayAdd(Listeners, lis);

       /*
        * Copy the current address and log it...
	*/

	memcpy(&(lis->address), &(addr->addr), sizeof(lis->address));
	lis->fd = -1;	/* not opened yet */

#ifdef HAVE_SSL
        if (!_cups_strcasecmp(line, "SSLPort") || !_cups_strcasecmp(line, "SSLListen"))
          lis->encryption = HTTP_ENCRYPT_ALWAYS;
#endif /* HAVE_SSL */

	httpAddrString(&lis->address, temp, sizeof(temp));

#ifdef AF_LOCAL
        if (lis->address.addr.sa_family == AF_LOCAL)
          cupsdLogMessage(CUPSD_LOG_INFO, "Listening to %s (Domain)", temp);
	else
#endif /* AF_LOCAL */
	cupsdLogMessage(CUPSD_LOG_INFO, "Listening to %s:%d (IPv%d)", temp,
                        httpAddrPort(&(lis->address)),
			httpAddrFamily(&(lis->address)) == AF_INET ? 4 : 6);

	/* Any non-loopback listener means remote access is possible. */
        if (!httpAddrLocalhost(&(lis->address)))
	  RemotePort = httpAddrPort(&(lis->address));
      }

     /*
      * Free the list...
      */

      httpAddrFreeList(addrlist);
    }
    else if (!_cups_strcasecmp(line, "BrowseProtocols") ||
             !_cups_strcasecmp(line, "BrowseLocalProtocols"))
    {
     /*
      * "BrowseProtocols name [... name]"
      * "BrowseLocalProtocols name [... name]"
      */

      int protocols = parse_protocols(value);

      if (protocols < 0)
      {
	cupsdLogMessage(CUPSD_LOG_ERROR,
	                "Unknown browse protocol \"%s\" on line %d of %s.",
	                value, linenum, ConfigurationFile);
        break;
      }

      BrowseLocalProtocols = protocols;
    }
    else if (!_cups_strcasecmp(line, "DefaultAuthType") && value)
    {
     /*
      * DefaultAuthType {basic,digest,basicdigest,negotiate}
      */

      if (!_cups_strcasecmp(value, "none"))
	default_auth_type = CUPSD_AUTH_NONE;
      else if (!_cups_strcasecmp(value, "basic"))
	default_auth_type = CUPSD_AUTH_BASIC;
      else if (!_cups_strcasecmp(value, "negotiate"))
        default_auth_type = CUPSD_AUTH_NEGOTIATE;
      else if (!_cups_strcasecmp(value, "auto"))
        default_auth_type = CUPSD_AUTH_AUTO;
      else
      {
	cupsdLogMessage(CUPSD_LOG_WARN,
	                "Unknown default authorization type %s on line %d of %s.",
	                value, linenum, ConfigurationFile);
	if (FatalErrors & CUPSD_FATAL_CONFIG)
	  return (0);
      }
    }
#ifdef HAVE_SSL
    else if (!_cups_strcasecmp(line, "DefaultEncryption"))
    {
     /*
      * DefaultEncryption {Never,IfRequested,Required}
      */

      if (!value || !_cups_strcasecmp(value, "never"))
	DefaultEncryption = HTTP_ENCRYPT_NEVER;
      else if (!_cups_strcasecmp(value, "required"))
	DefaultEncryption = HTTP_ENCRYPT_REQUIRED;
      else if (!_cups_strcasecmp(value, "ifrequested"))
	DefaultEncryption = HTTP_ENCRYPT_IF_REQUESTED;
      else
      {
	cupsdLogMessage(CUPSD_LOG_WARN,
	                "Unknown default encryption %s on line %d of %s.",
	                value, linenum, ConfigurationFile);
	if (FatalErrors & CUPSD_FATAL_CONFIG)
	  return (0);
      }
    }
#endif /* HAVE_SSL */
    else if (!_cups_strcasecmp(line, "HostNameLookups") && value)
    {
     /*
      * Do hostname lookups?  0 = off, 1 = on, 2 = double (forward+reverse).
      */

      if (!_cups_strcasecmp(value, "off") || !_cups_strcasecmp(value, "no") ||
          !_cups_strcasecmp(value, "false"))
        HostNameLookups = 0;
      else if (!_cups_strcasecmp(value, "on") || !_cups_strcasecmp(value, "yes") ||
          !_cups_strcasecmp(value, "true"))
        HostNameLookups = 1;
      else if (!_cups_strcasecmp(value, "double"))
        HostNameLookups = 2;
      else
	cupsdLogMessage(CUPSD_LOG_WARN, "Unknown HostNameLookups %s on line %d of %s.",
	                value, linenum, ConfigurationFile);
    }
    else if (!_cups_strcasecmp(line, "AccessLogLevel") && value)
    {
     /*
      * Amount of logging to do to access log...
      */

      if (!_cups_strcasecmp(value, "all"))
        AccessLogLevel = CUPSD_ACCESSLOG_ALL;
      else if (!_cups_strcasecmp(value, "actions"))
        AccessLogLevel = CUPSD_ACCESSLOG_ACTIONS;
      else if (!_cups_strcasecmp(value, "config"))
        AccessLogLevel = CUPSD_ACCESSLOG_CONFIG;
      else if (!_cups_strcasecmp(value, "none"))
        AccessLogLevel = CUPSD_ACCESSLOG_NONE;
      else
        cupsdLogMessage(CUPSD_LOG_WARN, "Unknown AccessLogLevel %s on line %d of %s.",
	                value, linenum, ConfigurationFile);
    }
    else if (!_cups_strcasecmp(line, "LogLevel") && value)
    {
     /*
      * Amount of logging to do to error log...
      */

      if (!_cups_strcasecmp(value, "debug2"))
        LogLevel = CUPSD_LOG_DEBUG2;
      else if (!_cups_strcasecmp(value, "debug"))
        LogLevel = CUPSD_LOG_DEBUG;
      else if (!_cups_strcasecmp(value, "info"))
        LogLevel = CUPSD_LOG_INFO;
      else if (!_cups_strcasecmp(value, "notice"))
        LogLevel = CUPSD_LOG_NOTICE;
      else if (!_cups_strcasecmp(value, "warn"))
        LogLevel = CUPSD_LOG_WARN;
      else if (!_cups_strcasecmp(value, "error"))
        LogLevel = CUPSD_LOG_ERROR;
      else if (!_cups_strcasecmp(value, "crit"))
        LogLevel = CUPSD_LOG_CRIT;
      else if (!_cups_strcasecmp(value, "alert"))
        LogLevel = CUPSD_LOG_ALERT;
      else if (!_cups_strcasecmp(value, "emerg"))
        LogLevel = CUPSD_LOG_EMERG;
      else if (!_cups_strcasecmp(value, "none"))
        LogLevel = CUPSD_LOG_NONE;
      else
        cupsdLogMessage(CUPSD_LOG_WARN, "Unknown LogLevel %s on line %d of %s.",
	                value, linenum, ConfigurationFile);
    }
    else if (!_cups_strcasecmp(line, "LogTimeFormat") && value)
    {
     /*
      * Timestamp granularity for log lines (seconds vs. microseconds)...
      */

      if (!_cups_strcasecmp(value, "standard"))
        LogTimeFormat = CUPSD_TIME_STANDARD;
      else if (!_cups_strcasecmp(value, "usecs"))
        LogTimeFormat = CUPSD_TIME_USECS;
      else
        cupsdLogMessage(CUPSD_LOG_WARN, "Unknown LogTimeFormat %s on line %d of %s.",
	                value, linenum, ConfigurationFile);
    }
    else if (!_cups_strcasecmp(line, "ServerTokens") && value)
    {
     /*
      * Set the string used for the Server header...
      */

      struct utsname plat;		/* Platform info */


      uname(&plat);

      if (!_cups_strcasecmp(value, "ProductOnly"))
	cupsdSetString(&ServerHeader, "CUPS IPP");
      else if (!_cups_strcasecmp(value, "Major"))
	cupsdSetStringf(&ServerHeader, "CUPS/%d IPP/2", CUPS_VERSION_MAJOR);
      else if (!_cups_strcasecmp(value, "Minor"))
	cupsdSetStringf(&ServerHeader, "CUPS/%d.%d IPP/2.1", CUPS_VERSION_MAJOR,
	                CUPS_VERSION_MINOR);
      else if (!_cups_strcasecmp(value, "Minimal"))
	cupsdSetString(&ServerHeader, CUPS_MINIMAL " IPP/2.1");
      else if (!_cups_strcasecmp(value, "OS"))
	cupsdSetStringf(&ServerHeader, CUPS_MINIMAL " (%s %s) IPP/2.1",
	                plat.sysname, plat.release);
      else if (!_cups_strcasecmp(value, "Full"))
	cupsdSetStringf(&ServerHeader, CUPS_MINIMAL " (%s %s; %s) IPP/2.1",
	                plat.sysname, plat.release, plat.machine);
      else if (!_cups_strcasecmp(value, "None"))
	cupsdSetString(&ServerHeader, "");
      else
	cupsdLogMessage(CUPSD_LOG_WARN, "Unknown ServerTokens %s on line %d of %s.",
                        value, linenum, ConfigurationFile);
    }
    else if (!_cups_strcasecmp(line, "ServerAlias") && value)
    {
     /*
      * ServerAlias name [... name]
      *
      * Splits the value on whitespace/comma and registers each name.
      */

      if (!ServerAlias)
        ServerAlias = cupsArrayNew(NULL, NULL);

      for (; *value;)
      {
        for (valuelen = 0; value[valuelen]; valuelen ++)
	  if (_cups_isspace(value[valuelen]) || value[valuelen] == ',')
	    break;

        if (value[valuelen])
        {
	  value[valuelen] = '\0';
          valuelen ++;
        }

	cupsdAddAlias(ServerAlias, value);

	/* NOTE(review): this condition is true for every character (any
	 * char is either "not a space" or "not a comma"), so the loop
	 * always breaks on its first iteration and never skips extra
	 * separators; "name1, name2"-style lists may yield empty alias
	 * entries.  Looks like it was meant to be '&&' -- verify intent. */
        for (value += valuelen; *value; value ++)
	  if (!_cups_isspace(*value) || *value != ',')
	    break;
      }
    }
    else if (!_cups_strcasecmp(line, "AccessLog") ||
             !_cups_strcasecmp(line, "CacheDir") ||
             !_cups_strcasecmp(line, "ConfigFilePerm") ||
             !_cups_strcasecmp(line, "DataDir") ||
             !_cups_strcasecmp(line, "DocumentRoot") ||
             !_cups_strcasecmp(line, "ErrorLog") ||
             !_cups_strcasecmp(line, "FatalErrors") ||
             !_cups_strcasecmp(line, "FileDevice") ||
             !_cups_strcasecmp(line, "FontPath") ||
             !_cups_strcasecmp(line, "Group") ||
             !_cups_strcasecmp(line, "LogFilePerm") ||
             !_cups_strcasecmp(line, "LPDConfigFile") ||
             !_cups_strcasecmp(line, "PageLog") ||
             !_cups_strcasecmp(line, "PassEnv") ||
             !_cups_strcasecmp(line, "Printcap") ||
             !_cups_strcasecmp(line, "PrintcapFormat") ||
             !_cups_strcasecmp(line, "RemoteRoot") ||
             !_cups_strcasecmp(line, "RequestRoot") ||
             !_cups_strcasecmp(line, "ServerBin") ||
             !_cups_strcasecmp(line, "ServerCertificate") ||
             !_cups_strcasecmp(line, "ServerKey") ||
             !_cups_strcasecmp(line, "ServerKeychain") ||
             !_cups_strcasecmp(line, "ServerRoot") ||
             !_cups_strcasecmp(line, "SetEnv") ||
             !_cups_strcasecmp(line, "SMBConfigFile") ||
             !_cups_strcasecmp(line, "StateDir") ||
             !_cups_strcasecmp(line, "SystemGroup") ||
             !_cups_strcasecmp(line, "SystemGroupAuthKey") ||
             !_cups_strcasecmp(line, "TempDir") ||
	     !_cups_strcasecmp(line, "User"))
    {
      /* These directives now belong in cups-files.conf; warn only. */
      cupsdLogMessage(CUPSD_LOG_INFO,
		      "Please move \"%s%s%s\" on line %d of %s to the %s file; "
		      "this will become an error in a future release.",
		      line, value ? " " : "", value ? value : "", linenum,
		      ConfigurationFile, CupsFilesFile);
    }
    else
      /* Anything else is handled by the generic name/value table. */
      parse_variable(ConfigurationFile, linenum, line, value,
                     sizeof(cupsd_vars) / sizeof(cupsd_vars[0]), cupsd_vars);
  }

  return (1);
}
| 0 |
[] |
cups
|
d47f6aec436e0e9df6554436e391471097686ecc
| 320,908,868,294,690,270,000,000,000,000,000,000,000 | 493 |
Fix local privilege escalation to root and sandbox bypasses in scheduler
(rdar://37836779, rdar://37836995, rdar://37837252, rdar://37837581)
|
/*
 * Invoked from the kretprobe trampoline: locate the probed function's real
 * return address for the current task, run any registered return-probe
 * handlers, and recycle the consumed kretprobe instances.  Returns the
 * real return address so the trampoline can jump back to the caller.
 */
__visible __used void *trampoline_handler(struct pt_regs *regs)
{
	struct kretprobe_instance *ri = NULL;
	struct hlist_head *head, empty_rp;
	struct hlist_node *tmp;
	unsigned long flags, orig_ret_address = 0;
	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
	kprobe_opcode_t *correct_ret_addr = NULL;

	INIT_HLIST_HEAD(&empty_rp);
	kretprobe_hash_lock(current, &head, &flags);
	/* fixup registers so handlers see a consistent pt_regs */
#ifdef CONFIG_X86_64
	regs->cs = __KERNEL_CS;
#else
	regs->cs = __KERNEL_CS | get_kernel_rpl();
	regs->gs = 0;
#endif
	regs->ip = trampoline_address;
	regs->orig_ax = ~0UL;

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because multiple functions in the call path have
	 * return probes installed on them, and/or more than one
	 * return probe was registered for a target function.
	 *
	 * We can handle this because:
	 *     - instances are always pushed into the head of the list
	 *     - when multiple return probes are registered for the same
	 *	 function, the (chronologically) first instance's ret_addr
	 *	 will be the real return address, and all the rest will
	 *	 point to kretprobe_trampoline.
	 */
	/* First pass: find the real (non-trampoline) return address. */
	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		orig_ret_address = (unsigned long)ri->ret_addr;

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}

	kretprobe_assert(ri, orig_ret_address, trampoline_address);

	correct_ret_addr = ri->ret_addr;
	/* Second pass: run handlers and recycle instances up to and
	 * including the one holding the real return address. */
	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		orig_ret_address = (unsigned long)ri->ret_addr;
		if (ri->rp && ri->rp->handler) {
			__this_cpu_write(current_kprobe, &ri->rp->kp);
			get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
			ri->ret_addr = correct_ret_addr;
			ri->rp->handler(ri, regs);
			__this_cpu_write(current_kprobe, NULL);
		}

		recycle_rp_inst(ri, &empty_rp);

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}

	kretprobe_hash_unlock(current, &flags);

	/* Free instances that recycle_rp_inst() moved onto empty_rp. */
	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}

	return (void *)orig_ret_address;
}
| 0 |
[
"CWE-264"
] |
linux
|
548acf19234dbda5a52d5a8e7e205af46e9da840
| 322,064,831,147,679,670,000,000,000,000,000,000,000 | 86 |
x86/mm: Expand the exception table logic to allow new handling options
Huge amounts of help from Andy Lutomirski and Borislav Petkov to
produce this. Andy provided the inspiration to add classes to the
exception table with a clever bit-squeezing trick, Boris pointed
out how much cleaner it would all be if we just had a new field.
Linus Torvalds blessed the expansion with:
' I'd rather not be clever in order to save just a tiny amount of space
in the exception table, which isn't really criticial for anybody. '
The third field is another relative function pointer, this one to a
handler that executes the actions.
We start out with three handlers:
1: Legacy - just jumps the to fixup IP
2: Fault - provide the trap number in %ax to the fixup code
3: Cleaned up legacy for the uaccess error hack
Signed-off-by: Tony Luck <[email protected]>
Reviewed-by: Borislav Petkov <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Link: http://lkml.kernel.org/r/f6af78fcbd348cf4939875cfda9c19689b5e50b8.1455732970.git.tony.luck@intel.com
Signed-off-by: Ingo Molnar <[email protected]>
|
static void ucma_query_device_addr(struct rdma_cm_id *cm_id,
struct rdma_ucm_query_addr_resp *resp)
{
if (!cm_id->device)
return;
resp->node_guid = (__force __u64) cm_id->device->node_guid;
resp->port_num = cm_id->port_num;
resp->pkey = (__force __u16) cpu_to_be16(
ib_addr_get_pkey(&cm_id->route.addr.dev_addr));
}
| 0 |
[
"CWE-416",
"CWE-703"
] |
linux
|
cb2595c1393b4a5211534e6f0a0fbad369e21ad8
| 339,910,347,693,258,540,000,000,000,000,000,000,000 | 11 |
infiniband: fix a possible use-after-free bug
ucma_process_join() will free the new allocated "mc" struct,
if there is any error after that, especially the copy_to_user().
But in parallel, ucma_leave_multicast() could find this "mc"
through idr_find() before ucma_process_join() frees it, since it
is already published.
So "mc" could be used in ucma_leave_multicast() after it is been
allocated and freed in ucma_process_join(), since we don't refcnt
it.
Fix this by separating "publish" from ID allocation, so that we
can get an ID first and publish it later after copy_to_user().
Fixes: c8f6a362bf3e ("RDMA/cma: Add multicast communication support")
Reported-by: Noam Rathaus <[email protected]>
Signed-off-by: Cong Wang <[email protected]>
Signed-off-by: Jason Gunthorpe <[email protected]>
|
static bool check_request_str(const char* const str,
bool (*check) (int c))
{
for (size_t i(0); str[i] != '\0'; ++i)
{
if (!check(str[i]))
{
WSREP_WARN("Illegal character in state transfer request: %i (%c).",
str[i], str[i]);
return true;
}
}
return false;
}
| 0 |
[
"CWE-77"
] |
mysql-wsrep
|
4ea4b0c6a318209ac09b15aaa906c7b4a13b988c
| 160,735,219,665,205,440,000,000,000,000,000,000,000 | 15 |
codership/mysql-wsrep-bugs#758 Donor uses invalid SST methods
|
/*
 * (Re)initialize the global _res resolver state.  Always returns 0.
 * Protected by __resolv_lock so concurrent callers do not race on _res.
 */
res_init(void)
{
	/*
	 * These three fields used to be statically initialized.  This made
	 * it hard to use this code in a shared library.  It is necessary,
	 * now that we're doing dynamic initialization here, that we preserve
	 * the old semantics: if an application modifies one of these three
	 * fields of _res before res_init() is called, res_init() will not
	 * alter them.  Of course, if an application is setting them to
	 * _zero_ before calling res_init(), hoping to override what used
	 * to be the static default, we can't detect it and unexpected results
	 * will follow.  Zero for any of these fields would make no sense,
	 * so one can safely assume that the applications were already getting
	 * unexpected results.
	 *
	 * _res.options is tricky since some apps were known to diddle the bits
	 * before res_init() was first called.  We can't replicate that semantic
	 * with dynamic initialization (they may have turned bits off that are
	 * set in RES_DEFAULT).  Our solution is to declare such applications
	 * "broken".  They could fool us by setting RES_INIT but none do (yet).
	 */
	__UCLIBC_MUTEX_LOCK(__resolv_lock);
	if (!_res.retrans)
		_res.retrans = RES_TIMEOUT;
	if (!_res.retry)
		_res.retry = 4;
	if (!(_res.options & RES_INIT))
		_res.options = RES_DEFAULT;

	/*
	 * This one used to initialize implicitly to zero, so unless the app
	 * has set it to something in particular, we can randomize it now.
	 */
	if (!_res.id)
		_res.id = res_randomid();

	/* Presumably disables the sync hook while __res_vinit() rebuilds
	 * _res, then restores it -- TODO confirm __res_sync semantics. */
	__res_sync = NULL;
	__res_vinit(&_res, 1);
	__res_sync = res_sync_func;

	__UCLIBC_MUTEX_UNLOCK(__resolv_lock);

	return 0;
}
| 0 |
[
"CWE-79"
] |
uclibc-ng
|
0f822af0445e5348ce7b7bd8ce1204244f31d174
| 228,288,607,442,376,530,000,000,000,000,000,000,000 | 46 |
libc/inet/resolv.c: add __hnbad to check DNS entries for validity…
… using the same rules glibc does
also call __hnbad in some places to check answers
|
/* Instance initializer: set up the private data, default the cursor to
 * visible, and create/realize the offscreen GtkInvisible that serves as
 * the grab window (shown so it can be used for grabs -- TODO confirm
 * that realization is the reason for the gtk_widget_show call). */
gs_grab_init (GSGrab *grab)
{
        grab->priv = GS_GRAB_GET_PRIVATE (grab);

        grab->priv->mouse_hide_cursor = FALSE;
        grab->priv->invisible = gtk_invisible_new ();
        gtk_widget_show (grab->priv->invisible);
}
| 0 |
[] |
gnome-screensaver
|
f93a22c175090cf02e80bc3ee676b53f1251f685
| 262,536,816,974,438,700,000,000,000,000,000,000,000 | 8 |
Nullify grab window variables when windows are destroyed
If we don't do this then there is a time period where the
grab window variables contain dangling pointers which can
cause crashes.
Part of fix for
https://bugzilla.gnome.org/show_bug.cgi?id=609789
|
// Read a float devicelink tag and return a private duplicate of its
// pipeline, wrapped with normalization stages: Lab/XYZ input spaces get a
// "to float" stage prepended, Lab/XYZ PCS gets a "from float" stage
// appended.  Returns NULL (freeing the duplicate) on any failure.
cmsPipeline* _cmsReadFloatDevicelinkTag(cmsHPROFILE hProfile, cmsTagSignature tagFloat)
{
    cmsContext ContextID = cmsGetProfileContextID(hProfile);
    cmsPipeline* Lut = cmsPipelineDup((cmsPipeline*) cmsReadTag(hProfile, tagFloat));
    cmsColorSpaceSignature PCS = cmsGetPCS(hProfile);
    cmsColorSpaceSignature spc = cmsGetColorSpace(hProfile);

    if (Lut == NULL) return NULL;

    // Normalize the device color space on the way in
    switch (spc) {

    case cmsSigLabData:
        if (!cmsPipelineInsertStage(Lut, cmsAT_BEGIN, _cmsStageNormalizeToLabFloat(ContextID)))
            goto Error;
        break;

    case cmsSigXYZData:
        if (!cmsPipelineInsertStage(Lut, cmsAT_BEGIN, _cmsStageNormalizeToXyzFloat(ContextID)))
            goto Error;
        break;

    default:
        break;
    }

    // Denormalize the PCS on the way out
    switch (PCS) {

    case cmsSigLabData:
        if (!cmsPipelineInsertStage(Lut, cmsAT_END, _cmsStageNormalizeFromLabFloat(ContextID)))
            goto Error;
        break;

    case cmsSigXYZData:
        if (!cmsPipelineInsertStage(Lut, cmsAT_END, _cmsStageNormalizeFromXyzFloat(ContextID)))
            goto Error;
        break;

    default:
        break;
    }

    return Lut;

Error:
    cmsPipelineFree(Lut);
    return NULL;
}
| 0 |
[] |
Little-CMS
|
41d222df1bc6188131a8f46c32eab0a4d4cdf1b6
| 91,036,346,470,158,110,000,000,000,000,000,000,000 | 38 |
Memory squeezing fix: lcms2 cmsPipeline construction
When creating a new pipeline, lcms would often try to allocate a stage
and pass it to cmsPipelineInsertStage without checking whether the
allocation succeeded. cmsPipelineInsertStage would then assert (or crash)
if it had not.
The fix here is to change cmsPipelineInsertStage to check and return
an error value. All calling code is then checked to test this return
value and cope.
|
proxy_C_UnwrapKey (CK_X_FUNCTION_LIST *self,
                   CK_SESSION_HANDLE handle,
                   CK_MECHANISM_PTR mechanism,
                   CK_OBJECT_HANDLE unwrapping_key,
                   CK_BYTE_PTR wrapped_key,
                   CK_ULONG wrapped_key_len,
                   CK_ATTRIBUTE_PTR template,
                   CK_ULONG count,
                   CK_OBJECT_HANDLE_PTR key)
{
	State *state = (State *)self;
	Mapping map;
	CK_RV rv;

	/* Translate the proxy session handle into the underlying module's
	 * handle; on success forward C_UnwrapKey to that module. */
	rv = map_session_to_real (state->px, &handle, &map, NULL);
	if (rv == CKR_OK)
		rv = (map.funcs->C_UnwrapKey) (handle, mechanism, unwrapping_key,
		                               wrapped_key, wrapped_key_len,
		                               template, count, key);

	return rv;
}
| 0 |
[
"CWE-190"
] |
p11-kit
|
5307a1d21a50cacd06f471a873a018d23ba4b963
| 321,989,906,186,964,800,000,000,000,000,000,000,000 | 19 |
Check for arithmetic overflows before allocating
|
/*
 * VM-exit handler for "TPR below threshold": queue KVM_REQ_EVENT so
 * pending interrupts get re-evaluated, and return 1 (exit handled
 * in-kernel, continue running the guest).
 */
static int handle_tpr_below_threshold(struct kvm_vcpu *vcpu)
{
	kvm_make_request(KVM_REQ_EVENT, vcpu);
	return 1;
}
| 0 |
[] |
kvm
|
a642fc305053cc1c6e47e4f4df327895747ab485
| 54,320,812,922,734,600,000,000,000,000,000,000,000 | 5 |
kvm: vmx: handle invvpid vm exit gracefully
On systems with invvpid instruction support (corresponding bit in
IA32_VMX_EPT_VPID_CAP MSR is set) guest invocation of invvpid
causes vm exit, which is currently not handled and results in
propagation of unknown exit to userspace.
Fix this by installing an invvpid vm exit handler.
This is CVE-2014-3646.
Cc: [email protected]
Signed-off-by: Petr Matousek <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
|
/*
 * Quiesce all modifications to the filesystem (e.g. for freeze): wait for
 * in-flight transactions, force and unpin the log, reclaim inodes, then
 * push the superblock counters and quiesce the log.  Ordering of these
 * steps matters; do not reorder.
 */
xfs_quiesce_attr(
	struct xfs_mount	*mp)
{
	int	error = 0;

	/* wait for all modifications to complete (busy-wait in 100ms steps) */
	while (atomic_read(&mp->m_active_trans) > 0)
		delay(100);

	/* force the log to unpin objects from the now complete transactions */
	xfs_log_force(mp, XFS_LOG_SYNC);

	/* reclaim inodes to do any IO before the freeze completes */
	xfs_reclaim_inodes(mp, 0);
	xfs_reclaim_inodes(mp, SYNC_WAIT);

	/* Push the superblock and write an unmount record */
	error = xfs_log_sbcount(mp);
	if (error)
		xfs_warn(mp, "xfs_attr_quiesce: failed to log sb changes. "
				"Frozen image may not be consistent.");
	/*
	 * Just warn here till VFS can correctly support
	 * read-only remount without racing.
	 */
	WARN_ON(atomic_read(&mp->m_active_trans) != 0);

	xfs_log_quiesce(mp);
}
| 0 |
[
"CWE-416"
] |
linux
|
c9fbd7bbc23dbdd73364be4d045e5d3612cf6e82
| 70,006,689,834,227,890,000,000,000,000,000,000,000 | 29 |
xfs: clear sb->s_fs_info on mount failure
We recently had an oops reported on a 4.14 kernel in
xfs_reclaim_inodes_count() where sb->s_fs_info pointed to garbage
and so the m_perag_tree lookup walked into lala land.
Essentially, the machine was under memory pressure when the mount
was being run, xfs_fs_fill_super() failed after allocating the
xfs_mount and attaching it to sb->s_fs_info. It then cleaned up and
freed the xfs_mount, but the sb->s_fs_info field still pointed to
the freed memory. Hence when the superblock shrinker then ran
it fell off the bad pointer.
With the superblock shrinker problem fixed at teh VFS level, this
stale s_fs_info pointer is still a problem - we use it
unconditionally in ->put_super when the superblock is being torn
down, and hence we can still trip over it after a ->fill_super
call failure. Hence we need to clear s_fs_info if
xfs-fs_fill_super() fails, and we need to check if it's valid in
the places it can potentially be dereferenced after a ->fill_super
failure.
Signed-Off-By: Dave Chinner <[email protected]>
Reviewed-by: Darrick J. Wong <[email protected]>
Signed-off-by: Darrick J. Wong <[email protected]>
|
    /// Returns a pointer to the beginning of the processed output samples
    /// (delegates to the wrapped 'output' object's ptrBegin()).
    virtual SAMPLETYPE *ptrBegin()
    {
        return output->ptrBegin();
    }
| 0 |
[
"CWE-617"
] |
soundtouch
|
107f2c5d201a4dfea1b7f15c5957ff2ac9e5f260
| 153,656,172,686,155,330,000,000,000,000,000,000,000 | 4 |
Replaced illegal-number-of-channel assertions with run-time exception
|
/* Fill *ts_start with the current CLOCK_MONOTONIC time.  The return value
 * of clock_gettime() is ignored -- assumes CLOCK_MONOTONIC is always
 * available on this platform; TODO confirm. */
void cgtimer_time(cgtimer_t *ts_start)
{
	clock_gettime(CLOCK_MONOTONIC, ts_start);
}
| 0 |
[
"CWE-20",
"CWE-703"
] |
sgminer
|
910c36089940e81fb85c65b8e63dcd2fac71470c
| 71,127,451,005,365,630,000,000,000,000,000,000,000 | 4 |
stratum: parse_notify(): Don't die on malformed bbversion/prev_hash/nbit/ntime.
Might have introduced a memory leak, don't have time to check. :(
Should the other hex2bin()'s be checked?
Thanks to Mick Ayzenberg <mick.dejavusecurity.com> for finding this.
|
/* Delete every variable of `fcn` that has no recorded accesses.
 * Iterates over a clone of fcn->vars because r_anal_function_delete_var()
 * mutates the live vector, which would invalidate in-place iteration. */
R_API void r_anal_function_delete_unused_vars(RAnalFunction *fcn) {
	r_return_if_fail (fcn);
	void **v;
	RPVector *vars_clone = (RPVector *)r_vector_clone ((RVector *)&fcn->vars);
	r_pvector_foreach (vars_clone, v) {
		RAnalVar *var = *v;
		/* no accesses recorded -> the variable is unused */
		if (r_vector_empty (&var->accesses)) {
			r_anal_function_delete_var (fcn, var);
		}
	}
	r_pvector_free (vars_clone);
}
| 0 |
[
"CWE-416"
] |
radare2
|
a7ce29647fcb38386d7439696375e16e093d6acb
| 206,353,606,142,563,030,000,000,000,000,000,000,000 | 12 |
Fix UAF in aaaa on arm/thumb switching ##crash
* Reported by @peacock-doris via huntr.dev
* Reproducer tests_65185
* This is a logic fix, but not the fully safe as changes in the code
can result on UAF again, to properly protect r2 from crashing we
need to break the ABI and add refcounting to RRegItem, which can't
happen in 5.6.x because of abi-compat rules
|
/* Non-zero iff a noflush suspend is in progress on @md
 * (DMF_NOFLUSH_SUSPENDING set in md->flags). */
static int __noflush_suspending(struct mapped_device *md)
{
	return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
}
| 0 |
[
"CWE-362"
] |
linux
|
b9a41d21dceadf8104812626ef85dc56ee8a60ed
| 110,212,796,389,501,860,000,000,000,000,000,000,000 | 4 |
dm: fix race between dm_get_from_kobject() and __dm_destroy()
The following BUG_ON was hit when testing repeat creation and removal of
DM devices:
kernel BUG at drivers/md/dm.c:2919!
CPU: 7 PID: 750 Comm: systemd-udevd Not tainted 4.1.44
Call Trace:
[<ffffffff81649e8b>] dm_get_from_kobject+0x34/0x3a
[<ffffffff81650ef1>] dm_attr_show+0x2b/0x5e
[<ffffffff817b46d1>] ? mutex_lock+0x26/0x44
[<ffffffff811df7f5>] sysfs_kf_seq_show+0x83/0xcf
[<ffffffff811de257>] kernfs_seq_show+0x23/0x25
[<ffffffff81199118>] seq_read+0x16f/0x325
[<ffffffff811de994>] kernfs_fop_read+0x3a/0x13f
[<ffffffff8117b625>] __vfs_read+0x26/0x9d
[<ffffffff8130eb59>] ? security_file_permission+0x3c/0x44
[<ffffffff8117bdb8>] ? rw_verify_area+0x83/0xd9
[<ffffffff8117be9d>] vfs_read+0x8f/0xcf
[<ffffffff81193e34>] ? __fdget_pos+0x12/0x41
[<ffffffff8117c686>] SyS_read+0x4b/0x76
[<ffffffff817b606e>] system_call_fastpath+0x12/0x71
The bug can be easily triggered, if an extra delay (e.g. 10ms) is added
between the test of DMF_FREEING & DMF_DELETING and dm_get() in
dm_get_from_kobject().
To fix it, we need to ensure the test of DMF_FREEING & DMF_DELETING and
dm_get() are done in an atomic way, so _minor_lock is used.
The other callers of dm_get() have also been checked to be OK: some
callers invoke dm_get() under _minor_lock, some callers invoke it under
_hash_lock, and dm_start_request() invoke it after increasing
md->open_count.
Cc: [email protected]
Signed-off-by: Hou Tao <[email protected]>
Signed-off-by: Mike Snitzer <[email protected]>
|
    // Extracts this element's value as a bool (delegating to Bool())
    // and stores it in v.
    void Val(bool& v) const {
        v = Bool();
    }
| 0 |
[
"CWE-613"
] |
mongo
|
e55d6e2292e5dbe2f97153251d8193d1cc89f5d7
| 89,069,073,346,502,080,000,000,000,000,000,000,000 | 3 |
SERVER-38984 Validate unique User ID on UserCache hit
|
/*
 * Remove 'fiber' from the active fiber list and move it to the tail of the
 * fiber pool's free list for later reuse.  Returns the fiber that followed
 * the killed one, so callers can keep iterating the active list.
 */
RE_FIBER* _yr_re_fiber_kill(
    RE_FIBER_LIST* fiber_list,
    RE_FIBER_POOL* fiber_pool,
    RE_FIBER* fiber)
{
  RE_FIBER* next_fiber = fiber->next;

  /* Unlink 'fiber' from its neighbors in the active list. */
  if (fiber->prev != NULL)
    fiber->prev->next = next_fiber;

  if (next_fiber != NULL)
    next_fiber->prev = fiber->prev;

  /* Link the killed fiber after the pool's current tail. */
  if (fiber_pool->fibers.tail != NULL)
    fiber_pool->fibers.tail->next = fiber;

  /* Patch the active list's head/tail if they pointed at 'fiber'. */
  if (fiber_list->tail == fiber)
    fiber_list->tail = fiber->prev;

  if (fiber_list->head == fiber)
    fiber_list->head = next_fiber;

  /* Make 'fiber' the new tail of the pool's free list. */
  fiber->next = NULL;
  fiber->prev = fiber_pool->fibers.tail;
  fiber_pool->fibers.tail = fiber;

  if (fiber_pool->fibers.head == NULL)
    fiber_pool->fibers.head = fiber;

  return next_fiber;
}
| 0 |
[
"CWE-125"
] |
yara
|
83d799804648c2a0895d40a19835d9b757c6fa4e
| 228,639,888,713,795,040,000,000,000,000,000,000,000 | 31 |
Fix issue #646 (#648)
* Fix issue #646 and some edge cases with wide regexps using \b and \B
* Rename function IS_WORD_CHAR to _yr_re_is_word_char
|
/* Allocate and populate an EncryptData for re-compressing an archive with
 * the given password settings.  The caller owns the returned struct. */
encrypt_data_new (FrWindow   *window,
		  const char *password,
		  gboolean    encrypt_header)
{
	EncryptData *edata;

	/* g_new0 zero-fills, so edata->password stays NULL when no
	 * password is supplied. */
	edata = g_new0 (EncryptData, 1);
	edata->window = window;
	if (password != NULL)
		edata->password = g_strdup (password);	/* owned copy */
	edata->encrypt_header = encrypt_header;
	/* Private temp dir used for extraction during the operation --
	 * presumably cleaned up when the operation finishes; verify in
	 * the code that frees EncryptData. */
	edata->temp_extraction_dir = _g_file_get_temp_work_dir (NULL);

	return edata;
}
| 0 |
[
"CWE-22"
] |
file-roller
|
b147281293a8307808475e102a14857055f81631
| 287,856,129,766,532,440,000,000,000,000,000,000,000 | 15 |
libarchive: sanitize filenames before extracting
|
/*
 * Map every iovec segment of @iter into the scatterlist @sg (which holds
 * @sg_count entries).  Returns 0 on success.  On failure, drops the page
 * references taken so far by scanning ALL @sg_count entries -- assumes
 * entries not yet filled have a NULL sg_page(); TODO confirm callers
 * zero-initialize the table.
 */
vhost_scsi_iov_to_sgl(struct vhost_scsi_cmd *cmd, bool write,
		      struct iov_iter *iter,
		      struct scatterlist *sg, int sg_count)
{
	size_t off = iter->iov_offset;	/* offset applies to first segment only */
	int i, ret;

	for (i = 0; i < iter->nr_segs; i++) {
		void __user *base = iter->iov[i].iov_base + off;
		size_t len = iter->iov[i].iov_len - off;

		ret = vhost_scsi_map_to_sgl(cmd, base, len, sg, write);
		if (ret < 0) {
			/* Release every pinned page; reusing 'i' here is
			 * fine since we return immediately afterwards. */
			for (i = 0; i < sg_count; i++) {
				struct page *page = sg_page(&sg[i]);
				if (page)
					put_page(page);
			}
			return ret;
		}
		sg += ret;	/* advance past the entries just filled */
		off = 0;
	}
	return 0;
}
| 0 |
[
"CWE-200",
"CWE-119"
] |
linux
|
59c816c1f24df0204e01851431d3bab3eb76719c
| 280,196,138,906,967,750,000,000,000,000,000,000,000 | 25 |
vhost/scsi: potential memory corruption
This code in vhost_scsi_make_tpg() is confusing because we limit "tpgt"
to UINT_MAX, but the data type of "tpg->tport_tpgt" is a u16.
I looked at the context and it turns out that in
vhost_scsi_set_endpoint(), "tpg->tport_tpgt" is used as an offset into
the vs_tpg[] array which has VHOST_SCSI_MAX_TARGET (256) elements so
anything higher than 255 then it is invalid. I have made that the limit
now.
In vhost_scsi_send_evt() we mask away values higher than 255, but now
that the limit has changed, we don't need the mask.
Signed-off-by: Dan Carpenter <[email protected]>
Signed-off-by: Nicholas Bellinger <[email protected]>
|
/* Parses a frame-rate argument into the global `import_fps` fraction.
   Accepts "ticks-inc", "ticks/inc", the deprecated "auto" keyword (warning
   only), or any form understood by gf_parse_frac().  Always returns 0.
   `opt` is accepted for signature compatibility but unused. */
u32 parse_fps(char *arg_val, u32 opt)
{
u32 num, den;
if (strcmp(arg_val, "auto") == 0) {
M4_LOG(GF_LOG_WARNING, ("Warning, fps=auto option is deprecated\n"));
}
else if ((sscanf(arg_val, "%u-%u", &num, &den) == 2)
|| (sscanf(arg_val, "%u/%u", &num, &den) == 2)) {
/* A zero increment would make the fraction meaningless; clamp to 1. */
import_fps.num = num;
import_fps.den = den ? den : 1;
}
else {
/* Fall back to the generic fraction parser. */
gf_parse_frac(arg_val, &import_fps);
}
return 0;
}
| 0 |
[
"CWE-401",
"CWE-787"
] |
gpac
|
a51f951b878c2b73c1d8e2f1518c7cdc5fb82c3f
| 187,541,832,279,763,520,000,000,000,000,000,000,000 | 15 |
fixed #1782 (fuzz)
|
void MD5::encodeString(char const* str)
{
size_t len = strlen(str);
update(QUtil::unsigned_char_pointer(str), len);
final();
}
| 0 |
[
"CWE-787"
] |
qpdf
|
d71f05ca07eb5c7cfa4d6d23e5c1f2a800f52e8e
| 89,486,898,718,234,930,000,000,000,000,000,000,000 | 7 |
Fix sign and conversion warnings (major)
This makes all integer type conversions that have potential data loss
explicit with calls that do range checks and raise an exception. After
this commit, qpdf builds with no warnings when -Wsign-conversion
-Wconversion is used with gcc or clang or when -W3 -Wd4800 is used
with MSVC. This significantly reduces the likelihood of potential
crashes from bogus integer values.
There are some parts of the code that take int when they should take
size_t or an offset. Such places would make qpdf not support files
with more than 2^31 of something that usually wouldn't be so large. In
the event that such a file shows up and is valid, at least qpdf would
raise an error in the right spot so the issue could be legitimately
addressed rather than failing in some weird way because of a silent
overflow condition.
|
/* Creates the listening BIO chain for the OCSP responder: an accept BIO
   bound to `port` with a buffering BIO pushed on each accepted connection.
   Returns the accept BIO on success, or NULL on failure (with everything
   allocated here freed). */
static BIO *init_responder(char *port)
{
BIO *accept_bio = NULL;
BIO *buffer_bio = BIO_new(BIO_f_buffer());
if (buffer_bio == NULL)
goto cleanup;
#ifndef OPENSSL_NO_SOCK
accept_bio = BIO_new_accept(port);
#else
BIO_printf(bio_err, "Error setting up accept BIO - sockets not supported.\n");
#endif
if (accept_bio == NULL)
goto cleanup;
/* The accept BIO now owns the buffering BIO; NULL it so the cleanup
   path cannot double-free it. */
BIO_set_accept_bios(accept_bio, buffer_bio);
buffer_bio = NULL;
if (BIO_do_accept(accept_bio) <= 0)
{
BIO_printf(bio_err, "Error setting up accept BIO\n");
ERR_print_errors(bio_err);
goto cleanup;
}
return accept_bio;
cleanup:
BIO_free_all(accept_bio);
BIO_free(buffer_bio);
return NULL;
}
| 0 |
[] |
openssl
|
d65b8b2162f33ac0d53dace588a0847ed827626c
| 213,126,408,289,223,900,000,000,000,000,000,000,000 | 30 |
Backport OCSP fixes.
|
static int pagemap_open(struct inode *inode, struct file *file)
{
/* Physical addresses are an attack vector (see the commit message:
   rowhammer-style exploitation), so only CAP_SYS_ADMIN may open this. */
if (capable(CAP_SYS_ADMIN)) {
/* One-time heads-up about the upcoming pagemap format change. */
pr_warn_once("Bits 55-60 of /proc/PID/pagemap entries are about "
"to stop being page-shift some time soon. See the "
"linux/Documentation/vm/pagemap.txt for details.\n");
return 0;
}
return -EPERM;
}
| 0 |
[
"CWE-200"
] |
linux
|
ab676b7d6fbf4b294bf198fb27ade5b0e865c7ce
| 220,085,461,782,823,160,000,000,000,000,000,000,000 | 10 |
pagemap: do not leak physical addresses to non-privileged userspace
As pointed by recent post[1] on exploiting DRAM physical imperfection,
/proc/PID/pagemap exposes sensitive information which can be used to do
attacks.
This disallows anybody without CAP_SYS_ADMIN to read the pagemap.
[1] http://googleprojectzero.blogspot.com/2015/03/exploiting-dram-rowhammer-bug-to-gain.html
[ Eventually we might want to do anything more finegrained, but for now
this is the simple model. - Linus ]
Signed-off-by: Kirill A. Shutemov <[email protected]>
Acked-by: Konstantin Khlebnikov <[email protected]>
Acked-by: Andy Lutomirski <[email protected]>
Cc: Pavel Emelyanov <[email protected]>
Cc: Andrew Morton <[email protected]>
Cc: Mark Seaborn <[email protected]>
Cc: [email protected]
Signed-off-by: Linus Torvalds <[email protected]>
|
/* Handles the HCI "Inquiry Result with RSSI" event.  The first byte of the
   event payload is the number of responses; what follows is an array of
   either inquiry_info_with_rssi or inquiry_info_with_rssi_and_pscan_mode
   records, distinguished below by dividing the remaining length by the
   response count.  Each record is added to the inquiry cache and forwarded
   to the management interface. */
static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
struct sk_buff *skb)
{
struct inquiry_data data;
int num_rsp = *((__u8 *) skb->data);
BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
if (!num_rsp)
return;
/* Results arriving during periodic inquiry are intentionally ignored. */
if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
return;
hci_dev_lock(hdev);
/* Per-record size != sizeof(inquiry_info_with_rssi) means the records
   carry the extra pscan_mode byte. */
if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
struct inquiry_info_with_rssi_and_pscan_mode *info;
info = (void *) (skb->data + 1);
/* Reject malformed packets shorter than the claimed record count. */
if (skb->len < num_rsp * sizeof(*info) + 1)
goto unlock;
for (; num_rsp; num_rsp--, info++) {
u32 flags;
bacpy(&data.bdaddr, &info->bdaddr);
data.pscan_rep_mode = info->pscan_rep_mode;
data.pscan_period_mode = info->pscan_period_mode;
data.pscan_mode = info->pscan_mode;
memcpy(data.dev_class, info->dev_class, 3);
data.clock_offset = info->clock_offset;
data.rssi = info->rssi;
data.ssp_mode = 0x00;
flags = hci_inquiry_cache_update(hdev, &data, false);
mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
info->dev_class, info->rssi,
flags, NULL, 0, NULL, 0);
}
} else {
/* Plain format: no per-record pscan_mode byte, so report 0x00. */
struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
/* Same malformed-length guard as the other branch. */
if (skb->len < num_rsp * sizeof(*info) + 1)
goto unlock;
for (; num_rsp; num_rsp--, info++) {
u32 flags;
bacpy(&data.bdaddr, &info->bdaddr);
data.pscan_rep_mode = info->pscan_rep_mode;
data.pscan_period_mode = info->pscan_period_mode;
data.pscan_mode = 0x00;
memcpy(data.dev_class, info->dev_class, 3);
data.clock_offset = info->clock_offset;
data.rssi = info->rssi;
data.ssp_mode = 0x00;
flags = hci_inquiry_cache_update(hdev, &data, false);
mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
info->dev_class, info->rssi,
flags, NULL, 0, NULL, 0);
}
}
unlock:
hci_dev_unlock(hdev);
}
| 0 |
[
"CWE-416"
] |
linux
|
5c4c8c9544099bb9043a10a5318130a943e32fc3
| 101,336,281,385,882,300,000,000,000,000,000,000,000 | 70 |
Bluetooth: verify AMP hci_chan before amp_destroy
hci_chan can be created in 2 places: hci_loglink_complete_evt() if
it is an AMP hci_chan, or l2cap_conn_add() otherwise. In theory,
Only AMP hci_chan should be removed by a call to
hci_disconn_loglink_complete_evt(). However, the controller might mess
up, call that function, and destroy an hci_chan which is not initiated
by hci_loglink_complete_evt().
This patch adds a verification that the destroyed hci_chan must have
been init'd by hci_loglink_complete_evt().
Example crash call trace:
Call Trace:
__dump_stack lib/dump_stack.c:77 [inline]
dump_stack+0xe3/0x144 lib/dump_stack.c:118
print_address_description+0x67/0x22a mm/kasan/report.c:256
kasan_report_error mm/kasan/report.c:354 [inline]
kasan_report mm/kasan/report.c:412 [inline]
kasan_report+0x251/0x28f mm/kasan/report.c:396
hci_send_acl+0x3b/0x56e net/bluetooth/hci_core.c:4072
l2cap_send_cmd+0x5af/0x5c2 net/bluetooth/l2cap_core.c:877
l2cap_send_move_chan_cfm_icid+0x8e/0xb1 net/bluetooth/l2cap_core.c:4661
l2cap_move_fail net/bluetooth/l2cap_core.c:5146 [inline]
l2cap_move_channel_rsp net/bluetooth/l2cap_core.c:5185 [inline]
l2cap_bredr_sig_cmd net/bluetooth/l2cap_core.c:5464 [inline]
l2cap_sig_channel net/bluetooth/l2cap_core.c:5799 [inline]
l2cap_recv_frame+0x1d12/0x51aa net/bluetooth/l2cap_core.c:7023
l2cap_recv_acldata+0x2ea/0x693 net/bluetooth/l2cap_core.c:7596
hci_acldata_packet net/bluetooth/hci_core.c:4606 [inline]
hci_rx_work+0x2bd/0x45e net/bluetooth/hci_core.c:4796
process_one_work+0x6f8/0xb50 kernel/workqueue.c:2175
worker_thread+0x4fc/0x670 kernel/workqueue.c:2321
kthread+0x2f0/0x304 kernel/kthread.c:253
ret_from_fork+0x3a/0x50 arch/x86/entry/entry_64.S:415
Allocated by task 38:
set_track mm/kasan/kasan.c:460 [inline]
kasan_kmalloc+0x8d/0x9a mm/kasan/kasan.c:553
kmem_cache_alloc_trace+0x102/0x129 mm/slub.c:2787
kmalloc include/linux/slab.h:515 [inline]
kzalloc include/linux/slab.h:709 [inline]
hci_chan_create+0x86/0x26d net/bluetooth/hci_conn.c:1674
l2cap_conn_add.part.0+0x1c/0x814 net/bluetooth/l2cap_core.c:7062
l2cap_conn_add net/bluetooth/l2cap_core.c:7059 [inline]
l2cap_connect_cfm+0x134/0x852 net/bluetooth/l2cap_core.c:7381
hci_connect_cfm+0x9d/0x122 include/net/bluetooth/hci_core.h:1404
hci_remote_ext_features_evt net/bluetooth/hci_event.c:4161 [inline]
hci_event_packet+0x463f/0x72fa net/bluetooth/hci_event.c:5981
hci_rx_work+0x197/0x45e net/bluetooth/hci_core.c:4791
process_one_work+0x6f8/0xb50 kernel/workqueue.c:2175
worker_thread+0x4fc/0x670 kernel/workqueue.c:2321
kthread+0x2f0/0x304 kernel/kthread.c:253
ret_from_fork+0x3a/0x50 arch/x86/entry/entry_64.S:415
Freed by task 1732:
set_track mm/kasan/kasan.c:460 [inline]
__kasan_slab_free mm/kasan/kasan.c:521 [inline]
__kasan_slab_free+0x106/0x128 mm/kasan/kasan.c:493
slab_free_hook mm/slub.c:1409 [inline]
slab_free_freelist_hook+0xaa/0xf6 mm/slub.c:1436
slab_free mm/slub.c:3009 [inline]
kfree+0x182/0x21e mm/slub.c:3972
hci_disconn_loglink_complete_evt net/bluetooth/hci_event.c:4891 [inline]
hci_event_packet+0x6a1c/0x72fa net/bluetooth/hci_event.c:6050
hci_rx_work+0x197/0x45e net/bluetooth/hci_core.c:4791
process_one_work+0x6f8/0xb50 kernel/workqueue.c:2175
worker_thread+0x4fc/0x670 kernel/workqueue.c:2321
kthread+0x2f0/0x304 kernel/kthread.c:253
ret_from_fork+0x3a/0x50 arch/x86/entry/entry_64.S:415
The buggy address belongs to the object at ffff8881d7af9180
which belongs to the cache kmalloc-128 of size 128
The buggy address is located 24 bytes inside of
128-byte region [ffff8881d7af9180, ffff8881d7af9200)
The buggy address belongs to the page:
page:ffffea00075ebe40 count:1 mapcount:0 mapping:ffff8881da403200 index:0x0
flags: 0x8000000000000200(slab)
raw: 8000000000000200 dead000000000100 dead000000000200 ffff8881da403200
raw: 0000000000000000 0000000080150015 00000001ffffffff 0000000000000000
page dumped because: kasan: bad access detected
Memory state around the buggy address:
ffff8881d7af9080: fc fc fc fc fc fc fc fc fb fb fb fb fb fb fb fb
ffff8881d7af9100: fb fb fb fb fb fb fb fb fc fc fc fc fc fc fc fc
>ffff8881d7af9180: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
^
ffff8881d7af9200: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
ffff8881d7af9280: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
Signed-off-by: Archie Pusaka <[email protected]>
Reported-by: [email protected]
Reviewed-by: Alain Michaud <[email protected]>
Reviewed-by: Abhishek Pandit-Subedi <[email protected]>
Signed-off-by: Marcel Holtmann <[email protected]>
|
Subsets and Splits
CWE 416 & 19
The query filters records related to specific CWEs (Common Weakness Enumerations), providing a basic overview of entries with these vulnerabilities but without deeper analysis.
CWE Frequency in Train Set
Counts the occurrences of each CWE (Common Weakness Enumeration) in the dataset, providing a basic distribution but limited insight.