func
stringlengths 0
484k
| target
int64 0
1
| cwe
listlengths 0
4
| project
stringclasses 799
values | commit_id
stringlengths 40
40
| hash
float64 1,215,700,430,453,689,100,000,000B
340,281,914,521,452,260,000,000,000,000B
| size
int64 1
24k
| message
stringlengths 0
13.3k
|
---|---|---|---|---|---|---|---|
static int session_end_stream_headers_received(nghttp2_session *session,
nghttp2_frame *frame,
nghttp2_stream *stream) {
int rv;
if ((frame->hd.flags & NGHTTP2_FLAG_END_STREAM) == 0) {
return 0;
}
nghttp2_stream_shutdown(stream, NGHTTP2_SHUT_RD);
rv = nghttp2_session_close_stream_if_shut_rdwr(session, stream);
if (nghttp2_is_fatal(rv)) {
return rv;
}
return 0;
} | 0 | []
| nghttp2 | 0a6ce87c22c69438ecbffe52a2859c3a32f1620f | 13,604,211,667,893,726,000,000,000,000,000,000,000 | 16 | Add nghttp2_option_set_max_outbound_ack |
int ssl3_send_next_proto(SSL *s)
{
unsigned int len, padding_len;
unsigned char *d;
if (s->state == SSL3_ST_CW_NEXT_PROTO_A)
{
len = s->next_proto_negotiated_len;
padding_len = 32 - ((len + 2) % 32);
d = (unsigned char *)s->init_buf->data;
d[4] = len;
memcpy(d + 5, s->next_proto_negotiated, len);
d[5 + len] = padding_len;
memset(d + 6 + len, 0, padding_len);
*(d++)=SSL3_MT_NEXT_PROTO;
l2n3(2 + len + padding_len, d);
s->state = SSL3_ST_CW_NEXT_PROTO_B;
s->init_num = 4 + 2 + len + padding_len;
s->init_off = 0;
}
return ssl3_do_write(s, SSL3_RT_HANDSHAKE);
} | 0 | [
"CWE-310"
]
| openssl | 37580f43b5a39f5f4e920d17273fab9713d3a744 | 184,261,365,222,104,550,000,000,000,000,000,000,000 | 23 | Only allow ephemeral RSA keys in export ciphersuites.
OpenSSL clients would tolerate temporary RSA keys in non-export
ciphersuites. It also had an option SSL_OP_EPHEMERAL_RSA which
enabled this server side. Remove both options as they are a
protocol violation.
Thanks to Karthikeyan Bhargavan for reporting this issue.
(CVE-2015-0204)
Reviewed-by: Matt Caswell <[email protected]>
Reviewed-by: Tim Hudson <[email protected]>
(cherry picked from commit 4b4c1fcc88aec8c9e001b0a0077d3cd4de1ed0e6)
Conflicts:
doc/ssl/SSL_CTX_set_options.pod |
PJ_DEF(void) pj_dns_init_a_rr( pj_dns_parsed_rr *rec,
const pj_str_t *res_name,
unsigned dnsclass,
unsigned ttl,
const pj_in_addr *ip_addr)
{
pj_bzero(rec, sizeof(*rec));
rec->name = *res_name;
rec->type = PJ_DNS_TYPE_A;
rec->dnsclass = (pj_uint16_t) dnsclass;
rec->ttl = ttl;
rec->rdata.a.ip_addr = *ip_addr;
} | 0 | [
"CWE-120",
"CWE-787"
]
| pjproject | 9fae8f43accef8ea65d4a8ae9cdf297c46cfe29a | 235,227,011,606,128,060,000,000,000,000,000,000,000 | 13 | Merge pull request from GHSA-p6g5-v97c-w5q4
* Prevent heap buffer overflow when parsing DNS packets
* Make sure packet parsing doesn't advance beyond max/end
* Update checks
* Remove check
Co-authored-by: sauwming <[email protected]> |
create_sconv_object(const char *fc, const char *tc,
unsigned current_codepage, int flag)
{
struct archive_string_conv *sc;
sc = calloc(1, sizeof(*sc));
if (sc == NULL)
return (NULL);
sc->next = NULL;
sc->from_charset = strdup(fc);
if (sc->from_charset == NULL) {
free(sc);
return (NULL);
}
sc->to_charset = strdup(tc);
if (sc->to_charset == NULL) {
free(sc->from_charset);
free(sc);
return (NULL);
}
archive_string_init(&sc->utftmp);
if (flag & SCONV_TO_CHARSET) {
/*
* Convert characters from the current locale charset to
* a specified charset.
*/
sc->from_cp = current_codepage;
sc->to_cp = make_codepage_from_charset(tc);
#if defined(_WIN32) && !defined(__CYGWIN__)
if (IsValidCodePage(sc->to_cp))
flag |= SCONV_WIN_CP;
#endif
} else if (flag & SCONV_FROM_CHARSET) {
/*
* Convert characters from a specified charset to
* the current locale charset.
*/
sc->to_cp = current_codepage;
sc->from_cp = make_codepage_from_charset(fc);
#if defined(_WIN32) && !defined(__CYGWIN__)
if (IsValidCodePage(sc->from_cp))
flag |= SCONV_WIN_CP;
#endif
}
/*
* Check if "from charset" and "to charset" are the same.
*/
if (strcmp(fc, tc) == 0 ||
(sc->from_cp != (unsigned)-1 && sc->from_cp == sc->to_cp))
sc->same = 1;
else
sc->same = 0;
/*
* Mark if "from charset" or "to charset" are UTF-8 or UTF-16BE/LE.
*/
if (strcmp(tc, "UTF-8") == 0)
flag |= SCONV_TO_UTF8;
else if (strcmp(tc, "UTF-16BE") == 0)
flag |= SCONV_TO_UTF16BE;
else if (strcmp(tc, "UTF-16LE") == 0)
flag |= SCONV_TO_UTF16LE;
if (strcmp(fc, "UTF-8") == 0)
flag |= SCONV_FROM_UTF8;
else if (strcmp(fc, "UTF-16BE") == 0)
flag |= SCONV_FROM_UTF16BE;
else if (strcmp(fc, "UTF-16LE") == 0)
flag |= SCONV_FROM_UTF16LE;
#if defined(_WIN32) && !defined(__CYGWIN__)
if (sc->to_cp == CP_UTF8)
flag |= SCONV_TO_UTF8;
else if (sc->to_cp == CP_UTF16BE)
flag |= SCONV_TO_UTF16BE | SCONV_WIN_CP;
else if (sc->to_cp == CP_UTF16LE)
flag |= SCONV_TO_UTF16LE | SCONV_WIN_CP;
if (sc->from_cp == CP_UTF8)
flag |= SCONV_FROM_UTF8;
else if (sc->from_cp == CP_UTF16BE)
flag |= SCONV_FROM_UTF16BE | SCONV_WIN_CP;
else if (sc->from_cp == CP_UTF16LE)
flag |= SCONV_FROM_UTF16LE | SCONV_WIN_CP;
#endif
/*
* Set a flag for Unicode NFD. Usually iconv cannot correctly
* handle it. So we have to translate NFD characters to NFC ones
* ourselves before iconv handles. Another reason is to prevent
* that the same sight of two filenames, one is NFC and other
* is NFD, would be in its directory.
* On Mac OS X, although its filesystem layer automatically
* convert filenames to NFD, it would be useful for filename
* comparing to find out the same filenames that we normalize
* that to be NFD ourselves.
*/
if ((flag & SCONV_FROM_CHARSET) &&
(flag & (SCONV_FROM_UTF16 | SCONV_FROM_UTF8))) {
#if defined(__APPLE__)
if (flag & SCONV_TO_UTF8)
flag |= SCONV_NORMALIZATION_D;
else
#endif
flag |= SCONV_NORMALIZATION_C;
}
#if defined(__APPLE__)
/*
* In case writing an archive file, make sure that a filename
* going to be passed to iconv is a Unicode NFC string since
* a filename in HFS Plus filesystem is a Unicode NFD one and
* iconv cannot handle it with "UTF-8" charset. It is simpler
* than a use of "UTF-8-MAC" charset.
*/
if ((flag & SCONV_TO_CHARSET) &&
(flag & (SCONV_FROM_UTF16 | SCONV_FROM_UTF8)) &&
!(flag & (SCONV_TO_UTF16 | SCONV_TO_UTF8)))
flag |= SCONV_NORMALIZATION_C;
/*
* In case reading an archive file. make sure that a filename
* will be passed to users is a Unicode NFD string in order to
* correctly compare the filename with other one which comes
* from HFS Plus filesystem.
*/
if ((flag & SCONV_FROM_CHARSET) &&
!(flag & (SCONV_FROM_UTF16 | SCONV_FROM_UTF8)) &&
(flag & SCONV_TO_UTF8))
flag |= SCONV_NORMALIZATION_D;
#endif
#if defined(HAVE_ICONV)
sc->cd_w = (iconv_t)-1;
/*
* Create an iconv object.
*/
if (((flag & (SCONV_TO_UTF8 | SCONV_TO_UTF16)) &&
(flag & (SCONV_FROM_UTF8 | SCONV_FROM_UTF16))) ||
(flag & SCONV_WIN_CP)) {
/* This case we won't use iconv. */
sc->cd = (iconv_t)-1;
} else {
sc->cd = iconv_open(tc, fc);
if (sc->cd == (iconv_t)-1 && (sc->flag & SCONV_BEST_EFFORT)) {
/*
* Unfortunaly, all of iconv implements do support
* "CP932" character-set, so we should use "SJIS"
* instead if iconv_open failed.
*/
if (strcmp(tc, "CP932") == 0)
sc->cd = iconv_open("SJIS", fc);
else if (strcmp(fc, "CP932") == 0)
sc->cd = iconv_open(tc, "SJIS");
}
#if defined(_WIN32) && !defined(__CYGWIN__)
/*
* archive_mstring on Windows directly convert multi-bytes
* into archive_wstring in order not to depend on locale
* so that you can do a I18N programing. This will be
* used only in archive_mstring_copy_mbs_len_l so far.
*/
if (flag & SCONV_FROM_CHARSET) {
sc->cd_w = iconv_open("UTF-8", fc);
if (sc->cd_w == (iconv_t)-1 &&
(sc->flag & SCONV_BEST_EFFORT)) {
if (strcmp(fc, "CP932") == 0)
sc->cd_w = iconv_open("UTF-8", "SJIS");
}
}
#endif /* _WIN32 && !__CYGWIN__ */
}
#endif /* HAVE_ICONV */
sc->flag = flag;
/*
* Set up converters.
*/
setup_converter(sc);
return (sc);
} | 0 | [
"CWE-476"
]
| libarchive | 42a3408ac7df1e69bea9ea12b72e14f59f7400c0 | 2,777,716,316,181,340,000,000,000,000,000,000,000 | 180 | archive_strncat_l(): allocate and do not convert if length == 0
This ensures e.g. that archive_mstring_copy_mbs_len_l() does not set
aes_set = AES_SET_MBS with aes_mbs.s == NULL.
Resolves possible null-pointer dereference reported by OSS-Fuzz.
Reported-By: OSS-Fuzz issue 286 |
static int ffm_write_write_index(int fd, int64_t pos)
{
uint8_t buf[8];
int i;
for(i=0;i<8;i++)
buf[i] = (pos >> (56 - i * 8)) & 0xff;
if (lseek(fd, 8, SEEK_SET) < 0)
goto bail_eio;
if (write(fd, buf, 8) != 8)
goto bail_eio;
return 8;
bail_eio:
return AVERROR(EIO);
} | 0 | [
"CWE-119",
"CWE-787"
]
| FFmpeg | a5d25faa3f4b18dac737fdb35d0dd68eb0dc2156 | 166,822,098,014,675,030,000,000,000,000,000,000,000 | 17 | ffserver: Check chunk size
Fixes out of array access
Fixes: poc_ffserver.py
Found-by: Paul Cher <[email protected]>
Signed-off-by: Michael Niedermayer <[email protected]> |
static void FVMenuVKernFromHKern(GWindow gw, struct gmenuitem *UNUSED(mi), GEvent *UNUSED(e)) {
FontView *fv = (FontView *) GDrawGetUserData(gw);
FVVKernFromHKern(&fv->b);
} | 0 | [
"CWE-119",
"CWE-787"
]
| fontforge | 626f751752875a0ddd74b9e217b6f4828713573c | 168,367,838,143,970,720,000,000,000,000,000,000,000 | 5 | Warn users before discarding their unsaved scripts (#3852)
* Warn users before discarding their unsaved scripts
This closes #3846. |
WebPImage::WebPImage(BasicIo::UniquePtr io)
: Image(ImageType::webp, mdNone, std::move(io))
{
} // WebPImage::WebPImage | 0 | [
"CWE-190"
]
| exiv2 | c73d1e27198a389ce7caf52ac30f8e2120acdafd | 185,907,058,187,741,100,000,000,000,000,000,000,000 | 4 | Avoid negative integer overflow when `filesize < io_->tell()`.
This fixes #791. |
static int on_stream_io(sd_event_source *es, int fd, uint32_t revents, void *userdata) {
DnsStream *s = userdata;
int r;
assert(s);
#if ENABLE_DNS_OVER_TLS
if (s->encrypted) {
r = dnstls_stream_on_io(s, revents);
if (r == DNSTLS_STREAM_CLOSED)
return 0;
if (r == -EAGAIN)
return dns_stream_update_io(s);
if (r < 0)
return dns_stream_complete(s, -r);
r = dns_stream_update_io(s);
if (r < 0)
return r;
}
#endif
/* only identify after connecting */
if (s->tfo_salen == 0) {
r = dns_stream_identify(s);
if (r < 0)
return dns_stream_complete(s, -r);
}
if ((revents & EPOLLOUT) &&
s->write_packet &&
s->n_written < sizeof(s->write_size) + s->write_packet->size) {
struct iovec iov[2];
ssize_t ss;
iov[0] = IOVEC_MAKE(&s->write_size, sizeof(s->write_size));
iov[1] = IOVEC_MAKE(DNS_PACKET_DATA(s->write_packet), s->write_packet->size);
IOVEC_INCREMENT(iov, 2, s->n_written);
ss = dns_stream_writev(s, iov, 2, 0);
if (ss < 0) {
if (!IN_SET(-ss, EINTR, EAGAIN))
return dns_stream_complete(s, -ss);
} else
s->n_written += ss;
/* Are we done? If so, disable the event source for EPOLLOUT */
if (s->n_written >= sizeof(s->write_size) + s->write_packet->size) {
r = dns_stream_update_io(s);
if (r < 0)
return dns_stream_complete(s, -r);
}
}
if ((revents & (EPOLLIN|EPOLLHUP|EPOLLRDHUP)) &&
(!s->read_packet ||
s->n_read < sizeof(s->read_size) + s->read_packet->size)) {
if (s->n_read < sizeof(s->read_size)) {
ssize_t ss;
ss = dns_stream_read(s, (uint8_t*) &s->read_size + s->n_read, sizeof(s->read_size) - s->n_read);
if (ss < 0) {
if (!IN_SET(-ss, EINTR, EAGAIN))
return dns_stream_complete(s, -ss);
} else if (ss == 0)
return dns_stream_complete(s, ECONNRESET);
else
s->n_read += ss;
}
if (s->n_read >= sizeof(s->read_size)) {
if (be16toh(s->read_size) < DNS_PACKET_HEADER_SIZE)
return dns_stream_complete(s, EBADMSG);
if (s->n_read < sizeof(s->read_size) + be16toh(s->read_size)) {
ssize_t ss;
if (!s->read_packet) {
r = dns_packet_new(&s->read_packet, s->protocol, be16toh(s->read_size), DNS_PACKET_SIZE_MAX);
if (r < 0)
return dns_stream_complete(s, -r);
s->read_packet->size = be16toh(s->read_size);
s->read_packet->ipproto = IPPROTO_TCP;
s->read_packet->family = s->peer.sa.sa_family;
s->read_packet->ttl = s->ttl;
s->read_packet->ifindex = s->ifindex;
if (s->read_packet->family == AF_INET) {
s->read_packet->sender.in = s->peer.in.sin_addr;
s->read_packet->sender_port = be16toh(s->peer.in.sin_port);
s->read_packet->destination.in = s->local.in.sin_addr;
s->read_packet->destination_port = be16toh(s->local.in.sin_port);
} else {
assert(s->read_packet->family == AF_INET6);
s->read_packet->sender.in6 = s->peer.in6.sin6_addr;
s->read_packet->sender_port = be16toh(s->peer.in6.sin6_port);
s->read_packet->destination.in6 = s->local.in6.sin6_addr;
s->read_packet->destination_port = be16toh(s->local.in6.sin6_port);
if (s->read_packet->ifindex == 0)
s->read_packet->ifindex = s->peer.in6.sin6_scope_id;
if (s->read_packet->ifindex == 0)
s->read_packet->ifindex = s->local.in6.sin6_scope_id;
}
}
ss = dns_stream_read(s,
(uint8_t*) DNS_PACKET_DATA(s->read_packet) + s->n_read - sizeof(s->read_size),
sizeof(s->read_size) + be16toh(s->read_size) - s->n_read);
if (ss < 0) {
if (!IN_SET(-ss, EINTR, EAGAIN))
return dns_stream_complete(s, -ss);
} else if (ss == 0)
return dns_stream_complete(s, ECONNRESET);
else
s->n_read += ss;
}
/* Are we done? If so, disable the event source for EPOLLIN */
if (s->n_read >= sizeof(s->read_size) + be16toh(s->read_size)) {
/* If there's a packet handler
* installed, call that. Note that
* this is optional... */
if (s->on_packet) {
r = s->on_packet(s);
if (r < 0)
return r;
}
r = dns_stream_update_io(s);
if (r < 0)
return dns_stream_complete(s, -r);
}
}
}
if ((s->write_packet && s->n_written >= sizeof(s->write_size) + s->write_packet->size) &&
(s->read_packet && s->n_read >= sizeof(s->read_size) + s->read_packet->size))
return dns_stream_complete(s, 0);
return 0;
} | 1 | [
"CWE-416",
"CWE-703"
]
| systemd | d973d94dec349fb676fdd844f6fe2ada3538f27c | 223,326,031,509,501,250,000,000,000,000,000,000,000 | 147 | resolved: pin stream while calling callbacks for it
These callbacks might unref the stream, but we still have to access it,
let's hence ref it explicitly.
Maybe fixes: #10725 |
int SELECT_LEX::period_setup_conds(THD *thd, TABLE_LIST *tables)
{
DBUG_ENTER("SELECT_LEX::period_setup_conds");
const bool update_conds= !skip_setup_conds(thd);
Query_arena backup;
Query_arena *arena= thd->activate_stmt_arena_if_needed(&backup);
DBUG_ASSERT(!tables->next_local && tables->table);
Item *result= NULL;
for (TABLE_LIST *table= tables; table; table= table->next_local)
{
if (!table->table)
continue;
vers_select_conds_t &conds= table->period_conditions;
if (!table->table->s->period.name.streq(conds.name))
{
my_error(ER_PERIOD_NOT_FOUND, MYF(0), conds.name.str);
if (arena)
thd->restore_active_arena(arena, &backup);
DBUG_RETURN(-1);
}
if (update_conds)
{
conds.period= &table->table->s->period;
result= and_items(thd, result,
period_get_condition(thd, table, this, &conds, true));
}
}
if (update_conds)
where= and_items(thd, where, result);
if (arena)
thd->restore_active_arena(arena, &backup);
DBUG_RETURN(0);
} | 0 | []
| server | 8c34eab9688b4face54f15f89f5d62bdfd93b8a7 | 230,306,615,573,361,970,000,000,000,000,000,000,000 | 39 | MDEV-28094 Window function in expression in ORDER BY
call item->split_sum_func() in setup_order() just as
it's done in setup_fields() |
_dbus_open_socket (int *fd_p,
int domain,
int type,
int protocol,
DBusError *error)
{
#ifdef SOCK_CLOEXEC
dbus_bool_t cloexec_done;
*fd_p = socket (domain, type | SOCK_CLOEXEC, protocol);
cloexec_done = *fd_p >= 0;
/* Check if kernel seems to be too old to know SOCK_CLOEXEC */
if (*fd_p < 0 && (errno == EINVAL || errno == EPROTOTYPE))
#endif
{
*fd_p = socket (domain, type, protocol);
}
if (*fd_p >= 0)
{
#ifdef SOCK_CLOEXEC
if (!cloexec_done)
#endif
{
_dbus_fd_set_close_on_exec(*fd_p);
}
_dbus_verbose ("socket fd %d opened\n", *fd_p);
return TRUE;
}
else
{
dbus_set_error(error,
_dbus_error_from_errno (errno),
"Failed to open socket: %s",
_dbus_strerror (errno));
return FALSE;
}
} | 0 | [
"CWE-404"
]
| dbus | 872b085f12f56da25a2dbd9bd0b2dff31d5aea63 | 174,059,251,725,830,600,000,000,000,000,000,000,000 | 40 | sysdeps-unix: On MSG_CTRUNC, close the fds we did receive
MSG_CTRUNC indicates that we have received fewer fds that we should
have done because the buffer was too small, but we were treating it
as though it indicated that we received *no* fds. If we received any,
we still have to make sure we close them, otherwise they will be leaked.
On the system bus, if an attacker can induce us to leak fds in this
way, that's a local denial of service via resource exhaustion.
Reported-by: Kevin Backhouse, GitHub Security Lab
Fixes: dbus#294
Fixes: CVE-2020-12049
Fixes: GHSL-2020-057 |
static int query_raw_packet_qp_sq_state(struct mlx5_ib_dev *dev,
struct mlx5_ib_sq *sq,
u8 *sq_state)
{
int err;
err = mlx5_core_query_sq_state(dev->mdev, sq->base.mqp.qpn, sq_state);
if (err)
goto out;
sq->state = *sq_state;
out:
return err;
} | 0 | [
"CWE-119",
"CWE-787"
]
| linux | 0625b4ba1a5d4703c7fb01c497bd6c156908af00 | 306,214,506,473,721,640,000,000,000,000,000,000,000 | 14 | IB/mlx5: Fix leaking stack memory to userspace
mlx5_ib_create_qp_resp was never initialized and only the first 4 bytes
were written.
Fixes: 41d902cb7c32 ("RDMA/mlx5: Fix definition of mlx5_ib_create_qp_resp")
Cc: <[email protected]>
Acked-by: Leon Romanovsky <[email protected]>
Signed-off-by: Jason Gunthorpe <[email protected]> |
vgr_get_auname(cmdidx_T cmdidx)
{
switch (cmdidx)
{
case CMD_vimgrep: return (char_u *)"vimgrep";
case CMD_lvimgrep: return (char_u *)"lvimgrep";
case CMD_vimgrepadd: return (char_u *)"vimgrepadd";
case CMD_lvimgrepadd: return (char_u *)"lvimgrepadd";
case CMD_grep: return (char_u *)"grep";
case CMD_lgrep: return (char_u *)"lgrep";
case CMD_grepadd: return (char_u *)"grepadd";
case CMD_lgrepadd: return (char_u *)"lgrepadd";
default: return NULL;
}
} | 0 | [
"CWE-416"
]
| vim | 4f1b083be43f351bc107541e7b0c9655a5d2c0bb | 327,949,159,045,208,740,000,000,000,000,000,000,000 | 15 | patch 9.0.0322: crash when no errors and 'quickfixtextfunc' is set
Problem: Crash when no errors and 'quickfixtextfunc' is set.
Solution: Do not handle errors if there aren't any. |
static void *etm_setup_aux(int event_cpu, void **pages,
int nr_pages, bool overwrite)
{
int cpu;
cpumask_t *mask;
struct coresight_device *sink;
struct etm_event_data *event_data = NULL;
event_data = alloc_event_data(event_cpu);
if (!event_data)
return NULL;
/*
* In theory nothing prevent tracers in a trace session from being
* associated with different sinks, nor having a sink per tracer. But
* until we have HW with this kind of topology we need to assume tracers
* in a trace session are using the same sink. Therefore go through
* the coresight bus and pick the first enabled sink.
*
* When operated from sysFS users are responsible to enable the sink
* while from perf, the perf tools will do it based on the choice made
* on the cmd line. As such the "enable_sink" flag in sysFS is reset.
*/
sink = coresight_get_enabled_sink(true);
if (!sink)
goto err;
INIT_WORK(&event_data->work, free_event_data);
mask = &event_data->mask;
/* Setup the path for each CPU in a trace session */
for_each_cpu(cpu, mask) {
struct coresight_device *csdev;
csdev = per_cpu(csdev_src, cpu);
if (!csdev)
goto err;
/*
* Building a path doesn't enable it, it simply builds a
* list of devices from source to sink that can be
* referenced later when the path is actually needed.
*/
event_data->path[cpu] = coresight_build_path(csdev, sink);
if (IS_ERR(event_data->path[cpu]))
goto err;
}
if (!sink_ops(sink)->alloc_buffer)
goto err;
cpu = cpumask_first(mask);
/* Get the AUX specific data from the sink buffer */
event_data->snk_config =
sink_ops(sink)->alloc_buffer(sink, cpu, pages,
nr_pages, overwrite);
if (!event_data->snk_config)
goto err;
out:
return event_data;
err:
etm_free_aux(event_data);
event_data = NULL;
goto out;
} | 0 | [
"CWE-20",
"CWE-476"
]
| linux | f09444639099584bc4784dfcd85ada67c6f33e0f | 142,687,314,200,012,320,000,000,000,000,000,000,000 | 68 | coresight: fix kernel panic caused by invalid CPU
Commit d52c9750f150 ("coresight: reset "enable_sink" flag when need be")
caused a kernel panic because of the using of an invalid value: after
'for_each_cpu(cpu, mask)', value of local variable 'cpu' become invalid,
causes following 'cpu_to_node' access invalid memory area.
This patch brings the deleted 'cpu = cpumask_first(mask)' back.
Panic log:
$ perf record -e cs_etm// ls
Unable to handle kernel paging request at virtual address fffe801804af4f10
pgd = ffff8017ce031600
[fffe801804af4f10] *pgd=0000000000000000, *pud=0000000000000000
Internal error: Oops: 96000004 [#1] SMP
Modules linked in:
CPU: 33 PID: 1619 Comm: perf Not tainted 4.7.1+ #16
Hardware name: Huawei Taishan 2280 /CH05TEVBA, BIOS 1.10 11/24/2016
task: ffff8017cb0c8400 ti: ffff8017cb154000 task.ti: ffff8017cb154000
PC is at tmc_alloc_etf_buffer+0x60/0xd4
LR is at tmc_alloc_etf_buffer+0x44/0xd4
pc : [<ffff000008633df8>] lr : [<ffff000008633ddc>] pstate: 60000145
sp : ffff8017cb157b40
x29: ffff8017cb157b40 x28: 0000000000000000
...skip...
7a60: ffff000008c64dc8 0000000000000006 0000000000000253 ffffffffffffffff
7a80: 0000000000000000 0000000000000000 ffff0000080872cc 0000000000000001
[<ffff000008633df8>] tmc_alloc_etf_buffer+0x60/0xd4
[<ffff000008632b9c>] etm_setup_aux+0x1dc/0x1e8
[<ffff00000816eed4>] rb_alloc_aux+0x2b0/0x338
[<ffff00000816a5e4>] perf_mmap+0x414/0x568
[<ffff0000081ab694>] mmap_region+0x324/0x544
[<ffff0000081abbe8>] do_mmap+0x334/0x3e0
[<ffff000008191150>] vm_mmap_pgoff+0xa4/0xc8
[<ffff0000081a9a30>] SyS_mmap_pgoff+0xb0/0x22c
[<ffff0000080872e4>] sys_mmap+0x18/0x28
[<ffff0000080843f0>] el0_svc_naked+0x24/0x28
Code: 912040a5 d0001c00 f873d821 911c6000 (b8656822)
---[ end trace 98933da8f92b0c9a ]---
Signed-off-by: Wang Nan <[email protected]>
Cc: Xia Kaixu <[email protected]>
Cc: Li Zefan <[email protected]>
Cc: Mathieu Poirier <[email protected]>
Cc: [email protected]
Cc: [email protected]
Fixes: d52c9750f150 ("coresight: reset "enable_sink" flag when need be")
Signed-off-by: Mathieu Poirier <[email protected]>
Cc: stable <[email protected]> # 4.10
Signed-off-by: Greg Kroah-Hartman <[email protected]> |
SPL_METHOD(SplObjectStorage, unserialize)
{
spl_SplObjectStorage *intern = (spl_SplObjectStorage*)zend_object_store_get_object(getThis() TSRMLS_CC);
char *buf;
int buf_len;
const unsigned char *p, *s;
php_unserialize_data_t var_hash;
zval *pentry, *pmembers, *pcount = NULL, *pinf;
long count;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s", &buf, &buf_len) == FAILURE) {
return;
}
if (buf_len == 0) {
zend_throw_exception_ex(spl_ce_UnexpectedValueException, 0 TSRMLS_CC, "Empty serialized string cannot be empty");
return;
}
/* storage */
s = p = (const unsigned char*)buf;
PHP_VAR_UNSERIALIZE_INIT(var_hash);
if (*p!= 'x' || *++p != ':') {
goto outexcept;
}
++p;
ALLOC_INIT_ZVAL(pcount);
if (!php_var_unserialize(&pcount, &p, s + buf_len, &var_hash TSRMLS_CC) || Z_TYPE_P(pcount) != IS_LONG) {
goto outexcept;
}
--p; /* for ';' */
count = Z_LVAL_P(pcount);
while(count-- > 0) {
spl_SplObjectStorageElement *pelement;
char *hash;
int hash_len;
if (*p != ';') {
goto outexcept;
}
++p;
if(*p != 'O' && *p != 'C' && *p != 'r') {
goto outexcept;
}
ALLOC_INIT_ZVAL(pentry);
if (!php_var_unserialize(&pentry, &p, s + buf_len, &var_hash TSRMLS_CC)) {
zval_ptr_dtor(&pentry);
goto outexcept;
}
if(Z_TYPE_P(pentry) != IS_OBJECT) {
zval_ptr_dtor(&pentry);
goto outexcept;
}
ALLOC_INIT_ZVAL(pinf);
if (*p == ',') { /* new version has inf */
++p;
if (!php_var_unserialize(&pinf, &p, s + buf_len, &var_hash TSRMLS_CC)) {
zval_ptr_dtor(&pinf);
goto outexcept;
}
}
hash = spl_object_storage_get_hash(intern, getThis(), pentry, &hash_len TSRMLS_CC);
if (!hash) {
zval_ptr_dtor(&pentry);
zval_ptr_dtor(&pinf);
goto outexcept;
}
pelement = spl_object_storage_get(intern, hash, hash_len TSRMLS_CC);
spl_object_storage_free_hash(intern, hash);
if(pelement) {
if(pelement->inf) {
var_push_dtor(&var_hash, &pelement->inf);
}
if(pelement->obj) {
var_push_dtor(&var_hash, &pelement->obj);
}
}
spl_object_storage_attach(intern, getThis(), pentry, pinf TSRMLS_CC);
zval_ptr_dtor(&pentry);
zval_ptr_dtor(&pinf);
}
if (*p != ';') {
goto outexcept;
}
++p;
/* members */
if (*p!= 'm' || *++p != ':') {
goto outexcept;
}
++p;
ALLOC_INIT_ZVAL(pmembers);
if (!php_var_unserialize(&pmembers, &p, s + buf_len, &var_hash TSRMLS_CC) || Z_TYPE_P(pmembers) != IS_ARRAY) {
zval_ptr_dtor(&pmembers);
goto outexcept;
}
/* copy members */
if (!intern->std.properties) {
rebuild_object_properties(&intern->std);
}
zend_hash_copy(intern->std.properties, Z_ARRVAL_P(pmembers), (copy_ctor_func_t) zval_add_ref, (void *) NULL, sizeof(zval *));
zval_ptr_dtor(&pmembers);
/* done reading $serialized */
if (pcount) {
zval_ptr_dtor(&pcount);
}
PHP_VAR_UNSERIALIZE_DESTROY(var_hash);
return;
outexcept:
if (pcount) {
zval_ptr_dtor(&pcount);
}
PHP_VAR_UNSERIALIZE_DESTROY(var_hash);
zend_throw_exception_ex(spl_ce_UnexpectedValueException, 0 TSRMLS_CC, "Error at offset %ld of %d bytes", (long)((char*)p - buf), buf_len);
return;
} /* }}} */ | 1 | [
"CWE-416"
]
| php-src | c2e197e4efc663ca55f393bf0e799848842286f3 | 263,106,354,654,996,170,000,000,000,000,000,000,000 | 128 | Fix bug #70168 - Use After Free Vulnerability in unserialize() with SplObjectStorage |
static void set_x11_file(pid_t pid, int display) {
char *fname;
if (asprintf(&fname, "%s/%d", RUN_FIREJAIL_X11_DIR, pid) == -1)
errExit("asprintf");
// the file is deleted first
FILE *fp = fopen(fname, "w");
if (!fp) {
fprintf(stderr, "Error: cannot create %s\n", fname);
exit(1);
}
fprintf(fp, "%d\n", display);
// mode and ownership
SET_PERMS_STREAM(fp, 0, 0, 0644);
fclose(fp);
} | 0 | [
"CWE-703"
]
| firejail | 6b8dba29d73257311564ee7f27b9b14758cc693e | 288,710,942,688,841,750,000,000,000,000,000,000,000 | 17 | security fix |
parse_tlv_tnl_enc_lim(struct sk_buff *skb, __u8 * raw)
{
struct ipv6hdr *ipv6h = (struct ipv6hdr *) raw;
__u8 nexthdr = ipv6h->nexthdr;
__u16 off = sizeof (*ipv6h);
while (ipv6_ext_hdr(nexthdr) && nexthdr != NEXTHDR_NONE) {
__u16 optlen = 0;
struct ipv6_opt_hdr *hdr;
if (raw + off + sizeof (*hdr) > skb->data &&
!pskb_may_pull(skb, raw - skb->data + off + sizeof (*hdr)))
break;
hdr = (struct ipv6_opt_hdr *) (raw + off);
if (nexthdr == NEXTHDR_FRAGMENT) {
struct frag_hdr *frag_hdr = (struct frag_hdr *) hdr;
if (frag_hdr->frag_off)
break;
optlen = 8;
} else if (nexthdr == NEXTHDR_AUTH) {
optlen = (hdr->hdrlen + 2) << 2;
} else {
optlen = ipv6_optlen(hdr);
}
if (nexthdr == NEXTHDR_DEST) {
__u16 i = off + 2;
while (1) {
struct ipv6_tlv_tnl_enc_lim *tel;
/* No more room for encapsulation limit */
if (i + sizeof (*tel) > off + optlen)
break;
tel = (struct ipv6_tlv_tnl_enc_lim *) &raw[i];
/* return index of option if found and valid */
if (tel->type == IPV6_TLV_TNL_ENCAP_LIMIT &&
tel->length == 1)
return i;
/* else jump to next option */
if (tel->type)
i += tel->length + 2;
else
i++;
}
}
nexthdr = hdr->nexthdr;
off += optlen;
}
return 0;
} | 0 | []
| linux-2.6 | d5aa407f59f5b83d2c50ec88f5bf56d40f1f8978 | 160,384,927,135,482,120,000,000,000,000,000,000,000 | 50 | tunnels: fix netns vs proto registration ordering
Same stuff as in ip_gre patch: receive hook can be called before netns
setup is done, oopsing in net_generic().
Signed-off-by: Alexey Dobriyan <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
inline float uint2float(const unsigned int u) {
if (u<(1U<<19)) return (float)u; // Consider safe storage of unsigned int as floats until 19bits (i.e 524287).
float f;
const unsigned int v = u|(1U<<(8*sizeof(unsigned int)-1)); // set sign bit to 1.
// use memcpy instead of simple assignment to avoid undesired optimizations by C++-compiler.
std::memcpy(&f,&v,sizeof(float));
return f; | 0 | [
"CWE-125"
]
| CImg | 10af1e8c1ad2a58a0a3342a856bae63e8f257abb | 45,181,601,628,303,190,000,000,000,000,000,000,000 | 8 | Fix other issues in 'CImg<T>::load_bmp()'. |
print_indent_tree(FILE* f, Node* node, int indent)
{
int i;
NodeType type;
UChar* p;
int add = 3;
Indent(f, indent);
if (IS_NULL(node)) {
fprintf(f, "ERROR: null node!!!\n");
exit(0);
}
type = NODE_TYPE(node);
switch (type) {
case NODE_LIST:
case NODE_ALT:
if (type == NODE_LIST)
fprintf(f, "<list:%p>\n", node);
else
fprintf(f, "<alt:%p>\n", node);
print_indent_tree(f, NODE_CAR(node), indent + add);
while (IS_NOT_NULL(node = NODE_CDR(node))) {
if (NODE_TYPE(node) != type) {
fprintf(f, "ERROR: list/alt right is not a cons. %d\n", NODE_TYPE(node));
exit(0);
}
print_indent_tree(f, NODE_CAR(node), indent + add);
}
break;
case NODE_STRING:
{
char* str;
char* mode;
if (NODE_STRING_IS_CRUDE(node))
mode = "-crude";
else if (NODE_IS_IGNORECASE(node))
mode = "-ignorecase";
else
mode = "";
if (STR_(node)->s == STR_(node)->end)
str = "empty-string";
else
str = "string";
fprintf(f, "<%s%s:%p>", str, mode, node);
for (p = STR_(node)->s; p < STR_(node)->end; p++) {
if (*p >= 0x20 && *p < 0x7f)
fputc(*p, f);
else {
fprintf(f, " 0x%02x", *p);
}
}
}
break;
case NODE_CCLASS:
#define CCLASS_MBUF_MAX_OUTPUT_NUM 10
fprintf(f, "<cclass:%p>", node);
if (IS_NCCLASS_NOT(CCLASS_(node))) fputs(" not", f);
if (CCLASS_(node)->mbuf) {
BBuf* bbuf = CCLASS_(node)->mbuf;
fprintf(f, " mbuf(%u) ", bbuf->used);
for (i = 0; i < bbuf->used && i < CCLASS_MBUF_MAX_OUTPUT_NUM; i++) {
if (i > 0) fprintf(f, ",");
fprintf(f, "%0x", bbuf->p[i]);
}
if (i < bbuf->used) fprintf(f, "...");
}
break;
case NODE_CTYPE:
fprintf(f, "<ctype:%p> ", node);
switch (CTYPE_(node)->ctype) {
case CTYPE_ANYCHAR:
fprintf(f, "anychar");
break;
case ONIGENC_CTYPE_WORD:
if (CTYPE_(node)->not != 0)
fputs("not word", f);
else
fputs("word", f);
if (CTYPE_(node)->ascii_mode != 0)
fputs(" (ascii)", f);
break;
default:
fprintf(f, "ERROR: undefined ctype.\n");
exit(0);
}
break;
case NODE_ANCHOR:
fprintf(f, "<anchor:%p> ", node);
switch (ANCHOR_(node)->type) {
case ANCR_BEGIN_BUF: fputs("begin buf", f); break;
case ANCR_END_BUF: fputs("end buf", f); break;
case ANCR_BEGIN_LINE: fputs("begin line", f); break;
case ANCR_END_LINE: fputs("end line", f); break;
case ANCR_SEMI_END_BUF: fputs("semi end buf", f); break;
case ANCR_BEGIN_POSITION: fputs("begin position", f); break;
case ANCR_WORD_BOUNDARY: fputs("word boundary", f); break;
case ANCR_NO_WORD_BOUNDARY: fputs("not word boundary", f); break;
#ifdef USE_WORD_BEGIN_END
case ANCR_WORD_BEGIN: fputs("word begin", f); break;
case ANCR_WORD_END: fputs("word end", f); break;
#endif
case ANCR_TEXT_SEGMENT_BOUNDARY:
fputs("text-segment boundary", f); break;
case ANCR_NO_TEXT_SEGMENT_BOUNDARY:
fputs("no text-segment boundary", f); break;
case ANCR_PREC_READ:
fprintf(f, "prec read\n");
print_indent_tree(f, NODE_BODY(node), indent + add);
break;
case ANCR_PREC_READ_NOT:
fprintf(f, "prec read not\n");
print_indent_tree(f, NODE_BODY(node), indent + add);
break;
case ANCR_LOOK_BEHIND:
fprintf(f, "look behind\n");
print_indent_tree(f, NODE_BODY(node), indent + add);
break;
case ANCR_LOOK_BEHIND_NOT:
fprintf(f, "look behind not\n");
print_indent_tree(f, NODE_BODY(node), indent + add);
break;
default:
fprintf(f, "ERROR: undefined anchor type.\n");
break;
}
break;
case NODE_BACKREF:
{
int* p;
BackRefNode* br = BACKREF_(node);
p = BACKREFS_P(br);
fprintf(f, "<backref%s:%p>", NODE_IS_CHECKER(node) ? "-checker" : "", node);
for (i = 0; i < br->back_num; i++) {
if (i > 0) fputs(", ", f);
fprintf(f, "%d", p[i]);
}
#ifdef USE_BACKREF_WITH_LEVEL
if (NODE_IS_NEST_LEVEL(node)) {
fprintf(f, ", level: %d", br->nest_level);
}
#endif
}
break;
#ifdef USE_CALL
case NODE_CALL:
{
CallNode* cn = CALL_(node);
fprintf(f, "<call:%p>", node);
fprintf(f, " num: %d, name", cn->called_gnum);
p_string(f, cn->name_end - cn->name, cn->name);
}
break;
#endif
case NODE_QUANT:
fprintf(f, "<quantifier:%p>{%d,%d}%s%s\n", node,
QUANT_(node)->lower, QUANT_(node)->upper,
(QUANT_(node)->greedy ? "" : "?"),
QUANT_(node)->include_referred == 0 ? "" : " referred");
print_indent_tree(f, NODE_BODY(node), indent + add);
break;
case NODE_BAG:
fprintf(f, "<bag:%p> ", node);
if (BAG_(node)->type == BAG_IF_ELSE) {
Node* Then;
Node* Else;
BagNode* bn;
bn = BAG_(node);
fprintf(f, "if-else\n");
print_indent_tree(f, NODE_BODY(node), indent + add);
Then = bn->te.Then;
Else = bn->te.Else;
if (IS_NULL(Then)) {
Indent(f, indent + add);
fprintf(f, "THEN empty\n");
}
else
print_indent_tree(f, Then, indent + add);
if (IS_NULL(Else)) {
Indent(f, indent + add);
fprintf(f, "ELSE empty\n");
}
else
print_indent_tree(f, Else, indent + add);
break;
}
switch (BAG_(node)->type) {
case BAG_OPTION:
fprintf(f, "option:%d", BAG_(node)->o.options);
break;
case BAG_MEMORY:
fprintf(f, "memory:%d", BAG_(node)->m.regnum);
if (NODE_IS_CALLED(node))
fprintf(f, ", called");
else if (NODE_IS_REFERENCED(node))
fprintf(f, ", referenced");
if (NODE_IS_FIXED_ADDR(node))
fprintf(f, ", fixed-addr");
break;
case BAG_STOP_BACKTRACK:
fprintf(f, "stop-bt");
break;
default:
break;
}
fprintf(f, "\n");
print_indent_tree(f, NODE_BODY(node), indent + add);
break;
case NODE_GIMMICK:
fprintf(f, "<gimmick:%p> ", node);
switch (GIMMICK_(node)->type) {
case GIMMICK_FAIL:
fprintf(f, "fail");
break;
case GIMMICK_SAVE:
fprintf(f, "save:%d:%d", GIMMICK_(node)->detail_type, GIMMICK_(node)->id);
break;
case GIMMICK_UPDATE_VAR:
fprintf(f, "update_var:%d:%d", GIMMICK_(node)->detail_type, GIMMICK_(node)->id);
break;
#ifdef USE_CALLOUT
case GIMMICK_CALLOUT:
switch (GIMMICK_(node)->detail_type) {
case ONIG_CALLOUT_OF_CONTENTS:
fprintf(f, "callout:contents:%d", GIMMICK_(node)->num);
break;
case ONIG_CALLOUT_OF_NAME:
fprintf(f, "callout:name:%d:%d", GIMMICK_(node)->id, GIMMICK_(node)->num);
break;
}
#endif
}
break;
default:
fprintf(f, "print_indent_tree: undefined node type %d\n", NODE_TYPE(node));
break;
}
if (type != NODE_LIST && type != NODE_ALT && type != NODE_QUANT &&
type != NODE_BAG)
fprintf(f, "\n");
fflush(f);
} | 0 | [
"CWE-787"
]
| oniguruma | cbe9f8bd9cfc6c3c87a60fbae58fa1a85db59df0 | 2,730,181,118,605,986,000,000,000,000,000,000,000 | 269 | #207: Out-of-bounds write |
/*
 * Paste the register selected by the first character of `buf`.
 * A non-zero `len` apparently signals an incomplete/aborted key
 * sequence: the selector is cleared and nothing is pasted -- TODO
 * confirm against the caller.  Otherwise the register's stored text is
 * fed back through ProcessInput(), or an error is shown if the
 * register holds nothing.  `data` is unused.
 */
process_fn(buf, len, data)
char *buf;
int len;
char *data; /* dummy */
{
  struct plop *reg = &plop_tab[(unsigned char)*buf];

  if (len)
    {
      *buf = 0;
      return;
    }
  if (reg->buf == 0)
    {
      Msg(0, "Empty register.");
      return;
    }
  ProcessInput(reg->buf, reg->len);
}
| screen | c5db181b6e017cfccb8d7842ce140e59294d9f62 | 4,215,822,868,640,108,000,000,000,000,000,000,000 | 19 | ansi: add support for xterm OSC 11
It allows for getting and setting the background color. Notably, Vim uses
OSC 11 to learn whether it's running on a light or dark colored terminal
and choose a color scheme accordingly.
Tested with gnome-terminal and xterm. When called with "?" argument the
current background color is returned:
$ echo -ne "\e]11;?\e\\"
$ 11;rgb:2323/2727/2929
Signed-off-by: Lubomir Rintel <[email protected]>
(cherry picked from commit 7059bff20a28778f9d3acf81cad07b1388d02309)
Signed-off-by: Amadeusz Sławiński <[email protected] |
/*
 * Translate a virtual address into a file offset by locating the load
 * segment whose [vmaddr, vmaddr + vmsize) range contains it.
 * Returns 0 when there are no segments or no segment covers `addr`
 * (0 therefore doubles as the "not found" value).
 */
static ut64 addr_to_offset(struct MACH0_(obj_t) *bin, ut64 addr) {
	size_t i;

	if (!bin->segs) {
		return 0;
	}
	for (i = 0; i < bin->nsegs; i++) {
		const ut64 base = (ut64)bin->segs[i].vmaddr;
		const ut64 size = (ut64)bin->segs[i].vmsize;
		if (addr < base || addr >= base + size) {
			continue;
		}
		/* Offset of the segment in the file plus the delta inside it. */
		return bin->segs[i].fileoff + (addr - base);
	}
	return 0;
}
"CWE-125",
"CWE-787"
]
| radare2 | 0052500c1ed5bf8263b26b9fd7773dbdc6f170c4 | 257,517,615,274,529,800,000,000,000,000,000,000,000 | 13 | Fix heap OOB read in macho.iterate_chained_fixups ##crash
* Reported by peacock-doris via huntr.dev
* Reproducer 'tests_65305'
mrmacete:
* Return early if segs_count is 0
* Initialize segs_count also for reconstructed fixups
Co-authored-by: pancake <[email protected]>
Co-authored-by: Francesco Tamagni <[email protected]> |
/*
 * Build and submit the FUSE_READ request backing one readahead batch.
 *
 * @ia:   I/O args whose page vector (ia->ap) is already populated
 * @file: file being read; its fuse_file supplies the mount/connection
 *
 * If the connection supports async reads, the request is queued in the
 * background and fuse_readpages_end() runs as the completion callback;
 * otherwise the request is issued synchronously and the same completion
 * handler is invoked directly.  On a failed background submission the
 * handler is also called directly with the error.
 */
static void fuse_send_readpages(struct fuse_io_args *ia, struct file *file)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_mount *fm = ff->fm;
	struct fuse_args_pages *ap = &ia->ap;
	/* Read window: start of the first page, one full page per entry. */
	loff_t pos = page_offset(ap->pages[0]);
	size_t count = ap->num_pages << PAGE_SHIFT;
	ssize_t res;
	int err;

	ap->args.out_pages = true;
	ap->args.page_zeroing = true;
	ap->args.page_replace = true;

	/* Don't overflow end offset */
	if (pos + (count - 1) == LLONG_MAX) {
		/* Trim one byte off the request and the last page descriptor. */
		count--;
		ap->descs[ap->num_pages - 1].length--;
	}
	WARN_ON((loff_t) (pos + count) < 0);

	fuse_read_args_fill(ia, file, pos, count, FUSE_READ);
	ia->read.attr_ver = fuse_get_attr_version(fm->fc);
	if (fm->fc->async_read) {
		/* Keep the fuse_file alive for the async completion path;
		 * presumably released by fuse_readpages_end() -- confirm there. */
		ia->ff = fuse_file_get(ff);
		ap->args.end = fuse_readpages_end;
		err = fuse_simple_background(fm, &ap->args, GFP_KERNEL);
		if (!err)
			return;
	} else {
		res = fuse_simple_request(fm, &ap->args);
		err = res < 0 ? res : 0;
	}
	/* Synchronous completion, or background submission failed. */
	fuse_readpages_end(fm, &ap->args, err);
}
"CWE-459"
]
| linux | 5d069dbe8aaf2a197142558b6fb2978189ba3454 | 193,642,258,636,002,630,000,000,000,000,000,000,000 | 35 | fuse: fix bad inode
Jan Kara's analysis of the syzbot report (edited):
The reproducer opens a directory on FUSE filesystem, it then attaches
dnotify mark to the open directory. After that a fuse_do_getattr() call
finds that attributes returned by the server are inconsistent, and calls
make_bad_inode() which, among other things does:
inode->i_mode = S_IFREG;
This then confuses dnotify which doesn't tear down its structures
properly and eventually crashes.
Avoid calling make_bad_inode() on a live inode: switch to a private flag on
the fuse inode. Also add the test to ops which the bad_inode_ops would
have caught.
This bug goes back to the initial merge of fuse in 2.6.14...
Reported-by: [email protected]
Signed-off-by: Miklos Szeredi <[email protected]>
Tested-by: Jan Kara <[email protected]>
Cc: <[email protected]> |
/*
 * Fill a userspace-visible hidp_conninfo from an active session.
 * Device identity (vendor/product/version/name) is taken from the
 * input device when one is attached, otherwise from the HID device.
 */
static void hidp_copy_session(struct hidp_session *session, struct hidp_conninfo *ci)
{
	u32 valid_flags = 0;

	memset(ci, 0, sizeof(*ci));
	bacpy(&ci->bdaddr, &session->bdaddr);

	/* The valid-flags mask is currently empty, so no session flag
	 * is exported to userspace. */
	ci->flags = session->flags & valid_flags;
	ci->state = BT_CONNECTED;

	if (session->input) {
		ci->vendor = session->input->id.vendor;
		ci->product = session->input->id.product;
		ci->version = session->input->id.version;
		strlcpy(ci->name,
			session->input->name ? session->input->name
					     : "HID Boot Device", 128);
		return;
	}

	if (session->hid) {
		ci->vendor = session->hid->vendor;
		ci->product = session->hid->product;
		ci->version = session->hid->version;
		strlcpy(ci->name, session->hid->name, 128);
	}
}
"CWE-787"
]
| linux | 7992c18810e568b95c869b227137a2215702a805 | 19,010,031,805,152,158,000,000,000,000,000,000,000 | 24 | Bluetooth: hidp: buffer overflow in hidp_process_report
CVE-2018-9363
The buffer length is unsigned at all layers, but gets cast to int and
checked in hidp_process_report and can lead to a buffer overflow.
Switch len parameter to unsigned int to resolve issue.
This affects 3.18 and newer kernels.
Signed-off-by: Mark Salyzyn <[email protected]>
Fixes: a4b1b5877b514b276f0f31efe02388a9c2836728 ("HID: Bluetooth: hidp: make sure input buffers are big enough")
Cc: Marcel Holtmann <[email protected]>
Cc: Johan Hedberg <[email protected]>
Cc: "David S. Miller" <[email protected]>
Cc: Kees Cook <[email protected]>
Cc: Benjamin Tissoires <[email protected]>
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Acked-by: Kees Cook <[email protected]>
Signed-off-by: Marcel Holtmann <[email protected]> |
/*
 * sysfs "show" handler: format the per-superblock extent-cache hit
 * counter into @buf.  Returns the number of bytes written (snprintf
 * convention); the buffer is a full page per the sysfs contract.
 */
static ssize_t extent_cache_hits_show(struct ext4_attr *a,
				      struct ext4_sb_info *sbi, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%lu\n", sbi->extent_cache_hits);
}
"CWE-703",
"CWE-189"
]
| linux | d50f2ab6f050311dbf7b8f5501b25f0bf64a439b | 322,274,037,308,136,330,000,000,000,000,000,000,000 | 5 | ext4: fix undefined behavior in ext4_fill_flex_info()
Commit 503358ae01b70ce6909d19dd01287093f6b6271c ("ext4: avoid divide by
zero when trying to mount a corrupted file system") fixes CVE-2009-4307
by performing a sanity check on s_log_groups_per_flex, since it can be
set to a bogus value by an attacker.
sbi->s_log_groups_per_flex = sbi->s_es->s_log_groups_per_flex;
groups_per_flex = 1 << sbi->s_log_groups_per_flex;
if (groups_per_flex < 2) { ... }
This patch fixes two potential issues in the previous commit.
1) The sanity check might only work on architectures like PowerPC.
On x86, 5 bits are used for the shifting amount. That means, given a
large s_log_groups_per_flex value like 36, groups_per_flex = 1 << 36
is essentially 1 << 4 = 16, rather than 0. This will bypass the check,
leaving s_log_groups_per_flex and groups_per_flex inconsistent.
2) The sanity check relies on undefined behavior, i.e., oversized shift.
A standard-confirming C compiler could rewrite the check in unexpected
ways. Consider the following equivalent form, assuming groups_per_flex
is unsigned for simplicity.
groups_per_flex = 1 << sbi->s_log_groups_per_flex;
if (groups_per_flex == 0 || groups_per_flex == 1) {
We compile the code snippet using Clang 3.0 and GCC 4.6. Clang will
completely optimize away the check groups_per_flex == 0, leaving the
patched code as vulnerable as the original. GCC keeps the check, but
there is no guarantee that future versions will do the same.
Signed-off-by: Xi Wang <[email protected]>
Signed-off-by: "Theodore Ts'o" <[email protected]>
Cc: [email protected] |
/*
 * Verify the signatures of a FIT configuration node.
 *
 * @fit:          pointer to the FIT blob
 * @conf_noffset: node offset of the configuration to check
 *
 * Thin wrapper that delegates to fit_config_verify_required_sigs()
 * using the control FDT (gd_fdt_blob()) as the source of required
 * signing keys.  Returns that function's result.
 */
int fit_config_verify(const void *fit, int conf_noffset)
{
	return fit_config_verify_required_sigs(fit, conf_noffset,
					       gd_fdt_blob());
}
| u-boot | 79af75f7776fc20b0d7eb6afe1e27c00fdb4b9b4 | 260,408,972,776,184,420,000,000,000,000,000,000,000 | 5 | fit: Don't allow verification of images with @ nodes
When searching for a node called 'fred', any unit address appended to the
name is ignored by libfdt, meaning that 'fred' can match 'fred@1'. This
means that we cannot be sure that the node originally intended is the one
that is used.
Disallow use of nodes with unit addresses.
Update the forge test also, since it uses @ addresses.
CVE-2021-27138
Signed-off-by: Simon Glass <[email protected]>
Reported-by: Bruce Monroe <[email protected]>
Reported-by: Arie Haenel <[email protected]>
Reported-by: Julien Lenoir <[email protected]> |
int nci_recv_frame(struct nci_dev *ndev, struct sk_buff *skb)
{
pr_debug("len %d\n", skb->len);
if (!ndev || (!test_bit(NCI_UP, &ndev->flags) &&
!test_bit(NCI_INIT, &ndev->flags))) {
kfree_skb(skb);
return -ENXIO;
}
/* Queue frame for rx worker thread */
skb_queue_tail(&ndev->rx_q, skb);
queue_work(ndev->rx_wq, &ndev->rx_work);
return 0;
} | 0 | []
| linux | 48b71a9e66c2eab60564b1b1c85f4928ed04e406 | 270,434,669,449,419,500,000,000,000,000,000,000,000 | 16 | NFC: add NCI_UNREG flag to eliminate the race
There are two sites that calls queue_work() after the
destroy_workqueue() and lead to possible UAF.
The first site is nci_send_cmd(), which can happen after the
nci_close_device as below
nfcmrvl_nci_unregister_dev | nfc_genl_dev_up
nci_close_device |
flush_workqueue |
del_timer_sync |
nci_unregister_device | nfc_get_device
destroy_workqueue | nfc_dev_up
nfc_unregister_device | nci_dev_up
device_del | nci_open_device
| __nci_request
| nci_send_cmd
| queue_work !!!
Another site is nci_cmd_timer, awaked by the nci_cmd_work from the
nci_send_cmd.
... | ...
nci_unregister_device | queue_work
destroy_workqueue |
nfc_unregister_device | ...
device_del | nci_cmd_work
| mod_timer
| ...
| nci_cmd_timer
| queue_work !!!
For the above two UAF, the root cause is that the nfc_dev_up can race
between the nci_unregister_device routine. Therefore, this patch
introduce NCI_UNREG flag to easily eliminate the possible race. In
addition, the mutex_lock in nci_close_device can act as a barrier.
Signed-off-by: Lin Ma <[email protected]>
Fixes: 6a2968aaf50c ("NFC: basic NCI protocol implementation")
Reviewed-by: Jakub Kicinski <[email protected]>
Reviewed-by: Krzysztof Kozlowski <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Jakub Kicinski <[email protected]> |
/*
 * Recover clipped highlights by blending each clipped pixel with a
 * clipped-to-sensor-limit copy of itself in an opponent color space.
 * Only 3- and 4-color images are handled.  For every pixel with at
 * least one channel above the clip level, the chroma of the original
 * value is rescaled by the chroma ratio of the clipped version, which
 * desaturates blown highlights instead of leaving false colors.
 */
void CLASS blend_highlights()
{
  int clip=INT_MAX, row, col, c, i, j;
  /* Forward/inverse opponent-space transforms; index [colors-3] picks
     the 3-color or 4-color variant. */
  static const float trans[2][4][4] =
  { { { 1,1,1 }, { 1.7320508,-1.7320508,0 }, { -1,-1,2 } },
    { { 1,1,1,1 }, { 1,-1,1,-1 }, { 1,1,-1,-1 }, { 1,-1,-1,1 } } };
  static const float itrans[2][4][4] =
  { { { 1,0.8660254,-0.5 }, { 1,-0.8660254,-0.5 }, { 1,0,1 } },
    { { 1,1,1,1 }, { 1,-1,1,-1 }, { 1,1,-1,-1 }, { 1,-1,-1,1 } } };
  float cam[2][4], lab[2][4], sum[2], chratio;

  /* Bail out unless colors is exactly 3 or 4. */
  if ((unsigned) (colors-3) > 1) return;
#ifdef DCRAW_VERBOSE
  if (verbose) fprintf (stderr,_("Blending highlights...\n"));
#endif
  /* Clip level: smallest per-channel saturation point after pre_mul. */
  FORCC if (clip > (i = 65535*pre_mul[c])) clip = i;
#ifdef LIBRAW_LIBRARY_BUILD
  RUN_CALLBACK(LIBRAW_PROGRESS_HIGHLIGHTS,0,2);
#endif
  for (row=0; row < height; row++)
    for (col=0; col < width; col++) {
      /* Skip pixels with no clipped channel. */
      FORCC if (image[row*width+col][c] > clip) break;
      if (c == colors) continue;
      /* cam[0] = original pixel, cam[1] = pixel clamped to clip. */
      FORCC {
        cam[0][c] = image[row*width+col][c];
        cam[1][c] = MIN(cam[0][c],clip);
      }
      /* Transform both versions to opponent space; sum[] accumulates
         the squared chroma components (index 1..colors-1). */
      for (i=0; i < 2; i++) {
        FORCC for (lab[i][c]=j=0; j < colors; j++)
          lab[i][c] += trans[colors-3][c][j] * cam[i][j];
        for (sum[i]=0,c=1; c < colors; c++)
          sum[i] += SQR(lab[i][c]);
      }
      /* Scale the original's chroma toward the clipped version's. */
      chratio = sqrt(sum[1]/sum[0]);
      for (c=1; c < colors; c++)
        lab[0][c] *= chratio;
      /* Back to camera space and average into the output pixel. */
      FORCC for (cam[0][c]=j=0; j < colors; j++)
        cam[0][c] += itrans[colors-3][c][j] * lab[0][j];
      FORCC image[row*width+col][c] = cam[0][c] / colors;
    }
#ifdef LIBRAW_LIBRARY_BUILD
  RUN_CALLBACK(LIBRAW_PROGRESS_HIGHLIGHTS,1,2);
#endif
}
| LibRaw | c4e374ea6c979a7d1d968f5082b7d0ea8cd27202 | 152,828,525,084,513,170,000,000,000,000,000,000,000 | 44 | additional data checks backported from 0.15.4 |
/*
 * Parse a Sony SR2SubIFD private tag block.
 *
 * cbuf_SR2        - buffer holding the SR2SubIFD bytes
 * SR2SubIFDOffset - file offset the out-of-line value pointers are
 *                   relative to; subtracted to index into cbuf_SR2
 * SR2SubIFDLength - size of the original IFD data; an entry whose
 *                   value pointer lands past this is treated as corrupt
 *                   and parsing stops
 * dng_writer      - when nonDNG, tags that set black level / WB /
 *                   color matrix for raw decoding are applied as well
 *
 * Layout is TIFF-IFD-like: a 2-byte entry count followed by 12-byte
 * entries (tag, type, len, value-or-offset).  The string
 * "11124811248484" maps the TIFF type code to its element size.
 */
void CLASS parseSonySR2 (uchar *cbuf_SR2, unsigned SR2SubIFDOffset, unsigned SR2SubIFDLength, unsigned dng_writer)
{
  unsigned c;
  unsigned entries, tag, type, len;
  unsigned icbuf_SR2;
  int ival;
  int TagProcessed;
  float num;
  int i;
  entries = sget2(cbuf_SR2);
  if (entries > 1000) return;  /* sanity cap on the entry count */
  icbuf_SR2 = 2;
  while (entries--) {
    tag = sget2(cbuf_SR2 + icbuf_SR2);
    icbuf_SR2 += 2;
    type = sget2(cbuf_SR2 + icbuf_SR2);
    icbuf_SR2 += 2;
    len = sget4(cbuf_SR2 + icbuf_SR2);
    icbuf_SR2 += 4;
    /* TIFF rule: values wider than 4 bytes live elsewhere and the entry
       stores an offset; otherwise the value is inline at icbuf_SR2. */
    if (len * ("11124811248484"[type < 14 ? type : 0] - '0') > 4) {
      ival = sget4(cbuf_SR2 + icbuf_SR2) - SR2SubIFDOffset;
    } else {
      ival = icbuf_SR2;
    }
    if(ival > SR2SubIFDLength) // points out of orig. buffer size
      break; // END processing. Generally we should check against SR2SubIFDLength minus 6 of 8, depending on tag, but we allocated extra 1024b for buffer, so this does not matter
    icbuf_SR2 += 4;
    TagProcessed = 0;
    if (dng_writer == nonDNG) {
      switch (tag) {
      case 0x7300:  /* per-channel black level */
        for (c = 0; c < 4 && c < len; c++)
          cblack[c] = sget2(cbuf_SR2 + ival + 2 * c);
        TagProcessed = 1;
        break;
      case 0x7303:  /* camera WB multipliers */
        FORC4 cam_mul[c ^ (c < 2)] = sget2(cbuf_SR2 + ival + 2 * c);
        TagProcessed = 1;
        break;
      case 0x7310:  /* black level; common minimum split off into 'black' */
        FORC4 cblack[c ^ c >> 1] = sget2(cbuf_SR2 + ival + 2 * c);
        i = cblack[3];
        FORC3 if (i > cblack[c]) i = cblack[c];
        FORC4 cblack[c] -= i;
        black = i;
        TagProcessed = 1;
        break;
      case 0x7313:  /* camera WB multipliers (alternate channel order) */
        FORC4 cam_mul[c ^ (c >> 1)] = sget2(cbuf_SR2 + ival + 2 * c);
        TagProcessed = 1;
        break;
      case 0x74a0:  /* rational: max aperture at max focal length */
        c = sget4(cbuf_SR2 + ival+4);
        if (c) ilm.MaxAp4MaxFocal = ((float)sget4(cbuf_SR2 + ival)) / ((float)c);
        TagProcessed = 1;
        break;
      case 0x74a1:  /* rational: max aperture at min focal length */
        c = sget4(cbuf_SR2 + ival+4);
        if (c) ilm.MaxAp4MinFocal = ((float)sget4(cbuf_SR2 + ival)) / ((float)c);
        TagProcessed = 1;
        break;
      case 0x74a2:  /* rational: max focal length */
        c = sget4(cbuf_SR2 + ival+4);
        if (c) ilm.MaxFocal = ((float)sget4(cbuf_SR2 + ival)) / ((float)c);
        TagProcessed = 1;
        break;
      case 0x74a3:  /* rational: min focal length */
        c = sget4(cbuf_SR2 + ival+4);
        if (c) ilm.MinFocal = ((float)sget4(cbuf_SR2 + ival)) / ((float)c);
        TagProcessed = 1;
        break;
      case 0x7800:  /* 3x3 color matrix; each row normalized to unit sum */
        for (i = 0; i < 3; i++) {
          num = 0.0;
          for (c = 0; c < 3; c++) {
            imgdata.color.ccm[i][c] = (float)((short)sget2(cbuf_SR2 + ival + 2 * (i*3+c)));
            num += imgdata.color.ccm[i][c];
          }
          if (num > 0.01)
            FORC3 imgdata.color.ccm[i][c] = imgdata.color.ccm[i][c] / num;
        }
        TagProcessed = 1;
        break;
      case 0x787f:  /* per-channel linear saturation level */
        if (len == 3) {
          FORC3 imgdata.color.linear_max[c] = sget2(cbuf_SR2 + ival + 2 * c);
          imgdata.color.linear_max[3] = imgdata.color.linear_max[1];
        } else if (len == 1) {
          imgdata.color.linear_max[0] =
            imgdata.color.linear_max[1] =
            imgdata.color.linear_max[2] =
            imgdata.color.linear_max[3] = sget2(cbuf_SR2 + ival);
        }
        TagProcessed = 1;
        break;
      }
    }
    if (!TagProcessed) {
      /* Remaining tags fill the white-balance preset tables
         (icWBC, indexed by illuminant) and the color-temperature
         table icWBCTC (first element is the temperature in K). */
      switch (tag) {
      case 0x7302:
        FORC4 icWBC[LIBRAW_WBI_Auto][c ^ (c < 2)] = sget2(cbuf_SR2 + ival + 2 * c);
        break;
      case 0x7312:
        FORC4 icWBC[LIBRAW_WBI_Auto][c ^ (c >> 1)] = sget2(cbuf_SR2 + ival + 2 * c);
        break;
      case 0x7480:
      case 0x7820:
        FORC3 icWBC[LIBRAW_WBI_Daylight][c] = sget2(cbuf_SR2 + ival + 2 * c);
        icWBC[LIBRAW_WBI_Daylight][3] = icWBC[LIBRAW_WBI_Daylight][1];
        break;
      case 0x7481:
      case 0x7821:
        FORC3 icWBC[LIBRAW_WBI_Cloudy][c] = sget2(cbuf_SR2 + ival + 2 * c);
        icWBC[LIBRAW_WBI_Cloudy][3] = icWBC[LIBRAW_WBI_Cloudy][1];
        break;
      case 0x7482:
      case 0x7822:
        FORC3 icWBC[LIBRAW_WBI_Tungsten][c] = sget2(cbuf_SR2 + ival + 2 * c);
        icWBC[LIBRAW_WBI_Tungsten][3] = icWBC[LIBRAW_WBI_Tungsten][1];
        break;
      case 0x7483:
      case 0x7823:
        FORC3 icWBC[LIBRAW_WBI_Flash][c] = sget2(cbuf_SR2 + ival + 2 * c);
        icWBC[LIBRAW_WBI_Flash][3] = icWBC[LIBRAW_WBI_Flash][1];
        break;
      case 0x7484:
      case 0x7824:
        icWBCTC[0][0] = 4500;
        FORC3 icWBCTC[0][c + 1] = sget2(cbuf_SR2 + ival + 2 * c);
        icWBCTC[0][4] = icWBCTC[0][2];
        break;
      case 0x7486:
        FORC3 icWBC[LIBRAW_WBI_Fluorescent][c] = sget2(cbuf_SR2 + ival + 2 * c);
        icWBC[LIBRAW_WBI_Fluorescent][3] = icWBC[LIBRAW_WBI_Fluorescent][1];
        break;
      case 0x7825:
        FORC3 icWBC[LIBRAW_WBI_Shade][c] = sget2(cbuf_SR2 + ival + 2 * c);
        icWBC[LIBRAW_WBI_Shade][3] = icWBC[LIBRAW_WBI_Shade][1];
        break;
      case 0x7826:
        FORC3 icWBC[LIBRAW_WBI_FL_W][c] = sget2(cbuf_SR2 + ival + 2 * c);
        icWBC[LIBRAW_WBI_FL_W][3] = icWBC[LIBRAW_WBI_FL_W][1];
        break;
      case 0x7827:
        FORC3 icWBC[LIBRAW_WBI_FL_N][c] = sget2(cbuf_SR2 + ival + 2 * c);
        icWBC[LIBRAW_WBI_FL_N][3] = icWBC[LIBRAW_WBI_FL_N][1];
        break;
      case 0x7828:
        FORC3 icWBC[LIBRAW_WBI_FL_D][c] = sget2(cbuf_SR2 + ival + 2 * c);
        icWBC[LIBRAW_WBI_FL_D][3] = icWBC[LIBRAW_WBI_FL_D][1];
        break;
      case 0x7829:
        FORC3 icWBC[LIBRAW_WBI_FL_L][c] = sget2(cbuf_SR2 + ival + 2 * c);
        icWBC[LIBRAW_WBI_FL_L][3] = icWBC[LIBRAW_WBI_FL_L][1];
        break;
      case 0x782a:
        icWBCTC[1][0] = 8500;
        FORC3 icWBCTC[1][c + 1] = sget2(cbuf_SR2 + ival + 2 * c);
        icWBCTC[1][4] = icWBCTC[1][2];
        break;
      case 0x782b:
        icWBCTC[2][0] = 6000;
        FORC3 icWBCTC[2][c + 1] = sget2(cbuf_SR2 + ival + 2 * c);
        icWBCTC[2][4] = icWBCTC[2][2];
        break;
      case 0x782c:
        icWBCTC[3][0] = 3200;
        FORC3 icWBC[LIBRAW_WBI_StudioTungsten][c] = icWBCTC[3][c + 1] =
          sget2(cbuf_SR2 + ival + 2 * c);
        icWBC[LIBRAW_WBI_StudioTungsten][3] = icWBCTC[3][4] = icWBCTC[3][2];;
        break;
      case 0x782d:
        icWBCTC[4][0] = 2500;
        FORC3 icWBCTC[4][c + 1] = sget2(cbuf_SR2 + ival + 2 * c);
        icWBCTC[4][4] = icWBCTC[4][2];
        break;
      }
    }
  }
}
"CWE-400"
]
| LibRaw | e67a9862d10ebaa97712f532eca1eb5e2e410a22 | 177,538,469,698,685,230,000,000,000,000,000,000,000 | 184 | Fixed Secunia Advisory SA86384
- possible infinite loop in unpacked_load_raw()
- possible infinite loop in parse_rollei()
- possible infinite loop in parse_sinar_ia()
Credits: Laurent Delosieres, Secunia Research at Flexera |
/*
 * Resolve `addr` and open a TCP connection to `addr:port`.
 *
 * Returns the connected socket fd on success, -1 on resolution/setup
 * failure (errno set where applicable), or -2 when connect() fails.
 *
 * Fixes over the previous version: the resolved address is validated to
 * actually be a 4-byte IPv4 address (h_addrtype/h_length) before use,
 * and it is copied with memcpy() instead of a type-punned uint32_t
 * load, which was undefined behavior (strict aliasing) and potentially
 * a misaligned read.
 */
int socket_connect(const char *addr, uint16_t port)
{
	int sfd = -1;
	int yes = 1;
	struct hostent *hp;
	struct sockaddr_in saddr;
#ifdef WIN32
	/* Lazily initialize Winsock exactly once. */
	WSADATA wsa_data;
	if (!wsa_init) {
		if (WSAStartup(MAKEWORD(2,2), &wsa_data) != ERROR_SUCCESS) {
			fprintf(stderr, "WSAStartup failed!\n");
			ExitProcess(-1);
		}
		wsa_init = 1;
	}
#endif

	if (!addr) {
		errno = EINVAL;
		return -1;
	}

	if ((hp = gethostbyname(addr)) == NULL) {
		if (verbose >= 2)
			fprintf(stderr, "%s: unknown host '%s'\n", __func__, addr);
		return -1;
	}

	if (!hp->h_addr) {
		if (verbose >= 2)
			fprintf(stderr, "%s: gethostbyname returned NULL address!\n",
					__func__);
		return -1;
	}

	/* Only a 4-byte IPv4 result can be copied into a sockaddr_in. */
	if (hp->h_addrtype != AF_INET ||
	    hp->h_length != (int) sizeof(saddr.sin_addr)) {
		if (verbose >= 2)
			fprintf(stderr, "%s: '%s' did not resolve to an IPv4 address!\n",
					__func__, addr);
		return -1;
	}

	if (0 > (sfd = socket(PF_INET, SOCK_STREAM, IPPROTO_TCP))) {
		perror("socket()");
		return -1;
	}

	if (setsockopt(sfd, SOL_SOCKET, SO_REUSEADDR, (void*)&yes, sizeof(int)) == -1) {
		perror("setsockopt()");
		socket_close(sfd);
		return -1;
	}

#ifdef SO_NOSIGPIPE
	/* Avoid SIGPIPE on writes to a closed peer (BSD/macOS). */
	if (setsockopt(sfd, SOL_SOCKET, SO_NOSIGPIPE, (void*)&yes, sizeof(int)) == -1) {
		perror("setsockopt()");
		socket_close(sfd);
		return -1;
	}
#endif

	memset((void *) &saddr, 0, sizeof(saddr));
	saddr.sin_family = AF_INET;
	/* h_addr is a plain char buffer with no alignment guarantee:
	 * copy it instead of dereferencing it as a uint32_t. */
	memcpy(&saddr.sin_addr, hp->h_addr, sizeof(saddr.sin_addr));
	saddr.sin_port = htons(port);

	if (connect(sfd, (struct sockaddr *) &saddr, sizeof(saddr)) < 0) {
		perror("connect");
		socket_close(sfd);
		return -2;
	}

	return sfd;
}
"CWE-284",
"CWE-703"
]
| libusbmuxd | 4397b3376dc4e4cb1c991d0aed61ce6482614196 | 322,594,025,769,497,520,000,000,000,000,000,000,000 | 67 | common: [security fix] Make sure sockets only listen locally |
// Accessor: returns the remote-address pointer (raddr_) stored on this
// backend connection; the pointer is not transferred to the caller.
const Address *HttpDownstreamConnection::get_raddr() const { return raddr_; }
| nghttp2 | 319d5ab1c6d916b6b8a0d85b2ae3f01b3ad04f2c | 138,775,158,793,835,210,000,000,000,000,000,000,000 | 1 | nghttpx: Fix request stall
Fix request stall if backend connection is reused and buffer is full. |
/*
 * Prepare `query` on the global `mysql` connection and flag the
 * resulting statement to use a read-only server-side cursor.
 * Preparation failures are reported through check_execute().
 */
MYSQL_STMT *open_cursor(const char *query)
{
  const ulong cursor_type= (ulong) CURSOR_TYPE_READ_ONLY;
  MYSQL_STMT *stmt= mysql_stmt_init(mysql);
  int rc= mysql_stmt_prepare(stmt, query, strlen(query));

  check_execute(stmt, rc);
  mysql_stmt_attr_set(stmt, STMT_ATTR_CURSOR_TYPE, (void*) &cursor_type);
  return stmt;
}
"CWE-284",
"CWE-295"
]
| mysql-server | 3bd5589e1a5a93f9c224badf983cd65c45215390 | 131,479,950,689,073,000,000,000,000,000,000,000,000 | 12 | WL#6791 : Redefine client --ssl option to imply enforced encryption
# Changed the meaning of the --ssl=1 option of all client binaries
to mean force ssl, not try ssl and fail over to eunecrypted
# Added a new MYSQL_OPT_SSL_ENFORCE mysql_options()
option to specify that an ssl connection is required.
# Added a new macro SSL_SET_OPTIONS() to the client
SSL handling headers that sets all the relevant SSL options at
once.
# Revamped all of the current native clients to use the new macro
# Removed some Windows line endings.
# Added proper handling of the new option into the ssl helper
headers.
# If SSL is mandatory assume that the media is secure enough
for the sha256 plugin to do unencrypted password exchange even
before establishing a connection.
# Set the default ssl cipher to DHE-RSA-AES256-SHA if none is
specified.
# updated test cases that require a non-default cipher to spawn
a mysql command line tool binary since mysqltest has no support
for specifying ciphers.
# updated the replication slave connection code to always enforce
SSL if any of the SSL config options is present.
# test cases added and updated.
# added a mysql_get_option() API to return mysql_options()
values. Used the new API inside the sha256 plugin.
# Fixed compilation warnings because of unused variables.
# Fixed test failures (mysql_ssl and bug13115401)
# Fixed whitespace issues.
# Fully implemented the mysql_get_option() function.
# Added a test case for mysql_get_option()
# fixed some trailing whitespace issues
# fixed some uint/int warnings in mysql_client_test.c
# removed shared memory option from non-windows get_options
tests
# moved MYSQL_OPT_LOCAL_INFILE to the uint options |
/*
 * Free the owned fields of a perrdetail parser-error record.
 * Py_CLEAR drops the reference to err->filename and resets the field
 * to NULL, so calling this twice on the same record is harmless.
 */
err_free(perrdetail *err)
{
    Py_CLEAR(err->filename);
}
"CWE-125"
]
| cpython | dcfcd146f8e6fc5c2fc16a4c192a0c5f5ca8c53c | 175,327,294,633,040,230,000,000,000,000,000,000,000 | 4 | bpo-35766: Merge typed_ast back into CPython (GH-11645) |
/*
 * Arch hook invoked while setting up a new process image at exec time.
 * Simply delegates to load_vdso32() (presumably maps the 32-bit vDSO
 * into the new mm -- confirm in its definition) and returns its result.
 * bprm and uses_interp are part of the hook signature and unused here.
 */
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	return load_vdso32();
}
| linux | 394f56fe480140877304d342dec46d50dc823d46 | 53,343,786,834,108,700,000,000,000,000,000,000,000 | 4 | x86_64, vdso: Fix the vdso address randomization algorithm
The theory behind vdso randomization is that it's mapped at a random
offset above the top of the stack. To avoid wasting a page of
memory for an extra page table, the vdso isn't supposed to extend
past the lowest PMD into which it can fit. Other than that, the
address should be a uniformly distributed address that meets all of
the alignment requirements.
The current algorithm is buggy: the vdso has about a 50% probability
of being at the very end of a PMD. The current algorithm also has a
decent chance of failing outright due to incorrect handling of the
case where the top of the stack is near the top of its PMD.
This fixes the implementation. The paxtest estimate of vdso
"randomisation" improves from 11 bits to 18 bits. (Disclaimer: I
don't know what the paxtest code is actually calculating.)
It's worth noting that this algorithm is inherently biased: the vdso
is more likely to end up near the end of its PMD than near the
beginning. Ideally we would either nix the PMD sharing requirement
or jointly randomize the vdso and the stack to reduce the bias.
In the mean time, this is a considerable improvement with basically
no risk of compatibility issues, since the allowed outputs of the
algorithm are unchanged.
As an easy test, doing this:
for i in `seq 10000`
do grep -P vdso /proc/self/maps |cut -d- -f1
done |sort |uniq -d
used to produce lots of output (1445 lines on my most recent run).
A tiny subset looks like this:
7fffdfffe000
7fffe01fe000
7fffe05fe000
7fffe07fe000
7fffe09fe000
7fffe0bfe000
7fffe0dfe000
Note the suspicious fe000 endings. With the fix, I get a much more
palatable 76 repeated addresses.
Reviewed-by: Kees Cook <[email protected]>
Cc: [email protected]
Signed-off-by: Andy Lutomirski <[email protected]> |
/*
 * Console-layer output hook: write `count` bytes of kernel message text
 * to the foreground virtual console (or to the kmsg-redirect console if
 * one is configured).
 *
 * Printable runs are buffered and flushed in one con_putcs() call per
 * run; BS (8), LF (10), CR (13) and pending line wraps force a flush
 * and are handled individually.  Uses spin_trylock so a console that is
 * already printing (or a recursive call) is silently skipped rather
 * than deadlocked.
 */
static void vt_console_print(struct console *co, const char *b, unsigned count)
{
	struct vc_data *vc = vc_cons[fg_console].d;
	unsigned char c;
	static DEFINE_SPINLOCK(printing_lock);
	const ushort *start;
	ushort start_x, cnt;
	int kmsg_console;

	/* console busy or not yet initialized */
	if (!printable)
		return;
	if (!spin_trylock(&printing_lock))
		return;

	/* Redirect to the kmsg console when one is set and allocated. */
	kmsg_console = vt_get_kmsg_redirect();
	if (kmsg_console && vc_cons_allocated(kmsg_console - 1))
		vc = vc_cons[kmsg_console - 1].d;

	if (!vc_cons_allocated(fg_console)) {
		/* impossible */
		/* printk("vt_console_print: tty %d not allocated ??\n", currcons+1); */
		goto quit;
	}

	/* Don't scribble over a graphics-mode console. */
	if (vc->vc_mode != KD_TEXT)
		goto quit;

	/* undraw cursor first */
	if (con_is_fg(vc))
		hide_cursor(vc);

	/* start/start_x/cnt track the current buffered run of printable
	 * characters; the run is flushed before any control handling. */
	start = (ushort *)vc->vc_pos;
	start_x = vc->state.x;
	cnt = 0;
	while (count--) {
		c = *b++;
		if (c == 10 || c == 13 || c == 8 || vc->vc_need_wrap) {
			if (cnt && con_is_visible(vc))
				vc->vc_sw->con_putcs(vc, start, cnt, vc->state.y, start_x);
			cnt = 0;
			if (c == 8) {		/* backspace */
				bs(vc);
				start = (ushort *)vc->vc_pos;
				start_x = vc->state.x;
				continue;
			}
			/* LF (or a pending wrap) advances a line; both LF
			 * and CR return to column 0. */
			if (c != 13)
				lf(vc);
			cr(vc);
			start = (ushort *)vc->vc_pos;
			start_x = vc->state.x;
			if (c == 10 || c == 13)
				continue;
		}
		vc_uniscr_putc(vc, c);
		/* Store attribute byte + character into screen memory. */
		scr_writew((vc->vc_attr << 8) + c, (unsigned short *)vc->vc_pos);
		notify_write(vc, c);
		cnt++;
		if (vc->state.x == vc->vc_cols - 1) {
			/* Defer the wrap until the next character arrives. */
			vc->vc_need_wrap = 1;
		} else {
			vc->vc_pos += 2;
			vc->state.x++;
		}
	}
	/* Flush any trailing printable run. */
	if (cnt && con_is_visible(vc))
		vc->vc_sw->con_putcs(vc, start, cnt, vc->state.y, start_x);
	set_cursor(vc);
	notify_update(vc);

quit:
	spin_unlock(&printing_lock);
}
"CWE-125"
]
| linux | 3c4e0dff2095c579b142d5a0693257f1c58b4804 | 43,418,446,496,957,340,000,000,000,000,000,000,000 | 74 | vt: Disable KD_FONT_OP_COPY
It's buggy:
On Fri, Nov 06, 2020 at 10:30:08PM +0800, Minh Yuan wrote:
> We recently discovered a slab-out-of-bounds read in fbcon in the latest
> kernel ( v5.10-rc2 for now ). The root cause of this vulnerability is that
> "fbcon_do_set_font" did not handle "vc->vc_font.data" and
> "vc->vc_font.height" correctly, and the patch
> <https://lkml.org/lkml/2020/9/27/223> for VT_RESIZEX can't handle this
> issue.
>
> Specifically, we use KD_FONT_OP_SET to set a small font.data for tty6, and
> use KD_FONT_OP_SET again to set a large font.height for tty1. After that,
> we use KD_FONT_OP_COPY to assign tty6's vc_font.data to tty1's vc_font.data
> in "fbcon_do_set_font", while tty1 retains the original larger
> height. Obviously, this will cause an out-of-bounds read, because we can
> access a smaller vc_font.data with a larger vc_font.height.
Further there was only one user ever.
- Android's loadfont, busybox and console-tools only ever use OP_GET
and OP_SET
- fbset documentation only mentions the kernel cmdline font: option,
not anything else.
- systemd used OP_COPY before release 232 published in Nov 2016
Now unfortunately the crucial report seems to have gone down with
gmane, and the commit message doesn't say much. But the pull request
hints at OP_COPY being broken
https://github.com/systemd/systemd/pull/3651
So in other words, this never worked, and the only project which
foolishly every tried to use it, realized that rather quickly too.
Instead of trying to fix security issues here on dead code by adding
missing checks, fix the entire thing by removing the functionality.
Note that systemd code using the OP_COPY function ignored the return
value, so it doesn't matter what we're doing here really - just in
case a lone server somewhere happens to be extremely unlucky and
running an affected old version of systemd. The relevant code from
font_copy_to_all_vcs() in systemd was:
/* copy font from active VT, where the font was uploaded to */
cfo.op = KD_FONT_OP_COPY;
cfo.height = vcs.v_active-1; /* tty1 == index 0 */
(void) ioctl(vcfd, KDFONTOP, &cfo);
Note this just disables the ioctl, garbage collecting the now unused
callbacks is left for -next.
v2: Tetsuo found the old mail, which allowed me to find it on another
archive. Add the link too.
Acked-by: Peilin Ye <[email protected]>
Reported-by: Minh Yuan <[email protected]>
References: https://lists.freedesktop.org/archives/systemd-devel/2016-June/036935.html
References: https://github.com/systemd/systemd/pull/3651
Cc: Greg KH <[email protected]>
Cc: Peilin Ye <[email protected]>
Cc: Tetsuo Handa <[email protected]>
Signed-off-by: Daniel Vetter <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Greg Kroah-Hartman <[email protected]> |
static GF_Err isom_create_init_from_mem(const char *fileName, GF_ISOFile *file)
{
u32 sample_rate=0;
u32 nb_channels=0;
u32 bps=0;
//u32 atag=0;
u32 nal_len=4;
u32 width = 0;
u32 height = 0;
u32 timescale = 10000000;
u64 tfdt = 0;
char sz4cc[5];
char CodecParams[2048];
u32 CodecParamLen=0;
char *sep, *val;
GF_TrackBox *trak;
GF_TrackExtendsBox *trex;
GF_SampleTableBox *stbl;
sz4cc[0] = 0;
val = (char*) ( fileName + strlen("isobmff://") );
while (1) {
sep = strchr(val, ' ');
if (sep) sep[0] = 0;
if (!strncmp(val, "4cc=", 4)) strcpy(sz4cc, val+4);
else if (!strncmp(val, "init=", 5)) {
char szH[3], *data = val+5;
u32 i, len = (u32) strlen(data);
for (i=0; i<len; i+=2) {
u32 v;
//init is hex-encoded so 2 input bytes for one output char
szH[0] = data[i];
szH[1] = data[i+1];
szH[2] = 0;
sscanf(szH, "%X", &v);
CodecParams[CodecParamLen] = (char) v;
CodecParamLen++;
}
}
else if (!strncmp(val, "nal=", 4)) nal_len = atoi(val+4);
else if (!strncmp(val, "bps=", 4)) bps = atoi(val+4);
//else if (!strncmp(val, "atag=", 5)) atag = atoi(val+5);
else if (!strncmp(val, "ch=", 3)) nb_channels = atoi(val+3);
else if (!strncmp(val, "srate=", 6)) sample_rate = atoi(val+6);
else if (!strncmp(val, "w=", 2)) width = atoi(val+2);
else if (!strncmp(val, "h=", 2)) height = atoi(val+2);
else if (!strncmp(val, "scale=", 6)) timescale = atoi(val+6);
else if (!strncmp(val, "tfdt=", 5)) {
sscanf(val+5, LLX, &tfdt);
}
if (!sep) break;
sep[0] = ' ';
val = sep+1;
}
if (!stricmp(sz4cc, "H264") || !stricmp(sz4cc, "AVC1")) {
}
else if (!stricmp(sz4cc, "AACL")) {
}
else {
GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] Cannot convert smooth media type %s to ISO init segment\n", sz4cc));
return GF_NOT_SUPPORTED;
}
file->moov = (GF_MovieBox *) gf_isom_box_new(GF_ISOM_BOX_TYPE_MOOV);
if (!file->moov) return GF_OUT_OF_MEM;
gf_list_add(file->TopBoxes, file->moov);
file->moov->mov = file;
file->is_smooth = GF_TRUE;
file->moov->mvhd = (GF_MovieHeaderBox *) gf_isom_box_new_parent(&file->moov->child_boxes, GF_ISOM_BOX_TYPE_MVHD);
if (!file->moov->mvhd) return GF_OUT_OF_MEM;
file->moov->mvhd->timeScale = timescale;
file->moov->mvex = (GF_MovieExtendsBox *) gf_isom_box_new_parent(&file->moov->child_boxes, GF_ISOM_BOX_TYPE_MVEX);
if (!file->moov->mvex) return GF_OUT_OF_MEM;
trex = (GF_TrackExtendsBox *) gf_isom_box_new_parent(&file->moov->mvex->child_boxes, GF_ISOM_BOX_TYPE_TREX);
if (!trex) return GF_OUT_OF_MEM;
trex->def_sample_desc_index = 1;
trex->trackID = 1;
gf_list_add(file->moov->mvex->TrackExList, trex);
trak = (GF_TrackBox *) gf_isom_box_new_parent(&file->moov->child_boxes, GF_ISOM_BOX_TYPE_TRAK);
if (!trak) return GF_OUT_OF_MEM;
trak->moov = file->moov;
gf_list_add(file->moov->trackList, trak);
trak->Header = (GF_TrackHeaderBox *) gf_isom_box_new_parent(&trak->child_boxes, GF_ISOM_BOX_TYPE_TKHD);
if (!trak->Header) return GF_OUT_OF_MEM;
trak->Header->trackID = 1;
trak->Header->flags |= 1;
trak->Header->width = width;
trak->Header->height = height;
trak->Media = (GF_MediaBox *) gf_isom_box_new_parent(&trak->child_boxes, GF_ISOM_BOX_TYPE_MDIA);
if (!trak->Media) return GF_OUT_OF_MEM;
trak->Media->mediaTrack = trak;
trak->Media->mediaHeader = (GF_MediaHeaderBox *) gf_isom_box_new_parent(&trak->Media->child_boxes, GF_ISOM_BOX_TYPE_MDHD);
if (!trak->Media->mediaHeader) return GF_OUT_OF_MEM;
trak->Media->mediaHeader->timeScale = timescale;
trak->Media->handler = (GF_HandlerBox *) gf_isom_box_new_parent(&trak->Media->child_boxes,GF_ISOM_BOX_TYPE_HDLR);
if (!trak->Media->handler) return GF_OUT_OF_MEM;
//we assume by default vide for handler type (only used for smooth streaming)
trak->Media->handler->handlerType = width ? GF_ISOM_MEDIA_VISUAL : GF_ISOM_MEDIA_AUDIO;
trak->Media->information = (GF_MediaInformationBox *) gf_isom_box_new_parent(&trak->Media->child_boxes, GF_ISOM_BOX_TYPE_MINF);
if (!trak->Media->information) return GF_OUT_OF_MEM;
trak->Media->information->sampleTable = (GF_SampleTableBox *) gf_isom_box_new_parent(&trak->Media->information->child_boxes, GF_ISOM_BOX_TYPE_STBL);
if (!trak->Media->information->sampleTable) return GF_OUT_OF_MEM;
stbl = trak->Media->information->sampleTable;
stbl->SampleSize = (GF_SampleSizeBox *) gf_isom_box_new_parent(&stbl->child_boxes, GF_ISOM_BOX_TYPE_STSZ);
if (!stbl->SampleSize) return GF_OUT_OF_MEM;
stbl->TimeToSample = (GF_TimeToSampleBox *) gf_isom_box_new_parent(&stbl->child_boxes, GF_ISOM_BOX_TYPE_STTS);
if (!stbl->TimeToSample) return GF_OUT_OF_MEM;
stbl->ChunkOffset = (GF_Box *) gf_isom_box_new_parent(&stbl->child_boxes, GF_ISOM_BOX_TYPE_STCO);
if (!stbl->ChunkOffset) return GF_OUT_OF_MEM;
stbl->SampleToChunk = (GF_SampleToChunkBox *) gf_isom_box_new_parent(&stbl->child_boxes, GF_ISOM_BOX_TYPE_STSC);
if (!stbl->SampleToChunk) return GF_OUT_OF_MEM;
stbl->SyncSample = (GF_SyncSampleBox *) gf_isom_box_new_parent(&stbl->child_boxes, GF_ISOM_BOX_TYPE_STSS);
if (!stbl->SyncSample) return GF_OUT_OF_MEM;
stbl->SampleDescription = (GF_SampleDescriptionBox *) gf_isom_box_new_parent(&stbl->child_boxes, GF_ISOM_BOX_TYPE_STSD);
if (!stbl->SampleDescription) return GF_OUT_OF_MEM;
trak->dts_at_seg_start = tfdt;
trak->dts_at_next_frag_start = tfdt;
if (!stricmp(sz4cc, "H264") || !stricmp(sz4cc, "AVC1")) {
#ifndef GPAC_DISABLE_AV_PARSERS
u32 pos = 0;
u32 end, sc_size=0;
#endif
GF_MPEGVisualSampleEntryBox *avc = (GF_MPEGVisualSampleEntryBox *) gf_isom_box_new_parent(&stbl->SampleDescription->child_boxes, GF_ISOM_BOX_TYPE_AVC1);
if (!avc) return GF_OUT_OF_MEM;
avc->avc_config = (GF_AVCConfigurationBox *) gf_isom_box_new_parent(&avc->child_boxes, GF_ISOM_BOX_TYPE_AVCC);
if (!avc->avc_config) return GF_OUT_OF_MEM;
avc->Width = width;
avc->Height = height;
avc->avc_config->config = gf_odf_avc_cfg_new();
avc->avc_config->config->nal_unit_size = nal_len;
avc->avc_config->config->configurationVersion = 1;
#ifndef GPAC_DISABLE_AV_PARSERS
//locate pps and sps
gf_media_nalu_next_start_code((u8 *) CodecParams, CodecParamLen, &sc_size);
pos += sc_size;
while (pos<CodecParamLen) {
GF_NALUFFParam *slc;
u8 nal_type;
char *nal = &CodecParams[pos];
end = gf_media_nalu_next_start_code(nal, CodecParamLen-pos, &sc_size);
if (!end) end = CodecParamLen;
GF_SAFEALLOC(slc, GF_NALUFFParam);
if (!slc) break;
slc->size = end;
slc->data = gf_malloc(sizeof(char)*slc->size);
if (!slc->data) return GF_OUT_OF_MEM;
memcpy(slc->data, nal, sizeof(char)*slc->size);
nal_type = nal[0] & 0x1F;
if (nal_type == GF_AVC_NALU_SEQ_PARAM) {
/* AVCState avcc;
u32 idx = gf_avc_read_sps(slc->data, slc->size, &avcc, 0, NULL);
avc->avc_config->config->profile_compatibility = avcc.sps[idx].prof_compat;
avc->avc_config->config->AVCProfileIndication = avcc.sps[idx].profile_idc;
avc->avc_config->config->AVCLevelIndication = avcc.sps[idx].level_idc;
avc->avc_config->config->chroma_format = avcc.sps[idx].chroma_format;
avc->avc_config->config->luma_bit_depth = 8 + avcc.sps[idx].luma_bit_depth_m8;
avc->avc_config->config->chroma_bit_depth = 8 + avcc.sps[idx].chroma_bit_depth_m8;
*/
gf_list_add(avc->avc_config->config->sequenceParameterSets, slc);
} else {
gf_list_add(avc->avc_config->config->pictureParameterSets, slc);
}
pos += slc->size + sc_size;
}
#endif
AVC_RewriteESDescriptor(avc);
}
else if (!stricmp(sz4cc, "AACL")) {
#ifndef GPAC_DISABLE_AV_PARSERS
GF_M4ADecSpecInfo aacinfo;
#endif
GF_MPEGAudioSampleEntryBox *aac = (GF_MPEGAudioSampleEntryBox *) gf_isom_box_new_parent(&stbl->SampleDescription->child_boxes, GF_ISOM_BOX_TYPE_MP4A);
if (!aac) return GF_OUT_OF_MEM;
aac->esd = (GF_ESDBox *) gf_isom_box_new_parent(&aac->child_boxes, GF_ISOM_BOX_TYPE_ESDS);
if (!aac->esd) return GF_OUT_OF_MEM;
aac->esd->desc = gf_odf_desc_esd_new(2);
if (!aac->esd->desc) return GF_OUT_OF_MEM;
#ifndef GPAC_DISABLE_AV_PARSERS
memset(&aacinfo, 0, sizeof(GF_M4ADecSpecInfo));
aacinfo.nb_chan = nb_channels;
aacinfo.base_object_type = GF_M4A_AAC_LC;
aacinfo.base_sr = sample_rate;
gf_m4a_write_config(&aacinfo, &aac->esd->desc->decoderConfig->decoderSpecificInfo->data, &aac->esd->desc->decoderConfig->decoderSpecificInfo->dataLength);
#endif
aac->esd->desc->decoderConfig->streamType = GF_STREAM_AUDIO;
aac->esd->desc->decoderConfig->objectTypeIndication = GF_CODECID_AAC_MPEG4;
aac->bitspersample = bps;
aac->samplerate_hi = sample_rate;
aac->channel_count = nb_channels;
}
return GF_OK;
} | 0 | [
"CWE-787"
]
| gpac | f0a41d178a2dc5ac185506d9fa0b0a58356b16f7 | 302,076,872,625,462,330,000,000,000,000,000,000,000 | 213 | fixed #2120 |
static ssize_t ipmi_type_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct smi_info *smi_info = dev_get_drvdata(dev);
return snprintf(buf, 10, "%s\n", si_to_str[smi_info->io.si_type]);
} | 0 | [
"CWE-416"
]
| linux | 401e7e88d4ef80188ffa07095ac00456f901b8c4 | 232,406,326,519,498,800,000,000,000,000,000,000,000 | 8 | ipmi_si: fix use-after-free of resource->name
When we excute the following commands, we got oops
rmmod ipmi_si
cat /proc/ioports
[ 1623.482380] Unable to handle kernel paging request at virtual address ffff00000901d478
[ 1623.482382] Mem abort info:
[ 1623.482383] ESR = 0x96000007
[ 1623.482385] Exception class = DABT (current EL), IL = 32 bits
[ 1623.482386] SET = 0, FnV = 0
[ 1623.482387] EA = 0, S1PTW = 0
[ 1623.482388] Data abort info:
[ 1623.482389] ISV = 0, ISS = 0x00000007
[ 1623.482390] CM = 0, WnR = 0
[ 1623.482393] swapper pgtable: 4k pages, 48-bit VAs, pgdp = 00000000d7d94a66
[ 1623.482395] [ffff00000901d478] pgd=000000dffbfff003, pud=000000dffbffe003, pmd=0000003f5d06e003, pte=0000000000000000
[ 1623.482399] Internal error: Oops: 96000007 [#1] SMP
[ 1623.487407] Modules linked in: ipmi_si(E) nls_utf8 isofs rpcrdma ib_iser ib_srpt target_core_mod ib_srp scsi_transport_srp ib_ipoib rdma_ucm ib_umad rdma_cm ib_cm dm_mirror dm_region_hash dm_log iw_cm dm_mod aes_ce_blk crypto_simd cryptd aes_ce_cipher ses ghash_ce sha2_ce enclosure sha256_arm64 sg sha1_ce hisi_sas_v2_hw hibmc_drm sbsa_gwdt hisi_sas_main ip_tables mlx5_ib ib_uverbs marvell ib_core mlx5_core ixgbe mdio hns_dsaf ipmi_devintf hns_enet_drv ipmi_msghandler hns_mdio [last unloaded: ipmi_si]
[ 1623.532410] CPU: 30 PID: 11438 Comm: cat Kdump: loaded Tainted: G E 5.0.0-rc3+ #168
[ 1623.541498] Hardware name: Huawei TaiShan 2280 /BC11SPCD, BIOS 1.37 11/21/2017
[ 1623.548822] pstate: a0000005 (NzCv daif -PAN -UAO)
[ 1623.553684] pc : string+0x28/0x98
[ 1623.557040] lr : vsnprintf+0x368/0x5e8
[ 1623.560837] sp : ffff000013213a80
[ 1623.564191] x29: ffff000013213a80 x28: ffff00001138abb5
[ 1623.569577] x27: ffff000013213c18 x26: ffff805f67d06049
[ 1623.574963] x25: 0000000000000000 x24: ffff00001138abb5
[ 1623.580349] x23: 0000000000000fb7 x22: ffff0000117ed000
[ 1623.585734] x21: ffff000011188fd8 x20: ffff805f67d07000
[ 1623.591119] x19: ffff805f67d06061 x18: ffffffffffffffff
[ 1623.596505] x17: 0000000000000200 x16: 0000000000000000
[ 1623.601890] x15: ffff0000117ed748 x14: ffff805f67d07000
[ 1623.607276] x13: ffff805f67d0605e x12: 0000000000000000
[ 1623.612661] x11: 0000000000000000 x10: 0000000000000000
[ 1623.618046] x9 : 0000000000000000 x8 : 000000000000000f
[ 1623.623432] x7 : ffff805f67d06061 x6 : fffffffffffffffe
[ 1623.628817] x5 : 0000000000000012 x4 : ffff00000901d478
[ 1623.634203] x3 : ffff0a00ffffff04 x2 : ffff805f67d07000
[ 1623.639588] x1 : ffff805f67d07000 x0 : ffffffffffffffff
[ 1623.644974] Process cat (pid: 11438, stack limit = 0x000000008d4cbc10)
[ 1623.651592] Call trace:
[ 1623.654068] string+0x28/0x98
[ 1623.657071] vsnprintf+0x368/0x5e8
[ 1623.660517] seq_vprintf+0x70/0x98
[ 1623.668009] seq_printf+0x7c/0xa0
[ 1623.675530] r_show+0xc8/0xf8
[ 1623.682558] seq_read+0x330/0x440
[ 1623.689877] proc_reg_read+0x78/0xd0
[ 1623.697346] __vfs_read+0x60/0x1a0
[ 1623.704564] vfs_read+0x94/0x150
[ 1623.711339] ksys_read+0x6c/0xd8
[ 1623.717939] __arm64_sys_read+0x24/0x30
[ 1623.725077] el0_svc_common+0x120/0x148
[ 1623.732035] el0_svc_handler+0x30/0x40
[ 1623.738757] el0_svc+0x8/0xc
[ 1623.744520] Code: d1000406 aa0103e2 54000149 b4000080 (39400085)
[ 1623.753441] ---[ end trace f91b6a4937de9835 ]---
[ 1623.760871] Kernel panic - not syncing: Fatal exception
[ 1623.768935] SMP: stopping secondary CPUs
[ 1623.775718] Kernel Offset: disabled
[ 1623.781998] CPU features: 0x002,21006008
[ 1623.788777] Memory Limit: none
[ 1623.798329] Starting crashdump kernel...
[ 1623.805202] Bye!
If io_setup is called successful in try_smi_init() but try_smi_init()
goes out_err before calling ipmi_register_smi(), so ipmi_unregister_smi()
will not be called while removing module. It leads to the resource that
allocated in io_setup() can not be freed, but the name(DEVICE_NAME) of
resource is freed while removing the module. It causes use-after-free
when cat /proc/ioports.
Fix this by calling io_cleanup() while try_smi_init() goes to out_err.
and don't call io_cleanup() until io_setup() returns successful to avoid
warning prints.
Fixes: 93c303d2045b ("ipmi_si: Clean up shutdown a bit")
Cc: [email protected]
Reported-by: NuoHan Qiao <[email protected]>
Suggested-by: Corey Minyard <[email protected]>
Signed-off-by: Yang Yingliang <[email protected]>
Signed-off-by: Corey Minyard <[email protected]> |
bool userfaultfd_remove(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
struct mm_struct *mm = vma->vm_mm;
struct userfaultfd_ctx *ctx;
struct userfaultfd_wait_queue ewq;
ctx = vma->vm_userfaultfd_ctx.ctx;
if (!ctx || !(ctx->features & UFFD_FEATURE_EVENT_REMOVE))
return true;
userfaultfd_ctx_get(ctx);
up_read(&mm->mmap_sem);
msg_init(&ewq.msg);
ewq.msg.event = UFFD_EVENT_REMOVE;
ewq.msg.arg.remove.start = start;
ewq.msg.arg.remove.end = end;
userfaultfd_event_wait_completion(ctx, &ewq);
return false;
} | 0 | [
"CWE-416"
]
| linux | 384632e67e0829deb8015ee6ad916b180049d252 | 325,838,241,726,593,500,000,000,000,000,000,000,000 | 24 | userfaultfd: non-cooperative: fix fork use after free
When reading the event from the uffd, we put it on a temporary
fork_event list to detect if we can still access it after releasing and
retaking the event_wqh.lock.
If fork aborts and removes the event from the fork_event all is fine as
long as we're still in the userfault read context and fork_event head is
still alive.
We've to put the event allocated in the fork kernel stack, back from
fork_event list-head to the event_wqh head, before returning from
userfaultfd_ctx_read, because the fork_event head lifetime is limited to
the userfaultfd_ctx_read stack lifetime.
Forgetting to move the event back to its event_wqh place then results in
__remove_wait_queue(&ctx->event_wqh, &ewq->wq); in
userfaultfd_event_wait_completion to remove it from a head that has been
already freed from the reader stack.
This could only happen if resolve_userfault_fork failed (for example if
there are no file descriptors available to allocate the fork uffd). If
it succeeded it was put back correctly.
Furthermore, after find_userfault_evt receives a fork event, the forked
userfault context in fork_nctx and uwq->msg.arg.reserved.reserved1 can
be released by the fork thread as soon as the event_wqh.lock is
released. Taking a reference on the fork_nctx before dropping the lock
prevents an use after free in resolve_userfault_fork().
If the fork side aborted and it already released everything, we still
try to succeed resolve_userfault_fork(), if possible.
Fixes: 893e26e61d04eac9 ("userfaultfd: non-cooperative: Add fork() event")
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Andrea Arcangeli <[email protected]>
Reported-by: Mark Rutland <[email protected]>
Tested-by: Mark Rutland <[email protected]>
Cc: Pavel Emelyanov <[email protected]>
Cc: Mike Rapoport <[email protected]>
Cc: "Dr. David Alan Gilbert" <[email protected]>
Cc: Mike Kravetz <[email protected]>
Cc: <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
sp_name *LEX::make_sp_name_package_routine(THD *thd, const LEX_CSTRING *name)
{
sp_name *res= make_sp_name(thd, name);
if (likely(res) && unlikely(strchr(res->m_name.str, '.')))
{
my_error(ER_SP_WRONG_NAME, MYF(0), res->m_name.str);
res= NULL;
}
return res;
} | 0 | [
"CWE-703"
]
| server | 39feab3cd31b5414aa9b428eaba915c251ac34a2 | 203,874,842,816,000,280,000,000,000,000,000,000,000 | 10 | MDEV-26412 Server crash in Item_field::fix_outer_field for INSERT SELECT
IF an INSERT/REPLACE SELECT statement contained an ON expression in the top
level select and this expression used a subquery with a column reference
that could not be resolved then an attempt to resolve this reference as
an outer reference caused a crash of the server. This happened because the
outer context field in the Name_resolution_context structure was not set
to NULL for such references. Rather it pointed to the first element in
the select_stack.
Note that starting from 10.4 we cannot use the SELECT_LEX::outer_select()
method when parsing a SELECT construct.
Approved by Oleksandr Byelkin <[email protected]> |
static int addrconf_ifid_eui64(u8 *eui, struct net_device *dev)
{
if (dev->addr_len != IEEE802154_ADDR_LEN)
return -1;
memcpy(eui, dev->dev_addr, 8);
eui[0] ^= 2;
return 0;
} | 0 | []
| net | 4b08a8f1bd8cb4541c93ec170027b4d0782dab52 | 309,653,564,925,621,760,000,000,000,000,000,000,000 | 8 | ipv6: remove max_addresses check from ipv6_create_tempaddr
Because of the max_addresses check attackers were able to disable privacy
extensions on an interface by creating enough autoconfigured addresses:
<http://seclists.org/oss-sec/2012/q4/292>
But the check is not actually needed: max_addresses protects the
kernel to install too many ipv6 addresses on an interface and guards
addrconf_prefix_rcv to install further addresses as soon as this limit
is reached. We only generate temporary addresses in direct response of
a new address showing up. As soon as we filled up the maximum number of
addresses of an interface, we stop installing more addresses and thus
also stop generating more temp addresses.
Even if the attacker tries to generate a lot of temporary addresses
by announcing a prefix and removing it again (lifetime == 0) we won't
install more temp addresses, because the temporary addresses do count
to the maximum number of addresses, thus we would stop installing new
autoconfigured addresses when the limit is reached.
This patch fixes CVE-2013-0343 (but other layer-2 attacks are still
possible).
Thanks to Ding Tianhong to bring this topic up again.
Cc: Ding Tianhong <[email protected]>
Cc: George Kargiotakis <[email protected]>
Cc: P J P <[email protected]>
Cc: YOSHIFUJI Hideaki <[email protected]>
Signed-off-by: Hannes Frederic Sowa <[email protected]>
Acked-by: Ding Tianhong <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
struct sctp_chunk *sctp_make_ifwdtsn(const struct sctp_association *asoc,
__u32 new_cum_tsn, size_t nstreams,
struct sctp_ifwdtsn_skip *skiplist)
{
struct sctp_chunk *retval = NULL;
struct sctp_ifwdtsn_hdr ftsn_hdr;
size_t hint;
hint = (nstreams + 1) * sizeof(__u32);
retval = sctp_make_control(asoc, SCTP_CID_I_FWD_TSN, 0, hint,
GFP_ATOMIC);
if (!retval)
return NULL;
ftsn_hdr.new_cum_tsn = htonl(new_cum_tsn);
retval->subh.ifwdtsn_hdr =
sctp_addto_chunk(retval, sizeof(ftsn_hdr), &ftsn_hdr);
sctp_addto_chunk(retval, nstreams * sizeof(skiplist[0]), skiplist);
return retval;
} | 0 | [
"CWE-20"
]
| linux | 07f2c7ab6f8d0a7e7c5764c4e6cc9c52951b9d9c | 252,154,605,942,984,400,000,000,000,000,000,000,000 | 23 | sctp: verify size of a new chunk in _sctp_make_chunk()
When SCTP makes INIT or INIT_ACK packet the total chunk length
can exceed SCTP_MAX_CHUNK_LEN which leads to kernel panic when
transmitting these packets, e.g. the crash on sending INIT_ACK:
[ 597.804948] skbuff: skb_over_panic: text:00000000ffae06e4 len:120168
put:120156 head:000000007aa47635 data:00000000d991c2de
tail:0x1d640 end:0xfec0 dev:<NULL>
...
[ 597.976970] ------------[ cut here ]------------
[ 598.033408] kernel BUG at net/core/skbuff.c:104!
[ 600.314841] Call Trace:
[ 600.345829] <IRQ>
[ 600.371639] ? sctp_packet_transmit+0x2095/0x26d0 [sctp]
[ 600.436934] skb_put+0x16c/0x200
[ 600.477295] sctp_packet_transmit+0x2095/0x26d0 [sctp]
[ 600.540630] ? sctp_packet_config+0x890/0x890 [sctp]
[ 600.601781] ? __sctp_packet_append_chunk+0x3b4/0xd00 [sctp]
[ 600.671356] ? sctp_cmp_addr_exact+0x3f/0x90 [sctp]
[ 600.731482] sctp_outq_flush+0x663/0x30d0 [sctp]
[ 600.788565] ? sctp_make_init+0xbf0/0xbf0 [sctp]
[ 600.845555] ? sctp_check_transmitted+0x18f0/0x18f0 [sctp]
[ 600.912945] ? sctp_outq_tail+0x631/0x9d0 [sctp]
[ 600.969936] sctp_cmd_interpreter.isra.22+0x3be1/0x5cb0 [sctp]
[ 601.041593] ? sctp_sf_do_5_1B_init+0x85f/0xc30 [sctp]
[ 601.104837] ? sctp_generate_t1_cookie_event+0x20/0x20 [sctp]
[ 601.175436] ? sctp_eat_data+0x1710/0x1710 [sctp]
[ 601.233575] sctp_do_sm+0x182/0x560 [sctp]
[ 601.284328] ? sctp_has_association+0x70/0x70 [sctp]
[ 601.345586] ? sctp_rcv+0xef4/0x32f0 [sctp]
[ 601.397478] ? sctp6_rcv+0xa/0x20 [sctp]
...
Here the chunk size for INIT_ACK packet becomes too big, mostly
because of the state cookie (INIT packet has large size with
many address parameters), plus additional server parameters.
Later this chunk causes the panic in skb_put_data():
skb_packet_transmit()
sctp_packet_pack()
skb_put_data(nskb, chunk->skb->data, chunk->skb->len);
'nskb' (head skb) was previously allocated with packet->size
from u16 'chunk->chunk_hdr->length'.
As suggested by Marcelo we should check the chunk's length in
_sctp_make_chunk() before trying to allocate skb for it and
discard a chunk if its size bigger than SCTP_MAX_CHUNK_LEN.
Signed-off-by: Alexey Kodanev <[email protected]>
Acked-by: Marcelo Ricardo Leitner <[email protected]>
Acked-by: Neil Horman <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
process_cmd_clients(char *line)
{
CMD_Request request;
CMD_Reply reply;
SubnetToDo *head, *todo, *tail, *p, *next_node, *new_node;
int i, j, nets_looked_up, clients_looked_up;
int word;
unsigned long mask;
unsigned long ip, bits;
unsigned long client_hits;
unsigned long peer_hits;
unsigned long cmd_hits_auth;
unsigned long cmd_hits_normal;
unsigned long cmd_hits_bad;
unsigned long last_ntp_hit_ago;
unsigned long last_cmd_hit_ago;
char hostname_buf[50];
int n_replies;
head = todo = MallocNew(SubnetToDo);
todo->next = NULL;
/* Set up initial query = root subnet */
todo->ip = 0;
todo->bits = 0;
tail = todo;
do {
request.command = htons(REQ_SUBNETS_ACCESSED);
/* Build list of subnets to examine */
i=0;
p=todo;
while((i < MAX_SUBNETS_ACCESSED) &&
p &&
(p->bits < 32)) {
request.data.subnets_accessed.subnets[i].ip = htonl(p->ip);
request.data.subnets_accessed.subnets[i].bits_specd = htonl(p->bits);
p = p->next;
i++;
}
nets_looked_up = i;
if (nets_looked_up == 0) {
/* No subnets need examining */
break;
}
request.data.subnets_accessed.n_subnets = htonl(nets_looked_up);
if (request_reply(&request, &reply, RPY_SUBNETS_ACCESSED, 0)) {
n_replies = ntohl(reply.data.subnets_accessed.n_subnets);
for (j=0; j<n_replies; j++) {
ip = ntohl(reply.data.subnets_accessed.subnets[j].ip);
bits = ntohl(reply.data.subnets_accessed.subnets[j].bits_specd);
for (i=0; i<256; i++) {
word = i/32;
mask = 1UL << (i%32);
if (ntohl(reply.data.subnets_accessed.subnets[j].bitmap[word]) & mask) {
/* Add this subnet to the todo list */
new_node = MallocNew(SubnetToDo);
new_node->next = NULL;
new_node->bits = bits + 8;
new_node->ip = ip | (i << (24 - bits));
tail->next = new_node;
tail = new_node;
#if 0
printf("%08lx %2d %3d %08lx\n", ip, bits, i, new_node->ip);
#endif
}
}
}
/* Skip the todo pointer forwards by the number of nets looked
up. Can't do this earlier, because we might have to point
at the next layer of subnets that have only just been
concatenated to the linked list. */
for (i=0; i<nets_looked_up; i++) {
todo = todo->next;
}
}
} else {
return;
}
} while (1); /* keep going until all subnets have been expanded, | 0 | [
"CWE-189"
]
| chrony | 7712455d9aa33d0db0945effaa07e900b85987b1 | 166,704,704,772,175,920,000,000,000,000,000,000,000 | 89 | Fix buffer overflow when processing crafted command packets
When the length of the REQ_SUBNETS_ACCESSED, REQ_CLIENT_ACCESSES
command requests and the RPY_SUBNETS_ACCESSED, RPY_CLIENT_ACCESSES,
RPY_CLIENT_ACCESSES_BY_INDEX, RPY_MANUAL_LIST command replies is
calculated, the number of items stored in the packet is not validated.
A crafted command request/reply can be used to crash the server/client.
Only clients allowed by cmdallow (by default only localhost) can crash
the server.
With chrony versions 1.25 and 1.26 this bug has a smaller security
impact as the server requires the clients to be authenticated in order
to process the subnet and client accesses commands. In 1.27 and 1.28,
however, the invalid calculated length is included also in the
authentication check which may cause another crash. |
GF_Box *csgp_box_new()
{
ISOM_DECL_BOX_ALLOC(GF_CompactSampleGroupBox, GF_ISOM_BOX_TYPE_CSGP);
return (GF_Box *)tmp;
} | 0 | [
"CWE-787"
]
| gpac | 77510778516803b7f7402d7423c6d6bef50254c3 | 335,278,014,472,036,900,000,000,000,000,000,000,000 | 5 | fixed #2255 |
int gnutls_x509_ext_import_subject_alt_names(const gnutls_datum_t * ext,
gnutls_subject_alt_names_t sans,
unsigned int flags)
{
ASN1_TYPE c2 = ASN1_TYPE_EMPTY;
int result, ret;
unsigned int i;
gnutls_datum_t san, othername_oid;
unsigned type;
result =
asn1_create_element(_gnutls_get_pkix(), "PKIX1.GeneralNames", &c2);
if (result != ASN1_SUCCESS) {
gnutls_assert();
return _gnutls_asn2err(result);
}
result = asn1_der_decoding(&c2, ext->data, ext->size, NULL);
if (result != ASN1_SUCCESS) {
gnutls_assert();
ret = _gnutls_asn2err(result);
goto cleanup;
}
i = 0;
do {
san.data = NULL;
san.size = 0;
othername_oid.data = NULL;
ret = _gnutls_parse_general_name2(c2, "", i, &san, &type, 0);
if (ret < 0)
break;
if (type == GNUTLS_SAN_OTHERNAME) {
ret =
_gnutls_parse_general_name2(c2, "", i,
&othername_oid,
NULL, 1);
if (ret < 0)
break;
} else if (san.size == 0 || san.data == NULL) {
ret = gnutls_assert_val(GNUTLS_E_X509_UNKNOWN_SAN);
break;
}
ret = subject_alt_names_set(&sans->names, &sans->size,
type, &san,
(char *)othername_oid.data);
if (ret < 0)
break;
i++;
} while (ret >= 0);
sans->size = i;
if (ret < 0 && ret != GNUTLS_E_REQUESTED_DATA_NOT_AVAILABLE) {
gnutls_free(san.data);
gnutls_free(othername_oid.data);
gnutls_assert();
goto cleanup;
}
ret = 0;
cleanup:
asn1_delete_structure(&c2);
return ret;
} | 0 | []
| gnutls | d6972be33264ecc49a86cd0958209cd7363af1e9 | 79,008,202,124,744,110,000,000,000,000,000,000,000 | 69 | eliminated double-free in the parsing of dist points
Reported by Robert Święcki. |
static int selinux_task_setpgid(struct task_struct *p, pid_t pgid)
{
return avc_has_perm(&selinux_state,
current_sid(), task_sid_obj(p), SECCLASS_PROCESS,
PROCESS__SETPGID, NULL);
} | 0 | [
"CWE-416"
]
| linux | a3727a8bac0a9e77c70820655fd8715523ba3db7 | 269,178,266,086,537,500,000,000,000,000,000,000,000 | 6 | selinux,smack: fix subjective/objective credential use mixups
Jann Horn reported a problem with commit eb1231f73c4d ("selinux:
clarify task subjective and objective credentials") where some LSM
hooks were attempting to access the subjective credentials of a task
other than the current task. Generally speaking, it is not safe to
access another task's subjective credentials and doing so can cause
a number of problems.
Further, while looking into the problem, I realized that Smack was
suffering from a similar problem brought about by a similar commit
1fb057dcde11 ("smack: differentiate between subjective and objective
task credentials").
This patch addresses this problem by restoring the use of the task's
objective credentials in those cases where the task is other than the
current executing task. Not only does this resolve the problem
reported by Jann, it is arguably the correct thing to do in these
cases.
Cc: [email protected]
Fixes: eb1231f73c4d ("selinux: clarify task subjective and objective credentials")
Fixes: 1fb057dcde11 ("smack: differentiate between subjective and objective task credentials")
Reported-by: Jann Horn <[email protected]>
Acked-by: Eric W. Biederman <[email protected]>
Acked-by: Casey Schaufler <[email protected]>
Signed-off-by: Paul Moore <[email protected]> |
pthread_equal(pthread_t t1, pthread_t t2)
{
return t1 == t2;
} | 0 | [
"CWE-703",
"CWE-125"
]
| portable | 17c88164016df821df2dff4b2b1291291ec4f28a | 254,569,499,115,488,000,000,000,000,000,000,000,000 | 4 | Make pthread_mutex static initialisation work on Windows.
This takes the dynamic initialisation code added to CRYPTO_lock() in e5081719
and applies it to the Window's pthread_mutex implementation. This allows for
PTHREAD_MUTEX_INITIALIZER to be used on Windows.
bcook has agreed to place this code in the public domain (as per the rest of
the code in pthread.h). |
static void checkpoint(void)
{
checkpoint_requested = 0;
if (object_count) {
cycle_packfile();
dump_branches();
dump_tags();
dump_marks();
}
} | 0 | [
"CWE-119",
"CWE-787"
]
| git | 34fa79a6cde56d6d428ab0d3160cb094ebad3305 | 269,936,201,565,932,700,000,000,000,000,000,000,000 | 10 | prefer memcpy to strcpy
When we already know the length of a string (e.g., because
we just malloc'd to fit it), it's nicer to use memcpy than
strcpy, as it makes it more obvious that we are not going to
overflow the buffer (because the size we pass matches the
size in the allocation).
This also eliminates calls to strcpy, which make auditing
the code base harder.
Signed-off-by: Jeff King <[email protected]>
Signed-off-by: Junio C Hamano <[email protected]> |
static int on_transaction_timeout(sd_event_source *s, usec_t usec, void *userdata) {
DnsTransaction *t = userdata;
assert(s);
assert(t);
if (!t->initial_jitter_scheduled || t->initial_jitter_elapsed) {
/* Timeout reached? Increase the timeout for the server used */
switch (t->scope->protocol) {
case DNS_PROTOCOL_DNS:
assert(t->server);
dns_server_packet_lost(t->server, t->stream ? IPPROTO_TCP : IPPROTO_UDP, t->current_feature_level);
break;
case DNS_PROTOCOL_LLMNR:
case DNS_PROTOCOL_MDNS:
dns_scope_packet_lost(t->scope, usec - t->start_usec);
break;
default:
assert_not_reached("Invalid DNS protocol.");
}
if (t->initial_jitter_scheduled)
t->initial_jitter_elapsed = true;
}
log_debug("Timeout reached on transaction %" PRIu16 ".", t->id);
dns_transaction_retry(t, true);
return 0;
} | 0 | [
"CWE-416"
]
| systemd | 904dcaf9d4933499f8334859f52ea8497f2d24ff | 239,041,175,558,732,300,000,000,000,000,000,000,000 | 33 | resolved: take particular care when detaching DnsServer from its default stream
DnsStream and DnsServer have a symbiotic relationship: one DnsStream is
the current "default" stream of the server (and thus reffed by it), but
each stream also refs the server it is connected to. This cyclic
dependency can result in weird situations: when one is
destroyed/unlinked/stopped it needs to unregister itself from the other,
but doing this will trigger unregistration of the other. Hence, let's
make sure we unregister the stream from the server before destroying it,
to break this cycle.
Most likely fixes: #10725 |
sctp_disposition_t sctp_sf_eat_fwd_tsn(const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
struct sctp_fwdtsn_hdr *fwdtsn_hdr;
struct sctp_fwdtsn_skip *skip;
__u16 len;
__u32 tsn;
if (!sctp_vtag_verify(chunk, asoc)) {
sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
SCTP_NULL());
return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
}
/* Make sure that the FORWARD_TSN chunk has valid length. */
if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_fwdtsn_chunk)))
return sctp_sf_violation_chunklen(ep, asoc, type, arg,
commands);
fwdtsn_hdr = (struct sctp_fwdtsn_hdr *)chunk->skb->data;
chunk->subh.fwdtsn_hdr = fwdtsn_hdr;
len = ntohs(chunk->chunk_hdr->length);
len -= sizeof(struct sctp_chunkhdr);
skb_pull(chunk->skb, len);
tsn = ntohl(fwdtsn_hdr->new_cum_tsn);
SCTP_DEBUG_PRINTK("%s: TSN 0x%x.\n", __func__, tsn);
/* The TSN is too high--silently discard the chunk and count on it
* getting retransmitted later.
*/
if (sctp_tsnmap_check(&asoc->peer.tsn_map, tsn) < 0)
goto discard_noforce;
/* Silently discard the chunk if stream-id is not valid */
sctp_walk_fwdtsn(skip, chunk) {
if (ntohs(skip->stream) >= asoc->c.sinit_max_instreams)
goto discard_noforce;
}
sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_FWDTSN, SCTP_U32(tsn));
if (len > sizeof(struct sctp_fwdtsn_hdr))
sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_FWDTSN,
SCTP_CHUNK(chunk));
/* Count this as receiving DATA. */
if (asoc->autoclose) {
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE));
}
/* FIXME: For now send a SACK, but DATA processing may
* send another.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, SCTP_NOFORCE());
return SCTP_DISPOSITION_CONSUME;
discard_noforce:
return SCTP_DISPOSITION_DISCARD;
} | 0 | [
"CWE-119"
]
| linux-2.6 | 9fcb95a105758b81ef0131cd18e2db5149f13e95 | 150,774,253,065,721,530,000,000,000,000,000,000,000 | 65 | sctp: Avoid memory overflow while FWD-TSN chunk is received with bad stream ID
If FWD-TSN chunk is received with bad stream ID, the sctp will not do the
validity check, this may cause memory overflow when overwrite the TSN of
the stream ID.
The FORWARD-TSN chunk is like this:
FORWARD-TSN chunk
Type = 192
Flags = 0
Length = 172
NewTSN = 99
Stream = 10000
StreamSequence = 0xFFFF
This patch fix this problem by discard the chunk if stream ID is not
less than MIS.
Signed-off-by: Wei Yongjun <[email protected]>
Signed-off-by: Vlad Yasevich <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
void WebContents::OnAudioStateChanged(bool audible) {
Emit("-audio-state-changed", audible);
} | 0 | []
| electron | e9fa834757f41c0b9fe44a4dffe3d7d437f52d34 | 189,330,150,827,729,730,000,000,000,000,000,000,000 | 3 | fix: ensure ElectronBrowser mojo service is only bound to appropriate render frames (#33344)
* fix: ensure ElectronBrowser mojo service is only bound to authorized render frames
Notes: no-notes
* refactor: extract electron API IPC to its own mojo interface
* fix: just check main frame not primary main frame
Co-authored-by: Samuel Attard <[email protected]>
Co-authored-by: Samuel Attard <[email protected]> |
/*
 * Module init: compile-time verification that struct io_uring_sqe matches
 * the userspace ABI layout (size 64 and the offset/size of every field),
 * followed by creation of the io_kiocb request slab cache.
 */
static int __init io_uring_init(void)
{
/* Assert that field 'ename' of 'stype' sits at byte offset 'eoffset'
 * and has exactly the size of 'etype'; fails the build otherwise. */
#define __BUILD_BUG_VERIFY_ELEMENT(stype, eoffset, etype, ename) do { \
BUILD_BUG_ON(offsetof(stype, ename) != eoffset); \
BUILD_BUG_ON(sizeof(etype) != sizeof_field(stype, ename)); \
} while (0)
#define BUILD_BUG_SQE_ELEM(eoffset, etype, ename) \
__BUILD_BUG_VERIFY_ELEMENT(struct io_uring_sqe, eoffset, etype, ename)
/* Pin the SQE size and the layout of each (often unioned) member. */
BUILD_BUG_ON(sizeof(struct io_uring_sqe) != 64);
BUILD_BUG_SQE_ELEM(0, __u8, opcode);
BUILD_BUG_SQE_ELEM(1, __u8, flags);
BUILD_BUG_SQE_ELEM(2, __u16, ioprio);
BUILD_BUG_SQE_ELEM(4, __s32, fd);
BUILD_BUG_SQE_ELEM(8, __u64, off);
BUILD_BUG_SQE_ELEM(8, __u64, addr2);
BUILD_BUG_SQE_ELEM(16, __u64, addr);
BUILD_BUG_SQE_ELEM(16, __u64, splice_off_in);
BUILD_BUG_SQE_ELEM(24, __u32, len);
BUILD_BUG_SQE_ELEM(28, __kernel_rwf_t, rw_flags);
BUILD_BUG_SQE_ELEM(28, /* compat */ int, rw_flags);
BUILD_BUG_SQE_ELEM(28, /* compat */ __u32, rw_flags);
BUILD_BUG_SQE_ELEM(28, __u32, fsync_flags);
BUILD_BUG_SQE_ELEM(28, /* compat */ __u16, poll_events);
BUILD_BUG_SQE_ELEM(28, __u32, poll32_events);
BUILD_BUG_SQE_ELEM(28, __u32, sync_range_flags);
BUILD_BUG_SQE_ELEM(28, __u32, msg_flags);
BUILD_BUG_SQE_ELEM(28, __u32, timeout_flags);
BUILD_BUG_SQE_ELEM(28, __u32, accept_flags);
BUILD_BUG_SQE_ELEM(28, __u32, cancel_flags);
BUILD_BUG_SQE_ELEM(28, __u32, open_flags);
BUILD_BUG_SQE_ELEM(28, __u32, statx_flags);
BUILD_BUG_SQE_ELEM(28, __u32, fadvise_advice);
BUILD_BUG_SQE_ELEM(28, __u32, splice_flags);
BUILD_BUG_SQE_ELEM(32, __u64, user_data);
BUILD_BUG_SQE_ELEM(40, __u16, buf_index);
BUILD_BUG_SQE_ELEM(40, __u16, buf_group);
BUILD_BUG_SQE_ELEM(42, __u16, personality);
BUILD_BUG_SQE_ELEM(44, __s32, splice_fd_in);
/* The old files_update structure must stay interchangeable with the
 * newer rsrc_update ones for compat reasons. */
BUILD_BUG_ON(sizeof(struct io_uring_files_update) !=
sizeof(struct io_uring_rsrc_update));
BUILD_BUG_ON(sizeof(struct io_uring_rsrc_update) >
sizeof(struct io_uring_rsrc_update2));
/* should fit into one byte */
BUILD_BUG_ON(SQE_VALID_FLAGS >= (1 << 8));
BUILD_BUG_ON(ARRAY_SIZE(io_op_defs) != IORING_OP_LAST);
BUILD_BUG_ON(__REQ_F_LAST_BIT >= 8 * sizeof(int));
/* Per-request allocation cache; SLAB_PANIC means init cannot fail here. */
req_cachep = KMEM_CACHE(io_kiocb, SLAB_HWCACHE_ALIGN | SLAB_PANIC |
SLAB_ACCOUNT);
return 0;
} | 0 | [
]
| linux | 89c2b3b74918200e46699338d7bcc19b1ea12110 | 241,242,823,615,809,160,000,000,000,000,000,000,000 | 54 | io_uring: reexpand under-reexpanded iters
[ 74.211232] BUG: KASAN: stack-out-of-bounds in iov_iter_revert+0x809/0x900
[ 74.212778] Read of size 8 at addr ffff888025dc78b8 by task
syz-executor.0/828
[ 74.214756] CPU: 0 PID: 828 Comm: syz-executor.0 Not tainted
5.14.0-rc3-next-20210730 #1
[ 74.216525] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996),
BIOS rel-1.14.0-0-g155821a1990b-prebuilt.qemu.org 04/01/2014
[ 74.219033] Call Trace:
[ 74.219683] dump_stack_lvl+0x8b/0xb3
[ 74.220706] print_address_description.constprop.0+0x1f/0x140
[ 74.224226] kasan_report.cold+0x7f/0x11b
[ 74.226085] iov_iter_revert+0x809/0x900
[ 74.227960] io_write+0x57d/0xe40
[ 74.232647] io_issue_sqe+0x4da/0x6a80
[ 74.242578] __io_queue_sqe+0x1ac/0xe60
[ 74.245358] io_submit_sqes+0x3f6e/0x76a0
[ 74.248207] __do_sys_io_uring_enter+0x90c/0x1a20
[ 74.257167] do_syscall_64+0x3b/0x90
[ 74.257984] entry_SYSCALL_64_after_hwframe+0x44/0xae
old_size = iov_iter_count();
...
iov_iter_revert(old_size - iov_iter_count());
If iov_iter_revert() is done base on the initial size as above, and the
iter is truncated and not reexpanded in the middle, it miscalculates
borders causing problems. This trace is due to no one reexpanding after
generic_write_checks().
Now iters store how many bytes has been truncated, so reexpand them to
the initial state right before reverting.
Cc: [email protected]
Reported-by: Palash Oswal <[email protected]>
Reported-by: Sudip Mukherjee <[email protected]>
Reported-and-tested-by: [email protected]
Signed-off-by: Pavel Begunkov <[email protected]>
Signed-off-by: Al Viro <[email protected]> |
/* Return the shape's edge-bounds rectangle.
 * A separate edge-bounds record only exists for SWF_SHAPE4 shapes; for
 * every other shape version there is nothing to report, so NULL is
 * returned instead. */
SWFRect SWFShape_getEdgeBounds(SWFShape shape)
{
return (shape->useVersion == SWF_SHAPE4) ? shape->edgeBounds : NULL;
} | 0 | [
"CWE-20",
"CWE-476"
]
| libming | 6e76e8c71cb51c8ba0aa9737a636b9ac3029887f | 197,057,307,676,050,500,000,000,000,000,000,000,000 | 7 | SWFShape_setLeftFillStyle: prevent fill overflow |
/*
 * Indirect write to a device periphery (PRPH) register: the target address,
 * masked to the transport's valid PRPH range, is first latched into
 * HBUS_TARG_PRPH_WADDR, then the value is written through
 * HBUS_TARG_PRPH_WDAT.
 */
static void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr,
u32 val)
{
u32 mask = iwl_trans_pcie_prph_msk(trans);
/* NOTE(review): (3 << 24) sets control bits 24-25 in the address
 * register - presumably an access-enable/auto-increment field; confirm
 * against the hardware interface definitions. */
iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WADDR,
((addr & mask) | (3 << 24)));
iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val);
} | 0 | [
"CWE-476"
]
| linux | 8188a18ee2e48c9a7461139838048363bfce3fef | 187,270,383,216,974,260,000,000,000,000,000,000,000 | 9 | iwlwifi: pcie: fix rb_allocator workqueue allocation
We don't handle failures in the rb_allocator workqueue allocation
correctly. To fix that, move the code earlier so the cleanup is
easier and we don't have to undo all the interrupt allocations in
this case.
Signed-off-by: Johannes Berg <[email protected]>
Signed-off-by: Luca Coelho <[email protected]> |
/*
 * Send a DJ report to the Logitech receiver: copy the report payload
 * (starting at dj_report->device_index) into the first field of the
 * REPORT_ID_DJ_SHORT output report and submit it as a SET_REPORT request.
 * Returns 0 on success or -ENODEV when the receiver's report descriptor
 * does not provide the DJ short output report.
 */
static int logi_dj_recv_send_report(struct dj_receiver_dev *djrcv_dev,
struct dj_report *dj_report)
{
struct hid_device *hdev = djrcv_dev->hdev;
struct hid_report *report;
struct hid_report_enum *output_report_enum;
u8 *data = (u8 *)(&dj_report->device_index);
unsigned int i;
output_report_enum = &hdev->report_enum[HID_OUTPUT_REPORT];
report = output_report_enum->report_id_hash[REPORT_ID_DJ_SHORT];
if (!report) {
dev_err(&hdev->dev, "%s: unable to find dj report\n", __func__);
return -ENODEV;
}
/* Copy everything after the report id (DJREPORT_SHORT_LENGTH - 1
 * bytes) into the output report's value array. */
for (i = 0; i < DJREPORT_SHORT_LENGTH - 1; i++)
report->field[0]->value[i] = data[i];
hid_hw_request(hdev, report, HID_REQ_SET_REPORT);
return 0;
} | 0 | [
"CWE-119",
"CWE-787"
]
| linux | ad3e14d7c5268c2e24477c6ef54bbdf88add5d36 | 199,117,568,682,795,700,000,000,000,000,000,000,000 | 24 | HID: logitech: perform bounds checking on device_id early enough
device_index is a char type and the size of paired_dj_deivces is 7
elements, therefore proper bounds checking has to be applied to
device_index before it is used.
We are currently performing the bounds checking in
logi_dj_recv_add_djhid_device(), which is too late, as malicious device
could send REPORT_TYPE_NOTIF_DEVICE_UNPAIRED early enough and trigger the
problem in one of the report forwarding functions called from
logi_dj_raw_event().
Fix this by performing the check at the earliest possible ocasion in
logi_dj_raw_event().
Cc: [email protected]
Reported-by: Ben Hawkes <[email protected]>
Reviewed-by: Benjamin Tissoires <[email protected]>
Signed-off-by: Jiri Kosina <[email protected]> |
/*
 * Parse the URL scheme at state->ptr.  On a ':' the collected characters
 * become state->url.scheme (NUL-terminated in state->buffer) and the
 * pointer just past the ':' is returned.  Digits and '+'/'-'/'.' are only
 * valid after the first character.  On any invalid start or multibyte
 * failure the function "soft-fails": the buffer offset is reset and the
 * original position is restored/returned so the caller can reinterpret
 * the input (e.g. as a path component).
 */
static const char *parse_scheme(struct parse_state *state)
{
size_t mb;
const char *tmp = state->ptr;
do {
switch (*state->ptr) {
case ':':
/* scheme delimiter */
state->url.scheme = &state->buffer[0];
state->buffer[state->offset++] = 0;
return ++state->ptr;
case '0': case '1': case '2': case '3': case '4': case '5': case '6':
case '7': case '8': case '9':
case '+': case '-': case '.':
/* These may not start a scheme. */
if (state->ptr == tmp) {
goto softfail;
}
/* no break */
case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'G':
case 'H': case 'I': case 'J': case 'K': case 'L': case 'M': case 'N':
case 'O': case 'P': case 'Q': case 'R': case 'S': case 'T': case 'U':
case 'V': case 'W': case 'X': case 'Y': case 'Z':
case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'g':
case 'h': case 'i': case 'j': case 'k': case 'l': case 'm': case 'n':
case 'o': case 'p': case 'q': case 'r': case 's': case 't': case 'u':
case 'v': case 'w': case 'x': case 'y': case 'z':
/* scheme part */
state->buffer[state->offset++] = *state->ptr;
break;
default:
/* Anything else must be a valid multibyte sequence. */
if (!(mb = parse_mb(state, PARSE_SCHEME, state->ptr, state->end, tmp, 1))) {
goto softfail;
}
state->ptr += mb - 1;
}
} while (++state->ptr != state->end);
softfail:
/* Not a scheme after all: discard buffered bytes and rewind. */
state->offset = 0;
return state->ptr = tmp;
} | 0 | [
"CWE-119"
]
| ext-http | 3724cd76a28be1d6049b5537232e97ac567ae1f5 | 195,802,959,808,883,330,000,000,000,000,000,000,000 | 44 | fix bug #71719 (Buffer overflow in HTTP url parsing functions)
The parser's offset was not reset when we softfail in scheme
parsing and continue to parse a path.
Thanks to hlt99 at blinkenshell dot org for the report. |
**/
const CImg<T>& save_yuv(const char *const filename,
const unsigned int chroma_subsampling=444,
const bool is_rgb=true) const {
CImgList<T>(*this,true).save_yuv(filename,chroma_subsampling,is_rgb);
return *this; | 0 | [
"CWE-119",
"CWE-787"
]
| CImg | ac8003393569aba51048c9d67e1491559877b1d1 | 183,344,144,465,074,770,000,000,000,000,000,000,000 | 6 | . |
/*
 * Compute the EVM HMAC or hash (selected by 'type') over the inode's
 * protected security xattrs plus inode metadata (added by hmac_add_misc),
 * storing the result in 'digest'.  When req_xattr_name/req_xattr_value are
 * given, that in-flight value is hashed in place of the stored xattr of
 * the same name.  Returns 0 on success, -EOPNOTSUPP when the filesystem
 * lacks getxattr, -ENODATA when none of the protected xattrs exist, or
 * -ENOMEM on allocation failure.
 */
static int evm_calc_hmac_or_hash(struct dentry *dentry,
const char *req_xattr_name,
const char *req_xattr_value,
size_t req_xattr_value_len,
char type, char *digest)
{
struct inode *inode = dentry->d_inode;
struct shash_desc *desc;
char **xattrname;
size_t xattr_size = 0;
char *xattr_value = NULL;
int error;
int size;
if (!inode->i_op || !inode->i_op->getxattr)
return -EOPNOTSUPP;
desc = init_desc(type);
if (IS_ERR(desc))
return PTR_ERR(desc);
error = -ENODATA;
for (xattrname = evm_config_xattrnames; *xattrname != NULL; xattrname++) {
/* Use the caller-supplied value for the matching xattr name. */
if ((req_xattr_name && req_xattr_value)
&& !strcmp(*xattrname, req_xattr_name)) {
error = 0;
crypto_shash_update(desc, (const u8 *)req_xattr_value,
req_xattr_value_len);
continue;
}
/* vfs_getxattr_alloc reuses/grows xattr_value as needed. */
size = vfs_getxattr_alloc(dentry, *xattrname,
&xattr_value, xattr_size, GFP_NOFS);
if (size == -ENOMEM) {
error = -ENOMEM;
goto out;
}
if (size < 0)
continue;
error = 0;
xattr_size = size;
crypto_shash_update(desc, (const u8 *)xattr_value, xattr_size);
}
/* Mix in inode metadata and finalize into 'digest'. */
hmac_add_misc(desc, inode, digest);
out:
kfree(xattr_value);
kfree(desc);
return error;
} | 0 | [
"CWE-703"
]
| linux | a67adb997419fb53540d4a4f79c6471c60bc69b6 | 334,137,913,834,150,500,000,000,000,000,000,000,000 | 49 | evm: checking if removexattr is not a NULL
The following lines of code produce a kernel oops.
fd = socket(PF_FILE, SOCK_STREAM|SOCK_CLOEXEC|SOCK_NONBLOCK, 0);
fchmod(fd, 0666);
[ 139.922364] BUG: unable to handle kernel NULL pointer dereference at (null)
[ 139.924982] IP: [< (null)>] (null)
[ 139.924982] *pde = 00000000
[ 139.924982] Oops: 0000 [#5] SMP
[ 139.924982] Modules linked in: fuse dm_crypt dm_mod i2c_piix4 serio_raw evdev binfmt_misc button
[ 139.924982] Pid: 3070, comm: acpid Tainted: G D 3.8.0-rc2-kds+ #465 Bochs Bochs
[ 139.924982] EIP: 0060:[<00000000>] EFLAGS: 00010246 CPU: 0
[ 139.924982] EIP is at 0x0
[ 139.924982] EAX: cf5ef000 EBX: cf5ef000 ECX: c143d600 EDX: c15225f2
[ 139.924982] ESI: cf4d2a1c EDI: cf4d2a1c EBP: cc02df10 ESP: cc02dee4
[ 139.924982] DS: 007b ES: 007b FS: 00d8 GS: 0033 SS: 0068
[ 139.924982] CR0: 80050033 CR2: 00000000 CR3: 0c059000 CR4: 000006d0
[ 139.924982] DR0: 00000000 DR1: 00000000 DR2: 00000000 DR3: 00000000
[ 139.924982] DR6: ffff0ff0 DR7: 00000400
[ 139.924982] Process acpid (pid: 3070, ti=cc02c000 task=d7705340 task.ti=cc02c000)
[ 139.924982] Stack:
[ 139.924982] c1203c88 00000000 cc02def4 cf4d2a1c ae21eefa 471b60d5 1083c1ba c26a5940
[ 139.924982] e891fb5e 00000041 00000004 cc02df1c c1203964 00000000 cc02df4c c10e20c3
[ 139.924982] 00000002 00000000 00000000 22222222 c1ff2222 cf5ef000 00000000 d76efb08
[ 139.924982] Call Trace:
[ 139.924982] [<c1203c88>] ? evm_update_evmxattr+0x5b/0x62
[ 139.924982] [<c1203964>] evm_inode_post_setattr+0x22/0x26
[ 139.924982] [<c10e20c3>] notify_change+0x25f/0x281
[ 139.924982] [<c10cbf56>] chmod_common+0x59/0x76
[ 139.924982] [<c10e27a1>] ? put_unused_fd+0x33/0x33
[ 139.924982] [<c10cca09>] sys_fchmod+0x39/0x5c
[ 139.924982] [<c13f4f30>] syscall_call+0x7/0xb
[ 139.924982] Code: Bad EIP value.
This happens because sockets do not define the removexattr operation.
Before removing the xattr, verify the removexattr function pointer is
not NULL.
Signed-off-by: Dmitry Kasatkin <[email protected]>
Signed-off-by: Mimi Zohar <[email protected]>
Cc: [email protected]
Signed-off-by: James Morris <[email protected]> |
/// Record a callout failure for this request: build an internal-server-error
/// page (attaching auth details and the given error detail), arrange for it
/// to be sent as the reply, and stop the client connection from forwarding
/// any further data for this request.
ClientHttpRequest::calloutsError(const err_type error, const ErrorDetail::Pointer &errDetail)
{
// The original author of the code also wanted to pass an errno to
// setReplyToError, but it seems unlikely that the errno reflects the
// true cause of the error at this point, so I did not pass it.
if (calloutContext) {
Ip::Address noAddr;
noAddr.setNoAddr();
ConnStateData * c = getConn();
// Use the client's address when the connection is still known.
calloutContext->error = clientBuildError(error, Http::scInternalServerError,
NULL,
c != NULL ? c->clientConnection->remote : noAddr,
request,
al
);
#if USE_AUTH
calloutContext->error->auth_user_request =
c != NULL && c->getAuth() != NULL ? c->getAuth() : request->auth_user_request;
#endif
calloutContext->error->detailError(errDetail);
calloutContext->readNextRequest = true;
if (c != NULL)
c->expectNoForwarding();
}
//else if(calloutContext == NULL) is it possible?
} | 0 | [
"CWE-116"
]
| squid | 6bf66733c122804fada7f5839ef5f3b57e57591c | 162,549,488,202,081,880,000,000,000,000,000,000,000 | 26 | Handle more Range requests (#790)
Also removed some effectively unused code. |
// Runtime selection for invokespecial: starting from the resolved method,
// re-lookup in the superclass for old-style (ACC_SUPER) super calls, verify
// loader constraints when a different method is selected, enforce the
// receiver-subtype rule for interface senders, and reject static or
// abstract selections.  The chosen method is recorded in 'result' as a
// statically-bound call.
void LinkResolver::runtime_resolve_special_method(CallInfo& result,
const LinkInfo& link_info,
const methodHandle& resolved_method,
Handle recv, TRAPS) {
Klass* resolved_klass = link_info.resolved_klass();
// resolved method is selected method unless we have an old-style lookup
// for a superclass method
// Invokespecial for a superinterface, resolved method is selected method,
// no checks for shadowing
methodHandle sel_method(THREAD, resolved_method());
if (link_info.check_access() &&
// check if the method is not <init>
resolved_method->name() != vmSymbols::object_initializer_name()) {
// check if this is an old-style super call and do a new lookup if so
// a) check if ACC_SUPER flag is set for the current class
Klass* current_klass = link_info.current_klass();
if ((current_klass->is_super() || !AllowNonVirtualCalls) &&
// b) check if the class of the resolved_klass is a superclass
// (not supertype in order to exclude interface classes) of the current class.
// This check is not performed for super.invoke for interface methods
// in super interfaces.
current_klass->is_subclass_of(resolved_klass) &&
current_klass != resolved_klass
) {
// Lookup super method
Klass* super_klass = current_klass->super();
sel_method = lookup_instance_method_in_klasses(super_klass,
resolved_method->name(),
resolved_method->signature(),
Klass::find_private, CHECK);
// check if found
if (sel_method.is_null()) {
ResourceMark rm(THREAD);
stringStream ss;
ss.print("'");
resolved_method->print_external_name(&ss);
ss.print("'");
THROW_MSG(vmSymbols::java_lang_AbstractMethodError(), ss.as_string());
// check loader constraints if found a different method
} else if (sel_method() != resolved_method()) {
check_method_loader_constraints(link_info, sel_method, "method", CHECK);
}
}
// Check that the class of objectref (the receiver) is the current class or interface,
// or a subtype of the current class or interface (the sender), otherwise invokespecial
// throws IllegalAccessError.
// The verifier checks that the sender is a subtype of the class in the I/MR operand.
// The verifier also checks that the receiver is a subtype of the sender, if the sender is
// a class. If the sender is an interface, the check has to be performed at runtime.
InstanceKlass* sender = InstanceKlass::cast(current_klass);
sender = sender->is_anonymous() ? sender->host_klass() : sender;
if (sender->is_interface() && recv.not_null()) {
Klass* receiver_klass = recv->klass();
if (!receiver_klass->is_subtype_of(sender)) {
ResourceMark rm(THREAD);
char buf[500];
jio_snprintf(buf, sizeof(buf),
"Receiver class %s must be the current class or a subtype of interface %s",
receiver_klass->external_name(),
sender->external_name());
THROW_MSG(vmSymbols::java_lang_IllegalAccessError(), buf);
}
}
}
// check if not static
if (sel_method->is_static()) {
ResourceMark rm(THREAD);
stringStream ss;
ss.print("Expecting non-static method '");
resolved_method->print_external_name(&ss);
ss.print("'");
THROW_MSG(vmSymbols::java_lang_IncompatibleClassChangeError(), ss.as_string());
}
// check if abstract
if (sel_method->is_abstract()) {
ResourceMark rm(THREAD);
stringStream ss;
ss.print("'");
Method::print_external_name(&ss, resolved_klass, sel_method->name(), sel_method->signature());
ss.print("'");
THROW_MSG(vmSymbols::java_lang_AbstractMethodError(), ss.as_string());
}
if (log_develop_is_enabled(Trace, itables)) {
trace_method_resolution("invokespecial selected method: resolved-class:",
resolved_klass, resolved_klass, sel_method, true);
}
// setup result
result.set_static(resolved_klass, sel_method, CHECK);
} | 0 | []
| jdk11u | 132745902a4601dc64b2c8ca112ca30292feccb4 | 292,401,825,167,469,300,000,000,000,000,000,000,000 | 98 | 8281866: Enhance MethodHandle invocations
Reviewed-by: mbaesken
Backport-of: d974d9da365f787f67971d88c79371c8b0769f75 |
/*
 * "z=" command: offer spelling suggestions for the badly spelled word at or
 * after the cursor (or for the Visual selection).  With a non-zero "count"
 * the count'th suggestion is applied without prompting; otherwise the
 * suggestions are listed and the user is asked to pick one.  The chosen
 * replacement is applied to the line, recorded for redo ("ciw") and saved
 * in repl_from/repl_to for :spellrepall.  'spell' is temporarily enabled
 * when it was off and restored before returning.
 */
spell_suggest(int count)
{
char_u *line;
pos_T prev_cursor = curwin->w_cursor;
char_u wcopy[MAXWLEN + 2];
char_u *p;
int i;
int c;
suginfo_T sug;
suggest_T *stp;
int mouse_used;
int need_cap;
int limit;
int selected = count;
int badlen = 0;
int msg_scroll_save = msg_scroll;
int wo_spell_save = curwin->w_p_spell;
if (!curwin->w_p_spell)
{
did_set_spelllang(curwin);
curwin->w_p_spell = TRUE;
}
if (*curwin->w_s->b_p_spl == NUL)
{
emsg(_(e_spell_checking_is_not_possible));
return;
}
if (VIsual_active)
{
// Use the Visually selected text as the bad word. But reject
// a multi-line selection.
if (curwin->w_cursor.lnum != VIsual.lnum)
{
vim_beep(BO_SPELL);
return;
}
badlen = (int)curwin->w_cursor.col - (int)VIsual.col;
if (badlen < 0)
badlen = -badlen;
else
curwin->w_cursor.col = VIsual.col;
++badlen;
end_visual_mode();
// make sure we don't include the NUL at the end of the line
line = ml_get_curline();
if (badlen > (int)STRLEN(line) - (int)curwin->w_cursor.col)
badlen = (int)STRLEN(line) - (int)curwin->w_cursor.col;
}
// Find the start of the badly spelled word.
else if (spell_move_to(curwin, FORWARD, TRUE, TRUE, NULL) == 0
|| curwin->w_cursor.col > prev_cursor.col)
{
// No bad word or it starts after the cursor: use the word under the
// cursor.
curwin->w_cursor = prev_cursor;
line = ml_get_curline();
p = line + curwin->w_cursor.col;
// Backup to before start of word.
while (p > line && spell_iswordp_nmw(p, curwin))
MB_PTR_BACK(line, p);
// Forward to start of word.
while (*p != NUL && !spell_iswordp_nmw(p, curwin))
MB_PTR_ADV(p);
if (!spell_iswordp_nmw(p, curwin)) // No word found.
{
beep_flush();
return;
}
curwin->w_cursor.col = (colnr_T)(p - line);
}
// Get the word and its length.
// Figure out if the word should be capitalised.
need_cap = check_need_cap(curwin->w_cursor.lnum, curwin->w_cursor.col);
// Make a copy of current line since autocommands may free the line.
line = vim_strsave(ml_get_curline());
if (line == NULL)
goto skip;
// Get the list of suggestions. Limit to 'lines' - 2 or the number in
// 'spellsuggest', whatever is smaller.
if (sps_limit > (int)Rows - 2)
limit = (int)Rows - 2;
else
limit = sps_limit;
spell_find_suggest(line + curwin->w_cursor.col, badlen, &sug, limit,
TRUE, need_cap, TRUE);
if (sug.su_ga.ga_len == 0)
msg(_("Sorry, no suggestions"));
else if (count > 0)
{
if (count > sug.su_ga.ga_len)
smsg(_("Sorry, only %ld suggestions"), (long)sug.su_ga.ga_len);
}
else
{
// Interactive path: list suggestions and prompt for a number.
#ifdef FEAT_RIGHTLEFT
// When 'rightleft' is set the list is drawn right-left.
cmdmsg_rl = curwin->w_p_rl;
if (cmdmsg_rl)
msg_col = Columns - 1;
#endif
// List the suggestions.
msg_start();
msg_row = Rows - 1; // for when 'cmdheight' > 1
lines_left = Rows; // avoid more prompt
vim_snprintf((char *)IObuff, IOSIZE, _("Change \"%.*s\" to:"),
sug.su_badlen, sug.su_badptr);
#ifdef FEAT_RIGHTLEFT
if (cmdmsg_rl && STRNCMP(IObuff, "Change", 6) == 0)
{
// And now the rabbit from the high hat: Avoid showing the
// untranslated message rightleft.
vim_snprintf((char *)IObuff, IOSIZE, ":ot \"%.*s\" egnahC",
sug.su_badlen, sug.su_badptr);
}
#endif
msg_puts((char *)IObuff);
msg_clr_eos();
msg_putchar('\n');
msg_scroll = TRUE;
for (i = 0; i < sug.su_ga.ga_len; ++i)
{
stp = &SUG(sug.su_ga, i);
// The suggested word may replace only part of the bad word, add
// the not replaced part.
vim_strncpy(wcopy, stp->st_word, MAXWLEN);
if (sug.su_badlen > stp->st_orglen)
vim_strncpy(wcopy + stp->st_wordlen,
sug.su_badptr + stp->st_orglen,
sug.su_badlen - stp->st_orglen);
vim_snprintf((char *)IObuff, IOSIZE, "%2d", i + 1);
#ifdef FEAT_RIGHTLEFT
if (cmdmsg_rl)
rl_mirror(IObuff);
#endif
msg_puts((char *)IObuff);
vim_snprintf((char *)IObuff, IOSIZE, " \"%s\"", wcopy);
msg_puts((char *)IObuff);
// The word may replace more than "su_badlen".
if (sug.su_badlen < stp->st_orglen)
{
vim_snprintf((char *)IObuff, IOSIZE, _(" < \"%.*s\""),
stp->st_orglen, sug.su_badptr);
msg_puts((char *)IObuff);
}
if (p_verbose > 0)
{
// Add the score.
if (sps_flags & (SPS_DOUBLE | SPS_BEST))
vim_snprintf((char *)IObuff, IOSIZE, " (%s%d - %d)",
stp->st_salscore ? "s " : "",
stp->st_score, stp->st_altscore);
else
vim_snprintf((char *)IObuff, IOSIZE, " (%d)",
stp->st_score);
#ifdef FEAT_RIGHTLEFT
if (cmdmsg_rl)
// Mirror the numbers, but keep the leading space.
rl_mirror(IObuff + 1);
#endif
msg_advance(30);
msg_puts((char *)IObuff);
}
msg_putchar('\n');
}
#ifdef FEAT_RIGHTLEFT
cmdmsg_rl = FALSE;
msg_col = 0;
#endif
// Ask for choice.
selected = prompt_for_number(&mouse_used);
if (mouse_used)
selected -= lines_left;
lines_left = Rows; // avoid more prompt
// don't delay for 'smd' in normal_cmd()
msg_scroll = msg_scroll_save;
}
if (selected > 0 && selected <= sug.su_ga.ga_len && u_save_cursor() == OK)
{
// Save the from and to text for :spellrepall.
VIM_CLEAR(repl_from);
VIM_CLEAR(repl_to);
stp = &SUG(sug.su_ga, selected - 1);
if (sug.su_badlen > stp->st_orglen)
{
// Replacing less than "su_badlen", append the remainder to
// repl_to.
repl_from = vim_strnsave(sug.su_badptr, sug.su_badlen);
vim_snprintf((char *)IObuff, IOSIZE, "%s%.*s", stp->st_word,
sug.su_badlen - stp->st_orglen,
sug.su_badptr + stp->st_orglen);
repl_to = vim_strsave(IObuff);
}
else
{
// Replacing su_badlen or more, use the whole word.
repl_from = vim_strnsave(sug.su_badptr, stp->st_orglen);
repl_to = vim_strsave(stp->st_word);
}
// Replace the word.
p = alloc(STRLEN(line) - stp->st_orglen + stp->st_wordlen + 1);
if (p != NULL)
{
int len_diff = stp->st_wordlen - stp->st_orglen;
c = (int)(sug.su_badptr - line);
mch_memmove(p, line, c);
STRCPY(p + c, stp->st_word);
STRCAT(p, sug.su_badptr + stp->st_orglen);
// For redo we use a change-word command.
ResetRedobuff();
AppendToRedobuff((char_u *)"ciw");
AppendToRedobuffLit(p + c,
stp->st_wordlen + sug.su_badlen - stp->st_orglen);
AppendCharToRedobuff(ESC);
// "p" may be freed here
ml_replace(curwin->w_cursor.lnum, p, FALSE);
curwin->w_cursor.col = c;
changed_bytes(curwin->w_cursor.lnum, c);
// Text properties on the line must shift with the length change.
if (curbuf->b_has_textprop && len_diff != 0)
adjust_prop_columns(curwin->w_cursor.lnum, c, len_diff,
APC_SUBSTITUTE);
}
}
else
curwin->w_cursor = prev_cursor;
spell_find_cleanup(&sug);
skip:
vim_free(line);
curwin->w_p_spell = wo_spell_save;
} | 0 | [
"CWE-125",
"CWE-787"
]
| vim | 156d3911952d73b03d7420dc3540215247db0fe8 | 127,374,864,930,300,680,000,000,000,000,000,000,000 | 253 | patch 8.2.5123: using invalid index when looking for spell suggestions
Problem: Using invalid index when looking for spell suggestions.
Solution: Do not decrement the index when it is zero. |
/*
 * Read 'size' bytes of guest memory starting at 'start' into 'buffer',
 * delegating to the connection driver's domainMemoryPeek callback.
 * 'flags' selects VIR_MEMORY_VIRTUAL or VIR_MEMORY_PHYSICAL (mutually
 * exclusive).  Requires a read-write connection; size == 0 is allowed as
 * an access test.  Returns the driver's result (>= 0) or -1 on error.
 */
virDomainMemoryPeek(virDomainPtr dom,
unsigned long long start /* really 64 bits */,
size_t size,
void *buffer,
unsigned int flags)
{
virConnectPtr conn;
VIR_DOMAIN_DEBUG(dom, "start=%lld, size=%zi, buffer=%p, flags=%x",
start, size, buffer, flags);
virResetLastError();
virCheckDomainReturn(dom, -1);
conn = dom->conn;
virCheckReadOnlyGoto(conn->flags, error);
/* Note on access to physical memory: A VIR_MEMORY_PHYSICAL flag is
* a possibility. However it isn't really useful unless the caller
* can also access registers, particularly CR3 on x86 in order to
* get the Page Table Directory. Since registers are different on
* every architecture, that would imply another call to get the
* machine registers.
*
* The QEMU driver handles VIR_MEMORY_VIRTUAL, mapping it
* to the qemu 'memsave' command which does the virtual to physical
* mapping inside qemu.
*
* The QEMU driver also handles VIR_MEMORY_PHYSICAL, mapping it
* to the qemu 'pmemsave' command.
*
* At time of writing there is no Xen driver. However the Xen
* hypervisor only lets you map physical pages from other domains,
* and so the Xen driver would have to do the virtual to physical
* mapping by chasing 2, 3 or 4-level page tables from the PTD.
* There is example code in libxc (xc_translate_foreign_address)
* which does this, although we cannot copy this code directly
* because of incompatible licensing.
*/
VIR_EXCLUSIVE_FLAGS_GOTO(VIR_MEMORY_VIRTUAL, VIR_MEMORY_PHYSICAL, error);
/* Allow size == 0 as an access test. */
if (size > 0)
virCheckNonNullArgGoto(buffer, error);
if (conn->driver->domainMemoryPeek) {
int ret;
ret = conn->driver->domainMemoryPeek(dom, start, size,
buffer, flags);
if (ret < 0)
goto error;
return ret;
}
virReportUnsupportedError();
error:
virDispatchError(dom->conn);
return -1;
} | 0 | [
"CWE-254"
]
| libvirt | 506e9d6c2d4baaf580d489fff0690c0ff2ff588f | 21,320,029,214,039,310,000,000,000,000,000,000,000 | 62 | virDomainGetTime: Deny on RO connections
We have a policy that if API may end up talking to a guest agent
it should require RW connection. We don't obey the rule in
virDomainGetTime().
Signed-off-by: Michal Privoznik <[email protected]> |
/*
 * Write all selected objects out as one or more pack files (or a single
 * pack to stdout).  Objects are emitted in the precomputed write order;
 * when a pack fills up, its header is fixed up to the actual object count
 * and another pack is started.  For on-disk packs the mtimes of later
 * packs are decremented to preserve recency ordering, the .idx (and
 * optionally .bitmap) files are written, and each pack's SHA-1 name is
 * printed.  Dies if the number written differs from the expected result.
 */
static void write_pack_file(void)
{
uint32_t i = 0, j;
struct sha1file *f;
off_t offset;
uint32_t nr_remaining = nr_result;
time_t last_mtime = 0;
struct object_entry **write_order;
if (progress > pack_to_stdout)
progress_state = start_progress(_("Writing objects"), nr_result);
written_list = xmalloc(to_pack.nr_objects * sizeof(*written_list));
write_order = compute_write_order();
do {
unsigned char sha1[20];
char *pack_tmp_name = NULL;
if (pack_to_stdout)
f = sha1fd_throughput(1, "<stdout>", progress_state);
else
f = create_tmp_packfile(&pack_tmp_name);
offset = write_pack_header(f, nr_remaining);
if (reuse_packfile) {
off_t packfile_size;
assert(pack_to_stdout);
/* Splice the reusable portion of an existing pack verbatim. */
packfile_size = write_reused_pack(f);
offset += packfile_size;
}
nr_written = 0;
for (; i < to_pack.nr_objects; i++) {
struct object_entry *e = write_order[i];
if (write_one(f, e, &offset) == WRITE_ONE_BREAK)
break;
display_progress(progress_state, written);
}
/*
* Did we write the wrong # entries in the header?
* If so, rewrite it like in fast-import
*/
if (pack_to_stdout) {
sha1close(f, sha1, CSUM_CLOSE);
} else if (nr_written == nr_remaining) {
sha1close(f, sha1, CSUM_FSYNC);
} else {
int fd = sha1close(f, sha1, 0);
fixup_pack_header_footer(fd, sha1, pack_tmp_name,
nr_written, sha1, offset);
close(fd);
write_bitmap_index = 0;
}
if (!pack_to_stdout) {
struct stat st;
struct strbuf tmpname = STRBUF_INIT;
/*
* Packs are runtime accessed in their mtime
* order since newer packs are more likely to contain
* younger objects. So if we are creating multiple
* packs then we should modify the mtime of later ones
* to preserve this property.
*/
if (stat(pack_tmp_name, &st) < 0) {
warning("failed to stat %s: %s",
pack_tmp_name, strerror(errno));
} else if (!last_mtime) {
last_mtime = st.st_mtime;
} else {
struct utimbuf utb;
utb.actime = st.st_atime;
utb.modtime = --last_mtime;
if (utime(pack_tmp_name, &utb) < 0)
warning("failed utime() on %s: %s",
pack_tmp_name, strerror(errno));
}
strbuf_addf(&tmpname, "%s-", base_name);
if (write_bitmap_index) {
bitmap_writer_set_checksum(sha1);
bitmap_writer_build_type_index(written_list, nr_written);
}
finish_tmp_packfile(&tmpname, pack_tmp_name,
written_list, nr_written,
&pack_idx_opts, sha1);
if (write_bitmap_index) {
strbuf_addf(&tmpname, "%s.bitmap", sha1_to_hex(sha1));
stop_progress(&progress_state);
bitmap_writer_show_progress(progress);
bitmap_writer_reuse_bitmaps(&to_pack);
bitmap_writer_select_commits(indexed_commits, indexed_commits_nr, -1);
bitmap_writer_build(&to_pack);
bitmap_writer_finish(written_list, nr_written,
tmpname.buf, write_bitmap_options);
write_bitmap_index = 0;
}
strbuf_release(&tmpname);
free(pack_tmp_name);
puts(sha1_to_hex(sha1));
}
/* mark written objects as written to previous pack */
for (j = 0; j < nr_written; j++) {
written_list[j]->offset = (off_t)-1;
}
nr_remaining -= nr_written;
} while (nr_remaining && i < to_pack.nr_objects);
free(written_list);
free(write_order);
stop_progress(&progress_state);
if (written != nr_result)
die("wrote %"PRIu32" objects while expecting %"PRIu32,
written, nr_result);
} | 0 | [
"CWE-119",
"CWE-787"
]
| git | de1e67d0703894cb6ea782e36abb63976ab07e60 | 108,523,838,592,350,740,000,000,000,000,000,000,000 | 126 | list-objects: pass full pathname to callbacks
When we find a blob at "a/b/c", we currently pass this to
our show_object_fn callbacks as two components: "a/b/" and
"c". Callbacks which want the full value then call
path_name(), which concatenates the two. But this is an
inefficient interface; the path is a strbuf, and we could
simply append "c" to it temporarily, then roll back the
length, without creating a new copy.
So we could improve this by teaching the callsites of
path_name() this trick (and there are only 3). But we can
also notice that no callback actually cares about the
broken-down representation, and simply pass each callback
the full path "a/b/c" as a string. The callback code becomes
even simpler, then, as we do not have to worry about freeing
an allocated buffer, nor rolling back our modification to
the strbuf.
This is theoretically less efficient, as some callbacks
would not bother to format the final path component. But in
practice this is not measurable. Since we use the same
strbuf over and over, our work to grow it is amortized, and
we really only pay to memcpy a few bytes.
Signed-off-by: Jeff King <[email protected]>
Signed-off-by: Junio C Hamano <[email protected]> |
/* Check whether the given magic bytes identify a Photoshop (PSD) file.
 * Requires at least 4 bytes; the signature is "8BPS".  LocaleNCompare is
 * kept (rather than memcmp) to preserve the original comparison
 * semantics. */
static MagickBooleanType IsPSD(const unsigned char *magick,const size_t length)
{
if (length < 4)
return(MagickFalse);
return(LocaleNCompare((const char *) magick,"8BPS",4) == 0 ?
MagickTrue : MagickFalse);
} | 0 | []
| ImageMagick | bd9f1e7d1bd2c8e2cf7895d133c5c5b5cd3526b6 | 284,169,726,245,893,400,000,000,000,000,000,000,000 | 8 | Fixed memory leak when reading incorrect PSD files |
// gather_nd with int8 params and int32 indices: gathers the [0,1] and
// [1,0] slices of a 3x2x3 tensor and checks the two rows come back
// concatenated.
TEST(GatherNdOpTest, Int8Int32) {
GatherNdOpModel m({TensorType_INT8, {3, 2, 3}}, {TensorType_INT32, {2, 2}});
m.SetInput<int8_t>({1, -1, 1, -2, 2, 2, //
3, 3, -3, -4, -4, 4, //
5, -5, 5, 6, -6, 6});
m.SetPositions<int32_t>({0, 1, 1, 0});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<int8_t>(), ElementsAreArray({-2, 2, 2, 3, 3, -3}));
} | 0 | [
"CWE-125"
]
| tensorflow | 595a65a3e224a0362d7e68c2213acfc2b499a196 | 127,779,622,227,810,110,000,000,000,000,000,000,000 | 10 | Return a TFLite error if gather_nd will result in reading invalid memory
PiperOrigin-RevId: 463054033 |
/*
 * PostScript 'restore' operator: validate the save object on the operand
 * stack, check that the o/e/d stacks contain no references newer than the
 * save, then iteratively roll back VM allocation state and the graphics
 * state (grestoreall at each step) until the target save level is reached.
 * Finally frees the vm_save_t record and reloads the dict-stack cache.
 */
zrestore(i_ctx_t *i_ctx_p)
{
os_ptr op = osp;
alloc_save_t *asave;
bool last;
vm_save_t *vmsave;
int code = restore_check_operand(op, &asave, idmemory);
if (code < 0)
return code;
if_debug2m('u', imemory, "[u]vmrestore 0x%lx, id = %lu\n",
(ulong) alloc_save_client_data(asave),
(ulong) op->value.saveid);
if (I_VALIDATE_BEFORE_RESTORE)
ivalidate_clean_spaces(i_ctx_p);
/* Check the contents of the stacks. */
osp--;
{
int code;
if ((code = restore_check_stack(i_ctx_p, &o_stack, asave, false)) < 0 ||
(code = restore_check_stack(i_ctx_p, &e_stack, asave, true)) < 0 ||
(code = restore_check_stack(i_ctx_p, &d_stack, asave, false)) < 0
) {
/* Leave the save object on the stack on failure. */
osp++;
return code;
}
}
/* Reset l_new in all stack entries if the new save level is zero. */
/* Also do some special fixing on the e-stack. */
restore_fix_stack(i_ctx_p, &o_stack, asave, false);
restore_fix_stack(i_ctx_p, &e_stack, asave, true);
restore_fix_stack(i_ctx_p, &d_stack, asave, false);
/* Iteratively restore the state of memory, */
/* also doing a grestoreall at each step. */
do {
vmsave = alloc_save_client_data(alloc_save_current(idmemory));
/* Restore the graphics state. */
gs_grestoreall_for_restore(igs, vmsave->gsave);
/*
* If alloc_save_space decided to do a second save, the vmsave
* object was allocated one save level less deep than the
* current level, so ifree_object won't actually free it;
* however, it points to a gsave object that definitely
* *has* been freed. In order not to trip up the garbage
* collector, we clear the gsave pointer now.
*/
vmsave->gsave = 0;
/* Now it's safe to restore the state of memory. */
code = alloc_restore_step_in(idmemory, asave);
if (code < 0)
return code;
last = code;
}
while (!last);
{
uint space = icurrent_space;
ialloc_set_space(idmemory, avm_local);
ifree_object(vmsave, "zrestore");
ialloc_set_space(idmemory, space);
}
dict_set_top();	/* reload dict stack cache */
if (I_VALIDATE_AFTER_RESTORE)
ivalidate_clean_spaces(i_ctx_p);
/* If the i_ctx_p LockFilePermissions is true, but the userparams */
/* we just restored is false, we need to make sure that we do not */
/* cause an 'invalidaccess' in setuserparams. Temporarily set */
/* LockFilePermissions false until the gs_lev2.ps can do a */
/* setuserparams from the restored userparam dictionary. */
i_ctx_p->LockFilePermissions = false;
return 0;
} | 1 | []
| ghostpdl | 5516c614dc33662a2afdc377159f70218e67bde5 | 16,957,655,121,206,395,000,000,000,000,000,000,000 | 73 | Improve restore robustness
Prompted by looking at Bug 699654:
There are two variants of the restore operator in Ghostscript: one is Level 1
(restoring VM), the other is Level 2+ (adding page device restoring to the
Level operator).
This was implemented by the Level 2+ version restoring the device in the
graphics state, then calling the Level 1 implementation to handle actually
restoring the VM state.
The problem was that the operand checking, and sanity of the save object was
only done by the Level 1 variant, thus meaning an invalid save object could
leave a (Level 2+) restore partially complete - with the page device part
restored, but not VM, and the page device not configured.
To solve that, this commit splits the operand and sanity checking, and the
core of the restore operation into separate functions, so the relevant
operators can validate the operand *before* taking any further action. That
reduces the chances of an invalid restore leaving the interpreter in an
unknown state.
If an error occurs during the actual VM restore it is essentially fatal, and the
interpreter cannot continue, but as an extra surety for security, in the event
of such an error, we'll explicitly preserve the LockSafetyParams of the device,
rather than rely on the post-restore device configuration (which won't happen
in the event of an error). |
/*
 * Allocate and initialise a fresh CERT structure with default security
 * settings and a new lock.  Returns NULL (with an error raised) on
 * allocation failure.
 */
CERT *ssl_cert_new(void)
{
    CERT *cert = OPENSSL_zalloc(sizeof(*cert));

    if (cert == NULL) {
        ERR_raise(ERR_LIB_SSL, ERR_R_MALLOC_FAILURE);
        return NULL;
    }

    cert->references = 1;
    /* Default key slot is the RSA entry of the pkeys array. */
    cert->key = &cert->pkeys[SSL_PKEY_RSA];
    cert->sec_cb = ssl_security_default_callback;
    cert->sec_level = OPENSSL_TLS_SECURITY_LEVEL;
    cert->sec_ex = NULL;

    cert->lock = CRYPTO_THREAD_lock_new();
    if (cert->lock == NULL) {
        ERR_raise(ERR_LIB_SSL, ERR_R_MALLOC_FAILURE);
        OPENSSL_free(cert);
        return NULL;
    }

    return cert;
}
"CWE-835"
]
| openssl | 758754966791c537ea95241438454aa86f91f256 | 165,074,655,627,951,340,000,000,000,000,000,000,000 | 23 | Fix invalid handling of verify errors in libssl
In the event that X509_verify() returned an internal error result then
libssl would mishandle this and set rwstate to SSL_RETRY_VERIFY. This
subsequently causes SSL_get_error() to return SSL_ERROR_WANT_RETRY_VERIFY.
That return code is supposed to only ever be returned if an application
is using an app verify callback to complete replace the use of
X509_verify(). Applications may not be written to expect that return code
and could therefore crash (or misbehave in some other way) as a result.
CVE-2021-4044
Reviewed-by: Tomas Mraz <[email protected]> |
/*
 * Fill one netlink message describing an IPv6 multicast address.
 * Returns 0 on success or -EMSGSIZE if the skb ran out of room (the
 * partially-built message is cancelled in that case).
 */
static int inet6_fill_ifmcaddr(struct sk_buff *skb, struct ifmcaddr6 *ifmca,
			       u32 portid, u32 seq, int event, u16 flags)
{
	int ifindex = ifmca->idev->dev->ifindex;
	struct nlmsghdr *nlh;
	u8 scope;

	/* Site-scoped multicast addresses are reported with site scope;
	 * everything else as universe. */
	scope = (ipv6_addr_scope(&ifmca->mca_addr) & IFA_SITE) ?
		RT_SCOPE_SITE : RT_SCOPE_UNIVERSE;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct ifaddrmsg), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	put_ifaddrmsg(nlh, 128, IFA_F_PERMANENT, scope, ifindex);

	if (nla_put(skb, IFA_MULTICAST, 16, &ifmca->mca_addr) < 0)
		goto cancel;
	if (put_cacheinfo(skb, ifmca->mca_cstamp, ifmca->mca_tstamp,
			  INFINITY_LIFE_TIME, INFINITY_LIFE_TIME) < 0)
		goto cancel;

	nlmsg_end(skb, nlh);
	return 0;

cancel:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
"CWE-20"
]
| linux | 77751427a1ff25b27d47a4c36b12c3c8667855ac | 140,314,237,689,519,570,000,000,000,000,000,000,000 | 25 | ipv6: addrconf: validate new MTU before applying it
Currently we don't check if the new MTU is valid or not and this allows
one to configure a smaller than minimum allowed by RFCs or even bigger
than interface own MTU, which is a problem as it may lead to packet
drops.
If you have a daemon like NetworkManager running, this may be exploited
by remote attackers by forging RA packets with an invalid MTU, possibly
leading to a DoS. (NetworkManager currently only validates for values
too small, but not for too big ones.)
The fix is just to make sure the new value is valid. That is, between
IPV6_MIN_MTU and interface's MTU.
Note that similar check is already performed at
ndisc_router_discovery(), for when kernel itself parses the RA.
Signed-off-by: Marcelo Ricardo Leitner <[email protected]>
Signed-off-by: Sabrina Dubroca <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
//! Serialize a 3D object (vertices + primitives + colors + opacities)
//! into a single one-column CImg3d float buffer.
/**
   Layout written: the "CImg3d" magic, vertex and primitive counts,
   vertex coordinates, primitive index lists, per-primitive colors or
   textures (with shared-texture back-references), then opacities
   (padded with 1.0f up to the precomputed size).
   Throws CImgInstanceException if the object fails is_object3d()
   validation.
**/
CImg<floatT> get_object3dtoCImg3d(const CImgList<tp>& primitives,
                                  const CImgList<tc>& colors,
                                  const to& opacities,
                                  const bool full_check=true) const {
  CImg<charT> error_message(1024);
  if (!is_object3d(primitives,colors,opacities,full_check,error_message))
    throw CImgInstanceException(_cimg_instance
                                "object3dtoCImg3d(): Invalid specified 3D object (%u,%u) (%s).",
                                cimg_instance,_width,primitives._width,error_message.data());
  CImg<floatT> res(1,_size_object3dtoCImg3d(primitives,colors,opacities));
  float *ptrd = res._data;

  // Put magick number.
  // (Each magic character is stored +0.5 so it round-trips through float.)
  *(ptrd++) = 'C' + 0.5f; *(ptrd++) = 'I' + 0.5f; *(ptrd++) = 'm' + 0.5f;
  *(ptrd++) = 'g' + 0.5f; *(ptrd++) = '3' + 0.5f; *(ptrd++) = 'd' + 0.5f;

  // Put number of vertices and primitives.
  *(ptrd++) = cimg::uint2float(_width);
  *(ptrd++) = cimg::uint2float(primitives._width);

  // Put vertex data.
  if (is_empty() || !primitives) return res;
  const T *ptrx = data(0,0), *ptry = data(0,1), *ptrz = data(0,2);
  cimg_forX(*this,p) {
    *(ptrd++) = (float)*(ptrx++);
    *(ptrd++) = (float)*(ptry++);
    *(ptrd++) = (float)*(ptrz++);
  }

  // Put primitive data.
  // Each primitive is stored as its vertex count followed by its indices.
  cimglist_for(primitives,p) {
    *(ptrd++) = (float)primitives[p].size();
    const tp *ptrp = primitives[p]._data;
    cimg_foroff(primitives[p],i) *(ptrd++) = cimg::uint2float((unsigned int)*(ptrp++));
  }

  // Put color/texture data.
  // A 3-element color is stored inline; anything else is a texture,
  // marked with -128 and either its full data or (when the buffer is
  // shared with an earlier entry) the index of that earlier entry.
  const unsigned int csiz = std::min(colors._width,primitives._width);
  for (int c = 0; c<(int)csiz; ++c) {
    const CImg<tc>& color = colors[c];
    const tc *ptrc = color._data;
    if (color.size()==3) { *(ptrd++) = (float)*(ptrc++); *(ptrd++) = (float)*(ptrc++); *(ptrd++) = (float)*ptrc; }
    else {
      *(ptrd++) = -128.f;
      int shared_ind = -1;
      if (color.is_shared()) for (int i = 0; i<c; ++i) if (ptrc==colors[i]._data) { shared_ind = i; break; }
      if (shared_ind<0) {
        *(ptrd++) = (float)color._width;
        *(ptrd++) = (float)color._height;
        *(ptrd++) = (float)color._spectrum;
        cimg_foroff(color,l) *(ptrd++) = (float)*(ptrc++);
      } else {
        *(ptrd++) = (float)shared_ind;
        *(ptrd++) = 0;
        *(ptrd++) = 0;
      }
    }
  }
  // Primitives beyond the color list get a default gray (200,200,200).
  const int csiz2 = primitives.width() - colors.width();
  for (int c = 0; c<csiz2; ++c) { *(ptrd++) = 200.f; *(ptrd++) = 200.f; *(ptrd++) = 200.f; }

  // Put opacity data.
  // Missing opacities default to fully opaque (1.0f).
  ptrd = _object3dtoCImg3d(opacities,ptrd);
  const float *ptre = res.end();
  while (ptrd<ptre) *(ptrd++) = 1.f;
  return res;
}
"CWE-770"
]
| cimg | 619cb58dd90b4e03ac68286c70ed98acbefd1c90 | 292,198,373,068,379,680,000,000,000,000,000,000,000 | 67 | CImg<>::load_bmp() and CImg<>::load_pandore(): Check that dimensions encoded in file does not exceed file size. |
/*
  Construct an Item referring to a stored-program CASE expression.
  The base Item_sp_variable is given the fixed name "case_expr";
  m_case_expr_id records which case-expression slot this item refers to.
*/
Item_case_expr::Item_case_expr(uint case_expr_id)
  :Item_sp_variable( C_STRING_WITH_LEN("case_expr")),
   m_case_expr_id(case_expr_id)
{
}
| server | b000e169562697aa072600695d4f0c0412f94f4f | 221,036,146,155,277,240,000,000,000,000,000,000,000 | 5 | Bug#26361149 MYSQL SERVER CRASHES AT: COL IN(IFNULL(CONST, COL), NAME_CONST('NAME', NULL))
based on:
commit f7316aa0c9a
Author: Ajo Robert <[email protected]>
Date: Thu Aug 24 17:03:21 2017 +0530
Bug#26361149 MYSQL SERVER CRASHES AT: COL IN(IFNULL(CONST,
COL), NAME_CONST('NAME', NULL))
Backport of Bug#19143243 fix.
NAME_CONST item can return NULL_ITEM type in case of incorrect arguments.
NULL_ITEM has special processing in Item_func_in function.
In Item_func_in::fix_length_and_dec an array of possible comparators is
created. Since NAME_CONST function has NULL_ITEM type, corresponding
array element is empty. Then NAME_CONST is wrapped to ITEM_CACHE.
ITEM_CACHE can not return proper type(NULL_ITEM) in Item_func_in::val_int(),
so the NULL_ITEM is attempted compared with an empty comparator.
The fix is to disable the caching of Item_name_const item. |
/*
 * Transmit an ACL (Bluetooth-over-WLAN) data frame.  A tx cookie is
 * reserved under arLock; if none is available the frame is dropped and
 * the drop counters are bumped.  Always returns 0.
 */
ar6000_acl_data_tx(struct sk_buff *skb, struct net_device *dev)
{
    struct ar6_softc *ar = (struct ar6_softc *)ar6k_priv(dev);
    struct ar_cookie *cookie = NULL;
    HTC_ENDPOINT_ID eid = ENDPOINT_UNUSED;

    AR6000_SPIN_LOCK(&ar->arLock, 0);

    /* For now we send ACL on the BE endpoint; a dedicated EP could also
       be used. */
    eid = arAc2EndpointID (ar, 0);

    /* reserve a cookie for this packet */
    cookie = ar6000_alloc_cookie(ar);
    if (cookie != NULL) {
        /* pending counts must be updated while the lock is held */
        ar->arTxPending[eid]++;
        ar->arTotalTxDataPending++;
    }

    AR6000_SPIN_UNLOCK(&ar->arLock, 0);

    if (cookie == NULL) {
        /* no resources available: drop the frame */
        A_NETBUF_FREE(skb);
        AR6000_STAT_INC(ar, tx_dropped);
        AR6000_STAT_INC(ar, tx_aborted_errors);
        return 0;
    }

    cookie->arc_bp[0] = (unsigned long)skb;
    cookie->arc_bp[1] = 0;
    SET_HTC_PACKET_INFO_TX(&cookie->HtcPkt,
                           cookie,
                           A_NETBUF_DATA(skb),
                           A_NETBUF_LEN(skb),
                           eid,
                           AR6K_DATA_PKT_TAG);
    /* HTC interface is asynchronous; if this fails, cleanup happens in
       the ar6000_tx_complete callback */
    HTCSendPkt(ar->arHtcTarget, &cookie->HtcPkt);

    return 0;
}
"CWE-703",
"CWE-264"
]
| linux | 550fd08c2cebad61c548def135f67aba284c6162 | 165,022,616,002,884,460,000,000,000,000,000,000,000 | 44 | net: Audit drivers to identify those needing IFF_TX_SKB_SHARING cleared
After the last patch, We are left in a state in which only drivers calling
ether_setup have IFF_TX_SKB_SHARING set (we assume that drivers touching real
hardware call ether_setup for their net_devices and don't hold any state in
their skbs. There are a handful of drivers that violate this assumption of
course, and need to be fixed up. This patch identifies those drivers, and marks
them as not being able to support the safe transmission of skbs by clearning the
IFF_TX_SKB_SHARING flag in priv_flags
Signed-off-by: Neil Horman <[email protected]>
CC: Karsten Keil <[email protected]>
CC: "David S. Miller" <[email protected]>
CC: Jay Vosburgh <[email protected]>
CC: Andy Gospodarek <[email protected]>
CC: Patrick McHardy <[email protected]>
CC: Krzysztof Halasa <[email protected]>
CC: "John W. Linville" <[email protected]>
CC: Greg Kroah-Hartman <[email protected]>
CC: Marcel Holtmann <[email protected]>
CC: Johannes Berg <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
/* Translate a cpio archive header HDR into the struct stat ST.
   The permission bits, file-type bits, link count, ownership, device
   numbers, mtime and size are copied; everything else in ST is zeroed.
   File-type translation only covers the types the host supports (each
   S_IF* macro is tested before use).  */
void
cpio_to_stat (struct stat *st, struct cpio_file_stat *hdr)
{
  memset (st, 0, sizeof (*st));
  st->st_dev = makedev (hdr->c_dev_maj, hdr->c_dev_min);
  st->st_ino = hdr->c_ino;
  st->st_mode = hdr->c_mode & 0777;
  if (hdr->c_mode & CP_IFREG)
    st->st_mode |= S_IFREG;
  else if (hdr->c_mode & CP_IFDIR)
    st->st_mode |= S_IFDIR;
#ifdef S_IFBLK
  else if (hdr->c_mode & CP_IFBLK)
    st->st_mode |= S_IFBLK;
#endif
#ifdef S_IFCHR
  else if (hdr->c_mode & CP_IFCHR)
    st->st_mode |= S_IFCHR;
#endif
#ifdef S_IFIFO
  /* Bug fix: the guard previously tested S_IFFIFO, a macro POSIX does
     not define (the standard name is S_IFIFO, which the body already
     uses), so the FIFO branch was dead code on conforming systems.  */
  else if (hdr->c_mode & CP_IFIFO)
    st->st_mode |= S_IFIFO;
#endif
#ifdef S_IFLNK
  else if (hdr->c_mode & CP_IFLNK)
    st->st_mode |= S_IFLNK;
#endif
#ifdef S_IFSOCK
  else if (hdr->c_mode & CP_IFSOCK)
    st->st_mode |= S_IFSOCK;
#endif
#ifdef S_IFNWK
  else if (hdr->c_mode & CP_IFNWK)
    st->st_mode |= S_IFNWK;
#endif
  st->st_nlink = hdr->c_nlink;
  st->st_uid = CPIO_UID (hdr->c_uid);
  st->st_gid = CPIO_GID (hdr->c_gid);
  st->st_rdev = makedev (hdr->c_rdev_maj, hdr->c_rdev_min);
  st->st_mtime = hdr->c_mtime;
  st->st_size = hdr->c_filesize;
}
"CWE-190"
]
| cpio | dd96882877721703e19272fe25034560b794061b | 283,901,625,305,889,230,000,000,000,000,000,000,000 | 41 | Rewrite dynamic string support.
* src/dstring.c (ds_init): Take a single argument.
(ds_free): New function.
(ds_resize): Take a single argument. Use x2nrealloc to expand
the storage.
(ds_reset,ds_append,ds_concat,ds_endswith): New function.
(ds_fgetstr): Rewrite. In particular, this fixes integer overflow.
* src/dstring.h (dynamic_string): Keep both the allocated length
(ds_size) and index of the next free byte in the string (ds_idx).
(ds_init,ds_resize): Change signature.
(ds_len): New macro.
(ds_free,ds_reset,ds_append,ds_concat,ds_endswith): New protos.
* src/copyin.c: Use new ds_ functions.
* src/copyout.c: Likewise.
* src/copypass.c: Likewise.
* src/util.c: Likewise. |
/* Wake up all threads blocked waiting on this domain object's
 * condition variable.
 * NOTE(review): callers presumably hold the domain object lock when
 * broadcasting a state change — confirm against call sites. */
virDomainObjBroadcast(virDomainObjPtr vm)
{
    virCondBroadcast(&vm->cond);
}
"CWE-212"
]
| libvirt | a5b064bf4b17a9884d7d361733737fb614ad8979 | 33,036,797,966,825,390,000,000,000,000,000,000,000 | 4 | conf: Don't format http cookies unless VIR_DOMAIN_DEF_FORMAT_SECURE is used
Starting with 3b076391befc3fe72deb0c244ac6c2b4c100b410
(v6.1.0-122-g3b076391be) we support http cookies. Since they may contain
somewhat sensitive information we should not format them into the XML
unless VIR_DOMAIN_DEF_FORMAT_SECURE is asserted.
Reported-by: Han Han <[email protected]>
Signed-off-by: Peter Krempa <[email protected]>
Reviewed-by: Erik Skultety <[email protected]> |
/* Parse the HEVC VUI (video usability information) from the bitstream.
   Optional sections (SAR, overscan, video signal type, chroma location,
   default display window, timing/HRD, bitstream restrictions) are each
   gated by a presence flag; absent or out-of-range fields fall back to
   defaults, with out-of-range bitstream-restriction values additionally
   reported as warnings.  Returns DE265_OK, or an error propagated from
   hrd_parameters(). */
de265_error video_usability_information::read(error_queue* errqueue, bitreader* br,
                                              const seq_parameter_set* sps)
{
  int vlc;


  // --- sample aspect ratio (SAR) ---

  aspect_ratio_info_present_flag = get_bits(br, 1);
  if (aspect_ratio_info_present_flag) {
    int aspect_ratio_idc = get_bits(br, 8);
    if (aspect_ratio_idc <= NUM_SAR_PRESETS) {
      // Known preset index: look the ratio up in the preset table.
      sar_width = sar_presets[aspect_ratio_idc][0];
      sar_height = sar_presets[aspect_ratio_idc][1];
    }
    else if (aspect_ratio_idc == EXTENDED_SAR) {
      // Extended SAR: width and height are coded explicitly.
      sar_width = get_bits(br, 16);
      sar_height = get_bits(br, 16);
    }
    else {
      // Reserved idc value: treat as unspecified.
      sar_width = 0;
      sar_height = 0;
    }
  }
  else {
    sar_width = 0;
    sar_height = 0;
  }


  // --- overscan ---

  overscan_info_present_flag = get_bits(br, 1);
  if (overscan_info_present_flag) {
    overscan_appropriate_flag = get_bits(br, 1);
  }


  // --- video signal type ---

  { // defaults
    video_format = VideoFormat_Unspecified;
    video_full_range_flag = false;
    colour_primaries = 2;
    transfer_characteristics = 2;
    matrix_coeffs = 2;
  }

  video_signal_type_present_flag = get_bits(br, 1);
  if (video_signal_type_present_flag) {
    int video_format_idc = get_bits(br, 3);
    if (video_format_idc > 5) {
      video_format_idc = VideoFormat_Unspecified;
    }
    video_format = (VideoFormat)video_format_idc;

    video_full_range_flag = get_bits(br, 1);

    colour_description_present_flag = get_bits(br, 1);
    if (colour_description_present_flag) {
      // Out-of-range colour descriptors are replaced by 2 ("unspecified").
      colour_primaries = get_bits(br, 8);
      if (colour_primaries == 0 ||
          colour_primaries == 3 ||
          colour_primaries >= 11) {
        colour_primaries = 2;
      }

      transfer_characteristics = get_bits(br, 8);
      if (transfer_characteristics == 0 ||
          transfer_characteristics == 3 ||
          transfer_characteristics >= 18) {
        transfer_characteristics = 2;
      }

      matrix_coeffs = get_bits(br, 8);
      if (matrix_coeffs >= 11) {
        matrix_coeffs = 2;
      }
    }
  }


  // --- chroma / interlaced ---

  chroma_loc_info_present_flag = get_bits(br, 1);
  if (chroma_loc_info_present_flag) {
    READ_VLC(chroma_sample_loc_type_top_field, uvlc);
    READ_VLC(chroma_sample_loc_type_bottom_field, uvlc);
  }
  else {
    chroma_sample_loc_type_top_field = 0;
    chroma_sample_loc_type_bottom_field = 0;
  }

  neutral_chroma_indication_flag = get_bits(br, 1);
  field_seq_flag = get_bits(br, 1);
  frame_field_info_present_flag = get_bits(br, 1);


  // --- default display window ---

  default_display_window_flag = get_bits(br, 1);
  if (default_display_window_flag) {
    READ_VLC(def_disp_win_left_offset, uvlc);
    READ_VLC(def_disp_win_right_offset, uvlc);
    READ_VLC(def_disp_win_top_offset, uvlc);
    READ_VLC(def_disp_win_bottom_offset, uvlc);
  }
  else {
    def_disp_win_left_offset = 0;
    def_disp_win_right_offset = 0;
    def_disp_win_top_offset = 0;
    def_disp_win_bottom_offset = 0;
  }


  // --- timing ---

  vui_timing_info_present_flag = get_bits(br, 1);
  if (vui_timing_info_present_flag) {
    vui_num_units_in_tick = get_bits(br, 32);
    vui_time_scale = get_bits(br, 32);

    vui_poc_proportional_to_timing_flag = get_bits(br, 1);
    if (vui_poc_proportional_to_timing_flag) {
      READ_VLC_OFFSET(vui_num_ticks_poc_diff_one, uvlc, 1);
    }


    // --- hrd parameters ---

    vui_hrd_parameters_present_flag = get_bits(br, 1);
    if (vui_hrd_parameters_present_flag) {
      de265_error err;
      err = hrd_parameters(errqueue, br, sps);
      if (err) {
        return err;
      }
    }
  }


  // --- bitstream restriction ---

  bitstream_restriction_flag = get_bits(br,1);
  if (bitstream_restriction_flag) {
    tiles_fixed_structure_flag = get_bits(br,1);
    motion_vectors_over_pic_boundaries_flag = get_bits(br,1);
    restricted_ref_pic_lists_flag = get_bits(br,1);

    // Each restriction value is range-checked; violations emit a
    // non-fatal warning and the field is reset to a safe default.
    READ_VLC(min_spatial_segmentation_idc, uvlc);
    if (min_spatial_segmentation_idc > 4095) {
      errqueue->add_warning(DE265_ERROR_CODED_PARAMETER_OUT_OF_RANGE, false);
      min_spatial_segmentation_idc = 0;
    }

    READ_VLC(max_bytes_per_pic_denom, uvlc);
    if (max_bytes_per_pic_denom > 16) {
      errqueue->add_warning(DE265_ERROR_CODED_PARAMETER_OUT_OF_RANGE, false);
      max_bytes_per_pic_denom = 2;
    }

    READ_VLC(max_bits_per_min_cu_denom, uvlc);
    if (max_bits_per_min_cu_denom > 16) {
      errqueue->add_warning(DE265_ERROR_CODED_PARAMETER_OUT_OF_RANGE, false);
      max_bits_per_min_cu_denom = 1;
    }

    READ_VLC(log2_max_mv_length_horizontal, uvlc);
    if (log2_max_mv_length_horizontal > 15) {
      errqueue->add_warning(DE265_ERROR_CODED_PARAMETER_OUT_OF_RANGE, false);
      log2_max_mv_length_horizontal = 15;
    }

    READ_VLC(log2_max_mv_length_vertical, uvlc);
    if (log2_max_mv_length_vertical > 15) {
      errqueue->add_warning(DE265_ERROR_CODED_PARAMETER_OUT_OF_RANGE, false);
      log2_max_mv_length_vertical = 15;
    }
  }
  else {
    tiles_fixed_structure_flag = false;
    motion_vectors_over_pic_boundaries_flag = true;
    restricted_ref_pic_lists_flag = false; // NOTE: default not specified in standard 2014/10
    min_spatial_segmentation_idc = 0;
    max_bytes_per_pic_denom = 2;
    max_bits_per_min_cu_denom = 1;
    log2_max_mv_length_horizontal = 15;
    log2_max_mv_length_vertical = 15;
  }

  //vui_read = true;

  return DE265_OK;
}
"CWE-787"
]
| libde265 | 8e89fe0e175d2870c39486fdd09250b230ec10b8 | 309,108,094,246,615,670,000,000,000,000,000,000,000 | 195 | error on out-of-range cpb_cnt_minus1 (oss-fuzz issue 27590) |
new_msg_sync_lsdb (u_int32_t seqnum, struct lsa_filter_type *filter)
{
u_char buf[OSPF_API_MAX_MSG_SIZE];
struct msg_sync_lsdb *smsg;
int len;
smsg = (struct msg_sync_lsdb *) buf;
len = sizeof (struct msg_sync_lsdb) +
filter->num_areas * sizeof (struct in_addr);
smsg->filter.typemask = htons (filter->typemask);
smsg->filter.origin = filter->origin;
smsg->filter.num_areas = filter->num_areas;
return msg_new (MSG_SYNC_LSDB, smsg, seqnum, len);
} | 1 | [
"CWE-119"
]
| quagga | 3f872fe60463a931c5c766dbf8c36870c0023e88 | 211,126,696,673,716,740,000,000,000,000,000,000,000 | 14 | ospfd: CVE-2013-2236, stack overrun in apiserver
the OSPF API-server (exporting the LSDB and allowing announcement of
Opaque-LSAs) writes past the end of fixed on-stack buffers. This leads
to an exploitable stack overflow.
For this condition to occur, the following two conditions must be true:
- Quagga is configured with --enable-opaque-lsa
- ospfd is started with the "-a" command line option
If either of these does not hold, the relevant code is not executed and
the issue does not get triggered.
Since the issue occurs on receiving large LSAs (larger than 1488 bytes),
it is possible for this to happen during normal operation of a network.
In particular, if there is an OSPF router with a large number of
interfaces, the Router-LSA of that router may exceed 1488 bytes and
trigger this, leading to an ospfd crash.
For an attacker to exploit this, s/he must be able to inject valid LSAs
into the OSPF domain. Any best-practice protection measure (using
crypto authentication, restricting OSPF to internal interfaces, packet
filtering protocol 89, etc.) will prevent exploitation. On top of that,
remote (not on an OSPF-speaking network segment) attackers will have
difficulties bringing up the adjacency needed to inject a LSA.
This patch only performs minimal changes to remove the possibility of a
stack overrun. The OSPF API in general is quite ugly and needs a
rewrite.
Reported-by: Ricky Charlet <[email protected]>
Cc: Florian Weimer <[email protected]>
Signed-off-by: David Lamparter <[email protected]> |
/* Store a parsed top-level module statement VALUE into MODULE according
 * to the statement TYPE.  VALUE ownership is transferred (it is either
 * interned into the dictionary, consumed by yang_check_string(), or
 * freed on error).  Returns 0 on success, EXIT_FAILURE otherwise. */
yang_read_common(struct lys_module *module, char *value, enum yytokentype type)
{
    switch (type) {
    case MODULE_KEYWORD:
        module->name = lydict_insert_zc(module->ctx, value);
        return 0;
    case NAMESPACE_KEYWORD:
        return yang_check_string(module, &module->ns, "namespace", "module", value, NULL);
    case ORGANIZATION_KEYWORD:
        return yang_check_string(module, &module->org, "organization", "module", value, NULL);
    case CONTACT_KEYWORD:
        return yang_check_string(module, &module->contact, "contact", "module", value, NULL);
    default:
        /* unexpected token: internal error */
        free(value);
        LOGINT(module->ctx);
        return EXIT_FAILURE;
    }
}
"CWE-415"
]
| libyang | d9feacc4a590d35dbc1af21caf9080008b4450ed | 113,782,913,471,873,260,000,000,000,000,000,000,000 | 26 | yang parser BUGFIX double free
Fixes #742 |
// Return the first element of the input file's /ID trailer array, or an
// empty string when the trailer carries no /ID.
QPDFWriter::getOriginalID1()
{
    QPDFObjectHandle trailer = this->m->pdf.getTrailer();
    if (!trailer.hasKey("/ID"))
    {
        return "";
    }
    return trailer.getKey("/ID").getArrayItem(0).getStringValue();
}
"CWE-787"
]
| qpdf | d71f05ca07eb5c7cfa4d6d23e5c1f2a800f52e8e | 247,644,762,125,305,160,000,000,000,000,000,000,000 | 12 | Fix sign and conversion warnings (major)
This makes all integer type conversions that have potential data loss
explicit with calls that do range checks and raise an exception. After
this commit, qpdf builds with no warnings when -Wsign-conversion
-Wconversion is used with gcc or clang or when -W3 -Wd4800 is used
with MSVC. This significantly reduces the likelihood of potential
crashes from bogus integer values.
There are some parts of the code that take int when they should take
size_t or an offset. Such places would make qpdf not support files
with more than 2^31 of something that usually wouldn't be so large. In
the event that such a file shows up and is valid, at least qpdf would
raise an error in the right spot so the issue could be legitimately
addressed rather than failing in some weird way because of a silent
overflow condition. |
/* Allocate and initialize a new RCore instance; NULL on allocation
 * failure. */
R_API RCore *r_core_new() {
	RCore *core = R_NEW0 (RCore);
	if (!core) {
		return NULL;
	}
	r_core_init (core);
	return core;
}
"CWE-415",
"CWE-703"
]
| radare2 | cb8b683758edddae2d2f62e8e63a738c39f92683 | 76,186,090,628,107,870,000,000,000,000,000,000,000 | 7 | Fix #16303 - c->table_query double free (#16318) |
/* Expand the varargs VA into ARGLIST according to FORMAT's parameter
   types.  String arguments are referenced directly (NULL becomes "");
   numeric arguments are printed into BUFFER and ARGLIST entries point
   at the printed text.  ARGLIST is NULL-terminated after
   format->params entries.  If BUFFER fills up, remaining numeric
   arguments degrade to "". */
void format_read_arglist(va_list va, FORMAT_REC *format,
			 char **arglist, int arglist_size,
			 char *buffer, int buffer_size)
{
	int num, len, bufpos;

	/* arglist must have room for the terminating NULL */
	g_return_if_fail(format->params < arglist_size);

	bufpos = 0;
	arglist[format->params] = NULL;
	for (num = 0; num < format->params; num++) {
		switch (format->paramtypes[num]) {
		case FORMAT_STRING:
			arglist[num] = (char *) va_arg(va, char *);
			if (arglist[num] == NULL)
				arglist[num] = "";
			break;
		case FORMAT_INT: {
			int d = (int) va_arg(va, int);

			if (bufpos >= buffer_size) {
				arglist[num] = "";
				break;
			}

			arglist[num] = buffer+bufpos;
			len = g_snprintf(buffer+bufpos, buffer_size-bufpos,
					 "%d", d);
			/* +1 keeps the NUL terminator between entries */
			bufpos += len+1;
			break;
		}
		case FORMAT_LONG: {
			long l = (long) va_arg(va, long);

			if (bufpos >= buffer_size) {
				arglist[num] = "";
				break;
			}

			arglist[num] = buffer+bufpos;
			len = g_snprintf(buffer+bufpos, buffer_size-bufpos,
					 "%ld", l);
			bufpos += len+1;
			break;
		}
		case FORMAT_FLOAT: {
			double f = (double) va_arg(va, double);

			if (bufpos >= buffer_size) {
				arglist[num] = "";
				break;
			}

			arglist[num] = buffer+bufpos;
			len = g_snprintf(buffer+bufpos, buffer_size-bufpos,
					 "%0.2f", f);
			bufpos += len+1;
			break;
		}
		}
	}
}
"CWE-476"
]
| irssi | 6c6c42e3d1b49d90aacc0b67f8540471cae02a1d | 162,540,103,011,465,860,000,000,000,000,000,000,000 | 62 | Merge branch 'security' into 'master'
See merge request !7 |
/*
  Read one 16-bit sample from the byte stream in the requested byte
  order, store it in *pixel, and return the stream pointer advanced
  past the two consumed bytes.
*/
static inline const unsigned char *PushShortPixel(const EndianType endian,
  const unsigned char *pixels,unsigned short *pixel)
{
  unsigned int
    high,
    low;

  if (endian == LSBEndian)
    {
      low=(unsigned int) pixels[0];
      high=(unsigned int) pixels[1];
    }
  else
    {
      high=(unsigned int) pixels[0];
      low=(unsigned int) pixels[1];
    }
  *pixel=(unsigned short) (((high << 8) | low) & 0xffff);
  return(pixels+2);
}
"CWE-190"
]
| ImageMagick | f60d59cc3a7e3402d403361e0985ffa56f746a82 | 156,037,517,530,183,700,000,000,000,000,000,000,000 | 18 | https://github.com/ImageMagick/ImageMagick/issues/1727 |
// Fold a reshape that feeds a fully_connected when the reshape does not
// change the innermost (feature) dimension: in that case the
// fully_connected can consume the reshape's input directly and the
// reshape becomes dead.
LogicalResult matchAndRewrite(TFL::FullyConnectedOp fully_connected_op,
                              PatternRewriter &) const override {
  auto input = fully_connected_op.input();
  auto input_ty = input.getType().dyn_cast<ShapedType>();
  auto output_ty = fully_connected_op.output()[0]
                       .getType()
                       .template dyn_cast<ShapedType>();
  // Only the default weights format with statically-known shapes and a
  // rank-2 output is handled.
  if (!input_ty.hasStaticShape() ||
      fully_connected_op.weights_format() != "DEFAULT" ||
      fully_connected_op.keep_num_dims() || !output_ty.hasStaticShape() ||
      output_ty.getRank() != 2) {
    return failure();
  }

  auto reshape_op = input.getDefiningOp<TFL::ReshapeOp>();
  if (!reshape_op) return failure();

  // Check if the last dimension does not change after reshape.
  auto reshape_input = reshape_op.input();
  auto reshape_input_ty = reshape_input.getType().dyn_cast<ShapedType>();
  if (!reshape_input_ty.hasStaticShape() || input_ty.getRank() == 0 ||
      reshape_input_ty.getRank() == 0 ||
      input_ty.getDimSize(input_ty.getRank() - 1) !=
          reshape_input_ty.getDimSize(reshape_input_ty.getRank() - 1)) {
    return failure();
  }

  // Connect the input to the one of reshape.
  fully_connected_op.setOperand(0, reshape_input);
  return success();
}
"CWE-476",
"CWE-125"
]
| tensorflow | d6b57f461b39fd1aa8c1b870f1b974aac3554955 | 107,497,612,681,191,020,000,000,000,000,000,000,000 | 31 | Prevent nullptr dereference in MLIR TFLite dialect/optimizer.
PiperOrigin-RevId: 387220762
Change-Id: Id136ef04bb3d36123b4685d316ae81a9ec924d6b |
/* Parse a colour pointer (cursor) update of the given bit depth from
 * the server stream, create the cursor, make it current and store it
 * in the cursor cache under cache_idx.
 * The hotspot coordinates come from the (untrusted) server, so they are
 * clamped into the cursor's bounding box before use. */
process_colour_pointer_common(STREAM s, int bpp)
{
	uint16 width, height, cache_idx, masklen, datalen;
	uint16 x, y;
	uint8 *mask;
	uint8 *data;
	RD_HCURSOR cursor;

	in_uint16_le(s, cache_idx);
	in_uint16_le(s, x);
	in_uint16_le(s, y);
	in_uint16_le(s, width);
	in_uint16_le(s, height);
	in_uint16_le(s, masklen);
	in_uint16_le(s, datalen);
	in_uint8p(s, data, datalen);
	in_uint8p(s, mask, masklen);
	if ((width != 32) || (height != 32))
	{
		/* unexpected cursor size; processing continues regardless */
		warning("process_colour_pointer_common: " "width %d height %d\n", width, height);
	}

	/* keep hotspot within cursor bounding box */
	x = MIN(x, width - 1);
	y = MIN(y, height - 1);
	cursor = ui_create_cursor(x, y, width, height, mask, data, bpp);
	ui_set_cursor(cursor);
	cache_put_cursor(cache_idx, cursor);
}
"CWE-787"
]
| rdesktop | 766ebcf6f23ccfe8323ac10242ae6e127d4505d2 | 277,610,795,918,580,400,000,000,000,000,000,000,000 | 29 | Malicious RDP server security fixes
This commit includes fixes for a set of 21 vulnerabilities in
rdesktop when a malicious RDP server is used.
All vulnerabilities was identified and reported by Eyal Itkin.
* Add rdp_protocol_error function that is used in several fixes
* Refactor of process_bitmap_updates
* Fix possible integer overflow in s_check_rem() on 32bit arch
* Fix memory corruption in process_bitmap_data - CVE-2018-8794
* Fix remote code execution in process_bitmap_data - CVE-2018-8795
* Fix remote code execution in process_plane - CVE-2018-8797
* Fix Denial of Service in mcs_recv_connect_response - CVE-2018-20175
* Fix Denial of Service in mcs_parse_domain_params - CVE-2018-20175
* Fix Denial of Service in sec_parse_crypt_info - CVE-2018-20176
* Fix Denial of Service in sec_recv - CVE-2018-20176
* Fix minor information leak in rdpdr_process - CVE-2018-8791
* Fix Denial of Service in cssp_read_tsrequest - CVE-2018-8792
* Fix remote code execution in cssp_read_tsrequest - CVE-2018-8793
* Fix Denial of Service in process_bitmap_data - CVE-2018-8796
* Fix minor information leak in rdpsnd_process_ping - CVE-2018-8798
* Fix Denial of Service in process_secondary_order - CVE-2018-8799
* Fix remote code execution in in ui_clip_handle_data - CVE-2018-8800
* Fix major information leak in ui_clip_handle_data - CVE-2018-20174
* Fix memory corruption in rdp_in_unistr - CVE-2018-20177
* Fix Denial of Service in process_demand_active - CVE-2018-20178
* Fix remote code execution in lspci_process - CVE-2018-20179
* Fix remote code execution in rdpsnddbg_process - CVE-2018-20180
* Fix remote code execution in seamless_process - CVE-2018-20181
* Fix remote code execution in seamless_process_line - CVE-2018-20182 |
/* Shared implementation of the JS print/logging helpers: stringify each
 * argument and route the result through GPAC's log system (or stderr
 * when logging is compiled out).
 * ltool: GPAC log tool to report under.
 * error_type: 0 = normal print, non-zero forces GF_LOG_ERROR; 2 also
 * prefixes "Throw " (used when reporting thrown values).
 * An optional leading numeric argument selects the log level; negative
 * levels select the help formatter (-1 highlighted, -2 plain) or raw
 * stderr output. The global `_gpac_log_name`, if set, overrides the
 * default "JS" prefix (empty string suppresses the prefix). */
static JSValue js_print_ex(JSContext *ctx, JSValueConst this_val, int argc, JSValueConst *argv, u32 ltool, u32 error_type)
{
    int i=0;
    Bool first=GF_TRUE;
    s32 logl = GF_LOG_INFO;
    JSValue v, g;
    const char *c_logname=NULL;
    const char *log_name = "JS";

    if ((argc>1) && JS_IsNumber(argv[0])) {
        JS_ToInt32(ctx, &logl, argv[0]);
        i=1;
    }
    if (error_type)
        logl = GF_LOG_ERROR;
    g = JS_GetGlobalObject(ctx);
    v = JS_GetPropertyStr(ctx, g, "_gpac_log_name");
    if (!JS_IsUndefined(v) && !JS_IsNull(v)) {
        c_logname = JS_ToCString(ctx, v);
        JS_FreeValue(ctx, v);
        if (c_logname) {
            log_name = c_logname;
            if (!strlen(log_name))
                log_name = NULL;
        }
    }
    JS_FreeValue(ctx, g);

    if (log_name) {
#ifndef GPAC_DISABLE_LOG
        GF_LOG(logl, ltool, ("[%s] ", log_name));
#else
        fprintf(stderr, "[%s] ", log_name);
#endif
    }
    if (error_type==2) {
#ifndef GPAC_DISABLE_LOG
        GF_LOG(logl, ltool, ("Throw "));
#else
        fprintf(stderr, "Throw ");
#endif
    }

    for (; i < argc; i++) {
        const char *str = JS_ToCString(ctx, argv[i]);
        if (!str) return GF_JS_EXCEPTION(ctx);

        if (logl==-1) {
            gf_sys_format_help(stderr, GF_PRINTARG_HIGHLIGHT_FIRST, "%s\n", str);
        } else if (logl==-2) {
            gf_sys_format_help(stderr, 0, "%s\n", str);
        } else if (logl<0) {
            fprintf(stderr, "%s%s", (first) ? "" : " ", str);
        } else {
#ifndef GPAC_DISABLE_LOG
            GF_LOG(logl, ltool, ("%s%s", (first) ? "" : " ", str));
#else
            fprintf(stderr, "%s%s", (first) ? "" : " ", str);
#endif
            /* exceptions additionally get their error detail dumped */
            if (JS_IsException(argv[i])) {
                js_dump_error_exc(ctx, argv[i]);
            }
        }
        JS_FreeCString(ctx, str);
        first=GF_FALSE;
    }
#ifndef GPAC_DISABLE_LOG
    GF_LOG(logl, ltool, ("\n"));
#else
    fprintf(stderr, "\n");
#endif
    if (c_logname) JS_FreeCString(ctx, c_logname);
    return JS_UNDEFINED;
}
"CWE-787"
]
| gpac | ea1eca00fd92fa17f0e25ac25652622924a9a6a0 | 262,681,323,173,767,940,000,000,000,000,000,000,000 | 75 | fixed #2138 |
/* inetaddr notifier: keep the IPv4 FIB consistent when an address is
 * added to (NETDEV_UP) or removed from (NETDEV_DOWN) an interface.
 * Bumps the per-netns address generation counter and flushes the route
 * cache on each change; when the last address disappears, IP is
 * disabled on the device instead. Always returns NOTIFY_DONE. */
static int fib_inetaddr_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
	struct net_device *dev = ifa->ifa_dev->dev;
	struct net *net = dev_net(dev);

	switch (event) {
	case NETDEV_UP:
		fib_add_ifaddr(ifa);
#ifdef CONFIG_IP_ROUTE_MULTIPATH
		/* revive multipath nexthops previously marked dead */
		fib_sync_up(dev, RTNH_F_DEAD);
#endif
		atomic_inc(&net->ipv4.dev_addr_genid);
		rt_cache_flush(dev_net(dev));
		break;
	case NETDEV_DOWN:
		fib_del_ifaddr(ifa, NULL);
		atomic_inc(&net->ipv4.dev_addr_genid);
		if (!ifa->ifa_dev->ifa_list) {
			/* Last address was deleted from this interface.
			 * Disable IP.
			 */
			fib_disable_ip(dev, event, true);
		} else {
			rt_cache_flush(dev_net(dev));
		}
		break;
	}
	return NOTIFY_DONE;
}
"CWE-399"
]
| net-next | fbd40ea0180a2d328c5adc61414dc8bab9335ce2 | 48,078,173,263,667,770,000,000,000,000,000,000,000 | 30 | ipv4: Don't do expensive useless work during inetdev destroy.
When an inetdev is destroyed, every address assigned to the interface
is removed. And in this scenerio we do two pointless things which can
be very expensive if the number of assigned interfaces is large:
1) Address promotion. We are deleting all addresses, so there is no
point in doing this.
2) A full nf conntrack table purge for every address. We only need to
do this once, as is already caught by the existing
masq_dev_notifier so masq_inet_event() can skip this.
Reported-by: Solar Designer <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
Tested-by: Cyrill Gorcunov <[email protected]> |
/* Completion handler for one mirror write operation.
 * On failure the written region is re-marked dirty so it gets retried,
 * and the job's sticky error code is set if the configured error action
 * is to report.  The op is always retired via mirror_iteration_done(). */
static void coroutine_fn mirror_write_complete(MirrorOp *op, int ret)
{
    MirrorBlockJob *job = op->s;

    if (ret >= 0) {
        mirror_iteration_done(op, ret);
        return;
    }

    /* re-dirty the range so a later iteration copies it again */
    bdrv_set_dirty_bitmap(job->dirty_bitmap, op->offset, op->bytes);
    if (mirror_error_action(job, false, -ret) == BLOCK_ERROR_ACTION_REPORT &&
        job->ret >= 0) {
        job->ret = ret;
    }
    mirror_iteration_done(op, ret);
} | 0 | [
"CWE-476"
]
| qemu | 66fed30c9cd11854fc878a4eceb507e915d7c9cd | 285,629,753,710,033,200,000,000,000,000,000,000,000 | 16 | block/mirror: fix NULL pointer dereference in mirror_wait_on_conflicts()
In mirror_iteration() we call mirror_wait_on_conflicts() with
`self` parameter set to NULL.
Starting from commit d44dae1a7c we dereference `self` pointer in
mirror_wait_on_conflicts() without checks if it is not NULL.
Backtrace:
Program terminated with signal SIGSEGV, Segmentation fault.
#0 mirror_wait_on_conflicts (self=0x0, s=<optimized out>, offset=<optimized out>, bytes=<optimized out>)
at ../block/mirror.c:172
172 self->waiting_for_op = op;
[Current thread is 1 (Thread 0x7f0908931ec0 (LWP 380249))]
(gdb) bt
#0 mirror_wait_on_conflicts (self=0x0, s=<optimized out>, offset=<optimized out>, bytes=<optimized out>)
at ../block/mirror.c:172
#1 0x00005610c5d9d631 in mirror_run (job=0x5610c76a2c00, errp=<optimized out>) at ../block/mirror.c:491
#2 0x00005610c5d58726 in job_co_entry (opaque=0x5610c76a2c00) at ../job.c:917
#3 0x00005610c5f046c6 in coroutine_trampoline (i0=<optimized out>, i1=<optimized out>)
at ../util/coroutine-ucontext.c:173
#4 0x00007f0909975820 in ?? () at ../sysdeps/unix/sysv/linux/x86_64/__start_context.S:91
from /usr/lib64/libc.so.6
Buglink: https://bugzilla.redhat.com/show_bug.cgi?id=2001404
Fixes: d44dae1a7c ("block/mirror: fix active mirror dead-lock in mirror_wait_on_conflicts")
Signed-off-by: Stefano Garzarella <[email protected]>
Message-Id: <[email protected]>
Reviewed-by: Vladimir Sementsov-Ogievskiy <[email protected]>
Signed-off-by: Hanna Reitz <[email protected]> |
/* Coroutine read entry point for the VHD (VPC) driver: serializes access
 * through the per-image coroutine mutex and delegates to vpc_read(). */
static coroutine_fn int vpc_co_read(BlockDriverState *bs, int64_t sector_num,
                                    uint8_t *buf, int nb_sectors)
{
    BDRVVPCState *state = bs->opaque;
    int result;

    qemu_co_mutex_lock(&state->lock);
    result = vpc_read(bs, sector_num, buf, nb_sectors);
    qemu_co_mutex_unlock(&state->lock);
    return result;
} | 0 | [
"CWE-20"
]
| qemu | 97f1c45c6f456572e5b504b8614e4a69e23b8e3a | 182,068,551,945,010,400,000,000,000,000,000,000,000 | 10 | vpc/vhd: add bounds check for max_table_entries and block_size (CVE-2014-0144)
This adds checks to make sure that max_table_entries and block_size
are in sane ranges. Memory is allocated based on max_table_entries,
and block_size is used to calculate indices into that allocated
memory, so if these values are incorrect that can lead to potential
unbounded memory allocation, or invalid memory accesses.
Also, the allocation of the pagetable is changed from g_malloc0()
to qemu_blockalign().
Signed-off-by: Jeff Cody <[email protected]>
Signed-off-by: Kevin Wolf <[email protected]>
Reviewed-by: Max Reitz <[email protected]>
Signed-off-by: Stefan Hajnoczi <[email protected]> |
/* Parse one TIFF IFD (image file directory) at dir_offset and recurse into
 * sub-IFDs (EXIF, GPS, interop, thumbnail/next IFD).  Returns TRUE on
 * success, FALSE on malformed/truncated input.
 *
 * FIX (PHP bug #72627, CWE-200): when the embedded thumbnail data was
 * truncated, the partially-filled buffer from safe_emalloc() was kept and
 * handed to exif_thumbnail_build(), exposing uninitialized heap memory to
 * the caller.  A short read now frees the buffer instead. */
static int exif_process_IFD_in_TIFF(image_info_type *ImageInfo, size_t dir_offset, int section_index TSRMLS_DC)
{
	int i, sn, num_entries, sub_section_index = 0;
	unsigned char *dir_entry;
	char tagname[64];
	size_t ifd_size, dir_size, entry_offset, next_offset, entry_length, entry_value=0, fgot;
	int entry_tag , entry_type;
	tag_table_type tag_table = exif_get_tag_table(section_index);

	/* bound recursion into nested IFDs (malicious files can self-reference) */
	if (ImageInfo->ifd_nesting_level > MAX_IFD_NESTING_LEVEL) {
		return FALSE;
	}

	if (ImageInfo->FileSize >= dir_offset+2) {
		sn = exif_file_sections_add(ImageInfo, M_PSEUDO, 2, NULL);
#ifdef EXIF_DEBUG
		exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Read from TIFF: filesize(x%04X), IFD dir(x%04X + x%04X)", ImageInfo->FileSize, dir_offset, 2);
#endif
		php_stream_seek(ImageInfo->infile, dir_offset, SEEK_SET); /* we do not know the order of sections */
		php_stream_read(ImageInfo->infile, (char*)ImageInfo->file.list[sn].data, 2);
		num_entries = php_ifd_get16u(ImageInfo->file.list[sn].data, ImageInfo->motorola_intel);
		dir_size = 2/*num dir entries*/ +12/*length of entry*/*num_entries +4/* offset to next ifd (points to thumbnail or NULL)*/;
		if (ImageInfo->FileSize >= dir_offset+dir_size) {
#ifdef EXIF_DEBUG
			exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Read from TIFF: filesize(x%04X), IFD dir(x%04X + x%04X), IFD entries(%d)", ImageInfo->FileSize, dir_offset+2, dir_size-2, num_entries);
#endif
			if (exif_file_sections_realloc(ImageInfo, sn, dir_size TSRMLS_CC)) {
				return FALSE;
			}
			php_stream_read(ImageInfo->infile, (char*)(ImageInfo->file.list[sn].data+2), dir_size-2);
			/*exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Dump: %s", exif_char_dump(ImageInfo->file.list[sn].data, dir_size, 0));*/
			next_offset = php_ifd_get32u(ImageInfo->file.list[sn].data + dir_size - 4, ImageInfo->motorola_intel);
#ifdef EXIF_DEBUG
			exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Read from TIFF done, next offset x%04X", next_offset);
#endif
			/* First pass: walk the entries just to find out how much trailing
			 * out-of-line value data belongs to this IFD (ifd_size). */
			ifd_size = dir_size;
			for(i=0;i<num_entries;i++) {
				dir_entry = ImageInfo->file.list[sn].data+2+i*12;
				entry_tag = php_ifd_get16u(dir_entry+0, ImageInfo->motorola_intel);
				entry_type = php_ifd_get16u(dir_entry+2, ImageInfo->motorola_intel);
				if (entry_type > NUM_FORMATS) {
					exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Read from TIFF: tag(0x%04X,%12s): Illegal format code 0x%04X, switching to BYTE", entry_tag, exif_get_tagname(entry_tag, tagname, -12, tag_table TSRMLS_CC), entry_type);
					/* Since this is repeated in exif_process_IFD_TAG make it a notice here */
					/* and make it a warning in the exif_process_IFD_TAG which is called */
					/* elsewhere. */
					entry_type = TAG_FMT_BYTE;
					/*The next line would break the image on writeback: */
					/* php_ifd_set16u(dir_entry+2, entry_type, ImageInfo->motorola_intel);*/
				}
				entry_length = php_ifd_get32u(dir_entry+4, ImageInfo->motorola_intel) * php_tiff_bytes_per_format[entry_type];
				if (entry_length <= 4) {
					/* value is stored inline in the entry itself */
					switch(entry_type) {
						case TAG_FMT_USHORT:
							entry_value  = php_ifd_get16u(dir_entry+8, ImageInfo->motorola_intel);
							break;
						case TAG_FMT_SSHORT:
							entry_value  = php_ifd_get16s(dir_entry+8, ImageInfo->motorola_intel);
							break;
						case TAG_FMT_ULONG:
							entry_value  = php_ifd_get32u(dir_entry+8, ImageInfo->motorola_intel);
							break;
						case TAG_FMT_SLONG:
							entry_value  = php_ifd_get32s(dir_entry+8, ImageInfo->motorola_intel);
							break;
					}
					switch(entry_tag) {
						case TAG_IMAGEWIDTH:
						case TAG_COMP_IMAGE_WIDTH:
							ImageInfo->Width  = entry_value;
							break;
						case TAG_IMAGEHEIGHT:
						case TAG_COMP_IMAGE_HEIGHT:
							ImageInfo->Height = entry_value;
							break;
						case TAG_PHOTOMETRIC_INTERPRETATION:
							switch (entry_value) {
								case PMI_BLACK_IS_ZERO:
								case PMI_WHITE_IS_ZERO:
								case PMI_TRANSPARENCY_MASK:
									ImageInfo->IsColor = 0;
									break;
								case PMI_RGB:
								case PMI_PALETTE_COLOR:
								case PMI_SEPARATED:
								case PMI_YCBCR:
								case PMI_CIELAB:
									ImageInfo->IsColor = 1;
									break;
							}
							break;
					}
				} else {
					entry_offset = php_ifd_get32u(dir_entry+8, ImageInfo->motorola_intel);
					/* if entry needs expanding ifd cache and entry is at end of current ifd cache. */
					/* otherwise there may be huge holes between two entries */
					if (entry_offset + entry_length > dir_offset + ifd_size
					  && entry_offset == dir_offset + ifd_size) {
						ifd_size = entry_offset + entry_length - dir_offset;
#ifdef EXIF_DEBUG
						exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Resize struct: x%04X + x%04X - x%04X = x%04X", entry_offset, entry_length, dir_offset, ifd_size);
#endif
					}
				}
			}
			if (ImageInfo->FileSize >= dir_offset + ImageInfo->file.list[sn].size) {
				if (ifd_size > dir_size) {
					/* the out-of-line value data must still fit in the file */
					if (dir_offset + ifd_size > ImageInfo->FileSize) {
						exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "Error in TIFF: filesize(x%04X) less than size of IFD(x%04X + x%04X)", ImageInfo->FileSize, dir_offset, ifd_size);
						return FALSE;
					}
					if (exif_file_sections_realloc(ImageInfo, sn, ifd_size TSRMLS_CC)) {
						return FALSE;
					}
					/* read values not stored in directory itself */
#ifdef EXIF_DEBUG
					exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Read from TIFF: filesize(x%04X), IFD(x%04X + x%04X)", ImageInfo->FileSize, dir_offset, ifd_size);
#endif
					php_stream_read(ImageInfo->infile, (char*)(ImageInfo->file.list[sn].data+dir_size), ifd_size-dir_size);
#ifdef EXIF_DEBUG
					exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Read from TIFF, done");
#endif
				}
				/* Second pass: now process the tags */
				for(i=0;i<num_entries;i++) {
					dir_entry = ImageInfo->file.list[sn].data+2+i*12;
					entry_tag = php_ifd_get16u(dir_entry+0, ImageInfo->motorola_intel);
					entry_type = php_ifd_get16u(dir_entry+2, ImageInfo->motorola_intel);
					/*entry_length = php_ifd_get32u(dir_entry+4, ImageInfo->motorola_intel);*/
					if (entry_tag == TAG_EXIF_IFD_POINTER ||
						entry_tag == TAG_INTEROP_IFD_POINTER ||
						entry_tag == TAG_GPS_IFD_POINTER ||
						entry_tag == TAG_SUB_IFD
					) {
						switch(entry_tag) {
							case TAG_EXIF_IFD_POINTER:
								ImageInfo->sections_found |= FOUND_EXIF;
								sub_section_index = SECTION_EXIF;
								break;
							case TAG_GPS_IFD_POINTER:
								ImageInfo->sections_found |= FOUND_GPS;
								sub_section_index = SECTION_GPS;
								break;
							case TAG_INTEROP_IFD_POINTER:
								ImageInfo->sections_found |= FOUND_INTEROP;
								sub_section_index = SECTION_INTEROP;
								break;
							case TAG_SUB_IFD:
								ImageInfo->sections_found |= FOUND_THUMBNAIL;
								sub_section_index = SECTION_THUMBNAIL;
								break;
						}
						entry_offset = php_ifd_get32u(dir_entry+8, ImageInfo->motorola_intel);
#ifdef EXIF_DEBUG
						exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Next IFD: %s @x%04X", exif_get_sectionname(sub_section_index), entry_offset);
#endif
						ImageInfo->ifd_nesting_level++;
						exif_process_IFD_in_TIFF(ImageInfo, entry_offset, sub_section_index TSRMLS_CC);
						if (section_index!=SECTION_THUMBNAIL && entry_tag==TAG_SUB_IFD) {
							if (ImageInfo->Thumbnail.filetype != IMAGE_FILETYPE_UNKNOWN
							&&  ImageInfo->Thumbnail.size
							&&  ImageInfo->Thumbnail.offset
							&&  ImageInfo->read_thumbnail
							) {
#ifdef EXIF_DEBUG
								exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "%s THUMBNAIL @0x%04X + 0x%04X", ImageInfo->Thumbnail.data ? "Ignore" : "Read", ImageInfo->Thumbnail.offset, ImageInfo->Thumbnail.size);
#endif
								if (!ImageInfo->Thumbnail.data) {
									ImageInfo->Thumbnail.data = safe_emalloc(ImageInfo->Thumbnail.size, 1, 0);
									php_stream_seek(ImageInfo->infile, ImageInfo->Thumbnail.offset, SEEK_SET);
									fgot = php_stream_read(ImageInfo->infile, ImageInfo->Thumbnail.data, ImageInfo->Thumbnail.size);
									if (fgot < ImageInfo->Thumbnail.size) {
										EXIF_ERRLOG_THUMBEOF(ImageInfo)
										/* FIX: do not keep/expose a partially-read
										 * (uninitialized) heap buffer */
										efree(ImageInfo->Thumbnail.data);
										ImageInfo->Thumbnail.data = NULL;
									} else {
										exif_thumbnail_build(ImageInfo TSRMLS_CC);
									}
								}
							}
						}
#ifdef EXIF_DEBUG
						exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Next IFD: %s done", exif_get_sectionname(sub_section_index));
#endif
					} else {
						if (!exif_process_IFD_TAG(ImageInfo, (char*)dir_entry,
												  (char*)(ImageInfo->file.list[sn].data-dir_offset),
												  ifd_size, 0, section_index, 0, tag_table TSRMLS_CC)) {
							return FALSE;
						}
					}
				}
				/* If we had a thumbnail in a SUB_IFD we have ANOTHER image in NEXT IFD */
				if (next_offset && section_index != SECTION_THUMBNAIL) {
					/* this should be a thumbnail IFD */
					/* the thumbnail itself is stored at Tag=StripOffsets */
#ifdef EXIF_DEBUG
					exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Read next IFD (THUMBNAIL) at x%04X", next_offset);
#endif
					ImageInfo->ifd_nesting_level++;
					exif_process_IFD_in_TIFF(ImageInfo, next_offset, SECTION_THUMBNAIL TSRMLS_CC);
#ifdef EXIF_DEBUG
					exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "%s THUMBNAIL @0x%04X + 0x%04X", ImageInfo->Thumbnail.data ? "Ignore" : "Read", ImageInfo->Thumbnail.offset, ImageInfo->Thumbnail.size);
#endif
					if (!ImageInfo->Thumbnail.data && ImageInfo->Thumbnail.offset && ImageInfo->Thumbnail.size && ImageInfo->read_thumbnail) {
						ImageInfo->Thumbnail.data = safe_emalloc(ImageInfo->Thumbnail.size, 1, 0);
						php_stream_seek(ImageInfo->infile, ImageInfo->Thumbnail.offset, SEEK_SET);
						fgot = php_stream_read(ImageInfo->infile, ImageInfo->Thumbnail.data, ImageInfo->Thumbnail.size);
						if (fgot < ImageInfo->Thumbnail.size) {
							EXIF_ERRLOG_THUMBEOF(ImageInfo)
							/* FIX: same as above — drop the truncated buffer */
							efree(ImageInfo->Thumbnail.data);
							ImageInfo->Thumbnail.data = NULL;
						} else {
							exif_thumbnail_build(ImageInfo TSRMLS_CC);
						}
					}
#ifdef EXIF_DEBUG
					exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Read next IFD (THUMBNAIL) done");
#endif
				}
				return TRUE;
			} else {
				exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "Error in TIFF: filesize(x%04X) less than size of IFD(x%04X)", ImageInfo->FileSize, dir_offset+ImageInfo->file.list[sn].size);
				return FALSE;
			}
		} else {
			exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "Error in TIFF: filesize(x%04X) less than size of IFD dir(x%04X)", ImageInfo->FileSize, dir_offset+dir_size);
			return FALSE;
		}
	} else {
		exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "Error in TIFF: filesize(x%04X) less than start of IFD dir(x%04X)", ImageInfo->FileSize, dir_offset+2);
		return FALSE;
	}
} | 1 | [
"CWE-200"
]
| php-src | 6dbb1ee46b5f4725cc6519abf91e512a2a10dfed | 50,702,194,199,841,100,000,000,000,000,000,000,000 | 228 | Fixed bug #72627: Memory Leakage In exif_process_IFD_in_TIFF |
/* Locale-aware lowercase conversion of a single character code.
 * Mirrors tolower() semantics: accepts an unsigned-char value or EOF. */
MagickExport int LocaleLowercase(const int c)
{
  /* FIX (CWE-125): EOF (-1) must pass through untouched.  Casting it to
     unsigned char turned it into 255, breaking EOF handling and allowing
     out-of-range reads of the classification tables. */
  if (c == EOF)
    return(c);
#if defined(MAGICKCORE_LOCALE_SUPPORT)
  if (c_locale != (locale_t) NULL)
    return(tolower_l((int) ((unsigned char) c),c_locale));
#endif
  return(tolower((int) ((unsigned char) c)));
} | 1 | [
"CWE-399",
"CWE-125"
]
| ImageMagick | edc7d3035883ddca8413e4fe7689aa2e579ef04a | 316,133,956,972,304,400,000,000,000,000,000,000,000 | 8 | ... |
/*
 * Allocate and set up the virtio-net send/receive virtqueues, then pin
 * queue interrupt affinity to CPUs.  Uses goto-based cleanup: the queue
 * structures allocated by virtnet_alloc_queues() are released if the
 * subsequent virtqueue lookup fails.  Returns 0 or a negative errno.
 */
static int init_vqs(struct virtnet_info *vi)
{
	int ret;

	/* Allocate send & receive queues */
	ret = virtnet_alloc_queues(vi);
	if (ret)
		goto err;

	ret = virtnet_find_vqs(vi);
	if (ret)
		goto err_free;

	/* hold CPU hotplug lock while distributing queue affinity */
	get_online_cpus();
	virtnet_set_affinity(vi);
	put_online_cpus();

	return 0;

err_free:
	virtnet_free_queues(vi);
err:
	return ret;
} | 0 | [
"CWE-119",
"CWE-787"
]
| linux | 48900cb6af4282fa0fb6ff4d72a81aa3dadb5c39 | 175,019,911,736,976,600,000,000,000,000,000,000,000 | 24 | virtio-net: drop NETIF_F_FRAGLIST
virtio declares support for NETIF_F_FRAGLIST, but assumes
that there are at most MAX_SKB_FRAGS + 2 fragments which isn't
always true with a fraglist.
A longer fraglist in the skb will make the call to skb_to_sgvec overflow
the sg array, leading to memory corruption.
Drop NETIF_F_FRAGLIST so we only get what we can handle.
Cc: Michael S. Tsirkin <[email protected]>
Signed-off-by: Jason Wang <[email protected]>
Acked-by: Michael S. Tsirkin <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
/* Unit test for nghttp2_session_change_stream_priority():
 * - reparenting/weight updates on existing streams,
 * - rejection of unknown streams, self-dependency and stream 0,
 * - implicit creation of an idle stream when depended upon,
 * - the same behavior from a client-side session. */
void test_nghttp2_session_change_stream_priority(void) {
  nghttp2_session *session;
  nghttp2_session_callbacks callbacks;
  nghttp2_stream *stream1, *stream2, *stream3, *stream5;
  nghttp2_priority_spec pri_spec;
  int rv;

  memset(&callbacks, 0, sizeof(callbacks));

  nghttp2_session_server_new(&session, &callbacks, NULL);

  /* build a small dependency tree: 1 <- 3 <- 2 */
  stream1 = open_recv_stream(session, 1);
  stream3 = open_recv_stream_with_dep_weight(session, 3, 199, stream1);
  stream2 = open_sent_stream_with_dep_weight(session, 2, 101, stream3);

  /* reparent stream 2 directly under stream 1 with weight 256 */
  nghttp2_priority_spec_init(&pri_spec, 1, 256, 0);

  rv = nghttp2_session_change_stream_priority(session, 2, &pri_spec);

  CU_ASSERT(0 == rv);
  CU_ASSERT(stream1 == stream2->dep_prev);
  CU_ASSERT(256 == stream2->weight);

  /* Cannot change stream which does not exist */
  rv = nghttp2_session_change_stream_priority(session, 5, &pri_spec);
  CU_ASSERT(NGHTTP2_ERR_INVALID_ARGUMENT == rv);

  /* It is an error to depend on itself */
  rv = nghttp2_session_change_stream_priority(session, 1, &pri_spec);
  CU_ASSERT(NGHTTP2_ERR_INVALID_ARGUMENT == rv);

  /* It is an error to change priority of root stream (0) */
  rv = nghttp2_session_change_stream_priority(session, 0, &pri_spec);
  CU_ASSERT(NGHTTP2_ERR_INVALID_ARGUMENT == rv);

  /* Depends on the non-existing idle stream.  This creates that idle
     stream. */
  nghttp2_priority_spec_init(&pri_spec, 5, 9, 1);

  rv = nghttp2_session_change_stream_priority(session, 2, &pri_spec);

  CU_ASSERT(0 == rv);

  stream5 = nghttp2_session_get_stream_raw(session, 5);

  CU_ASSERT(NULL != stream5);
  CU_ASSERT(&session->root == stream5->dep_prev);
  CU_ASSERT(stream5 == stream2->dep_prev);
  CU_ASSERT(9 == stream2->weight);

  nghttp2_session_del(session);

  /* Check that this works in client session too */
  nghttp2_session_client_new(&session, &callbacks, NULL);

  stream1 = open_sent_stream(session, 1);

  nghttp2_priority_spec_init(&pri_spec, 5, 9, 1);

  rv = nghttp2_session_change_stream_priority(session, 1, &pri_spec);

  CU_ASSERT(0 == rv);

  stream5 = nghttp2_session_get_stream_raw(session, 5);

  CU_ASSERT(NULL != stream5);
  CU_ASSERT(&session->root == stream5->dep_prev);
  CU_ASSERT(stream5 == stream1->dep_prev);
  CU_ASSERT(9 == stream1->weight);

  nghttp2_session_del(session);
} | 0 | []
| nghttp2 | 0a6ce87c22c69438ecbffe52a2859c3a32f1620f | 195,290,065,442,086,460,000,000,000,000,000,000,000 | 73 | Add nghttp2_option_set_max_outbound_ack |
/* media_player "play" callback: translate into an AVRCP passthrough
 * PLAY key press for the associated player. */
static int ct_play(struct media_player *mp, void *user_data)
{
	return ct_press((struct avrcp_player *) user_data, AVC_PLAY);
} | 0 | [
"CWE-200"
]
| bluez | e2b0f0d8d63e1223bb714a9efb37e2257818268b | 107,178,833,425,342,060,000,000,000,000,000,000,000 | 6 | avrcp: Fix not checking if params_len match number of received bytes
This makes sure the number of bytes in the params_len matches the
remaining bytes received so the code don't end up accessing invalid
memory. |
my_bool STDCALL
mysql_ssl_set(MYSQL *mysql MY_ATTRIBUTE((unused)) ,
const char *key MY_ATTRIBUTE((unused)),
const char *cert MY_ATTRIBUTE((unused)),
const char *ca MY_ATTRIBUTE((unused)),
const char *capath MY_ATTRIBUTE((unused)),
const char *cipher MY_ATTRIBUTE((unused)))
{
my_bool result= 0;
DBUG_ENTER("mysql_ssl_set");
#if defined(HAVE_OPENSSL) && !defined(EMBEDDED_LIBRARY)
result=
mysql_options(mysql, MYSQL_OPT_SSL_KEY, key) +
mysql_options(mysql, MYSQL_OPT_SSL_CERT, cert) +
mysql_options(mysql, MYSQL_OPT_SSL_CA, ca) +
mysql_options(mysql, MYSQL_OPT_SSL_CAPATH, capath) +
mysql_options(mysql, MYSQL_OPT_SSL_CIPHER, cipher)
? 1 : 0;
#endif
DBUG_RETURN(result); | 0 | [
"CWE-319"
]
| mysql-server | 0002e1380d5f8c113b6bce91f2cf3f75136fd7c7 | 158,311,243,398,466,220,000,000,000,000,000,000,000 | 20 | BUG#25575605: SETTING --SSL-MODE=REQUIRED SENDS CREDENTIALS BEFORE VERIFYING SSL CONNECTION
MYSQL_OPT_SSL_MODE option introduced.
It is set in case of --ssl-mode=REQUIRED and permits only SSL connection.
(cherry picked from commit f91b941842d240b8a62645e507f5554e8be76aec) |
/*
 * Fetch and validate a GuC/HuC firmware blob.
 *
 * Requests the firmware named by uc_fw->path, validates the CSS header
 * (sizes, RSA key size, overall size against WOPCM), extracts version
 * numbers, copies the blob into a shmem GEM object and marks the firmware
 * AVAILABLE.  On any failure the status is set to MISSING (file not found)
 * or ERROR, and a negative errno is returned.  Every size field read from
 * the blob is cross-checked before use, since the file is external input.
 */
int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
{
	struct drm_i915_private *i915 = __uc_fw_to_gt(uc_fw)->i915;
	struct device *dev = i915->drm.dev;
	struct drm_i915_gem_object *obj;
	const struct firmware *fw = NULL;
	struct uc_css_header *css;
	size_t size;
	int err;

	GEM_BUG_ON(!i915->wopcm.size);
	GEM_BUG_ON(!intel_uc_fw_is_enabled(uc_fw));

	/* fault-injection hooks for selftests */
	err = i915_inject_probe_error(i915, -ENXIO);
	if (err)
		goto fail;

	__force_fw_fetch_failures(uc_fw, -EINVAL);
	__force_fw_fetch_failures(uc_fw, -ESTALE);

	err = request_firmware(&fw, uc_fw->path, dev);
	if (err)
		goto fail;

	/* Check the size of the blob before examining buffer contents */
	if (unlikely(fw->size < sizeof(struct uc_css_header))) {
		drm_warn(&i915->drm, "%s firmware %s: invalid size: %zu < %zu\n",
			 intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
			 fw->size, sizeof(struct uc_css_header));
		err = -ENODATA;
		goto fail;
	}

	css = (struct uc_css_header *)fw->data;

	/* Check integrity of size values inside CSS header */
	size = (css->header_size_dw - css->key_size_dw - css->modulus_size_dw -
		css->exponent_size_dw) * sizeof(u32);
	if (unlikely(size != sizeof(struct uc_css_header))) {
		drm_warn(&i915->drm,
			 "%s firmware %s: unexpected header size: %zu != %zu\n",
			 intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
			 fw->size, sizeof(struct uc_css_header));
		err = -EPROTO;
		goto fail;
	}

	/* uCode size must calculated from other sizes */
	uc_fw->ucode_size = (css->size_dw - css->header_size_dw) * sizeof(u32);

	/* now RSA */
	if (unlikely(css->key_size_dw != UOS_RSA_SCRATCH_COUNT)) {
		drm_warn(&i915->drm, "%s firmware %s: unexpected key size: %u != %u\n",
			 intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
			 css->key_size_dw, UOS_RSA_SCRATCH_COUNT);
		err = -EPROTO;
		goto fail;
	}
	uc_fw->rsa_size = css->key_size_dw * sizeof(u32);

	/* At least, it should have header, uCode and RSA. Size of all three. */
	size = sizeof(struct uc_css_header) + uc_fw->ucode_size + uc_fw->rsa_size;
	if (unlikely(fw->size < size)) {
		drm_warn(&i915->drm, "%s firmware %s: invalid size: %zu < %zu\n",
			 intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
			 fw->size, size);
		err = -ENOEXEC;
		goto fail;
	}

	/* Sanity check whether this fw is not larger than whole WOPCM memory */
	size = __intel_uc_fw_get_upload_size(uc_fw);
	if (unlikely(size >= i915->wopcm.size)) {
		drm_warn(&i915->drm, "%s firmware %s: invalid size: %zu > %zu\n",
			 intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
			 size, (size_t)i915->wopcm.size);
		err = -E2BIG;
		goto fail;
	}

	/* Get version numbers from the CSS header */
	uc_fw->major_ver_found = FIELD_GET(CSS_SW_VERSION_UC_MAJOR,
					   css->sw_version);
	uc_fw->minor_ver_found = FIELD_GET(CSS_SW_VERSION_UC_MINOR,
					   css->sw_version);

	/* version mismatch only blocks load when the path wasn't overridden */
	if (uc_fw->major_ver_found != uc_fw->major_ver_wanted ||
	    uc_fw->minor_ver_found < uc_fw->minor_ver_wanted) {
		drm_notice(&i915->drm, "%s firmware %s: unexpected version: %u.%u != %u.%u\n",
			   intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
			   uc_fw->major_ver_found, uc_fw->minor_ver_found,
			   uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted);
		if (!intel_uc_fw_is_overridden(uc_fw)) {
			err = -ENOEXEC;
			goto fail;
		}
	}

	if (uc_fw->type == INTEL_UC_FW_TYPE_GUC)
		uc_fw->private_data_size = css->private_data_size;

	obj = i915_gem_object_create_shmem_from_data(i915, fw->data, fw->size);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto fail;
	}

	uc_fw->obj = obj;
	uc_fw->size = fw->size;
	intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_AVAILABLE);

	release_firmware(fw);
	return 0;

fail:
	intel_uc_fw_change_status(uc_fw, err == -ENOENT ?
				  INTEL_UC_FIRMWARE_MISSING :
				  INTEL_UC_FIRMWARE_ERROR);

	drm_notice(&i915->drm, "%s firmware %s: fetch failed with error %d\n",
		   intel_uc_fw_type_repr(uc_fw->type), uc_fw->path, err);
	drm_info(&i915->drm, "%s firmware(s) can be downloaded from %s\n",
		 intel_uc_fw_type_repr(uc_fw->type), INTEL_UC_FIRMWARE_URL);

	release_firmware(fw);		/* OK even if fw is NULL */
	return err;
} | 0 | [
"CWE-20",
"CWE-190"
]
| linux | c784e5249e773689e38d2bc1749f08b986621a26 | 180,683,099,285,407,440,000,000,000,000,000,000,000 | 127 | drm/i915/guc: Update to use firmware v49.0.1
The latest GuC firmware includes a number of interface changes that
require driver updates to match.
* Starting from Gen11, the ID to be provided to GuC needs to contain
the engine class in bits [0..2] and the instance in bits [3..6].
NOTE: this patch breaks pointer dereferences in some existing GuC
functions that use the guc_id to dereference arrays but these functions
are not used for now as we have GuC submission disabled and we will
update these functions in follow up patch which requires new IDs.
* The new GuC requires the additional data structure (ADS) and associated
'private_data' pointer to be setup. This is basically a scratch area
of memory that the GuC owns. The size is read from the CSS header.
* There is now a physical to logical engine mapping table in the ADS
which needs to be configured in order for the firmware to load. For
now, the table is initialised with a 1 to 1 mapping.
* GUC_CTL_CTXINFO has been removed from the initialization params.
* reg_state_buffer is maintained internally by the GuC as part of
the private data.
* The ADS layout has changed significantly. This patch updates the
shared structure and also adds better documentation of the layout.
* While i915 does not use GuC doorbells, the firmware now requires
that some initialisation is done.
* The number of engine classes and instances supported in the ADS has
been increased.
Signed-off-by: John Harrison <[email protected]>
Signed-off-by: Matthew Brost <[email protected]>
Signed-off-by: Daniele Ceraolo Spurio <[email protected]>
Signed-off-by: Oscar Mateo <[email protected]>
Signed-off-by: Michel Thierry <[email protected]>
Signed-off-by: Rodrigo Vivi <[email protected]>
Signed-off-by: Michal Wajdeczko <[email protected]>
Cc: Michal Winiarski <[email protected]>
Cc: Tomasz Lis <[email protected]>
Cc: Joonas Lahtinen <[email protected]>
Reviewed-by: Daniele Ceraolo Spurio <[email protected]>
Signed-off-by: Joonas Lahtinen <[email protected]>
Link: https://patchwork.freedesktop.org/patch/msgid/[email protected] |
// Look up the Mach-O build-version load command via the generic
// command<T>() accessor.  NOTE(review): presumably returns nullptr when
// the binary carries no such command — confirm against command<T>().
BuildVersion* Binary::build_version() {
  return command<BuildVersion>();
} | 0 | [
"CWE-703"
]
| LIEF | 7acf0bc4224081d4f425fcc8b2e361b95291d878 | 182,860,969,605,686,470,000,000,000,000,000,000,000 | 3 | Resolve #764 |
/*
 * pin_user_pages_remote() - pin user pages of a (possibly remote) mm.
 *
 * Thin wrapper around __get_user_pages_remote() that forces FOLL_PIN.
 * FOLL_GET and FOLL_PIN are mutually exclusive, so a caller passing
 * FOLL_GET gets -EINVAL (with a one-shot warning) instead of mixed
 * semantics.  Returns the number of pages pinned or a negative errno.
 */
long pin_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
			   unsigned long start, unsigned long nr_pages,
			   unsigned int gup_flags, struct page **pages,
			   struct vm_area_struct **vmas, int *locked)
{
	/* FOLL_GET and FOLL_PIN are mutually exclusive. */
	if (WARN_ON_ONCE(gup_flags & FOLL_GET))
		return -EINVAL;

	gup_flags |= FOLL_PIN;
	return __get_user_pages_remote(tsk, mm, start, nr_pages, gup_flags,
				       pages, vmas, locked);
} | 0 | [
"CWE-362"
]
| linux | 17839856fd588f4ab6b789f482ed3ffd7c403e1f | 43,302,032,949,414,170,000,000,000,000,000,000,000 | 13 | gup: document and work around "COW can break either way" issue
Doing a "get_user_pages()" on a copy-on-write page for reading can be
ambiguous: the page can be COW'ed at any time afterwards, and the
direction of a COW event isn't defined.
Yes, whoever writes to it will generally do the COW, but if the thread
that did the get_user_pages() unmapped the page before the write (and
that could happen due to memory pressure in addition to any outright
action), the writer could also just take over the old page instead.
End result: the get_user_pages() call might result in a page pointer
that is no longer associated with the original VM, and is associated
with - and controlled by - another VM having taken it over instead.
So when doing a get_user_pages() on a COW mapping, the only really safe
thing to do would be to break the COW when getting the page, even when
only getting it for reading.
At the same time, some users simply don't even care.
For example, the perf code wants to look up the page not because it
cares about the page, but because the code simply wants to look up the
physical address of the access for informational purposes, and doesn't
really care about races when a page might be unmapped and remapped
elsewhere.
This adds logic to force a COW event by setting FOLL_WRITE on any
copy-on-write mapping when FOLL_GET (or FOLL_PIN) is used to get a page
pointer as a result.
The current semantics end up being:
- __get_user_pages_fast(): no change. If you don't ask for a write,
you won't break COW. You'd better know what you're doing.
- get_user_pages_fast(): the fast-case "look it up in the page tables
without anything getting mmap_sem" now refuses to follow a read-only
page, since it might need COW breaking. Which happens in the slow
path - the fast path doesn't know if the memory might be COW or not.
- get_user_pages() (including the slow-path fallback for gup_fast()):
for a COW mapping, turn on FOLL_WRITE for FOLL_GET/FOLL_PIN, with
very similar semantics to FOLL_FORCE.
If it turns out that we want finer granularity (ie "only break COW when
it might actually matter" - things like the zero page are special and
don't need to be broken) we might need to push these semantics deeper
into the lookup fault path. So if people care enough, it's possible
that we might end up adding a new internal FOLL_BREAK_COW flag to go
with the internal FOLL_COW flag we already have for tracking "I had a
COW".
Alternatively, if it turns out that different callers might want to
explicitly control the forced COW break behavior, we might even want to
make such a flag visible to the users of get_user_pages() instead of
using the above default semantics.
But for now, this is mostly commentary on the issue (this commit message
being a lot bigger than the patch, and that patch in turn is almost all
comments), with that minimal "enable COW breaking early" logic using the
existing FOLL_WRITE behavior.
[ It might be worth noting that we've always had this ambiguity, and it
could arguably be seen as a user-space issue.
You only get private COW mappings that could break either way in
situations where user space is doing cooperative things (ie fork()
before an execve() etc), but it _is_ surprising and very subtle, and
fork() is supposed to give you independent address spaces.
So let's treat this as a kernel issue and make the semantics of
get_user_pages() easier to understand. Note that obviously a true
shared mapping will still get a page that can change under us, so this
does _not_ mean that get_user_pages() somehow returns any "stable"
page ]
Reported-by: Jann Horn <[email protected]>
Tested-by: Christoph Hellwig <[email protected]>
Acked-by: Oleg Nesterov <[email protected]>
Acked-by: Kirill Shutemov <[email protected]>
Acked-by: Jan Kara <[email protected]>
Cc: Andrea Arcangeli <[email protected]>
Cc: Matthew Wilcox <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
/*
 * ioctl handler for IPIP tunnel devices.
 *
 * SIOCGETTUNNEL: copy the tunnel parameters out to userspace (looking the
 *                tunnel up by parameters when called on the fallback device).
 * SIOCADDTUNNEL/SIOCCHGTUNNEL: create a new tunnel or update an existing
 *                one; requires CAP_NET_ADMIN and validates that the
 *                parameters describe a plain IPv4-in-IPv4 tunnel.
 * SIOCDELTUNNEL: destroy a tunnel; requires CAP_NET_ADMIN; the fallback
 *                device itself cannot be deleted.
 * Returns 0 or a negative errno.
 */
ipip_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
{
	int err = 0;
	struct ip_tunnel_parm p;
	struct ip_tunnel *t;
	struct net *net = dev_net(dev);
	struct ipip_net *ipn = net_generic(net, ipip_net_id);

	switch (cmd) {
	case SIOCGETTUNNEL:
		t = NULL;
		if (dev == ipn->fb_tunnel_dev) {
			/* fallback device: identify the tunnel by parameters */
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
				err = -EFAULT;
				break;
			}
			t = ipip_tunnel_locate(net, &p, 0);
		}
		if (t == NULL)
			t = netdev_priv(dev);
		memcpy(&p, &t->parms, sizeof(p));
		if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
			err = -EFAULT;
		break;

	case SIOCADDTUNNEL:
	case SIOCCHGTUNNEL:
		err = -EPERM;
		if (!capable(CAP_NET_ADMIN))
			goto done;

		err = -EFAULT;
		if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
			goto done;

		/* only plain IPv4-in-IPv4, no IP options, no fragment offset */
		err = -EINVAL;
		if (p.iph.version != 4 || p.iph.protocol != IPPROTO_IPIP ||
		    p.iph.ihl != 5 || (p.iph.frag_off&htons(~IP_DF)))
			goto done;
		if (p.iph.ttl)
			p.iph.frag_off |= htons(IP_DF);

		t = ipip_tunnel_locate(net, &p, cmd == SIOCADDTUNNEL);

		if (dev != ipn->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
			if (t != NULL) {
				/* new parameters collide with another tunnel */
				if (t->dev != dev) {
					err = -EEXIST;
					break;
				}
			} else {
				/* point-to-point flag must match daddr presence */
				if (((dev->flags&IFF_POINTOPOINT) && !p.iph.daddr) ||
				    (!(dev->flags&IFF_POINTOPOINT) && p.iph.daddr)) {
					err = -EINVAL;
					break;
				}
				/* re-key: unlink, update endpoints, relink */
				t = netdev_priv(dev);
				ipip_tunnel_unlink(ipn, t);
				t->parms.iph.saddr = p.iph.saddr;
				t->parms.iph.daddr = p.iph.daddr;
				memcpy(dev->dev_addr, &p.iph.saddr, 4);
				memcpy(dev->broadcast, &p.iph.daddr, 4);
				ipip_tunnel_link(ipn, t);
				netdev_state_change(dev);
			}
		}

		if (t) {
			err = 0;
			if (cmd == SIOCCHGTUNNEL) {
				t->parms.iph.ttl = p.iph.ttl;
				t->parms.iph.tos = p.iph.tos;
				t->parms.iph.frag_off = p.iph.frag_off;
				if (t->parms.link != p.link) {
					t->parms.link = p.link;
					ipip_tunnel_bind_dev(dev);
					netdev_state_change(dev);
				}
			}
			if (copy_to_user(ifr->ifr_ifru.ifru_data, &t->parms, sizeof(p)))
				err = -EFAULT;
		} else
			err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
		break;

	case SIOCDELTUNNEL:
		err = -EPERM;
		if (!capable(CAP_NET_ADMIN))
			goto done;

		if (dev == ipn->fb_tunnel_dev) {
			err = -EFAULT;
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
				goto done;
			err = -ENOENT;
			if ((t = ipip_tunnel_locate(net, &p, 0)) == NULL)
				goto done;
			err = -EPERM;
			/* never delete the fallback device itself */
			if (t->dev == ipn->fb_tunnel_dev)
				goto done;
			dev = t->dev;
		}
		unregister_netdevice(dev);
		err = 0;
		break;

	default:
		err = -EINVAL;
	}

done:
	return err;
} | 0 | []
| linux-2.6 | d5aa407f59f5b83d2c50ec88f5bf56d40f1f8978 | 337,395,392,452,217,150,000,000,000,000,000,000,000 | 113 | tunnels: fix netns vs proto registration ordering
Same stuff as in ip_gre patch: receive hook can be called before netns
setup is done, oopsing in net_generic().
Signed-off-by: Alexey Dobriyan <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
bool CIRCSock::OnTopicMessage(CTopicMessage& Message) {
const CNick& Nick = Message.GetNick();
CChan* pChan = m_pNetwork->FindChan(Message.GetParam(0));
if (pChan) {
Message.SetChan(pChan);
bool bReturn = false;
IRCSOCKMODULECALL(OnTopicMessage(Message), &bReturn);
if (bReturn) return true;
pChan->SetTopicOwner(Nick.GetNick());
pChan->SetTopicDate((unsigned long)time(nullptr));
pChan->SetTopic(Message.GetTopic());
}
return (pChan && pChan->IsDetached());
} | 0 | [
"CWE-20",
"CWE-284"
]
| znc | d22fef8620cdd87490754f607e7153979731c69d | 233,212,751,375,484,100,000,000,000,000,000,000,000 | 17 | Better cleanup lines coming from network.
Thanks for Jeriko One <[email protected]> for finding and reporting this. |
static int floppy_alloc_disk(unsigned int drive, unsigned int type)
{
struct gendisk *disk;
disk = blk_mq_alloc_disk(&tag_sets[drive], NULL);
if (IS_ERR(disk))
return PTR_ERR(disk);
blk_queue_max_hw_sectors(disk->queue, 64);
disk->major = FLOPPY_MAJOR;
disk->first_minor = TOMINOR(drive) | (type << 2);
disk->minors = 1;
disk->fops = &floppy_fops;
disk->flags |= GENHD_FL_NO_PART;
disk->events = DISK_EVENT_MEDIA_CHANGE;
if (type)
sprintf(disk->disk_name, "fd%d_type%d", drive, type);
else
sprintf(disk->disk_name, "fd%d", drive);
/* to be cleaned up... */
disk->private_data = (void *)(long)drive;
disk->flags |= GENHD_FL_REMOVABLE;
disks[drive][type] = disk;
return 0;
} | 0 | [
"CWE-416"
]
| linux | 233087ca063686964a53c829d547c7571e3f67bf | 53,094,459,308,919,530,000,000,000,000,000,000,000 | 26 | floppy: disable FDRAWCMD by default
Minh Yuan reported a concurrency use-after-free issue in the floppy code
between raw_cmd_ioctl and seek_interrupt.
[ It turns out this has been around, and that others have reported the
KASAN splats over the years, but Minh Yuan had a reproducer for it and
so gets primary credit for reporting it for this fix - Linus ]
The problem is, this driver tends to break very easily and nowadays,
nobody is expected to use FDRAWCMD anyway since it was used to
manipulate non-standard formats. The risk of breaking the driver is
higher than the risk presented by this race, and accessing the device
requires privileges anyway.
Let's just add a config option to completely disable this ioctl and
leave it disabled by default. Distros shouldn't use it, and only those
running on antique hardware might need to enable it.
Link: https://lore.kernel.org/all/[email protected]/
Link: https://lore.kernel.org/lkml/CAKcFiNC=MfYVW-Jt9A3=FPJpTwCD2PL_ULNCpsCVE5s8ZeBQgQ@mail.gmail.com
Link: https://lore.kernel.org/all/CAEAjamu1FRhz6StCe_55XY5s389ZP_xmCF69k987En+1z53=eg@mail.gmail.com
Reported-by: Minh Yuan <[email protected]>
Reported-by: [email protected]
Reported-by: cruise k <[email protected]>
Reported-by: Kyungtae Kim <[email protected]>
Suggested-by: Linus Torvalds <[email protected]>
Tested-by: Denis Efremov <[email protected]>
Signed-off-by: Willy Tarreau <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
static int generic_print_tuple(struct seq_file *s,
const struct nf_conntrack_tuple *tuple)
{
return 0;
} | 0 | [
"CWE-20",
"CWE-254",
"CWE-787"
]
| linux | db29a9508a9246e77087c5531e45b2c88ec6988b | 244,187,044,637,609,730,000,000,000,000,000,000,000 | 5 | netfilter: conntrack: disable generic tracking for known protocols
Given following iptables ruleset:
-P FORWARD DROP
-A FORWARD -m sctp --dport 9 -j ACCEPT
-A FORWARD -p tcp --dport 80 -j ACCEPT
-A FORWARD -p tcp -m conntrack -m state ESTABLISHED,RELATED -j ACCEPT
One would assume that this allows SCTP on port 9 and TCP on port 80.
Unfortunately, if the SCTP conntrack module is not loaded, this allows
*all* SCTP communication, to pass though, i.e. -p sctp -j ACCEPT,
which we think is a security issue.
This is because on the first SCTP packet on port 9, we create a dummy
"generic l4" conntrack entry without any port information (since
conntrack doesn't know how to extract this information).
All subsequent packets that are unknown will then be in established
state since they will fallback to proto_generic and will match the
'generic' entry.
Our originally proposed version [1] completely disabled generic protocol
tracking, but Jozsef suggests to not track protocols for which a more
suitable helper is available, hence we now mitigate the issue for in
tree known ct protocol helpers only, so that at least NAT and direction
information will still be preserved for others.
[1] http://www.spinics.net/lists/netfilter-devel/msg33430.html
Joint work with Daniel Borkmann.
Signed-off-by: Florian Westphal <[email protected]>
Signed-off-by: Daniel Borkmann <[email protected]>
Acked-by: Jozsef Kadlecsik <[email protected]>
Signed-off-by: Pablo Neira Ayuso <[email protected]> |
__releases(seq->private->l->lock)
{
struct bt_seq_state *s = seq->private;
struct bt_sock_list *l = s->l;
read_unlock(&l->lock);
} | 0 | [
"CWE-20",
"CWE-269"
]
| linux | f3d3342602f8bcbf37d7c46641cb9bca7618eb1c | 79,695,482,564,521,430,000,000,000,000,000,000,000 | 7 | net: rework recvmsg handler msg_name and msg_namelen logic
This patch now always passes msg->msg_namelen as 0. recvmsg handlers must
set msg_namelen to the proper size <= sizeof(struct sockaddr_storage)
to return msg_name to the user.
This prevents numerous uninitialized memory leaks we had in the
recvmsg handlers and makes it harder for new code to accidentally leak
uninitialized memory.
Optimize for the case recvfrom is called with NULL as address. We don't
need to copy the address at all, so set it to NULL before invoking the
recvmsg handler. We can do so, because all the recvmsg handlers must
cope with the case a plain read() is called on them. read() also sets
msg_name to NULL.
Also document these changes in include/linux/net.h as suggested by David
Miller.
Changes since RFC:
Set msg->msg_name = NULL if user specified a NULL in msg_name but had a
non-null msg_namelen in verify_iovec/verify_compat_iovec. This doesn't
affect sendto as it would bail out earlier while trying to copy-in the
address. It also more naturally reflects the logic by the callers of
verify_iovec.
With this change in place I could remove "
if (!uaddr || msg_sys->msg_namelen == 0)
msg->msg_name = NULL
".
This change does not alter the user visible error logic as we ignore
msg_namelen as long as msg_name is NULL.
Also remove two unnecessary curly brackets in ___sys_recvmsg and change
comments to netdev style.
Cc: David Miller <[email protected]>
Suggested-by: Eric Dumazet <[email protected]>
Signed-off-by: Hannes Frederic Sowa <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
asmlinkage void do_notify_resume(__u32 thread_info_flags)
{
/* pending single-step? */
if (thread_info_flags & _TIF_SINGLESTEP)
clear_thread_flag(TIF_SINGLESTEP);
/* deal with pending signal delivery */
if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK))
do_signal();
/* deal with notification on about to resume userspace execution */
if (thread_info_flags & _TIF_NOTIFY_RESUME) {
clear_thread_flag(TIF_NOTIFY_RESUME);
tracehook_notify_resume(__frame);
}
} /* end do_notify_resume() */ | 1 | []
| linux-2.6 | ee18d64c1f632043a02e6f5ba5e045bb26a5465f | 235,324,972,032,490,840,000,000,000,000,000,000,000 | 17 | KEYS: Add a keyctl to install a process's session keyring on its parent [try #6]
Add a keyctl to install a process's session keyring onto its parent. This
replaces the parent's session keyring. Because the COW credential code does
not permit one process to change another process's credentials directly, the
change is deferred until userspace next starts executing again. Normally this
will be after a wait*() syscall.
To support this, three new security hooks have been provided:
cred_alloc_blank() to allocate unset security creds, cred_transfer() to fill in
the blank security creds and key_session_to_parent() - which asks the LSM if
the process may replace its parent's session keyring.
The replacement may only happen if the process has the same ownership details
as its parent, and the process has LINK permission on the session keyring, and
the session keyring is owned by the process, and the LSM permits it.
Note that this requires alteration to each architecture's notify_resume path.
This has been done for all arches barring blackfin, m68k* and xtensa, all of
which need assembly alteration to support TIF_NOTIFY_RESUME. This allows the
replacement to be performed at the point the parent process resumes userspace
execution.
This allows the userspace AFS pioctl emulation to fully emulate newpag() and
the VIOCSETTOK and VIOCSETTOK2 pioctls, all of which require the ability to
alter the parent process's PAG membership. However, since kAFS doesn't use
PAGs per se, but rather dumps the keys into the session keyring, the session
keyring of the parent must be replaced if, for example, VIOCSETTOK is passed
the newpag flag.
This can be tested with the following program:
#include <stdio.h>
#include <stdlib.h>
#include <keyutils.h>
#define KEYCTL_SESSION_TO_PARENT 18
#define OSERROR(X, S) do { if ((long)(X) == -1) { perror(S); exit(1); } } while(0)
int main(int argc, char **argv)
{
key_serial_t keyring, key;
long ret;
keyring = keyctl_join_session_keyring(argv[1]);
OSERROR(keyring, "keyctl_join_session_keyring");
key = add_key("user", "a", "b", 1, keyring);
OSERROR(key, "add_key");
ret = keyctl(KEYCTL_SESSION_TO_PARENT);
OSERROR(ret, "KEYCTL_SESSION_TO_PARENT");
return 0;
}
Compiled and linked with -lkeyutils, you should see something like:
[dhowells@andromeda ~]$ keyctl show
Session Keyring
-3 --alswrv 4043 4043 keyring: _ses
355907932 --alswrv 4043 -1 \_ keyring: _uid.4043
[dhowells@andromeda ~]$ /tmp/newpag
[dhowells@andromeda ~]$ keyctl show
Session Keyring
-3 --alswrv 4043 4043 keyring: _ses
1055658746 --alswrv 4043 4043 \_ user: a
[dhowells@andromeda ~]$ /tmp/newpag hello
[dhowells@andromeda ~]$ keyctl show
Session Keyring
-3 --alswrv 4043 4043 keyring: hello
340417692 --alswrv 4043 4043 \_ user: a
Where the test program creates a new session keyring, sticks a user key named
'a' into it and then installs it on its parent.
Signed-off-by: David Howells <[email protected]>
Signed-off-by: James Morris <[email protected]> |
var_check_permission(dictitem_T *di, char_u *name)
{
if (var_check_ro(di->di_flags, name, FALSE)
|| value_check_lock(di->di_tv.v_lock, name, FALSE)
|| var_check_lock(di->di_flags, name, FALSE))
return FAIL;
return OK;
} | 0 | [
"CWE-476"
]
| vim | 0f6e28f686dbb59ab3b562408ab9b2234797b9b1 | 17,004,695,814,355,672,000,000,000,000,000,000,000 | 8 | patch 8.2.4428: crash when switching tabpage while in the cmdline window
Problem: Crash when switching tabpage while in the cmdline window.
Solution: Disallow switching tabpage when in the cmdline window. |
**/
const CImg<T>& save_analyze(const char *const filename, const float *const voxel_size=0) const {
if (!filename)
throw CImgArgumentException(_cimg_instance
"save_analyze(): Specified filename is (null).",
cimg_instance);
if (is_empty()) { cimg::fempty(0,filename); return *this; }
std::FILE *file;
CImg<charT> hname(1024), iname(1024);
const char *const ext = cimg::split_filename(filename);
short datatype = -1;
if (!*ext) {
cimg_snprintf(hname,hname._width,"%s.hdr",filename);
cimg_snprintf(iname,iname._width,"%s.img",filename);
}
if (!cimg::strncasecmp(ext,"hdr",3)) {
std::strcpy(hname,filename);
std::strncpy(iname,filename,iname._width - 1);
cimg_sprintf(iname._data + std::strlen(iname) - 3,"img");
}
if (!cimg::strncasecmp(ext,"img",3)) {
std::strcpy(hname,filename);
std::strncpy(iname,filename,iname._width - 1);
cimg_sprintf(hname._data + std::strlen(iname) - 3,"hdr");
}
if (!cimg::strncasecmp(ext,"nii",3)) {
std::strncpy(hname,filename,hname._width - 1); *iname = 0;
}
CImg<charT> header(*iname?348:352,1,1,1,0);
int *const iheader = (int*)header._data;
*iheader = 348;
std::strcpy(header._data + 4,"CImg");
std::strcpy(header._data + 14," ");
((short*)&(header[36]))[0] = 4096;
((char*)&(header[38]))[0] = 114;
((short*)&(header[40]))[0] = 4;
((short*)&(header[40]))[1] = (short)_width;
((short*)&(header[40]))[2] = (short)_height;
((short*)&(header[40]))[3] = (short)_depth;
((short*)&(header[40]))[4] = (short)_spectrum;
if (!cimg::strcasecmp(pixel_type(),"bool")) datatype = 2;
if (!cimg::strcasecmp(pixel_type(),"unsigned char")) datatype = 2;
if (!cimg::strcasecmp(pixel_type(),"char")) datatype = 2;
if (!cimg::strcasecmp(pixel_type(),"unsigned short")) datatype = 4;
if (!cimg::strcasecmp(pixel_type(),"short")) datatype = 4;
if (!cimg::strcasecmp(pixel_type(),"unsigned int")) datatype = 8;
if (!cimg::strcasecmp(pixel_type(),"int")) datatype = 8;
if (!cimg::strcasecmp(pixel_type(),"unsigned int64")) datatype = 8;
if (!cimg::strcasecmp(pixel_type(),"int64")) datatype = 8;
if (!cimg::strcasecmp(pixel_type(),"float")) datatype = 16;
if (!cimg::strcasecmp(pixel_type(),"double")) datatype = 64;
if (datatype<0)
throw CImgIOException(_cimg_instance
"save_analyze(): Unsupported pixel type '%s' for file '%s'.",
cimg_instance,
pixel_type(),filename);
((short*)&(header[70]))[0] = datatype;
((short*)&(header[72]))[0] = sizeof(T);
((float*)&(header[108]))[0] = (float)(*iname?0:header.width());
((float*)&(header[112]))[0] = 1;
((float*)&(header[76]))[0] = 0;
if (voxel_size) {
((float*)&(header[76]))[1] = voxel_size[0];
((float*)&(header[76]))[2] = voxel_size[1];
((float*)&(header[76]))[3] = voxel_size[2];
} else ((float*)&(header[76]))[1] = ((float*)&(header[76]))[2] = ((float*)&(header[76]))[3] = 1;
file = cimg::fopen(hname,"wb");
cimg::fwrite(header._data,header.width(),file);
if (*iname) { cimg::fclose(file); file = cimg::fopen(iname,"wb"); }
cimg::fwrite(_data,size(),file);
cimg::fclose(file);
return *this; | 0 | [
"CWE-125"
]
| CImg | 10af1e8c1ad2a58a0a3342a856bae63e8f257abb | 131,823,012,444,305,100,000,000,000,000,000,000,000 | 75 | Fix other issues in 'CImg<T>::load_bmp()'. |
Subsets and Splits