func (string) | target (int64) | cwe (list) | project (string) | commit_id (string, 40 chars) | hash (float64) | size (int64) | message (string)
---|---|---|---|---|---|---|---|
void __init memory_dev_init(void)
{
int ret;
unsigned long block_sz, nr;
/* Validate the configured memory block size */
block_sz = memory_block_size_bytes();
if (!is_power_of_2(block_sz) || block_sz < MIN_MEMORY_BLOCK_SIZE)
panic("Memory block size not suitable: 0x%lx\n", block_sz);
sections_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE;
ret = subsys_system_register(&memory_subsys, memory_root_attr_groups);
if (ret)
panic("%s() failed to register subsystem: %d\n", __func__, ret);
/*
* Create entries for memory sections that were found
* during boot and have been initialized
*/
for (nr = 0; nr <= __highest_present_section_nr;
nr += sections_per_block) {
ret = add_memory_block(nr);
if (ret)
panic("%s() failed to add memory block: %d\n", __func__,
ret);
}
} | 0 | [
"CWE-787"
] | linux | aa838896d87af561a33ecefea1caa4c15a68bc47 | 71,258,201,062,282,940,000,000,000,000,000,000,000 | 27 | drivers core: Use sysfs_emit and sysfs_emit_at for show(device *...) functions
Convert the various sprintf family calls in sysfs device show functions
to sysfs_emit and sysfs_emit_at for PAGE_SIZE buffer safety.
Done with:
$ spatch -sp-file sysfs_emit_dev.cocci --in-place --max-width=80 .
And cocci script:
$ cat sysfs_emit_dev.cocci
@@
identifier d_show;
identifier dev, attr, buf;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
return
- sprintf(buf,
+ sysfs_emit(buf,
...);
...>
}
@@
identifier d_show;
identifier dev, attr, buf;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
return
- snprintf(buf, PAGE_SIZE,
+ sysfs_emit(buf,
...);
...>
}
@@
identifier d_show;
identifier dev, attr, buf;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
return
- scnprintf(buf, PAGE_SIZE,
+ sysfs_emit(buf,
...);
...>
}
@@
identifier d_show;
identifier dev, attr, buf;
expression chr;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
return
- strcpy(buf, chr);
+ sysfs_emit(buf, chr);
...>
}
@@
identifier d_show;
identifier dev, attr, buf;
identifier len;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
len =
- sprintf(buf,
+ sysfs_emit(buf,
...);
...>
return len;
}
@@
identifier d_show;
identifier dev, attr, buf;
identifier len;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
len =
- snprintf(buf, PAGE_SIZE,
+ sysfs_emit(buf,
...);
...>
return len;
}
@@
identifier d_show;
identifier dev, attr, buf;
identifier len;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
len =
- scnprintf(buf, PAGE_SIZE,
+ sysfs_emit(buf,
...);
...>
return len;
}
@@
identifier d_show;
identifier dev, attr, buf;
identifier len;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
- len += scnprintf(buf + len, PAGE_SIZE - len,
+ len += sysfs_emit_at(buf, len,
...);
...>
return len;
}
@@
identifier d_show;
identifier dev, attr, buf;
expression chr;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
...
- strcpy(buf, chr);
- return strlen(buf);
+ return sysfs_emit(buf, chr);
}
Signed-off-by: Joe Perches <[email protected]>
Link: https://lore.kernel.org/r/3d033c33056d88bbe34d4ddb62afd05ee166ab9a.1600285923.git.joe@perches.com
Signed-off-by: Greg Kroah-Hartman <[email protected]> |
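To make the transformation above concrete, here is a minimal sketch of a sysfs show() callback after the conversion the cocci script performs; the "foo" attribute and its drvdata structure are invented for illustration and are not part of the patch.

```c
/* Hedged sketch: a typical show() callback after the conversion above.
 * "foo" and struct foo_data are made up for the example. */
static ssize_t foo_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct foo_data *data = dev_get_drvdata(dev);

	/* sysfs_emit() knows buf is a PAGE_SIZE sysfs buffer, so no explicit
	 * PAGE_SIZE argument is needed and overruns are impossible. */
	return sysfs_emit(buf, "%d\n", data->value);
}
static DEVICE_ATTR_RO(foo);
```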
static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
struct nlattr **a = info->attrs;
struct ovs_header *ovs_header = info->userhdr;
struct sw_flow_key key;
struct sk_buff *reply;
struct sw_flow *flow;
struct datapath *dp;
struct sw_flow_match match;
int err;
ovs_lock();
dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
if (!dp) {
err = -ENODEV;
goto unlock;
}
if (!a[OVS_FLOW_ATTR_KEY]) {
err = ovs_flow_tbl_flush(&dp->table);
goto unlock;
}
ovs_match_init(&match, &key, NULL);
err = ovs_nla_get_match(&match, NULL, a[OVS_FLOW_ATTR_KEY], NULL);
if (err)
goto unlock;
flow = ovs_flow_tbl_lookup(&dp->table, &key);
if (!flow || !ovs_flow_cmp_unmasked_key(flow, &match)) {
err = -ENOENT;
goto unlock;
}
reply = ovs_flow_cmd_alloc_info(flow, info);
if (!reply) {
err = -ENOMEM;
goto unlock;
}
ovs_flow_tbl_remove(&dp->table, flow);
err = ovs_flow_cmd_fill_info(flow, dp, reply, info->snd_portid,
info->snd_seq, 0, OVS_FLOW_CMD_DEL);
BUG_ON(err < 0);
ovs_flow_free(flow, true);
ovs_unlock();
ovs_notify(&dp_flow_genl_family, reply, info);
return 0;
unlock:
ovs_unlock();
return err;
} | 0 | [
"CWE-416"
] | net | 36d5fe6a000790f56039afe26834265db0a3ad4c | 215,742,788,911,941,400,000,000,000,000,000,000,000 | 55 | core, nfqueue, openvswitch: Orphan frags in skb_zerocopy and handle errors
skb_zerocopy can copy elements of the frags array between skbs, but it doesn't
orphan them. Also, it doesn't handle errors, so this patch takes care of that
as well, and modifies the callers accordingly. skb_tx_error() is also added to
the callers so they will signal the failed delivery towards the creator of the
skb.
Signed-off-by: Zoltan Kiss <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
const Type_limits_int *type_limits_int() const
{
return type_handler_long.type_limits_int_by_unsigned_flag(is_unsigned());
} | 0 | [
"CWE-416",
"CWE-703"
] | server | 08c7ab404f69d9c4ca6ca7a9cf7eec74c804f917 | 235,599,911,359,530,960,000,000,000,000,000,000,000 | 4 | MDEV-24176 Server crashes after insert in the table with virtual
column generated using date_format() and if()
vcol_info->expr is allocated on expr_arena at the parsing stage. Since
the expr item is allocated on expr_arena, all the items it contains must be
allocated on expr_arena too. Otherwise fix_session_expr() will
encounter a prematurely freed item.
When table is reopened from cache vcol_info contains stale
expression. We refresh expression via TABLE::vcol_fix_exprs() but
first we must prepare a proper context (Vcol_expr_context) which meets
some requirements:
1. As noted above expr update must be done on expr_arena as there may
be new items created. It was a bug in fix_session_expr_for_read() and
was just not reproduced because of no second refix. Now refix is done
for more cases so it does reproduce. Tests affected: vcol.binlog
2. Also name resolution context must be narrowed to the single table.
Tested by: vcol.update main.default vcol.vcol_syntax gcol.gcol_bugfixes
3. sql_mode must be clean and not fail expr update.
sql_mode such as MODE_NO_BACKSLASH_ESCAPES, MODE_NO_ZERO_IN_DATE, etc
must not affect vcol expression update. If the table was created
successfully any further evaluation must not fail. Tests affected:
main.func_like
Reviewed by: Sergei Golubchik <[email protected]> |
void LanLinkProvider::onStart()
{
const QHostAddress bindAddress = m_testMode? QHostAddress::LocalHost : QHostAddress::Any;
bool success = m_udpSocket.bind(bindAddress, m_udpListenPort, QUdpSocket::ShareAddress);
if (!success) {
QAbstractSocket::SocketError sockErr = m_udpSocket.error();
// Refer to https://doc.qt.io/qt-5/qabstractsocket.html#SocketError-enum to decode socket error number
QString errorMessage = QString::fromLatin1(QMetaEnum::fromType<QAbstractSocket::SocketError>().valueToKey(sockErr));
qCritical(KDECONNECT_CORE)
<< QLatin1String("Failed to bind UDP socket on port")
<< m_udpListenPort
<< QLatin1String("with error")
<< errorMessage;
}
Q_ASSERT(success);
m_tcpPort = MIN_TCP_PORT;
while (!m_server->listen(bindAddress, m_tcpPort)) {
m_tcpPort++;
if (m_tcpPort > MAX_TCP_PORT) { //No ports available?
qCritical(KDECONNECT_CORE) << "Error opening a port in range" << MIN_TCP_PORT << "-" << MAX_TCP_PORT;
m_tcpPort = 0;
return;
}
}
onNetworkChange();
qCDebug(KDECONNECT_CORE) << "LanLinkProvider started";
} | 0 | [
"CWE-400",
"CWE-703"
] | kdeconnect-kde | 4fbd01a3d44a0bcca888c49a77ec7cfd10e113d7 | 52,494,414,114,513,060,000,000,000,000,000,000,000 | 30 | Limit identity packets to 8KiB
Healthy identity packages shouldn't be that big and we don't want to
allow systems around us to send us ever humongous packages that will
just leave us without any memory.
Thanks Matthias Gerstner <[email protected]> for reporting this. |
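A minimal sketch of the kind of guard the commit describes, assuming Qt's QUdpSocket API; the 8 KiB constant comes from the commit title, while the slot name and surrounding structure are assumptions, not the actual KDE Connect diff.

```cpp
// Hedged sketch: refuse to buffer identity datagrams larger than 8 KiB
// before attempting to parse them.
static constexpr qint64 MAX_IDENTITY_PACKET_SIZE = 8 * 1024;

void LanLinkProvider::udpBroadcastReceived()
{
    while (m_udpSocket.hasPendingDatagrams()) {
        if (m_udpSocket.pendingDatagramSize() > MAX_IDENTITY_PACKET_SIZE) {
            m_udpSocket.receiveDatagram(0); // consume and discard the datagram
            continue;
        }
        const QNetworkDatagram datagram = m_udpSocket.receiveDatagram();
        // ... deserialize and validate the identity packet ...
    }
}
```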
static int caps_find_name(const char *name) {
int i;
int elems = sizeof(capslist) / sizeof(capslist[0]);
for (i = 0; i < elems; i++) {
if (strcmp(name, capslist[i].name) == 0)
return capslist[i].nr;
}
return -1;
} | 0 | [
"CWE-269",
"CWE-94"
] | firejail | 27cde3d7d1e4e16d4190932347c7151dc2a84c50 | 279,667,722,369,399,020,000,000,000,000,000,000,000 | 10 | fixing CVE-2022-31214 |
static OPJ_BOOL opj_j2k_write_qcc(opj_j2k_t *p_j2k,
OPJ_UINT32 p_comp_no,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager
)
{
OPJ_UINT32 l_qcc_size, l_remaining_size;
/* preconditions */
assert(p_j2k != 00);
assert(p_manager != 00);
assert(p_stream != 00);
l_qcc_size = 5 + opj_j2k_get_SQcd_SQcc_size(p_j2k, p_j2k->m_current_tile_number,
p_comp_no);
l_qcc_size += p_j2k->m_private_image->numcomps <= 256 ? 0 : 1;
l_remaining_size = l_qcc_size;
if (l_qcc_size > p_j2k->m_specific_param.m_encoder.m_header_tile_data_size) {
OPJ_BYTE *new_header_tile_data = (OPJ_BYTE *) opj_realloc(
p_j2k->m_specific_param.m_encoder.m_header_tile_data, l_qcc_size);
if (! new_header_tile_data) {
opj_free(p_j2k->m_specific_param.m_encoder.m_header_tile_data);
p_j2k->m_specific_param.m_encoder.m_header_tile_data = NULL;
p_j2k->m_specific_param.m_encoder.m_header_tile_data_size = 0;
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to write QCC marker\n");
return OPJ_FALSE;
}
p_j2k->m_specific_param.m_encoder.m_header_tile_data = new_header_tile_data;
p_j2k->m_specific_param.m_encoder.m_header_tile_data_size = l_qcc_size;
}
opj_j2k_write_qcc_in_memory(p_j2k, p_comp_no,
p_j2k->m_specific_param.m_encoder.m_header_tile_data, &l_remaining_size,
p_manager);
if (opj_stream_write_data(p_stream,
p_j2k->m_specific_param.m_encoder.m_header_tile_data, l_qcc_size,
p_manager) != l_qcc_size) {
return OPJ_FALSE;
}
return OPJ_TRUE;
} | 0 | [
"CWE-416",
"CWE-787"
] | openjpeg | 4241ae6fbbf1de9658764a80944dc8108f2b4154 | 151,465,827,610,670,820,000,000,000,000,000,000,000 | 44 | Fix assertion in debug mode / heap-based buffer overflow in opj_write_bytes_LE for Cinema profiles with numresolutions = 1 (#985) |
static inline bool is_spoofed_6rd(struct ip_tunnel *tunnel, const __be32 v4addr,
const struct in6_addr *v6addr)
{
__be32 v4embed = 0;
if (check_6rd(tunnel, v6addr, &v4embed) && v4addr != v4embed)
return true;
return false;
} | 0 | [
"CWE-703",
"CWE-772",
"CWE-401"
] | linux | 07f12b26e21ab359261bf75cfcb424fdc7daeb6d | 329,540,504,081,121,650,000,000,000,000,000,000,000 | 8 | net: sit: fix memory leak in sit_init_net()
If register_netdev() fails to register sitn->fb_tunnel_dev,
the code jumps to err_reg_dev and forgets to free the netdev (sitn->fb_tunnel_dev).
BUG: memory leak
unreferenced object 0xffff888378daad00 (size 512):
comm "syz-executor.1", pid 4006, jiffies 4295121142 (age 16.115s)
hex dump (first 32 bytes):
00 e6 ed c0 83 88 ff ff 00 00 00 00 00 00 00 00 ................
00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................
backtrace:
[<00000000d6dcb63e>] kvmalloc include/linux/mm.h:577 [inline]
[<00000000d6dcb63e>] kvzalloc include/linux/mm.h:585 [inline]
[<00000000d6dcb63e>] netif_alloc_netdev_queues net/core/dev.c:8380 [inline]
[<00000000d6dcb63e>] alloc_netdev_mqs+0x600/0xcc0 net/core/dev.c:8970
[<00000000867e172f>] sit_init_net+0x295/0xa40 net/ipv6/sit.c:1848
[<00000000871019fa>] ops_init+0xad/0x3e0 net/core/net_namespace.c:129
[<00000000319507f6>] setup_net+0x2ba/0x690 net/core/net_namespace.c:314
[<0000000087db4f96>] copy_net_ns+0x1dc/0x330 net/core/net_namespace.c:437
[<0000000057efc651>] create_new_namespaces+0x382/0x730 kernel/nsproxy.c:107
[<00000000676f83de>] copy_namespaces+0x2ed/0x3d0 kernel/nsproxy.c:165
[<0000000030b74bac>] copy_process.part.27+0x231e/0x6db0 kernel/fork.c:1919
[<00000000fff78746>] copy_process kernel/fork.c:1713 [inline]
[<00000000fff78746>] _do_fork+0x1bc/0xe90 kernel/fork.c:2224
[<000000001c2e0d1c>] do_syscall_64+0xc8/0x580 arch/x86/entry/common.c:290
[<00000000ec48bd44>] entry_SYSCALL_64_after_hwframe+0x49/0xbe
[<0000000039acff8a>] 0xffffffffffffffff
Signed-off-by: Mao Wenan <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
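A minimal sketch of the error path described in the report above; the label name is an assumption and this is not the verbatim upstream diff.

```c
/* Hedged sketch of the fix: release fb_tunnel_dev with free_netdev() when
 * register_netdev() fails, instead of leaking the allocation. */
	err = register_netdev(sitn->fb_tunnel_dev);
	if (err)
		goto err_reg_dev;
	return 0;

err_reg_dev:
	free_netdev(sitn->fb_tunnel_dev);	/* plug the reported leak */
	return err;
```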
WandExport void DrawSetOpacity(DrawingWand *wand,const double opacity)
{
Quantum
quantum_alpha;
assert(wand != (DrawingWand *) NULL);
assert(wand->signature == MagickWandSignature);
if (wand->debug != MagickFalse)
(void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
quantum_alpha=ClampToQuantum(QuantumRange*opacity);
if ((wand->filter_off != MagickFalse) ||
(CurrentContext->alpha != quantum_alpha))
{
CurrentContext->alpha=quantum_alpha;
(void) MVGPrintf(wand,"opacity %.20g\n",opacity);
}
} | 0 | [
"CWE-476"
] | ImageMagick | 6ad5fc3c9b652eec27fc0b1a0817159f8547d5d9 | 62,715,663,172,044,980,000,000,000,000,000,000,000 | 17 | https://github.com/ImageMagick/ImageMagick/issues/716 |
static void cbq_put(struct Qdisc *sch, unsigned long arg)
{
struct cbq_class *cl = (struct cbq_class*)arg;
if (--cl->refcnt == 0) {
#ifdef CONFIG_NET_CLS_POLICE
struct cbq_sched_data *q = qdisc_priv(sch);
spin_lock_bh(&sch->dev->queue_lock);
if (q->rx_class == cl)
q->rx_class = NULL;
spin_unlock_bh(&sch->dev->queue_lock);
#endif
cbq_destroy_class(sch, cl);
}
} | 0 | [
"CWE-200"
] | linux-2.6 | 8a47077a0b5aa2649751c46e7a27884e6686ccbf | 166,294,694,903,911,700,000,000,000,000,000,000,000 | 17 | [NETLINK]: Missing padding fields in dumped structures
Plug holes with padding fields and initialize them to zero.
Signed-off-by: Patrick McHardy <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
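A minimal sketch of the pattern this patch applies when dumping such structures; the struct and field names are typical examples from the cbq dump path, not the exact diff.

```c
/* Hedged sketch: zero the whole structure before filling it so that
 * compiler-inserted padding bytes cannot leak kernel memory to userspace
 * when the struct is dumped over netlink. */
	struct tc_cbq_wrropt opt;

	memset(&opt, 0, sizeof(opt));	/* clears holes and __reserved */
	opt.flags = 0;
	opt.allot = cl->allot;
	opt.priority = cl->priority + 1;
	opt.cpriority = cl->cpriority + 1;
	opt.weight = cl->weight;
	/* ... append the fully initialized struct to the netlink message ... */
```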
xfs_file_aio_write_checks(
struct file *file,
loff_t *pos,
size_t *count,
int *iolock)
{
struct inode *inode = file->f_mapping->host;
struct xfs_inode *ip = XFS_I(inode);
int error = 0;
restart:
error = generic_write_checks(file, pos, count, S_ISBLK(inode->i_mode));
if (error)
return error;
/*
* If the offset is beyond the size of the file, we need to zero any
* blocks that fall between the existing EOF and the start of this
* write. If zeroing is needed and we are currently holding the
* iolock shared, we need to update it to exclusive which implies
* having to redo all checks before.
*/
if (*pos > i_size_read(inode)) {
if (*iolock == XFS_IOLOCK_SHARED) {
xfs_rw_iunlock(ip, *iolock);
*iolock = XFS_IOLOCK_EXCL;
xfs_rw_ilock(ip, *iolock);
goto restart;
}
error = -xfs_zero_eof(ip, *pos, i_size_read(inode));
if (error)
return error;
}
/*
* Updating the timestamps will grab the ilock again from
* xfs_fs_dirty_inode, so we have to call it after dropping the
* lock above. Eventually we should look into a way to avoid
* the pointless lock roundtrip.
*/
if (likely(!(file->f_mode & FMODE_NOCMTIME))) {
error = file_update_time(file);
if (error)
return error;
}
/*
* If we're writing the file then make sure to clear the setuid and
* setgid bits if the process is not being run by root. This keeps
* people from modifying setuid and setgid binaries.
*/
return file_remove_suid(file);
} | 0 | [
"CWE-284",
"CWE-264"
] | linux | 8d0207652cbe27d1f962050737848e5ad4671958 | 72,458,252,891,870,820,000,000,000,000,000,000,000 | 53 | ->splice_write() via ->write_iter()
iter_file_splice_write() - a ->splice_write() instance that gathers the
pipe buffers, builds a bio_vec-based iov_iter covering those and feeds
it to ->write_iter(). A bunch of simple cases coverted to that...
[AV: fixed the braino spotted by Cyrill]
Signed-off-by: Al Viro <[email protected]> |
cifs_uncached_readdata_release(struct kref *refcount)
{
struct cifs_readdata *rdata = container_of(refcount,
struct cifs_readdata, refcount);
unsigned int i;
for (i = 0; i < rdata->nr_pages; i++) {
put_page(rdata->pages[i]);
rdata->pages[i] = NULL;
}
cifs_readdata_release(refcount);
} | 0 | [
"CWE-119",
"CWE-787"
] | linux | 5d81de8e8667da7135d3a32a964087c0faf5483f | 48,233,697,173,179,760,000,000,000,000,000,000,000 | 12 | cifs: ensure that uncached writes handle unmapped areas correctly
It's possible for userland to pass down an iovec via writev() that has a
bogus user pointer in it. If that happens and we're doing an uncached
write, then we can end up getting less bytes than we expect from the
call to iov_iter_copy_from_user. This is CVE-2014-0069
cifs_iovec_write isn't set up to handle that situation however. It'll
blindly keep chugging through the page array and not filling those pages
with anything useful. Worse yet, we'll later end up with a negative
number in wdata->tailsz, which will confuse the sending routines and
cause an oops at the very least.
Fix this by having the copy phase of cifs_iovec_write stop copying data
in this situation and send the last write as a short one. At the same
time, we want to avoid sending a zero-length write to the server, so
break out of the loop and set rc to -EFAULT if that happens. This also
allows us to handle the case where no address in the iovec is valid.
[Note: Marking this for stable on v3.4+ kernels, but kernels as old as
v2.6.38 may have a similar problem and may need similar fix]
Cc: <[email protected]> # v3.4+
Reviewed-by: Pavel Shilovsky <[email protected]>
Reported-by: Al Viro <[email protected]>
Signed-off-by: Jeff Layton <[email protected]>
Signed-off-by: Steve French <[email protected]> |
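A minimal sketch of the loop-exit logic the message describes, using the helper name mentioned above; the surrounding variable names are assumptions, not the exact upstream diff.

```c
/* Hedged sketch: stop on a short copy from userspace; fail only if nothing
 * at all could be copied, otherwise send what we have as a short write. */
	copied = iov_iter_copy_from_user(wdata->pages[i], &it, 0, bytes);
	total_copied += copied;
	if (copied < bytes) {
		if (total_copied == 0) {
			rc = -EFAULT;	/* no address in the iovec was valid */
			break;
		}
		cur_len = total_copied;	/* send a short write instead */
		break;
	}
```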
static int for_each_memory_block_cb(struct device *dev, void *data)
{
struct memory_block *mem = to_memory_block(dev);
struct for_each_memory_block_cb_data *cb_data = data;
return cb_data->func(mem, cb_data->arg);
} | 0 | [
"CWE-787"
] | linux | aa838896d87af561a33ecefea1caa4c15a68bc47 | 275,604,809,948,300,800,000,000,000,000,000,000,000 | 7 | drivers core: Use sysfs_emit and sysfs_emit_at for show(device *...) functions
Convert the various sprintf family calls in sysfs device show functions
to sysfs_emit and sysfs_emit_at for PAGE_SIZE buffer safety.
Done with:
$ spatch -sp-file sysfs_emit_dev.cocci --in-place --max-width=80 .
And cocci script:
$ cat sysfs_emit_dev.cocci
@@
identifier d_show;
identifier dev, attr, buf;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
return
- sprintf(buf,
+ sysfs_emit(buf,
...);
...>
}
@@
identifier d_show;
identifier dev, attr, buf;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
return
- snprintf(buf, PAGE_SIZE,
+ sysfs_emit(buf,
...);
...>
}
@@
identifier d_show;
identifier dev, attr, buf;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
return
- scnprintf(buf, PAGE_SIZE,
+ sysfs_emit(buf,
...);
...>
}
@@
identifier d_show;
identifier dev, attr, buf;
expression chr;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
return
- strcpy(buf, chr);
+ sysfs_emit(buf, chr);
...>
}
@@
identifier d_show;
identifier dev, attr, buf;
identifier len;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
len =
- sprintf(buf,
+ sysfs_emit(buf,
...);
...>
return len;
}
@@
identifier d_show;
identifier dev, attr, buf;
identifier len;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
len =
- snprintf(buf, PAGE_SIZE,
+ sysfs_emit(buf,
...);
...>
return len;
}
@@
identifier d_show;
identifier dev, attr, buf;
identifier len;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
len =
- scnprintf(buf, PAGE_SIZE,
+ sysfs_emit(buf,
...);
...>
return len;
}
@@
identifier d_show;
identifier dev, attr, buf;
identifier len;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
- len += scnprintf(buf + len, PAGE_SIZE - len,
+ len += sysfs_emit_at(buf, len,
...);
...>
return len;
}
@@
identifier d_show;
identifier dev, attr, buf;
expression chr;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
...
- strcpy(buf, chr);
- return strlen(buf);
+ return sysfs_emit(buf, chr);
}
Signed-off-by: Joe Perches <[email protected]>
Link: https://lore.kernel.org/r/3d033c33056d88bbe34d4ddb62afd05ee166ab9a.1600285923.git.joe@perches.com
Signed-off-by: Greg Kroah-Hartman <[email protected]> |
static int vmx_restore_vmx_ept_vpid_cap(struct vcpu_vmx *vmx, u64 data)
{
u64 vmx_ept_vpid_cap;
vmx_ept_vpid_cap = vmx_control_msr(vmx->nested.nested_vmx_ept_caps,
vmx->nested.nested_vmx_vpid_caps);
/* Every bit is either reserved or a feature bit. */
if (!is_bitwise_subset(vmx_ept_vpid_cap, data, -1ULL))
return -EINVAL;
vmx->nested.nested_vmx_ept_caps = data;
vmx->nested.nested_vmx_vpid_caps = data >> 32;
return 0;
} | 0 | [
"CWE-20",
"CWE-617"
] | linux | 3a8b0677fc6180a467e26cc32ce6b0c09a32f9bb | 53,872,670,946,996,350,000,000,000,000,000,000,000 | 15 | KVM: VMX: Do not BUG() on out-of-bounds guest IRQ
The value of the guest_irq argument to vmx_update_pi_irte() is
ultimately coming from a KVM_IRQFD API call. Do not BUG() in
vmx_update_pi_irte() if the value is out-of bounds. (Especially,
since KVM as a whole seems to hang after that.)
Instead, print a message only once if we find that we don't have a
route for a certain IRQ (which can be out-of-bounds or within the
array).
This fixes CVE-2017-1000252.
Fixes: efc644048ecde54 ("KVM: x86: Update IRTE for posted-interrupts")
Signed-off-by: Jan H. Schönherr <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]> |
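A minimal sketch of the validation the message describes for vmx_update_pi_irte(); the field names follow the usual KVM IRQ-routing table layout and should be read as assumptions.

```c
/* Hedged sketch: range-check the KVM_IRQFD-supplied guest_irq and warn once
 * instead of BUG()ing when no routing entry exists for it. */
	irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);
	if (guest_irq >= irq_rt->nr_rt_entries ||
	    hlist_empty(&irq_rt->map[guest_irq])) {
		pr_warn_once("no route for irq %u (possibly out of range)\n",
			     guest_irq);
		goto out;
	}
```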
MYSOFA_EXPORT void mysofa_getversion(int *major, int *minor, int *patch) {
*major = CPACK_PACKAGE_VERSION_MAJOR;
*minor = CPACK_PACKAGE_VERSION_MINOR;
*patch = CPACK_PACKAGE_VERSION_PATCH;
} | 0 | [
"CWE-476",
"CWE-787"
] | libmysofa | 2e6fac6ab6156dae8e8c6f417741388084b70d6f | 59,792,414,156,256,380,000,000,000,000,000,000,000 | 5 | Fixed recursive function calls |
bool Chapter::Display::set_language(const char* language) {
return StrCpy(language, &language_);
} | 0 | [
"CWE-20"
] | libvpx | f00890eecdf8365ea125ac16769a83aa6b68792d | 329,340,757,226,130,800,000,000,000,000,000,000,000 | 3 | update libwebm to libwebm-1.0.0.27-352-g6ab9fcf
https://chromium.googlesource.com/webm/libwebm/+log/af81f26..6ab9fcf
Change-Id: I9d56e1fbaba9b96404b4fbabefddc1a85b79c25d |
NOEXPORT int generate_session_ticket_cb(SSL *ssl, void *arg) {
SSL_SESSION *sess;
TICKET_DATA ticket_data;
#if 0
SOCKADDR_UNION *addr;
#endif
int retval;
(void)arg; /* squash the unused parameter warning */
s_log(LOG_DEBUG, "Generate session ticket callback");
sess=SSL_get1_session(ssl);
if(!sess)
return 0;
memset(&ticket_data, 0, sizeof(TICKET_DATA));
ticket_data.session_authenticated=
SSL_SESSION_get_ex_data(sess, index_session_authenticated);
#if 0
/* TODO: add remote_start() invocation here */
CRYPTO_THREAD_read_lock(stunnel_locks[LOCK_ADDR]);
addr=SSL_SESSION_get_ex_data(sess, index_session_connect_address);
if(addr)
memcpy(&ticket_data.addr, addr, (size_t)addr_len(addr));
CRYPTO_THREAD_unlock(stunnel_locks[LOCK_ADDR]);
#endif
retval=SSL_SESSION_set1_ticket_appdata(sess,
&ticket_data, sizeof(TICKET_DATA));
SSL_SESSION_free(sess);
return retval;
} | 0 | [
"CWE-295"
] | stunnel | ebad9ddc4efb2635f37174c9d800d06206f1edf9 | 241,725,231,695,981,000,000,000,000,000,000,000,000 | 34 | stunnel-5.57 |
int MonClient::handle_auth_request(
Connection *con,
AuthConnectionMeta *auth_meta,
bool more,
uint32_t auth_method,
const ceph::buffer::list& payload,
ceph::buffer::list *reply)
{
if (payload.length() == 0) {
// for some channels prior to nautilus (osd heartbeat), we
// tolerate the lack of an authorizer.
if (!con->get_messenger()->require_authorizer) {
handle_authentication_dispatcher->ms_handle_authentication(con);
return 1;
}
return -EACCES;
}
auth_meta->auth_mode = payload[0];
if (auth_meta->auth_mode < AUTH_MODE_AUTHORIZER ||
auth_meta->auth_mode > AUTH_MODE_AUTHORIZER_MAX) {
return -EACCES;
}
AuthAuthorizeHandler *ah = get_auth_authorize_handler(con->get_peer_type(),
auth_method);
if (!ah) {
lderr(cct) << __func__ << " no AuthAuthorizeHandler found for auth method "
<< auth_method << dendl;
return -EOPNOTSUPP;
}
auto ac = &auth_meta->authorizer_challenge;
if (auth_meta->skip_authorizer_challenge) {
ldout(cct, 10) << __func__ << " skipping challenge on " << con << dendl;
ac = nullptr;
}
bool was_challenge = (bool)auth_meta->authorizer_challenge;
bool isvalid = ah->verify_authorizer(
cct,
*rotating_secrets,
payload,
auth_meta->get_connection_secret_length(),
reply,
&con->peer_name,
&con->peer_global_id,
&con->peer_caps_info,
&auth_meta->session_key,
&auth_meta->connection_secret,
ac);
if (isvalid) {
handle_authentication_dispatcher->ms_handle_authentication(con);
return 1;
}
if (!more && !was_challenge && auth_meta->authorizer_challenge) {
ldout(cct,10) << __func__ << " added challenge on " << con << dendl;
return 0;
}
ldout(cct,10) << __func__ << " bad authorizer on " << con << dendl;
// discard old challenge
auth_meta->authorizer_challenge.reset();
return -EACCES;
} | 0 | [
"CWE-294"
] | ceph | 6c14c2fb5650426285428dfe6ca1597e5ea1d07d | 228,831,172,587,267,200,000,000,000,000,000,000,000 | 62 | mon/MonClient: bring back CEPHX_V2 authorizer challenges
Commit c58c5754dfd2 ("msg/async/ProtocolV1: use AuthServer and
AuthClient") introduced a backwards compatibility issue into msgr1.
To fix it, commit 321548010578 ("mon/MonClient: skip CEPHX_V2
challenge if client doesn't support it") set out to skip authorizer
challenges for peers that don't support CEPHX_V2. However, it
made it so that authorizer challenges are skipped for all peers in
both msgr1 and msgr2 cases, effectively disabling the protection
against replay attacks that was put in place in commit f80b848d3f83
("auth/cephx: add authorizer challenge", CVE-2018-1128).
This is because con->get_features() always returns 0 at that
point. In msgr1 case, the peer shares its features along with the
authorizer, but while they are available in connect_msg.features they
aren't assigned to con until ProtocolV1::open(). In msgr2 case, the
peer doesn't share its features until much later (in CLIENT_IDENT
frame, i.e. after the authentication phase). The result is that
!CEPHX_V2 branch is taken in all cases and replay attack protection
is lost.
Only clusters with cephx_service_require_version set to 2 on the
service daemons would not be silently downgraded. But, since the
default is 1 and there are no reports of looping on BADAUTHORIZER
faults, I'm pretty sure that no one has ever done that. Note that
cephx_require_version set to 2 would have no effect even though it
is supposed to be stronger than cephx_service_require_version
because MonClient::handle_auth_request() didn't check it.
To fix:
- for msgr1, check connect_msg.features (as was done before commit
c58c5754dfd2) and challenge if CEPHX_V2 is supported. Together
with two preceding patches that resurrect proper cephx_* option
handling in msgr1, this covers both "I want old clients to work"
and "I wish to require better authentication" use cases.
- for msgr2, don't check anything and always challenge. CEPHX_V2
predates msgr2, anyone speaking msgr2 must support it.
Signed-off-by: Ilya Dryomov <[email protected]>
(cherry picked from commit 4a82c72e3bdddcb625933e83af8b50a444b961f1) |
static ssize_t stream_ssl_error(RedStream *s, int return_code)
{
SPICE_GNUC_UNUSED int ssl_error;
ssl_error = SSL_get_error(s->priv->ssl, return_code);
// OpenSSL can return SSL_ERROR_WANT_READ if we attempt to read
// data and the socket has not yet received the full SSL packet.
// Under Windows errno is not set so potentially caller can detect
// the wrong error so we need to set errno.
#ifdef _WIN32
if (ssl_error == SSL_ERROR_WANT_READ || ssl_error == SSL_ERROR_WANT_WRITE) {
errno = EAGAIN;
} else {
errno = EPIPE;
}
#endif
// red_peer_receive is expected to receive -1 on errors while
// OpenSSL documentation just state a <0 value
return -1;
} | 0 | [] | spice | 95a0cfac8a1c8eff50f05e65df945da3bb501fc9 | 287,252,659,844,109,550,000,000,000,000,000,000,000 | 22 | With OpenSSL 1.0.2 and earlier: disable client-side renegotiation.
Fixed issue #49
Fixes BZ#1904459
Signed-off-by: Julien Ropé <[email protected]>
Reported-by: BlackKD
Acked-by: Frediano Ziglio <[email protected]> |
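A minimal sketch of how renegotiation can be refused outright on modern OpenSSL; the option shown only exists from 1.1.0h on, which is why, as the commit title says, 1.0.2 and earlier need different handling. This is not the spice-server change itself.

```c
#include <openssl/ssl.h>

static void disable_client_renegotiation(SSL *ssl)
{
#ifdef SSL_OP_NO_RENEGOTIATION
	/* OpenSSL >= 1.1.0h: refuse renegotiation outright. */
	SSL_set_options(ssl, SSL_OP_NO_RENEGOTIATION);
#else
	/* OpenSSL 1.0.2 and earlier have no such option; count handshakes in
	 * the SSL info callback and drop clients that try to renegotiate. */
	(void)ssl;
#endif
}
```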
void __init cred_init(void)
{
/* allocate a slab in which we can store credentials */
cred_jar = kmem_cache_create("cred_jar", sizeof(struct cred),
0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
} | 0 | [] | linux-2.6 | ee18d64c1f632043a02e6f5ba5e045bb26a5465f | 100,796,191,587,704,470,000,000,000,000,000,000,000 | 6 | KEYS: Add a keyctl to install a process's session keyring on its parent [try #6]
Add a keyctl to install a process's session keyring onto its parent. This
replaces the parent's session keyring. Because the COW credential code does
not permit one process to change another process's credentials directly, the
change is deferred until userspace next starts executing again. Normally this
will be after a wait*() syscall.
To support this, three new security hooks have been provided:
cred_alloc_blank() to allocate unset security creds, cred_transfer() to fill in
the blank security creds and key_session_to_parent() - which asks the LSM if
the process may replace its parent's session keyring.
The replacement may only happen if the process has the same ownership details
as its parent, and the process has LINK permission on the session keyring, and
the session keyring is owned by the process, and the LSM permits it.
Note that this requires alteration to each architecture's notify_resume path.
This has been done for all arches barring blackfin, m68k* and xtensa, all of
which need assembly alteration to support TIF_NOTIFY_RESUME. This allows the
replacement to be performed at the point the parent process resumes userspace
execution.
This allows the userspace AFS pioctl emulation to fully emulate newpag() and
the VIOCSETTOK and VIOCSETTOK2 pioctls, all of which require the ability to
alter the parent process's PAG membership. However, since kAFS doesn't use
PAGs per se, but rather dumps the keys into the session keyring, the session
keyring of the parent must be replaced if, for example, VIOCSETTOK is passed
the newpag flag.
This can be tested with the following program:
#include <stdio.h>
#include <stdlib.h>
#include <keyutils.h>
#define KEYCTL_SESSION_TO_PARENT 18
#define OSERROR(X, S) do { if ((long)(X) == -1) { perror(S); exit(1); } } while(0)
int main(int argc, char **argv)
{
key_serial_t keyring, key;
long ret;
keyring = keyctl_join_session_keyring(argv[1]);
OSERROR(keyring, "keyctl_join_session_keyring");
key = add_key("user", "a", "b", 1, keyring);
OSERROR(key, "add_key");
ret = keyctl(KEYCTL_SESSION_TO_PARENT);
OSERROR(ret, "KEYCTL_SESSION_TO_PARENT");
return 0;
}
Compiled and linked with -lkeyutils, you should see something like:
[dhowells@andromeda ~]$ keyctl show
Session Keyring
-3 --alswrv 4043 4043 keyring: _ses
355907932 --alswrv 4043 -1 \_ keyring: _uid.4043
[dhowells@andromeda ~]$ /tmp/newpag
[dhowells@andromeda ~]$ keyctl show
Session Keyring
-3 --alswrv 4043 4043 keyring: _ses
1055658746 --alswrv 4043 4043 \_ user: a
[dhowells@andromeda ~]$ /tmp/newpag hello
[dhowells@andromeda ~]$ keyctl show
Session Keyring
-3 --alswrv 4043 4043 keyring: hello
340417692 --alswrv 4043 4043 \_ user: a
Where the test program creates a new session keyring, sticks a user key named
'a' into it and then installs it on its parent.
Signed-off-by: David Howells <[email protected]>
Signed-off-by: James Morris <[email protected]> |
static void selinux_secmark_refcount_dec(void)
{
atomic_dec(&selinux_secmark_refcount);
} | 0 | [
"CWE-264"
] | linux | 259e5e6c75a910f3b5e656151dc602f53f9d7548 | 106,098,145,922,315,260,000,000,000,000,000,000,000 | 4 | Add PR_{GET,SET}_NO_NEW_PRIVS to prevent execve from granting privs
With this change, calling
prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)
disables privilege granting operations at execve-time. For example, a
process will not be able to execute a setuid binary to change their uid
or gid if this bit is set. The same is true for file capabilities.
Additionally, LSM_UNSAFE_NO_NEW_PRIVS is defined to ensure that
LSMs respect the requested behavior.
To determine if the NO_NEW_PRIVS bit is set, a task may call
prctl(PR_GET_NO_NEW_PRIVS, 0, 0, 0, 0);
It returns 1 if set and 0 if it is not set. If any of the arguments are
non-zero, it will return -1 and set errno to -EINVAL.
(PR_SET_NO_NEW_PRIVS behaves similarly.)
This functionality is desired for the proposed seccomp filter patch
series. By using PR_SET_NO_NEW_PRIVS, it allows a task to modify the
system call behavior for itself and its child tasks without being
able to impact the behavior of a more privileged task.
Another potential use is making certain privileged operations
unprivileged. For example, chroot may be considered "safe" if it cannot
affect privileged tasks.
Note, this patch causes execve to fail when PR_SET_NO_NEW_PRIVS is
set and AppArmor is in use. It is fixed in a subsequent patch.
Signed-off-by: Andy Lutomirski <[email protected]>
Signed-off-by: Will Drewry <[email protected]>
Acked-by: Eric Paris <[email protected]>
Acked-by: Kees Cook <[email protected]>
v18: updated change desc
v17: using new define values as per 3.4
Signed-off-by: James Morris <[email protected]> |
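For reference, a minimal user-space program exercising the two prctl operations described above; the fallback #define values match the mainline constants.

```c
/* Minimal user-space demo of the no_new_privs interface described above. */
#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_SET_NO_NEW_PRIVS
#define PR_SET_NO_NEW_PRIVS 38	/* mainline value */
#endif
#ifndef PR_GET_NO_NEW_PRIVS
#define PR_GET_NO_NEW_PRIVS 39	/* mainline value */
#endif

int main(void)
{
	if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0) != 0)
		perror("PR_SET_NO_NEW_PRIVS");
	/* Returns 1 once set; execve() can no longer grant privileges. */
	printf("no_new_privs = %d\n", prctl(PR_GET_NO_NEW_PRIVS, 0, 0, 0, 0));
	return 0;
}
```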
check_user_password_V4(std::string const& user_password,
QPDF::EncryptionData const& data)
{
// Algorithm 3.6 from the PDF 1.7 Reference Manual
std::string u_value = compute_U_value(user_password, data);
size_t to_compare = ((data.getR() >= 3) ? sizeof(MD5::Digest)
: key_bytes);
return (memcmp(data.getU().c_str(), u_value.c_str(), to_compare) == 0);
} | 0 | [
"CWE-787"
] | qpdf | d71f05ca07eb5c7cfa4d6d23e5c1f2a800f52e8e | 32,716,282,475,118,010,000,000,000,000,000,000,000 | 10 | Fix sign and conversion warnings (major)
This makes all integer type conversions that have potential data loss
explicit with calls that do range checks and raise an exception. After
this commit, qpdf builds with no warnings when -Wsign-conversion
-Wconversion is used with gcc or clang or when -W3 -Wd4800 is used
with MSVC. This significantly reduces the likelihood of potential
crashes from bogus integer values.
There are some parts of the code that take int when they should take
size_t or an offset. Such places would make qpdf not support files
with more than 2^31 of something that usually wouldn't be so large. In
the event that such a file shows up and is valid, at least qpdf would
raise an error in the right spot so the issue could be legitimately
addressed rather than failing in some weird way because of a silent
overflow condition. |
static int parse_relop(tvbuff_t *tvb, int offset, proto_tree *tree, guint32 *relop, const char **str)
{
const char *str1 = NULL, *str2 = NULL;
guint32 tmp = tvb_get_letohl(tvb, offset);
guint32 modifier = (tmp & 0xF00);
DISSECTOR_ASSERT((tmp & 0xf) < PRSomeBits +1);
switch(tmp & 0xf) {
case PRLT:
*relop = PRLT;
break;
case PRLE:
*relop = PRLE;
break;
case PRGT:
*relop = PRGT;
break;
case PRGE:
*relop = PRGE;
break;
case PREQ:
*relop = PREQ;
break;
case PRNE:
*relop = PRNE;
break;
case PRRE:
*relop = PRRE;
break;
case PRAllBits:
*relop = PRAllBits;
break;
case PRSomeBits:
*relop = PRSomeBits;
break;
default:
break;
}
str2 = val_to_str(*relop, PR_VALS, "0x%04x");
if (modifier) {
switch (modifier) {
case PRAll:
*relop = *relop | PRAll;
break;
case PRAny:
*relop |= PRAny;
break;
default:
DISSECTOR_ASSERT(FALSE);
break;
}
str1 = try_val_to_str((modifier), PR_VALS);
if (str1) {
str1 = wmem_strdup_printf(wmem_packet_scope(), "%s | ", str1);
str2 = wmem_strdup_printf(wmem_packet_scope(), "%s%s", str1, str2);
}
}
proto_tree_add_string_format_value(tree, hf_mswsp_cproprestrict_relop, tvb, offset, 4, str2, "%s (0x%04x)", str2[0]=='\0' ? "" : str2, *relop);
if (str) {
*str = str2;
}
return offset + 4;
} | 0 | [
"CWE-770"
] | wireshark | b7a0650e061b5418ab4a8f72c6e4b00317aff623 | 47,870,311,376,139,540,000,000,000,000,000,000,000 | 66 | MS-WSP: Don't allocate huge amounts of memory.
Add a couple of memory allocation sanity checks, one of which
fixes #17331. |
size_t nghttp2_session_get_outbound_queue_size(nghttp2_session *session) {
return nghttp2_outbound_queue_size(&session->ob_urgent) +
nghttp2_outbound_queue_size(&session->ob_reg) +
nghttp2_outbound_queue_size(&session->ob_syn);
/* TODO account for item attached to stream */
} | 0 | [] | nghttp2 | 0a6ce87c22c69438ecbffe52a2859c3a32f1620f | 214,438,624,989,177,330,000,000,000,000,000,000,000 | 6 | Add nghttp2_option_set_max_outbound_ack |
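A minimal sketch of how a server might use the option named in the commit title; the limit of 1000 is arbitrary and the callback setup is elided.

```c
/* Hedged sketch: cap the number of queued outbound ACK-type frames
 * (e.g. PING ACKs) so a flooding peer cannot grow the queue without bound. */
#include <nghttp2/nghttp2.h>

static int setup_server_session(nghttp2_session **session,
                                nghttp2_session_callbacks *callbacks,
                                void *user_data)
{
    nghttp2_option *option;
    int rv = nghttp2_option_new(&option);
    if (rv != 0)
        return rv;

    nghttp2_option_set_max_outbound_ack(option, 1000); /* arbitrary limit */

    rv = nghttp2_session_server_new2(session, callbacks, user_data, option);
    nghttp2_option_del(option);
    return rv;
}
```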
routers_sort_by_identity(smartlist_t *routers)
{
smartlist_sort(routers, _compare_routerinfo_by_id_digest);
} | 0 | [
"CWE-399"
] | tor | 308f6dad20675c42b29862f4269ad1fbfb00dc9a | 159,266,746,610,049,790,000,000,000,000,000,000,000 | 4 | Mitigate a side-channel leak of which relays Tor chooses for a circuit
Tor's and OpenSSL's current design guarantee that there are other leaks,
but this one is likely to be more easily exploitable, and is easy to fix. |
static void layer_surface_closed(void *data,
struct zwlr_layer_surface_v1 *layer_surface) {
struct swaylock_surface *surface = data;
destroy_surface(surface);
} | 0 | [
"CWE-703"
] | swaylock | 1d1c75b6316d21933069a9d201f966d84099f6ca | 279,912,583,399,529,100,000,000,000,000,000,000,000 | 5 | Add support for ext-session-lock-v1
This is a new protocol to lock the session [1]. It should be more
reliable than layer-shell + input-inhibitor.
[1]: https://gitlab.freedesktop.org/wayland/wayland-protocols/-/merge_requests/131 |
static int qxl_init_primary(PCIDevice *dev)
{
PCIQXLDevice *qxl = DO_UPCAST(PCIQXLDevice, pci, dev);
VGACommonState *vga = &qxl->vga;
ram_addr_t ram_size = msb_mask(qxl->vga.vram_size * 2 - 1);
qxl->id = 0;
if (ram_size < 32 * 1024 * 1024) {
ram_size = 32 * 1024 * 1024;
}
vga_common_init(vga, ram_size);
vga_init(vga);
register_ioport_write(0x3c0, 16, 1, qxl_vga_ioport_write, vga);
register_ioport_write(0x3b4, 2, 1, qxl_vga_ioport_write, vga);
register_ioport_write(0x3d4, 2, 1, qxl_vga_ioport_write, vga);
register_ioport_write(0x3ba, 1, 1, qxl_vga_ioport_write, vga);
register_ioport_write(0x3da, 1, 1, qxl_vga_ioport_write, vga);
vga->ds = graphic_console_init(qxl_hw_update, qxl_hw_invalidate,
qxl_hw_screen_dump, qxl_hw_text_update, qxl);
qemu_spice_display_init_common(&qxl->ssd, vga->ds);
qxl0 = qxl;
register_displaychangelistener(vga->ds, &display_listener);
return qxl_init_common(qxl);
} | 0 | [] | qemu-kvm | 5ff4e36c804157bd84af43c139f8cd3a59722db9 | 121,612,999,655,658,620,000,000,000,000,000,000,000 | 28 | qxl: async io support using new spice api
Some of the QXL port i/o commands are waiting for the spice server to
complete certain actions. Add async versions for these commands, so we
don't block the vcpu while the spice server processes the command.
Instead the qxl device will raise an IRQ when done.
The async command processing relies on an added QXLInterface::async_complete
and added QXLWorker::*_async additions, in spice server qxl >= 3.1
Signed-off-by: Gerd Hoffmann <[email protected]>
Signed-off-by: Alon Levy <[email protected]> |
InputSource::findFirst(char const* start_chars,
qpdf_offset_t offset, size_t len,
Finder& finder)
{
// Basic approach: search for the first character of start_chars
// starting from offset but not going past len (if len != 0). Once
// the first character is found, see if it is the beginning of a
// sequence of characters matching start_chars. If so, call
// finder.check() to do caller-specific additional checks. If not,
// keep searching.
// This code is tricky and highly subject to off-by-one or other
// edge case logic errors. See comments throughout that explain
// how we're not missing any edge cases. There are also tests
// specifically constructed to make sure we caught the edge cases
// in testing.
char buf[1025]; // size known to input_source.cc in libtests
// To enable us to guarantee null-termination, save an extra byte
// so that buf[size] is valid memory.
size_t size = sizeof(buf) - 1;
if ((strlen(start_chars) < 1) || (strlen(start_chars) > size))
{
throw std::logic_error(
"InputSource::findSource called with"
" too small or too large of a character sequence");
}
char* p = 0;
qpdf_offset_t buf_offset = offset;
size_t bytes_read = 0;
// Guarantee that we return from this loop. Each time through, we
// either return, advance p, or restart the loop with a condition
// that will cause return on the next pass. Eventually we will
// either be out of range or hit EOF, either of which forces us to
// return.
while (true)
{
// Do we need to read more data? Pretend size = 5, buf starts
// at 0, and start_chars has 3 characters. buf[5] is valid and
// null. If p == 2, start_chars could be buf[2] through
// buf[4], so p + strlen(start_chars) == buf + size is okay.
// If p points to buf[size], since strlen(start_chars) is
// always >= 1, this overflow test will be correct for that
// case regardless of start_chars.
if ((p == 0) || ((p + strlen(start_chars)) > (buf + bytes_read)))
{
if (p)
{
QTC::TC("libtests", "InputSource read next block",
((p == buf + bytes_read) ? 0 : 1));
buf_offset += (p - buf);
}
this->seek(buf_offset, SEEK_SET);
// Read into buffer and zero out the rest of the buffer
// including buf[size]. We allocated an extra byte so that
// we could guarantee null termination as an extra
// protection against overrun when using string functions.
bytes_read = this->read(buf, size);
if (bytes_read < strlen(start_chars))
{
QTC::TC("libtests", "InputSource find EOF",
bytes_read == 0 ? 0 : 1);
return false;
}
memset(buf + bytes_read, '\0', 1 + (size - bytes_read));
p = buf;
}
// Search for the first character.
if ((p = static_cast<char*>(
memchr(p, start_chars[0], bytes_read - (p - buf)))) != 0)
{
if (p == buf)
{
QTC::TC("libtests", "InputSource found match at buf[0]");
}
// Found first letter.
if (len != 0)
{
// Make sure it's in range.
size_t p_relative_offset = (p - buf) + (buf_offset - offset);
if (p_relative_offset >= len)
{
// out of range
QTC::TC("libtests", "InputSource out of range");
return false;
}
}
if ((p + strlen(start_chars)) > (buf + bytes_read))
{
// If there are not enough bytes left in the file for
// start_chars, we will detect this on the next pass
// as EOF and return.
QTC::TC("libtests", "InputSource not enough bytes");
continue;
}
// See if p points to a sequence matching start_chars. We
// already checked above to make sure we are not going to
// overrun memory.
if (strncmp(p, start_chars, strlen(start_chars)) == 0)
{
// Call finder.check() with the input source
// positioned to the point of the match.
this->seek(buf_offset + (p - buf), SEEK_SET);
if (finder.check())
{
return true;
}
else
{
QTC::TC("libtests", "InputSource start_chars matched but not check");
}
}
else
{
QTC::TC("libtests", "InputSource first char matched but not string");
}
// This occurrence of the first character wasn't a match.
// Skip over it and keep searching.
++p;
}
else
{
// Trigger reading the next block
p = buf + bytes_read;
}
}
throw std::logic_error("InputSource after while (true)");
} | 1 | [
"CWE-787"
] | qpdf | d71f05ca07eb5c7cfa4d6d23e5c1f2a800f52e8e | 311,213,169,030,404,400,000,000,000,000,000,000,000 | 132 | Fix sign and conversion warnings (major)
This makes all integer type conversions that have potential data loss
explicit with calls that do range checks and raise an exception. After
this commit, qpdf builds with no warnings when -Wsign-conversion
-Wconversion is used with gcc or clang or when -W3 -Wd4800 is used
with MSVC. This significantly reduces the likelihood of potential
crashes from bogus integer values.
There are some parts of the code that take int when they should take
size_t or an offset. Such places would make qpdf not support files
with more than 2^31 of something that usually wouldn't be so large. In
the event that such a file shows up and is valid, at least qpdf would
raise an error in the right spot so the issue could be legitimately
addressed rather than failing in some weird way because of a silent
overflow condition. |
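As an illustration of the message above, a minimal sketch of a range-checked narrowing conversion; qpdf's own helpers may differ in name and detail.

```cpp
// Hedged sketch, not qpdf's actual helper: convert a signed file offset to
// size_t only after verifying that no data would be lost.
#include <cstddef>
#include <limits>
#include <stdexcept>

inline size_t to_size(long long offset)
{
    if (offset < 0 ||
        static_cast<unsigned long long>(offset) >
            std::numeric_limits<size_t>::max()) {
        throw std::range_error("offset does not fit in size_t");
    }
    return static_cast<size_t>(offset);
}
```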
DLLEXPORT int tjDecodeYUVPlanes(tjhandle handle,
const unsigned char **srcPlanes,
const int *strides, int subsamp,
unsigned char *dstBuf, int width, int pitch,
int height, int pixelFormat, int flags)
{
JSAMPROW *row_pointer = NULL;
JSAMPLE *_tmpbuf[MAX_COMPONENTS];
JSAMPROW *tmpbuf[MAX_COMPONENTS], *inbuf[MAX_COMPONENTS];
int i, retval = 0, row, pw0, ph0, pw[MAX_COMPONENTS], ph[MAX_COMPONENTS];
JSAMPLE *ptr;
jpeg_component_info *compptr;
int (*old_read_markers) (j_decompress_ptr);
void (*old_reset_marker_reader) (j_decompress_ptr);
getdinstance(handle);
this->jerr.stopOnWarning = (flags & TJFLAG_STOPONWARNING) ? TRUE : FALSE;
for (i = 0; i < MAX_COMPONENTS; i++) {
tmpbuf[i] = NULL; _tmpbuf[i] = NULL; inbuf[i] = NULL;
}
if ((this->init & DECOMPRESS) == 0)
_throw("tjDecodeYUVPlanes(): Instance has not been initialized for decompression");
if (!srcPlanes || !srcPlanes[0] || subsamp < 0 || subsamp >= NUMSUBOPT ||
dstBuf == NULL || width <= 0 || pitch < 0 || height <= 0 ||
pixelFormat < 0 || pixelFormat >= TJ_NUMPF)
_throw("tjDecodeYUVPlanes(): Invalid argument");
if (subsamp != TJSAMP_GRAY && (!srcPlanes[1] || !srcPlanes[2]))
_throw("tjDecodeYUVPlanes(): Invalid argument");
if (setjmp(this->jerr.setjmp_buffer)) {
/* If we get here, the JPEG code has signaled an error. */
retval = -1; goto bailout;
}
if (pixelFormat == TJPF_CMYK)
_throw("tjDecodeYUVPlanes(): Cannot decode YUV images into CMYK pixels.");
if (pitch == 0) pitch = width * tjPixelSize[pixelFormat];
dinfo->image_width = width;
dinfo->image_height = height;
#ifndef NO_PUTENV
if (flags & TJFLAG_FORCEMMX) putenv("JSIMD_FORCEMMX=1");
else if (flags & TJFLAG_FORCESSE) putenv("JSIMD_FORCESSE=1");
else if (flags & TJFLAG_FORCESSE2) putenv("JSIMD_FORCESSE2=1");
#endif
if (setDecodeDefaults(dinfo, pixelFormat, subsamp, flags) == -1) {
retval = -1; goto bailout;
}
old_read_markers = dinfo->marker->read_markers;
dinfo->marker->read_markers = my_read_markers;
old_reset_marker_reader = dinfo->marker->reset_marker_reader;
dinfo->marker->reset_marker_reader = my_reset_marker_reader;
jpeg_read_header(dinfo, TRUE);
dinfo->marker->read_markers = old_read_markers;
dinfo->marker->reset_marker_reader = old_reset_marker_reader;
this->dinfo.out_color_space = pf2cs[pixelFormat];
if (flags & TJFLAG_FASTDCT) this->dinfo.dct_method = JDCT_FASTEST;
dinfo->do_fancy_upsampling = FALSE;
dinfo->Se = DCTSIZE2 - 1;
jinit_master_decompress(dinfo);
(*dinfo->upsample->start_pass) (dinfo);
pw0 = PAD(width, dinfo->max_h_samp_factor);
ph0 = PAD(height, dinfo->max_v_samp_factor);
if (pitch == 0) pitch = dinfo->output_width * tjPixelSize[pixelFormat];
if ((row_pointer = (JSAMPROW *)malloc(sizeof(JSAMPROW) * ph0)) == NULL)
_throw("tjDecodeYUVPlanes(): Memory allocation failure");
for (i = 0; i < height; i++) {
if (flags & TJFLAG_BOTTOMUP)
row_pointer[i] = &dstBuf[(height - i - 1) * pitch];
else
row_pointer[i] = &dstBuf[i * pitch];
}
if (height < ph0)
for (i = height; i < ph0; i++) row_pointer[i] = row_pointer[height - 1];
for (i = 0; i < dinfo->num_components; i++) {
compptr = &dinfo->comp_info[i];
_tmpbuf[i] =
(JSAMPLE *)malloc(PAD(compptr->width_in_blocks * DCTSIZE, 32) *
compptr->v_samp_factor + 32);
if (!_tmpbuf[i])
_throw("tjDecodeYUVPlanes(): Memory allocation failure");
tmpbuf[i] = (JSAMPROW *)malloc(sizeof(JSAMPROW) * compptr->v_samp_factor);
if (!tmpbuf[i])
_throw("tjDecodeYUVPlanes(): Memory allocation failure");
for (row = 0; row < compptr->v_samp_factor; row++) {
unsigned char *_tmpbuf_aligned =
(unsigned char *)PAD((size_t)_tmpbuf[i], 32);
tmpbuf[i][row] =
&_tmpbuf_aligned[PAD(compptr->width_in_blocks * DCTSIZE, 32) * row];
}
pw[i] = pw0 * compptr->h_samp_factor / dinfo->max_h_samp_factor;
ph[i] = ph0 * compptr->v_samp_factor / dinfo->max_v_samp_factor;
inbuf[i] = (JSAMPROW *)malloc(sizeof(JSAMPROW) * ph[i]);
if (!inbuf[i])
_throw("tjDecodeYUVPlanes(): Memory allocation failure");
ptr = (JSAMPLE *)srcPlanes[i];
for (row = 0; row < ph[i]; row++) {
inbuf[i][row] = ptr;
ptr += (strides && strides[i] != 0) ? strides[i] : pw[i];
}
}
if (setjmp(this->jerr.setjmp_buffer)) {
/* If we get here, the JPEG code has signaled an error. */
retval = -1; goto bailout;
}
for (row = 0; row < ph0; row += dinfo->max_v_samp_factor) {
JDIMENSION inrow = 0, outrow = 0;
for (i = 0, compptr = dinfo->comp_info; i < dinfo->num_components;
i++, compptr++)
jcopy_sample_rows(inbuf[i],
row * compptr->v_samp_factor / dinfo->max_v_samp_factor, tmpbuf[i], 0,
compptr->v_samp_factor, pw[i]);
(dinfo->upsample->upsample) (dinfo, tmpbuf, &inrow,
dinfo->max_v_samp_factor, &row_pointer[row],
&outrow, dinfo->max_v_samp_factor);
}
jpeg_abort_decompress(dinfo);
bailout:
if (dinfo->global_state > DSTATE_START) jpeg_abort_decompress(dinfo);
if (row_pointer) free(row_pointer);
for (i = 0; i < MAX_COMPONENTS; i++) {
if (tmpbuf[i] != NULL) free(tmpbuf[i]);
if (_tmpbuf[i] != NULL) free(_tmpbuf[i]);
if (inbuf[i] != NULL) free(inbuf[i]);
}
if (this->jerr.warning) retval = -1;
this->jerr.stopOnWarning = FALSE;
return retval;
} | 0 | [
"CWE-787"
] | libjpeg-turbo | 3d9c64e9f8aa1ee954d1d0bb3390fc894bb84da3 | 330,153,379,047,136,660,000,000,000,000,000,000,000 | 144 | tjLoadImage(): Fix int overflow/segfault w/big BMP
Fixes #304 |
int hashTypeGetValue(robj *o, sds field, unsigned char **vstr, unsigned int *vlen, long long *vll) {
if (o->encoding == OBJ_ENCODING_ZIPLIST) {
*vstr = NULL;
if (hashTypeGetFromZiplist(o, field, vstr, vlen, vll) == 0)
return C_OK;
} else if (o->encoding == OBJ_ENCODING_HT) {
sds value;
if ((value = hashTypeGetFromHashTable(o, field)) != NULL) {
*vstr = (unsigned char*) value;
*vlen = sdslen(value);
return C_OK;
}
} else {
serverPanic("Unknown hash encoding");
}
return C_ERR;
} | 0 | [
"CWE-190"
] | redis | f6a40570fa63d5afdd596c78083d754081d80ae3 | 225,065,036,856,703,120,000,000,000,000,000,000,000 | 17 | Fix ziplist and listpack overflows and truncations (CVE-2021-32627, CVE-2021-32628)
- fix possible heap corruption in ziplist and listpack resulting by trying to
allocate more than the maximum size of 4GB.
- prevent ziplist (hash and zset) from reaching size of above 1GB, will be
converted to HT encoding, that's not a useful size.
- prevent listpack (stream) from reaching size of above 1GB.
- XADD will start a new listpack if the new record may cause the previous
listpack to grow over 1GB.
- XADD will respond with an error if a single stream record is over 1GB
- List type (ziplist in quicklist) was truncating strings that were over 4GB,
now it'll respond with an error. |
GF_Err aprm_box_size(GF_Box *s)
{
u32 pos=0;
GF_AdobeStdEncryptionParamsBox *ptr = (GF_AdobeStdEncryptionParamsBox *)s;
gf_isom_check_position(s, (GF_Box *)ptr->enc_info, &pos);
gf_isom_check_position(s, (GF_Box *)ptr->key_info, &pos);
return GF_OK;
} | 0 | [
"CWE-703"
] | gpac | f19668964bf422cf5a63e4dbe1d3c6c75edadcbb | 4,185,340,577,490,857,600,000,000,000,000,000,000 | 8 | fixed #1879 |
static const struct lg4ff_compat_mode_switch *lg4ff_get_mode_switch_command(const u16 real_product_id, const u16 target_product_id)
{
switch (real_product_id) {
case USB_DEVICE_ID_LOGITECH_DFP_WHEEL:
switch (target_product_id) {
case USB_DEVICE_ID_LOGITECH_DFP_WHEEL:
return &lg4ff_mode_switch_ext01_dfp;
/* DFP can only be switched to its native mode */
default:
return NULL;
}
break;
case USB_DEVICE_ID_LOGITECH_G25_WHEEL:
switch (target_product_id) {
case USB_DEVICE_ID_LOGITECH_DFP_WHEEL:
return &lg4ff_mode_switch_ext01_dfp;
case USB_DEVICE_ID_LOGITECH_G25_WHEEL:
return &lg4ff_mode_switch_ext16_g25;
/* G25 can only be switched to DFP mode or its native mode */
default:
return NULL;
}
break;
case USB_DEVICE_ID_LOGITECH_G27_WHEEL:
switch (target_product_id) {
case USB_DEVICE_ID_LOGITECH_WHEEL:
return &lg4ff_mode_switch_ext09_dfex;
case USB_DEVICE_ID_LOGITECH_DFP_WHEEL:
return &lg4ff_mode_switch_ext09_dfp;
case USB_DEVICE_ID_LOGITECH_G25_WHEEL:
return &lg4ff_mode_switch_ext09_g25;
case USB_DEVICE_ID_LOGITECH_G27_WHEEL:
return &lg4ff_mode_switch_ext09_g27;
/* G27 can only be switched to DF-EX, DFP, G25 or its native mode */
default:
return NULL;
}
break;
case USB_DEVICE_ID_LOGITECH_G29_WHEEL:
switch (target_product_id) {
case USB_DEVICE_ID_LOGITECH_DFP_WHEEL:
return &lg4ff_mode_switch_ext09_dfp;
case USB_DEVICE_ID_LOGITECH_DFGT_WHEEL:
return &lg4ff_mode_switch_ext09_dfgt;
case USB_DEVICE_ID_LOGITECH_G25_WHEEL:
return &lg4ff_mode_switch_ext09_g25;
case USB_DEVICE_ID_LOGITECH_G27_WHEEL:
return &lg4ff_mode_switch_ext09_g27;
case USB_DEVICE_ID_LOGITECH_G29_WHEEL:
return &lg4ff_mode_switch_ext09_g29;
/* G29 can only be switched to DF-EX, DFP, DFGT, G25, G27 or its native mode */
default:
return NULL;
}
break;
case USB_DEVICE_ID_LOGITECH_DFGT_WHEEL:
switch (target_product_id) {
case USB_DEVICE_ID_LOGITECH_WHEEL:
return &lg4ff_mode_switch_ext09_dfex;
case USB_DEVICE_ID_LOGITECH_DFP_WHEEL:
return &lg4ff_mode_switch_ext09_dfp;
case USB_DEVICE_ID_LOGITECH_DFGT_WHEEL:
return &lg4ff_mode_switch_ext09_dfgt;
/* DFGT can only be switched to DF-EX, DFP or its native mode */
default:
return NULL;
}
break;
/* No other wheels have multiple modes */
default:
return NULL;
}
} | 0 | [
"CWE-787"
] | linux | d9d4b1e46d9543a82c23f6df03f4ad697dab361b | 109,402,580,690,722,520,000,000,000,000,000,000,000 | 73 | HID: Fix assumption that devices have inputs
The syzbot fuzzer found a slab-out-of-bounds write bug in the hid-gaff
driver. The problem is caused by the driver's assumption that the
device must have an input report. While this will be true for all
normal HID input devices, a suitably malicious device can violate the
assumption.
The same assumption is present in over a dozen other HID drivers.
This patch fixes them by checking that the list of hid_inputs for the
hid_device is nonempty before allowing it to be used.
Reported-and-tested-by: [email protected]
Signed-off-by: Alan Stern <[email protected]>
CC: <[email protected]>
Signed-off-by: Benjamin Tissoires <[email protected]> |
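A minimal sketch of the guard the message describes, placed at the start of a driver's probe/init path; the names follow the kernel HID structures and this is not the exact diff applied to each driver.

```c
/* Hedged sketch of the check described above: refuse to bind when the HID
 * device exposes no input reports, instead of dereferencing an empty list. */
	struct hid_input *hidinput;

	if (list_empty(&hid->inputs)) {
		hid_err(hid, "no inputs found\n");
		return -ENODEV;
	}
	hidinput = list_first_entry(&hid->inputs, struct hid_input, list);
```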
static double mp_critical(_cimg_math_parser& mp) {
const double res = _mp_arg(1);
cimg_pragma_openmp(critical(mp_critical))
{
for (const CImg<ulongT> *const p_end = ++mp.p_code + mp.opcode[2];
mp.p_code<p_end; ++mp.p_code) { // Evaluate body
mp.opcode._data = mp.p_code->_data;
const ulongT target = mp.opcode[1];
mp.mem[target] = _cimg_mp_defunc(mp);
}
}
--mp.p_code;
return res;
} | 0 | [
"CWE-125"
] | CImg | 10af1e8c1ad2a58a0a3342a856bae63e8f257abb | 201,650,240,454,340,900,000,000,000,000,000,000,000 | 14 | Fix other issues in 'CImg<T>::load_bmp()'. |
ofputil_append_queue_stat(struct ovs_list *replies,
const struct ofputil_queue_stats *oqs)
{
switch (ofpmp_version(replies)) {
case OFP13_VERSION: {
struct ofp13_queue_stats *reply = ofpmp_append(replies, sizeof *reply);
ofputil_queue_stats_to_ofp13(oqs, reply);
break;
}
case OFP12_VERSION:
case OFP11_VERSION: {
struct ofp11_queue_stats *reply = ofpmp_append(replies, sizeof *reply);
ofputil_queue_stats_to_ofp11(oqs, reply);
break;
}
case OFP10_VERSION: {
struct ofp10_queue_stats *reply = ofpmp_append(replies, sizeof *reply);
ofputil_queue_stats_to_ofp10(oqs, reply);
break;
}
case OFP14_VERSION:
case OFP15_VERSION:
case OFP16_VERSION: {
struct ofp14_queue_stats *reply = ofpmp_append(replies, sizeof *reply);
ofputil_queue_stats_to_ofp14(oqs, reply);
break;
}
default:
OVS_NOT_REACHED();
}
} | 0 | [
"CWE-772"
] | ovs | 77ad4225d125030420d897c873e4734ac708c66b | 185,198,777,952,779,700,000,000,000,000,000,000,000 | 35 | ofp-util: Fix memory leaks on error cases in ofputil_decode_group_mod().
Found by libFuzzer.
Reported-by: Bhargava Shastry <[email protected]>
Signed-off-by: Ben Pfaff <[email protected]>
Acked-by: Justin Pettit <[email protected]> |
TEST_F(RouterTest, DirectResponseWithoutLocation) {
NiceMock<MockDirectResponseEntry> direct_response;
std::string route_name("route-test-name");
EXPECT_CALL(direct_response, newPath(_)).WillOnce(Return("http://host/"));
EXPECT_CALL(direct_response, routeName()).WillOnce(ReturnRef(route_name));
EXPECT_CALL(direct_response, responseCode()).WillRepeatedly(Return(Http::Code::OK));
EXPECT_CALL(direct_response, responseBody()).WillRepeatedly(ReturnRef(EMPTY_STRING));
EXPECT_CALL(*callbacks_.route_, directResponseEntry()).WillRepeatedly(Return(&direct_response));
absl::string_view route_name_view(route_name);
EXPECT_CALL(callbacks_.stream_info_, setRouteName(route_name_view));
Http::TestResponseHeaderMapImpl response_headers{{":status", "200"}};
EXPECT_CALL(callbacks_, encodeHeaders_(HeaderMapEqualRef(&response_headers), true));
EXPECT_CALL(span_, injectContext(_)).Times(0);
Http::TestRequestHeaderMapImpl headers;
HttpTestUtility::addDefaultHeaders(headers);
router_.decodeHeaders(headers, true);
EXPECT_EQ(0U,
callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
EXPECT_FALSE(callbacks_.stream_info_.attemptCount().has_value());
EXPECT_TRUE(verifyHostUpstreamStats(0, 0));
EXPECT_EQ(1UL, config_.stats_.rq_direct_response_.value());
} | 0 | [
"CWE-703"
] | envoy | f0bb2219112d8cdb4c4e8b346834f962925362ca | 193,080,773,499,983,100,000,000,000,000,000,000,000 | 23 | [1.20] CVE-2022-21655
Crash with direct_response
Signed-off-by: Otto van der Schaaf <[email protected]> |
Mat_GetFilename(mat_t *mat)
{
const char *filename = NULL;
if ( NULL != mat )
filename = mat->filename;
return filename;
} | 0 | [
"CWE-401"
] | matio | a47b7cd3aca70e9a0bddf8146eb4ab0cbd19c2c3 | 141,205,732,478,967,170,000,000,000,000,000,000,000 | 7 | Fix memory leak
As reported by https://github.com/tbeu/matio/issues/131 |
static OPJ_BOOL opj_jp2_read_pclr(opj_jp2_t *jp2,
OPJ_BYTE * p_pclr_header_data,
OPJ_UINT32 p_pclr_header_size,
opj_event_mgr_t * p_manager
)
{
opj_jp2_pclr_t *jp2_pclr;
OPJ_BYTE *channel_size, *channel_sign;
OPJ_UINT32 *entries;
OPJ_UINT16 nr_entries, nr_channels;
OPJ_UINT16 i, j;
OPJ_UINT32 l_value;
OPJ_BYTE *orig_header_data = p_pclr_header_data;
/* preconditions */
assert(p_pclr_header_data != 00);
assert(jp2 != 00);
assert(p_manager != 00);
(void)p_pclr_header_size;
if (jp2->color.jp2_pclr) {
return OPJ_FALSE;
}
if (p_pclr_header_size < 3) {
return OPJ_FALSE;
}
opj_read_bytes(p_pclr_header_data, &l_value, 2); /* NE */
p_pclr_header_data += 2;
nr_entries = (OPJ_UINT16) l_value;
if ((nr_entries == 0U) || (nr_entries > 1024U)) {
opj_event_msg(p_manager, EVT_ERROR, "Invalid PCLR box. Reports %d entries\n",
(int)nr_entries);
return OPJ_FALSE;
}
opj_read_bytes(p_pclr_header_data, &l_value, 1); /* NPC */
++p_pclr_header_data;
nr_channels = (OPJ_UINT16) l_value;
if (nr_channels == 0U) {
opj_event_msg(p_manager, EVT_ERROR,
"Invalid PCLR box. Reports 0 palette columns\n");
return OPJ_FALSE;
}
if (p_pclr_header_size < 3 + (OPJ_UINT32)nr_channels) {
return OPJ_FALSE;
}
entries = (OPJ_UINT32*) opj_malloc(sizeof(OPJ_UINT32) * nr_channels *
nr_entries);
if (!entries) {
return OPJ_FALSE;
}
channel_size = (OPJ_BYTE*) opj_malloc(nr_channels);
if (!channel_size) {
opj_free(entries);
return OPJ_FALSE;
}
channel_sign = (OPJ_BYTE*) opj_malloc(nr_channels);
if (!channel_sign) {
opj_free(entries);
opj_free(channel_size);
return OPJ_FALSE;
}
jp2_pclr = (opj_jp2_pclr_t*)opj_malloc(sizeof(opj_jp2_pclr_t));
if (!jp2_pclr) {
opj_free(entries);
opj_free(channel_size);
opj_free(channel_sign);
return OPJ_FALSE;
}
jp2_pclr->channel_sign = channel_sign;
jp2_pclr->channel_size = channel_size;
jp2_pclr->entries = entries;
jp2_pclr->nr_entries = nr_entries;
jp2_pclr->nr_channels = (OPJ_BYTE) l_value;
jp2_pclr->cmap = NULL;
jp2->color.jp2_pclr = jp2_pclr;
for (i = 0; i < nr_channels; ++i) {
opj_read_bytes(p_pclr_header_data, &l_value, 1); /* Bi */
++p_pclr_header_data;
channel_size[i] = (OPJ_BYTE)((l_value & 0x7f) + 1);
channel_sign[i] = (l_value & 0x80) ? 1 : 0;
}
for (j = 0; j < nr_entries; ++j) {
for (i = 0; i < nr_channels; ++i) {
OPJ_UINT32 bytes_to_read = (OPJ_UINT32)((channel_size[i] + 7) >> 3);
if (bytes_to_read > sizeof(OPJ_UINT32)) {
bytes_to_read = sizeof(OPJ_UINT32);
}
if ((ptrdiff_t)p_pclr_header_size < (ptrdiff_t)(p_pclr_header_data -
orig_header_data) + (ptrdiff_t)bytes_to_read) {
return OPJ_FALSE;
}
opj_read_bytes(p_pclr_header_data, &l_value, bytes_to_read); /* Cji */
p_pclr_header_data += bytes_to_read;
*entries = (OPJ_UINT32) l_value;
entries++;
}
}
return OPJ_TRUE;
} | 0 | [
"CWE-20"
] | openjpeg | 4edb8c83374f52cd6a8f2c7c875e8ffacccb5fa5 | 209,469,213,026,605,700,000,000,000,000,000,000,000 | 113 | Add support for generation of PLT markers in encoder
* -PLT switch added to opj_compress
* Add a opj_encoder_set_extra_options() function that
accepts a PLT=YES option, and could be expanded later
for other uses.
-------
Testing with a Sentinel2 10m band, T36JTT_20160914T074612_B02.jp2,
coming from S2A_MSIL1C_20160914T074612_N0204_R135_T36JTT_20160914T081456.SAFE
Decompress it to TIFF:
```
opj_uncompress -i T36JTT_20160914T074612_B02.jp2 -o T36JTT_20160914T074612_B02.tif
```
Recompress it with similar parameters as original:
```
opj_compress -n 5 -c [256,256],[256,256],[256,256],[256,256],[256,256] -t 1024,1024 -PLT -i T36JTT_20160914T074612_B02.tif -o T36JTT_20160914T074612_B02_PLT.jp2
```
Dump codestream detail with GDAL dump_jp2.py utility (https://github.com/OSGeo/gdal/blob/master/gdal/swig/python/samples/dump_jp2.py)
```
python dump_jp2.py T36JTT_20160914T074612_B02.jp2 > /tmp/dump_sentinel2_ori.txt
python dump_jp2.py T36JTT_20160914T074612_B02_PLT.jp2 > /tmp/dump_sentinel2_openjpeg_plt.txt
```
The diff between both show very similar structure, and identical number of packets in PLT markers
Now testing with Kakadu (KDU803_Demo_Apps_for_Linux-x86-64_200210)
Full file decompression:
```
kdu_expand -i T36JTT_20160914T074612_B02_PLT.jp2 -o tmp.tif
Consumed 121 tile-part(s) from a total of 121 tile(s).
Consumed 80,318,806 codestream bytes (excluding any file format) = 5.329697
bits/pel.
Processed using the multi-threaded environment, with
8 parallel threads of execution
```
Partial decompresson (presumably using PLT markers):
```
kdu_expand -i T36JTT_20160914T074612_B02.jp2 -o tmp.pgm -region "{0.5,0.5},{0.01,0.01}"
kdu_expand -i T36JTT_20160914T074612_B02_PLT.jp2 -o tmp2.pgm -region "{0.5,0.5},{0.01,0.01}"
diff tmp.pgm tmp2.pgm && echo "same !"
```
-------
Funded by ESA for S2-MPC project |
term_report_winsize(term_T *term, int rows, int cols)
{
/* Use an ioctl() to report the new window size to the job. */
if (term->tl_job != NULL && term->tl_job->jv_channel != NULL)
{
int fd = -1;
int part;
for (part = PART_OUT; part < PART_COUNT; ++part)
{
fd = term->tl_job->jv_channel->ch_part[part].ch_fd;
if (isatty(fd))
break;
}
if (part < PART_COUNT && mch_report_winsize(fd, rows, cols) == OK)
mch_signal_job(term->tl_job, (char_u *)"winch");
}
} | 0 | [
"CWE-476"
] | vim | cd929f7ba8cc5b6d6dcf35c8b34124e969fed6b8 | 126,591,327,237,470,100,000,000,000,000,000,000,000 | 18 | patch 8.1.0633: crash when out of memory while opening a terminal window
Problem: Crash when out of memory while opening a terminal window.
Solution: Handle out-of-memory more gracefully. |
static ut64 get_entrypoint(RBuffer *buf) {
ut8 b;
ut64 result;
int addr;
for (addr = 0x8; addr <= 0x10; addr += 0x4) {
r_buf_read_at (buf, addr, &b, sizeof (b));
if (pyc_is_code (b, version.magic)) {
code_start_offset = addr;
r_buf_seek (buf, addr + 1, R_BUF_SET);
if ((result = get_code_object_addr (buf, version.magic)) == 0) {
return addr;
}
return result;
}
}
return 0;
} | 0 | [
"CWE-415"
] | radare2 | 049de62730f4954ef9a642f2eeebbca30a8eccdc | 174,786,179,345,989,200,000,000,000,000,000,000,000 | 17 | Fix #18679 - UAF when parsing corrupted pyc files ##bin |
invoke_NPN_RequestRead(NPStream *stream, NPByteRange *rangeList)
{
npw_return_val_if_fail(rpc_method_invoke_possible(g_rpc_connection),
NPERR_GENERIC_ERROR);
int error = rpc_method_invoke(g_rpc_connection,
RPC_METHOD_NPN_REQUEST_READ,
RPC_TYPE_NP_STREAM, stream,
RPC_TYPE_NP_BYTE_RANGE, rangeList,
RPC_TYPE_INVALID);
if (error != RPC_ERROR_NO_ERROR) {
npw_perror("NPN_RequestRead() invoke", error);
return NPERR_GENERIC_ERROR;
}
int32_t ret;
error = rpc_method_wait_for_reply(g_rpc_connection, RPC_TYPE_INT32, &ret, RPC_TYPE_INVALID);
if (error != RPC_ERROR_NO_ERROR) {
npw_perror("NPN_RequestRead() wait for reply", error);
return NPERR_GENERIC_ERROR;
}
return ret;
} | 0 | [
"CWE-264"
] | nspluginwrapper | 7e4ab8e1189846041f955e6c83f72bc1624e7a98 | 249,193,789,975,659,800,000,000,000,000,000,000,000 | 26 | Support all the new variables added |
static filter_rule *parse_rule_tok(const char **rulestr_ptr,
const filter_rule *template, int xflags,
const char **pat_ptr, unsigned int *pat_len_ptr)
{
const uchar *s = (const uchar *)*rulestr_ptr;
filter_rule *rule;
unsigned int len;
if (template->rflags & FILTRULE_WORD_SPLIT) {
/* Skip over any initial whitespace. */
while (isspace(*s))
s++;
/* Update to point to real start of rule. */
*rulestr_ptr = (const char *)s;
}
if (!*s)
return NULL;
rule = new0(filter_rule);
/* Inherit from the template. Don't inherit FILTRULES_SIDES; we check
* that later. */
rule->rflags = template->rflags & FILTRULES_FROM_CONTAINER;
/* Figure out what kind of a filter rule "s" is pointing at. Note
* that if FILTRULE_NO_PREFIXES is set, the rule is either an include
* or an exclude based on the inheritance of the FILTRULE_INCLUDE
* flag (above). XFLG_OLD_PREFIXES indicates a compatibility mode
* for old include/exclude patterns where just "+ " and "- " are
* allowed as optional prefixes. */
if (template->rflags & FILTRULE_NO_PREFIXES) {
if (*s == '!' && template->rflags & FILTRULE_CVS_IGNORE)
rule->rflags |= FILTRULE_CLEAR_LIST; /* Tentative! */
} else if (xflags & XFLG_OLD_PREFIXES) {
if (*s == '-' && s[1] == ' ') {
rule->rflags &= ~FILTRULE_INCLUDE;
s += 2;
} else if (*s == '+' && s[1] == ' ') {
rule->rflags |= FILTRULE_INCLUDE;
s += 2;
} else if (*s == '!')
rule->rflags |= FILTRULE_CLEAR_LIST; /* Tentative! */
} else {
char ch = 0;
BOOL prefix_specifies_side = False;
switch (*s) {
case 'c':
if ((s = RULE_STRCMP(s, "clear")) != NULL)
ch = '!';
break;
case 'd':
if ((s = RULE_STRCMP(s, "dir-merge")) != NULL)
ch = ':';
break;
case 'e':
if ((s = RULE_STRCMP(s, "exclude")) != NULL)
ch = '-';
break;
case 'h':
if ((s = RULE_STRCMP(s, "hide")) != NULL)
ch = 'H';
break;
case 'i':
if ((s = RULE_STRCMP(s, "include")) != NULL)
ch = '+';
break;
case 'm':
if ((s = RULE_STRCMP(s, "merge")) != NULL)
ch = '.';
break;
case 'p':
if ((s = RULE_STRCMP(s, "protect")) != NULL)
ch = 'P';
break;
case 'r':
if ((s = RULE_STRCMP(s, "risk")) != NULL)
ch = 'R';
break;
case 's':
if ((s = RULE_STRCMP(s, "show")) != NULL)
ch = 'S';
break;
default:
ch = *s;
if (s[1] == ',')
s++;
break;
}
switch (ch) {
case ':':
trust_sender_filter = 1;
rule->rflags |= FILTRULE_PERDIR_MERGE
| FILTRULE_FINISH_SETUP;
/* FALL THROUGH */
case '.':
rule->rflags |= FILTRULE_MERGE_FILE;
break;
case '+':
rule->rflags |= FILTRULE_INCLUDE;
break;
case '-':
break;
case 'S':
rule->rflags |= FILTRULE_INCLUDE;
/* FALL THROUGH */
case 'H':
rule->rflags |= FILTRULE_SENDER_SIDE;
prefix_specifies_side = True;
break;
case 'R':
rule->rflags |= FILTRULE_INCLUDE;
/* FALL THROUGH */
case 'P':
rule->rflags |= FILTRULE_RECEIVER_SIDE;
prefix_specifies_side = True;
break;
case '!':
rule->rflags |= FILTRULE_CLEAR_LIST;
break;
default:
rprintf(FERROR, "Unknown filter rule: `%s'\n", *rulestr_ptr);
exit_cleanup(RERR_SYNTAX);
}
while (ch != '!' && *++s && *s != ' ' && *s != '_') {
if (template->rflags & FILTRULE_WORD_SPLIT && isspace(*s)) {
s--;
break;
}
switch (*s) {
default:
invalid:
rprintf(FERROR,
"invalid modifier '%c' at position %d in filter rule: %s\n",
*s, (int)(s - (const uchar *)*rulestr_ptr), *rulestr_ptr);
exit_cleanup(RERR_SYNTAX);
case '-':
if (!BITS_SETnUNSET(rule->rflags, FILTRULE_MERGE_FILE, FILTRULE_NO_PREFIXES))
goto invalid;
rule->rflags |= FILTRULE_NO_PREFIXES;
break;
case '+':
if (!BITS_SETnUNSET(rule->rflags, FILTRULE_MERGE_FILE, FILTRULE_NO_PREFIXES))
goto invalid;
rule->rflags |= FILTRULE_NO_PREFIXES
| FILTRULE_INCLUDE;
break;
case '/':
rule->rflags |= FILTRULE_ABS_PATH;
break;
case '!':
/* Negation really goes with the pattern, so it
* isn't useful as a merge-file default. */
if (rule->rflags & FILTRULE_MERGE_FILE)
goto invalid;
rule->rflags |= FILTRULE_NEGATE;
break;
case 'C':
if (rule->rflags & FILTRULE_NO_PREFIXES || prefix_specifies_side)
goto invalid;
rule->rflags |= FILTRULE_NO_PREFIXES
| FILTRULE_WORD_SPLIT
| FILTRULE_NO_INHERIT
| FILTRULE_CVS_IGNORE;
break;
case 'e':
if (!(rule->rflags & FILTRULE_MERGE_FILE))
goto invalid;
rule->rflags |= FILTRULE_EXCLUDE_SELF;
break;
case 'n':
if (!(rule->rflags & FILTRULE_MERGE_FILE))
goto invalid;
rule->rflags |= FILTRULE_NO_INHERIT;
break;
case 'p':
rule->rflags |= FILTRULE_PERISHABLE;
break;
case 'r':
if (prefix_specifies_side)
goto invalid;
rule->rflags |= FILTRULE_RECEIVER_SIDE;
break;
case 's':
if (prefix_specifies_side)
goto invalid;
rule->rflags |= FILTRULE_SENDER_SIDE;
break;
case 'w':
if (!(rule->rflags & FILTRULE_MERGE_FILE))
goto invalid;
rule->rflags |= FILTRULE_WORD_SPLIT;
break;
case 'x':
rule->rflags |= FILTRULE_XATTR;
saw_xattr_filter = 1;
break;
}
}
if (*s)
s++;
}
if (template->rflags & FILTRULES_SIDES) {
if (rule->rflags & FILTRULES_SIDES) {
/* The filter and template both specify side(s). This
* is dodgy (and won't work correctly if the template is
* a one-sided per-dir merge rule), so reject it. */
rprintf(FERROR,
"specified-side merge file contains specified-side filter: %s\n",
*rulestr_ptr);
exit_cleanup(RERR_SYNTAX);
}
rule->rflags |= template->rflags & FILTRULES_SIDES;
}
if (template->rflags & FILTRULE_WORD_SPLIT) {
const uchar *cp = s;
/* Token ends at whitespace or the end of the string. */
while (!isspace(*cp) && *cp != '\0')
cp++;
len = cp - s;
} else
len = strlen((char*)s);
if (rule->rflags & FILTRULE_CLEAR_LIST) {
if (!(rule->rflags & FILTRULE_NO_PREFIXES)
&& !(xflags & XFLG_OLD_PREFIXES) && len) {
rprintf(FERROR,
"'!' rule has trailing characters: %s\n", *rulestr_ptr);
exit_cleanup(RERR_SYNTAX);
}
if (len > 1)
rule->rflags &= ~FILTRULE_CLEAR_LIST;
} else if (!len && !(rule->rflags & FILTRULE_CVS_IGNORE)) {
rprintf(FERROR, "unexpected end of filter rule: %s\n", *rulestr_ptr);
exit_cleanup(RERR_SYNTAX);
}
/* --delete-excluded turns an un-modified include/exclude into a sender-side rule. */
if (delete_excluded
&& !(rule->rflags & (FILTRULES_SIDES|FILTRULE_MERGE_FILE|FILTRULE_PERDIR_MERGE)))
rule->rflags |= FILTRULE_SENDER_SIDE;
*pat_ptr = (const char *)s;
*pat_len_ptr = len;
*rulestr_ptr = *pat_ptr + len;
return rule;
} | 0 | [] | rsync | b7231c7d02cfb65d291af74ff66e7d8c507ee871 | 276,062,267,226,109,300,000,000,000,000,000,000,000 | 247 | Some extra file-list safety checks. |
write_ignore_ca_cert_helper (CopyOneSettingValueInfo *info,
const char *tag,
const GByteArray *cert)
{
g_return_if_fail (info != NULL);
g_return_if_fail (tag != NULL);
if (cert) {
char *key;
key = g_strdup_printf ("%s/%s/%s", info->dir, NM_SETTING_802_1X_SETTING_NAME, tag);
gconf_client_unset (info->client, key, NULL);
g_free (key);
} else {
if (GPOINTER_TO_UINT (g_object_get_data (G_OBJECT (info->connection), tag)))
nm_gconf_set_bool_helper (info->client, info->dir, tag, NM_SETTING_802_1X_SETTING_NAME, TRUE);
}
} | 0 | [
"CWE-310"
] | network-manager-applet | 4020594dfbf566f1852f0acb36ad631a9e73a82b | 238,183,454,754,349,030,000,000,000,000,000,000,000 | 18 | core: fix CA cert mishandling after cert file deletion (deb #560067) (rh #546793)
If a connection was created with a CA certificate, but the user later
moved or deleted that CA certificate, the applet would simply provide the
connection to NetworkManager without any CA certificate. This could cause
NM to connect to the original network (or a network spoofing the original
network) without verifying the identity of the network as the user
expects.
In the future we can/should do better here by (1) alerting the user that
some connection is now no longer complete by flagging it in the connection
editor or notifying the user somehow, and (2) by using a freaking' cert
store already (not that Linux has one yet). |
static int tda9840_getmode(struct CHIPSTATE *chip)
{
int val, mode;
val = chip_read(chip);
mode = V4L2_TUNER_MODE_MONO;
if (val & TDA9840_DS_DUAL)
mode |= V4L2_TUNER_MODE_LANG1 | V4L2_TUNER_MODE_LANG2;
if (val & TDA9840_ST_STEREO)
mode |= V4L2_TUNER_MODE_STEREO;
v4l_dbg(1, debug, chip->c, "tda9840_getmode(): raw chip read: %d, return: %d\n",
val, mode);
return mode;
} | 0 | [
"CWE-399"
] | linux-2.6 | 01a1a3cc1e3fbe718bd06a2a5d4d1a2d0fb4d7d9 | 62,369,890,194,862,620,000,000,000,000,000,000,000 | 15 | V4L/DVB (9624): CVE-2008-5033: fix OOPS on tvaudio when controlling bass/treble
This bug was supposed to be fixed by 5ba2f67afb02c5302b2898949ed6fc3b3d37dcf1,
where a call to NULL happens.
Not all tvaudio chips allow controlling bass/treble. So, the driver
has a table with a flag to indicate if the chip does support it.
Unfortunately, the handling of this logic was broken for a very long
time (probably since the first module version). Due to that, an OOPS
was generated for devices that don't support bass/treble.
This were the resulting OOPS message before the patch, with debug messages
enabled:
tvaudio' 1-005b: VIDIOC_S_CTRL
BUG: unable to handle kernel NULL pointer dereference at 00000000
IP: [<00000000>]
*pde = 22fda067 *pte = 00000000
Oops: 0000 [#1] SMP
Modules linked in: snd_hda_intel snd_seq_dummy snd_seq_oss snd_seq_midi_event snd_seq snd_seq_device
snd_pcm_oss snd_mixer_oss snd_pcm snd_timer snd_hwdep snd soundcore tuner_simple tuner_types tea5767 tuner
tvaudio bttv bridge bnep rfcomm l2cap bluetooth it87 hwmon_vid hwmon fuse sunrpc ipt_REJECT
nf_conntrack_ipv4 iptable_filter ip_tables ip6t_REJECT xt_tcpudp nf_conntrack_ipv6 xt_state nf_conntrack
ip6table_filter ip6_tables x_tables ipv6 dm_mirror dm_multipath dm_mod configfs videodev v4l1_compat
ir_common 8139cp compat_ioctl32 v4l2_common 8139too videobuf_dma_sg videobuf_core mii btcx_risc tveeprom
i915 button snd_page_alloc serio_raw drm pcspkr i2c_algo_bit i2c_i801 i2c_core iTCO_wdt
iTCO_vendor_support sr_mod cdrom sg ata_generic pata_acpi ata_piix libata sd_mod scsi_mod ext3 jbdmbcache
uhci_hcd ohci_hcd ehci_hcd [last unloaded: soundcore]
Pid: 15413, comm: qv4l2 Not tainted (2.6.25.14-108.fc9.i686 #1)
EIP: 0060:[<00000000>] EFLAGS: 00210246 CPU: 0
EIP is at 0x0
EAX: 00008000 EBX: ebd21600 ECX: e2fd9ec4 EDX: 00200046
ESI: f8c0f0c4 EDI: f8c0f0c4 EBP: e2fd9d50 ESP: e2fd9d2c
DS: 007b ES: 007b FS: 00d8 GS: 0033 SS: 0068
Process qv4l2 (pid: 15413, ti=e2fd9000 task=ebe44000 task.ti=e2fd9000)
Stack: f8c0c6ae e2ff2a00 00000d00 e2fd9ec4 ebc4e000 e2fd9d5c f8c0c448 00000000
f899c12a e2fd9d5c f899c154 e2fd9d68 e2fd9d80 c0560185 e2fd9d88 f8f3e1d8
f8f3e1dc ebc4e034 f8f3e18c e2fd9ec4 00000000 e2fd9d90 f899c286 c008561c
Call Trace:
[<f8c0c6ae>] ? chip_command+0x266/0x4b6 [tvaudio]
[<f8c0c448>] ? chip_command+0x0/0x4b6 [tvaudio]
[<f899c12a>] ? i2c_cmd+0x0/0x2f [i2c_core]
[<f899c154>] ? i2c_cmd+0x2a/0x2f [i2c_core]
[<c0560185>] ? device_for_each_child+0x21/0x49
[<f899c286>] ? i2c_clients_command+0x1c/0x1e [i2c_core]
[<f8f283d8>] ? bttv_call_i2c_clients+0x14/0x16 [bttv]
[<f8f23601>] ? bttv_s_ctrl+0x1bc/0x313 [bttv]
[<f8f23445>] ? bttv_s_ctrl+0x0/0x313 [bttv]
[<f8b6096d>] ? __video_do_ioctl+0x1f84/0x3726 [videodev]
[<c05abb4e>] ? sock_aio_write+0x100/0x10d
[<c041b23e>] ? kmap_atomic_prot+0x1dd/0x1df
[<c043a0c9>] ? enqueue_hrtimer+0xc2/0xcd
[<c04f4fa4>] ? copy_from_user+0x39/0x121
[<f8b622b9>] ? __video_ioctl2+0x1aa/0x24a [videodev]
[<c04054fd>] ? do_notify_resume+0x768/0x795
[<c043c0f7>] ? getnstimeofday+0x34/0xd1
[<c0437b77>] ? autoremove_wake_function+0x0/0x33
[<f8b62368>] ? video_ioctl2+0xf/0x13 [videodev]
[<c048c6f0>] ? vfs_ioctl+0x50/0x69
[<c048c942>] ? do_vfs_ioctl+0x239/0x24c
[<c048c995>] ? sys_ioctl+0x40/0x5b
[<c0405bf2>] ? syscall_call+0x7/0xb
[<c0620000>] ? cpuid4_cache_sysfs_exit+0x3d/0x69
=======================
Code: Bad EIP value.
EIP: [<00000000>] 0x0 SS:ESP 0068:e2fd9d2c
Signed-off-by: Mauro Carvalho Chehab <[email protected]> |
static void rtreeSearchPointPop(RtreeCursor *p){
int i, j, k, n;
i = 1 - p->bPoint;
assert( i==0 || i==1 );
if( p->aNode[i] ){
nodeRelease(RTREE_OF_CURSOR(p), p->aNode[i]);
p->aNode[i] = 0;
}
if( p->bPoint ){
p->anQueue[p->sPoint.iLevel]--;
p->bPoint = 0;
}else if( p->nPoint ){
p->anQueue[p->aPoint[0].iLevel]--;
n = --p->nPoint;
p->aPoint[0] = p->aPoint[n];
if( n<RTREE_CACHE_SZ-1 ){
p->aNode[1] = p->aNode[n+1];
p->aNode[n+1] = 0;
}
i = 0;
while( (j = i*2+1)<n ){
k = j+1;
if( k<n && rtreeSearchPointCompare(&p->aPoint[k], &p->aPoint[j])<0 ){
if( rtreeSearchPointCompare(&p->aPoint[k], &p->aPoint[i])<0 ){
rtreeSearchPointSwap(p, i, k);
i = k;
}else{
break;
}
}else{
if( rtreeSearchPointCompare(&p->aPoint[j], &p->aPoint[i])<0 ){
rtreeSearchPointSwap(p, i, j);
i = j;
}else{
break;
}
}
}
}
} | 0 | [
"CWE-125"
] | sqlite | e41fd72acc7a06ce5a6a7d28154db1ffe8ba37a8 | 54,351,836,813,523,960,000,000,000,000,000,000,000 | 40 | Enhance the rtreenode() function of rtree (used for testing) so that it
uses the newer sqlite3_str object for better performance and improved
error reporting.
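For reference, a small hedged sketch of the sqlite3_str interface being referred to (the db handle and rowid formatting are illustrative, not the rtreenode() code itself):
```
#include <sqlite3.h>

/* Sketch: build "{<rowid>}" incrementally; errors accumulate inside the
 * object and surface when it is finished.  Caller frees with sqlite3_free(). */
static char *fmt_rowid(sqlite3 *db, sqlite3_int64 iRowid)
{
  sqlite3_str *pStr = sqlite3_str_new(db);
  sqlite3_str_appendf(pStr, "{%lld}", iRowid);
  return sqlite3_str_finish(pStr);      /* NULL on OOM */
}
```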
FossilOrigin-Name: 90acdbfce9c088582d5165589f7eac462b00062bbfffacdcc786eb9cf3ea5377 |
static int umr_check_mkey_mask(struct mlx5_ib_dev *dev, u64 mask)
{
if ((mask & MLX5_MKEY_MASK_PAGE_SIZE &&
MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled)) ||
(mask & MLX5_MKEY_MASK_A &&
MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled)))
return -EPERM;
return 0;
} | 0 | [
"CWE-119",
"CWE-787"
] | linux | 0625b4ba1a5d4703c7fb01c497bd6c156908af00 | 72,227,375,781,989,100,000,000,000,000,000,000,000 | 9 | IB/mlx5: Fix leaking stack memory to userspace
mlx5_ib_create_qp_resp was never initialized and only the first 4 bytes
were written.
Fixes: 41d902cb7c32 ("RDMA/mlx5: Fix definition of mlx5_ib_create_qp_resp")
Cc: <[email protected]>
Acked-by: Leon Romanovsky <[email protected]>
Signed-off-by: Jason Gunthorpe <[email protected]> |
void ext4_get_group_no_and_offset(struct super_block *sb, ext4_fsblk_t blocknr,
ext4_group_t *blockgrpp, ext4_grpblk_t *offsetp)
{
struct ext4_super_block *es = EXT4_SB(sb)->s_es;
ext4_grpblk_t offset;
blocknr = blocknr - le32_to_cpu(es->s_first_data_block);
offset = do_div(blocknr, EXT4_BLOCKS_PER_GROUP(sb)) >>
EXT4_SB(sb)->s_cluster_bits;
if (offsetp)
*offsetp = offset;
if (blockgrpp)
*blockgrpp = blocknr;
} | 0 | [] | linux | 7dac4a1726a9c64a517d595c40e95e2d0d135f6f | 155,193,510,647,801,700,000,000,000,000,000,000,000 | 15 | ext4: add validity checks for bitmap block numbers
An privileged attacker can cause a crash by mounting a crafted ext4
image which triggers a out-of-bounds read in the function
ext4_valid_block_bitmap() in fs/ext4/balloc.c.
This issue has been assigned CVE-2018-1093.
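A hedged sketch of the kind of range check such a fix adds before trusting an on-disk bitmap block number (helper names follow the usual ext4 ones; this is not the literal patch):
```
/* Sketch, inside fs/ext4: a block number read from the group descriptor is
 * only usable if it falls inside the block range owned by that group. */
static int blk_in_group(struct super_block *sb, ext4_group_t group,
			ext4_fsblk_t blk)
{
	ext4_fsblk_t first = ext4_group_first_block_no(sb, group);

	return blk >= first && blk < first + EXT4_BLOCKS_PER_GROUP(sb);
}
```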
BugLink: https://bugzilla.kernel.org/show_bug.cgi?id=199181
BugLink: https://bugzilla.redhat.com/show_bug.cgi?id=1560782
Reported-by: Wen Xu <[email protected]>
Signed-off-by: Theodore Ts'o <[email protected]>
Cc: [email protected] |
ews_update_store_move_recursive (CamelEwsStore *ews_store,
CamelFolderInfo *folder_info)
{
while (folder_info != NULL) {
if (folder_info->child != NULL)
ews_update_store_move_recursive (ews_store, folder_info->child);
camel_store_folder_created (CAMEL_STORE (ews_store), folder_info);
camel_subscribable_folder_subscribed (CAMEL_SUBSCRIBABLE (ews_store), folder_info);
folder_info = folder_info->next;
}
} | 0 | [
"CWE-295"
] | evolution-ews | 915226eca9454b8b3e5adb6f2fff9698451778de | 236,597,779,857,437,500,000,000,000,000,000,000,000 | 13 | I#27 - SSL Certificates are not validated
This depends on https://gitlab.gnome.org/GNOME/evolution-data-server/commit/6672b8236139bd6ef41ecb915f4c72e2a052dba5 too.
Closes https://gitlab.gnome.org/GNOME/evolution-ews/issues/27 |
void CClient::StatusCTCP(const CString& sLine) {
CString sCommand = sLine.Token(0);
if (sCommand.Equals("PING")) {
PutStatusNotice("\001PING " + sLine.Token(1, true) + "\001");
} else if (sCommand.Equals("VERSION")) {
PutStatusNotice("\001VERSION " + CZNC::GetTag() + "\001");
}
} | 0 | [
"CWE-476"
] | znc | 2390ad111bde16a78c98ac44572090b33c3bd2d8 | 175,382,018,808,511,150,000,000,000,000,000,000,000 | 9 | Fix null pointer dereference in echo-message
The bug was introduced while fixing #1705. If a client did not enable
echo-message, and doesn't have a network, it crashes.
Thanks to LunarBNC for reporting this |
void ElectronRenderFrameObserver::DraggableRegionsChanged() {
blink::WebVector<blink::WebDraggableRegion> webregions =
render_frame_->GetWebFrame()->GetDocument().DraggableRegions();
std::vector<mojom::DraggableRegionPtr> regions;
for (auto& webregion : webregions) {
auto region = mojom::DraggableRegion::New();
render_frame_->ConvertViewportToWindow(&webregion.bounds);
region->bounds = webregion.bounds;
region->draggable = webregion.draggable;
regions.push_back(std::move(region));
}
mojo::Remote<mojom::ElectronBrowser> browser_remote;
render_frame_->GetBrowserInterfaceBroker()->GetInterface(
browser_remote.BindNewPipeAndPassReceiver());
browser_remote->UpdateDraggableRegions(std::move(regions));
} | 1 | [] | electron | 6b04cce91ad1563bd9555f2007a2ad5aa5487304 | 33,762,274,111,619,054,000,000,000,000,000,000,000 | 17 | fix: ensure ElectronBrowser mojo service is only bound to appropriate render frames (#33367)
* Make ElectronBrowser mojo interface frame associated. (#32815)
Co-authored-by: Marek Haranczyk <[email protected]>
* fix: ensure ElectronBrowser mojo service is only bound to appropriate render frames (#33323) (#33350)
* fix: ensure ElectronBrowser mojo service is only bound to authorized render frames
Notes: no-notes
* refactor: extract electron API IPC to its own mojo interface
* fix: just check main frame not primary main frame
Co-authored-by: trop[bot] <37223003+trop[bot]@users.noreply.github.com>
Co-authored-by: Marek Haranczyk <[email protected]> |
static void test_bug2247()
{
MYSQL_STMT *stmt;
MYSQL_RES *res;
int rc;
int i;
const char *create= "CREATE TABLE bug2247(id INT UNIQUE AUTO_INCREMENT)";
const char *insert= "INSERT INTO bug2247 VALUES (NULL)";
const char *SELECT= "SELECT id FROM bug2247";
const char *update= "UPDATE bug2247 SET id=id+10";
const char *drop= "DROP TABLE IF EXISTS bug2247";
ulonglong exp_count;
enum { NUM_ROWS= 5 };
myheader("test_bug2247");
if (!opt_silent)
fprintf(stdout, "\nChecking if stmt_affected_rows is not affected by\n"
"mysql_query ... ");
/* create table and insert few rows */
rc= mysql_query(mysql, drop);
myquery(rc);
rc= mysql_query(mysql, create);
myquery(rc);
stmt= mysql_simple_prepare(mysql, insert);
check_stmt(stmt);
for (i= 0; i < NUM_ROWS; ++i)
{
rc= mysql_stmt_execute(stmt);
check_execute(stmt, rc);
}
exp_count= mysql_stmt_affected_rows(stmt);
DIE_UNLESS(exp_count == 1);
rc= mysql_query(mysql, SELECT);
myquery(rc);
/*
mysql_store_result overwrites mysql->affected_rows. Check that
mysql_stmt_affected_rows() returns the same value, whereas
mysql_affected_rows() value is correct.
*/
res= mysql_store_result(mysql);
mytest(res);
DIE_UNLESS(mysql_affected_rows(mysql) == NUM_ROWS);
DIE_UNLESS(exp_count == mysql_stmt_affected_rows(stmt));
rc= mysql_query(mysql, update);
myquery(rc);
DIE_UNLESS(mysql_affected_rows(mysql) == NUM_ROWS);
DIE_UNLESS(exp_count == mysql_stmt_affected_rows(stmt));
mysql_free_result(res);
mysql_stmt_close(stmt);
/* check that mysql_stmt_store_result modifies mysql_stmt_affected_rows */
stmt= mysql_simple_prepare(mysql, SELECT);
check_stmt(stmt);
rc= mysql_stmt_execute(stmt);
check_execute(stmt, rc);
rc= mysql_stmt_store_result(stmt);
check_execute(stmt, rc);
exp_count= mysql_stmt_affected_rows(stmt);
DIE_UNLESS(exp_count == NUM_ROWS);
rc= mysql_query(mysql, insert);
myquery(rc);
DIE_UNLESS(mysql_affected_rows(mysql) == 1);
DIE_UNLESS(mysql_stmt_affected_rows(stmt) == exp_count);
mysql_stmt_close(stmt);
if (!opt_silent)
fprintf(stdout, "OK");
} | 0 | [
"CWE-284",
"CWE-295"
] | mysql-server | 3bd5589e1a5a93f9c224badf983cd65c45215390 | 137,210,530,178,944,030,000,000,000,000,000,000,000 | 77 | WL#6791 : Redefine client --ssl option to imply enforced encryption
# Changed the meaning of the --ssl=1 option of all client binaries
to mean force ssl, not try ssl and fail over to unencrypted
# Added a new MYSQL_OPT_SSL_ENFORCE mysql_options()
option to specify that an ssl connection is required.
# Added a new macro SSL_SET_OPTIONS() to the client
SSL handling headers that sets all the relevant SSL options at
once.
# Revamped all of the current native clients to use the new macro
# Removed some Windows line endings.
# Added proper handling of the new option into the ssl helper
headers.
# If SSL is mandatory assume that the media is secure enough
for the sha256 plugin to do unencrypted password exchange even
before establishing a connection.
# Set the default ssl cipher to DHE-RSA-AES256-SHA if none is
specified.
# updated test cases that require a non-default cipher to spawn
a mysql command line tool binary since mysqltest has no support
for specifying ciphers.
# updated the replication slave connection code to always enforce
SSL if any of the SSL config options is present.
# test cases added and updated.
# added a mysql_get_option() API to return mysql_options()
values. Used the new API inside the sha256 plugin.
# Fixed compilation warnings because of unused variables.
# Fixed test failures (mysql_ssl and bug13115401)
# Fixed whitespace issues.
# Fully implemented the mysql_get_option() function.
# Added a test case for mysql_get_option()
# fixed some trailing whitespace issues
# fixed some uint/int warnings in mysql_client_test.c
# removed shared memory option from non-windows get_options
tests
# moved MYSQL_OPT_LOCAL_INFILE to the uint options |
get_uint32 (const guint8 *stream, guint *result)
{
*result = (stream[0] << 24) + (stream[1] << 16) + (stream[2] << 8) + stream[3];
return stream + 4;
} | 0 | [] | gdk-pixbuf | deb78d971c4bcb9e3ccbb71e7925bc6baa707188 | 29,758,642,235,240,967,000,000,000,000,000,000,000 | 5 | Use g_try_malloc_n where it makes sense
This lets us avoid some manual overflow checks. |
getChunkOffsetTableSize(const Header& header)
{
//
// if there is a type in the header which indicates the part is not a currently supported type,
// use the chunkCount attribute
//
if(header.hasType() && !isSupportedType(header.type()))
{
if(header.hasChunkCount())
{
return header.chunkCount();
}
else
{
throw IEX_NAMESPACE::ArgExc ("unsupported header type to "
"get chunk offset table size");
}
}
//
// part is a known type - ignore the header attribute and compute the chunk size from the header
//
if (isTiled(header.type()) == false)
return getScanlineChunkOffsetTableSize(header);
else
return getTiledChunkOffsetTableSize(header);
} | 0 | [
"CWE-190"
] | openexr | 5db6f7aee79e3e75e8c3780b18b28699614dd08e | 218,755,444,552,932,660,000,000,000,000,000,000,000 | 30 | prevent overflow in bytesPerDeepLineTable (#1152)
* prevent overflow in bytesPerDeepLineTable
Signed-off-by: Peter Hillman <[email protected]>
* restore zapped 'const' from ImfMisc
Signed-off-by: Peter Hillman <[email protected]> |
static int shash_async_import(struct ahash_request *req, const void *in)
{
struct crypto_shash **ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
struct shash_desc *desc = ahash_request_ctx(req);
desc->tfm = *ctx;
desc->flags = req->base.flags;
return crypto_shash_import(desc, in);
} | 0 | [
"CWE-787"
] | linux | af3ff8045bbf3e32f1a448542e73abb4c8ceb6f1 | 320,963,378,115,950,950,000,000,000,000,000,000,000 | 10 | crypto: hmac - require that the underlying hash algorithm is unkeyed
Because the HMAC template didn't check that its underlying hash
algorithm is unkeyed, trying to use "hmac(hmac(sha3-512-generic))"
through AF_ALG or through KEYCTL_DH_COMPUTE resulted in the inner HMAC
being used without having been keyed, resulting in sha3_update() being
called without sha3_init(), causing a stack buffer overflow.
This is a very old bug, but it seems to have only started causing real
problems when SHA-3 support was added (requires CONFIG_CRYPTO_SHA3)
because the innermost hash's state is ->import()ed from a zeroed buffer,
and it just so happens that other hash algorithms are fine with that,
but SHA-3 is not. However, there could be arch or hardware-dependent
hash algorithms also affected; I couldn't test everything.
Fix the bug by introducing a function crypto_shash_alg_has_setkey()
which tests whether a shash algorithm is keyed. Then update the HMAC
template to require that its underlying hash algorithm is unkeyed.
Here is a reproducer:
#include <linux/if_alg.h>
#include <sys/socket.h>
int main()
{
int algfd;
struct sockaddr_alg addr = {
.salg_type = "hash",
.salg_name = "hmac(hmac(sha3-512-generic))",
};
char key[4096] = { 0 };
algfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
bind(algfd, (const struct sockaddr *)&addr, sizeof(addr));
setsockopt(algfd, SOL_ALG, ALG_SET_KEY, key, sizeof(key));
}
Here was the KASAN report from syzbot:
BUG: KASAN: stack-out-of-bounds in memcpy include/linux/string.h:341 [inline]
BUG: KASAN: stack-out-of-bounds in sha3_update+0xdf/0x2e0 crypto/sha3_generic.c:161
Write of size 4096 at addr ffff8801cca07c40 by task syzkaller076574/3044
CPU: 1 PID: 3044 Comm: syzkaller076574 Not tainted 4.14.0-mm1+ #25
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
Call Trace:
__dump_stack lib/dump_stack.c:17 [inline]
dump_stack+0x194/0x257 lib/dump_stack.c:53
print_address_description+0x73/0x250 mm/kasan/report.c:252
kasan_report_error mm/kasan/report.c:351 [inline]
kasan_report+0x25b/0x340 mm/kasan/report.c:409
check_memory_region_inline mm/kasan/kasan.c:260 [inline]
check_memory_region+0x137/0x190 mm/kasan/kasan.c:267
memcpy+0x37/0x50 mm/kasan/kasan.c:303
memcpy include/linux/string.h:341 [inline]
sha3_update+0xdf/0x2e0 crypto/sha3_generic.c:161
crypto_shash_update+0xcb/0x220 crypto/shash.c:109
shash_finup_unaligned+0x2a/0x60 crypto/shash.c:151
crypto_shash_finup+0xc4/0x120 crypto/shash.c:165
hmac_finup+0x182/0x330 crypto/hmac.c:152
crypto_shash_finup+0xc4/0x120 crypto/shash.c:165
shash_digest_unaligned+0x9e/0xd0 crypto/shash.c:172
crypto_shash_digest+0xc4/0x120 crypto/shash.c:186
hmac_setkey+0x36a/0x690 crypto/hmac.c:66
crypto_shash_setkey+0xad/0x190 crypto/shash.c:64
shash_async_setkey+0x47/0x60 crypto/shash.c:207
crypto_ahash_setkey+0xaf/0x180 crypto/ahash.c:200
hash_setkey+0x40/0x90 crypto/algif_hash.c:446
alg_setkey crypto/af_alg.c:221 [inline]
alg_setsockopt+0x2a1/0x350 crypto/af_alg.c:254
SYSC_setsockopt net/socket.c:1851 [inline]
SyS_setsockopt+0x189/0x360 net/socket.c:1830
entry_SYSCALL_64_fastpath+0x1f/0x96
Reported-by: syzbot <[email protected]>
Cc: <[email protected]>
Signed-off-by: Eric Biggers <[email protected]>
Signed-off-by: Herbert Xu <[email protected]> |
static void ipxitf_clear_primary_net(void)
{
ipx_primary_net = NULL;
if (ipxcfg_auto_select_primary)
ipx_primary_net = ipx_interfaces_head();
} | 0 | [
"CWE-416"
] | linux | ee0d8d8482345ff97a75a7d747efc309f13b0d80 | 93,248,132,106,398,750,000,000,000,000,000,000,000 | 6 | ipx: call ipxitf_put() in ioctl error path
We should call ipxitf_put() if the copy_to_user() fails.
Reported-by: 李强 <[email protected]>
Signed-off-by: Dan Carpenter <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
pk11_numbits(CK_BYTE_PTR data, unsigned int bytecnt) {
unsigned int bitcnt, i;
CK_BYTE top;
if (bytecnt == 0)
return (0);
bitcnt = bytecnt * 8;
for (i = 0; i < bytecnt; i++) {
top = data[i];
if (top == 0) {
bitcnt -= 8;
continue;
}
if (top & 0x80)
return (bitcnt);
if (top & 0x40)
return (bitcnt - 1);
if (top & 0x20)
return (bitcnt - 2);
if (top & 0x10)
return (bitcnt - 3);
if (top & 0x08)
return (bitcnt - 4);
if (top & 0x04)
return (bitcnt - 5);
if (top & 0x02)
return (bitcnt - 6);
if (top & 0x01)
return (bitcnt - 7);
break;
}
INSIST(0);
ISC_UNREACHABLE();
} | 1 | [
"CWE-617"
] | bind9 | 8d807cc21655eaa6e6a08afafeec3682c0f3f2ab | 101,759,683,702,441,860,000,000,000,000,000,000,000 | 34 | Fix crash in pk11_numbits() when native-pkcs11 is used
When pk11_numbits() is passed user-provided input that contains all
zeroes (via a crafted DNS message), it crashes with an assertion
failure. Fix that by properly handling such input.
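A hedged, self-contained sketch of tolerant handling for comparison (plain C, not the exact bind9 patch): an all-zero buffer simply has zero significant bits, so there is nothing to assert on.
```
static unsigned int
numbits_tolerant(const unsigned char *data, unsigned int bytecnt) {
	unsigned int i;

	for (i = 0; i < bytecnt; i++) {
		if (data[i] != 0) {
			unsigned int bits = (bytecnt - i) * 8;
			unsigned char top = data[i];

			while ((top & 0x80) == 0) {	/* strip leading zero bits */
				top = (unsigned char)(top << 1);
				bits--;
			}
			return (bits);
		}
	}
	return (0);	/* all-zero input: zero bits, no assertion failure */
}
```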
ff_layout_mark_ds_unreachable(struct pnfs_layout_segment *lseg, u32 idx)
{
struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
if (devid)
nfs4_mark_deviceid_unavailable(devid);
} | 0 | [
"CWE-787"
] | linux | ed34695e15aba74f45247f1ee2cf7e09d449f925 | 161,527,333,093,434,790,000,000,000,000,000,000,000 | 7 | pNFS/flexfiles: fix incorrect size check in decode_nfs_fh()
We (adam zabrocki, alexander matrosov, alexander tereshkin, maksym
bazalii) observed the check:
if (fh->size > sizeof(struct nfs_fh))
should not use the size of the nfs_fh struct which includes an extra two
bytes from the size field.
struct nfs_fh {
unsigned short size;
unsigned char data[NFS_MAXFHSIZE];
}
but should determine the size from data[NFS_MAXFHSIZE] so the memcpy
will not write 2 bytes beyond destination. The proposed fix is to
compare against the NFS_MAXFHSIZE directly, as is done elsewhere in fs
code base.
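A hedged, stand-alone sketch of the corrected bound (illustrative helper, not the decode_nfs_fh() hunk itself):
```
#include <string.h>

#define NFS_MAXFHSIZE 128

struct nfs_fh {
	unsigned short size;
	unsigned char  data[NFS_MAXFHSIZE];
};

/* Sketch: bound the copy by the data[] capacity, not sizeof(struct nfs_fh),
 * which is two bytes larger because of the size field. */
static int copy_fh(struct nfs_fh *dst, const unsigned char *src, unsigned int len)
{
	if (len > NFS_MAXFHSIZE)
		return -1;		/* reject oversized handles */
	dst->size = (unsigned short)len;
	memcpy(dst->data, src, len);
	return 0;
}
```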
Fixes: d67ae825a59d ("pnfs/flexfiles: Add the FlexFile Layout Driver")
Signed-off-by: Nikola Livic <[email protected]>
Signed-off-by: Dan Carpenter <[email protected]>
Signed-off-by: Trond Myklebust <[email protected]> |
static struct usb_usbvision *usbvision_alloc(struct usb_device *dev,
struct usb_interface *intf)
{
struct usb_usbvision *usbvision;
usbvision = kzalloc(sizeof(struct usb_usbvision), GFP_KERNEL);
if (usbvision == NULL)
return NULL;
usbvision->dev = dev;
if (v4l2_device_register(&intf->dev, &usbvision->v4l2_dev))
goto err_free;
if (v4l2_ctrl_handler_init(&usbvision->hdl, 4))
goto err_unreg;
usbvision->v4l2_dev.ctrl_handler = &usbvision->hdl;
mutex_init(&usbvision->v4l2_lock);
/* prepare control urb for control messages during interrupts */
usbvision->ctrl_urb = usb_alloc_urb(USBVISION_URB_FRAMES, GFP_KERNEL);
if (usbvision->ctrl_urb == NULL)
goto err_unreg;
init_waitqueue_head(&usbvision->ctrl_urb_wq);
return usbvision;
err_unreg:
v4l2_ctrl_handler_free(&usbvision->hdl);
v4l2_device_unregister(&usbvision->v4l2_dev);
err_free:
kfree(usbvision);
return NULL;
} | 0 | [
"CWE-17"
] | media_tree | fa52bd506f274b7619955917abfde355e3d19ffe | 173,641,340,457,385,430,000,000,000,000,000,000,000 | 33 | [media] usbvision: fix crash on detecting device with invalid configuration
The usbvision driver crashes when a specially crafted USB device with an invalid
number of interfaces or endpoints is detected. This fix adds checks that the
device has the proper configuration expected by the driver.
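A hedged sketch of the shape of such probe-time checks (the exact alt-setting and endpoint counts the driver expects are assumptions here, not taken from the patch):
```
#include <linux/usb.h>
#include <linux/errno.h>

/* Sketch: reject interfaces that do not look like the hardware the driver
 * was written for, before touching their endpoints. */
static int probe_sanity_check(struct usb_interface *intf)
{
	const struct usb_host_interface *alt = intf->cur_altsetting;

	if (intf->num_altsetting < 2 ||		/* assumed minimum */
	    alt->desc.bNumEndpoints < 1)
		return -ENODEV;
	return 0;
}
```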
Reported-by: Ralf Spenneberg <[email protected]>
Signed-off-by: Vladis Dronov <[email protected]>
Signed-off-by: Mauro Carvalho Chehab <[email protected]> |
uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
{
return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
} | 0 | [] | qemu | e4a511f8cc6f4a46d409fb5c9f72c38ba45f8d83 | 170,126,743,608,053,860,000,000,000,000,000,000,000 | 4 | exec: clamp accesses against the MemoryRegionSection
Because the clamping was done against the MemoryRegion,
address_space_rw was effectively broken if a write spanned
multiple sections that are not linear in underlying memory
(with the memory not being under an IOMMU).
This is visible with the MIPS rc4030 IOMMU, which is implemented
as a series of alias memory regions that point to the actual RAM.
Tested-by: Hervé Poussineau <[email protected]>
Tested-by: Mark Cave-Ayland <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]> |
static void dump_esp_combs(struct sk_buff *skb, const struct xfrm_tmpl *t)
{
struct sadb_prop *p;
int i, k;
p = (struct sadb_prop*)skb_put(skb, sizeof(struct sadb_prop));
p->sadb_prop_len = sizeof(struct sadb_prop)/8;
p->sadb_prop_exttype = SADB_EXT_PROPOSAL;
p->sadb_prop_replay = 32;
memset(p->sadb_prop_reserved, 0, sizeof(p->sadb_prop_reserved));
for (i=0; ; i++) {
const struct xfrm_algo_desc *ealg = xfrm_ealg_get_byidx(i);
if (!ealg)
break;
if (!ealg->pfkey_supported)
continue;
if (!(ealg_tmpl_set(t, ealg) && ealg->available))
continue;
for (k = 1; ; k++) {
struct sadb_comb *c;
const struct xfrm_algo_desc *aalg = xfrm_aalg_get_byidx(k);
if (!aalg)
break;
if (!aalg->pfkey_supported)
continue;
if (!(aalg_tmpl_set(t, aalg) && aalg->available))
continue;
c = (struct sadb_comb*)skb_put(skb, sizeof(struct sadb_comb));
memset(c, 0, sizeof(*c));
p->sadb_prop_len += sizeof(struct sadb_comb)/8;
c->sadb_comb_auth = aalg->desc.sadb_alg_id;
c->sadb_comb_auth_minbits = aalg->desc.sadb_alg_minbits;
c->sadb_comb_auth_maxbits = aalg->desc.sadb_alg_maxbits;
c->sadb_comb_encrypt = ealg->desc.sadb_alg_id;
c->sadb_comb_encrypt_minbits = ealg->desc.sadb_alg_minbits;
c->sadb_comb_encrypt_maxbits = ealg->desc.sadb_alg_maxbits;
c->sadb_comb_hard_addtime = 24*60*60;
c->sadb_comb_soft_addtime = 20*60*60;
c->sadb_comb_hard_usetime = 8*60*60;
c->sadb_comb_soft_usetime = 7*60*60;
}
}
} | 0 | [] | linux | 096f41d3a8fcbb8dde7f71379b1ca85fe213eded | 280,699,685,746,329,480,000,000,000,000,000,000,000 | 47 | af_key: Fix sadb_x_ipsecrequest parsing
The parsing of sadb_x_ipsecrequest is broken in a number of ways.
First of all, we're not verifying sadb_x_ipsecrequest_len. This
is needed when the structure carries addresses at the end. Worse,
we don't even look at the length when we parse those optional
addresses.
The migration code had similar parsing code that's better but
it also has some deficiencies. The length is overcounted first
of all as it includes the header itself. It also fails to check
the length before dereferencing the sa_family field.
This patch fixes those problems in parse_sockaddr_pair and then
uses it in parse_ipsecrequest.
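A hedged sketch of the kind of validation needed before the address family is read (return convention and families covered are illustrative):
```
#include <sys/socket.h>
#include <netinet/in.h>

/* Sketch: never dereference sa_family unless the claimed length can hold it,
 * then require the full per-family structure. */
static int sockaddr_len_ok(const struct sockaddr *sa, int len)
{
	if (len < (int)sizeof(struct sockaddr))
		return 0;			/* too short for sa_family */

	switch (sa->sa_family) {
	case AF_INET:
		return len >= (int)sizeof(struct sockaddr_in);
	case AF_INET6:
		return len >= (int)sizeof(struct sockaddr_in6);
	default:
		return 0;
	}
}
```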
Reported-by: Andrey Konovalov <[email protected]>
Signed-off-by: Herbert Xu <[email protected]>
Signed-off-by: Steffen Klassert <[email protected]> |
void mg_resolve_cancel(struct mg_connection *c) {
struct dns_data *tmp, *d;
for (d = s_reqs; d != NULL; d = tmp) {
tmp = d->next;
if (d->c == c) mg_dns_free(d);
}
} | 0 | [
"CWE-552"
] | mongoose | c65c8fdaaa257e0487ab0aaae9e8f6b439335945 | 128,560,437,396,790,510,000,000,000,000,000,000,000 | 7 | Protect against the directory traversal in mg_upload() |
void RemoteFsDevice::serviceAdded(const QString &name)
{
if (name==details.serviceName && constSambaAvahiProtocol==details.url.scheme()) {
sub=tr("Available");
updateStatus();
}
} | 1 | [
"CWE-20",
"CWE-22"
] | cantata | afc4f8315d3e96574925fb530a7004cc9e6ce3d3 | 15,274,209,997,685,657,000,000,000,000,000,000,000 | 7 | Remove internal Samba shre mounting code, this had some privilege escalation issues, and is not well tested |
static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
struct request_sock *req,
struct dst_entry *dst)
{
struct inet6_request_sock *treq;
struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
struct tcp6_sock *newtcp6sk;
struct inet_sock *newinet;
struct tcp_sock *newtp;
struct sock *newsk;
struct ipv6_txoptions *opt;
#ifdef CONFIG_TCP_MD5SIG
struct tcp_md5sig_key *key;
#endif
if (skb->protocol == htons(ETH_P_IP)) {
/*
* v6 mapped
*/
newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);
if (newsk == NULL)
return NULL;
newtcp6sk = (struct tcp6_sock *)newsk;
inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
newinet = inet_sk(newsk);
newnp = inet6_sk(newsk);
newtp = tcp_sk(newsk);
memcpy(newnp, np, sizeof(struct ipv6_pinfo));
ipv6_addr_set_v4mapped(newinet->inet_daddr, &newnp->daddr);
ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);
ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);
inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
newsk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif
newnp->pktoptions = NULL;
newnp->opt = NULL;
newnp->mcast_oif = inet6_iif(skb);
newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
/*
* No need to charge this sock to the relevant IPv6 refcnt debug socks count
* here, tcp_create_openreq_child now does this for us, see the comment in
* that function for the gory details. -acme
*/
/* It is tricky place. Until this moment IPv4 tcp
worked with IPv6 icsk.icsk_af_ops.
Sync it now.
*/
tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
return newsk;
}
treq = inet6_rsk(req);
opt = np->opt;
if (sk_acceptq_is_full(sk))
goto out_overflow;
if (!dst) {
dst = inet6_csk_route_req(sk, req);
if (!dst)
goto out;
}
newsk = tcp_create_openreq_child(sk, req, skb);
if (newsk == NULL)
goto out_nonewsk;
/*
* No need to charge this sock to the relevant IPv6 refcnt debug socks
* count here, tcp_create_openreq_child now does this for us, see the
* comment in that function for the gory details. -acme
*/
newsk->sk_gso_type = SKB_GSO_TCPV6;
__ip6_dst_store(newsk, dst, NULL, NULL);
newtcp6sk = (struct tcp6_sock *)newsk;
inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
newtp = tcp_sk(newsk);
newinet = inet_sk(newsk);
newnp = inet6_sk(newsk);
memcpy(newnp, np, sizeof(struct ipv6_pinfo));
ipv6_addr_copy(&newnp->daddr, &treq->rmt_addr);
ipv6_addr_copy(&newnp->saddr, &treq->loc_addr);
ipv6_addr_copy(&newnp->rcv_saddr, &treq->loc_addr);
newsk->sk_bound_dev_if = treq->iif;
/* Now IPv6 options...
First: no IPv4 options.
*/
newinet->opt = NULL;
newnp->ipv6_fl_list = NULL;
/* Clone RX bits */
newnp->rxopt.all = np->rxopt.all;
/* Clone pktoptions received with SYN */
newnp->pktoptions = NULL;
if (treq->pktopts != NULL) {
newnp->pktoptions = skb_clone(treq->pktopts, GFP_ATOMIC);
kfree_skb(treq->pktopts);
treq->pktopts = NULL;
if (newnp->pktoptions)
skb_set_owner_r(newnp->pktoptions, newsk);
}
newnp->opt = NULL;
newnp->mcast_oif = inet6_iif(skb);
newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
/* Clone native IPv6 options from listening socket (if any)
Yes, keeping reference count would be much more clever,
but we make one more one thing there: reattach optmem
to newsk.
*/
if (opt) {
newnp->opt = ipv6_dup_options(newsk, opt);
if (opt != np->opt)
sock_kfree_s(sk, opt, opt->tot_len);
}
inet_csk(newsk)->icsk_ext_hdr_len = 0;
if (newnp->opt)
inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
newnp->opt->opt_flen);
tcp_mtup_init(newsk);
tcp_sync_mss(newsk, dst_mtu(dst));
newtp->advmss = dst_metric_advmss(dst);
tcp_initialize_rcv_mss(newsk);
newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
#ifdef CONFIG_TCP_MD5SIG
/* Copy over the MD5 key from the original socket */
if ((key = tcp_v6_md5_do_lookup(sk, &newnp->daddr)) != NULL) {
/* We're using one, so create a matching key
* on the newsk structure. If we fail to get
* memory, then we end up not copying the key
* across. Shucks.
*/
char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC);
if (newkey != NULL)
tcp_v6_md5_do_add(newsk, &newnp->daddr,
newkey, key->keylen);
}
#endif
if (__inet_inherit_port(sk, newsk) < 0) {
sock_put(newsk);
goto out;
}
__inet6_hash(newsk, NULL);
return newsk;
out_overflow:
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
if (opt && opt != np->opt)
sock_kfree_s(sk, opt, opt->tot_len);
dst_release(dst);
out:
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
return NULL;
} | 1 | [
"CWE-362"
] | linux-2.6 | f6d8bd051c391c1c0458a30b2a7abcd939329259 | 118,765,001,796,570,150,000,000,000,000,000,000,000 | 186 | inet: add RCU protection to inet->opt
We lack proper synchronization to manipulate inet->opt ip_options
Problem is ip_make_skb() calls ip_setup_cork() and
ip_setup_cork() possibly makes a copy of ipc->opt (struct ip_options),
without any protection against another thread manipulating inet->opt.
Another thread can change inet->opt pointer and free old one under us.
Use RCU to protect inet->opt (changed to inet->inet_opt).
Instead of handling atomic refcounts, just copy ip_options when
necessary, to avoid cache line dirtying.
We can't insert an rcu_head in struct ip_options since it's included in
skb->cb[], so this patch is large because I had to introduce a new
ip_options_rcu structure.
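A hedged sketch of that wrapper (field names as one would expect; the actual layout may differ slightly):
```
/* Sketch: wrap ip_options so the copy hanging off the socket can be freed
 * via RCU, while the copy embedded in skb->cb[] stays a plain struct. */
struct ip_options_rcu {
	struct rcu_head rcu;
	struct ip_options opt;
};
```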
Signed-off-by: Eric Dumazet <[email protected]>
Cc: Herbert Xu <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
int ret;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
switch (cmd) {
case IPT_SO_GET_INFO:
ret = get_info(user, len, 0);
break;
case IPT_SO_GET_ENTRIES:
ret = get_entries(user, len);
break;
case IPT_SO_GET_REVISION_MATCH:
case IPT_SO_GET_REVISION_TARGET: {
struct ipt_get_revision rev;
int target;
if (*len != sizeof(rev)) {
ret = -EINVAL;
break;
}
if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
ret = -EFAULT;
break;
}
if (cmd == IPT_SO_GET_REVISION_TARGET)
target = 1;
else
target = 0;
try_then_request_module(xt_find_revision(AF_INET, rev.name,
rev.revision,
target, &ret),
"ipt_%s", rev.name);
break;
}
default:
duprintf("do_ipt_get_ctl: unknown request %i\n", cmd);
ret = -EINVAL;
}
return ret;
} | 0 | [
"CWE-787"
] | linux | 9fa492cdc160cd27ce1046cb36f47d3b2b1efa21 | 56,392,165,888,258,620,000,000,000,000,000,000,000 | 49 | [NETFILTER]: x_tables: simplify compat API
Split the xt_compat_match/xt_compat_target into smaller type-safe functions
performing just one operation. Handle all alignment and size-related
conversions centrally in these function instead of requiring each module to
implement a full-blown conversion function. Replace ->compat callback by
->compat_from_user and ->compat_to_user callbacks, responsible for
converting just a single private structure.
Signed-off-by: Patrick McHardy <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
static void
xmlXPathOptimizeExpression(xmlXPathCompExprPtr comp, xmlXPathStepOpPtr op)
{
/*
* Try to rewrite "descendant-or-self::node()/foo" to an optimized
* internal representation.
*/
if ((op->op == XPATH_OP_COLLECT /* 11 */) &&
(op->ch1 != -1) &&
(op->ch2 == -1 /* no predicate */))
{
xmlXPathStepOpPtr prevop = &comp->steps[op->ch1];
if ((prevop->op == XPATH_OP_COLLECT /* 11 */) &&
((xmlXPathAxisVal) prevop->value ==
AXIS_DESCENDANT_OR_SELF) &&
(prevop->ch2 == -1) &&
((xmlXPathTestVal) prevop->value2 == NODE_TEST_TYPE) &&
((xmlXPathTypeVal) prevop->value3 == NODE_TYPE_NODE))
{
/*
* This is a "descendant-or-self::node()" without predicates.
* Try to eliminate it.
*/
switch ((xmlXPathAxisVal) op->value) {
case AXIS_CHILD:
case AXIS_DESCENDANT:
/*
* Convert "descendant-or-self::node()/child::" or
* "descendant-or-self::node()/descendant::" to
* "descendant::"
*/
op->ch1 = prevop->ch1;
op->value = AXIS_DESCENDANT;
break;
case AXIS_SELF:
case AXIS_DESCENDANT_OR_SELF:
/*
* Convert "descendant-or-self::node()/self::" or
* "descendant-or-self::node()/descendant-or-self::" to
* to "descendant-or-self::"
*/
op->ch1 = prevop->ch1;
op->value = AXIS_DESCENDANT_OR_SELF;
break;
default:
break;
}
}
}
/* Recurse */
if (op->ch1 != -1)
xmlXPathOptimizeExpression(comp, &comp->steps[op->ch1]);
    if (op->ch2 != -1)
        xmlXPathOptimizeExpression(comp, &comp->steps[op->ch2]);
} | 0 | [] | libxml2 | 03c6723043775122313f107695066e5744189a08 | 263,052,186,685,197,840,000,000,000,000,000,000,000 | 58 | Handling of XPath function arguments in error case
The XPath engine tries to guarantee that every XPath function can pop
'nargs' non-NULL values off the stack. libxslt, for example, relies on
this assumption. But the check isn't thorough enough if there are errors
during the evaluation of arguments. This can lead to segfaults:
https://mail.gnome.org/archives/xslt/2013-December/msg00005.html
This commit makes the handling of function arguments more robust.
* Bail out early when evaluation of XPath function arguments fails.
* Make sure that there are 'nargs' arguments in the current call frame. |
proto_tree_add_ipxnet_format_value(proto_tree *tree, int hfindex, tvbuff_t *tvb,
gint start, gint length, guint32 value,
const char *format, ...)
{
proto_item *pi;
va_list ap;
pi = proto_tree_add_ipxnet(tree, hfindex, tvb, start, length, value);
if (pi != tree) {
va_start(ap, format);
proto_tree_set_representation_value(pi, format, ap);
va_end(ap);
}
return pi;
} | 0 | [
"CWE-401"
] | wireshark | a9fc769d7bb4b491efb61c699d57c9f35269d871 | 118,360,033,098,433,380,000,000,000,000,000,000,000 | 16 | epan: Fix a memory leak.
Make sure _proto_tree_add_bits_ret_val allocates a bits array using the
packet scope, otherwise we leak memory. Fixes #17032. |
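A hedged sketch of the allocation pattern this implies (the length expression is illustrative; newer dissector code passes pinfo->pool instead of the scope accessor):
```
/* Sketch: packet-scoped memory is released automatically when dissection of
 * the current packet ends, so the scratch bits array cannot leak. */
guint8 *bits = (guint8 *)wmem_alloc0(wmem_packet_scope(), (size_t)no_of_bits / 8 + 1);
```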
void (*SSL_get_info_callback(const SSL *ssl)) (const SSL * /* ssl */ ,
int /* type */ ,
int /* val */ ) {
return ssl->info_callback;
} | 0 | [
"CWE-310"
] | openssl | 56f1acf5ef8a432992497a04792ff4b3b2c6f286 | 127,419,286,828,639,560,000,000,000,000,000,000,000 | 5 | Disable SSLv2 default build, default negotiation and weak ciphers.
SSLv2 is by default disabled at build-time. Builds that are not
configured with "enable-ssl2" will not support SSLv2. Even if
"enable-ssl2" is used, users who want to negotiate SSLv2 via the
version-flexible SSLv23_method() will need to explicitly call either
of:
SSL_CTX_clear_options(ctx, SSL_OP_NO_SSLv2);
or
SSL_clear_options(ssl, SSL_OP_NO_SSLv2);
as appropriate. Even if either of those is used, or the application
explicitly uses the version-specific SSLv2_method() or its client
or server variants, SSLv2 ciphers vulnerable to exhaustive search
key recovery have been removed. Specifically, the SSLv2 40-bit
EXPORT ciphers, and SSLv2 56-bit DES are no longer available.
Mitigation for CVE-2016-0800
Reviewed-by: Emilia Käsper <[email protected]> |
Frame_positional_cursor(const Frame_cursor &position_cursor) :
position_cursor(position_cursor), top_bound(NULL),
bottom_bound(NULL), offset(NULL), overflowed(false),
negative_offset(false) {} | 0 | [] | server | ba4927e520190bbad763bb5260ae154f29a61231 | 262,348,241,958,362,500,000,000,000,000,000,000,000 | 4 | MDEV-19398: Assertion `item1->type() == Item::FIELD_ITEM ...
Window Functions code tries to minimize the number of times it
needs to sort the select's resultset by finding "compatible"
OVER (PARTITION BY ... ORDER BY ...) clauses.
This employs compare_order_elements(). That function assumed that
the order expressions are Item_field-derived objects (that refer
to a temp.table). But this is not always the case: one can
construct queries order expressions are arbitrary item expressions.
Add handling for such expressions: sort them according to the window
specification they appeared in.
This means we cannot detect that two compatible PARTITION BY clauses
that use expressions can share the sorting step.
But at least we won't crash. |
ikev2_auth_print(netdissect_options *ndo, u_char tpay,
const struct isakmp_gen *ext,
u_int item_len _U_, const u_char *ep,
uint32_t phase _U_, uint32_t doi _U_,
uint32_t proto _U_, int depth _U_)
{
struct ikev2_auth a;
const char *v2_auth[]={ "invalid", "rsasig",
"shared-secret", "dsssig" };
const u_char *authdata = (const u_char*)ext + sizeof(a);
unsigned int len;
ND_TCHECK(*ext);
UNALIGNED_MEMCPY(&a, ext, sizeof(a));
ikev2_pay_print(ndo, NPSTR(tpay), a.h.critical);
len = ntohs(a.h.len);
ND_PRINT((ndo," len=%d method=%s", len-4,
STR_OR_ID(a.auth_method, v2_auth)));
if (1 < ndo->ndo_vflag && 4 < len) {
ND_PRINT((ndo," authdata=("));
if (!rawprint(ndo, (const uint8_t *)authdata, len - sizeof(a)))
goto trunc;
ND_PRINT((ndo,") "));
} else if(ndo->ndo_vflag && 4 < len) {
if(!ike_show_somedata(ndo, authdata, ep)) goto trunc;
}
return (const u_char *)ext + len;
trunc:
ND_PRINT((ndo," [|%s]", NPSTR(tpay)));
return NULL;
} | 1 | [
"CWE-399",
"CWE-835"
] | tcpdump | c2ef693866beae071a24b45c49f9674af1df4028 | 281,122,763,584,197,050,000,000,000,000,000,000,000 | 34 | CVE-2017-12990/Fix printing of ISAKMPv1 Notification payload data.
The closest thing to a specification for the contents of the payload
data is draft-ietf-ipsec-notifymsg-04, and nothing in there says that it
is ever a complete ISAKMP message, so don't dissect types we don't have
specific code for as a complete ISAKMP message.
While we're at it, fix a comment, and clean up printing of V1 Nonce,
V2 Authentication payloads, and v2 Notice payloads.
This fixes an infinite loop discovered by Forcepoint's security
researchers Otto Airamo & Antti Levomäki.
Add a test using the capture file supplied by the reporter(s). |
ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
{
struct ieee80211_key *key;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
if (unlikely(info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT)) {
tx->key = NULL;
return TX_CONTINUE;
}
if (tx->sta &&
(key = rcu_dereference(tx->sta->ptk[tx->sta->ptk_idx])))
tx->key = key;
else if (ieee80211_is_group_privacy_action(tx->skb) &&
(key = rcu_dereference(tx->sdata->default_multicast_key)))
tx->key = key;
else if (ieee80211_is_mgmt(hdr->frame_control) &&
is_multicast_ether_addr(hdr->addr1) &&
ieee80211_is_robust_mgmt_frame(tx->skb) &&
(key = rcu_dereference(tx->sdata->default_mgmt_key)))
tx->key = key;
else if (is_multicast_ether_addr(hdr->addr1) &&
(key = rcu_dereference(tx->sdata->default_multicast_key)))
tx->key = key;
else if (!is_multicast_ether_addr(hdr->addr1) &&
(key = rcu_dereference(tx->sdata->default_unicast_key)))
tx->key = key;
else
tx->key = NULL;
if (tx->key) {
bool skip_hw = false;
/* TODO: add threshold stuff again */
switch (tx->key->conf.cipher) {
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104:
case WLAN_CIPHER_SUITE_TKIP:
if (!ieee80211_is_data_present(hdr->frame_control))
tx->key = NULL;
break;
case WLAN_CIPHER_SUITE_CCMP:
case WLAN_CIPHER_SUITE_CCMP_256:
case WLAN_CIPHER_SUITE_GCMP:
case WLAN_CIPHER_SUITE_GCMP_256:
if (!ieee80211_is_data_present(hdr->frame_control) &&
!ieee80211_use_mfp(hdr->frame_control, tx->sta,
tx->skb) &&
!ieee80211_is_group_privacy_action(tx->skb))
tx->key = NULL;
else
skip_hw = (tx->key->conf.flags &
IEEE80211_KEY_FLAG_SW_MGMT_TX) &&
ieee80211_is_mgmt(hdr->frame_control);
break;
case WLAN_CIPHER_SUITE_AES_CMAC:
case WLAN_CIPHER_SUITE_BIP_CMAC_256:
case WLAN_CIPHER_SUITE_BIP_GMAC_128:
case WLAN_CIPHER_SUITE_BIP_GMAC_256:
if (!ieee80211_is_mgmt(hdr->frame_control))
tx->key = NULL;
break;
}
if (unlikely(tx->key && tx->key->flags & KEY_FLAG_TAINTED &&
!ieee80211_is_deauth(hdr->frame_control)))
return TX_DROP;
if (!skip_hw && tx->key &&
tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)
info->control.hw_key = &tx->key->conf;
} else if (ieee80211_is_data_present(hdr->frame_control) && tx->sta &&
test_sta_flag(tx->sta, WLAN_STA_USES_ENCRYPTION)) {
return TX_DROP;
}
return TX_CONTINUE;
} | 0 | [
"CWE-476"
] | linux | bddc0c411a45d3718ac535a070f349be8eca8d48 | 149,054,547,887,370,150,000,000,000,000,000,000,000 | 80 | mac80211: Fix NULL ptr deref for injected rate info
The commit cb17ed29a7a5 ("mac80211: parse radiotap header when selecting Tx
queue") moved the code to validate the radiotap header from
ieee80211_monitor_start_xmit to ieee80211_parse_tx_radiotap. This made is
possible to share more code with the new Tx queue selection code for
injected frames. But at the same time, it now required the call of
ieee80211_parse_tx_radiotap at the beginning of functions which wanted to
handle the radiotap header. And this broke the rate parser for radiotap
header parser.
The radiotap parser for rates is operating most of the time only on the
data in the actual radiotap header. But for the 802.11a/b/g rates, it must
also know the selected band from the chandef information. But this
information is only written to the ieee80211_tx_info at the end of the
ieee80211_monitor_start_xmit - long after ieee80211_parse_tx_radiotap was
already called. The info->band information was therefore always 0
(NL80211_BAND_2GHZ) when the parser code tried to access it.
For a 5GHz only device, injecting a frame with 802.11a rates would cause a
NULL pointer dereference because local->hw.wiphy->bands[NL80211_BAND_2GHZ]
would most likely have been NULL when the radiotap parser searched for the
correct rate index of the driver.
Cc: [email protected]
Reported-by: Ben Greear <[email protected]>
Fixes: cb17ed29a7a5 ("mac80211: parse radiotap header when selecting Tx queue")
Signed-off-by: Mathy Vanhoef <[email protected]>
[[email protected]: added commit message]
Signed-off-by: Sven Eckelmann <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Johannes Berg <[email protected]> |
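Editor's note: the crash described above comes from indexing a per-band table with a band value that was never initialised, on hardware whose 2 GHz slot is NULL. A minimal user-space sketch of that failure mode (assumed structures, not mac80211 code):

#include <stdio.h>
#include <stddef.h>

enum band { BAND_2GHZ, BAND_5GHZ, NUM_BANDS };

struct band_info { int n_bitrates; };

static int rate_count(struct band_info *bands[NUM_BANDS], enum band b)
{
    /* Without this guard, a 5 GHz-only device (bands[BAND_2GHZ] == NULL)
     * dereferences NULL when the caller passes an uninitialised band of 0. */
    if (b >= NUM_BANDS || bands[b] == NULL)
        return -1;
    return bands[b]->n_bitrates;
}

int main(void)
{
    struct band_info b5 = { 8 };
    struct band_info *bands[NUM_BANDS] = { NULL, &b5 };  /* 5 GHz-only hardware */
    printf("%d\n", rate_count(bands, BAND_2GHZ));         /* -1 instead of a crash */
    printf("%d\n", rate_count(bands, BAND_5GHZ));         /* 8 */
    return 0;
}
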
void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu)
{
struct kvm_pit *pit = vcpu->kvm->arch.vpit;
struct hrtimer *timer;
if (!kvm_vcpu_is_bsp(vcpu) || !pit)
return;
timer = &pit->pit_state.timer;
mutex_lock(&pit->pit_state.lock);
if (hrtimer_cancel(timer))
hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
mutex_unlock(&pit->pit_state.lock);
} | 0 | [
"CWE-362"
] | kvm | 2febc839133280d5a5e8e1179c94ea674489dae2 | 42,503,950,634,360,176,000,000,000,000,000,000,000 | 14 | KVM: x86: Improve thread safety in pit
There's a race condition in the PIT emulation code in KVM. In
__kvm_migrate_pit_timer the pit_timer object is accessed without
synchronization. If the race condition occurs at the wrong time this
can crash the host kernel.
This fixes CVE-2014-3611.
Cc: [email protected]
Signed-off-by: Andrew Honig <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]> |
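Editor's note: the fixed function above wraps the cancel/re-arm sequence in pit_state.lock. The following user-space sketch shows the same locking rule with assumed names and pthreads standing in for the kernel primitives; it is an illustration, not KVM code.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct pit_like_state {
    pthread_mutex_t lock;
    bool armed;        /* stands in for "hrtimer is currently queued" */
    long expires;
};

/* Cancel-and-rearm must happen with the same lock held that every other
 * path takes before touching the shared timer state. */
static void migrate_timer(struct pit_like_state *s, long new_expiry)
{
    pthread_mutex_lock(&s->lock);
    if (s->armed)                  /* "hrtimer_cancel() found it queued" */
        s->expires = new_expiry;   /* "hrtimer_start_expires()"          */
    pthread_mutex_unlock(&s->lock);
}

int main(void)
{
    static struct pit_like_state s = { PTHREAD_MUTEX_INITIALIZER, true, 100 };
    migrate_timer(&s, 200);
    printf("expires=%ld\n", s.expires);
    return 0;
}
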
regatom(int *flagp)
{
char_u *ret;
int flags;
int c;
char_u *p;
int extra = 0;
int save_prev_at_start = prev_at_start;
*flagp = WORST; // Tentatively.
c = getchr();
switch (c)
{
case Magic('^'):
ret = regnode(BOL);
break;
case Magic('$'):
ret = regnode(EOL);
#if defined(FEAT_SYN_HL) || defined(PROTO)
had_eol = TRUE;
#endif
break;
case Magic('<'):
ret = regnode(BOW);
break;
case Magic('>'):
ret = regnode(EOW);
break;
case Magic('_'):
c = no_Magic(getchr());
if (c == '^') // "\_^" is start-of-line
{
ret = regnode(BOL);
break;
}
if (c == '$') // "\_$" is end-of-line
{
ret = regnode(EOL);
#if defined(FEAT_SYN_HL) || defined(PROTO)
had_eol = TRUE;
#endif
break;
}
extra = ADD_NL;
*flagp |= HASNL;
// "\_[" is character range plus newline
if (c == '[')
goto collection;
// "\_x" is character class plus newline
// FALLTHROUGH
// Character classes.
case Magic('.'):
case Magic('i'):
case Magic('I'):
case Magic('k'):
case Magic('K'):
case Magic('f'):
case Magic('F'):
case Magic('p'):
case Magic('P'):
case Magic('s'):
case Magic('S'):
case Magic('d'):
case Magic('D'):
case Magic('x'):
case Magic('X'):
case Magic('o'):
case Magic('O'):
case Magic('w'):
case Magic('W'):
case Magic('h'):
case Magic('H'):
case Magic('a'):
case Magic('A'):
case Magic('l'):
case Magic('L'):
case Magic('u'):
case Magic('U'):
p = vim_strchr(classchars, no_Magic(c));
if (p == NULL)
EMSG_RET_NULL(_(e_invalid_use_of_underscore));
// When '.' is followed by a composing char ignore the dot, so that
// the composing char is matched here.
if (enc_utf8 && c == Magic('.') && utf_iscomposing(peekchr()))
{
c = getchr();
goto do_multibyte;
}
ret = regnode(classcodes[p - classchars] + extra);
*flagp |= HASWIDTH | SIMPLE;
break;
case Magic('n'):
if (reg_string)
{
// In a string "\n" matches a newline character.
ret = regnode(EXACTLY);
regc(NL);
regc(NUL);
*flagp |= HASWIDTH | SIMPLE;
}
else
{
// In buffer text "\n" matches the end of a line.
ret = regnode(NEWL);
*flagp |= HASWIDTH | HASNL;
}
break;
case Magic('('):
if (one_exactly)
EMSG_ONE_RET_NULL;
ret = reg(REG_PAREN, &flags);
if (ret == NULL)
return NULL;
*flagp |= flags & (HASWIDTH | SPSTART | HASNL | HASLOOKBH);
break;
case NUL:
case Magic('|'):
case Magic('&'):
case Magic(')'):
if (one_exactly)
EMSG_ONE_RET_NULL;
// Supposed to be caught earlier.
IEMSG_RET_NULL(_(e_internal_error_in_regexp));
// NOTREACHED
case Magic('='):
case Magic('?'):
case Magic('+'):
case Magic('@'):
case Magic('{'):
case Magic('*'):
c = no_Magic(c);
EMSG3_RET_NULL(_(e_str_chr_follows_nothing),
(c == '*' ? reg_magic >= MAGIC_ON : reg_magic == MAGIC_ALL), c);
// NOTREACHED
case Magic('~'): // previous substitute pattern
if (reg_prev_sub != NULL)
{
char_u *lp;
ret = regnode(EXACTLY);
lp = reg_prev_sub;
while (*lp != NUL)
regc(*lp++);
regc(NUL);
if (*reg_prev_sub != NUL)
{
*flagp |= HASWIDTH;
if ((lp - reg_prev_sub) == 1)
*flagp |= SIMPLE;
}
}
else
EMSG_RET_NULL(_(e_no_previous_substitute_regular_expression));
break;
case Magic('1'):
case Magic('2'):
case Magic('3'):
case Magic('4'):
case Magic('5'):
case Magic('6'):
case Magic('7'):
case Magic('8'):
case Magic('9'):
{
int refnum;
refnum = c - Magic('0');
if (!seen_endbrace(refnum))
return NULL;
ret = regnode(BACKREF + refnum);
}
break;
case Magic('z'):
{
c = no_Magic(getchr());
switch (c)
{
#ifdef FEAT_SYN_HL
case '(': if ((reg_do_extmatch & REX_SET) == 0)
EMSG_RET_NULL(_(e_z_not_allowed_here));
if (one_exactly)
EMSG_ONE_RET_NULL;
ret = reg(REG_ZPAREN, &flags);
if (ret == NULL)
return NULL;
*flagp |= flags & (HASWIDTH|SPSTART|HASNL|HASLOOKBH);
re_has_z = REX_SET;
break;
case '1':
case '2':
case '3':
case '4':
case '5':
case '6':
case '7':
case '8':
case '9': if ((reg_do_extmatch & REX_USE) == 0)
EMSG_RET_NULL(_(e_z1_z9_not_allowed_here));
ret = regnode(ZREF + c - '0');
re_has_z = REX_USE;
break;
#endif
case 's': ret = regnode(MOPEN + 0);
if (re_mult_next("\\zs") == FAIL)
return NULL;
break;
case 'e': ret = regnode(MCLOSE + 0);
if (re_mult_next("\\ze") == FAIL)
return NULL;
break;
default: EMSG_RET_NULL(_(e_invalid_character_after_bsl_z));
}
}
break;
case Magic('%'):
{
c = no_Magic(getchr());
switch (c)
{
// () without a back reference
case '(':
if (one_exactly)
EMSG_ONE_RET_NULL;
ret = reg(REG_NPAREN, &flags);
if (ret == NULL)
return NULL;
*flagp |= flags & (HASWIDTH | SPSTART | HASNL | HASLOOKBH);
break;
// Catch \%^ and \%$ regardless of where they appear in the
// pattern -- regardless of whether or not it makes sense.
case '^':
ret = regnode(RE_BOF);
break;
case '$':
ret = regnode(RE_EOF);
break;
case '#':
ret = regnode(CURSOR);
break;
case 'V':
ret = regnode(RE_VISUAL);
break;
case 'C':
ret = regnode(RE_COMPOSING);
break;
// \%[abc]: Emit as a list of branches, all ending at the last
// branch which matches nothing.
case '[':
if (one_exactly) // doesn't nest
EMSG_ONE_RET_NULL;
{
char_u *lastbranch;
char_u *lastnode = NULL;
char_u *br;
ret = NULL;
while ((c = getchr()) != ']')
{
if (c == NUL)
EMSG2_RET_NULL(_(e_missing_sb_after_str),
reg_magic == MAGIC_ALL);
br = regnode(BRANCH);
if (ret == NULL)
ret = br;
else
{
regtail(lastnode, br);
if (reg_toolong)
return NULL;
}
ungetchr();
one_exactly = TRUE;
lastnode = regatom(flagp);
one_exactly = FALSE;
if (lastnode == NULL)
return NULL;
}
if (ret == NULL)
EMSG2_RET_NULL(_(e_empty_str_brackets),
reg_magic == MAGIC_ALL);
lastbranch = regnode(BRANCH);
br = regnode(NOTHING);
if (ret != JUST_CALC_SIZE)
{
regtail(lastnode, br);
regtail(lastbranch, br);
// connect all branches to the NOTHING
// branch at the end
for (br = ret; br != lastnode; )
{
if (OP(br) == BRANCH)
{
regtail(br, lastbranch);
if (reg_toolong)
return NULL;
br = OPERAND(br);
}
else
br = regnext(br);
}
}
*flagp &= ~(HASWIDTH | SIMPLE);
break;
}
case 'd': // %d123 decimal
case 'o': // %o123 octal
case 'x': // %xab hex 2
case 'u': // %uabcd hex 4
case 'U': // %U1234abcd hex 8
{
long i;
switch (c)
{
case 'd': i = getdecchrs(); break;
case 'o': i = getoctchrs(); break;
case 'x': i = gethexchrs(2); break;
case 'u': i = gethexchrs(4); break;
case 'U': i = gethexchrs(8); break;
default: i = -1; break;
}
if (i < 0 || i > INT_MAX)
EMSG2_RET_NULL(
_(e_invalid_character_after_str_2),
reg_magic == MAGIC_ALL);
if (use_multibytecode(i))
ret = regnode(MULTIBYTECODE);
else
ret = regnode(EXACTLY);
if (i == 0)
regc(0x0a);
else
regmbc(i);
regc(NUL);
*flagp |= HASWIDTH;
break;
}
default:
if (VIM_ISDIGIT(c) || c == '<' || c == '>'
|| c == '\'' || c == '.')
{
long_u n = 0;
int cmp;
int cur = FALSE;
cmp = c;
if (cmp == '<' || cmp == '>')
c = getchr();
if (no_Magic(c) == '.')
{
cur = TRUE;
c = getchr();
}
while (VIM_ISDIGIT(c))
{
n = n * 10 + (c - '0');
c = getchr();
}
if (c == '\'' && n == 0)
{
// "\%'m", "\%<'m" and "\%>'m": Mark
c = getchr();
ret = regnode(RE_MARK);
if (ret == JUST_CALC_SIZE)
regsize += 2;
else
{
*regcode++ = c;
*regcode++ = cmp;
}
break;
}
else if (c == 'l' || c == 'c' || c == 'v')
{
if (cur && n)
{
semsg(_(e_regexp_number_after_dot_pos_search), no_Magic(c));
rc_did_emsg = TRUE;
return NULL;
}
if (c == 'l')
{
if (cur)
n = curwin->w_cursor.lnum;
ret = regnode(RE_LNUM);
if (save_prev_at_start)
at_start = TRUE;
}
else if (c == 'c')
{
if (cur)
{
n = curwin->w_cursor.col;
n++;
}
ret = regnode(RE_COL);
}
else
{
if (cur)
{
colnr_T vcol = 0;
getvvcol(curwin, &curwin->w_cursor,
NULL, NULL, &vcol);
++vcol;
n = vcol;
}
ret = regnode(RE_VCOL);
}
if (ret == JUST_CALC_SIZE)
regsize += 5;
else
{
// put the number and the optional
// comparator after the opcode
regcode = re_put_long(regcode, n);
*regcode++ = cmp;
}
break;
}
}
EMSG2_RET_NULL(_(e_invalid_character_after_str),
reg_magic == MAGIC_ALL);
}
}
break;
case Magic('['):
collection:
{
char_u *lp;
// If there is no matching ']', we assume the '[' is a normal
// character. This makes 'incsearch' and ":help [" work.
lp = skip_anyof(regparse);
if (*lp == ']') // there is a matching ']'
{
int startc = -1; // > 0 when next '-' is a range
int endc;
// In a character class, different parsing rules apply.
// Not even \ is special anymore, nothing is.
if (*regparse == '^') // Complement of range.
{
ret = regnode(ANYBUT + extra);
regparse++;
}
else
ret = regnode(ANYOF + extra);
// At the start ']' and '-' mean the literal character.
if (*regparse == ']' || *regparse == '-')
{
startc = *regparse;
regc(*regparse++);
}
while (*regparse != NUL && *regparse != ']')
{
if (*regparse == '-')
{
++regparse;
// The '-' is not used for a range at the end and
// after or before a '\n'.
if (*regparse == ']' || *regparse == NUL
|| startc == -1
|| (regparse[0] == '\\' && regparse[1] == 'n'))
{
regc('-');
startc = '-'; // [--x] is a range
}
else
{
// Also accept "a-[.z.]"
endc = 0;
if (*regparse == '[')
endc = get_coll_element(&regparse);
if (endc == 0)
{
if (has_mbyte)
endc = mb_ptr2char_adv(&regparse);
else
endc = *regparse++;
}
// Handle \o40, \x20 and \u20AC style sequences
if (endc == '\\' && !reg_cpo_lit && !reg_cpo_bsl)
endc = coll_get_char();
if (startc > endc)
EMSG_RET_NULL(_(e_reverse_range_in_character_class));
if (has_mbyte && ((*mb_char2len)(startc) > 1
|| (*mb_char2len)(endc) > 1))
{
// Limit to a range of 256 chars.
if (endc > startc + 256)
EMSG_RET_NULL(_(e_range_too_large_in_character_class));
while (++startc <= endc)
regmbc(startc);
}
else
{
while (++startc <= endc)
regc(startc);
}
startc = -1;
}
}
// Only "\]", "\^", "\]" and "\\" are special in Vi. Vim
// accepts "\t", "\e", etc., but only when the 'l' flag in
// 'cpoptions' is not included.
// Posix doesn't recognize backslash at all.
else if (*regparse == '\\'
&& !reg_cpo_bsl
&& (vim_strchr(REGEXP_INRANGE, regparse[1]) != NULL
|| (!reg_cpo_lit
&& vim_strchr(REGEXP_ABBR,
regparse[1]) != NULL)))
{
regparse++;
if (*regparse == 'n')
{
// '\n' in range: also match NL
if (ret != JUST_CALC_SIZE)
{
// Using \n inside [^] does not change what
// matches. "[^\n]" is the same as ".".
if (*ret == ANYOF)
{
*ret = ANYOF + ADD_NL;
*flagp |= HASNL;
}
// else: must have had a \n already
}
regparse++;
startc = -1;
}
else if (*regparse == 'd'
|| *regparse == 'o'
|| *regparse == 'x'
|| *regparse == 'u'
|| *regparse == 'U')
{
startc = coll_get_char();
if (startc == 0)
regc(0x0a);
else
regmbc(startc);
}
else
{
startc = backslash_trans(*regparse++);
regc(startc);
}
}
else if (*regparse == '[')
{
int c_class;
int cu;
c_class = get_char_class(&regparse);
startc = -1;
// Characters assumed to be 8 bits!
switch (c_class)
{
case CLASS_NONE:
c_class = get_equi_class(&regparse);
if (c_class != 0)
{
// produce equivalence class
reg_equi_class(c_class);
}
else if ((c_class =
get_coll_element(&regparse)) != 0)
{
// produce a collating element
regmbc(c_class);
}
else
{
// literal '[', allow [[-x] as a range
startc = *regparse++;
regc(startc);
}
break;
case CLASS_ALNUM:
for (cu = 1; cu < 128; cu++)
if (isalnum(cu))
regmbc(cu);
break;
case CLASS_ALPHA:
for (cu = 1; cu < 128; cu++)
if (isalpha(cu))
regmbc(cu);
break;
case CLASS_BLANK:
regc(' ');
regc('\t');
break;
case CLASS_CNTRL:
for (cu = 1; cu <= 127; cu++)
if (iscntrl(cu))
regmbc(cu);
break;
case CLASS_DIGIT:
for (cu = 1; cu <= 127; cu++)
if (VIM_ISDIGIT(cu))
regmbc(cu);
break;
case CLASS_GRAPH:
for (cu = 1; cu <= 127; cu++)
if (isgraph(cu))
regmbc(cu);
break;
case CLASS_LOWER:
for (cu = 1; cu <= 255; cu++)
if (MB_ISLOWER(cu) && cu != 170
&& cu != 186)
regmbc(cu);
break;
case CLASS_PRINT:
for (cu = 1; cu <= 255; cu++)
if (vim_isprintc(cu))
regmbc(cu);
break;
case CLASS_PUNCT:
for (cu = 1; cu < 128; cu++)
if (ispunct(cu))
regmbc(cu);
break;
case CLASS_SPACE:
for (cu = 9; cu <= 13; cu++)
regc(cu);
regc(' ');
break;
case CLASS_UPPER:
for (cu = 1; cu <= 255; cu++)
if (MB_ISUPPER(cu))
regmbc(cu);
break;
case CLASS_XDIGIT:
for (cu = 1; cu <= 255; cu++)
if (vim_isxdigit(cu))
regmbc(cu);
break;
case CLASS_TAB:
regc('\t');
break;
case CLASS_RETURN:
regc('\r');
break;
case CLASS_BACKSPACE:
regc('\b');
break;
case CLASS_ESCAPE:
regc('\033');
break;
case CLASS_IDENT:
for (cu = 1; cu <= 255; cu++)
if (vim_isIDc(cu))
regmbc(cu);
break;
case CLASS_KEYWORD:
for (cu = 1; cu <= 255; cu++)
if (reg_iswordc(cu))
regmbc(cu);
break;
case CLASS_FNAME:
for (cu = 1; cu <= 255; cu++)
if (vim_isfilec(cu))
regmbc(cu);
break;
}
}
else
{
if (has_mbyte)
{
int len;
// produce a multibyte character, including any
// following composing characters
startc = mb_ptr2char(regparse);
len = (*mb_ptr2len)(regparse);
if (enc_utf8 && utf_char2len(startc) != len)
startc = -1; // composing chars
while (--len >= 0)
regc(*regparse++);
}
else
{
startc = *regparse++;
regc(startc);
}
}
}
regc(NUL);
prevchr_len = 1; // last char was the ']'
if (*regparse != ']')
EMSG_RET_NULL(_(e_too_many_brackets)); // Cannot happen?
skipchr(); // let's be friends with the lexer again
*flagp |= HASWIDTH | SIMPLE;
break;
}
else if (reg_strict)
EMSG2_RET_NULL(_(e_missing_rsb_after_str_lsb),
reg_magic > MAGIC_OFF);
}
// FALLTHROUGH
default:
{
int len;
// A multi-byte character is handled as a separate atom if it's
// before a multi and when it's a composing char.
if (use_multibytecode(c))
{
do_multibyte:
ret = regnode(MULTIBYTECODE);
regmbc(c);
*flagp |= HASWIDTH | SIMPLE;
break;
}
ret = regnode(EXACTLY);
// Append characters as long as:
// - there is no following multi, we then need the character in
// front of it as a single character operand
// - not running into a Magic character
// - "one_exactly" is not set
// But always emit at least one character. Might be a Multi,
// e.g., a "[" without matching "]".
for (len = 0; c != NUL && (len == 0
|| (re_multi_type(peekchr()) == NOT_MULTI
&& !one_exactly
&& !is_Magic(c))); ++len)
{
c = no_Magic(c);
if (has_mbyte)
{
regmbc(c);
if (enc_utf8)
{
int l;
// Need to get composing character too.
for (;;)
{
l = utf_ptr2len(regparse);
if (!UTF_COMPOSINGLIKE(regparse, regparse + l))
break;
regmbc(utf_ptr2char(regparse));
skipchr();
}
}
}
else
regc(c);
c = getchr();
}
ungetchr();
regc(NUL);
*flagp |= HASWIDTH;
if (len == 1)
*flagp |= SIMPLE;
}
break;
}
return ret;
} | 0 | [
"CWE-823",
"CWE-703"
] | vim | 6456fae9ba8e72c74b2c0c499eaf09974604ff30 | 15,785,976,359,706,843,000,000,000,000,000,000,000 | 808 | patch 8.2.4440: crash with specific regexp pattern and string
Problem: Crash with specific regexp pattern and string.
Solution: Stop at the start of the string. |
merged_1v_upsample(j_decompress_ptr cinfo, JSAMPIMAGE input_buf,
JDIMENSION *in_row_group_ctr,
JDIMENSION in_row_groups_avail, JSAMPARRAY output_buf,
JDIMENSION *out_row_ctr, JDIMENSION out_rows_avail)
/* 1:1 vertical sampling case: much easier, never need a spare row. */
{
my_upsample_ptr upsample = (my_upsample_ptr)cinfo->upsample;
/* Just do the upsampling. */
(*upsample->upmethod) (cinfo, input_buf, *in_row_group_ctr,
output_buf + *out_row_ctr);
/* Adjust counts */
(*out_row_ctr)++;
(*in_row_group_ctr)++;
} | 1 | [
"CWE-476"
] | libjpeg-turbo | 9120a247436e84c0b4eea828cb11e8f665fcde30 | 81,831,127,210,120,300,000,000,000,000,000,000,000 | 15 | Fix jpeg_skip_scanlines() segfault w/merged upsamp
The additional segfault mentioned in #244 was due to the fact that
the merged upsamplers use a different private structure than the
non-merged upsamplers. jpeg_skip_scanlines() was assuming the latter, so
when merged upsampling was enabled, jpeg_skip_scanlines() clobbered one
of the IDCT method pointers in the merged upsampler's private structure.
For reasons unknown, the test image in #441 did not encounter this
segfault (too small?), but it encountered an issue similar to the one
fixed in 5bc43c7821df982f65aa1c738f67fbf7cba8bd69, whereby it was
necessary to set up a dummy postprocessing function in
read_and_discard_scanlines() when merged upsampling was enabled.
Failing to do so caused either a segfault in merged_2v_upsample() (due
to a NULL pointer being passed to jcopy_sample_rows()) or an error
("Corrupt JPEG data: premature end of data segment"), depending on the
number of scanlines skipped and whether the first scanline skipped was
an odd- or even-numbered row.
Fixes #441
Fixes #244 (for real this time) |
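Editor's note: the root cause described above is that two upsampler variants hide different private layouts behind the same opaque pointer, so code assuming the wrong layout overwrites an unrelated method pointer. A contrived, hedged sketch of that hazard (assumed structs, not libjpeg-turbo's):

#include <stdio.h>

struct plain_upsampler  { int rows_to_go;    void (*upsample)(void);  };
struct merged_upsampler { void (*idct)(void); void (*merged_up)(void); };

static void dummy_idct(void) { puts("idct called"); }

int main(void)
{
    struct merged_upsampler m = { dummy_idct, NULL };
    void *priv = &m;                       /* the decoder only keeps a void * */

    /* Skip-scanlines code that wrongly assumes the plain layout would do:
     *     ((struct plain_upsampler *)priv)->rows_to_go = 0;
     * and thereby scribble over m.idct, which lives at the same offset.
     * The safe version first checks which upsampler is installed. */
    ((struct merged_upsampler *)priv)->idct();
    return 0;
}
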
const SSL_CIPHER *ssl2_get_cipher(unsigned int u)
{
if (u < SSL2_NUM_CIPHERS)
return(&(ssl2_ciphers[SSL2_NUM_CIPHERS-1-u]));
else
return(NULL);
} | 0 | [
"CWE-310"
] | openssl | cf6da05304d554aaa885151451aa4ecaa977e601 | 47,466,057,315,774,350,000,000,000,000,000,000,000 | 7 | Support TLS_FALLBACK_SCSV.
Reviewed-by: Stephen Henson <[email protected]> |
static uint64_t reencrypt_data_offset(struct luks2_hdr *hdr, unsigned new)
{
json_object *jobj = reencrypt_segment(hdr, new);
if (jobj)
return json_segment_get_offset(jobj, 0);
return LUKS2_get_data_offset(hdr) << SECTOR_SHIFT;
} | 0 | [
"CWE-345"
] | cryptsetup | 0113ac2d889c5322659ad0596d4cfc6da53e356c | 3,628,749,321,403,154,000,000,000,000,000,000,000 | 8 | Fix CVE-2021-4122 - LUKS2 reencryption crash recovery attack
Fix possible attacks against data confidentiality through LUKS2 online
reencryption extension crash recovery.
An attacker can modify on-disk metadata to simulate decryption in
progress with crashed (unfinished) reencryption step and persistently
decrypt part of the LUKS device.
This attack requires repeated physical access to the LUKS device but
no knowledge of user passphrases.
The decryption step is performed after a valid user activates
the device with a correct passphrase and modified metadata.
There are no visible warnings for the user that such recovery happened
(except using the luksDump command). The attack can also be reversed
afterward (simulating crashed encryption from a plaintext) with
possible modification of revealed plaintext.
The problem was caused by reusing a mechanism designed for actual
reencryption operation without reassessing the security impact for new
encryption and decryption operations. While the reencryption requires
calculating and verifying both key digests, no digest was needed to
initiate decryption recovery if the destination is plaintext (no
encryption key). Also, some metadata (like encryption cipher) is not
protected, and an attacker could change it. Note that LUKS2 protects
visible metadata only when a random change occurs. It does not protect
against intentional modification but such modification must not cause
a violation of data confidentiality.
The fix introduces additional digest protection of reencryption
metadata. The digest is calculated from known keys and critical
reencryption metadata. Now an attacker cannot create correct metadata
digest without knowledge of a passphrase for used keyslots.
For more details, see LUKS2 On-Disk Format Specification version 1.1.0. |
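Editor's note: the fix binds reencryption metadata to key material with a keyed digest so that metadata forged without a passphrase fails verification. The sketch below only illustrates that idea with OpenSSL's one-shot HMAC(); the field names, key material, and digest inputs are assumptions, not cryptsetup's actual on-disk format.

#include <openssl/evp.h>
#include <openssl/hmac.h>
#include <stdio.h>
#include <string.h>

/* Keyed digest over the recovery metadata; without the key (derived from a
 * valid passphrase/volume key) an attacker cannot produce a matching value. */
static int metadata_digest(const unsigned char *key, size_t keylen,
                           const unsigned char *metadata, size_t mdlen,
                           unsigned char *out, unsigned int *outlen)
{
    return HMAC(EVP_sha256(), key, (int)keylen, metadata, mdlen, out, outlen)
           ? 0 : -1;
}

int main(void)
{
    const unsigned char key[]  = "volume-key-material";   /* placeholder key */
    const unsigned char meta[] = "cipher=aes-xts-plain64 offset=32768 direction=forward";
    unsigned char digest[EVP_MAX_MD_SIZE];
    unsigned int dlen = 0;

    if (metadata_digest(key, sizeof(key) - 1, meta, sizeof(meta) - 1,
                        digest, &dlen) == 0)
        printf("digest length: %u bytes\n", dlen);
    return 0;
}
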
static int __init cdrom_init(void)
{
cdrom_sysctl_register();
return 0;
} | 0 | [
"CWE-119",
"CWE-787"
] | linux | 9de4ee40547fd315d4a0ed1dd15a2fa3559ad707 | 314,903,789,188,553,600,000,000,000,000,000,000,000 | 6 | cdrom: information leak in cdrom_ioctl_media_changed()
This cast is wrong. "cdi->capacity" is an int and "arg" is an unsigned
long. The way the check is written now, if one of the high 32 bits is
set then we could read outside the info->slots[] array.
This bug is pretty old and it predates git.
Reviewed-by: Christoph Hellwig <[email protected]>
Cc: [email protected]
Signed-off-by: Dan Carpenter <[email protected]>
Signed-off-by: Jens Axboe <[email protected]> |
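Editor's note: the commit message explains the bad cast: comparing (int)arg against an int capacity lets an unsigned long with high bits set slip past the bounds check. A stand-alone sketch of the broken check next to the corrected one (assumed names, not the kernel code):

#include <stdio.h>

#define SLOTS 4
static int slots[SLOTS] = { 0, 1, 0, 1 };

/* Broken: casting the unsigned long index down to int before the check.
 * With any of the high 32 bits set, (int)arg can be small and the read
 * lands far outside slots[]. */
int media_changed_bad(int capacity, unsigned long arg)
{
    if ((int)arg >= capacity)
        return -1;
    return slots[arg];
}

/* Fixed: validate the index in its own (wider, unsigned) type. */
int media_changed_good(int capacity, unsigned long arg)
{
    if (arg >= (unsigned long)capacity)
        return -1;
    return slots[arg];
}

int main(void)
{
    unsigned long evil = 0x100000001UL;   /* high bits set on LP64 targets */
    /* calling media_changed_bad(SLOTS, evil) would read out of bounds */
    printf("%d\n", media_changed_good(SLOTS, evil));   /* -1 on LP64 */
    return 0;
}
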
static int drbg_kcapi_random(struct crypto_rng *tfm, u8 *rdata,
unsigned int dlen)
{
struct drbg_state *drbg = crypto_rng_ctx(tfm);
if (0 < dlen) {
return drbg_generate_long(drbg, rdata, dlen, NULL);
} else {
struct drbg_gen *data = (struct drbg_gen *)rdata;
struct drbg_string addtl;
/* catch NULL pointer */
if (!data)
return 0;
drbg_set_testdata(drbg, data->test_data);
/* linked list variable is now local to allow modification */
drbg_string_fill(&addtl, data->addtl->buf, data->addtl->len);
return drbg_generate_long(drbg, data->outbuf, data->outlen,
&addtl);
}
} | 1 | [
"CWE-476"
] | linux | 8fded5925d0a733c46f8d0b5edd1c9b315882b1d | 162,310,059,454,559,200,000,000,000,000,000,000,000 | 19 | crypto: drbg - Convert to new rng interface
This patch converts the DRBG implementation to the new low-level
rng interface.
This allows us to get rid of struct drbg_gen by using the new RNG
API instead.
Signed-off-by: Herbert Xu <[email protected]>
Acked-by: Stephan Mueller <[email protected]> |
storagePoolUpdateStateCallback(virStoragePoolObj *obj,
const void *opaque G_GNUC_UNUSED)
{
virStoragePoolDef *def = virStoragePoolObjGetDef(obj);
bool active = false;
virStorageBackend *backend;
g_autofree char *stateFile = NULL;
if ((backend = virStorageBackendForType(def->type)) == NULL) {
virReportError(VIR_ERR_INTERNAL_ERROR,
_("Missing backend %d"), def->type);
return;
}
if (!(stateFile = virFileBuildPath(driver->stateDir, def->name, ".xml")))
return;
/* Backends which do not support 'checkPool' are considered
* inactive by default. */
if (backend->checkPool &&
backend->checkPool(obj, &active) < 0) {
virReportError(VIR_ERR_INTERNAL_ERROR,
_("Failed to initialize storage pool '%s': %s"),
def->name, virGetLastErrorMessage());
unlink(stateFile);
active = false;
}
/* We can pass NULL as connection, most backends do not use
* it anyway, but if they do and fail, we want to log error and
* continue with other pools.
*/
if (active &&
storagePoolRefreshImpl(backend, obj, stateFile) < 0) {
virReportError(VIR_ERR_INTERNAL_ERROR,
_("Failed to restart storage pool '%s': %s"),
def->name, virGetLastErrorMessage());
active = false;
}
virStoragePoolObjSetActive(obj, active);
if (!virStoragePoolObjIsActive(obj))
virStoragePoolUpdateInactive(obj);
return;
} | 0 | [] | libvirt | 447f69dec47e1b0bd15ecd7cd49a9fd3b050fb87 | 207,141,012,969,547,300,000,000,000,000,000,000,000 | 47 | storage_driver: Unlock object on ACL fail in storagePoolLookupByTargetPath
'virStoragePoolObjListSearch' returns a locked and refed object, thus we
must release it on ACL permission failure.
Fixes: 7aa0e8c0cb8
Resolves: https://bugzilla.redhat.com/show_bug.cgi?id=1984318
Signed-off-by: Peter Krempa <[email protected]>
Reviewed-by: Michal Privoznik <[email protected]> |
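Editor's note: the rule stated in the commit message is that an object returned locked and referenced must be released on every exit path, including the ACL failure. A small sketch of that cleanup pattern, with assumed helper names and pthreads in place of libvirt's object API:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

typedef struct { pthread_mutex_t lock; int refs; } obj_t;

static obj_t *obj_search(obj_t *o)   /* returns the object locked + referenced */
{
    pthread_mutex_lock(&o->lock);
    o->refs++;
    return o;
}

static void obj_endapi(obj_t *o)     /* drop the lock and the reference */
{
    o->refs--;
    pthread_mutex_unlock(&o->lock);
}

static int lookup_by_target_path(obj_t *pool, bool acl_ok, const char **name)
{
    obj_t *obj = obj_search(pool);
    int ret = -1;

    if (!acl_ok)
        goto cleanup;     /* the bug was returning here without releasing obj */

    *name = "default-pool";
    ret = 0;

 cleanup:
    obj_endapi(obj);      /* runs on success and on ACL failure alike */
    return ret;
}

int main(void)
{
    static obj_t pool = { PTHREAD_MUTEX_INITIALIZER, 0 };
    const char *name = NULL;
    int rc = lookup_by_target_path(&pool, false, &name);
    printf("rc=%d refs=%d\n", rc, pool.refs);   /* refs back to 0 */
    return 0;
}
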
GF_EXPORT
u32 gf_isom_sample_has_subsamples(GF_ISOFile *movie, u32 track, u32 sampleNumber, u32 flags)
{
GF_TrackBox *trak = gf_isom_get_track_from_file(movie, track);
if (!trak) return GF_BAD_PARAM;
if (!trak->Media->information->sampleTable->sub_samples) return 0;
if (!sampleNumber) return 1;
return gf_isom_sample_get_subsample_entry(movie, track, sampleNumber, flags, NULL); | 0 | [
"CWE-476"
] | gpac | ebfa346eff05049718f7b80041093b4c5581c24e | 147,671,873,292,629,260,000,000,000,000,000,000,000 | 8 | fixed #1706 |
logid (const char id[2])
{
hex (id[0]);
hex (id[1]);
} | 0 | [
"CWE-362"
] | ndjbdns | 847523271f3966cf4618c5689b905703c41dec1c | 101,218,316,445,094,360,000,000,000,000,000,000,000 | 5 | Merge identical outgoing requests.
This patch fixes dnscache to combine *same* client queries into one
single outgoing request, thus securing the server from possible cache
poisoning attacks. This fixes one of the cache poisoning vulnerability
reported by Mr Mark Johnson
-> https://bugzilla.redhat.com/show_bug.cgi?id=838965.
Nonetheless the original patch for this issue was created by
Mr Jeff king -> http://www.your.org/dnscache/
Sincere thanks to Mr Mark for reporting this issue and Mr Jeff for
creating the patch and releasing it under public domain. |
static void show_entry(struct diff_options *opt, const char *prefix, struct tree_desc *desc,
const char *base, int baselen)
{
unsigned mode;
const char *path;
const unsigned char *sha1 = tree_entry_extract(desc, &path, &mode);
int pathlen = tree_entry_len(path, sha1);
if (DIFF_OPT_TST(opt, RECURSIVE) && S_ISDIR(mode)) {
enum object_type type;
char *newbase = malloc_base(base, baselen, path, pathlen);
struct tree_desc inner;
void *tree;
unsigned long size;
tree = read_sha1_file(sha1, &type, &size);
if (!tree || type != OBJ_TREE)
die("corrupt tree sha %s", sha1_to_hex(sha1));
init_tree_desc(&inner, tree, size);
show_tree(opt, prefix, &inner, newbase, baselen + 1 + pathlen);
free(tree);
free(newbase);
} else {
char *fullname = malloc_fullname(base, baselen, path, pathlen);
opt->add_remove(opt, prefix[0], mode, sha1, fullname);
free(fullname);
}
} | 0 | [
"CWE-119"
] | git | fd55a19eb1d49ae54008d932a65f79cd6fda45c9 | 33,846,817,028,892,630,000,000,000,000,000,000,000 | 30 | Fix buffer overflow in git diff
If PATH_MAX on your system is smaller than a path stored, it may cause
buffer overflow and stack corruption in diff_addremove() and diff_change()
functions when running git-diff
Signed-off-by: Dmitry Potapov <[email protected]>
Signed-off-by: Junio C Hamano <[email protected]> |
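Editor's note: the overflow came from copying paths into fixed PATH_MAX buffers; the shown function instead builds names in heap buffers sized from the inputs. A hedged sketch of that allocation pattern (hypothetical helper name, not git's actual implementation):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical helper mirroring the idea of malloc_fullname(): size the
 * buffer from the actual inputs instead of trusting PATH_MAX. */
static char *fullname_alloc(const char *base, size_t baselen,
                            const char *path, size_t pathlen)
{
    char *fullname = malloc(baselen + pathlen + 1);
    if (!fullname)
        return NULL;
    memcpy(fullname, base, baselen);
    memcpy(fullname + baselen, path, pathlen);
    fullname[baselen + pathlen] = '\0';
    return fullname;
}

int main(void)
{
    const char *base = "a/very/deep/tree/prefix/";
    const char *path = "entry.c";
    char *full = fullname_alloc(base, strlen(base), path, strlen(path));
    if (full) {
        printf("%s\n", full);
        free(full);
    }
    return 0;
}
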
rsvg_paint_server_parse (gboolean * inherit, const RsvgDefs * defs, const char *str,
guint32 current_color)
{
char *name;
guint32 rgb;
if (inherit != NULL)
*inherit = 1;
if (!strcmp (str, "none"))
return NULL;
name = rsvg_get_url_string (str);
if (name) {
RsvgNode *val;
val = rsvg_defs_lookup (defs, name);
g_free (name);
if (val == NULL)
return NULL;
if (RSVG_NODE_TYPE (val) == RSVG_NODE_TYPE_LINEAR_GRADIENT)
return rsvg_paint_server_lin_grad ((RsvgLinearGradient *) val);
else if (RSVG_NODE_TYPE (val) == RSVG_NODE_TYPE_RADIAL_GRADIENT)
return rsvg_paint_server_rad_grad ((RsvgRadialGradient *) val);
else if (RSVG_NODE_TYPE (val) == RSVG_NODE_TYPE_PATTERN)
return rsvg_paint_server_pattern ((RsvgPattern *) val);
else
return NULL;
} else if (!strcmp (str, "inherit")) {
if (inherit != NULL)
*inherit = 0;
return rsvg_paint_server_solid (0);
} else if (!strcmp (str, "currentColor")) {
RsvgPaintServer *ps;
ps = rsvg_paint_server_solid_current_colour ();
return ps;
} else {
rgb = rsvg_css_parse_color (str, inherit);
return rsvg_paint_server_solid (rgb);
}
} | 0 | [] | librsvg | 34c95743ca692ea0e44778e41a7c0a129363de84 | 221,163,893,465,808,270,000,000,000,000,000,000,000 | 39 | Store node type separately in RsvgNode
The node name (formerly RsvgNode:type) cannot be used to infer
the sub-type of RsvgNode that we're dealing with, since for unknown
elements we put type = node-name. This lead to a (potentially exploitable)
crash e.g. when the element name started with "fe" which tricked
the old code into considering it as a RsvgFilterPrimitive.
CVE-2011-3146
https://bugzilla.gnome.org/show_bug.cgi?id=658014 |
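Editor's note: the fix stores the node's type as a separate enum set at creation time, so an unknown element whose name merely starts with "fe" can no longer be mistaken for a filter primitive. A toy sketch of that idea with assumed types (not librsvg's structures):

#include <stdio.h>
#include <string.h>

typedef enum { NODE_UNKNOWN, NODE_FILTER_PRIMITIVE, NODE_PATTERN } node_type_t;

typedef struct { node_type_t type; const char *name; } node_t;

static node_t make_node(const char *name)
{
    node_t n = { NODE_UNKNOWN, name };
    if (strcmp(name, "feGaussianBlur") == 0)
        n.type = NODE_FILTER_PRIMITIVE;
    else if (strcmp(name, "pattern") == 0)
        n.type = NODE_PATTERN;
    /* Unrecognised elements keep NODE_UNKNOWN even if the name starts
     * with "fe", so no code path treats them as filter primitives. */
    return n;
}

int main(void)
{
    node_t bogus = make_node("feTotallyMadeUp");
    printf("filter primitive? %d\n", bogus.type == NODE_FILTER_PRIMITIVE);  /* 0 */
    return 0;
}
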
gerbv_gdk_draw_prim5(GdkPixmap *pixmap, GdkGC *gc, gerbv_simplified_amacro_t *s,
double scale, gint x, gint y)
{
const int exposure_idx = 0;
const int nuf_vertices_idx = 1;
const int center_x_idx = 2;
const int center_y_idx = 3;
const int diameter_idx = 4;
const int rotation_idx = 5;
int nuf_vertices, i;
double vertex, tick, rotation, radius;
GdkPoint *points;
GdkGC *local_gc = gdk_gc_new(pixmap);
GdkColor color;
nuf_vertices = (int)s->parameter[nuf_vertices_idx];
points = g_new(GdkPoint, nuf_vertices);
if (!points) {
g_free(points);
return;
}
gdk_gc_copy(local_gc, gc);
/* Exposure */
if (s->parameter[exposure_idx] == 0.0) {
color.pixel = 0;
gdk_gc_set_foreground(local_gc, &color);
}
tick = 2 * M_PI / (double)nuf_vertices;
rotation = DEG2RAD(-s->parameter[rotation_idx]);
radius = s->parameter[diameter_idx] / 2.0;
for (i = 0; i < nuf_vertices; i++) {
vertex = tick * (double)i + rotation;
points[i].x = (int)round(scale * (radius * cos(vertex) + s->parameter[center_x_idx])) + x;
points[i].y = (int)round(scale * (radius * sin(vertex) - s->parameter[center_y_idx])) + y;
}
gdk_draw_polygon(pixmap, local_gc, 1, points, nuf_vertices);
gdk_gc_unref(local_gc);
g_free(points);
return;
} /* gerbv_gdk_draw_prim5 */ | 0 | [
"CWE-703"
] | gerbv | b2c2f8da851f2ac8079a91ce9d498d87ff96abcf | 213,919,104,274,086,920,000,000,000,000,000,000,000 | 46 | Avoid direct access on array of unknown size
Be requiring a `gerbv_simplified_amacro_t` the `dgk_draw_amacro_funcs` can be sure of the length of the parameter array. |
struct intel_iommu *intel_svm_device_to_iommu(struct device *dev)
{
struct intel_iommu *iommu;
u8 bus, devfn;
if (iommu_dummy(dev)) {
dev_warn(dev,
"No IOMMU translation for device; cannot enable SVM\n");
return NULL;
}
iommu = device_to_iommu(dev, &bus, &devfn);
if ((!iommu)) {
dev_err(dev, "No IOMMU for device; cannot enable SVM\n");
return NULL;
}
return iommu;
} | 0 | [] | linux | d8b8591054575f33237556c32762d54e30774d28 | 230,922,129,268,840,100,000,000,000,000,000,000,000 | 19 | iommu/vt-d: Disable ATS support on untrusted devices
Commit fb58fdcd295b9 ("iommu/vt-d: Do not enable ATS for untrusted
devices") disables ATS support on the devices which have been marked
as untrusted. Unfortunately this is not enough to fix the DMA attack
vulnerabiltiies because IOMMU driver allows translated requests as
long as a device advertises the ATS capability. Hence a malicious
peripheral device could use this to bypass IOMMU.
This disables the ATS support on untrusted devices by clearing the
internal per-device ATS mark. As the result, IOMMU driver will block
any translated requests from any device marked as untrusted.
Cc: Jacob Pan <[email protected]>
Cc: Mika Westerberg <[email protected]>
Suggested-by: Kevin Tian <[email protected]>
Suggested-by: Ashok Raj <[email protected]>
Fixes: fb58fdcd295b9 ("iommu/vt-d: Do not enable ATS for untrusted devices")
Signed-off-by: Lu Baolu <[email protected]>
Signed-off-by: Joerg Roedel <[email protected]> |
static int esp_post_load(void *opaque, int version_id)
{
ESPState *s = ESP(opaque);
int len, i;
version_id = MIN(version_id, s->mig_version_id);
if (version_id < 5) {
esp_set_tc(s, s->mig_dma_left);
/* Migrate ti_buf to fifo */
len = s->mig_ti_wptr - s->mig_ti_rptr;
for (i = 0; i < len; i++) {
fifo8_push(&s->fifo, s->mig_ti_buf[i]);
}
/* Migrate cmdbuf to cmdfifo */
for (i = 0; i < s->mig_cmdlen; i++) {
fifo8_push(&s->cmdfifo, s->mig_cmdbuf[i]);
}
}
s->mig_version_id = vmstate_esp.version_id;
return 0;
} | 0 | [
"CWE-476"
] | qemu | 0db895361b8a82e1114372ff9f4857abea605701 | 153,625,294,674,958,830,000,000,000,000,000,000,000 | 25 | esp: always check current_req is not NULL before use in DMA callbacks
After issuing a SCSI command the SCSI layer can call the SCSIBusInfo .cancel
callback which resets both current_req and current_dev to NULL. If any data
is left in the transfer buffer (async_len != 0) then the next TI (Transfer
Information) command will attempt to reference the NULL pointer causing a
segfault.
Buglink: https://bugs.launchpad.net/qemu/+bug/1910723
Buglink: https://bugs.launchpad.net/qemu/+bug/1909247
Signed-off-by: Mark Cave-Ayland <[email protected]>
Tested-by: Alexander Bulekov <[email protected]>
Message-Id: <[email protected]> |
gboolean avdtp_remove_state_cb(unsigned int id)
{
GSList *l;
for (l = state_callbacks; l != NULL; l = l->next) {
struct avdtp_state_callback *cb = l->data;
if (cb && cb->id == id) {
state_callbacks = g_slist_remove(state_callbacks, cb);
g_free(cb);
return TRUE;
}
}
return FALSE;
} | 0 | [
"CWE-703"
] | bluez | 7a80d2096f1b7125085e21448112aa02f49f5e9a | 292,700,070,087,307,800,000,000,000,000,000,000,000 | 15 | avdtp: Fix accepting invalid/malformed capabilities
Check if capabilities are valid before attempting to copy them. |
static void mld_dad_work(struct work_struct *work)
{
struct inet6_dev *idev = container_of(to_delayed_work(work),
struct inet6_dev,
mc_dad_work);
mutex_lock(&idev->mc_lock);
mld_send_initial_cr(idev);
if (idev->mc_dad_count) {
idev->mc_dad_count--;
if (idev->mc_dad_count)
mld_dad_start_work(idev,
unsolicited_report_interval(idev));
}
mutex_unlock(&idev->mc_lock);
in6_dev_put(idev);
} | 0 | [
"CWE-703"
] | linux | 2d3916f3189172d5c69d33065c3c21119fe539fc | 120,522,684,137,429,870,000,000,000,000,000,000,000 | 16 | ipv6: fix skb drops in igmp6_event_query() and igmp6_event_report()
While investigating on why a synchronize_net() has been added recently
in ipv6_mc_down(), I found that igmp6_event_query() and igmp6_event_report()
might drop skbs in some cases.
Discussion about removing synchronize_net() from ipv6_mc_down()
will happen in a different thread.
Fixes: f185de28d9ae ("mld: add new workqueues for process mld events")
Signed-off-by: Eric Dumazet <[email protected]>
Cc: Taehee Yoo <[email protected]>
Cc: Cong Wang <[email protected]>
Cc: David Ahern <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Jakub Kicinski <[email protected]> |
STACK_OF(X509) *load_certs(BIO *err, const char *file, int format,
const char *pass, ENGINE *e, const char *desc)
{
STACK_OF(X509) *certs;
if (!load_certs_crls(err, file, format, pass, e, desc, &certs, NULL))
return NULL;
return certs;
} | 0 | [] | openssl | a70da5b3ecc3160368529677006801c58cb369db | 135,890,125,753,579,490,000,000,000,000,000,000,000 | 8 | New functions to check a hostname email or IP address against a
certificate. Add options to s_client, s_server and x509 utilities
to print results of checks. |
GENERAL_NAME *TS_TST_INFO_get_tsa(TS_TST_INFO *a)
{
return a->tsa;
} | 0 | [] | openssl | c7235be6e36c4bef84594aa3b2f0561db84b63d8 | 174,761,947,785,183,200,000,000,000,000,000,000,000 | 4 | RFC 3161 compliant time stamp request creation, response generation
and response verification.
Submitted by: Zoltan Glozik <[email protected]>
Reviewed by: Ulf Moeller |
utf16be_mbc_to_code(const UChar* p, const UChar* end ARG_UNUSED)
{
OnigCodePoint code;
if (UTF16_IS_SURROGATE_FIRST(*p)) {
code = ((((p[0] - 0xd8) << 2) + ((p[1] & 0xc0) >> 6) + 1) << 16)
+ ((((p[1] & 0x3f) << 2) + (p[2] - 0xdc)) << 8)
+ p[3];
}
else {
code = p[0] * 256 + p[1];
}
return code;
} | 1 | [
"CWE-125"
] | php-src | 9d6c59eeea88a3e9d7039cb4fed5126ef704593a | 182,644,666,348,400,600,000,000,000,000,000,000,000 | 14 | Fix bug #77418 - Heap overflow in utf32be_mbc_to_code |
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
u64 (*prfill)(struct seq_file *,
struct blkg_policy_data *, int),
const struct blkcg_policy *pol, int data,
bool show_total)
{
struct blkcg_gq *blkg;
u64 total = 0;
rcu_read_lock();
hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
spin_lock_irq(blkg->q->queue_lock);
if (blkcg_policy_enabled(blkg->q, pol))
total += prfill(sf, blkg->pd[pol->plid], data);
spin_unlock_irq(blkg->q->queue_lock);
}
rcu_read_unlock();
if (show_total)
seq_printf(sf, "Total %llu\n", (unsigned long long)total);
} | 0 | [
"CWE-415"
] | linux | 9b54d816e00425c3a517514e0d677bb3cec49258 | 210,772,555,359,078,360,000,000,000,000,000,000,000 | 21 | blkcg: fix double free of new_blkg in blkcg_init_queue
If blkg_create fails, new_blkg passed as an argument will
be freed by blkg_create, so there is no need to free it again.
Signed-off-by: Hou Tao <[email protected]>
Signed-off-by: Jens Axboe <[email protected]> |
void rds_ib_send_add_credits(struct rds_connection *conn, unsigned int credits)
{
struct rds_ib_connection *ic = conn->c_transport_data;
if (credits == 0)
return;
rdsdebug("rds_ib_send_add_credits(%u): current=%u%s\n",
credits,
IB_GET_SEND_CREDITS(atomic_read(&ic->i_credits)),
test_bit(RDS_LL_SEND_FULL, &conn->c_flags) ? ", ll_send_full" : "");
atomic_add(IB_SET_SEND_CREDITS(credits), &ic->i_credits);
if (test_and_clear_bit(RDS_LL_SEND_FULL, &conn->c_flags))
queue_delayed_work(rds_wq, &conn->c_send_w, 0);
WARN_ON(IB_GET_SEND_CREDITS(credits) >= 16384);
rds_ib_stats_inc(s_ib_rx_credit_updates);
} | 0 | [] | linux-2.6 | 6094628bfd94323fc1cea05ec2c6affd98c18f7f | 35,907,986,306,346,880,000,000,000,000,000,000,000 | 20 | rds: prevent BUG_ON triggering on congestion map updates
Recently had this bug halt reported to me:
kernel BUG at net/rds/send.c:329!
Oops: Exception in kernel mode, sig: 5 [#1]
SMP NR_CPUS=1024 NUMA pSeries
Modules linked in: rds sunrpc ipv6 dm_mirror dm_region_hash dm_log ibmveth sg
ext4 jbd2 mbcache sd_mod crc_t10dif ibmvscsic scsi_transport_srp scsi_tgt
dm_mod [last unloaded: scsi_wait_scan]
NIP: d000000003ca68f4 LR: d000000003ca67fc CTR: d000000003ca8770
REGS: c000000175cab980 TRAP: 0700 Not tainted (2.6.32-118.el6.ppc64)
MSR: 8000000000029032 <EE,ME,CE,IR,DR> CR: 44000022 XER: 00000000
TASK = c00000017586ec90[1896] 'krdsd' THREAD: c000000175ca8000 CPU: 0
GPR00: 0000000000000150 c000000175cabc00 d000000003cb7340 0000000000002030
GPR04: ffffffffffffffff 0000000000000030 0000000000000000 0000000000000030
GPR08: 0000000000000001 0000000000000001 c0000001756b1e30 0000000000010000
GPR12: d000000003caac90 c000000000fa2500 c0000001742b2858 c0000001742b2a00
GPR16: c0000001742b2a08 c0000001742b2820 0000000000000001 0000000000000001
GPR20: 0000000000000040 c0000001742b2814 c000000175cabc70 0800000000000000
GPR24: 0000000000000004 0200000000000000 0000000000000000 c0000001742b2860
GPR28: 0000000000000000 c0000001756b1c80 d000000003cb68e8 c0000001742b27b8
NIP [d000000003ca68f4] .rds_send_xmit+0x4c4/0x8a0 [rds]
LR [d000000003ca67fc] .rds_send_xmit+0x3cc/0x8a0 [rds]
Call Trace:
[c000000175cabc00] [d000000003ca67fc] .rds_send_xmit+0x3cc/0x8a0 [rds]
(unreliable)
[c000000175cabd30] [d000000003ca7e64] .rds_send_worker+0x54/0x100 [rds]
[c000000175cabdb0] [c0000000000b475c] .worker_thread+0x1dc/0x3c0
[c000000175cabed0] [c0000000000baa9c] .kthread+0xbc/0xd0
[c000000175cabf90] [c000000000032114] .kernel_thread+0x54/0x70
Instruction dump:
4bfffd50 60000000 60000000 39080001 935f004c f91f0040 41820024 813d017c
7d094a78 7d290074 7929d182 394a0020 <0b090000> 40e2ff68 4bffffa4 39200000
Kernel panic - not syncing: Fatal exception
Call Trace:
[c000000175cab560] [c000000000012e04] .show_stack+0x74/0x1c0 (unreliable)
[c000000175cab610] [c0000000005a365c] .panic+0x80/0x1b4
[c000000175cab6a0] [c00000000002fbcc] .die+0x21c/0x2a0
[c000000175cab750] [c000000000030000] ._exception+0x110/0x220
[c000000175cab910] [c000000000004b9c] program_check_common+0x11c/0x180
Signed-off-by: David S. Miller <[email protected]> |
static bool ldm_parse_dsk3 (const u8 *buffer, int buflen, struct vblk *vb)
{
int r_objid, r_name, r_diskid, r_altname, len;
struct vblk_disk *disk;
BUG_ON (!buffer || !vb);
r_objid = ldm_relative (buffer, buflen, 0x18, 0);
r_name = ldm_relative (buffer, buflen, 0x18, r_objid);
r_diskid = ldm_relative (buffer, buflen, 0x18, r_name);
r_altname = ldm_relative (buffer, buflen, 0x18, r_diskid);
len = r_altname;
if (len < 0)
return false;
len += VBLK_SIZE_DSK3;
if (len != get_unaligned_be32(buffer + 0x14))
return false;
disk = &vb->vblk.disk;
ldm_get_vstr (buffer + 0x18 + r_diskid, disk->alt_name,
sizeof (disk->alt_name));
if (!ldm_parse_guid (buffer + 0x19 + r_name, disk->disk_id))
return false;
return true;
} | 0 | [
"CWE-119",
"CWE-787"
] | linux | cae13fe4cc3f24820ffb990c09110626837e85d4 | 142,746,003,906,080,160,000,000,000,000,000,000,000 | 27 | Fix for buffer overflow in ldm_frag_add not sufficient
As Ben Hutchings discovered [1], the patch for CVE-2011-1017 (buffer
overflow in ldm_frag_add) is not sufficient. The original patch in
commit c340b1d64000 ("fs/partitions/ldm.c: fix oops caused by corrupted
partition table") does not consider that, for subsequent fragments,
previously allocated memory is used.
[1] http://lkml.org/lkml/2011/5/6/407
Reported-by: Ben Hutchings <[email protected]>
Signed-off-by: Timo Warns <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
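Editor's note: the insufficiency described above is that only the first fragment's size was validated while later fragments wrote into the already-allocated buffer. A hedged reassembly sketch (assumed structures, not the ldm code) that re-checks the remaining room on every fragment:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct frag_buf { unsigned char *data; size_t alloc; size_t used; };

static int frag_add(struct frag_buf *f, const unsigned char *chunk, size_t len)
{
    if (f->data == NULL) {                 /* first fragment sizes the buffer */
        f->alloc = len;
        f->data  = malloc(f->alloc);
        if (!f->data)
            return -1;
        f->used = 0;
    }
    /* Later fragments are copied into the existing allocation, so their
     * length must be re-checked against the room that is actually left. */
    if (len > f->alloc - f->used)
        return -1;
    memcpy(f->data + f->used, chunk, len);
    f->used += len;
    return 0;
}

int main(void)
{
    struct frag_buf f = { NULL, 0, 0 };
    const unsigned char chunk[8] = { 0 };
    printf("%d\n", frag_add(&f, chunk, sizeof(chunk)));   /* 0: fits      */
    printf("%d\n", frag_add(&f, chunk, sizeof(chunk)));   /* -1: no room  */
    free(f.data);
    return 0;
}
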
const char *arch_vma_name(struct vm_area_struct *vma)
{
if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
return "[vdso]";
return NULL;
} | 0 | [
"CWE-264"
] | linux-2.6 | 7d91d531900bfa1165d445390b3b13a8013f98f7 | 206,695,043,579,564,340,000,000,000,000,000,000,000 | 6 | [PATCH] i386 vDSO: use install_special_mapping
This patch uses install_special_mapping for the i386 vDSO setup, consolidating
duplicated code.
Signed-off-by: Roland McGrath <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Paul Mackerras <[email protected]>
Cc: Benjamin Herrenschmidt <[email protected]>
Cc: Andi Kleen <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
update_read_create_offscreen_bitmap_order(wStream* s,
CREATE_OFFSCREEN_BITMAP_ORDER* create_offscreen_bitmap)
{
UINT16 flags;
BOOL deleteListPresent;
OFFSCREEN_DELETE_LIST* deleteList;
if (Stream_GetRemainingLength(s) < 6)
return FALSE;
Stream_Read_UINT16(s, flags); /* flags (2 bytes) */
create_offscreen_bitmap->id = flags & 0x7FFF;
deleteListPresent = (flags & 0x8000) ? TRUE : FALSE;
Stream_Read_UINT16(s, create_offscreen_bitmap->cx); /* cx (2 bytes) */
Stream_Read_UINT16(s, create_offscreen_bitmap->cy); /* cy (2 bytes) */
deleteList = &(create_offscreen_bitmap->deleteList);
if (deleteListPresent)
{
UINT32 i;
if (Stream_GetRemainingLength(s) < 2)
return FALSE;
Stream_Read_UINT16(s, deleteList->cIndices);
if (deleteList->cIndices > deleteList->sIndices)
{
UINT16* new_indices;
new_indices = (UINT16*)realloc(deleteList->indices, deleteList->cIndices * 2);
if (!new_indices)
return FALSE;
deleteList->sIndices = deleteList->cIndices;
deleteList->indices = new_indices;
}
if (Stream_GetRemainingLength(s) < 2 * deleteList->cIndices)
return FALSE;
for (i = 0; i < deleteList->cIndices; i++)
{
Stream_Read_UINT16(s, deleteList->indices[i]);
}
}
else
{
deleteList->cIndices = 0;
}
return TRUE;
} | 0 | [
"CWE-415"
] | FreeRDP | 67c2aa52b2ae0341d469071d1bc8aab91f8d2ed8 | 142,052,795,173,602,550,000,000,000,000,000,000,000 | 53 | Fixed #6013: Check new length is > 0 |
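Editor's note: the one-line summary "Check new length is > 0" hints at a realloc pitfall: resizing a buffer to zero bytes can free the old block and return NULL, leaving stale pointers that later end in a double free. A small sketch of a guarded resize helper (assumed names, not FreeRDP's code):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static int resize_indices(uint16_t **indices, size_t *capacity, size_t needed)
{
    uint16_t *tmp;

    if (needed == 0)                /* refuse a zero-byte reallocation:      */
        return -1;                  /* realloc(p, 0) may free p and return   */
                                    /* NULL, inviting a later double free    */
    if (needed <= *capacity)
        return 0;

    tmp = realloc(*indices, needed * sizeof(uint16_t));
    if (!tmp)                       /* on failure the old block is untouched */
        return -1;

    *indices  = tmp;
    *capacity = needed;
    return 0;
}

int main(void)
{
    uint16_t *idx = NULL;
    size_t cap = 0;
    printf("%d\n", resize_indices(&idx, &cap, 8));   /* 0  */
    printf("%d\n", resize_indices(&idx, &cap, 0));   /* -1 */
    free(idx);
    return 0;
}
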
mrb_equal_m(mrb_state *mrb, mrb_value self)
{
mrb_value arg;
mrb_get_args(mrb, "o", &arg);
return mrb_bool_value(mrb_equal(mrb, self, arg));
} | 0 | [
"CWE-824"
] | mruby | b64ce17852b180dfeea81cf458660be41a78974d | 90,553,811,076,437,750,000,000,000,000,000,000,000 | 7 | Should not call `initialize_copy` for `TT_ICLASS`; fix #4027
Since `TT_ICLASS` is an internal object that should never be revealed
to the Ruby world. |
int32_t cli_bcapi_version_compare(struct cli_bc_ctx *ctx , const uint8_t* lhs, uint32_t lhs_len,
const uint8_t* rhs, uint32_t rhs_len)
{
unsigned i = 0, j = 0;
unsigned long li=0, ri=0;
do {
while (i < lhs_len && j < rhs_len && lhs[i] == rhs[j] &&
!isdigit(lhs[i]) && !isdigit(rhs[j])) {
i++; j++;
}
if (i == lhs_len && j == rhs_len)
return 0;
if (i == lhs_len)
return -1;
if (j == rhs_len)
return 1;
if (!isdigit(lhs[i]) || !isdigit(rhs[j]))
return lhs[i] < rhs[j] ? -1 : 1;
while (isdigit(lhs[i]) && i < lhs_len)
li = 10*li + (lhs[i++] - '0');
while (isdigit(rhs[j]) && j < rhs_len)
ri = 10*ri + (rhs[j++] - '0');
if (li < ri)
return -1;
if (li > ri)
return 1;
} while (1);
} | 0 | [
"CWE-189"
] | clamav-devel | 3d664817f6ef833a17414a4ecea42004c35cc42f | 76,855,390,118,813,000,000,000,000,000,000,000,000 | 28 | fix recursion level crash (bb #3706).
Thanks to Stephane Chazelas for the analysis. |
static int cma_init_net(struct net *net)
{
struct cma_pernet *pernet = cma_pernet(net);
xa_init(&pernet->tcp_ps);
xa_init(&pernet->udp_ps);
xa_init(&pernet->ipoib_ps);
xa_init(&pernet->ib_ps);
return 0;
} | 0 | [
"CWE-416"
] | linux | bc0bdc5afaa740d782fbf936aaeebd65e5c2921d | 119,355,839,985,462,050,000,000,000,000,000,000,000 | 11 | RDMA/cma: Do not change route.addr.src_addr.ss_family
If the state is not idle then rdma_bind_addr() will immediately fail and
no change to global state should happen.
For instance if the state is already RDMA_CM_LISTEN then this will corrupt
the src_addr and would cause the test in cma_cancel_operation():
if (cma_any_addr(cma_src_addr(id_priv)) && !id_priv->cma_dev)
To view a mangled src_addr, eg with a IPv6 loopback address but an IPv4
family, failing the test.
This would manifest as this trace from syzkaller:
BUG: KASAN: use-after-free in __list_add_valid+0x93/0xa0 lib/list_debug.c:26
Read of size 8 at addr ffff8881546491e0 by task syz-executor.1/32204
CPU: 1 PID: 32204 Comm: syz-executor.1 Not tainted 5.12.0-rc8-syzkaller #0
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
Call Trace:
__dump_stack lib/dump_stack.c:79 [inline]
dump_stack+0x141/0x1d7 lib/dump_stack.c:120
print_address_description.constprop.0.cold+0x5b/0x2f8 mm/kasan/report.c:232
__kasan_report mm/kasan/report.c:399 [inline]
kasan_report.cold+0x7c/0xd8 mm/kasan/report.c:416
__list_add_valid+0x93/0xa0 lib/list_debug.c:26
__list_add include/linux/list.h:67 [inline]
list_add_tail include/linux/list.h:100 [inline]
cma_listen_on_all drivers/infiniband/core/cma.c:2557 [inline]
rdma_listen+0x787/0xe00 drivers/infiniband/core/cma.c:3751
ucma_listen+0x16a/0x210 drivers/infiniband/core/ucma.c:1102
ucma_write+0x259/0x350 drivers/infiniband/core/ucma.c:1732
vfs_write+0x28e/0xa30 fs/read_write.c:603
ksys_write+0x1ee/0x250 fs/read_write.c:658
do_syscall_64+0x2d/0x70 arch/x86/entry/common.c:46
entry_SYSCALL_64_after_hwframe+0x44/0xae
Which is indicating that an rdma_id_private was destroyed without doing
cma_cancel_listens().
Instead of trying to re-use the src_addr memory to indirectly create an
any address build one explicitly on the stack and bind to that as any
other normal flow would do.
Link: https://lore.kernel.org/r/[email protected]
Cc: [email protected]
Fixes: 732d41c545bb ("RDMA/cma: Make the locking for automatic state transition more clear")
Reported-by: [email protected]
Tested-by: Hao Sun <[email protected]>
Reviewed-by: Leon Romanovsky <[email protected]>
Signed-off-by: Jason Gunthorpe <[email protected]> |
T& max_min(t& min_val) {
if (is_empty())
throw CImgInstanceException(_cimg_instance
"max_min(): Empty instance.",
cimg_instance);
T *ptr_max = _data;
T max_value = *ptr_max, min_value = max_value;
cimg_for(*this,ptrs,T) {
const T val = *ptrs;
if (val>max_value) { max_value = val; ptr_max = ptrs; }
if (val<min_value) min_value = val;
}
min_val = (t)min_value;
return *ptr_max;
} | 0 | [
"CWE-770"
] | cimg | 619cb58dd90b4e03ac68286c70ed98acbefd1c90 | 59,583,204,692,672,650,000,000,000,000,000,000,000 | 15 | CImg<>::load_bmp() and CImg<>::load_pandore(): Check that dimensions encoded in file does not exceed file size. |
e1000e_set_dlen(E1000ECore *core, int index, uint32_t val)
{
core->mac[index] = val & E1000_XDLEN_MASK;
} | 0 | [
"CWE-835"
] | qemu | 4154c7e03fa55b4cf52509a83d50d6c09d743b77 | 72,174,334,312,010,180,000,000,000,000,000,000,000 | 4 | net: e1000e: fix an infinite loop issue
This issue is like the issue in e1000 network card addressed in
this commit:
e1000: eliminate infinite loops on out-of-bounds transfer start.
Signed-off-by: Li Qiang <[email protected]>
Reviewed-by: Dmitry Fleytman <[email protected]>
Signed-off-by: Jason Wang <[email protected]> |
static int asn1_d2i_read_bio(BIO *in, BUF_MEM **pb)
{
BUF_MEM *b;
unsigned char *p;
int i;
size_t want = HEADER_SIZE;
int eos = 0;
size_t off = 0;
size_t len = 0;
const unsigned char *q;
long slen;
int inf, tag, xclass;
b = BUF_MEM_new();
if (b == NULL) {
ASN1err(ASN1_F_ASN1_D2I_READ_BIO, ERR_R_MALLOC_FAILURE);
return -1;
}
ERR_clear_error();
for (;;) {
if (want >= (len - off)) {
want -= (len - off);
if (len + want < len || !BUF_MEM_grow_clean(b, len + want)) {
ASN1err(ASN1_F_ASN1_D2I_READ_BIO, ERR_R_MALLOC_FAILURE);
goto err;
}
i = BIO_read(in, &(b->data[len]), want);
if ((i < 0) && ((len - off) == 0)) {
ASN1err(ASN1_F_ASN1_D2I_READ_BIO, ASN1_R_NOT_ENOUGH_DATA);
goto err;
}
if (i > 0) {
if (len + i < len) {
ASN1err(ASN1_F_ASN1_D2I_READ_BIO, ASN1_R_TOO_LONG);
goto err;
}
len += i;
}
}
/* else data already loaded */
p = (unsigned char *)&(b->data[off]);
q = p;
inf = ASN1_get_object(&q, &slen, &tag, &xclass, len - off);
if (inf & 0x80) {
unsigned long e;
e = ERR_GET_REASON(ERR_peek_error());
if (e != ASN1_R_TOO_LONG)
goto err;
else
ERR_clear_error(); /* clear error */
}
i = q - p; /* header length */
off += i; /* end of data */
if (inf & 1) {
/* no data body so go round again */
eos++;
if (eos < 0) {
ASN1err(ASN1_F_ASN1_D2I_READ_BIO, ASN1_R_HEADER_TOO_LONG);
goto err;
}
want = HEADER_SIZE;
} else if (eos && (slen == 0) && (tag == V_ASN1_EOC)) {
/* eos value, so go back and read another header */
eos--;
if (eos <= 0)
break;
else
want = HEADER_SIZE;
} else {
/* suck in slen bytes of data */
want = slen;
if (want > (len - off)) {
size_t chunk_max = ASN1_CHUNK_INITIAL_SIZE;
want -= (len - off);
if (want > INT_MAX /* BIO_read takes an int length */ ||
len + want < len) {
ASN1err(ASN1_F_ASN1_D2I_READ_BIO, ASN1_R_TOO_LONG);
goto err;
}
while (want > 0) {
/*
* Read content in chunks of increasing size
* so we can return an error for EOF without
* having to allocate the entire content length
* in one go.
*/
size_t chunk = want > chunk_max ? chunk_max : want;
if (!BUF_MEM_grow_clean(b, len + chunk)) {
ASN1err(ASN1_F_ASN1_D2I_READ_BIO, ERR_R_MALLOC_FAILURE);
goto err;
}
want -= chunk;
while (chunk > 0) {
i = BIO_read(in, &(b->data[len]), chunk);
if (i <= 0) {
ASN1err(ASN1_F_ASN1_D2I_READ_BIO,
ASN1_R_NOT_ENOUGH_DATA);
goto err;
}
/*
* This can't overflow because |len+want| didn't
* overflow.
*/
len += i;
chunk -= i;
}
if (chunk_max < INT_MAX/2)
chunk_max *= 2;
}
}
if (off + slen < off) {
ASN1err(ASN1_F_ASN1_D2I_READ_BIO, ASN1_R_TOO_LONG);
goto err;
}
off += slen;
if (eos <= 0) {
break;
} else
want = HEADER_SIZE;
}
}
if (off > INT_MAX) {
ASN1err(ASN1_F_ASN1_D2I_READ_BIO, ASN1_R_TOO_LONG);
goto err;
}
*pb = b;
return off;
err:
BUF_MEM_free(b);
return -1;
} | 0 | [
"CWE-399"
] | openssl | c62981390d6cf9e3d612c489b8b77c2913b25807 | 141,143,285,656,599,970,000,000,000,000,000,000,000 | 141 | Harden ASN.1 BIO handling of large amounts of data.
If the ASN.1 BIO is presented with a large length field read it in
chunks of increasing size checking for EOF on each read. This prevents
small files allocating excessive amounts of data.
CVE-2016-2109
Thanks to Brian Carpenter for reporting this issue.
Reviewed-by: Viktor Dukhovni <[email protected]> |
static inline bool biovec_phys_mergeable(struct request_queue *q,
struct bio_vec *vec1, struct bio_vec *vec2)
{
unsigned long mask = queue_segment_boundary(q);
phys_addr_t addr1 = page_to_phys(vec1->bv_page) + vec1->bv_offset;
phys_addr_t addr2 = page_to_phys(vec2->bv_page) + vec2->bv_offset;
if (addr1 + vec1->bv_len != addr2)
return false;
if (xen_domain() && !xen_biovec_phys_mergeable(vec1, vec2->bv_page))
return false;
if ((addr1 | mask) != ((addr2 + vec2->bv_len - 1) | mask))
return false;
return true;
} | 0 | [
"CWE-416"
] | linux | c3e2219216c92919a6bd1711f340f5faa98695e6 | 237,604,727,325,684,300,000,000,000,000,000,000,000 | 15 | block: free sched's request pool in blk_cleanup_queue
In theory, IO scheduler belongs to request queue, and the request pool
of sched tags belongs to the request queue too.
However, the current tags allocation interfaces are re-used for both
driver tags and sched tags, and driver tags is definitely host wide,
and doesn't belong to any request queue, same with its request pool.
So we need tagset instance for freeing request of sched tags.
Meantime, blk_mq_free_tag_set() often follows blk_cleanup_queue() in case
of non-BLK_MQ_F_TAG_SHARED, this way requires that request pool of sched
tags to be freed before calling blk_mq_free_tag_set().
Commit 47cdee29ef9d94e ("block: move blk_exit_queue into __blk_release_queue")
moves blk_exit_queue into __blk_release_queue for simplying the fast
path in generic_make_request(), then causes oops during freeing requests
of sched tags in __blk_release_queue().
Fix the above issue by move freeing request pool of sched tags into
blk_cleanup_queue(), this way is safe becasue queue has been frozen and no any
in-queue requests at that time. Freeing sched tags has to be kept in queue's
release handler becasue there might be un-completed dispatch activity
which might refer to sched tags.
Cc: Bart Van Assche <[email protected]>
Cc: Christoph Hellwig <[email protected]>
Fixes: 47cdee29ef9d94e485eb08f962c74943023a5271 ("block: move blk_exit_queue into __blk_release_queue")
Tested-by: Yi Zhang <[email protected]>
Reported-by: kernel test robot <[email protected]>
Signed-off-by: Ming Lei <[email protected]>
Signed-off-by: Jens Axboe <[email protected]> |