| func (string, 0–484k chars) | target (int64, 0–1) | cwe (list, 0–4 entries) | project (string, 799 classes) | commit_id (string, 40 chars) | hash (float64, ≈1.2e24–3.4e29) | size (int64, 1–24k) | message (string, 0–13.3k chars) |
|---|---|---|---|---|---|---|---|
win_strncat_to_utf16(struct archive_string *as16, const void *_p,
size_t length, struct archive_string_conv *sc, int bigendian)
{
const char *s = (const char *)_p;
char *u16;
size_t count, avail;
if (archive_string_ensure(as16,
as16->length + (length + 1) * 2) == NULL)
return (-1);
u16 = as16->s + as16->length;
avail = as16->buffer_length - 2;
if (sc->from_cp == CP_C_LOCALE) {
/*
* "C" locale special process.
*/
count = 0;
while (count < length && *s) {
if (bigendian)
archive_be16enc(u16, *s);
else
archive_le16enc(u16, *s);
u16 += 2;
s++;
count++;
}
as16->length += count << 1;
as16->s[as16->length] = 0;
as16->s[as16->length+1] = 0;
return (0);
}
do {
count = MultiByteToWideChar(sc->from_cp,
MB_PRECOMPOSED, s, (int)length, (LPWSTR)u16, (int)avail>>1);
/* Exit loop if we succeeded */
if (count != 0 ||
GetLastError() != ERROR_INSUFFICIENT_BUFFER) {
break;
}
/* Expand buffer and try again */
count = MultiByteToWideChar(sc->from_cp,
MB_PRECOMPOSED, s, (int)length, NULL, 0);
if (archive_string_ensure(as16, (count +1) * 2)
== NULL)
return (-1);
u16 = as16->s + as16->length;
avail = as16->buffer_length - 2;
} while (1);
as16->length += count * 2;
as16->s[as16->length] = 0;
as16->s[as16->length+1] = 0;
if (count == 0)
return (-1);
if (is_big_endian()) {
if (!bigendian) {
while (count > 0) {
uint16_t v = archive_be16dec(u16);
archive_le16enc(u16, v);
u16 += 2;
count--;
}
}
} else {
if (bigendian) {
while (count > 0) {
uint16_t v = archive_le16dec(u16);
archive_be16enc(u16, v);
u16 += 2;
count--;
}
}
}
return (0);
}
| 0 |
[
"CWE-476"
] |
libarchive
|
42a3408ac7df1e69bea9ea12b72e14f59f7400c0
| 221,252,768,700,430,400,000,000,000,000,000,000,000 | 76 |
archive_strncat_l(): allocate and do not convert if length == 0
This ensures e.g. that archive_mstring_copy_mbs_len_l() does not set
aes_set = AES_SET_MBS with aes_mbs.s == NULL.
Resolves possible null-pointer dereference reported by OSS-Fuzz.
Reported-By: OSS-Fuzz issue 286
|
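The fix above is easiest to see as a standalone sketch. The names below (mstring, mstring_copy_len) are hypothetical, not libarchive's real internals; the point is that the buffer is allocated even when the length is zero, so the "set" flag is never raised while the string pointer is still NULL:

```c
#include <stdlib.h>
#include <string.h>

/* Hypothetical string holder, loosely modeled on archive_mstring. */
struct mstring {
    char *s;        /* may stay NULL until something is appended */
    size_t length;
    int set;        /* stands in for the AES_SET_MBS-style flag */
};

/* Append up to n bytes. Allocate even when n == 0 so that "set" is
 * never raised while s is still NULL (the null-deref the fix closes). */
static int mstring_copy_len(struct mstring *m, const char *p, size_t n)
{
    char *buf = realloc(m->s, m->length + n + 1);
    if (buf == NULL)
        return -1;
    m->s = buf;
    if (n > 0)
        memcpy(m->s + m->length, p, n);
    m->length += n;
    m->s[m->length] = '\0';
    m->set = 1;     /* safe: m->s is guaranteed non-NULL here */
    return 0;
}
```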
static int smack_task_movememory(struct task_struct *p)
{
return smk_curacc_on_task(p, MAY_WRITE);
}
| 0 |
[] |
linux-2.6
|
ee18d64c1f632043a02e6f5ba5e045bb26a5465f
| 319,795,735,611,563,730,000,000,000,000,000,000,000 | 4 |
KEYS: Add a keyctl to install a process's session keyring on its parent [try #6]
Add a keyctl to install a process's session keyring onto its parent. This
replaces the parent's session keyring. Because the COW credential code does
not permit one process to change another process's credentials directly, the
change is deferred until userspace next starts executing again. Normally this
will be after a wait*() syscall.
To support this, three new security hooks have been provided:
cred_alloc_blank() to allocate unset security creds, cred_transfer() to fill in
the blank security creds and key_session_to_parent() - which asks the LSM if
the process may replace its parent's session keyring.
The replacement may only happen if the process has the same ownership details
as its parent, and the process has LINK permission on the session keyring, and
the session keyring is owned by the process, and the LSM permits it.
Note that this requires alteration to each architecture's notify_resume path.
This has been done for all arches barring blackfin, m68k* and xtensa, all of
which need assembly alteration to support TIF_NOTIFY_RESUME. This allows the
replacement to be performed at the point the parent process resumes userspace
execution.
This allows the userspace AFS pioctl emulation to fully emulate newpag() and
the VIOCSETTOK and VIOCSETTOK2 pioctls, all of which require the ability to
alter the parent process's PAG membership. However, since kAFS doesn't use
PAGs per se, but rather dumps the keys into the session keyring, the session
keyring of the parent must be replaced if, for example, VIOCSETTOK is passed
the newpag flag.
This can be tested with the following program:
#include <stdio.h>
#include <stdlib.h>
#include <keyutils.h>
#define KEYCTL_SESSION_TO_PARENT 18
#define OSERROR(X, S) do { if ((long)(X) == -1) { perror(S); exit(1); } } while(0)
int main(int argc, char **argv)
{
key_serial_t keyring, key;
long ret;
keyring = keyctl_join_session_keyring(argv[1]);
OSERROR(keyring, "keyctl_join_session_keyring");
key = add_key("user", "a", "b", 1, keyring);
OSERROR(key, "add_key");
ret = keyctl(KEYCTL_SESSION_TO_PARENT);
OSERROR(ret, "KEYCTL_SESSION_TO_PARENT");
return 0;
}
Compiled and linked with -lkeyutils, you should see something like:
[dhowells@andromeda ~]$ keyctl show
Session Keyring
-3 --alswrv 4043 4043 keyring: _ses
355907932 --alswrv 4043 -1 \_ keyring: _uid.4043
[dhowells@andromeda ~]$ /tmp/newpag
[dhowells@andromeda ~]$ keyctl show
Session Keyring
-3 --alswrv 4043 4043 keyring: _ses
1055658746 --alswrv 4043 4043 \_ user: a
[dhowells@andromeda ~]$ /tmp/newpag hello
[dhowells@andromeda ~]$ keyctl show
Session Keyring
-3 --alswrv 4043 4043 keyring: hello
340417692 --alswrv 4043 4043 \_ user: a
Where the test program creates a new session keyring, sticks a user key named
'a' into it and then installs it on its parent.
Signed-off-by: David Howells <[email protected]>
Signed-off-by: James Morris <[email protected]>
|
TEST_P(Http2CodecImplFlowControlTest, LargeServerBodyFlushTimeout) {
initialize();
InSequence s;
MockStreamCallbacks client_stream_callbacks;
request_encoder_->getStream().addCallbacks(client_stream_callbacks);
TestRequestHeaderMapImpl request_headers;
HttpTestUtility::addDefaultHeaders(request_headers);
EXPECT_CALL(request_decoder_, decodeHeaders_(_, true));
request_encoder_->encodeHeaders(request_headers, true);
ON_CALL(client_connection_, write(_, _))
.WillByDefault(
Invoke([&](Buffer::Instance& data, bool) -> void { server_wrapper_.buffer_.add(data); }));
TestResponseHeaderMapImpl response_headers{{":status", "200"}};
EXPECT_CALL(response_decoder_, decodeHeaders_(_, false));
response_encoder_->encodeHeaders(response_headers, false);
EXPECT_CALL(response_decoder_, decodeData(_, false)).Times(AtLeast(1));
auto flush_timer = new Event::MockTimer(&server_connection_.dispatcher_);
EXPECT_CALL(*flush_timer, enableTimer(std::chrono::milliseconds(30000), _));
Buffer::OwnedImpl body(std::string(1024 * 1024, 'a'));
response_encoder_->encodeData(body, true);
// Invoke a stream flush timeout. Make sure we don't get a reset locally for higher layers but
// we do get a reset on the client.
EXPECT_CALL(server_stream_callbacks_, onResetStream(_, _)).Times(0);
EXPECT_CALL(client_stream_callbacks, onResetStream(StreamResetReason::RemoteReset, _));
flush_timer->invokeCallback();
EXPECT_EQ(1, server_stats_store_.counter("http2.tx_flush_timeout").value());
}
| 0 |
[
"CWE-400"
] |
envoy
|
0e49a495826ea9e29134c1bd54fdeb31a034f40c
| 229,621,562,660,635,730,000,000,000,000,000,000,000 | 30 |
http/2: add stats and stream flush timeout (#139)
This commit adds a new stream flush timeout to guard against a
remote server that does not open window once an entire stream has
been buffered for flushing. Additional stats have also been added
to better understand the codecs view of active streams as well as
amount of data buffered.
Signed-off-by: Matt Klein <[email protected]>
|
NAN_METHOD(XmlDocument::GetDtd)
{
Nan::HandleScope scope;
XmlDocument* document = Nan::ObjectWrap::Unwrap<XmlDocument>(info.Holder());
assert(document);
xmlDtdPtr dtd = xmlGetIntSubset(document->xml_obj);
if (!dtd) {
return info.GetReturnValue().Set(Nan::Null());
}
const char* name = (const char *)dtd->name;
const char* extId = (const char *)dtd->ExternalID;
const char* sysId = (const char *)dtd->SystemID;
v8::Local<v8::Object> dtdObj = Nan::New<v8::Object>();
v8::Local<v8::Value> nameValue = (v8::Local<v8::Value>)Nan::Null();
v8::Local<v8::Value> extValue = (v8::Local<v8::Value>)Nan::Null();
v8::Local<v8::Value> sysValue = (v8::Local<v8::Value>)Nan::Null();
if (name != NULL) {
nameValue = (v8::Local<v8::Value>)Nan::New<v8::String>(name, strlen(name)).ToLocalChecked();
}
if (extId != NULL) {
extValue = (v8::Local<v8::Value>)Nan::New<v8::String>(extId, strlen(extId)).ToLocalChecked();
}
if (sysId != NULL) {
sysValue = (v8::Local<v8::Value>)Nan::New<v8::String>(sysId, strlen(sysId)).ToLocalChecked();
}
Nan::Set(dtdObj, Nan::New<v8::String>("name").ToLocalChecked(), nameValue);
Nan::Set(dtdObj, Nan::New<v8::String>("externalId").ToLocalChecked(), extValue);
Nan::Set(dtdObj, Nan::New<v8::String>("systemId").ToLocalChecked(), sysValue);
return info.GetReturnValue().Set(dtdObj);
}
| 0 |
[
"CWE-400"
] |
libxmljs
|
2501807bde9b38cfaed06d1e140487516d91379d
| 203,883,366,738,996,660,000,000,000,000,000,000,000 | 43 |
Ensure parseXml/parseHtml input is string or buffer (#594)
|
int pmd_huge(pmd_t pmd)
{
return !pmd_none(pmd) &&
(pmd_val(pmd) & (_PAGE_PRESENT|_PAGE_PSE)) != _PAGE_PRESENT;
}
| 0 |
[
"CWE-119"
] |
linux
|
1be7107fbe18eed3e319a6c3e83c78254b693acb
| 67,930,346,351,025,450,000,000,000,000,000,000,000 | 5 |
mm: larger stack guard gap, between vmas
Stack guard page is a useful feature to reduce a risk of stack smashing
into a different mapping. We have been using a single page gap which
is sufficient to prevent having stack adjacent to a different mapping.
But this seems to be insufficient in the light of the stack usage in
userspace. E.g. glibc uses as large as 64kB alloca() in many commonly
used functions. Others use constructs like gid_t buffer[NGROUPS_MAX]
which is 256kB or stack strings with MAX_ARG_STRLEN.
This will become especially dangerous for suid binaries and the default
no limit for the stack size limit because those applications can be
tricked to consume a large portion of the stack and a single glibc call
could jump over the guard page. These attacks are not theoretical,
unfortunately.
Make those attacks less probable by increasing the stack guard gap
to 1MB (on systems with 4k pages; but make it depend on the page size
because systems with larger base pages might cap stack allocations in
the PAGE_SIZE units) which should cover larger alloca() and VLA stack
allocations. It is obviously not a full fix because the problem is
somehow inherent, but it should reduce attack space a lot.
One could argue that the gap size should be configurable from userspace,
but that can be done later when somebody finds that the new 1MB is wrong
for some special case applications. For now, add a kernel command line
option (stack_guard_gap) to specify the stack gap size (in page units).
Implementation wise, first delete all the old code for stack guard page:
because although we could get away with accounting one extra page in a
stack vma, accounting a larger gap can break userspace - case in point,
a program run with "ulimit -S -v 20000" failed when the 1MB gap was
counted for RLIMIT_AS; similar problems could come with RLIMIT_MLOCK
and strict non-overcommit mode.
Instead of keeping gap inside the stack vma, maintain the stack guard
gap as a gap between vmas: using vm_start_gap() in place of vm_start
(or vm_end_gap() in place of vm_end if VM_GROWSUP) in just those few
places which need to respect the gap - mainly arch_get_unmapped_area(),
and the vma tree's subtree_gap support for that.
Original-patch-by: Oleg Nesterov <[email protected]>
Original-patch-by: Michal Hocko <[email protected]>
Signed-off-by: Hugh Dickins <[email protected]>
Acked-by: Michal Hocko <[email protected]>
Tested-by: Helge Deller <[email protected]> # parisc
Signed-off-by: Linus Torvalds <[email protected]>
|
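The commit above describes the mechanism precisely; here is a condensed, compilable sketch of the vm_start_gap() idea with kernel types replaced by plain C. The underflow clamp matters for stacks mapped near address zero:

```c
#include <stdio.h>

#define PAGE_SIZE    4096UL
#define VM_GROWSDOWN 0x1UL

/* Default gap: 256 pages (1MB with 4k pages); the real kernel lets
 * the stack_guard_gap= command-line option override this. */
static unsigned long stack_guard_gap = 256UL * PAGE_SIZE;

struct vma { unsigned long vm_start, vm_end, vm_flags; };

/* Effective start of a VMA for placement decisions: a stack that
 * grows down reserves an extra guard gap below vm_start. */
static unsigned long vm_start_gap(const struct vma *vma)
{
    unsigned long vm_start = vma->vm_start;
    if (vma->vm_flags & VM_GROWSDOWN) {
        vm_start -= stack_guard_gap;
        if (vm_start > vma->vm_start)   /* wrapped past 0: clamp */
            vm_start = 0;
    }
    return vm_start;
}

int main(void)
{
    struct vma stack = { 0x7f0000000000UL, 0x7f0000100000UL, VM_GROWSDOWN };
    printf("other mappings must end at or below %#lx\n", vm_start_gap(&stack));
    return 0;
}
```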
static int handle_ip_over_ddp(struct sk_buff *skb)
{
struct net_device *dev = __dev_get_by_name(&init_net, "ipddp0");
struct net_device_stats *stats;
/* This needs to be able to handle ipddp"N" devices */
if (!dev) {
kfree_skb(skb);
return NET_RX_DROP;
}
skb->protocol = htons(ETH_P_IP);
skb_pull(skb, 13);
skb->dev = dev;
skb_reset_transport_header(skb);
stats = netdev_priv(dev);
stats->rx_packets++;
stats->rx_bytes += skb->len + 13;
return netif_rx(skb); /* Send the SKB up to a higher place. */
}
| 0 |
[
"CWE-416"
] |
linux
|
6377f787aeb945cae7abbb6474798de129e1f3ac
| 215,341,547,246,117,080,000,000,000,000,000,000,000 | 21 |
appletalk: Fix use-after-free in atalk_proc_exit
KASAN report this:
BUG: KASAN: use-after-free in pde_subdir_find+0x12d/0x150 fs/proc/generic.c:71
Read of size 8 at addr ffff8881f41fe5b0 by task syz-executor.0/2806
CPU: 0 PID: 2806 Comm: syz-executor.0 Not tainted 5.0.0-rc7+ #45
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.10.2-1ubuntu1 04/01/2014
Call Trace:
__dump_stack lib/dump_stack.c:77 [inline]
dump_stack+0xfa/0x1ce lib/dump_stack.c:113
print_address_description+0x65/0x270 mm/kasan/report.c:187
kasan_report+0x149/0x18d mm/kasan/report.c:317
pde_subdir_find+0x12d/0x150 fs/proc/generic.c:71
remove_proc_entry+0xe8/0x420 fs/proc/generic.c:667
atalk_proc_exit+0x18/0x820 [appletalk]
atalk_exit+0xf/0x5a [appletalk]
__do_sys_delete_module kernel/module.c:1018 [inline]
__se_sys_delete_module kernel/module.c:961 [inline]
__x64_sys_delete_module+0x3dc/0x5e0 kernel/module.c:961
do_syscall_64+0x147/0x600 arch/x86/entry/common.c:290
entry_SYSCALL_64_after_hwframe+0x49/0xbe
RIP: 0033:0x462e99
Code: f7 d8 64 89 02 b8 ff ff ff ff c3 66 0f 1f 44 00 00 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 c7 c1 bc ff ff ff f7 d8 64 89 01 48
RSP: 002b:00007fb2de6b9c58 EFLAGS: 00000246 ORIG_RAX: 00000000000000b0
RAX: ffffffffffffffda RBX: 000000000073bf00 RCX: 0000000000462e99
RDX: 0000000000000000 RSI: 0000000000000000 RDI: 00000000200001c0
RBP: 0000000000000002 R08: 0000000000000000 R09: 0000000000000000
R10: 0000000000000000 R11: 0000000000000246 R12: 00007fb2de6ba6bc
R13: 00000000004bccaa R14: 00000000006f6bc8 R15: 00000000ffffffff
Allocated by task 2806:
set_track mm/kasan/common.c:85 [inline]
__kasan_kmalloc.constprop.3+0xa0/0xd0 mm/kasan/common.c:496
slab_post_alloc_hook mm/slab.h:444 [inline]
slab_alloc_node mm/slub.c:2739 [inline]
slab_alloc mm/slub.c:2747 [inline]
kmem_cache_alloc+0xcf/0x250 mm/slub.c:2752
kmem_cache_zalloc include/linux/slab.h:730 [inline]
__proc_create+0x30f/0xa20 fs/proc/generic.c:408
proc_mkdir_data+0x47/0x190 fs/proc/generic.c:469
0xffffffffc10c01bb
0xffffffffc10c0166
do_one_initcall+0xfa/0x5ca init/main.c:887
do_init_module+0x204/0x5f6 kernel/module.c:3460
load_module+0x66b2/0x8570 kernel/module.c:3808
__do_sys_finit_module+0x238/0x2a0 kernel/module.c:3902
do_syscall_64+0x147/0x600 arch/x86/entry/common.c:290
entry_SYSCALL_64_after_hwframe+0x49/0xbe
Freed by task 2806:
set_track mm/kasan/common.c:85 [inline]
__kasan_slab_free+0x130/0x180 mm/kasan/common.c:458
slab_free_hook mm/slub.c:1409 [inline]
slab_free_freelist_hook mm/slub.c:1436 [inline]
slab_free mm/slub.c:2986 [inline]
kmem_cache_free+0xa6/0x2a0 mm/slub.c:3002
pde_put+0x6e/0x80 fs/proc/generic.c:647
remove_proc_entry+0x1d3/0x420 fs/proc/generic.c:684
0xffffffffc10c031c
0xffffffffc10c0166
do_one_initcall+0xfa/0x5ca init/main.c:887
do_init_module+0x204/0x5f6 kernel/module.c:3460
load_module+0x66b2/0x8570 kernel/module.c:3808
__do_sys_finit_module+0x238/0x2a0 kernel/module.c:3902
do_syscall_64+0x147/0x600 arch/x86/entry/common.c:290
entry_SYSCALL_64_after_hwframe+0x49/0xbe
The buggy address belongs to the object at ffff8881f41fe500
which belongs to the cache proc_dir_entry of size 256
The buggy address is located 176 bytes inside of
256-byte region [ffff8881f41fe500, ffff8881f41fe600)
The buggy address belongs to the page:
page:ffffea0007d07f80 count:1 mapcount:0 mapping:ffff8881f6e69a00 index:0x0
flags: 0x2fffc0000000200(slab)
raw: 02fffc0000000200 dead000000000100 dead000000000200 ffff8881f6e69a00
raw: 0000000000000000 00000000800c000c 00000001ffffffff 0000000000000000
page dumped because: kasan: bad access detected
Memory state around the buggy address:
ffff8881f41fe480: fb fb fb fb fb fb fb fb fc fc fc fc fc fc fc fc
ffff8881f41fe500: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
>ffff8881f41fe580: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
^
ffff8881f41fe600: fc fc fc fc fc fc fc fc fb fb fb fb fb fb fb fb
ffff8881f41fe680: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
atalk_init should check whether atalk_proc_init fails; otherwise
atalk_exit will trigger a use-after-free in pde_subdir_find while
unloading the module. This patch fixes the error cleanup path of
atalk_init.
Reported-by: Hulk Robot <[email protected]>
Signed-off-by: YueHaibing <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
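The pattern behind this fix is generic: check every init step and unwind only what succeeded, so that module exit never touches state that was never registered. A minimal sketch with hypothetical step names (not the real appletalk code):

```c
#include <stdio.h>

/* Hypothetical init steps standing in for atalk's proc/socket setup. */
static int  sock_init(void) { return 0; }
static void sock_exit(void) { }
static int  proc_init(void) { return -1; /* pretend /proc setup failed */ }

/* Check each step; on failure, undo the steps that succeeded, in
 * reverse order, and report the error instead of returning success. */
static int module_init_checked(void)
{
    int rc = sock_init();
    if (rc)
        return rc;
    rc = proc_init();
    if (rc)
        goto out_sock;
    return 0;
out_sock:
    sock_exit();
    return rc;
}

int main(void)
{
    printf("init: %d\n", module_init_checked());
    return 0;
}
```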
static int unix_seqpacket_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t size,
int flags)
{
struct sock *sk = sock->sk;
if (sk->sk_state != TCP_ESTABLISHED)
return -ENOTCONN;
return unix_dgram_recvmsg(iocb, sock, msg, size, flags);
}
| 0 |
[] |
linux-2.6
|
16e5726269611b71c930054ffe9b858c1cea88eb
| 270,556,754,145,714,700,000,000,000,000,000,000,000 | 11 |
af_unix: don't send SCM_CREDENTIALS by default
Since commit 7361c36c5224 (af_unix: Allow credentials to work across
user and pid namespaces) af_unix performance dropped a lot.
This is because we now take a reference on pid and cred in each write(),
and release them in read(), usually done from another process,
eventually from another cpu. This triggers false sharing.
# Events: 154K cycles
#
# Overhead Command Shared Object Symbol
# ........ ....... .................. .........................
#
10.40% hackbench [kernel.kallsyms] [k] put_pid
8.60% hackbench [kernel.kallsyms] [k] unix_stream_recvmsg
7.87% hackbench [kernel.kallsyms] [k] unix_stream_sendmsg
6.11% hackbench [kernel.kallsyms] [k] do_raw_spin_lock
4.95% hackbench [kernel.kallsyms] [k] unix_scm_to_skb
4.87% hackbench [kernel.kallsyms] [k] pid_nr_ns
4.34% hackbench [kernel.kallsyms] [k] cred_to_ucred
2.39% hackbench [kernel.kallsyms] [k] unix_destruct_scm
2.24% hackbench [kernel.kallsyms] [k] sub_preempt_count
1.75% hackbench [kernel.kallsyms] [k] fget_light
1.51% hackbench [kernel.kallsyms] [k]
__mutex_lock_interruptible_slowpath
1.42% hackbench [kernel.kallsyms] [k] sock_alloc_send_pskb
This patch includes SCM_CREDENTIALS information in a af_unix message/skb
only if requested by the sender, [man 7 unix for details how to include
ancillary data using sendmsg() system call]
Note: This might break buggy applications that expected SCM_CREDENTIALS
from an unaware write() system call with a receiver not using the
SO_PASSCRED socket option.
If SOCK_PASSCRED is set on source or destination socket, we still
include credentials for mere write() syscalls.
Performance boost in hackbench : more than 50% gain on a 16 thread
machine (2 quad-core cpus, 2 threads per core)
hackbench 20 thread 2000
4.228 sec instead of 9.102 sec
Signed-off-by: Eric Dumazet <[email protected]>
Acked-by: Tim Chen <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
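After this change, a sender that wants credentials delivered must attach them explicitly as ancillary data. A minimal userspace sketch of that sendmsg() call, following the pattern documented in man 7 unix and cmsg(3); the receiver must also enable SO_PASSCRED to see the result:

```c
#define _GNU_SOURCE
#include <string.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <unistd.h>

/* Send one byte over a connected AF_UNIX socket, explicitly attaching
 * our credentials as SCM_CREDENTIALS ancillary data. */
static ssize_t send_with_creds(int fd)
{
    char data = 'x';
    struct iovec iov = { .iov_base = &data, .iov_len = 1 };
    union {
        char buf[CMSG_SPACE(sizeof(struct ucred))];
        struct cmsghdr align;   /* forces correct alignment */
    } u = { 0 };
    struct msghdr msg = {
        .msg_iov = &iov, .msg_iovlen = 1,
        .msg_control = u.buf, .msg_controllen = sizeof(u.buf),
    };
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
    struct ucred creds = { .pid = getpid(), .uid = getuid(), .gid = getgid() };

    cmsg->cmsg_level = SOL_SOCKET;
    cmsg->cmsg_type = SCM_CREDENTIALS;
    cmsg->cmsg_len = CMSG_LEN(sizeof(creds));
    memcpy(CMSG_DATA(cmsg), &creds, sizeof(creds));
    return sendmsg(fd, &msg, 0);
}
```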
static int fib6_rule_action_alt(struct fib_rule *rule, struct flowi *flp,
int flags, struct fib_lookup_arg *arg)
{
struct fib6_result *res = arg->result;
struct flowi6 *flp6 = &flp->u.ip6;
struct net *net = rule->fr_net;
struct fib6_table *table;
int err, *oif;
u32 tb_id;
switch (rule->action) {
case FR_ACT_TO_TBL:
break;
case FR_ACT_UNREACHABLE:
return -ENETUNREACH;
case FR_ACT_PROHIBIT:
return -EACCES;
case FR_ACT_BLACKHOLE:
default:
return -EINVAL;
}
tb_id = fib_rule_get_table(rule, arg);
table = fib6_get_table(net, tb_id);
if (!table)
return -EAGAIN;
oif = (int *)arg->lookup_data;
err = fib6_table_lookup(net, table, *oif, flp6, res, flags);
if (!err && res->f6i != net->ipv6.fib6_null_entry)
err = fib6_rule_saddr(net, rule, flags, flp6,
res->nh->fib_nh_dev);
else
err = -EAGAIN;
return err;
}
| 0 |
[
"CWE-772",
"CWE-401"
] |
linux
|
ca7a03c4175366a92cee0ccc4fec0038c3266e26
| 267,177,460,498,958,460,000,000,000,000,000,000,000 | 37 |
ipv6: do not free rt if FIB_LOOKUP_NOREF is set on suppress rule
Commit 7d9e5f422150 removed references from certain dsts, but accounting
for this never translated down into the fib6 suppression code. This bug
was triggered by WireGuard users who use wg-quick(8), which uses the
"suppress-prefix" directive to ip-rule(8) for routing all of their
internet traffic without routing loops. The test case added here
causes the reference underflow by causing packets to evaluate a suppress
rule.
Fixes: 7d9e5f422150 ("ipv6: convert major tx path to use RT6_LOOKUP_F_DST_NOREF")
Signed-off-by: Jason A. Donenfeld <[email protected]>
Acked-by: Wei Wang <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
static int selinux_shm_shmctl(struct kern_ipc_perm *shp, int cmd)
{
int perms;
int err;
switch (cmd) {
case IPC_INFO:
case SHM_INFO:
/* No specific object, just general system-wide information. */
return avc_has_perm(&selinux_state,
current_sid(), SECINITSID_KERNEL,
SECCLASS_SYSTEM, SYSTEM__IPC_INFO, NULL);
case IPC_STAT:
case SHM_STAT:
case SHM_STAT_ANY:
perms = SHM__GETATTR | SHM__ASSOCIATE;
break;
case IPC_SET:
perms = SHM__SETATTR;
break;
case SHM_LOCK:
case SHM_UNLOCK:
perms = SHM__LOCK;
break;
case IPC_RMID:
perms = SHM__DESTROY;
break;
default:
return 0;
}
err = ipc_has_perm(shp, perms);
return err;
}
| 0 |
[
"CWE-349"
] |
linux
|
fb73974172ffaaf57a7c42f35424d9aece1a5af6
| 255,913,525,325,488,850,000,000,000,000,000,000,000 | 34 |
selinux: properly handle multiple messages in selinux_netlink_send()
Fix the SELinux netlink_send hook to properly handle multiple netlink
messages in a single sk_buff; each message is parsed and subject to
SELinux access control. Prior to this patch, SELinux only inspected
the first message in the sk_buff.
Cc: [email protected]
Reported-by: Dmitry Vyukov <[email protected]>
Reviewed-by: Stephen Smalley <[email protected]>
Signed-off-by: Paul Moore <[email protected]>
|
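The bug class is generic to netlink: one buffer may carry several messages back to back, and each must be checked. A hedged userspace-style sketch using the standard NLMSG_* iteration macros (the kernel-side fix works on an sk_buff, but the walk has the same shape):

```c
#include <linux/netlink.h>

/* Walk every netlink message in a buffer and validate each one.
 * Checking only the first message (the pre-fix behavior) lets any
 * later message in the same buffer bypass the access control. */
static int check_all_messages(void *buf, int len,
                              int (*check)(struct nlmsghdr *))
{
    struct nlmsghdr *nlh;
    for (nlh = buf; NLMSG_OK(nlh, len); nlh = NLMSG_NEXT(nlh, len)) {
        int rc = check(nlh);
        if (rc)
            return rc;  /* reject the whole buffer on first failure */
    }
    return 0;
}
```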
static int ntop_is_dir(lua_State* vm) {
char *path;
struct stat buf;
int rc;
ntop->getTrace()->traceEvent(TRACE_DEBUG, "%s() called", __FUNCTION__);
if(ntop_lua_check(vm, __FUNCTION__, 1, LUA_TSTRING)) return(CONST_LUA_ERROR);
path = (char*)lua_tostring(vm, 1);
rc = ((stat(path, &buf) != 0) || (!S_ISDIR(buf.st_mode))) ? 0 : 1;
lua_pushboolean(vm, rc);
return(CONST_LUA_OK);
}
| 0 |
[
"CWE-476"
] |
ntopng
|
01f47e04fd7c8d54399c9e465f823f0017069f8f
| 12,737,886,623,544,693,000,000,000,000,000,000,000 | 15 |
Security fix: prevents empty host from being used
|
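The message is terse, but given the function above (lua_tostring() may hand back an unusable value), the likely shape of the fix is a guard on NULL or empty input before it is acted on. A hypothetical sketch, not ntopng's actual patch:

```c
/* Reject NULL or empty host/path strings before they reach stat(),
 * redirects, or similar, so an empty value can never be acted on. */
static int value_usable(const char *s)
{
    return s != NULL && s[0] != '\0';
}
```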
irc_server_get_prefix_char_for_mode (struct t_irc_server *server, char mode)
{
const char *prefix_chars;
int index;
if (server)
{
prefix_chars = irc_server_get_prefix_chars (server);
index = irc_server_get_prefix_mode_index (server, mode);
if (index >= 0)
return prefix_chars[index];
}
return ' ';
}
| 0 |
[
"CWE-20"
] |
weechat
|
c265cad1c95b84abfd4e8d861f25926ef13b5d91
| 54,307,838,081,910,880,000,000,000,000,000,000,000 | 15 |
Fix verification of SSL certificates by calling gnutls verify callback (patch #7459)
|
static int btrfs_extent_same(struct inode *src, u64 loff, u64 olen,
struct inode *dst, u64 dst_loff)
{
int ret;
u64 len = olen;
struct cmp_pages cmp;
int same_inode = 0;
u64 same_lock_start = 0;
u64 same_lock_len = 0;
if (src == dst)
same_inode = 1;
if (len == 0)
return 0;
if (same_inode) {
mutex_lock(&src->i_mutex);
ret = extent_same_check_offsets(src, loff, &len, olen);
if (ret)
goto out_unlock;
/*
* Single inode case wants the same checks, except we
* don't want our length pushed out past i_size as
* comparing that data range makes no sense.
*
* extent_same_check_offsets() will do this for an
* unaligned length at i_size, so catch it here and
* reject the request.
*
* This effectively means we require aligned extents
* for the single-inode case, whereas the other cases
* allow an unaligned length so long as it ends at
* i_size.
*/
if (len != olen) {
ret = -EINVAL;
goto out_unlock;
}
/* Check for overlapping ranges */
if (dst_loff + len > loff && dst_loff < loff + len) {
ret = -EINVAL;
goto out_unlock;
}
same_lock_start = min_t(u64, loff, dst_loff);
same_lock_len = max_t(u64, loff, dst_loff) + len - same_lock_start;
} else {
btrfs_double_inode_lock(src, dst);
ret = extent_same_check_offsets(src, loff, &len, olen);
if (ret)
goto out_unlock;
ret = extent_same_check_offsets(dst, dst_loff, &len, olen);
if (ret)
goto out_unlock;
}
/* don't make the dst file partly checksummed */
if ((BTRFS_I(src)->flags & BTRFS_INODE_NODATASUM) !=
(BTRFS_I(dst)->flags & BTRFS_INODE_NODATASUM)) {
ret = -EINVAL;
goto out_unlock;
}
ret = btrfs_cmp_data_prepare(src, loff, dst, dst_loff, olen, &cmp);
if (ret)
goto out_unlock;
if (same_inode)
lock_extent_range(src, same_lock_start, same_lock_len);
else
btrfs_double_extent_lock(src, loff, dst, dst_loff, len);
/* pass original length for comparison so we stay within i_size */
ret = btrfs_cmp_data(src, loff, dst, dst_loff, olen, &cmp);
if (ret == 0)
ret = btrfs_clone(src, dst, loff, olen, len, dst_loff, 1);
if (same_inode)
unlock_extent(&BTRFS_I(src)->io_tree, same_lock_start,
same_lock_start + same_lock_len - 1);
else
btrfs_double_extent_unlock(src, loff, dst, dst_loff, len);
btrfs_cmp_data_free(&cmp);
out_unlock:
if (same_inode)
mutex_unlock(&src->i_mutex);
else
btrfs_double_inode_unlock(src, dst);
return ret;
}
| 0 |
[
"CWE-200"
] |
linux
|
8039d87d9e473aeb740d4fdbd59b9d2f89b2ced9
| 276,846,403,841,236,700,000,000,000,000,000,000,000 | 98 |
Btrfs: fix file corruption and data loss after cloning inline extents
Currently the clone ioctl allows to clone an inline extent from one file
to another that already has other (non-inlined) extents. This is a problem
because btrfs is not designed to deal with files having inline and regular
extents, if a file has an inline extent then it must be the only extent
in the file and must start at file offset 0. Having a file with an inline
extent followed by regular extents results in EIO errors when doing reads
or writes against the first 4K of the file.
Also, the clone ioctl allows one to lose data if the source file consists
of a single inline extent, with a size of N bytes, and the destination
file consists of a single inline extent with a size of M bytes, where we
have M > N. In this case the clone operation removes the inline extent
from the destination file and then copies the inline extent from the
source file into the destination file - we lose the M - N bytes from the
destination file, a read operation will get the value 0x00 for any bytes
in the range [N, M] (the destination inode's i_size remained as M,
that's why we can read past N bytes).
So fix this by not allowing such destructive operations to happen and
return errno EOPNOTSUPP to user space.
Currently the fstest btrfs/035 tests the data loss case but it totally
ignores this - i.e. expects the operation to succeed and does not check
the we got data loss.
The following test case for fstests exercises all these cases that result
in file corruption and data loss:
seq=`basename $0`
seqres=$RESULT_DIR/$seq
echo "QA output created by $seq"
tmp=/tmp/$$
status=1 # failure is the default!
trap "_cleanup; exit \$status" 0 1 2 3 15
_cleanup()
{
rm -f $tmp.*
}
# get standard environment, filters and checks
. ./common/rc
. ./common/filter
# real QA test starts here
_need_to_be_root
_supported_fs btrfs
_supported_os Linux
_require_scratch
_require_cloner
_require_btrfs_fs_feature "no_holes"
_require_btrfs_mkfs_feature "no-holes"
rm -f $seqres.full
test_cloning_inline_extents()
{
local mkfs_opts=$1
local mount_opts=$2
_scratch_mkfs $mkfs_opts >>$seqres.full 2>&1
_scratch_mount $mount_opts
# File bar, the source for all the following clone operations, consists
# of a single inline extent (50 bytes).
$XFS_IO_PROG -f -c "pwrite -S 0xbb 0 50" $SCRATCH_MNT/bar \
| _filter_xfs_io
# Test cloning into a file with an extent (non-inlined) where the
# destination offset overlaps that extent. It should not be possible to
# clone the inline extent from file bar into this file.
$XFS_IO_PROG -f -c "pwrite -S 0xaa 0K 16K" $SCRATCH_MNT/foo \
| _filter_xfs_io
$CLONER_PROG -s 0 -d 0 -l 0 $SCRATCH_MNT/bar $SCRATCH_MNT/foo
# Doing IO against any range in the first 4K of the file should work.
# Due to a past clone ioctl bug which allowed cloning the inline extent,
# these operations resulted in EIO errors.
echo "File foo data after clone operation:"
# All bytes should have the value 0xaa (clone operation failed and did
# not modify our file).
od -t x1 $SCRATCH_MNT/foo
$XFS_IO_PROG -c "pwrite -S 0xcc 0 100" $SCRATCH_MNT/foo | _filter_xfs_io
# Test cloning the inline extent against a file which has a hole in its
# first 4K followed by a non-inlined extent. It should not be possible
# as well to clone the inline extent from file bar into this file.
$XFS_IO_PROG -f -c "pwrite -S 0xdd 4K 12K" $SCRATCH_MNT/foo2 \
| _filter_xfs_io
$CLONER_PROG -s 0 -d 0 -l 0 $SCRATCH_MNT/bar $SCRATCH_MNT/foo2
# Doing IO against any range in the first 4K of the file should work.
# Due to a past clone ioctl bug which allowed cloning the inline extent,
# these operations resulted in EIO errors.
echo "File foo2 data after clone operation:"
# All bytes should have the value 0x00 (clone operation failed and did
# not modify our file).
od -t x1 $SCRATCH_MNT/foo2
$XFS_IO_PROG -c "pwrite -S 0xee 0 90" $SCRATCH_MNT/foo2 | _filter_xfs_io
# Test cloning the inline extent against a file which has a size of zero
# but has a prealloc extent. It should not be possible as well to clone
# the inline extent from file bar into this file.
$XFS_IO_PROG -f -c "falloc -k 0 1M" $SCRATCH_MNT/foo3 | _filter_xfs_io
$CLONER_PROG -s 0 -d 0 -l 0 $SCRATCH_MNT/bar $SCRATCH_MNT/foo3
# Doing IO against any range in the first 4K of the file should work.
# Due to a past clone ioctl bug which allowed cloning the inline extent,
# these operations resulted in EIO errors.
echo "First 50 bytes of foo3 after clone operation:"
# Should not be able to read any bytes, file has 0 bytes i_size (the
# clone operation failed and did not modify our file).
od -t x1 $SCRATCH_MNT/foo3
$XFS_IO_PROG -c "pwrite -S 0xff 0 90" $SCRATCH_MNT/foo3 | _filter_xfs_io
# Test cloning the inline extent against a file which consists of a
# single inline extent that has a size not greater than the size of
# bar's inline extent (40 < 50).
# It should be possible to do the extent cloning from bar to this file.
$XFS_IO_PROG -f -c "pwrite -S 0x01 0 40" $SCRATCH_MNT/foo4 \
| _filter_xfs_io
$CLONER_PROG -s 0 -d 0 -l 0 $SCRATCH_MNT/bar $SCRATCH_MNT/foo4
# Doing IO against any range in the first 4K of the file should work.
echo "File foo4 data after clone operation:"
# Must match file bar's content.
od -t x1 $SCRATCH_MNT/foo4
$XFS_IO_PROG -c "pwrite -S 0x02 0 90" $SCRATCH_MNT/foo4 | _filter_xfs_io
# Test cloning the inline extent against a file which consists of a
# single inline extent that has a size greater than the size of bar's
# inline extent (60 > 50).
# It should not be possible to clone the inline extent from file bar
# into this file.
$XFS_IO_PROG -f -c "pwrite -S 0x03 0 60" $SCRATCH_MNT/foo5 \
| _filter_xfs_io
$CLONER_PROG -s 0 -d 0 -l 0 $SCRATCH_MNT/bar $SCRATCH_MNT/foo5
# Reading the file should not fail.
echo "File foo5 data after clone operation:"
# Must have a size of 60 bytes, with all bytes having a value of 0x03
# (the clone operation failed and did not modify our file).
od -t x1 $SCRATCH_MNT/foo5
# Test cloning the inline extent against a file which has no extents but
# has a size greater than bar's inline extent (16K > 50).
# It should not be possible to clone the inline extent from file bar
# into this file.
$XFS_IO_PROG -f -c "truncate 16K" $SCRATCH_MNT/foo6 | _filter_xfs_io
$CLONER_PROG -s 0 -d 0 -l 0 $SCRATCH_MNT/bar $SCRATCH_MNT/foo6
# Reading the file should not fail.
echo "File foo6 data after clone operation:"
# Must have a size of 16K, with all bytes having a value of 0x00 (the
# clone operation failed and did not modify our file).
od -t x1 $SCRATCH_MNT/foo6
# Test cloning the inline extent against a file which has no extents but
# has a size not greater than bar's inline extent (30 < 50).
# It should be possible to clone the inline extent from file bar into
# this file.
$XFS_IO_PROG -f -c "truncate 30" $SCRATCH_MNT/foo7 | _filter_xfs_io
$CLONER_PROG -s 0 -d 0 -l 0 $SCRATCH_MNT/bar $SCRATCH_MNT/foo7
# Reading the file should not fail.
echo "File foo7 data after clone operation:"
# Must have a size of 50 bytes, with all bytes having a value of 0xbb.
od -t x1 $SCRATCH_MNT/foo7
# Test cloning the inline extent against a file which has a size not
# greater than the size of bar's inline extent (20 < 50) but has
# a prealloc extent that goes beyond the file's size. It should not be
# possible to clone the inline extent from bar into this file.
$XFS_IO_PROG -f -c "falloc -k 0 1M" \
-c "pwrite -S 0x88 0 20" \
$SCRATCH_MNT/foo8 | _filter_xfs_io
$CLONER_PROG -s 0 -d 0 -l 0 $SCRATCH_MNT/bar $SCRATCH_MNT/foo8
echo "File foo8 data after clone operation:"
# Must have a size of 20 bytes, with all bytes having a value of 0x88
# (the clone operation did not modify our file).
od -t x1 $SCRATCH_MNT/foo8
_scratch_unmount
}
echo -e "\nTesting without compression and without the no-holes feature...\n"
test_cloning_inline_extents
echo -e "\nTesting with compression and without the no-holes feature...\n"
test_cloning_inline_extents "" "-o compress"
echo -e "\nTesting without compression and with the no-holes feature...\n"
test_cloning_inline_extents "-O no-holes" ""
echo -e "\nTesting with compression and with the no-holes feature...\n"
test_cloning_inline_extents "-O no-holes" "-o compress"
status=0
exit
Cc: [email protected]
Signed-off-by: Filipe Manana <[email protected]>
|
rpc_C_FindObjectsFinal (CK_X_FUNCTION_LIST *self,
p11_rpc_message *msg)
{
CK_SESSION_HANDLE session;
BEGIN_CALL (FindObjectsFinal);
IN_ULONG (session);
PROCESS_CALL ((self, session));
END_CALL;
}
| 0 |
[
"CWE-190"
] |
p11-kit
|
5307a1d21a50cacd06f471a873a018d23ba4b963
| 43,289,130,450,275,170,000,000,000,000,000,000,000 | 10 |
Check for arithmetic overflows before allocating
|
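The one-line title describes a standard idiom; a minimal sketch of checking the multiplication before allocating:

```c
#include <stdint.h>
#include <stdlib.h>

/* Allocate n elements of "size" bytes each, refusing when n * size
 * would overflow size_t; an unchecked multiply wraps and returns a
 * short buffer that later writes then overrun. */
static void *checked_array_alloc(size_t n, size_t size)
{
    if (size != 0 && n > SIZE_MAX / size)
        return NULL;    /* arithmetic overflow */
    return malloc(n * size);
}
```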
void sqlite3WhereEnd(WhereInfo *pWInfo){
Parse *pParse = pWInfo->pParse;
Vdbe *v = pParse->pVdbe;
int i;
WhereLevel *pLevel;
WhereLoop *pLoop;
SrcList *pTabList = pWInfo->pTabList;
sqlite3 *db = pParse->db;
int iEnd = sqlite3VdbeCurrentAddr(v);
int nRJ = 0;
/* Generate loop termination code.
*/
VdbeModuleComment((v, "End WHERE-core"));
for(i=pWInfo->nLevel-1; i>=0; i--){
int addr;
pLevel = &pWInfo->a[i];
if( pLevel->pRJ ){
/* Terminate the subroutine that forms the interior of the loop of
** the RIGHT JOIN table */
WhereRightJoin *pRJ = pLevel->pRJ;
sqlite3VdbeResolveLabel(v, pLevel->addrCont);
pLevel->addrCont = 0;
pRJ->endSubrtn = sqlite3VdbeCurrentAddr(v);
sqlite3VdbeAddOp3(v, OP_Return, pRJ->regReturn, pRJ->addrSubrtn, 1);
VdbeCoverage(v);
nRJ++;
}
pLoop = pLevel->pWLoop;
if( pLevel->op!=OP_Noop ){
#ifndef SQLITE_DISABLE_SKIPAHEAD_DISTINCT
int addrSeek = 0;
Index *pIdx;
int n;
if( pWInfo->eDistinct==WHERE_DISTINCT_ORDERED
&& i==pWInfo->nLevel-1 /* Ticket [ef9318757b152e3] 2017-10-21 */
&& (pLoop->wsFlags & WHERE_INDEXED)!=0
&& (pIdx = pLoop->u.btree.pIndex)->hasStat1
&& (n = pLoop->u.btree.nDistinctCol)>0
&& pIdx->aiRowLogEst[n]>=36
){
int r1 = pParse->nMem+1;
int j, op;
for(j=0; j<n; j++){
sqlite3VdbeAddOp3(v, OP_Column, pLevel->iIdxCur, j, r1+j);
}
pParse->nMem += n+1;
op = pLevel->op==OP_Prev ? OP_SeekLT : OP_SeekGT;
addrSeek = sqlite3VdbeAddOp4Int(v, op, pLevel->iIdxCur, 0, r1, n);
VdbeCoverageIf(v, op==OP_SeekLT);
VdbeCoverageIf(v, op==OP_SeekGT);
sqlite3VdbeAddOp2(v, OP_Goto, 1, pLevel->p2);
}
#endif /* SQLITE_DISABLE_SKIPAHEAD_DISTINCT */
/* The common case: Advance to the next row */
if( pLevel->addrCont ) sqlite3VdbeResolveLabel(v, pLevel->addrCont);
sqlite3VdbeAddOp3(v, pLevel->op, pLevel->p1, pLevel->p2, pLevel->p3);
sqlite3VdbeChangeP5(v, pLevel->p5);
VdbeCoverage(v);
VdbeCoverageIf(v, pLevel->op==OP_Next);
VdbeCoverageIf(v, pLevel->op==OP_Prev);
VdbeCoverageIf(v, pLevel->op==OP_VNext);
if( pLevel->regBignull ){
sqlite3VdbeResolveLabel(v, pLevel->addrBignull);
sqlite3VdbeAddOp2(v, OP_DecrJumpZero, pLevel->regBignull, pLevel->p2-1);
VdbeCoverage(v);
}
#ifndef SQLITE_DISABLE_SKIPAHEAD_DISTINCT
if( addrSeek ) sqlite3VdbeJumpHere(v, addrSeek);
#endif
}else if( pLevel->addrCont ){
sqlite3VdbeResolveLabel(v, pLevel->addrCont);
}
if( (pLoop->wsFlags & WHERE_IN_ABLE)!=0 && pLevel->u.in.nIn>0 ){
struct InLoop *pIn;
int j;
sqlite3VdbeResolveLabel(v, pLevel->addrNxt);
for(j=pLevel->u.in.nIn, pIn=&pLevel->u.in.aInLoop[j-1]; j>0; j--, pIn--){
assert( sqlite3VdbeGetOp(v, pIn->addrInTop+1)->opcode==OP_IsNull
|| pParse->db->mallocFailed );
sqlite3VdbeJumpHere(v, pIn->addrInTop+1);
if( pIn->eEndLoopOp!=OP_Noop ){
if( pIn->nPrefix ){
int bEarlyOut =
(pLoop->wsFlags & WHERE_VIRTUALTABLE)==0
&& (pLoop->wsFlags & WHERE_IN_EARLYOUT)!=0;
if( pLevel->iLeftJoin ){
/* For LEFT JOIN queries, cursor pIn->iCur may not have been
** opened yet. This occurs for WHERE clauses such as
** "a = ? AND b IN (...)", where the index is on (a, b). If
** the RHS of the (a=?) is NULL, then the "b IN (...)" may
** never have been coded, but the body of the loop run to
** return the null-row. So, if the cursor is not open yet,
** jump over the OP_Next or OP_Prev instruction about to
** be coded. */
sqlite3VdbeAddOp2(v, OP_IfNotOpen, pIn->iCur,
sqlite3VdbeCurrentAddr(v) + 2 + bEarlyOut);
VdbeCoverage(v);
}
if( bEarlyOut ){
sqlite3VdbeAddOp4Int(v, OP_IfNoHope, pLevel->iIdxCur,
sqlite3VdbeCurrentAddr(v)+2,
pIn->iBase, pIn->nPrefix);
VdbeCoverage(v);
/* Retarget the OP_IsNull against the left operand of IN so
** it jumps past the OP_IfNoHope. This is because the
** OP_IsNull also bypasses the OP_Affinity opcode that is
** required by OP_IfNoHope. */
sqlite3VdbeJumpHere(v, pIn->addrInTop+1);
}
}
sqlite3VdbeAddOp2(v, pIn->eEndLoopOp, pIn->iCur, pIn->addrInTop);
VdbeCoverage(v);
VdbeCoverageIf(v, pIn->eEndLoopOp==OP_Prev);
VdbeCoverageIf(v, pIn->eEndLoopOp==OP_Next);
}
sqlite3VdbeJumpHere(v, pIn->addrInTop-1);
}
}
sqlite3VdbeResolveLabel(v, pLevel->addrBrk);
if( pLevel->pRJ ){
sqlite3VdbeAddOp3(v, OP_Return, pLevel->pRJ->regReturn, 0, 1);
VdbeCoverage(v);
}
if( pLevel->addrSkip ){
sqlite3VdbeGoto(v, pLevel->addrSkip);
VdbeComment((v, "next skip-scan on %s", pLoop->u.btree.pIndex->zName));
sqlite3VdbeJumpHere(v, pLevel->addrSkip);
sqlite3VdbeJumpHere(v, pLevel->addrSkip-2);
}
#ifndef SQLITE_LIKE_DOESNT_MATCH_BLOBS
if( pLevel->addrLikeRep ){
sqlite3VdbeAddOp2(v, OP_DecrJumpZero, (int)(pLevel->iLikeRepCntr>>1),
pLevel->addrLikeRep);
VdbeCoverage(v);
}
#endif
if( pLevel->iLeftJoin ){
int ws = pLoop->wsFlags;
addr = sqlite3VdbeAddOp1(v, OP_IfPos, pLevel->iLeftJoin); VdbeCoverage(v);
assert( (ws & WHERE_IDX_ONLY)==0 || (ws & WHERE_INDEXED)!=0 );
if( (ws & WHERE_IDX_ONLY)==0 ){
assert( pLevel->iTabCur==pTabList->a[pLevel->iFrom].iCursor );
sqlite3VdbeAddOp1(v, OP_NullRow, pLevel->iTabCur);
}
if( (ws & WHERE_INDEXED)
|| ((ws & WHERE_MULTI_OR) && pLevel->u.pCoveringIdx)
){
if( ws & WHERE_MULTI_OR ){
Index *pIx = pLevel->u.pCoveringIdx;
int iDb = sqlite3SchemaToIndex(db, pIx->pSchema);
sqlite3VdbeAddOp3(v, OP_ReopenIdx, pLevel->iIdxCur, pIx->tnum, iDb);
sqlite3VdbeSetP4KeyInfo(pParse, pIx);
}
sqlite3VdbeAddOp1(v, OP_NullRow, pLevel->iIdxCur);
}
if( pLevel->op==OP_Return ){
sqlite3VdbeAddOp2(v, OP_Gosub, pLevel->p1, pLevel->addrFirst);
}else{
sqlite3VdbeGoto(v, pLevel->addrFirst);
}
sqlite3VdbeJumpHere(v, addr);
}
VdbeModuleComment((v, "End WHERE-loop%d: %s", i,
pWInfo->pTabList->a[pLevel->iFrom].pTab->zName));
}
assert( pWInfo->nLevel<=pTabList->nSrc );
if( pWInfo->pExprMods ) whereUndoExprMods(pWInfo);
for(i=0, pLevel=pWInfo->a; i<pWInfo->nLevel; i++, pLevel++){
int k, last;
VdbeOp *pOp, *pLastOp;
Index *pIdx = 0;
SrcItem *pTabItem = &pTabList->a[pLevel->iFrom];
Table *pTab = pTabItem->pTab;
assert( pTab!=0 );
pLoop = pLevel->pWLoop;
/* Do RIGHT JOIN processing. Generate code that will output the
** unmatched rows of the right operand of the RIGHT JOIN with
** all of the columns of the left operand set to NULL.
*/
if( pLevel->pRJ ){
sqlite3WhereRightJoinLoop(pWInfo, i, pLevel);
continue;
}
/* For a co-routine, change all OP_Column references to the table of
** the co-routine into OP_Copy of result contained in a register.
** OP_Rowid becomes OP_Null.
*/
if( pTabItem->fg.viaCoroutine ){
testcase( pParse->db->mallocFailed );
translateColumnToCopy(pParse, pLevel->addrBody, pLevel->iTabCur,
pTabItem->regResult, 0);
continue;
}
/* If this scan uses an index, make VDBE code substitutions to read data
** from the index instead of from the table where possible. In some cases
** this optimization prevents the table from ever being read, which can
** yield a significant performance boost.
**
** Calls to the code generator in between sqlite3WhereBegin and
** sqlite3WhereEnd will have created code that references the table
** directly. This loop scans all that code looking for opcodes
** that reference the table and converts them into opcodes that
** reference the index.
*/
if( pLoop->wsFlags & (WHERE_INDEXED|WHERE_IDX_ONLY) ){
pIdx = pLoop->u.btree.pIndex;
}else if( pLoop->wsFlags & WHERE_MULTI_OR ){
pIdx = pLevel->u.pCoveringIdx;
}
if( pIdx
&& !db->mallocFailed
){
if( pWInfo->eOnePass==ONEPASS_OFF || !HasRowid(pIdx->pTable) ){
last = iEnd;
}else{
last = pWInfo->iEndWhere;
}
k = pLevel->addrBody + 1;
#ifdef SQLITE_DEBUG
if( db->flags & SQLITE_VdbeAddopTrace ){
printf("TRANSLATE opcodes in range %d..%d\n", k, last-1);
}
/* Proof that the "+1" on the k value above is safe */
pOp = sqlite3VdbeGetOp(v, k - 1);
assert( pOp->opcode!=OP_Column || pOp->p1!=pLevel->iTabCur );
assert( pOp->opcode!=OP_Rowid || pOp->p1!=pLevel->iTabCur );
assert( pOp->opcode!=OP_IfNullRow || pOp->p1!=pLevel->iTabCur );
#endif
pOp = sqlite3VdbeGetOp(v, k);
pLastOp = pOp + (last - k);
assert( pOp<=pLastOp );
do{
if( pOp->p1!=pLevel->iTabCur ){
/* no-op */
}else if( pOp->opcode==OP_Column
#ifdef SQLITE_ENABLE_OFFSET_SQL_FUNC
|| pOp->opcode==OP_Offset
#endif
){
int x = pOp->p2;
assert( pIdx->pTable==pTab );
#ifdef SQLITE_ENABLE_OFFSET_SQL_FUNC
if( pOp->opcode==OP_Offset ){
/* Do not need to translate the column number */
}else
#endif
if( !HasRowid(pTab) ){
Index *pPk = sqlite3PrimaryKeyIndex(pTab);
x = pPk->aiColumn[x];
assert( x>=0 );
}else{
testcase( x!=sqlite3StorageColumnToTable(pTab,x) );
x = sqlite3StorageColumnToTable(pTab,x);
}
x = sqlite3TableColumnToIndex(pIdx, x);
if( x>=0 ){
pOp->p2 = x;
pOp->p1 = pLevel->iIdxCur;
OpcodeRewriteTrace(db, k, pOp);
}else{
/* Unable to translate the table reference into an index
** reference. Verify that this is harmless - that the
** table being referenced really is open.
*/
#ifdef SQLITE_ENABLE_OFFSET_SQL_FUNC
assert( (pLoop->wsFlags & WHERE_IDX_ONLY)==0
|| cursorIsOpen(v,pOp->p1,k)
|| pOp->opcode==OP_Offset
);
#else
assert( (pLoop->wsFlags & WHERE_IDX_ONLY)==0
|| cursorIsOpen(v,pOp->p1,k)
);
#endif
}
}else if( pOp->opcode==OP_Rowid ){
pOp->p1 = pLevel->iIdxCur;
pOp->opcode = OP_IdxRowid;
OpcodeRewriteTrace(db, k, pOp);
}else if( pOp->opcode==OP_IfNullRow ){
pOp->p1 = pLevel->iIdxCur;
OpcodeRewriteTrace(db, k, pOp);
}
#ifdef SQLITE_DEBUG
k++;
#endif
}while( (++pOp)<pLastOp );
#ifdef SQLITE_DEBUG
if( db->flags & SQLITE_VdbeAddopTrace ) printf("TRANSLATE complete\n");
#endif
}
}
/* The "break" point is here, just past the end of the outer loop.
** Set it.
*/
sqlite3VdbeResolveLabel(v, pWInfo->iBreak);
/* Final cleanup
*/
pParse->nQueryLoop = pWInfo->savedNQueryLoop;
whereInfoFree(db, pWInfo);
pParse->withinRJSubrtn -= nRJ;
return;
}
| 0 |
[
"CWE-129"
] |
sqlite
|
effc07ec9c6e08d3bd17665f8800054770f8c643
| 4,700,005,216,578,405,500,000,000,000,000,000,000 | 310 |
Fix the whereKeyStats() routine (part of STAT4 processing only) so that it
is able to cope with row-value comparisons against the primary key index
of a WITHOUT ROWID table.
[forum:/forumpost/3607259d3c|Forum post 3607259d3c].
FossilOrigin-Name: 2a6f761864a462de5c2d5bc666b82fb0b7e124a03443cd1482620dde344b34bb
|
htab_map_lookup_batch(struct bpf_map *map, const union bpf_attr *attr,
union bpf_attr __user *uattr)
{
return __htab_map_lookup_and_delete_batch(map, attr, uattr, false,
false, false);
}
| 0 |
[
"CWE-787"
] |
bpf
|
c4eb1f403243fc7bbb7de644db8587c03de36da6
| 7,835,091,108,191,554,000,000,000,000,000,000,000 | 6 |
bpf: Fix integer overflow involving bucket_size
In __htab_map_lookup_and_delete_batch(), hash buckets are iterated
over to count the number of elements in each bucket (bucket_size).
If bucket_size is large enough, the multiplication to calculate
kvmalloc() size could overflow, resulting in out-of-bounds write
as reported by KASAN:
[...]
[ 104.986052] BUG: KASAN: vmalloc-out-of-bounds in __htab_map_lookup_and_delete_batch+0x5ce/0xb60
[ 104.986489] Write of size 4194224 at addr ffffc9010503be70 by task crash/112
[ 104.986889]
[ 104.987193] CPU: 0 PID: 112 Comm: crash Not tainted 5.14.0-rc4 #13
[ 104.987552] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.13.0-1ubuntu1.1 04/01/2014
[ 104.988104] Call Trace:
[ 104.988410] dump_stack_lvl+0x34/0x44
[ 104.988706] print_address_description.constprop.0+0x21/0x140
[ 104.988991] ? __htab_map_lookup_and_delete_batch+0x5ce/0xb60
[ 104.989327] ? __htab_map_lookup_and_delete_batch+0x5ce/0xb60
[ 104.989622] kasan_report.cold+0x7f/0x11b
[ 104.989881] ? __htab_map_lookup_and_delete_batch+0x5ce/0xb60
[ 104.990239] kasan_check_range+0x17c/0x1e0
[ 104.990467] memcpy+0x39/0x60
[ 104.990670] __htab_map_lookup_and_delete_batch+0x5ce/0xb60
[ 104.990982] ? __wake_up_common+0x4d/0x230
[ 104.991256] ? htab_of_map_free+0x130/0x130
[ 104.991541] bpf_map_do_batch+0x1fb/0x220
[...]
In hashtable, if the elements' keys have the same jhash() value, the
elements will be put into the same bucket. By putting a lot of elements
into a single bucket, the value of bucket_size can be increased to
trigger the integer overflow.
Triggering the overflow is possible for both callers with CAP_SYS_ADMIN
and callers without CAP_SYS_ADMIN.
It will be trivial for a caller with CAP_SYS_ADMIN to intentionally
reach this overflow by enabling BPF_F_ZERO_SEED. As this flag will set
the random seed passed to jhash() to 0, it will be easy for the caller
to prepare keys which will be hashed into the same value, and thus put
all the elements into the same bucket.
If the caller does not have CAP_SYS_ADMIN, BPF_F_ZERO_SEED cannot be
used. However, it will be still technically possible to trigger the
overflow, by guessing the random seed value passed to jhash() (32bit)
and repeating the attempt to trigger the overflow. In this case,
the probability to trigger the overflow will be low and will take
a very long time.
Fix the integer overflow by calling kvmalloc_array() instead of
kvmalloc() to allocate memory.
Fixes: 057996380a42 ("bpf: Add batch ops to all htab bpf map")
Signed-off-by: Tatsuhiko Yasumatsu <[email protected]>
Signed-off-by: Daniel Borkmann <[email protected]>
Link: https://lore.kernel.org/bpf/[email protected]
|
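The fix swaps kvmalloc(bucket_size * esize) for kvmalloc_array(), which refuses overflowing size computations. A plain-C stand-in for the checked variant, using the GCC/Clang overflow builtin:

```c
#include <stdlib.h>

/* Plain-C stand-in for kvmalloc_array(): verify the multiplication
 * before allocating, so an attacker-influenced element count cannot
 * wrap the size computation into a too-small buffer. Uses the
 * GCC/Clang __builtin_mul_overflow intrinsic. */
static void *alloc_array(size_t n, size_t esize)
{
    size_t bytes;
    if (__builtin_mul_overflow(n, esize, &bytes))
        return NULL;    /* n * esize does not fit in size_t */
    return malloc(bytes);
}
```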
upnp_get_redirection_infos_by_index(int index,
unsigned short * eport, char * protocol,
unsigned short * iport,
char * iaddr, int iaddrlen,
char * desc, int desclen,
char * rhost, int rhostlen,
unsigned int * leaseduration)
{
/*char ifname[IFNAMSIZ];*/
int proto = 0;
unsigned int timestamp;
time_t current_time;
if(desc && (desclen > 0))
desc[0] = '\0';
if(rhost && (rhostlen > 0))
rhost[0] = '\0';
if(get_redirect_rule_by_index(index, 0/*ifname*/, eport, iaddr, iaddrlen,
iport, &proto, desc, desclen,
rhost, rhostlen, &timestamp,
0, 0) < 0)
return -1;
else
{
current_time = upnp_time();
*leaseduration = (timestamp > (unsigned int)current_time)
? (timestamp - current_time)
: 0;
if(proto == IPPROTO_TCP)
memcpy(protocol, "TCP", 4);
#ifdef IPPROTO_UDPLITE
else if(proto == IPPROTO_UDPLITE)
memcpy(protocol, "UDPLITE", 8);
#endif /* IPPROTO_UDPLITE */
else
memcpy(protocol, "UDP", 4);
return 0;
}
}
| 0 |
[
"CWE-476"
] |
miniupnp
|
f321c2066b96d18afa5158dfa2d2873a2957ef38
| 209,143,269,991,691,460,000,000,000,000,000,000,000 | 39 |
upnp_redirect(): accept NULL desc argument
|
static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
{
int rc;
ctxt->ops->get_fpu(ctxt);
rc = asm_safe("fwait");
ctxt->ops->put_fpu(ctxt);
if (unlikely(rc != X86EMUL_CONTINUE))
return emulate_exception(ctxt, MF_VECTOR, 0, false);
return X86EMUL_CONTINUE;
}
| 0 |
[
"CWE-284"
] |
linux
|
33ab91103b3415e12457e3104f0e4517ce12d0f3
| 33,321,102,639,135,080,000,000,000,000,000,000,000 | 13 |
KVM: x86: fix emulation of "MOV SS, null selector"
This is CVE-2017-2583. On Intel this causes a failed vmentry because
SS's type is neither 3 nor 7 (even though the manual says this check is
only done for usable SS, and the dmesg splat says that SS is unusable!).
On AMD it's worse: svm.c is confused and sets CPL to 0 in the vmcb.
The fix fabricates a data segment descriptor when SS is set to a null
selector, so that CPL and SS.DPL are set correctly in the VMCS/vmcb.
Furthermore, only allow setting SS to a NULL selector if SS.RPL < 3;
this in turn ensures CPL < 3 because RPL must be equal to CPL.
Thanks to Andy Lutomirski and Willy Tarreau for help in analyzing
the bug and deciphering the manuals.
Reported-by: Xiaohan Zhang <[email protected]>
Fixes: 79d5b4c3cd809c770d4bf9812635647016c56011
Cc: [email protected]
Signed-off-by: Paolo Bonzini <[email protected]>
|
void HeaderMapImpl::clear() {
clearInline();
headers_.clear();
cached_byte_size_ = 0;
}
| 0 |
[] |
envoy
|
2c60632d41555ec8b3d9ef5246242be637a2db0f
| 306,168,561,472,669,000,000,000,000,000,000,000,000 | 5 |
http: header map security fixes for duplicate headers (#197)
Previously header matching did not match on all headers for
non-inline headers. This patch changes the default behavior to
always logically match on all headers. Multiple individual
headers will be logically concatenated with ',' similar to what
is done with inline headers. This makes the behavior effectively
consistent. This behavior can be temporarily reverted by setting
the runtime value "envoy.reloadable_features.header_match_on_all_headers"
to "false".
Targeted fixes have been additionally performed on the following
extensions which make them consider all duplicate headers by default as
a comma concatenated list:
1) Any extension using CEL matching on headers.
2) The header to metadata filter.
3) The JWT filter.
4) The Lua filter.
Like primary header matching used in routing, RBAC, etc. this behavior
can be disabled by setting the runtime value
"envoy.reloadable_features.header_match_on_all_headers" to false.
Finally, the setCopy() header map API previously only set the first
header in the case of duplicate non-inline headers. setCopy() now
behaves similarly to the other set*() APIs and replaces all found
headers with a single value. This may have had security implications
in the extauth filter which uses this API. This behavior can be disabled
by setting the runtime value
"envoy.reloadable_features.http_set_copy_replace_all_headers" to false.
Fixes https://github.com/envoyproxy/envoy-setec/issues/188
Signed-off-by: Matt Klein <[email protected]>
|
GF_Err ftyp_box_write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
u32 i;
GF_FileTypeBox *ptr = (GF_FileTypeBox *) s;
e = gf_isom_box_write_header(s, bs);
if (e) return e;
gf_bs_write_u32(bs, ptr->majorBrand);
gf_bs_write_u32(bs, ptr->minorVersion);
for (i=0; i<ptr->altCount; i++) {
gf_bs_write_u32(bs, ptr->altBrand[i]);
}
return GF_OK;
}
| 0 |
[
"CWE-787"
] |
gpac
|
388ecce75d05e11fc8496aa4857b91245007d26e
| 331,121,480,050,794,300,000,000,000,000,000,000,000 | 15 |
fixed #1587
|
archive_read_format_rar_seek_data(struct archive_read *a, int64_t offset,
int whence)
{
int64_t client_offset, ret;
unsigned int i;
struct rar *rar = (struct rar *)(a->format->data);
if (rar->compression_method == COMPRESS_METHOD_STORE)
{
/* Modify the offset for use with SEEK_SET */
switch (whence)
{
case SEEK_CUR:
client_offset = rar->offset_seek;
break;
case SEEK_END:
client_offset = rar->unp_size;
break;
case SEEK_SET:
default:
client_offset = 0;
}
client_offset += offset;
if (client_offset < 0)
{
/* Can't seek past beginning of data block */
return -1;
}
else if (client_offset > rar->unp_size)
{
/*
* Set the returned offset but only seek to the end of
* the data block.
*/
rar->offset_seek = client_offset;
client_offset = rar->unp_size;
}
client_offset += rar->dbo[0].start_offset;
i = 0;
while (i < rar->cursor)
{
i++;
client_offset += rar->dbo[i].start_offset - rar->dbo[i-1].end_offset;
}
if (rar->main_flags & MHD_VOLUME)
{
/* Find the appropriate offset among the multivolume archive */
while (1)
{
if (client_offset < rar->dbo[rar->cursor].start_offset &&
rar->file_flags & FHD_SPLIT_BEFORE)
{
/* Search backwards for the correct data block */
if (rar->cursor == 0)
{
archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC,
"Attempt to seek past beginning of RAR data block");
return (ARCHIVE_FAILED);
}
rar->cursor--;
client_offset -= rar->dbo[rar->cursor+1].start_offset -
rar->dbo[rar->cursor].end_offset;
if (client_offset < rar->dbo[rar->cursor].start_offset)
continue;
ret = __archive_read_seek(a, rar->dbo[rar->cursor].start_offset -
rar->dbo[rar->cursor].header_size, SEEK_SET);
if (ret < (ARCHIVE_OK))
return ret;
ret = archive_read_format_rar_read_header(a, a->entry);
if (ret != (ARCHIVE_OK))
{
archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC,
"Error during seek of RAR file");
return (ARCHIVE_FAILED);
}
rar->cursor--;
break;
}
else if (client_offset > rar->dbo[rar->cursor].end_offset &&
rar->file_flags & FHD_SPLIT_AFTER)
{
/* Search forward for the correct data block */
rar->cursor++;
if (rar->cursor < rar->nodes &&
client_offset > rar->dbo[rar->cursor].end_offset)
{
client_offset += rar->dbo[rar->cursor].start_offset -
rar->dbo[rar->cursor-1].end_offset;
continue;
}
rar->cursor--;
ret = __archive_read_seek(a, rar->dbo[rar->cursor].end_offset,
SEEK_SET);
if (ret < (ARCHIVE_OK))
return ret;
ret = archive_read_format_rar_read_header(a, a->entry);
if (ret == (ARCHIVE_EOF))
{
rar->has_endarc_header = 1;
ret = archive_read_format_rar_read_header(a, a->entry);
}
if (ret != (ARCHIVE_OK))
{
archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC,
"Error during seek of RAR file");
return (ARCHIVE_FAILED);
}
client_offset += rar->dbo[rar->cursor].start_offset -
rar->dbo[rar->cursor-1].end_offset;
continue;
}
break;
}
}
ret = __archive_read_seek(a, client_offset, SEEK_SET);
if (ret < (ARCHIVE_OK))
return ret;
rar->bytes_remaining = rar->dbo[rar->cursor].end_offset - ret;
i = rar->cursor;
while (i > 0)
{
i--;
ret -= rar->dbo[i+1].start_offset - rar->dbo[i].end_offset;
}
ret -= rar->dbo[0].start_offset;
/* Always restart reading the file after a seek */
__archive_reset_read_data(&a->archive);
rar->bytes_unconsumed = 0;
rar->offset = 0;
/*
* If a seek past the end of file was requested, return the requested
* offset.
*/
if (ret == rar->unp_size && rar->offset_seek > rar->unp_size)
return rar->offset_seek;
/* Return the new offset */
rar->offset_seek = ret;
return rar->offset_seek;
}
else
{
archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC,
"Seeking of compressed RAR files is unsupported");
}
return (ARCHIVE_FAILED);
}
| 0 |
[
"CWE-119",
"CWE-787"
] |
libarchive
|
05caadc7eedbef471ac9610809ba683f0c698700
| 191,276,710,718,262,960,000,000,000,000,000,000,000 | 152 |
Issue 719: Fix for TALOS-CAN-154
A RAR file with an invalid zero dictionary size was not being
rejected, leading to a zero-sized allocation for the dictionary
storage which was then overwritten during the dictionary initialization.
Thanks to the Open Source and Threat Intelligence project at Cisco for
reporting this.
|
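A sketch of the validation described above: reject the implausible header value before any buffer is sized from it. Names are hypothetical, not libarchive's actual RAR parser:

```c
#include <stdlib.h>

#define ERR_BAD_ARCHIVE (-1)

/* A zero dictionary size from the archive header would produce a
 * zero-byte allocation that dictionary initialization then writes
 * past; refuse it before calling the allocator. */
static int init_dictionary(unsigned char **dict, size_t dict_size)
{
    if (dict_size == 0)
        return ERR_BAD_ARCHIVE;
    *dict = calloc(1, dict_size);
    return (*dict != NULL) ? 0 : ERR_BAD_ARCHIVE;
}
```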
static int snd_seq_deliver_single_event(struct snd_seq_client *client,
struct snd_seq_event *event,
int filter, int atomic, int hop)
{
struct snd_seq_client *dest = NULL;
struct snd_seq_client_port *dest_port = NULL;
int result = -ENOENT;
int direct;
direct = snd_seq_ev_is_direct(event);
dest = get_event_dest_client(event, filter);
if (dest == NULL)
goto __skip;
dest_port = snd_seq_port_use_ptr(dest, event->dest.port);
if (dest_port == NULL)
goto __skip;
/* check permission */
if (! check_port_perm(dest_port, SNDRV_SEQ_PORT_CAP_WRITE)) {
result = -EPERM;
goto __skip;
}
if (dest_port->timestamping)
update_timestamp_of_queue(event, dest_port->time_queue,
dest_port->time_real);
switch (dest->type) {
case USER_CLIENT:
if (dest->data.user.fifo)
result = snd_seq_fifo_event_in(dest->data.user.fifo, event);
break;
case KERNEL_CLIENT:
if (dest_port->event_input == NULL)
break;
result = dest_port->event_input(event, direct,
dest_port->private_data,
atomic, hop);
break;
default:
break;
}
__skip:
if (dest_port)
snd_seq_port_unlock(dest_port);
if (dest)
snd_seq_client_unlock(dest);
if (result < 0 && !direct) {
result = bounce_error_event(client, event, result, atomic, hop);
}
return result;
}
| 0 |
[
"CWE-703"
] |
linux
|
030e2c78d3a91dd0d27fef37e91950dde333eba1
| 59,450,599,900,935,000,000,000,000,000,000,000,000 | 56 |
ALSA: seq: Fix missing NULL check at remove_events ioctl
snd_seq_ioctl_remove_events() calls snd_seq_fifo_clear()
unconditionally even if there is no FIFO assigned, and this leads to
an Oops due to NULL dereference. The fix is just to add a proper NULL
check.
Reported-by: Dmitry Vyukov <[email protected]>
Tested-by: Dmitry Vyukov <[email protected]>
Cc: <[email protected]>
Signed-off-by: Takashi Iwai <[email protected]>
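A hedged sketch of the fix's shape (the guard described above, not necessarily the verbatim kernel patch):
/* only clear a FIFO that was actually assigned */
if (client->data.user.fifo)
	snd_seq_fifo_clear(client->data.user.fifo);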
|
_public_ int sd_bus_set_connected_signal(sd_bus *bus, int b) {
assert_return(bus, -EINVAL);
assert_return(bus = bus_resolve(bus), -ENOPKG);
assert_return(bus->state == BUS_UNSET, -EPERM);
assert_return(!bus_pid_changed(bus), -ECHILD);
bus->connected_signal = !!b;
return 0;
}
| 0 |
[
"CWE-416"
] |
systemd
|
1068447e6954dc6ce52f099ed174c442cb89ed54
| 231,860,202,840,721,100,000,000,000,000,000,000,000 | 9 |
sd-bus: introduce API for re-enqueuing incoming messages
When authorizing via PolicyKit we want to process incoming method calls
twice: once to process and figure out that we need PK authentication,
and a second time after we aquired PK authentication to actually execute
the operation. With this new call sd_bus_enqueue_for_read() we have a
way to put an incoming message back into the read queue for this
purpose.
This might have other uses too, for example debugging.
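A hedged usage sketch of the new call (error handling elided; treat this as illustrative rather than the library's canonical example):
/* After discovering the method call needs PolicyKit authorization,
 * put the message back so it is dispatched again once authorization
 * completes. */
r = sd_bus_enqueue_for_read(bus, message);
if (r < 0)
        return r;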
|
void seq_parameter_set::set_TB_log2size_range(int mini,int maxi)
{
log2_min_transform_block_size = mini;
log2_diff_max_min_transform_block_size = maxi-mini;
}
| 0 |
[
"CWE-787"
] |
libde265
|
8e89fe0e175d2870c39486fdd09250b230ec10b8
| 300,181,450,398,762,760,000,000,000,000,000,000,000 | 5 |
error on out-of-range cpb_cnt_minus1 (oss-fuzz issue 27590)
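An illustrative guard of the kind the fix adds (the error constant is assumed from libde265's conventions): the HEVC spec limits cpb_cnt_minus1 to [0,31], so larger values must be rejected before they are used as an array bound.
if (cpb_cnt_minus1 > 31) {
  return DE265_ERROR_CODED_PARAMETER_OUT_OF_RANGE;
}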
|
move_lines(buf_T *frombuf, buf_T *tobuf)
{
buf_T *tbuf = curbuf;
int retval = OK;
linenr_T lnum;
char_u *p;
/* Copy the lines in "frombuf" to "tobuf". */
curbuf = tobuf;
for (lnum = 1; lnum <= frombuf->b_ml.ml_line_count; ++lnum)
{
p = vim_strsave(ml_get_buf(frombuf, lnum, FALSE));
if (p == NULL || ml_append(lnum - 1, p, 0, FALSE) == FAIL)
{
vim_free(p);
retval = FAIL;
break;
}
vim_free(p);
}
/* Delete all the lines in "frombuf". */
if (retval != FAIL)
{
curbuf = frombuf;
for (lnum = curbuf->b_ml.ml_line_count; lnum > 0; --lnum)
if (ml_delete(lnum, FALSE) == FAIL)
{
/* Oops! We could try putting back the saved lines, but that
* might fail again... */
retval = FAIL;
break;
}
}
curbuf = tbuf;
return retval;
}
| 0 |
[
"CWE-200",
"CWE-668"
] |
vim
|
5a73e0ca54c77e067c3b12ea6f35e3e8681e8cf8
| 315,798,467,125,172,800,000,000,000,000,000,000,000 | 38 |
patch 8.0.1263: others can read the swap file if a user is careless
Problem: Others can read the swap file if a user is careless with his
primary group.
Solution: If the group permission allows for reading but the world
permission doesn't, make sure the group is right.
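A hedged sketch of the check the solution implies (names and exact policy are assumptions, not Vim's patch):
struct stat st;
if (stat((char *)fname, &st) == 0
        && (st.st_mode & 0040)     /* group-readable */
        && !(st.st_mode & 0004))   /* but not world-readable */
{
    /* make sure the swap file's group matches the original file's
     * group, or tighten the mode so the group read bit is dropped */
}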
|
void OAuth2Filter::onGetAccessTokenSuccess(const std::string& access_code,
const std::string& id_token,
const std::string& refresh_token,
std::chrono::seconds expires_in) {
access_token_ = access_code;
id_token_ = id_token;
refresh_token_ = refresh_token;
const auto new_epoch = time_source_.systemTime() + expires_in;
new_expires_ = std::to_string(
std::chrono::duration_cast<std::chrono::seconds>(new_epoch.time_since_epoch()).count());
finishFlow();
}
| 0 |
[
"CWE-416"
] |
envoy
|
7ffda4e809dec74449ebc330cebb9d2f4ab61360
| 157,522,002,912,694,100,000,000,000,000,000,000,000 | 14 |
oauth2: do not blindly accept requests with a token in the Authorization headera (781)
The logic was broken because it assumed an additional call would be
performed to the auth server, which isn't the case. Per the filter
documentation, a request is only considered subsequently authenticated
if there's valid cookie that was set after the access token was received
from the auth server:
https://www.envoyproxy.io/docs/envoy/latest/configuration/http/http_filters/oauth2_filter
More info about how to validate an access token (which we don't do, per
above):
https://www.oauth.com/oauth2-servers/token-introspection-endpoint/
https://datatracker.ietf.org/doc/html/rfc7662
Also fix the fact that we shouldn't be calling continueDecoding() after
decoder_callbacks_->encodeHeaders().
Signed-off-by: Raul Gutierrez Segales <[email protected]>
Signed-off-by: Matt Klein <[email protected]>
Signed-off-by: Pradeep Rao <[email protected]>
|
R_API int r_socket_read_block(RSocket *s, ut8 *buf, int len) {
int ret = 0;
for (ret = 0; ret < len; ) {
int r = r_socket_read (s, buf + ret, len - ret);
if (r == -1) {
#if HAVE_LIB_SSL
if (SSL_get_error (s->sfd, r) == SSL_ERROR_WANT_READ) {
if (r_socket_ready (s, 1, 0) == 1) {
continue;
}
}
#endif
return -1;
}
if (r < 1) {
break;
}
ret += r;
}
return ret;
}
| 0 |
[
"CWE-78"
] |
radare2
|
04edfa82c1f3fa2bc3621ccdad2f93bdbf00e4f9
| 264,959,666,055,801,900,000,000,000,000,000,000,000 | 21 |
Fix command injection on PDB download (#16966)
* Fix r_sys_mkdirp with absolute path on Windows
* Fix build with --with-openssl
* Use RBuffer in r_socket_http_answer()
* r_socket_http_answer: Fix read for big responses
* Implement r_str_escape_sh()
* Cleanup r_socket_connect() on Windows
* Fix socket being created without a protocol
* Fix socket connect with SSL ##socket
* Use select() in r_socket_ready()
* Fix read failing if received only protocol answer
* Fix double-free
* r_socket_http_get: Fail if req. SSL with no support
* Follow redirects in r_socket_http_answer()
* Fix r_socket_http_get result length with R2_CURL=1
* Also follow redirects
* Avoid using curl for downloading PDBs
* Use r_socket_http_get() on UNIXs
* Use WinINet API on Windows for r_socket_http_get()
* Fix command injection
* Fix r_sys_cmd_str_full output for binary data
* Validate GUID on PDB download
* Pass depth to socket_http_get_recursive()
* Remove 'r_' and '__' from static function names
* Fix is_valid_guid
* Fix for comments
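An illustrative shell-escaping helper in the spirit of the r_str_escape_sh() mentioned above; this is a sketch, not radare2's implementation:
#include <stdlib.h>
#include <string.h>
char *escape_sh(const char *src) {
	size_t len = strlen (src);
	char *dst = malloc (len * 2 + 1), *q = dst;
	if (!dst) {
		return NULL;
	}
	for (const char *p = src; *p; p++) {
		/* escape the characters that stay special inside double quotes */
		if (*p == '"' || *p == '\\' || *p == '$' || *p == '`') {
			*q++ = '\\';
		}
		*q++ = *p;
	}
	*q = 0;
	return dst;
}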
|
ServerLifecycleNotifier::HandlePtr InstanceImpl::registerCallback(Stage stage,
StageCallback callback) {
auto& callbacks = stage_callbacks_[stage];
return std::make_unique<LifecycleCallbackHandle<StageCallback>>(callbacks, callback);
}
| 0 |
[
"CWE-400"
] |
envoy
|
542f84c66e9f6479bc31c6f53157c60472b25240
| 233,643,594,883,966,870,000,000,000,000,000,000,000 | 5 |
overload: Runtime configurable global connection limits (#147)
Signed-off-by: Tony Allen <[email protected]>
|
void getElemMatchOrPushdownDescendants(MatchExpression* node, std::vector<MatchExpression*>* out) {
if (node->getTag() && node->getTag()->getType() == TagType::OrPushdownTag) {
out->push_back(node);
} else if (node->matchType() == MatchExpression::ELEM_MATCH_OBJECT ||
node->matchType() == MatchExpression::AND) {
for (size_t i = 0; i < node->numChildren(); ++i) {
getElemMatchOrPushdownDescendants(node->getChild(i), out);
}
} else if (node->matchType() == MatchExpression::NOT) {
// The immediate child of NOT may be tagged, but there should be no tags deeper than this.
auto* childNode = node->getChild(0);
if (childNode->getTag() && childNode->getTag()->getType() == TagType::OrPushdownTag) {
out->push_back(node);
}
}
}
| 0 |
[
"CWE-834"
] |
mongo
|
94d0e046baa64d1aa1a6af97e2d19bb466cc1ff5
| 234,537,332,770,948,160,000,000,000,000,000,000,000 | 16 |
SERVER-38164 $or pushdown optimization does not correctly handle $not within an $elemMatch
|
R_API int r_core_bin_set_env(RCore *r, RBinFile *binfile) {
RBinObject *binobj = binfile ? binfile->o: NULL;
RBinInfo *info = binobj ? binobj->info: NULL;
if (info) {
int va = info->has_va;
const char * arch = info->arch;
ut16 bits = info->bits;
ut64 baseaddr = r_bin_get_baddr (r->bin);
r_config_set_i (r->config, "bin.baddr", baseaddr);
r_config_set (r->config, "asm.arch", arch);
r_config_set_i (r->config, "asm.bits", bits);
r_config_set (r->config, "anal.arch", arch);
if (info->cpu && *info->cpu) {
r_config_set (r->config, "anal.cpu", info->cpu);
} else {
r_config_set (r->config, "anal.cpu", arch);
}
r_asm_use (r->assembler, arch);
r_core_bin_info (r, R_CORE_BIN_ACC_ALL, R_CORE_BIN_SET, va, NULL, NULL);
r_core_bin_set_cur (r, binfile);
return true;
}
return false;
}
| 0 |
[
"CWE-125"
] |
radare2
|
1f37c04f2a762500222dda2459e6a04646feeedf
| 46,688,739,190,908,290,000,000,000,000,000,000,000 | 24 |
Fix #9904 - crash in r2_hoobr_r_read_le32 (over 9000 entrypoints) and read_le oobread (#9923)
|
int PKCS7_set_digest(PKCS7 *p7, const EVP_MD *md)
{
if (PKCS7_type_is_digest(p7)) {
if (!(p7->d.digest->md->parameter = ASN1_TYPE_new())) {
PKCS7err(PKCS7_F_PKCS7_SET_DIGEST, ERR_R_MALLOC_FAILURE);
return 0;
}
p7->d.digest->md->parameter->type = V_ASN1_NULL;
p7->d.digest->md->algorithm = OBJ_nid2obj(EVP_MD_nid(md));
return 1;
}
PKCS7err(PKCS7_F_PKCS7_SET_DIGEST, PKCS7_R_WRONG_CONTENT_TYPE);
return 1;
}
| 0 |
[] |
openssl
|
c0334c2c92dd1bc3ad8138ba6e74006c3631b0f9
| 99,068,826,012,039,780,000,000,000,000,000,000,000 | 15 |
PKCS#7: avoid NULL pointer dereferences with missing content
In PKCS#7, the ASN.1 content component is optional.
This typically applies to inner content (detached signatures),
however we must also handle unexpected missing outer content
correctly.
This patch only addresses functions reachable from parsing,
decryption and verification, and functions otherwise associated
with reading potentially untrusted data.
Correcting all low-level API calls requires further work.
CVE-2015-0289
Thanks to Michal Zalewski (Google) for reporting this issue.
Reviewed-by: Steve Henson <[email protected]>
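A hedged sketch of the defensive pattern the message describes (the function and reason codes are chosen for illustration):
if (p7->d.sign == NULL || p7->d.sign->contents == NULL) {
    PKCS7err(PKCS7_F_PKCS7_DATAINIT, PKCS7_R_NO_CONTENT);
    return NULL;
}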
|
int ldb_dn_get_extended_comp_num(struct ldb_dn *dn)
{
if ( ! ldb_dn_validate(dn)) {
return -1;
}
return dn->ext_comp_num;
}
| 0 |
[
"CWE-200"
] |
samba
|
7f51ec8c4ed9ba1f53d722e44fb6fb3cde933b72
| 45,259,874,072,529,130,000,000,000,000,000,000,000 | 7 |
CVE-2015-5330: ldb_dn: simplify and fix ldb_dn_escape_internal()
Previously we relied on NUL terminated strings and jumped back and
forth between copying escaped bytes and memcpy()ing un-escaped chunks.
This simple version is easier to reason about and works with
unterminated strings. It may also be faster as it avoids reading the
string twice (first with strcspn, then with memcpy).
Bug: https://bugzilla.samba.org/show_bug.cgi?id=11599
Signed-off-by: Douglas Bagnall <[email protected]>
Pair-programmed-with: Andrew Bartlett <[email protected]>
Reviewed-by: Ralph Boehme <[email protected]>
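An illustrative single-pass escape loop in the spirit described (needs_escape() is a hypothetical predicate, not samba's code):
for (i = 0; i < len; i++) {
	unsigned char c = src[i];
	if (needs_escape(c)) {
		*d++ = '\\';
	}
	*d++ = c;   /* copy byte by byte; no reliance on NUL termination */
}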
|
void imap_get_parent(const char *mbox, char delim, char *buf, size_t buflen)
{
/* Make a copy of the mailbox name, but only if the pointers are different */
if (mbox != buf)
mutt_str_copy(buf, mbox, buflen);
int n = mutt_str_len(buf);
/* Let's go backwards until the next delimiter
*
* If buf[n] is a '/', the first n-- will allow us
* to ignore it. If it isn't, then buf looks like
* "/aaaaa/bbbb". There is at least one "b", so we can't skip
* the "/" after the 'a's.
*
* If buf == '/', then n-- => n == 0, so the loop ends
* immediately */
for (n--; (n >= 0) && (buf[n] != delim); n--)
; // do nothing
/* We stopped before the beginning. There is a trailing slash. */
if (n > 0)
{
/* Strip the trailing delimiter. */
buf[n] = '\0';
}
else
{
buf[0] = (n == 0) ? delim : '\0';
}
}
| 0 |
[
"CWE-125"
] |
neomutt
|
fa1db5785e5cfd9d3cd27b7571b9fe268d2ec2dc
| 143,020,363,752,895,000,000,000,000,000,000,000,000 | 31 |
Fix seqset iterator when it ends in a comma
If the seqset ended with a comma, the substr_end marker would be just
before the trailing nul. In the next call, the loop to skip the
marker would iterate right past the end of string too.
The fix is simple: place the substr_end marker and skip past it
immediately.
|
static pj_status_t ssl_do_handshake(pj_ssl_sock_t *ssock)
{
ossl_sock_t *ossock = (ossl_sock_t *)ssock;
pj_status_t status;
int err;
/* Perform SSL handshake */
pj_lock_acquire(ssock->write_mutex);
err = SSL_do_handshake(ossock->ossl_ssl);
pj_lock_release(ssock->write_mutex);
/* SSL_do_handshake() may put some pending data into SSL write BIO,
* flush it if any.
*/
status = flush_circ_buf_output(ssock, &ssock->handshake_op_key, 0, 0);
if (status != PJ_SUCCESS && status != PJ_EPENDING) {
return status;
}
if (err < 0) {
int err2 = SSL_get_error(ossock->ossl_ssl, err);
if (err2 != SSL_ERROR_NONE && err2 != SSL_ERROR_WANT_READ)
{
/* Handshake fails */
status = STATUS_FROM_SSL_ERR2("Handshake", ssock, err, err2, 0);
return status;
}
}
/* Check if handshake has been completed */
if (SSL_is_init_finished(ossock->ossl_ssl)) {
ssock->ssl_state = SSL_STATE_ESTABLISHED;
return PJ_SUCCESS;
}
return PJ_EPENDING;
}
| 0 |
[
"CWE-362",
"CWE-703"
] |
pjproject
|
d5f95aa066f878b0aef6a64e60b61e8626e664cd
| 295,662,942,928,312,780,000,000,000,000,000,000,000 | 37 |
Merge pull request from GHSA-cv8x-p47p-99wr
* - Avoid SSL socket parent/listener getting destroyed during handshake by increasing parent's reference count.
- Add missing SSL socket close when the newly accepted SSL socket is discarded in SIP TLS transport.
* - Fix silly mistake: accepted active socket created without group lock in SSL socket.
- Replace assertion with normal validation check of SSL socket instance in OpenSSL verification callback (verify_cb()) to avoid a crash, e.g. if a race condition with SSL socket destroy somehow happens or the OpenSSL application data index gets corrupted.
|
void dpy_gl_ctx_destroy(QemuConsole *con, QEMUGLContext ctx)
{
assert(con->gl);
con->gl->ops->dpy_gl_ctx_destroy(con->gl, ctx);
}
| 0 |
[
"CWE-416"
] |
qemu
|
a4afa548fc6dd9842ed86639b4d37d4d1c4ad480
| 338,523,614,243,551,740,000,000,000,000,000,000,000 | 5 |
char: move front end handlers in CharBackend
Since the handlers are associated with a CharBackend, rather than the
CharDriverState, it is more appropriate to store in CharBackend. This
avoids the handler copy dance in qemu_chr_fe_set_handlers() then
mux_chr_update_read_handler(), by storing the CharBackend pointer
directly.
Also a mux CharDriver should go through mux->backends[focused], since
chr->be will stay NULL. Before that, it was possible to call
chr->handler by mistake with surprising results, for ex through
qemu_chr_be_can_write(), which would result in calling the last set
handler front end, not the one with focus.
Signed-off-by: Marc-André Lureau <[email protected]>
Message-Id: <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
|
static double mp_self_bitwise_or(_cimg_math_parser& mp) {
double &val = _mp_arg(1);
return val = (double)((longT)val | (longT)_mp_arg(2));
| 0 |
[
"CWE-125"
] |
CImg
|
10af1e8c1ad2a58a0a3342a856bae63e8f257abb
| 60,705,174,599,560,370,000,000,000,000,000,000,000 | 4 |
Fix other issues in 'CImg<T>::load_bmp()'.
|
int setup_ui_method(void)
{
ui_method = UI_create_method("OpenSSL application user interface");
UI_method_set_opener(ui_method, ui_open);
UI_method_set_reader(ui_method, ui_read);
UI_method_set_writer(ui_method, ui_write);
UI_method_set_closer(ui_method, ui_close);
return 0;
}
| 0 |
[] |
openssl
|
a70da5b3ecc3160368529677006801c58cb369db
| 281,433,086,369,651,120,000,000,000,000,000,000,000 | 9 |
New functions to check a hostname, email or IP address against a
certificate. Add options to s_client, s_server and x509 utilities
to print results of checks.
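A hedged usage sketch of the host check (signature per the modern documented OpenSSL API; the wrapper name is illustrative):
#include <string.h>
#include <openssl/x509v3.h>
int cert_matches_host(X509 *cert, const char *host)
{
    /* returns 1 on match, 0 on mismatch, negative on internal error */
    return X509_check_host(cert, host, strlen(host), 0, NULL);
}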
|
static inline size_t xfrm_sa_len(struct xfrm_state *x)
{
size_t l = 0;
if (x->aead)
l += nla_total_size(aead_len(x->aead));
if (x->aalg) {
l += nla_total_size(sizeof(struct xfrm_algo) +
(x->aalg->alg_key_len + 7) / 8);
l += nla_total_size(xfrm_alg_auth_len(x->aalg));
}
if (x->ealg)
l += nla_total_size(xfrm_alg_len(x->ealg));
if (x->calg)
l += nla_total_size(sizeof(*x->calg));
if (x->encap)
l += nla_total_size(sizeof(*x->encap));
if (x->tfcpad)
l += nla_total_size(sizeof(x->tfcpad));
if (x->replay_esn)
l += nla_total_size(xfrm_replay_state_esn_len(x->replay_esn));
if (x->security)
l += nla_total_size(sizeof(struct xfrm_user_sec_ctx) +
x->security->ctx_len);
if (x->coaddr)
l += nla_total_size(sizeof(*x->coaddr));
if (x->props.extra_flags)
l += nla_total_size(sizeof(x->props.extra_flags));
/* Must count x->lastused as it may become non-zero behind our back. */
l += nla_total_size(sizeof(u64));
return l;
}
| 0 |
[
"CWE-264"
] |
net
|
90f62cf30a78721641e08737bda787552428061e
| 72,727,723,525,290,580,000,000,000,000,000,000,000 | 33 |
net: Use netlink_ns_capable to verify the permissions of netlink messages
It is possible by passing a netlink socket to a more privileged
executable and then to fool that executable into writing to the socket
data that happens to be valid netlink message to do something that
privileged executable did not intend to do.
To keep this from happening replace bare capable and ns_capable calls
with netlink_capable, netlink_net_calls and netlink_ns_capable calls.
Which act the same as the previous calls except they verify that the
opener of the socket had the desired permissions as well.
Reported-by: Andy Lutomirski <[email protected]>
Signed-off-by: "Eric W. Biederman" <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
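A hedged sketch of the hardened check's shape (simplified):
/* verify that the opener of the socket, not just the current caller,
 * had the needed capability */
if (!netlink_capable(skb, CAP_NET_ADMIN))
        return -EPERM;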
|
static void emit_bpf_tail_call_direct(struct bpf_jit_poke_descriptor *poke,
u8 **pprog, int addr, u8 *image,
bool *callee_regs_used, u32 stack_depth)
{
int tcc_off = -4 - round_up(stack_depth, 8);
u8 *prog = *pprog;
int pop_bytes = 0;
int off1 = 20;
int poke_off;
int cnt = 0;
/* count the additional bytes used for popping callee regs to stack
* that need to be taken into account for jump offset that is used for
* bailing out from of the tail call when limit is reached
*/
pop_bytes = get_pop_bytes(callee_regs_used);
off1 += pop_bytes;
/*
* total bytes for:
* - nop5/ jmpq $off
* - pop callee regs
* - sub rsp, $val if depth > 0
* - pop rax
*/
poke_off = X86_PATCH_SIZE + pop_bytes + 1;
if (stack_depth) {
poke_off += 7;
off1 += 7;
}
/*
* if (tail_call_cnt > MAX_TAIL_CALL_CNT)
* goto out;
*/
EMIT2_off32(0x8B, 0x85, tcc_off); /* mov eax, dword ptr [rbp - tcc_off] */
EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT); /* cmp eax, MAX_TAIL_CALL_CNT */
EMIT2(X86_JA, off1); /* ja out */
EMIT3(0x83, 0xC0, 0x01); /* add eax, 1 */
EMIT2_off32(0x89, 0x85, tcc_off); /* mov dword ptr [rbp - tcc_off], eax */
poke->tailcall_bypass = image + (addr - poke_off - X86_PATCH_SIZE);
poke->adj_off = X86_TAIL_CALL_OFFSET;
poke->tailcall_target = image + (addr - X86_PATCH_SIZE);
poke->bypass_addr = (u8 *)poke->tailcall_target + X86_PATCH_SIZE;
emit_jump(&prog, (u8 *)poke->tailcall_target + X86_PATCH_SIZE,
poke->tailcall_bypass);
*pprog = prog;
pop_callee_regs(pprog, callee_regs_used);
prog = *pprog;
EMIT1(0x58); /* pop rax */
if (stack_depth)
EMIT3_off32(0x48, 0x81, 0xC4, round_up(stack_depth, 8));
memcpy(prog, ideal_nops[NOP_ATOMIC5], X86_PATCH_SIZE);
prog += X86_PATCH_SIZE;
/* out: */
*pprog = prog;
}
| 0 |
[
"CWE-77"
] |
linux
|
e4d4d456436bfb2fe412ee2cd489f7658449b098
| 2,876,051,313,119,000,400,000,000,000,000,000,000 | 62 |
bpf, x86: Validate computation of branch displacements for x86-64
The branch displacement logic in the BPF JIT compilers for x86 assumes
that, for any generated branch instruction, the distance cannot
increase between optimization passes.
But this assumption can be violated due to how the distances are
computed. Specifically, whenever a backward branch is processed in
do_jit(), the distance is computed by subtracting the positions in the
machine code from different optimization passes. This is because part
of addrs[] is already updated for the current optimization pass, before
the branch instruction is visited.
And so the optimizer can expand blocks of machine code in some cases.
This can confuse the optimizer logic, where it assumes that a fixed
point has been reached for all machine code blocks once the total
program size stops changing. And then the JIT compiler can output
abnormal machine code containing incorrect branch displacements.
To mitigate this issue, we assert that a fixed point is reached while
populating the output image. This rejects any problematic programs.
The issue affects both x86-32 and x86-64. We mitigate separately to
ease backporting.
Signed-off-by: Piotr Krysiuk <[email protected]>
Reviewed-by: Daniel Borkmann <[email protected]>
Signed-off-by: Daniel Borkmann <[email protected]>
|
static void iattr_to_fattr(struct fuse_conn *fc, struct iattr *iattr,
struct fuse_setattr_in *arg, bool trust_local_cmtime)
{
unsigned ivalid = iattr->ia_valid;
if (ivalid & ATTR_MODE)
arg->valid |= FATTR_MODE, arg->mode = iattr->ia_mode;
if (ivalid & ATTR_UID)
arg->valid |= FATTR_UID, arg->uid = from_kuid(fc->user_ns, iattr->ia_uid);
if (ivalid & ATTR_GID)
arg->valid |= FATTR_GID, arg->gid = from_kgid(fc->user_ns, iattr->ia_gid);
if (ivalid & ATTR_SIZE)
arg->valid |= FATTR_SIZE, arg->size = iattr->ia_size;
if (ivalid & ATTR_ATIME) {
arg->valid |= FATTR_ATIME;
arg->atime = iattr->ia_atime.tv_sec;
arg->atimensec = iattr->ia_atime.tv_nsec;
if (!(ivalid & ATTR_ATIME_SET))
arg->valid |= FATTR_ATIME_NOW;
}
if ((ivalid & ATTR_MTIME) && update_mtime(ivalid, trust_local_cmtime)) {
arg->valid |= FATTR_MTIME;
arg->mtime = iattr->ia_mtime.tv_sec;
arg->mtimensec = iattr->ia_mtime.tv_nsec;
if (!(ivalid & ATTR_MTIME_SET) && !trust_local_cmtime)
arg->valid |= FATTR_MTIME_NOW;
}
if ((ivalid & ATTR_CTIME) && trust_local_cmtime) {
arg->valid |= FATTR_CTIME;
arg->ctime = iattr->ia_ctime.tv_sec;
arg->ctimensec = iattr->ia_ctime.tv_nsec;
}
}
| 0 |
[
"CWE-459"
] |
linux
|
5d069dbe8aaf2a197142558b6fb2978189ba3454
| 72,075,864,780,034,220,000,000,000,000,000,000,000 | 33 |
fuse: fix bad inode
Jan Kara's analysis of the syzbot report (edited):
The reproducer opens a directory on FUSE filesystem, it then attaches
dnotify mark to the open directory. After that a fuse_do_getattr() call
finds that attributes returned by the server are inconsistent, and calls
make_bad_inode() which, among other things does:
inode->i_mode = S_IFREG;
This then confuses dnotify which doesn't tear down its structures
properly and eventually crashes.
Avoid calling make_bad_inode() on a live inode: switch to a private flag on
the fuse inode. Also add the test to ops which the bad_inode_ops would
have caught.
This bug goes back to the initial merge of fuse in 2.6.14...
Reported-by: [email protected]
Signed-off-by: Miklos Szeredi <[email protected]>
Tested-by: Jan Kara <[email protected]>
Cc: <[email protected]>
|
void vrend_set_tess_state(UNUSED struct vrend_context *ctx, const float tess_factors[6])
{
if (has_feature(feat_tessellation)) {
if (!vrend_state.use_gles) {
glPatchParameterfv(GL_PATCH_DEFAULT_OUTER_LEVEL, tess_factors);
glPatchParameterfv(GL_PATCH_DEFAULT_INNER_LEVEL, &tess_factors[4]);
} else {
memcpy(vrend_state.tess_factors, tess_factors, 6 * sizeof (float));
}
}
}
| 0 |
[
"CWE-787"
] |
virglrenderer
|
cbc8d8b75be360236cada63784046688aeb6d921
| 101,177,214,147,638,680,000,000,000,000,000,000,000 | 11 |
vrend: check transfer bounds for negative values too and report error
Closes #138
Signed-off-by: Gert Wollny <[email protected]>
Reviewed-by: Emil Velikov <[email protected]>
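An illustrative negative-value check in the spirit of the fix (struct and field names are assumptions):
if (info->box->x < 0 || info->box->y < 0 ||
    info->box->width < 0 || info->box->height < 0) {
   /* report the error to the context and refuse the transfer */
   return EINVAL;
}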
|
static int cpu_shares_write_uint(struct cgroup *cgrp, struct cftype *cftype,
u64 shareval)
{
return sched_group_set_shares(cgroup_tg(cgrp), shareval);
}
| 0 |
[] |
linux-2.6
|
8f1bc385cfbab474db6c27b5af1e439614f3025c
| 253,102,871,804,469,900,000,000,000,000,000,000,000 | 5 |
sched: fair: weight calculations
In order to level the hierarchy, we need to calculate load based on the
root view. That is, each task's load is in the same unit.
        A
       / \
      B   1
     / \
    2   3
To compute 1's load we do:
    weight(1) / rq_weight(A)
To compute 2's load we do:
    (weight(2) / rq_weight(B)) * (weight(B) / rq_weight(A))
This yields load fractions in comparable units.
The consequence is that it changes virtual time. We used to have:
    vtime_{i} = time_{i} / weight_{i}
    vtime = \Sum vtime_{i} = time / rq_weight.
But with the new way of load calculation we get that vtime equals time.
Signed-off-by: Peter Zijlstra <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
|
int php_module_shutdown_wrapper(sapi_module_struct *sapi_globals)
{
php_module_shutdown();
return SUCCESS;
}
| 0 |
[] |
php-src
|
9a07245b728714de09361ea16b9c6fcf70cb5685
| 87,622,482,530,967,570,000,000,000,000,000,000,000 | 5 |
Fixed bug #71273 A wrong ext directory setup in php.ini leads to crash
|
ffs_fs_kill_sb(struct super_block *sb)
{
ENTER();
kill_litter_super(sb);
if (sb->s_fs_info) {
ffs_release_dev(sb->s_fs_info);
ffs_data_closed(sb->s_fs_info);
ffs_data_put(sb->s_fs_info);
}
}
| 0 |
[
"CWE-416",
"CWE-362"
] |
linux
|
38740a5b87d53ceb89eb2c970150f6e94e00373a
| 56,817,304,301,397,460,000,000,000,000,000,000,000 | 11 |
usb: gadget: f_fs: Fix use-after-free
When using asynchronous read or write operations on the USB endpoints the
issuer of the IO request is notified by calling the ki_complete() callback
of the submitted kiocb when the URB has been completed.
Calling this ki_complete() callback will free kiocb. Make sure that the
structure is no longer accessed beyond that point, otherwise undefined
behaviour might occur.
Fixes: 2e4c7553cd6f ("usb: gadget: f_fs: add aio support")
Cc: <[email protected]> # v3.15+
Signed-off-by: Lars-Peter Clausen <[email protected]>
Signed-off-by: Felipe Balbi <[email protected]>
|
static const char *func_id_name(int id)
{
BUILD_BUG_ON(ARRAY_SIZE(func_id_str) != __BPF_FUNC_MAX_ID);
if (id >= 0 && id < __BPF_FUNC_MAX_ID && func_id_str[id])
return func_id_str[id];
else
return "unknown";
}
| 0 |
[
"CWE-200"
] |
linux
|
0d0e57697f162da4aa218b5feafe614fb666db07
| 127,697,952,206,217,560,000,000,000,000,000,000,000 | 9 |
bpf: don't let ldimm64 leak map addresses on unprivileged
The patch fixes two things at once:
1) It checks the env->allow_ptr_leaks and only prints the map address to
the log if we have the privileges to do so, otherwise it just dumps 0
as we would when kptr_restrict is enabled on %pK. Given the latter is
off by default and not every distro sets it, I don't want to rely on
this, hence the 0 by default for unprivileged.
2) Printing of ldimm64 in the verifier log is currently broken in that
we don't print the full immediate, but only the 32 bit part of the
first insn part for ldimm64. Thus, fix this up as well; it's okay to
access, since we verified all ldimm64 earlier already (including just
constants) through replace_map_fd_with_map_ptr().
Fixes: 1be7f75d1668 ("bpf: enable non-root eBPF programs")
Fixes: cbd357008604 ("bpf: verifier (add ability to receive verification log)")
Reported-by: Jann Horn <[email protected]>
Signed-off-by: Daniel Borkmann <[email protected]>
Acked-by: Alexei Starovoitov <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
goa_utils_keyfile_set_boolean (GoaAccount *account, const gchar *key, gboolean value)
{
GError *error;
GKeyFile *key_file;
gchar *contents;
gchar *group;
gchar *path;
gsize length;
contents = NULL;
path = g_strdup_printf ("%s/goa-1.0/accounts.conf", g_get_user_config_dir ());
group = g_strdup_printf ("Account %s", goa_account_get_id (account));
key_file = g_key_file_new ();
error = NULL;
if (!g_key_file_load_from_file (key_file,
path,
G_KEY_FILE_KEEP_COMMENTS | G_KEY_FILE_KEEP_TRANSLATIONS,
&error))
{
goa_warning ("Error loading keyfile %s: %s (%s, %d)",
path,
error->message,
g_quark_to_string (error->domain),
error->code);
g_error_free (error);
goto out;
}
g_key_file_set_boolean (key_file, group, key, value);
contents = g_key_file_to_data (key_file, &length, NULL);
error = NULL;
if (!g_file_set_contents (path, contents, length, &error))
{
g_prefix_error (&error, "Error writing key-value-file %s: ", path);
goa_warning ("%s (%s, %d)", error->message, g_quark_to_string (error->domain), error->code);
g_error_free (error);
goto out;
}
out:
g_free (contents);
g_key_file_free (key_file);
g_free (group);
g_free (path);
}
| 0 |
[
"CWE-310"
] |
gnome-online-accounts
|
edde7c63326242a60a075341d3fea0be0bc4d80e
| 217,126,147,435,805,270,000,000,000,000,000,000,000 | 48 |
Guard against invalid SSL certificates
None of the branded providers (e.g., Google, Facebook and Windows Live)
should ever have an invalid certificate. So set "ssl-strict" on the
SoupSession object being used by GoaWebView.
Providers like ownCloud and Exchange might have to deal with
certificates that are not up to the mark, e.g. self-signed
certificates. For those, show a warning when the account is being
created, and only proceed if the user decides to ignore it. In any
case, save the status of the certificate that was used to create the
account. So an account created with a valid certificate will never
work with an invalid one, and one created with an invalid certificate
will not throw any further warnings.
Fixes: CVE-2013-0240
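A hedged sketch of the hardening step ("ssl-strict" is a real libsoup 2.x SoupSession property; the session variable is illustrative):
g_object_set (session, "ssl-strict", TRUE, NULL);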
|
abandon_cmdline(void)
{
VIM_CLEAR(ccline.cmdbuff);
if (msg_scrolled == 0)
compute_cmdrow();
msg("");
redraw_cmdline = TRUE;
}
| 0 |
[
"CWE-122",
"CWE-787"
] |
vim
|
85b6747abc15a7a81086db31289cf1b8b17e6cb1
| 12,189,494,339,543,424,000,000,000,000,000,000,000 | 8 |
patch 8.2.4214: illegal memory access with large 'tabstop' in Ex mode
Problem: Illegal memory access with large 'tabstop' in Ex mode.
Solution: Allocate enough memory.
|
static s32 gf_media_vvc_read_vps_bs_internal(GF_BitStream *bs, VVCState *vvc, Bool stop_at_vps_ext)
{
u32 i, j;
s32 vps_id;
VVC_VPS *vps;
Bool vps_default_ptl_dpb_hrd_max_tid_flag=0;
//nalu header already parsed
vps_id = gf_bs_read_int_log(bs, 4, "vps_id");
if ((vps_id<0) || (vps_id >= 16)) return -1;
if (!vps_id) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[VVC] VPS ID 0 is forbidden\n"));
return -1;
}
vps = &vvc->vps[vps_id];
if (!vps->state) {
vps->id = vps_id;
vps->state = 1;
}
vps->max_layers = 1 + gf_bs_read_int_log(bs, 6, "max_layers");
if (vps->max_layers > MAX_LHVC_LAYERS) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[VVC] sorry, %d layers in VPS but only %d supported\n", vps->max_layers, MAX_LHVC_LAYERS));
return -1;
}
vps->max_sub_layers = gf_bs_read_int_log(bs, 3, "max_sub_layers_minus1") + 1;
if ((vps->max_layers>1) && (vps->max_sub_layers>1))
vps_default_ptl_dpb_hrd_max_tid_flag = gf_bs_read_int_log(bs, 1, "vps_default_ptl_dpb_hrd_max_tid_flag");
if (vps->max_layers>1)
vps->all_layers_independent = gf_bs_read_int_log(bs, 1, "all_layers_independent");
for (i=0; i<vps->max_layers; i++) {
u32 layer_id = gf_bs_read_int_log_idx(bs, 6, "layer_id", i);
if (layer_id>vps->max_layer_id) vps->max_layer_id = layer_id;
if (i && !vps->all_layers_independent) {
Bool layer_indep = gf_bs_read_int_log_idx(bs, 1, "layer_independent", i);
if (!layer_indep) {
Bool vps_max_tid_ref_present_flag = gf_bs_read_int_log_idx(bs, 1, "vps_max_tid_ref_present_flag", i);
for (j=0; j<i; j++) {
Bool vps_direct_ref_layer_flag = gf_bs_read_int_log_idx2(bs, 1, "vps_direct_ref_layer_flag", i, j);
if (vps_max_tid_ref_present_flag && vps_direct_ref_layer_flag) {
gf_bs_read_int_log_idx2(bs, 3, "vps_max_tid_il_ref_pics_plus1", i, j);
}
}
}
}
}
vps->num_ptl = 1;
if (vps->max_layers > 1) {
if (vps->all_layers_independent) {
vps->each_layer_is_ols = gf_bs_read_int_log(bs, 1, "each_layer_is_ols");
}
if (!vps->each_layer_is_ols) {
u32 vps_ols_mode_idc = 2;
if (!vps->all_layers_independent) {
vps_ols_mode_idc = gf_bs_read_int_log(bs, 2, "vps_ols_mode_idc");
}
if (vps_ols_mode_idc==2) {
u8 vps_num_output_layer_sets = 2 + gf_bs_read_int_log(bs, 8, "vps_num_output_layer_sets_minus2");
for (i=0; i<vps_num_output_layer_sets; i++) {
for (j=0; j<vps->max_layers; j++) {
gf_bs_read_int_log_idx2(bs, 1, "vps_ols_output_layer_flag", i, j);
}
}
}
}
vps->num_ptl = 1 + gf_bs_read_int_log(bs, 8, "num_ptl_minus1");
}
vps->ptl[0].pt_present = 1;
for (i=0; i<vps->num_ptl; i++) {
if (i)
vps->ptl[i].pt_present = gf_bs_read_int_log_idx(bs, 1, "pt_present", i);
if (!vps_default_ptl_dpb_hrd_max_tid_flag)
vps->ptl[i].ptl_max_tid = gf_bs_read_int_log_idx(bs, 3, "ptl_max_tid", i);
else
vps->ptl[i].ptl_max_tid = vps->max_sub_layers - 1;
}
//align
gf_bs_align(bs);
for (i=0; i<vps->num_ptl; i++) {
vvc_profile_tier_level(bs, &vps->ptl[i], i);
}
//TODO, parse multilayer stuff
return vps_id;
}
| 0 |
[
"CWE-190",
"CWE-787"
] |
gpac
|
51cdb67ff7c5f1242ac58c5aa603ceaf1793b788
| 187,951,357,298,057,500,000,000,000,000,000,000,000 | 88 |
add safety in avc/hevc/vvc sps/pps/vps ID check - cf #1720 #1721 #1722
|
correlation_coefficient(double sxx, double syy, double sxy)
{
double coe, tmp;
tmp = sxx * syy;
if (tmp < Tiny)
tmp = Tiny;
coe = sxy / sqrt(tmp);
if (coe > 1.)
return 1.;
if (coe < -1.)
return -1.;
return coe;
}
| 0 |
[
"CWE-119"
] |
w3m
|
67a3db378f5ee3047c158eae4342f7e3245a2ab1
| 228,716,102,704,351,830,000,000,000,000,000,000,000 | 13 |
Fix table rowspan and colspan
Origin: https://github.com/tats/w3m/pull/19
Bug-Debian: https://github.com/tats/w3m/issues/8
|
server_client_suspend(struct client *c)
{
struct session *s = c->session;
if (s == NULL || (c->flags & CLIENT_DETACHING))
return;
tty_stop_tty(&c->tty);
c->flags |= CLIENT_SUSPENDED;
proc_send(c->peer, MSG_SUSPEND, -1, NULL, 0);
}
| 0 |
[] |
src
|
b32e1d34e10a0da806823f57f02a4ae6e93d756e
| 237,287,119,811,900,170,000,000,000,000,000,000,000 | 11 |
evbuffer_new and bufferevent_new can both fail (when malloc fails) and
return NULL. GitHub issue 1547.
|
onig_global_callout_names_free(void)
{
free_callout_func_list(GlobalCalloutNameList);
GlobalCalloutNameList = 0;
global_callout_name_table_free();
return ONIG_NORMAL;
}
| 0 |
[
"CWE-400",
"CWE-399",
"CWE-674"
] |
oniguruma
|
4097828d7cc87589864fecf452f2cd46c5f37180
| 59,930,580,043,408,250,000,000,000,000,000,000,000 | 8 |
fix #147: Stack Exhaustion Problem caused by some parsing functions in regcomp.c making recursive calls to themselves.
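An illustrative depth guard of the kind the fix introduces (the limit and error names are assumed from oniguruma's conventions):
if (depth > PARSE_DEPTH_LIMIT)
  return ONIGERR_PARSE_DEPTH_LIMIT_OVER;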
|
CJSON_PUBLIC(void) cJSON_DeleteItemFromObject(cJSON *object, const char *string)
{
cJSON_Delete(cJSON_DetachItemFromObject(object, string));
}
| 0 |
[
"CWE-754",
"CWE-787"
] |
cJSON
|
be749d7efa7c9021da746e685bd6dec79f9dd99b
| 329,136,467,871,654,200,000,000,000,000,000,000,000 | 4 |
Fix crash of cJSON_GetObjectItemCaseSensitive when calling it on arrays
|
void child_process_clear(struct child_process *child)
{
argv_array_clear(&child->args);
argv_array_clear(&child->env_array);
}
| 0 |
[] |
git
|
321fd82389742398d2924640ce3a61791fd27d60
| 61,275,820,500,831,800,000,000,000,000,000,000,000 | 5 |
run-command: mark path lookup errors with ENOENT
Since commit e3a434468f (run-command: use the
async-signal-safe execv instead of execvp, 2017-04-19),
prepare_cmd() does its own PATH lookup for any commands we
run (on non-Windows platforms).
However, its logic does not match the old execvp call when
we fail to find a matching entry in the PATH. Instead of
feeding the name directly to execv, execvp would consider
that an ENOENT error. By continuing and passing the name
directly to execv, we effectively behave as if "." was
included at the end of the PATH. This can have confusing and
even dangerous results.
The fix itself is pretty straight-forward. There's a new
test in t0061 to cover this explicitly, and I've also added
a duplicate of the ENOENT test to ensure that we return the
correct errno for this case.
Signed-off-by: Jeff King <[email protected]>
Signed-off-by: Junio C Hamano <[email protected]>
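A hedged sketch of the fix's shape (simplified; locate_in_PATH() is git's internal helper, shown here illustratively):
char *program = locate_in_PATH(out->argv[1]);
if (program) {
	free((char *)out->argv[1]);
	out->argv[1] = program;
} else {
	/* fail like execvp() would instead of running from "." */
	errno = ENOENT;
	return -1;
}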
|
ImagingNewArray(const char *mode, int xsize, int ysize)
{
Imaging im;
ImagingSectionCookie cookie;
int y;
char* p;
im = ImagingNewPrologue(mode, xsize, ysize);
if (!im)
return NULL;
ImagingSectionEnter(&cookie);
/* Allocate image as an array of lines */
for (y = 0; y < im->ysize; y++) {
/* malloc check: linesize was already validated in the prologue */
p = (char *) calloc(1, im->linesize);
if (!p) {
ImagingDestroyArray(im);
break;
}
im->image[y] = p;
}
ImagingSectionLeave(&cookie);
if (y == im->ysize)
im->destroy = ImagingDestroyArray;
return ImagingNewEpilogue(im);
}
| 0 |
[
"CWE-284"
] |
Pillow
|
5d8a0be45aad78c5a22c8d099118ee26ef8144af
| 274,060,827,322,753,200,000,000,000,000,000,000,000 | 32 |
Memory error in Storage.c when accepting negative image size arguments
|
static int vhost_worker(void *data)
{
struct vhost_dev *dev = data;
struct vhost_work *work, *work_next;
struct llist_node *node;
mm_segment_t oldfs = get_fs();
set_fs(USER_DS);
use_mm(dev->mm);
for (;;) {
/* mb paired w/ kthread_stop */
set_current_state(TASK_INTERRUPTIBLE);
if (kthread_should_stop()) {
__set_current_state(TASK_RUNNING);
break;
}
node = llist_del_all(&dev->work_list);
if (!node)
schedule();
node = llist_reverse_order(node);
/* make sure flag is seen after deletion */
smp_wmb();
llist_for_each_entry_safe(work, work_next, node, node) {
clear_bit(VHOST_WORK_QUEUED, &work->flags);
__set_current_state(TASK_RUNNING);
work->fn(work);
if (need_resched())
schedule();
}
}
unuse_mm(dev->mm);
set_fs(oldfs);
return 0;
}
| 0 |
[
"CWE-120"
] |
linux
|
060423bfdee3f8bc6e2c1bac97de24d5415e2bc4
| 241,720,952,644,208,950,000,000,000,000,000,000,000 | 38 |
vhost: make sure log_num < in_num
The code assumes log_num < in_num everywhere, and that is true as long as
in_num is incremented by descriptor iov count, and log_num by 1. However
this breaks if there's a zero sized descriptor.
As a result, if a malicious guest creates a vring desc with desc.len = 0,
it may cause the host kernel to crash by overflowing the log array. This
bug can be triggered during the VM migration.
There's no need to log when desc.len = 0, so just don't increment log_num
in this case.
Fixes: 3a4d5c94e959 ("vhost_net: a kernel-level virtio server")
Cc: [email protected]
Reviewed-by: Lidong Chen <[email protected]>
Signed-off-by: ruippan <[email protected]>
Signed-off-by: yongduan <[email protected]>
Acked-by: Michael S. Tsirkin <[email protected]>
Reviewed-by: Tyler Hicks <[email protected]>
Signed-off-by: Michael S. Tsirkin <[email protected]>
|
static int fts3RollbackMethod(sqlite3_vtab *pVtab){
Fts3Table *p = (Fts3Table*)pVtab;
sqlite3Fts3PendingTermsClear(p);
assert( p->inTransaction!=0 );
TESTONLY( p->inTransaction = 0 );
TESTONLY( p->mxSavepoint = -1; );
return SQLITE_OK;
}
| 0 |
[
"CWE-787"
] |
sqlite
|
c72f2fb7feff582444b8ffdc6c900c69847ce8a9
| 194,959,407,062,163,650,000,000,000,000,000,000,000 | 8 |
More improvements to shadow table corruption detection in FTS3.
FossilOrigin-Name: 51525f9c3235967bc00a090e84c70a6400698c897aa4742e817121c725b8c99d
|
attributeCertificateExactNormalize(
slap_mask_t usage,
Syntax *syntax,
MatchingRule *mr,
struct berval *val,
struct berval *normalized,
void *ctx )
{
BerElementBuffer berbuf;
BerElement *ber = (BerElement *)&berbuf;
ber_tag_t tag;
ber_len_t len;
char issuer_serialbuf[SLAP_SN_BUFLEN], serialbuf[SLAP_SN_BUFLEN];
struct berval sn, i_sn, sn2 = BER_BVNULL, i_sn2 = BER_BVNULL;
struct berval issuer_dn = BER_BVNULL, bvdn;
char *p;
int rc = LDAP_INVALID_SYNTAX;
if ( BER_BVISEMPTY( val ) ) {
return rc;
}
if ( SLAP_MR_IS_VALUE_OF_ASSERTION_SYNTAX(usage) ) {
return serialNumberAndIssuerSerialNormalize( 0, NULL, NULL, val, normalized, ctx );
}
assert( SLAP_MR_IS_VALUE_OF_ATTRIBUTE_SYNTAX(usage) != 0 );
ber_init2( ber, val, LBER_USE_DER );
tag = ber_skip_tag( ber, &len ); /* Signed Sequence */
tag = ber_skip_tag( ber, &len ); /* Sequence */
tag = ber_skip_tag( ber, &len ); /* (Mandatory) version; must be v2(1) */
ber_skip_data( ber, len );
tag = ber_skip_tag( ber, &len ); /* Holder Sequence */
ber_skip_data( ber, len );
/* Issuer */
tag = ber_skip_tag( ber, &len ); /* Sequence */
/* issuerName (GeneralNames sequence; optional)? */
tag = ber_skip_tag( ber, &len ); /* baseCertificateID (sequence; optional)? */
tag = ber_skip_tag( ber, &len ); /* GeneralNames (sequence) */
tag = ber_skip_tag( ber, &len ); /* directoryName (we only accept this form of GeneralName) */
if ( tag != SLAP_X509_GN_DIRECTORYNAME ) {
return LDAP_INVALID_SYNTAX;
}
tag = ber_peek_tag( ber, &len ); /* sequence of RDN */
len = ber_ptrlen( ber );
bvdn.bv_val = val->bv_val + len;
bvdn.bv_len = val->bv_len - len;
rc = dnX509normalize( &bvdn, &issuer_dn );
if ( rc != LDAP_SUCCESS ) {
rc = LDAP_INVALID_SYNTAX;
goto done;
}
tag = ber_skip_tag( ber, &len ); /* sequence of RDN */
ber_skip_data( ber, len );
tag = ber_skip_tag( ber, &len ); /* serial number */
if ( tag != LBER_INTEGER ) {
rc = LDAP_INVALID_SYNTAX;
goto done;
}
i_sn.bv_val = (char *)ber->ber_ptr;
i_sn.bv_len = len;
i_sn2.bv_val = issuer_serialbuf;
i_sn2.bv_len = sizeof(issuer_serialbuf);
if ( slap_bin2hex( &i_sn, &i_sn2, ctx ) ) {
rc = LDAP_INVALID_SYNTAX;
goto done;
}
ber_skip_data( ber, len );
/* issuerUID (bitstring; optional)? */
/* objectDigestInfo (sequence; optional)? */
tag = ber_skip_tag( ber, &len ); /* Signature (sequence) */
ber_skip_data( ber, len );
tag = ber_skip_tag( ber, &len ); /* serial number */
if ( tag != LBER_INTEGER ) {
rc = LDAP_INVALID_SYNTAX;
goto done;
}
sn.bv_val = (char *)ber->ber_ptr;
sn.bv_len = len;
sn2.bv_val = serialbuf;
sn2.bv_len = sizeof(serialbuf);
if ( slap_bin2hex( &sn, &sn2, ctx ) ) {
rc = LDAP_INVALID_SYNTAX;
goto done;
}
ber_skip_data( ber, len );
normalized->bv_len = STRLENOF( "{ serialNumber , issuer { baseCertificateID { issuer { directoryName:rdnSequence:\"\" }, serial } } }" )
+ sn2.bv_len + issuer_dn.bv_len + i_sn2.bv_len;
normalized->bv_val = ch_malloc( normalized->bv_len + 1 );
p = normalized->bv_val;
p = lutil_strcopy( p, "{ serialNumber " );
p = lutil_strbvcopy( p, &sn2 );
p = lutil_strcopy( p, ", issuer { baseCertificateID { issuer { directoryName:rdnSequence:\"" );
p = lutil_strbvcopy( p, &issuer_dn );
p = lutil_strcopy( p, "\" }, serial " );
p = lutil_strbvcopy( p, &i_sn2 );
p = lutil_strcopy( p, " } } }" );
Debug( LDAP_DEBUG_TRACE, "attributeCertificateExactNormalize: %s\n",
normalized->bv_val );
rc = LDAP_SUCCESS;
done:
if ( issuer_dn.bv_val ) ber_memfree( issuer_dn.bv_val );
if ( i_sn2.bv_val != issuer_serialbuf ) ber_memfree_x( i_sn2.bv_val, ctx );
if ( sn2.bv_val != serialbuf ) ber_memfree_x( sn2.bv_val, ctx );
return rc;
}
| 0 |
[
"CWE-617"
] |
openldap
|
3539fc33212b528c56b716584f2c2994af7c30b0
| 274,125,306,659,879,100,000,000,000,000,000,000,000 | 118 |
ITS#9454 fix issuerAndThisUpdateCheck
|
static void jpc_qcc_destroyparms(jpc_ms_t *ms)
{
jpc_qcc_t *qcc = &ms->parms.qcc;
jpc_qcx_destroycompparms(&qcc->compparms);
}
| 0 |
[] |
jasper
|
4031ca321d8cb5798c316ab39c7a5dc88a61fdd7
| 82,327,777,048,794,720,000,000,000,000,000,000,000 | 5 |
Incorporated changes from patch
jasper-1.900.3-libjasper-stepsizes-overflow.patch
|
void *X509_STORE_CTX_get_ex_data(X509_STORE_CTX *ctx, int idx)
{
return CRYPTO_get_ex_data(&ctx->ex_data, idx);
}
| 0 |
[
"CWE-119"
] |
openssl
|
370ac320301e28bb615cee80124c042649c95d14
| 311,417,332,860,164,700,000,000,000,000,000,000,000 | 4 |
Fix length checks in X509_cmp_time to avoid out-of-bounds reads.
Also tighten X509_cmp_time to reject more than three fractional
seconds in the time; and to reject trailing garbage after the offset.
CVE-2015-1789
Reviewed-by: Viktor Dukhovni <[email protected]>
Reviewed-by: Richard Levitte <[email protected]>
|
message_add_date (mu_message_t msg)
{
mu_header_t hdr;
char buf[MU_DATETIME_RFC822_LENGTH+1];
struct tm ltm;
time_t t;
int rc;
rc = mu_message_get_header (msg, &hdr);
if (rc)
{
mu_diag_funcall (MU_DIAG_ERROR, "mu_message_get_header", NULL, rc);
return;
}
t = time (NULL);
localtime_r (&t, <m);
mu_strftime (buf, sizeof (buf), MU_DATETIME_FORM_RFC822, <m);
rc = mu_header_set_value (hdr, MU_HEADER_DATE, buf, 1);
if (rc)
mu_diag_funcall (MU_DIAG_ERROR, "mu_header_set_value", MU_HEADER_DATE, rc);
}
| 0 |
[] |
mailutils
|
4befcfd015256c568121653038accbd84820198f
| 1,955,786,356,151,080,000,000,000,000,000,000,000 | 23 |
mail: disable compose escapes in non-interactive mode.
* NEWS: Document changes.
* doc/texinfo/programs/mail.texi: Document changes.
* mail/send.c (mail_compose_send): Recognize escapes only in
interactive mode.
|
static int sched_rt_global_constraints(void)
{
unsigned long flags;
int i;
spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
for_each_possible_cpu(i) {
struct rt_rq *rt_rq = &cpu_rq(i)->rt;
spin_lock(&rt_rq->rt_runtime_lock);
rt_rq->rt_runtime = global_rt_runtime();
spin_unlock(&rt_rq->rt_runtime_lock);
}
spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
return 0;
}
| 0 |
[] |
linux-2.6
|
8f1bc385cfbab474db6c27b5af1e439614f3025c
| 204,622,566,499,479,100,000,000,000,000,000,000,000 | 17 |
sched: fair: weight calculations
In order to level the hierarchy, we need to calculate load based on the
root view. That is, each task's load is in the same unit.
        A
       / \
      B   1
     / \
    2   3
To compute 1's load we do:
    weight(1) / rq_weight(A)
To compute 2's load we do:
    (weight(2) / rq_weight(B)) * (weight(B) / rq_weight(A))
This yields load fractions in comparable units.
The consequence is that it changes virtual time. We used to have:
    vtime_{i} = time_{i} / weight_{i}
    vtime = \Sum vtime_{i} = time / rq_weight.
But with the new way of load calculation we get that vtime equals time.
Signed-off-by: Peter Zijlstra <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
|
getJsmnType(const ParseCtx *parseCtx) {
if(parseCtx->index >= parseCtx->tokenCount)
return JSMN_UNDEFINED;
return parseCtx->tokenArray[parseCtx->index].type;
}
| 0 |
[
"CWE-703",
"CWE-787"
] |
open62541
|
c800e2987b10bb3af6ef644b515b5d6392f8861d
| 51,204,856,751,650,530,000,000,000,000,000,000,000 | 5 |
fix(json): Check max recursion depth in more places
|
read_layer_block (FILE *f,
gint image_ID,
guint total_len,
PSPimage *ia)
{
gint i;
long block_start, sub_block_start, channel_start;
gint sub_id;
guint32 sub_init_len, sub_total_len;
gchar *name = NULL;
guint16 namelen;
guchar type, opacity, blend_mode, visibility, transparency_protected;
guchar link_group_id, mask_linked, mask_disabled;
guint32 image_rect[4], saved_image_rect[4], mask_rect[4], saved_mask_rect[4];
gboolean null_layer = FALSE;
guint16 bitmap_count, channel_count;
GimpImageType drawable_type;
guint32 layer_ID = 0;
GimpLayerMode layer_mode;
guint32 channel_init_len, channel_total_len;
guint32 compressed_len, uncompressed_len;
guint16 bitmap_type, channel_type;
gint width, height, bytespp, offset;
guchar **pixels, *pixel;
GeglBuffer *buffer;
block_start = ftell (f);
while (ftell (f) < block_start + total_len)
{
/* Read the layer sub-block header */
sub_id = read_block_header (f, &sub_init_len, &sub_total_len);
if (sub_id == -1)
return -1;
if (sub_id != PSP_LAYER_BLOCK)
{
g_message ("Invalid layer sub-block %s, should be LAYER",
block_name (sub_id));
return -1;
}
sub_block_start = ftell (f);
/* Read layer information chunk */
if (psp_ver_major >= 4)
{
if (fseek (f, 4, SEEK_CUR) < 0
|| fread (&namelen, 2, 1, f) < 1
|| ((namelen = GUINT16_FROM_LE (namelen)) && FALSE)
|| (name = g_malloc (namelen + 1)) == NULL
|| fread (name, namelen, 1, f) < 1
|| fread (&type, 1, 1, f) < 1
|| fread (&image_rect, 16, 1, f) < 1
|| fread (&saved_image_rect, 16, 1, f) < 1
|| fread (&opacity, 1, 1, f) < 1
|| fread (&blend_mode, 1, 1, f) < 1
|| fread (&visibility, 1, 1, f) < 1
|| fread (&transparency_protected, 1, 1, f) < 1
|| fread (&link_group_id, 1, 1, f) < 1
|| fread (&mask_rect, 16, 1, f) < 1
|| fread (&saved_mask_rect, 16, 1, f) < 1
|| fread (&mask_linked, 1, 1, f) < 1
|| fread (&mask_disabled, 1, 1, f) < 1
|| fseek (f, 47, SEEK_CUR) < 0
|| fread (&bitmap_count, 2, 1, f) < 1
|| fread (&channel_count, 2, 1, f) < 1)
{
g_message ("Error reading layer information chunk");
g_free (name);
return -1;
}
name[namelen] = 0;
type = PSP_LAYER_NORMAL; /* ??? */
}
else
{
name = g_malloc (257);
name[256] = 0;
if (fread (name, 256, 1, f) < 1
|| fread (&type, 1, 1, f) < 1
|| fread (&image_rect, 16, 1, f) < 1
|| fread (&saved_image_rect, 16, 1, f) < 1
|| fread (&opacity, 1, 1, f) < 1
|| fread (&blend_mode, 1, 1, f) < 1
|| fread (&visibility, 1, 1, f) < 1
|| fread (&transparency_protected, 1, 1, f) < 1
|| fread (&link_group_id, 1, 1, f) < 1
|| fread (&mask_rect, 16, 1, f) < 1
|| fread (&saved_mask_rect, 16, 1, f) < 1
|| fread (&mask_linked, 1, 1, f) < 1
|| fread (&mask_disabled, 1, 1, f) < 1
|| fseek (f, 43, SEEK_CUR) < 0
|| fread (&bitmap_count, 2, 1, f) < 1
|| fread (&channel_count, 2, 1, f) < 1)
{
g_message ("Error reading layer information chunk");
g_free (name);
return -1;
}
}
if (type == PSP_LAYER_FLOATING_SELECTION)
g_message ("Floating selection restored as normal layer");
swab_rect (image_rect);
swab_rect (saved_image_rect);
swab_rect (mask_rect);
swab_rect (saved_mask_rect);
bitmap_count = GUINT16_FROM_LE (bitmap_count);
channel_count = GUINT16_FROM_LE (channel_count);
layer_mode = gimp_layer_mode_from_psp_blend_mode (blend_mode);
if ((int) layer_mode == -1)
{
g_message ("Unsupported PSP layer blend mode %s "
"for layer %s, setting layer invisible",
blend_mode_name (blend_mode), name);
layer_mode = GIMP_LAYER_MODE_NORMAL_LEGACY;
visibility = FALSE;
}
width = saved_image_rect[2] - saved_image_rect[0];
height = saved_image_rect[3] - saved_image_rect[1];
if ((width < 0) || (width > GIMP_MAX_IMAGE_SIZE) /* w <= 2^18 */
|| (height < 0) || (height > GIMP_MAX_IMAGE_SIZE) /* h <= 2^18 */
|| ((width / 256) * (height / 256) >= 8192)) /* w * h < 2^29 */
{
g_message ("Invalid layer dimensions: %dx%d", width, height);
return -1;
}
IFDBG(2) g_message
("layer: %s %dx%d (%dx%d) @%d,%d opacity %d blend_mode %s "
"%d bitmaps %d channels",
name,
image_rect[2] - image_rect[0], image_rect[3] - image_rect[1],
width, height,
saved_image_rect[0], saved_image_rect[1],
opacity, blend_mode_name (blend_mode),
bitmap_count, channel_count);
IFDBG(2) g_message
("mask %dx%d (%dx%d) @%d,%d",
mask_rect[2] - mask_rect[0],
mask_rect[3] - mask_rect[1],
saved_mask_rect[2] - saved_mask_rect[0],
saved_mask_rect[3] - saved_mask_rect[1],
saved_mask_rect[0], saved_mask_rect[1]);
if (width == 0)
{
width++;
null_layer = TRUE;
}
if (height == 0)
{
height++;
null_layer = TRUE;
}
if (ia->grayscale)
if (!null_layer && bitmap_count == 1)
drawable_type = GIMP_GRAY_IMAGE, bytespp = 1;
else
drawable_type = GIMP_GRAYA_IMAGE, bytespp = 1;
else
if (!null_layer && bitmap_count == 1)
drawable_type = GIMP_RGB_IMAGE, bytespp = 3;
else
drawable_type = GIMP_RGBA_IMAGE, bytespp = 4;
layer_ID = gimp_layer_new (image_ID, name,
width, height,
drawable_type,
100.0 * opacity / 255.0,
layer_mode);
if (layer_ID == -1)
{
g_message ("Error creating layer");
return -1;
}
g_free (name);
gimp_image_insert_layer (image_ID, layer_ID, -1, -1);
if (saved_image_rect[0] != 0 || saved_image_rect[1] != 0)
gimp_layer_set_offsets (layer_ID,
saved_image_rect[0], saved_image_rect[1]);
if (!visibility)
gimp_item_set_visible (layer_ID, FALSE);
gimp_layer_set_lock_alpha (layer_ID, transparency_protected);
if (psp_ver_major < 4)
if (try_fseek (f, sub_block_start + sub_init_len, SEEK_SET) < 0)
{
return -1;
}
pixel = g_malloc0 (height * width * bytespp);
if (null_layer)
{
pixels = NULL;
}
else
{
pixels = g_new (guchar *, height);
for (i = 0; i < height; i++)
pixels[i] = pixel + width * bytespp * i;
}
buffer = gimp_drawable_get_buffer (layer_ID);
/* Read the layer channel sub-blocks */
while (ftell (f) < sub_block_start + sub_total_len)
{
sub_id = read_block_header (f, &channel_init_len,
&channel_total_len);
if (sub_id == -1)
{
gimp_image_delete (image_ID);
return -1;
}
if (sub_id != PSP_CHANNEL_BLOCK)
{
g_message ("Invalid layer sub-block %s, should be CHANNEL",
block_name (sub_id));
return -1;
}
channel_start = ftell (f);
if (psp_ver_major == 4)
fseek (f, 4, SEEK_CUR); /* Unknown field */
if (fread (&compressed_len, 4, 1, f) < 1
|| fread (&uncompressed_len, 4, 1, f) < 1
|| fread (&bitmap_type, 2, 1, f) < 1
|| fread (&channel_type, 2, 1, f) < 1)
{
g_message ("Error reading channel information chunk");
return -1;
}
compressed_len = GUINT32_FROM_LE (compressed_len);
uncompressed_len = GUINT32_FROM_LE (uncompressed_len);
bitmap_type = GUINT16_FROM_LE (bitmap_type);
channel_type = GUINT16_FROM_LE (channel_type);
if (bitmap_type > PSP_DIB_USER_MASK)
{
g_message ("Invalid bitmap type %d in channel information chunk",
bitmap_type);
return -1;
}
if (channel_type > PSP_CHANNEL_BLUE)
{
g_message ("Invalid channel type %d in channel information chunk",
channel_type);
return -1;
}
IFDBG(2) g_message ("channel: %s %s %d (%d) bytes %d bytespp",
bitmap_type_name (bitmap_type),
channel_type_name (channel_type),
uncompressed_len, compressed_len,
bytespp);
if (bitmap_type == PSP_DIB_TRANS_MASK)
offset = 3;
else
offset = channel_type - PSP_CHANNEL_RED;
if (psp_ver_major < 4)
if (try_fseek (f, channel_start + channel_init_len, SEEK_SET) < 0)
{
return -1;
}
if (!null_layer)
if (read_channel_data (f, ia, pixels, bytespp,
offset, buffer, compressed_len) == -1)
{
return -1;
}
if (try_fseek (f, channel_start + channel_total_len, SEEK_SET) < 0)
{
return -1;
}
}
gegl_buffer_set (buffer, GEGL_RECTANGLE (0, 0, width, height), 0,
NULL, pixel, GEGL_AUTO_ROWSTRIDE);
g_object_unref (buffer);
g_free (pixels);
g_free (pixel);
}
if (try_fseek (f, block_start + total_len, SEEK_SET) < 0)
{
return -1;
}
return layer_ID;
}
| 0 |
[
"CWE-125"
] |
gimp
|
eb2980683e6472aff35a3117587c4f814515c74d
| 265,376,429,059,256,680,000,000,000,000,000,000,000 | 316 |
Bug 790853 - (CVE-2017-17787) heap overread in psp importer.
As any external data, we have to check that strings being read at fixed
length are properly nul-terminated.
|
gx_ttfReader *gx_ttfReader__create(gs_memory_t *mem)
{
gx_ttfReader *r = gs_alloc_struct(mem, gx_ttfReader, &st_gx_ttfReader, "gx_ttfReader__create");
if (r != NULL) {
r->super.Eof = gx_ttfReader__Eof;
r->super.Read = gx_ttfReader__Read;
r->super.Seek = gx_ttfReader__Seek;
r->super.Tell = gx_ttfReader__Tell;
r->super.Error = gx_ttfReader__Error;
r->super.LoadGlyph = gx_ttfReader__LoadGlyph;
r->super.ReleaseGlyph = gx_ttfReader__ReleaseGlyph;
r->pos = 0;
r->error = false;
r->extra_glyph_index = -1;
memset(&r->glyph_data, 0, sizeof(r->glyph_data));
r->pfont = NULL;
r->memory = mem;
gx_ttfReader__Reset(r);
}
return r;
}
| 0 |
[
"CWE-125"
] |
ghostpdl
|
937ccd17ac65935633b2ebc06cb7089b91e17e6b
| 237,985,100,204,944,450,000,000,000,000,000,000,000 | 22 |
Bug 698056: make bounds check in gx_ttfReader__Read more robust
|
const string name() override { return "complete_multipart"; }
| 0 |
[
"CWE-770"
] |
ceph
|
ab29bed2fc9f961fe895de1086a8208e21ddaddc
| 292,355,334,234,175,800,000,000,000,000,000,000,000 | 1 |
rgw: fix issues with 'enforce bounds' patch
The patch to enforce bounds on max-keys/max-uploads/max-parts had a few
issues that would prevent us from compiling it. Instead of changing the
code provided by the submitter, we're addressing them in a separate
commit to maintain the DCO.
Signed-off-by: Joao Eduardo Luis <[email protected]>
Signed-off-by: Abhishek Lekshmanan <[email protected]>
(cherry picked from commit 29bc434a6a81a2e5c5b8cfc4c8d5c82ca5bf538a)
mimic specific fixes:
As the largish g_conf() change from master isn't in mimic yet, use the g_conf
global structure; also make rgw_op use the value from the req_info ceph context,
as we do for all requests
|
int ec_wNAF_mul(const EC_GROUP *group, EC_POINT *r, const BIGNUM *scalar,
size_t num, const EC_POINT *points[], const BIGNUM *scalars[],
BN_CTX *ctx)
{
const EC_POINT *generator = NULL;
EC_POINT *tmp = NULL;
size_t totalnum;
size_t blocksize = 0, numblocks = 0; /* for wNAF splitting */
size_t pre_points_per_block = 0;
size_t i, j;
int k;
int r_is_inverted = 0;
int r_is_at_infinity = 1;
size_t *wsize = NULL; /* individual window sizes */
signed char **wNAF = NULL; /* individual wNAFs */
size_t *wNAF_len = NULL;
size_t max_len = 0;
size_t num_val;
EC_POINT **val = NULL; /* precomputation */
EC_POINT **v;
EC_POINT ***val_sub = NULL; /* pointers to sub-arrays of 'val' or
* 'pre_comp->points' */
const EC_PRE_COMP *pre_comp = NULL;
int num_scalar = 0; /* flag: will be set to 1 if 'scalar' must be
* treated like other scalars, i.e.
* precomputation is not available */
int ret = 0;
if (!BN_is_zero(group->order) && !BN_is_zero(group->cofactor)) {
/*-
* Handle the common cases where the scalar is secret, enforcing a
* scalar multiplication implementation based on a Montgomery ladder,
* with various timing attack defenses.
*/
if ((scalar != NULL) && (num == 0)) {
/*-
* In this case we want to compute scalar * GeneratorPoint: this
* codepath is reached most prominently by (ephemeral) key
* generation of EC cryptosystems (i.e. ECDSA keygen and sign setup,
* ECDH keygen/first half), where the scalar is always secret. This
* is why we ignore if BN_FLG_CONSTTIME is actually set and we
* always call the ladder version.
*/
return ec_scalar_mul_ladder(group, r, scalar, NULL, ctx);
}
if ((scalar == NULL) && (num == 1)) {
/*-
* In this case we want to compute scalar * VariablePoint: this
* codepath is reached most prominently by the second half of ECDH,
* where the secret scalar is multiplied by the peer's public point.
* To protect the secret scalar, we ignore if BN_FLG_CONSTTIME is
* actually set and we always call the ladder version.
*/
return ec_scalar_mul_ladder(group, r, scalars[0], points[0], ctx);
}
}
if (scalar != NULL) {
generator = EC_GROUP_get0_generator(group);
if (generator == NULL) {
ECerr(EC_F_EC_WNAF_MUL, EC_R_UNDEFINED_GENERATOR);
goto err;
}
/* look if we can use precomputed multiples of generator */
pre_comp = group->pre_comp.ec;
if (pre_comp && pre_comp->numblocks
&& (EC_POINT_cmp(group, generator, pre_comp->points[0], ctx) ==
0)) {
blocksize = pre_comp->blocksize;
/*
* determine maximum number of blocks that wNAF splitting may
* yield (NB: maximum wNAF length is bit length plus one)
*/
numblocks = (BN_num_bits(scalar) / blocksize) + 1;
/*
* we cannot use more blocks than we have precomputation for
*/
if (numblocks > pre_comp->numblocks)
numblocks = pre_comp->numblocks;
pre_points_per_block = (size_t)1 << (pre_comp->w - 1);
/* check that pre_comp looks sane */
if (pre_comp->num != (pre_comp->numblocks * pre_points_per_block)) {
ECerr(EC_F_EC_WNAF_MUL, ERR_R_INTERNAL_ERROR);
goto err;
}
} else {
/* can't use precomputation */
pre_comp = NULL;
numblocks = 1;
num_scalar = 1; /* treat 'scalar' like 'num'-th element of
* 'scalars' */
}
}
totalnum = num + numblocks;
wsize = OPENSSL_malloc(totalnum * sizeof(wsize[0]));
wNAF_len = OPENSSL_malloc(totalnum * sizeof(wNAF_len[0]));
/* include space for pivot */
wNAF = OPENSSL_malloc((totalnum + 1) * sizeof(wNAF[0]));
val_sub = OPENSSL_malloc(totalnum * sizeof(val_sub[0]));
/* Ensure wNAF is initialised in case we end up going to err */
if (wNAF != NULL)
wNAF[0] = NULL; /* preliminary pivot */
if (wsize == NULL || wNAF_len == NULL || wNAF == NULL || val_sub == NULL) {
ECerr(EC_F_EC_WNAF_MUL, ERR_R_MALLOC_FAILURE);
goto err;
}
/*
* num_val will be the total number of temporarily precomputed points
*/
num_val = 0;
for (i = 0; i < num + num_scalar; i++) {
size_t bits;
bits = i < num ? BN_num_bits(scalars[i]) : BN_num_bits(scalar);
wsize[i] = EC_window_bits_for_scalar_size(bits);
num_val += (size_t)1 << (wsize[i] - 1);
wNAF[i + 1] = NULL; /* make sure we always have a pivot */
wNAF[i] =
bn_compute_wNAF((i < num ? scalars[i] : scalar), wsize[i],
&wNAF_len[i]);
if (wNAF[i] == NULL)
goto err;
if (wNAF_len[i] > max_len)
max_len = wNAF_len[i];
}
if (numblocks) {
/* we go here iff scalar != NULL */
if (pre_comp == NULL) {
if (num_scalar != 1) {
ECerr(EC_F_EC_WNAF_MUL, ERR_R_INTERNAL_ERROR);
goto err;
}
/* we have already generated a wNAF for 'scalar' */
} else {
signed char *tmp_wNAF = NULL;
size_t tmp_len = 0;
if (num_scalar != 0) {
ECerr(EC_F_EC_WNAF_MUL, ERR_R_INTERNAL_ERROR);
goto err;
}
/*
* use the window size for which we have precomputation
*/
wsize[num] = pre_comp->w;
tmp_wNAF = bn_compute_wNAF(scalar, wsize[num], &tmp_len);
if (!tmp_wNAF)
goto err;
if (tmp_len <= max_len) {
/*
* One of the other wNAFs is at least as long as the wNAF
* belonging to the generator, so wNAF splitting will not buy
* us anything.
*/
numblocks = 1;
totalnum = num + 1; /* don't use wNAF splitting */
wNAF[num] = tmp_wNAF;
wNAF[num + 1] = NULL;
wNAF_len[num] = tmp_len;
/*
* pre_comp->points starts with the points that we need here:
*/
val_sub[num] = pre_comp->points;
} else {
/*
* don't include tmp_wNAF directly into wNAF array - use wNAF
* splitting and include the blocks
*/
signed char *pp;
EC_POINT **tmp_points;
if (tmp_len < numblocks * blocksize) {
/*
* possibly we can do with fewer blocks than estimated
*/
numblocks = (tmp_len + blocksize - 1) / blocksize;
if (numblocks > pre_comp->numblocks) {
ECerr(EC_F_EC_WNAF_MUL, ERR_R_INTERNAL_ERROR);
OPENSSL_free(tmp_wNAF);
goto err;
}
totalnum = num + numblocks;
}
/* split wNAF in 'numblocks' parts */
pp = tmp_wNAF;
tmp_points = pre_comp->points;
for (i = num; i < totalnum; i++) {
if (i < totalnum - 1) {
wNAF_len[i] = blocksize;
if (tmp_len < blocksize) {
ECerr(EC_F_EC_WNAF_MUL, ERR_R_INTERNAL_ERROR);
OPENSSL_free(tmp_wNAF);
goto err;
}
tmp_len -= blocksize;
} else
/*
* last block gets whatever is left (this could be
* more or less than 'blocksize'!)
*/
wNAF_len[i] = tmp_len;
wNAF[i + 1] = NULL;
wNAF[i] = OPENSSL_malloc(wNAF_len[i]);
if (wNAF[i] == NULL) {
ECerr(EC_F_EC_WNAF_MUL, ERR_R_MALLOC_FAILURE);
OPENSSL_free(tmp_wNAF);
goto err;
}
memcpy(wNAF[i], pp, wNAF_len[i]);
if (wNAF_len[i] > max_len)
max_len = wNAF_len[i];
if (*tmp_points == NULL) {
ECerr(EC_F_EC_WNAF_MUL, ERR_R_INTERNAL_ERROR);
OPENSSL_free(tmp_wNAF);
goto err;
}
val_sub[i] = tmp_points;
tmp_points += pre_points_per_block;
pp += blocksize;
}
OPENSSL_free(tmp_wNAF);
}
}
}
/*
* All points we precompute now go into a single array 'val'.
* 'val_sub[i]' is a pointer to the subarray for the i-th point, or to a
* subarray of 'pre_comp->points' if we already have precomputation.
*/
val = OPENSSL_malloc((num_val + 1) * sizeof(val[0]));
if (val == NULL) {
ECerr(EC_F_EC_WNAF_MUL, ERR_R_MALLOC_FAILURE);
goto err;
}
val[num_val] = NULL; /* pivot element */
/* allocate points for precomputation */
v = val;
for (i = 0; i < num + num_scalar; i++) {
val_sub[i] = v;
for (j = 0; j < ((size_t)1 << (wsize[i] - 1)); j++) {
*v = EC_POINT_new(group);
if (*v == NULL)
goto err;
v++;
}
}
if (!(v == val + num_val)) {
ECerr(EC_F_EC_WNAF_MUL, ERR_R_INTERNAL_ERROR);
goto err;
}
if ((tmp = EC_POINT_new(group)) == NULL)
goto err;
/*-
* prepare precomputed values:
* val_sub[i][0] := points[i]
* val_sub[i][1] := 3 * points[i]
* val_sub[i][2] := 5 * points[i]
* ...
*/
for (i = 0; i < num + num_scalar; i++) {
if (i < num) {
if (!EC_POINT_copy(val_sub[i][0], points[i]))
goto err;
} else {
if (!EC_POINT_copy(val_sub[i][0], generator))
goto err;
}
if (wsize[i] > 1) {
if (!EC_POINT_dbl(group, tmp, val_sub[i][0], ctx))
goto err;
for (j = 1; j < ((size_t)1 << (wsize[i] - 1)); j++) {
if (!EC_POINT_add
(group, val_sub[i][j], val_sub[i][j - 1], tmp, ctx))
goto err;
}
}
}
if (!EC_POINTs_make_affine(group, num_val, val, ctx))
goto err;
r_is_at_infinity = 1;
for (k = max_len - 1; k >= 0; k--) {
if (!r_is_at_infinity) {
if (!EC_POINT_dbl(group, r, r, ctx))
goto err;
}
for (i = 0; i < totalnum; i++) {
if (wNAF_len[i] > (size_t)k) {
int digit = wNAF[i][k];
int is_neg;
if (digit) {
is_neg = digit < 0;
if (is_neg)
digit = -digit;
if (is_neg != r_is_inverted) {
if (!r_is_at_infinity) {
if (!EC_POINT_invert(group, r, ctx))
goto err;
}
r_is_inverted = !r_is_inverted;
}
/* digit > 0 */
if (r_is_at_infinity) {
if (!EC_POINT_copy(r, val_sub[i][digit >> 1]))
goto err;
r_is_at_infinity = 0;
} else {
if (!EC_POINT_add
(group, r, r, val_sub[i][digit >> 1], ctx))
goto err;
}
}
}
}
}
if (r_is_at_infinity) {
if (!EC_POINT_set_to_infinity(group, r))
goto err;
} else {
if (r_is_inverted)
if (!EC_POINT_invert(group, r, ctx))
goto err;
}
ret = 1;
err:
EC_POINT_free(tmp);
OPENSSL_free(wsize);
OPENSSL_free(wNAF_len);
if (wNAF != NULL) {
signed char **w;
for (w = wNAF; *w != NULL; w++)
OPENSSL_free(*w);
OPENSSL_free(wNAF);
}
if (val != NULL) {
for (v = val; *v != NULL; v++)
EC_POINT_clear_free(*v);
OPENSSL_free(val);
}
OPENSSL_free(val_sub);
return ret;
}
| 0 |
[
"CWE-327",
"CWE-320"
] |
openssl
|
b1d6d55ece1c26fa2829e2b819b038d7b6d692b4
| 129,859,981,798,289,770,000,000,000,000,000,000,000 | 383 |
Timing vulnerability in ECDSA signature generation (CVE-2018-0735)
Preallocate an extra limb for some of the big numbers to avoid a reallocation
that can potentially provide a side channel.
Reviewed-by: Bernd Edlinger <[email protected]>
(Merged from https://github.com/openssl/openssl/pull/7486)
(cherry picked from commit 99540ec79491f59ed8b46b4edf130e17dc907f52)
|
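The mitigation named above can be sketched as follows. This is a hedged illustration, not the verbatim patch: bn_wexpand() is an internal OpenSSL helper, and the exact headroom over group_top is an assumption.

    /* Hedged sketch: preallocate limbs for the secret nonce 'k' before
     * any secret-dependent arithmetic, so no later operation triggers a
     * reallocation whose timing could leak information. Names assumed. */
    if (!bn_wexpand(k, group_top + 2))
        goto err;
    /* nonce generation and modular reduction now proceed without any
     * further limb growth, hence without the realloc side channel */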
static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
size_t size, int dir, u64 dma_mask)
{
struct dmar_domain *domain;
phys_addr_t start_paddr;
unsigned long iova_pfn;
int prot = 0;
int ret;
struct intel_iommu *iommu;
unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
BUG_ON(dir == DMA_NONE);
if (iommu_no_mapping(dev))
return paddr;
domain = get_valid_domain_for_dev(dev);
if (!domain)
return 0;
iommu = domain_get_iommu(domain);
size = aligned_nrpages(paddr, size);
iova_pfn = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), dma_mask);
if (!iova_pfn)
goto error;
/*
* Check if DMAR supports zero-length reads on write only
* mappings..
*/
if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
!cap_zlr(iommu->cap))
prot |= DMA_PTE_READ;
if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
prot |= DMA_PTE_WRITE;
/*
* paddr - (paddr + size) might be partial page, we should map the whole
* page. Note: if two part of one page are separately mapped, we
* might have two guest_addr mapping to the same host paddr, but this
* is not a big problem
*/
ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova_pfn),
mm_to_dma_pfn(paddr_pfn), size, prot);
if (ret)
goto error;
start_paddr = (phys_addr_t)iova_pfn << PAGE_SHIFT;
start_paddr += paddr & ~PAGE_MASK;
return start_paddr;
error:
if (iova_pfn)
free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(size));
pr_err("Device %s request: %zx@%llx dir %d --- failed\n",
dev_name(dev), size, (unsigned long long)paddr, dir);
return 0;
}
| 0 |
[] |
linux
|
fb58fdcd295b914ece1d829b24df00a17a9624bc
| 283,049,073,282,824,240,000,000,000,000,000,000,000 | 58 |
iommu/vt-d: Do not enable ATS for untrusted devices
Currently Linux automatically enables ATS (Address Translation Service)
for any device that supports it (and IOMMU is turned on). ATS is used to
accelerate DMA access as the device can cache translations locally so
there is no need to do full translation on IOMMU side. However, as
pointed out in [1] ATS can be used to bypass IOMMU based security
completely by simply sending PCIe read/write transaction with AT
(Address Translation) field set to "translated".
To mitigate this, modify the Intel IOMMU code so that it does not enable
ATS for any device that is marked as untrusted. In case this turns
out to cause performance issues we may selectively allow ATS based on
user decision, but for now we use a big hammer and disable it completely
to be on the safe side.
[1] https://www.repository.cam.ac.uk/handle/1810/274352
Signed-off-by: Mika Westerberg <[email protected]>
Reviewed-by: Ashok Raj <[email protected]>
Reviewed-by: Joerg Roedel <[email protected]>
Acked-by: Joerg Roedel <[email protected]>
|
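A hedged sketch of the gating idea in the message above (not the verbatim patch; pdev->untrusted is the per-device flag set for externally exposed ports, and the helper name here is invented for illustration):

static bool ats_allowed(struct pci_dev *pdev,
			struct device_domain_info *info)
{
	if (pdev->untrusted)
		return false;	/* big hammer: no ATS for untrusted devices */
	return info->ats_supported;
}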
static char *get_frame_label(int type) {
static char label[128];
int nf = egg->lang.nfunctions;
int nb = egg->lang.nbrackets;
int ct = context;
/* TODO: this type hack to substruct nb and ctx looks weird */
#if 1
if (type == 1) {
nb--;
} else if (type == 2) {
ct--;
}
#endif
/* THIS IS GAS_ONLY */
snprintf (label, sizeof (label), FRAME_FMT, nf, nb, ct);
return label;
}
| 0 |
[
"CWE-416"
] |
radare2
|
93af319e0af787ede96537d46210369f5c24240c
| 64,887,660,470,703,560,000,000,000,000,000,000,000 | 17 |
Fix #14296 - Segfault in ragg2 (#14308)
|
static int peak_usb_create_dev(const struct peak_usb_adapter *peak_usb_adapter,
struct usb_interface *intf, int ctrl_idx)
{
struct usb_device *usb_dev = interface_to_usbdev(intf);
int sizeof_candev = peak_usb_adapter->sizeof_dev_private;
struct peak_usb_device *dev;
struct net_device *netdev;
int i, err;
u16 tmp16;
if (sizeof_candev < sizeof(struct peak_usb_device))
sizeof_candev = sizeof(struct peak_usb_device);
netdev = alloc_candev(sizeof_candev, PCAN_USB_MAX_TX_URBS);
if (!netdev) {
dev_err(&intf->dev, "%s: couldn't alloc candev\n",
PCAN_USB_DRIVER_NAME);
return -ENOMEM;
}
dev = netdev_priv(netdev);
/* allocate a buffer large enough to send commands */
dev->cmd_buf = kmalloc(PCAN_USB_MAX_CMD_LEN, GFP_KERNEL);
if (!dev->cmd_buf) {
err = -ENOMEM;
goto lbl_free_candev;
}
dev->udev = usb_dev;
dev->netdev = netdev;
dev->adapter = peak_usb_adapter;
dev->ctrl_idx = ctrl_idx;
dev->state = PCAN_USB_STATE_CONNECTED;
dev->ep_msg_in = peak_usb_adapter->ep_msg_in;
dev->ep_msg_out = peak_usb_adapter->ep_msg_out[ctrl_idx];
dev->can.clock = peak_usb_adapter->clock;
dev->can.bittiming_const = peak_usb_adapter->bittiming_const;
dev->can.do_set_bittiming = peak_usb_set_bittiming;
dev->can.data_bittiming_const = peak_usb_adapter->data_bittiming_const;
dev->can.do_set_data_bittiming = peak_usb_set_data_bittiming;
dev->can.do_set_mode = peak_usb_set_mode;
dev->can.do_get_berr_counter = peak_usb_adapter->do_get_berr_counter;
dev->can.ctrlmode_supported = peak_usb_adapter->ctrlmode_supported;
netdev->netdev_ops = &peak_usb_netdev_ops;
netdev->flags |= IFF_ECHO; /* we support local echo */
init_usb_anchor(&dev->rx_submitted);
init_usb_anchor(&dev->tx_submitted);
atomic_set(&dev->active_tx_urbs, 0);
for (i = 0; i < PCAN_USB_MAX_TX_URBS; i++)
dev->tx_contexts[i].echo_index = PCAN_USB_MAX_TX_URBS;
dev->prev_siblings = usb_get_intfdata(intf);
usb_set_intfdata(intf, dev);
SET_NETDEV_DEV(netdev, &intf->dev);
netdev->dev_id = ctrl_idx;
err = register_candev(netdev);
if (err) {
dev_err(&intf->dev, "couldn't register CAN device: %d\n", err);
goto lbl_restore_intf_data;
}
if (dev->prev_siblings)
(dev->prev_siblings)->next_siblings = dev;
/* keep hw revision into the netdevice */
tmp16 = le16_to_cpu(usb_dev->descriptor.bcdDevice);
dev->device_rev = tmp16 >> 8;
if (dev->adapter->dev_init) {
err = dev->adapter->dev_init(dev);
if (err)
goto lbl_unregister_candev;
}
/* set bus off */
if (dev->adapter->dev_set_bus) {
err = dev->adapter->dev_set_bus(dev, 0);
if (err)
goto lbl_unregister_candev;
}
/* get device number early */
if (dev->adapter->dev_get_device_id)
dev->adapter->dev_get_device_id(dev, &dev->device_number);
netdev_info(netdev, "attached to %s channel %u (device %u)\n",
peak_usb_adapter->name, ctrl_idx, dev->device_number);
return 0;
lbl_unregister_candev:
unregister_candev(netdev);
lbl_restore_intf_data:
usb_set_intfdata(intf, dev->prev_siblings);
kfree(dev->cmd_buf);
lbl_free_candev:
free_candev(netdev);
return err;
}
| 1 |
[
"CWE-909"
] |
linux
|
f7a1337f0d29b98733c8824e165fca3371d7d4fd
| 337,480,791,575,728,400,000,000,000,000,000,000,000 | 112 |
can: peak_usb: fix slab info leak
Fix a small slab info leak due to a failure to clear the command buffer
at allocation.
The first 16 bytes of the command buffer are always sent to the device
in pcan_usb_send_cmd() even though only the first two may have been
initialised in case no argument payload is provided (e.g. when waiting
for a response).
Fixes: bb4785551f64 ("can: usb: PEAK-System Technik USB adapters driver core")
Cc: stable <[email protected]> # 3.4
Reported-by: [email protected]
Signed-off-by: Johan Hovold <[email protected]>
Signed-off-by: Marc Kleine-Budde <[email protected]>
|
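Per the message, the fix amounts to clearing the command buffer at allocation so stale slab contents can never reach the wire — a sketch of the one-line change at the allocation site shown in the function above:

	/* zero at allocation: the 16 bytes sent by pcan_usb_send_cmd()
	 * can no longer carry leftover slab data */
	dev->cmd_buf = kzalloc(PCAN_USB_MAX_CMD_LEN, GFP_KERNEL);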
_lyd_insert_hash(struct lyd_node *node, int keyless_list_check)
{
struct lyd_node *iter;
int i;
if (node->parent) {
if ((node->schema->nodetype != LYS_LIST) || lyd_list_has_keys(node)) {
if ((node->schema->nodetype == LYS_LEAF) && lys_is_key((struct lys_node_leaf *)node->schema, NULL)) {
/* we are adding a key which means that it may be the last missing key for our parent's hash */
if (!lyd_hash(node->parent)) {
/* yep, we successfully hashed node->parent so it is technically now added to its parent (hash-wise) */
_lyd_insert_hash(node->parent, 0);
}
}
/* create parent hash table if required, otherwise just add the new child */
if (!node->parent->ht) {
for (i = 0, iter = node->parent->child; iter; ++i, iter = iter->next) {
if ((iter->schema->nodetype == LYS_LIST) && !lyd_list_has_keys(iter)) {
/* it will either never have keys and will never be hashed or has not all keys created yet */
--i;
}
}
assert(i <= LY_CACHE_HT_MIN_CHILDREN);
if (i == LY_CACHE_HT_MIN_CHILDREN) {
/* create hash table, insert all the children */
node->parent->ht = lyht_new(1, sizeof(struct lyd_node *), lyd_hash_table_val_equal, NULL, 1);
LY_TREE_FOR(node->parent->child, iter) {
if ((iter->schema->nodetype == LYS_LIST) && !lyd_list_has_keys(iter)) {
/* skip lists without keys */
continue;
}
if (lyht_insert(node->parent->ht, &iter, iter->hash, NULL)) {
assert(0);
}
}
}
} else {
if (lyht_insert(node->parent->ht, &node, node->hash, NULL)) {
assert(0);
}
}
/* if node was in a state data subtree, wasn't it a part of a key-less list hash? */
if (keyless_list_check) {
lyd_keyless_list_hash_change(node->parent);
}
}
}
}
| 0 |
[
"CWE-119"
] |
libyang
|
32fb4993bc8bb49e93e84016af3c10ea53964be5
| 285,987,883,810,735,970,000,000,000,000,000,000,000 | 51 |
schema tree BUGFIX do not check features while still resolving schema
Fixes #723
|
static int setup_kmem_cache_nodes(struct kmem_cache *cachep, gfp_t gfp)
{
int ret;
int node;
struct kmem_cache_node *n;
for_each_online_node(node) {
ret = setup_kmem_cache_node(cachep, node, gfp, true);
if (ret)
goto fail;
}
return 0;
fail:
if (!cachep->list.next) {
/* Cache is not active yet. Roll back what we did */
node--;
while (node >= 0) {
n = get_node(cachep, node);
if (n) {
kfree(n->shared);
free_alien_cache(n->alien);
kfree(n);
cachep->node[node] = NULL;
}
node--;
}
}
return -ENOMEM;
}
| 0 |
[
"CWE-703"
] |
linux
|
c4e490cf148e85ead0d1b1c2caaba833f1d5b29f
| 254,959,806,428,147,870,000,000,000,000,000,000,000 | 32 |
mm/slab.c: fix SLAB freelist randomization duplicate entries
This patch fixes a bug in the freelist randomization code. When a high
random number is used, the freelist will contain duplicate entries,
causing different allocations to share the same chunk. This results in
odd behaviour and crashes. It should be uncommon, but it depends on the
machine; we saw it happening more often on some machines (every few
hours of running tests).
Fixes: c7ce4f60ac19 ("mm: SLAB freelist randomization")
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: John Sperbeck <[email protected]>
Signed-off-by: Thomas Garnier <[email protected]>
Cc: Christoph Lameter <[email protected]>
Cc: Pekka Enberg <[email protected]>
Cc: David Rientjes <[email protected]>
Cc: Joonsoo Kim <[email protected]>
Cc: <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
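A minimal standalone sketch of the duplicate-free shuffle the fix calls for: an in-place Fisher-Yates swap guarantees every slot index appears exactly once. rand() stands in for the kernel PRNG here; this is an illustration, not the kernel code itself.

#include <stdio.h>
#include <stdlib.h>

/* Fisher-Yates shuffle over freelist slot indices: the result is a
 * permutation, so two freelist entries can never point at the same
 * chunk. */
static void shuffle_freelist(unsigned int *list, unsigned int count)
{
	unsigned int i, j, tmp;

	for (i = count - 1; i > 0; i--) {
		j = (unsigned int)rand() % (i + 1);
		tmp = list[i];
		list[i] = list[j];
		list[j] = tmp;
	}
}

int main(void)
{
	unsigned int freelist[8], i;

	for (i = 0; i < 8; i++)
		freelist[i] = i;
	shuffle_freelist(freelist, 8);
	for (i = 0; i < 8; i++)
		printf("%u ", freelist[i]);	/* a permutation, no duplicates */
	printf("\n");
	return 0;
}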
static inline void nm_order(NM *low, NM *high) {
if(u128_cmp((*low)->neta, (*high)->neta) > 0) {
NM tmp = *low;
*low = *high;
*high = tmp;
}
}
| 0 |
[] |
netmask
|
29a9c239bd1008363f5b34ffd6c2cef906f3660c
| 146,878,439,598,457,090,000,000,000,000,000,000,000 | 7 |
bump version to 2.4.4
* remove checks for negative unsigned ints, fixes #2
* harden error logging functions, fixes #3
|
struct inode *proc_pid_make_inode(struct super_block * sb,
struct task_struct *task, umode_t mode)
{
struct inode * inode;
struct proc_inode *ei;
/* We need a new inode */
inode = new_inode(sb);
if (!inode)
goto out;
/* Common stuff */
ei = PROC_I(inode);
inode->i_mode = mode;
inode->i_ino = get_next_ino();
inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
inode->i_op = &proc_def_inode_operations;
/*
* grab the reference to task.
*/
ei->pid = get_task_pid(task, PIDTYPE_PID);
if (!ei->pid)
goto out_unlock;
task_dump_owner(task, 0, &inode->i_uid, &inode->i_gid);
security_task_to_inode(task, inode);
out:
return inode;
out_unlock:
iput(inode);
return NULL;
}
| 0 |
[
"CWE-119"
] |
linux
|
7f7ccc2ccc2e70c6054685f5e3522efa81556830
| 143,523,665,840,500,000,000,000,000,000,000,000,000 | 36 |
proc: do not access cmdline nor environ from file-backed areas
proc_pid_cmdline_read() and environ_read() directly access the target
process' VM to retrieve the command line and environment. If this
process remaps these areas onto a file via mmap(), the requesting
process may experience various issues such as extra delays if the
underlying device is slow to respond.
Let's simply refuse to access file-backed areas in these functions.
For this we add a new FOLL_ANON gup flag that is passed to all calls
to access_remote_vm(). The code already takes care of such failures
(including unmapped areas). Accesses via /proc/pid/mem were not
changed though.
This was assigned CVE-2018-1120.
Note for stable backports: the patch may apply to kernels prior to 4.11
but silently miss one location; it must be checked that no call to
access_remote_vm() keeps zero as the last argument.
Reported-by: Qualys Security Advisory <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Andy Lutomirski <[email protected]>
Cc: Oleg Nesterov <[email protected]>
Cc: [email protected]
Signed-off-by: Willy Tarreau <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
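A hedged sketch of how the new flag plugs in (placement and details are assumed; only the flag name and the access_remote_vm() call are taken from the message):

	/* in the gup path: reject file-backed VMAs when the caller asked
	 * for anonymous memory only */
	if ((gup_flags & FOLL_ANON) && !vma_is_anonymous(vma))
		return -EFAULT;

	/* in the /proc readers: opt in by passing the flag through */
	nr_read = access_remote_vm(mm, addr, buf, count, FOLL_ANON);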
ms_escher_get_data (MSEscherState *state,
gint offset, /* bytes from logical start of the stream */
gint num_bytes, /*how many bytes we want, NOT incl prefix */
gboolean * needs_free)
{
BiffQuery *q = state->q;
guint8 *res;
g_return_val_if_fail (offset >= state->start_offset, NULL);
/* find the 1st containing record */
while (offset >= state->end_offset) {
if (!ms_biff_query_next (q)) {
g_warning ("unexpected end of stream;");
return NULL;
}
if (q->opcode != BIFF_MS_O_DRAWING &&
q->opcode != BIFF_MS_O_DRAWING_GROUP &&
q->opcode != BIFF_MS_O_DRAWING_SELECTION &&
q->opcode != BIFF_CHART_gelframe &&
q->opcode != BIFF_CONTINUE) {
g_warning ("Unexpected record type 0x%x len=0x%x @ 0x%lx;", q->opcode, q->length, (long)q->streamPos);
return NULL;
}
d (1, g_printerr ("Target is 0x%x bytes at 0x%x, current = 0x%x..0x%x;\n"
"Adding biff-0x%x of length 0x%x;\n",
num_bytes, offset,
state->start_offset,
state->end_offset,
q->opcode, q->length););
state->start_offset = state->end_offset;
state->end_offset += q->length;
state->segment_len = q->length;
}
g_return_val_if_fail (offset >= state->start_offset, NULL);
g_return_val_if_fail ((size_t)(offset - state->start_offset) < q->length, NULL);
res = q->data + offset - state->start_offset;
if ((*needs_free = ((offset + num_bytes) > state->end_offset))) {
guint8 *buffer = g_malloc (num_bytes);
guint8 *tmp = buffer;
/* Setup front stub */
int len = q->length - (res - q->data);
int counter = 0;
d (1, g_printerr ("MERGE needed (%d) which is >= %d + %d;\n",
num_bytes, offset, state->end_offset););
do {
d (1, g_printerr ("record %d) add %d bytes;\n", ++counter, len););
/* copy necessary portion of current record */
memcpy (tmp, res, len);
tmp += len;
/* Get next record */
if (!ms_biff_query_next (q)) {
g_warning ("unexpected end of stream;");
return NULL;
}
/* We should only see DRAW records now */
if (q->opcode != BIFF_MS_O_DRAWING &&
q->opcode != BIFF_MS_O_DRAWING_GROUP &&
q->opcode != BIFF_MS_O_DRAWING_SELECTION &&
q->opcode != BIFF_CHART_gelframe &&
q->opcode != BIFF_CONTINUE) {
g_warning ("Unexpected record type 0x%x @ 0x%lx;", q->opcode, (long)q->streamPos);
return NULL;
}
state->start_offset = state->end_offset;
state->end_offset += q->length;
state->segment_len = q->length;
res = q->data;
len = q->length;
} while ((num_bytes - (tmp - buffer)) > len);
/* Copy back stub */
memcpy (tmp, res, num_bytes - (tmp-buffer));
d (1, g_printerr ("record %d) add %d bytes;\n",
++counter,
num_bytes - (int)(tmp-buffer)););
return buffer;
}
return res;
}
| 1 |
[
"CWE-119"
] |
gnumeric
|
b5480b69345b3c6d56ee0ed9c9e9880bb2a08cdc
| 257,840,832,045,771,400,000,000,000,000,000,000,000 | 94 |
xls: fuzzed file crash.
|
void sctp_addr_wq_mgmt(struct net *net, struct sctp_sockaddr_entry *addr, int cmd)
{
struct sctp_sockaddr_entry *addrw;
unsigned long timeo_val;
	/* first, we check if an opposite message already exists in the queue.
* If we found such message, it is removed.
* This operation is a bit stupid, but the DHCP client attaches the
* new address after a couple of addition and deletion of that address
*/
spin_lock_bh(&net->sctp.addr_wq_lock);
/* Offsets existing events in addr_wq */
addrw = sctp_addr_wq_lookup(net, addr);
if (addrw) {
if (addrw->state != cmd) {
pr_debug("%s: offsets existing entry for %d, addr:%pISc "
"in wq:%p\n", __func__, addrw->state, &addrw->a.sa,
&net->sctp.addr_waitq);
list_del(&addrw->list);
kfree(addrw);
}
spin_unlock_bh(&net->sctp.addr_wq_lock);
return;
}
/* OK, we have to add the new address to the wait queue */
addrw = kmemdup(addr, sizeof(struct sctp_sockaddr_entry), GFP_ATOMIC);
if (addrw == NULL) {
spin_unlock_bh(&net->sctp.addr_wq_lock);
return;
}
addrw->state = cmd;
list_add_tail(&addrw->list, &net->sctp.addr_waitq);
pr_debug("%s: add new entry for cmd:%d, addr:%pISc in wq:%p\n",
__func__, addrw->state, &addrw->a.sa, &net->sctp.addr_waitq);
if (!timer_pending(&net->sctp.addr_wq_timer)) {
timeo_val = jiffies;
timeo_val += msecs_to_jiffies(SCTP_ADDRESS_TICK_DELAY);
mod_timer(&net->sctp.addr_wq_timer, timeo_val);
}
spin_unlock_bh(&net->sctp.addr_wq_lock);
}
| 0 |
[
"CWE-119",
"CWE-787"
] |
linux
|
8e2d61e0aed2b7c4ecb35844fe07e0b2b762dee4
| 309,540,950,325,623,580,000,000,000,000,000,000,000 | 46 |
sctp: fix race on protocol/netns initialization
Consider that the sctp module is unloaded and is being requested because a
user is creating an sctp socket.
During initialization, sctp will add the new protocol type and then
initialize pernet subsys:
status = sctp_v4_protosw_init();
if (status)
goto err_protosw_init;
status = sctp_v6_protosw_init();
if (status)
goto err_v6_protosw_init;
status = register_pernet_subsys(&sctp_net_ops);
The problem is that after those calls to sctp_v{4,6}_protosw_init(), it
is possible for userspace to create SCTP sockets like if the module is
already fully loaded. If that happens, one of the possible effects is
that we will have readers for net->sctp.local_addr_list list earlier
than expected and sctp_net_init() does not take precautions while
dealing with that list, leading to a potential panic but not limited to
that, as sctp_sock_init() will copy a bunch of blank/partially
initialized values from net->sctp.
The race happens like this:
CPU 0 | CPU 1
socket() |
__sock_create | socket()
inet_create | __sock_create
list_for_each_entry_rcu( |
answer, &inetsw[sock->type], |
list) { | inet_create
/* no hits */ |
if (unlikely(err)) { |
... |
request_module() |
/* socket creation is blocked |
* the module is fully loaded |
*/ |
sctp_init |
sctp_v4_protosw_init |
inet_register_protosw |
list_add_rcu(&p->list, |
last_perm); |
| list_for_each_entry_rcu(
| answer, &inetsw[sock->type],
sctp_v6_protosw_init | list) {
| /* hit, so assumes protocol
| * is already loaded
| */
| /* socket creation continues
| * before netns is initialized
| */
register_pernet_subsys |
Simply inverting the initialization order between
register_pernet_subsys() and sctp_v4_protosw_init() is not possible
because register_pernet_subsys() will create a control sctp socket, so
the protocol must be already visible by then. Deferring the socket
creation to a work-queue is not good, especially because we lose the
ability to handle its errors.
So, as suggested by Vlad, the fix is to split netns initialization in
two moments: defaults and control socket, so that the defaults are
already loaded by when we register the protocol, while control socket
initialization is kept at the same moment it is today.
Fixes: 4db67e808640 ("sctp: Make the address lists per network namespace")
Signed-off-by: Vlad Yasevich <[email protected]>
Signed-off-by: Marcelo Ricardo Leitner <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
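A hedged sketch of the two-phase split the message describes (the ops names and error paths are assumptions): pernet defaults are registered before the protosw entries make the protocol visible, while control-socket setup keeps its original place.

static int __init sctp_init(void)
{
	int status;

	/* phase 1: defaults (address lists etc.) must exist before
	 * socket() can possibly find the protocol */
	status = register_pernet_subsys(&sctp_defaults_ops);
	if (status)
		return status;

	status = sctp_v4_protosw_init();	/* protocol becomes visible */
	if (status)
		goto err_protosw;

	/* phase 2: control socket, kept at its original point */
	status = register_pernet_subsys(&sctp_ctrlsock_ops);
	if (status)
		goto err_ctrlsock;
	return 0;

err_ctrlsock:
	sctp_v4_protosw_exit();
err_protosw:
	unregister_pernet_subsys(&sctp_defaults_ops);
	return status;
}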
static int set_journal_csum_feature_set(struct super_block *sb)
{
int ret = 1;
int compat, incompat;
struct ext4_sb_info *sbi = EXT4_SB(sb);
if (ext4_has_metadata_csum(sb)) {
/* journal checksum v3 */
compat = 0;
incompat = JBD2_FEATURE_INCOMPAT_CSUM_V3;
} else {
/* journal checksum v1 */
compat = JBD2_FEATURE_COMPAT_CHECKSUM;
incompat = 0;
}
jbd2_journal_clear_features(sbi->s_journal,
JBD2_FEATURE_COMPAT_CHECKSUM, 0,
JBD2_FEATURE_INCOMPAT_CSUM_V3 |
JBD2_FEATURE_INCOMPAT_CSUM_V2);
if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
ret = jbd2_journal_set_features(sbi->s_journal,
compat, 0,
JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT |
incompat);
} else if (test_opt(sb, JOURNAL_CHECKSUM)) {
ret = jbd2_journal_set_features(sbi->s_journal,
compat, 0,
incompat);
jbd2_journal_clear_features(sbi->s_journal, 0, 0,
JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT);
} else {
jbd2_journal_clear_features(sbi->s_journal, 0, 0,
JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT);
}
return ret;
}
| 0 |
[
"CWE-362"
] |
linux
|
ea3d7209ca01da209cda6f0dea8be9cc4b7a933b
| 146,552,137,835,944,850,000,000,000,000,000,000 | 38 |
ext4: fix races between page faults and hole punching
Currently, page faults and hole punching are completely unsynchronized.
This can result in a page fault faulting a page into a range that we
are punching after truncate_pagecache_range() has been called, and thus
we can end up with a page mapped to disk blocks that will be shortly
freed. Filesystem corruption will shortly follow. Note that the same
race is avoided for truncate by checking page fault offset against
i_size but there isn't similar mechanism available for punching holes.
Fix the problem by creating new rw semaphore i_mmap_sem in inode and
grab it for writing over truncate, hole punching, and other functions
removing blocks from extent tree and for read over page faults. We
cannot easily use i_data_sem for this since that ranks below transaction
start and we need something ranking above it so that it can be held over
the whole truncate / hole punching operation. Also remove various
workarounds we had in the code to reduce race window when page fault
could have created pages with stale mapping information.
Signed-off-by: Jan Kara <[email protected]>
Signed-off-by: Theodore Ts'o <[email protected]>
|
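A simplified sketch of the i_mmap_sem scheme (error handling omitted; field access via EXT4_I() and the exact call sites are assumptions). The punch side excludes faults for the whole operation, while the fault side takes the semaphore shared:

	/* hole punch side: no fault can run until the blocks are gone */
	down_write(&EXT4_I(inode)->i_mmap_sem);
	truncate_pagecache_range(inode, offset, offset + len - 1);
	/* ... remove the blocks from the extent tree ... */
	up_write(&EXT4_I(inode)->i_mmap_sem);

	/* page fault side: cannot slip a page into a range being punched */
	down_read(&EXT4_I(inode)->i_mmap_sem);
	ret = filemap_fault(vma, vmf);
	up_read(&EXT4_I(inode)->i_mmap_sem);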
void ass_blur1246_vert_c(int16_t *dst, const int16_t *src,
uintptr_t src_width, uintptr_t src_height,
const int16_t *param)
{
uintptr_t dst_height = src_height + 12;
uintptr_t step = STRIPE_WIDTH * src_height;
for (uintptr_t x = 0; x < src_width; x += STRIPE_WIDTH) {
uintptr_t offs = 0;
for (uintptr_t y = 0; y < dst_height; ++y) {
const int16_t *p4 = get_line(src, offs - 12 * STRIPE_WIDTH, step);
const int16_t *p3 = get_line(src, offs - 10 * STRIPE_WIDTH, step);
const int16_t *p2 = get_line(src, offs - 8 * STRIPE_WIDTH, step);
const int16_t *p1 = get_line(src, offs - 7 * STRIPE_WIDTH, step);
const int16_t *z0 = get_line(src, offs - 6 * STRIPE_WIDTH, step);
const int16_t *n1 = get_line(src, offs - 5 * STRIPE_WIDTH, step);
const int16_t *n2 = get_line(src, offs - 4 * STRIPE_WIDTH, step);
const int16_t *n3 = get_line(src, offs - 2 * STRIPE_WIDTH, step);
const int16_t *n4 = get_line(src, offs - 0 * STRIPE_WIDTH, step);
for (int k = 0; k < STRIPE_WIDTH; ++k)
dst[k] = blur_func(p4[k], p3[k], p2[k], p1[k], z0[k],
n1[k], n2[k], n3[k], n4[k], param);
dst += STRIPE_WIDTH;
offs += STRIPE_WIDTH;
}
src += step;
}
}
| 0 |
[
"CWE-119",
"CWE-787"
] |
libass
|
08e754612019ed84d1db0d1fc4f5798248decd75
| 31,465,668,515,541,167,000,000,000,000,000,000,000 | 28 |
Fix blur coefficient calculation buffer overflow
Found by fuzzer test case id:000082,sig:11,src:002579,op:havoc,rep:8.
Correctness should be checked, but this fixes the overflow for good.
|
zroute_lookup(u_int zroute)
{
u_int i;
if (zroute >= sizeof(route_types)/sizeof(route_types[0]))
{
zlog_err("unknown zebra route type: %u", zroute);
return &unknown;
}
if (zroute == route_types[zroute].type)
return &route_types[zroute];
for (i = 0; i < sizeof(route_types)/sizeof(route_types[0]); i++)
{
if (zroute == route_types[i].type)
{
zlog_warn("internal error: route type table out of order "
"while searching for %u, please notify developers", zroute);
return &route_types[i];
}
}
zlog_err("internal error: cannot find route type %u in table!", zroute);
return &unknown;
}
| 0 |
[
"CWE-125"
] |
frr
|
6d58272b4cf96f0daa846210dd2104877900f921
| 86,967,922,627,418,260,000,000,000,000,000,000,000 | 23 |
[bgpd] cleanup, compact and consolidate capability parsing code
2007-07-26 Paul Jakma <[email protected]>
* (general) Clean up and compact capability parsing slightly.
Consolidate validation of length and logging of generic TLV, and
memcpy of capability data, thus removing such from cap specifc
code (not always present or correct).
* bgp_open.h: Add structures for the generic capability TLV header
and for the data formats of the various specific capabilities we
support. Hence remove the badly named, or else misdefined, struct
capability.
* bgp_open.c: (bgp_capability_vty_out) Use struct capability_mp_data.
Do the length checks *before* memcpy()'ing based on that length
(stored capability - should have been validated anyway on input,
but..).
(bgp_afi_safi_valid_indices) new function to validate (afi,safi)
which is about to be used as index into arrays, consolidates
several instances of same, at least one of which appeared to be
incomplete..
(bgp_capability_mp) Much condensed.
(bgp_capability_orf_entry) New, process one ORF entry
(bgp_capability_orf) Condensed. Fixed to process all ORF entries.
(bgp_capability_restart) Condensed, and fixed to use a
cap-specific type, rather than abusing capability_mp.
(struct message capcode_str) added to aid generic logging.
(size_t cap_minsizes[]) added to aid generic validation of
capability length field.
(bgp_capability_parse) Generic logging and validation of TLV
consolidated here. Code compacted as much as possible.
* bgp_packet.c: (bgp_open_receive) Capability parsers now use
streams, so no more need here to manually fudge the input stream
getp.
(bgp_capability_msg_parse) use struct capability_mp_data. Validate
lengths /before/ memcpy. Use bgp_afi_safi_valid_indices.
(bgp_capability_receive) Exported for use by test harness.
* bgp_vty.c: (bgp_show_summary) fix conversion warning
(bgp_show_peer) ditto
* bgp_debug.h: Fix storage 'extern' after type 'const'.
* lib/log.c: (mes_lookup) warning about code not being in
same-number array slot should be debug, not warning. E.g. BGP
has several discontigious number spaces, allocating from
different parts of a space is not uncommon (e.g. IANA
assigned versus vendor-assigned code points in some number
space).
|
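A hedged sketch of the consolidated pre-memcpy validation the changelog describes (cap_minsizes[] is the per-code minimum-size table named above; CAPCODE_MAX and the helper itself are invented for illustration):

static int
cap_tlv_ok (u_char code, size_t length, size_t remaining)
{
  if (length > remaining)
    return 0;		/* TLV claims more data than the message holds */
  if (code < CAPCODE_MAX && length < cap_minsizes[code])
    return 0;		/* shorter than this capability permits */
  return 1;		/* only now is it safe to memcpy 'length' bytes */
}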
static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb,
netdev_features_t features,
struct sk_buff *(*gso_inner_segment)(struct sk_buff *skb,
netdev_features_t features),
__be16 new_protocol, bool is_ipv6)
{
int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb);
bool remcsum, need_csum, offload_csum, ufo;
struct sk_buff *segs = ERR_PTR(-EINVAL);
struct udphdr *uh = udp_hdr(skb);
u16 mac_offset = skb->mac_header;
__be16 protocol = skb->protocol;
u16 mac_len = skb->mac_len;
int udp_offset, outer_hlen;
__wsum partial;
if (unlikely(!pskb_may_pull(skb, tnl_hlen)))
goto out;
/* Adjust partial header checksum to negate old length.
* We cannot rely on the value contained in uh->len as it is
* possible that the actual value exceeds the boundaries of the
* 16 bit length field due to the header being added outside of an
* IP or IPv6 frame that was already limited to 64K - 1.
*/
partial = csum_sub(csum_unfold(uh->check),
(__force __wsum)htonl(skb->len));
/* setup inner skb. */
skb->encapsulation = 0;
__skb_pull(skb, tnl_hlen);
skb_reset_mac_header(skb);
skb_set_network_header(skb, skb_inner_network_offset(skb));
skb->mac_len = skb_inner_network_offset(skb);
skb->protocol = new_protocol;
need_csum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM);
skb->encap_hdr_csum = need_csum;
remcsum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_TUNNEL_REMCSUM);
skb->remcsum_offload = remcsum;
ufo = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP);
/* Try to offload checksum if possible */
offload_csum = !!(need_csum &&
(skb->dev->features &
(is_ipv6 ? (NETIF_F_HW_CSUM | NETIF_F_IPV6_CSUM) :
(NETIF_F_HW_CSUM | NETIF_F_IP_CSUM))));
features &= skb->dev->hw_enc_features;
/* The only checksum offload we care about from here on out is the
* outer one so strip the existing checksum feature flags and
* instead set the flag based on our outer checksum offload value.
*/
if (remcsum || ufo) {
features &= ~NETIF_F_CSUM_MASK;
if (!need_csum || offload_csum)
features |= NETIF_F_HW_CSUM;
}
/* segment inner packet. */
segs = gso_inner_segment(skb, features);
if (IS_ERR_OR_NULL(segs)) {
skb_gso_error_unwind(skb, protocol, tnl_hlen, mac_offset,
mac_len);
goto out;
}
outer_hlen = skb_tnl_header_len(skb);
udp_offset = outer_hlen - tnl_hlen;
skb = segs;
do {
__be16 len;
if (remcsum)
skb->ip_summed = CHECKSUM_NONE;
/* Set up inner headers if we are offloading inner checksum */
if (skb->ip_summed == CHECKSUM_PARTIAL) {
skb_reset_inner_headers(skb);
skb->encapsulation = 1;
}
skb->mac_len = mac_len;
skb->protocol = protocol;
__skb_push(skb, outer_hlen);
skb_reset_mac_header(skb);
skb_set_network_header(skb, mac_len);
skb_set_transport_header(skb, udp_offset);
len = htons(skb->len - udp_offset);
uh = udp_hdr(skb);
uh->len = len;
if (!need_csum)
continue;
uh->check = ~csum_fold(csum_add(partial, (__force __wsum)len));
if (skb->encapsulation || !offload_csum) {
uh->check = gso_make_checksum(skb, ~uh->check);
if (uh->check == 0)
uh->check = CSUM_MANGLED_0;
} else {
skb->ip_summed = CHECKSUM_PARTIAL;
skb->csum_start = skb_transport_header(skb) - skb->head;
skb->csum_offset = offsetof(struct udphdr, check);
}
} while ((skb = skb->next));
out:
return segs;
}
| 0 |
[
"CWE-400",
"CWE-703"
] |
linux
|
fac8e0f579695a3ecbc4d3cac369139d7f819971
| 260,311,272,867,991,450,000,000,000,000,000,000,000 | 114 |
tunnels: Don't apply GRO to multiple layers of encapsulation.
When drivers express support for TSO of encapsulated packets, they
only mean that they can do it for one layer of encapsulation.
Supporting additional levels would mean updating, at a minimum,
more IP length fields and they are unaware of this.
No encapsulation device expresses support for handling offloaded
encapsulated packets, so we won't generate these types of frames
in the transmit path. However, GRO doesn't have a check for
multiple levels of encapsulation and will attempt to build them.
UDP tunnel GRO actually does prevent this situation but it only
handles multiple UDP tunnels stacked on top of each other. This
generalizes that solution to prevent any kind of tunnel stacking
that would cause problems.
Fixes: bf5a755f ("net-gre-gro: Add GRE support to the GRO stack")
Signed-off-by: Jesse Gross <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
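A hedged sketch of the generalized guard (encap_mark is the field the mainline fix is understood to introduce, but treat the exact names as assumptions): aggregation is refused once the packet is already inside one tunnel layer.

	if (NAPI_GRO_CB(skb)->encap_mark) {
		NAPI_GRO_CB(skb)->flush = 1;	/* second layer: flush, no GRO */
		return NULL;
	}
	NAPI_GRO_CB(skb)->encap_mark = 1;	/* claim the single allowed layer */
	/* ... continue with normal inner-protocol GRO ... */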
STATIC int GC_hblk_fl_from_blocks(word blocks_needed)
{
if (blocks_needed <= UNIQUE_THRESHOLD) return (int)blocks_needed;
if (blocks_needed >= HUGE_THRESHOLD) return N_HBLK_FLS;
return (int)(blocks_needed - UNIQUE_THRESHOLD)/FL_COMPRESSION
+ UNIQUE_THRESHOLD;
}
| 0 |
[
"CWE-119"
] |
bdwgc
|
7292c02fac2066d39dd1bcc37d1a7054fd1e32ee
| 64,099,042,106,534,220,000,000,000,000,000,000,000 | 8 |
Fix malloc routines to prevent size value wrap-around
See issue #135 on Github.
* allchblk.c (GC_allochblk, GC_allochblk_nth): Use
OBJ_SZ_TO_BLOCKS_CHECKED instead of OBJ_SZ_TO_BLOCKS.
* malloc.c (GC_alloc_large): Likewise.
* alloc.c (GC_expand_hp_inner): Type of "bytes" local variable changed
from word to size_t; cast ROUNDUP_PAGESIZE argument to size_t; prevent
overflow when computing GC_heapsize+bytes > GC_max_heapsize.
* dbg_mlc.c (GC_debug_malloc, GC_debug_malloc_ignore_off_page,
GC_debug_malloc_atomic_ignore_off_page, GC_debug_generic_malloc,
GC_debug_generic_malloc_inner,
GC_debug_generic_malloc_inner_ignore_off_page,
GC_debug_malloc_stubborn, GC_debug_malloc_atomic,
GC_debug_malloc_uncollectable, GC_debug_malloc_atomic_uncollectable):
Use SIZET_SAT_ADD (instead of "+" operator) to add extra bytes to lb
value.
* fnlz_mlc.c (GC_finalized_malloc): Likewise.
* gcj_mlc.c (GC_debug_gcj_malloc): Likewise.
* include/private/gc_priv.h (ROUNDUP_GRANULE_SIZE, ROUNDED_UP_GRANULES,
ADD_SLOP, ROUNDUP_PAGESIZE): Likewise.
* include/private/gcconfig.h (GET_MEM): Likewise.
* mallocx.c (GC_malloc_many, GC_memalign): Likewise.
* os_dep.c (GC_wince_get_mem, GC_win32_get_mem): Likewise.
* typd_mlc.c (GC_malloc_explicitly_typed,
GC_malloc_explicitly_typed_ignore_off_page,
GC_calloc_explicitly_typed): Likewise.
* headers.c (GC_scratch_alloc): Change type of bytes_to_get from word
to size_t (because ROUNDUP_PAGESIZE_IF_MMAP result type changed).
* include/private/gc_priv.h: Include limits.h (unless SIZE_MAX already
defined).
* include/private/gc_priv.h (GC_SIZE_MAX, GC_SQRT_SIZE_MAX): Move from
malloc.c file.
* include/private/gc_priv.h (SIZET_SAT_ADD): New macro (defined before
include gcconfig.h).
* include/private/gc_priv.h (EXTRA_BYTES, GC_page_size): Change type
to size_t.
* os_dep.c (GC_page_size): Likewise.
* include/private/gc_priv.h (ROUNDUP_GRANULE_SIZE, ROUNDED_UP_GRANULES,
ADD_SLOP, ROUNDUP_PAGESIZE): Add comment about the argument.
* include/private/gcconfig.h (GET_MEM): Likewise.
* include/private/gc_priv.h (ROUNDUP_GRANULE_SIZE, ROUNDED_UP_GRANULES,
ADD_SLOP, OBJ_SZ_TO_BLOCKS, ROUNDUP_PAGESIZE,
ROUNDUP_PAGESIZE_IF_MMAP): Rename argument to "lb".
* include/private/gc_priv.h (OBJ_SZ_TO_BLOCKS_CHECKED): New macro.
* include/private/gcconfig.h (GC_win32_get_mem, GC_wince_get_mem,
GC_unix_get_mem): Change argument type from word to int.
* os_dep.c (GC_unix_mmap_get_mem, GC_unix_get_mem,
GC_unix_sbrk_get_mem, GC_wince_get_mem, GC_win32_get_mem): Likewise.
* malloc.c (GC_alloc_large_and_clear): Call OBJ_SZ_TO_BLOCKS only
if no value wrap around is guaranteed.
* malloc.c (GC_generic_malloc): Do not check for lb_rounded < lb case
(because ROUNDED_UP_GRANULES and GRANULES_TO_BYTES guarantees no value
wrap around).
* mallocx.c (GC_generic_malloc_ignore_off_page): Likewise.
* misc.c (GC_init_size_map): Change "i" local variable type from int
to size_t.
* os_dep.c (GC_write_fault_handler, catch_exception_raise): Likewise.
* misc.c (GC_envfile_init): Cast len to size_t when passed to
ROUNDUP_PAGESIZE_IF_MMAP.
* os_dep.c (GC_setpagesize): Cast GC_sysinfo.dwPageSize and
GETPAGESIZE() to size_t (when setting GC_page_size).
* os_dep.c (GC_unix_mmap_get_mem, GC_unmap_start, GC_remove_protection):
Expand ROUNDUP_PAGESIZE macro but without value wrap-around checking
(the argument is of word type).
* os_dep.c (GC_unix_mmap_get_mem): Replace -GC_page_size with
~GC_page_size+1 (because GC_page_size is unsigned); remove redundant
cast to size_t.
* os_dep.c (GC_unix_sbrk_get_mem): Add explicit cast of GC_page_size
to SBRK_ARG_T.
* os_dep.c (GC_wince_get_mem): Change type of res_bytes local variable
to size_t.
* typd_mlc.c: Do not include limits.h.
* typd_mlc.c (GC_SIZE_MAX, GC_SQRT_SIZE_MAX): Remove (as defined in
gc_priv.h now).
|
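The SIZET_SAT_ADD idea is small enough to demonstrate standalone: a saturating add clamps at SIZE_MAX instead of wrapping, so "lb plus extra bytes" can never come out smaller than lb. This is a sketch; the library's actual macro may differ in detail.

#include <stdint.h>
#include <stdio.h>

/* Saturating size_t addition: result is clamped to SIZE_MAX, which
 * trips the allocator's "too big" path instead of returning a tiny
 * buffer after a wrap-around. */
#define SIZET_SAT_ADD(a, b) \
	((a) < SIZE_MAX - (b) ? (a) + (b) : SIZE_MAX)

int main(void)
{
	size_t lb = SIZE_MAX - 4;

	printf("wrapped:   %zu\n", lb + 16);		/* wraps to 11 */
	printf("saturated: %zu\n", SIZET_SAT_ADD(lb, 16)); /* SIZE_MAX */
	return 0;
}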
pdf14_spot_get_color_comp_index(gx_device *dev, const char *pname,
int name_size, int component_type, int num_process_colors)
{
pdf14_device *pdev = (pdf14_device *)dev;
gx_device *tdev = pdev->target;
gs_devn_params *pdevn_params = &pdev->devn_params;
gs_separations *pseparations;
int comp_index;
dev_proc_get_color_comp_index(*target_get_color_comp_index);
int offset = 4 - num_process_colors;
while (tdev->child) {
tdev = tdev->child;
}
/* If something has gone wrong and this is no longer the pdf14 compositor, */
/* get the devn_params from the target to avoid accessing using the wrong */
/* pointer. Bug 696372. */
if (tdev == (gx_device *)pdev)
pdevn_params = dev_proc(pdev, ret_devn_params)(dev);
pseparations = &pdevn_params->separations;
/* If num_process_colors is 3 or 1 (RGB or Gray) then we are in a situation
* where we are in a blend color space that is RGB or Gray based and we
* have a spot colorant. If the spot colorant name is Cyan, Magenta
* Yellow or Black, then we should use the alternate tint transform */
if (num_process_colors < 4) {
int k;
for (k = 0; k < 4; k++) {
if (strncmp(pname, pdev->devn_params.std_colorant_names[k], name_size) == 0)
return -1;
}
}
target_get_color_comp_index = dev_proc(tdev, get_color_comp_index);
/* The pdf14_clist_create_compositor may have set the color procs.
We need the real target procs */
if (target_get_color_comp_index == pdf14_cmykspot_get_color_comp_index)
target_get_color_comp_index =
((pdf14_clist_device *)pdev)->saved_target_get_color_comp_index;
/*
* If this is not a separation name then simply forward it to the target
* device.
*/
if (component_type == NO_COMP_NAME_TYPE)
return (*target_get_color_comp_index)(tdev, pname, name_size, component_type);
/*
* Check if the component is in either the process color model list
* or in the SeparationNames list.
*/
comp_index = check_pcm_and_separation_names(dev, pdevn_params, pname,
name_size, component_type);
/*
* Return the colorant number if we know this name. Note adjustment for
* compensating of blend color space.
*/
if (comp_index >= 0)
return comp_index - offset;
/*
* If we do not know this color, check if the output (target) device does.
*/
comp_index = (*target_get_color_comp_index)(tdev, pname, name_size, component_type);
/*
* Ignore color if unknown to the output device or if color is not being
* imaged due to the SeparationOrder device parameter.
*/
if (comp_index < 0 || comp_index == GX_DEVICE_COLOR_MAX_COMPONENTS)
return comp_index - offset;
/*
* This is a new colorant. Add it to our list of colorants.
*/
if (pseparations->num_separations < GX_DEVICE_COLOR_MAX_COMPONENTS - 1) {
int sep_num = pseparations->num_separations++;
int color_component_number;
byte * sep_name;
sep_name = gs_alloc_bytes(dev->memory->stable_memory,
name_size, "pdf14_spot_get_color_comp_index");
memcpy(sep_name, pname, name_size);
pseparations->names[sep_num].size = name_size;
pseparations->names[sep_num].data = sep_name;
color_component_number = sep_num + num_process_colors;
if (color_component_number >= dev->color_info.max_components)
color_component_number = GX_DEVICE_COLOR_MAX_COMPONENTS;
else
pdevn_params->separation_order_map[color_component_number] =
color_component_number;
return color_component_number;
}
return GX_DEVICE_COLOR_MAX_COMPONENTS;
}
| 0 |
[] |
ghostpdl
|
c432131c3fdb2143e148e8ba88555f7f7a63b25e
| 331,480,083,711,168,840,000,000,000,000,000,000,000 | 92 |
Bug 699661: Avoid sharing pointers between pdf14 compositors
If a copydevice is triggered when the pdf14 compositor is the device, we make
a copy of the device, then throw an error because, by default we're only allowed
to copy the device prototype - then freeing it calls the finalize, which frees
several pointers shared with the parent.
Make a pdf14 specific finish_copydevice() which NULLs the relevant pointers,
before, possibly, throwing the same error as the default method.
This also highlighted a problem with reopening the X11 devices, where a custom
error handler could be replaced with itself, meaning it also called itself,
and infinite recursion resulted.
Keep a note of whether the handler replacement has been done, and don't do it a
second time.
|
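A hedged sketch of the pdf14-specific finish_copydevice (member names are assumptions): sever the pointers the copy shares with its parent before raising the usual "only the prototype may be copied" error, so finalizing the copy cannot free the parent's data.

static int
pdf14_finish_copydevice(gx_device *new_dev, const gx_device *from_dev)
{
    pdf14_device *pdev = (pdf14_device *)new_dev;

    pdev->ctx = NULL;			/* shared with parent */
    pdev->color_model_stack = NULL;	/* shared with parent */

    /* default behaviour: possibly throw the same error as before */
    return gx_default_finish_copydevice(new_dev, from_dev);
}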
void FilterManager::contextOnContinue(ScopeTrackedObjectStack& tracked_object_stack) {
tracked_object_stack.add(connection_);
tracked_object_stack.add(filter_manager_callbacks_.scope());
}
| 0 |
[
"CWE-416"
] |
envoy
|
148de954ed3585d8b4298b424aa24916d0de6136
| 332,350,549,121,030,300,000,000,000,000,000,000,000 | 4 |
CVE-2021-43825
Response filter manager crash
Signed-off-by: Yan Avlasov <[email protected]>
|
static xmlNodePtr to_xml_object(encodeTypePtr type, zval *data, int style, xmlNodePtr parent TSRMLS_DC)
{
xmlNodePtr xmlParam;
HashTable *prop = NULL;
int i;
sdlTypePtr sdlType = type->sdl_type;
if (!data || Z_TYPE_P(data) == IS_NULL) {
xmlParam = xmlNewNode(NULL, BAD_CAST("BOGUS"));
xmlAddChild(parent, xmlParam);
if (style == SOAP_ENCODED) {
set_xsi_nil(xmlParam);
set_ns_and_type(xmlParam, type);
}
return xmlParam;
}
if (Z_TYPE_P(data) == IS_OBJECT) {
prop = Z_OBJPROP_P(data);
} else if (Z_TYPE_P(data) == IS_ARRAY) {
prop = Z_ARRVAL_P(data);
}
if (sdlType) {
if (sdlType->kind == XSD_TYPEKIND_RESTRICTION &&
sdlType->encode && type != &sdlType->encode->details) {
encodePtr enc;
enc = sdlType->encode;
while (enc && enc->details.sdl_type &&
enc->details.sdl_type->kind != XSD_TYPEKIND_SIMPLE &&
enc->details.sdl_type->kind != XSD_TYPEKIND_LIST &&
enc->details.sdl_type->kind != XSD_TYPEKIND_UNION) {
enc = enc->details.sdl_type->encode;
}
if (enc) {
zval *tmp = get_zval_property(data, "_" TSRMLS_CC);
if (tmp) {
xmlParam = master_to_xml(enc, tmp, style, parent TSRMLS_CC);
} else if (prop == NULL) {
xmlParam = master_to_xml(enc, data, style, parent TSRMLS_CC);
} else {
xmlParam = xmlNewNode(NULL, BAD_CAST("BOGUS"));
xmlAddChild(parent, xmlParam);
}
} else {
xmlParam = xmlNewNode(NULL, BAD_CAST("BOGUS"));
xmlAddChild(parent, xmlParam);
}
} else if (sdlType->kind == XSD_TYPEKIND_EXTENSION &&
sdlType->encode && type != &sdlType->encode->details) {
if (sdlType->encode->details.sdl_type &&
sdlType->encode->details.sdl_type->kind != XSD_TYPEKIND_SIMPLE &&
sdlType->encode->details.sdl_type->kind != XSD_TYPEKIND_LIST &&
sdlType->encode->details.sdl_type->kind != XSD_TYPEKIND_UNION) {
if (prop) prop->nApplyCount++;
xmlParam = master_to_xml(sdlType->encode, data, style, parent TSRMLS_CC);
if (prop) prop->nApplyCount--;
} else {
zval *tmp = get_zval_property(data, "_" TSRMLS_CC);
if (tmp) {
xmlParam = master_to_xml(sdlType->encode, tmp, style, parent TSRMLS_CC);
} else if (prop == NULL) {
xmlParam = master_to_xml(sdlType->encode, data, style, parent TSRMLS_CC);
} else {
xmlParam = xmlNewNode(NULL, BAD_CAST("BOGUS"));
xmlAddChild(parent, xmlParam);
}
}
} else {
xmlParam = xmlNewNode(NULL, BAD_CAST("BOGUS"));
xmlAddChild(parent, xmlParam);
}
if (soap_check_zval_ref(data, xmlParam TSRMLS_CC)) {
return xmlParam;
}
if (prop != NULL) {
sdlTypePtr array_el;
if (Z_TYPE_P(data) == IS_ARRAY &&
!is_map(data) &&
sdlType->attributes == NULL &&
sdlType->model != NULL &&
(array_el = model_array_element(sdlType->model)) != NULL) {
zval **val;
zend_hash_internal_pointer_reset(prop);
while (zend_hash_get_current_data(prop,(void**)&val) == SUCCESS) {
xmlNodePtr property;
if (Z_TYPE_PP(val) == IS_NULL && array_el->nillable) {
property = xmlNewNode(NULL, BAD_CAST("BOGUS"));
xmlAddChild(xmlParam, property);
set_xsi_nil(property);
} else {
property = master_to_xml(array_el->encode, *val, style, xmlParam TSRMLS_CC);
}
xmlNodeSetName(property, BAD_CAST(array_el->name));
if (style == SOAP_LITERAL &&
array_el->namens &&
array_el->form == XSD_FORM_QUALIFIED) {
xmlNsPtr nsp = encode_add_ns(property, array_el->namens);
xmlSetNs(property, nsp);
}
zend_hash_move_forward(prop);
}
} else if (sdlType->model) {
model_to_xml_object(xmlParam, sdlType->model, data, style, 1 TSRMLS_CC);
}
if (sdlType->attributes) {
sdlAttributePtr *attr;
zval *zattr;
HashPosition pos;
zend_hash_internal_pointer_reset_ex(sdlType->attributes, &pos);
while (zend_hash_get_current_data_ex(sdlType->attributes, (void**)&attr, &pos) == SUCCESS) {
if ((*attr)->name) {
zattr = get_zval_property(data, (*attr)->name TSRMLS_CC);
if (zattr) {
xmlNodePtr dummy;
dummy = master_to_xml((*attr)->encode, zattr, SOAP_LITERAL, xmlParam TSRMLS_CC);
if (dummy->children && dummy->children->content) {
if ((*attr)->fixed && strcmp((*attr)->fixed, (char*)dummy->children->content) != 0) {
soap_error3(E_ERROR, "Encoding: Attribute '%s' has fixed value '%s' (value '%s' is not allowed)", (*attr)->name, (*attr)->fixed, dummy->children->content);
}
/* we need to handle xml: namespace specially, since it is
an implicit schema. Otherwise, use form.
*/
if ((*attr)->namens &&
(!strncmp((*attr)->namens, XML_NAMESPACE, sizeof(XML_NAMESPACE)) ||
(*attr)->form == XSD_FORM_QUALIFIED)) {
xmlNsPtr nsp = encode_add_ns(xmlParam, (*attr)->namens);
xmlSetNsProp(xmlParam, nsp, BAD_CAST((*attr)->name), dummy->children->content);
} else {
xmlSetProp(xmlParam, BAD_CAST((*attr)->name), dummy->children->content);
}
}
xmlUnlinkNode(dummy);
xmlFreeNode(dummy);
}
}
zend_hash_move_forward_ex(sdlType->attributes, &pos);
}
}
}
if (style == SOAP_ENCODED) {
set_ns_and_type(xmlParam, type);
}
} else {
xmlParam = xmlNewNode(NULL, BAD_CAST("BOGUS"));
xmlAddChild(parent, xmlParam);
if (soap_check_zval_ref(data, xmlParam TSRMLS_CC)) {
return xmlParam;
}
if (prop != NULL) {
i = zend_hash_num_elements(prop);
zend_hash_internal_pointer_reset(prop);
for (;i > 0;i--) {
xmlNodePtr property;
zval **zprop;
char *str_key;
ulong index;
int key_type;
unsigned int str_key_len;
key_type = zend_hash_get_current_key_ex(prop, &str_key, &str_key_len, &index, FALSE, NULL);
zend_hash_get_current_data(prop, (void **)&zprop);
property = master_to_xml(get_conversion((*zprop)->type), (*zprop), style, xmlParam TSRMLS_CC);
if (key_type == HASH_KEY_IS_STRING) {
const char *prop_name;
if (Z_TYPE_P(data) == IS_OBJECT) {
const char *class_name;
zend_unmangle_property_name(str_key, str_key_len-1, &class_name, &prop_name);
} else {
prop_name = str_key;
}
if (prop_name) {
xmlNodeSetName(property, BAD_CAST(prop_name));
}
}
zend_hash_move_forward(prop);
}
}
if (style == SOAP_ENCODED) {
set_ns_and_type(xmlParam, type);
}
}
return xmlParam;
}
| 0 |
[
"CWE-19"
] |
php-src
|
c8eaca013a3922e8383def6158ece2b63f6ec483
| 175,743,267,038,816,780,000,000,000,000,000,000,000 | 199 |
Added type checks
|
static int gtextfield_focus(GGadget *g, GEvent *event) {
GTextField *gt = (GTextField *) g;
if ( g->state == gs_invisible || g->state == gs_disabled )
return( false );
if ( gt->cursor!=NULL ) {
GDrawCancelTimer(gt->cursor);
gt->cursor = NULL;
gt->cursor_on = false;
}
if ( gt->hidden_cursor && !event->u.focus.gained_focus ) {
GDrawSetCursor(gt->g.base,gt->old_cursor);
gt->hidden_cursor = false;
}
gt->g.has_focus = event->u.focus.gained_focus;
if ( event->u.focus.gained_focus ) {
gt->cursor = GDrawRequestTimer(gt->g.base,400,400,NULL);
gt->cursor_on = true;
if ( event->u.focus.mnemonic_focus != mf_normal )
GTextFieldSelect(>->g,0,-1);
if ( gt->gic!=NULL )
GTPositionGIC(gt);
else if ( GWidgetGetInputContext(gt->g.base)!=NULL )
GDrawSetGIC(gt->g.base,GWidgetGetInputContext(gt->g.base),10000,10000);
}
_ggadget_redraw(g);
GTextFieldFocusChanged(gt,event->u.focus.gained_focus);
return( true );
}
| 0 |
[
"CWE-119",
"CWE-787"
] |
fontforge
|
626f751752875a0ddd74b9e217b6f4828713573c
| 24,082,460,988,952,600,000,000,000,000,000,000,000 | 30 |
Warn users before discarding their unsaved scripts (#3852)
* Warn users before discarding their unsaved scripts
This closes #3846.
|
dns_transmit_free (struct dns_transmit *d)
{
queryfree (d);
socketfree (d);
packetfree (d);
}
| 0 |
[
"CWE-362"
] |
ndjbdns
|
177b5522e9b3d25778001c8cebfddd4d2973fcfd
| 7,787,545,845,369,236,000,000,000,000,000,000,000 | 6 |
Merge identical outgoing requests - patch 2.
This patch fixes dnscache to combine identical client queries into one
single outgoing request, thus securing the server from possible cache
poisoning attacks. The merge operation takes place in the
dns_transmit layer, rather than between query and dns_transmit layers,
as done in the previous patch.
This fixes one of the cache poisoning vulnerability reported by
Mr Mark Johnson -> https://bugzilla.redhat.com/show_bug.cgi?id=838965.
Nonetheless the original patch for this issue was created by
Mr Jeff King -> http://marc.info/?l=djbdns&m=123859517723684&w=3#2
Sincere thanks to Mr Mark for reporting this issue and Mr Jeff for
creating the patch and releasing it under GPLv2.
|
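A hedged sketch of the merge lookup in the dns_transmit layer (the structure layout and the offset of the query name within the packet are assumptions; byte_equal() and dns_domain_equal() are the usual djbdns helpers):

static struct dns_transmit *
find_inflight (struct dns_transmit *active, unsigned int n,
               const char *q, const char qtype[2])
{
  unsigned int i;

  for (i = 0; i < n; ++i)
    if (active[i].query
        && byte_equal (active[i].qtype, 2, qtype)
        && dns_domain_equal (active[i].query + 14, q))
      return &active[i];	/* identical query in flight: merge */
  return 0;			/* nothing to merge with; transmit anew */
}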
static inline enum fbq_type fbq_classify_rq(struct rq *rq)
{
if (rq->nr_running > rq->nr_numa_running)
return regular;
if (rq->nr_running > rq->nr_preferred_running)
return remote;
return all;
}
| 0 |
[
"CWE-400",
"CWE-703",
"CWE-835"
] |
linux
|
c40f7d74c741a907cfaeb73a7697081881c497d0
| 219,370,794,266,081,640,000,000,000,000,000,000,000 | 8 |
sched/fair: Fix infinite loop in update_blocked_averages() by reverting a9e7f6544b9c
Zhipeng Xie, Xie XiuQi and Sargun Dhillon reported lockups in the
scheduler under high loads, starting at around the v4.18 time frame,
and Zhipeng Xie tracked it down to bugs in the rq->leaf_cfs_rq_list
manipulation.
Do a (manual) revert of:
a9e7f6544b9c ("sched/fair: Fix O(nr_cgroups) in load balance path")
It turns out that the list_del_leaf_cfs_rq() introduced by this commit
is a surprising property that was not considered in followup commits
such as:
9c2791f936ef ("sched/fair: Fix hierarchical order in rq->leaf_cfs_rq_list")
As Vincent Guittot explains:
"I think that there is a bigger problem with commit a9e7f6544b9c and
cfs_rq throttling:
Let take the example of the following topology TG2 --> TG1 --> root:
1) The 1st time a task is enqueued, we will add TG2 cfs_rq then TG1
cfs_rq to leaf_cfs_rq_list and we are sure to do the whole branch in
one path because it has never been used and can't be throttled so
tmp_alone_branch will point to leaf_cfs_rq_list at the end.
2) Then TG1 is throttled
3) and we add TG3 as a new child of TG1.
4) The 1st enqueue of a task on TG3 will add TG3 cfs_rq just before TG1
cfs_rq and tmp_alone_branch will stay on rq->leaf_cfs_rq_list.
With commit a9e7f6544b9c, we can del a cfs_rq from rq->leaf_cfs_rq_list.
So if the load of TG1 cfs_rq becomes NULL before step 2) above, TG1
cfs_rq is removed from the list.
Then at step 4), TG3 cfs_rq is added at the beginning of rq->leaf_cfs_rq_list
but tmp_alone_branch still points to TG3 cfs_rq because its throttled
parent can't be enqueued when the lock is released.
tmp_alone_branch doesn't point to rq->leaf_cfs_rq_list whereas it should.
So if TG3 cfs_rq is removed or destroyed before tmp_alone_branch
points on another TG cfs_rq, the next TG cfs_rq that will be added,
will be linked outside rq->leaf_cfs_rq_list - which is bad.
In addition, we can break the ordering of the cfs_rq in
rq->leaf_cfs_rq_list but this ordering is used to update and
propagate the update from leaf down to root."
Instead of trying to work through all these cases and trying to reproduce
the very high loads that produced the lockup to begin with, simplify
the code temporarily by reverting a9e7f6544b9c - which change was clearly
not thought through completely.
This (hopefully) gives us a kernel that doesn't lock up so people
can continue to enjoy their holidays without worrying about regressions. ;-)
[ mingo: Wrote changelog, fixed weird spelling in code comment while at it. ]
Analyzed-by: Xie XiuQi <[email protected]>
Analyzed-by: Vincent Guittot <[email protected]>
Reported-by: Zhipeng Xie <[email protected]>
Reported-by: Sargun Dhillon <[email protected]>
Reported-by: Xie XiuQi <[email protected]>
Tested-by: Zhipeng Xie <[email protected]>
Tested-by: Sargun Dhillon <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
Acked-by: Vincent Guittot <[email protected]>
Cc: <[email protected]> # v4.13+
Cc: Bin Li <[email protected]>
Cc: Mike Galbraith <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Tejun Heo <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Fixes: a9e7f6544b9c ("sched/fair: Fix O(nr_cgroups) in load balance path")
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
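A standalone C illustration (not kernel code; all names are stand-ins) of the invariant the revert restores: once entries can be deleted from the middle of the leaf list, a saved insertion cursor like tmp_alone_branch can be left pointing at an unlinked node, and later insertions land outside the list:
#include <stdio.h>

struct node { struct node *prev, *next; const char *name; };

static void list_init(struct node *head) { head->prev = head->next = head; }

static void list_add(struct node *n, struct node *at)
{   /* insert n before 'at' */
    n->prev = at->prev; n->next = at;
    at->prev->next = n; at->prev = n;
}

static void list_del(struct node *n)
{
    n->prev->next = n->next; n->next->prev = n->prev;
    n->prev = n->next = n;   /* unlinked node now points only at itself */
}

int main(void)
{
    struct node head, tg1, tg3;
    list_init(&head);
    tg1.name = "TG1"; tg3.name = "TG3";

    list_add(&tg1, &head);        /* TG1 linked into the leaf list */
    struct node *cursor = &tg1;   /* saved cursor, like tmp_alone_branch */

    list_del(&tg1);               /* a9e7f6544b9c-style mid-list removal */
    list_add(&tg3, cursor);       /* insertion lands outside the list */

    /* Walking from head never reaches TG3 - the list is silently broken. */
    for (struct node *p = head.next; p != &head; p = p->next)
        printf("%s\n", p->name);
    return 0;
}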
|
static int jpc_coc_dumpparms(jpc_ms_t *ms, FILE *out)
{
jpc_coc_t *coc = &ms->parms.coc;
fprintf(out, "compno = %"PRIuFAST16"; csty = 0x%02x; numdlvls = %d;\n",
coc->compno, coc->compparms.csty, coc->compparms.numdlvls);
fprintf(out, "cblkwidthval = %d; cblkheightval = %d; "
"cblksty = 0x%02x; qmfbid = %d;\n", coc->compparms.cblkwidthval,
coc->compparms.cblkheightval, coc->compparms.cblksty, coc->compparms.qmfbid);
return 0;
}
| 0 |
[
"CWE-20",
"CWE-399"
] |
jasper
|
ba2b9d000660313af7b692542afbd374c5685865
| 336,695,328,501,029,900,000,000,000,000,000,000,000 | 10 |
Ensure that not all tiles lie outside the image area.
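A hedged sketch of the kind of validation this fix implies: reject codestreams whose tile grid cannot place any tile over the image area. Field and function names are illustrative, not JasPer's actual members; per the JPEG 2000 rules, the first tile must overlap the image origin:
#include <stdbool.h>
#include <stdint.h>

struct grid {
    uint32_t imgx0, imgy0;       /* image area top-left corner */
    uint32_t tilex0, tiley0;     /* tile grid origin */
    uint32_t tilewidth, tileheight;
};

static bool some_tile_intersects_image(const struct grid *g)
{
    /* The first tile spans [tilex0, tilex0+tilewidth) x [tiley0, ...).
     * If it ends at or before the image origin, the grid places no tile
     * over the image, and decoding would index tiles that don't exist.
     * 64-bit sums avoid overflow on hostile 32-bit offsets. */
    if ((uint64_t)g->tilex0 + g->tilewidth <= g->imgx0)
        return false;
    if ((uint64_t)g->tiley0 + g->tileheight <= g->imgy0)
        return false;
    return true;
}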
|
word_list_split (list)
WORD_LIST *list;
{
WORD_LIST *result, *t, *tresult, *e;
for (t = list, result = (WORD_LIST *)NULL; t; t = t->next)
{
tresult = word_split (t->word, ifs_value);
if (result == 0)
result = e = tresult;
else
{
e->next = tresult;
while (e->next)
e = e->next;
}
}
return (result);
}
| 0 |
[] |
bash
|
955543877583837c85470f7fb8a97b7aa8d45e6c
| 86,480,590,485,243,530,000,000,000,000,000,000,000 | 19 |
bash-4.4-rc2 release
|
uint offset(uchar *record) const
{
return (uint) (ptr - record);
}
| 0 |
[
"CWE-416",
"CWE-703"
] |
server
|
08c7ab404f69d9c4ca6ca7a9cf7eec74c804f917
| 169,200,847,404,871,860,000,000,000,000,000,000,000 | 4 |
MDEV-24176 Server crashes after insert in the table with virtual
column generated using date_format() and if()
vcol_info->expr is allocated on expr_arena at the parsing stage. Since
the expr item is allocated on expr_arena, all the items it contains must
be allocated on expr_arena too. Otherwise fix_session_expr() will
encounter a prematurely freed item.
When a table is reopened from the cache, vcol_info contains a stale
expression. We refresh the expression via TABLE::vcol_fix_exprs(), but
first we must prepare a proper context (Vcol_expr_context) which meets
some requirements:
1. As noted above, the expr update must be done on expr_arena, as new
items may be created. This was a bug in fix_session_expr_for_read() and
was just not reproduced because there was no second refix. Now refix is
done in more cases, so it does reproduce. Tests affected: vcol.binlog
2. The name resolution context must also be narrowed to the single table.
Tested by: vcol.update main.default vcol.vcol_syntax gcol.gcol_bugfixes
3. sql_mode must be clean and must not fail the expr update.
sql_mode values such as MODE_NO_BACKSLASH_ESCAPES, MODE_NO_ZERO_IN_DATE, etc.
must not affect the vcol expression update. If the table was created
successfully, any further evaluation must not fail. Tests affected:
main.func_like
Reviewed by: Sergei Golubchik <[email protected]>
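A generic C sketch (hypothetical names, not MariaDB's Query_arena API) of the rule the fix enforces: while refixing a vcol expression, switch allocation to the expression's own arena and restore the caller's arena afterwards, so every item created during the refix lives exactly as long as the expression:
#include <stddef.h>

struct arena { char *base; size_t used, cap; };

static void *arena_alloc(struct arena *a, size_t n)
{
    if (a->used + n > a->cap) return 0;
    void *p = a->base + a->used;
    a->used += n;
    return p;
}

/* Active arena that an allocator like arena_alloc() would consult. */
static struct arena *active_arena;

/* Guard object mirroring what a Vcol_expr_context-style helper would
 * do: redirect allocations on entry, restore the caller's arena on exit. */
struct arena_guard { struct arena *saved; };

static void guard_enter(struct arena_guard *g, struct arena *expr_arena)
{
    g->saved = active_arena;
    active_arena = expr_arena;   /* new items land on expr_arena */
}

static void guard_leave(struct arena_guard *g)
{
    active_arena = g->saved;     /* caller's arena restored */
}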
|
static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
{
struct ftrace_profile_page *pg;
pg = stat->pages = stat->start;
while (pg) {
memset(pg->records, 0, PROFILE_RECORDS_SIZE);
pg->index = 0;
pg = pg->next;
}
memset(stat->hash, 0,
FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
}
| 0 |
[
"CWE-703"
] |
linux
|
6a76f8c0ab19f215af2a3442870eeb5f0e81998d
| 325,589,909,986,693,250,000,000,000,000,000,000,000 | 15 |
tracing: Fix possible NULL pointer dereferences
Currently the set_ftrace_pid and set_graph_function files use seq_lseek
for their fops. However, seq_open() is called only for FMODE_READ in
fops->open(), so if a user tries to seek one of those files after
opening it for writing, the kernel sees a NULL seq_file and then panics.
It can be easily reproduced with following command:
$ cd /sys/kernel/debug/tracing
$ echo 1234 | sudo tee -a set_ftrace_pid
In this example, GNU coreutils' tee opens the file with fopen(, "a")
and then the fopen() internally calls lseek().
Link: http://lkml.kernel.org/r/[email protected]
Cc: Frederic Weisbecker <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Namhyung Kim <[email protected]>
Cc: [email protected]
Signed-off-by: Namhyung Kim <[email protected]>
Signed-off-by: Steven Rostedt <[email protected]>
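A standalone sketch of the failure mode and its guard: any lseek path on these files must tolerate write-only opens, where seq_open() never ran and file->private_data is NULL. Types below are simplified stand-ins, not the kernel's:
struct seq_file_sketch { long pos; };
struct file_sketch { void *private_data; };

static long guarded_lseek(struct file_sketch *f, long offset)
{
    struct seq_file_sketch *m = f->private_data;
    if (!m)              /* opened O_WRONLY: seq_open() never ran */
        return 0;        /* treat as a no-op instead of dereferencing NULL */
    m->pos = offset;
    return offset;
}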
|
static int mxf_parse_structural_metadata(MXFContext *mxf)
{
MXFPackage *material_package = NULL;
MXFPackage *temp_package = NULL;
int i, j, k, ret;
av_dlog(mxf->fc, "metadata sets count %d\n", mxf->metadata_sets_count);
/* TODO: handle multiple material packages (OP3x) */
for (i = 0; i < mxf->packages_count; i++) {
material_package = mxf_resolve_strong_ref(mxf, &mxf->packages_refs[i], MaterialPackage);
if (material_package) break;
}
if (!material_package) {
av_log(mxf->fc, AV_LOG_ERROR, "no material package found\n");
return AVERROR_INVALIDDATA;
}
for (i = 0; i < material_package->tracks_count; i++) {
MXFPackage *source_package = NULL;
MXFTrack *material_track = NULL;
MXFTrack *source_track = NULL;
MXFTrack *temp_track = NULL;
MXFDescriptor *descriptor = NULL;
MXFStructuralComponent *component = NULL;
MXFTimecodeComponent *mxf_tc = NULL;
UID *essence_container_ul = NULL;
const MXFCodecUL *codec_ul = NULL;
const MXFCodecUL *container_ul = NULL;
const MXFCodecUL *pix_fmt_ul = NULL;
AVStream *st;
AVTimecode tc;
int flags;
if (!(material_track = mxf_resolve_strong_ref(mxf, &material_package->tracks_refs[i], Track))) {
av_log(mxf->fc, AV_LOG_ERROR, "could not resolve material track strong ref\n");
continue;
}
if ((component = mxf_resolve_strong_ref(mxf, &material_track->sequence_ref, TimecodeComponent))) {
mxf_tc = (MXFTimecodeComponent*)component;
flags = mxf_tc->drop_frame == 1 ? AV_TIMECODE_FLAG_DROPFRAME : 0;
if (av_timecode_init(&tc, mxf_tc->rate, flags, mxf_tc->start_frame, mxf->fc) == 0) {
mxf_add_timecode_metadata(&mxf->fc->metadata, "timecode", &tc);
}
}
if (!(material_track->sequence = mxf_resolve_strong_ref(mxf, &material_track->sequence_ref, Sequence))) {
av_log(mxf->fc, AV_LOG_ERROR, "could not resolve material track sequence strong ref\n");
continue;
}
for (j = 0; j < material_track->sequence->structural_components_count; j++) {
component = mxf_resolve_strong_ref(mxf, &material_track->sequence->structural_components_refs[j], TimecodeComponent);
if (!component)
continue;
mxf_tc = (MXFTimecodeComponent*)component;
flags = mxf_tc->drop_frame == 1 ? AV_TIMECODE_FLAG_DROPFRAME : 0;
if (av_timecode_init(&tc, mxf_tc->rate, flags, mxf_tc->start_frame, mxf->fc) == 0) {
mxf_add_timecode_metadata(&mxf->fc->metadata, "timecode", &tc);
break;
}
}
/* TODO: handle multiple source clips */
for (j = 0; j < material_track->sequence->structural_components_count; j++) {
component = mxf_resolve_strong_ref(mxf, &material_track->sequence->structural_components_refs[j], SourceClip);
if (!component)
continue;
for (k = 0; k < mxf->packages_count; k++) {
temp_package = mxf_resolve_strong_ref(mxf, &mxf->packages_refs[k], SourcePackage);
if (!temp_package)
continue;
if (!memcmp(temp_package->package_uid, component->source_package_uid, 16)) {
source_package = temp_package;
break;
}
}
if (!source_package) {
av_dlog(mxf->fc, "material track %d: no corresponding source package found\n", material_track->track_id);
break;
}
for (k = 0; k < source_package->tracks_count; k++) {
if (!(temp_track = mxf_resolve_strong_ref(mxf, &source_package->tracks_refs[k], Track))) {
av_log(mxf->fc, AV_LOG_ERROR, "could not resolve source track strong ref\n");
ret = AVERROR_INVALIDDATA;
goto fail_and_free;
}
if (temp_track->track_id == component->source_track_id) {
source_track = temp_track;
break;
}
}
if (!source_track) {
av_log(mxf->fc, AV_LOG_ERROR, "material track %d: no corresponding source track found\n", material_track->track_id);
break;
}
}
if (!source_track || !component)
continue;
if (!(source_track->sequence = mxf_resolve_strong_ref(mxf, &source_track->sequence_ref, Sequence))) {
av_log(mxf->fc, AV_LOG_ERROR, "could not resolve source track sequence strong ref\n");
ret = AVERROR_INVALIDDATA;
goto fail_and_free;
}
/* 0001GL00.MXF.A1.mxf_opatom.mxf has the same SourcePackageID as 0001GL.MXF.V1.mxf_opatom.mxf
* This would result in both files appearing to have two streams. Work around this by sanity checking DataDefinition */
if (memcmp(material_track->sequence->data_definition_ul, source_track->sequence->data_definition_ul, 16)) {
av_log(mxf->fc, AV_LOG_ERROR, "material track %d: DataDefinition mismatch\n", material_track->track_id);
continue;
}
st = avformat_new_stream(mxf->fc, NULL);
if (!st) {
av_log(mxf->fc, AV_LOG_ERROR, "could not allocate stream\n");
ret = AVERROR(ENOMEM);
goto fail_and_free;
}
st->id = source_track->track_id;
st->priv_data = source_track;
source_track->original_duration = st->duration = component->duration;
if (st->duration == -1)
st->duration = AV_NOPTS_VALUE;
st->start_time = component->start_position;
if (material_track->edit_rate.num <= 0 ||
material_track->edit_rate.den <= 0) {
av_log(mxf->fc, AV_LOG_WARNING,
"Invalid edit rate (%d/%d) found on stream #%d, "
"defaulting to 25/1\n",
material_track->edit_rate.num,
material_track->edit_rate.den, st->index);
material_track->edit_rate = (AVRational){25, 1};
}
avpriv_set_pts_info(st, 64, material_track->edit_rate.den, material_track->edit_rate.num);
/* ensure SourceTrack EditRate == MaterialTrack EditRate since only
* the former is accessible via st->priv_data */
source_track->edit_rate = material_track->edit_rate;
PRINT_KEY(mxf->fc, "data definition ul", source_track->sequence->data_definition_ul);
codec_ul = mxf_get_codec_ul(ff_mxf_data_definition_uls, &source_track->sequence->data_definition_ul);
st->codec->codec_type = codec_ul->id;
source_package->descriptor = mxf_resolve_strong_ref(mxf, &source_package->descriptor_ref, AnyType);
if (source_package->descriptor) {
if (source_package->descriptor->type == MultipleDescriptor) {
for (j = 0; j < source_package->descriptor->sub_descriptors_count; j++) {
MXFDescriptor *sub_descriptor = mxf_resolve_strong_ref(mxf, &source_package->descriptor->sub_descriptors_refs[j], Descriptor);
if (!sub_descriptor) {
av_log(mxf->fc, AV_LOG_ERROR, "could not resolve sub descriptor strong ref\n");
continue;
}
if (sub_descriptor->linked_track_id == source_track->track_id) {
descriptor = sub_descriptor;
break;
}
}
} else if (source_package->descriptor->type == Descriptor)
descriptor = source_package->descriptor;
}
if (!descriptor) {
av_log(mxf->fc, AV_LOG_INFO, "source track %d: stream %d, no descriptor found\n", source_track->track_id, st->index);
continue;
}
PRINT_KEY(mxf->fc, "essence codec ul", descriptor->essence_codec_ul);
PRINT_KEY(mxf->fc, "essence container ul", descriptor->essence_container_ul);
essence_container_ul = &descriptor->essence_container_ul;
/* HACK: replacing the original key with mxf_encrypted_essence_container
* is not allowed according to s429-6, try to find correct information anyway */
if (IS_KLV_KEY(essence_container_ul, mxf_encrypted_essence_container)) {
av_log(mxf->fc, AV_LOG_INFO, "broken encrypted mxf file\n");
for (k = 0; k < mxf->metadata_sets_count; k++) {
MXFMetadataSet *metadata = mxf->metadata_sets[k];
if (metadata->type == CryptoContext) {
essence_container_ul = &((MXFCryptoContext *)metadata)->source_container_ul;
break;
}
}
}
/* TODO: drop PictureEssenceCoding and SoundEssenceCompression, only check EssenceContainer */
codec_ul = mxf_get_codec_ul(ff_mxf_codec_uls, &descriptor->essence_codec_ul);
st->codec->codec_id = (enum AVCodecID)codec_ul->id;
av_log(mxf->fc, AV_LOG_VERBOSE, "%s: Universal Label: ",
avcodec_get_name(st->codec->codec_id));
for (k = 0; k < 16; k++) {
av_log(mxf->fc, AV_LOG_VERBOSE, "%.2x",
descriptor->essence_codec_ul[k]);
if (!(k+1 & 19) || k == 5)
av_log(mxf->fc, AV_LOG_VERBOSE, ".");
}
av_log(mxf->fc, AV_LOG_VERBOSE, "\n");
if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
source_track->intra_only = mxf_is_intra_only(descriptor);
container_ul = mxf_get_codec_ul(mxf_picture_essence_container_uls, essence_container_ul);
if (st->codec->codec_id == AV_CODEC_ID_NONE)
st->codec->codec_id = container_ul->id;
st->codec->width = descriptor->width;
st->codec->height = descriptor->height; /* Field height, not frame height */
switch (descriptor->frame_layout) {
case SegmentedFrame:
/* This one is a weird layout I don't fully understand. */
av_log(mxf->fc, AV_LOG_INFO, "SegmentedFrame layout isn't currently supported\n");
break;
case FullFrame:
st->codec->field_order = AV_FIELD_PROGRESSIVE;
break;
case OneField:
/* Every other line is stored and needs to be duplicated. */
av_log(mxf->fc, AV_LOG_INFO, "OneField frame layout isn't currently supported\n");
break; /* The correct thing to do here is fall through, but by breaking we might be
able to decode some streams at half the vertical resolution, rather than not at all.
It's also for compatibility with the old behavior. */
case MixedFields:
break;
case SeparateFields:
switch (descriptor->field_dominance) {
case MXF_TFF:
st->codec->field_order = AV_FIELD_TT;
break;
case MXF_BFF:
st->codec->field_order = AV_FIELD_BB;
break;
default:
avpriv_request_sample(mxf->fc,
"Field dominance %d support",
descriptor->field_dominance);
break;
}
/* Turn field height into frame height. */
st->codec->height *= 2;
break;
default:
av_log(mxf->fc, AV_LOG_INFO, "Unknown frame layout type: %d\n", descriptor->frame_layout);
}
if (st->codec->codec_id == AV_CODEC_ID_RAWVIDEO) {
st->codec->pix_fmt = descriptor->pix_fmt;
if (st->codec->pix_fmt == AV_PIX_FMT_NONE) {
pix_fmt_ul = mxf_get_codec_ul(ff_mxf_pixel_format_uls,
&descriptor->essence_codec_ul);
st->codec->pix_fmt = (enum AVPixelFormat)pix_fmt_ul->id;
if (st->codec->pix_fmt == AV_PIX_FMT_NONE) {
/* support files created before RP224v10 by defaulting to UYVY422
if subsampling is 4:2:2 and component depth is 8-bit */
if (descriptor->horiz_subsampling == 2 &&
descriptor->vert_subsampling == 1 &&
descriptor->component_depth == 8) {
st->codec->pix_fmt = AV_PIX_FMT_UYVY422;
}
}
}
}
st->need_parsing = AVSTREAM_PARSE_HEADERS;
if (material_track->sequence->origin) {
av_dict_set_int(&st->metadata, "material_track_origin", material_track->sequence->origin, 0);
}
if (source_track->sequence->origin) {
av_dict_set_int(&st->metadata, "source_track_origin", source_track->sequence->origin, 0);
}
} else if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
container_ul = mxf_get_codec_ul(mxf_sound_essence_container_uls, essence_container_ul);
/* Only overwrite existing codec ID if it is unset or A-law, which is the default according to SMPTE RP 224. */
if (st->codec->codec_id == AV_CODEC_ID_NONE || (st->codec->codec_id == AV_CODEC_ID_PCM_ALAW && (enum AVCodecID)container_ul->id != AV_CODEC_ID_NONE))
st->codec->codec_id = (enum AVCodecID)container_ul->id;
st->codec->channels = descriptor->channels;
st->codec->bits_per_coded_sample = descriptor->bits_per_sample;
if (descriptor->sample_rate.den > 0) {
st->codec->sample_rate = descriptor->sample_rate.num / descriptor->sample_rate.den;
avpriv_set_pts_info(st, 64, descriptor->sample_rate.den, descriptor->sample_rate.num);
} else {
av_log(mxf->fc, AV_LOG_WARNING, "invalid sample rate (%d/%d) "
"found for stream #%d, time base forced to 1/48000\n",
descriptor->sample_rate.num, descriptor->sample_rate.den,
st->index);
avpriv_set_pts_info(st, 64, 1, 48000);
}
/* if duration is set, rescale it from EditRate to SampleRate */
if (st->duration != AV_NOPTS_VALUE)
st->duration = av_rescale_q(st->duration,
av_inv_q(material_track->edit_rate),
st->time_base);
/* TODO: implement AV_CODEC_ID_RAWAUDIO */
if (st->codec->codec_id == AV_CODEC_ID_PCM_S16LE) {
if (descriptor->bits_per_sample > 16 && descriptor->bits_per_sample <= 24)
st->codec->codec_id = AV_CODEC_ID_PCM_S24LE;
else if (descriptor->bits_per_sample == 32)
st->codec->codec_id = AV_CODEC_ID_PCM_S32LE;
} else if (st->codec->codec_id == AV_CODEC_ID_PCM_S16BE) {
if (descriptor->bits_per_sample > 16 && descriptor->bits_per_sample <= 24)
st->codec->codec_id = AV_CODEC_ID_PCM_S24BE;
else if (descriptor->bits_per_sample == 32)
st->codec->codec_id = AV_CODEC_ID_PCM_S32BE;
} else if (st->codec->codec_id == AV_CODEC_ID_MP2) {
st->need_parsing = AVSTREAM_PARSE_FULL;
}
} else if (st->codec->codec_type == AVMEDIA_TYPE_DATA) {
int codec_id = mxf_get_codec_ul(mxf_data_essence_container_uls,
essence_container_ul)->id;
if (codec_id >= 0 &&
codec_id < FF_ARRAY_ELEMS(mxf_data_essence_descriptor)) {
av_dict_set(&st->metadata, "data_type",
mxf_data_essence_descriptor[codec_id], 0);
}
}
if (descriptor->extradata) {
if (!ff_alloc_extradata(st->codec, descriptor->extradata_size)) {
memcpy(st->codec->extradata, descriptor->extradata, descriptor->extradata_size);
}
} else if (st->codec->codec_id == AV_CODEC_ID_H264) {
ret = ff_generate_avci_extradata(st);
if (ret < 0)
return ret;
}
if (st->codec->codec_type != AVMEDIA_TYPE_DATA && (*essence_container_ul)[15] > 0x01) {
/* TODO: decode timestamps */
st->need_parsing = AVSTREAM_PARSE_TIMESTAMPS;
}
}
ret = 0;
fail_and_free:
return ret;
}
| 0 |
[
"CWE-703"
] |
FFmpeg
|
f173cdfe669556aa92857adafe60cbe5f2aa1210
| 245,187,236,012,802,480,000,000,000,000,000,000,000 | 331 |
avformat/mxfdec: Fix DoS issues in mxf_read_index_entry_array()
Fixes: 20170829A.mxf
Co-Author: 张洪亮(望初)" <[email protected]>
Found-by: Xiaohei and Wangchu from Alibaba Security Team
Signed-off-by: Michael Niedermayer <[email protected]>
(cherry picked from commit 900f39692ca0337a98a7cf047e4e2611071810c2)
Signed-off-by: Michael Niedermayer <[email protected]>
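A hedged sketch of the DoS guard this commit's subject suggests: bound the declared index-entry count by the bytes actually available before allocating, instead of trusting a 32-bit count read from the file. Names and the entry size are illustrative, not FFmpeg's exact code:
#include <stdint.h>
#include <stdlib.h>

#define ENTRY_SIZE 11  /* example on-disk size of one index entry */

static int read_index_entries(const uint8_t *buf, size_t buf_len,
                              uint32_t declared_count, uint8_t **out)
{
    /* A count near 4 billion would request a huge allocation and a
     * near-endless parse loop; clamp it to what the segment holds. */
    if ((uint64_t)declared_count * ENTRY_SIZE > buf_len)
        return -1;
    *out = malloc((size_t)declared_count * ENTRY_SIZE);
    if (!*out && declared_count)
        return -1;
    /* ... parse declared_count entries from buf ... */
    return 0;
}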
|
static const char * pgpValStr(pgpValTbl vs, uint8_t val)
{
do {
if (vs->val == val)
break;
} while ((++vs)->val != -1);
return vs->str;
}
| 0 |
[
"CWE-347",
"CWE-284"
] |
rpm
|
bd36c5dc9fb6d90c46fbfed8c2d67516fc571ec8
| 4,205,087,090,995,429,000,000,000,000,000,000,000 | 8 |
Validate and require subkey binding signatures on PGP public keys
All subkeys must be followed by a binding signature by the primary key
as per the OpenPGP RFC, enforce the presence and validity in the parser.
The implementation is as kludgey as they come to work around our
simple-minded parser structure without touching API, to maximise
backportability. Store all the raw packets internally as we decode them
to be able to access previous elements at will, needed to validate ordering
and access the actual data. Add testcases for manipulated keys whose
import previously would succeed.
Depends on the two previous commits:
7b399fcb8f52566e6f3b4327197a85facd08db91 and
236b802a4aa48711823a191d1b7f753c82a89ec5
Fixes CVE-2021-3521.
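An illustrative packet walk (simplified enums, not rpm's parser structures) of the ordering rule being enforced: every subkey must be immediately followed by a binding signature, which real code must additionally verify cryptographically against the primary key:
#include <stdbool.h>
#include <stddef.h>

enum pkt { PKT_PUBKEY, PKT_USERID, PKT_SIG,
           PKT_SUBKEY, PKT_SUBKEY_BINDING_SIG };

static bool subkeys_all_bound(const enum pkt *pkts, size_t n)
{
    for (size_t i = 0; i < n; i++) {
        if (pkts[i] != PKT_SUBKEY)
            continue;
        /* The binding signature must follow the subkey it binds. */
        if (i + 1 >= n || pkts[i + 1] != PKT_SUBKEY_BINDING_SIG)
            return false;
        /* Real code must also verify the signature against the
         * primary key, not merely check packet ordering. */
    }
    return true;
}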
|
static inline void ext4_decode_extra_time(struct timespec *time, __le32 extra)
{
if (sizeof(time->tv_sec) > 4)
time->tv_sec |= (__u64)(le32_to_cpu(extra) & EXT4_EPOCH_MASK)
<< 32;
time->tv_nsec = (le32_to_cpu(extra) & EXT4_NSEC_MASK) >> 2;
}
| 0 |
[
"CWE-399"
] |
linux-2.6
|
06a279d636734da32bb62dd2f7b0ade666f65d7c
| 3,770,292,732,448,600,000,000,000,000,000,000,000 | 7 |
ext4: only use i_size_high for regular files
Directories are not allowed to be bigger than 2GB, so don't use
i_size_high for anything other than regular files. E2fsck should
complain about these inodes, but the simplest thing to do for the
kernel is to only use i_size_high for regular files.
This prevents an intentially corrupted filesystem from causing the
kernel to burn a huge amount of CPU and issuing error messages such
as:
EXT4-fs warning (device loop0): ext4_block_to_path: block 135090028 > max
Thanks to David Maciejak from Fortinet's FortiGuard Global Security
Research Team for reporting this issue.
http://bugzilla.kernel.org/show_bug.cgi?id=12375
Signed-off-by: "Theodore Ts'o" <[email protected]>
Cc: [email protected]
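A sketch of the shape of the fix, with simplified types rather than the exact ext4 source: fold i_size_high into the size only for regular files, since directories are capped at 2GB and their high word must be ignored:
#include <stdint.h>

#define S_ISREG_SKETCH(mode) (((mode) & 0170000) == 0100000)

static uint64_t ext4_isize_sketch(uint16_t mode,
                                  uint32_t size_lo, uint32_t size_hi)
{
    if (!S_ISREG_SKETCH(mode))
        return size_lo;                       /* ignore i_size_high */
    return ((uint64_t)size_hi << 32) | size_lo;
}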
|
void SetDataTypeToAttr(DataType dtype, const string& attr_name, NodeDef* node) {
(*node->mutable_attr())[attr_name].set_type(dtype);
}
| 0 |
[
"CWE-476"
] |
tensorflow
|
e6340f0665d53716ef3197ada88936c2a5f7a2d3
| 25,829,078,784,244,700,000,000,000,000,000,000,000 | 3 |
Handle a special grappler case resulting in crash.
It might happen that a malformed input could be used to trick Grappler into trying to optimize a node with no inputs. This, in turn, would produce a null pointer dereference and a segfault.
PiperOrigin-RevId: 369242852
Change-Id: I2e5cbe7aec243d34a6d60220ac8ac9b16f136f6b
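A generic guard in the spirit of the fix, written as a plain-C stand-in rather than Grappler's actual C++ API: refuse to touch a node that has no inputs instead of dereferencing a missing fanin:
#include <stddef.h>

struct node_def { size_t input_count; const char **inputs; };

static const char *first_input_or_null(const struct node_def *n)
{
    if (n == NULL || n->input_count == 0)
        return NULL;      /* malformed graph: bail out, don't crash */
    return n->inputs[0];
}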
|
static int send_msg(struct kiocb *iocb, struct socket *sock,
struct msghdr *m, size_t total_len)
{
struct sock *sk = sock->sk;
struct tipc_port *tport = tipc_sk_port(sk);
struct sockaddr_tipc *dest = (struct sockaddr_tipc *)m->msg_name;
int needs_conn;
long timeout_val;
int res = -EINVAL;
if (unlikely(!dest))
return -EDESTADDRREQ;
if (unlikely((m->msg_namelen < sizeof(*dest)) ||
(dest->family != AF_TIPC)))
return -EINVAL;
if (total_len > TIPC_MAX_USER_MSG_SIZE)
return -EMSGSIZE;
if (iocb)
lock_sock(sk);
needs_conn = (sock->state != SS_READY);
if (unlikely(needs_conn)) {
if (sock->state == SS_LISTENING) {
res = -EPIPE;
goto exit;
}
if (sock->state != SS_UNCONNECTED) {
res = -EISCONN;
goto exit;
}
if (tport->published) {
res = -EOPNOTSUPP;
goto exit;
}
if (dest->addrtype == TIPC_ADDR_NAME) {
tport->conn_type = dest->addr.name.name.type;
tport->conn_instance = dest->addr.name.name.instance;
}
/* Abort any pending connection attempts (very unlikely) */
reject_rx_queue(sk);
}
timeout_val = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
do {
if (dest->addrtype == TIPC_ADDR_NAME) {
res = dest_name_check(dest, m);
if (res)
break;
res = tipc_send2name(tport->ref,
&dest->addr.name.name,
dest->addr.name.domain,
m->msg_iov,
total_len);
} else if (dest->addrtype == TIPC_ADDR_ID) {
res = tipc_send2port(tport->ref,
&dest->addr.id,
m->msg_iov,
total_len);
} else if (dest->addrtype == TIPC_ADDR_MCAST) {
if (needs_conn) {
res = -EOPNOTSUPP;
break;
}
res = dest_name_check(dest, m);
if (res)
break;
res = tipc_multicast(tport->ref,
&dest->addr.nameseq,
m->msg_iov,
total_len);
}
if (likely(res != -ELINKCONG)) {
if (needs_conn && (res >= 0))
sock->state = SS_CONNECTING;
break;
}
if (timeout_val <= 0L) {
res = timeout_val ? timeout_val : -EWOULDBLOCK;
break;
}
release_sock(sk);
timeout_val = wait_event_interruptible_timeout(*sk_sleep(sk),
!tport->congested, timeout_val);
lock_sock(sk);
} while (1);
exit:
if (iocb)
release_sock(sk);
return res;
}
| 0 |
[
"CWE-20",
"CWE-269"
] |
linux
|
f3d3342602f8bcbf37d7c46641cb9bca7618eb1c
| 161,381,026,077,327,930,000,000,000,000,000,000,000 | 94 |
net: rework recvmsg handler msg_name and msg_namelen logic
This patch now always passes msg->msg_namelen as 0. recvmsg handlers must
set msg_namelen to the proper size <= sizeof(struct sockaddr_storage)
to return msg_name to the user.
This prevents numerous uninitialized memory leaks we had in the
recvmsg handlers and makes it harder for new code to accidentally leak
uninitialized memory.
Optimize for the case recvfrom is called with NULL as address. We don't
need to copy the address at all, so set it to NULL before invoking the
recvmsg handler. We can do so, because all the recvmsg handlers must
cope with the case a plain read() is called on them. read() also sets
msg_name to NULL.
Also document these changes in include/linux/net.h as suggested by David
Miller.
Changes since RFC:
Set msg->msg_name = NULL if user specified a NULL in msg_name but had a
non-null msg_namelen in verify_iovec/verify_compat_iovec. This doesn't
affect sendto as it would bail out earlier while trying to copy-in the
address. It also more naturally reflects the logic by the callers of
verify_iovec.
With this change in place I could remove "
if (!uaddr || msg_sys->msg_namelen == 0)
msg->msg_name = NULL
".
This change does not alter the user visible error logic as we ignore
msg_namelen as long as msg_name is NULL.
Also remove two unnecessary curly brackets in ___sys_recvmsg and change
comments to netdev style.
Cc: David Miller <[email protected]>
Suggested-by: Eric Dumazet <[email protected]>
Signed-off-by: Hannes Frederic Sowa <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
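A sketch of the contract this rework establishes, using simplified stand-in types: msg_namelen arrives as 0, and a handler that fills msg_name must set msg_namelen itself, never exceeding sizeof(struct sockaddr_storage):
#include <string.h>

struct sockaddr_storage_sketch { char data[128]; };
struct msghdr_sketch { void *msg_name; int msg_namelen; };

static void fill_name(struct msghdr_sketch *m,
                      const void *addr, int addr_len)
{
    if (m->msg_name == NULL)
        return;                       /* plain read(): no address wanted */
    if (addr_len > (int)sizeof(struct sockaddr_storage_sketch))
        addr_len = sizeof(struct sockaddr_storage_sketch);
    memcpy(m->msg_name, addr, (size_t)addr_len);
    m->msg_namelen = addr_len;        /* only now is the length valid */
}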
|
aclmask_direct(const Acl *acl, Oid roleid, Oid ownerId,
AclMode mask, AclMaskHow how)
{
AclMode result;
AclItem *aidat;
int i,
num;
/*
* Null ACL should not happen, since caller should have inserted
* appropriate default
*/
if (acl == NULL)
elog(ERROR, "null ACL");
check_acl(acl);
/* Quick exit for mask == 0 */
if (mask == 0)
return 0;
result = 0;
/* Owner always implicitly has all grant options */
if ((mask & ACLITEM_ALL_GOPTION_BITS) &&
roleid == ownerId)
{
result = mask & ACLITEM_ALL_GOPTION_BITS;
if ((how == ACLMASK_ALL) ? (result == mask) : (result != 0))
return result;
}
num = ACL_NUM(acl);
aidat = ACL_DAT(acl);
/*
* Check privileges granted directly to roleid (and not to public)
*/
for (i = 0; i < num; i++)
{
AclItem *aidata = &aidat[i];
if (aidata->ai_grantee == roleid)
{
result |= aidata->ai_privs & mask;
if ((how == ACLMASK_ALL) ? (result == mask) : (result != 0))
return result;
}
}
return result;
}
| 0 |
[
"CWE-264"
] |
postgres
|
fea164a72a7bfd50d77ba5fb418d357f8f2bb7d0
| 203,833,802,081,055,570,000,000,000,000,000,000,000 | 52 |
Shore up ADMIN OPTION restrictions.
Granting a role without ADMIN OPTION is supposed to prevent the grantee
from adding or removing members from the granted role. Issuing SET ROLE
before the GRANT bypassed that, because the role itself had an implicit
right to add or remove members. Plug that hole by recognizing that
implicit right only when the session user matches the current role.
Additionally, do not recognize it during a security-restricted operation
or during execution of a SECURITY DEFINER function. The restriction on
SECURITY DEFINER is not security-critical. However, it seems best for a
user testing his own SECURITY DEFINER function to see the same behavior
others will see. Back-patch to 8.4 (all supported versions).
The SQL standards do not conflate roles and users as PostgreSQL does;
only SQL roles have members, and only SQL users initiate sessions. An
application using PostgreSQL users and roles as SQL users and roles will
never attempt to grant membership in the role that is the session user,
so the implicit right to add or remove members will never arise.
The security impact was mostly that a role member could revoke access
from others, contrary to the wishes of his own grantor. Unapproved role
member additions are less notable, because the member can still largely
achieve that by creating a view or a SECURITY DEFINER function.
Reviewed by Andres Freund and Tom Lane. Reported, independently, by
Jonas Sundman and Noah Misch.
Security: CVE-2014-0060
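A pseudo-C sketch of the narrowed rule (function names are hypothetical stubs, not PostgreSQL's): the implicit right of a role to administer its own membership is honored only when the role is also the session user and no security-restricted operation is in progress:
#include <stdbool.h>

typedef unsigned int Oid;

/* Hypothetical stubs standing in for the real lookups. */
static Oid session_user_id(void) { return 10; }
static bool in_security_restricted_operation(void) { return false; }

static bool role_self_admin_allowed(Oid roleid)
{
    if (roleid != session_user_id())
        return false;    /* SET ROLE alone no longer confers admin */
    if (in_security_restricted_operation())
        return false;    /* nor does a security-restricted context */
    return true;
}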
|
Stream::Stream(Http2Handler *handler, int32_t stream_id)
: balloc(1024, 1024),
header{},
handler(handler),
file_ent(nullptr),
body_length(0),
body_offset(0),
header_buffer_size(0),
stream_id(stream_id),
echo_upload(false) {
auto config = handler->get_config();
ev_timer_init(&rtimer, stream_timeout_cb, 0., config->stream_read_timeout);
ev_timer_init(&wtimer, stream_timeout_cb, 0., config->stream_write_timeout);
rtimer.data = this;
wtimer.data = this;
}
| 0 |
[] |
nghttp2
|
95efb3e19d174354ca50c65d5d7227d92bcd60e1
| 110,001,704,945,509,040,000,000,000,000,000,000,000 | 16 |
Don't read too greedily
|
ex_startinsert(exarg_T *eap)
{
if (eap->forceit)
{
// cursor line can be zero on startup
if (!curwin->w_cursor.lnum)
curwin->w_cursor.lnum = 1;
set_cursor_for_append_to_line();
}
#ifdef FEAT_TERMINAL
// Ignore this when running in an active terminal.
if (term_job_running(curbuf->b_term))
return;
#endif
// Ignore the command when already in Insert mode. Inserting an
// expression register that invokes a function can do this.
if (State & INSERT)
return;
if (eap->cmdidx == CMD_startinsert)
restart_edit = 'a';
else if (eap->cmdidx == CMD_startreplace)
restart_edit = 'R';
else
restart_edit = 'V';
if (!eap->forceit)
{
if (eap->cmdidx == CMD_startinsert)
restart_edit = 'i';
curwin->w_curswant = 0; // avoid MAXCOL
}
if (VIsual_active)
showmode();
}
| 0 |
[
"CWE-122"
] |
vim
|
35a319b77f897744eec1155b736e9372c9c5575f
| 259,198,561,400,060,000,000,000,000,000,000,000,000 | 37 |
patch 8.2.3489: ml_get error after search with range
Problem: ml_get error after search with range.
Solution: Limit the line number to the buffer line count.
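A minimal sketch of the fix's idea — clamp the line number to the buffer's line count before using it, so a search with a range cannot leave the cursor on a nonexistent line. Simplified, not Vim's actual source:
typedef long linenr_T;

static linenr_T clamp_lnum(linenr_T lnum, linenr_T line_count)
{
    if (lnum < 1)
        return 1;
    if (lnum > line_count)
        return line_count;   /* avoids ml_get on a nonexistent line */
    return lnum;
}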
|
static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
if (!vmm_exclusive)
kvm_cpu_vmxon(phys_addr);
else if (vmx->loaded_vmcs->cpu != cpu)
loaded_vmcs_clear(vmx->loaded_vmcs);
if (per_cpu(current_vmcs, cpu) != vmx->loaded_vmcs->vmcs) {
per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs;
vmcs_load(vmx->loaded_vmcs->vmcs);
}
if (vmx->loaded_vmcs->cpu != cpu) {
struct desc_ptr *gdt = this_cpu_ptr(&host_gdt);
unsigned long sysenter_esp;
kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
local_irq_disable();
crash_disable_local_vmclear(cpu);
/*
* Read loaded_vmcs->cpu should be before fetching
* loaded_vmcs->loaded_vmcss_on_cpu_link.
* See the comments in __loaded_vmcs_clear().
*/
smp_rmb();
list_add(&vmx->loaded_vmcs->loaded_vmcss_on_cpu_link,
&per_cpu(loaded_vmcss_on_cpu, cpu));
crash_enable_local_vmclear(cpu);
local_irq_enable();
/*
* Linux uses per-cpu TSS and GDT, so set these when switching
* processors.
*/
vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */
vmcs_writel(HOST_GDTR_BASE, gdt->address); /* 22.2.4 */
rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
vmx->loaded_vmcs->cpu = cpu;
}
}
| 0 |
[] |
kvm
|
a642fc305053cc1c6e47e4f4df327895747ab485
| 311,545,832,637,719,370,000,000,000,000,000,000,000 | 47 |
kvm: vmx: handle invvpid vm exit gracefully
On systems with invvpid instruction support (corresponding bit in
IA32_VMX_EPT_VPID_CAP MSR is set) guest invocation of invvpid
causes vm exit, which is currently not handled and results in
propagation of unknown exit to userspace.
Fix this by installing an invvpid vm exit handler.
This is CVE-2014-3646.
Cc: [email protected]
Signed-off-by: Petr Matousek <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
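A sketch of what graceful handling looks like here, as a standalone stand-in rather than kernel code: install a handler for the exit reason and raise #UD in the guest instead of propagating an unknown exit to userspace:
enum exit_action { EXIT_TO_USERSPACE = 0, HANDLED_IN_KERNEL = 1 };

/* queue_ud stands in for kvm_queue_exception(vcpu, UD_VECTOR). */
static enum exit_action handle_invvpid_sketch(void (*queue_ud)(void))
{
    /* A guest that was not offered nested VPID support should see #UD,
     * matching bare-metal behavior for an unsupported instruction. */
    queue_ud();
    return HANDLED_IN_KERNEL;   /* resume the guest; no unknown exit */
}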
|
static void h2_session_ev_conn_error(h2_session *session, int arg, const char *msg)
{
switch (session->state) {
case H2_SESSION_ST_INIT:
case H2_SESSION_ST_DONE:
/* just leave */
transit(session, "conn error", H2_SESSION_ST_DONE);
break;
default:
ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c,
H2_SSSN_LOG(APLOGNO(03401), session,
"conn error -> shutdown"));
h2_session_shutdown(session, arg, msg, 0);
break;
}
}
| 0 |
[] |
mod_h2
|
5e75e5685dd043fe93a5a08a15edd087a43f6968
| 75,843,014,729,313,780,000,000,000,000,000,000,000 | 17 |
v1.11.0
--------------------------------------------------------------------------------
* connection IO event handling reworked. Instead of reacting to incoming bytes, the
state machine now acts on incoming frames that affect it. This reduces
state transitions.
* pytest suite now covers some basic tests on h2 selection, GET and POST
* started to add pytest suite from existing bash tests
|
static BROTLI_INLINE BROTLI_BOOL DecodeCommandBlockSwitchInternal(
int safe, BrotliDecoderState* s) {
if (!DecodeBlockTypeAndLength(safe, s, 1)) {
return BROTLI_FALSE;
}
s->htree_command = s->insert_copy_hgroup.htrees[s->block_type_rb[3]];
return BROTLI_TRUE;
}
| 0 |
[
"CWE-120"
] |
brotli
|
223d80cfbec8fd346e32906c732c8ede21f0cea6
| 325,762,299,558,380,060,000,000,000,000,000,000,000 | 8 |
Update (#826)
* IMPORTANT: decoder: fix potential overflow when input chunk is >2GiB
* simplify max Huffman table size calculation
* eliminate symbol duplicates (static arrays in .h files)
* minor combing in research/ code
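A generic sketch of guarding against >2GiB input chunks — feeding a decoder in bounded slices so 32-bit internal arithmetic cannot overflow. This illustrates the class of fix under stated assumptions; it is not Brotli's internal change:
#include <stddef.h>
#include <stdint.h>

#define MAX_CHUNK ((size_t)1 << 30)  /* 1 GiB per call, well under 2^31 */

typedef int (*decode_fn)(const uint8_t *data, size_t len, void *state);

static int decode_all(const uint8_t *data, size_t len,
                      decode_fn step, void *state)
{
    while (len > 0) {
        size_t n = len > MAX_CHUNK ? MAX_CHUNK : len;
        int rc = step(data, n, state);  /* each slice stays 32-bit safe */
        if (rc != 0)
            return rc;
        data += n;
        len -= n;
    }
    return 0;
}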
|