func
stringlengths 0
484k
| target
int64 0
1
| cwe
listlengths 0
4
| project
stringclasses 799
values | commit_id
stringlengths 40
40
| hash
float64 1,215,700,430,453,689,100,000,000B
340,281,914,521,452,260,000,000,000,000B
| size
int64 1
24k
| message
stringlengths 0
13.3k
|
|---|---|---|---|---|---|---|---|
// Return the relocation buffer attached to a parsed DEX binary.
// RZ_BORROW: ownership stays with `dex`; the caller must not free the result.
// Returns NULL (via rz_return_val_if_fail) when `dex` is NULL.
RZ_API RZ_BORROW RzBuffer *rz_bin_dex_relocations(RZ_NONNULL RzBinDex *dex) {
	rz_return_val_if_fail(dex, NULL);
	return dex->relocs_buffer;
}
| 0
|
[
"CWE-787"
] |
rizin
|
1524f85211445e41506f98180f8f69f7bf115406
| 100,015,129,915,620,330,000,000,000,000,000,000,000
| 4
|
fix #2969 - oob write (1 byte) in dex.c
|
/* Mark the [prt_min, prt_max] port range as rejected for a network of
 * 2^(32-maskbits) IPv4 addresses: split the summary so the range
 * boundaries fall on item edges, then bump reject_count on every item
 * inside the range.
 *
 * NOTE(review): the original return-type line was lost in extraction;
 * restored as "static void" — confirm against the surrounding file. */
static void
policy_summary_reject(smartlist_t *summary,
                      maskbits_t maskbits,
                      uint16_t prt_min, uint16_t prt_max)
{
  int i = policy_summary_split(summary, prt_min, prt_max);
  uint64_t count;

  /* XXX: ipv4 specific */
  /* Fix: a mask wider than 32 bits would make the shift below undefined
   * behavior (shift count >= type width) and the address count
   * meaningless for IPv4, so reject it up front. */
  tor_assert(maskbits <= 32);
  count = (U64_LITERAL(1) << (32-maskbits));

  while (i < smartlist_len(summary) &&
         AT(i)->prt_max <= prt_max) {
    AT(i)->reject_count += count;
    i++;
  }
  tor_assert(i < smartlist_len(summary) || prt_max==65535);
}
| 0
|
[
"CWE-119"
] |
tor
|
43414eb98821d3b5c6c65181d7545ce938f82c8e
| 240,565,363,988,024,940,000,000,000,000,000,000,000
| 14
|
Fix bounds-checking in policy_summarize
Found by piebeer.
|
/*
 * Extract basic info (id, dimensions, aspect ratio) from an HEVC SPS NAL.
 * Output pointers are all optional (may be NULL). When the SPS has no
 * aspect-ratio info, *par_n / *par_d are set to (u32)-1.
 *
 * Returns GF_NON_COMPLIANT_BITSTREAM when the SPS cannot be parsed or
 * reports an out-of-range id, GF_OK otherwise.
 */
GF_Err gf_hevc_get_sps_info_with_state(HEVCState *hevc, u8 *sps_data, u32 sps_size, u32 *sps_id, u32 *width, u32 *height, s32 *par_n, s32 *par_d)
{
	s32 idx;
	idx = gf_hevc_read_sps(sps_data, sps_size, hevc);
	/* Fix: bound-check the parsed id before indexing hevc->sps[] — a
	 * corrupted bitstream could otherwise drive an out-of-bounds
	 * access. HEVC allows at most 16 SPS (ids 0..15). */
	if ((idx < 0) || (idx > 15)) {
		return GF_NON_COMPLIANT_BITSTREAM;
	}
	if (sps_id) *sps_id = idx;
	if (width) *width = hevc->sps[idx].width;
	if (height) *height = hevc->sps[idx].height;
	if (par_n) *par_n = hevc->sps[idx].aspect_ratio_info_present_flag ? hevc->sps[idx].sar_width : (u32)-1;
	if (par_d) *par_d = hevc->sps[idx].aspect_ratio_info_present_flag ? hevc->sps[idx].sar_height : (u32)-1;
	return GF_OK;
}
| 0
|
[
"CWE-190",
"CWE-787"
] |
gpac
|
51cdb67ff7c5f1242ac58c5aa603ceaf1793b788
| 142,025,442,902,057,520,000,000,000,000,000,000,000
| 15
|
add safety in avc/hevc/vvc sps/pps/vps ID check - cf #1720 #1721 #1722
|
/*
 * Copy the MAC of a CBC-decrypted TLS record from |rec->data| into |out|
 * in constant time. The MAC's position inside the record depends on the
 * (secret) padding length, so no branch or memory index below may depend
 * on it. |md_size| is the MAC length; |orig_len| is the record length
 * before padding removal. Callers must ensure rec->length >= md_size
 * (asserted below).
 */
void ssl3_cbc_copy_mac(unsigned char* out,
		       const SSL3_RECORD *rec,
		       unsigned md_size,unsigned orig_len)
{
#if defined(CBC_MAC_ROTATE_IN_PLACE)
	unsigned char rotated_mac_buf[64+EVP_MAX_MD_SIZE];
	unsigned char *rotated_mac;
#else
	unsigned char rotated_mac[EVP_MAX_MD_SIZE];
#endif
	/* mac_end is the index of |rec->data| just after the end of the MAC. */
	unsigned mac_end = rec->length;
	unsigned mac_start = mac_end - md_size;
	/* scan_start contains the number of bytes that we can ignore because
	 * the MAC's position can only vary by 255 bytes. */
	unsigned scan_start = 0;
	unsigned i, j;
	unsigned div_spoiler;
	unsigned rotate_offset;

	OPENSSL_assert(orig_len >= md_size);
	OPENSSL_assert(md_size <= EVP_MAX_MD_SIZE);

#if defined(CBC_MAC_ROTATE_IN_PLACE)
	/* 64-byte-align the rotation buffer so cache-line layout does not
	 * depend on the stack address. */
	rotated_mac = rotated_mac_buf + ((0-(size_t)rotated_mac_buf)&63);
#endif

	/* This information is public so it's safe to branch based on it. */
	if (orig_len > md_size + 255 + 1)
		scan_start = orig_len - (md_size + 255 + 1);

	/* div_spoiler contains a multiple of md_size that is used to cause the
	 * modulo operation to be constant time. Without this, the time varies
	 * based on the amount of padding when running on Intel chips at least.
	 *
	 * The aim of right-shifting md_size is so that the compiler doesn't
	 * figure out that it can remove div_spoiler as that would require it
	 * to prove that md_size is always even, which I hope is beyond it. */
	div_spoiler = md_size >> 1;
	div_spoiler <<= (sizeof(div_spoiler)-1)*8;
	rotate_offset = (div_spoiler + mac_start - scan_start) % md_size;

	/* Gather the MAC bytes into rotated_mac (rotated by rotate_offset):
	 * every candidate byte is read; masks select the real MAC range. */
	memset(rotated_mac, 0, md_size);
	for (i = scan_start, j = 0; i < orig_len; i++)
	{
		unsigned char mac_started = constant_time_ge(i, mac_start);
		unsigned char mac_ended = constant_time_ge(i, mac_end);
		unsigned char b = rec->data[i];
		rotated_mac[j++] |= b & mac_started & ~mac_ended;
		j &= constant_time_lt(j,md_size);
	}

	/* Now rotate the MAC */
#if defined(CBC_MAC_ROTATE_IN_PLACE)
	j = 0;
	for (i = 0; i < md_size; i++)
	{
		/* in case cache-line is 32 bytes, touch second line */
		((volatile unsigned char *)rotated_mac)[rotate_offset^32];
		out[j++] = rotated_mac[rotate_offset++];
		rotate_offset &= constant_time_lt(rotate_offset,md_size);
	}
#else
	/* O(md_size^2) scatter: each output byte is selected with a
	 * constant-time equality mask instead of a secret-dependent index. */
	memset(out, 0, md_size);
	rotate_offset = md_size - rotate_offset;
	rotate_offset &= constant_time_lt(rotate_offset,md_size);
	for (i = 0; i < md_size; i++)
	{
		for (j = 0; j < md_size; j++)
			out[j] |= rotated_mac[i] & constant_time_eq_8(j, rotate_offset);
		rotate_offset++;
		rotate_offset &= constant_time_lt(rotate_offset,md_size);
	}
#endif
}
| 0
|
[
"CWE-310"
] |
openssl
|
579f3a631ebeef5eb0135977640a835968d3ad6c
| 300,209,906,286,106,400,000,000,000,000,000,000,000
| 75
|
s3_cbc.c: make CBC_MAC_ROTATE_IN_PLACE universal.
(cherry picked from commit f93a41877d8d7a287debb7c63d7b646abaaf269c)
|
/*
 * Look up @attr_name in @msg and parse its value as a base-10 integer.
 *
 * Returns @default_value when the attribute is missing or empty, when
 * the value is too long to be a 32-bit int, when strtoll reports a
 * range/conversion error, or when trailing garbage follows the digits.
 */
int ldb_msg_find_attr_as_int(const struct ldb_message *msg,
			     const char *attr_name,
			     int default_value)
{
	/* sized to hold INT_MIN ("-2147483648") plus the terminating NUL */
	char buf[sizeof("-2147483648")] = {0};
	char *end = NULL;
	int parsed;
	const struct ldb_val *v;

	v = ldb_msg_find_ldb_val(msg, attr_name);
	if (v == NULL || v->data == NULL) {
		return default_value;
	}

	/* anything that doesn't fit (with NUL) can't be a valid int */
	if (v->length >= sizeof(buf)) {
		return default_value;
	}
	memcpy(buf, v->data, v->length);

	errno = 0;
	parsed = (int) strtoll(buf, &end, 10);
	if (errno != 0) {
		return default_value;
	}
	if (end != NULL && end[0] != '\0') {
		return default_value;
	}
	return parsed;
}
| 0
|
[
"CWE-200"
] |
samba
|
7efe8182c165fbf17d2f88c173527a7a554e214b
| 326,306,801,787,977,700,000,000,000,000,000,000,000
| 29
|
CVE-2022-32746 ldb: Add flag to mark message element values as shared
When making a shallow copy of an ldb message, mark the message elements
of the copy as sharing their values with the message elements in the
original message.
This flag value will be heeded in the next commit.
BUG: https://bugzilla.samba.org/show_bug.cgi?id=15009
Signed-off-by: Joseph Sutton <[email protected]>
|
/* Interface-change (IFC) retransmission timer handler: send the pending
 * IGMPv3 change report and, while retransmissions remain, decrement the
 * counter and re-arm the timer. Releases the in_dev reference with
 * in_dev_put() on exit. */
static void igmp_ifc_timer_expire(struct timer_list *t)
{
	struct in_device *in_dev = from_timer(in_dev, t, mr_ifc_timer);

	igmpv3_send_cr(in_dev);
	if (in_dev->mr_ifc_count) {
		in_dev->mr_ifc_count--;
		/* more reports pending: schedule the next transmission */
		igmp_ifc_start_timer(in_dev,
				     unsolicited_report_interval(in_dev));
	}
	in_dev_put(in_dev);
}
| 0
|
[
"CWE-362"
] |
linux
|
23d2b94043ca8835bd1e67749020e839f396a1c2
| 198,869,088,190,887,880,000,000,000,000,000,000,000
| 12
|
igmp: Add ip_mc_list lock in ip_check_mc_rcu
I got below panic when doing fuzz test:
Kernel panic - not syncing: panic_on_warn set ...
CPU: 0 PID: 4056 Comm: syz-executor.3 Tainted: G B 5.14.0-rc1-00195-gcff5c4254439-dirty #2
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.12.0-59-gc9ba5276e321-prebuilt.qemu.org 04/01/2014
Call Trace:
dump_stack_lvl+0x7a/0x9b
panic+0x2cd/0x5af
end_report.cold+0x5a/0x5a
kasan_report+0xec/0x110
ip_check_mc_rcu+0x556/0x5d0
__mkroute_output+0x895/0x1740
ip_route_output_key_hash_rcu+0x2d0/0x1050
ip_route_output_key_hash+0x182/0x2e0
ip_route_output_flow+0x28/0x130
udp_sendmsg+0x165d/0x2280
udpv6_sendmsg+0x121e/0x24f0
inet6_sendmsg+0xf7/0x140
sock_sendmsg+0xe9/0x180
____sys_sendmsg+0x2b8/0x7a0
___sys_sendmsg+0xf0/0x160
__sys_sendmmsg+0x17e/0x3c0
__x64_sys_sendmmsg+0x9e/0x100
do_syscall_64+0x3b/0x90
entry_SYSCALL_64_after_hwframe+0x44/0xae
RIP: 0033:0x462eb9
Code: f7 d8 64 89 02 b8 ff ff ff ff c3 66 0f 1f 44 00 00 48 89 f8
48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48>
3d 01 f0 ff ff 73 01 c3 48 c7 c1 bc ff ff ff f7 d8 64 89 01 48
RSP: 002b:00007f3df5af1c58 EFLAGS: 00000246 ORIG_RAX: 0000000000000133
RAX: ffffffffffffffda RBX: 000000000073bf00 RCX: 0000000000462eb9
RDX: 0000000000000312 RSI: 0000000020001700 RDI: 0000000000000007
RBP: 0000000000000004 R08: 0000000000000000 R09: 0000000000000000
R10: 0000000000000000 R11: 0000000000000246 R12: 00007f3df5af26bc
R13: 00000000004c372d R14: 0000000000700b10 R15: 00000000ffffffff
It is one use-after-free in ip_check_mc_rcu.
In ip_mc_del_src, the ip_sf_list of pmc has been freed under pmc->lock protection.
But access to ip_sf_list in ip_check_mc_rcu is not protected by the lock.
Signed-off-by: Liu Jian <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
__always_inline
do_set_mallopt_check (int32_t value)
{
return 1;
| 0
|
[
"CWE-787"
] |
glibc
|
d6db68e66dff25d12c3bc5641b60cbd7fb6ab44f
| 155,045,851,740,106,970,000,000,000,000,000,000,000
| 4
|
malloc: Mitigate null-byte overflow attacks
* malloc/malloc.c (_int_free): Check for corrupt prev_size vs size.
(malloc_consolidate): Likewise.
|
/** Reset the layer's read state so the next feature read starts from
 *  the first KML feature again; the cached last-asked index and
 *  last-count values are invalidated (-1). */
void OGRKMLLayer::ResetReading()
{
    iNextKMLId_ = 0;
    nLastAsked = -1;
    nLastCount = -1;
}
| 0
|
[
"CWE-787"
] |
gdal
|
27b9bf644bcf1208f7d6594bdd104cc8a8bb0646
| 258,449,861,573,828,770,000,000,000,000,000,000
| 6
|
KML: set OAMS_TRADITIONAL_GIS_ORDER for SRS returned on returned layers
|
/* Compute the DER encoding size for an ECC key.
 *
 * key  ECC key to be sized.
 * pub  non-zero for the public-key encoding, zero for the private one.
 *
 * Returns the encoded size in bytes on success, otherwise the error
 * code propagated from wc_BuildEccKeyDer().
 */
int wc_EccKeyDerSize(ecc_key* key, int pub)
{
    int ret;
    word32 derSz = 0;

    /* passing a NULL output buffer makes the builder report length only */
    ret = wc_BuildEccKeyDer(key, NULL, &derSz, pub);
    if (ret == LENGTH_ONLY_E) {
        return derSz;
    }
    return ret;
}
| 0
|
[
"CWE-125",
"CWE-345"
] |
wolfssl
|
f93083be72a3b3d956b52a7ec13f307a27b6e093
| 220,261,778,332,032,840,000,000,000,000,000,000,000
| 12
|
OCSP: improve handling of OCSP no check extension
|
/*
 * Run gap_try() over every tile, first sweeping each row horizontally
 * and then each column vertically, so isolated gaps between modified
 * tiles get filled in. Returns the number of tiles flagged as modified,
 * or a negative value if gap_try() reports a fatal error.
 */
static int fill_tile_gaps(void) {
	int tx, ty, runlen, seen;
	int idx, ndiff = 0, rc;

	/* horizontal sweep, one pass per tile row: */
	for (ty = 0; ty < ntiles_y; ty++) {
		runlen = 0;
		seen = 0;
		for (tx = 0; tx < ntiles_x; tx++) {
			rc = gap_try(tx, ty, &runlen, &seen, 1);
			if (rc < 0) {
				return rc;	/* fatal */
			}
		}
	}

	/* vertical sweep, one pass per tile column: */
	for (tx = 0; tx < ntiles_x; tx++) {
		runlen = 0;
		seen = 0;
		for (ty = 0; ty < ntiles_y; ty++) {
			rc = gap_try(tx, ty, &runlen, &seen, 0);
			if (rc < 0) {
				return rc;	/* fatal */
			}
		}
	}

	/* count how many tiles ended up flagged as modified */
	for (idx = 0; idx < ntiles; idx++) {
		if (tile_has_diff[idx]) {
			ndiff++;
		}
	}
	return ndiff;
}
| 0
|
[
"CWE-862",
"CWE-284",
"CWE-732"
] |
x11vnc
|
69eeb9f7baa14ca03b16c9de821f9876def7a36a
| 108,544,214,661,372,030,000,000,000,000,000,000,000
| 31
|
scan: limit access to shared memory segments to current user
|
/* Tear down a vhost device: stop and flush each virtqueue's poller,
 * wait for outstanding zerocopy DMAs, release all per-vq fds and
 * eventfd contexts, then free the log state, the memory map, the
 * worker thread and the mm reference.
 *
 * @locked: whether the caller holds dev->mutex; forwarded to the RCU
 *          lockdep check when freeing dev->memory. */
void vhost_dev_cleanup(struct vhost_dev *dev, bool locked)
{
	int i;

	for (i = 0; i < dev->nvqs; ++i) {
		/* only vqs that were actually set up have a live poller */
		if (dev->vqs[i].kick && dev->vqs[i].handle_kick) {
			vhost_poll_stop(&dev->vqs[i].poll);
			vhost_poll_flush(&dev->vqs[i].poll);
		}
		/* Wait for all lower device DMAs done. */
		if (dev->vqs[i].ubufs)
			vhost_ubuf_put_and_wait(dev->vqs[i].ubufs);

		/* Signal guest as appropriate. */
		vhost_zerocopy_signal_used(&dev->vqs[i]);

		if (dev->vqs[i].error_ctx)
			eventfd_ctx_put(dev->vqs[i].error_ctx);
		if (dev->vqs[i].error)
			fput(dev->vqs[i].error);
		if (dev->vqs[i].kick)
			fput(dev->vqs[i].kick);
		if (dev->vqs[i].call_ctx)
			eventfd_ctx_put(dev->vqs[i].call_ctx);
		if (dev->vqs[i].call)
			fput(dev->vqs[i].call);
		vhost_vq_reset(dev, dev->vqs + i);
	}
	vhost_dev_free_iovecs(dev);
	if (dev->log_ctx)
		eventfd_ctx_put(dev->log_ctx);
	dev->log_ctx = NULL;
	if (dev->log_file)
		fput(dev->log_file);
	dev->log_file = NULL;
	/* No one will access memory at this point */
	kfree(rcu_dereference_protected(dev->memory,
					locked ==
						lockdep_is_held(&dev->mutex)));
	RCU_INIT_POINTER(dev->memory, NULL);
	WARN_ON(!list_empty(&dev->work_list));
	if (dev->worker) {
		kthread_stop(dev->worker);
		dev->worker = NULL;
	}
	if (dev->mm)
		mmput(dev->mm);
	dev->mm = NULL;
}
| 0
|
[] |
linux-2.6
|
bd97120fc3d1a11f3124c7c9ba1d91f51829eb85
| 141,469,166,522,323,540,000,000,000,000,000,000,000
| 49
|
vhost: fix length for cross region descriptor
If a single descriptor crosses a region, the
second chunk length should be decremented
by size translated so far, instead it includes
the full descriptor length.
Signed-off-by: Michael S. Tsirkin <[email protected]>
Acked-by: Jason Wang <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
/*
 * Compare @source against the source of @fs, trying progressively more
 * expensive strategies: plain string comparison first, then (when a
 * @cache is given) canonicalized paths, and finally tag (LABEL/UUID/...)
 * lookups via the cache.
 *
 * Returns 1 on match, 0 otherwise (including when matching is not
 * possible, e.g. network/pseudo filesystems once canonicalization is
 * required).
 */
int mnt_fs_match_source(struct libmnt_fs *fs, const char *source,
			struct libmnt_cache *cache)
{
	char *cn;
	const char *src, *t, *v;

	if (!fs)
		return 0;

	/* 1) native paths... */
	if (mnt_fs_streq_srcpath(fs, source) == 1)
		return 1;

	if (!source || !fs->source)
		return 0;

	/* ... and tags */
	if (fs->tagname && strcmp(source, fs->source) == 0)
		return 1;

	/* everything below requires canonicalization via the cache */
	if (!cache)
		return 0;
	/* canonical paths are meaningless for net/pseudo filesystems */
	if (fs->flags & (MNT_FS_NET | MNT_FS_PSEUDO))
		return 0;

	cn = mnt_resolve_spec(source, cache);
	if (!cn)
		return 0;

	/* 2) canonicalized and native */
	src = mnt_fs_get_srcpath(fs);
	if (src && mnt_fs_streq_srcpath(fs, cn))
		return 1;

	/* 3) canonicalized and canonicalized */
	if (src) {
		src = mnt_resolve_path(src, cache);
		if (src && !strcmp(cn, src))
			return 1;
	}
	if (src || mnt_fs_get_tag(fs, &t, &v))
		/* src path does not match and the tag is not defined */
		return 0;

	/* read @source's tags to the cache */
	if (mnt_cache_read_tags(cache, cn) < 0) {
		if (errno == EACCES) {
			/* we don't have permissions to read TAGs from
			 * @source, but can translate the @fs tag to devname.
			 *
			 * (because libblkid uses udev symlinks and this is
			 * accessible for non-root uses)
			 */
			char *x = mnt_resolve_tag(t, v, cache);
			if (x && !strcmp(x, cn))
				return 1;
		}
		return 0;
	}

	/* 4) has the @source a tag that matches with the tag from @fs ? */
	if (mnt_cache_device_has_tag(cache, cn, t, v))
		return 1;

	return 0;
}
| 0
|
[
"CWE-552",
"CWE-703"
] |
util-linux
|
166e87368ae88bf31112a30e078cceae637f4cdb
| 145,790,905,073,967,770,000,000,000,000,000,000,000
| 66
|
libmount: remove support for deleted mount table entries
The "(deleted)" suffix has been originally used by kernel for deleted
mountpoints. Since kernel commit 9d4d65748a5ca26ea8650e50ba521295549bf4e3
(Dec 2014) kernel does not use this suffix for mount stuff in /proc at
all. Let's remove this support from libmount too.
Signed-off-by: Karel Zak <[email protected]>
|
/**
 * Protocol V2 state handler: charge one message against the policy
 * message throttler before dispatch. If the throttler is full, schedule
 * a 1 ms wakeup (unless one is already pending) and stall the state
 * machine by returning nullptr; otherwise advance to THROTTLE_BYTES.
 */
CtPtr ProtocolV2::throttle_message() {
  ldout(cct, 20) << __func__ << dendl;

  if (connection->policy.throttler_messages) {
    ldout(cct, 10) << __func__ << " wants " << 1
                   << " message from policy throttler "
                   << connection->policy.throttler_messages->get_current()
                   << "/" << connection->policy.throttler_messages->get_max()
                   << dendl;
    if (!connection->policy.throttler_messages->get_or_fail()) {
      ldout(cct, 10) << __func__ << " wants 1 message from policy throttle "
                     << connection->policy.throttler_messages->get_current()
                     << "/" << connection->policy.throttler_messages->get_max()
                     << " failed, just wait." << dendl;
      // Draining the full message queue in the thread pool can take a
      // while, so retry after a 1 ms (1000 us) time event — but only
      // register one if no wakeup is already pending.
      if (connection->register_time_events.empty()) {
        connection->register_time_events.insert(
            connection->center->create_time_event(1000,
                                                  connection->wakeup_handler));
      }
      return nullptr;
    }
  }

  state = THROTTLE_BYTES;
  return CONTINUE(throttle_bytes);
}
| 0
|
[
"CWE-323"
] |
ceph
|
20b7bb685c5ea74c651ca1ea547ac66b0fee7035
| 280,204,584,934,248,750,000,000,000,000,000,000,000
| 28
|
msg/async/ProtocolV2: avoid AES-GCM nonce reuse vulnerabilities
The secure mode uses AES-128-GCM with 96-bit nonces consisting of a
32-bit counter followed by a 64-bit salt. The counter is incremented
after processing each frame, the salt is fixed for the duration of
the session. Both are initialized from the session key generated
during session negotiation, so the counter starts with essentially
a random value. It is allowed to wrap, and, after 2**32 frames, it
repeats, resulting in nonce reuse (the actual sequence numbers that
the messenger works with are 64-bit, so the session continues on).
Because of how GCM works, this completely breaks both confidentiality
and integrity aspects of the secure mode. A single nonce reuse reveals
the XOR of two plaintexts and almost completely reveals the subkey
used for producing authentication tags. After a few nonces get used
twice, all confidentiality and integrity goes out the window and the
attacker can potentially encrypt-authenticate plaintext of their
choice.
We can't easily change the nonce format to extend the counter to
64 bits (and possibly XOR it with a longer salt). Instead, just
remember the initial nonce and cut the session before it repeats,
forcing renegotiation.
Signed-off-by: Ilya Dryomov <[email protected]>
Reviewed-by: Radoslaw Zarzynski <[email protected]>
Reviewed-by: Sage Weil <[email protected]>
Conflicts:
src/msg/async/ProtocolV2.h [ context: commit ed3ec4c01d17
("msg: Build target 'common' without using namespace in
headers") not in octopus ]
|
// Named-argument formatting: name lookup, mixing with positional args,
// error cases, and named dynamic width/precision.
TEST(FormatterTest, NamedArg) {
  EXPECT_EQ("1/a/A", format("{_1}/{a_}/{A_}", fmt::arg("a_", 'a'),
                            fmt::arg("A_", "A"), fmt::arg("_1", 1)));
  char a = 'A', b = 'B', c = 'C';
  // FMT_CAPTURE turns local variables into same-named arguments.
  EXPECT_EQ("BB/AA/CC", format("{1}{b}/{0}{a}/{2}{c}", FMT_CAPTURE(a, b, c)));
  EXPECT_EQ(" A", format("{a:>2}", FMT_CAPTURE(a)));
  // Malformed or unknown names must raise FormatError.
  EXPECT_THROW_MSG(format("{a+}", FMT_CAPTURE(a)), FormatError,
                   "missing '}' in format string");
  EXPECT_THROW_MSG(format("{a}"), FormatError, "argument not found");
  EXPECT_THROW_MSG(format("{d}", FMT_CAPTURE(a, b, c)), FormatError,
                   "argument not found");
  // Named references count as manual indexing; switching modes is an error.
  EXPECT_THROW_MSG(format("{a}{}", FMT_CAPTURE(a)),
                   FormatError, "cannot switch from manual to automatic argument indexing");
  EXPECT_THROW_MSG(format("{}{a}", FMT_CAPTURE(a)),
                   FormatError, "cannot switch from automatic to manual argument indexing");
  // Width and precision may themselves be named arguments.
  EXPECT_EQ(" -42", format("{0:{width}}", -42, fmt::arg("width", 4)));
  EXPECT_EQ("st", format("{0:.{precision}}", "str", fmt::arg("precision", 2)));
  int n = 100;
  EXPECT_EQ(L"n=100", format(L"n={n}", FMT_CAPTURE_W(n)));
}
| 0
|
[
"CWE-134",
"CWE-119",
"CWE-787"
] |
fmt
|
8cf30aa2be256eba07bb1cefb998c52326e846e7
| 5,441,308,131,791,693,000,000,000,000,000,000,000
| 20
|
Fix segfault on complex pointer formatting (#642)
|
/* Netlink payload size for an XFRM expire event: the aligned
 * xfrm_user_expire struct plus room for an optional mark attribute. */
static inline unsigned int xfrm_expire_msgsize(void)
{
	unsigned int len = NLMSG_ALIGN(sizeof(struct xfrm_user_expire));

	len += nla_total_size(sizeof(struct xfrm_mark));
	return len;
}
| 0
|
[
"CWE-125"
] |
linux
|
b805d78d300bcf2c83d6df7da0c818b0fee41427
| 62,728,795,278,413,430,000,000,000,000,000,000,000
| 5
|
xfrm: policy: Fix out-of-bound array accesses in __xfrm_policy_unlink
UBSAN report this:
UBSAN: Undefined behaviour in net/xfrm/xfrm_policy.c:1289:24
index 6 is out of range for type 'unsigned int [6]'
CPU: 1 PID: 0 Comm: swapper/1 Not tainted 4.4.162-514.55.6.9.x86_64+ #13
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.10.2-1ubuntu1 04/01/2014
0000000000000000 1466cf39b41b23c9 ffff8801f6b07a58 ffffffff81cb35f4
0000000041b58ab3 ffffffff83230f9c ffffffff81cb34e0 ffff8801f6b07a80
ffff8801f6b07a20 1466cf39b41b23c9 ffffffff851706e0 ffff8801f6b07ae8
Call Trace:
<IRQ> [<ffffffff81cb35f4>] __dump_stack lib/dump_stack.c:15 [inline]
<IRQ> [<ffffffff81cb35f4>] dump_stack+0x114/0x1a0 lib/dump_stack.c:51
[<ffffffff81d94225>] ubsan_epilogue+0x12/0x8f lib/ubsan.c:164
[<ffffffff81d954db>] __ubsan_handle_out_of_bounds+0x16e/0x1b2 lib/ubsan.c:382
[<ffffffff82a25acd>] __xfrm_policy_unlink+0x3dd/0x5b0 net/xfrm/xfrm_policy.c:1289
[<ffffffff82a2e572>] xfrm_policy_delete+0x52/0xb0 net/xfrm/xfrm_policy.c:1309
[<ffffffff82a3319b>] xfrm_policy_timer+0x30b/0x590 net/xfrm/xfrm_policy.c:243
[<ffffffff813d3927>] call_timer_fn+0x237/0x990 kernel/time/timer.c:1144
[<ffffffff813d8e7e>] __run_timers kernel/time/timer.c:1218 [inline]
[<ffffffff813d8e7e>] run_timer_softirq+0x6ce/0xb80 kernel/time/timer.c:1401
[<ffffffff8120d6f9>] __do_softirq+0x299/0xe10 kernel/softirq.c:273
[<ffffffff8120e676>] invoke_softirq kernel/softirq.c:350 [inline]
[<ffffffff8120e676>] irq_exit+0x216/0x2c0 kernel/softirq.c:391
[<ffffffff82c5edab>] exiting_irq arch/x86/include/asm/apic.h:652 [inline]
[<ffffffff82c5edab>] smp_apic_timer_interrupt+0x8b/0xc0 arch/x86/kernel/apic/apic.c:926
[<ffffffff82c5c985>] apic_timer_interrupt+0xa5/0xb0 arch/x86/entry/entry_64.S:735
<EOI> [<ffffffff81188096>] ? native_safe_halt+0x6/0x10 arch/x86/include/asm/irqflags.h:52
[<ffffffff810834d7>] arch_safe_halt arch/x86/include/asm/paravirt.h:111 [inline]
[<ffffffff810834d7>] default_idle+0x27/0x430 arch/x86/kernel/process.c:446
[<ffffffff81085f05>] arch_cpu_idle+0x15/0x20 arch/x86/kernel/process.c:437
[<ffffffff8132abc3>] default_idle_call+0x53/0x90 kernel/sched/idle.c:92
[<ffffffff8132b32d>] cpuidle_idle_call kernel/sched/idle.c:156 [inline]
[<ffffffff8132b32d>] cpu_idle_loop kernel/sched/idle.c:251 [inline]
[<ffffffff8132b32d>] cpu_startup_entry+0x60d/0x9a0 kernel/sched/idle.c:299
[<ffffffff8113e119>] start_secondary+0x3c9/0x560 arch/x86/kernel/smpboot.c:245
The issue is triggered as this:
xfrm_add_policy
-->verify_newpolicy_info //check the index provided by user with XFRM_POLICY_MAX
//In my case, the index is 0x6E6BB6, so it pass the check.
-->xfrm_policy_construct //copy the user's policy and set xfrm_policy_timer
-->xfrm_policy_insert
--> __xfrm_policy_link //use the orgin dir, in my case is 2
--> xfrm_gen_index //generate policy index, there is 0x6E6BB6
then xfrm_policy_timer be fired
xfrm_policy_timer
--> xfrm_policy_id2dir //get dir from (policy index & 7), in my case is 6
--> xfrm_policy_delete
--> __xfrm_policy_unlink //access policy_count[dir], trigger out of range access
Add xfrm_policy_id2dir check in verify_newpolicy_info, make sure the computed dir is
valid, to fix the issue.
Reported-by: Hulk Robot <[email protected]>
Fixes: e682adf021be ("xfrm: Try to honor policy index if it's supplied by user")
Signed-off-by: YueHaibing <[email protected]>
Acked-by: Herbert Xu <[email protected]>
Signed-off-by: Steffen Klassert <[email protected]>
|
/*
 * Deep-copy |object| into a newly created object with the same type and
 * identifier, returned through |object_copy|. Structure members, array
 * and dictionary prototype items, and function return objects are
 * copied recursively. On failure the partially built copy is destroyed
 * and the error code is propagated; *object_copy is set only on success.
 */
int yr_object_copy(
    YR_OBJECT* object,
    YR_OBJECT** object_copy)
{
  YR_OBJECT* copy;
  YR_OBJECT* o;

  YR_STRUCTURE_MEMBER* structure_member;
  YR_OBJECT_FUNCTION* func;
  YR_OBJECT_FUNCTION* func_copy;

  int i;

  *object_copy = NULL;

  FAIL_ON_ERROR(yr_object_create(
      object->type,
      object->identifier,
      NULL,
      &copy));

  switch(object->type)
  {
    case OBJECT_TYPE_INTEGER:
      ((YR_OBJECT_INTEGER*) copy)->value = ((YR_OBJECT_INTEGER*) object)->value;
      break;

    case OBJECT_TYPE_STRING:
      // String objects own their value buffer; duplicate it (NULL stays NULL).
      if (((YR_OBJECT_STRING*) object)->value != NULL)
      {
        ((YR_OBJECT_STRING*) copy)->value = sized_string_dup(
            ((YR_OBJECT_STRING*) object)->value);
      }
      else
      {
        ((YR_OBJECT_STRING*) copy)->value = NULL;
      }
      break;

    case OBJECT_TYPE_FLOAT:
      ((YR_OBJECT_DOUBLE*) copy)->value = ((YR_OBJECT_DOUBLE*) object)->value;
      break;

    case OBJECT_TYPE_FUNCTION:
      func = (YR_OBJECT_FUNCTION*) object;
      func_copy = (YR_OBJECT_FUNCTION*) copy;

      FAIL_ON_ERROR_WITH_CLEANUP(
          yr_object_copy(func->return_obj, &func_copy->return_obj),
          yr_object_destroy(copy));

      // Prototype entries are copied by assignment only.
      // NOTE(review): assumes prototypes[] entries are not individually
      // owned by the function object — confirm against the destructor.
      for (i = 0; i < MAX_OVERLOADED_FUNCTIONS; i++)
        func_copy->prototypes[i] = func->prototypes[i];
      break;

    case OBJECT_TYPE_STRUCTURE:
      structure_member = ((YR_OBJECT_STRUCTURE*) object)->members;

      while (structure_member != NULL)
      {
        FAIL_ON_ERROR_WITH_CLEANUP(
            yr_object_copy(structure_member->object, &o),
            yr_object_destroy(copy));

        FAIL_ON_ERROR_WITH_CLEANUP(
            yr_object_structure_set_member(copy, o),
            // o was not adopted by copy, so free it separately on failure.
            yr_free(o);
            yr_object_destroy(copy));

        structure_member = structure_member->next;
      }
      break;

    case OBJECT_TYPE_ARRAY:
      yr_object_copy(
          ((YR_OBJECT_ARRAY *) object)->prototype_item,
          &o);
      ((YR_OBJECT_ARRAY *)copy)->prototype_item = o;
      break;

    case OBJECT_TYPE_DICTIONARY:
      yr_object_copy(
          ((YR_OBJECT_DICTIONARY *) object)->prototype_item,
          &o);
      ((YR_OBJECT_DICTIONARY *)copy)->prototype_item = o;
      break;

    default:
      // All object types must be handled above.
      assert(FALSE);
  }

  *object_copy = copy;

  return ERROR_SUCCESS;
}
| 0
|
[
"CWE-416"
] |
yara
|
053e67e3ec81cc9268ce30eaf0d6663d8639ed1e
| 71,303,071,565,797,750,000,000,000,000,000,000,000
| 106
|
Fix issue #658
|
/*
 * Remove every image ref for which filter_func() returns true. Both
 * loops iterate backwards so in-place removal by index does not skip
 * elements. An image left with zero refs is itself removed when
 * free_images is set or when it has no client id.
 */
filter_refs(GraphicsManager *self, const void* data, bool free_images, bool (*filter_func)(const ImageRef*, Image*, const void*, CellPixelSize), CellPixelSize cell) {
    for (size_t i = self->image_count; i-- > 0;) {
        Image *img = self->images + i;
        for (size_t j = img->refcnt; j-- > 0;) {
            ImageRef *ref = img->refs + j;
            if (filter_func(ref, img, data, cell)) {
                // safe mid-loop: we walk the array downwards
                remove_i_from_array(img->refs, j, img->refcnt);
                self->layers_dirty = true;
            }
        }
        if (img->refcnt == 0 && (free_images || img->client_id == 0)) remove_image(self, i);
    }
}
| 0
|
[
"CWE-787"
] |
kitty
|
82c137878c2b99100a3cdc1c0f0efea069313901
| 45,913,130,355,492,445,000,000,000,000,000,000,000
| 13
|
Graphics protocol: Dont return filename in the error message when opening file fails, since filenames can contain control characters
Fixes #3128
|
/*
 * Parse the NE resource table into bin->resources: each type-info entry
 * becomes an r_ne_resource with a list of r_ne_resource_entry items.
 * A type id of 0 terminates the table. Always returns true; the list
 * may be partially filled on allocation failure.
 */
static bool __ne_get_resources(r_bin_ne_obj_t *bin) {
	if (!bin->resources) {
		bin->resources = r_list_newf (__free_resource);
	}
	ut16 resoff = bin->ne_header->ResTableOffset + bin->header_offset;
	// first word: shift count — resource offsets/lengths are in units
	// of (1 << alignment)
	ut16 alignment = r_buf_read_le16_at (bin->buf, resoff);
	ut32 off = resoff + 2;
	while (true) {
		NE_image_typeinfo_entry ti = {0};
		r_ne_resource *res = R_NEW0 (r_ne_resource);
		if (!res) {
			break;
		}
		res->entry = r_list_newf (__free_resource_entry);
		if (!res->entry) {
			// fix: res was leaked here when the list allocation failed
			free (res);
			break;
		}
		r_buf_read_at (bin->buf, off, (ut8 *)&ti, sizeof (ti));
		if (!ti.rtTypeID) {
			// End of table (or failed read, since ti stays zeroed).
			// fix: this res is never appended to bin->resources,
			// so release it here instead of leaking it.
			r_list_free (res->entry);
			free (res);
			break;
		} else if (ti.rtTypeID & 0x8000) {
			res->name = __resource_type_str (ti.rtTypeID & ~0x8000);
		} else {
			// Offset to resident name table
			res->name = __read_nonnull_str_at (bin->buf, (ut64)resoff + ti.rtTypeID);
		}
		off += sizeof (NE_image_typeinfo_entry);
		int i;
		for (i = 0; i < ti.rtResourceCount; i++) {
			NE_image_nameinfo_entry ni;
			r_ne_resource_entry *ren = R_NEW0 (r_ne_resource_entry);
			if (!ren) {
				break;
			}
			r_buf_read_at (bin->buf, off, (ut8 *)&ni, sizeof (NE_image_nameinfo_entry));
			ren->offset = ni.rnOffset << alignment;
			ren->size = ni.rnLength;
			if (ni.rnID & 0x8000) {
				ren->name = r_str_newf ("%d", ni.rnID & ~0x8000);
			} else {
				// Offset to resident name table
				ren->name = __read_nonnull_str_at (bin->buf, (ut64)resoff + ni.rnID);
			}
			r_list_append (res->entry, ren);
			off += sizeof (NE_image_nameinfo_entry);
		}
		r_list_append (bin->resources, res);
	}
	return true;
}
| 0
|
[
"CWE-476"
] |
radare2
|
18d1d064bf599a255d55f09fca3104776fc34a67
| 107,263,151,557,833,500,000,000,000,000,000,000,000
| 50
|
Fix null deref in the ne parser ##crash
* Reported by @hmsec via huntr.dev
* Reproducer: nepoc00
* BountyID: bfeb8fb8-644d-4587-80d4-cb704c404013
|
/*
 * Record `protocol` as the detected protocol for `flow`. TLS seen over
 * UDP is reclassified as DTLS. If the protocol is already on the flow's
 * detection stack, only extra-packet processing is (re)armed. Plain TLS
 * is refined to a more specific master protocol before being set.
 */
static void ndpi_int_tls_add_connection(struct ndpi_detection_module_struct *ndpi_struct,
					struct ndpi_flow_struct *flow, u_int32_t protocol) {
#if DEBUG_TLS
  printf("[TLS] %s()\n", __FUNCTION__);
#endif

  /* TLS carried over UDP is DTLS */
  if((flow->packet.udp != NULL) && (protocol == NDPI_PROTOCOL_TLS))
    protocol = NDPI_PROTOCOL_DTLS;

  /* already classified: just make sure extra-packet processing runs */
  if((flow->detected_protocol_stack[0] == protocol)
     || (flow->detected_protocol_stack[1] == protocol)) {
    if(!flow->check_extra_packets)
      tlsInitExtraPacketProcessing(ndpi_struct, flow);
    return;
  }

  /* equivalent to the old `if(x != TLS) ; else refine` empty-branch form */
  if(protocol == NDPI_PROTOCOL_TLS)
    protocol = ndpi_tls_refine_master_protocol(ndpi_struct, flow, protocol);

  ndpi_set_detected_protocol(ndpi_struct, flow, protocol, protocol);
  tlsInitExtraPacketProcessing(ndpi_struct, flow);
}
| 0
|
[
"CWE-787"
] |
nDPI
|
1ec621c85b9411cc611652fd57a892cfef478af3
| 328,390,623,859,708,170,000,000,000,000,000,000,000
| 25
|
Added further checks
|
/*
 * Advance |cc| over one extended grapheme cluster in non-UTF mode,
 * consulting the Unicode grapheme-break property table. Helper invoked
 * from JIT-compiled match code. Returns a pointer just past the cluster.
 */
static PCRE2_SPTR SLJIT_FUNC do_extuni_no_utf(jit_arguments *args, PCRE2_SPTR cc)
{
  PCRE2_SPTR start_subject = args->begin;
  PCRE2_SPTR end_subject = args->end;
  int lgb, rgb, ricount;
  PCRE2_SPTR bptr;
  uint32_t c;

  /* seed the left-hand grapheme-break property from the first code unit */
  c = *cc++;
  lgb = UCD_GRAPHBREAK(c);

  while (cc < end_subject)
    {
    c = *cc;
    rgb = UCD_GRAPHBREAK(c);
    /* table bit clear => a grapheme boundary between lgb and rgb */
    if ((PRIV(ucp_gbtable)[lgb] & (1 << rgb)) == 0) break;

    /* Not breaking between Regional Indicators is allowed only if there
    are an even number of preceding RIs. */

    if (lgb == ucp_gbRegionalIndicator && rgb == ucp_gbRegionalIndicator)
      {
      ricount = 0;
      bptr = cc - 1;

      /* bptr is pointing to the left-hand character */

      while (bptr > start_subject)
        {
        bptr--;
        c = *bptr;
        if (UCD_GRAPHBREAK(c) != ucp_gbRegionalIndicator) break;
        ricount++;
        }
      if ((ricount & 1) != 0) break;  /* Grapheme break required */
      }

    /* If Extend or ZWJ follows Extended_Pictographic, do not update lgb; this
    allows any number of them before a following Extended_Pictographic. */

    if ((rgb != ucp_gbExtend && rgb != ucp_gbZWJ) ||
        lgb != ucp_gbExtended_Pictographic)
      lgb = rgb;

    cc++;
    }
  return cc;
}
| 0
|
[
"CWE-125"
] |
php-src
|
8947fd9e9fdce87cd6c59817b1db58e789538fe9
| 53,992,940,305,778,060,000,000,000,000,000,000,000
| 52
|
Fix #78338: Array cross-border reading in PCRE
We backport r1092 from pcre2.
|
/* Free a UsernameLookupOperation: drop the GObject references it holds
 * on the manager and display, then release the owned username string
 * and the operation struct itself. */
destroy_username_lookup_operation (UsernameLookupOperation *operation)
{
        g_object_unref (operation->manager);
        g_object_unref (operation->display);
        g_free (operation->username);
        g_free (operation);
}
| 0
|
[] |
gdm
|
ff98b2817014684ae1acec78ff06f0f461a56a9f
| 166,334,774,547,562,000,000,000,000,000,000,000,000
| 7
|
manager: if falling back to X11 retry autologin
Right now, we get one shot to autologin. If it fails, we fall back to
the greeter. We should give it another go if the reason for the failure
was wayland fallback to X.
https://bugzilla.gnome.org/show_bug.cgi?id=780520
|
/*
 * Handle the add/subtract operator (CTRL-A / CTRL-X): apply do_addsub()
 * either at the cursor position (no Visual selection) or to every line
 * of the Visual selection, with the column range adjusted per selection
 * mode. With "g" (g_cmd set) the amount increases by Prenum1 for each
 * changed line, producing an increasing sequence.
 */
op_addsub(
    oparg_T	*oap,
    linenr_T	Prenum1,	    // Amount of add/subtract
    int		g_cmd)		    // was g<c-a>/g<c-x>
{
    pos_T		pos;
    struct block_def	bd;
    int			change_cnt = 0;
    linenr_T		amount = Prenum1;

    // do_addsub() might trigger re-evaluation of 'foldexpr' halfway, when the
    // buffer is not completely updated yet. Postpone updating folds until before
    // the call to changed_lines().
#ifdef FEAT_FOLDING
    disable_fold_update++;
#endif

    if (!VIsual_active)
    {
	pos = curwin->w_cursor;
	if (u_save_cursor() == FAIL)
	{
#ifdef FEAT_FOLDING
	    disable_fold_update--;
#endif
	    return;
	}
	change_cnt = do_addsub(oap->op_type, &pos, 0, amount);
#ifdef FEAT_FOLDING
	disable_fold_update--;
#endif
	if (change_cnt)
	    changed_lines(pos.lnum, 0, pos.lnum + 1, 0L);
    }
    else
    {
	int	one_change;
	int	length;
	pos_T	startpos;

	if (u_save((linenr_T)(oap->start.lnum - 1),
					(linenr_T)(oap->end.lnum + 1)) == FAIL)
	{
#ifdef FEAT_FOLDING
	    disable_fold_update--;
#endif
	    return;
	}

	pos = oap->start;
	for (; pos.lnum <= oap->end.lnum; ++pos.lnum)
	{
	    if (oap->block_mode)		    // Visual block mode
	    {
		block_prep(oap, &bd, pos.lnum, FALSE);
		pos.col = bd.textcol;
		length = bd.textlen;
	    }
	    else if (oap->motion_type == MLINE)
	    {
		// linewise: whole line, starting at column 0
		curwin->w_cursor.col = 0;
		pos.col = 0;
		length = (colnr_T)STRLEN(ml_get(pos.lnum));
	    }
	    else				    // oap->motion_type == MCHAR
	    {
		// exclusive charwise motion: back the end up by one
		if (pos.lnum == oap->start.lnum && !oap->inclusive)
		    dec(&(oap->end));
		length = (colnr_T)STRLEN(ml_get(pos.lnum));
		pos.col = 0;
		if (pos.lnum == oap->start.lnum)
		{
		    // first line: start at the motion's start column
		    pos.col += oap->start.col;
		    length -= oap->start.col;
		}
		if (pos.lnum == oap->end.lnum)
		{
		    // last line: clamp the end column to the line length
		    length = (int)STRLEN(ml_get(oap->end.lnum));
		    if (oap->end.col >= length)
			oap->end.col = length - 1;
		    length = oap->end.col - pos.col + 1;
		}
	    }
	    one_change = do_addsub(oap->op_type, &pos, length, amount);
	    if (one_change)
	    {
		// Remember the start position of the first change.
		if (change_cnt == 0)
		    startpos = curbuf->b_op_start;
		++change_cnt;
	    }

#ifdef FEAT_NETBEANS_INTG
	    if (netbeans_active() && one_change)
	    {
		char_u *ptr;

		netbeans_removed(curbuf, pos.lnum, pos.col, (long)length);
		ptr = ml_get_buf(curbuf, pos.lnum, FALSE);
		netbeans_inserted(curbuf, pos.lnum, pos.col,
						&ptr[pos.col], length);
	    }
#endif
	    // with "g" the added amount is cumulative per changed line
	    if (g_cmd && one_change)
		amount += Prenum1;
	}

#ifdef FEAT_FOLDING
	disable_fold_update--;
#endif
	if (change_cnt)
	    changed_lines(oap->start.lnum, 0, oap->end.lnum + 1, 0L);

	if (!change_cnt && oap->is_VIsual)
	    // No change: need to remove the Visual selection
	    redraw_curbuf_later(UPD_INVERTED);

	// Set '[ mark if something changed. Keep the last end
	// position from do_addsub().
	if (change_cnt > 0 && (cmdmod.cmod_flags & CMOD_LOCKMARKS) == 0)
	    curbuf->b_op_start = startpos;

	if (change_cnt > p_report)
	    smsg(NGETTEXT("%d line changed", "%d lines changed",
						    change_cnt), change_cnt);
    }
}
| 0
|
[
"CWE-122"
] |
vim
|
c249913edc35c0e666d783bfc21595cf9f7d9e0d
| 71,826,902,960,952,410,000,000,000,000,000,000,000
| 127
|
patch 9.0.0483: illegal memory access when replacing in virtualedit mode
Problem: Illegal memory access when replacing in virtualedit mode.
Solution: Check for replacing NUL after Tab.
|
/*
 * Load and execute a compiled (.pyc) file: verify the magic number,
 * skip the remaining header words, unmarshal the code object and run it
 * with PyEval_EvalCode(). Takes ownership of |fp| and closes it on
 * every path. Returns the evaluation result, or NULL with an exception
 * set on error. |filename| is currently unused here.
 */
run_pyc_file(FILE *fp, const char *filename, PyObject *globals,
             PyObject *locals, PyCompilerFlags *flags)
{
    PyCodeObject *co;
    PyObject *v;
    long magic;
    long PyImport_GetMagicNumber(void);

    magic = PyMarshal_ReadLongFromFile(fp);
    if (magic != PyImport_GetMagicNumber()) {
        if (!PyErr_Occurred())
            PyErr_SetString(PyExc_RuntimeError,
                            "Bad magic number in .pyc file");
        goto error;
    }
    /* Skip the rest of the header. */
    (void) PyMarshal_ReadLongFromFile(fp);
    (void) PyMarshal_ReadLongFromFile(fp);
    (void) PyMarshal_ReadLongFromFile(fp);
    /* the reads above report failure only through the error indicator */
    if (PyErr_Occurred()) {
        goto error;
    }
    v = PyMarshal_ReadLastObjectFromFile(fp);
    if (v == NULL || !PyCode_Check(v)) {
        Py_XDECREF(v);
        PyErr_SetString(PyExc_RuntimeError,
                        "Bad code object in .pyc file");
        goto error;
    }
    fclose(fp);
    co = (PyCodeObject *)v;
    v = PyEval_EvalCode((PyObject*)co, globals, locals);
    /* propagate compile flags (PyCF_MASK bits) back to the caller */
    if (v && flags)
        flags->cf_flags |= (co->co_flags & PyCF_MASK);
    Py_DECREF(co);
    return v;
error:
    fclose(fp);
    return NULL;
}
| 0
|
[
"CWE-125"
] |
cpython
|
dcfcd146f8e6fc5c2fc16a4c192a0c5f5ca8c53c
| 149,567,003,224,221,550,000,000,000,000,000,000,000
| 40
|
bpo-35766: Merge typed_ast back into CPython (GH-11645)
|
// Validate a decoded container length before it is used for allocation.
// A negative size is always rejected; when a container limit has been
// configured (non-zero), sizes above it are rejected as well.
void BinaryProtocolReader::checkContainerSize(int32_t size) {
  if (size < 0) {
    TProtocolException::throwNegativeSize();
  } else {
    const bool overLimit =
        this->container_limit_ && size > this->container_limit_;
    if (overLimit) {
      TProtocolException::throwExceededSizeLimit();
    }
  }
}
| 0
|
[
"CWE-703",
"CWE-770"
] |
fbthrift
|
c9a903e5902834e95bbd4ab0e9fa53ba0189f351
| 160,741,907,337,064,320,000,000,000,000,000,000,000
| 7
|
Better handling of truncated data when reading strings
Summary:
Currently we read string size and blindly pre-allocate it. This allows malicious attacker to send a few bytes message and cause server to allocate huge amount of memory (>1GB).
This diff changes the logic to check if we have enough data in the buffer before allocating the string.
This is a second part of a fix for CVE-2019-3553.
Reviewed By: vitaut
Differential Revision: D14393393
fbshipit-source-id: e2046d2f5b087d3abc9a9d2c6c107cf088673057
|
/*
 * Launch the authentication/session helper process for a new client.
 *
 * Picks a configuration section based on the requested host, the
 * configured "action" and the authentication type, builds credentials
 * from the HTTP authorization data, spawns the configured command over a
 * transport and wraps it in a CockpitSession.  Returns NULL and sets
 * @error when the action is disabled or the process cannot be started.
 */
cockpit_session_launch (CockpitAuth *self,
                        GIOStream *connection,
                        GHashTable *headers,
                        const gchar *type,
                        const gchar *authorization,
                        const gchar *application,
                        GError **error)
{
  CockpitTransport *transport = NULL;
  CockpitSession *session = NULL;
  CockpitCreds *creds = NULL;
  const gchar *host;
  const gchar *action;
  const gchar *command;
  const gchar *section;
  const gchar *program_default;
  gchar **env = g_get_environ ();
  const gchar *argv[] = {
      "command",
      "host",
      NULL,
  };
  host = application_parse_host (application);
  action = type_option (type, "action", "localhost");
  if (g_strcmp0 (action, ACTION_NONE) == 0)
    {
      g_set_error (error, COCKPIT_ERROR, COCKPIT_ERROR_AUTHENTICATION_FAILED,
                   "Authentication disabled");
      goto out;
    }
  /* These are the credentials we'll carry around for this session */
  /* NOTE(review): creds is not checked for NULL before being handed to
   * cockpit_session_create() below — confirm build_session_credentials()
   * cannot fail here, or that downstream code tolerates NULL. */
  creds = build_session_credentials (self, connection, headers,
                                     application, type, authorization);
  /* Choose the config section that governs command and timeouts:
   * anything that goes over SSH (explicit host, loopback basic auth,
   * or a configured "ssh" action) shares the SSH section. */
  if (host)
    section = COCKPIT_CONF_SSH_SECTION;
  else if (self->login_loopback && g_strcmp0 (type, "basic") == 0)
    section = COCKPIT_CONF_SSH_SECTION;
  else if (g_strcmp0 (action, ACTION_SSH) == 0)
    section = COCKPIT_CONF_SSH_SECTION;
  else
    section = type;
  if (g_strcmp0 (section, COCKPIT_CONF_SSH_SECTION) == 0)
    {
      if (!host)
        host = type_option (COCKPIT_CONF_SSH_SECTION, "host", "127.0.0.1");
      program_default = cockpit_ws_ssh_program;
    }
  else
    {
      program_default = cockpit_ws_session_program;
    }
  command = type_option (section, "command", program_default);
  if (cockpit_creds_get_rhost (creds))
    {
      /* Let the child process know the client's address. */
      env = g_environ_setenv (env, "COCKPIT_REMOTE_PEER",
                              cockpit_creds_get_rhost (creds),
                              TRUE);
    }
  argv[0] = command;
  argv[1] = host ? host : "localhost";
  transport = session_start_process (argv, (const gchar **)env);
  if (!transport)
    {
      g_set_error (error, COCKPIT_ERROR, COCKPIT_ERROR_FAILED,
                   "Authentication failed to start");
      goto out;
    }
  session = cockpit_session_create (self, argv[0], creds, transport);
  /* How long to wait for the auth process to send some data */
  session->authorize_timeout = timeout_option ("timeout", section, cockpit_ws_auth_process_timeout);
  /* How long to wait for a response from the client to a auth prompt */
  session->client_timeout = timeout_option ("response-timeout", section, cockpit_ws_auth_response_timeout);
out:
  g_strfreev (env);
  if (creds)
    cockpit_creds_unref (creds);
  if (transport)
    g_object_unref (transport);
  return session;
}
| 0
|
[] |
cockpit
|
c51f6177576d7e12614c64d316cf0b67addd17c9
| 327,610,702,033,880,370,000,000,000,000,000,000,000
| 97
|
ws: Fix bug parsing invalid base64 headers
The len parameter to g_base64_decode_inplace() is a inout
parameter, and needs to be initialized. Lets just use
the simpler g_base64_decode() function. This fixes a segfault.
Closes #10819
|
// Binds the output range for a codecvt conversion step: to_next tracks the
// current write position and to_end marks one past the last writable slot.
// NOTE(review): _to_next is taken by reference, presumably so the caller's
// pointer advances as the conversion writes — confirm the member types.
customcodecvt_do_conversion_writer(char_t_to*& _to_next, char_t_to* _to_end) :
	to_next(_to_next),
	to_end(_to_end)
{}
| 0
|
[
"CWE-200"
] |
wesnoth
|
f8914468182e8d0a1551b430c0879ba236fe4d6d
| 217,795,662,884,361,330,000,000,000,000,000,000,000
| 4
|
Disallow inclusion of .pbl files from WML (bug #23504)
Note that this will also cause Lua wesnoth.have_file() to return false
on .pbl files.
|
/**
 * Parse a nested array/object destructuring pattern inside an outer
 * pattern.
 *
 * Propagates the binding-related flags of the outer pattern, consults the
 * scanner's look-ahead info to decide whether this sub-pattern has a
 * default initializer and/or an object rest element, emits the opcode that
 * produces the right-hand-side value, parses the initializer itself, and
 * finally pops the consumed value from the stack.
 */
parser_pattern_process_nested_pattern (parser_context_t *context_p, /**< context */
                                       parser_pattern_flags_t flags, /**< flags */
                                       uint16_t rhs_opcode, /**< opcode to process the rhs value */
                                       uint16_t literal_index) /**< literal index for object pattern */
{
  JERRY_ASSERT (context_p->token.type == LEXER_LEFT_BRACE || context_p->token.type == LEXER_LEFT_SQUARE);

  parser_pattern_flags_t options = (PARSER_PATTERN_NESTED_PATTERN
                                    | PARSER_PATTERN_TARGET_ON_STACK
                                    | (flags & (PARSER_PATTERN_BINDING
                                                | PARSER_PATTERN_LET
                                                | PARSER_PATTERN_CONST
                                                | PARSER_PATTERN_LOCAL
                                                | PARSER_PATTERN_ARGUMENTS)));

  JERRY_ASSERT (context_p->next_scanner_info_p->source_p != context_p->source_p
                || context_p->next_scanner_info_p->type == SCANNER_TYPE_INITIALIZER
                || context_p->next_scanner_info_p->type == SCANNER_TYPE_LITERAL_FLAGS);

  /* Only act on scanner info recorded for exactly this source position. */
  if (context_p->next_scanner_info_p->source_p == context_p->source_p)
  {
    if (context_p->next_scanner_info_p->type == SCANNER_TYPE_INITIALIZER)
    {
      if (context_p->next_scanner_info_p->u8_arg & SCANNER_LITERAL_OBJECT_HAS_REST)
      {
        options |= PARSER_PATTERN_HAS_REST_ELEMENT;
      }

      if (!(flags & PARSER_PATTERN_REST_ELEMENT))
      {
        /* Non-rest target: a default initializer follows. */
        options |= PARSER_PATTERN_TARGET_DEFAULT;
      }
      else
      {
        /* NOTE(review): for rest-element targets the initializer info is
         * discarded here — presumably it is consumed elsewhere for the
         * non-rest path; confirm against the scanner implementation. */
        scanner_release_next (context_p, sizeof (scanner_location_info_t));
      }
    }
    else
    {
      if (context_p->next_scanner_info_p->u8_arg & SCANNER_LITERAL_OBJECT_HAS_REST)
      {
        options |= PARSER_PATTERN_HAS_REST_ELEMENT;
      }
      scanner_release_next (context_p, sizeof (scanner_info_t));
    }
  }

  parser_pattern_emit_rhs (context_p, rhs_opcode, literal_index);

  if (context_p->token.type == LEXER_LEFT_BRACE)
  {
    parser_parse_object_initializer (context_p, options);
  }
  else
  {
    parser_parse_array_initializer (context_p, options);
  }

  /* Drop the rhs value that the initializer parsing consumed. */
  parser_emit_cbc (context_p, CBC_POP);
} /* parser_pattern_process_nested_pattern */
| 0
|
[
"CWE-416"
] |
jerryscript
|
3bcd48f72d4af01d1304b754ef19fe1a02c96049
| 230,783,752,615,625,840,000,000,000,000,000,000,000
| 60
|
Improve parse_identifier (#4691)
Ascii string length is no longer computed during string allocation.
JerryScript-DCO-1.0-Signed-off-by: Daniel Batiz [email protected]
|
/* Report the CPU a runqueue belongs to, or -1 for a NULL runqueue. */
int sched_trace_rq_cpu(struct rq *rq)
{
	if (!rq)
		return -1;

	return cpu_of(rq);
}
| 0
|
[
"CWE-400",
"CWE-703"
] |
linux
|
de53fd7aedb100f03e5d2231cfce0e4993282425
| 130,117,345,803,633,090,000,000,000,000,000,000,000
| 4
|
sched/fair: Fix low cpu usage with high throttling by removing expiration of cpu-local slices
It has been observed, that highly-threaded, non-cpu-bound applications
running under cpu.cfs_quota_us constraints can hit a high percentage of
periods throttled while simultaneously not consuming the allocated
amount of quota. This use case is typical of user-interactive non-cpu
bound applications, such as those running in kubernetes or mesos when
run on multiple cpu cores.
This has been root caused to cpu-local run queue being allocated per cpu
bandwidth slices, and then not fully using that slice within the period.
At which point the slice and quota expires. This expiration of unused
slice results in applications not being able to utilize the quota for
which they are allocated.
The non-expiration of per-cpu slices was recently fixed by
'commit 512ac999d275 ("sched/fair: Fix bandwidth timer clock drift
condition")'. Prior to that it appears that this had been broken since
at least 'commit 51f2176d74ac ("sched/fair: Fix unlocked reads of some
cfs_b->quota/period")' which was introduced in v3.16-rc1 in 2014. That
added the following conditional which resulted in slices never being
expired.
if (cfs_rq->runtime_expires != cfs_b->runtime_expires) {
/* extend local deadline, drift is bounded above by 2 ticks */
cfs_rq->runtime_expires += TICK_NSEC;
Because this was broken for nearly 5 years, and has recently been fixed
and is now being noticed by many users running kubernetes
(https://github.com/kubernetes/kubernetes/issues/67577) it is my opinion
that the mechanisms around expiring runtime should be removed
altogether.
This allows quota already allocated to per-cpu run-queues to live longer
than the period boundary. This allows threads on runqueues that do not
use much CPU to continue to use their remaining slice over a longer
period of time than cpu.cfs_period_us. However, this helps prevent the
above condition of hitting throttling while also not fully utilizing
your cpu quota.
This theoretically allows a machine to use slightly more than its
allotted quota in some periods. This overflow would be bounded by the
remaining quota left on each per-cpu runqueueu. This is typically no
more than min_cfs_rq_runtime=1ms per cpu. For CPU bound tasks this will
change nothing, as they should theoretically fully utilize all of their
quota in each period. For user-interactive tasks as described above this
provides a much better user/application experience as their cpu
utilization will more closely match the amount they requested when they
hit throttling. This means that cpu limits no longer strictly apply per
period for non-cpu bound applications, but that they are still accurate
over longer timeframes.
This greatly improves performance of high-thread-count, non-cpu bound
applications with low cfs_quota_us allocation on high-core-count
machines. In the case of an artificial testcase (10ms/100ms of quota on
80 CPU machine), this commit resulted in almost 30x performance
improvement, while still maintaining correct cpu quota restrictions.
That testcase is available at https://github.com/indeedeng/fibtest.
Fixes: 512ac999d275 ("sched/fair: Fix bandwidth timer clock drift condition")
Signed-off-by: Dave Chiluk <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Reviewed-by: Phil Auld <[email protected]>
Reviewed-by: Ben Segall <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: John Hammond <[email protected]>
Cc: Jonathan Corbet <[email protected]>
Cc: Kyle Anderson <[email protected]>
Cc: Gabriel Munos <[email protected]>
Cc: Peter Oskolkov <[email protected]>
Cc: Cong Wang <[email protected]>
Cc: Brendan Gregg <[email protected]>
Link: https://lkml.kernel.org/r/[email protected]
|
// Registration for the reference (portable) FLOOR kernel.  The init and
// free hooks are unused and therefore left as nullptr.
TfLiteRegistration* Register_FLOOR_REF() {
  static TfLiteRegistration registration = {
      /*init=*/nullptr,
      /*free=*/nullptr,
      /*prepare=*/floor::Prepare,
      /*invoke=*/floor::Eval<floor::kReference>,
  };
  return &registration;
}
| 0
|
[
"CWE-125",
"CWE-787"
] |
tensorflow
|
1970c2158b1ffa416d159d03c3370b9a462aee35
| 82,116,123,372,451,240,000,000,000,000,000,000,000
| 6
|
[tflite]: Insert `nullptr` checks when obtaining tensors.
As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages.
We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`).
PiperOrigin-RevId: 332521299
Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
|
/*
 * Record body characteristics (sensor format, body mount) for a Canon
 * camera model ID in imgdata.lens.makernotes.
 *
 * Ordering matters: the explicit APS-H, full-frame and EF-M ID lists must
 * be tested before the generic `id > 0x80000000` branch, because those
 * IDs also satisfy that generic test.  IDs below 0x80000000 other than
 * the D30/D60 entries are treated as fixed-lens cameras.
 */
void CLASS setCanonBodyFeatures (unsigned id)
{
  imgdata.lens.makernotes.CamID = id;
  if (
      (id == 0x80000001) || // 1D
      (id == 0x80000174) || // 1D2
      (id == 0x80000232) || // 1D2N
      (id == 0x80000169) || // 1D3
      (id == 0x80000281)    // 1D4
      )
    {
      imgdata.lens.makernotes.CameraFormat = LIBRAW_FORMAT_APSH;
      imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Canon_EF;
    }
  else
    if (
        (id == 0x80000167) || // 1Ds
        (id == 0x80000188) || // 1Ds2
        (id == 0x80000215) || // 1Ds3
        (id == 0x80000213) || // 5D
        (id == 0x80000218) || // 5D2
        (id == 0x80000285) || // 5D3
        (id == 0x80000302) || // 6D
        (id == 0x80000269) || // 1DX
        (id == 0x80000324) || // 1DC
        (id == 0x80000382) || // 5DS
        (id == 0x80000401)    // 5DS R
        )
      {
        imgdata.lens.makernotes.CameraFormat = LIBRAW_FORMAT_FF;
        imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Canon_EF;
      }
    else
      if (
          (id == 0x80000331) || // M
          (id == 0x80000355) || // M2
          (id == 0x80000374)    // M3
          )
        {
          imgdata.lens.makernotes.CameraFormat = LIBRAW_FORMAT_APSC;
          imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Canon_EF_M;
        }
      else
        if (
            (id == 0x01140000) || // D30
            (id == 0x01668000) || // D60
            (id > 0x80000000)     // generic EF-mount catch-all (APS-C)
            )
          {
            imgdata.lens.makernotes.CameraFormat = LIBRAW_FORMAT_APSC;
            imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Canon_EF;
            imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Unknown;
          }
        else
          {
            /* Everything else is a fixed-lens (compact) camera. */
            imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_FixedLens;
            imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_FixedLens;
          }
  return;
}
| 0
|
[
"CWE-129"
] |
LibRaw
|
89d065424f09b788f443734d44857289489ca9e2
| 135,421,771,902,347,110,000,000,000,000,000,000,000
| 61
|
fixed two more problems found by fuzzer
|
/*
 * ioctl handler for IEEE 802.15.4 datagram sockets.
 *
 * SIOCOUTQ reports the number of bytes queued for transmission;
 * SIOCINQ reports the payload length of the next queued datagram with
 * its header stripped, since a datagram read never returns more than
 * one packet.  Anything else yields -ENOIOCTLCMD.
 */
static int dgram_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	switch (cmd) {
	case SIOCOUTQ:
	{
		int amount = sk_wmem_alloc_get(sk);

		return put_user(amount, (int __user *)arg);
	}

	case SIOCINQ:
	{
		struct sk_buff *skb;
		unsigned long amount;

		amount = 0;
		/* Peek under the queue lock so the skb cannot be freed
		 * while its length is being read. */
		spin_lock_bh(&sk->sk_receive_queue.lock);
		skb = skb_peek(&sk->sk_receive_queue);
		if (skb) {
			/* We will only return the amount
			 * of this packet since that is all
			 * that will be read.
			 */
			amount = skb->len - ieee802154_hdr_length(skb);
		}
		spin_unlock_bh(&sk->sk_receive_queue.lock);
		return put_user(amount, (int __user *)arg);
	}
	}

	return -ENOIOCTLCMD;
}
| 0
|
[
"CWE-276"
] |
linux
|
e69dbd4619e7674c1679cba49afd9dd9ac347eef
| 97,179,828,759,338,380,000,000,000,000,000,000,000
| 32
|
ieee802154: enforce CAP_NET_RAW for raw sockets
When creating a raw AF_IEEE802154 socket, CAP_NET_RAW needs to be
checked first.
Signed-off-by: Ori Nimron <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
Acked-by: Stefan Schmidt <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
/*
 * Detach a framebuffer's device node without unregistering it.
 * Returns -EINVAL when the node index is out of range or does not
 * refer to this fb_info.
 */
int unlink_framebuffer(struct fb_info *fb_info)
{
	int node = fb_info->node;

	if (node < 0 || node >= FB_MAX || registered_fb[node] != fb_info)
		return -EINVAL;

	if (fb_info->dev) {
		device_destroy(fb_class, MKDEV(FB_MAJOR, node));
		fb_info->dev = NULL;
	}

	return 0;
}
| 0
|
[
"CWE-703",
"CWE-189"
] |
linux
|
fc9bbca8f650e5f738af8806317c0a041a48ae4a
| 56,238,790,406,895,140,000,000,000,000,000,000,000
| 14
|
vm: convert fb_mmap to vm_iomap_memory() helper
This is my example conversion of a few existing mmap users. The
fb_mmap() case is a good example because it is a bit more complicated
than some: fb_mmap() mmaps one of two different memory areas depending
on the page offset of the mmap (but happily there is never any mixing of
the two, so the helper function still works).
Signed-off-by: Linus Torvalds <[email protected]>
|
/* Return a newly-allocated copy of STRING wrapped in single quotes, with
   every embedded single quote turned into the shell-safe sequence '\''.
   Worst case each input character expands to four output characters,
   plus the two surrounding quotes and the terminating NUL.  The caller
   owns (and must free) the result. */
sh_single_quote (string)
     const char *string;
{
  register int ch;
  char *buf, *dst;
  const char *src;

  buf = (char *)xmalloc (3 + (4 * strlen (string)));
  dst = buf;
  *dst++ = '\'';

  for (src = string; src && (ch = *src); src++)
    {
      *dst++ = ch;

      if (ch == '\'')
	{
	  *dst++ = '\\';	/* insert escaped single quote */
	  *dst++ = '\'';
	  *dst++ = '\'';	/* start new quoted string */
	}
    }

  *dst++ = '\'';
  *dst = '\0';

  return (buf);
}
| 0
|
[] |
bash
|
863d31ae775d56b785dc5b0105b6d251515d81d5
| 60,710,741,800,373,180,000,000,000,000,000,000,000
| 28
|
commit bash-20120224 snapshot
|
/*
 * Transmit one datagram on an IPv4 RAW socket.
 *
 * Validates the destination (msg_name, or the stored peer for connected
 * sockets), collects control-message data (IP options, outgoing
 * interface), resolves a route, then either hands the caller-built packet
 * to raw_send_hdrinc() (IP_HDRINCL sockets) or appends the payload via
 * ip_append_data() and pushes the resulting frames.  Returns the number
 * of bytes sent or a negative errno.
 */
static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		       size_t len)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ipcm_cookie ipc;
	struct rtable *rt = NULL;
	int free = 0;
	__be32 daddr;
	__be32 saddr;
	u8 tos;
	int err;

	/* The IPv4 total-length field is 16 bits wide. */
	err = -EMSGSIZE;
	if (len > 0xFFFF)
		goto out;

	/*
	 *	Check the flags.
	 */
	err = -EOPNOTSUPP;
	if (msg->msg_flags & MSG_OOB)	/* Mirror BSD error message */
		goto out;		/* compatibility */

	/*
	 *	Get and verify the address.
	 */
	if (msg->msg_namelen) {
		struct sockaddr_in *usin = (struct sockaddr_in *)msg->msg_name;
		err = -EINVAL;
		if (msg->msg_namelen < sizeof(*usin))
			goto out;
		if (usin->sin_family != AF_INET) {
			static int complained;
			if (!complained++)
				printk(KERN_INFO "%s forgot to set AF_INET in "
						 "raw sendmsg. Fix it!\n",
				       current->comm);
			err = -EAFNOSUPPORT;
			if (usin->sin_family)
				goto out;
		}
		daddr = usin->sin_addr.s_addr;
		/* ANK: I did not forget to get protocol from port field.
		 * I just do not know, who uses this weirdness.
		 * IP_HDRINCL is much more convenient.
		 */
	} else {
		err = -EDESTADDRREQ;
		if (sk->sk_state != TCP_ESTABLISHED)
			goto out;
		daddr = inet->inet_daddr;
	}

	ipc.addr = inet->inet_saddr;
	ipc.opt = NULL;
	ipc.tx_flags = 0;
	ipc.oif = sk->sk_bound_dev_if;

	if (msg->msg_controllen) {
		err = ip_cmsg_send(sock_net(sk), msg, &ipc);
		if (err)
			goto out;
		if (ipc.opt)
			free = 1;	/* options came from a cmsg; kfree later */
	}

	saddr = ipc.addr;
	ipc.addr = daddr;

	if (!ipc.opt)
		/* NOTE(review): inet->opt is read here without any visible
		 * synchronization against a concurrent setsockopt(IP_OPTIONS)
		 * update — confirm how that pointer is protected. */
		ipc.opt = inet->opt;

	if (ipc.opt) {
		err = -EINVAL;
		/* Linux does not mangle headers on raw sockets,
		 * so that IP options + IP_HDRINCL is non-sense.
		 */
		if (inet->hdrincl)
			goto done;
		if (ipc.opt->srr) {
			/* Source routing: first hop replaces the destination. */
			if (!daddr)
				goto done;
			daddr = ipc.opt->faddr;
		}
	}
	tos = RT_CONN_FLAGS(sk);
	if (msg->msg_flags & MSG_DONTROUTE)
		tos |= RTO_ONLINK;

	if (ipv4_is_multicast(daddr)) {
		if (!ipc.oif)
			ipc.oif = inet->mc_index;
		if (!saddr)
			saddr = inet->mc_addr;
	}

	{
		struct flowi4 fl4;

		flowi4_init_output(&fl4, ipc.oif, sk->sk_mark, tos,
				   RT_SCOPE_UNIVERSE,
				   inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol,
				   FLOWI_FLAG_CAN_SLEEP, daddr, saddr, 0, 0);

		if (!inet->hdrincl) {
			err = raw_probe_proto_opt(&fl4, msg);
			if (err)
				goto done;
		}

		security_sk_classify_flow(sk, flowi4_to_flowi(&fl4));
		rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
		if (IS_ERR(rt)) {
			err = PTR_ERR(rt);
			rt = NULL;
			goto done;
		}
	}

	err = -EACCES;
	if (rt->rt_flags & RTCF_BROADCAST && !sock_flag(sk, SOCK_BROADCAST))
		goto done;

	if (msg->msg_flags & MSG_CONFIRM)
		goto do_confirm;
back_from_confirm:

	if (inet->hdrincl)
		err = raw_send_hdrinc(sk, msg->msg_iov, len,
				      &rt, msg->msg_flags);
	else {
		if (!ipc.addr)
			ipc.addr = rt->rt_dst;
		lock_sock(sk);
		err = ip_append_data(sk, ip_generic_getfrag, msg->msg_iov, len, 0,
				     &ipc, &rt, msg->msg_flags);
		if (err)
			ip_flush_pending_frames(sk);
		else if (!(msg->msg_flags & MSG_MORE)) {
			err = ip_push_pending_frames(sk);
			/* Swallow ENOBUFS unless the app asked for errors. */
			if (err == -ENOBUFS && !inet->recverr)
				err = 0;
		}
		release_sock(sk);
	}
done:
	if (free)
		kfree(ipc.opt);
	ip_rt_put(rt);

out:
	if (err < 0)
		return err;
	return len;

do_confirm:
	dst_confirm(&rt->dst);
	if (!(msg->msg_flags & MSG_PROBE) || len)
		goto back_from_confirm;
	err = 0;
	goto done;
}
| 1
|
[
"CWE-362"
] |
linux-2.6
|
f6d8bd051c391c1c0458a30b2a7abcd939329259
| 235,106,450,896,413,600,000,000,000,000,000,000,000
| 165
|
inet: add RCU protection to inet->opt
We lack proper synchronization to manipulate inet->opt ip_options
Problem is ip_make_skb() calls ip_setup_cork() and
ip_setup_cork() possibly makes a copy of ipc->opt (struct ip_options),
without any protection against another thread manipulating inet->opt.
Another thread can change inet->opt pointer and free old one under us.
Use RCU to protect inet->opt (changed to inet->inet_opt).
Instead of handling atomic refcounts, just copy ip_options when
necessary, to avoid cache line dirtying.
We cant insert an rcu_head in struct ip_options since its included in
skb->cb[], so this patch is large because I had to introduce a new
ip_options_rcu structure.
Signed-off-by: Eric Dumazet <[email protected]>
Cc: Herbert Xu <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
/*
 * mount.cifs entry point.
 *
 * Parses command-line options, gathers credentials (argv, environment or
 * prompt), resolves the server address, builds the kernel mount-option
 * string, calls mount(2) — retrying across resolved addresses and, on
 * ENXIO, once with an upper-cased share name — and finally records the
 * mount in mtab.  Exits with an EX_* code.
 *
 * NOTE(review): the mountpoint is validated by check_mountpoint() well
 * before mount(2) runs; confirm there is no window in which the path can
 * be swapped (e.g. for a symlink) between the check and the mount call.
 */
int main(int argc, char ** argv)
{
	int c;
	unsigned long flags = MS_MANDLOCK;
	char * orgoptions = NULL;
	char * share_name = NULL;
	const char * ipaddr = NULL;
	char * uuid = NULL;
	char * mountpoint = NULL;
	char * options = NULL;
	char * optionstail;
	char * resolved_path = NULL;
	char * temp;
	char * dev_name;
	int rc = 0;
	int rsize = 0;
	int wsize = 0;
	int nomtab = 0;
	int uid = 0;
	int gid = 0;
	int optlen = 0;
	int orgoptlen = 0;
	size_t options_size = 0;
	size_t current_len;
	int retry = 0; /* set when we have to retry mount with uppercase */
	struct addrinfo *addrhead = NULL, *addr;
	struct utsname sysinfo;
	struct mntent mountent;
	struct sockaddr_in *addr4;
	struct sockaddr_in6 *addr6;
	FILE * pmntfile;

	/* setlocale(LC_ALL, "");
	bindtextdomain(PACKAGE, LOCALEDIR);
	textdomain(PACKAGE); */

	if(argc && argv)
		thisprogram = argv[0];
	else
		mount_cifs_usage(stderr);

	if(thisprogram == NULL)
		thisprogram = "mount.cifs";

	uname(&sysinfo);
	/* BB add workstation name and domain and pass down */

/* #ifdef _GNU_SOURCE
	fprintf(stderr, " node: %s machine: %s sysname %s domain %s\n", sysinfo.nodename,sysinfo.machine,sysinfo.sysname,sysinfo.domainname);
#endif */
	if(argc > 2) {
		dev_name = argv[1];
		share_name = strndup(argv[1], MAX_UNC_LEN);
		if (share_name == NULL) {
			fprintf(stderr, "%s: %s", argv[0], strerror(ENOMEM));
			exit(EX_SYSERR);
		}
		mountpoint = argv[2];
	} else if (argc == 2) {
		if ((strcmp(argv[1], "-V") == 0) ||
		    (strcmp(argv[1], "--version") == 0))
		{
			print_cifs_mount_version();
			exit(0);
		}

		if ((strcmp(argv[1], "-h") == 0) ||
		    (strcmp(argv[1], "-?") == 0) ||
		    (strcmp(argv[1], "--help") == 0))
			mount_cifs_usage(stdout);

		mount_cifs_usage(stderr);
	} else {
		mount_cifs_usage(stderr);
	}

	/* add sharename in opts string as unc= parm */
	while ((c = getopt_long (argc, argv, "afFhilL:no:O:rsSU:vVwt:",
			 longopts, NULL)) != -1) {
		switch (c) {
/* No code to do the following options yet */
/*	case 'l':
		list_with_volumelabel = 1;
		break;
	case 'L':
		volumelabel = optarg;
		break; */
/*	case 'a':
		++mount_all;
		break; */

		case '?':
		case 'h':	 /* help */
			mount_cifs_usage(stdout);
		case 'n':
			++nomtab;
			break;
		case 'b':
#ifdef MS_BIND
			flags |= MS_BIND;
#else
			fprintf(stderr,
				"option 'b' (MS_BIND) not supported\n");
#endif
			break;
		case 'm':
#ifdef MS_MOVE
			flags |= MS_MOVE;
#else
			fprintf(stderr,
				"option 'm' (MS_MOVE) not supported\n");
#endif
			break;
		case 'o':
			orgoptions = strdup(optarg);
			break;
		case 'r':  /* mount readonly */
			flags |= MS_RDONLY;
			break;
		case 'U':
			uuid = optarg;
			break;
		case 'v':
			++verboseflag;
			break;
		case 'V':
			print_cifs_mount_version();
			exit (0);
		case 'w':
			flags &= ~MS_RDONLY;
			break;
		case 'R':
			rsize = atoi(optarg) ;
			break;
		case 'W':
			wsize = atoi(optarg);
			break;
		case '1':
			if (isdigit(*optarg)) {
				char *ep;

				uid = strtoul(optarg, &ep, 10);
				if (*ep) {
					fprintf(stderr, "bad uid value \"%s\"\n", optarg);
					exit(EX_USAGE);
				}
			} else {
				struct passwd *pw;

				if (!(pw = getpwnam(optarg))) {
					fprintf(stderr, "bad user name \"%s\"\n", optarg);
					exit(EX_USAGE);
				}
				uid = pw->pw_uid;
				endpwent();
			}
			break;
		case '2':
			if (isdigit(*optarg)) {
				char *ep;

				gid = strtoul(optarg, &ep, 10);
				if (*ep) {
					fprintf(stderr, "bad gid value \"%s\"\n", optarg);
					exit(EX_USAGE);
				}
			} else {
				struct group *gr;

				if (!(gr = getgrnam(optarg))) {
					fprintf(stderr, "bad user name \"%s\"\n", optarg);
					exit(EX_USAGE);
				}
				gid = gr->gr_gid;
				endpwent();
			}
			break;
		case 'u':
			got_user = 1;
			user_name = optarg;
			break;
		case 'd':
			domain_name = optarg; /* BB fix this - currently ignored */
			got_domain = 1;
			break;
		case 'p':
			if(mountpassword == NULL)
				mountpassword = (char *)calloc(MOUNT_PASSWD_SIZE+1,1);
			if(mountpassword) {
				got_password = 1;
				strlcpy(mountpassword,optarg,MOUNT_PASSWD_SIZE+1);
			}
			break;
		case 'S':
			get_password_from_file(0 /* stdin */,NULL);
			break;
		case 't':
			break;
		case 'f':
			++fakemnt;
			break;
		default:
			fprintf(stderr, "unknown mount option %c\n",c);
			mount_cifs_usage(stderr);
		}
	}

	if((argc < 3) || (dev_name == NULL) || (mountpoint == NULL)) {
		mount_cifs_usage(stderr);
	}

	/* make sure mountpoint is legit */
	rc = check_mountpoint(thisprogram, mountpoint);
	if (rc)
		goto mount_exit;

	/* sanity check for unprivileged mounts */
	if (getuid()) {
		rc = check_fstab(thisprogram, mountpoint, dev_name,
				 &orgoptions);
		if (rc)
			goto mount_exit;

		/* enable any default user mount flags */
		flags |= CIFS_SETUID_FLAGS;
	}

	/* Credentials may also arrive via the environment. */
	if (getenv("PASSWD")) {
		if(mountpassword == NULL)
			mountpassword = (char *)calloc(MOUNT_PASSWD_SIZE+1,1);
		if(mountpassword) {
			strlcpy(mountpassword,getenv("PASSWD"),MOUNT_PASSWD_SIZE+1);
			got_password = 1;
		}
	} else if (getenv("PASSWD_FD")) {
		get_password_from_file(atoi(getenv("PASSWD_FD")),NULL);
	} else if (getenv("PASSWD_FILE")) {
		get_password_from_file(0, getenv("PASSWD_FILE"));
	}

	if (orgoptions && parse_options(&orgoptions, &flags)) {
		rc = EX_USAGE;
		goto mount_exit;
	}

	if (getuid()) {
#if !CIFS_LEGACY_SETUID_CHECK
		if (!(flags & (MS_USERS|MS_USER))) {
			fprintf(stderr, "%s: permission denied\n", thisprogram);
			rc = EX_USAGE;
			goto mount_exit;
		}
#endif /* !CIFS_LEGACY_SETUID_CHECK */

		if (geteuid()) {
			fprintf(stderr, "%s: not installed setuid - \"user\" "
					"CIFS mounts not supported.",
					thisprogram);
			rc = EX_FAIL;
			goto mount_exit;
		}
	}

	flags &= ~(MS_USERS|MS_USER);

	addrhead = addr = parse_server(&share_name);
	if((addrhead == NULL) && (got_ip == 0)) {
		fprintf(stderr, "No ip address specified and hostname not found\n");
		rc = EX_USAGE;
		goto mount_exit;
	}

	/* BB save off path and pop after mount returns? */
	resolved_path = (char *)malloc(PATH_MAX+1);
	if(resolved_path) {
		/* Note that if we can not canonicalize the name, we get
		another chance to see if it is valid when we chdir to it */
		if (realpath(mountpoint, resolved_path)) {
			mountpoint = resolved_path;
		}
	}
	if(got_user == 0) {
		/* Note that the password will not be retrieved from the
		   USER env variable (ie user%password form) as there is
		   already a PASSWD environment varaible */
		if (getenv("USER"))
			user_name = strdup(getenv("USER"));
		if (user_name == NULL)
			user_name = getusername();
		got_user = 1;
	}

	if(got_password == 0) {
		char *tmp_pass = getpass("Password: "); /* BB obsolete sys call but
							   no good replacement yet. */
		mountpassword = (char *)calloc(MOUNT_PASSWD_SIZE+1,1);
		if (!tmp_pass || !mountpassword) {
			fprintf(stderr, "Password not entered, exiting\n");
			exit(EX_USAGE);
		}
		strlcpy(mountpassword, tmp_pass, MOUNT_PASSWD_SIZE+1);
		got_password = 1;
	}
	/* FIXME launch daemon (handles dfs name resolution and credential change)
	   remember to clear parms and overwrite password field before launching */
	/* Compute a worst-case size for the kernel mount option string. */
	if(orgoptions) {
		optlen = strlen(orgoptions);
		orgoptlen = optlen;
	} else
		optlen = 0;
	if(share_name)
		optlen += strlen(share_name) + 4;
	else {
		fprintf(stderr, "No server share name specified\n");
		fprintf(stderr, "\nMounting the DFS root for server not implemented yet\n");
		exit(EX_USAGE);
	}
	if(user_name)
		optlen += strlen(user_name) + 6;
	optlen += MAX_ADDRESS_LEN + 4;
	if(mountpassword)
		optlen += strlen(mountpassword) + 6;
mount_retry:
	/* (Re)build the option string from scratch on every retry. */
	SAFE_FREE(options);
	options_size = optlen + 10 + DOMAIN_SIZE;
	options = (char *)malloc(options_size /* space for commas in password */ + 8 /* space for domain=  , domain name itself was counted as part of the length username string above */);

	if(options == NULL) {
		fprintf(stderr, "Could not allocate memory for mount options\n");
		exit(EX_SYSERR);
	}

	strlcpy(options, "unc=", options_size);
	strlcat(options,share_name,options_size);
	/* scan backwards and reverse direction of slash */
	temp = strrchr(options, '/');
	if(temp > options + 6)
		*temp = '\\';
	if(user_name) {
		/* check for syntax like user=domain\user */
		if(got_domain == 0)
			domain_name = check_for_domain(&user_name);
		strlcat(options,",user=",options_size);
		strlcat(options,user_name,options_size);
	}
	if(retry == 0) {
		if(domain_name) {
			/* extra length accounted for in option string above */
			strlcat(options,",domain=",options_size);
			strlcat(options,domain_name,options_size);
		}
	}

	strlcat(options,",ver=",options_size);
	strlcat(options,MOUNT_CIFS_VERSION_MAJOR,options_size);

	if(orgoptions) {
		strlcat(options,",",options_size);
		strlcat(options,orgoptions,options_size);
	}
	if(prefixpath) {
		strlcat(options,",prefixpath=",options_size);
		strlcat(options,prefixpath,options_size); /* no need to cat the / */
	}

	/* convert all '\\' to '/' in share portion so that /proc/mounts looks pretty */
	replace_char(dev_name, '\\', '/', strlen(share_name));

	if (!got_ip && addr) {
		strlcat(options, ",ip=", options_size);
		current_len = strnlen(options, options_size);
		optionstail = options + current_len;
		switch (addr->ai_addr->sa_family) {
		case AF_INET6:
			addr6 = (struct sockaddr_in6 *) addr->ai_addr;
			ipaddr = inet_ntop(AF_INET6, &addr6->sin6_addr, optionstail,
					   options_size - current_len);
			break;
		case AF_INET:
			addr4 = (struct sockaddr_in *) addr->ai_addr;
			ipaddr = inet_ntop(AF_INET, &addr4->sin_addr, optionstail,
					   options_size - current_len);
			break;
		default:
			ipaddr = NULL;
		}

		/* if the address looks bogus, try the next one */
		if (!ipaddr) {
			addr = addr->ai_next;
			if (addr)
				goto mount_retry;
			rc = EX_SYSERR;
			goto mount_exit;
		}
	}

	/* NOTE(review): addr may be NULL here (when ip= was supplied) and
	 * addr6 is only assigned inside the AF_INET6 case above — confirm
	 * this dereference cannot fault on other paths. */
	if (addr->ai_addr->sa_family == AF_INET6 && addr6->sin6_scope_id) {
		strlcat(options, "%", options_size);
		current_len = strnlen(options, options_size);
		optionstail = options + current_len;
		snprintf(optionstail, options_size - current_len, "%u",
			 addr6->sin6_scope_id);
	}

	if(verboseflag)
		fprintf(stderr, "\nmount.cifs kernel mount options: %s", options);

	if (mountpassword) {
		/*
		 * Commas have to be doubled, or else they will
		 * look like the parameter separator
		 */
		if(retry == 0)
			check_for_comma(&mountpassword);
		strlcat(options,",pass=",options_size);
		strlcat(options,mountpassword,options_size);
		if (verboseflag)
			fprintf(stderr, ",pass=********");
	}

	if (verboseflag)
		fprintf(stderr, "\n");

	if (!fakemnt && mount(dev_name, mountpoint, cifs_fstype, flags, options)) {
		switch (errno) {
		case ECONNREFUSED:
		case EHOSTUNREACH:
			/* Try the next resolved address, if any. */
			if (addr) {
				addr = addr->ai_next;
				if (addr)
					goto mount_retry;
			}
			break;
		case ENODEV:
			fprintf(stderr, "mount error: cifs filesystem not supported by the system\n");
			break;
		case ENXIO:
			if(retry == 0) {
				retry = 1;
				if (uppercase_string(dev_name) &&
				    uppercase_string(share_name) &&
				    uppercase_string(prefixpath)) {
					fprintf(stderr, "retrying with upper case share name\n");
					goto mount_retry;
				}
			}
		}
		fprintf(stderr, "mount error(%d): %s\n", errno, strerror(errno));
		fprintf(stderr, "Refer to the mount.cifs(8) manual page (e.g. man "
			"mount.cifs)\n");
		rc = EX_FAIL;
		goto mount_exit;
	}

	if (nomtab)
		goto mount_exit;
	/* Record the new mount in mtab under an advisory lock. */
	atexit(unlock_mtab);
	rc = lock_mtab();
	if (rc) {
		fprintf(stderr, "cannot lock mtab");
		goto mount_exit;
	}
	pmntfile = setmntent(MOUNTED, "a+");
	if (!pmntfile) {
		fprintf(stderr, "could not update mount table\n");
		unlock_mtab();
		rc = EX_FILEIO;
		goto mount_exit;
	}
	mountent.mnt_fsname = dev_name;
	mountent.mnt_dir = mountpoint;
	mountent.mnt_type = (char *)(void *)cifs_fstype;
	mountent.mnt_opts = (char *)malloc(220);
	if(mountent.mnt_opts) {
		char * mount_user = getusername();
		memset(mountent.mnt_opts,0,200);
		if(flags & MS_RDONLY)
			strlcat(mountent.mnt_opts,"ro",220);
		else
			strlcat(mountent.mnt_opts,"rw",220);
		if(flags & MS_MANDLOCK)
			strlcat(mountent.mnt_opts,",mand",220);
		if(flags & MS_NOEXEC)
			strlcat(mountent.mnt_opts,",noexec",220);
		if(flags & MS_NOSUID)
			strlcat(mountent.mnt_opts,",nosuid",220);
		if(flags & MS_NODEV)
			strlcat(mountent.mnt_opts,",nodev",220);
		if(flags & MS_SYNCHRONOUS)
			strlcat(mountent.mnt_opts,",sync",220);
		if(mount_user) {
			if(getuid() != 0) {
				strlcat(mountent.mnt_opts,
					",user=", 220);
				strlcat(mountent.mnt_opts,
					mount_user, 220);
			}
		}
	}
	mountent.mnt_freq = 0;
	mountent.mnt_passno = 0;
	rc = addmntent(pmntfile,&mountent);
	endmntent(pmntfile);
	unlock_mtab();
	SAFE_FREE(mountent.mnt_opts);
	if (rc)
		rc = EX_FILEIO;
mount_exit:
	/* Scrub the password from memory before exiting. */
	if(mountpassword) {
		int len = strlen(mountpassword);
		memset(mountpassword,0,len);
		SAFE_FREE(mountpassword);
	}

	if (addrhead)
		freeaddrinfo(addrhead);
	SAFE_FREE(options);
	SAFE_FREE(orgoptions);
	SAFE_FREE(resolved_path);
	SAFE_FREE(share_name);
	exit(rc);
}
| 1
|
[
"CWE-59"
] |
samba
|
3ae5dac462c4ed0fb2cd94553583c56fce2f9d80
| 131,900,618,422,685,200,000,000,000,000,000,000,000
| 524
|
mount.cifs: take extra care that mountpoint isn't changed during mount
It's possible to trick mount.cifs into mounting onto the wrong directory
by replacing the mountpoint with a symlink to a directory. mount.cifs
attempts to check the validity of the mountpoint, but there's still a
possible race between those checks and the mount(2) syscall.
To guard against this, chdir to the mountpoint very early, and only deal
with it as "." from then on out.
Signed-off-by: Jeff Layton <[email protected]>
|
/*
 * Dispatch error-queue reception to the address-family handler.
 * Families other than AF_INET (and AF_INET6 when compiled in) get -EINVAL.
 */
int inet_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
{
	switch (sk->sk_family) {
	case AF_INET:
		return ip_recv_error(sk, msg, len, addr_len);
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		return pingv6_ops.ipv6_recv_error(sk, msg, len, addr_len);
#endif
	default:
		return -EINVAL;
	}
}
| 0
|
[] |
net
|
79462ad02e861803b3840cc782248c7359451cd9
| 324,776,167,458,646,160,000,000,000,000,000,000,000
| 10
|
net: add validation for the socket syscall protocol argument
郭永刚 reported that one could simply crash the kernel as root by
using a simple program:
int socket_fd;
struct sockaddr_in addr;
addr.sin_port = 0;
addr.sin_addr.s_addr = INADDR_ANY;
addr.sin_family = 10;
socket_fd = socket(10,3,0x40000000);
connect(socket_fd , &addr,16);
AF_INET, AF_INET6 sockets actually only support 8-bit protocol
identifiers. inet_sock's skc_protocol field thus is sized accordingly,
thus larger protocol identifiers simply cut off the higher bits and
store a zero in the protocol fields.
This could lead to e.g. NULL function pointer because as a result of
the cut off inet_num is zero and we call down to inet_autobind, which
is NULL for raw sockets.
kernel: Call Trace:
kernel: [<ffffffff816db90e>] ? inet_autobind+0x2e/0x70
kernel: [<ffffffff816db9a4>] inet_dgram_connect+0x54/0x80
kernel: [<ffffffff81645069>] SYSC_connect+0xd9/0x110
kernel: [<ffffffff810ac51b>] ? ptrace_notify+0x5b/0x80
kernel: [<ffffffff810236d8>] ? syscall_trace_enter_phase2+0x108/0x200
kernel: [<ffffffff81645e0e>] SyS_connect+0xe/0x10
kernel: [<ffffffff81779515>] tracesys_phase2+0x84/0x89
I found no particular commit which introduced this problem.
CVE: CVE-2015-8543
Cc: Cong Wang <[email protected]>
Reported-by: 郭永刚 <[email protected]>
Signed-off-by: Hannes Frederic Sowa <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
/*
 * React to a change of the link's encryption status for a SEQPACKET
 * (connection-oriented) L2CAP socket.
 *
 * When encryption comes up, a MEDIUM-security socket's pending timer is
 * cleared.  When encryption drops, MEDIUM sockets get a 5 second grace
 * timer, while HIGH-security sockets are closed with ECONNREFUSED.
 */
static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
{
	u8 sec_level;

	if (sk->sk_type != SOCK_SEQPACKET)
		return;

	sec_level = l2cap_pi(sk)->sec_level;

	if (encrypt != 0x00) {
		/* Link is now encrypted: stop any pending grace timer. */
		if (sec_level == BT_SECURITY_MEDIUM)
			l2cap_sock_clear_timer(sk);
		return;
	}

	/* Encryption was dropped. */
	if (sec_level == BT_SECURITY_MEDIUM) {
		l2cap_sock_clear_timer(sk);
		l2cap_sock_set_timer(sk, HZ * 5);
	} else if (sec_level == BT_SECURITY_HIGH) {
		__l2cap_sock_close(sk, ECONNREFUSED);
	}
}
| 0
|
[
"CWE-200",
"CWE-119",
"CWE-787"
] |
linux
|
f2fcfcd670257236ebf2088bbdf26f6a8ef459fe
| 24,193,200,191,005,250,000,000,000,000,000,000,000
| 16
|
Bluetooth: Add configuration support for ERTM and Streaming mode
Add support to config_req and config_rsp to configure ERTM and Streaming
mode. If the remote device specifies ERTM or Streaming mode, then the
same mode is proposed. Otherwise ERTM or Basic mode is used. And in case
of a state 2 device, the remote device should propose the same mode. If
not, then the channel gets disconnected.
Signed-off-by: Gustavo F. Padovan <[email protected]>
Signed-off-by: Marcel Holtmann <[email protected]>
|
/*
 * fault_in_iov_iter_writeable - pre-fault user pages of an iterator for write.
 * @i:    iterator describing the destination user buffers
 * @size: number of bytes, from the current iterator position, to fault in
 *
 * Only iovec-backed iterators reference user memory and need faulting;
 * every other flavour returns 0 immediately.
 *
 * Returns the number of bytes that could NOT be faulted in (0 on full
 * success).  The iterator itself is not advanced.
 */
size_t fault_in_iov_iter_writeable(const struct iov_iter *i, size_t size)
{
	if (iter_is_iovec(i)) {
		/* Only the first min(size, remaining) bytes are attempted;
		 * anything beyond the iterator's remaining length counts as
		 * "not faulted in" and is carried in 'size'. */
		size_t count = min(size, iov_iter_count(i));
		const struct iovec *p;
		size_t skip;

		size -= count;
		/* 'skip' is the offset into the first segment only; every
		 * subsequent segment starts at offset 0. */
		for (p = i->iov, skip = i->iov_offset; count; p++, skip = 0) {
			size_t len = min(count, p->iov_len - skip);
			size_t ret;

			if (unlikely(!len))
				continue;
			/* ret is the number of bytes of this segment that
			 * could not be faulted in; stop at the first partial
			 * failure. */
			ret = fault_in_safe_writeable(p->iov_base + skip, len);
			count -= len - ret;
			if (ret)
				break;
		}
		/* Unprocessed bytes of this pass plus the tail we never
		 * attempted. */
		return count + size;
	}
	return 0;
}
| 0
|
[
"CWE-665",
"CWE-284"
] |
linux
|
9d2231c5d74e13b2a0546fee6737ee4446017903
| 56,911,458,208,707,040,000,000,000,000,000,000,000
| 23
|
lib/iov_iter: initialize "flags" in new pipe_buffer
The functions copy_page_to_iter_pipe() and push_pipe() can both
allocate a new pipe_buffer, but the "flags" member initializer is
missing.
Fixes: 241699cd72a8 ("new iov_iter flavour: pipe-backed")
To: Alexander Viro <[email protected]>
To: [email protected]
To: [email protected]
Cc: [email protected]
Signed-off-by: Max Kellermann <[email protected]>
Signed-off-by: Al Viro <[email protected]>
|
/*
 * format_timestamp - render a 32-bit device timestamp as a short string.
 * @timestamp: raw timestamp value
 *
 * Values whose top byte is 0xff are relative to boot; the low 24 bits
 * then hold the offset in seconds ("boot + Ns").  All other values are
 * printed as an absolute second count ("Ns").
 *
 * Returns a pointer to a static buffer: the result is only valid until
 * the next call, and the function is not thread-safe.
 *
 * Fix: use PRIu32 instead of a bare "%u" so the format specifier matches
 * uint32_t on every platform (on ILP64-style ABIs "%u" would be wrong).
 */
static char *
format_timestamp(uint32_t timestamp)
{
	static char buffer[32];

	if ((timestamp & 0xff000000) == 0xff000000)
		snprintf(buffer, sizeof(buffer), "boot + %" PRIu32 "s",
			 timestamp & 0x00ffffff);
	else
		snprintf(buffer, sizeof(buffer), "%" PRIu32 "s", timestamp);
	return buffer;
}
| 0
|
[
"CWE-476"
] |
linux
|
bcf3b67d16a4c8ffae0aa79de5853435e683945c
| 202,208,934,037,230,240,000,000,000,000,000,000,000
| 11
|
scsi: megaraid_sas: return error when create DMA pool failed
when create DMA pool for cmd frames failed, we should return -ENOMEM,
instead of 0.
In some case in:
megasas_init_adapter_fusion()
-->megasas_alloc_cmds()
-->megasas_create_frame_pool
create DMA pool failed,
--> megasas_free_cmds() [1]
-->megasas_alloc_cmds_fusion()
failed, then goto fail_alloc_cmds.
-->megasas_free_cmds() [2]
we will call megasas_free_cmds twice, [1] will kfree cmd_list,
[2] will use cmd_list.it will cause a problem:
Unable to handle kernel NULL pointer dereference at virtual address
00000000
pgd = ffffffc000f70000
[00000000] *pgd=0000001fbf893003, *pud=0000001fbf893003,
*pmd=0000001fbf894003, *pte=006000006d000707
Internal error: Oops: 96000005 [#1] SMP
Modules linked in:
CPU: 18 PID: 1 Comm: swapper/0 Not tainted
task: ffffffdfb9290000 ti: ffffffdfb923c000 task.ti: ffffffdfb923c000
PC is at megasas_free_cmds+0x30/0x70
LR is at megasas_free_cmds+0x24/0x70
...
Call trace:
[<ffffffc0005b779c>] megasas_free_cmds+0x30/0x70
[<ffffffc0005bca74>] megasas_init_adapter_fusion+0x2f4/0x4d8
[<ffffffc0005b926c>] megasas_init_fw+0x2dc/0x760
[<ffffffc0005b9ab0>] megasas_probe_one+0x3c0/0xcd8
[<ffffffc0004a5abc>] local_pci_probe+0x4c/0xb4
[<ffffffc0004a5c40>] pci_device_probe+0x11c/0x14c
[<ffffffc00053a5e4>] driver_probe_device+0x1ec/0x430
[<ffffffc00053a92c>] __driver_attach+0xa8/0xb0
[<ffffffc000538178>] bus_for_each_dev+0x74/0xc8
[<ffffffc000539e88>] driver_attach+0x28/0x34
[<ffffffc000539a18>] bus_add_driver+0x16c/0x248
[<ffffffc00053b234>] driver_register+0x6c/0x138
[<ffffffc0004a5350>] __pci_register_driver+0x5c/0x6c
[<ffffffc000ce3868>] megasas_init+0xc0/0x1a8
[<ffffffc000082a58>] do_one_initcall+0xe8/0x1ec
[<ffffffc000ca7be8>] kernel_init_freeable+0x1c8/0x284
[<ffffffc0008d90b8>] kernel_init+0x1c/0xe4
Signed-off-by: Jason Yan <[email protected]>
Acked-by: Sumit Saxena <[email protected]>
Signed-off-by: Martin K. Petersen <[email protected]>
|
/*
 * Validate the payload of an XkbSetMap request against the device's
 * current keyboard description before any of it is applied.
 *
 * 'values' points at the variable-length data following the fixed request
 * header; each Check* helper validates its section and advances the
 * pointer.  'doswap' tells the helpers the client has the opposite byte
 * order, so wire structures must be swapped while checking.
 *
 * Returns Success, or a protocol error code with client->errorValue set
 * to an encoded hint about which field failed.
 */
_XkbSetMapChecks(ClientPtr client, DeviceIntPtr dev, xkbSetMapReq * req,
                 char *values, Bool doswap)
{
    XkbSrvInfoPtr xkbi;
    XkbDescPtr xkb;
    int error;
    int nTypes = 0, nActions;
    CARD8 mapWidths[XkbMaxLegalKeyCode + 1] = { 0 };
    CARD16 symsPerKey[XkbMaxLegalKeyCode + 1] = { 0 };
    XkbSymMapPtr map;
    int i;

    /* Devices without a key class (e.g. pointers) have nothing to check. */
    if (!dev->key)
        return 0;

    xkbi = dev->key->xkbInfo;
    xkb = xkbi->desc;

    /* The request's keycode range must match the device's range. */
    if ((xkb->min_key_code != req->minKeyCode) ||
        (xkb->max_key_code != req->maxKeyCode)) {
        if (client->xkbClientFlags & _XkbClientIsAncient) {
            /* pre 1.0 versions of Xlib have a bug */
            req->minKeyCode = xkb->min_key_code;
            req->maxKeyCode = xkb->max_key_code;
        }
        else {
            if (!XkbIsLegalKeycode(req->minKeyCode)) {
                client->errorValue =
                    _XkbErrCode3(2, req->minKeyCode, req->maxKeyCode);
                return BadValue;
            }
            if (req->minKeyCode > req->maxKeyCode) {
                client->errorValue =
                    _XkbErrCode3(3, req->minKeyCode, req->maxKeyCode);
                return BadMatch;
            }
        }
    }

    /* Validate the key-type section when present; otherwise fall back to
     * the device's current number of types for the per-key checks below. */
    if ((req->present & XkbKeyTypesMask) &&
        (!CheckKeyTypes(client, xkb, req, (xkbKeyTypeWireDesc **) &values,
                        &nTypes, mapWidths, doswap))) {
        client->errorValue = nTypes;
        return BadValue;
    }
    else {
        nTypes = xkb->map->num_types;
    }

    /* symsPerKey/mapWidths must be filled regardless of client-side flags */
    map = &xkb->map->key_sym_map[xkb->min_key_code];
    for (i = xkb->min_key_code; i < xkb->max_key_code; i++, map++) {
        register int g, ng, w;

        ng = XkbNumGroups(map->group_info);
        for (w = g = 0; g < ng; g++) {
            /* Every group's type index must refer to an existing type. */
            if (map->kt_index[g] >= (unsigned) nTypes) {
                client->errorValue = _XkbErrCode4(0x13, i, g, map->kt_index[g]);
                return BadValue;
            }
            if (mapWidths[map->kt_index[g]] > w)
                w = mapWidths[map->kt_index[g]];
        }
        /* Widest group width times group count = symbols stored per key. */
        symsPerKey[i] = w * ng;
    }

    /* Each remaining section is checked in wire order; the helpers advance
     * 'values' past the data they consume. */
    if ((req->present & XkbKeySymsMask) &&
        (!CheckKeySyms(client, xkb, req, nTypes, mapWidths, symsPerKey,
                       (xkbSymMapWireDesc **) &values, &error, doswap))) {
        client->errorValue = error;
        return BadValue;
    }

    if ((req->present & XkbKeyActionsMask) &&
        (!CheckKeyActions(xkb, req, nTypes, mapWidths, symsPerKey,
                          (CARD8 **) &values, &nActions))) {
        client->errorValue = nActions;
        return BadValue;
    }

    if ((req->present & XkbKeyBehaviorsMask) &&
        (!CheckKeyBehaviors
         (xkb, req, (xkbBehaviorWireDesc **) &values, &error))) {
        client->errorValue = error;
        return BadValue;
    }

    if ((req->present & XkbVirtualModsMask) &&
        (!CheckVirtualMods(xkb, req, (CARD8 **) &values, &error))) {
        client->errorValue = error;
        return BadValue;
    }
    if ((req->present & XkbExplicitComponentsMask) &&
        (!CheckKeyExplicit(xkb, req, (CARD8 **) &values, &error))) {
        client->errorValue = error;
        return BadValue;
    }
    if ((req->present & XkbModifierMapMask) &&
        (!CheckModifierMap(xkb, req, (CARD8 **) &values, &error))) {
        client->errorValue = error;
        return BadValue;
    }
    if ((req->present & XkbVirtualModMapMask) &&
        (!CheckVirtualModMap
         (xkb, req, (xkbVModMapWireDesc **) &values, &error))) {
        client->errorValue = error;
        return BadValue;
    }

    /* After all sections are consumed, 'values' must land exactly at the
     * end of the request as declared by its length field. */
    if (((values - ((char *) req)) / 4) != req->length) {
        ErrorF("[xkb] Internal error! Bad length in XkbSetMap (after check)\n");
        client->errorValue = values - ((char *) &req[1]);
        return BadLength;
    }

    return Success;
}
| 0
|
[
"CWE-119"
] |
xserver
|
f7cd1276bbd4fe3a9700096dec33b52b8440788d
| 194,601,629,742,360,960,000,000,000,000,000,000,000
| 117
|
Correct bounds checking in XkbSetNames()
CVE-2020-14345 / ZDI 11428
This vulnerability was discovered by:
Jan-Niklas Sohn working with Trend Micro Zero Day Initiative
Signed-off-by: Matthieu Herrb <[email protected]>
|
/*! Set the data type used when interpreting this EXIF block.
 * A NULL handle or one without private state is silently ignored. */
exif_data_set_data_type (ExifData *d, ExifDataType dt)
{
	if (d && d->priv)
		d->priv->data_type = dt;
}
| 0
|
[
"CWE-400",
"CWE-703"
] |
libexif
|
6aa11df549114ebda520dde4cdaea2f9357b2c89
| 257,547,921,376,382,700,000,000,000,000,000,000,000
| 7
|
Improve deep recursion detection in exif_data_load_data_content.
The existing detection was still vulnerable to pathological cases
causing DoS by wasting CPU. The new algorithm takes the number of tags
into account to make it harder to abuse by cases using shallow recursion
but with a very large number of tags. This improves on commit 5d28011c
which wasn't sufficient to counter this kind of case.
The limitation in the previous fix was discovered by Laurent Delosieres,
Secunia Research at Flexera (Secunia Advisory SA84652) and is assigned
the identifier CVE-2018-20030.
|
/*
 * Parse the BMP file header (BFH) and info header (BIH), size the pixbuf,
 * and set up the read state machine for the palette / bitmask / pixel data
 * that follows.  Returns FALSE and sets @error on malformed headers.
 *
 * Fixes relative to the original:
 *  - the "unsupported depth" branch now returns FALSE instead of falling
 *    through with depth > 32, which would have shifted by more than the
 *    width of int below (undefined behavior);
 *  - the palette size is validated: a BMP cannot use more colors than its
 *    bit depth allows, and a bogus n_colors previously led to out-of-bounds
 *    colormap reads later on (CWE-119; GNOME bug 758991).
 */
static gboolean DecodeHeader(unsigned char *BFH, unsigned char *BIH,
                             struct bmp_progressive_state *State,
                             GError **error)
{
	gint clrUsed;

	/* First check for the two first bytes content. A sane
	   BMP file must start with bytes 0x42 0x4D. */
	if (*BFH != 0x42 || *(BFH + 1) != 0x4D) {
		g_set_error_literal (error,
		                     GDK_PIXBUF_ERROR,
		                     GDK_PIXBUF_ERROR_CORRUPT_IMAGE,
		                     _("BMP image has bogus header data"));
		State->read_state = READ_STATE_ERROR;
		return FALSE;
	}

	/* FIXME this is totally unrobust against bogus image data. */
	if (State->BufferSize < lsb_32 (&BIH[0]) + 14) {
		State->BufferSize = lsb_32 (&BIH[0]) + 14;
		if (!grow_buffer (State, error))
			return FALSE;
		return TRUE;
	}

#if DUMPBIH
	DumpBIH(BIH);
#endif

	State->Header.size = lsb_32 (&BIH[0]);
	if (State->Header.size == 124) {
		/* BMP v5 */
		State->Header.width = lsb_32 (&BIH[4]);
		State->Header.height = lsb_32 (&BIH[8]);
		State->Header.depth = lsb_16 (&BIH[14]);
		State->Compressed = lsb_32 (&BIH[16]);
	} else if (State->Header.size == 108) {
		/* BMP v4 */
		State->Header.width = lsb_32 (&BIH[4]);
		State->Header.height = lsb_32 (&BIH[8]);
		State->Header.depth = lsb_16 (&BIH[14]);
		State->Compressed = lsb_32 (&BIH[16]);
	} else if (State->Header.size == 64) {
		/* BMP OS/2 v2 */
		State->Header.width = lsb_32 (&BIH[4]);
		State->Header.height = lsb_32 (&BIH[8]);
		State->Header.depth = lsb_16 (&BIH[14]);
		State->Compressed = lsb_32 (&BIH[16]);
	} else if (State->Header.size == 40) {
		/* BMP v3 */
		State->Header.width = lsb_32 (&BIH[4]);
		State->Header.height = lsb_32 (&BIH[8]);
		State->Header.depth = lsb_16 (&BIH[14]);
		State->Compressed = lsb_32 (&BIH[16]);
	} else if (State->Header.size == 12) {
		/* BMP OS/2 */
		State->Header.width = lsb_16 (&BIH[4]);
		State->Header.height = lsb_16 (&BIH[6]);
		State->Header.depth = lsb_16 (&BIH[10]);
		State->Compressed = BI_RGB;
	} else {
		g_set_error_literal (error,
		                     GDK_PIXBUF_ERROR,
		                     GDK_PIXBUF_ERROR_CORRUPT_IMAGE,
		                     _("BMP image has unsupported header size"));
		State->read_state = READ_STATE_ERROR;
		return FALSE;
	}

	if (State->Header.depth > 32)
	{
		g_set_error_literal (error,
		                     GDK_PIXBUF_ERROR,
		                     GDK_PIXBUF_ERROR_CORRUPT_IMAGE,
		                     _("BMP image has unsupported depth"));
		State->read_state = READ_STATE_ERROR;
		/* FIX: previously missing, so parsing continued with a depth
		 * that makes the shifts below undefined behavior. */
		return FALSE;
	}

	if (State->Header.size == 12)
		clrUsed = 1 << State->Header.depth;
	else
		clrUsed = (int) (BIH[35] << 24) + (BIH[34] << 16) + (BIH[33] << 8) + (BIH[32]);

	/* FIX: reject impossible palette sizes.  A bitmap cannot reference
	 * more palette entries than its bit depth allows; a bogus n_colors
	 * previously caused out-of-bounds reads when the colormap was
	 * decoded.  (depth < 32 here keeps the shift well-defined; paletted
	 * images always have depth <= 8 anyway.) */
	if (clrUsed < 0 ||
	    (State->Header.depth < 32 && clrUsed > (1 << State->Header.depth))) {
		g_set_error_literal (error,
		                     GDK_PIXBUF_ERROR,
		                     GDK_PIXBUF_ERROR_CORRUPT_IMAGE,
		                     _("BMP image has bogus header data"));
		State->read_state = READ_STATE_ERROR;
		return FALSE;
	}

	if (clrUsed != 0)
		State->Header.n_colors = clrUsed;
	else
		State->Header.n_colors = (1 << State->Header.depth);

	State->Type = State->Header.depth;	/* This may be less trivial someday */

	/* Negative heights indicates bottom-down pixelorder */
	if (State->Header.height < 0) {
		State->Header.height = -State->Header.height;
		State->Header.Negative = 1;
	}

	if (State->Header.Negative &&
	    (State->Compressed != BI_RGB && State->Compressed != BI_BITFIELDS))
	{
		g_set_error_literal (error,
		                     GDK_PIXBUF_ERROR,
		                     GDK_PIXBUF_ERROR_CORRUPT_IMAGE,
		                     _("Topdown BMP images cannot be compressed"));
		State->read_state = READ_STATE_ERROR;
		return FALSE;
	}

	/* Sanity-check dimensions and the compression/depth combination. */
	if (State->Header.width <= 0 || State->Header.height == 0 ||
	    (State->Compressed == BI_RLE4 && State->Type != 4) ||
	    (State->Compressed == BI_RLE8 && State->Type != 8) ||
	    (State->Compressed == BI_BITFIELDS && !(State->Type == 16 || State->Type == 32)) ||
	    (State->Compressed > BI_BITFIELDS)) {
		g_set_error_literal (error,
		                     GDK_PIXBUF_ERROR,
		                     GDK_PIXBUF_ERROR_CORRUPT_IMAGE,
		                     _("BMP image has bogus header data"));
		State->read_state = READ_STATE_ERROR;
		return FALSE;
	}

	/* Bytes per scanline before 32-bit padding. */
	if (State->Type == 32)
		State->LineWidth = State->Header.width * 4;
	else if (State->Type == 24)
		State->LineWidth = State->Header.width * 3;
	else if (State->Type == 16)
		State->LineWidth = State->Header.width * 2;
	else if (State->Type == 8)
		State->LineWidth = State->Header.width * 1;
	else if (State->Type == 4)
		State->LineWidth = (State->Header.width + 1) / 2;
	else if (State->Type == 1) {
		State->LineWidth = State->Header.width / 8;
		if ((State->Header.width & 7) != 0)
			State->LineWidth++;
	} else {
		g_set_error_literal (error,
		                     GDK_PIXBUF_ERROR,
		                     GDK_PIXBUF_ERROR_CORRUPT_IMAGE,
		                     _("BMP image has bogus header data"));
		State->read_state = READ_STATE_ERROR;
		return FALSE;
	}

	/* Pad to a 32 bit boundary */
	if (((State->LineWidth % 4) > 0)
	    && (State->Compressed == BI_RGB || State->Compressed == BI_BITFIELDS))
		State->LineWidth = (State->LineWidth / 4) * 4 + 4;

	if (State->pixbuf == NULL) {
		if (State->size_func) {
			gint width = State->Header.width;
			gint height = State->Header.height;

			(*State->size_func) (&width, &height, State->user_data);

			/* A zeroed size from the callback means "skip this
			 * image": finish cleanly without allocating. */
			if (width == 0 || height == 0) {
				State->read_state = READ_STATE_DONE;
				State->BufferSize = 0;
				return TRUE;
			}
		}

		/* RLE images need an alpha channel so undrawn pixels can stay
		 * transparent; 32bpp carries alpha natively. */
		if (State->Type == 32 ||
		    State->Compressed == BI_RLE4 ||
		    State->Compressed == BI_RLE8)
			State->pixbuf =
			    gdk_pixbuf_new(GDK_COLORSPACE_RGB, TRUE, 8,
			                   (gint) State->Header.width,
			                   (gint) State->Header.height);
		else
			State->pixbuf =
			    gdk_pixbuf_new(GDK_COLORSPACE_RGB, FALSE, 8,
			                   (gint) State->Header.width,
			                   (gint) State->Header.height);

		if (State->pixbuf == NULL) {
			g_set_error_literal (error,
			                     GDK_PIXBUF_ERROR,
			                     GDK_PIXBUF_ERROR_INSUFFICIENT_MEMORY,
			                     _("Not enough memory to load bitmap image"));
			State->read_state = READ_STATE_ERROR;
			return FALSE;
		}

		if (State->prepared_func != NULL)
			/* Notify the client that we are ready to go */
			(*State->prepared_func) (State->pixbuf, NULL, State->user_data);

		/* make all pixels initially transparent */
		if (State->Compressed == BI_RLE4 || State->Compressed == BI_RLE8) {
			memset (State->pixbuf->pixels, 0, State->pixbuf->rowstride * State->Header.height);
			State->compr.p = State->pixbuf->pixels
				+ State->pixbuf->rowstride * (State->Header.height- 1);
		}
	}

	State->BufferDone = 0;
	if (State->Type <= 8) {
		gint samples;

		State->read_state = READ_STATE_PALETTE;

		/* Allocate enough to hold the palette */
		samples = (State->Header.size == 12 ? 3 : 4);
		State->BufferSize = State->Header.n_colors * samples;

		/* Skip over everything between the palette and the data.
		   This protects us against a malicious BFH[10] value.
		 */
		State->BufferPadding = (lsb_32 (&BFH[10]) - 14 - State->Header.size) - State->BufferSize;

	} else if (State->Compressed == BI_RGB) {
		if (State->BufferSize < lsb_32 (&BFH[10]))
		{
			/* skip over padding between headers and image data */
			State->read_state = READ_STATE_HEADERS;
			State->BufferDone = State->BufferSize;
			State->BufferSize = lsb_32 (&BFH[10]);
		}
		else
		{
			State->read_state = READ_STATE_DATA;
			State->BufferSize = State->LineWidth;
		}
	} else if (State->Compressed == BI_BITFIELDS) {
		if (State->Header.size == 108 || State->Header.size == 124)
		{
			/* v4 and v5 have the bitmasks in the header */
			if (!decode_bitmasks (&BIH[40], State, error)) {
				State->read_state = READ_STATE_ERROR;
				return FALSE;
			}
		}
		else
		{
			State->read_state = READ_STATE_BITMASKS;
			State->BufferSize = 12;
		}
	} else {
		g_set_error_literal (error,
		                     GDK_PIXBUF_ERROR,
		                     GDK_PIXBUF_ERROR_CORRUPT_IMAGE,
		                     _("BMP image has bogus header data"));
		State->read_state = READ_STATE_ERROR;
		return FALSE;
	}

	if (!grow_buffer (State, error))
		return FALSE;

	return TRUE;
}
| 1
|
[
"CWE-119"
] |
gdk-pixbuf
|
b7bf6fbfb310fceba2d35d4de143b8d5ffdad990
| 230,198,375,811,667,530,000,000,000,000,000,000,000
| 251
|
bmp: Reject impossible palette size
bmp headers contain separate fields for the number of colors,
and the bit depth. Catch the impossible n_colors > 1 << depth
and error early, before it causes a out-of-bounds memory
access when decoding the colormap.
https://bugzilla.gnome.org/show_bug.cgi?id=758991
|
/* Compute the L4 (TCP/UDP) checksum of a TX packet in software and patch
 * it into the scatter-gather payload in place. */
static void vmxnet_tx_pkt_do_sw_csum(struct VmxnetTxPkt *pkt)
{
    struct iovec *frags = &pkt->vec[VMXNET_TX_PKT_L2HDR_FRAG];
    /* Fragment count excluding the virtio header fragment. */
    uint32_t frag_cnt = pkt->payload_frags + VMXNET_TX_PKT_PL_START_FRAG - 1;
    size_t csum_off = pkt->virt_hdr.csum_start + pkt->virt_hdr.csum_offset;
    struct ip_header *ip;
    uint32_t acc;
    uint16_t csum = 0;
    uint16_t csl;

    /* The checksum field must be zero while the sum is computed. */
    iov_from_buf(frags, frag_cnt, csum_off, &csum, sizeof csum);

    /* Sum the L4 data... */
    csl = pkt->payload_len;
    acc = net_checksum_add_iov(frags, frag_cnt, pkt->virt_hdr.csum_start, csl);

    /* ...then fold in the pseudo header derived from the IP header. */
    ip = pkt->vec[VMXNET_TX_PKT_L3HDR_FRAG].iov_base;
    acc += eth_calc_pseudo_hdr_csum(ip, csl);

    /* Finalize and write the checksum back into the packet. */
    csum = cpu_to_be16(net_checksum_finish(acc));
    iov_from_buf(frags, frag_cnt, csum_off, &csum, sizeof csum);
}
| 0
|
[
"CWE-20"
] |
qemu
|
a7278b36fcab9af469563bd7b9dadebe2ae25e48
| 82,741,442,532,577,900,000,000,000,000,000,000,000
| 28
|
net/vmxnet3: Refine l2 header validation
Validation of l2 header length assumed minimal packet size as
eth_header + 2 * vlan_header regardless of the actual protocol.
This caused crash for valid non-IP packets shorter than 22 bytes, as
'tx_pkt->packet_type' hasn't been assigned for such packets, and
'vmxnet3_on_tx_done_update_stats()' expects it to be properly set.
Refine header length validation in 'vmxnet_tx_pkt_parse_headers'.
Check its return value during packet processing flow.
As a side effect, in case IPv4 and IPv6 header validation failure,
corrupt packets will be dropped.
Signed-off-by: Dana Rubin <[email protected]>
Signed-off-by: Shmulik Ladkani <[email protected]>
Signed-off-by: Jason Wang <[email protected]>
|
/*
 * LED-class get_brightness callback for the IMS PCU backlight.
 *
 * Queries the device for its current brightness; on a communication
 * failure a warning is logged and LED_OFF is reported instead of an
 * error, since the callback cannot fail.
 *
 * NOTE(review): the closing brace of this function is missing in this
 * extract — the definition appears truncated by the dataset export.
 */
static enum led_brightness
ims_pcu_backlight_get_brightness(struct led_classdev *cdev)
{
	struct ims_pcu_backlight *backlight =
			container_of(cdev, struct ims_pcu_backlight, cdev);
	struct ims_pcu *pcu =
			container_of(backlight, struct ims_pcu, backlight);
	int brightness;
	int error;

	/* cmd_mutex serializes access to the shared command buffer. */
	mutex_lock(&pcu->cmd_mutex);

	error = ims_pcu_execute_query(pcu, GET_BRIGHTNESS);
	if (error) {
		dev_warn(pcu->dev,
			 "Failed to get current brightness, error: %d\n",
			 error);
		/* Assume the LED is OFF */
		brightness = LED_OFF;
	} else {
		/* Device replies with a little-endian 16-bit value at the
		 * data offset of the command buffer. */
		brightness =
			get_unaligned_le16(&pcu->cmd_buf[IMS_PCU_DATA_OFFSET]);
	}

	mutex_unlock(&pcu->cmd_mutex);

	return brightness;
| 0
|
[
"CWE-703"
] |
linux
|
a0ad220c96692eda76b2e3fd7279f3dcd1d8a8ff
| 75,858,647,199,871,440,000,000,000,000,000,000,000
| 27
|
Input: ims-pcu - sanity check against missing interfaces
A malicious device missing interface can make the driver oops.
Add sanity checking.
Signed-off-by: Oliver Neukum <[email protected]>
CC: [email protected]
Signed-off-by: Dmitry Torokhov <[email protected]>
|
/*
 * Reply handler for the AddMatch method call that installs a client bus
 * match.  This runs as the handler of the temporary "install" slot; on
 * completion it either forwards the reply to the user's install callback
 * (impersonating the match slot while doing so) or applies generic
 * failure handling, then releases the install slot.
 */
static int add_match_callback(
                sd_bus_message *m,
                void *userdata,
                sd_bus_error *ret_error) {

        sd_bus_slot *match_slot = userdata;
        bool failed = false;
        int r;

        assert(m);
        assert(match_slot);

        /* Pin the match slot so callbacks below cannot free it under us. */
        sd_bus_slot_ref(match_slot);

        if (sd_bus_message_is_method_error(m, NULL)) {
                log_debug_errno(sd_bus_message_get_errno(m),
                                "Unable to add match %s, failing connection: %s",
                                match_slot->match_callback.match_string,
                                sd_bus_message_get_error(m)->message);
                failed = true;
        } else
                log_debug("Match %s successfully installed.", match_slot->match_callback.match_string);

        if (match_slot->match_callback.install_callback) {
                sd_bus *bus;

                bus = sd_bus_message_get_bus(m);

                /* This function has been called as slot handler, and we want to call another slot handler. Let's
                 * update the slot callback metadata temporarily with our own data, and then revert back to the old
                 * values. */
                assert(bus->current_slot == match_slot->match_callback.install_slot);
                assert(bus->current_handler == add_match_callback);
                assert(bus->current_userdata == userdata);
                bus->current_slot = match_slot;
                bus->current_handler = match_slot->match_callback.install_callback;
                bus->current_userdata = match_slot->userdata;
                r = match_slot->match_callback.install_callback(m, match_slot->userdata, ret_error);
                bus->current_slot = match_slot->match_callback.install_slot;
                bus->current_handler = add_match_callback;
                bus->current_userdata = userdata;
        } else {
                if (failed) /* Generic failure handling: destroy the connection */
                        bus_enter_closing(sd_bus_message_get_bus(m));

                r = 1;
        }

        /* We don't need the install method reply slot anymore, let's free it */
        match_slot->match_callback.install_slot = sd_bus_slot_unref(match_slot->match_callback.install_slot);

        /* A failed floating match has no owner to clean it up; detach it. */
        if (failed && match_slot->floating)
                bus_slot_disconnect(match_slot, true);

        sd_bus_slot_unref(match_slot);

        return r;
}
| 0
|
[
"CWE-416"
] |
systemd
|
1068447e6954dc6ce52f099ed174c442cb89ed54
| 143,537,134,632,593,230,000,000,000,000,000,000,000
| 63
|
sd-bus: introduce API for re-enqueuing incoming messages
When authorizing via PolicyKit we want to process incoming method calls
twice: once to process and figure out that we need PK authentication,
and a second time after we aquired PK authentication to actually execute
the operation. With this new call sd_bus_enqueue_for_read() we have a
way to put an incoming message back into the read queue for this
purpose.
This might have other uses too, for example debugging.
|
/*
 * Normalize and (optionally) policy-check the transited-realm encoding of
 * a ticket being issued by the KDC.
 *
 * Decodes the X.500-compressed transited set from the presented TGT,
 * appends the TGT's realm when it is an intermediate hop, optionally runs
 * the cross-realm transit policy check, and re-encodes the result into
 * the new ticket's EncTicketPart.
 */
fix_transited_encoding(krb5_context context,
		       krb5_kdc_configuration *config,
		       krb5_boolean check_policy,
		       const TransitedEncoding *tr,
		       EncTicketPart *et,
		       const char *client_realm,
		       const char *server_realm,
		       const char *tgt_realm)
{
    krb5_error_code ret = 0;
    char **realms, **tmp;
    unsigned int num_realms;
    size_t i;

    switch (tr->tr_type) {
    case DOMAIN_X500_COMPRESS:
	break;
    case 0:
	/*
	 * Allow empty content of type 0 because that is what Microsoft
	 * generates in their TGT.
	 */
	if (tr->contents.length == 0)
	    break;
	kdc_log(context, config, 0,
		"Transited type 0 with non empty content");
	return KRB5KDC_ERR_TRTYPE_NOSUPP;
    default:
	kdc_log(context, config, 0,
		"Unknown transited type: %u", tr->tr_type);
	return KRB5KDC_ERR_TRTYPE_NOSUPP;
    }

    ret = krb5_domain_x500_decode(context,
				  tr->contents,
				  &realms,
				  &num_realms,
				  client_realm,
				  server_realm);
    if(ret){
	krb5_warn(context, ret,
		  "Decoding transited encoding");
	return ret;
    }

    /*
     * NOTE(review): CVE-2017-6594 concerns exactly this condition — if the
     * previous-hop (TGT) realm is skipped whenever it matches the server
     * realm, the issued ticket's transit path can be incomplete, allowing
     * capath policy bypass.  Confirm this matches the fixed upstream
     * logic (heimdal commit b1e6991) before relying on it.
     */
    if(strcmp(client_realm, tgt_realm) && strcmp(server_realm, tgt_realm)) {
	/* not us, so add the previous realm to transited set */
	if (num_realms + 1 > UINT_MAX/sizeof(*realms)) {
	    ret = ERANGE;
	    goto free_realms;
	}
	tmp = realloc(realms, (num_realms + 1) * sizeof(*realms));
	if(tmp == NULL){
	    ret = ENOMEM;
	    goto free_realms;
	}
	realms = tmp;
	realms[num_realms] = strdup(tgt_realm);
	if(realms[num_realms] == NULL){
	    ret = ENOMEM;
	    goto free_realms;
	}
	num_realms++;
    }
    if(num_realms == 0) {
	if(strcmp(client_realm, server_realm))
	    kdc_log(context, config, 0,
		    "cross-realm %s -> %s", client_realm, server_realm);
    } else {
	/* Log the full transit chain as a comma-separated list. */
	size_t l = 0;
	char *rs;
	for(i = 0; i < num_realms; i++)
	    l += strlen(realms[i]) + 2;
	rs = malloc(l);
	if(rs != NULL) {
	    *rs = '\0';
	    for(i = 0; i < num_realms; i++) {
		if(i > 0)
		    strlcat(rs, ", ", l);
		strlcat(rs, realms[i], l);
	    }
	    kdc_log(context, config, 0,
		    "cross-realm %s -> %s via [%s]",
		    client_realm, server_realm, rs);
	    free(rs);
	}
    }
    if(check_policy) {
	ret = krb5_check_transited(context, client_realm,
				   server_realm,
				   realms, num_realms, NULL);
	if(ret) {
	    krb5_warn(context, ret, "cross-realm %s -> %s",
		      client_realm, server_realm);
	    goto free_realms;
	}
	et->flags.transited_policy_checked = 1;
    }
    et->transited.tr_type = DOMAIN_X500_COMPRESS;
    ret = krb5_domain_x500_encode(realms, num_realms, &et->transited.contents);
    if(ret)
	krb5_warn(context, ret, "Encoding transited encoding");
  free_realms:
    for(i = 0; i < num_realms; i++)
	free(realms[i]);
    free(realms);
    return ret;
}
| 1
|
[
"CWE-284",
"CWE-295"
] |
heimdal
|
b1e699103f08d6a0ca46a122193c9da65f6cf837
| 151,159,690,953,385,700,000,000,000,000,000,000,000
| 107
|
Fix transit path validation CVE-2017-6594
Commit f469fc6 (2010-10-02) inadvertently caused the previous hop realm
to not be added to the transit path of issued tickets. This may, in
some cases, enable bypass of capath policy in Heimdal versions 1.5
through 7.2.
Note, this may break sites that rely on the bug. With the bug some
incomplete [capaths] worked, that should not have. These may now break
authentication in some cross-realm configurations.
|
/*
 * Enable PASID (process address space ID) support in the context entry of
 * the device behind @sdev, so that requests-with-PASID are permitted, and
 * turn on device IOTLB/ATS bookkeeping for the SVM device descriptor.
 *
 * Returns 0 on success, -EINVAL when the device has no valid domain or
 * does not support PASIDs.
 */
int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sdev)
{
	struct device_domain_info *info;
	struct context_entry *context;
	struct dmar_domain *domain;
	unsigned long flags;
	u64 ctx_lo;
	int ret;

	domain = get_valid_domain_for_dev(sdev->dev);
	if (!domain)
		return -EINVAL;

	/* Lock order: device_domain_lock first, then the IOMMU lock. */
	spin_lock_irqsave(&device_domain_lock, flags);
	spin_lock(&iommu->lock);

	ret = -EINVAL;
	info = sdev->dev->archdata.iommu;
	if (!info || !info->pasid_supported)
		goto out;

	context = iommu_context_addr(iommu, info->bus, info->devfn, 0);
	if (WARN_ON(!context))
		goto out;

	ctx_lo = context[0].lo;

	sdev->did = domain->iommu_did[iommu->seq_id];
	sdev->sid = PCI_DEVID(info->bus, info->devfn);

	if (!(ctx_lo & CONTEXT_PASIDE)) {
		/* Program the PASID state/translation tables into the second
		 * half of the extended context entry before enabling them. */
		if (iommu->pasid_state_table)
			context[1].hi = (u64)virt_to_phys(iommu->pasid_state_table);
		context[1].lo = (u64)virt_to_phys(info->pasid_table->table) |
			intel_iommu_get_pts(sdev->dev);

		/* Ensure the table pointers are visible before flipping the
		 * enable bits in context[0] below. */
		wmb();

		/* CONTEXT_TT_MULTI_LEVEL and CONTEXT_TT_DEV_IOTLB are both
		 * extended to permit requests-with-PASID if the PASIDE bit
		 * is set. which makes sense. For CONTEXT_TT_PASS_THROUGH,
		 * however, the PASIDE bit is ignored and requests-with-PASID
		 * are unconditionally blocked. Which makes less sense.
		 * So convert from CONTEXT_TT_PASS_THROUGH to one of the new
		 * "guest mode" translation types depending on whether ATS
		 * is available or not. Annoyingly, we can't use the new
		 * modes *unless* PASIDE is set. */
		if ((ctx_lo & CONTEXT_TT_MASK) == (CONTEXT_TT_PASS_THROUGH << 2)) {
			ctx_lo &= ~CONTEXT_TT_MASK;
			if (info->ats_supported)
				ctx_lo |= CONTEXT_TT_PT_PASID_DEV_IOTLB << 2;
			else
				ctx_lo |= CONTEXT_TT_PT_PASID << 2;
		}
		ctx_lo |= CONTEXT_PASIDE;
		if (iommu->pasid_state_table)
			ctx_lo |= CONTEXT_DINVE;
		if (info->pri_supported)
			ctx_lo |= CONTEXT_PRS;
		context[0].lo = ctx_lo;
		wmb();
		/* Invalidate any cached copy of the old context entry. */
		iommu->flush.flush_context(iommu, sdev->did, sdev->sid,
					   DMA_CCMD_MASK_NOBIT,
					   DMA_CCMD_DEVICE_INVL);
	}

	/* Enable PASID support in the device, if it wasn't already */
	if (!info->pasid_enabled)
		iommu_enable_dev_iotlb(info);

	if (info->ats_enabled) {
		sdev->dev_iotlb = 1;
		sdev->qdep = info->ats_qdep;
		/* A queue depth at/above the invalidation maximum is encoded
		 * as 0 (unlimited) in device-IOTLB invalidation requests. */
		if (sdev->qdep >= QI_DEV_EIOTLB_MAX_INVS)
			sdev->qdep = 0;
	}
	ret = 0;

 out:
	spin_unlock(&iommu->lock);
	spin_unlock_irqrestore(&device_domain_lock, flags);

	return ret;
}
| 0
|
[] |
linux
|
fb58fdcd295b914ece1d829b24df00a17a9624bc
| 69,790,706,395,735,320,000,000,000,000,000,000,000
| 83
|
iommu/vt-d: Do not enable ATS for untrusted devices
Currently Linux automatically enables ATS (Address Translation Service)
for any device that supports it (and IOMMU is turned on). ATS is used to
accelerate DMA access as the device can cache translations locally so
there is no need to do full translation on IOMMU side. However, as
pointed out in [1] ATS can be used to bypass IOMMU based security
completely by simply sending PCIe read/write transaction with AT
(Address Translation) field set to "translated".
To mitigate this modify the Intel IOMMU code so that it does not enable
ATS for any device that is marked as being untrusted. In case this turns
out to cause performance issues we may selectively allow ATS based on
user decision but currently use big hammer and disable it completely to
be on the safe side.
[1] https://www.repository.cam.ac.uk/handle/1810/274352
Signed-off-by: Mika Westerberg <[email protected]>
Reviewed-by: Ashok Raj <[email protected]>
Reviewed-by: Joerg Roedel <[email protected]>
Acked-by: Joerg Roedel <[email protected]>
|
/*
 * Set the address-family-only option on an RDMA CM identifier.
 *
 * Only permitted while the id is IDLE or merely address-bound; in any
 * later state the option can no longer be changed and -EINVAL is
 * returned.
 */
int rdma_set_afonly(struct rdma_cm_id *id, int afonly)
{
	struct rdma_id_private *id_priv =
		container_of(id, struct rdma_id_private, id);
	unsigned long flags;
	int ret = -EINVAL;

	spin_lock_irqsave(&id_priv->lock, flags);
	if (id_priv->state == RDMA_CM_IDLE ||
	    id_priv->state == RDMA_CM_ADDR_BOUND) {
		id_priv->options |= (1 << CMA_OPTION_AFONLY);
		id_priv->afonly = afonly;
		ret = 0;
	}
	spin_unlock_irqrestore(&id_priv->lock, flags);

	return ret;
}
| 0
|
[
"CWE-416"
] |
linux
|
bc0bdc5afaa740d782fbf936aaeebd65e5c2921d
| 32,080,033,391,311,630,000,000,000,000,000,000,000
| 18
|
RDMA/cma: Do not change route.addr.src_addr.ss_family
If the state is not idle then rdma_bind_addr() will immediately fail and
no change to global state should happen.
For instance if the state is already RDMA_CM_LISTEN then this will corrupt
the src_addr and would cause the test in cma_cancel_operation():
if (cma_any_addr(cma_src_addr(id_priv)) && !id_priv->cma_dev)
To view a mangled src_addr, eg with a IPv6 loopback address but an IPv4
family, failing the test.
This would manifest as this trace from syzkaller:
BUG: KASAN: use-after-free in __list_add_valid+0x93/0xa0 lib/list_debug.c:26
Read of size 8 at addr ffff8881546491e0 by task syz-executor.1/32204
CPU: 1 PID: 32204 Comm: syz-executor.1 Not tainted 5.12.0-rc8-syzkaller #0
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
Call Trace:
__dump_stack lib/dump_stack.c:79 [inline]
dump_stack+0x141/0x1d7 lib/dump_stack.c:120
print_address_description.constprop.0.cold+0x5b/0x2f8 mm/kasan/report.c:232
__kasan_report mm/kasan/report.c:399 [inline]
kasan_report.cold+0x7c/0xd8 mm/kasan/report.c:416
__list_add_valid+0x93/0xa0 lib/list_debug.c:26
__list_add include/linux/list.h:67 [inline]
list_add_tail include/linux/list.h:100 [inline]
cma_listen_on_all drivers/infiniband/core/cma.c:2557 [inline]
rdma_listen+0x787/0xe00 drivers/infiniband/core/cma.c:3751
ucma_listen+0x16a/0x210 drivers/infiniband/core/ucma.c:1102
ucma_write+0x259/0x350 drivers/infiniband/core/ucma.c:1732
vfs_write+0x28e/0xa30 fs/read_write.c:603
ksys_write+0x1ee/0x250 fs/read_write.c:658
do_syscall_64+0x2d/0x70 arch/x86/entry/common.c:46
entry_SYSCALL_64_after_hwframe+0x44/0xae
Which is indicating that an rdma_id_private was destroyed without doing
cma_cancel_listens().
Instead of trying to re-use the src_addr memory to indirectly create an
any address build one explicitly on the stack and bind to that as any
other normal flow would do.
Link: https://lore.kernel.org/r/[email protected]
Cc: [email protected]
Fixes: 732d41c545bb ("RDMA/cma: Make the locking for automatic state transition more clear")
Reported-by: [email protected]
Tested-by: Hao Sun <[email protected]>
Reviewed-by: Leon Romanovsky <[email protected]>
Signed-off-by: Jason Gunthorpe <[email protected]>
|
/*
 * Build an XFRM_MSG_MIGRATE netlink message into @skb describing the
 * migration of @num_migrate SA templates for the policy selected by
 * (@sel, @dir, @type), optionally carrying the k-address @k.
 *
 * Returns the final message length from nlmsg_end(), or a negative errno;
 * on any mid-build failure the partially written message is cancelled.
 */
static int build_migrate(struct sk_buff *skb, const struct xfrm_migrate *m,
			 int num_migrate, const struct xfrm_kmaddress *k,
			 const struct xfrm_selector *sel, u8 dir, u8 type)
{
	const struct xfrm_migrate *mp;
	struct xfrm_userpolicy_id *pol_id;
	struct nlmsghdr *nlh;
	int i, err;

	nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_MIGRATE, sizeof(*pol_id), 0);
	if (nlh == NULL)
		return -EMSGSIZE;

	pol_id = nlmsg_data(nlh);
	/* copy data from selector, dir, and type to the pol_id */
	memset(pol_id, 0, sizeof(*pol_id));
	memcpy(&pol_id->sel, sel, sizeof(pol_id->sel));
	pol_id->dir = dir;

	/* The k-address attribute is optional. */
	if (k != NULL) {
		err = copy_to_user_kmaddress(k, skb);
		if (err)
			goto out_cancel;
	}
	err = copy_to_user_policy_type(type, skb);
	if (err)
		goto out_cancel;
	/* One attribute per migrated SA template. */
	for (i = 0, mp = m ; i < num_migrate; i++, mp++) {
		err = copy_to_user_migrate(mp, skb);
		if (err)
			goto out_cancel;
	}

	return nlmsg_end(skb, nlh);

out_cancel:
	nlmsg_cancel(skb, nlh);
	return err;
}
| 0
|
[
"CWE-264"
] |
net
|
90f62cf30a78721641e08737bda787552428061e
| 58,380,896,344,694,220,000,000,000,000,000,000,000
| 39
|
net: Use netlink_ns_capable to verify the permisions of netlink messages
It is possible by passing a netlink socket to a more privileged
executable and then to fool that executable into writing to the socket
data that happens to be valid netlink message to do something that
privileged executable did not intend to do.
To keep this from happening replace bare capable and ns_capable calls
with netlink_capable, netlink_net_calls and netlink_ns_capable calls.
Which act the same as the previous calls except they verify that the
opener of the socket had the desired permissions as well.
Reported-by: Andy Lutomirski <[email protected]>
Signed-off-by: "Eric W. Biederman" <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
// Returns true when this rewrite applies to `node`: it must have exactly
// one input, be an idempotent op, and not be in the preserve set.
bool IsSupported(const NodeDef* node) const override {
  if (node->input_size() != 1) {
    return false;
  }
  if (!IsIdempotent(*node)) {
    return false;
  }
  return !IsInPreserveSet(*node);
}
| 0
|
[
"CWE-476"
] |
tensorflow
|
e6340f0665d53716ef3197ada88936c2a5f7a2d3
| 293,968,305,921,970,400,000,000,000,000,000,000,000
| 4
|
Handle a special grappler case resulting in crash.
It might happen that a malformed input could be used to trick Grappler into trying to optimize a node with no inputs. This, in turn, would produce a null pointer dereference and a segfault.
PiperOrigin-RevId: 369242852
Change-Id: I2e5cbe7aec243d34a6d60220ac8ac9b16f136f6b
|
/*
 * round_pipe_size - normalise a requested pipe capacity.
 * @size: requested size in bytes
 *
 * Returns 0 when @size exceeds the 2 GiB limit, PAGE_SIZE when the
 * request is below the POSIX-mandated minimum of one page, and the
 * request rounded up to the next power of two otherwise.
 */
unsigned int round_pipe_size(unsigned long size)
{
	/* Minimum pipe size, as required by POSIX */
	if (size < PAGE_SIZE)
		return PAGE_SIZE;

	/* Anything above 2^31 cannot be represented; reject it. */
	if (size > (1U << 31))
		return 0;

	return roundup_pow_of_two(size);
}
| 0
|
[
"CWE-362"
] |
linux
|
189b0ddc245139af81198d1a3637cac74f96e13a
| 207,039,318,708,498,160,000,000,000,000,000,000,000
| 11
|
pipe: Fix missing lock in pipe_resize_ring()
pipe_resize_ring() needs to take the pipe->rd_wait.lock spinlock to
prevent post_one_notification() from trying to insert into the ring
whilst the ring is being replaced.
The occupancy check must be done after the lock is taken, and the lock
must be taken after the new ring is allocated.
The bug can lead to an oops looking something like:
BUG: KASAN: use-after-free in post_one_notification.isra.0+0x62e/0x840
Read of size 4 at addr ffff88801cc72a70 by task poc/27196
...
Call Trace:
post_one_notification.isra.0+0x62e/0x840
__post_watch_notification+0x3b7/0x650
key_create_or_update+0xb8b/0xd20
__do_sys_add_key+0x175/0x340
__x64_sys_add_key+0xbe/0x140
do_syscall_64+0x5c/0xc0
entry_SYSCALL_64_after_hwframe+0x44/0xae
Reported by Selim Enes Karaduman @Enesdex working with Trend Micro Zero
Day Initiative.
Fixes: c73be61cede5 ("pipe: Add general notification queue support")
Reported-by: [email protected] # ZDI-CAN-17291
Signed-off-by: David Howells <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
/*
 * Validate each row of the AssemblyRef (0x23) metadata table of 'image'.
 *
 * Under MONO_VERIFY_ERROR the flags, public-key blob, culture string and
 * hash-value blob of every row are checked; a formatted message is
 * appended to 'list' via ADD_ERROR for each violation.  The
 * MONO_VERIFY_WARNING duplicate-row pass is an unimplemented stub.
 *
 * Returns the (possibly extended) error list.
 */
verify_assemblyref_table (MonoImage *image, GSList *list, int level)
{
	MonoTableInfo *t = &image->tables [MONO_TABLE_ASSEMBLYREF];
	guint32 cols [MONO_ASSEMBLYREF_SIZE];
	const char *p;
	int i;

	if (level & MONO_VERIFY_ERROR) {
		for (i = 0; i < t->rows; ++i) {
			mono_metadata_decode_row (t, i, cols, MONO_ASSEMBLYREF_SIZE);
			if (!is_valid_assembly_flags (cols [MONO_ASSEMBLYREF_FLAGS]))
				/* Fix: print the AssemblyRef flags column.  The previous code
				 * indexed 'cols' with MONO_ASSEMBLY_FLAGS, a column constant of
				 * the unrelated Assembly table, so the wrong value was shown. */
				ADD_ERROR (list, g_strdup_printf ("Invalid flags in assemblyref row %d: 0x%x", i + 1, cols [MONO_ASSEMBLYREF_FLAGS]));

			if (!is_valid_blob (image, cols [MONO_ASSEMBLYREF_PUBLIC_KEY], FALSE))
				ADD_ERROR (list, g_strdup_printf ("AssemblyRef public key in row %d is an invalid index", i + 1));

			if (!(p = is_valid_string (image, cols [MONO_ASSEMBLYREF_CULTURE], FALSE))) {
				ADD_ERROR (list, g_strdup_printf ("AssemblyRef culture in row %d is invalid", i + 1));
			} else {
				if (!is_valid_culture (p))
					ADD_ERROR (list, g_strdup_printf ("AssemblyRef culture `%s' in row %d is invalid", p, i + 1));
			}

			if (cols [MONO_ASSEMBLYREF_HASH_VALUE] && !is_valid_blob (image, cols [MONO_ASSEMBLYREF_HASH_VALUE], TRUE))
				ADD_ERROR (list, g_strdup_printf ("AssemblyRef hash value in row %d is invalid or not null and empty", i + 1));
		}
	}
	if (level & MONO_VERIFY_WARNING) {
		/* check for duplicated rows */
		for (i = 0; i < t->rows; ++i) {
			/* TODO: duplicate-row detection is not implemented. */
		}
	}
	return list;
}
| 0
|
[
"CWE-20"
] |
mono
|
4905ef1130feb26c3150b28b97e4a96752e0d399
| 181,797,512,878,654,200,000,000,000,000,000,000,000
| 34
|
Handle invalid instantiation of generic methods.
* verify.c: Add new function to internal verifier API to check
method instantiations.
* reflection.c (mono_reflection_bind_generic_method_parameters):
Check the instantiation before returning it.
Fixes #655847
|
/*
 * ksys_unshare - implement unshare(2): detach parts of the current
 * task's shared execution context.
 * @unshare_flags: CLONE_* bits naming what to unshare
 *
 * Some flags imply others (documented inline below).  All new copies of
 * the requested structures are allocated before anything is installed,
 * so a failure part-way through leaves the calling task unchanged.
 *
 * Returns 0 on success or a negative errno.
 */
int ksys_unshare(unsigned long unshare_flags)
{
	struct fs_struct *fs, *new_fs = NULL;
	struct files_struct *fd, *new_fd = NULL;
	struct cred *new_cred = NULL;
	struct nsproxy *new_nsproxy = NULL;
	int do_sysvsem = 0;
	int err;

	/*
	 * If unsharing a user namespace must also unshare the thread group
	 * and unshare the filesystem root and working directories.
	 */
	if (unshare_flags & CLONE_NEWUSER)
		unshare_flags |= CLONE_THREAD | CLONE_FS;
	/*
	 * If unsharing vm, must also unshare signal handlers.
	 */
	if (unshare_flags & CLONE_VM)
		unshare_flags |= CLONE_SIGHAND;
	/*
	 * If unsharing a signal handlers, must also unshare the signal queues.
	 */
	if (unshare_flags & CLONE_SIGHAND)
		unshare_flags |= CLONE_THREAD;
	/*
	 * If unsharing namespace, must also unshare filesystem information.
	 */
	if (unshare_flags & CLONE_NEWNS)
		unshare_flags |= CLONE_FS;

	err = check_unshare_flags(unshare_flags);
	if (err)
		goto bad_unshare_out;
	/*
	 * CLONE_NEWIPC must also detach from the undolist: after switching
	 * to a new ipc namespace, the semaphore arrays from the old
	 * namespace are unreachable.
	 */
	if (unshare_flags & (CLONE_NEWIPC|CLONE_SYSVSEM))
		do_sysvsem = 1;
	/* Allocation phase: nothing is visible to the task yet. */
	err = unshare_fs(unshare_flags, &new_fs);
	if (err)
		goto bad_unshare_out;
	err = unshare_fd(unshare_flags, &new_fd);
	if (err)
		goto bad_unshare_cleanup_fs;
	err = unshare_userns(unshare_flags, &new_cred);
	if (err)
		goto bad_unshare_cleanup_fd;
	err = unshare_nsproxy_namespaces(unshare_flags, &new_nsproxy,
					 new_cred, new_fs);
	if (err)
		goto bad_unshare_cleanup_cred;

	/* Commit phase: install the new structures on 'current'. */
	if (new_fs || new_fd || do_sysvsem || new_cred || new_nsproxy) {
		if (do_sysvsem) {
			/*
			 * CLONE_SYSVSEM is equivalent to sys_exit().
			 */
			exit_sem(current);
		}
		if (unshare_flags & CLONE_NEWIPC) {
			/* Orphan segments in old ns (see sem above). */
			exit_shm(current);
			shm_init_task(current);
		}

		if (new_nsproxy)
			switch_task_namespaces(current, new_nsproxy);

		task_lock(current);

		if (new_fs) {
			fs = current->fs;
			spin_lock(&fs->lock);
			current->fs = new_fs;
			/* Drop our reference; keep 'fs' for freeing below only
			 * if we were its last user. */
			if (--fs->users)
				new_fs = NULL;
			else
				new_fs = fs;
			spin_unlock(&fs->lock);
		}

		if (new_fd) {
			/* Swap in the new table; the old one is released below. */
			fd = current->files;
			current->files = new_fd;
			new_fd = fd;
		}

		task_unlock(current);

		if (new_cred) {
			/* Install the new user namespace */
			commit_creds(new_cred);
			new_cred = NULL;
		}
	}

	perf_event_namespaces(current);

	/*
	 * Cleanup: on error this frees the unused new structures; on
	 * success the new_* pointers are NULL or hold the displaced old
	 * structures that must now be released.
	 */
bad_unshare_cleanup_cred:
	if (new_cred)
		put_cred(new_cred);
bad_unshare_cleanup_fd:
	if (new_fd)
		put_files_struct(new_fd);

bad_unshare_cleanup_fs:
	if (new_fs)
		free_fs_struct(new_fs);

bad_unshare_out:
	return err;
}
| 0
|
[
"CWE-362"
] |
linux
|
7b55851367136b1efd84d98fea81ba57a98304cf
| 238,891,115,343,844,270,000,000,000,000,000,000,000
| 115
|
fork: record start_time late
This changes the fork(2) syscall to record the process start_time after
initializing the basic task structure but still before making the new
process visible to user-space.
Technically, we could record the start_time anytime during fork(2). But
this might lead to scenarios where a start_time is recorded long before
a process becomes visible to user-space. For instance, with
userfaultfd(2) and TLS, user-space can delay the execution of fork(2)
for an indefinite amount of time (and will, if this causes network
access, or similar).
By recording the start_time late, it much closer reflects the point in
time where the process becomes live and can be observed by other
processes.
Lastly, this makes it much harder for user-space to predict and control
the start_time they get assigned. Previously, user-space could fork a
process and stall it in copy_thread_tls() before its pid is allocated,
but after its start_time is recorded. This can be misused to later-on
cycle through PIDs and resume the stalled fork(2) yielding a process
that has the same pid and start_time as a process that existed before.
This can be used to circumvent security systems that identify processes
by their pid+start_time combination.
Even though user-space was always aware that start_time recording is
flaky (but several projects are known to still rely on start_time-based
identification), changing the start_time to be recorded late will help
mitigate existing attacks and make it much harder for user-space to
control the start_time a process gets assigned.
Reported-by: Jann Horn <[email protected]>
Signed-off-by: Tom Gundersen <[email protected]>
Signed-off-by: David Herrmann <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
// Translate a list of zone names into their zone ids, silently dropping
// any name that does not resolve to a known zone.
set<rgw_zone_id> get_zone_ids_from_names(rgw::sal::RGWRadosStore *store,
                                         const vector<string>& zone_names) const {
  set<rgw_zone_id> result;

  for (const auto& zone_name : zone_names) {
    rgw_zone_id zone_id;
    if (store->svc()->zone->find_zone_id_by_name(zone_name, &zone_id)) {
      result.insert(std::move(zone_id));
    }
  }

  return result;
}
| 0
|
[
"CWE-79"
] |
ceph
|
8f90658c731499722d5f4393c8ad70b971d05f77
| 109,172,705,497,062,400,000,000,000,000,000,000,000
| 13
|
rgw: reject unauthenticated response-header actions
Signed-off-by: Matt Benjamin <[email protected]>
Reviewed-by: Casey Bodley <[email protected]>
(cherry picked from commit d8dd5e513c0c62bbd7d3044d7e2eddcd897bd400)
|
/*
 * Check that a proposed guest CR0 value keeps every bit that must remain
 * set while nested VMX is active.  When unrestricted-guest execution is
 * both supported by the vCPU and enabled in vmcs12, PE and PG may legally
 * be cleared, so they are removed from the required mask first.
 */
static bool nested_cr0_valid(struct kvm_vcpu *vcpu, unsigned long val)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
	unsigned long required = VMXON_CR0_ALWAYSON;

	if ((to_vmx(vcpu)->nested.nested_vmx_secondary_ctls_high &
	     SECONDARY_EXEC_UNRESTRICTED_GUEST) &&
	    nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST))
		required &= ~(X86_CR0_PE | X86_CR0_PG);

	return (val & required) == required;
}
| 0
|
[
"CWE-284",
"CWE-264"
] |
linux
|
3ce424e45411cf5a13105e0386b6ecf6eeb4f66f
| 174,643,697,609,586,960,000,000,000,000,000,000,000
| 11
|
kvm:vmx: more complete state update on APICv on/off
The function to update APICv on/off state (in particular, to deactivate
it when enabling Hyper-V SynIC) is incomplete: it doesn't adjust
APICv-related fields among secondary processor-based VM-execution
controls. As a result, Windows 2012 guests get stuck when SynIC-based
auto-EOI interrupt intersected with e.g. an IPI in the guest.
In addition, the MSR intercept bitmap isn't updated every time "virtualize
x2APIC mode" is toggled. This path can only be triggered by a malicious
guest, because Windows didn't use x2APIC but rather their own synthetic
APIC access MSRs; however a guest running in a SynIC-enabled VM could
switch to x2APIC and thus obtain direct access to host APIC MSRs
(CVE-2016-4440).
The patch fixes those omissions.
Signed-off-by: Roman Kagan <[email protected]>
Reported-by: Steve Rutherford <[email protected]>
Reported-by: Yang Zhang <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
|
/*
 * select_fallback_rq - pick a runnable CPU for @p when @cpu is unusable.
 * @cpu: the CPU the task was targeted at (its node may be offline)
 * @p:   the task needing a runqueue
 *
 * Preference order:
 *   1. an allowed, online and active CPU on the same NUMA node as @cpu;
 *   2. any allowed, online and active CPU;
 *   3. progressively widen the allowed mask -- first via the cpuset
 *      fallback (if CONFIG_CPUSETS), then to every possible CPU -- and
 *      retry.  Reaching the 'fail' state a second time is a BUG().
 */
static int select_fallback_rq(int cpu, struct task_struct *p)
{
	int nid = cpu_to_node(cpu);
	const struct cpumask *nodemask = NULL;
	enum { cpuset, possible, fail } state = cpuset;
	int dest_cpu;

	/*
	 * If the node that the cpu is on has been offlined, cpu_to_node()
	 * will return -1. There is no cpu on the node, and we should
	 * select the cpu on the other node.
	 */
	if (nid != -1) {
		nodemask = cpumask_of_node(nid);

		/* Look for allowed, online CPU in same node. */
		for_each_cpu(dest_cpu, nodemask) {
			if (!cpu_online(dest_cpu))
				continue;
			if (!cpu_active(dest_cpu))
				continue;
			if (cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
				return dest_cpu;
		}
	}

	for (;;) {
		/* Any allowed, online CPU? */
		for_each_cpu(dest_cpu, tsk_cpus_allowed(p)) {
			if (!cpu_online(dest_cpu))
				continue;
			if (!cpu_active(dest_cpu))
				continue;
			goto out;
		}

		/* No more Mr. Nice Guy. */
		switch (state) {
		case cpuset:
			if (IS_ENABLED(CONFIG_CPUSETS)) {
				/* Widen to the cpuset's allowed CPUs and retry. */
				cpuset_cpus_allowed_fallback(p);
				state = possible;
				break;
			}
			/* fall-through */
		case possible:
			/* Last resort: allow every possible CPU and retry. */
			do_set_cpus_allowed(p, cpu_possible_mask);
			state = fail;
			break;

		case fail:
			BUG();
			break;
		}
	}

out:
	if (state != cpuset) {
		/*
		 * Don't tell them about moving exiting tasks or
		 * kernel threads (both mm NULL), since they never
		 * leave kernel.
		 */
		if (p->mm && printk_ratelimit()) {
			printk_deferred("process %d (%s) no longer affine to cpu%d\n",
					task_pid_nr(p), p->comm, cpu);
		}
	}

	return dest_cpu;
}
| 0
|
[
"CWE-119"
] |
linux
|
29d6455178a09e1dc340380c582b13356227e8df
| 112,457,885,552,896,510,000,000,000,000,000,000,000
| 71
|
sched: panic on corrupted stack end
Until now, hitting this BUG_ON caused a recursive oops (because oops
handling involves do_exit(), which calls into the scheduler, which in
turn raises an oops), which caused stuff below the stack to be
overwritten until a panic happened (e.g. via an oops in interrupt
context, caused by the overwritten CPU index in the thread_info).
Just panic directly.
Signed-off-by: Jann Horn <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
/*
 * Return the ofputil_bucket containing the first node of 'buckets',
 * obtained by mapping the list node back to its enclosing structure.
 *
 * NOTE(review): 'bucket' is a function-local 'static' (presumably so
 * ASSIGN_CONTAINER has a persistent lvalue to write through); the return
 * value is computed fresh on every call, but concurrent callers would
 * race on the static itself -- confirm single-threaded use.
 */
ofputil_bucket_list_front(const struct ovs_list *buckets)
{
    static struct ofputil_bucket *bucket;

    ASSIGN_CONTAINER(bucket, ovs_list_front(buckets), list_node);

    return bucket;
}
| 0
|
[
"CWE-772"
] |
ovs
|
77ad4225d125030420d897c873e4734ac708c66b
| 92,407,497,068,844,450,000,000,000,000,000,000,000
| 8
|
ofp-util: Fix memory leaks on error cases in ofputil_decode_group_mod().
Found by libFuzzer.
Reported-by: Bhargava Shastry <[email protected]>
Signed-off-by: Ben Pfaff <[email protected]>
Acked-by: Justin Pettit <[email protected]>
|
int enc_untrusted_raise(int sig) {
int klinux_sig = TokLinuxSignalNumber(sig);
if (klinux_sig < 0) {
errno = EINVAL;
return -1;
}
MessageWriter input;
input.Push<int>(klinux_sig);
MessageReader output;
const auto status = NonSystemCallDispatcher(::asylo::host_call::kRaiseHandler,
&input, &output);
CheckStatusAndParamCount(status, output, "enc_untrusted_raise", 2);
int result = output.next<int>();
int klinux_errno = output.next<int>();
if (result != 0) {
errno = FromkLinuxErrorNumber(klinux_errno);
}
return result;
}
| 0
|
[
"CWE-125"
] |
asylo
|
b1d120a2c7d7446d2cc58d517e20a1b184b82200
| 127,360,613,276,124,540,000,000,000,000,000,000,000
| 21
|
Check for return size in enc_untrusted_read
Check return size does not exceed requested. The returned result and
content still cannot be trusted, but it's expected behavior when not
using a secure file system.
PiperOrigin-RevId: 333827386
Change-Id: I0bdec0aec9356ea333dc8c647eba5d2772875f29
|
/*
 * xfs_attr_shortform_allfit - would every attribute in leaf buffer @bp
 * fit back into shortform (inline) format in inode @dp?
 *
 * Incomplete entries are skipped.  Any remote (non-local) entry, or a
 * local name/value length at or above XFS_ATTR_SF_ENTSIZE_MAX, makes the
 * conversion impossible (returns 0).  Otherwise the shortform byte count
 * is totalled and passed to xfs_attr_shortform_bytesfit().
 *
 * NOTE(review): the -1 return (ATTR2 mount, non-btree data fork, and
 * only the shortform header remaining) appears to tell the caller the
 * attr fork can be removed entirely -- confirm with the call sites.
 */
xfs_attr_shortform_allfit(
	struct xfs_buf		*bp,
	struct xfs_inode	*dp)
{
	struct xfs_attr_leafblock *leaf;
	struct xfs_attr_leaf_entry *entry;
	xfs_attr_leaf_name_local_t *name_loc;
	struct xfs_attr3_icleaf_hdr leafhdr;
	int			bytes;
	int			i;
	struct xfs_mount	*mp = bp->b_target->bt_mount;

	leaf = bp->b_addr;
	xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, &leafhdr, leaf);
	entry = xfs_attr3_leaf_entryp(leaf);

	/* Start with the shortform header, then add each entry's size. */
	bytes = sizeof(struct xfs_attr_sf_hdr);
	for (i = 0; i < leafhdr.count; entry++, i++) {
		if (entry->flags & XFS_ATTR_INCOMPLETE)
			continue;		/* don't copy partial entries */
		if (!(entry->flags & XFS_ATTR_LOCAL))
			return 0;
		name_loc = xfs_attr3_leaf_name_local(leaf, i);
		if (name_loc->namelen >= XFS_ATTR_SF_ENTSIZE_MAX)
			return 0;
		if (be16_to_cpu(name_loc->valuelen) >= XFS_ATTR_SF_ENTSIZE_MAX)
			return 0;
		/* -1: the name[1] placeholder already counted in the struct. */
		bytes += sizeof(struct xfs_attr_sf_entry) - 1
				+ name_loc->namelen
				+ be16_to_cpu(name_loc->valuelen);
	}
	if ((dp->i_mount->m_flags & XFS_MOUNT_ATTR2) &&
	    (dp->i_d.di_format != XFS_DINODE_FMT_BTREE) &&
	    (bytes == sizeof(struct xfs_attr_sf_hdr)))
		return -1;
	return xfs_attr_shortform_bytesfit(dp, bytes);
}
| 0
|
[
"CWE-476"
] |
linux
|
bb3d48dcf86a97dc25fe9fc2c11938e19cb4399a
| 68,913,033,487,623,360,000,000,000,000,000,000,000
| 37
|
xfs: don't call xfs_da_shrink_inode with NULL bp
xfs_attr3_leaf_create may have errored out before instantiating a buffer,
for example if the blkno is out of range. In that case there is no work
to do to remove it, and in fact xfs_da_shrink_inode will lead to an oops
if we try.
This also seems to fix a flaw where the original error from
xfs_attr3_leaf_create gets overwritten in the cleanup case, and it
removes a pointless assignment to bp which isn't used after this.
Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=199969
Reported-by: Xu, Wen <[email protected]>
Tested-by: Xu, Wen <[email protected]>
Signed-off-by: Eric Sandeen <[email protected]>
Reviewed-by: Darrick J. Wong <[email protected]>
Signed-off-by: Darrick J. Wong <[email protected]>
|
/*
 * PM callback: suspend a USB device for a system-wide suspend
 * transition by delegating to usb_suspend() with PMSG_SUSPEND.
 */
static int usb_dev_suspend(struct device *dev)
{
	return usb_suspend(dev, PMSG_SUSPEND);
}
| 0
|
[
"CWE-400",
"CWE-703"
] |
linux
|
704620afc70cf47abb9d6a1a57f3825d2bca49cf
| 182,848,482,986,692,720,000,000,000,000,000,000,000
| 4
|
USB: check usb_get_extra_descriptor for proper size
When reading an extra descriptor, we need to properly check the minimum
and maximum size allowed, to prevent from invalid data being sent by a
device.
Reported-by: Hui Peng <[email protected]>
Reported-by: Mathias Payer <[email protected]>
Co-developed-by: Linus Torvalds <[email protected]>
Signed-off-by: Hui Peng <[email protected]>
Signed-off-by: Mathias Payer <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
Cc: stable <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
|
// Construct the extractor by registering every provider in |providers|.
// Each list entry must be non-null (asserted in debug builds).
ExtractorImpl::ExtractorImpl(const JwtProviderList& providers) {
  for (const auto& jwt_provider : providers) {
    ASSERT(jwt_provider);
    addProvider(*jwt_provider);
  }
}
| 0
|
[] |
envoy
|
2c60632d41555ec8b3d9ef5246242be637a2db0f
| 119,139,368,312,515,300,000,000,000,000,000,000,000
| 6
|
http: header map security fixes for duplicate headers (#197)
Previously header matching did not match on all headers for
non-inline headers. This patch changes the default behavior to
always logically match on all headers. Multiple individual
headers will be logically concatenated with ',' similar to what
is done with inline headers. This makes the behavior effectively
consistent. This behavior can be temporary reverted by setting
the runtime value "envoy.reloadable_features.header_match_on_all_headers"
to "false".
Targeted fixes have been additionally performed on the following
extensions which make them consider all duplicate headers by default as
a comma concatenated list:
1) Any extension using CEL matching on headers.
2) The header to metadata filter.
3) The JWT filter.
4) The Lua filter.
Like primary header matching used in routing, RBAC, etc. this behavior
can be disabled by setting the runtime value
"envoy.reloadable_features.header_match_on_all_headers" to false.
Finally, the setCopy() header map API previously only set the first
header in the case of duplicate non-inline headers. setCopy() now
behaves similiarly to the other set*() APIs and replaces all found
headers with a single value. This may have had security implications
in the extauth filter which uses this API. This behavior can be disabled
by setting the runtime value
"envoy.reloadable_features.http_set_copy_replace_all_headers" to false.
Fixes https://github.com/envoyproxy/envoy-setec/issues/188
Signed-off-by: Matt Klein <[email protected]>
|
// Fetch the element at 'index' from array-typed 'object'.
//
// Returns NULL for a negative or out-of-range index, unless
// OBJECT_CREATE is set in 'flags': in that case a copy of the array's
// prototype item is created, stored at 'index', and returned (NULL only
// if the copy itself fails).
YR_OBJECT* yr_object_array_get_item(
    YR_OBJECT* object,
    int flags,
    int index)
{
  YR_OBJECT_ARRAY* array;
  YR_OBJECT* item = NULL;

  assert(object->type == OBJECT_TYPE_ARRAY);

  if (index < 0)
    return NULL;

  array = object_as_array(object);

  if (array->items != NULL && array->items->count > index)
    item = array->items->objects[index];

  if (item == NULL && (flags & OBJECT_CREATE))
  {
    yr_object_copy(array->prototype_item, &item);

    if (item != NULL)
      yr_object_array_set_item(object, item, index);
  }

  return item;
}
| 0
|
[
"CWE-119",
"CWE-787"
] |
yara
|
4a342f01e5439b9bb901aff1c6c23c536baeeb3f
| 230,854,779,287,397,120,000,000,000,000,000,000,000
| 28
|
Fix heap overflow (reported by Jurriaan Bremer)
When setting a new array item with yr_object_array_set_item() the array size is doubled if the index for the new item is larger than the already allocated ones. No further checks were made to ensure that the index fits into the array after doubling its capacity. If the array capacity was for example 64, and a new object is assigned to an index larger than 128 the overflow occurs. As yr_object_array_set_item() is usually invoked with indexes that increase monotonically by one, this bug never triggered before. But the new "dotnet" module has the potential to allow the exploitation of this bug by scanning a specially crafted .NET binary.
|
static inline bool xfs_bmap_wants_extents(struct xfs_inode *ip, int whichfork)
{
return whichfork != XFS_COW_FORK &&
XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE &&
XFS_IFORK_NEXTENTS(ip, whichfork) <=
XFS_IFORK_MAXEXT(ip, whichfork);
}
| 0
|
[] |
linux
|
2c4306f719b083d17df2963bc761777576b8ad1b
| 308,699,193,118,023,500,000,000,000,000,000,000,000
| 7
|
xfs: set format back to extents if xfs_bmap_extents_to_btree
If xfs_bmap_extents_to_btree fails in a mode where we call
xfs_iroot_realloc(-1) to de-allocate the root, set the
format back to extents.
Otherwise we can assume we can dereference ifp->if_broot
based on the XFS_DINODE_FMT_BTREE format, and crash.
Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=199423
Signed-off-by: Eric Sandeen <[email protected]>
Reviewed-by: Christoph Hellwig <[email protected]>
Reviewed-by: Darrick J. Wong <[email protected]>
Signed-off-by: Darrick J. Wong <[email protected]>
|
/*
 * Module exit handler for f2fs: tear down module state -- procfs entry,
 * debug stats, filesystem registration, shrinker, sysfs kset, the
 * various caches, and finally the trace I/O machinery.  The order here
 * mirrors (roughly in reverse) what module init set up.
 */
static void __exit exit_f2fs_fs(void)
{
	remove_proc_entry("fs/f2fs", NULL);
	f2fs_destroy_root_stats();
	unregister_filesystem(&f2fs_fs_type);
	unregister_shrinker(&f2fs_shrinker_info);
	kset_unregister(f2fs_kset);
	destroy_extent_cache();
	destroy_checkpoint_caches();
	destroy_segment_manager_caches();
	destroy_node_manager_caches();
	destroy_inodecache();
	f2fs_destroy_trace_ios();
}
| 0
|
[
"CWE-284"
] |
linux
|
b9dd46188edc2f0d1f37328637860bb65a771124
| 282,129,448,395,494,670,000,000,000,000,000,000,000
| 14
|
f2fs: sanity check segment count
F2FS uses 4 bytes to represent block address. As a result, supported
size of disk is 16 TB and it equals to 16 * 1024 * 1024 / 2 segments.
Signed-off-by: Jin Qian <[email protected]>
Signed-off-by: Jaegeuk Kim <[email protected]>
|
/** Try to insert the extrainfo document <b>ei</b> into the routerlist
 * <b>rl</b>.  Ownership of <b>ei</b> is always consumed: on failure (no
 * router matches its identity digest, or it is incompatible with that
 * router) it is freed and 0 is returned; on success it is stored in
 * rl->extra_info_map -- displacing and freeing any previous document
 * with the same descriptor digest -- and 1 is returned. */
extrainfo_insert(routerlist_t *rl, extrainfo_t *ei)
{
  int r = 0;
  routerinfo_t *ri = rimap_get(rl->identity_map,
                               ei->cache_info.identity_digest);
  signed_descriptor_t *sd =
    sdmap_get(rl->desc_by_eid_map, ei->cache_info.signed_descriptor_digest);
  extrainfo_t *ei_tmp;

  {
    /* XXXX remove this code if it slows us down. */
    extrainfo_t *ei_generated = router_get_my_extrainfo();
    tor_assert(ei_generated != ei);
  }

  if (!ri) {
    /* This router is unknown; we can't even verify the signature. Give up.*/
    goto done;
  }
  if (routerinfo_incompatible_with_extrainfo(ri, ei, sd, NULL)) {
    goto done;
  }

  /* Okay, if we make it here, we definitely have a router corresponding to
   * this extrainfo. */

  ei_tmp = eimap_set(rl->extra_info_map,
                     ei->cache_info.signed_descriptor_digest,
                     ei);
  r = 1;
  if (ei_tmp) {
    /* Account for and free the document we just replaced. */
    rl->extrainfo_store.bytes_dropped +=
      ei_tmp->cache_info.signed_descriptor_len;
    extrainfo_free(ei_tmp);
  }

 done:
  if (r == 0)
    extrainfo_free(ei);

#ifdef DEBUG_ROUTERLIST
  routerlist_assert_ok(rl);
#endif
  return r;
}
| 0
|
[
"CWE-399"
] |
tor
|
308f6dad20675c42b29862f4269ad1fbfb00dc9a
| 249,008,440,183,419,670,000,000,000,000,000,000,000
| 45
|
Mitigate a side-channel leak of which relays Tor chooses for a circuit
Tor's and OpenSSL's current design guarantee that there are other leaks,
but this one is likely to be more easily exploitable, and is easy to fix.
|
/*
 * Deserialize the metadata constant stored in 'blob' into a MonoObject
 * of 'type', allocated in 'domain'.
 *
 * Value types: a boxed object is allocated up front and its unboxed
 * payload (just past the MonoObject header) is decoded in place; enums
 * decode through their underlying base type.  Reference types: the
 * decoded value is written straight into the local 'object' pointer.
 *
 * Returns the resulting object, or NULL when 'blob' is NULL or decoding
 * fails.  Note: mono_get_constant_value_from_blob() returning 0 means
 * success here, hence the inverted condition at the end.
 */
mono_get_object_from_blob (MonoDomain *domain, MonoType *type, const char *blob)
{
	void *retval;
	MonoClass *klass;
	MonoObject *object;
	MonoType *basetype = type;

	if (!blob)
		return NULL;

	klass = mono_class_from_mono_type (type);
	if (klass->valuetype) {
		object = mono_object_new (domain, klass);
		/* Decode directly into the boxed value's payload. */
		retval = ((gchar *) object + sizeof (MonoObject));
		if (klass->enumtype)
			basetype = mono_class_enum_basetype (klass);
	} else {
		/* Reference type: decode into the object pointer itself. */
		retval = &object;
	}

	if (!mono_get_constant_value_from_blob (domain, basetype->type, blob, retval))
		return object;
	else
		return NULL;
}
| 0
|
[
"CWE-20"
] |
mono
|
4905ef1130feb26c3150b28b97e4a96752e0d399
| 312,536,506,895,004,700,000,000,000,000,000,000,000
| 25
|
Handle invalid instantiation of generic methods.
* verify.c: Add new function to internal verifier API to check
method instantiations.
* reflection.c (mono_reflection_bind_generic_method_parameters):
Check the instantiation before returning it.
Fixes #655847
|
/*
 * module_param 'set' callback: parse 'val' as an unsigned decimal and
 * store it in start_readonly ('kp' is unused).  Returns 0 on success or
 * the negative errno from kstrtouint().
 */
static int set_ro(const char *val, struct kernel_param *kp)
{
	return kstrtouint(val, 10, (unsigned int *)&start_readonly);
}
| 0
|
[
"CWE-200"
] |
linux
|
b6878d9e03043695dbf3fa1caa6dfc09db225b16
| 277,321,617,927,903,940,000,000,000,000,000,000,000
| 4
|
md: use kzalloc() when bitmap is disabled
In drivers/md/md.c get_bitmap_file() uses kmalloc() for creating a
mdu_bitmap_file_t called "file".
5769 file = kmalloc(sizeof(*file), GFP_NOIO);
5770 if (!file)
5771 return -ENOMEM;
This structure is copied to user space at the end of the function.
5786 if (err == 0 &&
5787 copy_to_user(arg, file, sizeof(*file)))
5788 err = -EFAULT
But if bitmap is disabled only the first byte of "file" is initialized
with zero, so it's possible to read some bytes (up to 4095) of kernel
space memory from user space. This is an information leak.
5775 /* bitmap disabled, zero the first byte and copy out */
5776 if (!mddev->bitmap_info.file)
5777 file->pathname[0] = '\0';
Signed-off-by: Benjamin Randazzo <[email protected]>
Signed-off-by: NeilBrown <[email protected]>
|
/*
 * oz_complete_buffered_urb - deliver one buffered data unit to an URB.
 * @port: the virtual USB port the endpoint belongs to
 * @ep:   endpoint whose circular buffer holds the data
 * @urb:  URB to fill and complete
 *
 * The byte at ep->buffer[ep->out_ix] is the length of the next stored
 * unit; its payload follows and may wrap past the end of the ring,
 * hence the two-part memcpy.  At most urb->transfer_buffer_length bytes
 * are delivered and recorded in urb->actual_length.
 *
 * NOTE(review): when the stored unit is larger than the URB's buffer,
 * only 'available_space' bytes are consumed and out_ix is left pointing
 * into the middle of the unit -- confirm that truncation is intended.
 */
static void oz_complete_buffered_urb(struct oz_port *port,
			struct oz_endpoint *ep,
			struct urb *urb)
{
	int data_len, available_space, copy_len;

	/* Length byte of the stored unit, clamped to the URB's capacity. */
	data_len = ep->buffer[ep->out_ix];
	if (data_len <= urb->transfer_buffer_length)
		available_space = data_len;
	else
		available_space = urb->transfer_buffer_length;

	/* Step past the length byte, wrapping if needed. */
	if (++ep->out_ix == ep->buffer_size)
		ep->out_ix = 0;

	copy_len = ep->buffer_size - ep->out_ix;
	if (copy_len >= available_space)
		copy_len = available_space;
	/* First chunk: up to the physical end of the ring. */
	memcpy(urb->transfer_buffer, &ep->buffer[ep->out_ix], copy_len);

	if (copy_len < available_space) {
		/* Second chunk: the wrapped remainder from the ring's start. */
		memcpy((urb->transfer_buffer + copy_len), ep->buffer,
						(available_space - copy_len));
		ep->out_ix = available_space - copy_len;
	} else {
		ep->out_ix += copy_len;
	}

	urb->actual_length = available_space;
	if (ep->out_ix == ep->buffer_size)
		ep->out_ix = 0;

	ep->buffered_units--;
	oz_dbg(ON, "Trying to give back buffered frame of size=%d\n",
	       available_space);
	oz_complete_urb(port->ozhcd->hcd, urb, 0);
}
| 0
|
[
"CWE-703",
"CWE-189"
] |
linux
|
b1bb5b49373b61bf9d2c73a4d30058ba6f069e4c
| 296,192,461,055,443,140,000,000,000,000,000,000,000
| 35
|
ozwpan: Use unsigned ints to prevent heap overflow
Using signed integers, the subtraction between required_size and offset
could wind up being negative, resulting in a memcpy into a heap buffer
with a negative length, resulting in huge amounts of network-supplied
data being copied into the heap, which could potentially lead to remote
code execution.. This is remotely triggerable with a magic packet.
A PoC which obtains DoS follows below. It requires the ozprotocol.h file
from this module.
=-=-=-=-=-=
#include <arpa/inet.h>
#include <linux/if_packet.h>
#include <net/if.h>
#include <netinet/ether.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <endian.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#define u8 uint8_t
#define u16 uint16_t
#define u32 uint32_t
#define __packed __attribute__((__packed__))
#include "ozprotocol.h"
static int hex2num(char c)
{
if (c >= '0' && c <= '9')
return c - '0';
if (c >= 'a' && c <= 'f')
return c - 'a' + 10;
if (c >= 'A' && c <= 'F')
return c - 'A' + 10;
return -1;
}
static int hwaddr_aton(const char *txt, uint8_t *addr)
{
int i;
for (i = 0; i < 6; i++) {
int a, b;
a = hex2num(*txt++);
if (a < 0)
return -1;
b = hex2num(*txt++);
if (b < 0)
return -1;
*addr++ = (a << 4) | b;
if (i < 5 && *txt++ != ':')
return -1;
}
return 0;
}
int main(int argc, char *argv[])
{
if (argc < 3) {
fprintf(stderr, "Usage: %s interface destination_mac\n", argv[0]);
return 1;
}
uint8_t dest_mac[6];
if (hwaddr_aton(argv[2], dest_mac)) {
fprintf(stderr, "Invalid mac address.\n");
return 1;
}
int sockfd = socket(AF_PACKET, SOCK_RAW, IPPROTO_RAW);
if (sockfd < 0) {
perror("socket");
return 1;
}
struct ifreq if_idx;
int interface_index;
strncpy(if_idx.ifr_ifrn.ifrn_name, argv[1], IFNAMSIZ - 1);
if (ioctl(sockfd, SIOCGIFINDEX, &if_idx) < 0) {
perror("SIOCGIFINDEX");
return 1;
}
interface_index = if_idx.ifr_ifindex;
if (ioctl(sockfd, SIOCGIFHWADDR, &if_idx) < 0) {
perror("SIOCGIFHWADDR");
return 1;
}
uint8_t *src_mac = (uint8_t *)&if_idx.ifr_hwaddr.sa_data;
struct {
struct ether_header ether_header;
struct oz_hdr oz_hdr;
struct oz_elt oz_elt;
struct oz_elt_connect_req oz_elt_connect_req;
} __packed connect_packet = {
.ether_header = {
.ether_type = htons(OZ_ETHERTYPE),
.ether_shost = { src_mac[0], src_mac[1], src_mac[2], src_mac[3], src_mac[4], src_mac[5] },
.ether_dhost = { dest_mac[0], dest_mac[1], dest_mac[2], dest_mac[3], dest_mac[4], dest_mac[5] }
},
.oz_hdr = {
.control = OZ_F_ACK_REQUESTED | (OZ_PROTOCOL_VERSION << OZ_VERSION_SHIFT),
.last_pkt_num = 0,
.pkt_num = htole32(0)
},
.oz_elt = {
.type = OZ_ELT_CONNECT_REQ,
.length = sizeof(struct oz_elt_connect_req)
},
.oz_elt_connect_req = {
.mode = 0,
.resv1 = {0},
.pd_info = 0,
.session_id = 0,
.presleep = 35,
.ms_isoc_latency = 0,
.host_vendor = 0,
.keep_alive = 0,
.apps = htole16((1 << OZ_APPID_USB) | 0x1),
.max_len_div16 = 0,
.ms_per_isoc = 0,
.up_audio_buf = 0,
.ms_per_elt = 0
}
};
struct {
struct ether_header ether_header;
struct oz_hdr oz_hdr;
struct oz_elt oz_elt;
struct oz_get_desc_rsp oz_get_desc_rsp;
} __packed pwn_packet = {
.ether_header = {
.ether_type = htons(OZ_ETHERTYPE),
.ether_shost = { src_mac[0], src_mac[1], src_mac[2], src_mac[3], src_mac[4], src_mac[5] },
.ether_dhost = { dest_mac[0], dest_mac[1], dest_mac[2], dest_mac[3], dest_mac[4], dest_mac[5] }
},
.oz_hdr = {
.control = OZ_F_ACK_REQUESTED | (OZ_PROTOCOL_VERSION << OZ_VERSION_SHIFT),
.last_pkt_num = 0,
.pkt_num = htole32(1)
},
.oz_elt = {
.type = OZ_ELT_APP_DATA,
.length = sizeof(struct oz_get_desc_rsp)
},
.oz_get_desc_rsp = {
.app_id = OZ_APPID_USB,
.elt_seq_num = 0,
.type = OZ_GET_DESC_RSP,
.req_id = 0,
.offset = htole16(2),
.total_size = htole16(1),
.rcode = 0,
.data = {0}
}
};
struct sockaddr_ll socket_address = {
.sll_ifindex = interface_index,
.sll_halen = ETH_ALEN,
.sll_addr = { dest_mac[0], dest_mac[1], dest_mac[2], dest_mac[3], dest_mac[4], dest_mac[5] }
};
if (sendto(sockfd, &connect_packet, sizeof(connect_packet), 0, (struct sockaddr *)&socket_address, sizeof(socket_address)) < 0) {
perror("sendto");
return 1;
}
usleep(300000);
if (sendto(sockfd, &pwn_packet, sizeof(pwn_packet), 0, (struct sockaddr *)&socket_address, sizeof(socket_address)) < 0) {
perror("sendto");
return 1;
}
return 0;
}
Signed-off-by: Jason A. Donenfeld <[email protected]>
Acked-by: Dan Carpenter <[email protected]>
Cc: stable <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
|
// Emit the S3-style HTTP response for a DeleteBucketTags request.
// On failure op_ret is translated into the request's error state so the
// standard error headers/body are produced; on success an empty reply
// with an XML content type is sent.
void RGWDeleteBucketTags_ObjStore_S3::send_response()
{
  if (op_ret)
    set_req_state_err(s, op_ret); // map the op result onto the HTTP error state
  dump_errno(s);                  // status line / errno header
  end_header(s, this, "application/xml");
  dump_start(s);                  // begin the (possibly empty) XML body
}
| 0
|
[
"CWE-79"
] |
ceph
|
8f90658c731499722d5f4393c8ad70b971d05f77
| 160,030,011,678,408,400,000,000,000,000,000,000,000
| 8
|
rgw: reject unauthenticated response-header actions
Signed-off-by: Matt Benjamin <[email protected]>
Reviewed-by: Casey Bodley <[email protected]>
(cherry picked from commit d8dd5e513c0c62bbd7d3044d7e2eddcd897bd400)
|
/* Read exactly COUNT bytes from READER into BUFFER.
 *
 * Loops over ksba_reader_read() until the requested amount has been
 * consumed.  Returns 0 on success or the first error reported by the
 * reader (libksba reports GPG_ERR_EOF when the stream ends early).
 *
 * Robustness fix: if the reader ever reports success but delivers zero
 * bytes, the original loop would spin forever.  Treat that situation
 * as a premature EOF instead of looping.
 */
read_buffer (ksba_reader_t reader, unsigned char *buffer, size_t count)
{
  gpg_error_t err;
  size_t nread;

  while (count)
    {
      err = ksba_reader_read (reader, buffer, count, &nread);
      if (err)
        return err;
      if (!nread)
        return gpg_error (GPG_ERR_EOF); /* no progress - avoid endless loop */
      buffer += nread;
      count -= nread;
    }
  return 0;
}
| 0
|
[
"CWE-20"
] |
gnupg
|
2183683bd633818dd031b090b5530951de76f392
| 274,794,760,111,340,000,000,000,000,000,000,000,000
| 15
|
Use inline functions to convert buffer data to scalars.
* common/host2net.h (buf16_to_ulong, buf16_to_uint): New.
(buf16_to_ushort, buf16_to_u16): New.
(buf32_to_size_t, buf32_to_ulong, buf32_to_uint, buf32_to_u32): New.
--
Commit 91b826a38880fd8a989318585eb502582636ddd8 was not enough to
avoid all sign extension on shift problems. Hanno Böck found a case
with an invalid read due to this problem. To fix that once and for
all almost all uses of "<< 24" and "<< 8" are changed by this patch to
use an inline function from host2net.h.
Signed-off-by: Werner Koch <[email protected]>
|
/* Set the 802.1Q VLAN id of 'flow' to 'vid' (network byte order).
 *
 * OFP10_VLAN_NONE means "no VLAN": the whole TCI is cleared.  Otherwise
 * only the VID bits are replaced and the CFI bit is set to mark the tag
 * as present; the remaining TCI bits (priority) are preserved. */
flow_set_dl_vlan(struct flow *flow, ovs_be16 vid)
{
    if (vid == htons(OFP10_VLAN_NONE)) {
        flow->vlans[0].tci = htons(0);
    } else {
        vid &= htons(VLAN_VID_MASK);
        flow->vlans[0].tci &= ~htons(VLAN_VID_MASK);
        flow->vlans[0].tci |= htons(VLAN_CFI) | vid;
    }
}
| 0
|
[
"CWE-400"
] |
ovs
|
79cec1a736b91548ec882d840986a11affda1068
| 240,563,836,255,219,100,000,000,000,000,000,000,000
| 10
|
flow: Support extra padding length.
Although not required, padding can be optionally added until
the packet length is MTU bytes. A packet with extra padding
currently fails sanity checks.
Vulnerability: CVE-2020-35498
Fixes: fa8d9001a624 ("miniflow_extract: Properly handle small IP packets.")
Reported-by: Joakim Hindersson <[email protected]>
Acked-by: Ilya Maximets <[email protected]>
Signed-off-by: Flavio Leitner <[email protected]>
Signed-off-by: Ilya Maximets <[email protected]>
|
/* Dump one binder_buffer to the debugfs seq file.
 * "active" means a transaction still owns the buffer; "delivered" means
 * it has been handed over and is awaiting a free from userspace. */
static void print_binder_buffer(struct seq_file *m, const char *prefix,
				struct binder_buffer *buffer)
{
	seq_printf(m, "%s %d: %pK size %zd:%zd:%zd %s\n",
		   prefix, buffer->debug_id, buffer->data,
		   buffer->data_size, buffer->offsets_size,
		   buffer->extra_buffers_size,
		   buffer->transaction ? "active" : "delivered");
}
| 0
|
[
"CWE-416"
] |
linux
|
7bada55ab50697861eee6bb7d60b41e68a961a9c
| 201,991,641,903,039,960,000,000,000,000,000,000,000
| 9
|
binder: fix race that allows malicious free of live buffer
Malicious code can attempt to free buffers using the BC_FREE_BUFFER
ioctl to binder. There are protections against a user freeing a buffer
while in use by the kernel, however there was a window where
BC_FREE_BUFFER could be used to free a recently allocated buffer that
was not completely initialized. This resulted in a use-after-free
detected by KASAN with a malicious test program.
This window is closed by setting the buffer's allow_user_free attribute
to 0 when the buffer is allocated or when the user has previously freed
it instead of waiting for the caller to set it. The problem was that
when the struct buffer was recycled, allow_user_free was stale and set
to 1 allowing a free to go through.
Signed-off-by: Todd Kjos <[email protected]>
Acked-by: Arve Hjønnevåg <[email protected]>
Cc: stable <[email protected]> # 4.14
Signed-off-by: Greg Kroah-Hartman <[email protected]>
|
/*
 * Locate an init helper binary for a container.
 *
 * Search order: "init.lxc" found via on_path() (temporarily providing a
 * default PATH if none is set), then - prefixed with 'rootfs' when one
 * is given - <SBINDIR>/init.lxc, <LXCINITDIR>/lxc/lxc-init,
 * /usr/lib/lxc/lxc-init, /sbin/lxc-init, and finally (host only)
 * /init.lxc.static.
 *
 * Returns a malloc'd path the caller must free, or NULL when nothing
 * executable was found (with a rootfs, NULL signals that the static
 * init.lxc.static must be bind-mounted in - see comment below).
 */
char *choose_init(const char *rootfs)
{
	char *retv = NULL;
	const char *empty = "",
		   *tmp;
	int ret, env_set = 0;

	/* on_path() needs a PATH; install a temporary default if unset */
	if (!getenv("PATH")) {
		if (setenv("PATH", "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", 0))
			SYSERROR("Failed to setenv");
		env_set = 1;
	}

	retv = on_path("init.lxc", rootfs);

	/* undo the temporary PATH before any return below */
	if (env_set) {
		if (unsetenv("PATH"))
			SYSERROR("Failed to unsetenv");
	}

	if (retv)
		return retv;

	retv = malloc(PATH_MAX);
	if (!retv)
		return NULL;

	/* prefix every remaining candidate with the rootfs when given */
	if (rootfs)
		tmp = rootfs;
	else
		tmp = empty;

	ret = snprintf(retv, PATH_MAX, "%s/%s/%s", tmp, SBINDIR, "/init.lxc");
	if (ret < 0 || ret >= PATH_MAX) {
		ERROR("pathname too long");
		goto out1;
	}
	if (access(retv, X_OK) == 0)
		return retv;

	ret = snprintf(retv, PATH_MAX, "%s/%s/%s", tmp, LXCINITDIR, "/lxc/lxc-init");
	if (ret < 0 || ret >= PATH_MAX) {
		ERROR("pathname too long");
		goto out1;
	}
	if (access(retv, X_OK) == 0)
		return retv;

	ret = snprintf(retv, PATH_MAX, "%s/usr/lib/lxc/lxc-init", tmp);
	if (ret < 0 || ret >= PATH_MAX) {
		ERROR("pathname too long");
		goto out1;
	}
	if (access(retv, X_OK) == 0)
		return retv;

	ret = snprintf(retv, PATH_MAX, "%s/sbin/lxc-init", tmp);
	if (ret < 0 || ret >= PATH_MAX) {
		ERROR("pathname too long");
		goto out1;
	}
	if (access(retv, X_OK) == 0)
		return retv;

	/*
	 * Last resort, look for the statically compiled init.lxc which we
	 * hopefully bind-mounted in.
	 * If we are called during container setup, and we get to this point,
	 * then the init.lxc.static from the host will need to be bind-mounted
	 * in.  So we return NULL here to indicate that.
	 */
	if (rootfs)
		goto out1;

	ret = snprintf(retv, PATH_MAX, "/init.lxc.static");
	if (ret < 0 || ret >= PATH_MAX) {
		WARN("Nonsense - name /lxc.init.static too long");
		goto out1;
	}
	if (access(retv, X_OK) == 0)
		return retv;

out1:
	free(retv);
	return NULL;
}
| 0
|
[
"CWE-417"
] |
lxc
|
c1cf54ebf251fdbad1e971679614e81649f1c032
| 143,206,245,998,034,400,000,000,000,000,000,000,000
| 92
|
CVE 2018-6556: verify netns fd in lxc-user-nic
Signed-off-by: Christian Brauner <[email protected]>
|
lexer_hex_to_code_point (const uint8_t *source_p, /**< current source position */
                         parser_line_counter_t length) /**< source length */
{
  lit_code_point_t code_point = 0;

  do
  {
    uint32_t digit = *source_p++;

    code_point <<= 4;

    /* Decimal digits can be folded in directly. */
    if (digit >= LIT_CHAR_0 && digit <= LIT_CHAR_9)
    {
      code_point += digit - LIT_CHAR_0;
      continue;
    }

    /* Otherwise it must be a hex letter (either case). */
    digit = LEXER_TO_ASCII_LOWERCASE (digit);

    if (digit < LIT_CHAR_LOWERCASE_A || digit > LIT_CHAR_LOWERCASE_F)
    {
      return UINT32_MAX;
    }

    code_point += digit - (LIT_CHAR_LOWERCASE_A - 10);
  }
  while (--length > 0);

  return code_point;
} /* lexer_hex_to_code_point */
| 0
|
[
"CWE-288"
] |
jerryscript
|
f3a420b672927037beb4508d7bdd68fb25d2caf6
| 106,822,752,510,112,290,000,000,000,000,000,000,000
| 31
|
Fix class static block opening brace parsing (#4942)
The next character should not be consumed after finding the static block opening brace.
This patch fixes #4916.
JerryScript-DCO-1.0-Signed-off-by: Martin Negyokru [email protected]
|
snprint_description(char *buf, size_t buf_len,
                    oid * objid, size_t objidlen, int width)
{
    /*
     * Render the DESCRIPTION of the MIB object identified by objid into
     * the fixed-size buffer 'buf'.  Returns the number of characters
     * written, or -1 if the underlying realloc-based printer failed.
     */
    size_t          out_len = 0;

    if (!sprint_realloc_description((u_char **) & buf, &buf_len, &out_len, 0,
                                    objid, objidlen, width))
        return -1;

    return (int) out_len;
}
| 0
|
[
"CWE-59",
"CWE-61"
] |
net-snmp
|
4fd9a450444a434a993bc72f7c3486ccce41f602
| 185,747,369,247,004,870,000,000,000,000,000,000,000
| 12
|
CHANGES: snmpd: Stop reading and writing the mib_indexes/* files
Caching directory contents is something the operating system should do
and is not something Net-SNMP should do. Instead of storing a copy of
the directory contents in ${tmp_dir}/mib_indexes/${n}, always scan a
MIB directory.
|
/* Return a borrowed pointer to the server-properties table received
 * during connection negotiation.  The table is owned by 'state' and
 * remains valid until the connection state is destroyed; callers must
 * not free it. */
amqp_table_t *amqp_get_server_properties(amqp_connection_state_t state) {
  return &state->server_properties;
}
| 0
|
[
"CWE-20",
"CWE-190",
"CWE-787"
] |
rabbitmq-c
|
fc85be7123050b91b054e45b91c78d3241a5047a
| 194,200,412,250,876,000,000,000,000,000,000,000,000
| 3
|
lib: check frame_size is >= INT32_MAX
When parsing a frame header, validate that the frame_size is less than
or equal to INT32_MAX. Given frame_max is limited between 0 and
INT32_MAX in amqp_login and friends, this does not change the API.
This prevents a potential buffer overflow when a malicious client sends
a frame_size that is close to UINT32_MAX, in which causes an overflow
when computing state->target_size resulting in a small value there. A
buffer is then allocated with the small amount, then memcopy copies the
frame_size writing to memory beyond the end of the buffer.
|
phone_oui_to_icon_name (const char *bdaddr)
{
	/* Vendor-substring to icon-name mapping; first match wins, same
	 * precedence order as before (Apple, Samsung, Google). */
	static const struct {
		const char *needle;
		const char *icon;
	} vendor_icons[] = {
		{ "Apple",   "phone-apple-iphone"    },
		{ "Samsung", "phone-samsung-galaxy-s" },
		{ "Google",  "phone-google-nexus-one" }
	};
	char *vendor;
	const char *icon = NULL;
	unsigned int i;

	vendor = oui_to_vendor (bdaddr);
	if (vendor == NULL)
		return NULL;

	for (i = 0; i < sizeof (vendor_icons) / sizeof (vendor_icons[0]); i++) {
		if (strstr (vendor, vendor_icons[i].needle) != NULL) {
			icon = vendor_icons[i].icon;
			break;
		}
	}

	g_free (vendor);

	return icon;
}
| 0
|
[] |
gnome-bluetooth
|
6b5086d42ea64d46277f3c93b43984f331d12f89
| 277,539,683,036,678,400,000,000,000,000,000,000,000
| 19
|
lib: Fix Discoverable being reset when turned off
Work-around race in bluetoothd which would reset the discoverable
flag if a timeout change was requested before discoverable finished
being set to off:
See https://bugzilla.redhat.com/show_bug.cgi?id=1602985
|
/*
 * Implements SecRuleUpdateActionById and friends: look up the rule with
 * id 'p1' (optionally 'offset' entries into its chain) in the current
 * ruleset, parse the action string 'p2' into a fresh actionset and
 * merge it over the rule's existing actions.
 *
 * Returns NULL on success (or when there is nothing to do), otherwise
 * an error string allocated from cmd->pool.
 */
static const char *update_rule_action(cmd_parms *cmd, directory_config *dcfg,
        const char *p1, const char *p2, int offset)
{
    char *my_error_msg = NULL;
    msre_rule *rule = NULL;
    msre_actionset *new_actionset = NULL;
    msre_ruleset *ruleset = dcfg->ruleset;
    extern msc_engine *modsecurity;

    /* Get the ruleset if one exists */
    if ((ruleset == NULL)||(ruleset == NOT_SET_P)) {
        return NULL;
    }

    #ifdef DEBUG_CONF
    ap_log_perror(APLOG_MARK, APLOG_STARTUP|APLOG_NOERRNO, 0, cmd->pool,
        "Update rule id=\"%s\" with action \"%s\".", p1, p2);
    #endif

    /* Fetch the rule */
    rule = msre_ruleset_fetch_rule(ruleset, p1, offset);
    if (rule == NULL) {
        /* Unknown rule id is not treated as a configuration error. */
        #ifdef DEBUG_CONF
        ap_log_perror(APLOG_MARK, APLOG_STARTUP|APLOG_NOERRNO, 0, cmd->pool,
            "Update rule id=\"%s\" with action \"%s\" failed: Rule not found.", p1, p2);
        #endif
        return NULL;
    }

    /* Check the rule actionset */
    /* ENH: Can this happen? */
    if (rule->actionset == NULL) {
        return apr_psprintf(cmd->pool, "ModSecurity: Attempt to update action for rule \"%s\" failed: Rule does not have an actionset.", p1);
    }

    /* Create a new actionset */
    new_actionset = msre_actionset_create(modsecurity->msre, p2, &my_error_msg);
    if (new_actionset == NULL) return FATAL_ERROR;
    if (my_error_msg != NULL) return my_error_msg;

    /* Must NOT change an id */
    if ((new_actionset->id != NOT_SET_P) && (rule->actionset->id != NULL) && (strcmp(rule->actionset->id, new_actionset->id) != 0)) {
        return apr_psprintf(cmd->pool, "ModSecurity: Rule IDs cannot be updated via SecRuleUpdateActionById.");
    }

    /* Must NOT alter the phase */
    if ((new_actionset->phase != NOT_SET) && (rule->actionset->phase != new_actionset->phase)) {
        return apr_psprintf(cmd->pool, "ModSecurity: Rule phases cannot be updated via SecRuleUpdateActionById.");
    }

    #ifdef DEBUG_CONF
    {
        char *actions = msre_actionset_generate_action_string(ruleset->mp, rule->actionset);
        ap_log_perror(APLOG_MARK, APLOG_STARTUP|APLOG_NOERRNO, 0, cmd->pool,
            "Update rule %pp id=\"%s\" old action: \"%s\"",
            rule,
            (rule->actionset->id == NOT_SET_P ? "(none)" : rule->actionset->id),
            actions);
    }
    #endif

    /* Merge new actions with the rule */
    /* ENH: Will this leak the old actionset? */
    rule->actionset = msre_actionset_merge(modsecurity->msre, rule->actionset,
        new_actionset, 1);
    msre_actionset_set_defaults(rule->actionset);

    /* Update the unparsed rule so generated config reflects the merge */
    rule->unparsed = msre_rule_generate_unparsed(ruleset->mp, rule, NULL, NULL, NULL);

    #ifdef DEBUG_CONF
    {
        char *actions = msre_actionset_generate_action_string(ruleset->mp, rule->actionset);
        ap_log_perror(APLOG_MARK, APLOG_STARTUP|APLOG_NOERRNO, 0, cmd->pool,
            "Update rule %pp id=\"%s\" new action: \"%s\"",
            rule,
            (rule->actionset->id == NOT_SET_P ? "(none)" : rule->actionset->id),
            actions);
    }
    #endif

    return NULL;
}
| 0
|
[
"CWE-20",
"CWE-611"
] |
ModSecurity
|
d4d80b38aa85eccb26e3c61b04d16e8ca5de76fe
| 57,611,376,117,889,170,000,000,000,000,000,000,000
| 83
|
Added SecXmlExternalEntity
|
/* Copy all entries of 'table', converted to the 32-bit compat layout,
 * into the userspace buffer at 'userptr' ('total_size' bytes).  The
 * per-cpu counters are snapshotted first so the dump is consistent.
 * Returns 0 on success or a negative errno from the per-entry copy. */
compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
			    void __user *userptr)
{
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	void __user *pos;
	unsigned int size;
	int ret = 0;
	unsigned int i = 0;
	struct ip6t_entry *iter;

	/* snapshot the counters before walking the entries */
	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	pos = userptr;
	size = total_size;
	xt_entry_foreach(iter, private->entries, total_size) {
		ret = compat_copy_entry_to_user(iter, &pos,
						&size, counters, i++);
		if (ret != 0)
			break;
	}

	vfree(counters);
	return ret;
}
| 0
|
[
"CWE-119"
] |
nf-next
|
d7591f0c41ce3e67600a982bab6989ef0f07b3ce
| 33,005,755,082,972,060,000,000,000,000,000,000,000
| 27
|
netfilter: x_tables: introduce and use xt_copy_counters_from_user
The three variants use same copy&pasted code, condense this into a
helper and use that.
Make sure info.name is 0-terminated.
Signed-off-by: Florian Westphal <[email protected]>
Signed-off-by: Pablo Neira Ayuso <[email protected]>
|
static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
struct net_device *dev,
struct netdev_queue *txq)
{
spinlock_t *root_lock = qdisc_lock(q);
bool contended;
int rc;
qdisc_calculate_pkt_len(skb, q);
/*
* Heuristic to force contended enqueues to serialize on a
* separate lock before trying to get qdisc main lock.
* This permits __QDISC___STATE_RUNNING owner to get the lock more
* often and dequeue packets faster.
*/
contended = qdisc_is_running(q);
if (unlikely(contended))
spin_lock(&q->busylock);
spin_lock(root_lock);
if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
kfree_skb(skb);
rc = NET_XMIT_DROP;
} else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
qdisc_run_begin(q)) {
/*
* This is a work-conserving queue; there are no old skbs
* waiting to be sent out; and the qdisc is not running -
* xmit the skb directly.
*/
qdisc_bstats_update(q, skb);
if (sch_direct_xmit(skb, q, dev, txq, root_lock, true)) {
if (unlikely(contended)) {
spin_unlock(&q->busylock);
contended = false;
}
__qdisc_run(q);
} else
qdisc_run_end(q);
rc = NET_XMIT_SUCCESS;
} else {
rc = q->enqueue(skb, q) & NET_XMIT_MASK;
if (qdisc_run_begin(q)) {
if (unlikely(contended)) {
spin_unlock(&q->busylock);
contended = false;
}
__qdisc_run(q);
}
}
spin_unlock(root_lock);
if (unlikely(contended))
spin_unlock(&q->busylock);
return rc;
| 0
|
[
"CWE-400",
"CWE-703"
] |
linux
|
fac8e0f579695a3ecbc4d3cac369139d7f819971
| 7,848,436,336,150,225,000,000,000,000,000,000,000
| 58
|
tunnels: Don't apply GRO to multiple layers of encapsulation.
When drivers express support for TSO of encapsulated packets, they
only mean that they can do it for one layer of encapsulation.
Supporting additional levels would mean updating, at a minimum,
more IP length fields and they are unaware of this.
No encapsulation device expresses support for handling offloaded
encapsulated packets, so we won't generate these types of frames
in the transmit path. However, GRO doesn't have a check for
multiple levels of encapsulation and will attempt to build them.
UDP tunnel GRO actually does prevent this situation but it only
handles multiple UDP tunnels stacked on top of each other. This
generalizes that solution to prevent any kind of tunnel stacking
that would cause problems.
Fixes: bf5a755f ("net-gre-gro: Add GRE support to the GRO stack")
Signed-off-by: Jesse Gross <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
/* Create the greeter D-Bus interface skeleton, connect its method
 * handlers to this session, export it on 'connection' at the session
 * object path, and keep a reference in priv->greeter_interface so it
 * can be torn down later. */
export_greeter_interface (GdmSession   *self,
                          GDBusConnection *connection)
{
        GdmDBusGreeter *greeter_interface;

        greeter_interface = GDM_DBUS_GREETER (gdm_dbus_greeter_skeleton_new ());

        g_signal_connect (greeter_interface,
                          "handle-begin-auto-login",
                          G_CALLBACK (gdm_session_handle_client_begin_auto_login),
                          self);
        g_signal_connect (greeter_interface,
                          "handle-select-session",
                          G_CALLBACK (gdm_session_handle_client_select_session),
                          self);
        g_signal_connect (greeter_interface,
                          "handle-select-user",
                          G_CALLBACK (gdm_session_handle_client_select_user),
                          self);
        g_signal_connect (greeter_interface,
                          "handle-start-session-when-ready",
                          G_CALLBACK (gdm_session_handle_client_start_session_when_ready),
                          self);
        g_signal_connect (greeter_interface,
                          "handle-get-timed-login-details",
                          G_CALLBACK (gdm_session_handle_get_timed_login_details),
                          self);

        g_dbus_interface_skeleton_export (G_DBUS_INTERFACE_SKELETON (greeter_interface),
                                          connection,
                                          GDM_SESSION_DBUS_OBJECT_PATH,
                                          NULL);

        self->priv->greeter_interface = greeter_interface;
}
| 0
|
[] |
gdm
|
5ac224602f1d603aac5eaa72e1760d3e33a26f0a
| 80,824,174,313,108,740,000,000,000,000,000,000,000
| 36
|
session: disconnect signals from worker proxy when conversation is freed
We don't want an outstanding reference on the worker proxy to lead to
signal handlers getting dispatched after the conversation is freed.
https://bugzilla.gnome.org/show_bug.cgi?id=758032
|
/* mysqltest "echo" command: evaluate the command's argument (expanding
 * $variables) and append the result plus a newline to the global result
 * buffer ds_res.  Always returns 0. */
int do_echo(struct st_command *command)
{
  DYNAMIC_STRING ds_echo;
  DBUG_ENTER("do_echo");

  /* initial capacity sized from the command so most echoes avoid realloc */
  init_dynamic_string(&ds_echo, "", command->query_len, 256);
  do_eval(&ds_echo, command->first_argument, command->end, FALSE);
  dynstr_append_mem(&ds_res, ds_echo.str, ds_echo.length);
  dynstr_append_mem(&ds_res, "\n", 1);
  dynstr_free(&ds_echo);
  command->last_argument= command->end; /* mark all arguments consumed */
  DBUG_RETURN(0);
}
| 0
|
[
"CWE-284",
"CWE-295"
] |
mysql-server
|
3bd5589e1a5a93f9c224badf983cd65c45215390
| 307,972,732,508,552,220,000,000,000,000,000,000,000
| 13
|
WL#6791 : Redefine client --ssl option to imply enforced encryption
# Changed the meaning of the --ssl=1 option of all client binaries
to mean force ssl, not try ssl and fail over to eunecrypted
# Added a new MYSQL_OPT_SSL_ENFORCE mysql_options()
option to specify that an ssl connection is required.
# Added a new macro SSL_SET_OPTIONS() to the client
SSL handling headers that sets all the relevant SSL options at
once.
# Revamped all of the current native clients to use the new macro
# Removed some Windows line endings.
# Added proper handling of the new option into the ssl helper
headers.
# If SSL is mandatory assume that the media is secure enough
for the sha256 plugin to do unencrypted password exchange even
before establishing a connection.
# Set the default ssl cipher to DHE-RSA-AES256-SHA if none is
specified.
# updated test cases that require a non-default cipher to spawn
a mysql command line tool binary since mysqltest has no support
for specifying ciphers.
# updated the replication slave connection code to always enforce
SSL if any of the SSL config options is present.
# test cases added and updated.
# added a mysql_get_option() API to return mysql_options()
values. Used the new API inside the sha256 plugin.
# Fixed compilation warnings because of unused variables.
# Fixed test failures (mysql_ssl and bug13115401)
# Fixed whitespace issues.
# Fully implemented the mysql_get_option() function.
# Added a test case for mysql_get_option()
# fixed some trailing whitespace issues
# fixed some uint/int warnings in mysql_client_test.c
# removed shared memory option from non-windows get_options
tests
# moved MYSQL_OPT_LOCAL_INFILE to the uint options
|
/* After a write that ends at 'pos', bump the inode's attribute version
 * and extend the cached i_size if the file grew.  fc->lock serializes
 * the version bump and size update against concurrent attr changes. */
static void fuse_write_update_size(struct inode *inode, loff_t pos)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);

	spin_lock(&fc->lock);
	fi->attr_version = ++fc->attr_version;
	if (pos > inode->i_size)
		i_size_write(inode, pos);
	spin_unlock(&fc->lock);
}
| 0
|
[] |
linux-2.6
|
0bd87182d3ab18a32a8e9175d3f68754c58e3432
| 239,922,387,633,477,900,000,000,000,000,000,000,000
| 11
|
fuse: fix kunmap in fuse_ioctl_copy_user
Looks like another victim of the confusing kmap() vs kmap_atomic() API
differences.
Reported-by: Todor Gyumyushev <[email protected]>
Signed-off-by: Jens Axboe <[email protected]>
Signed-off-by: Miklos Szeredi <[email protected]>
Cc: Tejun Heo <[email protected]>
Cc: [email protected]
|
// Serialize the shared-object hint table for a linearized PDF into the
// bit stream.  The numbered comments on the header fields match the
// item numbers used elsewhere for this table.  Signatures are never
// emitted; a set signature_present flag is treated as a fatal
// inconsistency.
QPDF::writeHSharedObject(BitWriter& w)
{
    HSharedObject& t = this->m->shared_object_hints;

    // Fixed-width header fields
    w.writeBits(t.first_shared_obj, 32);		    // 1
    w.writeBits(t.first_shared_offset, 32);		    // 2
    w.writeBits(t.nshared_first_page, 32);		    // 3
    w.writeBits(t.nshared_total, 32);			    // 4
    w.writeBits(t.nbits_nobjects, 16);			    // 5
    w.writeBits(t.min_group_length, 32);		    // 6
    w.writeBits(t.nbits_delta_group_length, 16);	    // 7

    QTC::TC("qpdf", "QPDF lin write nshared_total > nshared_first_page",
            (t.nshared_total > t.nshared_first_page) ? 1 : 0);

    // Per-entry arrays, one value per shared object group
    int nitems = t.nshared_total;
    std::vector<HSharedObjectEntry>& entries = t.entries;
    write_vector_int(w, nitems, entries,
                     t.nbits_delta_group_length,
                     &HSharedObjectEntry::delta_group_length);
    write_vector_int(w, nitems, entries,
                     1, &HSharedObjectEntry::signature_present);
    for (int i = 0; i < nitems; ++i)
    {
        // If signature were present, we'd have to write a 128-bit hash.
        if (entries.at(i).signature_present != 0)
        {
            stopOnError("found unexpected signature present"
                        " while writing linearization data");
        }
    }
    write_vector_int(w, nitems, entries,
                     t.nbits_nobjects,
                     &HSharedObjectEntry::nobjects_minus_one);
}
| 1
|
[
"CWE-787"
] |
qpdf
|
d71f05ca07eb5c7cfa4d6d23e5c1f2a800f52e8e
| 118,894,927,439,915,020,000,000,000,000,000,000,000
| 36
|
Fix sign and conversion warnings (major)
This makes all integer type conversions that have potential data loss
explicit with calls that do range checks and raise an exception. After
this commit, qpdf builds with no warnings when -Wsign-conversion
-Wconversion is used with gcc or clang or when -W3 -Wd4800 is used
with MSVC. This significantly reduces the likelihood of potential
crashes from bogus integer values.
There are some parts of the code that take int when they should take
size_t or an offset. Such places would make qpdf not support files
with more than 2^31 of something that usually wouldn't be so large. In
the event that such a file shows up and is valid, at least qpdf would
raise an error in the right spot so the issue could be legitimately
addressed rather than failing in some weird way because of a silent
overflow condition.
|
/* Parse the body of an IMAP FETCH response 's' into header data 'h'.
 * Handles the FLAGS, UID, INTERNALDATE and RFC822.SIZE items.
 * Returns 0 on success, -1 on a malformed response, and -2 when a
 * BODY/RFC822.HEADER item is seen (those are handled by the caller,
 * msg_fetch_header). */
static int msg_parse_fetch(struct ImapHeader *h, char *s)
{
  char tmp[SHORT_STRING];
  char *ptmp = NULL;

  if (!s)
    return -1;

  while (*s)
  {
    SKIPWS(s);

    if (mutt_str_strncasecmp("FLAGS", s, 5) == 0)
    {
      s = msg_parse_flags(h, s);
      if (!s)
        return -1;
    }
    else if (mutt_str_strncasecmp("UID", s, 3) == 0)
    {
      s += 3;
      SKIPWS(s);
      if (mutt_str_atoui(s, &h->data->uid) < 0)
        return -1;

      s = imap_next_word(s);
    }
    else if (mutt_str_strncasecmp("INTERNALDATE", s, 12) == 0)
    {
      s += 12;
      SKIPWS(s);
      /* the date is a quoted string; copy it (bounded) into tmp */
      if (*s != '\"')
      {
        mutt_debug(1, "bogus INTERNALDATE entry: %s\n", s);
        return -1;
      }
      s++;
      ptmp = tmp;
      while (*s && (*s != '\"') && (ptmp != (tmp + sizeof(tmp) - 1)))
        *ptmp++ = *s++;
      if (*s != '\"')
        return -1;
      s++; /* skip past the trailing " */
      *ptmp = '\0';
      h->received = mutt_date_parse_imap(tmp);
    }
    else if (mutt_str_strncasecmp("RFC822.SIZE", s, 11) == 0)
    {
      s += 11;
      SKIPWS(s);
      /* copy the digit run (bounded) and convert */
      ptmp = tmp;
      while (isdigit((unsigned char) *s) && (ptmp != (tmp + sizeof(tmp) - 1)))
        *ptmp++ = *s++;
      *ptmp = '\0';
      if (mutt_str_atol(tmp, &h->content_length) < 0)
        return -1;
    }
    else if ((mutt_str_strncasecmp("BODY", s, 4) == 0) ||
             (mutt_str_strncasecmp("RFC822.HEADER", s, 13) == 0))
    {
      /* handle above, in msg_fetch_header */
      return -2;
    }
    else if (*s == ')')
      s++; /* end of request */
    else if (*s)
    {
      /* got something i don't understand */
      imap_error("msg_parse_fetch", s);
      return -1;
    }
  }

  return 0;
}
| 0
|
[
"CWE-119",
"CWE-787"
] |
neomutt
|
1b0f0d0988e6df4e32e9f4bf8780846ea95d4485
| 314,790,893,055,318,600,000,000,000,000,000,000,000
| 75
|
Don't overflow stack buffer in msg_parse_fetch
|
FindCommnr(str)
const char *str;
{
  /*
   * Binary search for command 'str' in the sorted comms[] table.
   * Returns the command number, or RC_ILLEGAL if unknown.
   */
  int lo = 0, hi = RC_LAST;

  while (lo <= hi)
    {
      int mid = (lo + hi) / 2;
      int cmp = strcmp(str, comms[mid].name);

      if (cmp == 0)
	return mid;
      if (cmp > 0)
	lo = mid + 1;
      else
	hi = mid - 1;
    }
  return RC_ILLEGAL;
}
| 0
|
[] |
screen
|
c5db181b6e017cfccb8d7842ce140e59294d9f62
| 13,530,999,628,951,460,000,000,000,000,000,000,000
| 17
|
ansi: add support for xterm OSC 11
It allows for getting and setting the background color. Notably, Vim uses
OSC 11 to learn whether it's running on a light or dark colored terminal
and choose a color scheme accordingly.
Tested with gnome-terminal and xterm. When called with "?" argument the
current background color is returned:
$ echo -ne "\e]11;?\e\\"
$ 11;rgb:2323/2727/2929
Signed-off-by: Lubomir Rintel <[email protected]>
(cherry picked from commit 7059bff20a28778f9d3acf81cad07b1388d02309)
Signed-off-by: Amadeusz Sławiński <[email protected]>
|
/* Return true if NODE is DEST itself, or an IPA-CP "all contexts"
   clone whose original node is DEST.  */
same_node_or_its_all_contexts_clone_p (cgraph_node *node, cgraph_node *dest)
{
  if (node == dest)
    return true;

  class ipa_node_params *info = IPA_NODE_REF (node);
  return info->is_all_contexts_clone && info->ipcp_orig_node == dest;
}
| 0
|
[
"CWE-20"
] |
gcc
|
a09ccc22459c565814f79f96586fe4ad083fe4eb
| 84,857,524,481,407,800,000,000,000,000,000,000,000
| 8
|
Avoid segfault when doing IPA-VRP but not IPA-CP (PR 93015)
2019-12-21 Martin Jambor <[email protected]>
PR ipa/93015
* ipa-cp.c (ipcp_store_vr_results): Check that info exists
testsuite/
* gcc.dg/lto/pr93015_0.c: New test.
From-SVN: r279695
|
/* Record that a rejoining nick got mode prefix 'prefix' on 'channel'
 * in the netjoin record.  Each entry in rec->now_channels stores the
 * nick's current prefix char at position 0 followed by the channel
 * name.  The stored prefix is only replaced when 'prefix' appears
 * earlier in the server's nick-flag string (assumed to list prefixes
 * highest rank first - the loop keeps whichever char comes first).
 * Returns TRUE when the channel was found in the record. */
static int netjoin_set_nickmode(IRC_SERVER_REC *server, NETJOIN_REC *rec,
				const char *channel, char prefix)
{
	GSList *pos;
	const char *flags;
	char *found_chan = NULL;

	/* locate the channel entry (skip the prefix char at [0]) */
	for (pos = rec->now_channels; pos != NULL; pos = pos->next) {
		char *chan = pos->data;
		if (strcasecmp(chan+1, channel) == 0) {
			found_chan = chan;
			break;
		}
	}

	if (found_chan == NULL)
		return FALSE;

	flags = server->get_nick_flags(SERVER(server));
	while (*flags != '\0') {
		if (found_chan[0] == *flags)
			break; /* stored prefix outranks (or equals) the new one */
		if (prefix == *flags) {
			found_chan[0] = prefix;
			break;
		}
		flags++;
	}
	return TRUE;
}
| 0
|
[
"CWE-416"
] |
irssi
|
a6cae91cecba2e8cf11ed779c5da5a229472575c
| 74,863,120,435,714,960,000,000,000,000,000,000,000
| 30
|
Merge pull request #812 from ailin-nemui/tape-netsplit
revert netsplit print optimisation
(cherry picked from commit 7de1378dab8081932d9096e19ae3d0921e560230)
|
/* Syntax validator: check that 'in' is well-formed UTF-8, and that
 * directoryString values are non-empty.  Returns LDAP_SUCCESS or
 * LDAP_INVALID_SYNTAX. */
UTF8StringValidate(
	Syntax *syntax,
	struct berval *in )
{
	int len;
	unsigned char *u = (unsigned char *)in->bv_val, *end = in->bv_val + in->bv_len;

	if( BER_BVISEMPTY( in ) && syntax == slap_schema.si_syn_directoryString ) {
		/* directory strings cannot be empty */
		return LDAP_INVALID_SYNTAX;
	}

	for( ; u < end; u += len ) {
		/* get the length indicated by the first byte */
		len = LDAP_UTF8_CHARLEN2( u, len );

		/* very basic checks: every continuation byte must be 10xxxxxx.
		 * The switch intentionally falls through so an N-byte sequence
		 * checks all of its N-1 continuation bytes. */
		switch( len ) {
			case 6:
				if( (u[5] & 0xC0) != 0x80 ) {
					return LDAP_INVALID_SYNTAX;
				}
				/* FALL THRU */
			case 5:
				if( (u[4] & 0xC0) != 0x80 ) {
					return LDAP_INVALID_SYNTAX;
				}
				/* FALL THRU */
			case 4:
				if( (u[3] & 0xC0) != 0x80 ) {
					return LDAP_INVALID_SYNTAX;
				}
				/* FALL THRU */
			case 3:
				if( (u[2] & 0xC0 )!= 0x80 ) {
					return LDAP_INVALID_SYNTAX;
				}
				/* FALL THRU */
			case 2:
				if( (u[1] & 0xC0) != 0x80 ) {
					return LDAP_INVALID_SYNTAX;
				}
				/* FALL THRU */
			case 1:
				/* CHARLEN already validated it */
				break;
			default:
				return LDAP_INVALID_SYNTAX;
		}

		/* make sure len corresponds with the offset
			to the next character */
		if( LDAP_UTF8_OFFSET( (char *)u ) != len ) return LDAP_INVALID_SYNTAX;
	}

	if( u > end ) {
		return LDAP_INVALID_SYNTAX;
	}

	return LDAP_SUCCESS;
}
| 0
|
[
"CWE-617"
] |
openldap
|
67670f4544e28fb09eb7319c39f404e1d3229e65
| 56,704,982,585,617,330,000,000,000,000,000,000,000
| 56
|
ITS#9383 remove assert in certificateListValidate
|
/**
 * Resolve "." and ".." components of @a p1 and return an absolute path
 * using '/' separators.  Relative inputs are made absolute against the
 * current working directory; on Windows an explicit drive prefix such
 * as "C:" is preserved (drive-relative paths are not handled).
 */
std::string normalize_path(const std::string &p1)
{
	if (p1.empty()) return p1;
	std::string p2;
#ifdef _WIN32
	if (p1.size() >= 2 && p1[1] == ':')
		// Windows relative paths with explicit drive name are not handled.
		p2 = p1;
	else
#endif
	if (!is_path_sep(p1[0]))
		p2 = get_cwd() + "/" + p1;   // make relative paths absolute
	else
		p2 = p1;
#ifdef _WIN32
	// Strip the drive prefix while normalizing; re-attached at the end.
	std::string drive;
	if (p2.size() >= 2 && p2[1] == ':') {
		drive = p2.substr(0, 2);
		p2.erase(0, 2);
	}
#endif
	// Split into components, resolving "." and ".." as we go.  A
	// trailing NUL iteration (i == i_end) flushes the final component.
	std::vector<std::string> components(1);
	for (int i = 0, i_end = p2.size(); i <= i_end; ++i)
	{
		std::string &last = components[components.size() - 1];
		char c = p2.c_str()[i];
		if (is_path_sep(c) || c == 0)
		{
			if (last == ".")
				last.clear();            // "." is a no-op
			else if (last == "..")
			{
				// ".." removes the previous component (never above root)
				if (components.size() >= 2) {
					components.pop_back();
					components[components.size() - 1].clear();
				} else
					last.clear();
			}
			else if (!last.empty())
				components.push_back(std::string());
		}
		else
			last += c;
	}
	// Reassemble with '/' separators (the trailing empty slot is dropped).
	std::ostringstream p4;
	components.pop_back();
#ifdef _WIN32
	p4 << drive;
#endif
	BOOST_FOREACH(const std::string &s, components)
	{
		p4 << '/' << s;
	}
	DBG_FS << "Normalizing '" << p2 << "' to '" << p4.str() << "'\n";
	return p4.str();
}
| 0
|
[
"CWE-200"
] |
wesnoth
|
af61f9fdd15cd439da9e2fe5fa39d174c923eaae
| 290,866,477,249,455,750,000,000,000,000,000,000,000
| 64
|
fs: Use game data path to resolve ./ in the absence of a current_dir
Fixes a file content disclosure bug (#22042) affecting functionality
relying on the get_wml_location() function and not passing a non-empty
value for the current_dir parameter.
See <https://gna.org/bugs/?22042> for details.
This is a candidate for the 1.10 and 1.12 branches.
(Backported from master, commit 314425ab0e57b32909d324f7d4bf213d62cbd3b5.)
|
/* RFC 2822 lexical helper: consume a single '+' character at *indx in
 * 'message'.  Thin wrapper around mailimf_unstrict_char_parse; returns
 * its error code and advances *indx on success. */
static int mailimf_plus_parse(const char * message, size_t length,
			      size_t * indx)
{
  return mailimf_unstrict_char_parse(message, length, indx, '+');
}
| 0
|
[
"CWE-476"
] |
libetpan
|
1fe8fbc032ccda1db9af66d93016b49c16c1f22d
| 226,839,340,598,659,500,000,000,000,000,000,000,000
| 5
|
Fixed crash #274
|
/* GSO pre-segmentation check for IPv6: make sure the base header is in
 * the linear area, skip it plus any extension headers, then delegate
 * to the inner protocol's gso_send_check.  Returns 0 when the packet
 * can be segmented, otherwise a negative errno. */
static int ipv6_gso_send_check(struct sk_buff *skb)
{
	struct ipv6hdr *ipv6h;
	struct inet6_protocol *ops;
	int err = -EINVAL;

	if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h))))
		goto out;

	ipv6h = ipv6_hdr(skb);
	__skb_pull(skb, sizeof(*ipv6h));
	err = -EPROTONOSUPPORT;

	/* protocol table lookup is RCU protected */
	rcu_read_lock();
	ops = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr);
	if (likely(ops && ops->gso_send_check)) {
		skb_reset_transport_header(skb);
		err = ops->gso_send_check(skb);
	}
	rcu_read_unlock();

out:
	return err;
}
| 0
|
[] |
linux-2.6
|
2e761e0532a784816e7e822dbaaece8c5d4be14d
| 173,589,613,123,524,960,000,000,000,000,000,000,000
| 24
|
ipv6 netns: init net is used to set bindv6only for new sock
The bindv6only is tuned via sysctl. It is already on a struct net
and per-net sysctls allow for its modification (ipv6_sysctl_net_init).
Despite this the value configured in the init net is used for the
rest of them.
Signed-off-by: Pavel Emelyanov <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
/* Return non-zero when the MyISAM index file or data file has reached
 * 90% of its configured maximum size.  Compressed tables are read-only
 * and therefore never report "almost full". */
int test_if_almost_full(MI_INFO *info)
{
  if (info->s->options & HA_OPTION_COMPRESS_RECORD)
    return 0;
  /* seek to end to get the current file sizes; compare 90% thresholds */
  return mysql_file_seek(info->s->kfile, 0L, MY_SEEK_END,
                         MYF(MY_THREADSAFE)) / 10 * 9 >
         (my_off_t) info->s->base.max_key_file_length ||
         mysql_file_seek(info->dfile, 0L, MY_SEEK_END,
                         MYF(0)) / 10 * 9 >
         (my_off_t) info->s->base.max_data_file_length;
}
| 0
|
[
"CWE-362"
] |
mysql-server
|
4e5473862e6852b0f3802b0cd0c6fa10b5253291
| 50,539,823,951,429,920,000,000,000,000,000,000,000
| 11
|
Bug#24388746: PRIVILEGE ESCALATION AND RACE CONDITION USING CREATE TABLE
During REPAIR TABLE of a MyISAM table, a temporary data file (.TMD)
is created. When repair finishes, this file is renamed to the original
.MYD file. The problem was that during this rename, we copied the
stats from the old file to the new file with chmod/chown. If a user
managed to replace the temporary file before chmod/chown was executed,
it was possible to get an arbitrary file with the privileges of the
mysql user.
This patch fixes the problem by not copying stats from the old
file to the new file. This is not needed as the new file was
created with the correct stats. This fix only changes server
behavior - external utilities such as myisamchk still does
chmod/chown.
No test case provided since the problem involves synchronization
with file system operations.
|
static int megasas_create_frame_pool(struct megasas_instance *instance)
{
int i;
u16 max_cmd;
u32 sge_sz;
u32 frame_count;
struct megasas_cmd *cmd;
max_cmd = instance->max_mfi_cmds;
/*
* Size of our frame is 64 bytes for MFI frame, followed by max SG
* elements and finally SCSI_SENSE_BUFFERSIZE bytes for sense buffer
*/
sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) :
sizeof(struct megasas_sge32);
if (instance->flag_ieee)
sge_sz = sizeof(struct megasas_sge_skinny);
/*
* For MFI controllers.
* max_num_sge = 60
* max_sge_sz = 16 byte (sizeof megasas_sge_skinny)
* Total 960 byte (15 MFI frame of 64 byte)
*
* Fusion adapter require only 3 extra frame.
* max_num_sge = 16 (defined as MAX_IOCTL_SGE)
* max_sge_sz = 12 byte (sizeof megasas_sge64)
* Total 192 byte (3 MFI frame of 64 byte)
*/
frame_count = (instance->adapter_type == MFI_SERIES) ?
(15 + 1) : (3 + 1);
instance->mfi_frame_size = MEGAMFI_FRAME_SIZE * frame_count;
/*
* Use DMA pool facility provided by PCI layer
*/
instance->frame_dma_pool = dma_pool_create("megasas frame pool",
&instance->pdev->dev,
instance->mfi_frame_size, 256, 0);
if (!instance->frame_dma_pool) {
dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup frame pool\n");
return -ENOMEM;
}
instance->sense_dma_pool = dma_pool_create("megasas sense pool",
&instance->pdev->dev, 128,
4, 0);
if (!instance->sense_dma_pool) {
dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup sense pool\n");
dma_pool_destroy(instance->frame_dma_pool);
instance->frame_dma_pool = NULL;
return -ENOMEM;
}
/*
* Allocate and attach a frame to each of the commands in cmd_list.
* By making cmd->index as the context instead of the &cmd, we can
* always use 32bit context regardless of the architecture
*/
for (i = 0; i < max_cmd; i++) {
cmd = instance->cmd_list[i];
cmd->frame = dma_pool_zalloc(instance->frame_dma_pool,
GFP_KERNEL, &cmd->frame_phys_addr);
cmd->sense = dma_pool_alloc(instance->sense_dma_pool,
GFP_KERNEL, &cmd->sense_phys_addr);
/*
* megasas_teardown_frame_pool() takes care of freeing
* whatever has been allocated
*/
if (!cmd->frame || !cmd->sense) {
dev_printk(KERN_DEBUG, &instance->pdev->dev, "dma_pool_alloc failed\n");
megasas_teardown_frame_pool(instance);
return -ENOMEM;
}
cmd->frame->io.context = cpu_to_le32(cmd->index);
cmd->frame->io.pad_0 = 0;
if ((instance->adapter_type == MFI_SERIES) && reset_devices)
cmd->frame->hdr.cmd = MFI_CMD_INVALID;
}
return 0;
}
| 0
|
[
"CWE-476"
] |
linux
|
bcf3b67d16a4c8ffae0aa79de5853435e683945c
| 7,547,211,850,674,665,000,000,000,000,000,000,000
| 92
|
scsi: megaraid_sas: return error when create DMA pool failed
when create DMA pool for cmd frames failed, we should return -ENOMEM,
instead of 0.
In some case in:
megasas_init_adapter_fusion()
-->megasas_alloc_cmds()
-->megasas_create_frame_pool
create DMA pool failed,
--> megasas_free_cmds() [1]
-->megasas_alloc_cmds_fusion()
failed, then goto fail_alloc_cmds.
-->megasas_free_cmds() [2]
we will call megasas_free_cmds twice, [1] will kfree cmd_list,
[2] will use cmd_list.it will cause a problem:
Unable to handle kernel NULL pointer dereference at virtual address
00000000
pgd = ffffffc000f70000
[00000000] *pgd=0000001fbf893003, *pud=0000001fbf893003,
*pmd=0000001fbf894003, *pte=006000006d000707
Internal error: Oops: 96000005 [#1] SMP
Modules linked in:
CPU: 18 PID: 1 Comm: swapper/0 Not tainted
task: ffffffdfb9290000 ti: ffffffdfb923c000 task.ti: ffffffdfb923c000
PC is at megasas_free_cmds+0x30/0x70
LR is at megasas_free_cmds+0x24/0x70
...
Call trace:
[<ffffffc0005b779c>] megasas_free_cmds+0x30/0x70
[<ffffffc0005bca74>] megasas_init_adapter_fusion+0x2f4/0x4d8
[<ffffffc0005b926c>] megasas_init_fw+0x2dc/0x760
[<ffffffc0005b9ab0>] megasas_probe_one+0x3c0/0xcd8
[<ffffffc0004a5abc>] local_pci_probe+0x4c/0xb4
[<ffffffc0004a5c40>] pci_device_probe+0x11c/0x14c
[<ffffffc00053a5e4>] driver_probe_device+0x1ec/0x430
[<ffffffc00053a92c>] __driver_attach+0xa8/0xb0
[<ffffffc000538178>] bus_for_each_dev+0x74/0xc8
[<ffffffc000539e88>] driver_attach+0x28/0x34
[<ffffffc000539a18>] bus_add_driver+0x16c/0x248
[<ffffffc00053b234>] driver_register+0x6c/0x138
[<ffffffc0004a5350>] __pci_register_driver+0x5c/0x6c
[<ffffffc000ce3868>] megasas_init+0xc0/0x1a8
[<ffffffc000082a58>] do_one_initcall+0xe8/0x1ec
[<ffffffc000ca7be8>] kernel_init_freeable+0x1c8/0x284
[<ffffffc0008d90b8>] kernel_init+0x1c/0xe4
Signed-off-by: Jason Yan <[email protected]>
Acked-by: Sumit Saxena <[email protected]>
Signed-off-by: Martin K. Petersen <[email protected]>
|
unload_job(cupsd_job_t *job) /* I - Job */
{
if (!job->attrs)
return;
cupsdLogJob(job, CUPSD_LOG_DEBUG, "Unloading...");
ippDelete(job->attrs);
job->attrs = NULL;
job->state = NULL;
job->reasons = NULL;
job->impressions = NULL;
job->sheets = NULL;
job->job_sheets = NULL;
job->printer_message = NULL;
job->printer_reasons = NULL;
}
| 0
|
[] |
cups
|
d47f6aec436e0e9df6554436e391471097686ecc
| 291,201,594,956,766,100,000,000,000,000,000,000,000
| 18
|
Fix local privilege escalation to root and sandbox bypasses in scheduler
(rdar://37836779, rdar://37836995, rdar://37837252, rdar://37837581)
|
Header::screenWindowCenter ()
{
return static_cast <V2fAttribute &>
((*this)["screenWindowCenter"]).value();
}
| 0
|
[
"CWE-125"
] |
openexr
|
e79d2296496a50826a15c667bf92bdc5a05518b4
| 90,601,907,415,795,060,000,000,000,000,000,000,000
| 5
|
fix memory leaks and invalid memory accesses
Signed-off-by: Peter Hillman <[email protected]>
|
int octetStringIndexer(
slap_mask_t use,
slap_mask_t flags,
Syntax *syntax,
MatchingRule *mr,
struct berval *prefix,
BerVarray values,
BerVarray *keysp,
void *ctx )
{
int i;
size_t slen, mlen;
BerVarray keys;
HASH_CONTEXT HASHcontext;
unsigned char HASHdigest[HASH_BYTES];
struct berval digest;
digest.bv_val = (char *)HASHdigest;
digest.bv_len = sizeof(HASHdigest);
for( i=0; !BER_BVISNULL( &values[i] ); i++ ) {
/* just count them */
}
/* we should have at least one value at this point */
assert( i > 0 );
keys = slap_sl_malloc( sizeof( struct berval ) * (i+1), ctx );
slen = syntax->ssyn_oidlen;
mlen = mr->smr_oidlen;
hashPreset( &HASHcontext, prefix, 0, syntax, mr);
for( i=0; !BER_BVISNULL( &values[i] ); i++ ) {
hashIter( &HASHcontext, HASHdigest,
(unsigned char *)values[i].bv_val, values[i].bv_len );
ber_dupbv_x( &keys[i], &digest, ctx );
}
BER_BVZERO( &keys[i] );
*keysp = keys;
return LDAP_SUCCESS;
}
| 0
|
[
"CWE-617"
] |
openldap
|
67670f4544e28fb09eb7319c39f404e1d3229e65
| 190,702,379,785,254,230,000,000,000,000,000,000,000
| 44
|
ITS#9383 remove assert in certificateListValidate
|
PosibErr<void> Config::read_in_file(ParmStr file) {
FStream in;
RET_ON_ERR(in.open(file, "r"));
return read_in(in, file);
}
| 0
|
[
"CWE-125"
] |
aspell
|
80fa26c74279fced8d778351cff19d1d8f44fe4e
| 205,394,628,028,420,640,000,000,000,000,000,000,000
| 5
|
Fix various bugs found by OSS-Fuze.
|
static int vhost_scsi_open(struct inode *inode, struct file *f)
{
struct vhost_scsi *vs;
struct vhost_virtqueue **vqs;
int r = -ENOMEM, i;
vs = kzalloc(sizeof(*vs), GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
if (!vs) {
vs = vzalloc(sizeof(*vs));
if (!vs)
goto err_vs;
}
vqs = kmalloc(VHOST_SCSI_MAX_VQ * sizeof(*vqs), GFP_KERNEL);
if (!vqs)
goto err_vqs;
vhost_work_init(&vs->vs_completion_work, vhost_scsi_complete_cmd_work);
vhost_work_init(&vs->vs_event_work, vhost_scsi_evt_work);
vs->vs_events_nr = 0;
vs->vs_events_missed = false;
vqs[VHOST_SCSI_VQ_CTL] = &vs->vqs[VHOST_SCSI_VQ_CTL].vq;
vqs[VHOST_SCSI_VQ_EVT] = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
vs->vqs[VHOST_SCSI_VQ_CTL].vq.handle_kick = vhost_scsi_ctl_handle_kick;
vs->vqs[VHOST_SCSI_VQ_EVT].vq.handle_kick = vhost_scsi_evt_handle_kick;
for (i = VHOST_SCSI_VQ_IO; i < VHOST_SCSI_MAX_VQ; i++) {
vqs[i] = &vs->vqs[i].vq;
vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
}
vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ);
vhost_scsi_init_inflight(vs, NULL);
f->private_data = vs;
return 0;
err_vqs:
kvfree(vs);
err_vs:
return r;
}
| 0
|
[
"CWE-200",
"CWE-119"
] |
linux
|
59c816c1f24df0204e01851431d3bab3eb76719c
| 201,191,960,484,897,360,000,000,000,000,000,000,000
| 43
|
vhost/scsi: potential memory corruption
This code in vhost_scsi_make_tpg() is confusing because we limit "tpgt"
to UINT_MAX but the data type of "tpg->tport_tpgt" and that is a u16.
I looked at the context and it turns out that in
vhost_scsi_set_endpoint(), "tpg->tport_tpgt" is used as an offset into
the vs_tpg[] array which has VHOST_SCSI_MAX_TARGET (256) elements so
anything higher than 255 then it is invalid. I have made that the limit
now.
In vhost_scsi_send_evt() we mask away values higher than 255, but now
that the limit has changed, we don't need the mask.
Signed-off-by: Dan Carpenter <[email protected]>
Signed-off-by: Nicholas Bellinger <[email protected]>
|
void CLASS smal_v9_load_raw()
{
unsigned seg[256][2], offset, nseg, holes, i;
fseek(ifp, 67, SEEK_SET);
offset = get4();
nseg = (uchar)fgetc(ifp);
fseek(ifp, offset, SEEK_SET);
for (i = 0; i < nseg * 2; i++)
((unsigned *)seg)[i] = get4() + data_offset * (i & 1);
fseek(ifp, 78, SEEK_SET);
holes = fgetc(ifp);
fseek(ifp, 88, SEEK_SET);
seg[nseg][0] = raw_height * raw_width;
seg[nseg][1] = get4() + data_offset;
for (i = 0; i < nseg; i++)
smal_decode_segment(seg + i, holes);
if (holes)
fill_holes(holes);
}
| 0
|
[
"CWE-476",
"CWE-119"
] |
LibRaw
|
d7c3d2cb460be10a3ea7b32e9443a83c243b2251
| 310,876,264,412,531,740,000,000,000,000,000,000,000
| 20
|
Secunia SA75000 advisory: several buffer overruns
|
Subsets and Splits
CWE 416 & 19
The query filters records related to specific CWEs (Common Weakness Enumerations), providing a basic overview of entries with these vulnerabilities but without deeper analysis.
CWE Frequency in Train Set
Counts the occurrences of each CWE (Common Weakness Enumeration) in the dataset, providing a basic distribution but limited insight.