func stringlengths 0-484k | target int64 0-1 | cwe listlengths 0-4 | project stringclasses 799 values | commit_id stringlengths 40 | hash float64 ~1.22e24-3.40e29 | size int64 1-24k | message stringlengths 0-13.3k
---|---|---|---|---|---|---|---|
recursive_boxed_constraint_type_check (VerifyContext *ctx, MonoType *type, MonoClass *constraint_class, int recursion_level)
{
MonoType *constraint_type = &constraint_class->byval_arg;
if (recursion_level <= 0)
return FALSE;
if (verify_type_compatibility_full (ctx, type, mono_type_get_type_byval (constraint_type), FALSE))
return TRUE;
if (mono_type_is_generic_argument (constraint_type)) {
MonoGenericParam *param = get_generic_param (ctx, constraint_type);
MonoClass **class;
if (!param)
return FALSE;
for (class = mono_generic_param_info (param)->constraints; class && *class; ++class) {
if (recursive_boxed_constraint_type_check (ctx, type, *class, recursion_level - 1))
return TRUE;
}
}
return FALSE;
} | 0 | [
"CWE-20"
]
| mono | 4905ef1130feb26c3150b28b97e4a96752e0d399 | 87,924,120,372,163,930,000,000,000,000,000,000,000 | 21 | Handle invalid instantiation of generic methods.
* verify.c: Add new function to internal verifier API to check
method instantiations.
* reflection.c (mono_reflection_bind_generic_method_parameters):
Check the instantiation before returning it.
Fixes #655847 |
htmlcharactersDebug(void *ctx ATTRIBUTE_UNUSED, const xmlChar *ch, int len)
{
unsigned char output[40];
int inlen = len, outlen = 30;
htmlEncodeEntities(output, &outlen, ch, &inlen, 0);
output[outlen] = 0;
fprintf(SAXdebug, "SAX.characters(%s, %d)\n", output, len);
} | 0 | [
"CWE-125"
]
| libxml2 | a820dbeac29d330bae4be05d9ecd939ad6b4aa33 | 123,638,575,390,301,500,000,000,000,000,000,000,000 | 10 | Bug 758605: Heap-based buffer overread in xmlDictAddString <https://bugzilla.gnome.org/show_bug.cgi?id=758605>
Reviewed by David Kilzer.
* HTMLparser.c:
(htmlParseName): Add bounds check.
(htmlParseNameComplex): Ditto.
* result/HTML/758605.html: Added.
* result/HTML/758605.html.err: Added.
* result/HTML/758605.html.sax: Added.
* runtest.c:
(pushParseTest): The input for the new test case was so small
(4 bytes) that htmlParseChunk() was never called after
htmlCreatePushParserCtxt(), thereby creating a false positive
test failure. Fixed by using a do-while loop so we always call
htmlParseChunk() at least once.
* test/HTML/758605.html: Added. |
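The commit message above describes the fix as adding bounds checks to htmlParseName()/htmlParseNameComplex(). A minimal sketch of that pattern, testing the cursor against the buffer end before every read; the names below are illustrative, not libxml2's actual parser state:

```c
/* Hypothetical bounds-checked name scan. The fix is the `cur < end`
 * test performed before each dereference, so a name that runs to the
 * end of the input cannot be read past the buffer. */
static int
parse_name_len(const unsigned char *buf, size_t buflen)
{
    const unsigned char *cur = buf;
    const unsigned char *end = buf + buflen;

    while (cur < end &&
           ((*cur >= 'a' && *cur <= 'z') || (*cur >= 'A' && *cur <= 'Z') ||
            (*cur >= '0' && *cur <= '9') ||
            *cur == '_' || *cur == '-' || *cur == ':' || *cur == '.'))
        cur++;
    return (int)(cur - buf);
}
```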
mptctl_do_fw_download(MPT_ADAPTER *iocp, char __user *ufwbuf, size_t fwlen)
{
FWDownload_t *dlmsg;
MPT_FRAME_HDR *mf;
FWDownloadTCSGE_t *ptsge;
MptSge_t *sgl, *sgIn;
char *sgOut;
struct buflist *buflist;
struct buflist *bl;
dma_addr_t sgl_dma;
int ret;
int numfrags = 0;
int maxfrags;
int n = 0;
u32 sgdir;
u32 nib;
int fw_bytes_copied = 0;
int i;
int sge_offset = 0;
u16 iocstat;
pFWDownloadReply_t ReplyMsg = NULL;
unsigned long timeleft;
/* Valid device. Get a message frame and construct the FW download message.
*/
if ((mf = mpt_get_msg_frame(mptctl_id, iocp)) == NULL)
return -EAGAIN;
dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT
"mptctl_do_fwdl called. mptctl_id = %xh.\n", iocp->name, mptctl_id));
dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT "DbG: kfwdl.bufp = %p\n",
iocp->name, ufwbuf));
dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT "DbG: kfwdl.fwlen = %d\n",
iocp->name, (int)fwlen));
dlmsg = (FWDownload_t*) mf;
ptsge = (FWDownloadTCSGE_t *) &dlmsg->SGL;
sgOut = (char *) (ptsge + 1);
/*
* Construct f/w download request
*/
dlmsg->ImageType = MPI_FW_DOWNLOAD_ITYPE_FW;
dlmsg->Reserved = 0;
dlmsg->ChainOffset = 0;
dlmsg->Function = MPI_FUNCTION_FW_DOWNLOAD;
dlmsg->Reserved1[0] = dlmsg->Reserved1[1] = dlmsg->Reserved1[2] = 0;
if (iocp->facts.MsgVersion >= MPI_VERSION_01_05)
dlmsg->MsgFlags = MPI_FW_DOWNLOAD_MSGFLGS_LAST_SEGMENT;
else
dlmsg->MsgFlags = 0;
/* Set up the Transaction SGE.
*/
ptsge->Reserved = 0;
ptsge->ContextSize = 0;
ptsge->DetailsLength = 12;
ptsge->Flags = MPI_SGE_FLAGS_TRANSACTION_ELEMENT;
ptsge->Reserved_0100_Checksum = 0;
ptsge->ImageOffset = 0;
ptsge->ImageSize = cpu_to_le32(fwlen);
/* Add the SGL
*/
/*
* Need to kmalloc area(s) for holding firmware image bytes.
* But we need to do it piecemeal, using a proper
* scatter gather list (with 128kB MAX hunks).
*
* A practical limit here might be # of sg hunks that fit into
* a single IOC request frame; 12 or 8 (see below), so:
* For FC9xx: 12 x 128kB == 1.5 mB (max)
* For C1030: 8 x 128kB == 1 mB (max)
* We could support chaining, but things get ugly(ier:)
*
* Set the sge_offset to the start of the sgl (bytes).
*/
sgdir = 0x04000000; /* IOC will READ from sys mem */
sge_offset = sizeof(MPIHeader_t) + sizeof(FWDownloadTCSGE_t);
if ((sgl = kbuf_alloc_2_sgl(fwlen, sgdir, sge_offset,
&numfrags, &buflist, &sgl_dma, iocp)) == NULL)
return -ENOMEM;
/*
* We should only need SGL with 2 simple_32bit entries (up to 256 kB)
* for FC9xx f/w image, but calculate max number of sge hunks
* we can fit into a request frame, and limit ourselves to that.
* (currently no chain support)
* maxfrags = (Request Size - FWdownload Size ) / Size of 32 bit SGE
* Request maxfrags
* 128 12
* 96 8
* 64 4
*/
maxfrags = (iocp->req_sz - sizeof(MPIHeader_t) -
sizeof(FWDownloadTCSGE_t))
/ iocp->SGE_size;
if (numfrags > maxfrags) {
ret = -EMLINK;
goto fwdl_out;
}
dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT "DbG: sgl buffer = %p, sgfrags = %d\n",
iocp->name, sgl, numfrags));
/*
* Parse SG list, copying sgl itself,
* plus f/w image hunks from user space as we go...
*/
ret = -EFAULT;
sgIn = sgl;
bl = buflist;
for (i=0; i < numfrags; i++) {
/* Get the SGE type: 0 - TCSGE, 3 - Chain, 1 - Simple SGE
* Skip everything but Simple. If simple, copy from
* user space into kernel space.
* Note: we should not have anything but Simple as
* Chain SGE are illegal.
*/
nib = (sgIn->FlagsLength & 0x30000000) >> 28;
if (nib == 0 || nib == 3) {
;
} else if (sgIn->Address) {
iocp->add_sge(sgOut, sgIn->FlagsLength, sgIn->Address);
n++;
if (copy_from_user(bl->kptr, ufwbuf+fw_bytes_copied, bl->len)) {
printk(MYIOC_s_ERR_FMT "%s@%d::_ioctl_fwdl - "
"Unable to copy f/w buffer hunk#%d @ %p\n",
iocp->name, __FILE__, __LINE__, n, ufwbuf);
goto fwdl_out;
}
fw_bytes_copied += bl->len;
}
sgIn++;
bl++;
sgOut += iocp->SGE_size;
}
DBG_DUMP_FW_DOWNLOAD(iocp, (u32 *)mf, numfrags);
/*
* Finally, perform firmware download.
*/
ReplyMsg = NULL;
SET_MGMT_MSG_CONTEXT(iocp->ioctl_cmds.msg_context, dlmsg->MsgContext);
INITIALIZE_MGMT_STATUS(iocp->ioctl_cmds.status)
mpt_put_msg_frame(mptctl_id, iocp, mf);
/* Now wait for the command to complete */
retry_wait:
timeleft = wait_for_completion_timeout(&iocp->ioctl_cmds.done, HZ*60);
if (!(iocp->ioctl_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
ret = -ETIME;
printk(MYIOC_s_WARN_FMT "%s: failed\n", iocp->name, __func__);
if (iocp->ioctl_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET) {
mpt_free_msg_frame(iocp, mf);
goto fwdl_out;
}
if (!timeleft) {
printk(MYIOC_s_WARN_FMT
"FW download timeout, doorbell=0x%08x\n",
iocp->name, mpt_GetIocState(iocp, 0));
mptctl_timeout_expired(iocp, mf);
} else
goto retry_wait;
goto fwdl_out;
}
if (!(iocp->ioctl_cmds.status & MPT_MGMT_STATUS_RF_VALID)) {
printk(MYIOC_s_WARN_FMT "%s: failed\n", iocp->name, __func__);
mpt_free_msg_frame(iocp, mf);
ret = -ENODATA;
goto fwdl_out;
}
if (sgl)
kfree_sgl(sgl, sgl_dma, buflist, iocp);
ReplyMsg = (pFWDownloadReply_t)iocp->ioctl_cmds.reply;
iocstat = le16_to_cpu(ReplyMsg->IOCStatus) & MPI_IOCSTATUS_MASK;
if (iocstat == MPI_IOCSTATUS_SUCCESS) {
printk(MYIOC_s_INFO_FMT "F/W update successful!\n", iocp->name);
return 0;
} else if (iocstat == MPI_IOCSTATUS_INVALID_FUNCTION) {
printk(MYIOC_s_WARN_FMT "Hmmm... F/W download not supported!?!\n",
iocp->name);
printk(MYIOC_s_WARN_FMT "(time to go bang on somebodies door)\n",
iocp->name);
return -EBADRQC;
} else if (iocstat == MPI_IOCSTATUS_BUSY) {
printk(MYIOC_s_WARN_FMT "IOC_BUSY!\n", iocp->name);
printk(MYIOC_s_WARN_FMT "(try again later?)\n", iocp->name);
return -EBUSY;
} else {
printk(MYIOC_s_WARN_FMT "ioctl_fwdl() returned [bad] status = %04xh\n",
iocp->name, iocstat);
printk(MYIOC_s_WARN_FMT "(bad VooDoo)\n", iocp->name);
return -ENOMSG;
}
return 0;
fwdl_out:
CLEAR_MGMT_STATUS(iocp->ioctl_cmds.status);
SET_MGMT_MSG_CONTEXT(iocp->ioctl_cmds.msg_context, 0);
kfree_sgl(sgl, sgl_dma, buflist, iocp);
return ret;
} | 0 | [
"CWE-362",
"CWE-369"
]
| linux | 28d76df18f0ad5bcf5fa48510b225f0ed262a99b | 504,564,490,950,595,460,000,000,000,000,000,000 | 211 | scsi: mptfusion: Fix double fetch bug in ioctl
Tom Hatskevich reported that we look up "iocp" then, in the called
functions we do a second copy_from_user() and look it up again.
The problem that could cause is:
drivers/message/fusion/mptctl.c
674 /* All of these commands require an interrupt or
675 * are unknown/illegal.
676 */
677 if ((ret = mptctl_syscall_down(iocp, nonblock)) != 0)
^^^^
We take this lock.
678 return ret;
679
680 if (cmd == MPTFWDOWNLOAD)
681 ret = mptctl_fw_download(arg);
^^^
Then the user memory changes and we look up "iocp" again but a different
one so now we are holding the incorrect lock and have a race condition.
682 else if (cmd == MPTCOMMAND)
683 ret = mptctl_mpt_command(arg);
The security impact of this bug is not as bad as it could have been
because these operations are all privileged and root already has
enormous destructive power. But it's still worth fixing.
This patch passes the "iocp" pointer to the functions to avoid the
second lookup. That deletes 100 lines of code from the driver so
it's a nice clean up as well.
Link: https://lore.kernel.org/r/20200114123414.GA7957@kadam
Reported-by: Tom Hatskevich <[email protected]>
Reviewed-by: Greg Kroah-Hartman <[email protected]>
Signed-off-by: Dan Carpenter <[email protected]>
Signed-off-by: Martin K. Petersen <[email protected]> |
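The fix shape described above can be sketched in two lines: resolve "iocp" once in the ioctl entry point and pass the pointer down, so there is a single fetch of the user argument and a single lookup under the lock. The calls follow the commit text; argument lists are simplified:

```c
/* Before (double fetch): the handler re-reads the ioctl argument from
 * user space and looks "iocp" up a second time, racing with the lock
 * taken after the first lookup. */
ret = mptctl_fw_download(arg);

/* After: the already-validated adapter pointer is threaded through,
 * so the second copy_from_user()/lookup pair disappears. */
ret = mptctl_fw_download(iocp, arg);
```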
int linenoiseHistorySave(const char* filename) {
FILE* fp = fopen(filename, "wt");
if (fp == NULL) {
return -1;
}
for (int j = 0; j < historyLen; ++j) {
if (history[j][0] != '\0') {
fprintf(fp, "%s\n", history[j]);
}
}
fclose(fp);
return 0;
} | 1 | [
"CWE-200"
]
| mongo | 035cf2afc04988b22cb67f4ebfd77e9b344cb6e0 | 178,347,086,034,084,700,000,000,000,000,000,000,000 | 14 | SERVER-25335 avoid group and other permissions when creating .dbshell history file |
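The CWE-200 label and the message point at file permissions: fopen("wt") creates the history file with umask-derived group/other bits. A self-contained sketch of the usual remedy, creating the file with an explicit 0600 mode; this is the pattern the message describes, not necessarily mongo's exact patch:

```c
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Create the history file readable and writable by the owner only. */
static FILE *open_history_for_write(const char *filename) {
    int fd = open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0600);
    if (fd < 0)
        return NULL;
    FILE *fp = fdopen(fd, "w");
    if (fp == NULL)
        close(fd);  /* don't leak the descriptor if fdopen fails */
    return fp;
}
```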
static RList *create_cache_bins(RBinFile *bf, RDyldCache *cache) {
RList *bins = r_list_newf ((RListFree)free_bin);
if (!bins) {
return NULL;
}
char *target_libs = NULL;
RList *target_lib_names = NULL;
int *deps = NULL;
target_libs = r_sys_getenv ("R_DYLDCACHE_FILTER");
if (target_libs) {
target_lib_names = r_str_split_list (target_libs, ":", 0);
if (!target_lib_names) {
r_list_free (bins);
return NULL;
}
deps = R_NEWS0 (int, cache->hdr->imagesCount);
if (!deps) {
r_list_free (bins);
r_list_free (target_lib_names);
return NULL;
}
}
ut32 i;
for (i = 0; i < cache->n_hdr; i++) {
cache_hdr_t *hdr = &cache->hdr[i];
ut64 hdr_offset = cache->hdr_offset[i];
ut32 maps_index = cache->maps_index[i];
cache_img_t *img = read_cache_images (cache->buf, hdr, hdr_offset);
if (!img) {
goto next;
}
ut32 j;
ut16 *depArray = NULL;
cache_imgxtr_t *extras = NULL;
if (target_libs) {
HtPU *path_to_idx = NULL;
if (cache->accel) {
depArray = R_NEWS0 (ut16, cache->accel->depListCount);
if (!depArray) {
goto next;
}
if (r_buf_fread_at (cache->buf, cache->accel->depListOffset, (ut8*) depArray, "s", cache->accel->depListCount) != cache->accel->depListCount * 2) {
goto next;
}
extras = read_cache_imgextra (cache->buf, hdr, cache->accel);
if (!extras) {
goto next;
}
} else {
path_to_idx = create_path_to_index (cache->buf, img, hdr);
}
for (j = 0; j < hdr->imagesCount; j++) {
bool printing = !deps[j];
char *lib_name = get_lib_name (cache->buf, &img[j]);
if (!lib_name) {
break;
}
if (strstr (lib_name, "libobjc.A.dylib")) {
deps[j]++;
}
if (!r_list_find (target_lib_names, lib_name, string_contains)) {
R_FREE (lib_name);
continue;
}
if (printing) {
eprintf ("FILTER: %s\n", lib_name);
}
R_FREE (lib_name);
deps[j]++;
if (extras && depArray) {
ut32 k;
for (k = extras[j].dependentsStartArrayIndex; depArray[k] != 0xffff; k++) {
ut16 dep_index = depArray[k] & 0x7fff;
deps[dep_index]++;
char *dep_name = get_lib_name (cache->buf, &img[dep_index]);
if (!dep_name) {
break;
}
if (printing) {
eprintf ("-> %s\n", dep_name);
}
free (dep_name);
}
} else if (path_to_idx) {
carve_deps_at_address (cache, img, path_to_idx, img[j].address, deps, printing);
}
}
ht_pu_free (path_to_idx);
R_FREE (depArray);
R_FREE (extras);
}
for (j = 0; j < hdr->imagesCount; j++) {
if (deps && !deps[j]) {
continue;
}
ut64 pa = va2pa (img[j].address, hdr->mappingCount, &cache->maps[maps_index], cache->buf, 0, NULL, NULL);
if (pa == UT64_MAX) {
continue;
}
ut8 magicbytes[4];
r_buf_read_at (cache->buf, pa, magicbytes, 4);
int magic = r_read_le32 (magicbytes);
switch (magic) {
case MH_MAGIC_64:
{
char file[256];
RDyldBinImage *bin = R_NEW0 (RDyldBinImage);
if (!bin) {
goto next;
}
bin->header_at = pa;
bin->hdr_offset = hdr_offset;
bin->symbols_off = resolve_symbols_off (cache, pa);
bin->va = img[j].address;
if (r_buf_read_at (cache->buf, img[j].pathFileOffset, (ut8*) &file, sizeof (file)) == sizeof (file)) {
file[255] = 0;
char *last_slash = strrchr (file, '/');
if (last_slash && *last_slash) {
if (last_slash > file) {
char *scan = last_slash - 1;
while (scan > file && *scan != '/') {
scan--;
}
if (*scan == '/') {
bin->file = strdup (scan + 1);
} else {
bin->file = strdup (last_slash + 1);
}
} else {
bin->file = strdup (last_slash + 1);
}
} else {
bin->file = strdup (file);
}
}
r_list_append (bins, bin);
break;
}
default:
eprintf ("Unknown sub-bin\n");
break;
}
}
next:
R_FREE (depArray);
R_FREE (extras);
R_FREE (img);
}
if (r_list_empty (bins)) {
r_list_free (bins);
bins = NULL;
}
R_FREE (deps);
R_FREE (target_libs);
r_list_free (target_lib_names);
return bins;
} | 1 | [
"CWE-787"
]
| radare2 | c84b7232626badd075caf3ae29661b609164bac6 | 260,772,577,031,726,000,000,000,000,000,000,000,000 | 167 | Fix heap buffer overflow in dyldcache parser ##crash
* Reported by: Lazymio via huntr.dev
* Reproducer: dyldovf |
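The overread lives in the dependents walk above: `for (k = ...; depArray[k] != 0xffff; k++)` trusts a hostile cache to contain the 0xffff sentinel before the end of the depArray allocation. A hedged sketch of the missing bound, using names from the function; the actual upstream patch may differ in detail:

```c
/* Bound the walk by the allocated element count as well as the
 * sentinel, so a truncated or malicious dep list cannot index past
 * the end of depArray. */
ut32 k;
for (k = extras[j].dependentsStartArrayIndex;
     k < cache->accel->depListCount && depArray[k] != 0xffff;
     k++) {
	ut16 dep_index = depArray[k] & 0x7fff;
	if (dep_index >= hdr->imagesCount)
		break;  /* also validate the decoded image index */
	deps[dep_index]++;
}
```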
static u32 tcm_loop_get_default_depth(struct se_portal_group *se_tpg)
{
return 1;
} | 0 | [
"CWE-119",
"CWE-787"
]
| linux | 12f09ccb4612734a53e47ed5302e0479c10a50f8 | 123,541,777,076,910,460,000,000,000,000,000,000,000 | 4 | loopback: off by one in tcm_loop_make_naa_tpg()
This is an off by one 'tgpt' check in tcm_loop_make_naa_tpg() that could result
in memory corruption.
Signed-off-by: Dan Carpenter <[email protected]>
Signed-off-by: Nicholas A. Bellinger <[email protected]> |
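The off-by-one named in the message is the classic `>` versus `>=` bound on an array index. A sketch of the corrected check in tcm_loop_make_naa_tpg(); TL_TPGS_PER_HBA comes from the driver, the surrounding lines are illustrative:

```c
/* tl_hba->tl_hba_tpgs[] has TL_TPGS_PER_HBA slots, so the last valid
 * index is TL_TPGS_PER_HBA - 1. Checking with '>' let
 * tpgt == TL_TPGS_PER_HBA through and wrote one element past the
 * array. */
if (tpgt >= TL_TPGS_PER_HBA) {
	printk(KERN_ERR "Passed tpgt: %lu exceeds TL_TPGS_PER_HBA: %u\n",
	       tpgt, TL_TPGS_PER_HBA);
	return ERR_PTR(-EINVAL);
}
```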
inherit_event(struct perf_event *parent_event,
struct task_struct *parent,
struct perf_event_context *parent_ctx,
struct task_struct *child,
struct perf_event *group_leader,
struct perf_event_context *child_ctx)
{
enum perf_event_active_state parent_state = parent_event->state;
struct perf_event *child_event;
unsigned long flags;
/*
* Instead of creating recursive hierarchies of events,
* we link inherited events back to the original parent,
* which has a filp for sure, which we use as the reference
* count:
*/
if (parent_event->parent)
parent_event = parent_event->parent;
child_event = perf_event_alloc(&parent_event->attr,
parent_event->cpu,
child,
group_leader, parent_event,
NULL, NULL);
if (IS_ERR(child_event))
return child_event;
if (is_orphaned_event(parent_event) ||
!atomic_long_inc_not_zero(&parent_event->refcount)) {
free_event(child_event);
return NULL;
}
get_ctx(child_ctx);
/*
* Make the child state follow the state of the parent event,
* not its attr.disabled bit. We hold the parent's mutex,
* so we won't race with perf_event_{en, dis}able_family.
*/
if (parent_state >= PERF_EVENT_STATE_INACTIVE)
child_event->state = PERF_EVENT_STATE_INACTIVE;
else
child_event->state = PERF_EVENT_STATE_OFF;
if (parent_event->attr.freq) {
u64 sample_period = parent_event->hw.sample_period;
struct hw_perf_event *hwc = &child_event->hw;
hwc->sample_period = sample_period;
hwc->last_period = sample_period;
local64_set(&hwc->period_left, sample_period);
}
child_event->ctx = child_ctx;
child_event->overflow_handler = parent_event->overflow_handler;
child_event->overflow_handler_context
= parent_event->overflow_handler_context;
/*
* Precalculate sample_data sizes
*/
perf_event__header_size(child_event);
perf_event__id_header_size(child_event);
/*
* Link it up in the child's context:
*/
raw_spin_lock_irqsave(&child_ctx->lock, flags);
add_event_to_ctx(child_event, child_ctx);
raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
/*
* Link this into the parent event's child list
*/
WARN_ON_ONCE(parent_event->ctx->parent_ctx);
mutex_lock(&parent_event->child_mutex);
list_add_tail(&child_event->child_list, &parent_event->child_list);
mutex_unlock(&parent_event->child_mutex);
return child_event;
} | 0 | [
"CWE-284",
"CWE-264"
]
| linux | f63a8daa5812afef4f06c962351687e1ff9ccb2b | 18,922,788,820,090,225,000,000,000,000,000,000,000 | 84 | perf: Fix event->ctx locking
There have been a few reported issues wrt. the lack of locking around
changing event->ctx. This patch tries to address those.
It avoids the whole rwsem thing; and while it appears to work, please
give it some thought in review.
What I did fail at is sensible runtime checks on the use of
event->ctx; the RCU use makes it very hard.
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Cc: Paul E. McKenney <[email protected]>
Cc: Jiri Olsa <[email protected]>
Cc: Arnaldo Carvalho de Melo <[email protected]>
Cc: Linus Torvalds <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]> |
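The pattern this work converged on is a helper that pins event->ctx before taking its mutex and re-checks the pointer afterwards. The sketch below is simplified from the eventual upstream helper (perf_event_ctx_lock()); treat the exact field types as era-dependent:

```c
/* Pin the context, take its mutex, then verify the event still points
 * at the same ctx; if it changed while we slept, drop and retry. */
static struct perf_event_context *event_ctx_lock(struct perf_event *event)
{
	struct perf_event_context *ctx;

again:
	rcu_read_lock();
	ctx = READ_ONCE(event->ctx);
	if (!atomic_inc_not_zero(&ctx->refcount)) {
		rcu_read_unlock();
		goto again;
	}
	rcu_read_unlock();

	mutex_lock(&ctx->mutex);
	if (event->ctx != ctx) {	/* raced with a ctx change */
		mutex_unlock(&ctx->mutex);
		put_ctx(ctx);
		goto again;
	}
	return ctx;
}
```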
struct page *agp_generic_alloc_page(struct agp_bridge_data *bridge)
{
struct page * page;
page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
if (page == NULL)
return NULL;
map_page_into_agp(page);
get_page(page);
atomic_inc(&agp_bridge->current_memory_agp);
return page;
} | 0 | [
"CWE-190"
]
| linux-2.6 | 194b3da873fd334ef183806db751473512af29ce | 238,085,080,266,505,700,000,000,000,000,000,000,000 | 14 | agp: fix arbitrary kernel memory writes
pg_start is copied from userspace on AGPIOC_BIND and AGPIOC_UNBIND ioctl
cmds of agp_ioctl() and passed to agpioc_bind_wrap(). As said in the
comment, (pg_start + mem->page_count) may wrap in case of AGPIOC_BIND,
and it is not checked at all in case of AGPIOC_UNBIND. As a result, user
with sufficient privileges (usually "video" group) may generate either
local DoS or privilege escalation.
Signed-off-by: Vasiliy Kulikov <[email protected]>
Signed-off-by: Dave Airlie <[email protected]> |
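The unchecked sum the message describes wraps when pg_start + mem->page_count overflows. A sketch of the overflow-safe range check applied in the bind/unbind paths (generic form):

```c
/* Reject both out-of-range and wrapped sums: if the addition
 * overflowed, the result is smaller than either operand. */
if (pg_start + mem->page_count > num_entries ||
    pg_start + mem->page_count < pg_start)
	return -EINVAL;
```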
static void btrfs_close_one_device(struct btrfs_device *device)
{
struct btrfs_fs_devices *fs_devices = device->fs_devices;
struct btrfs_device *new_device;
struct rcu_string *name;
if (device->bdev)
fs_devices->open_devices--;
if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
device->devid != BTRFS_DEV_REPLACE_DEVID) {
list_del_init(&device->dev_alloc_list);
fs_devices->rw_devices--;
}
if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
fs_devices->missing_devices--;
btrfs_close_bdev(device);
new_device = btrfs_alloc_device(NULL, &device->devid,
device->uuid);
BUG_ON(IS_ERR(new_device)); /* -ENOMEM */
/* Safe because we are under uuid_mutex */
if (device->name) {
name = rcu_string_strdup(device->name->str, GFP_NOFS);
BUG_ON(!name); /* -ENOMEM */
rcu_assign_pointer(new_device->name, name);
}
list_replace_rcu(&device->dev_list, &new_device->dev_list);
new_device->fs_devices = device->fs_devices;
call_rcu(&device->rcu, free_device_rcu);
} | 0 | [
"CWE-476",
"CWE-284"
]
| linux | 09ba3bc9dd150457c506e4661380a6183af651c1 | 82,955,206,969,127,550,000,000,000,000,000,000,000 | 36 | btrfs: merge btrfs_find_device and find_device
Both btrfs_find_device() and find_device() do the same thing, except
that the latter does not take the seed device into account in the device
scanning context. We can merge them.
Signed-off-by: Anand Jain <[email protected]>
Reviewed-by: David Sterba <[email protected]>
Signed-off-by: David Sterba <[email protected]> |
static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb)
{
struct gfar_private *priv = netdev_priv(ndev);
struct rxfcb *fcb = NULL;
/* fcb is at the beginning if exists */
fcb = (struct rxfcb *)skb->data;
/* Remove the FCB from the skb
* Remove the padded bytes, if there are any
*/
if (priv->uses_rxfcb)
skb_pull(skb, GMAC_FCB_LEN);
/* Get receive timestamp from the skb */
if (priv->hwts_rx_en) {
struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
u64 *ns = (u64 *) skb->data;
memset(shhwtstamps, 0, sizeof(*shhwtstamps));
shhwtstamps->hwtstamp = ns_to_ktime(be64_to_cpu(*ns));
}
if (priv->padding)
skb_pull(skb, priv->padding);
/* Trim off the FCS */
pskb_trim(skb, skb->len - ETH_FCS_LEN);
if (ndev->features & NETIF_F_RXCSUM)
gfar_rx_checksum(skb, fcb);
/* There's need to check for NETIF_F_HW_VLAN_CTAG_RX here.
* Even if vlan rx accel is disabled, on some chips
* RXFCB_VLN is pseudo randomly set.
*/
if (ndev->features & NETIF_F_HW_VLAN_CTAG_RX &&
be16_to_cpu(fcb->flags) & RXFCB_VLN)
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
be16_to_cpu(fcb->vlctl));
} | 0 | []
| linux | d8861bab48b6c1fc3cdbcab8ff9d1eaea43afe7f | 157,115,785,328,047,650,000,000,000,000,000,000,000 | 41 | gianfar: fix jumbo packets+napi+rx overrun crash
When using jumbo packets and overrunning rx queue with napi enabled,
the following sequence is observed in gfar_add_rx_frag:
| lstatus | | skb |
t | lstatus, size, flags | first | len, data_len, *ptr |
---+--------------------------------------+-------+-----------------------+
13 | 18002348, 9032, INTERRUPT LAST | 0 | 9600, 8000, f554c12e |
12 | 10000640, 1600, INTERRUPT | 0 | 8000, 6400, f554c12e |
11 | 10000640, 1600, INTERRUPT | 0 | 6400, 4800, f554c12e |
10 | 10000640, 1600, INTERRUPT | 0 | 4800, 3200, f554c12e |
09 | 10000640, 1600, INTERRUPT | 0 | 3200, 1600, f554c12e |
08 | 14000640, 1600, INTERRUPT FIRST | 0 | 1600, 0, f554c12e |
07 | 14000640, 1600, INTERRUPT FIRST | 1 | 0, 0, f554c12e |
06 | 1c000080, 128, INTERRUPT LAST FIRST | 1 | 0, 0, abf3bd6e |
05 | 18002348, 9032, INTERRUPT LAST | 0 | 8000, 6400, c5a57780 |
04 | 10000640, 1600, INTERRUPT | 0 | 6400, 4800, c5a57780 |
03 | 10000640, 1600, INTERRUPT | 0 | 4800, 3200, c5a57780 |
02 | 10000640, 1600, INTERRUPT | 0 | 3200, 1600, c5a57780 |
01 | 10000640, 1600, INTERRUPT | 0 | 1600, 0, c5a57780 |
00 | 14000640, 1600, INTERRUPT FIRST | 1 | 0, 0, c5a57780 |
So at t=7 a new packet is started but not finished, probably due to rx
overrun - but rx overrun is not indicated in the flags. Instead a new
packet starts at t=8. This causes skb->len to exceed size for the LAST
fragment at t=13, and thus a negative fragment size is added to the skb.
This then crashes:
kernel BUG at include/linux/skbuff.h:2277!
Oops: Exception in kernel mode, sig: 5 [#1]
...
NIP [c04689f4] skb_pull+0x2c/0x48
LR [c03f62ac] gfar_clean_rx_ring+0x2e4/0x844
Call Trace:
[ec4bfd38] [c06a84c4] _raw_spin_unlock_irqrestore+0x60/0x7c (unreliable)
[ec4bfda8] [c03f6a44] gfar_poll_rx_sq+0x48/0xe4
[ec4bfdc8] [c048d504] __napi_poll+0x54/0x26c
[ec4bfdf8] [c048d908] net_rx_action+0x138/0x2c0
[ec4bfe68] [c06a8f34] __do_softirq+0x3a4/0x4fc
[ec4bfed8] [c0040150] run_ksoftirqd+0x58/0x70
[ec4bfee8] [c0066ecc] smpboot_thread_fn+0x184/0x1cc
[ec4bff08] [c0062718] kthread+0x140/0x144
[ec4bff38] [c0012350] ret_from_kernel_thread+0x14/0x1c
This patch fixes this by checking for computed LAST fragment size, so a
negative sized fragment is never added.
In order to prevent the newer rx frame from getting corrupted, the FIRST
flag is checked to discard the incomplete older frame.
Signed-off-by: Michael Braun <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
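The fix described in the last two paragraphs derives the LAST fragment's size from the running skb->len and refuses negative results, dropping the corrupt frame instead. A hedged sketch of that logic inside gfar_add_rx_frag(); the identifier names are assumptions based on the commit text:

```c
/* The LAST descriptor reports the total frame length, so the final
 * fragment's size is that total minus what the skb already holds.
 * If descriptors were lost to an overrun, the subtraction would go
 * negative: detect that and drop the frame. */
unsigned int size = lstatus & BD_LENGTH_MASK;

if (last) {
	if (size < skb->len)
		return false;	/* corrupt frame: never add a
				 * negative-sized fragment */
	size -= skb->len;
}
```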
lq850_print_page(gx_device_printer *pdev, gp_file *prn_stream)
{
char lq850_init_string [] = "\033@\033P\033l\000\r\033\053\001\033Q";
return dot24_print_page(pdev, prn_stream, lq850_init_string, sizeof(lq850_init_string));
} | 0 | [
"CWE-369"
]
| ghostpdl | eaba1d97b62831b42c51840cc8ee2bc4576c942e | 142,346,199,180,630,930,000,000,000,000,000,000,000 | 6 | Bug 701828: make dot24_print_page() return error instead of divide by zero.
Fixes:
./sanbin/gs -dBATCH -dNOPAUSE -dSAFER -r2 -sOutputFile=tmp -sDEVICE=necp6 ../bug-701828.pdf |
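The guard the message describes amounts to validating a resolution-derived divisor before dividing. A hedged sketch; the variable names are illustrative rather than ghostpdl's actual ones, while return_error()/gs_error_rangecheck are standard Ghostscript idioms:

```c
/* At -r2 the derived pass count truncates to zero; fail cleanly
 * instead of dividing by it. */
int passes = (int)(pdev->x_pixels_per_inch / 60);
if (passes <= 0)
	return_error(gs_error_rangecheck);
```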
static long do_wait(struct wait_opts *wo)
{
struct task_struct *tsk;
int retval;
trace_sched_process_wait(wo->wo_pid);
init_waitqueue_func_entry(&wo->child_wait, child_wait_callback);
wo->child_wait.private = current;
add_wait_queue(¤t->signal->wait_chldexit, &wo->child_wait);
repeat:
/*
* If there is nothing that can match our criteria just get out.
* We will clear ->notask_error to zero if we see any child that
* might later match our criteria, even if we are not able to reap
* it yet.
*/
wo->notask_error = -ECHILD;
if ((wo->wo_type < PIDTYPE_MAX) &&
(!wo->wo_pid || hlist_empty(&wo->wo_pid->tasks[wo->wo_type])))
goto notask;
set_current_state(TASK_INTERRUPTIBLE);
read_lock(&tasklist_lock);
tsk = current;
do {
retval = do_wait_thread(wo, tsk);
if (retval)
goto end;
retval = ptrace_do_wait(wo, tsk);
if (retval)
goto end;
if (wo->wo_flags & __WNOTHREAD)
break;
} while_each_thread(current, tsk);
read_unlock(&tasklist_lock);
notask:
retval = wo->notask_error;
if (!retval && !(wo->wo_flags & WNOHANG)) {
retval = -ERESTARTSYS;
if (!signal_pending(current)) {
schedule();
goto repeat;
}
}
end:
__set_current_state(TASK_RUNNING);
remove_wait_queue(¤t->signal->wait_chldexit, &wo->child_wait);
return retval;
} | 0 | [
"CWE-20",
"CWE-703",
"CWE-400"
]
| linux | b69f2292063d2caf37ca9aec7d63ded203701bf3 | 215,325,314,403,266,380,000,000,000,000,000,000,000 | 53 | block: Fix io_context leak after failure of clone with CLONE_IO
With CLONE_IO, parent's io_context->nr_tasks is incremented, but never
decremented whenever copy_process() fails afterwards, which prevents
exit_io_context() from calling the IO schedulers' exit functions.
Give a task_struct to exit_io_context(), and call exit_io_context() instead of
put_io_context() in copy_process() cleanup path.
Signed-off-by: Louis Rilling <[email protected]>
Signed-off-by: Jens Axboe <[email protected]> |
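Per the message, the cleanup path in copy_process() switches from put_io_context() to exit_io_context() on the new task. A sketch of the corrected error path (label name as in the kernel's copy_process() cleanup chain):

```c
bad_fork_cleanup_io:
	if (p->io_context)
		exit_io_context(p);	/* was put_io_context(): that left
					 * nr_tasks elevated and skipped the
					 * IO schedulers' exit hooks */
```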
static void ext4_dx_csum_set(struct inode *inode, struct ext4_dir_entry *dirent)
{
struct dx_countlimit *c;
struct dx_tail *t;
int count_offset, limit, count;
if (!ext4_has_metadata_csum(inode->i_sb))
return;
c = get_dx_countlimit(inode, dirent, &count_offset);
if (!c) {
EXT4_ERROR_INODE(inode, "dir seems corrupt? Run e2fsck -D.");
return;
}
limit = le16_to_cpu(c->limit);
count = le16_to_cpu(c->count);
if (count_offset + (limit * sizeof(struct dx_entry)) >
EXT4_BLOCK_SIZE(inode->i_sb) - sizeof(struct dx_tail)) {
warn_no_space_for_csum(inode);
return;
}
t = (struct dx_tail *)(((struct dx_entry *)c) + limit);
t->dt_checksum = ext4_dx_csum(inode, dirent, count_offset, count, t);
} | 0 | [
"CWE-125"
]
| linux | 5872331b3d91820e14716632ebb56b1399b34fe1 | 108,019,985,619,551,110,000,000,000,000,000,000,000 | 25 | ext4: fix potential negative array index in do_split()
If for any reason a directory passed to do_split() does not have enough
active entries to exceed half the size of the block, we can end up
iterating over all "count" entries without finding a split point.
In this case, count == move, and split will be zero, and we will
attempt a negative index into map[].
Guard against this by detecting this case, and falling back to
split-to-half-of-count instead; in this case we will still have
plenty of space (> half blocksize) in each split block.
Fixes: ef2b02d3e617 ("ext34: ensure do_split leaves enough free space in both blocks")
Signed-off-by: Eric Sandeen <[email protected]>
Reviewed-by: Andreas Dilger <[email protected]>
Reviewed-by: Jan Kara <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Theodore Ts'o <[email protected]> |
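The fallback the message describes keeps split positive even when every entry would move: if the size scan consumed all entries (count == move), split at half of count instead. A sketch matching the description; the upstream patch expresses the condition via the loop index:

```c
/* i = number of entries that stay in the old block after the size
 * scan. If none qualified (count == move), split half-by-count so
 * map[split] is never a negative index. */
if (i > 0)
	split = count - move;
else
	split = count / 2;

hash2 = map[split].hash;	/* now always a valid access */
```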
static void __update_clear_spte_fast(u64 *sptep, u64 spte)
{
union split_spte *ssptep, sspte;
ssptep = (union split_spte *)sptep;
sspte = (union split_spte)spte;
WRITE_ONCE(ssptep->spte_low, sspte.spte_low);
/*
* If we map the spte from present to nonpresent, we should clear
* present bit firstly to avoid vcpu fetch the old high bits.
*/
smp_wmb();
ssptep->spte_high = sspte.spte_high;
count_spte_clear(sptep, spte);
} | 0 | [
"CWE-476"
]
| linux | 9f46c187e2e680ecd9de7983e4d081c3391acc76 | 34,551,376,273,428,440,000,000,000,000,000,000,000 | 18 | KVM: x86/mmu: fix NULL pointer dereference on guest INVPCID
With shadow paging enabled, the INVPCID instruction results in a call
to kvm_mmu_invpcid_gva. If INVPCID is executed with CR0.PG=0, the
invlpg callback is not set and the result is a NULL pointer dereference.
Fix it trivially by checking for mmu->invlpg before every call.
There are other possibilities:
- check for CR0.PG, because KVM (like all Intel processors after P5)
flushes guest TLB on CR0.PG changes so that INVPCID/INVLPG are a
nop with paging disabled
- check for EFER.LMA, because KVM syncs and flushes when switching
MMU contexts outside of 64-bit mode
All of these are tricky, go for the simple solution. This is CVE-2022-1789.
Reported-by: Yongkang Jia <[email protected]>
Cc: [email protected]
Signed-off-by: Paolo Bonzini <[email protected]> |
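"Checking for mmu->invlpg before every call" is literally a NULL guard at each indirect call site in kvm_mmu_invpcid_gva(). A simplified sketch; the real function also matches PCIDs and tracks whether to flush:

```c
/* With CR0.PG=0 the shadow-MMU invlpg callback is never installed,
 * so guard each indirect call instead of assuming a populated MMU. */
if (mmu->invlpg)
	mmu->invlpg(vcpu, gva, mmu->root.hpa);

for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
	if (mmu->invlpg && VALID_PAGE(mmu->prev_roots[i].hpa))
		mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa);
```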
static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start)
{
struct pglist_data *pgdat;
unsigned long zones_size[MAX_NR_ZONES] = {0};
unsigned long zholes_size[MAX_NR_ZONES] = {0};
unsigned long start_pfn = start >> PAGE_SHIFT;
pgdat = arch_alloc_nodedata(nid);
if (!pgdat)
return NULL;
arch_refresh_nodedata(nid, pgdat);
/* we can use NODE_DATA(nid) from here */
/* init node's zones as empty zones, we don't have any present pages.*/
free_area_init_node(nid, zones_size, start_pfn, zholes_size);
/*
* The node we allocated has no zone fallback lists. For avoiding
* to access not-initialized zonelist, build here.
*/
mutex_lock(&zonelists_mutex);
build_all_zonelists(pgdat, NULL);
mutex_unlock(&zonelists_mutex);
return pgdat;
} | 0 | []
| linux-2.6 | 08dff7b7d629807dbb1f398c68dd9cd58dd657a1 | 39,495,906,079,975,633,000,000,000,000,000,000,000 | 28 | mm/hotplug: correctly add new zone to all other nodes' zone lists
When online_pages() is called to add new memory to an empty zone, it
rebuilds all zone lists by calling build_all_zonelists(). But there's a
bug which prevents the new zone from being added to other nodes' zone lists.
online_pages() {
build_all_zonelists()
.....
node_set_state(zone_to_nid(zone), N_HIGH_MEMORY)
}
Here the node of the zone is put into N_HIGH_MEMORY state after calling
build_all_zonelists(), but build_all_zonelists() only adds zones from
nodes in N_HIGH_MEMORY state to the fallback zone lists.
build_all_zonelists()
->__build_all_zonelists()
->build_zonelists()
->find_next_best_node()
->for_each_node_state(n, N_HIGH_MEMORY)
So memory in the new zone will never be used by other nodes, and it may
cause strange behavior when the system is under memory pressure. So put the node
into N_HIGH_MEMORY state before calling build_all_zonelists().
Signed-off-by: Jianguo Wu <[email protected]>
Signed-off-by: Jiang Liu <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Minchan Kim <[email protected]>
Cc: Rusty Russell <[email protected]>
Cc: Yinghai Lu <[email protected]>
Cc: Tony Luck <[email protected]>
Cc: KAMEZAWA Hiroyuki <[email protected]>
Cc: KOSAKI Motohiro <[email protected]>
Cc: David Rientjes <[email protected]>
Cc: Keping Chen <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
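The fix is purely an ordering change in online_pages(): mark the node as having memory before rebuilding the zonelists, so find_next_best_node() can see it. A sketch of the corrected sequence (build_all_zonelists() argument lists vary across kernel versions):

```c
/* Mark the node first ... */
node_set_state(zone_to_nid(zone), N_HIGH_MEMORY);
/* ... then rebuild, so other nodes' fallback lists include its zones. */
if (need_zonelists_rebuild)
	build_all_zonelists(NULL, NULL);
```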
UnicodeString::char32At(int32_t offset) const
{
int32_t len = length();
if((uint32_t)offset < (uint32_t)len) {
const UChar *array = getArrayStart();
UChar32 c;
U16_GET(array, 0, offset, len, c);
return c;
} else {
return kInvalidUChar;
}
} | 0 | [
"CWE-190",
"CWE-787"
]
| icu | b7d08bc04a4296982fcef8b6b8a354a9e4e7afca | 46,911,442,910,059,910,000,000,000,000,000,000,000 | 12 | ICU-20958 Prevent SEGV_MAPERR in append
See #971 |
check_symlinks_fsobj(char *path, int *error_number, struct archive_string *error_string, int flags)
{
#if !defined(HAVE_LSTAT)
/* Platform doesn't have lstat, so we can't look for symlinks. */
(void)path; /* UNUSED */
(void)error_number; /* UNUSED */
(void)error_string; /* UNUSED */
(void)flags; /* UNUSED */
return (ARCHIVE_OK);
#else
int res = ARCHIVE_OK;
char *tail;
char *head;
int last;
char c;
int r;
struct stat st;
int restore_pwd;
/* Nothing to do here if name is empty */
if(path[0] == '\0')
return (ARCHIVE_OK);
/*
* Guard against symlink tricks. Reject any archive entry whose
* destination would be altered by a symlink.
*
* Walk the filename in chunks separated by '/'. For each segment:
* - if it doesn't exist, continue
* - if it's symlink, abort or remove it
* - if it's a directory and it's not the last chunk, cd into it
* As we go:
* head points to the current (relative) path
* tail points to the temporary \0 terminating the segment we're currently examining
* c holds what used to be in *tail
* last is 1 if this is the last tail
*/
restore_pwd = open(".", O_RDONLY | O_BINARY | O_CLOEXEC);
__archive_ensure_cloexec_flag(restore_pwd);
if (restore_pwd < 0)
return (ARCHIVE_FATAL);
head = path;
tail = path;
last = 0;
/* TODO: reintroduce a safe cache here? */
/* Skip the root directory if the path is absolute. */
if(tail == path && tail[0] == '/')
++tail;
/* Keep going until we've checked the entire name.
* head, tail, path all alias the same string, which is
* temporarily zeroed at tail, so be careful restoring the
* stashed (c=tail[0]) for error messages.
* Exiting the loop with break is okay; continue is not.
*/
while (!last) {
/* Skip the separator we just consumed, plus any adjacent ones */
while (*tail == '/')
++tail;
/* Skip the next path element. */
while (*tail != '\0' && *tail != '/')
++tail;
/* is this the last path component? */
last = (tail[0] == '\0') || (tail[0] == '/' && tail[1] == '\0');
/* temporarily truncate the string here */
c = tail[0];
tail[0] = '\0';
/* Check that we haven't hit a symlink. */
r = lstat(head, &st);
if (r != 0) {
tail[0] = c;
/* We've hit a dir that doesn't exist; stop now. */
if (errno == ENOENT) {
break;
} else {
/* Treat any other error as fatal - best to be paranoid here
* Note: This effectively disables deep directory
* support when security checks are enabled.
* Otherwise, very long pathnames that trigger
* an error here could evade the sandbox.
* TODO: We could do better, but it would probably
* require merging the symlink checks with the
* deep-directory editing. */
if (error_number) *error_number = errno;
if (error_string)
archive_string_sprintf(error_string,
"Could not stat %s",
path);
res = ARCHIVE_FAILED;
break;
}
} else if (S_ISDIR(st.st_mode)) {
if (!last) {
if (chdir(head) != 0) {
tail[0] = c;
if (error_number) *error_number = errno;
if (error_string)
archive_string_sprintf(error_string,
"Could not chdir %s",
path);
res = (ARCHIVE_FATAL);
break;
}
/* Our view is now from inside this dir: */
head = tail + 1;
}
} else if (S_ISLNK(st.st_mode)) {
if (last) {
/*
* Last element is symlink; remove it
* so we can overwrite it with the
* item being extracted.
*/
if (unlink(head)) {
tail[0] = c;
if (error_number) *error_number = errno;
if (error_string)
archive_string_sprintf(error_string,
"Could not remove symlink %s",
path);
res = ARCHIVE_FAILED;
break;
}
/*
* Even if we did remove it, a warning
* is in order. The warning is silly,
* though, if we're just replacing one
* symlink with another symlink.
*/
tail[0] = c;
/* FIXME: not sure how important this is to restore
if (!S_ISLNK(path)) {
if (error_number) *error_number = 0;
if (error_string)
archive_string_sprintf(error_string,
"Removing symlink %s",
path);
}
*/
/* Symlink gone. No more problem! */
res = ARCHIVE_OK;
break;
} else if (flags & ARCHIVE_EXTRACT_UNLINK) {
/* User asked us to remove problems. */
if (unlink(head) != 0) {
tail[0] = c;
if (error_number) *error_number = 0;
if (error_string)
archive_string_sprintf(error_string,
"Cannot remove intervening symlink %s",
path);
res = ARCHIVE_FAILED;
break;
}
tail[0] = c;
} else {
tail[0] = c;
if (error_number) *error_number = 0;
if (error_string)
archive_string_sprintf(error_string,
"Cannot extract through symlink %s",
path);
res = ARCHIVE_FAILED;
break;
}
}
/* be sure to always maintain this */
tail[0] = c;
if (tail[0] != '\0')
tail++; /* Advance to the next segment. */
}
/* Catches loop exits via break */
tail[0] = c;
#ifdef HAVE_FCHDIR
/* If we changed directory above, restore it here. */
if (restore_pwd >= 0) {
r = fchdir(restore_pwd);
if (r != 0) {
if(error_number) *error_number = errno;
if(error_string)
archive_string_sprintf(error_string,
"chdir() failure");
}
close(restore_pwd);
restore_pwd = -1;
if (r != 0) {
res = (ARCHIVE_FATAL);
}
}
#endif
/* TODO: reintroduce a safe cache here? */
return res;
#endif
} | 0 | [
"CWE-20",
"CWE-476"
]
| libarchive | dfd6b54ce33960e420fb206d8872fb759b577ad9 | 71,800,755,002,070,240,000,000,000,000,000,000,000 | 194 | Fixes for Issue #745 and Issue #746 from Doran Moppert. |
static int bn2binpad(unsigned char *to, size_t tolen, BIGNUM *b)
{
size_t blen;
blen = BN_num_bytes(b);
/* If BIGNUM length greater than buffer, mask to get rightmost
* bytes. NB: modifies b but this doesn't matter for our purposes.
*/
if (blen > tolen)
{
BN_mask_bits(b, tolen << 3);
/* Update length because mask operation might create leading
* zeroes.
*/
blen = BN_num_bytes(b);
}
/* If b length smaller than buffer pad with zeroes */
if (blen < tolen)
{
memset(to, 0, tolen - blen);
to += tolen - blen;
}
/* This call cannot fail */
BN_bn2bin(b, to);
return 1;
} | 1 | []
| openssl | 200f249b8c3b6439e0200d01caadc24806f1a983 | 271,992,352,835,707,370,000,000,000,000,000,000,000 | 26 | Remove Dual EC DRBG from FIPS module. |
tight_filter_gradient24(VncState *vs, uint8_t *buf, int w, int h)
{
uint32_t *buf32;
uint32_t pix32;
int shift[3];
int *prev;
int here[3], upper[3], left[3], upperleft[3];
int prediction;
int x, y, c;
buf32 = (uint32_t *)buf;
memset(vs->tight.gradient.buffer, 0, w * 3 * sizeof(int));
if (1 /* FIXME: (vs->clientds.flags & QEMU_BIG_ENDIAN_FLAG) ==
(vs->ds->surface->flags & QEMU_BIG_ENDIAN_FLAG) */) {
shift[0] = vs->client_pf.rshift;
shift[1] = vs->client_pf.gshift;
shift[2] = vs->client_pf.bshift;
} else {
shift[0] = 24 - vs->client_pf.rshift;
shift[1] = 24 - vs->client_pf.gshift;
shift[2] = 24 - vs->client_pf.bshift;
}
for (y = 0; y < h; y++) {
for (c = 0; c < 3; c++) {
upper[c] = 0;
here[c] = 0;
}
prev = (int *)vs->tight.gradient.buffer;
for (x = 0; x < w; x++) {
pix32 = *buf32++;
for (c = 0; c < 3; c++) {
upperleft[c] = upper[c];
left[c] = here[c];
upper[c] = *prev;
here[c] = (int)(pix32 >> shift[c] & 0xFF);
*prev++ = here[c];
prediction = left[c] + upper[c] - upperleft[c];
if (prediction < 0) {
prediction = 0;
} else if (prediction > 0xFF) {
prediction = 0xFF;
}
*buf++ = (char)(here[c] - prediction);
}
}
}
} | 0 | [
"CWE-125"
]
| qemu | 9f64916da20eea67121d544698676295bbb105a7 | 78,458,381,544,485,370,000,000,000,000,000,000,000 | 50 | pixman/vnc: use pixman images in vnc.
The vnc code uses *three* DisplaySurfaces:
First is the surface of the actual QemuConsole, usually the guest
screen, but could also be a text console (monitor/serial reachable via
Ctrl-Alt-<nr> keys). This is left as-is.
Second is the current server's view of the screen content. The vnc code
uses this to figure out which parts of the guest screen _really_ changed,
to reduce the amount of updates sent to the vnc clients. It is also
used as data source when sending out the updates to the clients. This
surface gets replaced by a pixman image. The format changes too,
instead of using the guest screen format we'll use fixed 32bit rgb
framebuffer and convert the pixels on the fly when comparing and
updating the server framebuffer.
Third surface carries the format expected by the vnc client. That isn't
used to store image data. This surface is switched to PixelFormat and a
boolean for bigendian byte order.
Signed-off-by: Gerd Hoffmann <[email protected]> |
void CurlIo::CurlImpl::writeRemote(const byte* data, size_t size, long from, long to)
{
std::string scriptPath(getEnv(envHTTPPOST));
if (scriptPath == "") {
throw Error(1, "Please set the path of the server script to handle http post data to EXIV2_HTTP_POST environmental variable.");
}
Exiv2::Uri hostInfo = Exiv2::Uri::Parse(path_);
// add the protocol and host to the path
std::size_t protocolIndex = scriptPath.find("://");
if (protocolIndex == std::string::npos) {
if (scriptPath[0] != '/') scriptPath = "/" + scriptPath;
scriptPath = hostInfo.Protocol + "://" + hostInfo.Host + scriptPath;
}
curl_easy_reset(curl_); // reset all options
curl_easy_setopt(curl_, CURLOPT_NOPROGRESS, 1L); // no progress meter please
//curl_easy_setopt(curl_, CURLOPT_VERBOSE, 1); // debugging mode
curl_easy_setopt(curl_, CURLOPT_URL, scriptPath.c_str());
curl_easy_setopt(curl_, CURLOPT_SSL_VERIFYPEER, 0L);
// encode base64
size_t encodeLength = ((size + 2) / 3) * 4 + 1;
char* encodeData = new char[encodeLength];
base64encode(data, size, encodeData, encodeLength);
// url encode
char* urlencodeData = urlencode(encodeData);
delete[] encodeData;
std::stringstream ss;
ss << "path=" << hostInfo.Path << "&"
<< "from=" << from << "&"
<< "to=" << to << "&"
<< "data=" << urlencodeData;
std::string postData = ss.str();
delete[] urlencodeData;
curl_easy_setopt(curl_, CURLOPT_POSTFIELDS, postData.c_str());
// Perform the request, res will get the return code.
CURLcode res = curl_easy_perform(curl_);
if(res != CURLE_OK) {
throw Error(1, curl_easy_strerror(res));
} else {
long serverCode;
curl_easy_getinfo (curl_, CURLINFO_RESPONSE_CODE, &serverCode);
if (serverCode >= 400 || serverCode < 0) {
throw Error(55, "Server", serverCode);
}
}
} | 0 | [
"CWE-125"
]
| exiv2 | 6e3855aed7ba8bb4731fc4087ca7f9078b2f3d97 | 197,049,513,248,913,740,000,000,000,000,000,000,000 | 52 | Fix https://github.com/Exiv2/exiv2/issues/55 |
ext4_xattr_set(struct inode *inode, int name_index, const char *name,
const void *value, size_t value_len, int flags)
{
handle_t *handle;
int error, retries = 0;
int credits = ext4_jbd2_credits_xattr(inode);
retry:
handle = ext4_journal_start(inode, EXT4_HT_XATTR, credits);
if (IS_ERR(handle)) {
error = PTR_ERR(handle);
} else {
int error2;
error = ext4_xattr_set_handle(handle, inode, name_index, name,
value, value_len, flags);
error2 = ext4_journal_stop(handle);
if (error == -ENOSPC &&
ext4_should_retry_alloc(inode->i_sb, &retries))
goto retry;
if (error == 0)
error = error2;
}
return error;
} | 0 | [
"CWE-241",
"CWE-19"
]
| linux | 82939d7999dfc1f1998c4b1c12e2f19edbdff272 | 124,225,969,767,512,430,000,000,000,000,000,000,000 | 26 | ext4: convert to mbcache2
The conversion is generally straightforward. The only tricky part is
that the xattr block corresponding to a found mbcache entry can get freed
before we get buffer lock for that block. So we have to check whether
the entry is still valid after getting buffer lock.
Signed-off-by: Jan Kara <[email protected]>
Signed-off-by: Theodore Ts'o <[email protected]> |
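The revalidation the message calls out follows a common pattern: look the entry up, take the buffer lock, then confirm the block still belongs to that entry before reusing it. A sketch with deliberately hypothetical helper names; the real mbcache2 API differs:

```c
entry = xattr_cache_lookup(cache, hash);	/* candidate entry */
bh = sb_bread(sb, entry->block);
lock_buffer(bh);
/* The block may have been freed and reused between the cache lookup
 * and the lock: verify the on-disk header still matches. */
if (!block_still_matches(bh, hash)) {
	unlock_buffer(bh);
	brelse(bh);
	goto next_entry;	/* stale entry: keep searching */
}
```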
static int xfrm_dump_policy_done(struct netlink_callback *cb)
{
struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *)cb->args;
struct net *net = sock_net(cb->skb->sk);
xfrm_policy_walk_done(walk, net);
return 0;
} | 0 | [
"CWE-125"
]
| linux | b805d78d300bcf2c83d6df7da0c818b0fee41427 | 75,601,274,674,014,380,000,000,000,000,000,000,000 | 8 | xfrm: policy: Fix out-of-bound array accesses in __xfrm_policy_unlink
UBSAN reports this:
UBSAN: Undefined behaviour in net/xfrm/xfrm_policy.c:1289:24
index 6 is out of range for type 'unsigned int [6]'
CPU: 1 PID: 0 Comm: swapper/1 Not tainted 4.4.162-514.55.6.9.x86_64+ #13
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.10.2-1ubuntu1 04/01/2014
0000000000000000 1466cf39b41b23c9 ffff8801f6b07a58 ffffffff81cb35f4
0000000041b58ab3 ffffffff83230f9c ffffffff81cb34e0 ffff8801f6b07a80
ffff8801f6b07a20 1466cf39b41b23c9 ffffffff851706e0 ffff8801f6b07ae8
Call Trace:
<IRQ> [<ffffffff81cb35f4>] __dump_stack lib/dump_stack.c:15 [inline]
<IRQ> [<ffffffff81cb35f4>] dump_stack+0x114/0x1a0 lib/dump_stack.c:51
[<ffffffff81d94225>] ubsan_epilogue+0x12/0x8f lib/ubsan.c:164
[<ffffffff81d954db>] __ubsan_handle_out_of_bounds+0x16e/0x1b2 lib/ubsan.c:382
[<ffffffff82a25acd>] __xfrm_policy_unlink+0x3dd/0x5b0 net/xfrm/xfrm_policy.c:1289
[<ffffffff82a2e572>] xfrm_policy_delete+0x52/0xb0 net/xfrm/xfrm_policy.c:1309
[<ffffffff82a3319b>] xfrm_policy_timer+0x30b/0x590 net/xfrm/xfrm_policy.c:243
[<ffffffff813d3927>] call_timer_fn+0x237/0x990 kernel/time/timer.c:1144
[<ffffffff813d8e7e>] __run_timers kernel/time/timer.c:1218 [inline]
[<ffffffff813d8e7e>] run_timer_softirq+0x6ce/0xb80 kernel/time/timer.c:1401
[<ffffffff8120d6f9>] __do_softirq+0x299/0xe10 kernel/softirq.c:273
[<ffffffff8120e676>] invoke_softirq kernel/softirq.c:350 [inline]
[<ffffffff8120e676>] irq_exit+0x216/0x2c0 kernel/softirq.c:391
[<ffffffff82c5edab>] exiting_irq arch/x86/include/asm/apic.h:652 [inline]
[<ffffffff82c5edab>] smp_apic_timer_interrupt+0x8b/0xc0 arch/x86/kernel/apic/apic.c:926
[<ffffffff82c5c985>] apic_timer_interrupt+0xa5/0xb0 arch/x86/entry/entry_64.S:735
<EOI> [<ffffffff81188096>] ? native_safe_halt+0x6/0x10 arch/x86/include/asm/irqflags.h:52
[<ffffffff810834d7>] arch_safe_halt arch/x86/include/asm/paravirt.h:111 [inline]
[<ffffffff810834d7>] default_idle+0x27/0x430 arch/x86/kernel/process.c:446
[<ffffffff81085f05>] arch_cpu_idle+0x15/0x20 arch/x86/kernel/process.c:437
[<ffffffff8132abc3>] default_idle_call+0x53/0x90 kernel/sched/idle.c:92
[<ffffffff8132b32d>] cpuidle_idle_call kernel/sched/idle.c:156 [inline]
[<ffffffff8132b32d>] cpu_idle_loop kernel/sched/idle.c:251 [inline]
[<ffffffff8132b32d>] cpu_startup_entry+0x60d/0x9a0 kernel/sched/idle.c:299
[<ffffffff8113e119>] start_secondary+0x3c9/0x560 arch/x86/kernel/smpboot.c:245
The issue is triggered as follows:
xfrm_add_policy
-->verify_newpolicy_info //check the index provided by user with XFRM_POLICY_MAX
//In my case, the index is 0x6E6BB6, so it pass the check.
-->xfrm_policy_construct //copy the user's policy and set xfrm_policy_timer
-->xfrm_policy_insert
--> __xfrm_policy_link //use the orgin dir, in my case is 2
--> xfrm_gen_index //generate policy index, there is 0x6E6BB6
then xfrm_policy_timer be fired
xfrm_policy_timer
--> xfrm_policy_id2dir //get dir from (policy index & 7), in my case is 6
--> xfrm_policy_delete
--> __xfrm_policy_unlink //access policy_count[dir], trigger out of range access
Add xfrm_policy_id2dir check in verify_newpolicy_info, make sure the computed dir is
valid, to fix the issue.
Reported-by: Hulk Robot <[email protected]>
Fixes: e682adf021be ("xfrm: Try to honor policy index if it's supplied by user")
Signed-off-by: YueHaibing <[email protected]>
Acked-by: Herbert Xu <[email protected]>
Signed-off-by: Steffen Klassert <[email protected]> |
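The added check, per the closing paragraph, rejects any user-supplied index whose low bits decode to a different direction than the request claims, so xfrm_policy_id2dir() can no longer produce an out-of-range dir later. A sketch of that validation in verify_newpolicy_info(), matching the commit description:

```c
/* A policy index encodes its direction in the low three bits
 * (dir = index & 7). Refuse indexes that decode to a different,
 * possibly out-of-range, direction. */
if (p->index && (xfrm_policy_id2dir(p->index) != p->dir))
	return -EINVAL;
```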
void mark_tree_uninteresting(struct tree *tree)
{
struct object *obj = &tree->object;
if (!tree)
return;
if (obj->flags & UNINTERESTING)
return;
obj->flags |= UNINTERESTING;
mark_tree_contents_uninteresting(tree);
} | 0 | [
"CWE-119",
"CWE-787"
]
| git | 34fa79a6cde56d6d428ab0d3160cb094ebad3305 | 324,473,870,038,199,830,000,000,000,000,000,000,000 | 11 | prefer memcpy to strcpy
When we already know the length of a string (e.g., because
we just malloc'd to fit it), it's nicer to use memcpy than
strcpy, as it makes it more obvious that we are not going to
overflow the buffer (because the size we pass matches the
size in the allocation).
This also eliminates calls to strcpy, which make auditing
the code base harder.
Signed-off-by: Jeff King <[email protected]>
Signed-off-by: Junio C Hamano <[email protected]> |
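A self-contained illustration of the stated preference: when the length is already known, allocate exactly and memcpy that many bytes, so the no-overflow argument is visible at the call site:

```c
#include <stdlib.h>
#include <string.h>

static char *dup_string(const char *src)
{
	size_t len = strlen(src);
	char *dst = malloc(len + 1);

	if (dst)
		memcpy(dst, src, len + 1);	/* size matches the allocation,
						 * including the trailing NUL */
	return dst;
}
```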
int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
pte_t *dst_pte,
struct vm_area_struct *dst_vma,
unsigned long dst_addr,
unsigned long src_addr,
struct page **pagep)
{
int vm_shared = dst_vma->vm_flags & VM_SHARED;
struct hstate *h = hstate_vma(dst_vma);
pte_t _dst_pte;
spinlock_t *ptl;
int ret;
struct page *page;
if (!*pagep) {
ret = -ENOMEM;
page = alloc_huge_page(dst_vma, dst_addr, 0);
if (IS_ERR(page))
goto out;
ret = copy_huge_page_from_user(page,
(const void __user *) src_addr,
pages_per_huge_page(h), false);
/* fallback to copy_from_user outside mmap_sem */
if (unlikely(ret)) {
ret = -EFAULT;
*pagep = page;
/* don't free the page */
goto out;
}
} else {
page = *pagep;
*pagep = NULL;
}
/*
* The memory barrier inside __SetPageUptodate makes sure that
* preceding stores to the page contents become visible before
* the set_pte_at() write.
*/
__SetPageUptodate(page);
set_page_huge_active(page);
/*
* If shared, add to page cache
*/
if (vm_shared) {
struct address_space *mapping = dst_vma->vm_file->f_mapping;
pgoff_t idx = vma_hugecache_offset(h, dst_vma, dst_addr);
ret = huge_add_to_page_cache(page, mapping, idx);
if (ret)
goto out_release_nounlock;
}
ptl = huge_pte_lockptr(h, dst_mm, dst_pte);
spin_lock(ptl);
ret = -EEXIST;
if (!huge_pte_none(huge_ptep_get(dst_pte)))
goto out_release_unlock;
if (vm_shared) {
page_dup_rmap(page, true);
} else {
ClearPagePrivate(page);
hugepage_add_new_anon_rmap(page, dst_vma, dst_addr);
}
_dst_pte = make_huge_pte(dst_vma, page, dst_vma->vm_flags & VM_WRITE);
if (dst_vma->vm_flags & VM_WRITE)
_dst_pte = huge_pte_mkdirty(_dst_pte);
_dst_pte = pte_mkyoung(_dst_pte);
set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
(void)huge_ptep_set_access_flags(dst_vma, dst_addr, dst_pte, _dst_pte,
dst_vma->vm_flags & VM_WRITE);
hugetlb_count_add(pages_per_huge_page(h), dst_mm);
/* No need to invalidate - it was non-present before */
update_mmu_cache(dst_vma, dst_addr, dst_pte);
spin_unlock(ptl);
if (vm_shared)
unlock_page(page);
ret = 0;
out:
return ret;
out_release_unlock:
spin_unlock(ptl);
out_release_nounlock:
if (vm_shared)
unlock_page(page);
put_page(page);
goto out;
} | 1 | [
"CWE-703"
]
| linux | 5af10dfd0afc559bb4b0f7e3e8227a1578333995 | 208,616,599,368,884,150,000,000,000,000,000,000,000 | 98 | userfaultfd: hugetlbfs: remove superfluous page unlock in VM_SHARED case
huge_add_to_page_cache->add_to_page_cache implicitly unlocks the page
before returning in case of errors.
The error returned was -EEXIST by running UFFDIO_COPY on a non-hole
offset of a VM_SHARED hugetlbfs mapping. It was a userland bug that
triggered it and the kernel must cope with it returning -EEXIST from
ioctl(UFFDIO_COPY) as expected.
page dumped because: VM_BUG_ON_PAGE(!PageLocked(page))
kernel BUG at mm/filemap.c:964!
invalid opcode: 0000 [#1] SMP
CPU: 1 PID: 22582 Comm: qemu-system-x86 Not tainted 4.11.11-300.fc26.x86_64 #1
RIP: unlock_page+0x4a/0x50
Call Trace:
hugetlb_mcopy_atomic_pte+0xc0/0x320
mcopy_atomic+0x96f/0xbe0
userfaultfd_ioctl+0x218/0xe90
do_vfs_ioctl+0xa5/0x600
SyS_ioctl+0x79/0x90
entry_SYSCALL_64_fastpath+0x1a/0xa9
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Andrea Arcangeli <[email protected]>
Tested-by: Maxime Coquelin <[email protected]>
Reviewed-by: Mike Kravetz <[email protected]>
Cc: "Dr. David Alan Gilbert" <[email protected]>
Cc: Mike Rapoport <[email protected]>
Cc: Alexey Perevalov <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
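The superfluous unlock is the one reached through out_release_nounlock above: huge_add_to_page_cache() already unlocked the page when it failed, so unlocking again trips the VM_BUG_ON_PAGE. A sketch of the corrected error paths:

```c
	ret = huge_add_to_page_cache(page, mapping, idx);
	if (ret)
		goto out_release_nounlock;	/* callee unlocked the page */
	/* ... */
out_release_unlock:
	spin_unlock(ptl);
	if (vm_shared)
		unlock_page(page);		/* still locked on this path */
out_release_nounlock:
	put_page(page);				/* no unlock_page() here */
	goto out;
```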
verify_destination (CommonJob *job,
GFile *dest,
char **dest_fs_id,
goffset required_size)
{
GFileInfo *info, *fsinfo;
GError *error;
guint64 free_size;
guint64 size_difference;
char *primary, *secondary, *details;
int response;
GFileType file_type;
gboolean dest_is_symlink = FALSE;
if (dest_fs_id)
{
*dest_fs_id = NULL;
}
retry:
error = NULL;
info = g_file_query_info (dest,
G_FILE_ATTRIBUTE_STANDARD_TYPE ","
G_FILE_ATTRIBUTE_ID_FILESYSTEM,
dest_is_symlink ? G_FILE_QUERY_INFO_NONE : G_FILE_QUERY_INFO_NOFOLLOW_SYMLINKS,
job->cancellable,
&error);
if (info == NULL)
{
if (IS_IO_ERROR (error, CANCELLED))
{
g_error_free (error);
return;
}
primary = f (_("Error while copying to “%B”."), dest);
details = NULL;
if (IS_IO_ERROR (error, PERMISSION_DENIED))
{
secondary = f (_("You do not have permissions to access the destination folder."));
}
else
{
secondary = f (_("There was an error getting information about the destination."));
details = error->message;
}
response = run_error (job,
primary,
secondary,
details,
FALSE,
CANCEL, RETRY,
NULL);
g_error_free (error);
if (response == 0 || response == GTK_RESPONSE_DELETE_EVENT)
{
abort_job (job);
}
else if (response == 1)
{
goto retry;
}
else
{
g_assert_not_reached ();
}
return;
}
file_type = g_file_info_get_file_type (info);
if (!dest_is_symlink && file_type == G_FILE_TYPE_SYMBOLIC_LINK)
{
/* Record that destination is a symlink and do real stat() once again */
dest_is_symlink = TRUE;
g_object_unref (info);
goto retry;
}
if (dest_fs_id)
{
*dest_fs_id =
g_strdup (g_file_info_get_attribute_string (info,
G_FILE_ATTRIBUTE_ID_FILESYSTEM));
}
g_object_unref (info);
if (file_type != G_FILE_TYPE_DIRECTORY)
{
primary = f (_("Error while copying to “%B”."), dest);
secondary = f (_("The destination is not a folder."));
run_error (job,
primary,
secondary,
NULL,
FALSE,
CANCEL,
NULL);
abort_job (job);
return;
}
if (dest_is_symlink)
{
/* We can't reliably statfs() destination if it's a symlink, thus not doing any further checks. */
return;
}
fsinfo = g_file_query_filesystem_info (dest,
G_FILE_ATTRIBUTE_FILESYSTEM_FREE ","
G_FILE_ATTRIBUTE_FILESYSTEM_READONLY,
job->cancellable,
NULL);
if (fsinfo == NULL)
{
/* All sorts of things can go wrong getting the fs info (like not supported)
* only check these things if the fs returns them
*/
return;
}
if (required_size > 0 &&
g_file_info_has_attribute (fsinfo, G_FILE_ATTRIBUTE_FILESYSTEM_FREE))
{
free_size = g_file_info_get_attribute_uint64 (fsinfo,
G_FILE_ATTRIBUTE_FILESYSTEM_FREE);
if (free_size < required_size)
{
size_difference = required_size - free_size;
primary = f (_("Error while copying to “%B”."), dest);
secondary = f (_("There is not enough space on the destination. Try to remove files to make space."));
details = f (_("%S more space is required to copy to the destination."), size_difference);
response = run_warning (job,
primary,
secondary,
details,
FALSE,
CANCEL,
COPY_FORCE,
RETRY,
NULL);
if (response == 0 || response == GTK_RESPONSE_DELETE_EVENT)
{
abort_job (job);
}
else if (response == 2)
{
goto retry;
}
else if (response == 1)
{
/* We are forced to copy - just fall through ... */
}
else
{
g_assert_not_reached ();
}
}
}
if (!job_aborted (job) &&
g_file_info_get_attribute_boolean (fsinfo,
G_FILE_ATTRIBUTE_FILESYSTEM_READONLY))
{
primary = f (_("Error while copying to “%B”."), dest);
secondary = f (_("The destination is read-only."));
run_error (job,
primary,
secondary,
NULL,
FALSE,
CANCEL,
NULL);
g_error_free (error);
abort_job (job);
}
g_object_unref (fsinfo);
} | 0 | [
"CWE-20"
]
| nautilus | 1630f53481f445ada0a455e9979236d31a8d3bb0 | 14,261,128,763,182,269,000,000,000,000,000,000,000 | 195 | mime-actions: use file metadata for trusting desktop files
Currently we only trust desktop files that have the executable bit
set, and don't replace the displayed icon or the displayed name until
it's trusted, which prevents for running random programs by a malicious
desktop file.
However, the executable permission is preserved if the desktop file
comes from a compressed file.
To prevent this, add a metadata::trusted metadata to the file once the
user acknowledges the file as trusted. This adds metadata to the file,
which cannot be added unless it has access to the computer.
Also remove the SHEBANG "trusted" content we were putting inside the
desktop file, since that doesn't add more security since it can come
with the file itself.
https://bugzilla.gnome.org/show_bug.cgi?id=777991 |
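Recording the trust decision as GIO metadata could look like the sketch below; g_file_set_attribute_string() is the standard GIO call, while the surrounding error handling is illustrative:

```c
/* Persist the trust decision as file metadata. Unlike the old SHEBANG
 * marker, this cannot travel inside an archive: it can only be set by
 * something already running on the user's machine. */
GError *error = NULL;

g_file_set_attribute_string (file,
                             "metadata::trusted", "true",
                             G_FILE_QUERY_INFO_NONE,
                             NULL /* cancellable */, &error);
if (error != NULL) {
    g_warning ("Could not mark desktop file as trusted: %s", error->message);
    g_error_free (error);
}
```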
static int snd_pcm_pre_start(struct snd_pcm_substream *substream,
snd_pcm_state_t state)
{
struct snd_pcm_runtime *runtime = substream->runtime;
if (runtime->status->state != SNDRV_PCM_STATE_PREPARED)
return -EBADFD;
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
!snd_pcm_playback_data(substream))
return -EPIPE;
runtime->trigger_tstamp_latched = false;
runtime->trigger_master = substream;
return 0;
} | 0 | [
"CWE-125"
]
| linux | 92ee3c60ec9fe64404dc035e7c41277d74aa26cb | 190,348,296,642,246,530,000,000,000,000,000,000,000 | 13 | ALSA: pcm: Fix races among concurrent hw_params and hw_free calls
Currently we have neither proper check nor protection against the
concurrent calls of PCM hw_params and hw_free ioctls, which may result
in a UAF. Since the existing PCM stream lock can't be used for
protecting the whole ioctl operations, we need a new mutex to protect
those racy calls.
This patch introduced a new mutex, runtime->buffer_mutex, and applies
it to both hw_params and hw_free ioctl code paths. Along with it, the
both functions are slightly modified (the mmap_count check is moved
into the state-check block) for code simplicity.
Reported-by: Hu Jiahui <[email protected]>
Cc: <[email protected]>
Reviewed-by: Jaroslav Kysela <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Takashi Iwai <[email protected]> |
dir_split_resource_into_fingerprint_pairs(const char *res,
smartlist_t *pairs_out)
{
smartlist_t *pairs_tmp = smartlist_create();
smartlist_t *pairs_result = smartlist_create();
smartlist_split_string(pairs_tmp, res, "+", 0, 0);
if (smartlist_len(pairs_tmp)) {
char *last = smartlist_get(pairs_tmp,smartlist_len(pairs_tmp)-1);
size_t last_len = strlen(last);
if (last_len > 2 && !strcmp(last+last_len-2, ".z")) {
last[last_len-2] = '\0';
}
}
SMARTLIST_FOREACH_BEGIN(pairs_tmp, char *, cp) {
if (strlen(cp) != HEX_DIGEST_LEN*2+1) {
log_info(LD_DIR,
"Skipping digest pair %s with non-standard length.", escaped(cp));
} else if (cp[HEX_DIGEST_LEN] != '-') {
log_info(LD_DIR,
"Skipping digest pair %s with missing dash.", escaped(cp));
} else {
fp_pair_t pair;
if (base16_decode(pair.first, DIGEST_LEN, cp, HEX_DIGEST_LEN)<0 ||
base16_decode(pair.second,
DIGEST_LEN, cp+HEX_DIGEST_LEN+1, HEX_DIGEST_LEN)<0) {
log_info(LD_DIR, "Skipping non-decodable digest pair %s", escaped(cp));
} else {
smartlist_add(pairs_result, tor_memdup(&pair, sizeof(pair)));
}
}
tor_free(cp);
} SMARTLIST_FOREACH_END(cp);
smartlist_free(pairs_tmp);
/* Uniq-and-sort */
smartlist_sort(pairs_result, _compare_pairs);
smartlist_uniq(pairs_result, _compare_pairs, _tor_free);
smartlist_add_all(pairs_out, pairs_result);
smartlist_free(pairs_result);
return 0;
} | 0 | []
| tor | 973c18bf0e84d14d8006a9ae97fde7f7fb97e404 | 223,897,807,415,280,440,000,000,000,000,000,000,000 | 43 | Fix assertion failure in tor_timegm.
Fixes bug 6811. |
int ssl_cipher_get_evp(const SSL_SESSION *s, const EVP_CIPHER **enc,
const EVP_MD **md, int *mac_pkey_type, int *mac_secret_size,SSL_COMP **comp)
{
int i;
const SSL_CIPHER *c;
c=s->cipher;
if (c == NULL) return(0);
if (comp != NULL)
{
SSL_COMP ctmp;
#ifndef OPENSSL_NO_COMP
load_builtin_compressions();
#endif
*comp=NULL;
ctmp.id=s->compress_meth;
if (ssl_comp_methods != NULL)
{
i=sk_SSL_COMP_find(ssl_comp_methods,&ctmp);
if (i >= 0)
*comp=sk_SSL_COMP_value(ssl_comp_methods,i);
else
*comp=NULL;
}
}
if ((enc == NULL) || (md == NULL)) return(0);
switch (c->algorithm_enc)
{
case SSL_DES:
i=SSL_ENC_DES_IDX;
break;
case SSL_3DES:
i=SSL_ENC_3DES_IDX;
break;
case SSL_RC4:
i=SSL_ENC_RC4_IDX;
break;
case SSL_RC2:
i=SSL_ENC_RC2_IDX;
break;
case SSL_IDEA:
i=SSL_ENC_IDEA_IDX;
break;
case SSL_eNULL:
i=SSL_ENC_NULL_IDX;
break;
case SSL_AES128:
i=SSL_ENC_AES128_IDX;
break;
case SSL_AES256:
i=SSL_ENC_AES256_IDX;
break;
case SSL_CAMELLIA128:
i=SSL_ENC_CAMELLIA128_IDX;
break;
case SSL_CAMELLIA256:
i=SSL_ENC_CAMELLIA256_IDX;
break;
case SSL_eGOST2814789CNT:
i=SSL_ENC_GOST89_IDX;
break;
case SSL_SEED:
i=SSL_ENC_SEED_IDX;
break;
default:
i= -1;
break;
}
if ((i < 0) || (i > SSL_ENC_NUM_IDX))
*enc=NULL;
else
{
if (i == SSL_ENC_NULL_IDX)
*enc=EVP_enc_null();
else
*enc=ssl_cipher_methods[i];
}
switch (c->algorithm_mac)
{
case SSL_MD5:
i=SSL_MD_MD5_IDX;
break;
case SSL_SHA1:
i=SSL_MD_SHA1_IDX;
break;
case SSL_GOST94:
i = SSL_MD_GOST94_IDX;
break;
case SSL_GOST89MAC:
i = SSL_MD_GOST89MAC_IDX;
break;
default:
i= -1;
break;
}
if ((i < 0) || (i > SSL_MD_NUM_IDX))
{
*md=NULL;
if (mac_pkey_type!=NULL) *mac_pkey_type = NID_undef;
if (mac_secret_size!=NULL) *mac_secret_size = 0;
}
else
{
*md=ssl_digest_methods[i];
if (mac_pkey_type!=NULL) *mac_pkey_type = ssl_mac_pkey_id[i];
if (mac_secret_size!=NULL) *mac_secret_size = ssl_mac_secret_size[i];
}
if ((*enc != NULL) && (*md != NULL) && (!mac_pkey_type||*mac_pkey_type != NID_undef))
return(1);
else
return(0);
} | 0 | []
| openssl | edc032b5e3f3ebb1006a9c89e0ae00504f47966f | 209,485,070,590,803,600,000,000,000,000,000,000,000 | 119 | Add SRP support. |
ex_z(exarg_T *eap)
{
char_u *x;
long bigness;
char_u *kind;
int minus = 0;
linenr_T start, end, curs, i;
int j;
linenr_T lnum = eap->line2;
/* Vi compatible: ":z!" uses display height, without a count uses
* 'scroll' */
if (eap->forceit)
bigness = curwin->w_height;
else if (!ONE_WINDOW)
bigness = curwin->w_height - 3;
else
bigness = curwin->w_p_scr * 2;
if (bigness < 1)
bigness = 1;
x = eap->arg;
kind = x;
if (*kind == '-' || *kind == '+' || *kind == '='
|| *kind == '^' || *kind == '.')
++x;
while (*x == '-' || *x == '+')
++x;
if (*x != 0)
{
if (!VIM_ISDIGIT(*x))
{
emsg(_("E144: non-numeric argument to :z"));
return;
}
else
{
bigness = atol((char *)x);
/* bigness could be < 0 if atol(x) overflows. */
if (bigness > 2 * curbuf->b_ml.ml_line_count || bigness < 0)
bigness = 2 * curbuf->b_ml.ml_line_count;
p_window = bigness;
if (*kind == '=')
bigness += 2;
}
}
/* the number of '-' and '+' multiplies the distance */
if (*kind == '-' || *kind == '+')
for (x = kind + 1; *x == *kind; ++x)
;
switch (*kind)
{
case '-':
start = lnum - bigness * (linenr_T)(x - kind) + 1;
end = start + bigness - 1;
curs = end;
break;
case '=':
start = lnum - (bigness + 1) / 2 + 1;
end = lnum + (bigness + 1) / 2 - 1;
curs = lnum;
minus = 1;
break;
case '^':
start = lnum - bigness * 2;
end = lnum - bigness;
curs = lnum - bigness;
break;
case '.':
start = lnum - (bigness + 1) / 2 + 1;
end = lnum + (bigness + 1) / 2 - 1;
curs = end;
break;
default: /* '+' */
start = lnum;
if (*kind == '+')
start += bigness * (linenr_T)(x - kind - 1) + 1;
else if (eap->addr_count == 0)
++start;
end = start + bigness - 1;
curs = end;
break;
}
if (start < 1)
start = 1;
if (end > curbuf->b_ml.ml_line_count)
end = curbuf->b_ml.ml_line_count;
if (curs > curbuf->b_ml.ml_line_count)
curs = curbuf->b_ml.ml_line_count;
else if (curs < 1)
curs = 1;
for (i = start; i <= end; i++)
{
if (minus && i == lnum)
{
msg_putchar('\n');
for (j = 1; j < Columns; j++)
msg_putchar('-');
}
print_line(i, eap->flags & EXFLAG_NR, eap->flags & EXFLAG_LIST);
if (minus && i == lnum)
{
msg_putchar('\n');
for (j = 1; j < Columns; j++)
msg_putchar('-');
}
}
if (curwin->w_cursor.lnum != curs)
{
curwin->w_cursor.lnum = curs;
curwin->w_cursor.col = 0;
}
ex_no_reprint = TRUE;
} | 0 | [
"CWE-78"
]
| vim | 8c62a08faf89663e5633dc5036cd8695c80f1075 | 99,915,781,821,945,350,000,000,000,000,000,000,000 | 132 | patch 8.1.0881: can execute shell commands in rvim through interfaces
Problem: Can execute shell commands in rvim through interfaces.
Solution: Disable using interfaces in restricted mode. Allow for writing
file with writefile(), histadd() and a few others. |
void setTypeConvert(robj *setobj, int enc) {
setTypeIterator *si;
serverAssertWithInfo(NULL,setobj,setobj->type == OBJ_SET &&
setobj->encoding == OBJ_ENCODING_INTSET);
if (enc == OBJ_ENCODING_HT) {
int64_t intele;
dict *d = dictCreate(&setDictType,NULL);
sds element;
/* Presize the dict to avoid rehashing */
dictExpand(d,intsetLen(setobj->ptr));
/* To add the elements we extract integers and create redis objects */
si = setTypeInitIterator(setobj);
while (setTypeNext(si,&element,&intele) != -1) {
element = sdsfromlonglong(intele);
serverAssert(dictAdd(d,element,NULL) == DICT_OK);
}
setTypeReleaseIterator(si);
setobj->encoding = OBJ_ENCODING_HT;
zfree(setobj->ptr);
setobj->ptr = d;
} else {
serverPanic("Unsupported set conversion");
}
} | 0 | [
"CWE-190"
]
| redis | a30d367a71b7017581cf1ca104242a3c644dec0f | 251,011,576,787,573,180,000,000,000,000,000,000,000 | 28 | Fix Integer overflow issue with intsets (CVE-2021-32687)
The vulnerability involves changing the default set-max-intset-entries
configuration parameter to a very large value and constructing specially
crafted commands to manipulate sets |
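The overflow described above can be blocked before any allocation. A minimal sketch of such a guard in plain C; the int64_t element type mirrors intset contents but is an assumption here:

#include <stdint.h>

/* Reject a configured entry count whose byte size would overflow size_t
 * before any intset-style buffer is resized (int64_t entries assumed). */
static int intset_resize_ok(uint64_t max_entries)
{
    return max_entries <= SIZE_MAX / sizeof(int64_t);
}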
static void php_do_chgrp(INTERNAL_FUNCTION_PARAMETERS, int do_lchgrp) /* {{{ */
{
char *filename;
int filename_len;
zval *group;
gid_t gid;
int ret;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "sz/", &filename, &filename_len, &group) == FAILURE) {
RETURN_FALSE;
}
if (strlen(filename) != filename_len) {
RETURN_FALSE;
}
if (Z_TYPE_P(group) == IS_LONG) {
gid = (gid_t)Z_LVAL_P(group);
} else if (Z_TYPE_P(group) == IS_STRING) {
#if defined(ZTS) && defined(HAVE_GETGRNAM_R) && defined(_SC_GETGR_R_SIZE_MAX)
struct group gr;
struct group *retgrptr;
long grbuflen = sysconf(_SC_GETGR_R_SIZE_MAX);
char *grbuf;
if (grbuflen < 1) {
RETURN_FALSE;
}
grbuf = emalloc(grbuflen);
if (getgrnam_r(Z_STRVAL_P(group), &gr, grbuf, grbuflen, &retgrptr) != 0 || retgrptr == NULL) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Unable to find gid for %s", Z_STRVAL_P(group));
efree(grbuf);
RETURN_FALSE;
}
efree(grbuf);
gid = gr.gr_gid;
#else
struct group *gr = getgrnam(Z_STRVAL_P(group));
if (!gr) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Unable to find gid for %s", Z_STRVAL_P(group));
RETURN_FALSE;
}
gid = gr->gr_gid;
#endif
} else {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "parameter 2 should be string or integer, %s given", zend_zval_type_name(group));
RETURN_FALSE;
}
if (PG(safe_mode) &&(!php_checkuid(filename, NULL, CHECKUID_ALLOW_FILE_NOT_EXISTS))) {
RETURN_FALSE;
}
/* Check the basedir */
if (php_check_open_basedir(filename TSRMLS_CC)) {
RETURN_FALSE;
}
if (do_lchgrp) {
#if HAVE_LCHOWN
ret = VCWD_LCHOWN(filename, -1, gid);
#endif
} else {
ret = VCWD_CHOWN(filename, -1, gid);
}
if (ret == -1) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "%s", strerror(errno));
RETURN_FALSE;
}
RETURN_TRUE;
} | 0 | []
| php-src | ce96fd6b0761d98353761bf78d5bfb55291179fd | 184,286,054,779,207,900,000,000,000,000,000,000,000 | 73 | - fix #39863, do not accept paths with NULL in them. See http://news.php.net/php.internals/50191, trunk will have the patch later (adding a macro and/or changing (some) APIs. Patch by Rasmus |
void machine_power_off(void)
{
machine_shutdown();
if (pm_power_off)
pm_power_off();
} | 0 | [
"CWE-284",
"CWE-264"
]
| linux | a4780adeefd042482f624f5e0d577bf9cdcbb760 | 299,625,984,911,479,350,000,000,000,000,000,000,000 | 6 | ARM: 7735/2: Preserve the user r/w register TPIDRURW on context switch and fork
Since commit 6a1c53124aa1 the user writeable TLS register was zeroed to
prevent it from being used as a covert channel between two tasks.
More and more applications are coming to Windows RT;
Wine could support them, but they mostly expect to have
the thread environment block (TEB) in TPIDRURW.
This patch preserves that register per thread instead of clearing it.
Unlike the TPIDRURO, which is already switched, the TPIDRURW
can be updated from userspace, so it needs careful treatment in the case that we
modify TPIDRURW and call fork(). To avoid this we must always read
TPIDRURW in copy_thread.
Signed-off-by: André Hentschel <[email protected]>
Signed-off-by: Will Deacon <[email protected]>
Signed-off-by: Jonathan Austin <[email protected]>
Signed-off-by: Russell King <[email protected]> |
bool ConnectionImpl::maybeDirectDispatch(Buffer::Instance& data) {
if (!handling_upgrade_) {
// Only direct dispatch for Upgrade requests.
return false;
}
ENVOY_CONN_LOG(trace, "direct-dispatched {} bytes", connection_, data.length());
onBody(data);
data.drain(data.length());
return true;
} | 0 | [
"CWE-770"
]
| envoy | 7ca28ff7d46454ae930e193d97b7d08156b1ba59 | 274,698,072,870,891,560,000,000,000,000,000,000,000 | 11 | [http1] Include request URL in request header size computation, and reject partial headers that exceed configured limits (#145)
Signed-off-by: antonio <[email protected]> |
static void *xt_mttg_seq_start(struct seq_file *seq, loff_t *pos,
bool is_target)
{
struct nf_mttg_trav *trav = seq->private;
unsigned int j;
trav->class = MTTG_TRAV_INIT;
for (j = 0; j < *pos; ++j)
if (xt_mttg_seq_next(seq, NULL, NULL, is_target) == NULL)
return NULL;
return trav;
} | 0 | [
"CWE-119"
]
| nf-next | d7591f0c41ce3e67600a982bab6989ef0f07b3ce | 319,419,990,149,245,130,000,000,000,000,000,000,000 | 12 | netfilter: x_tables: introduce and use xt_copy_counters_from_user
The three variants use the same copy-and-pasted code; condense this into a
helper and use that.
Make sure info.name is 0-terminated.
Signed-off-by: Florian Westphal <[email protected]>
Signed-off-by: Pablo Neira Ayuso <[email protected]> |
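A user-space sketch of the shared-helper pattern this message describes: one sanitizer replaces the three copies and always NUL-terminates the name. The struct layout and sizes below are placeholders, not the kernel's definitions:

#include <string.h>

struct counters_info {          /* stand-in for the private kernel struct */
    char name[29];              /* placeholder length */
    unsigned int num_counters;
};

/* Single helper used by all variants; guarantees a 0-terminated name. */
static void sanitize_counters_info(struct counters_info *dst,
                                   const struct counters_info *src)
{
    memcpy(dst, src, sizeof(*dst));
    dst->name[sizeof(dst->name) - 1] = '\0';
}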
static void io_cqring_add_event(struct io_kiocb *req, long res, long cflags)
{
struct io_ring_ctx *ctx = req->ctx;
unsigned long flags;
spin_lock_irqsave(&ctx->completion_lock, flags);
__io_cqring_fill_event(req, res, cflags);
io_commit_cqring(ctx);
spin_unlock_irqrestore(&ctx->completion_lock, flags);
io_cqring_ev_posted(ctx);
} | 0 | []
| linux | 0f2122045b946241a9e549c2a76cea54fa58a7ff | 277,752,981,201,957,000,000,000,000,000,000,000,000 | 12 | io_uring: don't rely on weak ->files references
Grab actual references to the files_struct. To avoid circular reference
issues due to this, we add a per-task note that keeps track of what
io_uring contexts a task has used. When the task execs or exits its
assigned files, we cancel requests based on this tracking.
With that, we can grab proper references to the files table, and no
longer need to rely on stashing away ring_fd and ring_file to check
if the ring_fd may have been closed.
Cc: [email protected] # v5.5+
Reviewed-by: Pavel Begunkov <[email protected]>
Signed-off-by: Jens Axboe <[email protected]> |
parse_SET_L4_DST_PORT(char *arg, struct ofpbuf *ofpacts,
enum ofputil_protocol *usable_protocols OVS_UNUSED)
{
return str_to_u16(arg, "destination port",
&ofpact_put_SET_L4_DST_PORT(ofpacts)->port);
} | 0 | [
"CWE-125"
]
| ovs | 9237a63c47bd314b807cda0bd2216264e82edbe8 | 241,303,948,608,177,400,000,000,000,000,000,000,000 | 6 | ofp-actions: Avoid buffer overread in BUNDLE action decoding.
Reported-at: https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=9052
Signed-off-by: Ben Pfaff <[email protected]>
Acked-by: Justin Pettit <[email protected]> |
static struct dentry * real_lookup(struct dentry * parent, struct qstr * name, struct nameidata *nd)
{
struct dentry * result;
struct inode *dir = parent->d_inode;
mutex_lock(&dir->i_mutex);
/*
* First re-do the cached lookup just in case it was created
* while we waited for the directory semaphore..
*
* FIXME! This could use version numbering or similar to
* avoid unnecessary cache lookups.
*
* The "dcache_lock" is purely to protect the RCU list walker
* from concurrent renames at this point (we mustn't get false
* negatives from the RCU list walk here, unlike the optimistic
* fast walk).
*
* so doing d_lookup() (with seqlock), instead of lockfree __d_lookup
*/
result = d_lookup(parent, name);
if (!result) {
struct dentry *dentry;
/* Don't create child dentry for a dead directory. */
result = ERR_PTR(-ENOENT);
if (IS_DEADDIR(dir))
goto out_unlock;
dentry = d_alloc(parent, name);
result = ERR_PTR(-ENOMEM);
if (dentry) {
result = dir->i_op->lookup(dir, dentry, nd);
if (result)
dput(dentry);
else
result = dentry;
}
out_unlock:
mutex_unlock(&dir->i_mutex);
return result;
}
/*
* Uhhuh! Nasty case: the cache was re-populated while
* we waited on the semaphore. Need to revalidate.
*/
mutex_unlock(&dir->i_mutex);
if (result->d_op && result->d_op->d_revalidate) {
result = do_revalidate(result, nd);
if (!result)
result = ERR_PTR(-ENOENT);
}
return result;
} | 0 | [
"CWE-120"
]
| linux-2.6 | d70b67c8bc72ee23b55381bd6a884f4796692f77 | 165,492,615,886,804,020,000,000,000,000,000,000,000 | 55 | [patch] vfs: fix lookup on deleted directory
Lookup can install a child dentry for a deleted directory. This keeps
the directory dentry alive, and the inode pinned in the cache and on
disk, even after all external references have gone away.
This isn't a big problem normally, since memory pressure or umount
will clear out the directory dentry and its children, releasing the
inode. But for UBIFS this causes problems because its orphan area can
overflow.
Fix this by returning ENOENT for all lookups on a S_DEAD directory
before creating a child dentry.
Thanks to Zoltan Sogor for noticing this while testing UBIFS, and
Artem for the excellent analysis of the problem and testing.
Reported-by: Artem Bityutskiy <[email protected]>
Tested-by: Artem Bityutskiy <[email protected]>
Signed-off-by: Miklos Szeredi <[email protected]>
Signed-off-by: Al Viro <[email protected]> |
reset (int fd, char const *file, struct stats *stats)
{
if (! pagesize)
{
pagesize = getpagesize ();
if (pagesize == 0 || 2 * pagesize + 1 <= pagesize)
abort ();
bufalloc = ALIGN_TO (INITIAL_BUFSIZE, pagesize) + pagesize + 1;
buffer = xmalloc (bufalloc);
}
bufbeg = buflim = ALIGN_TO (buffer + 1, pagesize);
bufbeg[-1] = eolbyte;
bufdesc = fd;
if (S_ISREG (stats->stat.st_mode))
{
if (file)
bufoffset = 0;
else
{
bufoffset = lseek (fd, 0, SEEK_CUR);
if (bufoffset < 0)
{
suppressible_error (_("lseek failed"), errno);
return 0;
}
}
}
return 1;
} | 0 | [
"CWE-189"
]
| grep | 8fcf61523644df42e1905c81bed26838e0b04f91 | 20,801,712,186,695,181,000,000,000,000,000,000,000 | 31 | grep: fix integer-overflow issues in main program
* NEWS: Document this.
* bootstrap.conf (gnulib_modules): Add inttypes, xstrtoimax.
Remove xstrtoumax.
* src/main.c: Include <inttypes.h>, for INTMAX_MAX, PRIdMAX.
(context_length_arg, prtext, grepbuf, grep, grepfile)
(get_nondigit_option, main):
Use intmax_t, not int, for line counts.
(context_length_arg, main): Silently ceiling line counts
to maximum value, since there's no practical difference between
doing that and using infinite-precision arithmetic.
(out_before, out_after, pending): Now intmax_t, not int.
(max_count, outleft): Now intmax_t, not off_t.
(prepend_args, prepend_default_options, main):
Use size_t, not int, for sizes.
(prepend_default_options): Check for int and size_t overflow. |
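A hedged sketch of the silent-ceiling parsing the changelog mentions, assuming strtoimax-style semantics and non-negative counts:

#include <errno.h>
#include <inttypes.h>

/* Parse a context/line count; on overflow, ceiling to INTMAX_MAX
 * instead of wrapping, matching the behavior described above. */
static intmax_t parse_count(const char *s)
{
    errno = 0;
    intmax_t v = strtoimax(s, NULL, 10);
    if (errno == ERANGE)
        v = INTMAX_MAX;        /* silently ceiling, not an error */
    return v;
}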
void readField(folly::Optional<T>& data, FieldType /* fieldType */) {
data = folly::Optional<T>(readRaw<T>());
} | 0 | [
"CWE-400",
"CWE-522",
"CWE-674"
]
| mcrouter | 97e033b3bb0cb16b61bf49f0dc7f311a3e0edd1b | 196,773,838,200,527,260,000,000,000,000,000,000,000 | 3 | Attempt to make CarbonProtocolReader::skip tail recursive
Reviewed By: edenzik
Differential Revision: D17967570
fbshipit-source-id: fdc32e190a521349c7c8f4d6081902fa18eb0284 |
int lxc_strmunmap(void *addr, size_t length)
{
return munmap(addr, length + 1);
} | 0 | [
"CWE-417"
]
| lxc | 5eb45428b312e978fb9e294dde16efb14dd9fa4d | 132,769,430,410,137,750,000,000,000,000,000,000,000 | 4 | CVE 2018-6556: verify netns fd in lxc-user-nic
Signed-off-by: Christian Brauner <[email protected]> |
static ssize_t cgroup_release_agent_write(struct kernfs_open_file *of,
char *buf, size_t nbytes, loff_t off)
{
struct cgroup *cgrp;
BUILD_BUG_ON(sizeof(cgrp->root->release_agent_path) < PATH_MAX);
cgrp = cgroup_kn_lock_live(of->kn, false);
if (!cgrp)
return -ENODEV;
spin_lock(&release_agent_path_lock);
strlcpy(cgrp->root->release_agent_path, strstrip(buf),
sizeof(cgrp->root->release_agent_path));
spin_unlock(&release_agent_path_lock);
cgroup_kn_unlock(of->kn);
return nbytes;
} | 1 | [
"CWE-287",
"CWE-269"
]
| linux | 24f6008564183aa120d07c03d9289519c2fe02af | 40,479,071,972,421,042,000,000,000,000,000,000,000 | 17 | cgroup-v1: Require capabilities to set release_agent
The cgroup release_agent is called with call_usermodehelper. The function
call_usermodehelper starts the release_agent with a full set of capabilities.
Therefore require capabilities when setting the release_agent.
Reported-by: Tabitha Sable <[email protected]>
Tested-by: Tabitha Sable <[email protected]>
Fixes: 81a6a5cdd2c5 ("Task Control Groups: automatic userspace notification of idle cgroups")
Cc: [email protected] # v2.6.24+
Signed-off-by: "Eric W. Biederman" <[email protected]>
Signed-off-by: Tejun Heo <[email protected]> |
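The fix amounts to a privilege gate on the write path. A minimal sketch with the capability check abstracted to a boolean parameter; the kernel's actual capable() plumbing is omitted:

#include <errno.h>
#include <stdbool.h>

/* Gate release_agent updates: the agent later runs with full
 * capabilities via call_usermodehelper, so setting it must itself
 * require privilege. */
static int release_agent_write_allowed(bool caller_has_cap_sys_admin)
{
    return caller_has_cap_sys_admin ? 0 : -EPERM;
}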
local char *justname(char *path)
{
char *p;
p = strrchr(path, '/');
return p == NULL ? path : p + 1;
} | 0 | [
"CWE-703",
"CWE-22"
]
| pigz | fdad1406b3ec809f4954ff7cdf9e99eb18c2458f | 321,160,620,182,582,100,000,000,000,000,000,000,000 | 7 | When decompressing with -N or -NT, strip any path from header name.
This uses the path of the compressed file combined with the name
from the header as the name of the decompressed output file. Any
path information in the header name is stripped. This avoids a
possible vulnerability where absolute or descending paths are put
in the gzip header. |
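A short, runnable usage sketch of the justname() helper from this row; the header name below is illustrative:

#include <stdio.h>
#include <string.h>

static char *justname(char *path)
{
    char *p = strrchr(path, '/');
    return p == NULL ? path : p + 1;
}

int main(void)
{
    char header_name[] = "../../etc/evil";   /* attacker-chosen header name */
    /* Only "evil" survives, so a crafted gzip header cannot place the
     * output outside the compressed file's own directory. */
    printf("%s\n", justname(header_name));
    return 0;
}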
void iwl_pcie_d3_complete_suspend(struct iwl_trans *trans,
bool test, bool reset)
{
iwl_disable_interrupts(trans);
/*
* in testing mode, the host stays awake and the
* hardware won't be reset (not even partially)
*/
if (test)
return;
iwl_pcie_disable_ict(trans);
iwl_pcie_synchronize_irqs(trans);
iwl_clear_bit(trans, CSR_GP_CNTRL,
BIT(trans->trans_cfg->csr->flag_mac_access_req));
iwl_clear_bit(trans, CSR_GP_CNTRL,
BIT(trans->trans_cfg->csr->flag_init_done));
if (reset) {
/*
* reset TX queues -- some of their registers reset during S3
* so if we don't reset everything here the D3 image would try
* to execute some invalid memory upon resume
*/
iwl_trans_pcie_tx_reset(trans);
}
iwl_pcie_set_pwr(trans, true);
} | 0 | [
"CWE-476"
]
| linux | 8188a18ee2e48c9a7461139838048363bfce3fef | 106,358,997,184,975,950,000,000,000,000,000,000,000 | 32 | iwlwifi: pcie: fix rb_allocator workqueue allocation
We don't handle failures in the rb_allocator workqueue allocation
correctly. To fix that, move the code earlier so the cleanup is
easier and we don't have to undo all the interrupt allocations in
this case.
Signed-off-by: Johannes Berg <[email protected]>
Signed-off-by: Luca Coelho <[email protected]> |
static inline int put_v4l2_input32(struct v4l2_input __user *kp,
struct v4l2_input32 __user *up)
{
if (copy_in_user(up, kp, sizeof(*up)))
return -EFAULT;
return 0;
} | 0 | [
"CWE-787"
]
| linux | a1dfb4c48cc1e64eeb7800a27c66a6f7e88d075a | 160,963,174,887,815,100,000,000,000,000,000,000,000 | 7 | media: v4l2-compat-ioctl32.c: refactor compat ioctl32 logic
The 32-bit compat v4l2 ioctl handling is implemented based on its 64-bit
equivalent. It converts 32-bit data structures into its 64-bit
equivalents and needs to provide the data to the 64-bit ioctl in user
space memory which is commonly allocated using
compat_alloc_user_space().
However, due to how that function is implemented, it can only be called
a single time for every syscall invocation.
Supposedly to avoid this limitation, the existing code uses a mix of
memory from the kernel stack and memory allocated through
compat_alloc_user_space().
Under normal circumstances, this would not work, because the 64-bit
ioctl expects all pointers to point to user space memory. As a
workaround, set_fs(KERNEL_DS) is called to temporarily disable this
extra safety check and allow kernel pointers. However, this might
introduce a security vulnerability: The result of the 32-bit to 64-bit
conversion is writeable by user space because the output buffer has been
allocated via compat_alloc_user_space(). A malicious user space process
could then manipulate pointers inside this output buffer, and due to the
previous set_fs(KERNEL_DS) call, functions like get_user() or put_user()
no longer prevent kernel memory access.
The new approach is to pre-calculate the total amount of user space
memory that is needed, allocate it using compat_alloc_user_space() and
then divide up the allocated memory to accommodate all data structures
that need to be converted.
An alternative approach would have been to retain the union type karg
that they allocated on the kernel stack in do_video_ioctl(), copy all
data from user space into karg and then back to user space. However, we
decided against this approach because it does not align with other
compat syscall implementations. Instead, we tried to replicate the
get_user/put_user pairs as found in other places in the kernel:
if (get_user(clipcount, &up->clipcount) ||
put_user(clipcount, &kp->clipcount)) return -EFAULT;
Notes from [email protected]:
This patch was taken from:
https://github.com/LineageOS/android_kernel_samsung_apq8084/commit/97b733953c06e4f0398ade18850f0817778255f7
Clearly nobody could be bothered to upstream this patch or at minimum
tell us :-( We only heard about this a week ago.
This patch was rebased and cleaned up. Compared to the original I
also swapped the order of the convert_in_user arguments so that they
matched copy_in_user. It was hard to review otherwise. I also replaced
the ALLOC_USER_SPACE/ALLOC_AND_GET by a normal function.
Fixes: 6b5a9492ca ("v4l: introduce string control support.")
Signed-off-by: Daniel Mentz <[email protected]>
Co-developed-by: Hans Verkuil <[email protected]>
Acked-by: Sakari Ailus <[email protected]>
Signed-off-by: Hans Verkuil <[email protected]>
Cc: <[email protected]> # for v4.15 and up
Signed-off-by: Mauro Carvalho Chehab <[email protected]> |
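A user-space analogue of the new approach: pre-calculate the total, allocate once, then carve sub-buffers from that single block. Here a plain buffer stands in for the one-shot compat_alloc_user_space() allocation:

#include <stddef.h>

/* Hand out the next sub-buffer from one up-front allocation; returns
 * NULL rather than overrunning the block.  Usage: size everything
 * first, allocate total bytes once, then carve() each piece in order. */
static void *carve(char *base, size_t total, size_t *off, size_t want)
{
    if (want > total - *off)
        return NULL;
    void *p = base + *off;
    *off += want;
    return p;
}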
static int init_ssl_connection(SSL *con)
{
int i;
const char *str;
X509 *peer;
long verify_error;
MS_STATIC char buf[BUFSIZ];
#if !defined(OPENSSL_NO_TLSEXT) && !defined(OPENSSL_NO_NEXTPROTONEG)
const unsigned char *next_proto_neg;
unsigned next_proto_neg_len;
#endif
#ifndef OPENSSL_NO_KRB5
char *client_princ;
#endif
unsigned char *exportedkeymat;
i=SSL_accept(con);
while (i <= 0 && SSL_get_error(con,i) == SSL_ERROR_WANT_X509_LOOKUP)
{
BIO_printf(bio_s_out,"LOOKUP during accept %s\n",srp_callback_parm.login);
srp_callback_parm.user = SRP_VBASE_get_by_user(srp_callback_parm.vb, srp_callback_parm.login);
if (srp_callback_parm.user)
BIO_printf(bio_s_out,"LOOKUP done %s\n",srp_callback_parm.user->info);
else
BIO_printf(bio_s_out,"LOOKUP not successful\n");
i=SSL_accept(con);
}
if (i <= 0)
{
if (BIO_sock_should_retry(i))
{
BIO_printf(bio_s_out,"DELAY\n");
return(1);
}
BIO_printf(bio_err,"ERROR\n");
verify_error=SSL_get_verify_result(con);
if (verify_error != X509_V_OK)
{
BIO_printf(bio_err,"verify error:%s\n",
X509_verify_cert_error_string(verify_error));
}
else
ERR_print_errors(bio_err);
return(0);
}
PEM_write_bio_SSL_SESSION(bio_s_out,SSL_get_session(con));
peer=SSL_get_peer_certificate(con);
if (peer != NULL)
{
BIO_printf(bio_s_out,"Client certificate\n");
PEM_write_bio_X509(bio_s_out,peer);
X509_NAME_oneline(X509_get_subject_name(peer),buf,sizeof buf);
BIO_printf(bio_s_out,"subject=%s\n",buf);
X509_NAME_oneline(X509_get_issuer_name(peer),buf,sizeof buf);
BIO_printf(bio_s_out,"issuer=%s\n",buf);
X509_free(peer);
}
if (SSL_get_shared_ciphers(con,buf,sizeof buf) != NULL)
BIO_printf(bio_s_out,"Shared ciphers:%s\n",buf);
str=SSL_CIPHER_get_name(SSL_get_current_cipher(con));
BIO_printf(bio_s_out,"CIPHER is %s\n",(str != NULL)?str:"(NONE)");
#if !defined(OPENSSL_NO_TLSEXT) && !defined(OPENSSL_NO_NEXTPROTONEG)
SSL_get0_next_proto_negotiated(con, &next_proto_neg, &next_proto_neg_len);
if (next_proto_neg)
{
BIO_printf(bio_s_out,"NEXTPROTO is ");
BIO_write(bio_s_out, next_proto_neg, next_proto_neg_len);
BIO_printf(bio_s_out, "\n");
}
#endif
{
SRTP_PROTECTION_PROFILE *srtp_profile
= SSL_get_selected_srtp_profile(con);
if(srtp_profile)
BIO_printf(bio_s_out,"SRTP Extension negotiated, profile=%s\n",
srtp_profile->name);
}
if (SSL_cache_hit(con)) BIO_printf(bio_s_out,"Reused session-id\n");
if (SSL_ctrl(con,SSL_CTRL_GET_FLAGS,0,NULL) &
TLS1_FLAGS_TLS_PADDING_BUG)
BIO_printf(bio_s_out,
"Peer has incorrect TLSv1 block padding\n");
#ifndef OPENSSL_NO_KRB5
client_princ = kssl_ctx_get0_client_princ(SSL_get0_kssl_ctx(con));
if (client_princ != NULL)
{
BIO_printf(bio_s_out,"Kerberos peer principal is %s\n",
client_princ);
}
#endif /* OPENSSL_NO_KRB5 */
BIO_printf(bio_s_out, "Secure Renegotiation IS%s supported\n",
SSL_get_secure_renegotiation_support(con) ? "" : " NOT");
if (keymatexportlabel != NULL) {
BIO_printf(bio_s_out, "Keying material exporter:\n");
BIO_printf(bio_s_out, " Label: '%s'\n", keymatexportlabel);
BIO_printf(bio_s_out, " Length: %i bytes\n",
keymatexportlen);
exportedkeymat = OPENSSL_malloc(keymatexportlen);
if (exportedkeymat != NULL) {
i = SSL_export_keying_material(con, exportedkeymat,
keymatexportlen,
keymatexportlabel,
strlen(keymatexportlabel),
NULL, 0, 0);
if (i != keymatexportlen) {
BIO_printf(bio_s_out,
" Error: return value %i\n", i);
} else {
BIO_printf(bio_s_out, " Keying material: ");
for (i=0; i<keymatexportlen; i++)
BIO_printf(bio_s_out, "%02X",
exportedkeymat[i]);
BIO_printf(bio_s_out, "\n");
}
OPENSSL_free(exportedkeymat);
}
}
return(1);
} | 0 | []
| openssl | 4817504d069b4c5082161b02a22116ad75f822b1 | 118,951,553,815,580,080,000,000,000,000,000,000,000 | 126 | PR: 2658
Submitted by: Robin Seggelmann <[email protected]>
Reviewed by: steve
Support for TLS/DTLS heartbeats. |
void cfg80211_rx_unprot_mlme_mgmt(struct net_device *dev, const u8 *buf,
size_t len)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct wiphy *wiphy = wdev->wiphy;
struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
const struct ieee80211_mgmt *mgmt = (void *)buf;
u32 cmd;
if (WARN_ON(len < 2))
return;
if (ieee80211_is_deauth(mgmt->frame_control))
cmd = NL80211_CMD_UNPROT_DEAUTHENTICATE;
else
cmd = NL80211_CMD_UNPROT_DISASSOCIATE;
trace_cfg80211_rx_unprot_mlme_mgmt(dev, buf, len);
nl80211_send_mlme_event(rdev, dev, buf, len, cmd, GFP_ATOMIC, -1,
NULL, 0);
} | 0 | [
"CWE-120"
]
| linux | f88eb7c0d002a67ef31aeb7850b42ff69abc46dc | 299,105,649,305,936,300,000,000,000,000,000,000,000 | 21 | nl80211: validate beacon head
We currently don't validate the beacon head, i.e. the header,
fixed part and elements that are to go in front of the TIM
element. This means that the variable elements there can be
malformed, e.g. have a length exceeding the buffer size, but
most downstream code from this assumes that this has already
been checked.
Add the necessary checks to the netlink policy.
Cc: [email protected]
Fixes: ed1b6cc7f80f ("cfg80211/nl80211: add beacon settings")
Link: https://lore.kernel.org/r/1569009255-I7ac7fbe9436e9d8733439eab8acbbd35e55c74ef@changeid
Signed-off-by: Johannes Berg <[email protected]> |
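A minimal sketch of the kind of element validation the message calls for: walk 802.11 information elements (1-byte id, 1-byte length, payload) and reject any element whose claimed length exceeds the remaining buffer. This is a generic illustration, not the actual netlink policy code:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Reject a beacon-head element buffer containing any malformed element. */
static bool ies_well_formed(const uint8_t *ies, size_t len)
{
    while (len >= 2) {
        size_t elen = ies[1];
        if (elen > len - 2)
            return false;       /* element claims more bytes than remain */
        ies += 2 + elen;
        len -= 2 + elen;
    }
    return len == 0;            /* a trailing lone byte is also malformed */
}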
get_text_gray_cmyk_row(j_compress_ptr cinfo, cjpeg_source_ptr sinfo)
/* This version is for reading text-format PGM files with any maxval and
converting to CMYK */
{
ppm_source_ptr source = (ppm_source_ptr)sinfo;
FILE *infile = source->pub.input_file;
register JSAMPROW ptr;
register JSAMPLE *rescale = source->rescale;
JDIMENSION col;
unsigned int maxval = source->maxval;
ptr = source->pub.buffer[0];
if (maxval == MAXJSAMPLE) {
for (col = cinfo->image_width; col > 0; col--) {
JSAMPLE gray = read_pbm_integer(cinfo, infile, maxval);
rgb_to_cmyk(gray, gray, gray, ptr, ptr + 1, ptr + 2, ptr + 3);
ptr += 4;
}
} else {
for (col = cinfo->image_width; col > 0; col--) {
JSAMPLE gray = rescale[read_pbm_integer(cinfo, infile, maxval)];
rgb_to_cmyk(gray, gray, gray, ptr, ptr + 1, ptr + 2, ptr + 3);
ptr += 4;
}
}
return 1;
} | 0 | [
"CWE-200",
"CWE-125"
]
| libjpeg-turbo | 9c78a04df4e44ef6487eee99c4258397f4fdca55 | 73,154,618,019,023,570,000,000,000,000,000,000,000 | 27 | cjpeg: Fix OOB read caused by malformed 8-bit BMP
... in which one or more of the color indices is out of range for the
number of palette entries.
Fix partly borrowed from jpeg-9c. This commit also adopts Guido's
JERR_PPM_OUTOFRANGE enum value in lieu of our project-specific
JERR_PPM_TOOLARGE enum value.
Fixes #258 |
floorLog2 (int x)
{
//
// For x > 0, floorLog2(x) returns floor(log(x)/log(2)).
//
int y = 0;
while (x > 1)
{
y += 1;
x >>= 1;
}
return y;
} | 0 | [
"CWE-125"
]
| openexr | e79d2296496a50826a15c667bf92bdc5a05518b4 | 237,768,803,355,141,930,000,000,000,000,000,000,000 | 16 | fix memory leaks and invalid memory accesses
Signed-off-by: Peter Hillman <[email protected]> |
qf_goto_cwindow(qf_info_T *qi, int resize, int sz, int vertsplit)
{
win_T *win;
win = qf_find_win(qi);
if (win == NULL)
return FAIL;
win_goto(win);
if (resize)
{
if (vertsplit)
{
if (sz != win->w_width)
win_setwidth(sz);
}
else if (sz != win->w_height && win->w_height
+ win->w_status_height + tabline_height() < cmdline_row)
win_setheight(sz);
}
return OK;
} | 0 | [
"CWE-416"
]
| vim | 4f1b083be43f351bc107541e7b0c9655a5d2c0bb | 737,457,683,653,792,300,000,000,000,000,000,000 | 23 | patch 9.0.0322: crash when no errors and 'quickfixtextfunc' is set
Problem: Crash when no errors and 'quickfixtextfunc' is set.
Solution: Do not handle errors if there aren't any. |
inbound_foundip (session *sess, char *ip, const message_tags_data *tags_data)
{
struct hostent *HostAddr;
HostAddr = gethostbyname (ip);
if (HostAddr)
{
prefs.dcc_ip = ((struct in_addr *) HostAddr->h_addr)->s_addr;
EMIT_SIGNAL_TIMESTAMP (XP_TE_FOUNDIP, sess->server->server_session,
inet_ntoa (*((struct in_addr *) HostAddr->h_addr)),
NULL, NULL, NULL, 0, tags_data->timestamp);
}
} | 0 | [
"CWE-22"
]
| hexchat | 4e061a43b3453a9856d34250c3913175c45afe9d | 6,566,794,412,765,470,000,000,000,000,000,000,000 | 13 | Clean up handling CAP LS |
static inline void timer_base_lock_expiry(struct timer_base *base)
{
spin_lock(&base->expiry_lock);
} | 0 | [
"CWE-200",
"CWE-330"
]
| linux | f227e3ec3b5cad859ad15666874405e8c1bbc1d4 | 106,094,914,232,200,220,000,000,000,000,000,000,000 | 4 | random32: update the net random state on interrupt and activity
This modifies the first 32 bits out of the 128 bits of a random CPU's
net_rand_state on interrupt or CPU activity to complicate remote
observations that could lead to guessing the network RNG's internal
state.
Note that depending on some network devices' interrupt rate moderation
or binding, this re-seeding might happen on every packet or even almost
never.
In addition, with NOHZ some CPUs might not even get timer interrupts,
leaving their local state rarely updated, while they are running
networked processes making use of the random state. For this reason, we
also perform this update in update_process_times() in order to at least
update the state when there is user or system activity, since it's the
only case we care about.
Reported-by: Amit Klein <[email protected]>
Suggested-by: Linus Torvalds <[email protected]>
Cc: Eric Dumazet <[email protected]>
Cc: "Jason A. Donenfeld" <[email protected]>
Cc: Andy Lutomirski <[email protected]>
Cc: Kees Cook <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: <[email protected]>
Signed-off-by: Willy Tarreau <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
message_send_paused(const char *const jid)
{
xmpp_ctx_t * const ctx = connection_get_ctx();
xmpp_stanza_t *stanza = stanza_create_chat_state(ctx, jid, STANZA_NAME_PAUSED);
_send_message_stanza(stanza);
xmpp_stanza_release(stanza);
} | 0 | [
"CWE-20",
"CWE-346"
]
| profanity | 8e75437a7e43d4c55e861691f74892e666e29b0b | 150,567,803,468,736,940,000,000,000,000,000,000,000 | 7 | Add carbons from check |
static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
return -EINVAL;
if (sqe->ioprio || sqe->buf_index)
return -EINVAL;
if (req->flags & REQ_F_FIXED_FILE)
return -EBADF;
req->statx.dfd = READ_ONCE(sqe->fd);
req->statx.mask = READ_ONCE(sqe->len);
req->statx.filename = u64_to_user_ptr(READ_ONCE(sqe->addr));
req->statx.buffer = u64_to_user_ptr(READ_ONCE(sqe->addr2));
req->statx.flags = READ_ONCE(sqe->statx_flags);
return 0;
} | 0 | [
"CWE-416"
]
| linux | 6d816e088c359866f9867057e04f244c608c42fe | 181,809,110,477,922,500,000,000,000,000,000,000,000 | 17 | io_uring: hold 'ctx' reference around task_work queue + execute
We're holding the request reference, but we need to go one higher
to ensure that the ctx remains valid after the request has finished.
If the ring is closed with pending task_work inflight, and the
given io_kiocb finishes sync during issue, then we need a reference
to the ring itself around the task_work execution cycle.
Cc: [email protected] # v5.7+
Reported-by: [email protected]
Signed-off-by: Jens Axboe <[email protected]> |
static irqreturn_t fsl_hv_state_change_thread(int irq, void *data)
{
struct doorbell_isr *dbisr = data;
blocking_notifier_call_chain(&failover_subscribers, dbisr->partition,
NULL);
return IRQ_HANDLED;
} | 0 | [
"CWE-190"
]
| linux | 6a024330650e24556b8a18cc654ad00cfecf6c6c | 171,171,332,959,261,580,000,000,000,000,000,000,000 | 9 | drivers/virt/fsl_hypervisor.c: prevent integer overflow in ioctl
The "param.count" value is a u64 thatcomes from the user. The code
later in the function assumes that param.count is at least one and if
it's not then it leads to an Oops when we dereference the ZERO_SIZE_PTR.
Also the addition can have an integer overflow which would lead us to
allocate a smaller "pages" array than required. I can't immediately
tell what the possible runtime implications are, but it's safest to
prevent the overflow.
Link: http://lkml.kernel.org/r/20181218082129.GE32567@kadam
Fixes: 6db7199407ca ("drivers/virt: introduce Freescale hypervisor management driver")
Signed-off-by: Dan Carpenter <[email protected]>
Reviewed-by: Andrew Morton <[email protected]>
Cc: Timur Tabi <[email protected]>
Cc: Mihai Caraman <[email protected]>
Cc: Kumar Gala <[email protected]>
Cc: <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
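A hedged sketch of the up-front validation the message describes; the pointer-sized element is an assumption standing in for the real pages-array entry:

#include <errno.h>
#include <stdint.h>

/* Validate a user-supplied u64 element count before sizing a pages
 * array: zero counts and counts whose byte size would overflow are
 * rejected before any allocation. */
static int sg_count_ok(uint64_t count)
{
    if (count == 0 || count > SIZE_MAX / sizeof(void *))
        return -EINVAL;
    return 0;
}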
get_quote_count(const char *line)
{
int quote_count= 0;
const char *quote= line;
while ((quote= strpbrk(quote, "'`\"")) != NULL) {
quote_count++;
quote++;
}
return quote_count;
} | 0 | []
| mysql-server | 20addb05e58fdf822896f490fcaaf2ec5ed4bcb5 | 336,611,062,137,766,060,000,000,000,000,000,000,000 | 12 | Bug# 25998635: Client does not escape the USE statement
When there are quotes in the USE statement, the mysql client does
not correctly escape them.
The USE statement is processed line by line by the client's parser,
which cannot handle multi-line commands the way the server does.
The fix is to escape the USE parameters whenever quotes are used. |
void bio_endio(struct bio *bio)
{
again:
if (!bio_remaining_done(bio))
return;
if (!bio_integrity_endio(bio))
return;
/*
* Need to have a real endio function for chained bios, otherwise
* various corner cases will break (like stacking block devices that
* save/restore bi_end_io) - however, we want to avoid unbounded
* recursion and blowing the stack. Tail call optimization would
* handle this, but compiling with frame pointers also disables
* gcc's sibling call optimization.
*/
if (bio->bi_end_io == bio_chain_endio) {
bio = __bio_chain_endio(bio);
goto again;
}
if (bio->bi_disk && bio_flagged(bio, BIO_TRACE_COMPLETION)) {
trace_block_bio_complete(bio->bi_disk->queue, bio,
blk_status_to_errno(bio->bi_status));
bio_clear_flag(bio, BIO_TRACE_COMPLETION);
}
blk_throtl_bio_endio(bio);
/* release cgroup info */
bio_uninit(bio);
if (bio->bi_end_io)
bio->bi_end_io(bio);
} | 0 | [
"CWE-772",
"CWE-787"
]
| linux | 95d78c28b5a85bacbc29b8dba7c04babb9b0d467 | 206,625,537,155,934,960,000,000,000,000,000,000,000 | 33 | fix unbalanced page refcounting in bio_map_user_iov
bio_map_user_iov and bio_unmap_user do unbalanced page refcounting if
IO vector has small consecutive buffers belonging to the same page.
bio_add_pc_page merges them into one, but the page reference is never
dropped.
Cc: [email protected]
Signed-off-by: Vitaly Mayatskikh <[email protected]>
Signed-off-by: Al Viro <[email protected]> |
static int csnmp_read_host(user_data_t *ud) {
host_definition_t *host;
int status;
int success;
int i;
host = ud->data;
if (host->interval == 0)
host->interval = plugin_get_interval();
if (host->sess_handle == NULL)
csnmp_host_open_session(host);
if (host->sess_handle == NULL)
return (-1);
success = 0;
for (i = 0; i < host->data_list_len; i++) {
data_definition_t *data = host->data_list[i];
if (data->is_table)
status = csnmp_read_table(host, data);
else
status = csnmp_read_value(host, data);
if (status == 0)
success++;
}
if (success == 0)
return (-1);
return (0);
} /* int csnmp_read_host */ | 0 | [
"CWE-415"
]
| collectd | d16c24542b2f96a194d43a73c2e5778822b9cb47 | 84,773,867,460,263,020,000,000,000,000,000,000,000 | 35 | snmp plugin: Fix double free of request PDU
snmp_sess_synch_response() always frees request PDU, in both case of request
error and success. If error condition occurs inside of `while (status == 0)`
loop, double free of `req` happens.
Issue: #2291
Signed-off-by: Florian Forster <[email protected]> |
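A runnable sketch of the ownership rule behind this fix: the callee consumes the request PDU on every path, so the caller must never free it again. The stand-in below simulates net-snmp's behavior:

#include <stdlib.h>

struct pdu { int id; };

/* Stand-in with the net-snmp ownership rule the message describes:
 * the callee frees the request PDU on success AND on error. */
static int sess_synch_response(struct pdu *req, struct pdu **res)
{
    free(req);
    *res = NULL;
    return -1;              /* simulate the error path */
}

int main(void)
{
    struct pdu *req = malloc(sizeof(*req));
    struct pdu *res;
    int status = sess_synch_response(req, &res);
    req = NULL;             /* consumed by the callee: freeing it here
                               would be the double free from the bug */
    free(res);              /* free(NULL) is a no-op */
    return status != 0;
}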
const sslHashes& SSL::getHashes() const
{
return hashes_;
} | 0 | [
"CWE-254"
]
| mysql-server | e7061f7e5a96c66cb2e0bf46bec7f6ff35801a69 | 222,190,237,215,806,840,000,000,000,000,000,000,000 | 4 | Bug #22738607: YASSL FUNCTION X509_NAME_GET_INDEX_BY_NID IS NOT WORKING AS EXPECTED. |
setup_worker_server (GdmSession *self)
{
GDBusAuthObserver *observer;
GDBusServer *server;
GError *error = NULL;
g_debug ("GdmSession: Creating D-Bus server for worker for session");
observer = g_dbus_auth_observer_new ();
g_signal_connect_object (observer,
"authorize-authenticated-peer",
G_CALLBACK (allow_worker_function),
self,
0);
server = gdm_dbus_setup_private_server (observer, &error);
g_object_unref (observer);
if (server == NULL) {
g_warning ("Cannot create worker D-Bus server for the session: %s",
error->message);
return;
}
g_signal_connect_object (server,
"new-connection",
G_CALLBACK (handle_connection_from_worker),
self,
0);
self->priv->worker_server = server;
g_dbus_server_start (server);
g_debug ("GdmSession: D-Bus server for workers listening on %s",
g_dbus_server_get_client_address (self->priv->worker_server));
} | 0 | []
| gdm | 5ac224602f1d603aac5eaa72e1760d3e33a26f0a | 52,001,911,995,685,750,000,000,000,000,000,000,000 | 36 | session: disconnect signals from worker proxy when conversation is freed
We don't want an outstanding reference on the worker proxy to lead to
signal handlers getting dispatched after the conversation is freed.
https://bugzilla.gnome.org/show_bug.cgi?id=758032 |
inline void LegacyDepthwiseConvImpl(
const DepthwiseParams& params, const RuntimeShape& input_shape,
const uint8* input_data, const RuntimeShape& filter_shape,
const uint8* filter_data, const RuntimeShape& bias_shape,
const int32* bias_data, const RuntimeShape& output_shape,
uint8* output_data, int thread_start, int thread_end, int thread_dim) {
return LegacyDepthwiseConvWithRounding<
DepthwiseConvOutputRounding::kAwayFromZero>(
params, input_shape, input_data, filter_shape, filter_data, bias_shape,
bias_data, output_shape, output_data, thread_start, thread_end,
thread_dim);
} | 0 | [
"CWE-703",
"CWE-835"
]
| tensorflow | dfa22b348b70bb89d6d6ec0ff53973bacb4f4695 | 230,670,885,554,931,930,000,000,000,000,000,000,000 | 12 | Prevent a division by 0 in average ops.
PiperOrigin-RevId: 385184660
Change-Id: I7affd4554f9b336fca29ac68f633232c094d0bd3 |
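A minimal sketch of the divisor guard implied by the message, written in plain C with a hypothetical error convention:

#include <stdint.h>

/* Guard an average-pooling style reduction: an empty window must not
 * reach the division. */
static int safe_average(const int32_t *vals, int count, int32_t *out)
{
    if (count <= 0)
        return -1;                 /* reject instead of dividing by 0 */
    int64_t sum = 0;
    for (int i = 0; i < count; i++)
        sum += vals[i];
    *out = (int32_t)(sum / count);
    return 0;
}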
add_color_map (const gint32 image_id,
PSDimage *img_a)
{
GimpParasite *parasite;
if (img_a->color_map_len)
{
if (img_a->color_mode != PSD_DUOTONE)
gimp_image_set_colormap (image_id, img_a->color_map, img_a->color_map_entries);
else
{
/* Add parasite for Duotone color data */
IFDBG(2) g_debug ("Add Duotone color data parasite");
parasite = gimp_parasite_new (PSD_PARASITE_DUOTONE_DATA, 0,
img_a->color_map_len, img_a->color_map);
gimp_image_parasite_attach (image_id, parasite);
gimp_parasite_free (parasite);
}
g_free (img_a->color_map);
}
return 0;
} | 0 | [
"CWE-190"
]
| gimp | 88eccea84aa375197cc04a2a0e2e29debb56bfa5 | 240,690,494,077,533,340,000,000,000,000,000,000,000 | 23 | Harden the PSD plugin against integer overflows.
Issues discovered by Stefan Cornelius, Secunia Research, advisory SA37232
and CVE identifier CVE-2009-3909. Fixes bug #600741.
(cherry picked from commit 9cc8d78ff33b7a36852b74e64b427489cad44d0e) |
GF_Err gf_m4a_parse_program_config_element(GF_BitStream *bs, GF_M4ADecSpecInfo *cfg)
{
u32 i;
cfg->program_config_element_present = 1;
cfg->cpe_channels = 0;
cfg->element_instance_tag = gf_bs_read_int_log(bs, 4, "element_instance_tag");
cfg->object_type = gf_bs_read_int_log(bs, 2, "object_type");
cfg->sampling_frequency_index = gf_bs_read_int_log(bs, 4, "sampling_frequency_index");
cfg->num_front_channel_elements = gf_bs_read_int_log(bs, 4, "num_front_channel_elements");
cfg->num_side_channel_elements = gf_bs_read_int_log(bs, 4, "num_side_channel_elements");
cfg->num_back_channel_elements = gf_bs_read_int_log(bs, 4, "num_back_channel_elements");
cfg->num_lfe_channel_elements = gf_bs_read_int_log(bs, 2, "num_lfe_channel_elements");
cfg->num_assoc_data_elements = gf_bs_read_int_log(bs, 3, "num_assoc_data_elements");
cfg->num_valid_cc_elements = gf_bs_read_int_log(bs, 4, "num_valid_cc_elements");
cfg->mono_mixdown_present = (Bool)gf_bs_read_int_log(bs, 1, "mono_mixdown_present");
if (cfg->mono_mixdown_present) {
cfg->mono_mixdown_element_number = gf_bs_read_int_log(bs, 4, "mono_mixdown_element_number");
}
cfg->stereo_mixdown_present = gf_bs_read_int_log(bs, 1, "stereo_mixdown_present");
if (cfg->stereo_mixdown_present) {
cfg->stereo_mixdown_element_number = gf_bs_read_int_log(bs, 4, "stereo_mixdown_element_number");
}
cfg->matrix_mixdown_idx_present = gf_bs_read_int_log(bs, 1, "matrix_mixdown_idx_present");
if (cfg->matrix_mixdown_idx_present) {
cfg->matrix_mixdown_idx = gf_bs_read_int_log(bs, 2, "matrix_mixdown_idx");
cfg->pseudo_surround_enable = gf_bs_read_int_log(bs, 1, "pseudo_surround_enable");
}
for (i = 0; i < cfg->num_front_channel_elements; i++) {
cfg->front_element_is_cpe[i] = gf_bs_read_int_log_idx(bs, 1, "front_element_is_cpe", i);
cfg->front_element_tag_select[i] = gf_bs_read_int_log_idx(bs, 4, "front_element_tag_select", i);
if (cfg->front_element_is_cpe[i]) cfg->cpe_channels++;
}
for (i = 0; i < cfg->num_side_channel_elements; i++) {
cfg->side_element_is_cpe[i] = gf_bs_read_int_log_idx(bs, 1, "side_element_is_cpe", i);
cfg->side_element_tag_select[i] = gf_bs_read_int_log_idx(bs, 4, "side_element_tag_select", i);
if (cfg->side_element_is_cpe[i]) cfg->cpe_channels++;
}
for (i = 0; i < cfg->num_back_channel_elements; i++) {
cfg->back_element_is_cpe[i] = gf_bs_read_int_log_idx(bs, 1, "back_element_is_cpe", i);
cfg->back_element_tag_select[i] = gf_bs_read_int_log_idx(bs, 4, "back_element_tag_select", i);
if (cfg->back_element_is_cpe[i]) cfg->cpe_channels++;
}
for (i = 0; i < cfg->num_lfe_channel_elements; i++) {
cfg->lfe_element_tag_select[i] = gf_bs_read_int_log_idx(bs, 4, "lfe_element_tag_select", i);
}
for (i = 0; i < cfg->num_assoc_data_elements; i++) {
cfg->assoc_data_element_tag_select[i] = gf_bs_read_int_log_idx(bs, 4, "assoc_data_element_tag_select", i);
}
for (i = 0; i < cfg->num_valid_cc_elements; i++) {
cfg->cc_element_is_ind_sw[i] = gf_bs_read_int_log_idx(bs, 1, "cc_element_is_ind_sw", i);
cfg->valid_cc_element_tag_select[i] = gf_bs_read_int_log_idx(bs, 4, "valid_cc_element_tag_select", i);
}
gf_bs_align(bs);
cfg->comment_field_bytes = gf_bs_read_int_log(bs, 8, "comment_field_bytes");
gf_bs_read_data(bs, (char *)cfg->comments, cfg->comment_field_bytes);
cfg->nb_chan = cfg->num_front_channel_elements + cfg->num_back_channel_elements + cfg->num_side_channel_elements + cfg->num_lfe_channel_elements;
cfg->nb_chan += cfg->cpe_channels;
return GF_OK;
} | 0 | [
"CWE-190",
"CWE-787"
]
| gpac | 51cdb67ff7c5f1242ac58c5aa603ceaf1793b788 | 148,473,498,396,659,620,000,000,000,000,000,000,000 | 64 | add safety in avc/hevc/vvc sps/pps/vps ID check - cf #1720 #1721 #1722 |
void ImapModelOpenConnectionTest::testOk()
{
QCoreApplication::processEvents();
QCoreApplication::processEvents();
QVERIFY(SOCK->writtenStuff().isEmpty());
SOCK->fakeReading( "* OK foo\r\n" );
QVERIFY( completedSpy->isEmpty() );
QCoreApplication::processEvents();
QCoreApplication::processEvents();
QCoreApplication::processEvents();
QCOMPARE( SOCK->writtenStuff(), QByteArray("y0 CAPABILITY\r\n") );
QVERIFY( completedSpy->isEmpty() );
SOCK->fakeReading( "* CAPABILITY IMAP4rev1\r\ny0 OK capability completed\r\n" );
QCoreApplication::processEvents();
QCoreApplication::processEvents();
QCoreApplication::processEvents();
QCOMPARE( authSpy->size(), 1 );
QCoreApplication::processEvents();
QCoreApplication::processEvents();
QCOMPARE( SOCK->writtenStuff(), QByteArray("y1 LOGIN luzr sikrit\r\n") );
SOCK->fakeReading( "y1 OK [CAPABILITY IMAP4rev1] logged in\r\n");
QCoreApplication::processEvents();
QCoreApplication::processEvents();
QCOMPARE( completedSpy->size(), 1 );
QVERIFY(failedSpy->isEmpty());
QCOMPARE( authSpy->size(), 1 );
QVERIFY(startTlsUpgradeSpy->isEmpty());
} | 0 | [
"CWE-200"
]
| trojita | 25fffa3e25cbad85bbca804193ad336b090a9ce1 | 19,500,556,497,286,180,000,000,000,000,000,000,000 | 28 | IMAP: refuse to work when STARTTLS is required but server sends PREAUTH
Oops, we cannot send STARTTLS when the connection is already authenticated.
This is serious enough to warrant an error; an attacker might be going after a
plaintext of a message we're going to APPEND, etc.
Thanks to Arnt Gulbrandsen on the imap-protocol ML for asking what happens when
we're configured to request STARTTLS and a PREAUTH is received, and to Michael M
Slusarz for starting that discussion.
Hope the error message is readable enough.
CVE: CVE-2014-2567 |
TEST_F(RouterTest, InternalRedirectKeepsFragmentWithOveride) {
TestScopedRuntime scoped_runtime;
Runtime::LoaderSingleton::getExisting()->mergeValues(
{{"envoy.reloadable_features.http_reject_path_with_fragment", "false"}});
enableRedirects();
default_request_headers_.setForwardedProto("http");
sendRequest();
EXPECT_CALL(callbacks_, clearRouteCache());
EXPECT_CALL(callbacks_, recreateStream(_)).WillOnce(Return(true));
Http::ResponseHeaderMapPtr redirect_headers{new Http::TestResponseHeaderMapImpl{
{":status", "302"}, {"location", "http://www.foo.com/#fragment"}}};
response_decoder_->decodeHeaders(std::move(redirect_headers), false);
EXPECT_EQ(1U, cm_.thread_local_cluster_.cluster_.info_->stats_store_
.counter("upstream_internal_redirect_succeeded_total")
.value());
// In production, the HCM recreateStream would have called this.
router_.onDestroy();
EXPECT_EQ("/#fragment", default_request_headers_.getPathValue());
} | 0 | [
"CWE-703"
]
| envoy | 18871dbfb168d3512a10c78dd267ff7c03f564c6 | 11,655,301,498,255,832,000,000,000,000,000,000,000 | 21 | [1.18] CVE-2022-21655
Crash with direct_response
Signed-off-by: Otto van der Schaaf <[email protected]> |
device_create_groups_vargs(struct class *class, struct device *parent,
dev_t devt, void *drvdata,
const struct attribute_group **groups,
const char *fmt, va_list args)
{
struct device *dev = NULL;
int retval = -ENODEV;
if (class == NULL || IS_ERR(class))
goto error;
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev) {
retval = -ENOMEM;
goto error;
}
device_initialize(dev);
dev->devt = devt;
dev->class = class;
dev->parent = parent;
dev->groups = groups;
dev->release = device_create_release;
dev_set_drvdata(dev, drvdata);
retval = kobject_set_name_vargs(&dev->kobj, fmt, args);
if (retval)
goto error;
retval = device_add(dev);
if (retval)
goto error;
return dev;
error:
put_device(dev);
return ERR_PTR(retval);
} | 0 | [
"CWE-787"
]
| linux | aa838896d87af561a33ecefea1caa4c15a68bc47 | 220,676,932,706,061,650,000,000,000,000,000,000,000 | 39 | drivers core: Use sysfs_emit and sysfs_emit_at for show(device *...) functions
Convert the various sprintf family calls in sysfs device show functions
to sysfs_emit and sysfs_emit_at for PAGE_SIZE buffer safety.
Done with:
$ spatch -sp-file sysfs_emit_dev.cocci --in-place --max-width=80 .
And cocci script:
$ cat sysfs_emit_dev.cocci
@@
identifier d_show;
identifier dev, attr, buf;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
return
- sprintf(buf,
+ sysfs_emit(buf,
...);
...>
}
@@
identifier d_show;
identifier dev, attr, buf;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
return
- snprintf(buf, PAGE_SIZE,
+ sysfs_emit(buf,
...);
...>
}
@@
identifier d_show;
identifier dev, attr, buf;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
return
- scnprintf(buf, PAGE_SIZE,
+ sysfs_emit(buf,
...);
...>
}
@@
identifier d_show;
identifier dev, attr, buf;
expression chr;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
return
- strcpy(buf, chr);
+ sysfs_emit(buf, chr);
...>
}
@@
identifier d_show;
identifier dev, attr, buf;
identifier len;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
len =
- sprintf(buf,
+ sysfs_emit(buf,
...);
...>
return len;
}
@@
identifier d_show;
identifier dev, attr, buf;
identifier len;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
len =
- snprintf(buf, PAGE_SIZE,
+ sysfs_emit(buf,
...);
...>
return len;
}
@@
identifier d_show;
identifier dev, attr, buf;
identifier len;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
len =
- scnprintf(buf, PAGE_SIZE,
+ sysfs_emit(buf,
...);
...>
return len;
}
@@
identifier d_show;
identifier dev, attr, buf;
identifier len;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
- len += scnprintf(buf + len, PAGE_SIZE - len,
+ len += sysfs_emit_at(buf, len,
...);
...>
return len;
}
@@
identifier d_show;
identifier dev, attr, buf;
expression chr;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
...
- strcpy(buf, chr);
- return strlen(buf);
+ return sysfs_emit(buf, chr);
}
Signed-off-by: Joe Perches <[email protected]>
Link: https://lore.kernel.org/r/3d033c33056d88bbe34d4ddb62afd05ee166ab9a.1600285923.git.joe@perches.com
Signed-off-by: Greg Kroah-Hartman <[email protected]> |
Value ExpressionRegex::serialize(bool explain) const {
return Value(
Document{{_opName,
Document{{"input", _input->serialize(explain)},
{"regex", _regex->serialize(explain)},
{"options", _options ? _options->serialize(explain) : Value()}}}});
} | 0 | []
| mongo | 1772b9a0393b55e6a280a35e8f0a1f75c014f301 | 20,151,900,829,397,368,000,000,000,000,000,000,000 | 7 | SERVER-49404 Enforce additional checks in $arrayToObject |
static int handle_cr(struct kvm_vcpu *vcpu)
{
unsigned long exit_qualification, val;
int cr;
int reg;
int err;
int ret;
exit_qualification = vmx_get_exit_qual(vcpu);
cr = exit_qualification & 15;
reg = (exit_qualification >> 8) & 15;
switch ((exit_qualification >> 4) & 3) {
case 0: /* mov to cr */
val = kvm_register_readl(vcpu, reg);
trace_kvm_cr_write(cr, val);
switch (cr) {
case 0:
err = handle_set_cr0(vcpu, val);
return kvm_complete_insn_gp(vcpu, err);
case 3:
WARN_ON_ONCE(enable_unrestricted_guest);
err = kvm_set_cr3(vcpu, val);
return kvm_complete_insn_gp(vcpu, err);
case 4:
err = handle_set_cr4(vcpu, val);
return kvm_complete_insn_gp(vcpu, err);
case 8: {
u8 cr8_prev = kvm_get_cr8(vcpu);
u8 cr8 = (u8)val;
err = kvm_set_cr8(vcpu, cr8);
ret = kvm_complete_insn_gp(vcpu, err);
if (lapic_in_kernel(vcpu))
return ret;
if (cr8_prev <= cr8)
return ret;
/*
* TODO: we might be squashing a
* KVM_GUESTDBG_SINGLESTEP-triggered
* KVM_EXIT_DEBUG here.
*/
vcpu->run->exit_reason = KVM_EXIT_SET_TPR;
return 0;
}
}
break;
case 2: /* clts */
WARN_ONCE(1, "Guest should always own CR0.TS");
vmx_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~X86_CR0_TS));
trace_kvm_cr_write(0, kvm_read_cr0(vcpu));
return kvm_skip_emulated_instruction(vcpu);
case 1: /*mov from cr*/
switch (cr) {
case 3:
WARN_ON_ONCE(enable_unrestricted_guest);
val = kvm_read_cr3(vcpu);
kvm_register_write(vcpu, reg, val);
trace_kvm_cr_read(cr, val);
return kvm_skip_emulated_instruction(vcpu);
case 8:
val = kvm_get_cr8(vcpu);
kvm_register_write(vcpu, reg, val);
trace_kvm_cr_read(cr, val);
return kvm_skip_emulated_instruction(vcpu);
}
break;
case 3: /* lmsw */
val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f;
trace_kvm_cr_write(0, (kvm_read_cr0(vcpu) & ~0xful) | val);
kvm_lmsw(vcpu, val);
return kvm_skip_emulated_instruction(vcpu);
default:
break;
}
vcpu->run->exit_reason = 0;
vcpu_unimpl(vcpu, "unhandled control register: op %d cr %d\n",
(int)(exit_qualification >> 4) & 3, cr);
return 0;
} | 0 | [
"CWE-787"
]
| linux | 04c4f2ee3f68c9a4bf1653d15f1a9a435ae33f7a | 213,401,518,191,575,000,000,000,000,000,000,000,000 | 79 | KVM: VMX: Don't use vcpu->run->internal.ndata as an array index
__vmx_handle_exit() uses vcpu->run->internal.ndata as an index for
an array access. Since vcpu->run is (can be) mapped to a user address
space with a writer permission, the 'ndata' could be updated by the
user process at anytime (the user process can set it to outside the
bounds of the array).
So, it is not safe that __vmx_handle_exit() uses the 'ndata' that way.
Fixes: 1aa561b1a4c0 ("kvm: x86: Add "last CPU" to some KVM_EXIT information")
Signed-off-by: Reiji Watanabe <[email protected]>
Reviewed-by: Jim Mattson <[email protected]>
Message-Id: <[email protected]>
Cc: [email protected]
Signed-off-by: Paolo Bonzini <[email protected]> |
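A user-space sketch of the read-once-and-bounds-check pattern the fix applies; the struct and bound are placeholders for the user-mapped kvm_run layout:

#include <stdint.h>

#define NDATA_MAX 16u            /* placeholder for the real array bound */

struct shared_run {              /* stand-in for the user-mapped vcpu->run */
    uint32_t ndata;
    uint64_t data[NDATA_MAX];
};

/* Snapshot the user-writable index once, bounds-check the snapshot, and
 * only then index the array; never re-read a value user space can edit. */
static int push_entry(volatile struct shared_run *run, uint64_t v)
{
    uint32_t n = run->ndata;     /* single read of the shared field */
    if (n >= NDATA_MAX)
        return -1;
    run->data[n] = v;
    run->ndata = n + 1;
    return 0;
}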
ofpacts_verify(const struct ofpact ofpacts[], size_t ofpacts_len,
uint32_t allowed_ovsinsts, enum ofpact_type outer_action)
{
const struct ofpact *a;
enum ovs_instruction_type inst;
inst = OVSINST_OFPIT13_METER;
OFPACT_FOR_EACH (a, ofpacts, ofpacts_len) {
enum ovs_instruction_type next;
enum ofperr error;
if (a->type == OFPACT_CONJUNCTION) {
OFPACT_FOR_EACH (a, ofpacts, ofpacts_len) {
if (a->type != OFPACT_CONJUNCTION && a->type != OFPACT_NOTE) {
VLOG_WARN("\"conjunction\" actions may be used along with "
"\"note\" but not any other kind of action "
"(such as the \"%s\" action used here)",
ofpact_name(a->type));
return OFPERR_NXBAC_BAD_CONJUNCTION;
}
}
return 0;
}
error = ofpacts_verify_nested(a, outer_action);
if (error) {
return error;
}
next = ovs_instruction_type_from_ofpact_type(a->type);
if (a > ofpacts
&& (inst == OVSINST_OFPIT11_APPLY_ACTIONS
? next < inst
: next <= inst)) {
const char *name = ovs_instruction_name_from_type(inst);
const char *next_name = ovs_instruction_name_from_type(next);
if (next == inst) {
VLOG_WARN("duplicate %s instruction not allowed, for OpenFlow "
"1.1+ compatibility", name);
} else {
VLOG_WARN("invalid instruction ordering: %s must appear "
"before %s, for OpenFlow 1.1+ compatibility",
next_name, name);
}
return OFPERR_OFPBAC_UNSUPPORTED_ORDER;
}
if (!((1u << next) & allowed_ovsinsts)) {
const char *name = ovs_instruction_name_from_type(next);
VLOG_WARN("%s instruction not allowed here", name);
return OFPERR_OFPBIC_UNSUP_INST;
}
inst = next;
}
return 0;
} | 0 | [
"CWE-125"
]
| ovs | 9237a63c47bd314b807cda0bd2216264e82edbe8 | 290,231,337,420,686,900,000,000,000,000,000,000,000 | 59 | ofp-actions: Avoid buffer overread in BUNDLE action decoding.
Reported-at: https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=9052
Signed-off-by: Ben Pfaff <[email protected]>
Acked-by: Justin Pettit <[email protected]> |
test_client_auth_request_none (TestConnection *test,
gconstpointer data)
{
GIOStream *connection;
GError *error = NULL;
test->database = g_tls_file_database_new (tls_test_file_path ("ca-roots.pem"), &error);
g_assert_no_error (error);
g_assert_nonnull (test->database);
/* Request, but don't provide, a client certificate */
connection = start_async_server_and_connect_to_it (test, G_TLS_AUTHENTICATION_REQUESTED);
test->client_connection = g_tls_client_connection_new (connection, test->identity, &error);
g_assert_no_error (error);
g_assert_nonnull (test->client_connection);
g_object_unref (connection);
g_tls_connection_set_database (G_TLS_CONNECTION (test->client_connection), test->database);
/* All validation in this test */
g_tls_client_connection_set_validation_flags (G_TLS_CLIENT_CONNECTION (test->client_connection),
G_TLS_CERTIFICATE_VALIDATE_ALL);
read_test_data_async (test);
g_main_loop_run (test->loop);
wait_until_server_finished (test);
/* The connection should succeed and everything should work. We only REQUESTED
* authentication, in contrast to G_TLS_AUTHENTICATION_REQUIRED where this
* should fail.
*/
g_assert_no_error (test->read_error);
g_assert_no_error (test->server_error);
} | 0 | [
"CWE-295"
]
| glib-networking | 29513946809590c4912550f6f8620468f9836d94 | 334,634,986,837,167,400,000,000,000,000,000,000,000 | 34 | Return bad identity error if identity is unset
When the server-identity property of GTlsClientConnection is unset, the
documentation says we need to fail the certificate verification with
G_TLS_CERTIFICATE_BAD_IDENTITY. This is important because otherwise,
it's easy for applications to fail to specify server identity.
Unfortunately, we did not correctly implement the intended, documented
behavior. When server identity is missing, we check the validity of the
TLS certificate, but do not check if it corresponds to the expected
server (since we have no expected server). Then we assume the identity
is good, instead of returning bad identity, as documented. This means,
for example, that evil.com can present a valid certificate issued to
evil.com, and we would happily accept it for paypal.com.
Fixes #135 |
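A minimal sketch of the documented fail-closed contract, assuming GIO types; the helper itself is hypothetical, while G_TLS_CERTIFICATE_BAD_IDENTITY is the real GTlsCertificateFlags value:
```
/* Hypothetical helper: with no expected identity there is nothing to
 * match, so the bad-identity flag must be OR'ed in, not skipped. */
static GTlsCertificateFlags
check_identity_sketch (GTlsCertificateFlags chain_errors,
                       GSocketConnectable  *server_identity)
{
  if (server_identity == NULL)
    return chain_errors | G_TLS_CERTIFICATE_BAD_IDENTITY;
  return chain_errors;  /* real code matches the peer cert here */
}
```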
TestCheckedArrayByteSink(char* outbuf, int32_t capacity)
: CheckedArrayByteSink(outbuf, capacity), calledFlush(FALSE) {} | 0 | [
"CWE-190",
"CWE-787"
]
| icu | b7d08bc04a4296982fcef8b6b8a354a9e4e7afca | 287,686,556,034,331,580,000,000,000,000,000,000,000 | 2 | ICU-20958 Prevent SEGV_MAPERR in append
See #971 |
socket_ref_state_set (AtkObject *accessible)
{
char *child_name, *child_path;
AtkSocket *socket = ATK_SOCKET (accessible);
int count = 0;
int j;
int v;
DBusMessage *message, *reply;
DBusMessageIter iter, iter_array;
AtkStateSet *set;
set = atk_state_set_new ();
if (!socket->embedded_plug_id)
return set;
child_name = g_strdup (socket->embedded_plug_id);
if (!child_name)
return set;
child_path = g_utf8_strchr (child_name + 1, -1, ':');
if (!child_path)
{
g_free (child_name);
return set;
}
*(child_path++) = '\0';
message = dbus_message_new_method_call (child_name, child_path, ATSPI_DBUS_INTERFACE_ACCESSIBLE, "GetState");
g_free (child_name);
reply = dbus_connection_send_with_reply_and_block (spi_global_app_data->bus, message, 1, NULL);
dbus_message_unref (message);
if (reply == NULL)
return set;
if (strcmp (dbus_message_get_signature (reply), "au") != 0)
{
dbus_message_unref (reply);
return set;
}
dbus_message_iter_init (reply, &iter);
dbus_message_iter_recurse (&iter, &iter_array);
do
{
dbus_message_iter_get_basic (&iter_array, &v);
for (j = 0; j < 32; j++)
{
if (v & (1 << j))
{
AtkState state = spi_atk_state_from_spi_state ((count << 5) + j);
atk_state_set_add_state (set, state);
}
}
count++;
}
while (dbus_message_iter_next (&iter_array));
dbus_message_unref (reply);
return set;
} | 0 | []
| at-spi2-atk | e4f3eee2e137cd34cd427875365f458c65458164 | 239,064,092,287,275,400,000,000,000,000,000,000,000 | 57 | Use XDG_RUNTIME_DIR to hold sockets, and do not make a world-writable dir
If we use XDG_RUNTIME_DIR, then the directory should be owned by the
appropriate user, so it should not need to be world-writable. Hopefully this
won't break accessibility for administrative apps on some distro.
https://bugzilla.gnome.org/show_bug.cgi?id=678348 |
TEST_P(RBACIntegrationTest, Allowed) {
useAccessLog("%RESPONSE_CODE_DETAILS%");
config_helper_.addFilter(RBAC_CONFIG);
initialize();
codec_client_ = makeHttpConnection(lookupPort("http"));
auto response = codec_client_->makeRequestWithBody(
Http::TestRequestHeaderMapImpl{
{":method", "GET"},
{":path", "/"},
{":scheme", "http"},
{":authority", "host"},
{"x-forwarded-for", "10.0.0.1"},
},
1024);
waitForNextUpstreamRequest();
upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "200"}}, true);
response->waitForEndStream();
ASSERT_TRUE(response->complete());
EXPECT_EQ("200", response->headers().getStatusValue());
EXPECT_THAT(waitForAccessLog(access_log_name_), testing::HasSubstr("via_upstream"));
} | 0 | []
| envoy | 2c60632d41555ec8b3d9ef5246242be637a2db0f | 244,966,856,226,339,350,000,000,000,000,000,000,000 | 24 | http: header map security fixes for duplicate headers (#197)
Previously header matching did not match on all headers for
non-inline headers. This patch changes the default behavior to
always logically match on all headers. Multiple individual
headers will be logically concatenated with ',' similar to what
is done with inline headers. This makes the behavior effectively
consistent. This behavior can be temporarily reverted by setting
the runtime value "envoy.reloadable_features.header_match_on_all_headers"
to "false".
Targeted fixes have been additionally performed on the following
extensions which make them consider all duplicate headers by default as
a comma concatenated list:
1) Any extension using CEL matching on headers.
2) The header to metadata filter.
3) The JWT filter.
4) The Lua filter.
Like primary header matching used in routing, RBAC, etc. this behavior
can be disabled by setting the runtime value
"envoy.reloadable_features.header_match_on_all_headers" to false.
Finally, the setCopy() header map API previously only set the first
header in the case of duplicate non-inline headers. setCopy() now
behaves similarly to the other set*() APIs and replaces all found
headers with a single value. This may have had security implications
in the extauth filter which uses this API. This behavior can be disabled
by setting the runtime value
"envoy.reloadable_features.http_set_copy_replace_all_headers" to false.
Fixes https://github.com/envoyproxy/envoy-setec/issues/188
Signed-off-by: Matt Klein <[email protected]> |
static int mov_read_dref(MOVContext *c, AVIOContext *pb, MOVAtom atom)
{
AVStream *st;
MOVStreamContext *sc;
int entries, i, j;
if (c->fc->nb_streams < 1)
return 0;
st = c->fc->streams[c->fc->nb_streams-1];
sc = st->priv_data;
avio_rb32(pb); // version + flags
entries = avio_rb32(pb);
if (entries >= UINT_MAX / sizeof(*sc->drefs))
return AVERROR_INVALIDDATA;
av_free(sc->drefs);
sc->drefs_count = 0;
sc->drefs = av_mallocz(entries * sizeof(*sc->drefs));
if (!sc->drefs)
return AVERROR(ENOMEM);
sc->drefs_count = entries;
for (i = 0; i < sc->drefs_count; i++) {
MOVDref *dref = &sc->drefs[i];
uint32_t size = avio_rb32(pb);
int64_t next = avio_tell(pb) + size - 4;
if (size < 12)
return AVERROR_INVALIDDATA;
dref->type = avio_rl32(pb);
avio_rb32(pb); // version + flags
av_dlog(c->fc, "type %.4s size %d\n", (char*)&dref->type, size);
if (dref->type == MKTAG('a','l','i','s') && size > 150) {
/* macintosh alias record */
uint16_t volume_len, len;
int16_t type;
avio_skip(pb, 10);
volume_len = avio_r8(pb);
volume_len = FFMIN(volume_len, 27);
avio_read(pb, dref->volume, 27);
dref->volume[volume_len] = 0;
av_log(c->fc, AV_LOG_DEBUG, "volume %s, len %d\n", dref->volume, volume_len);
avio_skip(pb, 12);
len = avio_r8(pb);
len = FFMIN(len, 63);
avio_read(pb, dref->filename, 63);
dref->filename[len] = 0;
av_log(c->fc, AV_LOG_DEBUG, "filename %s, len %d\n", dref->filename, len);
avio_skip(pb, 16);
/* read next level up_from_alias/down_to_target */
dref->nlvl_from = avio_rb16(pb);
dref->nlvl_to = avio_rb16(pb);
av_log(c->fc, AV_LOG_DEBUG, "nlvl from %d, nlvl to %d\n",
dref->nlvl_from, dref->nlvl_to);
avio_skip(pb, 16);
for (type = 0; type != -1 && avio_tell(pb) < next; ) {
if(url_feof(pb))
return AVERROR_EOF;
type = avio_rb16(pb);
len = avio_rb16(pb);
av_log(c->fc, AV_LOG_DEBUG, "type %d, len %d\n", type, len);
if (len&1)
len += 1;
if (type == 2) { // absolute path
av_free(dref->path);
dref->path = av_mallocz(len+1);
if (!dref->path)
return AVERROR(ENOMEM);
avio_read(pb, dref->path, len);
if (len > volume_len && !strncmp(dref->path, dref->volume, volume_len)) {
len -= volume_len;
memmove(dref->path, dref->path+volume_len, len);
dref->path[len] = 0;
}
for (j = 0; j < len; j++)
if (dref->path[j] == ':')
dref->path[j] = '/';
av_log(c->fc, AV_LOG_DEBUG, "path %s\n", dref->path);
} else if (type == 0) { // directory name
av_free(dref->dir);
dref->dir = av_malloc(len+1);
if (!dref->dir)
return AVERROR(ENOMEM);
avio_read(pb, dref->dir, len);
dref->dir[len] = 0;
for (j = 0; j < len; j++)
if (dref->dir[j] == ':')
dref->dir[j] = '/';
av_log(c->fc, AV_LOG_DEBUG, "dir %s\n", dref->dir);
} else
avio_skip(pb, len);
}
}
avio_seek(pb, next, SEEK_SET);
}
return 0;
} | 0 | [
"CWE-119",
"CWE-787"
]
| FFmpeg | 689e59b7ffed34eba6159dcc78e87133862e3746 | 323,212,882,257,690,800,000,000,000,000,000,000,000 | 107 | mov: reset dref_count on realloc to keep values consistent.
This fixes a potential crash.
Signed-off-by: Michael Niedermayer <[email protected]> |
int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
bool pr = false;
switch (msr) {
case MSR_EFER:
return set_efer(vcpu, data);
case MSR_K7_HWCR:
data &= ~(u64)0x40; /* ignore flush filter disable */
data &= ~(u64)0x100; /* ignore ignne emulation enable */
data &= ~(u64)0x8; /* ignore TLB cache disable */
if (data != 0) {
vcpu_unimpl(vcpu, "unimplemented HWCR wrmsr: 0x%llx\n",
data);
return 1;
}
break;
case MSR_FAM10H_MMIO_CONF_BASE:
if (data != 0) {
vcpu_unimpl(vcpu, "unimplemented MMIO_CONF_BASE wrmsr: "
"0x%llx\n", data);
return 1;
}
break;
case MSR_AMD64_NB_CFG:
break;
case MSR_IA32_DEBUGCTLMSR:
if (!data) {
/* We support the non-activated case already */
break;
} else if (data & ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_BTF)) {
/* Values other than LBR and BTF are vendor-specific,
thus reserved and should throw a #GP */
return 1;
}
vcpu_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n",
__func__, data);
break;
case MSR_IA32_UCODE_REV:
case MSR_IA32_UCODE_WRITE:
case MSR_VM_HSAVE_PA:
case MSR_AMD64_PATCH_LOADER:
break;
case 0x200 ... 0x2ff:
return set_msr_mtrr(vcpu, msr, data);
case MSR_IA32_APICBASE:
kvm_set_apic_base(vcpu, data);
break;
case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
return kvm_x2apic_msr_write(vcpu, msr, data);
case MSR_IA32_TSCDEADLINE:
kvm_set_lapic_tscdeadline_msr(vcpu, data);
break;
case MSR_IA32_MISC_ENABLE:
vcpu->arch.ia32_misc_enable_msr = data;
break;
case MSR_KVM_WALL_CLOCK_NEW:
case MSR_KVM_WALL_CLOCK:
vcpu->kvm->arch.wall_clock = data;
kvm_write_wall_clock(vcpu->kvm, data);
break;
case MSR_KVM_SYSTEM_TIME_NEW:
case MSR_KVM_SYSTEM_TIME: {
kvmclock_reset(vcpu);
vcpu->arch.time = data;
kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
/* we verify if the enable bit is set... */
if (!(data & 1))
break;
/* ...but clean it before doing the actual write */
vcpu->arch.time_offset = data & ~(PAGE_MASK | 1);
vcpu->arch.time_page =
gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);
if (is_error_page(vcpu->arch.time_page))
vcpu->arch.time_page = NULL;
break;
}
case MSR_KVM_ASYNC_PF_EN:
if (kvm_pv_enable_async_pf(vcpu, data))
return 1;
break;
case MSR_KVM_STEAL_TIME:
if (unlikely(!sched_info_on()))
return 1;
if (data & KVM_STEAL_RESERVED_MASK)
return 1;
if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime,
data & KVM_STEAL_VALID_BITS))
return 1;
vcpu->arch.st.msr_val = data;
if (!(data & KVM_MSR_ENABLED))
break;
vcpu->arch.st.last_steal = current->sched_info.run_delay;
preempt_disable();
accumulate_steal_time(vcpu);
preempt_enable();
kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
break;
case MSR_KVM_PV_EOI_EN:
if (kvm_lapic_enable_pv_eoi(vcpu, data))
return 1;
break;
case MSR_IA32_MCG_CTL:
case MSR_IA32_MCG_STATUS:
case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1:
return set_msr_mce(vcpu, msr, data);
/* Performance counters are not protected by a CPUID bit,
* so we should check all of them in the generic path for the sake of
* cross vendor migration.
* Writing a zero into the event select MSRs disables them,
* which we perfectly emulate ;-). Any other value should be at least
* reported, some guests depend on them.
*/
case MSR_K7_EVNTSEL0:
case MSR_K7_EVNTSEL1:
case MSR_K7_EVNTSEL2:
case MSR_K7_EVNTSEL3:
if (data != 0)
vcpu_unimpl(vcpu, "unimplemented perfctr wrmsr: "
"0x%x data 0x%llx\n", msr, data);
break;
/* at least RHEL 4 unconditionally writes to the perfctr registers,
* so we ignore writes to make it happy.
*/
case MSR_K7_PERFCTR0:
case MSR_K7_PERFCTR1:
case MSR_K7_PERFCTR2:
case MSR_K7_PERFCTR3:
vcpu_unimpl(vcpu, "unimplemented perfctr wrmsr: "
"0x%x data 0x%llx\n", msr, data);
break;
case MSR_P6_PERFCTR0:
case MSR_P6_PERFCTR1:
pr = true;
case MSR_P6_EVNTSEL0:
case MSR_P6_EVNTSEL1:
if (kvm_pmu_msr(vcpu, msr))
return kvm_pmu_set_msr(vcpu, msr, data);
if (pr || data != 0)
vcpu_unimpl(vcpu, "disabled perfctr wrmsr: "
"0x%x data 0x%llx\n", msr, data);
break;
case MSR_K7_CLK_CTL:
/*
* Ignore all writes to this no longer documented MSR.
* Writes are only relevant for old K7 processors,
* all pre-dating SVM, but a recommended workaround from
* AMD for these chips. It is possible to specify the
* affected processor models on the command line, hence
* the need to ignore the workaround.
*/
break;
case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
if (kvm_hv_msr_partition_wide(msr)) {
int r;
mutex_lock(&vcpu->kvm->lock);
r = set_msr_hyperv_pw(vcpu, msr, data);
mutex_unlock(&vcpu->kvm->lock);
return r;
} else
return set_msr_hyperv(vcpu, msr, data);
break;
case MSR_IA32_BBL_CR_CTL3:
/* Drop writes to this legacy MSR -- see rdmsr
* counterpart for further detail.
*/
vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n", msr, data);
break;
case MSR_AMD64_OSVW_ID_LENGTH:
if (!guest_cpuid_has_osvw(vcpu))
return 1;
vcpu->arch.osvw.length = data;
break;
case MSR_AMD64_OSVW_STATUS:
if (!guest_cpuid_has_osvw(vcpu))
return 1;
vcpu->arch.osvw.status = data;
break;
default:
if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr))
return xen_hvm_config(vcpu, data);
if (kvm_pmu_msr(vcpu, msr))
return kvm_pmu_set_msr(vcpu, msr, data);
if (!ignore_msrs) {
vcpu_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n",
msr, data);
return 1;
} else {
vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n",
msr, data);
break;
}
}
return 0;
} | 0 | []
| linux | 6d1068b3a98519247d8ba4ec85cd40ac136dbdf9 | 106,363,673,182,494,820,000,000,000,000,000,000,000 | 213 | KVM: x86: invalid opcode oops on SET_SREGS with OSXSAVE bit set (CVE-2012-4461)
On hosts without XSAVE support, an unprivileged local user can trigger
oops similar to the one below by setting X86_CR4_OSXSAVE bit in guest
cr4 register using KVM_SET_SREGS ioctl and later issuing KVM_RUN
ioctl.
invalid opcode: 0000 [#2] SMP
Modules linked in: tun ip6table_filter ip6_tables ebtable_nat ebtables
...
Pid: 24935, comm: zoog_kvm_monito Tainted: G D 3.2.0-3-686-pae
EIP: 0060:[<f8b9550c>] EFLAGS: 00210246 CPU: 0
EIP is at kvm_arch_vcpu_ioctl_run+0x92a/0xd13 [kvm]
EAX: 00000001 EBX: 000f387e ECX: 00000000 EDX: 00000000
ESI: 00000000 EDI: 00000000 EBP: ef5a0060 ESP: d7c63e70
DS: 007b ES: 007b FS: 00d8 GS: 00e0 SS: 0068
Process zoog_kvm_monito (pid: 24935, ti=d7c62000 task=ed84a0c0
task.ti=d7c62000)
Stack:
00000001 f70a1200 f8b940a9 ef5a0060 00000000 00200202 f8769009 00000000
ef5a0060 000f387e eda5c020 8722f9c8 00015bae 00000000 ed84a0c0 ed84a0c0
c12bf02d 0000ae80 ef7f8740 fffffffb f359b740 ef5a0060 f8b85dc1 0000ae80
Call Trace:
[<f8b940a9>] ? kvm_arch_vcpu_ioctl_set_sregs+0x2fe/0x308 [kvm]
...
[<c12bfb44>] ? syscall_call+0x7/0xb
Code: 89 e8 e8 14 ee ff ff ba 00 00 04 00 89 e8 e8 98 48 ff ff 85 c0 74
1e 83 7d 48 00 75 18 8b 85 08 07 00 00 31 c9 8b 95 0c 07 00 00 <0f> 01
d1 c7 45 48 01 00 00 00 c7 45 1c 01 00 00 00 0f ae f0 89
EIP: [<f8b9550c>] kvm_arch_vcpu_ioctl_run+0x92a/0xd13 [kvm] SS:ESP
0068:d7c63e70
QEMU first retrieves the supported features via KVM_GET_SUPPORTED_CPUID
and then sets them later. So guest's X86_FEATURE_XSAVE should be masked
out on hosts without X86_FEATURE_XSAVE, making kvm_set_cr4 with
X86_CR4_OSXSAVE fail. Userspaces that allow specifying guest cpuid with
X86_FEATURE_XSAVE even on hosts that do not support it, might be
susceptible to this attack from inside the guest as well.
Allow setting X86_CR4_OSXSAVE bit only if host has XSAVE support.
Signed-off-by: Petr Matousek <[email protected]>
Signed-off-by: Marcelo Tosatti <[email protected]> |
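The fix reduces to one guard in kvm_set_cr4(). A hedged sketch — guest_cpuid_has_xsave() matches the helper of that KVM era, but treat the snippet as illustrative rather than the exact patch:
```
/* Sketch: reject CR4.OSXSAVE unless the guest CPUID advertises XSAVE,
 * so KVM_SET_SREGS fails with -EINVAL instead of KVM_RUN later
 * reaching XSETBV paths on a host without XSAVE. */
if ((cr4 & X86_CR4_OSXSAVE) && !guest_cpuid_has_xsave(vcpu))
        return 1;
```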
static int wc_BuildEd448KeyDer(ed448_key* key, byte* output, word32 inLen,
int pubOut)
{
byte algoArray[MAX_ALGO_SZ];
byte ver[MAX_VERSION_SZ];
byte seq[MAX_SEQ_SZ];
int ret;
word32 idx = 0, seqSz, verSz, algoSz, privSz, pubSz = 0;
if (key == NULL || output == NULL || inLen == 0)
return BAD_FUNC_ARG;
if (pubOut) {
pubSz = 2 + 2 + ED448_PUB_KEY_SIZE;
}
privSz = 2 + 2 + ED448_KEY_SIZE;
algoSz = SetAlgoID(ED448k, algoArray, oidKeyType, 0);
verSz = SetMyVersion(0, ver, FALSE);
seqSz = SetSequence(verSz + algoSz + privSz + pubSz, seq);
if (seqSz + verSz + algoSz + privSz + pubSz > inLen)
return BAD_FUNC_ARG;
/* write out */
/* seq */
XMEMCPY(output + idx, seq, seqSz);
idx = seqSz;
/* ver */
XMEMCPY(output + idx, ver, verSz);
idx += verSz;
/* algo */
XMEMCPY(output + idx, algoArray, algoSz);
idx += algoSz;
/* privKey */
idx += SetOctetString(2 + ED448_KEY_SIZE, output + idx);
idx += SetOctetString(ED448_KEY_SIZE, output + idx);
ret = wc_ed448_export_private_only(key, output + idx, &privSz);
if (ret != 0)
return ret;
idx += privSz;
/* pubKey */
if (pubOut) {
idx += SetExplicit(1, 2 + ED448_PUB_KEY_SIZE, output + idx);
idx += SetOctetString(ED448_KEY_SIZE, output + idx);
ret = wc_ed448_export_public(key, output + idx, &pubSz);
if (ret != 0)
return ret;
idx += pubSz;
}
return idx;
} | 0 | [
"CWE-125",
"CWE-345"
]
| wolfssl | f93083be72a3b3d956b52a7ec13f307a27b6e093 | 181,760,056,919,349,070,000,000,000,000,000,000,000 | 52 | OCSP: improve handling of OCSP no check extension |
auto operator()(Args && ... args) const
-> decltype(format(str, std::forward<Args>(args)...)) {
return format(str, std::forward<Args>(args)...);
} | 0 | [
"CWE-134",
"CWE-119",
"CWE-787"
]
| fmt | 8cf30aa2be256eba07bb1cefb998c52326e846e7 | 148,139,970,660,657,350,000,000,000,000,000,000,000 | 4 | Fix segfault on complex pointer formatting (#642) |
static int cdrom_ioctl_select_speed(struct cdrom_device_info *cdi,
unsigned long arg)
{
cd_dbg(CD_DO_IOCTL, "entering CDROM_SELECT_SPEED\n");
if (!CDROM_CAN(CDC_SELECT_SPEED))
return -ENOSYS;
return cdi->ops->select_speed(cdi, arg);
} | 0 | [
"CWE-119",
"CWE-787"
]
| linux | 9de4ee40547fd315d4a0ed1dd15a2fa3559ad707 | 129,397,736,209,676,610,000,000,000,000,000,000,000 | 9 | cdrom: information leak in cdrom_ioctl_media_changed()
This cast is wrong. "cdi->capacity" is an int and "arg" is an unsigned
long. The way the check is written now, if one of the high 32 bits is
set then we could read outside the info->slots[] array.
This bug is pretty old and it predates git.
Reviewed-by: Christoph Hellwig <[email protected]>
Cc: [email protected]
Signed-off-by: Dan Carpenter <[email protected]>
Signed-off-by: Jens Axboe <[email protected]> |
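The shape of that fix in cdrom_ioctl_media_changed(), as a hedged sketch with context trimmed:
```
/* Sketch: compare in arg's full width. The old (unsigned int) cast
 * truncated the unsigned long, so arg == 0x100000000 compared as 0,
 * passed the check, and indexed info->slots[] out of bounds. */
if (arg >= cdi->capacity)  /* was: if ((unsigned int)arg >= cdi->capacity) */
        return -EINVAL;
```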
static void clear_empty_dir(struct ctl_dir *dir)
{
dir->header.ctl_table[0].child = NULL;
} | 0 | [
"CWE-20",
"CWE-399"
]
| linux | 93362fa47fe98b62e4a34ab408c4a418432e7939 | 338,238,023,951,604,980,000,000,000,000,000,000,000 | 5 | sysctl: Drop reference added by grab_header in proc_sys_readdir
Fixes CVE-2016-9191, proc_sys_readdir doesn't drop reference
added by grab_header when returning from the !dir_emit_dots path.
It can cause any path that calls unregister_sysctl_table to
wait forever.
The calltrace of CVE-2016-9191:
[ 5535.960522] Call Trace:
[ 5535.963265] [<ffffffff817cdaaf>] schedule+0x3f/0xa0
[ 5535.968817] [<ffffffff817d33fb>] schedule_timeout+0x3db/0x6f0
[ 5535.975346] [<ffffffff817cf055>] ? wait_for_completion+0x45/0x130
[ 5535.982256] [<ffffffff817cf0d3>] wait_for_completion+0xc3/0x130
[ 5535.988972] [<ffffffff810d1fd0>] ? wake_up_q+0x80/0x80
[ 5535.994804] [<ffffffff8130de64>] drop_sysctl_table+0xc4/0xe0
[ 5536.001227] [<ffffffff8130de17>] drop_sysctl_table+0x77/0xe0
[ 5536.007648] [<ffffffff8130decd>] unregister_sysctl_table+0x4d/0xa0
[ 5536.014654] [<ffffffff8130deff>] unregister_sysctl_table+0x7f/0xa0
[ 5536.021657] [<ffffffff810f57f5>] unregister_sched_domain_sysctl+0x15/0x40
[ 5536.029344] [<ffffffff810d7704>] partition_sched_domains+0x44/0x450
[ 5536.036447] [<ffffffff817d0761>] ? __mutex_unlock_slowpath+0x111/0x1f0
[ 5536.043844] [<ffffffff81167684>] rebuild_sched_domains_locked+0x64/0xb0
[ 5536.051336] [<ffffffff8116789d>] update_flag+0x11d/0x210
[ 5536.057373] [<ffffffff817cf61f>] ? mutex_lock_nested+0x2df/0x450
[ 5536.064186] [<ffffffff81167acb>] ? cpuset_css_offline+0x1b/0x60
[ 5536.070899] [<ffffffff810fce3d>] ? trace_hardirqs_on+0xd/0x10
[ 5536.077420] [<ffffffff817cf61f>] ? mutex_lock_nested+0x2df/0x450
[ 5536.084234] [<ffffffff8115a9f5>] ? css_killed_work_fn+0x25/0x220
[ 5536.091049] [<ffffffff81167ae5>] cpuset_css_offline+0x35/0x60
[ 5536.097571] [<ffffffff8115aa2c>] css_killed_work_fn+0x5c/0x220
[ 5536.104207] [<ffffffff810bc83f>] process_one_work+0x1df/0x710
[ 5536.110736] [<ffffffff810bc7c0>] ? process_one_work+0x160/0x710
[ 5536.117461] [<ffffffff810bce9b>] worker_thread+0x12b/0x4a0
[ 5536.123697] [<ffffffff810bcd70>] ? process_one_work+0x710/0x710
[ 5536.130426] [<ffffffff810c3f7e>] kthread+0xfe/0x120
[ 5536.135991] [<ffffffff817d4baf>] ret_from_fork+0x1f/0x40
[ 5536.142041] [<ffffffff810c3e80>] ? kthread_create_on_node+0x230/0x230
One cgroup maintainer mentioned that "cgroup is trying to offline
a cpuset css, which takes place under cgroup_mutex. The offlining
ends up trying to drain active usages of a sysctl table which apparently
is not happening."
The real reason is that proc_sys_readdir doesn't drop reference added
by grab_header when return from !dir_emit_dots path. So this cpuset
offline path will wait here forever.
See here for details: http://www.openwall.com/lists/oss-security/2016/11/04/13
Fixes: f0c3b5093add ("[readdir] convert procfs")
Cc: [email protected]
Reported-by: CAI Qian <[email protected]>
Tested-by: Yang Shukui <[email protected]>
Signed-off-by: Zhou Chengming <[email protected]>
Acked-by: Al Viro <[email protected]>
Signed-off-by: Eric W. Biederman <[email protected]> |
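The leak is the classic early-return-skips-the-put pattern. A simplified, hedged sketch of the corrected control flow in proc_sys_readdir():
```
/* Sketch: every successful grab_header() must reach
 * sysctl_head_finish(), including the dir_emit_dots() bail-out. */
head = grab_header(file_inode(file));
if (IS_ERR(head))
        return PTR_ERR(head);

if (!dir_emit_dots(file, ctx))
        goto out;               /* previously: return 0 -> leaked ref */

/* ... emit the sysctl entries ... */

out:
        sysctl_head_finish(head);  /* drop the reference on all paths */
        return 0;
```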
static CImg<T> tensor(const T& a0) {
return matrix(a0);
} | 0 | [
"CWE-770"
]
| cimg | 619cb58dd90b4e03ac68286c70ed98acbefd1c90 | 331,343,315,270,139,300,000,000,000,000,000,000,000 | 3 | CImg<>::load_bmp() and CImg<>::load_pandore(): Check that dimensions encoded in file do not exceed file size. |
static void rbd_acknowledge_notify_result(struct rbd_device *rbd_dev,
u64 notify_id, u64 cookie, s32 result)
{
dout("%s rbd_dev %p result %d\n", __func__, rbd_dev, result);
__rbd_acknowledge_notify(rbd_dev, notify_id, cookie, &result);
} | 0 | [
"CWE-863"
]
| linux | f44d04e696feaf13d192d942c4f14ad2e117065a | 261,590,273,624,828,640,000,000,000,000,000,000,000 | 6 | rbd: require global CAP_SYS_ADMIN for mapping and unmapping
It turns out that currently we rely only on sysfs attribute
permissions:
$ ll /sys/bus/rbd/{add*,remove*}
--w------- 1 root root 4096 Sep 3 20:37 /sys/bus/rbd/add
--w------- 1 root root 4096 Sep 3 20:37 /sys/bus/rbd/add_single_major
--w------- 1 root root 4096 Sep 3 20:37 /sys/bus/rbd/remove
--w------- 1 root root 4096 Sep 3 20:38 /sys/bus/rbd/remove_single_major
This means that images can be mapped and unmapped (i.e. block devices
can be created and deleted) by a UID 0 process even after it drops all
privileges or by any process with CAP_DAC_OVERRIDE in its user namespace
as long as UID 0 is mapped into that user namespace.
Be consistent with other virtual block devices (loop, nbd, dm, md, etc)
and require CAP_SYS_ADMIN in the initial user namespace for mapping and
unmapping, and also for dumping the configuration string and refreshing
the image header.
Cc: [email protected]
Signed-off-by: Ilya Dryomov <[email protected]>
Reviewed-by: Jeff Layton <[email protected]> |
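The hardening itself is a one-line gate. A hedged sketch of the pattern for one handler — the merged patch actually places the check inside do_rbd_add(), do_rbd_remove() and the related helpers:
```
static ssize_t add_store(struct bus_type *bus, const char *buf, size_t count)
{
        /* Require CAP_SYS_ADMIN in the initial user namespace, so a
         * process that merely mapped UID 0 into its own namespace
         * cannot create or delete block devices. */
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        return do_rbd_add(bus, buf, count);
}
```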
static void rt6_bind_peer(struct rt6_info *rt, int create)
{
struct inet_peer_base *base;
struct inet_peer *peer;
base = inetpeer_base_ptr(rt->_rt6i_peer);
if (!base)
return;
peer = inet_getpeer_v6(base, &rt->rt6i_dst.addr, create);
if (peer) {
if (!rt6_set_peer(rt, peer))
inet_putpeer(peer);
}
} | 0 | [
"CWE-119"
]
| net | c88507fbad8055297c1d1e21e599f46960cbee39 | 314,600,691,366,779,630,000,000,000,000,000,000,000 | 15 | ipv6: don't set DST_NOCOUNT for remotely added routes
DST_NOCOUNT should only be used if an authorized user adds routes
locally. In case of routes which are added on behalf of router
advertisments this flag must not get used as it allows an unlimited
number of routes getting added remotely.
Signed-off-by: Sabrina Dubroca <[email protected]>
Acked-by: Hannes Frederic Sowa <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
htmlDocDumpMemoryFormat(xmlDocPtr cur, xmlChar**mem, int *size, int format) {
xmlOutputBufferPtr buf;
xmlCharEncodingHandlerPtr handler = NULL;
const char *encoding;
xmlInitParser();
if ((mem == NULL) || (size == NULL))
return;
if (cur == NULL) {
*mem = NULL;
*size = 0;
return;
}
encoding = (const char *) htmlGetMetaEncoding(cur);
if (encoding != NULL) {
xmlCharEncoding enc;
enc = xmlParseCharEncoding(encoding);
if (enc != XML_CHAR_ENCODING_UTF8) {
handler = xmlFindCharEncodingHandler(encoding);
if (handler == NULL)
htmlSaveErr(XML_SAVE_UNKNOWN_ENCODING, NULL, encoding);
}
} else {
/*
* Fallback to HTML or ASCII when the encoding is unspecified
*/
if (handler == NULL)
handler = xmlFindCharEncodingHandler("HTML");
if (handler == NULL)
handler = xmlFindCharEncodingHandler("ascii");
}
buf = xmlAllocOutputBufferInternal(handler);
if (buf == NULL) {
*mem = NULL;
*size = 0;
return;
}
htmlDocContentDumpFormatOutput(buf, cur, NULL, format);
xmlOutputBufferFlush(buf);
if (buf->conv != NULL) {
*size = xmlBufUse(buf->conv);
*mem = xmlStrndup(xmlBufContent(buf->conv), *size);
} else {
*size = xmlBufUse(buf->buffer);
*mem = xmlStrndup(xmlBufContent(buf->buffer), *size);
}
(void)xmlOutputBufferClose(buf);
} | 0 | [
"CWE-79"
]
| libxml2 | c1ba6f54d32b707ca6d91cb3257ce9de82876b6f | 204,678,918,475,111,950,000,000,000,000,000,000,000 | 56 | Revert "Do not URI escape in server side includes"
This reverts commit 960f0e275616cadc29671a218d7fb9b69eb35588.
This commit introduced
- an infinite loop, found by OSS-Fuzz, which could be easily fixed.
- an algorithm with quadratic runtime
- a security issue, see
https://bugzilla.gnome.org/show_bug.cgi?id=769760
A better approach is to add an option not to escape URLs at all
which libxml2 should have possibly done in the first place. |
static int bpf_hash_map_seq_show(struct seq_file *seq, void *v)
{
return __bpf_hash_map_seq_show(seq, v);
} | 0 | [
"CWE-787"
]
| bpf | c4eb1f403243fc7bbb7de644db8587c03de36da6 | 284,631,449,068,577,800,000,000,000,000,000,000,000 | 4 | bpf: Fix integer overflow involving bucket_size
In __htab_map_lookup_and_delete_batch(), hash buckets are iterated
over to count the number of elements in each bucket (bucket_size).
If bucket_size is large enough, the multiplication to calculate
kvmalloc() size could overflow, resulting in out-of-bounds write
as reported by KASAN:
[...]
[ 104.986052] BUG: KASAN: vmalloc-out-of-bounds in __htab_map_lookup_and_delete_batch+0x5ce/0xb60
[ 104.986489] Write of size 4194224 at addr ffffc9010503be70 by task crash/112
[ 104.986889]
[ 104.987193] CPU: 0 PID: 112 Comm: crash Not tainted 5.14.0-rc4 #13
[ 104.987552] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.13.0-1ubuntu1.1 04/01/2014
[ 104.988104] Call Trace:
[ 104.988410] dump_stack_lvl+0x34/0x44
[ 104.988706] print_address_description.constprop.0+0x21/0x140
[ 104.988991] ? __htab_map_lookup_and_delete_batch+0x5ce/0xb60
[ 104.989327] ? __htab_map_lookup_and_delete_batch+0x5ce/0xb60
[ 104.989622] kasan_report.cold+0x7f/0x11b
[ 104.989881] ? __htab_map_lookup_and_delete_batch+0x5ce/0xb60
[ 104.990239] kasan_check_range+0x17c/0x1e0
[ 104.990467] memcpy+0x39/0x60
[ 104.990670] __htab_map_lookup_and_delete_batch+0x5ce/0xb60
[ 104.990982] ? __wake_up_common+0x4d/0x230
[ 104.991256] ? htab_of_map_free+0x130/0x130
[ 104.991541] bpf_map_do_batch+0x1fb/0x220
[...]
In hashtable, if the elements' keys have the same jhash() value, the
elements will be put into the same bucket. By putting a lot of elements
into a single bucket, the value of bucket_size can be increased to
trigger the integer overflow.
Triggering the overflow is possible for both callers with CAP_SYS_ADMIN
and callers without CAP_SYS_ADMIN.
It will be trivial for a caller with CAP_SYS_ADMIN to intentionally
reach this overflow by enabling BPF_F_ZERO_SEED. As this flag will set
the random seed passed to jhash() to 0, it will be easy for the caller
to prepare keys which will be hashed into the same value, and thus put
all the elements into the same bucket.
If the caller does not have CAP_SYS_ADMIN, BPF_F_ZERO_SEED cannot be
used. However, it will be still technically possible to trigger the
overflow, by guessing the random seed value passed to jhash() (32bit)
and repeating the attempt to trigger the overflow. In this case,
the probability to trigger the overflow will be low and will take
a very long time.
Fix the integer overflow by calling kvmalloc_array() instead of
kvmalloc() to allocate memory.
Fixes: 057996380a42 ("bpf: Add batch ops to all htab bpf map")
Signed-off-by: Tatsuhiko Yasumatsu <[email protected]>
Signed-off-by: Daniel Borkmann <[email protected]>
Link: https://lore.kernel.org/bpf/[email protected] |
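The repair swaps an unchecked multiply for the overflow-checking allocator. A hedged sketch of the changed allocations in __htab_map_lookup_and_delete_batch():
```
/* Before: kvmalloc(key_size * bucket_size, ...) -- the product can
 * wrap. After: kvmalloc_array() detects the overflow and returns
 * NULL instead of handing back an undersized buffer. */
keys = kvmalloc_array(key_size, bucket_size, GFP_USER | __GFP_NOWARN);
values = kvmalloc_array(value_size, bucket_size, GFP_USER | __GFP_NOWARN);
if (!keys || !values) {
        ret = -ENOMEM;
        goto after_loop;
}
```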
xdr_krb5_octet(XDR *xdrs, krb5_octet *objp)
{
if (!xdr_u_char(xdrs, objp))
return (FALSE);
return (TRUE);
} | 0 | [
"CWE-703"
]
| krb5 | a197e92349a4aa2141b5dff12e9dd44c2a2166e3 | 325,577,344,039,648,770,000,000,000,000,000,000,000 | 6 | Fix kadm5/gssrpc XDR double free [CVE-2014-9421]
[MITKRB5-SA-2015-001] In auth_gssapi_unwrap_data(), do not free
partial deserialization results upon failure to deserialize. This
responsibility belongs to the callers, svctcp_getargs() and
svcudp_getargs(); doing it in the unwrap function results in freeing
the results twice.
In xdr_krb5_tl_data() and xdr_krb5_principal(), null out the pointers
we are freeing, as other XDR functions such as xdr_bytes() and
xdr_string() do.
ticket: 8056 (new)
target_version: 1.13.1
tags: pullup |
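The second half of the fix is the null-after-free convention. A generic, hedged sketch (tl_data_contents is a real krb5_tl_data member; the surrounding logic is illustrative):
```
/* Sketch: once a member is freed, clear the pointer so a later
 * XDR_FREE pass from the caller's cleanup is a harmless no-op
 * rather than a double free. */
if (objp->tl_data_contents != NULL) {
    free(objp->tl_data_contents);
    objp->tl_data_contents = NULL;
}
```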
static int SRP_user_pwd_set_sv(SRP_user_pwd *vinfo, const char *s,
const char *v)
{
unsigned char tmp[MAX_LEN];
int len;
if (strlen(s) > MAX_LEN || strlen(v) > MAX_LEN)
return 0;
len = t_fromb64(tmp, v);
if (NULL == (vinfo->v = BN_bin2bn(tmp, len, NULL)))
return 0;
len = t_fromb64(tmp, s);
return ((vinfo->s = BN_bin2bn(tmp, len, NULL)) != NULL);
} | 0 | [
"CWE-399"
]
| openssl | 380f18ed5f140e0ae1b68f3ab8f4f7c395658d9e | 63,539,124,865,455,845,000,000,000,000,000,000,000 | 14 | CVE-2016-0798: avoid memory leak in SRP
The SRP user database lookup method SRP_VBASE_get_by_user had confusing
memory management semantics; the returned pointer was sometimes newly
allocated, and sometimes owned by the callee. The calling code has no
way of distinguishing these two cases.
Specifically, SRP servers that configure a secret seed to hide valid
login information are vulnerable to a memory leak: an attacker
connecting with an invalid username can cause a memory leak of around
300 bytes per connection.
Servers that do not configure SRP, or configure SRP but do not configure
a seed are not vulnerable.
In Apache, the seed directive is known as SSLSRPUnknownUserSeed.
To mitigate the memory leak, the seed handling in SRP_VBASE_get_by_user
is now disabled even if the user has configured a seed.
Applications are advised to migrate to SRP_VBASE_get1_by_user. However,
note that OpenSSL makes no strong guarantees about the
indistinguishability of valid and invalid logins. In particular,
computations are currently not carried out in constant time.
Reviewed-by: Rich Salz <[email protected]> |
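For callers, the migration advice translates to a sketch like this — SRP_VBASE_get1_by_user() always returns a caller-owned copy, so ownership is unambiguous (vbase and username are illustrative):
```
/* Sketch: the get1 variant transfers ownership unconditionally,
 * unlike the ambiguous SRP_VBASE_get_by_user(). */
SRP_user_pwd *user = SRP_VBASE_get1_by_user(vbase, username);
if (user != NULL) {
        /* ... use user->s, user->v ... */
        SRP_user_pwd_free(user);
}
```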
test_headers_bad (Test *test,
gconstpointer data)
{
GHashTable *headers;
headers = web_socket_util_new_headers ();
/* Bad version */
g_hash_table_insert (headers, g_strdup ("Cookie"), g_strdup ("CockpitAuth=v=1;k=blah"));
if (cockpit_auth_check_cookie (test->auth, "/cockpit", headers))
g_assert_not_reached ();
/* Bad hash */
g_hash_table_remove_all (headers);
g_hash_table_insert (headers, g_strdup ("Cookie"), g_strdup ("CockpitAuth=v=2;k=blah"));
if (cockpit_auth_check_cookie (test->auth, "/cockpit", headers))
g_assert_not_reached ();
g_hash_table_destroy (headers);
} | 1 | []
| cockpit | c51f6177576d7e12614c64d316cf0b67addd17c9 | 64,534,851,171,630,770,000,000,000,000,000,000,000 | 20 | ws: Fix bug parsing invalid base64 headers
The len parameter to g_base64_decode_inplace() is an in/out
parameter, and needs to be initialized. Let's just use
the simpler g_base64_decode() function. This fixes a segfault.
Closes #10819 |
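The bug class and its fix, as a hedged sketch ('value' stands in for the cookie payload):
```
/* Sketch: g_base64_decode() owns the length bookkeeping, so there is
 * no in/out parameter left uninitialized on entry. */
gsize len = 0;
guchar *decoded = g_base64_decode (value, &len);  /* len is out-only */
/* ... validate and use decoded[0..len) ... */
g_free (decoded);
```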
static OPJ_BYTE * opj_jp2_write_colr(opj_jp2_t *jp2,
OPJ_UINT32 * p_nb_bytes_written
)
{
/* room for 8 bytes for box 3 for common data and variable upon profile*/
OPJ_UINT32 l_colr_size = 11;
OPJ_BYTE * l_colr_data, * l_current_colr_ptr;
/* preconditions */
assert(jp2 != 00);
assert(p_nb_bytes_written != 00);
assert(jp2->meth == 1 || jp2->meth == 2);
switch (jp2->meth) {
case 1 :
l_colr_size += 4; /* EnumCS */
break;
case 2 :
assert(jp2->color.icc_profile_len); /* ICC profile */
l_colr_size += jp2->color.icc_profile_len;
break;
default :
return 00;
}
l_colr_data = (OPJ_BYTE *) opj_calloc(1, l_colr_size);
if (l_colr_data == 00) {
return 00;
}
l_current_colr_ptr = l_colr_data;
opj_write_bytes(l_current_colr_ptr, l_colr_size,
4); /* write box size */
l_current_colr_ptr += 4;
opj_write_bytes(l_current_colr_ptr, JP2_COLR, 4); /* BPCC */
l_current_colr_ptr += 4;
opj_write_bytes(l_current_colr_ptr, jp2->meth, 1); /* METH */
++l_current_colr_ptr;
opj_write_bytes(l_current_colr_ptr, jp2->precedence, 1); /* PRECEDENCE */
++l_current_colr_ptr;
opj_write_bytes(l_current_colr_ptr, jp2->approx, 1); /* APPROX */
++l_current_colr_ptr;
if (jp2->meth ==
1) { /* Meth value is restricted to 1 or 2 (Table I.9 of part 1) */
opj_write_bytes(l_current_colr_ptr, jp2->enumcs, 4);
} /* EnumCS */
else {
if (jp2->meth == 2) { /* ICC profile */
OPJ_UINT32 i;
for (i = 0; i < jp2->color.icc_profile_len; ++i) {
opj_write_bytes(l_current_colr_ptr, jp2->color.icc_profile_buf[i], 1);
++l_current_colr_ptr;
}
}
}
*p_nb_bytes_written = l_colr_size;
return l_colr_data;
} | 0 | [
"CWE-20"
]
| openjpeg | 4edb8c83374f52cd6a8f2c7c875e8ffacccb5fa5 | 153,261,204,552,851,750,000,000,000,000,000,000,000 | 66 | Add support for generation of PLT markers in encoder
* -PLT switch added to opj_compress
* Add a opj_encoder_set_extra_options() function that
accepts a PLT=YES option, and could be expanded later
for other uses.
-------
Testing with a Sentinel2 10m band, T36JTT_20160914T074612_B02.jp2,
coming from S2A_MSIL1C_20160914T074612_N0204_R135_T36JTT_20160914T081456.SAFE
Decompress it to TIFF:
```
opj_uncompress -i T36JTT_20160914T074612_B02.jp2 -o T36JTT_20160914T074612_B02.tif
```
Recompress it with similar parameters as original:
```
opj_compress -n 5 -c [256,256],[256,256],[256,256],[256,256],[256,256] -t 1024,1024 -PLT -i T36JTT_20160914T074612_B02.tif -o T36JTT_20160914T074612_B02_PLT.jp2
```
Dump codestream detail with GDAL dump_jp2.py utility (https://github.com/OSGeo/gdal/blob/master/gdal/swig/python/samples/dump_jp2.py)
```
python dump_jp2.py T36JTT_20160914T074612_B02.jp2 > /tmp/dump_sentinel2_ori.txt
python dump_jp2.py T36JTT_20160914T074612_B02_PLT.jp2 > /tmp/dump_sentinel2_openjpeg_plt.txt
```
The diff between both shows very similar structure, and an identical number of packets in the PLT markers
Now testing with Kakadu (KDU803_Demo_Apps_for_Linux-x86-64_200210)
Full file decompression:
```
kdu_expand -i T36JTT_20160914T074612_B02_PLT.jp2 -o tmp.tif
Consumed 121 tile-part(s) from a total of 121 tile(s).
Consumed 80,318,806 codestream bytes (excluding any file format) = 5.329697
bits/pel.
Processed using the multi-threaded environment, with
8 parallel threads of execution
```
Partial decompression (presumably using PLT markers):
```
kdu_expand -i T36JTT_20160914T074612_B02.jp2 -o tmp.pgm -region "{0.5,0.5},{0.01,0.01}"
kdu_expand -i T36JTT_20160914T074612_B02_PLT.jp2 -o tmp2.pgm -region "{0.5,0.5},{0.01,0.01}"
diff tmp.pgm tmp2.pgm && echo "same !"
```
-------
Funded by ESA for S2-MPC project |
static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
unsigned int access)
{
if (unlikely(is_mmio_spte(*sptep))) {
if (gfn != get_mmio_spte_gfn(*sptep)) {
mmu_spte_clear_no_track(sptep);
return true;
}
mark_mmio_spte(vcpu, sptep, gfn, access);
return true;
}
return false;
} | 0 | [
"CWE-476"
]
| linux | 9f46c187e2e680ecd9de7983e4d081c3391acc76 | 29,855,839,524,862,060,000,000,000,000,000,000,000 | 15 | KVM: x86/mmu: fix NULL pointer dereference on guest INVPCID
With shadow paging enabled, the INVPCID instruction results in a call
to kvm_mmu_invpcid_gva. If INVPCID is executed with CR0.PG=0, the
invlpg callback is not set and the result is a NULL pointer dereference.
Fix it trivially by checking for mmu->invlpg before every call.
There are other possibilities:
- check for CR0.PG, because KVM (like all Intel processors after P5)
flushes guest TLB on CR0.PG changes so that INVPCID/INVLPG are a
nop with paging disabled
- check for EFER.LMA, because KVM syncs and flushes when switching
MMU contexts outside of 64-bit mode
All of these are tricky, go for the simple solution. This is CVE-2022-1789.
Reported-by: Yongkang Jia <[email protected]>
Cc: [email protected]
Signed-off-by: Paolo Bonzini <[email protected]> |
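The trivial fix is a guard on the optional callback. A hedged sketch (root-field naming varies across kernel versions):
```
/* Sketch: with shadow paging and CR0.PG=0 the guest context never
 * installed ->invlpg, so the indirect call must be conditional. */
if (mmu->invlpg)
        mmu->invlpg(vcpu, gva, mmu->root.hpa);
```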
static int vcmp(struct mg_str s1, const char *s2) {
// LOG(LL_INFO, ("->%.*s<->%s<- %d %d %d", (int) s1.len, s1.ptr, s2,
//(int) s1.len, strncmp(s1.ptr, s2, s1.len), mg_vcmp(&s1, s2)));
return mg_vcmp(&s1, s2) == 0;
} | 0 | [
"CWE-552"
]
| mongoose | c65c8fdaaa257e0487ab0aaae9e8f6b439335945 | 164,917,214,768,770,980,000,000,000,000,000,000,000 | 5 | Protect against the directory traversal in mg_upload() |
PHP_FUNCTION(openssl_private_decrypt)
{
zval **key, *crypted;
EVP_PKEY *pkey;
int cryptedlen;
unsigned char *cryptedbuf = NULL;
unsigned char *crypttemp;
int successful = 0;
long padding = RSA_PKCS1_PADDING;
long keyresource = -1;
char * data;
int data_len;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "szZ|l", &data, &data_len, &crypted, &key, &padding) == FAILURE) {
return;
}
RETVAL_FALSE;
pkey = php_openssl_evp_from_zval(key, 0, "", 0, &keyresource TSRMLS_CC);
if (pkey == NULL) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "key parameter is not a valid private key");
RETURN_FALSE;
}
cryptedlen = EVP_PKEY_size(pkey);
crypttemp = emalloc(cryptedlen + 1);
switch (pkey->type) {
case EVP_PKEY_RSA:
case EVP_PKEY_RSA2:
cryptedlen = RSA_private_decrypt(data_len,
(unsigned char *)data,
crypttemp,
pkey->pkey.rsa,
padding);
if (cryptedlen != -1) {
cryptedbuf = emalloc(cryptedlen + 1);
memcpy(cryptedbuf, crypttemp, cryptedlen);
successful = 1;
}
break;
default:
php_error_docref(NULL TSRMLS_CC, E_WARNING, "key type not supported in this PHP build!");
}
efree(crypttemp);
if (successful) {
zval_dtor(crypted);
cryptedbuf[cryptedlen] = '\0';
ZVAL_STRINGL(crypted, (char *)cryptedbuf, cryptedlen, 0);
cryptedbuf = NULL;
RETVAL_TRUE;
}
if (keyresource == -1) {
EVP_PKEY_free(pkey);
}
if (cryptedbuf) {
efree(cryptedbuf);
}
} | 0 | [
"CWE-200"
]
| php-src | 270a406ac94b5fc5cc9ef59fc61e3b4b95648a3e | 221,612,067,843,119,240,000,000,000,000,000,000,000 | 62 | Fix bug #61413 ext\openssl\tests\openssl_encrypt_crash.phpt fails 5.3 only |
uint64_t length() const override { return size_; } | 0 | [
"CWE-401"
]
| envoy | 5eba69a1f375413fb93fab4173f9c393ac8c2818 | 254,496,505,168,163,900,000,000,000,000,000,000,000 | 1 | [buffer] Add on-drain hook to buffer API and use it to avoid fragmentation due to tracking of H2 data and control frames in the output buffer (#144)
Signed-off-by: antonio <[email protected]> |
int git_index_set_caps(git_index *index, int caps)
{
unsigned int old_ignore_case;
assert(index);
old_ignore_case = index->ignore_case;
if (caps == GIT_INDEXCAP_FROM_OWNER) {
git_repository *repo = INDEX_OWNER(index);
int val;
if (!repo)
return create_index_error(
-1, "cannot access repository to set index caps");
if (!git_repository__cvar(&val, repo, GIT_CVAR_IGNORECASE))
index->ignore_case = (val != 0);
if (!git_repository__cvar(&val, repo, GIT_CVAR_FILEMODE))
index->distrust_filemode = (val == 0);
if (!git_repository__cvar(&val, repo, GIT_CVAR_SYMLINKS))
index->no_symlinks = (val == 0);
}
else {
index->ignore_case = ((caps & GIT_INDEXCAP_IGNORE_CASE) != 0);
index->distrust_filemode = ((caps & GIT_INDEXCAP_NO_FILEMODE) != 0);
index->no_symlinks = ((caps & GIT_INDEXCAP_NO_SYMLINKS) != 0);
}
if (old_ignore_case != index->ignore_case) {
git_index__set_ignore_case(index, (bool)index->ignore_case);
}
return 0;
} | 0 | [
"CWE-415",
"CWE-190"
]
| libgit2 | 3db1af1f370295ad5355b8f64b865a2a357bcac0 | 336,049,593,969,769,940,000,000,000,000,000,000,000 | 35 | index: error out on unreasonable prefix-compressed path lengths
When computing the complete path length from the encoded
prefix-compressed path, we end up just allocating the complete path
without ever checking what the encoded path length actually is. This can
easily lead to a denial of service by just encoding an unreasonably long
path name inside of the index. Git already enforces a maximum path
length of 4096 bytes. As we also have that enforcement ready in some
places, just make sure that the resulting path is smaller than
GIT_PATH_MAX.
Reported-by: Krishna Ram Prakash R <[email protected]>
Reported-by: Vivek Parikh <[email protected]> |
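The enforcement is a sanity bound on the decoded length before any allocation. A hedged sketch using the libgit2 error API of that era (GIT_PATH_MAX is its internal 4096-byte cap; path_len is illustrative):
```
/* Sketch: reject absurd prefix-compressed path lengths up front,
 * instead of allocating an attacker-controlled size. */
if (path_len >= GIT_PATH_MAX) {
        giterr_set(GITERR_INDEX, "unreasonable path length");
        return -1;
}
```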
const char *MACH0_(get_os)(struct MACH0_(obj_t) * bin) {
if (bin) {
switch (bin->os) {
case 1: return "macos";
case 2: return "ios";
case 3: return "watchos";
case 4: return "tvos";
}
}
return "darwin";
} | 0 | [
"CWE-787"
]
| rizin | 348b1447d1452f978b69631d6de5b08dd3bdf79d | 191,548,780,803,580,840,000,000,000,000,000,000,000 | 11 | fix #2956 - oob write in mach0.c |
static int auth_server_input_done(struct auth_server_connection *conn)
{
if (array_count(&conn->available_auth_mechs) == 0) {
i_error("BUG: Authentication server returned no mechanisms");
return -1;
}
if (conn->cookie == NULL) {
i_error("BUG: Authentication server didn't send a cookie");
return -1;
}
timeout_remove(&conn->to);
conn->handshake_received = TRUE;
if (conn->client->connect_notify_callback != NULL) {
conn->client->connect_notify_callback(conn->client, TRUE,
conn->client->connect_notify_context);
}
return 0;
} | 0 | []
| core | a9b135760aea6d1790d447d351c56b78889dac22 | 11,819,917,078,706,325,000,000,000,000,000,000,000 | 20 | lib-auth: Remove request after abort
Otherwise the request will still stay in the hash table
and get dereferenced when all requests are aborted,
causing an attempt to access freed memory.
Found by Apollon Oikonomopoulos <[email protected]>
Broken in 1a29ed2f96da1be22fa5a4d96c7583aa81b8b060 |
static int build_sadinfo(struct sk_buff *skb, struct net *net,
u32 pid, u32 seq, u32 flags)
{
struct xfrmk_sadinfo si;
struct xfrmu_sadhinfo sh;
struct nlmsghdr *nlh;
int err;
u32 *f;
nlh = nlmsg_put(skb, pid, seq, XFRM_MSG_NEWSADINFO, sizeof(u32), 0);
if (nlh == NULL) /* shouldn't really happen ... */
return -EMSGSIZE;
f = nlmsg_data(nlh);
*f = flags;
xfrm_sad_getinfo(net, &si);
sh.sadhmcnt = si.sadhmcnt;
sh.sadhcnt = si.sadhcnt;
err = nla_put_u32(skb, XFRMA_SAD_CNT, si.sadcnt);
if (!err)
err = nla_put(skb, XFRMA_SAD_HINFO, sizeof(sh), &sh);
if (err) {
nlmsg_cancel(skb, nlh);
return err;
}
return nlmsg_end(skb, nlh);
} | 0 | [
"CWE-200"
]
| linux | 1f86840f897717f86d523a13e99a447e6a5d2fa5 | 10,647,489,033,546,172,000,000,000,000,000,000,000 | 30 | xfrm_user: fix info leak in copy_to_user_tmpl()
The memory used for the template copy is a local stack variable. As
struct xfrm_user_tmpl contains multiple holes added by the compiler for
alignment, not initializing the memory will lead to leaking stack bytes
to userland. Add an explicit memset(0) to avoid the info leak.
Initial version of the patch by Brad Spengler.
Cc: Brad Spengler <[email protected]>
Signed-off-by: Mathias Krause <[email protected]>
Acked-by: Steffen Klassert <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
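The canonical fix for struct-padding leaks, sketched from copy_to_user_tmpl()'s loop body (hedged; variable names follow the kernel code):
```
/* Sketch: field assignments never touch compiler-inserted padding,
 * so without the memset() those holes carry stale stack bytes into
 * the netlink copy toward userspace. */
struct xfrm_user_tmpl *up = &vec[i];

memset(up, 0, sizeof(*up));
memcpy(&up->id, &kp->id, sizeof(up->id));
up->family = kp->encap_family;
```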
Tfloat __distance_eikonal(const CImg<Tfloat>& res, const Tfloat P,
const int x=0, const int y=0, const int z=0) const {
const Tfloat M = (Tfloat)cimg::type<T>::max();
T T1 = (T)std::min(x - 1>=0?res(x - 1,y,z):M,x + 1<width()?res(x + 1,y,z):M);
Tfloat root = 0;
if (_depth>1) { // 3D
T
T2 = (T)std::min(y - 1>=0?res(x,y - 1,z):M,y + 1<height()?res(x,y + 1,z):M),
T3 = (T)std::min(z - 1>=0?res(x,y,z - 1):M,z + 1<depth()?res(x,y,z + 1):M);
if (T1>T2) cimg::swap(T1,T2);
if (T2>T3) cimg::swap(T2,T3);
if (T1>T2) cimg::swap(T1,T2);
if (P<=0) return (Tfloat)T1;
if (T3<M && ___distance_eikonal(3,-2*(T1 + T2 + T3),T1*T1 + T2*T2 + T3*T3 - P*P,root))
return std::max((Tfloat)T3,root);
if (T2<M && ___distance_eikonal(2,-2*(T1 + T2),T1*T1 + T2*T2 - P*P,root))
return std::max((Tfloat)T2,root);
return P + T1;
} else if (_height>1) { // 2D
T T2 = (T)std::min(y - 1>=0?res(x,y - 1,z):M,y + 1<height()?res(x,y + 1,z):M);
if (T1>T2) cimg::swap(T1,T2);
if (P<=0) return (Tfloat)T1;
if (T2<M && ___distance_eikonal(2,-2*(T1 + T2),T1*T1 + T2*T2 - P*P,root))
return std::max((Tfloat)T2,root);
return P + T1;
} else { // 1D
if (P<=0) return (Tfloat)T1;
return P + T1;
}
return 0;
} | 0 | [
"CWE-770"
]
| cimg | 619cb58dd90b4e03ac68286c70ed98acbefd1c90 | 334,970,538,104,193,500,000,000,000,000,000,000,000 | 31 | CImg<>::load_bmp() and CImg<>::load_pandore(): Check that dimensions encoded in file do not exceed file size. |
struct virtio_net_hdr *vmxnet_tx_pkt_get_vhdr(struct VmxnetTxPkt *pkt)
{
assert(pkt);
return &pkt->virt_hdr;
} | 0 | [
"CWE-20"
]
| qemu | a7278b36fcab9af469563bd7b9dadebe2ae25e48 | 60,264,235,311,063,700,000,000,000,000,000,000,000 | 5 | net/vmxnet3: Refine l2 header validation
Validation of the L2 header length assumed a minimal packet size of
eth_header + 2 * vlan_header, regardless of the actual protocol.
This caused a crash for valid non-IP packets shorter than 22 bytes, as
'tx_pkt->packet_type' hasn't been assigned for such packets, and
'vmxnet3_on_tx_done_update_stats()' expects it to be properly set.
Refine header length validation in 'vmxnet_tx_pkt_parse_headers'.
Check its return value during packet processing flow.
As a side effect, in case of IPv4 or IPv6 header validation failure,
corrupt packets will be dropped.
Signed-off-by: Dana Rubin <[email protected]>
Signed-off-by: Shmulik Ladkani <[email protected]>
Signed-off-by: Jason Wang <[email protected]> |
int security_quotactl(int cmds, int type, int id, struct super_block *sb)
{
return security_ops->quotactl(cmds, type, id, sb);
} | 0 | []
| linux-2.6 | ee18d64c1f632043a02e6f5ba5e045bb26a5465f | 122,447,804,532,817,360,000,000,000,000,000,000,000 | 4 | KEYS: Add a keyctl to install a process's session keyring on its parent [try #6]
Add a keyctl to install a process's session keyring onto its parent. This
replaces the parent's session keyring. Because the COW credential code does
not permit one process to change another process's credentials directly, the
change is deferred until userspace next starts executing again. Normally this
will be after a wait*() syscall.
To support this, three new security hooks have been provided:
cred_alloc_blank() to allocate unset security creds, cred_transfer() to fill in
the blank security creds and key_session_to_parent() - which asks the LSM if
the process may replace its parent's session keyring.
The replacement may only happen if the process has the same ownership details
as its parent, and the process has LINK permission on the session keyring, and
the session keyring is owned by the process, and the LSM permits it.
Note that this requires alteration to each architecture's notify_resume path.
This has been done for all arches barring blackfin, m68k* and xtensa, all of
which need assembly alteration to support TIF_NOTIFY_RESUME. This allows the
replacement to be performed at the point the parent process resumes userspace
execution.
This allows the userspace AFS pioctl emulation to fully emulate newpag() and
the VIOCSETTOK and VIOCSETTOK2 pioctls, all of which require the ability to
alter the parent process's PAG membership. However, since kAFS doesn't use
PAGs per se, but rather dumps the keys into the session keyring, the session
keyring of the parent must be replaced if, for example, VIOCSETTOK is passed
the newpag flag.
This can be tested with the following program:
#include <stdio.h>
#include <stdlib.h>
#include <keyutils.h>
#define KEYCTL_SESSION_TO_PARENT 18
#define OSERROR(X, S) do { if ((long)(X) == -1) { perror(S); exit(1); } } while(0)
int main(int argc, char **argv)
{
key_serial_t keyring, key;
long ret;
keyring = keyctl_join_session_keyring(argv[1]);
OSERROR(keyring, "keyctl_join_session_keyring");
key = add_key("user", "a", "b", 1, keyring);
OSERROR(key, "add_key");
ret = keyctl(KEYCTL_SESSION_TO_PARENT);
OSERROR(ret, "KEYCTL_SESSION_TO_PARENT");
return 0;
}
Compiled and linked with -lkeyutils, you should see something like:
[dhowells@andromeda ~]$ keyctl show
Session Keyring
-3 --alswrv 4043 4043 keyring: _ses
355907932 --alswrv 4043 -1 \_ keyring: _uid.4043
[dhowells@andromeda ~]$ /tmp/newpag
[dhowells@andromeda ~]$ keyctl show
Session Keyring
-3 --alswrv 4043 4043 keyring: _ses
1055658746 --alswrv 4043 4043 \_ user: a
[dhowells@andromeda ~]$ /tmp/newpag hello
[dhowells@andromeda ~]$ keyctl show
Session Keyring
-3 --alswrv 4043 4043 keyring: hello
340417692 --alswrv 4043 4043 \_ user: a
Where the test program creates a new session keyring, sticks a user key named
'a' into it and then installs it on its parent.
Signed-off-by: David Howells <[email protected]>
Signed-off-by: James Morris <[email protected]> |
void ip_cleanup(Slirp *slirp)
{
udp_cleanup(slirp);
tcp_cleanup(slirp);
icmp_cleanup(slirp);
} | 0 | [
"CWE-787"
]
| libslirp | 126c04acbabd7ad32c2b018fe10dfac2a3bc1210 | 129,387,115,945,912,480,000,000,000,000,000,000,000 | 6 | Fix heap overflow in ip_reass on big packet input
When the first fragment does not fit in the preallocated buffer, q will
already be pointing to the ext buffer, so we mustn't try to update it.
Signed-off-by: Samuel Thibault <[email protected]> |
jas_iccprof_t *jas_iccprof_load(jas_stream_t *in)
{
jas_iccprof_t *prof;
int numtags;
long curoff;
long reloff;
long prevoff;
jas_iccsig_t type;
jas_iccattrval_t *attrval;
jas_iccattrval_t *prevattrval;
jas_icctagtabent_t *tagtabent;
int i;
int len;
prof = 0;
attrval = 0;
if (!(prof = jas_iccprof_create())) {
goto error;
}
if (jas_iccprof_readhdr(in, &prof->hdr)) {
jas_eprintf("cannot get header\n");
goto error;
}
if (jas_iccprof_gettagtab(in, &prof->tagtab)) {
jas_eprintf("cannot get tab table\n");
goto error;
}
jas_iccprof_sorttagtab(&prof->tagtab);
numtags = prof->tagtab.numents;
curoff = JAS_ICC_HDRLEN + 4 + 12 * numtags;
prevoff = 0;
prevattrval = 0;
for (i = 0; i < numtags; ++i) {
tagtabent = &prof->tagtab.ents[i];
if (tagtabent->off == JAS_CAST(jas_iccuint32_t, prevoff)) {
if (prevattrval) {
if (!(attrval = jas_iccattrval_clone(prevattrval)))
goto error;
if (jas_iccprof_setattr(prof, tagtabent->tag, attrval))
goto error;
jas_iccattrval_destroy(attrval);
attrval = 0;
} else {
#if 0
jas_eprintf("warning: skipping unknown tag type\n");
#endif
}
continue;
}
reloff = tagtabent->off - curoff;
if (reloff > 0) {
if (jas_stream_gobble(in, reloff) != reloff)
goto error;
curoff += reloff;
} else if (reloff < 0) {
/* This should never happen since we read the tagged
element data in a single pass. */
abort();
}
prevoff = curoff;
if (jas_iccgetuint32(in, &type)) {
goto error;
}
if (jas_stream_gobble(in, 4) != 4) {
goto error;
}
curoff += 8;
if (!jas_iccattrvalinfo_lookup(type)) {
#if 0
jas_eprintf("warning: skipping unknown tag type\n");
#endif
prevattrval = 0;
continue;
}
if (!(attrval = jas_iccattrval_create(type))) {
goto error;
}
len = tagtabent->len - 8;
if ((*attrval->ops->input)(attrval, in, len)) {
goto error;
}
curoff += len;
if (jas_iccprof_setattr(prof, tagtabent->tag, attrval)) {
goto error;
}
prevattrval = attrval; /* This is correct, but slimey. */
jas_iccattrval_destroy(attrval);
attrval = 0;
}
return prof;
error:
if (prof)
jas_iccprof_destroy(prof);
if (attrval)
jas_iccattrval_destroy(attrval);
return 0;
} | 0 | [
"CWE-189"
]
| jasper | 3c55b399c36ef46befcb21e4ebc4799367f89684 | 90,680,570,879,911,600,000,000,000,000,000,000,000 | 102 | At many places in the code, jas_malloc or jas_realloc was being
invoked with the size argument being computed in a manner that would not
allow integer overflow to be detected. Now, these places in the code
have been modified to use special-purpose memory allocation functions
(e.g., jas_alloc2, jas_alloc3, jas_realloc2) that check for overflow.
This should fix many security problems. |
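The shape of those helpers is a guarded multiply. An illustrative reimplementation of the idea behind jas_alloc2() — a sketch, not JasPer's code:
```
#include <stdint.h>
#include <stdlib.h>

/* Fail cleanly when n * sz would overflow size_t, instead of
 * returning an undersized buffer that later code overruns. */
void *alloc2_sketch(size_t n, size_t sz)
{
        if (sz != 0 && n > SIZE_MAX / sz)
                return NULL;    /* multiplication would overflow */
        return malloc(n * sz);
}
```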