Dataset schema (column name, dtype, and value range as reported by the dataset viewer):

| func | target | cwe | project | commit_id | hash | size | message |
|---|---|---|---|---|---|---|---|
| string, lengths 0–484k | int64, values 0–1 | list, lengths 0–4 | string, 799 classes | string, length 40 | float64, min ≈1,215,700,430,453,689,100,000,000, max ≈340,281,914,521,452,260,000,000,000,000 | int64, 1–24k | string, lengths 0–13.3k |
static SECURITY_STATUS SEC_ENTRY kerberos_SetContextAttributesW(PCtxtHandle phContext,
ULONG ulAttribute, void* pBuffer,
ULONG cbBuffer)
{
return SEC_E_UNSUPPORTED_FUNCTION;
}
| 0 | [] | FreeRDP | 479e891545473f01c187daffdfa05fc752b54b72 | 12,237,322,617,410,103,000,000,000,000,000,000,000 | 6 |
check return values for SetCredentialsAttributes, throw warnings for unsupported attributes
|
static uint16_t nvme_map_addr_cmb(NvmeCtrl *n, QEMUIOVector *iov, hwaddr addr,
size_t len)
{
if (!len) {
return NVME_SUCCESS;
}
trace_pci_nvme_map_addr_cmb(addr, len);
if (!nvme_addr_is_cmb(n, addr) || !nvme_addr_is_cmb(n, addr + len - 1)) {
return NVME_DATA_TRAS_ERROR;
}
qemu_iovec_add(iov, nvme_addr_to_cmb(n, addr), len);
return NVME_SUCCESS;
}
| 0 | [] | qemu | 736b01642d85be832385063f278fe7cd4ffb5221 | 189,829,740,180,420,900,000,000,000,000,000,000,000 | 17 |
hw/nvme: fix CVE-2021-3929
This fixes CVE-2021-3929 "locally" by denying DMA to the iomem of the
device itself. This still allows DMA to MMIO regions of other devices
(e.g. doing P2P DMA to the controller memory buffer of another NVMe
device).
Fixes: CVE-2021-3929
Reported-by: Qiuhao Li <[email protected]>
Reviewed-by: Keith Busch <[email protected]>
Reviewed-by: Philippe Mathieu-Daudé <[email protected]>
Signed-off-by: Klaus Jensen <[email protected]>
|
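The commit message describes the shape of the fix: DMA whose target falls inside the controller's own register space is refused, while P2P DMA to other devices stays allowed. A minimal sketch of such a guard is below; the `bar0_addr`/`bar0_size` fields are assumptions for illustration, not QEMU's actual structure layout.

```c
/* Sketch only: deny DMA that overlaps this controller's own BAR0
 * iomem (field names are hypothetical, not the exact QEMU fix). */
static bool nvme_addr_hits_own_iomem(NvmeCtrl *n, hwaddr addr, size_t len)
{
    hwaddr lo = n->bar0_addr;          /* assumed: BAR0 base address */
    hwaddr hi = lo + n->bar0_size;     /* assumed: BAR0 region size  */

    return addr < hi && addr + len > lo;   /* any overlap => deny */
}
```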
TEST_F(RenameCollectionTest, RenameCollectionToItselfByNsForApplyOps) {
auto dbName = _sourceNss.db().toString();
auto uuid = _createCollectionWithUUID(_opCtx.get(), _sourceNss);
auto uuidDoc = BSON("ui" << uuid);
auto cmd = BSON("renameCollection" << _sourceNss.ns() << "to" << _sourceNss.ns() << "dropTarget"
<< true);
ASSERT_OK(renameCollectionForApplyOps(_opCtx.get(), dbName, uuidDoc["ui"], cmd, {}));
ASSERT_TRUE(_collectionExists(_opCtx.get(), _sourceNss));
}
| 0 | ["CWE-20"] | mongo | 35c1b1f588f04926a958ad2fe4d9c59d79f81e8b | 245,739,026,813,236,560,000,000,000,000,000,000,000 | 9 |
SERVER-35636 renameCollectionForApplyOps checks for complete namespace
|
static int create_user_core(int user_core_fd, pid_t pid, off_t ulimit_c)
{
if (user_core_fd >= 0)
{
off_t core_size = copyfd_size(STDIN_FILENO, user_core_fd, ulimit_c, COPYFD_SPARSE);
if (fsync(user_core_fd) != 0 || close(user_core_fd) != 0 || core_size < 0)
{
/* perror first, otherwise unlink may trash errno */
perror_msg("Error writing '%s'", full_core_basename);
xchdir(user_pwd);
unlink(core_basename);
return 1;
}
if (ulimit_c == 0 || core_size > ulimit_c)
{
xchdir(user_pwd);
unlink(core_basename);
return 1;
}
log_notice("Saved core dump of pid %lu to %s (%llu bytes)", (long)pid, full_core_basename, (long long)core_size);
}
return 0;
}
| 0 | ["CWE-362"] | abrt | a6cdfd6a16251447264d203e145624a96fa811e3 | 305,681,738,374,600,500,000,000,000,000,000,000,000 | 24 |
ccpp: add support for containers
A process is considered to be containerized when:
- has the 'container' env variable set to some value
- or has the 'container_uuid' env variable set to some value
- or has remounted /
Signed-off-by: Jakub Filak <[email protected]>
|
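The message lists three containerization signals. A self-contained sketch of the environment-variable part of that heuristic (the "remounted /" signal is omitted here):

```c
#include <stdbool.h>
#include <stdlib.h>

/* A process counts as containerized when either variable is set to
 * some (non-empty) value, per the commit message above. */
static bool looks_containerized(void)
{
    const char *c = getenv("container");
    const char *u = getenv("container_uuid");

    return (c != NULL && *c != '\0') || (u != NULL && *u != '\0');
}
```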
qb_ipcs_connection_stats_get(qb_ipcs_connection_t * c,
struct qb_ipcs_connection_stats * stats,
int32_t clear_after_read)
{
if (c == NULL) {
return -EINVAL;
}
memcpy(stats, &c->stats, sizeof(struct qb_ipcs_connection_stats));
if (clear_after_read) {
memset(&c->stats, 0, sizeof(struct qb_ipcs_connection_stats_2));
c->stats.client_pid = c->pid;
}
return 0;
}
| 0 | ["CWE-59"] | libqb | e322e98dc264bc5911d6fe1d371e55ac9f95a71e | 82,738,685,806,581,020,000,000,000,000,000,000,000 | 14 |
ipc: use O_EXCL on SHM files, and randomize the names
Signed-off-by: Christine Caulfield <[email protected]>
|
_load_job_limits(void)
{
List steps;
ListIterator step_iter;
step_loc_t *stepd;
int fd;
job_mem_limits_t *job_limits_ptr;
slurmstepd_mem_info_t stepd_mem_info;
if (!job_limits_list)
job_limits_list = list_create(_job_limits_free);
job_limits_loaded = true;
steps = stepd_available(conf->spooldir, conf->node_name);
step_iter = list_iterator_create(steps);
while ((stepd = list_next(step_iter))) {
job_limits_ptr = list_find_first(job_limits_list,
_step_limits_match, stepd);
if (job_limits_ptr) /* already processed */
continue;
fd = stepd_connect(stepd->directory, stepd->nodename,
stepd->jobid, stepd->stepid,
&stepd->protocol_version);
if (fd == -1)
continue; /* step completed */
if (stepd_get_mem_limits(fd, stepd->protocol_version,
&stepd_mem_info) != SLURM_SUCCESS) {
error("Error reading step %u.%u memory limits from "
"slurmstepd",
stepd->jobid, stepd->stepid);
close(fd);
continue;
}
if ((stepd_mem_info.job_mem_limit
|| stepd_mem_info.step_mem_limit)) {
/* create entry for this job */
job_limits_ptr = xmalloc(sizeof(job_mem_limits_t));
job_limits_ptr->job_id = stepd->jobid;
job_limits_ptr->step_id = stepd->stepid;
job_limits_ptr->job_mem =
stepd_mem_info.job_mem_limit;
job_limits_ptr->step_mem =
stepd_mem_info.step_mem_limit;
#if _LIMIT_INFO
info("RecLim step:%u.%u job_mem:%"PRIu64""
" step_mem:%"PRIu64"",
job_limits_ptr->job_id, job_limits_ptr->step_id,
job_limits_ptr->job_mem,
job_limits_ptr->step_mem);
#endif
list_append(job_limits_list, job_limits_ptr);
}
close(fd);
}
list_iterator_destroy(step_iter);
FREE_NULL_LIST(steps);
}
| 0 | ["CWE-20"] | slurm | df545955e4f119974c278bff0c47155257d5afc7 | 319,244,206,858,494,900,000,000,000,000,000,000,000 | 60 |
Validate gid and user_name values provided to slurmd up front.
Do not defer until later, and do not potentially miss out on proper
validation of the user_name field which can lead to improper authentication
handling.
CVE-2018-10995.
|
BSONObj spec() {
return BSON("$and" << BSON_ARRAY(0 << 1 << "$a"));
}
| 0 | ["CWE-835"] | mongo | 0a076417d1d7fba3632b73349a1fd29a83e68816 | 314,920,896,992,789,550,000,000,000,000,000,000,000 | 3 |
SERVER-38070 fix infinite loop in agg expression
|
add_watchdog_timer (GSWindow *window,
glong timeout)
{
window->priv->watchdog_timer_id = g_timeout_add_seconds (timeout,
(GSourceFunc)watchdog_timer,
window);
}
| 0 | ["CWE-284"] | cinnamon-screensaver | da7af55f1fa966c52e15cc288d4f8928eca8cc9f | 337,711,463,323,879,560,000,000,000,000,000,000,000 | 7 |
Workaround gtk3 bug, don't allow GtkWindow to handle popup_menu.
|
g_file_exist(const char* filename)
{
#if defined(_WIN32)
return 0; // use FileAge(filename) <> -1
#else
return access(filename, F_OK) == 0;
#endif
}
| 0 | [] | xrdp | d8f9e8310dac362bb9578763d1024178f94f4ecc | 23,417,582,599,938,438,000,000,000,000,000,000,000 | 8 |
move temp files from /tmp to /tmp/.xrdp
|
static void *input_devices_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
return seq_list_next(v, &input_dev_list, pos);
}
| 0 | ["CWE-703", "CWE-787"] | linux | cb222aed03d798fc074be55e59d9a112338ee784 | 295,222,353,194,265,000,000,000,000,000,000,000,000 | 4 |
Input: add safety guards to input_set_keycode()
If we happen to have a garbage in input device's keycode table with values
too big we'll end up doing clear_bit() with offset way outside of our
bitmaps, damaging other objects within an input device or even outside of
it. Let's add sanity checks to the returned old keycodes.
Reported-by: [email protected]
Reported-by: [email protected]
Link: https://lore.kernel.org/r/20191207212757.GA245964@dtor-ws
Signed-off-by: Dmitry Torokhov <[email protected]>
|
redraw_after_callback(int call_update_screen)
{
++redrawing_for_callback;
if (State == HITRETURN || State == ASKMORE)
; // do nothing
else if (State & CMDLINE)
{
// Don't redraw when in prompt_for_number().
if (cmdline_row > 0)
{
// Redrawing only works when the screen didn't scroll. Don't clear
// wildmenu entries.
if (msg_scrolled == 0
#ifdef FEAT_WILDMENU
&& wild_menu_showing == 0
#endif
&& call_update_screen)
update_screen(0);
// Redraw in the same position, so that the user can continue
// editing the command.
redrawcmdline_ex(FALSE);
}
}
else if (State & (NORMAL | INSERT | TERMINAL))
{
// keep the command line if possible
update_screen(VALID_NO_UPDATE);
setcursor();
if (msg_scrolled == 0)
{
// don't want a hit-enter prompt when something else is displayed
msg_didany = FALSE;
need_wait_return = FALSE;
}
}
cursor_on();
#ifdef FEAT_GUI
if (gui.in_use && !gui_mch_is_blink_off())
// Don't update the cursor when it is blinking and off to avoid
// flicker.
out_flush_cursor(FALSE, FALSE);
else
#endif
out_flush();
--redrawing_for_callback;
}
| 0 | ["CWE-122"] | vim | 826bfe4bbd7594188e3d74d2539d9707b1c6a14b | 316,290,463,441,877,740,000,000,000,000,000,000,000 | 50 |
patch 8.2.3487: illegal memory access if buffer name is very long
Problem: Illegal memory access if buffer name is very long.
Solution: Make sure not to go over the end of the buffer.
|
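The patch title points at an unbounded write when the buffer name is very long; the general repair pattern is a length-bounded format into the destination. A generic sketch with illustrative names (not vim's internal API):

```c
#include <stdio.h>

/* snprintf truncates at outlen instead of writing past the end of
 * out when bufname is arbitrarily long. */
static void format_title(char *out, size_t outlen, const char *bufname)
{
    snprintf(out, outlen, "%s", bufname);
}
```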
static int backref_match_at_nested_level(regex_t* reg
, OnigStackType* top, OnigStackType* stk_base
, int ignore_case, int case_fold_flag
, int nest, int mem_num, UChar* memp, UChar** s, const UChar* send)
{
UChar *ss, *p, *pstart, *pend = NULL_UCHARP;
int level;
OnigStackType* k;
level = 0;
k = top;
k--;
while (k >= stk_base) {
if (k->type == STK_CALL_FRAME) {
level--;
}
else if (k->type == STK_RETURN) {
level++;
}
else if (level == nest) {
if (k->type == STK_MEM_START) {
if (mem_is_in_memp(k->u.mem.num, mem_num, memp)) {
pstart = k->u.mem.pstr;
if (pend != NULL_UCHARP) {
if (pend - pstart > send - *s) return 0; /* or goto next_mem; */
p = pstart;
ss = *s;
if (ignore_case != 0) {
if (string_cmp_ic(reg->enc, case_fold_flag,
pstart, &ss, (int )(pend - pstart)) == 0)
return 0; /* or goto next_mem; */
}
else {
while (p < pend) {
if (*p++ != *ss++) return 0; /* or goto next_mem; */
}
}
*s = ss;
return 1;
}
}
}
else if (k->type == STK_MEM_END) {
if (mem_is_in_memp(k->u.mem.num, mem_num, memp)) {
pend = k->u.mem.pstr;
}
}
}
k--;
}
return 0;
}
| 0 | ["CWE-125"] | oniguruma | 65a9b1aa03c9bc2dc01b074295b9603232cb3b78 | 105,463,502,208,267,350,000,000,000,000,000,000,000 | 55 |
onig-5.9.2
|
dwg_ref_object_relative (const Dwg_Data *restrict dwg,
Dwg_Object_Ref *restrict ref,
const Dwg_Object *restrict obj)
{
if (ref->obj && !dwg->dirty_refs)
return ref->obj;
if (dwg_resolve_handleref (ref, obj))
{
Dwg_Object *o = dwg_resolve_handle (dwg, ref->absolute_ref);
if (!dwg->dirty_refs && o)
ref->obj = o;
return o;
}
else
return NULL;
}
| 0 | ["CWE-787"] | libredwg | ecf5183d8b3b286afe2a30021353b7116e0208dd | 177,575,650,807,031,930,000,000,000,000,000,000,000 | 16 |
dwg_section_wtype: fix fuzzing overflow
with illegal and overlong section names. Fixes GH #349, #352
section names cannot be longer than 24
|
void io_printf(int fd, const char *format, ...)
{
va_list ap;
char buf[BIGPATHBUFLEN];
int len;
va_start(ap, format);
len = vsnprintf(buf, sizeof buf, format, ap);
va_end(ap);
if (len < 0)
exit_cleanup(RERR_PROTOCOL);
if (len >= (int)sizeof buf) {
rprintf(FERROR, "io_printf() was too long for the buffer.\n");
exit_cleanup(RERR_PROTOCOL);
}
write_sbuf(fd, buf);
}
| 0 | [] | rsync | b7231c7d02cfb65d291af74ff66e7d8c507ee871 | 227,135,003,977,066,300,000,000,000,000,000,000,000 | 20 |
Some extra file-list safety checks.
|
void KrecipesView::show ( void )
{
slotSetPanel( SelectP );
QWidget::show();
}
| 0 | [] | krecipes | cd1490fb5fe82cbe9172a43be13298001b446ecd | 321,609,973,777,224,220,000,000,000,000,000,000,000 | 5 |
Use WebKit instead of KHTML for printing recipes, fixes sourceforge #2990118 and
#2960140.
svn path=/trunk/extragear/utils/krecipes/; revision=1137824
|
static int vfswrap_fchown(vfs_handle_struct *handle, files_struct *fsp, uid_t uid, gid_t gid)
{
#ifdef HAVE_FCHOWN
int result;
START_PROFILE(syscall_fchown);
result = fchown(fsp->fh->fd, uid, gid);
END_PROFILE(syscall_fchown);
return result;
#else
errno = ENOSYS;
return -1;
#endif
}
| 0 | ["CWE-665"] | samba | 30e724cbff1ecd90e5a676831902d1e41ec1b347 | 88,653,748,538,625,750,000,000,000,000,000,000,000 | 14 |
FSCTL_GET_SHADOW_COPY_DATA: Initialize output array to zero
Otherwise num_volumes and the end marker can return uninitialized data
to the client.
Signed-off-by: Christof Schmitt <[email protected]>
Reviewed-by: Jeremy Allison <[email protected]>
Reviewed-by: Simo Sorce <[email protected]>
|
setup_bus_child (gpointer data)
{
A11yBusLauncher *app = data;
(void) app;
close (app->pipefd[0]);
dup2 (app->pipefd[1], 3);
close (app->pipefd[1]);
/* On Linux, tell the bus process to exit if this process goes away */
#ifdef __linux
#include <sys/prctl.h>
prctl (PR_SET_PDEATHSIG, 15);
#endif
}
| 0 | [] | at-spi2-core | c2e87fe00b596dba20c9d57d406ab8faa744b15a | 213,088,916,003,762,300,000,000,000,000,000,000,000 | 15 |
Fix inverted logic.
Don't write more into a buffer than it can hold.
https://bugzilla.gnome.org/show_bug.cgi?id=791124
|
bool Item_in_subselect::test_limit(st_select_lex_unit *unit_arg)
{
if (unit_arg->fake_select_lex &&
unit_arg->fake_select_lex->test_limit())
return(1);
SELECT_LEX *sl= unit_arg->first_select();
for (; sl; sl= sl->next_select())
{
if (sl->test_limit())
return(1);
}
return(0);
}
| 0 | ["CWE-89"] | server | 3c209bfc040ddfc41ece8357d772547432353fd2 | 146,367,907,676,467,450,000,000,000,000,000,000,000 | 14 |
MDEV-25994: Crash with union of my_decimal type in ORDER BY clause
When a single-row subquery fails with a "Subquery returns more than 1 row"
error, it will raise an error and return NULL.
On the other hand, Item_singlerow_subselect sets item->maybe_null=0
for table-less subqueries like "(SELECT not_null_value)" (*)
This discrepancy (item with maybe_null=0 returning NULL) causes the
code in Type_handler_decimal_result::make_sort_key_part() to crash.
Fixed this by allowing inference (*) only when the subquery is NOT a
UNION.
|
compileString(const char *inString, CharacterClass **characterClasses,
TranslationTableCharacterAttributes *characterClassAttribute,
short opcodeLengths[], TranslationTableOffset *newRuleOffset,
TranslationTableRule **newRule, RuleName **ruleNames,
TranslationTableHeader **table) {
/* This function can be used to make changes to tables on the fly. */
int k;
FileInfo nested;
if (inString == NULL) return 0;
memset(&nested, 0, sizeof(nested));
nested.fileName = inString;
nested.encoding = noEncoding;
nested.lineNumber = 1;
nested.status = 0;
nested.linepos = 0;
for (k = 0; inString[k]; k++) nested.line[k] = inString[k];
nested.line[k] = 0;
nested.linelen = k;
return compileRule(&nested, characterClasses, characterClassAttribute, opcodeLengths,
newRuleOffset, newRule, ruleNames, table);
}
| 0 | ["CWE-787"] | liblouis | fb2bfce4ed49ac4656a8f7e5b5526e4838da1dde | 160,577,408,148,992,620,000,000,000,000,000,000,000 | 21 |
Fix yet another buffer overflow in the braille table parser
Reported by Henri Salo
Fixes #592
|
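In the function above, `for (k = 0; inString[k]; k++) nested.line[k] = inString[k];` copies into `nested.line` without checking its capacity, which is the overflow class this commit addresses. A bounds-checked sketch of the same copy, with the capacity passed in explicitly (an assumption; liblouis uses its own buffer types):

```c
#include <stddef.h>

/* Stop copying before the destination is full; cap is assumed to
 * be the declared size of nested.line. */
static size_t bounded_copy(char *dst, size_t cap, const char *src)
{
    size_t k;

    for (k = 0; src[k] && k + 1 < cap; k++)
        dst[k] = src[k];
    dst[k] = 0;     /* terminator always fits */
    return k;       /* would become nested.linelen */
}
```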
int nfs_initiate_commit(struct rpc_clnt *clnt, struct nfs_commit_data *data,
const struct rpc_call_ops *call_ops,
int how, int flags)
{
struct rpc_task *task;
int priority = flush_task_priority(how);
struct rpc_message msg = {
.rpc_argp = &data->args,
.rpc_resp = &data->res,
.rpc_cred = data->cred,
};
struct rpc_task_setup task_setup_data = {
.task = &data->task,
.rpc_client = clnt,
.rpc_message = &msg,
.callback_ops = call_ops,
.callback_data = data,
.workqueue = nfsiod_workqueue,
.flags = RPC_TASK_ASYNC | flags,
.priority = priority,
};
/* Set up the initial task struct. */
NFS_PROTO(data->inode)->commit_setup(data, &msg);
dprintk("NFS: %5u initiated commit call\n", data->task.tk_pid);
nfs4_state_protect(NFS_SERVER(data->inode)->nfs_client,
NFS_SP4_MACH_CRED_COMMIT, &task_setup_data.rpc_client, &msg);
task = rpc_run_task(&task_setup_data);
if (IS_ERR(task))
return PTR_ERR(task);
if (how & FLUSH_SYNC)
rpc_wait_for_completion_task(task);
rpc_put_task(task);
return 0;
}
| 0 | ["CWE-20", "CWE-200"] | linux | 263b4509ec4d47e0da3e753f85a39ea12d1eff24 | 321,710,933,013,495,450,000,000,000,000,000,000,000 | 37 |
nfs: always make sure page is up-to-date before extending a write to cover the entire page
We should always make sure the cached page is up-to-date when we're
determining whether we can extend a write to cover the full page -- even
if we've received a write delegation from the server.
Commit c7559663 added logic to skip this check if we have a write
delegation, which can lead to data corruption such as the following
scenario if client B receives a write delegation from the NFS server:
Client A:
# echo 123456789 > /mnt/file
Client B:
# echo abcdefghi >> /mnt/file
# cat /mnt/file
0�D0�abcdefghi
Just because we hold a write delegation doesn't mean that we've read in
the entire page contents.
Cc: <[email protected]> # v3.11+
Signed-off-by: Scott Mayhew <[email protected]>
Signed-off-by: Trond Myklebust <[email protected]>
|
sched_feat_write(struct file *filp, const char __user *ubuf,
size_t cnt, loff_t *ppos)
{
char buf[64];
char *cmp;
int i;
if (cnt > 63)
cnt = 63;
if (copy_from_user(&buf, ubuf, cnt))
return -EFAULT;
buf[cnt] = 0;
cmp = strstrip(buf);
i = sched_feat_set(cmp);
if (i == __SCHED_FEAT_NR)
return -EINVAL;
*ppos += cnt;
return cnt;
}
| 0 | ["CWE-200"] | linux | 4efbc454ba68def5ef285b26ebfcfdb605b52755 | 204,802,201,250,320,800,000,000,000,000,000,000,000 | 24 |
sched: Fix information leak in sys_sched_getattr()
We're copying the on-stack structure to userspace, but forgot to give
the right number of bytes to copy. This allows the calling process to
obtain up to PAGE_SIZE bytes from the stack (and possibly adjacent
kernel memory).
This fix copies only as much as we actually have on the stack
(attr->size defaults to the size of the struct) and leaves the rest of
the userspace-provided buffer untouched.
Found using kmemcheck + trinity.
Fixes: d50dde5a10f30 ("sched: Add new scheduler syscalls to support an extended scheduling parameters ABI")
Cc: Dario Faggioli <[email protected]>
Cc: Juri Lelli <[email protected]>
Cc: Ingo Molnar <[email protected]>
Signed-off-by: Vegard Nossum <[email protected]>
Signed-off-by: Peter Zijlstra <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Thomas Gleixner <[email protected]>
|
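The leak came from handing a too-large byte count to the user copy. The repair described in the message copies only the bytes the kernel actually filled in; schematically (a kernel-style sketch, not the verbatim patch):

```c
/* kattr is assumed zero-initialized with kattr->size set to the
 * size the kernel filled in (defaulting to sizeof(*kattr)), so
 * copy exactly that many bytes and leave the rest of the
 * user-provided buffer untouched. */
static int sched_attr_copy_to_user(struct sched_attr __user *uattr,
                                   struct sched_attr *kattr)
{
    if (copy_to_user(uattr, kattr, kattr->size))
        return -EFAULT;
    return 0;
}
```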
MagickExport DrawInfo *CloneDrawInfo(const ImageInfo *image_info,
const DrawInfo *draw_info)
{
DrawInfo
*clone_info;
ExceptionInfo
*exception;
clone_info=(DrawInfo *) AcquireMagickMemory(sizeof(*clone_info));
if (clone_info == (DrawInfo *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
GetDrawInfo(image_info,clone_info);
if (draw_info == (DrawInfo *) NULL)
return(clone_info);
exception=AcquireExceptionInfo();
if (clone_info->primitive != (char *) NULL)
(void) CloneString(&clone_info->primitive,draw_info->primitive);
if (draw_info->geometry != (char *) NULL)
(void) CloneString(&clone_info->geometry,draw_info->geometry);
clone_info->viewbox=draw_info->viewbox;
clone_info->affine=draw_info->affine;
clone_info->gravity=draw_info->gravity;
clone_info->fill=draw_info->fill;
clone_info->stroke=draw_info->stroke;
clone_info->stroke_width=draw_info->stroke_width;
if (draw_info->fill_pattern != (Image *) NULL)
clone_info->fill_pattern=CloneImage(draw_info->fill_pattern,0,0,MagickTrue,
exception);
if (draw_info->stroke_pattern != (Image *) NULL)
clone_info->stroke_pattern=CloneImage(draw_info->stroke_pattern,0,0,
MagickTrue,exception);
clone_info->stroke_antialias=draw_info->stroke_antialias;
clone_info->text_antialias=draw_info->text_antialias;
clone_info->fill_rule=draw_info->fill_rule;
clone_info->linecap=draw_info->linecap;
clone_info->linejoin=draw_info->linejoin;
clone_info->miterlimit=draw_info->miterlimit;
clone_info->dash_offset=draw_info->dash_offset;
clone_info->decorate=draw_info->decorate;
clone_info->compose=draw_info->compose;
if (draw_info->text != (char *) NULL)
(void) CloneString(&clone_info->text,draw_info->text);
if (draw_info->font != (char *) NULL)
(void) CloneString(&clone_info->font,draw_info->font);
if (draw_info->metrics != (char *) NULL)
(void) CloneString(&clone_info->metrics,draw_info->metrics);
if (draw_info->family != (char *) NULL)
(void) CloneString(&clone_info->family,draw_info->family);
clone_info->style=draw_info->style;
clone_info->stretch=draw_info->stretch;
clone_info->weight=draw_info->weight;
if (draw_info->encoding != (char *) NULL)
(void) CloneString(&clone_info->encoding,draw_info->encoding);
clone_info->pointsize=draw_info->pointsize;
clone_info->kerning=draw_info->kerning;
clone_info->interline_spacing=draw_info->interline_spacing;
clone_info->interword_spacing=draw_info->interword_spacing;
clone_info->direction=draw_info->direction;
if (draw_info->density != (char *) NULL)
(void) CloneString(&clone_info->density,draw_info->density);
clone_info->align=draw_info->align;
clone_info->undercolor=draw_info->undercolor;
clone_info->border_color=draw_info->border_color;
if (draw_info->server_name != (char *) NULL)
(void) CloneString(&clone_info->server_name,draw_info->server_name);
if (draw_info->dash_pattern != (double *) NULL)
{
register ssize_t
x;
for (x=0; draw_info->dash_pattern[x] != 0.0; x++) ;
clone_info->dash_pattern=(double *) AcquireQuantumMemory((size_t) x+1UL,
sizeof(*clone_info->dash_pattern));
if (clone_info->dash_pattern == (double *) NULL)
ThrowFatalException(ResourceLimitFatalError,
"UnableToAllocateDashPattern");
(void) CopyMagickMemory(clone_info->dash_pattern,draw_info->dash_pattern,
(size_t) (x+1)*sizeof(*clone_info->dash_pattern));
}
clone_info->gradient=draw_info->gradient;
if (draw_info->gradient.stops != (StopInfo *) NULL)
{
size_t
number_stops;
number_stops=clone_info->gradient.number_stops;
clone_info->gradient.stops=(StopInfo *) AcquireQuantumMemory((size_t)
number_stops,sizeof(*clone_info->gradient.stops));
if (clone_info->gradient.stops == (StopInfo *) NULL)
ThrowFatalException(ResourceLimitFatalError,
"UnableToAllocateDashPattern");
(void) CopyMagickMemory(clone_info->gradient.stops,
draw_info->gradient.stops,(size_t) number_stops*
sizeof(*clone_info->gradient.stops));
}
if (draw_info->clip_mask != (char *) NULL)
(void) CloneString(&clone_info->clip_mask,draw_info->clip_mask);
clone_info->bounds=draw_info->bounds;
clone_info->clip_units=draw_info->clip_units;
clone_info->render=draw_info->render;
clone_info->alpha=draw_info->alpha;
clone_info->element_reference=draw_info->element_reference;
clone_info->debug=IsEventLogging();
exception=DestroyExceptionInfo(exception);
return(clone_info);
}
| 0 | ["CWE-399", "CWE-119"] | ImageMagick | 726812fa2fa7ce16bcf58f6e115f65427a1c0950 | 67,134,845,912,661,660,000,000,000,000,000,000,000 | 107 |
Prevent buffer overflow in magick/draw.c
|
static void gen_reset_hflag(DisasContext *s, uint32_t mask)
{
if (s->flags & mask) {
TCGv_i32 t = tcg_temp_new_i32();
tcg_gen_ld_i32(t, cpu_env, offsetof(CPUX86State, hflags));
tcg_gen_andi_i32(t, t, ~mask);
tcg_gen_st_i32(t, cpu_env, offsetof(CPUX86State, hflags));
tcg_temp_free_i32(t);
s->flags &= ~mask;
}
}
| 0 | ["CWE-94"] | qemu | 30663fd26c0307e414622c7a8607fbc04f92ec14 | 338,426,065,718,723,160,000,000,000,000,000,000,000 | 11 |
tcg/i386: Check the size of instruction being translated
This fixes the bug: 'user-to-root privesc inside VM via bad translation
caching' reported by Jann Horn here:
https://bugs.chromium.org/p/project-zero/issues/detail?id=1122
Reviewed-by: Richard Henderson <[email protected]>
CC: Peter Maydell <[email protected]>
CC: Paolo Bonzini <[email protected]>
Reported-by: Jann Horn <[email protected]>
Signed-off-by: Pranith Kumar <[email protected]>
Message-Id: <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
|
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
RAMBlock *block;
block = atomic_rcu_read(&ram_list.mru_block);
if (block && addr - block->offset < block->max_length) {
return block;
}
RAMBLOCK_FOREACH(block) {
if (addr - block->offset < block->max_length) {
goto found;
}
}
fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
abort();
found:
/* It is safe to write mru_block outside the iothread lock. This
* is what happens:
*
* mru_block = xxx
* rcu_read_unlock()
* xxx removed from list
* rcu_read_lock()
* read mru_block
* mru_block = NULL;
* call_rcu(reclaim_ramblock, xxx);
* rcu_read_unlock()
*
* atomic_rcu_set is not needed here. The block was already published
* when it was placed into the list. Here we're just making an extra
* copy of the pointer.
*/
ram_list.mru_block = block;
return block;
}
| 0 | ["CWE-125"] | qemu | 04bf2526ce87f21b32c9acba1c5518708c243ad0 | 243,908,667,241,637,780,000,000,000,000,000,000,000 | 37 |
exec: use qemu_ram_ptr_length to access guest ram
When accessing guest's ram block during DMA operation, use
'qemu_ram_ptr_length' to get ram block pointer. It ensures
that DMA operation of given length is possible; And avoids
any OOB memory access situations.
Reported-by: Alex <[email protected]>
Signed-off-by: Prasad J Pandit <[email protected]>
Message-Id: <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
|
static inline int zpff_init(struct hid_device *hid)
{
return 0;
}
| 1 | ["CWE-787"] | linux | d9d4b1e46d9543a82c23f6df03f4ad697dab361b | 21,292,509,463,556,524,000,000,000,000,000,000,000 | 4 |
HID: Fix assumption that devices have inputs
The syzbot fuzzer found a slab-out-of-bounds write bug in the hid-gaff
driver. The problem is caused by the driver's assumption that the
device must have an input report. While this will be true for all
normal HID input devices, a suitably malicious device can violate the
assumption.
The same assumption is present in over a dozen other HID drivers.
This patch fixes them by checking that the list of hid_inputs for the
hid_device is nonempty before allowing it to be used.
Reported-and-tested-by: [email protected]
Signed-off-by: Alan Stern <[email protected]>
CC: <[email protected]>
Signed-off-by: Benjamin Tissoires <[email protected]>
|
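The guard the message describes is the same across the affected drivers: verify the device's input list is nonempty before taking its first entry. A sketch of that pattern in a driver init path:

```c
/* Bail out instead of assuming the HID device has an input. */
if (list_empty(&hid->inputs)) {
    hid_err(hid, "no inputs found\n");
    return -ENODEV;
}
hidinput = list_first_entry(&hid->inputs, struct hid_input, list);
```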
static void __vmx_complete_interrupts(struct kvm_vcpu *vcpu,
u32 idt_vectoring_info,
int instr_len_field,
int error_code_field)
{
u8 vector;
int type;
bool idtv_info_valid;
idtv_info_valid = idt_vectoring_info & VECTORING_INFO_VALID_MASK;
vcpu->arch.nmi_injected = false;
kvm_clear_exception_queue(vcpu);
kvm_clear_interrupt_queue(vcpu);
if (!idtv_info_valid)
return;
kvm_make_request(KVM_REQ_EVENT, vcpu);
vector = idt_vectoring_info & VECTORING_INFO_VECTOR_MASK;
type = idt_vectoring_info & VECTORING_INFO_TYPE_MASK;
switch (type) {
case INTR_TYPE_NMI_INTR:
vcpu->arch.nmi_injected = true;
/*
* SDM 3: 27.7.1.2 (September 2008)
* Clear bit "block by NMI" before VM entry if a NMI
* delivery faulted.
*/
vmx_set_nmi_mask(vcpu, false);
break;
case INTR_TYPE_SOFT_EXCEPTION:
vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field);
/* fall through */
case INTR_TYPE_HARD_EXCEPTION:
if (idt_vectoring_info & VECTORING_INFO_DELIVER_CODE_MASK) {
u32 err = vmcs_read32(error_code_field);
kvm_requeue_exception_e(vcpu, vector, err);
} else
kvm_requeue_exception(vcpu, vector);
break;
case INTR_TYPE_SOFT_INTR:
vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field);
/* fall through */
case INTR_TYPE_EXT_INTR:
kvm_queue_interrupt(vcpu, vector, type == INTR_TYPE_SOFT_INTR);
break;
default:
break;
}
}
| 0 | [] | kvm | a642fc305053cc1c6e47e4f4df327895747ab485 | 136,885,787,296,316,570,000,000,000,000,000,000,000 | 53 |
kvm: vmx: handle invvpid vm exit gracefully
On systems with invvpid instruction support (corresponding bit in
IA32_VMX_EPT_VPID_CAP MSR is set) guest invocation of invvpid
causes vm exit, which is currently not handled and results in
propagation of unknown exit to userspace.
Fix this by installing an invvpid vm exit handler.
This is CVE-2014-3646.
Cc: [email protected]
Signed-off-by: Petr Matousek <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
|
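Consuming the exit in the kernel rather than forwarding an unknown exit to userspace means reflecting what hardware would do for an unsupported instruction: raise #UD in the guest. A minimal sketch of such a handler:

```c
/* Inject #UD for the INVVPID vm exit and resume the guest. */
static int handle_invvpid(struct kvm_vcpu *vcpu)
{
    kvm_queue_exception(vcpu, UD_VECTOR);
    return 1;   /* exit handled in kernel; re-enter guest */
}
```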
a2port(const char *s)
{
struct servent *se;
long long port;
const char *errstr;
port = strtonum(s, 0, 65535, &errstr);
if (errstr == NULL)
return (int)port;
if ((se = getservbyname(s, "tcp")) != NULL)
return ntohs(se->s_port);
return -1;
}
| 0 | [] | openssh-portable | f3cbe43e28fe71427d41cfe3a17125b972710455 | 10,229,006,911,223,411,000,000,000,000,000,000,000 | 13 |
upstream: need initgroups() before setresgid(); reported by anton@,
ok deraadt@
OpenBSD-Commit-ID: 6aa003ee658b316960d94078f2a16edbc25087ce
|
static void *udp_seq_start(struct seq_file *seq, loff_t *pos)
{
read_lock(&udp_hash_lock);
return *pos ? udp_get_idx(seq, *pos-1) : (void *)1;
}
| 0 | [] | linux-2.6 | 32c1da70810017a98aa6c431a5494a302b6b9a30 | 321,288,858,554,455,040,000,000,000,000,000,000,000 | 5 |
[UDP]: Randomize port selection.
This patch causes UDP port allocation to be randomized like TCP.
The earlier code would always choose same port (ie first empty list).
Signed-off-by: Stephen Hemminger <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
HANDLE ves_icall_System_Threading_Thread_Thread_internal(MonoThread *this,
MonoObject *start)
{
guint32 (*start_func)(void *);
struct StartInfo *start_info;
HANDLE thread;
gsize tid;
MonoInternalThread *internal;
THREAD_DEBUG (g_message("%s: Trying to start a new thread: this (%p) start (%p)", __func__, this, start));
if (!this->internal_thread)
ves_icall_System_Threading_Thread_ConstructInternalThread (this);
internal = this->internal_thread;
ensure_synch_cs_set (internal);
EnterCriticalSection (internal->synch_cs);
if ((internal->state & ThreadState_Unstarted) == 0) {
LeaveCriticalSection (internal->synch_cs);
mono_raise_exception (mono_get_exception_thread_state ("Thread has already been started."));
return NULL;
}
internal->small_id = -1;
if ((internal->state & ThreadState_Aborted) != 0) {
LeaveCriticalSection (internal->synch_cs);
return this;
}
start_func = NULL;
{
/* This is freed in start_wrapper */
start_info = g_new0 (struct StartInfo, 1);
start_info->func = start_func;
start_info->start_arg = this->start_obj; /* FIXME: GC object stored in unmanaged memory */
start_info->delegate = start;
start_info->obj = this;
g_assert (this->obj.vtable->domain == mono_domain_get ());
internal->start_notify=CreateSemaphore (NULL, 0, 0x7fffffff, NULL);
if (internal->start_notify==NULL) {
LeaveCriticalSection (internal->synch_cs);
g_warning ("%s: CreateSemaphore error 0x%x", __func__, GetLastError ());
g_free (start_info);
return(NULL);
}
mono_threads_lock ();
register_thread_start_argument (this, start_info);
if (threads_starting_up == NULL) {
MONO_GC_REGISTER_ROOT_FIXED (threads_starting_up);
threads_starting_up = mono_g_hash_table_new_type (NULL, NULL, MONO_HASH_KEY_VALUE_GC);
}
mono_g_hash_table_insert (threads_starting_up, this, this);
mono_threads_unlock ();
thread=mono_create_thread(NULL, default_stacksize_for_thread (internal), (LPTHREAD_START_ROUTINE)start_wrapper, start_info,
CREATE_SUSPENDED, &tid);
if(thread==NULL) {
LeaveCriticalSection (internal->synch_cs);
mono_threads_lock ();
mono_g_hash_table_remove (threads_starting_up, this);
mono_threads_unlock ();
g_warning("%s: CreateThread error 0x%x", __func__, GetLastError());
return(NULL);
}
internal->handle=thread;
internal->tid=tid;
internal->small_id = mono_thread_small_id_alloc ();
internal->thread_pinning_ref = internal;
MONO_GC_REGISTER_ROOT (internal->thread_pinning_ref);
/* Don't call handle_store() here, delay it to Start.
* We can't join a thread (trying to will just block
* forever) until it actually starts running, so don't
* store the handle till then.
*/
mono_thread_start (this);
internal->state &= ~ThreadState_Unstarted;
THREAD_DEBUG (g_message ("%s: Started thread ID %"G_GSIZE_FORMAT" (handle %p)", __func__, tid, thread));
LeaveCriticalSection (internal->synch_cs);
return(thread);
}
}
| 0 | ["CWE-399", "CWE-264"] | mono | 722f9890f09aadfc37ae479e7d946d5fc5ef7b91 | 231,144,950,822,846,270,000,000,000,000,000,000,000 | 92 |
Fix access to freed members of a dead thread
* threads.c: Fix access to freed members of a dead thread. Found
and fixed by Rodrigo Kumpera <[email protected]>
Ref: CVE-2011-0992
|
void CServer::ConStopRecord(IConsole::IResult *pResult, void *pUser)
{
((CServer *)pUser)->m_DemoRecorder.Stop();
}
| 0 | ["CWE-20"] | teeworlds | a766cb44bcffcdb0b88e776d01c5ee1323d44f85 | 285,315,944,889,916,300,000,000,000,000,000,000,000 | 4 |
fixed a server crash
|
int RESTArgs::get_uint32(struct req_state *s, const string& name,
uint32_t def_val, uint32_t *val, bool *existed)
{
bool exists;
string sval = s->info.args.get(name, &exists);
if (existed)
*existed = exists;
if (!exists) {
*val = def_val;
return 0;
}
int r = stringtoul(sval, val);
if (r < 0)
return r;
return 0;
}
| 0 | ["CWE-770"] | ceph | ab29bed2fc9f961fe895de1086a8208e21ddaddc | 237,381,210,060,805,840,000,000,000,000,000,000,000 | 20 |
rgw: fix issues with 'enforce bounds' patch
The patch to enforce bounds on max-keys/max-uploads/max-parts had a few
issues that would prevent us from compiling it. Instead of changing the
code provided by the submitter, we're addressing them in a separate
commit to maintain the DCO.
Signed-off-by: Joao Eduardo Luis <[email protected]>
Signed-off-by: Abhishek Lekshmanan <[email protected]>
(cherry picked from commit 29bc434a6a81a2e5c5b8cfc4c8d5c82ca5bf538a)
mimic specific fixes:
As the largeish change from master g_conf() isn't in mimic yet, use the g_conf
global structure, also make rgw_op use the value from req_info ceph context as
we do for all the requests
|
//! Return \c true if input character is blank (space, tab, or non-printable character).
inline bool is_blank(const char c) {
return c>=0 && c<=' ';
}
| 0 | ["CWE-119", "CWE-787"] | CImg | ac8003393569aba51048c9d67e1491559877b1d1 | 148,979,036,118,727,930,000,000,000,000,000,000,000 | 3 |
.
|
parse_tsquery(char *buf,
PushFunction pushval,
Datum opaque,
bool isplain)
{
struct TSQueryParserStateData state;
int i;
TSQuery query;
int commonlen;
QueryItem *ptr;
ListCell *cell;
/* init state */
state.buffer = buf;
state.buf = buf;
state.state = (isplain) ? WAITSINGLEOPERAND : WAITFIRSTOPERAND;
state.count = 0;
state.polstr = NIL;
/* init value parser's state */
state.valstate = init_tsvector_parser(state.buffer, true, true);
/* init list of operand */
state.sumlen = 0;
state.lenop = 64;
state.curop = state.op = (char *) palloc(state.lenop);
*(state.curop) = '\0';
/* parse query & make polish notation (postfix, but in reverse order) */
makepol(&state, pushval, opaque);
close_tsvector_parser(state.valstate);
if (list_length(state.polstr) == 0)
{
ereport(NOTICE,
(errmsg("text-search query doesn't contain lexemes: \"%s\"",
state.buffer)));
query = (TSQuery) palloc(HDRSIZETQ);
SET_VARSIZE(query, HDRSIZETQ);
query->size = 0;
return query;
}
/* Pack the QueryItems in the final TSQuery struct to return to caller */
commonlen = COMPUTESIZE(list_length(state.polstr), state.sumlen);
query = (TSQuery) palloc0(commonlen);
SET_VARSIZE(query, commonlen);
query->size = list_length(state.polstr);
ptr = GETQUERY(query);
/* Copy QueryItems to TSQuery */
i = 0;
foreach(cell, state.polstr)
{
QueryItem *item = (QueryItem *) lfirst(cell);
switch (item->type)
{
case QI_VAL:
memcpy(&ptr[i], item, sizeof(QueryOperand));
break;
case QI_VALSTOP:
ptr[i].type = QI_VALSTOP;
break;
case QI_OPR:
memcpy(&ptr[i], item, sizeof(QueryOperator));
break;
default:
elog(ERROR, "unrecognized QueryItem type: %d", item->type);
}
i++;
}
/* Copy all the operand strings to TSQuery */
memcpy((void *) GETOPERAND(query), (void *) state.op, state.sumlen);
pfree(state.op);
/* Set left operand pointers for every operator. */
findoprnd(ptr, query->size);
return query;
}
| 1 | ["CWE-703", "CWE-189"] | postgres | 31400a673325147e1205326008e32135a78b4d8a | 74,086,987,630,414,820,000,000,000,000,000,000,000 | 83 |
Predict integer overflow to avoid buffer overruns.
Several functions, mostly type input functions, calculated an allocation
size such that the calculation wrapped to a small positive value when
arguments implied a sufficiently-large requirement. Writes past the end
of the inadvertent small allocation followed shortly thereafter.
Coverity identified the path_in() vulnerability; code inspection led to
the rest. In passing, add check_stack_depth() to prevent stack overflow
in related functions.
Back-patch to 8.4 (all supported versions). The non-comment hstore
changes touch code that did not exist in 8.4, so that part stops at 9.0.
Noah Misch and Heikki Linnakangas, reviewed by Tom Lane.
Security: CVE-2014-0064
|
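The overflow class named here is `header + n * item_size` wrapping to a small positive value before the allocator sees it, so the guard must run before the multiplication. A sketch in the style of the function above (the exact condition and error wording are illustrative):

```c
/* Refuse inputs whose size computation would wrap, before calling
 * COMPUTESIZE()/palloc0(); MaxAllocSize is PostgreSQL's allocation
 * ceiling. */
if ((Size) nitems > (MaxAllocSize - HDRSIZETQ) / sizeof(QueryItem))
    ereport(ERROR,
            (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
             errmsg("tsquery is too large")));
```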
void NumberFormatTest::TestExponentParse() {
UErrorCode status = U_ZERO_ERROR;
Formattable result;
ParsePosition parsePos(0);
// set the exponent symbol
status = U_ZERO_ERROR;
DecimalFormatSymbols symbols(Locale::getDefault(), status);
if(U_FAILURE(status)) {
dataerrln((UnicodeString)"ERROR: Could not create DecimalFormatSymbols (Default)");
return;
}
// create format instance
status = U_ZERO_ERROR;
DecimalFormat fmt(u"#####", symbols, status);
if(U_FAILURE(status)) {
errln((UnicodeString)"ERROR: Could not create DecimalFormat (pattern, symbols*)");
}
// parse the text
fmt.parse("5.06e-27", result, parsePos);
if(result.getType() != Formattable::kDouble &&
result.getDouble() != 5.06E-27 &&
parsePos.getIndex() != 8
)
{
errln("ERROR: parse failed - expected 5.06E-27, 8 - returned %d, %i",
result.getDouble(), parsePos.getIndex());
}
}
| 0 | ["CWE-190"] | icu | 53d8c8f3d181d87a6aa925b449b51c4a2c922a51 | 182,592,639,972,806,000,000,000,000,000,000,000,000 | 32 |
ICU-20246 Fixing another integer overflow in number parsing.
|
xmlFAParseQuantifier(xmlRegParserCtxtPtr ctxt) {
int cur;
cur = CUR;
if ((cur == '?') || (cur == '*') || (cur == '+')) {
if (ctxt->atom != NULL) {
if (cur == '?')
ctxt->atom->quant = XML_REGEXP_QUANT_OPT;
else if (cur == '*')
ctxt->atom->quant = XML_REGEXP_QUANT_MULT;
else if (cur == '+')
ctxt->atom->quant = XML_REGEXP_QUANT_PLUS;
}
NEXT;
return(1);
}
if (cur == '{') {
int min = 0, max = 0;
NEXT;
cur = xmlFAParseQuantExact(ctxt);
if (cur >= 0)
min = cur;
if (CUR == ',') {
NEXT;
if (CUR == '}')
max = INT_MAX;
else {
cur = xmlFAParseQuantExact(ctxt);
if (cur >= 0)
max = cur;
else {
ERROR("Improper quantifier");
}
}
}
if (CUR == '}') {
NEXT;
} else {
ERROR("Unterminated quantifier");
}
if (max == 0)
max = min;
if (ctxt->atom != NULL) {
ctxt->atom->quant = XML_REGEXP_QUANT_RANGE;
ctxt->atom->min = min;
ctxt->atom->max = max;
}
return(1);
}
return(0);
}
| 0 | ["CWE-119"] | libxml2 | cbb271655cadeb8dbb258a64701d9a3a0c4835b4 | 69,689,000,487,187,780,000,000,000,000,000,000,000 | 52 |
Bug 757711: heap-buffer-overflow in xmlFAParsePosCharGroup <https://bugzilla.gnome.org/show_bug.cgi?id=757711>
* xmlregexp.c:
(xmlFAParseCharRange): Only advance to the next character if
there is no error. Advancing to the next character in case of
an error while parsing regexp leads to an out of bounds access.
|
xmlSchemaAddElementSubstitutionMember(xmlSchemaParserCtxtPtr pctxt,
xmlSchemaElementPtr head,
xmlSchemaElementPtr member)
{
xmlSchemaSubstGroupPtr substGroup = NULL;
if ((pctxt == NULL) || (head == NULL) || (member == NULL))
return (-1);
substGroup = xmlSchemaSubstGroupGet(pctxt, head);
if (substGroup == NULL)
substGroup = xmlSchemaSubstGroupAdd(pctxt, head);
if (substGroup == NULL)
return(-1);
if (xmlSchemaItemListAdd(substGroup->members, member) == -1)
return(-1);
return(0);
}
| 0 | ["CWE-134"] | libxml2 | 4472c3a5a5b516aaf59b89be602fbce52756c3e9 | 1,060,751,461,647,697,000,000,000,000,000,000,000 | 18 |
Fix some format string warnings with possible format string vulnerability
For https://bugzilla.gnome.org/show_bug.cgi?id=761029
Decorate every method in libxml2 with the appropriate
LIBXML_ATTR_FORMAT(fmt,args) macro and add some cleanups
following the reports.
|
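`LIBXML_ATTR_FORMAT(fmt, args)` expands, on GCC-compatible compilers, to the printf format attribute, turning mismatched format strings into compile-time warnings. A sketch of the decoration on a hypothetical function (not a real libxml2 API):

```c
/* Argument 2 is the format string and variadic arguments start at
 * argument 3, so the compiler can type-check every call site. */
void my_report(void *ctxt, const char *msg, ...) LIBXML_ATTR_FORMAT(2, 3);
```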
inline bool IsHybridOp(const TfLiteTensor* input, const TfLiteTensor* weight) {
return ((weight->type == kTfLiteUInt8 || weight->type == kTfLiteInt8) &&
input->type == kTfLiteFloat32);
}
| 0 | ["CWE-125", "CWE-787"] | tensorflow | 46d5b0852528ddfd614ded79bccc75589f801bd9 | 255,268,135,514,242,750,000,000,000,000,000,000,000 | 4 |
[tflite] Test for `kTfLiteOptionalTensor` in `GetInput`.
`GetInput`, `GetVariableInput` and `GetOutput` all fail to check for the case where `node->inputs->data[index]` is the special `kTfLiteOptionalTensor` value (-1) which then causes `context->tensors[node->inputs->data[index]]` to read from invalid memory location.
This fix makes `GetInput` and related return `nullptr` in those cases, asking the caller to check for `nullptr`. This is better than having `GetOptionalInputTensor` and `GetOptionalOutputTensor` (does not exist but could be added) as using the patched `GetInput` in error would be caught by a sanitizer test in the default optimized build (due to the `-fsanitize=null` option).
PiperOrigin-RevId: 332512190
Change-Id: Iabca54da2f2de02b6ece3c38b54f76d4277d689e
|
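The mechanism in the message maps directly to a guard on the tensor index. A sketch of the checked accessor (C-style, simplified from TFLite's actual signatures):

```c
/* Return NULL when the node's i-th input is the special
 * kTfLiteOptionalTensor (-1) marker instead of indexing
 * context->tensors with -1. */
static const TfLiteTensor *get_input_safe(const TfLiteContext *context,
                                          const TfLiteNode *node, int i)
{
    const int idx = node->inputs->data[i];

    if (idx == kTfLiteOptionalTensor)
        return NULL;    /* caller must handle the missing tensor */
    return &context->tensors[idx];
}
```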
static long snd_pcm_oss_bytes(struct snd_pcm_substream *substream, long frames)
{
struct snd_pcm_runtime *runtime = substream->runtime;
long buffer_size = snd_pcm_lib_buffer_bytes(substream);
long bytes = frames_to_bytes(runtime, frames);
if (buffer_size == runtime->oss.buffer_bytes)
return bytes;
#if BITS_PER_LONG >= 64
return runtime->oss.buffer_bytes * bytes / buffer_size;
#else
{
u64 bsize = (u64)runtime->oss.buffer_bytes * (u64)bytes;
return div_u64(bsize, buffer_size);
}
#endif
}
| 0 | ["CWE-362"] | linux | 8423f0b6d513b259fdab9c9bf4aaa6188d054c2d | 113,949,406,328,903,200,000,000,000,000,000,000,000 | 16 |
ALSA: pcm: oss: Fix race at SNDCTL_DSP_SYNC
There is a small race window at snd_pcm_oss_sync() that is called from
OSS PCM SNDCTL_DSP_SYNC ioctl; namely the function calls
snd_pcm_oss_make_ready() at first, then takes the params_lock mutex
for the rest. When the stream is set up again by another thread
between them, it leads to inconsistency, and may result in unexpected
results such as NULL dereference of OSS buffer as a fuzzer spotted
recently.
The fix is simply to cover snd_pcm_oss_make_ready() call into the same
params_lock mutex with snd_pcm_oss_make_ready_locked() variant.
Reported-and-tested-by: butt3rflyh4ck <[email protected]>
Reviewed-by: Jaroslav Kysela <[email protected]>
Cc: <[email protected]>
Link: https://lore.kernel.org/r/CAFcO6XN7JDM4xSXGhtusQfS2mSBcx50VJKwQpCq=WeLt57aaZA@mail.gmail.com
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Takashi Iwai <[email protected]>
|
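The message spells out the repair: pull the make-ready step inside the same `params_lock` critical section as the rest of the sync. Schematically (the locked helper name is taken from the message; the remainder of the sync work is a placeholder):

```c
/* One critical section covers both steps, closing the window in
 * which another thread could re-set up the stream. */
mutex_lock(&runtime->oss.params_lock);
err = snd_pcm_oss_make_ready_locked(substream);
if (err >= 0)
    err = oss_sync_rest(substream);   /* placeholder for the rest */
mutex_unlock(&runtime->oss.params_lock);
```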
int find_free_dev_extent_start(struct btrfs_transaction *transaction,
struct btrfs_device *device, u64 num_bytes,
u64 search_start, u64 *start, u64 *len)
{
struct btrfs_fs_info *fs_info = device->fs_info;
struct btrfs_root *root = fs_info->dev_root;
struct btrfs_key key;
struct btrfs_dev_extent *dev_extent;
struct btrfs_path *path;
u64 hole_size;
u64 max_hole_start;
u64 max_hole_size;
u64 extent_end;
u64 search_end = device->total_bytes;
int ret;
int slot;
struct extent_buffer *l;
/*
* We don't want to overwrite the superblock on the drive nor any area
* used by the boot loader (grub for example), so we make sure to start
* at an offset of at least 1MB.
*/
search_start = max_t(u64, search_start, SZ_1M);
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
max_hole_start = search_start;
max_hole_size = 0;
again:
if (search_start >= search_end ||
test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
ret = -ENOSPC;
goto out;
}
path->reada = READA_FORWARD;
path->search_commit_root = 1;
path->skip_locking = 1;
key.objectid = device->devid;
key.offset = search_start;
key.type = BTRFS_DEV_EXTENT_KEY;
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (ret < 0)
goto out;
if (ret > 0) {
ret = btrfs_previous_item(root, path, key.objectid, key.type);
if (ret < 0)
goto out;
}
while (1) {
l = path->nodes[0];
slot = path->slots[0];
if (slot >= btrfs_header_nritems(l)) {
ret = btrfs_next_leaf(root, path);
if (ret == 0)
continue;
if (ret < 0)
goto out;
break;
}
btrfs_item_key_to_cpu(l, &key, slot);
if (key.objectid < device->devid)
goto next;
if (key.objectid > device->devid)
break;
if (key.type != BTRFS_DEV_EXTENT_KEY)
goto next;
if (key.offset > search_start) {
hole_size = key.offset - search_start;
/*
* Have to check before we set max_hole_start, otherwise
* we could end up sending back this offset anyway.
*/
if (contains_pending_extent(transaction, device,
&search_start,
hole_size)) {
if (key.offset >= search_start) {
hole_size = key.offset - search_start;
} else {
WARN_ON_ONCE(1);
hole_size = 0;
}
}
if (hole_size > max_hole_size) {
max_hole_start = search_start;
max_hole_size = hole_size;
}
/*
* If this free space is greater than which we need,
* it must be the max free space that we have found
* until now, so max_hole_start must point to the start
* of this free space and the length of this free space
* is stored in max_hole_size. Thus, we return
* max_hole_start and max_hole_size and go back to the
* caller.
*/
if (hole_size >= num_bytes) {
ret = 0;
goto out;
}
}
dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
extent_end = key.offset + btrfs_dev_extent_length(l,
dev_extent);
if (extent_end > search_start)
search_start = extent_end;
next:
path->slots[0]++;
cond_resched();
}
/*
* At this point, search_start should be the end of
* allocated dev extents, and when shrinking the device,
* search_end may be smaller than search_start.
*/
if (search_end > search_start) {
hole_size = search_end - search_start;
if (contains_pending_extent(transaction, device, &search_start,
hole_size)) {
btrfs_release_path(path);
goto again;
}
if (hole_size > max_hole_size) {
max_hole_start = search_start;
max_hole_size = hole_size;
}
}
/* See above. */
if (max_hole_size < num_bytes)
ret = -ENOSPC;
else
ret = 0;
out:
btrfs_free_path(path);
*start = max_hole_start;
if (len)
*len = max_hole_size;
return ret;
}
| 0 | ["CWE-476", "CWE-284"] | linux | 09ba3bc9dd150457c506e4661380a6183af651c1 | 274,657,255,743,536,640,000,000,000,000,000,000,000 | 160 |
btrfs: merge btrfs_find_device and find_device
Both btrfs_find_device() and find_device() do the same thing except
that the latter does not take the seed device into account in the device
scanning context. We can merge them.
Signed-off-by: Anand Jain <[email protected]>
Reviewed-by: David Sterba <[email protected]>
Signed-off-by: David Sterba <[email protected]>
|
static int select_idle_sibling(struct task_struct *p, int prev, int target)
{
struct sched_domain *sd;
int i, recent_used_cpu;
if (available_idle_cpu(target))
return target;
/*
* If the previous CPU is cache affine and idle, don't be stupid:
*/
if (prev != target && cpus_share_cache(prev, target) && available_idle_cpu(prev))
return prev;
/* Check a recently used CPU as a potential idle candidate: */
recent_used_cpu = p->recent_used_cpu;
if (recent_used_cpu != prev &&
recent_used_cpu != target &&
cpus_share_cache(recent_used_cpu, target) &&
available_idle_cpu(recent_used_cpu) &&
cpumask_test_cpu(p->recent_used_cpu, p->cpus_ptr)) {
/*
* Replace recent_used_cpu with prev as it is a potential
* candidate for the next wake:
*/
p->recent_used_cpu = prev;
return recent_used_cpu;
}
sd = rcu_dereference(per_cpu(sd_llc, target));
if (!sd)
return target;
i = select_idle_core(p, sd, target);
if ((unsigned)i < nr_cpumask_bits)
return i;
i = select_idle_cpu(p, sd, target);
if ((unsigned)i < nr_cpumask_bits)
return i;
i = select_idle_smt(p, target);
if ((unsigned)i < nr_cpumask_bits)
return i;
return target;
}
| 0 | ["CWE-416"] | linux | 16d51a590a8ce3befb1308e0e7ab77f3b661af33 | 80,532,435,922,017,260,000,000,000,000,000,000,000 | 47 |
sched/fair: Don't free p->numa_faults with concurrent readers
When going through execve(), zero out the NUMA fault statistics instead of
freeing them.
During execve, the task is reachable through procfs and the scheduler. A
concurrent /proc/*/sched reader can read data from a freed ->numa_faults
allocation (confirmed by KASAN) and write it back to userspace.
I believe that it would also be possible for a use-after-free read to occur
through a race between a NUMA fault and execve(): task_numa_fault() can
lead to task_numa_compare(), which invokes task_weight() on the currently
running task of a different CPU.
Another way to fix this would be to make ->numa_faults RCU-managed or add
extra locking, but it seems easier to wipe the NUMA fault statistics on
execve.
Signed-off-by: Jann Horn <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Petr Mladek <[email protected]>
Cc: Sergey Senozhatsky <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: Will Deacon <[email protected]>
Fixes: 82727018b0d3 ("sched/numa: Call task_numa_free() from do_execve()")
Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
|
static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
{
return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <=
RECLAIM_DISTANCE;
}
| 0 | [] | linux | 400e22499dd92613821374c8c6c88c7225359980 | 308,852,896,929,862,800,000,000,000,000,000,000,000 | 5 |
mm: don't warn about allocations which stall for too long
Commit 63f53dea0c98 ("mm: warn about allocations which stall for too
long") was a great step for reducing possibility of silent hang up
problem caused by memory allocation stalls. But this commit reverts it,
for it is possible to trigger OOM lockup and/or soft lockups when many
threads concurrently called warn_alloc() (in order to warn about memory
allocation stalls) due to current implementation of printk(), and it is
difficult to obtain useful information due to limitation of synchronous
warning approach.
Current printk() implementation flushes all pending logs using the
context of a thread which called console_unlock(). printk() should be
able to flush all pending logs eventually unless somebody continues
appending to printk() buffer.
Since warn_alloc() started appending to printk() buffer while waiting
for oom_kill_process() to make forward progress when oom_kill_process()
is processing pending logs, it became possible for warn_alloc() to force
oom_kill_process() loop inside printk(). As a result, warn_alloc()
significantly increased possibility of preventing oom_kill_process()
from making forward progress.
---------- Pseudo code start ----------
Before warn_alloc() was introduced:
retry:
if (mutex_trylock(&oom_lock)) {
while (atomic_read(&printk_pending_logs) > 0) {
atomic_dec(&printk_pending_logs);
print_one_log();
}
// Send SIGKILL here.
mutex_unlock(&oom_lock)
}
goto retry;
After warn_alloc() was introduced:
retry:
if (mutex_trylock(&oom_lock)) {
while (atomic_read(&printk_pending_logs) > 0) {
atomic_dec(&printk_pending_logs);
print_one_log();
}
// Send SIGKILL here.
mutex_unlock(&oom_lock)
} else if (waited_for_10seconds()) {
atomic_inc(&printk_pending_logs);
}
goto retry;
---------- Pseudo code end ----------
Although waited_for_10seconds() becomes true once per 10 seconds,
unbounded number of threads can call waited_for_10seconds() at the same
time. Also, since threads doing waited_for_10seconds() keep doing
almost busy loop, the thread doing print_one_log() can use little CPU
resource. Therefore, this situation can be simplified like
---------- Pseudo code start ----------
retry:
if (mutex_trylock(&oom_lock)) {
while (atomic_read(&printk_pending_logs) > 0) {
atomic_dec(&printk_pending_logs);
print_one_log();
}
// Send SIGKILL here.
mutex_unlock(&oom_lock)
} else {
atomic_inc(&printk_pending_logs);
}
goto retry;
---------- Pseudo code end ----------
when printk() is called faster than print_one_log() can process a log.
One of possible mitigation would be to introduce a new lock in order to
make sure that no other series of printk() (either oom_kill_process() or
warn_alloc()) can append to printk() buffer when one series of printk()
(either oom_kill_process() or warn_alloc()) is already in progress.
Such serialization will also help obtaining kernel messages in readable
form.
---------- Pseudo code start ----------
retry:
if (mutex_trylock(&oom_lock)) {
mutex_lock(&oom_printk_lock);
while (atomic_read(&printk_pending_logs) > 0) {
atomic_dec(&printk_pending_logs);
print_one_log();
}
// Send SIGKILL here.
mutex_unlock(&oom_printk_lock);
mutex_unlock(&oom_lock)
} else {
if (mutex_trylock(&oom_printk_lock)) {
atomic_inc(&printk_pending_logs);
mutex_unlock(&oom_printk_lock);
}
}
goto retry;
---------- Pseudo code end ----------
But this commit does not go that direction, for we don't want to
introduce a new lock dependency, and we are unlikely to be able to obtain
useful information even if we serialized oom_kill_process() and
warn_alloc().
Synchronous approach is prone to unexpected results (e.g. too late [1],
too frequent [2], overlooked [3]). As far as I know, warn_alloc() never
helped with providing information other than "something is going wrong".
I want to consider asynchronous approach which can obtain information
during stalls with possibly relevant threads (e.g. the owner of
oom_lock and kswapd-like threads) and serve as a trigger for actions
(e.g. turn on/off tracepoints, ask libvirt daemon to take a memory dump
of stalling KVM guest for diagnostic purpose).
This commit temporarily loses ability to report e.g. OOM lockup due to
unable to invoke the OOM killer due to !__GFP_FS allocation request.
But asynchronous approach will be able to detect such situation and emit
warning. Thus, let's remove warn_alloc().
[1] https://bugzilla.kernel.org/show_bug.cgi?id=192981
[2] http://lkml.kernel.org/r/CAM_iQpWuPVGc2ky8M-9yukECtS+zKjiDasNymX7rMcBjBFyM_A@mail.gmail.com
[3] commit db73ee0d46379922 ("mm, vmscan: do not loop on too_many_isolated for ever"))
Link: http://lkml.kernel.org/r/1509017339-4802-1-git-send-email-penguin-kernel@I-love.SAKURA.ne.jp
Signed-off-by: Tetsuo Handa <[email protected]>
Reported-by: Cong Wang <[email protected]>
Reported-by: yuwang.yuwang <[email protected]>
Reported-by: Johannes Weiner <[email protected]>
Acked-by: Michal Hocko <[email protected]>
Acked-by: Johannes Weiner <[email protected]>
Cc: Vlastimil Babka <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: Dave Hansen <[email protected]>
Cc: Sergey Senozhatsky <[email protected]>
Cc: Petr Mladek <[email protected]>
Cc: Steven Rostedt <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
tty_clamp_line(struct tty *tty, const struct tty_ctx *ctx, u_int px, u_int py,
u_int nx, u_int *i, u_int *x, u_int *rx, u_int *ry)
{
struct window_pane *wp = ctx->wp;
u_int xoff = wp->xoff + px;
if (!tty_is_visible(tty, ctx, px, py, nx, 1))
return (0);
*ry = ctx->yoff + py - ctx->oy;
if (xoff >= ctx->ox && xoff + nx <= ctx->ox + ctx->sx) {
/* All visible. */
*i = 0;
*x = ctx->xoff + px - ctx->ox;
*rx = nx;
} else if (xoff < ctx->ox && xoff + nx > ctx->ox + ctx->sx) {
/* Both left and right not visible. */
*i = ctx->ox;
*x = 0;
*rx = ctx->sx;
} else if (xoff < ctx->ox) {
/* Left not visible. */
*i = ctx->ox - (ctx->xoff + px);
*x = 0;
*rx = nx - *i;
} else {
/* Right not visible. */
*i = 0;
*x = (ctx->xoff + px) - ctx->ox;
*rx = ctx->sx - *x;
}
if (*rx > nx)
fatalx("%s: x too big, %u > %u", __func__, *rx, nx);
return (1);
}
| 0 | [] | src | b32e1d34e10a0da806823f57f02a4ae6e93d756e | 65,289,045,038,995,200,000,000,000,000,000,000,000 | 36 |
evbuffer_new and bufferevent_new can both fail (when malloc fails) and
return NULL. GitHub issue 1547.
|
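Since both constructors return NULL when malloc fails, every call site needs a check. A sketch in the error-handling style already used nearby (fatalx, as in tty_clamp_line above):

```c
/* Fail loudly on allocation failure rather than dereferencing a
 * NULL event buffer later. */
struct evbuffer *evb = evbuffer_new();
if (evb == NULL)
    fatalx("out of memory");
```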
static int ims_pcu_handle_firmware_update(struct ims_pcu *pcu,
const struct firmware *fw)
{
unsigned int n_fw_records;
int retval;
dev_info(pcu->dev, "Updating firmware %s, size: %zu\n",
IMS_PCU_FIRMWARE_NAME, fw->size);
n_fw_records = ims_pcu_count_fw_records(fw);
retval = ims_pcu_flash_firmware(pcu, fw, n_fw_records);
if (retval)
goto out;
retval = ims_pcu_execute_bl_command(pcu, LAUNCH_APP, NULL, 0, 0);
if (retval)
dev_err(pcu->dev,
"Failed to start application image, error: %d\n",
retval);
out:
pcu->update_firmware_status = retval;
sysfs_notify(&pcu->dev->kobj, NULL, "update_firmware_status");
return retval;
}
| 0 | ["CWE-703"] | linux | a0ad220c96692eda76b2e3fd7279f3dcd1d8a8ff | 106,190,797,762,432,690,000,000,000,000,000,000,000 | 26 |
Input: ims-pcu - sanity check against missing interfaces
A malicious device with a missing interface can make the driver oops.
Add sanity checking.
Signed-off-by: Oliver Neukum <[email protected]>
CC: [email protected]
Signed-off-by: Dmitry Torokhov <[email protected]>
|
inline void Softmax(const float* input_data, const Dims<4>& input_dims,
float beta, float* output_data,
const Dims<4>& output_dims) {
Softmax(input_data, DimsToShape(input_dims), beta, output_data,
DimsToShape(output_dims));
}
| 0 | ["CWE-703", "CWE-835"] | tensorflow | dfa22b348b70bb89d6d6ec0ff53973bacb4f4695 | 167,457,934,222,326,020,000,000,000,000,000,000,000 | 6 |
Prevent a division by 0 in average ops.
PiperOrigin-RevId: 385184660
Change-Id: I7affd4554f9b336fca29ac68f633232c094d0bd3
|
static void xfrm_audit_common_policyinfo(struct xfrm_policy *xp,
struct audit_buffer *audit_buf)
{
struct xfrm_sec_ctx *ctx = xp->security;
struct xfrm_selector *sel = &xp->selector;
if (ctx)
audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);
switch (sel->family) {
case AF_INET:
audit_log_format(audit_buf, " src=%pI4", &sel->saddr.a4);
if (sel->prefixlen_s != 32)
audit_log_format(audit_buf, " src_prefixlen=%d",
sel->prefixlen_s);
audit_log_format(audit_buf, " dst=%pI4", &sel->daddr.a4);
if (sel->prefixlen_d != 32)
audit_log_format(audit_buf, " dst_prefixlen=%d",
sel->prefixlen_d);
break;
case AF_INET6:
audit_log_format(audit_buf, " src=%pI6", sel->saddr.a6);
if (sel->prefixlen_s != 128)
audit_log_format(audit_buf, " src_prefixlen=%d",
sel->prefixlen_s);
audit_log_format(audit_buf, " dst=%pI6", sel->daddr.a6);
if (sel->prefixlen_d != 128)
audit_log_format(audit_buf, " dst_prefixlen=%d",
sel->prefixlen_d);
break;
}
}
| 0 |
[
"CWE-125"
] |
ipsec
|
7bab09631c2a303f87a7eb7e3d69e888673b9b7e
| 230,169,368,633,517,400,000,000,000,000,000,000,000 | 33 |
xfrm: policy: check policy direction value
The 'dir' parameter in xfrm_migrate() is a user-controlled byte which is used
as an array index. This can lead to an out-of-bound access, kernel lockup and
DoS. Add a check for the 'dir' value.
This fixes CVE-2017-11600.
References: https://bugzilla.redhat.com/show_bug.cgi?id=1474928
Fixes: 80c9abaabf42 ("[XFRM]: Extension for dynamic update of endpoint address(es)")
Cc: <[email protected]> # v2.6.21-rc1
Reported-by: "bo Zhang" <[email protected]>
Signed-off-by: Vladis Dronov <[email protected]>
Signed-off-by: Steffen Klassert <[email protected]>
|
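A hedged sketch of the fix pattern described above: the user-controlled byte is bounds-checked before it can index a per-direction array. XFRM_POLICY_MAX is the real kernel bound; the helper itself is illustrative, not the exact patch.
/* Sketch: validate a userspace-supplied direction before it is
 * used as an array index anywhere in the migrate path. */
static int xfrm_check_dir(u8 dir)
{
	if (dir >= XFRM_POLICY_MAX)
		return -EINVAL; /* blocks the out-of-bounds access */
	return 0;
}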
message_handler (const gchar *log_domain,
GLogLevelFlags log_level,
const gchar *message,
gpointer user_data)
{
/* Make this look like normal console output */
if (log_level & G_LOG_LEVEL_DEBUG)
g_printerr ("F: %s\n", message);
else
g_printerr ("%s: %s\n", g_get_prgname (), message);
}
| 0 |
[
"CWE-94",
"CWE-74"
] |
flatpak
|
aeb6a7ab0abaac4a8f4ad98b3df476d9de6b8bd4
| 325,818,870,859,075,670,000,000,000,000,000,000,000 | 11 |
portal: Convert --env in extra-args into --env-fd
This hides overridden variables from the command-line, which means
processes running under other uids can't see them in /proc/*/cmdline,
which might be important if they contain secrets.
Signed-off-by: Simon McVittie <[email protected]>
Part-of: https://github.com/flatpak/flatpak/security/advisories/GHSA-4ppf-fxf6-vxg2
|
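The design choice above generalizes: secrets must never ride in argv, because any local user can read /proc/<pid>/cmdline. A hedged sketch of the file-descriptor handoff using memfd; flatpak's actual --env-fd plumbing differs, and make_env_fd is an illustrative name.
#define _GNU_SOURCE
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

/* Sketch: stash "VAR=value\0" records in an anonymous memfd; only
 * the fd number (e.g. --env-fd=3) ever appears on the command line. */
static int make_env_fd(const char *payload, size_t len)
{
	int fd = memfd_create("env", 0); /* left inheritable across exec */
	if (fd < 0)
		return -1;
	if (write(fd, payload, len) != (ssize_t)len) {
		close(fd);
		return -1;
	}
	lseek(fd, 0, SEEK_SET); /* child reads from the beginning */
	return fd;
}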
static inline void posix_cpu_timers_init_group(struct signal_struct *sig) { }
| 0 |
[
"CWE-416",
"CWE-703"
] |
linux
|
2b7e8665b4ff51c034c55df3cff76518d1a9ee3a
| 225,085,097,159,936,500,000,000,000,000,000,000,000 | 1 |
fork: fix incorrect fput of ->exe_file causing use-after-free
Commit 7c051267931a ("mm, fork: make dup_mmap wait for mmap_sem for
write killable") made it possible to kill a forking task while it is
waiting to acquire its ->mmap_sem for write, in dup_mmap().
However, it was overlooked that this introduced an new error path before
a reference is taken on the mm_struct's ->exe_file. Since the
->exe_file of the new mm_struct was already set to the old ->exe_file by
the memcpy() in dup_mm(), it was possible for the mmput() in the error
path of dup_mm() to drop a reference to ->exe_file which was never
taken.
This caused the struct file to later be freed prematurely.
Fix it by updating mm_init() to NULL out the ->exe_file, in the same
place it clears other things like the list of mmaps.
This bug was found by syzkaller. It can be reproduced using the
following C program:
#define _GNU_SOURCE
#include <pthread.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <sys/wait.h>
#include <unistd.h>
static void *mmap_thread(void *_arg)
{
for (;;) {
mmap(NULL, 0x1000000, PROT_READ,
MAP_POPULATE|MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
}
}
static void *fork_thread(void *_arg)
{
usleep(rand() % 10000);
fork();
return NULL; /* satisfy the void * return type */
}
int main(void)
{
fork();
fork();
fork();
for (;;) {
if (fork() == 0) {
pthread_t t;
pthread_create(&t, NULL, mmap_thread, NULL);
pthread_create(&t, NULL, fork_thread, NULL);
usleep(rand() % 10000);
syscall(__NR_exit_group, 0);
}
wait(NULL);
}
}
No special kernel config options are needed. It usually causes a NULL
pointer dereference in __remove_shared_vm_struct() during exit, or in
dup_mmap() (which is usually inlined into copy_process()) during fork.
Both are due to a vm_area_struct's ->vm_file being used after it's
already been freed.
Google Bug Id: 64772007
Link: http://lkml.kernel.org/r/[email protected]
Fixes: 7c051267931a ("mm, fork: make dup_mmap wait for mmap_sem for write killable")
Signed-off-by: Eric Biggers <[email protected]>
Tested-by: Mark Rutland <[email protected]>
Acked-by: Michal Hocko <[email protected]>
Cc: Dmitry Vyukov <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Konstantin Khlebnikov <[email protected]>
Cc: Oleg Nesterov <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Vlastimil Babka <[email protected]>
Cc: <[email protected]> [v4.7+]
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
static int _meta_flag_preparse(token_t *tokens, const size_t ntokens,
struct _meta_flags *of, char **errstr) {
unsigned int i;
int32_t tmp_int;
uint8_t seen[127] = {0};
// Start just past the key token. Look at first character of each token.
for (i = KEY_TOKEN+1; i < ntokens-1; i++) {
uint8_t o = (uint8_t)tokens[i].value[0];
// zero out repeat flags so we don't over-parse for return data.
if (o >= 127 || seen[o] != 0) {
*errstr = "CLIENT_ERROR duplicate flag";
return -1;
}
seen[o] = 1;
switch (o) {
/* Negative exptimes can underflow and end up immortal. realtime() will
immediately expire values that are greater than REALTIME_MAXDELTA, but less
than process_started, so lets aim for that. */
case 'N':
of->locked = 1;
of->vivify = 1;
if (!safe_strtol(tokens[i].value+1, &tmp_int)) {
*errstr = "CLIENT_ERROR bad token in command line format";
of->has_error = 1;
} else {
of->autoviv_exptime = realtime(EXPTIME_TO_POSITIVE_TIME(tmp_int));
}
break;
case 'T':
of->locked = 1;
if (!safe_strtol(tokens[i].value+1, &tmp_int)) {
*errstr = "CLIENT_ERROR bad token in command line format";
of->has_error = 1;
} else {
of->exptime = realtime(EXPTIME_TO_POSITIVE_TIME(tmp_int));
of->new_ttl = true;
}
break;
case 'R':
of->locked = 1;
if (!safe_strtol(tokens[i].value+1, &tmp_int)) {
*errstr = "CLIENT_ERROR bad token in command line format";
of->has_error = 1;
} else {
of->recache_time = realtime(EXPTIME_TO_POSITIVE_TIME(tmp_int));
}
break;
case 'l':
of->la = 1;
of->locked = 1; // need locked to delay LRU bump
break;
case 'O':
break;
case 'k': // known but no special handling
case 's':
case 't':
case 'c':
case 'f':
break;
case 'v':
of->value = 1;
break;
case 'h':
of->locked = 1; // need locked to delay LRU bump
break;
case 'u':
of->no_update = 1;
break;
case 'q':
of->no_reply = 1;
break;
// mset-related.
case 'F':
if (!safe_strtoul(tokens[i].value+1, &of->client_flags)) {
of->has_error = true;
}
break;
case 'S':
if (!safe_strtol(tokens[i].value+1, &tmp_int)) {
of->has_error = true;
} else {
// Size is adjusted for underflow or overflow once the
// \r\n terminator is added.
if (tmp_int < 0 || tmp_int > (INT_MAX - 2)) {
*errstr = "CLIENT_ERROR invalid length";
of->has_error = true;
} else {
of->value_len = tmp_int + 2; // \r\n
}
}
break;
case 'C': // mset, mdelete, marithmetic
if (!safe_strtoull(tokens[i].value+1, &of->req_cas_id)) {
*errstr = "CLIENT_ERROR bad token in command line format";
of->has_error = true;
} else {
of->has_cas = true;
}
break;
case 'M': // mset and marithmetic mode switch
if (tokens[i].length != 2) {
*errstr = "CLIENT_ERROR incorrect length for M token";
of->has_error = 1;
} else {
of->mode = tokens[i].value[1];
}
break;
case 'J': // marithmetic initial value
if (!safe_strtoull(tokens[i].value+1, &of->initial)) {
*errstr = "CLIENT_ERROR invalid numeric initial value";
of->has_error = 1;
}
break;
case 'D': // marithmetic delta value
if (!safe_strtoull(tokens[i].value+1, &of->delta)) {
*errstr = "CLIENT_ERROR invalid numeric delta value";
of->has_error = 1;
}
break;
case 'I':
of->set_stale = 1;
break;
default: // unknown flag, bail.
*errstr = "CLIENT_ERROR invalid flag";
return -1;
}
}
return of->has_error ? -1 : 0;
}
| 0 |
[] |
memcached
|
f249724cedcab6605ca8a0769ac4b356a8124f63
| 312,797,347,508,376,330,000,000,000,000,000,000,000 | 130 |
crash fix: errstr wasn't initialized in metaget
If meta_flag_preparse bailed out early, it would try to read
uninitialized memory.
|
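A hedged sketch of the crash-fix pattern named in the message: the caller seeds errstr with a safe default so an early bailout from the parser never leaves it dangling. The conn variable c and the out_errstring call are illustrative, not the exact memcached call site.
struct _meta_flags of = {0};
/* Sketch: give errstr a safe default so an early return from the
 * parser never leaves the caller printing uninitialized memory. */
char *errstr = "CLIENT_ERROR bad command line format";
if (_meta_flag_preparse(tokens, ntokens, &of, &errstr) != 0) {
	out_errstring(c, errstr); /* always points at a valid string */
	return;
}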
static int __sdt_alloc(const struct cpumask *cpu_map)
{
struct sched_domain_topology_level *tl;
int j;
for_each_sd_topology(tl) {
struct sd_data *sdd = &tl->data;
sdd->sd = alloc_percpu(struct sched_domain *);
if (!sdd->sd)
return -ENOMEM;
sdd->sg = alloc_percpu(struct sched_group *);
if (!sdd->sg)
return -ENOMEM;
sdd->sgp = alloc_percpu(struct sched_group_power *);
if (!sdd->sgp)
return -ENOMEM;
for_each_cpu(j, cpu_map) {
struct sched_domain *sd;
struct sched_group *sg;
struct sched_group_power *sgp;
sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(),
GFP_KERNEL, cpu_to_node(j));
if (!sd)
return -ENOMEM;
*per_cpu_ptr(sdd->sd, j) = sd;
sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
GFP_KERNEL, cpu_to_node(j));
if (!sg)
return -ENOMEM;
sg->next = sg;
*per_cpu_ptr(sdd->sg, j) = sg;
sgp = kzalloc_node(sizeof(struct sched_group_power) + cpumask_size(),
GFP_KERNEL, cpu_to_node(j));
if (!sgp)
return -ENOMEM;
*per_cpu_ptr(sdd->sgp, j) = sgp;
}
}
return 0;
}
| 0 |
[
"CWE-200"
] |
linux
|
4efbc454ba68def5ef285b26ebfcfdb605b52755
| 326,239,010,577,336,300,000,000,000,000,000,000,000 | 52 |
sched: Fix information leak in sys_sched_getattr()
We're copying the on-stack structure to userspace, but forgot to give
the right number of bytes to copy. This allows the calling process to
obtain up to PAGE_SIZE bytes from the stack (and possibly adjacent
kernel memory).
This fix copies only as much as we actually have on the stack
(attr->size defaults to the size of the struct) and leaves the rest of
the userspace-provided buffer untouched.
Found using kmemcheck + trinity.
Fixes: d50dde5a10f30 ("sched: Add new scheduler syscalls to support an extended scheduling parameters ABI")
Cc: Dario Faggioli <[email protected]>
Cc: Juri Lelli <[email protected]>
Cc: Ingo Molnar <[email protected]>
Signed-off-by: Vegard Nossum <[email protected]>
Signed-off-by: Peter Zijlstra <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Thomas Gleixner <[email protected]>
|
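A hedged sketch of the corrected copy described above: only the bytes actually initialized on the kernel stack leave the kernel. The helper shape is simplified; the key line mirrors the fix (attr->size, not a caller-supplied size).
/* Sketch: kattr was filled on the stack with kattr->size valid
 * bytes; copying any more would leak adjacent stack contents. */
static int sched_read_attr(struct sched_attr __user *uattr,
			   struct sched_attr *kattr)
{
	if (copy_to_user(uattr, kattr, kattr->size))
		return -EFAULT;
	return 0;
}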
static inline int security_inode_create(struct inode *dir,
struct dentry *dentry,
int mode)
{
return 0;
}
| 0 |
[] |
linux-2.6
|
ee18d64c1f632043a02e6f5ba5e045bb26a5465f
| 30,541,137,555,398,145,000,000,000,000,000,000,000 | 6 |
KEYS: Add a keyctl to install a process's session keyring on its parent [try #6]
Add a keyctl to install a process's session keyring onto its parent. This
replaces the parent's session keyring. Because the COW credential code does
not permit one process to change another process's credentials directly, the
change is deferred until userspace next starts executing again. Normally this
will be after a wait*() syscall.
To support this, three new security hooks have been provided:
cred_alloc_blank() to allocate unset security creds, cred_transfer() to fill in
the blank security creds and key_session_to_parent() - which asks the LSM if
the process may replace its parent's session keyring.
The replacement may only happen if the process has the same ownership details
as its parent, and the process has LINK permission on the session keyring, and
the session keyring is owned by the process, and the LSM permits it.
Note that this requires alteration to each architecture's notify_resume path.
This has been done for all arches barring blackfin, m68k* and xtensa, all of
which need assembly alteration to support TIF_NOTIFY_RESUME. This allows the
replacement to be performed at the point the parent process resumes userspace
execution.
This allows the userspace AFS pioctl emulation to fully emulate newpag() and
the VIOCSETTOK and VIOCSETTOK2 pioctls, all of which require the ability to
alter the parent process's PAG membership. However, since kAFS doesn't use
PAGs per se, but rather dumps the keys into the session keyring, the session
keyring of the parent must be replaced if, for example, VIOCSETTOK is passed
the newpag flag.
This can be tested with the following program:
#include <stdio.h>
#include <stdlib.h>
#include <keyutils.h>
#define KEYCTL_SESSION_TO_PARENT 18
#define OSERROR(X, S) do { if ((long)(X) == -1) { perror(S); exit(1); } } while(0)
int main(int argc, char **argv)
{
key_serial_t keyring, key;
long ret;
keyring = keyctl_join_session_keyring(argv[1]);
OSERROR(keyring, "keyctl_join_session_keyring");
key = add_key("user", "a", "b", 1, keyring);
OSERROR(key, "add_key");
ret = keyctl(KEYCTL_SESSION_TO_PARENT);
OSERROR(ret, "KEYCTL_SESSION_TO_PARENT");
return 0;
}
Compiled and linked with -lkeyutils, you should see something like:
[dhowells@andromeda ~]$ keyctl show
Session Keyring
-3 --alswrv 4043 4043 keyring: _ses
355907932 --alswrv 4043 -1 \_ keyring: _uid.4043
[dhowells@andromeda ~]$ /tmp/newpag
[dhowells@andromeda ~]$ keyctl show
Session Keyring
-3 --alswrv 4043 4043 keyring: _ses
1055658746 --alswrv 4043 4043 \_ user: a
[dhowells@andromeda ~]$ /tmp/newpag hello
[dhowells@andromeda ~]$ keyctl show
Session Keyring
-3 --alswrv 4043 4043 keyring: hello
340417692 --alswrv 4043 4043 \_ user: a
Where the test program creates a new session keyring, sticks a user key named
'a' into it and then installs it on its parent.
Signed-off-by: David Howells <[email protected]>
Signed-off-by: James Morris <[email protected]>
|
xdr_dprinc_arg(XDR *xdrs, dprinc_arg *objp)
{
if (!xdr_ui_4(xdrs, &objp->api_version)) {
return (FALSE);
}
if (!xdr_krb5_principal(xdrs, &objp->princ)) {
return (FALSE);
}
return (TRUE);
}
| 0 |
[
"CWE-703"
] |
krb5
|
a197e92349a4aa2141b5dff12e9dd44c2a2166e3
| 182,414,707,019,207,330,000,000,000,000,000,000,000 | 10 |
Fix kadm5/gssrpc XDR double free [CVE-2014-9421]
[MITKRB5-SA-2015-001] In auth_gssapi_unwrap_data(), do not free
partial deserialization results upon failure to deserialize. This
responsibility belongs to the callers, svctcp_getargs() and
svcudp_getargs(); doing it in the unwrap function results in freeing
the results twice.
In xdr_krb5_tl_data() and xdr_krb5_principal(), null out the pointers
we are freeing, as other XDR functions such as xdr_bytes() and
xdr_string() do.
ticket: 8056 (new)
target_version: 1.13.1
tags: pullup
|
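A hedged sketch of the null-after-free idiom the message describes, shaped as a standalone helper: once the pointer is cleared, a second cleanup pass over the same object is a harmless no-op. Context acquisition is omitted and the helper name is illustrative.
static bool_t xdr_free_principal(XDR *xdrs, krb5_context context,
				 krb5_principal *objp)
{
	if (xdrs->x_op == XDR_FREE && *objp != NULL) {
		krb5_free_principal(context, *objp);
		*objp = NULL; /* a later free sees NULL, not a stale pointer */
	}
	return TRUE;
}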
static int spl_autoload(const char *class_name, const char * lc_name, int class_name_len, const char * file_extension TSRMLS_DC) /* {{{ */
{
char *class_file;
int class_file_len;
int dummy = 1;
zend_file_handle file_handle;
zend_op_array *new_op_array;
zval *result = NULL;
int ret;
class_file_len = spprintf(&class_file, 0, "%s%s", lc_name, file_extension);
#if DEFAULT_SLASH != '\\'
{
char *ptr = class_file;
char *end = ptr + class_file_len;
while ((ptr = memchr(ptr, '\\', (end - ptr))) != NULL) {
*ptr = DEFAULT_SLASH;
}
}
#endif
ret = php_stream_open_for_zend_ex(class_file, &file_handle, USE_PATH|STREAM_OPEN_FOR_INCLUDE TSRMLS_CC);
if (ret == SUCCESS) {
if (!file_handle.opened_path) {
file_handle.opened_path = estrndup(class_file, class_file_len);
}
if (zend_hash_add(&EG(included_files), file_handle.opened_path, strlen(file_handle.opened_path)+1, (void *)&dummy, sizeof(int), NULL)==SUCCESS) {
new_op_array = zend_compile_file(&file_handle, ZEND_REQUIRE TSRMLS_CC);
zend_destroy_file_handle(&file_handle TSRMLS_CC);
} else {
new_op_array = NULL;
zend_file_handle_dtor(&file_handle TSRMLS_CC);
}
if (new_op_array) {
EG(return_value_ptr_ptr) = &result;
EG(active_op_array) = new_op_array;
if (!EG(active_symbol_table)) {
zend_rebuild_symbol_table(TSRMLS_C);
}
zend_execute(new_op_array TSRMLS_CC);
destroy_op_array(new_op_array TSRMLS_CC);
efree(new_op_array);
if (!EG(exception)) {
if (EG(return_value_ptr_ptr)) {
zval_ptr_dtor(EG(return_value_ptr_ptr));
}
}
efree(class_file);
return zend_hash_exists(EG(class_table), (char*)lc_name, class_name_len+1);
}
}
efree(class_file);
return 0;
| 0 |
[] |
php-src
|
b584b513983319be170f02828bc7c12850b40320
| 59,316,089,617,476,360,000,000,000,000,000,000,000 | 60 |
Fixed bug #70290 (Null pointer deref (segfault) in spl_autoload via ob_start)
|
get_executing (MonoMethod *m, gint32 no, gint32 ilo, gboolean managed, gpointer data)
{
MonoMethod **dest = data;
/* skip unmanaged frames */
if (!managed)
return FALSE;
if (!(*dest)) {
if (!strcmp (m->klass->name_space, "System.Reflection"))
return FALSE;
*dest = m;
return TRUE;
}
return FALSE;
}
| 0 |
[
"CWE-264"
] |
mono
|
035c8587c0d8d307e45f1b7171a0d337bb451f1e
| 262,056,140,424,357,100,000,000,000,000,000,000,000 | 16 |
Allow only primitive types/enums in RuntimeHelpers.InitializeArray ().
|
dp_packet_ip_checksum_bad(const struct dp_packet *p OVS_UNUSED)
{
return false;
}
| 0 |
[
"CWE-400"
] |
ovs
|
3512fb512c76a1f08eba4005aa2eb69160d0840e
| 5,440,970,865,838,749,000,000,000,000,000,000,000 | 4 |
flow: Support extra padding length.
Although not required, padding can be optionally added until
the packet length is MTU bytes. A packet with extra padding
currently fails sanity checks.
Vulnerability: CVE-2020-35498
Fixes: fa8d9001a624 ("miniflow_extract: Properly handle small IP packets.")
Reported-by: Joakim Hindersson <[email protected]>
Acked-by: Ilya Maximets <[email protected]>
Signed-off-by: Flavio Leitner <[email protected]>
Signed-off-by: Ilya Maximets <[email protected]>
|
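A hedged sketch of the relaxed sanity check the message implies: with trailing link-layer padding allowed, "at least tot_len bytes" is valid while "fewer" is not. IP_HEADER_LEN is the real OVS constant; the helper is illustrative.
/* Sketch: a frame may carry padding after the IP payload, so the
 * L3 buffer only has to be at least tot_len long; surplus bytes
 * are ignored by the parser rather than failing the packet. */
static bool ipv4_len_ok(size_t l3_size, uint16_t tot_len)
{
	return tot_len >= IP_HEADER_LEN && l3_size >= tot_len;
}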
void saio_del(GF_Box *s)
{
GF_SampleAuxiliaryInfoOffsetBox *ptr = (GF_SampleAuxiliaryInfoOffsetBox*)s;
if (ptr == NULL) return;
if (ptr->offsets) gf_free(ptr->offsets);
if (ptr->offsets_large) gf_free(ptr->offsets_large);
gf_free(ptr);
| 0 |
[
"CWE-400",
"CWE-401"
] |
gpac
|
d2371b4b204f0a3c0af51ad4e9b491144dd1225c
| 327,692,177,939,004,750,000,000,000,000,000,000,000 | 8 |
prevent dref memleak on invalid input (#1183)
|
void EvalSparseHybridImpl(TfLiteContext* context, TfLiteNode* node,
TfLiteFullyConnectedParams* params, OpData* data,
const TfLiteTensor* input, const TfLiteTensor* filter,
const TfLiteTensor* bias, int thread_start,
int thread_end, TfLiteTensor* input_quantized,
TfLiteTensor* scaling_factors,
TfLiteTensor* accum_scratch, TfLiteTensor* row_sums,
TfLiteTensor* input_offsets, TfLiteTensor* output) {
ruy::profiler::ScopeLabel label("FullyConnected");
ruy::profiler::ScopeLabel inner_label("Sparse Hybrid Kernel");
const auto& input_shape = GetTensorShape(input);
const auto& output_shape = GetTensorShape(output);
const auto& filter_shape = GetTensorShape(filter);
const int input_dims_count = input_shape.DimensionsCount();
const int output_dims_count = output_shape.DimensionsCount();
const int filter_dims_count = filter_shape.DimensionsCount();
const int batch_size = thread_end - thread_start;
const int input_depth = MatchingDim(filter_shape, filter_dims_count - 1,
input_shape, input_dims_count - 1);
const int output_depth = MatchingDim(filter_shape, filter_dims_count - 2,
output_shape, output_dims_count - 1);
const int per_thread_input_size = batch_size * input_depth;
const float* per_thread_input =
GetTensorData<float>(input) + thread_start * input_depth;
float* per_thread_output =
GetTensorData<float>(output) + thread_start * output_depth;
// Output = bias if bias tensor exists.
if (bias) {
tensor_utils::VectorBatchVectorAssign(GetTensorData<float>(bias),
output_depth, batch_size,
per_thread_output);
} else {
std::fill_n(per_thread_output, batch_size * output_depth, 0.0f);
}
// Save matrix multiplication computation for all zero input.
if (tensor_utils::IsZeroVector(per_thread_input, per_thread_input_size)) {
tensor_utils::ApplyActivationToVector(
per_thread_output, batch_size * output_depth, params->activation,
per_thread_output);
return;
}
// Quantize input from float to uint8 + quantization params (scaling factor).
float* scaling_factors_ptr =
GetTensorData<float>(scaling_factors) + thread_start;
int32_t* input_offset_ptr = nullptr;
int32_t* row_sums_ptr = nullptr;
if (params->asymmetric_quantize_inputs) {
input_offset_ptr = GetTensorData<int32_t>(input_offsets) + thread_start;
row_sums_ptr = GetTensorData<int32_t>(row_sums);
}
int8_t* quant_data =
GetTensorData<int8_t>(input_quantized) + thread_start * input_depth;
tensor_utils::BatchQuantizeFloats(per_thread_input, batch_size, input_depth,
quant_data, scaling_factors_ptr,
input_offset_ptr,
params->asymmetric_quantize_inputs);
for (int b = 0; b < batch_size; ++b) {
// Incorporate scaling of the filter.
scaling_factors_ptr[b] *= filter->params.scale;
}
// Compute output += weight * quantized_input
TfLiteTensor* filter_ledger = &context->tensors[node->temporaries->data[5]];
tensor_utils::SparseMatrixBatchVectorMultiplyAccumulate(
GetTensorData<int8_t>(filter), GetTensorData<uint8_t>(filter_ledger),
output_depth, input_depth, quant_data, scaling_factors_ptr, batch_size,
per_thread_output);
// Apply activation function to floats.
tensor_utils::ApplyActivationToVector(per_thread_output,
batch_size * output_depth,
params->activation, per_thread_output);
}
| 0 |
[
"CWE-369"
] |
tensorflow
|
718721986aa137691ee23f03638867151f74935f
| 209,843,386,670,462,520,000,000,000,000,000,000,000 | 77 |
Prevent division by 0 in `fully_connected.cc`
PiperOrigin-RevId: 385137282
Change-Id: If201e69b6e0048f0be001330b4b977e2b46db2cb
|
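A hedged sketch of the divide-by-zero guard the message refers to, as it would sit in a Prepare/Eval function returning TfLiteStatus. TF_LITE_ENSURE is the real macro; the surrounding lines are illustrative rather than the exact call site.
// Sketch: a zero input depth would later divide the element count.
const int input_depth = filter_shape.Dims(filter_dims_count - 1);
TF_LITE_ENSURE(context, input_depth != 0);
const int batch_size = input_shape.FlatSize() / input_depth;  // now safe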
static uchar* get_grant_table(GRANT_NAME *buff, size_t *length,
my_bool not_used __attribute__((unused)))
{
*length=buff->key_length;
return (uchar*) buff->hash_key;
}
| 0 |
[] |
mysql-server
|
25d1b7e03b9b375a243fabdf0556c063c7282361
| 218,334,366,762,301,600,000,000,000,000,000,000,000 | 6 |
Bug #22722946: integer overflow may lead to wrong results in get_56_lenc_string
|
_gcry_ecc_gost_sign (gcry_mpi_t input, ECC_secret_key *skey,
gcry_mpi_t r, gcry_mpi_t s)
{
gpg_err_code_t rc = 0;
gcry_mpi_t k, dr, sum, ke, x, e;
mpi_point_struct I;
gcry_mpi_t hash;
const void *abuf;
unsigned int abits, qbits;
mpi_ec_t ctx;
if (DBG_CIPHER)
log_mpidump ("gost sign hash ", input );
qbits = mpi_get_nbits (skey->E.n);
/* Convert the INPUT into an MPI if needed. */
if (mpi_is_opaque (input))
{
abuf = mpi_get_opaque (input, &abits);
rc = _gcry_mpi_scan (&hash, GCRYMPI_FMT_USG, abuf, (abits+7)/8, NULL);
if (rc)
return rc;
if (abits > qbits)
mpi_rshift (hash, hash, abits - qbits);
}
else
hash = input;
k = NULL;
dr = mpi_alloc (0);
sum = mpi_alloc (0);
ke = mpi_alloc (0);
e = mpi_alloc (0);
x = mpi_alloc (0);
point_init (&I);
ctx = _gcry_mpi_ec_p_internal_new (skey->E.model, skey->E.dialect, 0,
skey->E.p, skey->E.a, skey->E.b);
mpi_mod (e, input, skey->E.n); /* e = hash mod n */
if (!mpi_cmp_ui (e, 0))
mpi_set_ui (e, 1);
/* Two loops to avoid R or S are zero. This is more of a joke than
a real demand because the probability of them being zero is less
than any hardware failure. Some specs however require it. */
do
{
do
{
mpi_free (k);
k = _gcry_dsa_gen_k (skey->E.n, GCRY_STRONG_RANDOM);
_gcry_mpi_ec_mul_point (&I, k, &skey->E.G, ctx);
if (_gcry_mpi_ec_get_affine (x, NULL, &I, ctx))
{
if (DBG_CIPHER)
log_debug ("ecc sign: Failed to get affine coordinates\n");
rc = GPG_ERR_BAD_SIGNATURE;
goto leave;
}
mpi_mod (r, x, skey->E.n); /* r = x mod n */
}
while (!mpi_cmp_ui (r, 0));
mpi_mulm (dr, skey->d, r, skey->E.n); /* dr = d*r mod n */
mpi_mulm (ke, k, e, skey->E.n); /* ke = k*e mod n */
mpi_addm (s, ke, dr, skey->E.n); /* sum = (k*e+ d*r) mod n */
}
while (!mpi_cmp_ui (s, 0));
if (DBG_CIPHER)
{
log_mpidump ("gost sign result r ", r);
log_mpidump ("gost sign result s ", s);
}
leave:
_gcry_mpi_ec_free (ctx);
point_free (&I);
mpi_free (x);
mpi_free (e);
mpi_free (ke);
mpi_free (sum);
mpi_free (dr);
mpi_free (k);
if (hash != input)
mpi_free (hash);
return rc;
}
| 1 |
[
"CWE-203"
] |
libgcrypt
|
7c2943309d14407b51c8166c4dcecb56a3628567
| 310,920,962,825,354,200,000,000,000,000,000,000,000 | 94 |
dsa,ecdsa: Fix use of nonce, use larger one.
* cipher/dsa-common.c (_gcry_dsa_modify_k): New.
* cipher/pubkey-internal.h (_gcry_dsa_modify_k): New.
* cipher/dsa.c (sign): Use _gcry_dsa_modify_k.
* cipher/ecc-ecdsa.c (_gcry_ecc_ecdsa_sign): Likewise.
* cipher/ecc-gost.c (_gcry_ecc_gost_sign): Likewise.
CVE-id: CVE-2019-13627
GnuPG-bug-id: 4626
Signed-off-by: NIIBE Yutaka <[email protected]>
|
void CiffHeader::print(std::ostream& os, const std::string& prefix) const
{
std::ios::fmtflags f( os.flags() );
os << prefix
<< _("Header, offset") << " = 0x" << std::setw(8) << std::setfill('0')
<< std::hex << std::right << offset_ << "\n";
if (pRootDir_) pRootDir_->print(os, byteOrder_, prefix);
os.flags(f);
} // CiffHeader::print
| 0 |
[
"CWE-400"
] |
exiv2
|
b3d077dcaefb6747fff8204490f33eba5a144edb
| 123,483,883,693,636,650,000,000,000,000,000,000,000 | 9 |
Fix #460 by adding more checks in CiffDirectory::readDirectory
|
static int _rtl_usb_receive(struct ieee80211_hw *hw)
{
struct urb *urb;
int err;
int i;
struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
WARN_ON(0 == rtlusb->rx_urb_num);
/* 1600 == 1514 + max WLAN header + rtk info */
WARN_ON(rtlusb->rx_max_size < 1600);
for (i = 0; i < rtlusb->rx_urb_num; i++) {
err = -ENOMEM;
urb = usb_alloc_urb(0, GFP_KERNEL);
if (!urb)
goto err_out;
err = _rtl_prep_rx_urb(hw, rtlusb, urb, GFP_KERNEL);
if (err < 0) {
pr_err("Failed to prep_rx_urb!!\n");
usb_free_urb(urb);
goto err_out;
}
usb_anchor_urb(urb, &rtlusb->rx_submitted);
err = usb_submit_urb(urb, GFP_KERNEL);
if (err)
goto err_out;
usb_free_urb(urb);
}
return 0;
err_out:
usb_kill_anchored_urbs(&rtlusb->rx_submitted);
_rtl_usb_cleanup_rx(hw);
return err;
}
| 0 |
[
"CWE-400",
"CWE-401"
] |
linux
|
3f93616951138a598d930dcaec40f2bfd9ce43bb
| 28,325,736,381,329,965,000,000,000,000,000,000,000 | 37 |
rtlwifi: prevent memory leak in rtl_usb_probe
In rtl_usb_probe, if the allocation for usb_data fails, the allocated hw
should be released. In addition, the allocated rtlpriv->usb_data should
be released on the error-handling path.
Signed-off-by: Navid Emamdoost <[email protected]>
Signed-off-by: Kalle Valo <[email protected]>
|
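A hedged sketch of the probe-path unwind the message calls for: each allocation made before a failure is released in reverse order. The kcalloc line follows the shape of the fix; surrounding details are simplified.
/* Sketch: free the earlier ieee80211 allocation when the later
 * usb_data allocation fails, instead of leaking it. */
hw = ieee80211_alloc_hw(sizeof(struct rtl_priv), &rtl_ops);
if (!hw)
	return -ENOMEM;
rtlpriv = hw->priv;
rtlpriv->usb_data = kcalloc(RTL_USB_MAX_RX_COUNT, sizeof(u32),
			    GFP_KERNEL);
if (!rtlpriv->usb_data) {
	ieee80211_free_hw(hw); /* unwind before returning */
	return -ENOMEM;
}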
static bool ok_inflater_compressed_block(ok_inflater *inflater) {
const bool is_fixed = inflater->state == OK_INFLATER_STATE_READING_FIXED_COMPRESSED_BLOCK;
const ok_inflater_huffman_tree *literal_tree =
(is_fixed ? inflater->fixed_literal_huffman : inflater->literal_huffman);
const ok_inflater_huffman_tree *distance_tree =
(is_fixed ? inflater->fixed_distance_huffman : inflater->distance_huffman);
// decode literal/length value from input stream
size_t max_write = ok_inflater_can_write_total(inflater);
const uint16_t *tree_lookup_table = literal_tree->lookup_table;
const unsigned int tree_bits = literal_tree->bits;
while (max_write > 0) {
int value = ok_inflater_decode_literal(inflater, tree_lookup_table, tree_bits);
if (value < 0) {
// Needs input
return false;
} else if (value < 256) {
ok_inflater_write_byte(inflater, (uint8_t)value);
max_write--;
} else if (value == 256) {
inflater->state = OK_INFLATER_STATE_READY_FOR_NEXT_BLOCK;
return true;
} else if (value < 286) {
inflater->huffman_code = value - 257;
inflater->state_count = -1;
inflater->state_distance = -1;
if (ok_inflater_distance_with_tree(inflater, distance_tree)) {
max_write = ok_inflater_can_write_total(inflater);
} else {
if (is_fixed) {
inflater->state = OK_INFLATER_STATE_READING_FIXED_DISTANCE;
} else {
inflater->state = OK_INFLATER_STATE_READING_DYNAMIC_DISTANCE;
}
return false;
}
} else {
ok_inflater_error(inflater, "Invalid inflater literal");
return false;
}
}
// Output buffer full
return false;
}
| 0 |
[
"CWE-787"
] |
ok-file-formats
|
e49cdfb84fb5eca2a6261f3c51a3c793fab9f62e
| 315,570,036,355,235,570,000,000,000,000,000,000,000 | 45 |
ok_png: Disallow multiple IHDR chunks (#15)
|
TEST_F(ConnectionManagerUtilityTest, ContinueIfEscapedSlashesNotFoundAndConfiguredToRedirect) {
ON_CALL(config_, pathWithEscapedSlashesAction())
.WillByDefault(Return(envoy::extensions::filters::network::http_connection_manager::v3::
HttpConnectionManager::UNESCAPE_AND_REDIRECT));
TestRequestHeaderMapImpl original_headers;
original_headers.setPath("/xyz%30..//abc");
TestRequestHeaderMapImpl header_map(original_headers);
EXPECT_EQ(ConnectionManagerUtility::NormalizePathAction::Continue,
ConnectionManagerUtility::maybeNormalizePath(header_map, config_));
EXPECT_EQ(header_map.getPathValue(), "/xyz%30..//abc");
}
| 0 |
[
"CWE-22"
] |
envoy
|
5333b928d8bcffa26ab19bf018369a835f697585
| 15,584,658,866,602,168,000,000,000,000,000,000,000 | 12 |
Implement handling of escaped slash characters in URL path
Fixes: CVE-2021-29492
Signed-off-by: Yan Avlasov <[email protected]>
|
void CWebServer::Cmd_ClearLog(WebEmSession & session, const request& req, Json::Value &root)
{
root["status"] = "OK";
root["title"] = "ClearLog";
_log.ClearLog();
}
| 0 |
[
"CWE-89"
] |
domoticz
|
ee70db46f81afa582c96b887b73bcd2a86feda00
| 203,947,333,998,263,400,000,000,000,000,000,000,000 | 6 |
Fixed possible SQL Injection Vulnerability (Thanks to Fabio Carretto!)
|
win_init_size(void)
{
firstwin->w_height = ROWS_AVAIL;
topframe->fr_height = ROWS_AVAIL;
firstwin->w_width = Columns;
topframe->fr_width = Columns;
}
| 0 |
[
"CWE-416"
] |
vim
|
ec66c41d84e574baf8009dbc0bd088d2bc5b2421
| 63,471,513,963,274,820,000,000,000,000,000,000,000 | 7 |
patch 8.1.2136: using freed memory with autocmd from fuzzer
Problem: using freed memory with autocmd from fuzzer. (Dhiraj Mishra,
Dominique Pelle)
Solution: Avoid using "wp" after autocommands. (closes #5041)
|
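A hedged sketch of the defensive pattern behind the fix: a cached window pointer is re-validated after anything that can fire autocommands. win_valid() and apply_autocmds() are real Vim helpers; the surrounding lines are illustrative.
win_T *wp = curwin;
apply_autocmds(EVENT_WINENTER, NULL, NULL, FALSE, curbuf);
if (!win_valid(wp)) /* an autocommand may have freed the window */
	return;
wp->w_redr_status = TRUE; /* only now is wp safe to dereference */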
virNodeDevCapPCIDevParseXML(xmlXPathContextPtr ctxt,
virNodeDeviceDefPtr def,
xmlNodePtr node,
virNodeDevCapPCIDevPtr pci_dev)
{
VIR_XPATH_NODE_AUTORESTORE(ctxt)
xmlNodePtr iommuGroupNode;
xmlNodePtr pciExpress;
xmlNodePtr *nodes = NULL;
int n = 0;
int ret = -1;
virPCIEDeviceInfoPtr pci_express = NULL;
char *tmp = NULL;
size_t i = 0;
ctxt->node = node;
if ((tmp = virXPathString("string(./class[1])", ctxt))) {
if (virStrToLong_i(tmp, NULL, 16, &pci_dev->klass) < 0 ||
pci_dev->klass > 0xffffff) {
virReportError(VIR_ERR_INTERNAL_ERROR,
_("invalid PCI class supplied for '%s'"), def->name);
goto out;
}
VIR_FREE(tmp);
} else {
pci_dev->klass = -1;
}
if (virNodeDevCapsDefParseULong("number(./domain[1])", ctxt,
&pci_dev->domain, def,
_("no PCI domain ID supplied for '%s'"),
_("invalid PCI domain ID supplied for '%s'")) < 0)
goto out;
if (virNodeDevCapsDefParseULong("number(./bus[1])", ctxt,
&pci_dev->bus, def,
_("no PCI bus ID supplied for '%s'"),
_("invalid PCI bus ID supplied for '%s'")) < 0)
goto out;
if (virNodeDevCapsDefParseULong("number(./slot[1])", ctxt,
&pci_dev->slot, def,
_("no PCI slot ID supplied for '%s'"),
_("invalid PCI slot ID supplied for '%s'")) < 0)
goto out;
if (virNodeDevCapsDefParseULong("number(./function[1])", ctxt,
&pci_dev->function, def,
_("no PCI function ID supplied for '%s'"),
_("invalid PCI function ID supplied for '%s'")) < 0)
goto out;
if (virNodeDevCapsDefParseHexId("string(./vendor[1]/@id)", ctxt,
&pci_dev->vendor, def,
_("no PCI vendor ID supplied for '%s'"),
_("invalid PCI vendor ID supplied for '%s'")) < 0)
goto out;
if (virNodeDevCapsDefParseHexId("string(./product[1]/@id)", ctxt,
&pci_dev->product, def,
_("no PCI product ID supplied for '%s'"),
_("invalid PCI product ID supplied for '%s'")) < 0)
goto out;
pci_dev->vendor_name = virXPathString("string(./vendor[1])", ctxt);
pci_dev->product_name = virXPathString("string(./product[1])", ctxt);
if ((n = virXPathNodeSet("./capability", ctxt, &nodes)) < 0)
goto out;
for (i = 0; i < n; i++) {
if (virNodeDevPCICapabilityParseXML(ctxt, nodes[i], pci_dev) < 0)
goto out;
}
VIR_FREE(nodes);
if ((iommuGroupNode = virXPathNode("./iommuGroup[1]", ctxt))) {
if (virNodeDevCapPCIDevIommuGroupParseXML(ctxt, iommuGroupNode,
pci_dev) < 0) {
goto out;
}
}
/* The default value is -1 since zero is valid NUMA node number */
pci_dev->numa_node = -1;
if (virNodeDevCapsDefParseIntOptional("number(./numa[1]/@node)", ctxt,
&pci_dev->numa_node, def,
_("invalid NUMA node ID supplied for '%s'")) < 0)
goto out;
if ((pciExpress = virXPathNode("./pci-express[1]", ctxt))) {
pci_express = g_new0(virPCIEDeviceInfo, 1);
if (virPCIEDeviceInfoParseXML(ctxt, pciExpress, pci_express) < 0)
goto out;
pci_dev->pci_express = pci_express;
pci_express = NULL;
pci_dev->flags |= VIR_NODE_DEV_CAP_FLAG_PCIE;
}
ret = 0;
out:
VIR_FREE(nodes);
VIR_FREE(tmp);
virPCIEDeviceInfoFree(pci_express);
return ret;
}
| 0 |
[
"CWE-119"
] |
libvirt
|
4c4d0e2da07b5a035b26a0ff13ec27070f7c7b1a
| 185,495,564,130,212,030,000,000,000,000,000,000,000 | 109 |
conf: Fix segfault when parsing mdev types
Commit f1b0890 introduced a potential crash due to incorrect operator
precedence when accessing an element from a pointer to an array.
Backtrace below:
#0 virNodeDeviceGetMdevTypesCaps (sysfspath=0x7fff801661e0 "/sys/devices/pci0000:00/0000:00:02.0", mdev_types=0x7fff801c9b40, nmdev_types=0x7fff801c9b48) at ../src/conf/node_device_conf.c:2676
#1 0x00007ffff7caf53d in virNodeDeviceGetPCIDynamicCaps (sysfsPath=0x7fff801661e0 "/sys/devices/pci0000:00/0000:00:02.0", pci_dev=0x7fff801c9ac8) at ../src/conf/node_device_conf.c:2705
#2 0x00007ffff7cae38f in virNodeDeviceUpdateCaps (def=0x7fff80168a10) at ../src/conf/node_device_conf.c:2342
#3 0x00007ffff7cb11c0 in virNodeDeviceObjMatch (obj=0x7fff84002e50, flags=0) at ../src/conf/virnodedeviceobj.c:850
#4 0x00007ffff7cb153d in virNodeDeviceObjListExportCallback (payload=0x7fff84002e50, name=0x7fff801cbc20 "pci_0000_00_02_0", opaque=0x7fffe2ffc6a0) at ../src/conf/virnodedeviceobj.c:909
#5 0x00007ffff7b69146 in virHashForEach (table=0x7fff9814b700 = {...}, iter=0x7ffff7cb149e <virNodeDeviceObjListExportCallback>, opaque=0x7fffe2ffc6a0) at ../src/util/virhash.c:394
#6 0x00007ffff7cb1694 in virNodeDeviceObjListExport (conn=0x7fff98013170, devs=0x7fff98154430, devices=0x7fffe2ffc798, filter=0x7ffff7cf47a1 <virConnectListAllNodeDevicesCheckACL>, flags=0)
at ../src/conf/virnodedeviceobj.c:943
#7 0x00007fffe00694b2 in nodeConnectListAllNodeDevices (conn=0x7fff98013170, devices=0x7fffe2ffc798, flags=0) at ../src/node_device/node_device_driver.c:228
#8 0x00007ffff7e703aa in virConnectListAllNodeDevices (conn=0x7fff98013170, devices=0x7fffe2ffc798, flags=0) at ../src/libvirt-nodedev.c:130
#9 0x000055555557f796 in remoteDispatchConnectListAllNodeDevices (server=0x555555627080, client=0x5555556bf050, msg=0x5555556c0000, rerr=0x7fffe2ffc8a0, args=0x7fffd4008470, ret=0x7fffd40084e0)
at src/remote/remote_daemon_dispatch_stubs.h:1613
#10 0x000055555557f6f9 in remoteDispatchConnectListAllNodeDevicesHelper (server=0x555555627080, client=0x5555556bf050, msg=0x5555556c0000, rerr=0x7fffe2ffc8a0, args=0x7fffd4008470, ret=0x7fffd40084e0)
at src/remote/remote_daemon_dispatch_stubs.h:1591
#11 0x00007ffff7ce9542 in virNetServerProgramDispatchCall (prog=0x555555690c10, server=0x555555627080, client=0x5555556bf050, msg=0x5555556c0000) at ../src/rpc/virnetserverprogram.c:428
#12 0x00007ffff7ce90bd in virNetServerProgramDispatch (prog=0x555555690c10, server=0x555555627080, client=0x5555556bf050, msg=0x5555556c0000) at ../src/rpc/virnetserverprogram.c:302
#13 0x00007ffff7cf042b in virNetServerProcessMsg (srv=0x555555627080, client=0x5555556bf050, prog=0x555555690c10, msg=0x5555556c0000) at ../src/rpc/virnetserver.c:137
#14 0x00007ffff7cf04eb in virNetServerHandleJob (jobOpaque=0x5555556b66b0, opaque=0x555555627080) at ../src/rpc/virnetserver.c:154
#15 0x00007ffff7bd912f in virThreadPoolWorker (opaque=0x55555562bc70) at ../src/util/virthreadpool.c:163
#16 0x00007ffff7bd8645 in virThreadHelper (data=0x55555562bc90) at ../src/util/virthread.c:233
#17 0x00007ffff6d90432 in start_thread () at /lib64/libpthread.so.0
#18 0x00007ffff75c5913 in clone () at /lib64/libc.so.6
Signed-off-by: Jonathon Jongsma <[email protected]>
Reviewed-by: Ján Tomko <[email protected]>
Signed-off-by: Ján Tomko <[email protected]>
|
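The backtrace above comes down to C precedence: [] binds tighter than unary *, so accessing an element through a pointer to an array needs parentheses. A small self-contained illustration:
#include <stdio.h>

int main(void)
{
	int arr[3] = {10, 20, 30};
	int (*p)[3] = &arr; /* pointer to an array of 3 ints */

	printf("%d\n", (*p)[1]); /* correct: prints 20 */
	/* *p[1] parses as *(p[1]): it skips one whole array past p
	 * before dereferencing -- out of bounds, the bug class fixed
	 * by the commit above. */
	return 0;
}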
cdf_dump(void *v, size_t len)
{
size_t i, j;
unsigned char *p = v;
char abuf[16];
(void)fprintf(stderr, "%.4x: ", 0);
for (i = 0, j = 0; i < len; i++, p++) {
(void)fprintf(stderr, "%.2x ", *p);
abuf[j++] = isprint(*p) ? *p : '.';
if (j == 16) {
j = 0;
abuf[15] = '\0';
(void)fprintf(stderr, "%s\n%.4" SIZE_T_FORMAT "x: ",
abuf, i + 1);
}
}
(void)fprintf(stderr, "\n");
}
| 0 |
[
"CWE-119"
] |
file
|
1140872578eedaeecf828f1841d17ff574372dba
| 183,300,078,831,096,080,000,000,000,000,000,000,000 | 18 |
- add float and double types
- fix debug printf formats
- fix short stream sizes
- don't fail if we don't know about a type
|
Status privilegeVectorToBSONArray(const PrivilegeVector& privileges, BSONArray* result) {
BSONArrayBuilder arrBuilder;
for (PrivilegeVector::const_iterator it = privileges.begin(); it != privileges.end(); ++it) {
const Privilege& privilege = *it;
ParsedPrivilege parsedPrivilege;
std::string errmsg;
if (!ParsedPrivilege::privilegeToParsedPrivilege(privilege, &parsedPrivilege, &errmsg)) {
return Status(ErrorCodes::FailedToParse, errmsg);
}
if (!parsedPrivilege.isValid(&errmsg)) {
return Status(ErrorCodes::FailedToParse, errmsg);
}
arrBuilder.append(parsedPrivilege.toBSON());
}
*result = arrBuilder.arr();
return Status::OK();
}
| 0 |
[
"CWE-613"
] |
mongo
|
e55d6e2292e5dbe2f97153251d8193d1cc89f5d7
| 79,831,873,125,636,390,000,000,000,000,000,000,000 | 18 |
SERVER-38984 Validate unique User ID on UserCache hit
|
const char* XMLRPC_RequestSetMethodName(XMLRPC_REQUEST request, const char* methodName) {
if(request) {
simplestring_clear(&request->methodName);
simplestring_add(&request->methodName, methodName);
return request->methodName.str;
}
return NULL;
}
| 0 |
[
"CWE-119"
] |
php-src
|
88412772d295ebf7dd34409534507dc9bcac726e
| 130,577,393,458,453,470,000,000,000,000,000,000,000 | 8 |
Fix bug #68027 - fix date parsing in XMLRPC lib
|
int TS_MSG_IMPRINT_print_bio(BIO *bio, TS_MSG_IMPRINT *a)
{
ASN1_OCTET_STRING *msg;
TS_X509_ALGOR_print_bio(bio, TS_MSG_IMPRINT_get_algo(a));
BIO_printf(bio, "Message data:\n");
msg = TS_MSG_IMPRINT_get_msg(a);
BIO_dump_indent(bio, (const char *)M_ASN1_STRING_data(msg),
M_ASN1_STRING_length(msg), 4);
return 1;
}
| 0 |
[] |
openssl
|
c7235be6e36c4bef84594aa3b2f0561db84b63d8
| 203,061,500,788,008,070,000,000,000,000,000,000,000 | 13 |
RFC 3161 compliant time stamp request creation, response generation
and response verification.
Submitted by: Zoltan Glozik <[email protected]>
Reviewed by: Ulf Moeller
|
CImgDisplay& move(const int posx, const int posy) {
if (is_empty()) return *this;
if (_window_x!=posx || _window_y!=posy) {
if (!_is_fullscreen) {
RECT rect;
rect.left = rect.top = 0; rect.right = (LONG)_window_width - 1; rect.bottom = (LONG)_window_height - 1;
AdjustWindowRect(&rect,WS_CAPTION | WS_SYSMENU | WS_THICKFRAME | WS_MINIMIZEBOX | WS_MAXIMIZEBOX,false);
const int
border1 = (int)((rect.right - rect.left + 1 -_width)/2),
border2 = (int)(rect.bottom - rect.top + 1 - _height - border1);
SetWindowPos(_window,0,posx - border1,posy - border2,0,0,SWP_NOSIZE | SWP_NOZORDER);
} else SetWindowPos(_window,0,posx,posy,0,0,SWP_NOSIZE | SWP_NOZORDER);
_window_x = posx;
_window_y = posy;
show();
}
_is_moved = false;
return *this;
| 0 |
[
"CWE-125"
] |
CImg
|
10af1e8c1ad2a58a0a3342a856bae63e8f257abb
| 116,706,287,631,234,930,000,000,000,000,000,000,000 | 19 |
Fix other issues in 'CImg<T>::load_bmp()'.
|
TEST_P(SdsDynamicDownstreamCertValidationContextTest, CombinedValidationContextWithSharedSecret) {
enableCombinedValidationContext(true);
shareValidationSecret(true);
on_server_init_function_ = [this]() {
createSdsStream(*(fake_upstreams_[1]));
sendSdsResponse(getCvcSecretWithOnlyTrustedCa());
};
initialize();
// Wait for "ssl_context_updated_by_sds" counters to indicate that both resources
// depending on the verification_secret were updated.
test_server_->waitForCounterGe(
"cluster.cluster_0.client_ssl_socket_factory.ssl_context_update_by_sds", 1);
test_server_->waitForCounterGe(
listenerStatPrefix("server_ssl_socket_factory.ssl_context_update_by_sds"), 1);
ConnectionCreationFunction creator = [&]() -> Network::ClientConnectionPtr {
return makeSslClientConnection();
};
testRouterHeaderOnlyRequestAndResponse(&creator);
}
| 0 |
[
"CWE-400"
] |
envoy
|
0e49a495826ea9e29134c1bd54fdeb31a034f40c
| 294,829,617,204,250,500,000,000,000,000,000,000,000 | 21 |
http/2: add stats and stream flush timeout (#139)
This commit adds a new stream flush timeout to guard against a
remote server that does not open window once an entire stream has
been buffered for flushing. Additional stats have also been added
to better understand the codecs view of active streams as well as
amount of data buffered.
Signed-off-by: Matt Klein <[email protected]>
|
create_attr_ival(
int attr,
int value
)
{
attr_val *my_val;
my_val = emalloc_zero(sizeof(*my_val));
my_val->attr = attr;
my_val->value.i = value;
my_val->type = T_Integer;
return my_val;
}
| 0 |
[
"CWE-19"
] |
ntp
|
fe46889f7baa75fc8e6c0fcde87706d396ce1461
| 148,922,125,622,517,050,000,000,000,000,000,000,000 | 14 |
[Sec 2942]: Off-path DoS attack on auth broadcast mode. HStenn.
|
static int mxf_read_header(AVFormatContext *s)
{
MXFContext *mxf = s->priv_data;
KLVPacket klv;
int64_t essence_offset = 0;
int64_t last_pos = -1;
uint64_t last_pos_index = 1;
int ret;
mxf->last_forward_tell = INT64_MAX;
mxf->edit_units_per_packet = 1;
if (!mxf_read_sync(s->pb, mxf_header_partition_pack_key, 14)) {
av_log(s, AV_LOG_ERROR, "could not find header partition pack key\n");
return AVERROR_INVALIDDATA;
}
avio_seek(s->pb, -14, SEEK_CUR);
mxf->fc = s;
mxf->run_in = avio_tell(s->pb);
mxf_read_random_index_pack(s);
while (!avio_feof(s->pb)) {
const MXFMetadataReadTableEntry *metadata;
if (avio_tell(s->pb) == last_pos) {
av_log(mxf->fc, AV_LOG_ERROR, "MXF structure loop detected\n");
return AVERROR_INVALIDDATA;
}
if ((1ULL<<61) % last_pos_index++ == 0)
last_pos = avio_tell(s->pb);
if (klv_read_packet(&klv, s->pb) < 0) {
/* EOF - seek to previous partition or stop */
if(mxf_parse_handle_partition_or_eof(mxf) <= 0)
break;
else
continue;
}
PRINT_KEY(s, "read header", klv.key);
av_dlog(s, "size %"PRIu64" offset %#"PRIx64"\n", klv.length, klv.offset);
if (IS_KLV_KEY(klv.key, mxf_encrypted_triplet_key) ||
IS_KLV_KEY(klv.key, mxf_essence_element_key) ||
IS_KLV_KEY(klv.key, mxf_avid_essence_element_key) ||
IS_KLV_KEY(klv.key, mxf_system_item_key)) {
if (!mxf->current_partition) {
av_log(mxf->fc, AV_LOG_ERROR, "found essence prior to first PartitionPack\n");
return AVERROR_INVALIDDATA;
}
if (!mxf->current_partition->essence_offset) {
/* for OP1a we compute essence_offset
* for OPAtom we point essence_offset after the KL (usually op1a_essence_offset + 20 or 25)
* TODO: for OP1a we could eliminate this entire if statement, always stopping parsing at op1a_essence_offset
* for OPAtom we still need the actual essence_offset though (the KL's length can vary)
*/
int64_t op1a_essence_offset =
round_to_kag(mxf->current_partition->this_partition +
mxf->current_partition->pack_length, mxf->current_partition->kag_size) +
round_to_kag(mxf->current_partition->header_byte_count, mxf->current_partition->kag_size) +
round_to_kag(mxf->current_partition->index_byte_count, mxf->current_partition->kag_size);
if (mxf->op == OPAtom) {
/* point essence_offset to the actual data
* OPAtom has all the essence in one big KLV
*/
mxf->current_partition->essence_offset = avio_tell(s->pb);
mxf->current_partition->essence_length = klv.length;
} else {
/* NOTE: op1a_essence_offset may be less than to klv.offset (C0023S01.mxf) */
mxf->current_partition->essence_offset = op1a_essence_offset;
}
}
if (!essence_offset)
essence_offset = klv.offset;
/* seek to footer, previous partition or stop */
if (mxf_parse_handle_essence(mxf) <= 0)
break;
continue;
} else if (!memcmp(klv.key, mxf_header_partition_pack_key, 13) &&
klv.key[13] >= 2 && klv.key[13] <= 4 && mxf->current_partition) {
/* next partition pack - keep going, seek to previous partition or stop */
if(mxf_parse_handle_partition_or_eof(mxf) <= 0)
break;
else if (mxf->parsing_backward)
continue;
/* we're still parsing forward. proceed to parsing this partition pack */
}
for (metadata = mxf_metadata_read_table; metadata->read; metadata++) {
if (IS_KLV_KEY(klv.key, metadata->key)) {
int res;
if (klv.key[5] == 0x53) {
res = mxf_read_local_tags(mxf, &klv, metadata->read, metadata->ctx_size, metadata->type);
} else {
uint64_t next = avio_tell(s->pb) + klv.length;
res = metadata->read(mxf, s->pb, 0, klv.length, klv.key, klv.offset);
/* only seek forward, else this can loop for a long time */
if (avio_tell(s->pb) > next) {
av_log(s, AV_LOG_ERROR, "read past end of KLV @ %#"PRIx64"\n",
klv.offset);
return AVERROR_INVALIDDATA;
}
avio_seek(s->pb, next, SEEK_SET);
}
if (res < 0) {
av_log(s, AV_LOG_ERROR, "error reading header metadata\n");
return res;
}
break;
} else {
av_log(s, AV_LOG_VERBOSE, "Dark key " PRIxUID "\n",
UID_ARG(klv.key));
}
}
if (!metadata->read)
avio_skip(s->pb, klv.length);
}
/* FIXME avoid seek */
if (!essence_offset) {
av_log(s, AV_LOG_ERROR, "no essence\n");
return AVERROR_INVALIDDATA;
}
avio_seek(s->pb, essence_offset, SEEK_SET);
mxf_compute_essence_containers(mxf);
/* we need to do this before computing the index tables
* to be able to fill in zero IndexDurations with st->duration */
if ((ret = mxf_parse_structural_metadata(mxf)) < 0)
goto fail;
if ((ret = mxf_compute_index_tables(mxf)) < 0)
goto fail;
if (mxf->nb_index_tables > 1) {
/* TODO: look up which IndexSID to use via EssenceContainerData */
av_log(mxf->fc, AV_LOG_INFO, "got %i index tables - only the first one (IndexSID %i) will be used\n",
mxf->nb_index_tables, mxf->index_tables[0].index_sid);
} else if (mxf->nb_index_tables == 0 && mxf->op == OPAtom) {
av_log(mxf->fc, AV_LOG_ERROR, "cannot demux OPAtom without an index\n");
ret = AVERROR_INVALIDDATA;
goto fail;
}
mxf_handle_small_eubc(s);
return 0;
fail:
mxf_read_close(s);
return ret;
}
| 0 |
[
"CWE-703"
] |
FFmpeg
|
f173cdfe669556aa92857adafe60cbe5f2aa1210
| 130,537,631,542,949,500,000,000,000,000,000,000,000 | 157 |
avformat/mxfdec: Fix DoS issues in mxf_read_index_entry_array()
Fixes: 20170829A.mxf
Co-Author: 张洪亮(望初)" <[email protected]>
Found-by: Xiaohei and Wangchu from Alibaba Security Team
Signed-off-by: Michael Niedermayer <[email protected]>
(cherry picked from commit 900f39692ca0337a98a7cf047e4e2611071810c2)
Signed-off-by: Michael Niedermayer <[email protected]>
|
static int soft_offline_in_use_page(struct page *page, int flags)
{
int ret;
struct page *hpage = compound_head(page);
if (!PageHuge(page) && PageTransHuge(hpage)) {
lock_page(hpage);
if (!PageAnon(hpage) || unlikely(split_huge_page(hpage))) {
unlock_page(hpage);
if (!PageAnon(hpage))
pr_info("soft offline: %#lx: non anonymous thp\n", page_to_pfn(page));
else
pr_info("soft offline: %#lx: thp split failed\n", page_to_pfn(page));
put_hwpoison_page(hpage);
return -EBUSY;
}
unlock_page(hpage);
get_hwpoison_page(page);
put_hwpoison_page(hpage);
}
if (PageHuge(page))
ret = soft_offline_huge_page(page, flags);
else
ret = __soft_offline_page(page, flags);
return ret;
}
| 0 |
[] |
linux
|
c3901e722b2975666f42748340df798114742d6d
| 38,483,790,193,530,018,000,000,000,000,000,000,000 | 28 |
mm: hwpoison: fix thp split handling in memory_failure()
When memory_failure() runs on a thp tail page after pmd is split, we
trigger the following VM_BUG_ON_PAGE():
page:ffffd7cd819b0040 count:0 mapcount:0 mapping: (null) index:0x1
flags: 0x1fffc000400000(hwpoison)
page dumped because: VM_BUG_ON_PAGE(!page_count(p))
------------[ cut here ]------------
kernel BUG at /src/linux-dev/mm/memory-failure.c:1132!
memory_failure() passed refcount and page lock from tail page to head
page, which is not needed because we can pass any subpage to
split_huge_page().
Fixes: 61f5d698cc97 ("mm: re-enable THP")
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Naoya Horiguchi <[email protected]>
Cc: <[email protected]> [4.5+]
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
ecma_destroy_ecma_string (ecma_string_t *string_p) /**< ecma-string */
{
JERRY_ASSERT (string_p != NULL);
JERRY_ASSERT (!ECMA_IS_DIRECT_STRING (string_p));
JERRY_ASSERT ((string_p->refs_and_container < ECMA_STRING_REF_ONE) || ECMA_STRING_IS_STATIC (string_p));
switch (ECMA_STRING_GET_CONTAINER (string_p))
{
case ECMA_STRING_CONTAINER_HEAP_UTF8_STRING:
{
ecma_dealloc_string_buffer (string_p, ((ecma_short_string_t *) string_p)->size + sizeof (ecma_short_string_t));
return;
}
case ECMA_STRING_CONTAINER_LONG_OR_EXTERNAL_STRING:
{
ecma_long_string_t *long_string_p = (ecma_long_string_t *) string_p;
if (long_string_p->string_p == ECMA_LONG_STRING_BUFFER_START (long_string_p))
{
ecma_dealloc_string_buffer (string_p, long_string_p->size + sizeof (ecma_long_string_t));
return;
}
ecma_external_string_t *external_string_p = (ecma_external_string_t *) string_p;
jerry_external_string_free_callback_t free_cb = JERRY_CONTEXT (external_string_free_callback_p);
if (free_cb != NULL)
{
free_cb ((lit_utf8_byte_t *) external_string_p->header.string_p,
external_string_p->header.size,
external_string_p->user_p);
}
ecma_dealloc_external_string (external_string_p);
return;
}
case ECMA_STRING_CONTAINER_HEAP_ASCII_STRING:
{
ecma_dealloc_string_buffer (string_p, ECMA_ASCII_STRING_GET_SIZE (string_p) + ECMA_ASCII_STRING_HEADER_SIZE);
return;
}
#if JERRY_ESNEXT
case ECMA_STRING_CONTAINER_SYMBOL:
{
ecma_extended_string_t *symbol_p = (ecma_extended_string_t *) string_p;
ecma_free_value (symbol_p->u.symbol_descriptor);
ecma_dealloc_extended_string (symbol_p);
return;
}
#endif /* JERRY_ESNEXT */
default:
{
JERRY_ASSERT (ECMA_STRING_GET_CONTAINER (string_p) == ECMA_STRING_CONTAINER_UINT32_IN_DESC
|| ECMA_STRING_GET_CONTAINER (string_p) == ECMA_STRING_CONTAINER_MAGIC_STRING_EX);
/* only the string descriptor itself should be freed */
ecma_dealloc_string (string_p);
}
}
} /* ecma_destroy_ecma_string */
| 0 |
[
"CWE-416"
] |
jerryscript
|
3bcd48f72d4af01d1304b754ef19fe1a02c96049
| 307,654,181,305,817,300,000,000,000,000,000,000,000 | 60 |
Improve parse_identifier (#4691)
ASCII string length is no longer computed during string allocation.
JerryScript-DCO-1.0-Signed-off-by: Daniel Batiz [email protected]
|
static ssize_t recovery_start_store(struct md_rdev *rdev, const char *buf, size_t len)
{
unsigned long long recovery_start;
if (cmd_match(buf, "none"))
recovery_start = MaxSector;
else if (kstrtoull(buf, 10, &recovery_start))
return -EINVAL;
if (rdev->mddev->pers &&
rdev->raid_disk >= 0)
return -EBUSY;
rdev->recovery_offset = recovery_start;
if (recovery_start == MaxSector)
set_bit(In_sync, &rdev->flags);
else
clear_bit(In_sync, &rdev->flags);
return len;
}
| 0 |
[
"CWE-200"
] |
linux
|
b6878d9e03043695dbf3fa1caa6dfc09db225b16
| 95,629,043,417,583,720,000,000,000,000,000,000,000 | 20 |
md: use kzalloc() when bitmap is disabled
In drivers/md/md.c get_bitmap_file() uses kmalloc() for creating a
mdu_bitmap_file_t called "file".
5769 file = kmalloc(sizeof(*file), GFP_NOIO);
5770 if (!file)
5771 return -ENOMEM;
This structure is copied to user space at the end of the function.
5786 if (err == 0 &&
5787 copy_to_user(arg, file, sizeof(*file)))
5788 err = -EFAULT
But if bitmap is disabled only the first byte of "file" is initialized
with zero, so it's possible to read some bytes (up to 4095) of kernel
space memory from user space. This is an information leak.
5775 /* bitmap disabled, zero the first byte and copy out */
5776 if (!mddev->bitmap_info.file)
5777 file->pathname[0] = '\0';
Signed-off-by: Benjamin Randazzo <[email protected]>
Signed-off-by: NeilBrown <[email protected]>
|
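A hedged sketch condensing the quoted excerpt: kzalloc guarantees every byte of the structure is defined before the partial fill and the copy out. The message's source line numbers are dropped; names follow the message.
mdu_bitmap_file_t *file = kzalloc(sizeof(*file), GFP_NOIO);
if (!file)
	return -ENOMEM;
/* bitmap disabled: only pathname[0] is written explicitly, but
 * kzalloc has already cleared the remaining bytes, so the copy
 * below can no longer leak kernel memory. */
if (!mddev->bitmap_info.file)
	file->pathname[0] = '\0';
if (copy_to_user(arg, file, sizeof(*file)))
	err = -EFAULT;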
xps_init_truetype_font(xps_context_t *ctx, xps_font_t *font)
{
int code = 0;
font->font = (void*) gs_alloc_struct(ctx->memory, gs_font_type42, &st_gs_font_type42, "xps_font type42");
if (!font->font)
return gs_throw(gs_error_VMerror, "out of memory");
/* no shortage of things to initialize */
{
gs_font_type42 *p42 = (gs_font_type42*) font->font;
/* Common to all fonts: */
p42->next = 0;
p42->prev = 0;
p42->memory = ctx->memory;
p42->dir = ctx->fontdir; /* NB also set by gs_definefont later */
p42->base = font->font; /* NB also set by gs_definefont later */
p42->is_resource = false;
gs_notify_init(&p42->notify_list, gs_memory_stable(ctx->memory));
p42->id = gs_next_ids(ctx->memory, 1);
p42->client_data = font; /* that's us */
/* this is overwritten in grid_fit() */
gs_make_identity(&p42->FontMatrix);
gs_make_identity(&p42->orig_FontMatrix); /* NB ... original or zeroes? */
p42->FontType = ft_TrueType;
p42->BitmapWidths = false;
p42->ExactSize = fbit_use_outlines;
p42->InBetweenSize = fbit_use_outlines;
p42->TransformedChar = fbit_use_outlines;
p42->WMode = 0;
p42->PaintType = 0;
p42->StrokeWidth = 0;
p42->is_cached = 0;
p42->procs.define_font = gs_no_define_font;
p42->procs.make_font = gs_no_make_font;
p42->procs.font_info = gs_type42_font_info;
p42->procs.same_font = gs_default_same_font;
p42->procs.encode_char = xps_true_callback_encode_char;
p42->procs.decode_glyph = xps_true_callback_decode_glyph;
p42->procs.enumerate_glyph = gs_type42_enumerate_glyph;
p42->procs.glyph_info = gs_type42_glyph_info;
p42->procs.glyph_outline = gs_type42_glyph_outline;
p42->procs.glyph_name = xps_true_callback_glyph_name;
p42->procs.init_fstack = gs_default_init_fstack;
p42->procs.next_char_glyph = gs_default_next_char_glyph;
p42->procs.build_char = xps_true_callback_build_char;
memset(p42->font_name.chars, 0, sizeof(p42->font_name.chars));
xps_load_sfnt_name(font, (char*)p42->font_name.chars, sizeof(p42->font_name.chars));
p42->font_name.size = strlen((char*)p42->font_name.chars);
memset(p42->key_name.chars, 0, sizeof(p42->key_name.chars));
strcpy((char*)p42->key_name.chars, (char*)p42->font_name.chars);
p42->key_name.size = strlen((char*)p42->key_name.chars);
/* Base font specific: */
p42->FontBBox.p.x = 0;
p42->FontBBox.p.y = 0;
p42->FontBBox.q.x = 0;
p42->FontBBox.q.y = 0;
uid_set_UniqueID(&p42->UID, p42->id);
p42->encoding_index = ENCODING_INDEX_UNKNOWN;
p42->nearest_encoding_index = ENCODING_INDEX_ISOLATIN1;
p42->FAPI = 0;
p42->FAPI_font_data = 0;
/* Type 42 specific: */
p42->data.string_proc = xps_true_callback_string_proc;
p42->data.proc_data = font;
gs_type42_font_init(p42, font->subfontid);
p42->data.get_glyph_index = xps_true_get_glyph_index;
}
if ((code = gs_definefont(ctx->fontdir, font->font)) < 0) {
return(code);
}
code = xps_fapi_passfont (font->font, NULL, NULL, font->data, font->length);
return code;
}
| 0 |
[
"CWE-119"
] |
ghostpdl
|
3c2aebbedd37fab054e80f2e315de07d7e9b5bdb
| 207,008,593,509,182,070,000,000,000,000,000,000,000 | 93 |
Bug 698044: restrict font name length to the buffer size.
|
do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
{
struct net *net = sock_net(sk);
int ret;
unsigned char arg[MAX_ARG_LEN];
struct ip_vs_service_user *usvc_compat;
struct ip_vs_service_user_kern usvc;
struct ip_vs_service *svc;
struct ip_vs_dest_user *udest_compat;
struct ip_vs_dest_user_kern udest;
struct netns_ipvs *ipvs = net_ipvs(net);
if (!capable(CAP_NET_ADMIN))
return -EPERM;
if (cmd < IP_VS_BASE_CTL || cmd > IP_VS_SO_SET_MAX)
return -EINVAL;
if (len < 0 || len > MAX_ARG_LEN)
return -EINVAL;
if (len != set_arglen[SET_CMDID(cmd)]) {
pr_err("set_ctl: len %u != %u\n",
len, set_arglen[SET_CMDID(cmd)]);
return -EINVAL;
}
if (copy_from_user(arg, user, len) != 0)
return -EFAULT;
/* increase the module use count */
ip_vs_use_count_inc();
/* Handle daemons since they have another lock */
if (cmd == IP_VS_SO_SET_STARTDAEMON ||
cmd == IP_VS_SO_SET_STOPDAEMON) {
struct ip_vs_daemon_user *dm = (struct ip_vs_daemon_user *)arg;
if (mutex_lock_interruptible(&ipvs->sync_mutex)) {
ret = -ERESTARTSYS;
goto out_dec;
}
if (cmd == IP_VS_SO_SET_STARTDAEMON)
ret = start_sync_thread(net, dm->state, dm->mcast_ifn,
dm->syncid);
else
ret = stop_sync_thread(net, dm->state);
mutex_unlock(&ipvs->sync_mutex);
goto out_dec;
}
if (mutex_lock_interruptible(&__ip_vs_mutex)) {
ret = -ERESTARTSYS;
goto out_dec;
}
if (cmd == IP_VS_SO_SET_FLUSH) {
/* Flush the virtual service */
ret = ip_vs_flush(net);
goto out_unlock;
} else if (cmd == IP_VS_SO_SET_TIMEOUT) {
/* Set timeout values for (tcp tcpfin udp) */
ret = ip_vs_set_timeout(net, (struct ip_vs_timeout_user *)arg);
goto out_unlock;
}
usvc_compat = (struct ip_vs_service_user *)arg;
udest_compat = (struct ip_vs_dest_user *)(usvc_compat + 1);
/* We only use the new structs internally, so copy userspace compat
* structs to extended internal versions */
ip_vs_copy_usvc_compat(&usvc, usvc_compat);
ip_vs_copy_udest_compat(&udest, udest_compat);
if (cmd == IP_VS_SO_SET_ZERO) {
/* if no service address is set, zero counters in all */
if (!usvc.fwmark && !usvc.addr.ip && !usvc.port) {
ret = ip_vs_zero_all(net);
goto out_unlock;
}
}
/* Check for valid protocol: TCP or UDP or SCTP, even for fwmark!=0 */
if (usvc.protocol != IPPROTO_TCP && usvc.protocol != IPPROTO_UDP &&
usvc.protocol != IPPROTO_SCTP) {
pr_err("set_ctl: invalid protocol: %d %pI4:%d %s\n",
usvc.protocol, &usvc.addr.ip,
ntohs(usvc.port), usvc.sched_name);
ret = -EFAULT;
goto out_unlock;
}
/* Lookup the exact service by <protocol, addr, port> or fwmark */
if (usvc.fwmark == 0)
svc = __ip_vs_service_find(net, usvc.af, usvc.protocol,
&usvc.addr, usvc.port);
else
svc = __ip_vs_svc_fwm_find(net, usvc.af, usvc.fwmark);
if (cmd != IP_VS_SO_SET_ADD
&& (svc == NULL || svc->protocol != usvc.protocol)) {
ret = -ESRCH;
goto out_unlock;
}
switch (cmd) {
case IP_VS_SO_SET_ADD:
if (svc != NULL)
ret = -EEXIST;
else
ret = ip_vs_add_service(net, &usvc, &svc);
break;
case IP_VS_SO_SET_EDIT:
ret = ip_vs_edit_service(svc, &usvc);
break;
case IP_VS_SO_SET_DEL:
ret = ip_vs_del_service(svc);
if (!ret)
goto out_unlock;
break;
case IP_VS_SO_SET_ZERO:
ret = ip_vs_zero_service(svc);
break;
case IP_VS_SO_SET_ADDDEST:
ret = ip_vs_add_dest(svc, &udest);
break;
case IP_VS_SO_SET_EDITDEST:
ret = ip_vs_edit_dest(svc, &udest);
break;
case IP_VS_SO_SET_DELDEST:
ret = ip_vs_del_dest(svc, &udest);
break;
default:
ret = -EINVAL;
}
out_unlock:
mutex_unlock(&__ip_vs_mutex);
out_dec:
/* decrease the module use count */
ip_vs_use_count_dec();
return ret;
}
| 0 |
[
"CWE-200"
] |
linux
|
2d8a041b7bfe1097af21441cb77d6af95f4f4680
| 108,505,597,363,522,640,000,000,000,000,000,000,000 | 142 |
ipvs: fix info leak in getsockopt(IP_VS_SO_GET_TIMEOUT)
If at least one of CONFIG_IP_VS_PROTO_TCP or CONFIG_IP_VS_PROTO_UDP is
not set, __ip_vs_get_timeouts() does not fully initialize the structure
that gets copied to userland and that for leaks up to 12 bytes of kernel
stack. Add an explicit memset(0) before passing the structure to
__ip_vs_get_timeouts() to avoid the info leak.
Signed-off-by: Mathias Krause <[email protected]>
Cc: Wensong Zhang <[email protected]>
Cc: Simon Horman <[email protected]>
Cc: Julian Anastasov <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
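The fix described in the message is the classic memset-before-copy defense: zero the whole structure so that conditionally skipped fields cannot carry stale stack bytes out to userland. A userspace sketch with made-up field names (the kernel code would use copy_to_user() at the end):

#include <stdio.h>
#include <string.h>

struct timeouts {            /* stand-in for the structure copied to userland */
    int tcp_timeout;
    int tcp_fin_timeout;
    int udp_timeout;
};

/* Conditional compilation may leave some fields untouched; without the
 * memset, whatever happened to be on the stack would be copied out. */
static void get_timeouts(struct timeouts *t, int have_tcp, int have_udp)
{
    memset(t, 0, sizeof(*t));   /* the fix: no stale stack bytes can leak */
    if (have_tcp) {
        t->tcp_timeout = 900;
        t->tcp_fin_timeout = 120;
    }
    if (have_udp)
        t->udp_timeout = 300;
}

int main(void)
{
    struct timeouts t;
    get_timeouts(&t, 1, 0);
    printf("%d %d %d\n", t.tcp_timeout, t.tcp_fin_timeout, t.udp_timeout);
    return 0;
}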
findfilendir(
typval_T *argvars UNUSED,
typval_T *rettv,
int find_what UNUSED)
{
#ifdef FEAT_SEARCHPATH
char_u *fname;
char_u *fresult = NULL;
char_u *path = *curbuf->b_p_path == NUL ? p_path : curbuf->b_p_path;
char_u *p;
char_u pathbuf[NUMBUFLEN];
int count = 1;
int first = TRUE;
int error = FALSE;
#endif
rettv->vval.v_string = NULL;
rettv->v_type = VAR_STRING;
#ifdef FEAT_SEARCHPATH
fname = tv_get_string(&argvars[0]);
if (argvars[1].v_type != VAR_UNKNOWN)
{
p = tv_get_string_buf_chk(&argvars[1], pathbuf);
if (p == NULL)
error = TRUE;
else
{
if (*p != NUL)
path = p;
if (argvars[2].v_type != VAR_UNKNOWN)
count = (int)tv_get_number_chk(&argvars[2], &error);
}
}
if (count < 0 && rettv_list_alloc(rettv) == FAIL)
error = TRUE;
if (*fname != NUL && !error)
{
do
{
if (rettv->v_type == VAR_STRING || rettv->v_type == VAR_LIST)
vim_free(fresult);
fresult = find_file_in_path_option(first ? fname : NULL,
first ? (int)STRLEN(fname) : 0,
0, first, path,
find_what,
curbuf->b_ffname,
find_what == FINDFILE_DIR
? (char_u *)"" : curbuf->b_p_sua);
first = FALSE;
if (fresult != NULL && rettv->v_type == VAR_LIST)
list_append_string(rettv->vval.v_list, fresult, -1);
} while ((rettv->v_type == VAR_LIST || --count > 0) && fresult != NULL);
}
if (rettv->v_type == VAR_STRING)
rettv->vval.v_string = fresult;
#endif
}
| 0 |
[
"CWE-78"
] |
vim
|
8c62a08faf89663e5633dc5036cd8695c80f1075
| 115,084,973,229,200,480,000,000,000,000,000,000,000 | 65 |
patch 8.1.0881: can execute shell commands in rvim through interfaces
Problem: Can execute shell commands in rvim through interfaces.
Solution: Disable using interfaces in restricted mode. Allow for writing
file with writefile(), histadd() and a few others.
|
f_getcmdscreenpos(typval_T *argvars UNUSED, typval_T *rettv)
{
cmdline_info_T *p = get_ccline_ptr();
rettv->vval.v_number = p != NULL ? p->cmdspos + 1 : 0;
}
| 0 |
[
"CWE-416"
] |
vim
|
1c3dd8ddcba63c1af5112e567215b3cec2de11d0
| 254,614,686,902,485,700,000,000,000,000,000,000,000 | 6 |
patch 9.0.0490: using freed memory with cmdwin and BufEnter autocmd
Problem: Using freed memory with cmdwin and BufEnter autocmd.
Solution: Make sure pointer to b_p_iminsert is still valid.
|
void free_pages_exact(void *virt, size_t size)
{
unsigned long addr = (unsigned long)virt;
unsigned long end = addr + PAGE_ALIGN(size);
while (addr < end) {
free_page(addr);
addr += PAGE_SIZE;
}
}
| 0 |
[] |
linux
|
400e22499dd92613821374c8c6c88c7225359980
| 103,910,749,823,634,600,000,000,000,000,000,000,000 | 10 |
mm: don't warn about allocations which stall for too long
Commit 63f53dea0c98 ("mm: warn about allocations which stall for too
long") was a great step toward reducing the possibility of silent hang-up
problems caused by memory allocation stalls. But this commit reverts it,
for it is possible to trigger OOM lockup and/or soft lockups when many
threads concurrently call warn_alloc() (in order to warn about memory
allocation stalls) due to the current implementation of printk(), and it is
difficult to obtain useful information due to the limitations of the
synchronous warning approach.
The current printk() implementation flushes all pending logs using the
context of a thread which called console_unlock(). printk() should be
able to flush all pending logs eventually unless somebody continues
appending to printk() buffer.
Since warn_alloc() started appending to printk() buffer while waiting
for oom_kill_process() to make forward progress when oom_kill_process()
is processing pending logs, it became possible for warn_alloc() to force
oom_kill_process() to loop inside printk(). As a result, warn_alloc()
significantly increased the possibility of preventing oom_kill_process()
from making forward progress.
---------- Pseudo code start ----------
Before warn_alloc() was introduced:
retry:
if (mutex_trylock(&oom_lock)) {
while (atomic_read(&printk_pending_logs) > 0) {
atomic_dec(&printk_pending_logs);
print_one_log();
}
// Send SIGKILL here.
mutex_unlock(&oom_lock)
}
goto retry;
After warn_alloc() was introduced:
retry:
if (mutex_trylock(&oom_lock)) {
while (atomic_read(&printk_pending_logs) > 0) {
atomic_dec(&printk_pending_logs);
print_one_log();
}
// Send SIGKILL here.
mutex_unlock(&oom_lock)
} else if (waited_for_10seconds()) {
atomic_inc(&printk_pending_logs);
}
goto retry;
---------- Pseudo code end ----------
Although waited_for_10seconds() becomes true once per 10 seconds,
an unbounded number of threads can call waited_for_10seconds() at the same
time. Also, since threads doing waited_for_10seconds() keep running an
almost-busy loop, the thread doing print_one_log() can use little CPU
time. Therefore, this situation can be simplified like
---------- Pseudo code start ----------
retry:
if (mutex_trylock(&oom_lock)) {
while (atomic_read(&printk_pending_logs) > 0) {
atomic_dec(&printk_pending_logs);
print_one_log();
}
// Send SIGKILL here.
mutex_unlock(&oom_lock)
} else {
atomic_inc(&printk_pending_logs);
}
goto retry;
---------- Pseudo code end ----------
when printk() is called faster than print_one_log() can process a log.
One possible mitigation would be to introduce a new lock in order to
make sure that no other series of printk() (either oom_kill_process() or
warn_alloc()) can append to printk() buffer when one series of printk()
(either oom_kill_process() or warn_alloc()) is already in progress.
Such serialization will also help obtaining kernel messages in readable
form.
---------- Pseudo code start ----------
retry:
if (mutex_trylock(&oom_lock)) {
mutex_lock(&oom_printk_lock);
while (atomic_read(&printk_pending_logs) > 0) {
atomic_dec(&printk_pending_logs);
print_one_log();
}
// Send SIGKILL here.
mutex_unlock(&oom_printk_lock);
mutex_unlock(&oom_lock)
} else {
if (mutex_trylock(&oom_printk_lock)) {
atomic_inc(&printk_pending_logs);
mutex_unlock(&oom_printk_lock);
}
}
goto retry;
---------- Pseudo code end ----------
But this commit does not go that direction, for we don't want to
introduce a new lock dependency, and we are unlikely to be able to obtain
useful information even if we serialized oom_kill_process() and
warn_alloc().
The synchronous approach is prone to unexpected results (e.g. too late [1],
too frequent [2], overlooked [3]). As far as I know, warn_alloc() never
helped with providing information other than "something is going wrong".
I want to consider an asynchronous approach which can obtain information
during stalls with possibly relevant threads (e.g. the owner of
oom_lock and kswapd-like threads) and serve as a trigger for actions
(e.g. turn on/off tracepoints, ask the libvirt daemon to take a memory dump
of a stalling KVM guest for diagnostic purposes).
This commit temporarily loses the ability to report e.g. an OOM lockup
caused by being unable to invoke the OOM killer for a !__GFP_FS allocation
request. But the asynchronous approach will be able to detect such a
situation and emit a warning. Thus, let's remove warn_alloc().
[1] https://bugzilla.kernel.org/show_bug.cgi?id=192981
[2] http://lkml.kernel.org/r/CAM_iQpWuPVGc2ky8M-9yukECtS+zKjiDasNymX7rMcBjBFyM_A@mail.gmail.com
[3] commit db73ee0d46379922 ("mm, vmscan: do not loop on too_many_isolated for ever"))
Link: http://lkml.kernel.org/r/1509017339-4802-1-git-send-email-penguin-kernel@I-love.SAKURA.ne.jp
Signed-off-by: Tetsuo Handa <[email protected]>
Reported-by: Cong Wang <[email protected]>
Reported-by: yuwang.yuwang <[email protected]>
Reported-by: Johannes Weiner <[email protected]>
Acked-by: Michal Hocko <[email protected]>
Acked-by: Johannes Weiner <[email protected]>
Cc: Vlastimil Babka <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: Dave Hansen <[email protected]>
Cc: Sergey Senozhatsky <[email protected]>
Cc: Petr Mladek <[email protected]>
Cc: Steven Rostedt <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
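The final pseudo code in the message reduces to a trylock-or-record-pending shape. A compilable model of just that interaction, assuming POSIX threads and C11 atomics; it demonstrates the contention pattern, not the kernel code itself:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static pthread_mutex_t oom_lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_int pending_logs;

/* The lock holder drains the backlog, while every thread that loses the
 * trylock only adds to it, so fast producers can pin the holder inside
 * the drain loop indefinitely. */
static void oom_or_warn(void)
{
    if (pthread_mutex_trylock(&oom_lock) == 0) {
        while (atomic_load(&pending_logs) > 0) {
            atomic_fetch_sub(&pending_logs, 1);
            /* print_one_log() would run here */
        }
        /* "Send SIGKILL here." in the pseudo code above */
        pthread_mutex_unlock(&oom_lock);
    } else {
        atomic_fetch_add(&pending_logs, 1);   /* the warn_alloc() side */
    }
}

int main(void)
{
    atomic_store(&pending_logs, 3);
    oom_or_warn();
    printf("pending after drain: %d\n", atomic_load(&pending_logs));
    return 0;
}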
static inline void gen_op_testl_T0_T1_cc(void)
{
tcg_gen_and_tl(cpu_cc_dst, cpu_T0, cpu_T1);
}
| 0 |
[
"CWE-94"
] |
qemu
|
30663fd26c0307e414622c7a8607fbc04f92ec14
| 102,364,961,485,211,550,000,000,000,000,000,000,000 | 4 |
tcg/i386: Check the size of instruction being translated
This fixes the bug: 'user-to-root privesc inside VM via bad translation
caching' reported by Jann Horn here:
https://bugs.chromium.org/p/project-zero/issues/detail?id=1122
Reviewed-by: Richard Henderson <[email protected]>
CC: Peter Maydell <[email protected]>
CC: Paolo Bonzini <[email protected]>
Reported-by: Jann Horn <[email protected]>
Signed-off-by: Pranith Kumar <[email protected]>
Message-Id: <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
|
soup_auth_ntlm_finalize (GObject *object)
{
SoupAuthNTLMPrivate *priv = soup_auth_ntlm_get_instance_private (SOUP_AUTH_NTLM (object));
g_free (priv->username);
g_free (priv->domain);
memset (priv->nt_hash, 0, sizeof (priv->nt_hash));
memset (priv->lm_hash, 0, sizeof (priv->lm_hash));
#ifdef USE_NTLM_AUTH
sso_ntlm_close (priv);
#endif
G_OBJECT_CLASS (soup_auth_ntlm_parent_class)->finalize (object);
}
| 0 |
[
"CWE-125"
] |
libsoup
|
f8a54ac85eec2008c85393f331cdd251af8266ad
| 3,793,070,642,111,338,500,000,000,000,000,000,000 | 16 |
NTLM: Avoid a potential heap buffer overflow in v2 authentication
Check the length of the decoded v2 challenge before attempting to
parse it, to avoid reading past it.
Fixes #173
|
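The fix pattern here is a length check on the decoded blob before any fixed-offset field reads. A sketch with an assumed minimum size and an assumed field offset; neither number is libsoup's actual layout:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NTLM_CHALLENGE_MIN_LEN 48   /* assumed fixed-header size */

/* Refuse to parse a blob shorter than the fixed-size header, so the
 * field reads below can never run past the end of the buffer. */
static int parse_challenge(const uint8_t *blob, size_t len, uint32_t *flags)
{
    if (blob == NULL || len < NTLM_CHALLENGE_MIN_LEN)
        return -1;                              /* the added bounds check */
    memcpy(flags, blob + 20, sizeof(*flags));   /* hypothetical offset */
    return 0;
}

int main(void)
{
    uint8_t short_blob[8] = {0};
    uint32_t flags;
    printf("%d\n", parse_challenge(short_blob, sizeof(short_blob), &flags)); /* -1 */
    return 0;
}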
GF_Box *avcc_box_new()
{
GF_AVCConfigurationBox *tmp = (GF_AVCConfigurationBox *) gf_malloc(sizeof(GF_AVCConfigurationBox));
if (tmp == NULL) return NULL;
memset(tmp, 0, sizeof(GF_AVCConfigurationBox));
tmp->type = GF_ISOM_BOX_TYPE_AVCC;
return (GF_Box *)tmp;
}
| 0 |
[
"CWE-401"
] |
gpac
|
0a85029d694f992f3631e2f249e4999daee15cbf
| 293,058,273,887,973,420,000,000,000,000,000,000,000 | 8 |
fixed #1785 (fuzz)
|
static void complete_incr_bin(conn *c) {
item *it;
char *key;
size_t nkey;
/* Weird magic in add_delta forces me to pad here */
char tmpbuf[INCR_MAX_STORAGE_LEN];
uint64_t cas = 0;
protocol_binary_response_incr* rsp = (protocol_binary_response_incr*)c->wbuf;
protocol_binary_request_incr* req = binary_get_request(c);
assert(c != NULL);
assert(c->wsize >= sizeof(*rsp));
/* fix byteorder in the request */
req->message.body.delta = ntohll(req->message.body.delta);
req->message.body.initial = ntohll(req->message.body.initial);
req->message.body.expiration = ntohl(req->message.body.expiration);
key = binary_get_key(c);
nkey = c->binary_header.request.keylen;
if (settings.verbose > 1) {
int i;
fprintf(stderr, "incr ");
for (i = 0; i < nkey; i++) {
fprintf(stderr, "%c", key[i]);
}
fprintf(stderr, " %lld, %llu, %d\n",
(long long)req->message.body.delta,
(long long)req->message.body.initial,
req->message.body.expiration);
}
if (c->binary_header.request.cas != 0) {
cas = c->binary_header.request.cas;
}
switch(add_delta(c, key, nkey, c->cmd == PROTOCOL_BINARY_CMD_INCREMENT,
req->message.body.delta, tmpbuf,
&cas)) {
case OK:
rsp->message.body.value = htonll(strtoull(tmpbuf, NULL, 10));
if (cas) {
c->cas = cas;
}
write_bin_response(c, &rsp->message.body, 0, 0,
sizeof(rsp->message.body.value));
break;
case NON_NUMERIC:
write_bin_error(c, PROTOCOL_BINARY_RESPONSE_DELTA_BADVAL, NULL, 0);
break;
case EOM:
out_of_memory(c, "SERVER_ERROR Out of memory incrementing value");
break;
case DELTA_ITEM_NOT_FOUND:
if (req->message.body.expiration != 0xffffffff) {
/* Save some room for the response */
rsp->message.body.value = htonll(req->message.body.initial);
snprintf(tmpbuf, INCR_MAX_STORAGE_LEN, "%llu",
(unsigned long long)req->message.body.initial);
int res = strlen(tmpbuf);
it = item_alloc(key, nkey, 0, realtime(req->message.body.expiration),
res + 2);
if (it != NULL) {
memcpy(ITEM_data(it), tmpbuf, res);
memcpy(ITEM_data(it) + res, "\r\n", 2);
if (store_item(it, NREAD_ADD, c)) {
c->cas = ITEM_get_cas(it);
write_bin_response(c, &rsp->message.body, 0, 0, sizeof(rsp->message.body.value));
} else {
write_bin_error(c, PROTOCOL_BINARY_RESPONSE_NOT_STORED,
NULL, 0);
}
item_remove(it); /* release our reference */
} else {
out_of_memory(c,
"SERVER_ERROR Out of memory allocating new item");
}
} else {
pthread_mutex_lock(&c->thread->stats.mutex);
if (c->cmd == PROTOCOL_BINARY_CMD_INCREMENT) {
c->thread->stats.incr_misses++;
} else {
c->thread->stats.decr_misses++;
}
pthread_mutex_unlock(&c->thread->stats.mutex);
write_bin_error(c, PROTOCOL_BINARY_RESPONSE_KEY_ENOENT, NULL, 0);
}
break;
case DELTA_ITEM_CAS_MISMATCH:
write_bin_error(c, PROTOCOL_BINARY_RESPONSE_KEY_EEXISTS, NULL, 0);
break;
}
}
| 0 |
[
"CWE-190"
] |
memcached
|
bd578fc34b96abe0f8d99c1409814a09f51ee71c
| 174,367,827,284,680,140,000,000,000,000,000,000,000 | 98 |
CVE reported by cisco talos
|
static int veth_validate(struct nlattr *tb[], struct nlattr *data[])
{
if (tb[IFLA_ADDRESS]) {
if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
return -EINVAL;
if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
return -EADDRNOTAVAIL;
}
if (tb[IFLA_MTU]) {
if (!is_valid_veth_mtu(nla_get_u32(tb[IFLA_MTU])))
return -EINVAL;
}
return 0;
}
| 0 |
[
"CWE-399"
] |
linux
|
6ec82562ffc6f297d0de36d65776cff8e5704867
| 223,581,417,108,674,400,000,000,000,000,000,000,000 | 14 |
veth: Don't kfree_skb() after dev_forward_skb()
In case of congestion, netif_rx() frees the skb, so we must assume
dev_forward_skb() also consume skb.
Bug introduced by commit 445409602c092
(veth: move loopback logic to common location)
We must change dev_forward_skb() to always consume skb, and veth to not
double free it.
Bug report : http://marc.info/?l=linux-netdev&m=127310770900442&w=3
Reported-by: Martín Ferrari <[email protected]>
Signed-off-by: Eric Dumazet <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
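The ownership rule the message settles on, callee always consumes, caller never frees afterwards, can be sketched generically. Hypothetical names below; this is not the kernel API:

#include <stdio.h>
#include <stdlib.h>

struct pkt { int len; };

/* Contract after the fix: forward_pkt() consumes p on every path,
 * success or congestion, so the caller must not free it again. */
static int forward_pkt(struct pkt *p, int congested)
{
    if (congested) {
        free(p);          /* the drop path frees */
        return -1;
    }
    printf("delivered %d bytes\n", p->len);
    free(p);              /* the delivery path frees too */
    return 0;
}

int main(void)
{
    struct pkt *p = malloc(sizeof(*p));
    if (!p)
        return 1;
    p->len = 64;
    forward_pkt(p, 0);
    /* no free(p) here: that second free is what the patch removed */
    return 0;
}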
gdm_session_new (GdmSessionVerificationMode verification_mode,
uid_t allowed_user,
const char *display_name,
const char *display_hostname,
const char *display_device,
const char *display_seat_id,
const char *display_x11_authority_file,
gboolean display_is_local,
const char * const *environment)
{
GdmSession *self;
self = g_object_new (GDM_TYPE_SESSION,
"verification-mode", verification_mode,
"allowed-user", (guint) allowed_user,
"display-name", display_name,
"display-hostname", display_hostname,
"display-device", display_device,
"display-seat-id", display_seat_id,
"display-x11-authority-file", display_x11_authority_file,
"display-is-local", display_is_local,
"conversation-environment", environment,
NULL);
return self;
}
| 0 |
[] |
gdm
|
5ac224602f1d603aac5eaa72e1760d3e33a26f0a
| 140,015,224,188,529,520,000,000,000,000,000,000,000 | 26 |
session: disconnect signals from worker proxy when conversation is freed
We don't want an outstanding reference on the worker proxy to lead to
signal handlers getting dispatched after the conversation is freed.
https://bugzilla.gnome.org/show_bug.cgi?id=758032
|
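The fix boils down to disconnecting, at teardown, every handler whose user data points at the object being freed. A hedged GLib sketch (builds against gobject-2.0) using the stock GObject "notify" signal as a stand-in for the worker proxy's signals; the names are placeholders:

#include <glib-object.h>

typedef struct { GObject *proxy; } Conversation;

static void on_notify(GObject *obj, GParamSpec *pspec, gpointer data)
{
    (void) obj; (void) pspec; (void) data;
    g_print("notify fired\n");
}

/* Teardown: drop every handler whose user data is this conversation,
 * so nothing can be dispatched into freed memory later. */
static void conversation_free(Conversation *c)
{
    g_signal_handlers_disconnect_by_data(c->proxy, c);
    g_object_unref(c->proxy);
    g_free(c);
}

int main(void)
{
    Conversation *c = g_new0(Conversation, 1);
    c->proxy = g_object_new(G_TYPE_OBJECT, NULL);
    g_signal_connect(c->proxy, "notify", G_CALLBACK(on_notify), c);
    conversation_free(c);   /* the handler is gone before the struct is */
    return 0;
}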
static void enable_step(struct task_struct *child, bool block)
{
/*
* Make sure block stepping (BTF) is not enabled unless it should be.
* Note that we don't try to worry about any is_setting_trap_flag()
* instructions after the first when using block stepping.
* So no one should try to use debugger block stepping in a program
* that uses user-mode single stepping itself.
*/
if (enable_single_step(child) && block)
set_task_blockstep(child, true);
else if (test_tsk_thread_flag(child, TIF_BLOCKSTEP))
set_task_blockstep(child, false);
}
| 0 |
[
"CWE-20",
"CWE-362"
] |
linux
|
9899d11f654474d2d54ea52ceaa2a1f4db3abd68
| 186,398,726,262,849,700,000,000,000,000,000,000,000 | 14 |
ptrace: ensure arch_ptrace/ptrace_request can never race with SIGKILL
putreg() assumes that the tracee is not running and pt_regs_access() can
safely play with its stack. However a killed tracee can return from
ptrace_stop() to the low-level asm code and do RESTORE_REST, this means
that debugger can actually read/modify the kernel stack until the tracee
does SAVE_REST again.
set_task_blockstep() can race with SIGKILL too and in some sense this
race is even worse: the very fact that the tracee can be woken up breaks the
logic.
As Linus suggested we can clear TASK_WAKEKILL around the arch_ptrace()
call, this ensures that nobody can ever wakeup the tracee while the
debugger looks at it. Not only this fixes the mentioned problems, we
can do some cleanups/simplifications in arch_ptrace() paths.
Probably ptrace_unfreeze_traced() needs more callers, for example it
makes sense to make the tracee killable for oom-killer before
access_process_vm().
While at it, add the comment into may_ptrace_stop() to explain why
ptrace_stop() still can't rely on SIGKILL and signal_pending_state().
Reported-by: Salman Qazi <[email protected]>
Reported-by: Suleiman Souhlal <[email protected]>
Suggested-by: Linus Torvalds <[email protected]>
Signed-off-by: Oleg Nesterov <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
void http_parse_connection_header(struct http_txn *txn, struct http_msg *msg, int to_del)
{
struct hdr_ctx ctx;
const char *hdr_val = "Connection";
int hdr_len = 10;
if (txn->flags & TX_HDR_CONN_PRS)
return;
if (unlikely(txn->flags & TX_USE_PX_CONN)) {
hdr_val = "Proxy-Connection";
hdr_len = 16;
}
ctx.idx = 0;
txn->flags &= ~(TX_CON_KAL_SET|TX_CON_CLO_SET);
while (http_find_header2(hdr_val, hdr_len, msg->chn->buf->p, &txn->hdr_idx, &ctx)) {
if (ctx.vlen >= 10 && word_match(ctx.line + ctx.val, ctx.vlen, "keep-alive", 10)) {
txn->flags |= TX_HDR_CONN_KAL;
if (to_del & 2)
http_remove_header2(msg, &txn->hdr_idx, &ctx);
else
txn->flags |= TX_CON_KAL_SET;
}
else if (ctx.vlen >= 5 && word_match(ctx.line + ctx.val, ctx.vlen, "close", 5)) {
txn->flags |= TX_HDR_CONN_CLO;
if (to_del & 1)
http_remove_header2(msg, &txn->hdr_idx, &ctx);
else
txn->flags |= TX_CON_CLO_SET;
}
else if (ctx.vlen >= 7 && word_match(ctx.line + ctx.val, ctx.vlen, "upgrade", 7)) {
txn->flags |= TX_HDR_CONN_UPG;
}
}
txn->flags |= TX_HDR_CONN_PRS;
return;
}
| 0 |
[] |
haproxy
|
aae75e3279c6c9bd136413a72dafdcd4986bb89a
| 55,505,483,470,069,330,000,000,000,000,000,000,000 | 39 |
BUG/CRITICAL: using HTTP information in tcp-request content may crash the process
During normal HTTP request processing, request buffers are realigned if
there are less than global.maxrewrite bytes available after them, in
order to leave enough room for rewriting headers after the request. This
is done in http_wait_for_request().
However, if some HTTP inspection happens during a "tcp-request content"
rule, this realignment is not performed. In theory this is not a problem
because empty buffers are always aligned and TCP inspection happens at
the beginning of a connection. But with HTTP keep-alive, it also happens
at the beginning of each subsequent request. So if a second request was
pipelined by the client before the first one had a chance to be forwarded,
the second request will not be realigned. Then, http_wait_for_request()
will not perform such a realignment either because the request was
already parsed and marked as such. The consequence of this, is that the
rewrite of a sufficient number of such pipelined, unaligned requests may
leave less room past the request been processed than the configured
reserve, which can lead to a buffer overflow if request processing appends
some data past the end of the buffer.
A number of conditions are required for the bug to be triggered :
- HTTP keep-alive must be enabled ;
- HTTP inspection in TCP rules must be used ;
- some request appending rules are needed (reqadd, x-forwarded-for)
- since empty buffers are always realigned, the client must pipeline
enough requests so that the buffer always contains something till
the point where there is no more room for rewriting.
While such a configuration is quite unlikely to be met (which is
confirmed by the bug's lifetime), a few people do use these features
together for very specific usages. And more importantly, writing such
a configuration and the request to attack it is trivial.
A quick workaround consists in forcing keep-alive off by adding
"option httpclose" or "option forceclose" in the frontend. Alternatively,
disabling HTTP-based TCP inspection rules is enough if the application
supports it.
At first glance, this bug does not look like it could lead to remote code
execution, as the overflowing part is controlled by the configuration and
not by the user. But some deeper analysis should be performed to confirm
this. And anyway, corrupting the process' memory and crashing it is quite
trivial.
Special thanks go to Yves Lafon from the W3C who reported this bug and
deployed significant efforts to collect the relevant data needed to
understand it in less than one week.
CVE-2013-1912 was assigned to this issue.
Note that 1.4 is also affected so the fix must be backported.
|
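The invariant violated here is that at least maxrewrite bytes must remain free past the parsed request before header rewriting, and the buffer must be realigned whenever that reserve is missing. A simplified model with made-up structures, not haproxy's internals:

#include <stdio.h>
#include <string.h>

#define BUF_SIZE   64
#define MAXREWRITE 16   /* reserve kept free for header rewrites */

struct buf {
    char   data[BUF_SIZE];
    size_t start;        /* offset of the pending request */
    size_t len;          /* bytes of pending data */
};

/* Realign pending data to the front whenever less than MAXREWRITE
 * bytes remain after it; skipping this check for pipelined keep-alive
 * requests is what allowed the overflow described above. */
static void ensure_reserve(struct buf *b)
{
    if (BUF_SIZE - (b->start + b->len) < MAXREWRITE) {
        memmove(b->data, b->data + b->start, b->len);
        b->start = 0;
    }
}

int main(void)
{
    struct buf b = { .start = 40, .len = 12 };
    memcpy(b.data + b.start, "GET / HTTP/1", b.len);
    ensure_reserve(&b);
    printf("start=%zu room=%zu\n", b.start, BUF_SIZE - (b.start + b.len));
    return 0;
}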
svcauth_gss_accept_sec_context(struct svc_req *rqst,
struct rpc_gss_init_res *gr)
{
struct svc_rpc_gss_data *gd;
struct rpc_gss_cred *gc;
gss_buffer_desc recv_tok, seqbuf;
gss_OID mech;
OM_uint32 maj_stat = 0, min_stat = 0, ret_flags, seq;
log_debug("in svcauth_gss_accept_context()");
gd = SVCAUTH_PRIVATE(rqst->rq_xprt->xp_auth);
gc = (struct rpc_gss_cred *)rqst->rq_clntcred;
memset(gr, 0, sizeof(*gr));
/* Deserialize arguments. */
memset(&recv_tok, 0, sizeof(recv_tok));
if (!svc_getargs(rqst->rq_xprt, xdr_rpc_gss_init_args,
(caddr_t)&recv_tok))
return (FALSE);
gr->gr_major = gss_accept_sec_context(&gr->gr_minor,
&gd->ctx,
svcauth_gss_creds,
&recv_tok,
GSS_C_NO_CHANNEL_BINDINGS,
&gd->client_name,
&mech,
&gr->gr_token,
&ret_flags,
NULL,
NULL);
svc_freeargs(rqst->rq_xprt, xdr_rpc_gss_init_args, (caddr_t)&recv_tok);
log_status("accept_sec_context", gr->gr_major, gr->gr_minor);
if (gr->gr_major != GSS_S_COMPLETE &&
gr->gr_major != GSS_S_CONTINUE_NEEDED) {
badauth(gr->gr_major, gr->gr_minor, rqst->rq_xprt);
gd->ctx = GSS_C_NO_CONTEXT;
goto errout;
}
/*
* ANDROS: krb5 mechglue returns ctx of size 8 - two pointers,
* one to the mechanism oid, one to the internal_ctx_id
*/
if ((gr->gr_ctx.value = mem_alloc(sizeof(gss_union_ctx_id_desc))) == NULL) {
fprintf(stderr, "svcauth_gss_accept_context: out of memory\n");
goto errout;
}
memcpy(gr->gr_ctx.value, gd->ctx, sizeof(gss_union_ctx_id_desc));
gr->gr_ctx.length = sizeof(gss_union_ctx_id_desc);
/* gr->gr_win = 0x00000005; ANDROS: for debugging linux kernel version... */
gr->gr_win = sizeof(gd->seqmask) * 8;
/* Save client info. */
gd->sec.mech = mech;
gd->sec.qop = GSS_C_QOP_DEFAULT;
gd->sec.svc = gc->gc_svc;
gd->seq = gc->gc_seq;
gd->win = gr->gr_win;
if (gr->gr_major == GSS_S_COMPLETE) {
#ifdef SPKM
/* spkm3: no src_name (anonymous) */
if(!g_OID_equal(gss_mech_spkm3, mech)) {
#endif
maj_stat = gss_display_name(&min_stat, gd->client_name,
&gd->cname, &gd->sec.mech);
#ifdef SPKM
}
#endif
if (maj_stat != GSS_S_COMPLETE) {
log_status("display_name", maj_stat, min_stat);
goto errout;
}
#ifdef DEBUG
#ifdef HAVE_HEIMDAL
log_debug("accepted context for %.*s with "
"<mech {}, qop %d, svc %d>",
gd->cname.length, (char *)gd->cname.value,
gd->sec.qop, gd->sec.svc);
#else
{
gss_buffer_desc mechname;
gss_oid_to_str(&min_stat, mech, &mechname);
log_debug("accepted context for %.*s with "
"<mech %.*s, qop %d, svc %d>",
gd->cname.length, (char *)gd->cname.value,
mechname.length, (char *)mechname.value,
gd->sec.qop, gd->sec.svc);
gss_release_buffer(&min_stat, &mechname);
}
#endif
#endif /* DEBUG */
seq = htonl(gr->gr_win);
seqbuf.value = &seq;
seqbuf.length = sizeof(seq);
gss_release_buffer(&min_stat, &gd->checksum);
maj_stat = gss_sign(&min_stat, gd->ctx, GSS_C_QOP_DEFAULT,
&seqbuf, &gd->checksum);
if (maj_stat != GSS_S_COMPLETE) {
goto errout;
}
rqst->rq_xprt->xp_verf.oa_flavor = RPCSEC_GSS;
rqst->rq_xprt->xp_verf.oa_base = gd->checksum.value;
rqst->rq_xprt->xp_verf.oa_length = gd->checksum.length;
}
return (TRUE);
errout:
gss_release_buffer(&min_stat, &gr->gr_token);
return (FALSE);
}
| 1 |
[
"CWE-200"
] |
krb5
|
5bb8a6b9c9eb8dd22bc9526751610aaa255ead9c
| 257,295,965,070,572,100,000,000,000,000,000,000,000 | 122 |
Fix gssrpc data leakage [CVE-2014-9423]
[MITKRB5-SA-2015-001] In svcauth_gss_accept_sec_context(), do not copy
bytes from the union context into the handle field we send to the
client. We do not use this handle field, so just supply a fixed
string of "xxxx".
In gss_union_ctx_id_struct, remove the unused "interposer" field which
was causing part of the union context to remain uninitialized.
ticket: 8058 (new)
target_version: 1.13.1
tags: pullup
|
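The fix replaces the memcpy of the internal union context, part of which could be uninitialized, with a fixed opaque handle, since the client-visible handle is never interpreted. A sketch of the changed step with simplified types (not the gssrpc declarations):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct gss_buffer { size_t length; void *value; };

/* Before: copied sizeof(union ctx) raw bytes into the wire handle.
 * After: a constant placeholder, so no kernel-of-the-library state
 * or uninitialized padding can reach the client. */
static int fill_context_handle(struct gss_buffer *out)
{
    static const char fixed[] = "xxxx";

    out->value = malloc(sizeof(fixed));
    if (out->value == NULL)
        return -1;
    memcpy(out->value, fixed, sizeof(fixed));
    out->length = sizeof(fixed);
    return 0;
}

int main(void)
{
    struct gss_buffer h;
    if (fill_context_handle(&h) == 0) {
        printf("handle: %s (%zu bytes)\n", (char *)h.value, h.length);
        free(h.value);
    }
    return 0;
}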
void bgp_dump_routes_attr(struct stream *s, struct attr *attr,
struct prefix *prefix)
{
unsigned long cp;
unsigned long len;
size_t aspath_lenp;
struct aspath *aspath;
int addpath_encode = 0;
uint32_t addpath_tx_id = 0;
/* Remember current pointer. */
cp = stream_get_endp(s);
/* Place holder of length. */
stream_putw(s, 0);
/* Origin attribute. */
stream_putc(s, BGP_ATTR_FLAG_TRANS);
stream_putc(s, BGP_ATTR_ORIGIN);
stream_putc(s, 1);
stream_putc(s, attr->origin);
aspath = attr->aspath;
stream_putc(s, BGP_ATTR_FLAG_TRANS | BGP_ATTR_FLAG_EXTLEN);
stream_putc(s, BGP_ATTR_AS_PATH);
aspath_lenp = stream_get_endp(s);
stream_putw(s, 0);
stream_putw_at(s, aspath_lenp, aspath_put(s, aspath, 1));
/* Nexthop attribute. */
/* If it's an IPv6 prefix, don't dump the IPv4 nexthop to save space */
if (prefix != NULL && prefix->family != AF_INET6) {
stream_putc(s, BGP_ATTR_FLAG_TRANS);
stream_putc(s, BGP_ATTR_NEXT_HOP);
stream_putc(s, 4);
stream_put_ipv4(s, attr->nexthop.s_addr);
}
/* MED attribute. */
if (attr->flag & ATTR_FLAG_BIT(BGP_ATTR_MULTI_EXIT_DISC)) {
stream_putc(s, BGP_ATTR_FLAG_OPTIONAL);
stream_putc(s, BGP_ATTR_MULTI_EXIT_DISC);
stream_putc(s, 4);
stream_putl(s, attr->med);
}
/* Local preference. */
if (attr->flag & ATTR_FLAG_BIT(BGP_ATTR_LOCAL_PREF)) {
stream_putc(s, BGP_ATTR_FLAG_TRANS);
stream_putc(s, BGP_ATTR_LOCAL_PREF);
stream_putc(s, 4);
stream_putl(s, attr->local_pref);
}
/* Atomic aggregate. */
if (attr->flag & ATTR_FLAG_BIT(BGP_ATTR_ATOMIC_AGGREGATE)) {
stream_putc(s, BGP_ATTR_FLAG_TRANS);
stream_putc(s, BGP_ATTR_ATOMIC_AGGREGATE);
stream_putc(s, 0);
}
/* Aggregator. */
if (attr->flag & ATTR_FLAG_BIT(BGP_ATTR_AGGREGATOR)) {
stream_putc(s, BGP_ATTR_FLAG_OPTIONAL | BGP_ATTR_FLAG_TRANS);
stream_putc(s, BGP_ATTR_AGGREGATOR);
stream_putc(s, 8);
stream_putl(s, attr->aggregator_as);
stream_put_ipv4(s, attr->aggregator_addr.s_addr);
}
/* Community attribute. */
if (attr->flag & ATTR_FLAG_BIT(BGP_ATTR_COMMUNITIES)) {
if (attr->community->size * 4 > 255) {
stream_putc(s,
BGP_ATTR_FLAG_OPTIONAL | BGP_ATTR_FLAG_TRANS
| BGP_ATTR_FLAG_EXTLEN);
stream_putc(s, BGP_ATTR_COMMUNITIES);
stream_putw(s, attr->community->size * 4);
} else {
stream_putc(s,
BGP_ATTR_FLAG_OPTIONAL
| BGP_ATTR_FLAG_TRANS);
stream_putc(s, BGP_ATTR_COMMUNITIES);
stream_putc(s, attr->community->size * 4);
}
stream_put(s, attr->community->val, attr->community->size * 4);
}
/* Large Community attribute. */
if (attr->flag & ATTR_FLAG_BIT(BGP_ATTR_LARGE_COMMUNITIES)) {
if (lcom_length(attr->lcommunity) > 255) {
stream_putc(s,
BGP_ATTR_FLAG_OPTIONAL | BGP_ATTR_FLAG_TRANS
| BGP_ATTR_FLAG_EXTLEN);
stream_putc(s, BGP_ATTR_LARGE_COMMUNITIES);
stream_putw(s, lcom_length(attr->lcommunity));
} else {
stream_putc(s,
BGP_ATTR_FLAG_OPTIONAL
| BGP_ATTR_FLAG_TRANS);
stream_putc(s, BGP_ATTR_LARGE_COMMUNITIES);
stream_putc(s, lcom_length(attr->lcommunity));
}
stream_put(s, attr->lcommunity->val,
lcom_length(attr->lcommunity));
}
/* Add a MP_NLRI attribute to dump the IPv6 next hop */
if (prefix != NULL && prefix->family == AF_INET6
&& (attr->mp_nexthop_len == BGP_ATTR_NHLEN_IPV6_GLOBAL
|| attr->mp_nexthop_len == BGP_ATTR_NHLEN_IPV6_GLOBAL_AND_LL)) {
int sizep;
stream_putc(s, BGP_ATTR_FLAG_OPTIONAL);
stream_putc(s, BGP_ATTR_MP_REACH_NLRI);
sizep = stream_get_endp(s);
/* MP header */
stream_putc(s, 0); /* Marker: Attribute length. */
stream_putw(s, AFI_IP6); /* AFI */
stream_putc(s, SAFI_UNICAST); /* SAFI */
/* Next hop */
stream_putc(s, attr->mp_nexthop_len);
stream_put(s, &attr->mp_nexthop_global, IPV6_MAX_BYTELEN);
if (attr->mp_nexthop_len == BGP_ATTR_NHLEN_IPV6_GLOBAL_AND_LL)
stream_put(s, &attr->mp_nexthop_local,
IPV6_MAX_BYTELEN);
/* SNPA */
stream_putc(s, 0);
/* Prefix */
stream_put_prefix_addpath(s, prefix, addpath_encode,
addpath_tx_id);
/* Set MP attribute length. */
stream_putc_at(s, sizep, (stream_get_endp(s) - sizep) - 1);
}
/* Prefix SID */
if (attr->flag & ATTR_FLAG_BIT(BGP_ATTR_PREFIX_SID)) {
if (attr->label_index != BGP_INVALID_LABEL_INDEX) {
stream_putc(s,
BGP_ATTR_FLAG_OPTIONAL
| BGP_ATTR_FLAG_TRANS);
stream_putc(s, BGP_ATTR_PREFIX_SID);
stream_putc(s, 10);
stream_putc(s, BGP_PREFIX_SID_LABEL_INDEX);
stream_putc(s, BGP_PREFIX_SID_LABEL_INDEX_LENGTH);
stream_putc(s, 0); // reserved
stream_putw(s, 0); // flags
stream_putl(s, attr->label_index);
}
}
/* Return total size of attribute. */
len = stream_get_endp(s) - cp - 2;
stream_putw_at(s, cp, len);
}
| 0 |
[
"CWE-20",
"CWE-436"
] |
frr
|
943d595a018e69b550db08cccba1d0778a86705a
| 44,029,970,126,037,300,000,000,000,000,000,000,000 | 163 |
bgpd: don't use BGP_ATTR_VNC(255) unless ENABLE_BGP_VNC_ATTR is defined
Signed-off-by: Lou Berger <[email protected]>
|
static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
struct sys_reg_params *p,
const struct sys_reg_desc *r)
{
if (p->is_write) {
return ignore_write(vcpu, p);
} else {
p->regval = read_sysreg(dbgauthstatus_el1);
return true;
}
}
| 0 |
[
"CWE-20",
"CWE-617"
] |
linux
|
9e3f7a29694049edd728e2400ab57ad7553e5aa9
| 304,269,066,422,316,120,000,000,000,000,000,000,000 | 11 |
arm64: KVM: pmu: Fix AArch32 cycle counter access
We're missing the handling code for the cycle counter accessed
from a 32bit guest, leading to unexpected results.
Cc: [email protected] # 4.6+
Signed-off-by: Wei Huang <[email protected]>
Signed-off-by: Marc Zyngier <[email protected]>
|
static const char *ModeToString(PSDImageType type)
{
switch (type)
{
case BitmapMode: return "Bitmap";
case GrayscaleMode: return "Grayscale";
case IndexedMode: return "Indexed";
case RGBMode: return "RGB";
case CMYKMode: return "CMYK";
case MultichannelMode: return "Multichannel";
case DuotoneMode: return "Duotone";
case LabMode: return "L*A*B";
default: return "unknown";
}
}
| 0 |
[] |
ImageMagick
|
bd9f1e7d1bd2c8e2cf7895d133c5c5b5cd3526b6
| 229,469,984,288,374,500,000,000,000,000,000,000,000 | 15 |
Fixed memory leak when reading incorrect PSD files
|
int check_frequency(struct wif *wi[], int cards)
{
int i, freq;
for(i=0; i<cards; i++)
{
freq = wi_get_freq(wi[i]);
if(freq < 0) continue;
if(G.frequency[i] != freq)
{
memset(G.message, '\x00', sizeof(G.message));
snprintf(G.message, sizeof(G.message), "][ fixed frequency %s: %d ", wi_get_ifname(wi[i]), freq);
wi_set_freq(wi[i], G.frequency[i]);
}
}
return 0;
}
| 0 |
[
"CWE-787"
] |
aircrack-ng
|
ff70494dd389ba570dbdbf36f217c28d4381c6b5
| 328,288,212,086,341,150,000,000,000,000,000,000,000 | 16 |
Airodump-ng: Fixed GPS stack overflow (Closes #13 on GitHub).
git-svn-id: http://svn.aircrack-ng.org/trunk@2416 28c6078b-6c39-48e3-add9-af49d547ecab
|
cifs_find_tcon(struct cifs_ses *ses, const char *unc)
{
struct list_head *tmp;
struct cifs_tcon *tcon;
spin_lock(&cifs_tcp_ses_lock);
list_for_each(tmp, &ses->tcon_list) {
tcon = list_entry(tmp, struct cifs_tcon, tcon_list);
if (!match_tcon(tcon, unc))
continue;
++tcon->tc_count;
spin_unlock(&cifs_tcp_ses_lock);
return tcon;
}
spin_unlock(&cifs_tcp_ses_lock);
return NULL;
}
| 0 |
[
"CWE-703",
"CWE-189"
] |
linux
|
1fc29bacedeabb278080e31bb9c1ecb49f143c3b
| 290,607,253,701,991,150,000,000,000,000,000,000,000 | 17 |
cifs: fix off-by-one bug in build_unc_path_to_root
commit 839db3d10a (cifs: fix up handling of prefixpath= option) changed
the code such that the vol->prepath no longer contained a leading
delimiter and then fixed up the places that accessed that field to
account for that change.
One spot in build_unc_path_to_root was missed however. When doing the
pointer addition on pos, that patch failed to account for the fact that
we had already incremented "pos" by one when adding the length of the
prepath. This caused a buffer overrun by one byte.
This patch fixes the problem by correcting the handling of "pos".
Cc: <[email protected]> # v3.8+
Reported-by: Marcus Moeller <[email protected]>
Reported-by: Ken Fallon <[email protected]>
Signed-off-by: Jeff Layton <[email protected]>
Signed-off-by: Steve French <[email protected]>
|
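Off-by-one bugs of this kind come from a write cursor and a size computation drifting apart, typically around a separator byte. A generic sketch that keeps the two in step (not the cifs code):

#include <stdio.h>
#include <string.h>

/* Build "<prefix>/<name>" into dst of size n, keeping the write
 * cursor and the remaining-space accounting in step. */
static int join_path(char *dst, size_t n, const char *prefix, const char *name)
{
    size_t need = strlen(prefix) + 1 + strlen(name) + 1; /* '/' and NUL */
    if (need > n)
        return -1;             /* the size must cover every byte written */

    char *pos = dst;
    memcpy(pos, prefix, strlen(prefix));
    pos += strlen(prefix);
    *pos++ = '/';              /* this +1 is the byte the patch re-accounted */
    memcpy(pos, name, strlen(name) + 1);
    return 0;
}

int main(void)
{
    char buf[16];
    printf("%d\n", join_path(buf, sizeof(buf), "share", "root")); /* 0 */
    printf("%s\n", buf);                                          /* share/root */
    return 0;
}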
KernelAndDeviceFunc::~KernelAndDeviceFunc() {
if (handle_ != kInvalidHandle) {
Status status = pflr_->ReleaseHandle(handle_);
if (!status.ok()) {
LOG(INFO) << "Ignoring error status when releasing multi-device function "
"handle "
<< status.ToString();
}
}
}
| 0 |
[
"CWE-476",
"CWE-369"
] |
tensorflow
|
da8558533d925694483d2c136a9220d6d49d843c
| 269,813,431,707,707,800,000,000,000,000,000,000,000 | 10 |
Fix undefined behavior in `tf.raw_ops.Switch` in eager mode.
PiperOrigin-RevId: 332578058
Change-Id: I9727571d2f21476b10d8aa27c1b7176564b76ac9
|
bool remoteComplete() const { return state_.remote_complete_; }
| 1 |
[
"CWE-416"
] |
envoy
|
148de954ed3585d8b4298b424aa24916d0de6136
| 195,304,725,909,186,800,000,000,000,000,000,000,000 | 1 |
CVE-2021-43825
Response filter manager crash
Signed-off-by: Yan Avlasov <[email protected]>
|
static int lp_do_ioctl(unsigned int minor, unsigned int cmd,
unsigned long arg, void __user *argp)
{
int status;
int retval = 0;
#ifdef LP_DEBUG
printk(KERN_DEBUG "lp%d ioctl, cmd: 0x%x, arg: 0x%lx\n", minor, cmd, arg);
#endif
if (minor >= LP_NO)
return -ENODEV;
if ((LP_F(minor) & LP_EXIST) == 0)
return -ENODEV;
switch ( cmd ) {
case LPTIME:
if (arg > UINT_MAX / HZ)
return -EINVAL;
LP_TIME(minor) = arg * HZ/100;
break;
case LPCHAR:
LP_CHAR(minor) = arg;
break;
case LPABORT:
if (arg)
LP_F(minor) |= LP_ABORT;
else
LP_F(minor) &= ~LP_ABORT;
break;
case LPABORTOPEN:
if (arg)
LP_F(minor) |= LP_ABORTOPEN;
else
LP_F(minor) &= ~LP_ABORTOPEN;
break;
case LPCAREFUL:
if (arg)
LP_F(minor) |= LP_CAREFUL;
else
LP_F(minor) &= ~LP_CAREFUL;
break;
case LPWAIT:
LP_WAIT(minor) = arg;
break;
case LPSETIRQ:
return -EINVAL;
break;
case LPGETIRQ:
if (copy_to_user(argp, &LP_IRQ(minor),
sizeof(int)))
return -EFAULT;
break;
case LPGETSTATUS:
if (mutex_lock_interruptible(&lp_table[minor].port_mutex))
return -EINTR;
lp_claim_parport_or_block (&lp_table[minor]);
status = r_str(minor);
lp_release_parport (&lp_table[minor]);
mutex_unlock(&lp_table[minor].port_mutex);
if (copy_to_user(argp, &status, sizeof(int)))
return -EFAULT;
break;
case LPRESET:
lp_reset(minor);
break;
#ifdef LP_STATS
case LPGETSTATS:
if (copy_to_user(argp, &LP_STAT(minor),
sizeof(struct lp_stats)))
return -EFAULT;
if (capable(CAP_SYS_ADMIN))
memset(&LP_STAT(minor), 0,
sizeof(struct lp_stats));
break;
#endif
case LPGETFLAGS:
status = LP_F(minor);
if (copy_to_user(argp, &status, sizeof(int)))
return -EFAULT;
break;
default:
retval = -EINVAL;
}
return retval;
}
| 0 |
[
"CWE-787"
] |
linux
|
3e21f4af170bebf47c187c1ff8bf155583c9f3b1
| 130,937,153,518,089,690,000,000,000,000,000,000,000 | 86 |
char: lp: fix possible integer overflow in lp_setup()
The lp_setup() code doesn't apply any bounds checking when passing
"lp=none", and only in this case, resulting in an overflow of the
parport_nr[] array. All versions in Git history are affected.
Reported-By: Roee Hay <[email protected]>
Cc: Ben Hutchings <[email protected]>
Cc: [email protected]
Signed-off-by: Willy Tarreau <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
|
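Two bounds checks matter in this driver: the arg * HZ / 100 scaling visible in the LPTIME case above, and the module-parameter array index the commit message describes. A generic sketch of both, with hypothetical constants:

#include <limits.h>
#include <stdio.h>

#define HZ    100
#define LP_NO 8

static unsigned int parport_nr[LP_NO];

/* Reject values whose scaling would overflow unsigned int. */
static int set_time(unsigned long arg, unsigned int *out)
{
    if (arg > UINT_MAX / HZ)
        return -1;
    *out = (unsigned int)(arg * HZ / 100);
    return 0;
}

/* Reject indexes outside the backing array before writing. */
static int set_port(unsigned int idx, unsigned int val)
{
    if (idx >= LP_NO)
        return -1;          /* the missing check behind the lp= overflow */
    parport_nr[idx] = val;
    return 0;
}

int main(void)
{
    unsigned int t;
    printf("%d %d\n", set_time(ULONG_MAX, &t), set_port(LP_NO, 1)); /* -1 -1 */
    return 0;
}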
static void test_bug49972()
{
int rc;
MYSQL_STMT *stmt;
MYSQL_BIND in_param_bind;
MYSQL_BIND out_param_bind;
int int_data;
my_bool is_null;
DBUG_ENTER("test_bug49972");
myheader("test_bug49972");
rc= mysql_query(mysql, "DROP FUNCTION IF EXISTS f1");
myquery(rc);
rc= mysql_query(mysql, "DROP PROCEDURE IF EXISTS p1");
myquery(rc);
rc= mysql_query(mysql, "CREATE FUNCTION f1() RETURNS INT RETURN 1");
myquery(rc);
rc= mysql_query(mysql, "CREATE PROCEDURE p1(IN a INT, OUT b INT) SET b = a");
myquery(rc);
stmt= mysql_simple_prepare(mysql, "CALL p1((SELECT f1()), ?)");
check_stmt(stmt);
memset(&in_param_bind, 0, sizeof (in_param_bind));
in_param_bind.buffer_type= MYSQL_TYPE_LONG;
in_param_bind.buffer= (char *) &int_data;
in_param_bind.length= 0;
in_param_bind.is_null= 0;
rc= mysql_stmt_bind_param(stmt, &in_param_bind);
rc= mysql_stmt_execute(stmt);
check_execute(stmt, rc);
{
memset(&out_param_bind, 0, sizeof (out_param_bind));
out_param_bind.buffer_type= MYSQL_TYPE_LONG;
out_param_bind.is_null= &is_null;
out_param_bind.buffer= &int_data;
out_param_bind.buffer_length= sizeof (int_data);
rc= mysql_stmt_bind_result(stmt, &out_param_bind);
check_execute(stmt, rc);
rc= mysql_stmt_fetch(stmt);
rc= mysql_stmt_fetch(stmt);
DBUG_ASSERT(rc == MYSQL_NO_DATA);
mysql_stmt_next_result(stmt);
mysql_stmt_fetch(stmt);
}
rc= mysql_query(mysql, "DROP FUNCTION f1");
myquery(rc);
rc= mysql_query(mysql, "CREATE FUNCTION f1() RETURNS INT RETURN 1");
myquery(rc);
rc= mysql_stmt_execute(stmt);
check_execute(stmt, rc);
{
memset(&out_param_bind, 0, sizeof (out_param_bind));
out_param_bind.buffer_type= MYSQL_TYPE_LONG;
out_param_bind.is_null= &is_null;
out_param_bind.buffer= &int_data;
out_param_bind.buffer_length= sizeof (int_data);
rc= mysql_stmt_bind_result(stmt, &out_param_bind);
check_execute(stmt, rc);
rc= mysql_stmt_fetch(stmt);
rc= mysql_stmt_fetch(stmt);
DBUG_ASSERT(rc == MYSQL_NO_DATA);
mysql_stmt_next_result(stmt);
mysql_stmt_fetch(stmt);
}
mysql_stmt_close(stmt);
rc= mysql_query(mysql, "DROP PROCEDURE p1");
myquery(rc);
rc= mysql_query(mysql, "DROP FUNCTION f1");
myquery(rc);
DBUG_VOID_RETURN;
}
| 0 |
[
"CWE-284",
"CWE-295"
] |
mysql-server
|
3bd5589e1a5a93f9c224badf983cd65c45215390
| 119,763,646,766,259,470,000,000,000,000,000,000,000 | 97 |
WL#6791 : Redefine client --ssl option to imply enforced encryption
# Changed the meaning of the --ssl=1 option of all client binaries
to mean force ssl, not try ssl and fail over to unencrypted
# Added a new MYSQL_OPT_SSL_ENFORCE mysql_options()
option to specify that an ssl connection is required.
# Added a new macro SSL_SET_OPTIONS() to the client
SSL handling headers that sets all the relevant SSL options at
once.
# Revamped all of the current native clients to use the new macro
# Removed some Windows line endings.
# Added proper handling of the new option into the ssl helper
headers.
# If SSL is mandatory assume that the media is secure enough
for the sha256 plugin to do unencrypted password exchange even
before establishing a connection.
# Set the default ssl cipher to DHE-RSA-AES256-SHA if none is
specified.
# updated test cases that require a non-default cipher to spawn
a mysql command line tool binary since mysqltest has no support
for specifying ciphers.
# updated the replication slave connection code to always enforce
SSL if any of the SSL config options is present.
# test cases added and updated.
# added a mysql_get_option() API to return mysql_options()
values. Used the new API inside the sha256 plugin.
# Fixed compilation warnings because of unused variables.
# Fixed test failures (mysql_ssl and bug13115401)
# Fixed whitespace issues.
# Fully implemented the mysql_get_option() function.
# Added a test case for mysql_get_option()
# fixed some trailing whitespace issues
# fixed some uint/int warnings in mysql_client_test.c
# removed shared memory option from non-windows get_options
tests
# moved MYSQL_OPT_LOCAL_INFILE to the uint options
|
TEST(AsyncSSLSocketTest, SSLClientTest) {
// Start listening on a local port
WriteCallbackBase writeCallback;
ReadCallback readCallback(&writeCallback);
HandshakeCallback handshakeCallback(&readCallback);
SSLServerAcceptCallbackDelay acceptCallback(&handshakeCallback);
TestSSLServer server(&acceptCallback);
// Set up SSL client
EventBase eventBase;
auto client = std::make_shared<SSLClient>(&eventBase, server.getAddress(), 1);
client->connect();
EventBaseAborter eba(&eventBase, 3000);
eventBase.loop();
EXPECT_EQ(client->getMiss(), 1);
EXPECT_EQ(client->getHit(), 0);
cerr << "SSLClientTest test completed" << endl;
}
| 0 |
[
"CWE-125"
] |
folly
|
c321eb588909646c15aefde035fd3133ba32cdee
| 91,390,531,748,019,440,000,000,000,000,000,000,000 | 21 |
Handle close_notify as standard writeErr in AsyncSSLSocket.
Summary: Fixes CVE-2019-11934
Reviewed By: mingtaoy
Differential Revision: D18020613
fbshipit-source-id: db82bb250e53f0d225f1280bd67bc74abd417836
|