func (string, 0–484k chars) | target (int64, 0–1) | cwe (sequence, 0–4 entries) | project (string, 799 classes) | commit_id (string, 40 chars) | hash (float64, ~1.2e24–3.4e29) | size (int64, 1–24k) | message (string, 0–13.3k chars)
---|---|---|---|---|---|---|---|
do_cmdline(
char_u *cmdline,
char_u *(*fgetline)(int, void *, int, getline_opt_T),
void *cookie, // argument for fgetline()
int flags)
{
char_u *next_cmdline; // next cmd to execute
char_u *cmdline_copy = NULL; // copy of cmd line
int used_getline = FALSE; // used "fgetline" to obtain command
static int recursive = 0; // recursive depth
int msg_didout_before_start = 0;
int count = 0; // line number count
int did_inc = FALSE; // incremented RedrawingDisabled
int retval = OK;
#ifdef FEAT_EVAL
cstack_T cstack; // conditional stack
garray_T lines_ga; // keep lines for ":while"/":for"
int current_line = 0; // active line in lines_ga
int current_line_before = 0;
char_u *fname = NULL; // function or script name
linenr_T *breakpoint = NULL; // ptr to breakpoint field in cookie
int *dbg_tick = NULL; // ptr to dbg_tick field in cookie
struct dbg_stuff debug_saved; // saved things for debug mode
int initial_trylevel;
msglist_T **saved_msg_list = NULL;
msglist_T *private_msg_list = NULL;
// "fgetline" and "cookie" passed to do_one_cmd()
char_u *(*cmd_getline)(int, void *, int, getline_opt_T);
void *cmd_cookie;
struct loop_cookie cmd_loop_cookie;
void *real_cookie;
int getline_is_func;
#else
# define cmd_getline fgetline
# define cmd_cookie cookie
#endif
static int call_depth = 0; // recursiveness
#ifdef FEAT_EVAL
// For every pair of do_cmdline()/do_one_cmd() calls, use an extra memory
// location for storing error messages to be converted to an exception.
// This ensures that the do_errthrow() call in do_one_cmd() does not
// combine the messages stored by an earlier invocation of do_one_cmd()
// with the command name of the later one. This would happen when
// BufWritePost autocommands are executed after a write error.
saved_msg_list = msg_list;
msg_list = &private_msg_list;
#endif
// It's possible to create an endless loop with ":execute", catch that
// here. The value of 200 allows nested function calls, ":source", etc.
// Allow 200 or 'maxfuncdepth', whatever is larger.
if (call_depth >= 200
#ifdef FEAT_EVAL
&& call_depth >= p_mfd
#endif
)
{
emsg(_("E169: Command too recursive"));
#ifdef FEAT_EVAL
// When converting to an exception, we do not include the command name
// since this is not an error of the specific command.
do_errthrow((cstack_T *)NULL, (char_u *)NULL);
msg_list = saved_msg_list;
#endif
return FAIL;
}
++call_depth;
#ifdef FEAT_EVAL
CLEAR_FIELD(cstack);
cstack.cs_idx = -1;
ga_init2(&lines_ga, (int)sizeof(wcmd_T), 10);
real_cookie = getline_cookie(fgetline, cookie);
// Inside a function use a higher nesting level.
getline_is_func = getline_equal(fgetline, cookie, get_func_line);
if (getline_is_func && ex_nesting_level == func_level(real_cookie))
++ex_nesting_level;
// Get the function or script name and the address where the next breakpoint
// line and the debug tick for a function or script are stored.
if (getline_is_func)
{
fname = func_name(real_cookie);
breakpoint = func_breakpoint(real_cookie);
dbg_tick = func_dbg_tick(real_cookie);
}
else if (getline_equal(fgetline, cookie, getsourceline))
{
fname = SOURCING_NAME;
breakpoint = source_breakpoint(real_cookie);
dbg_tick = source_dbg_tick(real_cookie);
}
/*
* Initialize "force_abort" and "suppress_errthrow" at the top level.
*/
if (!recursive)
{
force_abort = FALSE;
suppress_errthrow = FALSE;
}
/*
* If requested, store and reset the global values controlling the
* exception handling (used when debugging). Otherwise clear it to avoid
* a bogus compiler warning when the optimizer uses inline functions...
*/
if (flags & DOCMD_EXCRESET)
save_dbg_stuff(&debug_saved);
else
CLEAR_FIELD(debug_saved);
initial_trylevel = trylevel;
/*
* "did_throw" will be set to TRUE when an exception is being thrown.
*/
did_throw = FALSE;
#endif
/*
* "did_emsg" will be set to TRUE when emsg() is used, in which case we
* cancel the whole command line, and any if/endif or loop.
* If force_abort is set, we cancel everything.
*/
#ifdef FEAT_EVAL
did_emsg_cumul += did_emsg;
#endif
did_emsg = FALSE;
/*
* KeyTyped is only set when calling vgetc(). Reset it here when not
* calling vgetc() (sourced command lines).
*/
if (!(flags & DOCMD_KEYTYPED)
&& !getline_equal(fgetline, cookie, getexline))
KeyTyped = FALSE;
/*
* Continue executing command lines:
* - when inside an ":if", ":while" or ":for"
* - for multiple commands on one line, separated with '|'
* - when repeating until there are no more lines (for ":source")
*/
next_cmdline = cmdline;
do
{
#ifdef FEAT_EVAL
getline_is_func = getline_equal(fgetline, cookie, get_func_line);
#endif
// stop skipping cmds for an error msg after all endif/while/for
if (next_cmdline == NULL
#ifdef FEAT_EVAL
&& !force_abort
&& cstack.cs_idx < 0
&& !(getline_is_func && func_has_abort(real_cookie))
#endif
)
{
#ifdef FEAT_EVAL
did_emsg_cumul += did_emsg;
#endif
did_emsg = FALSE;
}
/*
* 1. If repeating a line in a loop, get a line from lines_ga.
* 2. If no line given: Get an allocated line with fgetline().
* 3. If a line is given: Make a copy, so we can mess with it.
*/
#ifdef FEAT_EVAL
// 1. If repeating, get a previous line from lines_ga.
if (cstack.cs_looplevel > 0 && current_line < lines_ga.ga_len)
{
// Each '|' separated command is stored separately in lines_ga, to
// be able to jump to it. Don't use next_cmdline now.
VIM_CLEAR(cmdline_copy);
// Check if a function has returned or, unless it has an unclosed
// try conditional, aborted.
if (getline_is_func)
{
# ifdef FEAT_PROFILE
if (do_profiling == PROF_YES)
func_line_end(real_cookie);
# endif
if (func_has_ended(real_cookie))
{
retval = FAIL;
break;
}
}
#ifdef FEAT_PROFILE
else if (do_profiling == PROF_YES
&& getline_equal(fgetline, cookie, getsourceline))
script_line_end();
#endif
// Check if a sourced file hit a ":finish" command.
if (source_finished(fgetline, cookie))
{
retval = FAIL;
break;
}
// If breakpoints have been added/deleted need to check for it.
if (breakpoint != NULL && dbg_tick != NULL
&& *dbg_tick != debug_tick)
{
*breakpoint = dbg_find_breakpoint(
getline_equal(fgetline, cookie, getsourceline),
fname, SOURCING_LNUM);
*dbg_tick = debug_tick;
}
next_cmdline = ((wcmd_T *)(lines_ga.ga_data))[current_line].line;
SOURCING_LNUM = ((wcmd_T *)(lines_ga.ga_data))[current_line].lnum;
// Did we encounter a breakpoint?
if (breakpoint != NULL && *breakpoint != 0
&& *breakpoint <= SOURCING_LNUM)
{
dbg_breakpoint(fname, SOURCING_LNUM);
// Find next breakpoint.
*breakpoint = dbg_find_breakpoint(
getline_equal(fgetline, cookie, getsourceline),
fname, SOURCING_LNUM);
*dbg_tick = debug_tick;
}
# ifdef FEAT_PROFILE
if (do_profiling == PROF_YES)
{
if (getline_is_func)
func_line_start(real_cookie, SOURCING_LNUM);
else if (getline_equal(fgetline, cookie, getsourceline))
script_line_start();
}
# endif
}
#endif
// 2. If no line given, get an allocated line with fgetline().
if (next_cmdline == NULL)
{
/*
* Need to set msg_didout for the first line after an ":if",
* otherwise the ":if" will be overwritten.
*/
if (count == 1 && getline_equal(fgetline, cookie, getexline))
msg_didout = TRUE;
if (fgetline == NULL || (next_cmdline = fgetline(':', cookie,
#ifdef FEAT_EVAL
cstack.cs_idx < 0 ? 0 : (cstack.cs_idx + 1) * 2
#else
0
#endif
, in_vim9script() ? GETLINE_CONCAT_CONTBAR
: GETLINE_CONCAT_CONT)) == NULL)
{
// Don't call wait_return for aborted command line. The NULL
// returned for the end of a sourced file or executed function
// doesn't do this.
if (KeyTyped && !(flags & DOCMD_REPEAT))
need_wait_return = FALSE;
retval = FAIL;
break;
}
used_getline = TRUE;
/*
* Keep the first typed line. Clear it when more lines are typed.
*/
if (flags & DOCMD_KEEPLINE)
{
vim_free(repeat_cmdline);
if (count == 0)
repeat_cmdline = vim_strsave(next_cmdline);
else
repeat_cmdline = NULL;
}
}
// 3. Make a copy of the command so we can mess with it.
else if (cmdline_copy == NULL)
{
next_cmdline = vim_strsave(next_cmdline);
if (next_cmdline == NULL)
{
emsg(_(e_out_of_memory));
retval = FAIL;
break;
}
}
cmdline_copy = next_cmdline;
#ifdef FEAT_EVAL
/*
* Inside a while/for loop, and when the command looks like a ":while"
* or ":for", the line is stored, because we may need it later when
* looping.
*
* When there is a '|' and another command, it is stored separately,
* because we need to be able to jump back to it from an
* :endwhile/:endfor.
*
* Pass a different "fgetline" function to do_one_cmd() below,
* that it stores lines in or reads them from "lines_ga". Makes it
* possible to define a function inside a while/for loop and handles
* line continuation.
*/
if ((cstack.cs_looplevel > 0 || has_loop_cmd(next_cmdline)))
{
cmd_getline = get_loop_line;
cmd_cookie = (void *)&cmd_loop_cookie;
cmd_loop_cookie.lines_gap = &lines_ga;
cmd_loop_cookie.current_line = current_line;
cmd_loop_cookie.getline = fgetline;
cmd_loop_cookie.cookie = cookie;
cmd_loop_cookie.repeating = (current_line < lines_ga.ga_len);
// Save the current line when encountering it the first time.
if (current_line == lines_ga.ga_len
&& store_loop_line(&lines_ga, next_cmdline) == FAIL)
{
retval = FAIL;
break;
}
current_line_before = current_line;
}
else
{
cmd_getline = fgetline;
cmd_cookie = cookie;
}
did_endif = FALSE;
#endif
if (count++ == 0)
{
/*
* All output from the commands is put below each other, without
* waiting for a return. Don't do this when executing commands
* from a script or when being called recursive (e.g. for ":e
* +command file").
*/
if (!(flags & DOCMD_NOWAIT) && !recursive)
{
msg_didout_before_start = msg_didout;
msg_didany = FALSE; // no output yet
msg_start();
msg_scroll = TRUE; // put messages below each other
++no_wait_return; // don't wait for return until finished
++RedrawingDisabled;
did_inc = TRUE;
}
}
if ((p_verbose >= 15 && SOURCING_NAME != NULL) || p_verbose >= 16)
msg_verbose_cmd(SOURCING_LNUM, cmdline_copy);
/*
* 2. Execute one '|' separated command.
* do_one_cmd() will return NULL if there is no trailing '|'.
* "cmdline_copy" can change, e.g. for '%' and '#' expansion.
*/
++recursive;
next_cmdline = do_one_cmd(&cmdline_copy, flags,
#ifdef FEAT_EVAL
&cstack,
#endif
cmd_getline, cmd_cookie);
--recursive;
#ifdef FEAT_EVAL
if (cmd_cookie == (void *)&cmd_loop_cookie)
// Use "current_line" from "cmd_loop_cookie", it may have been
// incremented when defining a function.
current_line = cmd_loop_cookie.current_line;
#endif
if (next_cmdline == NULL)
{
VIM_CLEAR(cmdline_copy);
/*
* If the command was typed, remember it for the ':' register.
* Do this AFTER executing the command to make :@: work.
*/
if (getline_equal(fgetline, cookie, getexline)
&& new_last_cmdline != NULL)
{
vim_free(last_cmdline);
last_cmdline = new_last_cmdline;
new_last_cmdline = NULL;
}
}
else
{
// need to copy the command after the '|' to cmdline_copy, for the
// next do_one_cmd()
STRMOVE(cmdline_copy, next_cmdline);
next_cmdline = cmdline_copy;
}
#ifdef FEAT_EVAL
// reset did_emsg for a function that is not aborted by an error
if (did_emsg && !force_abort
&& getline_equal(fgetline, cookie, get_func_line)
&& !func_has_abort(real_cookie))
{
// did_emsg_cumul is not set here
did_emsg = FALSE;
}
if (cstack.cs_looplevel > 0)
{
++current_line;
/*
* An ":endwhile", ":endfor" and ":continue" is handled here.
* If we were executing commands, jump back to the ":while" or
* ":for".
* If we were not executing commands, decrement cs_looplevel.
*/
if (cstack.cs_lflags & (CSL_HAD_CONT | CSL_HAD_ENDLOOP))
{
cstack.cs_lflags &= ~(CSL_HAD_CONT | CSL_HAD_ENDLOOP);
// Jump back to the matching ":while" or ":for". Be careful
// not to use a cs_line[] from an entry that isn't a ":while"
// or ":for": It would make "current_line" invalid and can
// cause a crash.
if (!did_emsg && !got_int && !did_throw
&& cstack.cs_idx >= 0
&& (cstack.cs_flags[cstack.cs_idx]
& (CSF_WHILE | CSF_FOR))
&& cstack.cs_line[cstack.cs_idx] >= 0
&& (cstack.cs_flags[cstack.cs_idx] & CSF_ACTIVE))
{
current_line = cstack.cs_line[cstack.cs_idx];
// remember we jumped there
cstack.cs_lflags |= CSL_HAD_LOOP;
line_breakcheck(); // check if CTRL-C typed
// Check for the next breakpoint at or after the ":while"
// or ":for".
if (breakpoint != NULL)
{
*breakpoint = dbg_find_breakpoint(
getline_equal(fgetline, cookie, getsourceline),
fname,
((wcmd_T *)lines_ga.ga_data)[current_line].lnum-1);
*dbg_tick = debug_tick;
}
}
else
{
// can only get here with ":endwhile" or ":endfor"
if (cstack.cs_idx >= 0)
rewind_conditionals(&cstack, cstack.cs_idx - 1,
CSF_WHILE | CSF_FOR, &cstack.cs_looplevel);
}
}
/*
* For a ":while" or ":for" we need to remember the line number.
*/
else if (cstack.cs_lflags & CSL_HAD_LOOP)
{
cstack.cs_lflags &= ~CSL_HAD_LOOP;
cstack.cs_line[cstack.cs_idx] = current_line_before;
}
}
// Check for the next breakpoint after a watchexpression
if (breakpoint != NULL && has_watchexpr())
{
*breakpoint = dbg_find_breakpoint(FALSE, fname, SOURCING_LNUM);
*dbg_tick = debug_tick;
}
/*
* When not inside any ":while" loop, clear remembered lines.
*/
if (cstack.cs_looplevel == 0)
{
if (lines_ga.ga_len > 0)
{
SOURCING_LNUM =
((wcmd_T *)lines_ga.ga_data)[lines_ga.ga_len - 1].lnum;
free_cmdlines(&lines_ga);
}
current_line = 0;
}
/*
* A ":finally" makes did_emsg, got_int, and did_throw pending for
* being restored at the ":endtry". Reset them here and set the
* ACTIVE and FINALLY flags, so that the finally clause gets executed.
* This includes the case where a missing ":endif", ":endwhile" or
* ":endfor" was detected by the ":finally" itself.
*/
if (cstack.cs_lflags & CSL_HAD_FINA)
{
cstack.cs_lflags &= ~CSL_HAD_FINA;
report_make_pending(cstack.cs_pending[cstack.cs_idx]
& (CSTP_ERROR | CSTP_INTERRUPT | CSTP_THROW),
did_throw ? (void *)current_exception : NULL);
did_emsg = got_int = did_throw = FALSE;
cstack.cs_flags[cstack.cs_idx] |= CSF_ACTIVE | CSF_FINALLY;
}
// Update global "trylevel" for recursive calls to do_cmdline() from
// within this loop.
trylevel = initial_trylevel + cstack.cs_trylevel;
/*
* If the outermost try conditional (across function calls and sourced
* files) is aborted because of an error, an interrupt, or an uncaught
* exception, cancel everything. If it is left normally, reset
* force_abort to get the non-EH compatible abortion behavior for
* the rest of the script.
*/
if (trylevel == 0 && !did_emsg && !got_int && !did_throw)
force_abort = FALSE;
// Convert an interrupt to an exception if appropriate.
(void)do_intthrow(&cstack);
#endif // FEAT_EVAL
}
/*
* Continue executing command lines when:
* - no CTRL-C typed, no aborting error, no exception thrown or try
* conditionals need to be checked for executing finally clauses or
* catching an interrupt exception
* - didn't get an error message or lines are not typed
* - there is a command after '|', inside a :if, :while, :for or :try, or
* looping for ":source" command or function call.
*/
while (!((got_int
#ifdef FEAT_EVAL
|| (did_emsg && (force_abort || in_vim9script()))
|| did_throw
#endif
)
#ifdef FEAT_EVAL
&& cstack.cs_trylevel == 0
#endif
)
&& !(did_emsg
#ifdef FEAT_EVAL
// Keep going when inside try/catch, so that the error can be
// deal with, except when it is a syntax error, it may cause
// the :endtry to be missed.
&& (cstack.cs_trylevel == 0 || did_emsg_syntax)
#endif
&& used_getline
&& (getline_equal(fgetline, cookie, getexmodeline)
|| getline_equal(fgetline, cookie, getexline)))
&& (next_cmdline != NULL
#ifdef FEAT_EVAL
|| cstack.cs_idx >= 0
#endif
|| (flags & DOCMD_REPEAT)));
vim_free(cmdline_copy);
did_emsg_syntax = FALSE;
#ifdef FEAT_EVAL
free_cmdlines(&lines_ga);
ga_clear(&lines_ga);
if (cstack.cs_idx >= 0)
{
/*
* If a sourced file or executed function ran to its end, report the
* unclosed conditional.
* In Vim9 script do not give a second error, executing aborts after
* the first one.
*/
if (!got_int && !did_throw && !(did_emsg && in_vim9script())
&& ((getline_equal(fgetline, cookie, getsourceline)
&& !source_finished(fgetline, cookie))
|| (getline_equal(fgetline, cookie, get_func_line)
&& !func_has_ended(real_cookie))))
{
if (cstack.cs_flags[cstack.cs_idx] & CSF_TRY)
emsg(_(e_endtry));
else if (cstack.cs_flags[cstack.cs_idx] & CSF_WHILE)
emsg(_(e_endwhile));
else if (cstack.cs_flags[cstack.cs_idx] & CSF_FOR)
emsg(_(e_endfor));
else
emsg(_(e_endif));
}
/*
* Reset "trylevel" in case of a ":finish" or ":return" or a missing
* ":endtry" in a sourced file or executed function. If the try
* conditional is in its finally clause, ignore anything pending.
* If it is in a catch clause, finish the caught exception.
* Also cleanup any "cs_forinfo" structures.
*/
do
{
int idx = cleanup_conditionals(&cstack, 0, TRUE);
if (idx >= 0)
--idx; // remove try block not in its finally clause
rewind_conditionals(&cstack, idx, CSF_WHILE | CSF_FOR,
&cstack.cs_looplevel);
}
while (cstack.cs_idx >= 0);
trylevel = initial_trylevel;
}
// If a missing ":endtry", ":endwhile", ":endfor", or ":endif" or a memory
// lack was reported above and the error message is to be converted to an
// exception, do this now after rewinding the cstack.
do_errthrow(&cstack, getline_equal(fgetline, cookie, get_func_line)
? (char_u *)"endfunction" : (char_u *)NULL);
if (trylevel == 0)
{
// Just in case did_throw got set but current_exception wasn't.
if (current_exception == NULL)
did_throw = FALSE;
/*
* When an exception is being thrown out of the outermost try
* conditional, discard the uncaught exception, disable the conversion
* of interrupts or errors to exceptions, and ensure that no more
* commands are executed.
*/
if (did_throw)
handle_did_throw();
/*
* On an interrupt or an aborting error not converted to an exception,
* disable the conversion of errors to exceptions. (Interrupts are not
* converted anymore, here.) This enables also the interrupt message
* when force_abort is set and did_emsg unset in case of an interrupt
* from a finally clause after an error.
*/
else if (got_int || (did_emsg && force_abort))
suppress_errthrow = TRUE;
}
/*
* The current cstack will be freed when do_cmdline() returns. An uncaught
* exception will have to be rethrown in the previous cstack. If a function
* has just returned or a script file was just finished and the previous
* cstack belongs to the same function or, respectively, script file, it
* will have to be checked for finally clauses to be executed due to the
* ":return" or ":finish". This is done in do_one_cmd().
*/
if (did_throw)
need_rethrow = TRUE;
if ((getline_equal(fgetline, cookie, getsourceline)
&& ex_nesting_level > source_level(real_cookie))
|| (getline_equal(fgetline, cookie, get_func_line)
&& ex_nesting_level > func_level(real_cookie) + 1))
{
if (!did_throw)
check_cstack = TRUE;
}
else
{
// When leaving a function, reduce nesting level.
if (getline_equal(fgetline, cookie, get_func_line))
--ex_nesting_level;
/*
* Go to debug mode when returning from a function in which we are
* single-stepping.
*/
if ((getline_equal(fgetline, cookie, getsourceline)
|| getline_equal(fgetline, cookie, get_func_line))
&& ex_nesting_level + 1 <= debug_break_level)
do_debug(getline_equal(fgetline, cookie, getsourceline)
? (char_u *)_("End of sourced file")
: (char_u *)_("End of function"));
}
/*
* Restore the exception environment (done after returning from the
* debugger).
*/
if (flags & DOCMD_EXCRESET)
restore_dbg_stuff(&debug_saved);
msg_list = saved_msg_list;
// Cleanup if "cs_emsg_silent_list" remains.
if (cstack.cs_emsg_silent_list != NULL)
{
eslist_T *elem, *temp;
for (elem = cstack.cs_emsg_silent_list; elem != NULL; elem = temp)
{
temp = elem->next;
vim_free(elem);
}
}
#endif // FEAT_EVAL
/*
* If there was too much output to fit on the command line, ask the user to
* hit return before redrawing the screen. With the ":global" command we do
* this only once after the command is finished.
*/
if (did_inc)
{
--RedrawingDisabled;
--no_wait_return;
msg_scroll = FALSE;
/*
* When just finished an ":if"-":else" which was typed, no need to
* wait for hit-return. Also for an error situation.
*/
if (retval == FAIL
#ifdef FEAT_EVAL
|| (did_endif && KeyTyped && !did_emsg)
#endif
)
{
need_wait_return = FALSE;
msg_didany = FALSE; // don't wait when restarting edit
}
else if (need_wait_return)
{
/*
* The msg_start() above clears msg_didout. The wait_return we do
* here should not overwrite the command that may be shown before
* doing that.
*/
msg_didout |= msg_didout_before_start;
wait_return(FALSE);
}
}
#ifdef FEAT_EVAL
did_endif = FALSE; // in case do_cmdline used recursively
#else
/*
* Reset if_level, in case a sourced script file contains more ":if" than
* ":endif" (could be ":if x | foo | endif").
*/
if_level = 0;
#endif
--call_depth;
return retval;
} | 0 | [
"CWE-122"
] | vim | 35a319b77f897744eec1155b736e9372c9c5575f | 164,984,185,573,810,960,000,000,000,000,000,000,000 | 760 | patch 8.2.3489: ml_get error after search with range
Problem: ml_get error after search with range.
Solution: Limit the line number to the buffer line count. |
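The fix this entry describes reduces to one guard: clamp any user-supplied line number to the buffer's line count before it is used to address a line. A minimal standalone sketch of that idea in C, with hypothetical names (the real patch edits Vim's search code rather than adding a helper like this):

```c
#include <stdio.h>

/* Hypothetical illustration: clamp a search-range line number to the
 * buffer's line count so later lookups cannot read past the end.
 * "linenr_T" and the helper name are assumptions, not Vim's real API. */
typedef long linenr_T;

static linenr_T clamp_lnum(linenr_T lnum, linenr_T buf_line_count)
{
    if (lnum > buf_line_count)
        return buf_line_count;   /* never address a line past the buffer */
    if (lnum < 1)
        return 1;
    return lnum;
}

int main(void)
{
    printf("%ld\n", clamp_lnum(9999, 120));  /* prints 120 */
    return 0;
}
```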
static int __copy_insn(struct address_space *mapping, struct file *filp,
void *insn, int nbytes, loff_t offset)
{
struct page *page;
/*
* Ensure that the page that has the original instruction is populated
* and in page-cache. If ->readpage == NULL it must be shmem_mapping(),
* see uprobe_register().
*/
if (mapping->a_ops->readpage)
page = read_mapping_page(mapping, offset >> PAGE_SHIFT, filp);
else
page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
if (IS_ERR(page))
return PTR_ERR(page);
copy_from_page(page, offset, insn, nbytes);
put_page(page);
return 0;
} | 0 | [
"CWE-416"
] | linux | 355627f518978b5167256d27492fe0b343aaf2f2 | 38,782,965,824,142,510,000,000,000,000,000,000,000 | 21 | mm, uprobes: fix multiple free of ->uprobes_state.xol_area
Commit 7c051267931a ("mm, fork: make dup_mmap wait for mmap_sem for
write killable") made it possible to kill a forking task while it is
waiting to acquire its ->mmap_sem for write, in dup_mmap().
However, it was overlooked that this introduced a new error path before
the new mm_struct's ->uprobes_state.xol_area has been set to NULL after
being copied from the old mm_struct by the memcpy in dup_mm(). For a
task that has previously hit a uprobe tracepoint, this resulted in the
'struct xol_area' being freed multiple times if the task was killed at
just the right time while forking.
Fix it by setting ->uprobes_state.xol_area to NULL in mm_init() rather
than in uprobe_dup_mmap().
With CONFIG_UPROBE_EVENTS=y, the bug can be reproduced by the same C
program given by commit 2b7e8665b4ff ("fork: fix incorrect fput of
->exe_file causing use-after-free"), provided that a uprobe tracepoint
has been set on the fork_thread() function. For example:
$ gcc reproducer.c -o reproducer -lpthread
$ nm reproducer | grep fork_thread
0000000000400719 t fork_thread
$ echo "p $PWD/reproducer:0x719" > /sys/kernel/debug/tracing/uprobe_events
$ echo 1 > /sys/kernel/debug/tracing/events/uprobes/enable
$ ./reproducer
Here is the use-after-free reported by KASAN:
BUG: KASAN: use-after-free in uprobe_clear_state+0x1c4/0x200
Read of size 8 at addr ffff8800320a8b88 by task reproducer/198
CPU: 1 PID: 198 Comm: reproducer Not tainted 4.13.0-rc7-00015-g36fde05f3fb5 #255
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.10.2-20170228_101828-anatol 04/01/2014
Call Trace:
dump_stack+0xdb/0x185
print_address_description+0x7e/0x290
kasan_report+0x23b/0x350
__asan_report_load8_noabort+0x19/0x20
uprobe_clear_state+0x1c4/0x200
mmput+0xd6/0x360
do_exit+0x740/0x1670
do_group_exit+0x13f/0x380
get_signal+0x597/0x17d0
do_signal+0x99/0x1df0
exit_to_usermode_loop+0x166/0x1e0
syscall_return_slowpath+0x258/0x2c0
entry_SYSCALL_64_fastpath+0xbc/0xbe
...
Allocated by task 199:
save_stack_trace+0x1b/0x20
kasan_kmalloc+0xfc/0x180
kmem_cache_alloc_trace+0xf3/0x330
__create_xol_area+0x10f/0x780
uprobe_notify_resume+0x1674/0x2210
exit_to_usermode_loop+0x150/0x1e0
prepare_exit_to_usermode+0x14b/0x180
retint_user+0x8/0x20
Freed by task 199:
save_stack_trace+0x1b/0x20
kasan_slab_free+0xa8/0x1a0
kfree+0xba/0x210
uprobe_clear_state+0x151/0x200
mmput+0xd6/0x360
copy_process.part.8+0x605f/0x65d0
_do_fork+0x1a5/0xbd0
SyS_clone+0x19/0x20
do_syscall_64+0x22f/0x660
return_from_SYSCALL_64+0x0/0x7a
Note: without KASAN, you may instead see a "Bad page state" message, or
simply a general protection fault.
Link: http://lkml.kernel.org/r/[email protected]
Fixes: 7c051267931a ("mm, fork: make dup_mmap wait for mmap_sem for write killable")
Signed-off-by: Eric Biggers <[email protected]>
Reported-by: Oleg Nesterov <[email protected]>
Acked-by: Oleg Nesterov <[email protected]>
Cc: Alexander Shishkin <[email protected]>
Cc: Arnaldo Carvalho de Melo <[email protected]>
Cc: Dmitry Vyukov <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Konstantin Khlebnikov <[email protected]>
Cc: Mark Rutland <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Vlastimil Babka <[email protected]>
Cc: <[email protected]> [4.7+]
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
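The commit above fixes a double free by severing the child's copied pointer before any error path can run. A hedged, standalone sketch of that core idea; the struct and function names are stand-ins, not the kernel's:

```c
#include <string.h>

/* Illustration only: after memcpy()-ing a parent struct into a child,
 * null out any owned pointer *before* the first error path can run, so
 * an error-path free cannot free the parent's object. */
struct xol_area { int dummy; };

struct mm_state {
    struct xol_area *xol_area;   /* owned pointer */
};

static void mm_init_sketch(struct mm_state *child, const struct mm_state *parent)
{
    memcpy(child, parent, sizeof(*child)); /* child now aliases parent's area */
    child->xol_area = NULL;                /* sever the alias immediately */
}
```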
static void swizzle_io_read(RDyldCache *cache, RIO *io) {
if (!io || !io->desc || !io->desc->plugin) {
return;
}
RIOPlugin *plugin = io->desc->plugin;
cache->original_io_read = plugin->read;
plugin->read = &dyldcache_io_read;
} | 0 | [
"CWE-787"
] | radare2 | c84b7232626badd075caf3ae29661b609164bac6 | 19,374,063,061,070,073,000,000,000,000,000,000,000 | 9 | Fix heap buffer overflow in dyldcache parser ##crash
* Reported by: Lazymio via huntr.dev
* Reproducer: dyldovf |
longlong Item_null::val_datetime_packed()
{
null_value= true;
return 0;
} | 0 | [
"CWE-416"
] | server | c02ebf3510850ba78a106be9974c94c3b97d8585 | 16,211,246,792,761,246,000,000,000,000,000,000,000 | 5 | MDEV-24176 Preparations
1. moved fix_vcol_exprs() call to open_table()
mysql_alter_table() doesn't do lock_tables() so it cannot win from
fix_vcol_exprs() from there. Tests affected: main.default_session
2. Vanilla cleanups and comments. |
struct sctp_packet *sctp_packet_config(struct sctp_packet *packet,
__u32 vtag, int ecn_capable)
{
struct sctp_chunk *chunk = NULL;
SCTP_DEBUG_PRINTK("%s: packet:%p vtag:0x%x\n", __func__,
packet, vtag);
packet->vtag = vtag;
if (ecn_capable && sctp_packet_empty(packet)) {
chunk = sctp_get_ecne_prepend(packet->transport->asoc);
/* If there a is a prepend chunk stick it on the list before
* any other chunks get appended.
*/
if (chunk)
sctp_packet_append_chunk(packet, chunk);
}
return packet;
} | 0 | [] | linux | 196d67593439b03088913227093e374235596e33 | 128,547,905,431,491,050,000,000,000,000,000,000,000 | 22 | sctp: Add support to per-association statistics via a new SCTP_GET_ASSOC_STATS call
The current SCTP stack is lacking a mechanism to have per association
statistics. This is an implementation modeled after OpenSolaris'
SCTP_GET_ASSOC_STATS.
Userspace part will follow on lksctp if/when there is a general ACK on
this.
V4:
- Move ipackets++ before q->immediate.func() for consistency reasons
- Move sctp_max_rto() to the end of sctp_transport_update_rto() to avoid
returning bogus RTO values
- return asoc->rto_min when max_obs_rto value has not changed
V3:
- Increase ictrlchunks in sctp_assoc_bh_rcv() as well
- Move ipackets++ to sctp_inq_push()
- return 0 when no rto updates took place since the last call
V2:
- Implement partial retrieval of stat struct to cope for future expansion
- Kill the rtxpackets counter as it cannot be precise anyway
- Rename outseqtsns to outofseqtsns to make it clearer that these are out
of sequence unexpected TSNs
- Move asoc->ipackets++ under a lock to avoid potential miscounts
- Fold asoc->opackets++ into the already existing asoc check
- Kill unneeded (q->asoc) test when increasing rtxchunks
- Do not count octrlchunks if sending failed (SCTP_XMIT_OK != 0)
- Don't count SHUTDOWNs as SACKs
- Move SCTP_GET_ASSOC_STATS to the private space API
- Adjust the len check in sctp_getsockopt_assoc_stats() to allow for
future struct growth
- Move association statistics in their own struct
- Update idupchunks when we send a SACK with dup TSNs
- return min_rto in max_rto when RTO has not changed. Also return the
transport when max_rto last changed.
Signed-off: Michele Baldessari <[email protected]>
Acked-by: Vlad Yasevich <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
max3421_transfer_out(struct usb_hcd *hcd, struct urb *urb, int fast_retransmit)
{
struct spi_device *spi = to_spi_device(hcd->self.controller);
struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
int epnum = usb_pipeendpoint(urb->pipe);
u32 max_packet;
void *src;
src = urb->transfer_buffer + urb->actual_length;
if (fast_retransmit) {
if (max3421_hcd->rev == 0x12) {
/* work around rev 0x12 bug: */
spi_wr8(hcd, MAX3421_REG_SNDBC, 0);
spi_wr8(hcd, MAX3421_REG_SNDFIFO, ((u8 *) src)[0]);
spi_wr8(hcd, MAX3421_REG_SNDBC, max3421_hcd->curr_len);
}
return MAX3421_HXFR_BULK_OUT(epnum);
}
max_packet = usb_maxpacket(urb->dev, urb->pipe, 1);
if (max_packet > MAX3421_FIFO_SIZE) {
/*
* We do not support isochronous transfers at this
* time.
*/
dev_err(&spi->dev,
"%s: packet-size of %u too big (limit is %u bytes)",
__func__, max_packet, MAX3421_FIFO_SIZE);
max3421_hcd->urb_done = -EMSGSIZE;
return -EMSGSIZE;
}
max3421_hcd->curr_len = min((urb->transfer_buffer_length -
urb->actual_length), max_packet);
spi_wr_buf(hcd, MAX3421_REG_SNDFIFO, src, max3421_hcd->curr_len);
spi_wr8(hcd, MAX3421_REG_SNDBC, max3421_hcd->curr_len);
return MAX3421_HXFR_BULK_OUT(epnum);
} | 0 | [
"CWE-416"
] | linux | b5fdf5c6e6bee35837e160c00ac89327bdad031b | 153,716,226,318,282,250,000,000,000,000,000,000,000 | 40 | usb: max-3421: Prevent corruption of freed memory
The MAX-3421 USB driver remembers the state of the USB toggles for a
device/endpoint. To save SPI writes, this was only done when a new
device/endpoint was being used. Unfortunately, if the old device was
removed, this would cause writes to freed memory.
To fix this, a simpler scheme is used. The toggles are read from
hardware when a URB is completed, and the toggles are always written to
hardware when any URB transaction is started. This will cause a few more
SPI transactions, but no longer causes kernel panics.
Fixes: 2d53139f3162 ("Add support for using a MAX3421E chip as a host driver.")
Cc: stable <[email protected]>
Signed-off-by: Mark Tomlinson <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Greg Kroah-Hartman <[email protected]> |
static inline UBool instanceOfMeasure(const UObject* a) {
return dynamic_cast<const Measure*>(a) != NULL;
} | 0 | [
"CWE-190"
] | icu | 53d8c8f3d181d87a6aa925b449b51c4a2c922a51 | 22,062,975,890,161,167,000,000,000,000,000,000,000 | 3 | ICU-20246 Fixing another integer overflow in number parsing. |
get_y_current(void)
{
return y_current;
} | 0 | [
"CWE-122",
"CWE-787"
] | vim | d25f003342aca9889067f2e839963dfeccf1fe05 | 89,411,024,154,969,400,000,000,000,000,000,000,000 | 4 | patch 9.0.0011: reading beyond the end of the line with put command
Problem: Reading beyond the end of the line with put command.
Solution: Adjust the end mark position. |
static int TS_TST_INFO_content_new(PKCS7 *p7)
{
PKCS7 *ret = NULL;
ASN1_OCTET_STRING *octet_string = NULL;
/* Create new encapsulated NID_id_smime_ct_TSTInfo content. */
if (!(ret = PKCS7_new())) goto err;
if (!(ret->d.other = ASN1_TYPE_new())) goto err;
ret->type = OBJ_nid2obj(NID_id_smime_ct_TSTInfo);
if (!(octet_string = ASN1_OCTET_STRING_new())) goto err;
ASN1_TYPE_set(ret->d.other, V_ASN1_OCTET_STRING, octet_string);
octet_string = NULL;
/* Add encapsulated content to signed PKCS7 structure. */
if (!PKCS7_set_content(p7, ret)) goto err;
return 1;
err:
ASN1_OCTET_STRING_free(octet_string);
PKCS7_free(ret);
return 0;
} | 0 | [] | openssl | c7235be6e36c4bef84594aa3b2f0561db84b63d8 | 248,371,865,548,702,540,000,000,000,000,000,000,000 | 22 | RFC 3161 compliant time stamp request creation, response generation
and response verification.
Submitted by: Zoltan Glozik <[email protected]>
Reviewed by: Ulf Moeller |
bool ZrtpQueue::isParanoidMode() {
return enableParanoidMode;
} | 0 | [
"CWE-119"
] | ZRTPCPP | c8617100f359b217a974938c5539a1dd8a120b0e | 162,696,896,555,890,440,000,000,000,000,000,000,000 | 3 | Fix vulnerabilities found and reported by Mark Dowd
- limit length of memcpy
- limit number of offered algorithms in Hello packet
- length check in PING packet
- fix a small coding error |
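Of the three fixes listed, the first ("limit length of memcpy") follows a standard pattern: clamp any peer-supplied length to the destination's capacity before copying. An illustrative sketch, not ZRTPCPP's actual code:

```c
#include <string.h>

/* Never let an attacker-supplied length exceed the destination capacity;
 * return how many bytes were actually copied. */
static size_t bounded_copy(void *dst, size_t dst_cap,
                           const void *src, size_t src_len)
{
    size_t n = src_len < dst_cap ? src_len : dst_cap;
    memcpy(dst, src, n);
    return n;
}
```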
MagickExport MagickBooleanType DeleteImageOption(ImageInfo *image_info,
const char *option)
{
assert(image_info != (ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
if (image_info->options == (void *) NULL)
return(MagickFalse);
return(DeleteNodeFromSplayTree((SplayTreeInfo *) image_info->options,option));
} | 0 | [
"CWE-399"
] | ImageMagick | 6790815c75bdea0357df5564345847856e995d6b | 306,931,074,846,038,700,000,000,000,000,000,000,000 | 12 | Fixed memory leak in IsOptionMember. |
nautilus_file_list_call_when_ready (GList *file_list,
NautilusFileAttributes attributes,
NautilusFileListHandle **handle,
NautilusFileListCallback callback,
gpointer callback_data)
{
GList *l;
FileListReadyData *data;
NautilusFile *file;
g_return_if_fail (file_list != NULL);
data = file_list_ready_data_new
(file_list, callback, callback_data);
if (handle) {
*handle = (NautilusFileListHandle *) data;
}
l = file_list;
while (l != NULL) {
file = NAUTILUS_FILE (l->data);
/* Need to do this here, as the list can be modified by this call */
l = l->next;
nautilus_file_call_when_ready (file,
attributes,
file_list_file_ready_callback,
data);
}
} | 0 | [] | nautilus | 7632a3e13874a2c5e8988428ca913620a25df983 | 82,610,546,782,035,920,000,000,000,000,000,000,000 | 31 | Check for trusted desktop file launchers.
2009-02-24 Alexander Larsson <[email protected]>
* libnautilus-private/nautilus-directory-async.c:
Check for trusted desktop file launchers.
* libnautilus-private/nautilus-file-private.h:
* libnautilus-private/nautilus-file.c:
* libnautilus-private/nautilus-file.h:
Add nautilus_file_is_trusted_link.
Allow unsetting of custom display name.
* libnautilus-private/nautilus-mime-actions.c:
Display dialog when trying to launch a non-trusted desktop file.
svn path=/trunk/; revision=15003 |
static int rfcomm_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
{
struct sockaddr_rc *sa = (struct sockaddr_rc *) addr;
struct sock *sk = sock->sk;
BT_DBG("sock %p, sk %p", sock, sk);
sa->rc_family = AF_BLUETOOTH;
sa->rc_channel = rfcomm_pi(sk)->channel;
if (peer)
bacpy(&sa->rc_bdaddr, &bt_sk(sk)->dst);
else
bacpy(&sa->rc_bdaddr, &bt_sk(sk)->src);
*len = sizeof(struct sockaddr_rc);
return 0;
} | 1 | [
"CWE-200"
] | linux | 9344a972961d1a6d2c04d9008b13617bcb6ec2ef | 45,354,779,554,130,560,000,000,000,000,000,000,000 | 17 | Bluetooth: RFCOMM - Fix info leak via getsockname()
The RFCOMM code fails to initialize the trailing padding byte of struct
sockaddr_rc added for alignment. That, in turn, leaks one byte of kernel stack
via the getsockname() syscall. Add an explicit memset(0) before filling
the structure to avoid the info leak.
Signed-off-by: Mathias Krause <[email protected]>
Cc: Marcel Holtmann <[email protected]>
Cc: Gustavo Padovan <[email protected]>
Cc: Johan Hedberg <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
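The fix the message describes is the classic remedy for struct-padding info leaks: zero the whole structure before assigning its named fields. A standalone sketch with a stand-in struct (the real patch touches struct sockaddr_rc in the kernel):

```c
#include <string.h>

struct sockaddr_rc_like {       /* stand-in with implicit padding */
    unsigned short family;
    unsigned char  bdaddr[6];
    unsigned char  channel;     /* compiler may add a trailing pad byte */
};

/* Zero the whole struct before filling named fields, so padding bytes
 * never carry stale stack contents back to user space. */
static void fill_addr(struct sockaddr_rc_like *sa)
{
    memset(sa, 0, sizeof(*sa)); /* covers padding, not just named members */
    sa->family  = 31;           /* AF_BLUETOOTH on Linux */
    sa->channel = 1;
}
```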
int rand_drbg_unlock(RAND_DRBG *drbg)
{
if (drbg->lock != NULL)
return CRYPTO_THREAD_unlock(drbg->lock);
return 1;
} | 0 | [
"CWE-330"
] | openssl | 1b0fe00e2704b5e20334a16d3c9099d1ba2ef1be | 110,803,343,942,506,340,000,000,000,000,000,000,000 | 7 | drbg: ensure fork-safety without using a pthread_atfork handler
When the new OpenSSL CSPRNG was introduced in version 1.1.1,
it was announced in the release notes that it would be fork-safe,
which the old CSPRNG hadn't been.
The fork-safety was implemented using a fork count, which was
incremented by a pthread_atfork handler. Initially, this handler
was enabled by default. Unfortunately, the default behaviour
had to be changed for other reasons in commit b5319bdbd095, so
the new OpenSSL CSPRNG failed to keep its promise.
This commit restores the fork-safety using a different approach.
It replaces the fork count by a fork id, which coincides with
the process id on UNIX-like operating systems and is zero on other
operating systems. It is used to detect when an automatic reseed
after a fork is necessary.
To prevent a future regression, it also adds a test to verify that
the child reseeds after fork.
CVE-2019-1549
Reviewed-by: Paul Dale <[email protected]>
Reviewed-by: Matt Caswell <[email protected]>
(Merged from https://github.com/openssl/openssl/pull/9802) |
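The fork-id scheme the message describes can be sketched in a few lines: remember the pid at (re)seed time and reseed whenever the current pid differs, which on UNIX-like systems happens exactly after a fork. Names below are placeholders, not OpenSSL's API:

```c
#include <sys/types.h>
#include <unistd.h>

/* Placeholder types/functions; illustrative only. */
struct drbg_sketch { pid_t fork_id; };

static void drbg_reseed(struct drbg_sketch *d)
{
    /* ... pull fresh entropy ... */
    d->fork_id = getpid();       /* record the process that seeded us */
}

static void drbg_generate(struct drbg_sketch *d)
{
    if (d->fork_id != getpid())  /* we are running in a forked child */
        drbg_reseed(d);
    /* ... produce random bytes ... */
}
```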
int udp_disconnect(struct sock *sk, int flags)
{
struct inet_sock *inet = inet_sk(sk);
/*
* 1003.1g - break association.
*/
sk->sk_state = TCP_CLOSE;
inet->inet_daddr = 0;
inet->inet_dport = 0;
sock_rps_save_rxhash(sk, 0);
sk->sk_bound_dev_if = 0;
if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
inet_reset_saddr(sk);
if (!(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) {
sk->sk_prot->unhash(sk);
inet->inet_sport = 0;
}
sk_dst_reset(sk);
return 0;
} | 0 | [
"CWE-400"
] | linux-2.6 | c377411f2494a931ff7facdbb3a6839b1266bcf6 | 263,318,696,200,097,230,000,000,000,000,000,000,000 | 22 | net: sk_add_backlog() take rmem_alloc into account
Current socket backlog limit is not enough to really stop DDOS attacks,
because the user thread spends a lot of time processing a full backlog each
round, and may spin wildly on the socket lock.
We should add backlog size and receive_queue size (aka rmem_alloc) to
pace writers, and let the user run without being slowed down too much.
Introduce a sk_rcvqueues_full() helper, to avoid taking socket lock in
stress situations.
Under huge stress from a multiqueue/RPS enabled NIC, a single flow udp
receiver can now process ~200.000 pps (instead of ~100 pps before the
patch) on a 8 core machine.
Signed-off-by: Eric Dumazet <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
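The sk_rcvqueues_full() idea the message introduces is to charge both queues against one limit. A rough standalone sketch; field names only loosely follow the kernel's struct sock:

```c
#include <stdbool.h>

/* Count both the backlog and the receive-queue memory (rmem_alloc)
 * against the limit, so a flood cannot park unbounded data across the
 * two queues. Not the actual kernel implementation. */
struct sock_sketch {
    unsigned int backlog_len;
    unsigned int rmem_alloc;
    unsigned int rcvbuf_limit;
};

static bool sk_rcvqueues_full_sketch(const struct sock_sketch *sk,
                                     unsigned int skb_truesize)
{
    return sk->backlog_len + sk->rmem_alloc + skb_truesize
           > sk->rcvbuf_limit;
}
```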
zxcheck(i_ctx_t *i_ctx_p)
{
os_ptr op = osp;
check_op(1);
make_bool(op, (r_has_attr(ACCESS_REF(op), a_executable) ? 1 : 0));
return 0;
} | 0 | [] | ghostpdl | 0edd3d6c634a577db261615a9dc2719bca7f6e01 | 332,088,424,848,887,440,000,000,000,000,000,000,000 | 8 | Bug 699659: Don't just assume an object is a t_(a)struct |
Prompt& PamData::findPrompt(const struct pam_message* msg) {
AuthPrompt::Type type = detectPrompt(msg);
for (Prompt &p : m_currentRequest.prompts) {
if (type == AuthPrompt::UNKNOWN && QString::fromLocal8Bit(msg->msg) == p.message)
return p;
if (type == p.type)
return p;
}
return invalidPrompt;
} | 0 | [
"CWE-613",
"CWE-287",
"CWE-284"
] | sddm | 147cec383892d143b5e02daa70f1e7def50f5d98 | 291,773,568,826,639,670,000,000,000,000,000,000,000 | 12 | Fix authentication when reusing an existing session
- Check the success value before unlocking the session
- Don't attempt to use the nonexistant "sddm-check" PAM service |
vmxnet3_revert_rxc_descr(VMXNET3State *s, int qidx)
{
vmxnet3_dec_rx_completion_counter(s, qidx);
} | 0 | [
"CWE-20"
] | qemu | a7278b36fcab9af469563bd7b9dadebe2ae25e48 | 161,575,260,915,916,870,000,000,000,000,000,000,000 | 4 | net/vmxnet3: Refine l2 header validation
Validation of l2 header length assumed minimal packet size as
eth_header + 2 * vlan_header regardless of the actual protocol.
This caused a crash for valid non-IP packets shorter than 22 bytes, as
'tx_pkt->packet_type' hasn't been assigned for such packets, and
'vmxnet3_on_tx_done_update_stats()' expects it to be properly set.
Refine header length validation in 'vmxnet_tx_pkt_parse_headers'.
Check its return value during packet processing flow.
As a side effect, in case IPv4 and IPv6 header validation failure,
corrupt packets will be dropped.
Signed-off-by: Dana Rubin <[email protected]>
Signed-off-by: Shmulik Ladkani <[email protected]>
Signed-off-by: Jason Wang <[email protected]> |
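The refined validation the message describes makes the minimum L2 length depend on the VLAN tags actually present instead of always assuming two. A small illustrative check (constants are the standard Ethernet/VLAN header sizes; the function is not QEMU's):

```c
#include <stdbool.h>
#include <stddef.h>

#define ETH_HLEN   14u   /* plain Ethernet header */
#define VLAN_HLEN   4u   /* one 802.1Q tag */

/* Require only the headers the packet actually carries. */
static bool l2_len_ok(size_t pkt_len, unsigned int vlan_tags)
{
    return pkt_len >= ETH_HLEN + (size_t)vlan_tags * VLAN_HLEN;
}
```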
static void sdhci_sdma_transfer_multi_blocks(SDHCIState *s)
{
bool page_aligned = false;
unsigned int begin;
const uint16_t block_size = s->blksize & BLOCK_SIZE_MASK;
uint32_t boundary_chk = 1 << (((s->blksize & ~BLOCK_SIZE_MASK) >> 12) + 12);
uint32_t boundary_count = boundary_chk - (s->sdmasysad % boundary_chk);
if (!(s->trnmod & SDHC_TRNS_BLK_CNT_EN) || !s->blkcnt) {
qemu_log_mask(LOG_UNIMP, "infinite transfer is not supported\n");
return;
}
/* XXX: Some sd/mmc drivers (for example, u-boot-slp) do not account for
* possible stop at page boundary if initial address is not page aligned,
* allow them to work properly */
if ((s->sdmasysad % boundary_chk) == 0) {
page_aligned = true;
}
if (s->trnmod & SDHC_TRNS_READ) {
s->prnsts |= SDHC_DOING_READ | SDHC_DATA_INHIBIT |
SDHC_DAT_LINE_ACTIVE;
while (s->blkcnt) {
if (s->data_count == 0) {
sdbus_read_data(&s->sdbus, s->fifo_buffer, block_size);
}
begin = s->data_count;
if (((boundary_count + begin) < block_size) && page_aligned) {
s->data_count = boundary_count + begin;
boundary_count = 0;
} else {
s->data_count = block_size;
boundary_count -= block_size - begin;
if (s->trnmod & SDHC_TRNS_BLK_CNT_EN) {
s->blkcnt--;
}
}
dma_memory_write(s->dma_as, s->sdmasysad,
&s->fifo_buffer[begin], s->data_count - begin);
s->sdmasysad += s->data_count - begin;
if (s->data_count == block_size) {
s->data_count = 0;
}
if (page_aligned && boundary_count == 0) {
break;
}
}
} else {
s->prnsts |= SDHC_DOING_WRITE | SDHC_DATA_INHIBIT |
SDHC_DAT_LINE_ACTIVE;
while (s->blkcnt) {
begin = s->data_count;
if (((boundary_count + begin) < block_size) && page_aligned) {
s->data_count = boundary_count + begin;
boundary_count = 0;
} else {
s->data_count = block_size;
boundary_count -= block_size - begin;
}
dma_memory_read(s->dma_as, s->sdmasysad,
&s->fifo_buffer[begin], s->data_count - begin);
s->sdmasysad += s->data_count - begin;
if (s->data_count == block_size) {
sdbus_write_data(&s->sdbus, s->fifo_buffer, block_size);
s->data_count = 0;
if (s->trnmod & SDHC_TRNS_BLK_CNT_EN) {
s->blkcnt--;
}
}
if (page_aligned && boundary_count == 0) {
break;
}
}
}
if (s->blkcnt == 0) {
sdhci_end_transfer(s);
} else {
if (s->norintstsen & SDHC_NISEN_DMA) {
s->norintsts |= SDHC_NIS_DMA;
}
sdhci_update_irq(s);
}
} | 0 | [
"CWE-119"
] | qemu | dfba99f17feb6d4a129da19d38df1bcd8579d1c3 | 291,301,677,380,284,960,000,000,000,000,000,000,000 | 85 | hw/sd/sdhci: Fix DMA Transfer Block Size field
The 'Transfer Block Size' field is 12-bit wide.
See section '2.2.2. Block Size Register (Offset 004h)' in datasheet.
Two different bug reproducer available:
- https://bugs.launchpad.net/qemu/+bug/1892960
- https://ruhr-uni-bochum.sciebo.de/s/NNWP2GfwzYKeKwE?path=%2Fsdhci_oob_write1
Cc: [email protected]
Buglink: https://bugs.launchpad.net/qemu/+bug/1892960
Fixes: d7dfca0807a ("hw/sdhci: introduce standard SD host controller")
Reported-by: Alexander Bulekov <[email protected]>
Signed-off-by: Philippe Mathieu-Daudé <[email protected]>
Reviewed-by: Prasad J Pandit <[email protected]>
Tested-by: Alexander Bulekov <[email protected]>
Message-Id: <[email protected]> |
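The whole fix hinges on the datasheet detail quoted above: only the low 12 bits of the Block Size register encode the block size, so the value must be masked before use. A one-function sketch of that masking (the register layout follows the SDHC spec; the helper itself is illustrative):

```c
#include <stdint.h>

#define BLOCK_SIZE_MASK 0x0fffu  /* 12-bit field per SDHC spec 2.2.2 */

/* Mask before use so a guest cannot smuggle an oversized length
 * through the upper bits of the register. */
static uint16_t sdhci_block_size(uint16_t blksize_reg)
{
    return blksize_reg & BLOCK_SIZE_MASK;
}
```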
static int PKCS7_type_is_other(PKCS7 *p7)
{
int isOther = 1;
int nid = OBJ_obj2nid(p7->type);
switch (nid) {
case NID_pkcs7_data:
case NID_pkcs7_signed:
case NID_pkcs7_enveloped:
case NID_pkcs7_signedAndEnveloped:
case NID_pkcs7_digest:
case NID_pkcs7_encrypted:
isOther = 0;
break;
default:
isOther = 1;
}
return isOther;
} | 0 | [] | openssl | c0334c2c92dd1bc3ad8138ba6e74006c3631b0f9 | 94,978,571,743,130,300,000,000,000,000,000,000,000 | 22 | PKCS#7: avoid NULL pointer dereferences with missing content
In PKCS#7, the ASN.1 content component is optional.
This typically applies to inner content (detached signatures),
however we must also handle unexpected missing outer content
correctly.
This patch only addresses functions reachable from parsing,
decryption and verification, and functions otherwise associated
with reading potentially untrusted data.
Correcting all low-level API calls requires further work.
CVE-2015-0289
Thanks to Michal Zalewski (Google) for reporting this issue.
Reviewed-by: Steve Henson <[email protected]> |
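The defensive pattern the message calls for is simply to treat the OPTIONAL content field as possibly absent everywhere it is read. A stand-in sketch, not OpenSSL's types:

```c
#include <stddef.h>

/* PKCS#7 content is OPTIONAL in ASN.1, so every accessor must tolerate
 * its absence. Struct and names are illustrative stand-ins. */
struct pkcs7_sketch { void *content; };

static void *pkcs7_get_content(struct pkcs7_sketch *p7)
{
    if (p7 == NULL || p7->content == NULL)
        return NULL;              /* missing content: fail, don't deref */
    return p7->content;
}
```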
static int wolfSSH_SFTP_RecvRealPath(WOLFSSH* ssh, int reqId, byte* data,
int maxSz)
{
WS_SFTP_FILEATRB atr;
char r[WOLFSSH_MAX_FILENAME];
word32 rSz;
word32 lidx = 0;
word32 i;
int ret;
byte* out;
word32 outSz = 0;
WLOG(WS_LOG_SFTP, "Receiving WOLFSSH_FTP_REALPATH");
if (ssh == NULL) {
WLOG(WS_LOG_SFTP, "Bad argument passed in");
return WS_BAD_ARGUMENT;
}
if (maxSz < UINT32_SZ) {
/* not enough for an ato32 call */
return WS_BUFFER_E;
}
ato32(data + lidx, &rSz);
if (rSz >= WOLFSSH_MAX_FILENAME || (int)(rSz + UINT32_SZ) > maxSz) {
return WS_BUFFER_E;
}
lidx += UINT32_SZ;
WMEMCPY(r, data + lidx, rSz);
r[rSz] = '\0';
/* get working directory in the case of receiving non absolute path */
if (r[0] != '/' && r[1] != ':') {
char wd[WOLFSSH_MAX_FILENAME];
WMEMSET(wd, 0, WOLFSSH_MAX_FILENAME);
if (ssh->sftpDefaultPath) {
XSTRNCPY(wd, ssh->sftpDefaultPath, WOLFSSH_MAX_FILENAME - 1);
}
else {
#ifndef USE_WINDOWS_API
if (WGETCWD(ssh->fs, wd, WOLFSSH_MAX_FILENAME) == NULL) {
WLOG(WS_LOG_SFTP, "Unable to get current working directory");
if (wolfSSH_SFTP_CreateStatus(ssh, WOLFSSH_FTP_FAILURE, reqId,
"Directory error", "English", NULL, &outSz)
!= WS_SIZE_ONLY) {
return WS_FATAL_ERROR;
}
out = (byte*) WMALLOC(outSz, ssh->ctx->heap, DYNTYPE_BUFFER);
if (out == NULL) {
return WS_MEMORY_E;
}
if (wolfSSH_SFTP_CreateStatus(ssh, WOLFSSH_FTP_FAILURE, reqId,
"Directory error", "English", out, &outSz)
!= WS_SUCCESS) {
WFREE(out, ssh->ctx->heap, DYNTYPE_BUFFER);
return WS_FATAL_ERROR;
}
/* take over control of buffer */
wolfSSH_SFTP_RecvSetSend(ssh, out, outSz);
return WS_BAD_FILE_E;
}
#endif
}
WSTRNCAT(wd, "/", WOLFSSH_MAX_FILENAME);
WSTRNCAT(wd, r, WOLFSSH_MAX_FILENAME);
WMEMCPY(r, wd, WOLFSSH_MAX_FILENAME);
}
if ((ret = wolfSSH_CleanPath(ssh, r)) < 0) {
return WS_FATAL_ERROR;
}
rSz = (word32)ret;
/* For real path remove ending case of /.
* Lots of peers send a '.' wanting a return of the current absolute path
* not the absolute path + .
*/
if (r[rSz - 2] == WS_DELIM && r[rSz - 1] == '.') {
r[rSz - 1] = '\0';
rSz -= 1;
}
/* for real path always send '/' chars */
for (i = 0; i < rSz; i++) {
if (r[i] == WS_DELIM) r[i] = '/';
}
WLOG(WS_LOG_SFTP, "Real Path Directory = %s", r);
/* send response */
outSz = WOLFSSH_SFTP_HEADER + (UINT32_SZ * 3) + (rSz * 2);
WMEMSET(&atr, 0, sizeof(WS_SFTP_FILEATRB));
outSz += SFTP_AtributesSz(ssh, &atr);
lidx = 0;
/* reuse state buffer if large enough */
out = (outSz > (word32)maxSz)?
(byte*)WMALLOC(outSz, ssh->ctx->heap, DYNTYPE_BUFFER) :
wolfSSH_SFTP_RecvGetData(ssh);
if (out == NULL) {
return WS_MEMORY_E;
}
SFTP_SetHeader(ssh, reqId, WOLFSSH_FTP_NAME,
outSz - WOLFSSH_SFTP_HEADER, out);
lidx += WOLFSSH_SFTP_HEADER;
/* set number of files */
c32toa(1, out + lidx); lidx += UINT32_SZ; /* only sending one file name */
/* set file name size and string */
c32toa(rSz, out + lidx); lidx += UINT32_SZ;
WMEMCPY(out + lidx, r, rSz); lidx += rSz;
/* set long name size and string */
c32toa(rSz, out + lidx); lidx += UINT32_SZ;
WMEMCPY(out + lidx, r, rSz); lidx += rSz;
/* set attributes */
SFTP_SetAttributes(ssh, out + lidx, outSz - lidx, &atr);
/* set send out buffer, "out" buffer is taken over by "ssh" */
wolfSSH_SFTP_RecvSetSend(ssh, out, outSz);
return WS_SUCCESS;
} | 0 | [
"CWE-190"
] | wolfssh | edb272e35ee57e7b89f3e127222c6981b6a1e730 | 114,798,905,563,734,080,000,000,000,000,000,000,000 | 126 | ASAN SFTP Fixes
When decoding SFTP messages, fix the size checks so they don't wrap. (ZD12766) |
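A size check of the form rSz + UINT32_SZ > maxSz can wrap when rSz is near UINT32_MAX, which is the class of bug the message refers to. A wrap-safe formulation compares by subtraction on the side that cannot underflow; this is an illustrative helper, not wolfSSH's code:

```c
#include <stdbool.h>
#include <stdint.h>

/* Does a header-prefixed payload of rSz bytes fit in maxSz? */
static bool length_fits(uint32_t rSz, uint32_t maxSz, uint32_t hdr)
{
    if (maxSz < hdr)
        return false;
    return rSz <= maxSz - hdr;    /* no overflow possible here */
}
```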
PHP_FUNCTION(filter_id)
{
int i, filter_len;
int size = sizeof(filter_list) / sizeof(filter_list_entry);
char *filter;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s", &filter, &filter_len) == FAILURE) {
return;
}
for (i = 0; i < size; ++i) {
if (strcmp(filter_list[i].name, filter) == 0) {
RETURN_LONG(filter_list[i].id);
}
}
RETURN_FALSE;
} | 0 | [
"CWE-190"
] | php-src | a5b5743d71fbd5ae944469a1ca443a1cdb30663a | 241,200,260,415,494,350,000,000,000,000,000,000,000 | 18 | full_special_chars filter from trunk - approved by johannes |
static void change_curseg(struct f2fs_sb_info *sbi, int type, bool reuse)
{
struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
struct curseg_info *curseg = CURSEG_I(sbi, type);
unsigned int new_segno = curseg->next_segno;
struct f2fs_summary_block *sum_node;
struct page *sum_page;
write_sum_page(sbi, curseg->sum_blk,
GET_SUM_BLOCK(sbi, curseg->segno));
__set_test_and_inuse(sbi, new_segno);
mutex_lock(&dirty_i->seglist_lock);
__remove_dirty_segment(sbi, new_segno, PRE);
__remove_dirty_segment(sbi, new_segno, DIRTY);
mutex_unlock(&dirty_i->seglist_lock);
reset_curseg(sbi, type, 1);
curseg->alloc_type = SSR;
__next_free_blkoff(sbi, curseg, 0);
if (reuse) {
sum_page = get_sum_page(sbi, new_segno);
sum_node = (struct f2fs_summary_block *)page_address(sum_page);
memcpy(curseg->sum_blk, sum_node, SUM_ENTRY_SIZE);
f2fs_put_page(sum_page, 1);
}
} | 0 | [
"CWE-200",
"CWE-476"
] | linux | d4fdf8ba0e5808ba9ad6b44337783bd9935e0982 | 302,004,675,520,304,850,000,000,000,000,000,000,000 | 28 | f2fs: fix a panic caused by NULL flush_cmd_control
Mount fs with option noflush_merge, boot failed for illegal address
fcc in function f2fs_issue_flush:
if (!test_opt(sbi, FLUSH_MERGE)) {
ret = submit_flush_wait(sbi);
atomic_inc(&fcc->issued_flush); -> Here, fcc illegal
return ret;
}
Signed-off-by: Yunlei He <[email protected]>
Signed-off-by: Jaegeuk Kim <[email protected]> |
deprecated_command (const char *name)
{
log_info(_("WARNING: \"%s\" is a deprecated command - do not use it\n"),
name);
} | 0 | [
"CWE-20"
] | gnupg | 2183683bd633818dd031b090b5530951de76f392 | 100,397,037,120,914,060,000,000,000,000,000,000,000 | 5 | Use inline functions to convert buffer data to scalars.
* common/host2net.h (buf16_to_ulong, buf16_to_uint): New.
(buf16_to_ushort, buf16_to_u16): New.
(buf32_to_size_t, buf32_to_ulong, buf32_to_uint, buf32_to_u32): New.
--
Commit 91b826a38880fd8a989318585eb502582636ddd8 was not enough to
avoid all sign extension on shift problems. Hanno Böck found a case
with an invalid read due to this problem. To fix that once and for
all almost all uses of "<< 24" and "<< 8" are changed by this patch to
use an inline function from host2net.h.
Signed-off-by: Werner Koch <[email protected]> |
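The helpers the message names all share one pattern: widen each byte to an unsigned type before shifting, so expressions like buf[0] << 24 can never sign-extend through int. A sketch in the spirit of host2net.h (the real bodies may differ):

```c
#include <stdint.h>

/* Assemble a big-endian 32-bit value without signed-shift hazards. */
static inline uint32_t buf32_to_u32(const void *buffer)
{
    const unsigned char *p = buffer;
    return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16)
         | ((uint32_t)p[2] <<  8) |  (uint32_t)p[3];
}
```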
ByteVector::Iterator ByteVector::end()
{
return d->data.end();
} | 0 | [
"CWE-189"
] | taglib | dcdf4fd954e3213c355746fa15b7480461972308 | 163,057,638,470,106,620,000,000,000,000,000,000,000 | 4 | Avoid uint overflow in case the length + index is over UINT_MAX |
Network::FilterStatus Context::onUpstreamData(int data_length, bool end_of_stream) {
if (!in_vm_context_created_ || !wasm_->onUpstreamData_) {
return Network::FilterStatus::Continue;
}
auto result = wasm_->onUpstreamData_(this, id_, static_cast<uint32_t>(data_length),
static_cast<uint32_t>(end_of_stream));
// TODO(PiotrSikora): pull Proxy-WASM's FilterStatus values.
return result.u64_ == 0 ? Network::FilterStatus::Continue : Network::FilterStatus::StopIteration;
} | 0 | [
"CWE-476"
] | envoy | 8788a3cf255b647fd14e6b5e2585abaaedb28153 | 187,375,367,945,449,970,000,000,000,000,000,000,000 | 9 | 1.4 - Do not call into the VM unless the VM Context has been created. (#24)
* Ensure that the in VM Context is created before onDone is called.
Signed-off-by: John Plevyak <[email protected]>
* Update as per offline discussion.
Signed-off-by: John Plevyak <[email protected]>
* Set in_vm_context_created_ in onNetworkNewConnection.
Signed-off-by: John Plevyak <[email protected]>
* Add guards to other network calls.
Signed-off-by: John Plevyak <[email protected]>
* Fix common/wasm tests.
Signed-off-by: John Plevyak <[email protected]>
* Patch tests.
Signed-off-by: John Plevyak <[email protected]>
* Remove unnecessary file from cherry-pick.
Signed-off-by: John Plevyak <[email protected]> |
static void *pvrdma_map_to_pdir(PCIDevice *pdev, uint64_t pdir_dma,
uint32_t nchunks, size_t length)
{
uint64_t *dir, *tbl;
int tbl_idx, dir_idx, addr_idx;
void *host_virt = NULL, *curr_page;
if (!nchunks) {
rdma_error_report("Got nchunks=0");
return NULL;
}
dir = rdma_pci_dma_map(pdev, pdir_dma, TARGET_PAGE_SIZE);
if (!dir) {
rdma_error_report("Failed to map to page directory");
return NULL;
}
tbl = rdma_pci_dma_map(pdev, dir[0], TARGET_PAGE_SIZE);
if (!tbl) {
rdma_error_report("Failed to map to page table 0");
goto out_unmap_dir;
}
curr_page = rdma_pci_dma_map(pdev, (dma_addr_t)tbl[0], TARGET_PAGE_SIZE);
if (!curr_page) {
rdma_error_report("Failed to map the page 0");
goto out_unmap_tbl;
}
host_virt = mremap(curr_page, 0, length, MREMAP_MAYMOVE);
if (host_virt == MAP_FAILED) {
host_virt = NULL;
rdma_error_report("Failed to remap memory for host_virt");
goto out_unmap_tbl;
}
trace_pvrdma_map_to_pdir_host_virt(curr_page, host_virt);
rdma_pci_dma_unmap(pdev, curr_page, TARGET_PAGE_SIZE);
dir_idx = 0;
tbl_idx = 1;
addr_idx = 1;
while (addr_idx < nchunks) {
if (tbl_idx == TARGET_PAGE_SIZE / sizeof(uint64_t)) {
tbl_idx = 0;
dir_idx++;
rdma_pci_dma_unmap(pdev, tbl, TARGET_PAGE_SIZE);
tbl = rdma_pci_dma_map(pdev, dir[dir_idx], TARGET_PAGE_SIZE);
if (!tbl) {
rdma_error_report("Failed to map to page table %d", dir_idx);
goto out_unmap_host_virt;
}
}
curr_page = rdma_pci_dma_map(pdev, (dma_addr_t)tbl[tbl_idx],
TARGET_PAGE_SIZE);
if (!curr_page) {
rdma_error_report("Failed to map to page %d, dir %d", tbl_idx,
dir_idx);
goto out_unmap_host_virt;
}
mremap(curr_page, 0, TARGET_PAGE_SIZE, MREMAP_MAYMOVE | MREMAP_FIXED,
host_virt + TARGET_PAGE_SIZE * addr_idx);
trace_pvrdma_map_to_pdir_next_page(addr_idx, curr_page, host_virt +
TARGET_PAGE_SIZE * addr_idx);
rdma_pci_dma_unmap(pdev, curr_page, TARGET_PAGE_SIZE);
addr_idx++;
tbl_idx++;
}
goto out_unmap_tbl;
out_unmap_host_virt:
munmap(host_virt, length);
host_virt = NULL;
out_unmap_tbl:
rdma_pci_dma_unmap(pdev, tbl, TARGET_PAGE_SIZE);
out_unmap_dir:
rdma_pci_dma_unmap(pdev, dir, TARGET_PAGE_SIZE);
return host_virt;
} | 1 | [
"CWE-787"
] | qemu | 284f191b4abad213aed04cb0458e1600fd18d7c4 | 334,074,804,490,448,700,000,000,000,000,000,000,000 | 90 | hw/rdma: Fix possible mremap overflow in the pvrdma device (CVE-2021-3582)
Ensure mremap boundaries not trusting the guest kernel to
pass the correct buffer length.
Fixes: CVE-2021-3582
Reported-by: VictorV (Kunlun Lab) <[email protected]>
Tested-by: VictorV (Kunlun Lab) <[email protected]>
Signed-off-by: Marcel Apfelbaum <[email protected]>
Message-Id: <[email protected]>
Reviewed-by: Yuval Shaia <[email protected]>
Tested-by: Yuval Shaia <[email protected]>
Reviewed-by: Prasad J Pandit <[email protected]>
Signed-off-by: Marcel Apfelbaum <[email protected]> |
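The boundary check the fix calls for derives the mapping length from the guest-supplied chunk count with an overflow-safe multiply and rejects mismatches, instead of mremap()ing a length the guest chose independently. An illustrative helper, not QEMU's code:

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define PAGE_SZ_SKETCH 4096u

/* Reject zero chunks, multiplication overflow, and lengths larger than
 * the page directory can actually describe. */
static bool pdir_len_ok(uint32_t nchunks, size_t length)
{
    if (nchunks == 0)
        return false;
    if (nchunks > SIZE_MAX / PAGE_SZ_SKETCH)   /* multiply would wrap */
        return false;
    return length <= (size_t)nchunks * PAGE_SZ_SKETCH;
}
```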
static void app_inbytes_progress(pn_transport_t *transport, size_t minimum)
{
pni_ssl_t *ssl = transport->ssl;
// Make more decrypted data available, if possible. Otherwise, move
// unread bytes to front of inbuf2 to make room for next bulk decryption.
// SSL may have chopped up data that app layer expects to be
// contiguous. Start, continue or stop double buffering here.
if (ssl->double_buffered) {
if (ssl->app_inbytes.size == 0) {
// no straggler bytes, optimistically stop for now
ssl->double_buffered = false;
pn_buffer_clear(ssl->inbuf2);
ssl->app_inbytes.start = ssl->in_data;
ssl->app_inbytes.size = ssl->in_data_count;
} else {
pn_bytes_t ib2 = pn_buffer_bytes(ssl->inbuf2);
assert(ssl->app_inbytes.size <= ib2.size);
size_t consumed = ib2.size - ssl->app_inbytes.size;
if (consumed > 0) {
memmove((void *)ib2.start, ib2.start + consumed, ssl->app_inbytes.size);
pn_buffer_trim(ssl->inbuf2, 0, consumed);
}
if (!pn_buffer_available(ssl->inbuf2)) {
if (!grow_inbuf2(transport, minimum))
// could not add room
return;
}
size_t count = _pni_min(ssl->in_data_count, pn_buffer_available(ssl->inbuf2));
pn_buffer_append(ssl->inbuf2, ssl->in_data, count);
ssl->in_data += count;
ssl->in_data_count -= count;
ssl->app_inbytes = pn_buffer_bytes(ssl->inbuf2);
}
} else {
if (ssl->app_inbytes.size) {
// start double buffering the left over bytes
ssl->double_buffered = true;
pn_buffer_clear(ssl->inbuf2);
if (!pn_buffer_available(ssl->inbuf2)) {
if (!grow_inbuf2(transport, minimum))
// could not add room
return;
}
size_t count = _pni_min(ssl->in_data_count, pn_buffer_available(ssl->inbuf2));
pn_buffer_append(ssl->inbuf2, ssl->in_data, count);
ssl->in_data += count;
ssl->in_data_count -= count;
ssl->app_inbytes = pn_buffer_bytes(ssl->inbuf2);
} else {
// already pointing at all available bytes until next decrypt
}
}
if (ssl->in_data_count == 0)
rewind_sc_inbuf(ssl);
} | 0 | [] | qpid-proton | 4aea0fd8502f5e9af7f22fd60645eeec07bce0b2 | 33,612,965,044,424,880,000,000,000,000,000,000,000 | 55 | PROTON-2014: [c] Ensure SSL mutual authentication
(cherry picked from commit 97c7733f07712665f3d08091c82c393e4c3adbf7) |
ast_for_async_stmt(struct compiling *c, const node *n)
{
/* async_stmt: 'async' (funcdef | with_stmt | for_stmt) */
REQ(n, async_stmt);
REQ(CHILD(n, 0), NAME);
assert(strcmp(STR(CHILD(n, 0)), "async") == 0);
switch (TYPE(CHILD(n, 1))) {
case funcdef:
return ast_for_funcdef_impl(c, n, NULL,
true /* is_async */);
case with_stmt:
return ast_for_with_stmt(c, n,
true /* is_async */);
case for_stmt:
return ast_for_for_stmt(c, n,
true /* is_async */);
default:
PyErr_Format(PyExc_SystemError,
"invalid async stament: %s",
STR(CHILD(n, 1)));
return NULL;
}
} | 0 | [
"CWE-125"
] | cpython | dcfcd146f8e6fc5c2fc16a4c192a0c5f5ca8c53c | 333,949,764,001,919,630,000,000,000,000,000,000,000 | 26 | bpo-35766: Merge typed_ast back into CPython (GH-11645) |
void *Type_MPEclut_Read(struct _cms_typehandler_struct* self, cmsIOHANDLER* io, cmsUInt32Number* nItems, cmsUInt32Number SizeOfTag)
{
cmsStage* mpe = NULL;
cmsUInt16Number InputChans, OutputChans;
cmsUInt8Number Dimensions8[16];
cmsUInt32Number i, nMaxGrids, GridPoints[MAX_INPUT_DIMENSIONS];
_cmsStageCLutData* clut;
if (!_cmsReadUInt16Number(io, &InputChans)) return NULL;
if (!_cmsReadUInt16Number(io, &OutputChans)) return NULL;
if (InputChans == 0) goto Error;
if (OutputChans == 0) goto Error;
if (io ->Read(io, Dimensions8, sizeof(cmsUInt8Number), 16) != 16)
goto Error;
// Copy MAX_INPUT_DIMENSIONS at most. Expand to cmsUInt32Number
nMaxGrids = InputChans > MAX_INPUT_DIMENSIONS ? MAX_INPUT_DIMENSIONS : InputChans;
for (i=0; i < nMaxGrids; i++) GridPoints[i] = (cmsUInt32Number) Dimensions8[i];
// Allocate the true CLUT
mpe = cmsStageAllocCLutFloatGranular(self ->ContextID, GridPoints, InputChans, OutputChans, NULL);
if (mpe == NULL) goto Error;
// Read the data
clut = (_cmsStageCLutData*) mpe ->Data;
for (i=0; i < clut ->nEntries; i++) {
if (!_cmsReadFloat32Number(io, &clut ->Tab.TFloat[i])) goto Error;
}
*nItems = 1;
return mpe;
Error:
*nItems = 0;
if (mpe != NULL) cmsStageFree(mpe);
return NULL;
cmsUNUSED_PARAMETER(SizeOfTag);
} | 0 | [] | Little-CMS | 41d222df1bc6188131a8f46c32eab0a4d4cdf1b6 | 141,168,831,747,630,560,000,000,000,000,000,000,000 | 42 | Memory squeezing fix: lcms2 cmsPipeline construction
When creating a new pipeline, lcms would often try to allocate a stage
and pass it to cmsPipelineInsertStage without checking whether the
allocation succeeded. cmsPipelineInsertStage would then assert (or crash)
if it had not.
The fix here is to change cmsPipelineInsertStage to check and return
an error value. All calling code is then checked to test this return
value and cope. |
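As a rough sketch of the calling convention the fix establishes (real lcms2 API names, but the exact error path is assumed), construction code now tests both the allocation and the insertion instead of letting cmsPipelineInsertStage assert:
cmsPipeline *lut = cmsPipelineAlloc(ContextID, 3, 3);
cmsStage *clut = cmsStageAllocCLut16bit(ContextID, 17, 3, 3, NULL);
if (lut == NULL || clut == NULL)
    goto Error;
if (!cmsPipelineInsertStage(lut, cmsAT_END, clut))
    goto Error;   /* insertion failed under memory pressure: no assert */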
void Greeter::stop() {
// check flag
if (!m_started)
return;
// log message
qDebug() << "Greeter stopping...";
if (daemonApp->testing()) {
// terminate process
m_process->terminate();
// wait for finished
if (!m_process->waitForFinished(5000))
m_process->kill();
}
} | 0 | [
"CWE-284",
"CWE-264"
] | sddm | 4cfed6b0a625593fb43876f04badc4dd99799d86 | 71,249,888,967,030,350,000,000,000,000,000,000,000 | 17 | Disable greeters from loading KDE's debug hander
Some themes may use KDE components which will automatically load KDE's
crash handler.
If the greeter were to then somehow crash, that would leave a crash
handler allowing other actions, albeit as the locked down SDDM user.
Only SDDM users using the breeze theme from plasma-workspace are
affected. Safest and simplest fix is to handle this inside SDDM
disabling kcrash via an environment variable for all future themes that
may use these libraries.
CVE-2015-0856 |
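The environment variable involved is KDE_DEBUG, which KCrash honors to stay disabled; a minimal sketch of exporting it before spawning the greeter (how SDDM wires this into its process setup is assumed):
#include <stdlib.h>
static void disable_kde_crash_handler(void)
{
    /* Set before exec'ing the greeter so any KDE library it pulls in
     * leaves KCrash disabled. */
    setenv("KDE_DEBUG", "1", 1 /* overwrite any existing value */);
}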
static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
struct request_sock *req)
{
return tcp_v6_md5_do_lookup(sk, &inet6_rsk(req)->rmt_addr);
} | 0 | [
"CWE-362"
] | linux-2.6 | f6d8bd051c391c1c0458a30b2a7abcd939329259 | 46,894,972,018,742,965,000,000,000,000,000,000,000 | 5 | inet: add RCU protection to inet->opt
We lack proper synchronization to manipulate inet->opt ip_options.
The problem is that ip_make_skb() calls ip_setup_cork() and
ip_setup_cork() possibly makes a copy of ipc->opt (struct ip_options),
without any protection against another thread manipulating inet->opt.
Another thread can change the inet->opt pointer and free the old one under us.
Use RCU to protect inet->opt (changed to inet->inet_opt).
Instead of handling atomic refcounts, just copy ip_options when
necessary, to avoid cache line dirtying.
We can't insert an rcu_head in struct ip_options since it's included in
skb->cb[], so this patch is large because I had to introduce a new
ip_options_rcu structure.
Signed-off-by: Eric Dumazet <[email protected]>
Cc: Herbert Xu <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
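A kernel-style sketch of the reader side this commit describes (field names come from the message; opt_copy and the surrounding context are assumed): the pointer is sampled under rcu_read_lock() and the options are copied rather than referenced.
struct ip_options_rcu *inet_opt;
rcu_read_lock();
inet_opt = rcu_dereference(inet->inet_opt);
if (inet_opt)
    memcpy(&opt_copy, inet_opt,
           sizeof(*inet_opt) + inet_opt->opt.optlen);  /* private copy */
rcu_read_unlock();
/* work with opt_copy; a writer may free the old options after a grace
 * period without affecting this reader */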
GF_Box *gitn_box_new()
{
ISOM_DECL_BOX_ALLOC(GroupIdToNameBox, GF_ISOM_BOX_TYPE_GITN);
return (GF_Box *)tmp;
} | 0 | [
"CWE-787"
] | gpac | 388ecce75d05e11fc8496aa4857b91245007d26e | 22,828,005,960,932,184,000,000,000,000,000,000,000 | 5 | fixed #1587 |
void* LibRaw:: calloc(size_t n,size_t t)
{
void *p = memmgr.calloc(n,t);
return p;
} | 0 | [
"CWE-399"
] | LibRaw | c14ae36d28e80139b2f31b5d9d7623db3b597a3a | 182,633,444,145,020,040,000,000,000,000,000,000,000 | 5 | fixed error handling for broken full-color images |
static void commit_tree(struct mount *mnt)
{
struct mount *parent = mnt->mnt_parent;
struct mount *m;
LIST_HEAD(head);
struct mnt_namespace *n = parent->mnt_ns;
BUG_ON(parent == mnt);
list_add_tail(&head, &mnt->mnt_list);
list_for_each_entry(m, &head, mnt_list)
m->mnt_ns = n;
list_splice(&head, n->list.prev);
n->mounts += n->pending_mounts;
n->pending_mounts = 0;
__attach_mnt(mnt, parent);
touch_mnt_namespace(n);
} | 0 | [
"CWE-200"
] | linux | 427215d85e8d1476da1a86b8d67aceb485eb3631 | 297,667,815,246,155,540,000,000,000,000,000,000,000 | 21 | ovl: prevent private clone if bind mount is not allowed
Add the following checks from __do_loopback() to clone_private_mount() as
well:
- verify that the mount is in the current namespace
- verify that there are no locked children
Reported-by: Alois Wohlschlager <[email protected]>
Fixes: c771d683a62e ("vfs: introduce clone_private_mount()")
Cc: <[email protected]> # v3.18
Signed-off-by: Miklos Szeredi <[email protected]> |
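Sketched in kernel style, the two added checks would sit at the top of clone_private_mount(); the helper names follow fs/namespace.c conventions and the error label is assumed:
if (!check_mnt(old_mnt))                         /* mount must be in our ns */
    goto invalid;
if (has_locked_children(old_mnt, path->dentry))  /* no locked children */
    goto invalid;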
uint STDCALL mysql_errno(MYSQL *mysql)
{
return mysql ? mysql->net.last_errno : mysql_server_last_errno;
} | 0 | [
"CWE-284",
"CWE-295"
] | mysql-server | 3bd5589e1a5a93f9c224badf983cd65c45215390 | 277,896,501,467,361,040,000,000,000,000,000,000,000 | 4 | WL#6791 : Redefine client --ssl option to imply enforced encryption
# Changed the meaning of the --ssl=1 option of all client binaries
to mean force ssl, not try ssl and fail over to eunecrypted
# Added a new MYSQL_OPT_SSL_ENFORCE mysql_options()
option to specify that an ssl connection is required.
# Added a new macro SSL_SET_OPTIONS() to the client
SSL handling headers that sets all the relevant SSL options at
once.
# Revamped all of the current native clients to use the new macro
# Removed some Windows line endings.
# Added proper handling of the new option into the ssl helper
headers.
# If SSL is mandatory assume that the media is secure enough
for the sha256 plugin to do unencrypted password exchange even
before establishing a connection.
# Set the default ssl cipher to DHE-RSA-AES256-SHA if none is
specified.
# updated test cases that require a non-default cipher to spawn
a mysql command line tool binary since mysqltest has no support
for specifying ciphers.
# updated the replication slave connection code to always enforce
SSL if any of the SSL config options is present.
# test cases added and updated.
# added a mysql_get_option() API to return mysql_options()
values. Used the new API inside the sha256 plugin.
# Fixed compilation warnings because of unused variables.
# Fixed test failures (mysql_ssl and bug13115401)
# Fixed whitespace issues.
# Fully implemented the mysql_get_option() function.
# Added a test case for mysql_get_option()
# fixed some trailing whitespace issues
# fixed some uint/int warnings in mysql_client_test.c
# removed shared memory option from non-windows get_options
tests
# moved MYSQL_OPT_LOCAL_INFILE to the uint options |
static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12)
{
if (!nested_cpu_has_virt_x2apic_mode(vmcs12) &&
!nested_cpu_has_apic_reg_virt(vmcs12) &&
!nested_cpu_has_vid(vmcs12) &&
!nested_cpu_has_posted_intr(vmcs12))
return 0;
/*
* If virtualize x2apic mode is enabled,
* virtualize apic access must be disabled.
*/
if (nested_cpu_has_virt_x2apic_mode(vmcs12) &&
nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
return -EINVAL;
/*
* If virtual interrupt delivery is enabled,
* we must exit on external interrupts.
*/
if (nested_cpu_has_vid(vmcs12) &&
!nested_exit_on_intr(vcpu))
return -EINVAL;
/*
* bits 15:8 should be zero in posted_intr_nv,
* the descriptor address has been already checked
* in nested_get_vmcs12_pages.
*/
if (nested_cpu_has_posted_intr(vmcs12) &&
(!nested_cpu_has_vid(vmcs12) ||
!nested_exit_intr_ack_set(vcpu) ||
vmcs12->posted_intr_nv & 0xff00))
return -EINVAL;
/* tpr shadow is needed by all apicv features. */
if (!nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW))
return -EINVAL;
return 0;
} | 0 | [
"CWE-284"
] | linux | 727ba748e110b4de50d142edca9d6a9b7e6111d8 | 202,094,883,161,172,500,000,000,000,000,000,000,000 | 42 | kvm: nVMX: Enforce cpl=0 for VMX instructions
VMX instructions executed inside a L1 VM will always trigger a VM exit
even when executed with cpl 3. This means we must perform the
privilege check in software.
Fixes: 70f3aac964ae("kvm: nVMX: Remove superfluous VMX instruction fault checks")
Cc: [email protected]
Signed-off-by: Felix Wilhelm <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]> |
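The software check itself is small; a sketch of what the commit adds to the common VMX-instruction permission path (function names assumed from the KVM sources of that era):
/* VMX instructions #GP when executed outside ring 0, and since they
 * always VM-exit from L2, L0 must re-create that behavior itself. */
if (vmx_get_cpl(vcpu)) {
    kvm_inject_gp(vcpu, 0);   /* raise #GP(0) in the guest */
    return 0;                 /* swallow the instruction */
}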
QPDFFormFieldObjectHelper::generateTextAppearance(
QPDFAnnotationObjectHelper& aoh)
{
QPDFObjectHandle AS = aoh.getAppearanceStream("/N");
if (AS.isNull())
{
QTC::TC("qpdf", "QPDFFormFieldObjectHelper create AS from scratch");
QPDFObjectHandle::Rectangle rect = aoh.getRect();
QPDFObjectHandle::Rectangle bbox(
0, 0, rect.urx - rect.llx, rect.ury - rect.lly);
QPDFObjectHandle dict = QPDFObjectHandle::parse(
"<< /Resources << /ProcSet [ /PDF /Text ] >>"
" /Type /XObject /Subtype /Form >>");
dict.replaceKey("/BBox", QPDFObjectHandle::newFromRectangle(bbox));
AS = QPDFObjectHandle::newStream(
this->oh.getOwningQPDF(), "/Tx BMC\nEMC\n");
AS.replaceDict(dict);
QPDFObjectHandle AP = aoh.getAppearanceDictionary();
if (AP.isNull())
{
QTC::TC("qpdf", "QPDFFormFieldObjectHelper create AP from scratch");
aoh.getObjectHandle().replaceKey(
"/AP", QPDFObjectHandle::newDictionary());
AP = aoh.getAppearanceDictionary();
}
AP.replaceKey("/N", AS);
}
if (! AS.isStream())
{
aoh.getObjectHandle().warnIfPossible(
"unable to get normal appearance stream for update");
return;
}
QPDFObjectHandle bbox_obj = AS.getDict().getKey("/BBox");
if (! bbox_obj.isRectangle())
{
aoh.getObjectHandle().warnIfPossible(
"unable to get appearance stream bounding box");
return;
}
QPDFObjectHandle::Rectangle bbox = bbox_obj.getArrayAsRectangle();
std::string DA = getDefaultAppearance();
std::string V = getValueAsString();
std::vector<std::string> opt;
if (isChoice() && ((getFlags() & ff_ch_combo) == 0))
{
opt = getChoices();
}
TfFinder tff;
Pl_QPDFTokenizer tok("tf", &tff);
tok.write(QUtil::unsigned_char_pointer(DA.c_str()), DA.length());
tok.finish();
double tf = tff.getTf();
DA = tff.getDA();
std::string (*encoder)(std::string const&, char) = &QUtil::utf8_to_ascii;
std::string font_name = tff.getFontName();
if (! font_name.empty())
{
// See if the font is encoded with something we know about.
QPDFObjectHandle resources = AS.getDict().getKey("/Resources");
QPDFObjectHandle font = getFontFromResource(resources, font_name);
if (! font.isInitialized())
{
QPDFObjectHandle dr = getInheritableFieldValue("/DR");
font = getFontFromResource(dr, font_name);
}
if (font.isDictionary() &&
font.getKey("/Encoding").isName())
{
std::string encoding = font.getKey("/Encoding").getName();
if (encoding == "/WinAnsiEncoding")
{
QTC::TC("qpdf", "QPDFFormFieldObjectHelper WinAnsi");
encoder = &QUtil::utf8_to_win_ansi;
}
else if (encoding == "/MacRomanEncoding")
{
encoder = &QUtil::utf8_to_mac_roman;
}
}
}
V = (*encoder)(V, '?');
for (size_t i = 0; i < opt.size(); ++i)
{
opt.at(i) = (*encoder)(opt.at(i), '?');
}
AS.addTokenFilter(new ValueSetter(DA, V, opt, tf, bbox));
} | 0 | [
"CWE-787"
] | qpdf | d71f05ca07eb5c7cfa4d6d23e5c1f2a800f52e8e | 321,213,778,841,131,200,000,000,000,000,000,000,000 | 92 | Fix sign and conversion warnings (major)
This makes all integer type conversions that have potential data loss
explicit with calls that do range checks and raise an exception. After
this commit, qpdf builds with no warnings when -Wsign-conversion
-Wconversion is used with gcc or clang or when -W3 -Wd4800 is used
with MSVC. This significantly reduces the likelihood of potential
crashes from bogus integer values.
There are some parts of the code that take int when they should take
size_t or an offset. Such places would make qpdf not support files
with more than 2^31 of something that usually wouldn't be so large. In
the event that such a file shows up and is valid, at least qpdf would
raise an error in the right spot so the issue could be legitimately
addressed rather than failing in some weird way because of a silent
overflow condition. |
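A minimal C sketch of the kind of range-checked conversion the commit describes (qpdf's own helpers live in C++; this hypothetical function only shows the idea):
#include <limits.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
/* Convert size_t to int, failing loudly instead of silently
 * truncating or wrapping on values larger than INT_MAX. */
static int checked_size_to_int(size_t n)
{
    if (n > (size_t)INT_MAX) {
        fprintf(stderr, "integer conversion would lose data\n");
        abort();
    }
    return (int)n;
}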
void test_nghttp2_session_pack_headers_with_padding(void) {
nghttp2_session *session, *sv_session;
accumulator acc;
my_user_data ud;
nghttp2_session_callbacks callbacks;
memset(&callbacks, 0, sizeof(callbacks));
callbacks.send_callback = accumulator_send_callback;
callbacks.on_frame_send_callback = on_frame_send_callback;
callbacks.select_padding_callback = select_padding_callback;
callbacks.on_frame_recv_callback = on_frame_recv_callback;
acc.length = 0;
ud.acc = &acc;
nghttp2_session_client_new(&session, &callbacks, &ud);
nghttp2_session_server_new(&sv_session, &callbacks, &ud);
ud.padlen = 163;
CU_ASSERT(1 == nghttp2_submit_request(session, NULL, reqnv, ARRLEN(reqnv),
NULL, NULL));
CU_ASSERT(0 == nghttp2_session_send(session));
CU_ASSERT(acc.length < NGHTTP2_MAX_PAYLOADLEN);
ud.frame_recv_cb_called = 0;
CU_ASSERT((ssize_t)acc.length ==
nghttp2_session_mem_recv(sv_session, acc.buf, acc.length));
CU_ASSERT(1 == ud.frame_recv_cb_called);
CU_ASSERT(NULL == nghttp2_session_get_next_ob_item(sv_session));
nghttp2_session_del(sv_session);
nghttp2_session_del(session);
} | 0 | [] | nghttp2 | 0a6ce87c22c69438ecbffe52a2859c3a32f1620f | 53,417,262,830,825,020,000,000,000,000,000,000,000 | 34 | Add nghttp2_option_set_max_outbound_ack |
long long round_down_sec(long long bytes)
{
return ((bytes % SECTOR_SIZE) ? (((bytes / SECTOR_SIZE)) * SECTOR_SIZE) : bytes);
} | 0 | [] | eucalyptus | c252889a46f41b4c396b89e005ec89836f2524be | 305,498,973,504,912,570,000,000,000,000,000,000,000 | 4 | Input validation, shellout hardening on back-end
- validating bucketName and bucketPath in BundleInstance
- validating device name in Attach and DetachVolume
- removed some uses of system() and popen()
Fixes EUCA-7572, EUCA-7520 |
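The shellout-hardening point generalizes; here is a hedged sketch (a hypothetical helper, not eucalyptus code) of replacing system()/popen() with fork()+execvp(), so attacker-controlled strings such as device names are passed as argv entries rather than interpreted by a shell:
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>
/* Run argv[0] directly, with no shell in between to expand
 * metacharacters embedded in untrusted arguments. */
static int run_no_shell(char *const argv[])
{
    pid_t pid = fork();
    if (pid < 0)
        return -1;
    if (pid == 0) {
        execvp(argv[0], argv);
        _exit(127);                      /* exec failed */
    }
    int status;
    if (waitpid(pid, &status, 0) < 0)
        return -1;
    return WIFEXITED(status) ? WEXITSTATUS(status) : -1;
}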
format_LEARN(const struct ofpact_learn *a,
const struct ofpact_format_params *fp)
{
learn_format(a, fp->port_map, fp->table_map, fp->s);
} | 0 | [
"CWE-416"
] | ovs | 77cccc74deede443e8b9102299efc869a52b65b2 | 178,945,318,792,093,800,000,000,000,000,000,000,000 | 5 | ofp-actions: Fix use-after-free while decoding RAW_ENCAP.
While decoding a RAW_ENCAP action, decode_ed_prop() might re-allocate
the ofpbuf if there is not enough space left. However, the function
'decode_NXAST_RAW_ENCAP' continues to use the old pointer to the 'encap'
structure, leading to a write-after-free and incorrect decoding.
==3549105==ERROR: AddressSanitizer: heap-use-after-free on address
0x60600000011a at pc 0x0000005f6cc6 bp 0x7ffc3a2d4410 sp 0x7ffc3a2d4408
WRITE of size 2 at 0x60600000011a thread T0
#0 0x5f6cc5 in decode_NXAST_RAW_ENCAP lib/ofp-actions.c:4461:20
#1 0x5f0551 in ofpact_decode ./lib/ofp-actions.inc2:4777:16
#2 0x5ed17c in ofpacts_decode lib/ofp-actions.c:7752:21
#3 0x5eba9a in ofpacts_pull_openflow_actions__ lib/ofp-actions.c:7791:13
#4 0x5eb9fc in ofpacts_pull_openflow_actions lib/ofp-actions.c:7835:12
#5 0x64bb8b in ofputil_decode_packet_out lib/ofp-packet.c:1113:17
#6 0x65b6f4 in ofp_print_packet_out lib/ofp-print.c:148:13
#7 0x659e3f in ofp_to_string__ lib/ofp-print.c:1029:16
#8 0x659b24 in ofp_to_string lib/ofp-print.c:1244:21
#9 0x65a28c in ofp_print lib/ofp-print.c:1288:28
#10 0x540d11 in ofctl_ofp_parse utilities/ovs-ofctl.c:2814:9
#11 0x564228 in ovs_cmdl_run_command__ lib/command-line.c:247:17
#12 0x56408a in ovs_cmdl_run_command lib/command-line.c:278:5
#13 0x5391ae in main utilities/ovs-ofctl.c:179:9
#14 0x7f6911ce9081 in __libc_start_main (/lib64/libc.so.6+0x27081)
#15 0x461fed in _start (utilities/ovs-ofctl+0x461fed)
Fix that by getting a new pointer before using.
Credit to OSS-Fuzz.
Fuzzer regression test will fail only with AddressSanitizer enabled.
Reported-at: https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=27851
Fixes: f839892a206a ("OF support and translation of generic encap and decap")
Acked-by: William Tu <[email protected]>
Signed-off-by: Ilya Maximets <[email protected]> |
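The underlying pattern is worth spelling out; a self-contained sketch (generic code, not the OVS patch): keep an offset into the growable buffer and re-derive the pointer after any call that can reallocate.
#include <assert.h>
#include <stdlib.h>
#include <string.h>
struct buf { char *base; size_t size, cap; };
static void buf_grow(struct buf *b, size_t need)
{
    if (b->size + need > b->cap) {
        b->cap = (b->size + need) * 2;
        b->base = realloc(b->base, b->cap);   /* may move the data */
        assert(b->base);
    }
}
static void append_header(struct buf *b)
{
    size_t hdr_ofs = b->size;       /* remember where the header goes */
    buf_grow(b, 16);                /* can invalidate old pointers */
    char *hdr = b->base + hdr_ofs;  /* re-fetch from base + offset */
    memset(hdr, 0, 16);
    b->size += 16;
}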
STATIC ptr_t GC_store_debug_info_inner(ptr_t p, word sz GC_ATTR_UNUSED,
const char *string, int linenum)
{
word * result = (word *)((oh *)p + 1);
GC_ASSERT(GC_size(p) >= sizeof(oh) + sz);
GC_ASSERT(!(SMALL_OBJ(sz) && CROSSES_HBLK(p, sz)));
# ifdef KEEP_BACK_PTRS
((oh *)p) -> oh_back_ptr = HIDE_BACK_PTR(NOT_MARKED);
# endif
# ifdef MAKE_BACK_GRAPH
((oh *)p) -> oh_bg_ptr = HIDE_BACK_PTR((ptr_t)0);
# endif
((oh *)p) -> oh_string = string;
((oh *)p) -> oh_int = (word)linenum;
# ifndef SHORT_DBG_HDRS
((oh *)p) -> oh_sz = sz;
((oh *)p) -> oh_sf = START_FLAG ^ (word)result;
((word *)p)[BYTES_TO_WORDS(GC_size(p))-1] =
result[SIMPLE_ROUNDED_UP_WORDS(sz)] = END_FLAG ^ (word)result;
# endif
return((ptr_t)result);
} | 0 | [
"CWE-119"
] | bdwgc | 7292c02fac2066d39dd1bcc37d1a7054fd1e32ee | 92,256,614,014,317,790,000,000,000,000,000,000,000 | 23 | Fix malloc routines to prevent size value wrap-around
See issue #135 on Github.
* allchblk.c (GC_allochblk, GC_allochblk_nth): Use
OBJ_SZ_TO_BLOCKS_CHECKED instead of OBJ_SZ_TO_BLOCKS.
* malloc.c (GC_alloc_large): Likewise.
* alloc.c (GC_expand_hp_inner): Type of "bytes" local variable changed
from word to size_t; cast ROUNDUP_PAGESIZE argument to size_t; prevent
overflow when computing GC_heapsize+bytes > GC_max_heapsize.
* dbg_mlc.c (GC_debug_malloc, GC_debug_malloc_ignore_off_page,
GC_debug_malloc_atomic_ignore_off_page, GC_debug_generic_malloc,
GC_debug_generic_malloc_inner,
GC_debug_generic_malloc_inner_ignore_off_page,
GC_debug_malloc_stubborn, GC_debug_malloc_atomic,
GC_debug_malloc_uncollectable, GC_debug_malloc_atomic_uncollectable):
Use SIZET_SAT_ADD (instead of "+" operator) to add extra bytes to lb
value.
* fnlz_mlc.c (GC_finalized_malloc): Likewise.
* gcj_mlc.c (GC_debug_gcj_malloc): Likewise.
* include/private/gc_priv.h (ROUNDUP_GRANULE_SIZE, ROUNDED_UP_GRANULES,
ADD_SLOP, ROUNDUP_PAGESIZE): Likewise.
* include/private/gcconfig.h (GET_MEM): Likewise.
* mallocx.c (GC_malloc_many, GC_memalign): Likewise.
* os_dep.c (GC_wince_get_mem, GC_win32_get_mem): Likewise.
* typd_mlc.c (GC_malloc_explicitly_typed,
GC_malloc_explicitly_typed_ignore_off_page,
GC_calloc_explicitly_typed): Likewise.
* headers.c (GC_scratch_alloc): Change type of bytes_to_get from word
to size_t (because ROUNDUP_PAGESIZE_IF_MMAP result type changed).
* include/private/gc_priv.h: Include limits.h (unless SIZE_MAX already
defined).
* include/private/gc_priv.h (GC_SIZE_MAX, GC_SQRT_SIZE_MAX): Move from
malloc.c file.
* include/private/gc_priv.h (SIZET_SAT_ADD): New macro (defined before
include gcconfig.h).
* include/private/gc_priv.h (EXTRA_BYTES, GC_page_size): Change type
to size_t.
* os_dep.c (GC_page_size): Likewise.
* include/private/gc_priv.h (ROUNDUP_GRANULE_SIZE, ROUNDED_UP_GRANULES,
ADD_SLOP, ROUNDUP_PAGESIZE): Add comment about the argument.
* include/private/gcconfig.h (GET_MEM): Likewise.
* include/private/gc_priv.h (ROUNDUP_GRANULE_SIZE, ROUNDED_UP_GRANULES,
ADD_SLOP, OBJ_SZ_TO_BLOCKS, ROUNDUP_PAGESIZE,
ROUNDUP_PAGESIZE_IF_MMAP): Rename argument to "lb".
* include/private/gc_priv.h (OBJ_SZ_TO_BLOCKS_CHECKED): New macro.
* include/private/gcconfig.h (GC_win32_get_mem, GC_wince_get_mem,
GC_unix_get_mem): Change argument type from word to int.
* os_dep.c (GC_unix_mmap_get_mem, GC_unix_get_mem,
GC_unix_sbrk_get_mem, GC_wince_get_mem, GC_win32_get_mem): Likewise.
* malloc.c (GC_alloc_large_and_clear): Call OBJ_SZ_TO_BLOCKS only
if no value wrap around is guaranteed.
* malloc.c (GC_generic_malloc): Do not check for lb_rounded < lb case
(because ROUNDED_UP_GRANULES and GRANULES_TO_BYTES guarantees no value
wrap around).
* mallocx.c (GC_generic_malloc_ignore_off_page): Likewise.
* misc.c (GC_init_size_map): Change "i" local variable type from int
to size_t.
* os_dep.c (GC_write_fault_handler, catch_exception_raise): Likewise.
* misc.c (GC_envfile_init): Cast len to size_t when passed to
ROUNDUP_PAGESIZE_IF_MMAP.
* os_dep.c (GC_setpagesize): Cast GC_sysinfo.dwPageSize and
GETPAGESIZE() to size_t (when setting GC_page_size).
* os_dep.c (GC_unix_mmap_get_mem, GC_unmap_start, GC_remove_protection):
Expand ROUNDUP_PAGESIZE macro but without value wrap-around checking
(the argument is of word type).
* os_dep.c (GC_unix_mmap_get_mem): Replace -GC_page_size with
~GC_page_size+1 (because GC_page_size is unsigned); remove redundant
cast to size_t.
* os_dep.c (GC_unix_sbrk_get_mem): Add explicit cast of GC_page_size
to SBRK_ARG_T.
* os_dep.c (GC_wince_get_mem): Change type of res_bytes local variable
to size_t.
* typd_mlc.c: Do not include limits.h.
* typd_mlc.c (GC_SIZE_MAX, GC_SQRT_SIZE_MAX): Remove (as defined in
gc_priv.h now). |
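The SIZET_SAT_ADD macro referenced throughout is the core of the fix; its shape, as introduced by this change (modulo header plumbing), is a saturating add:
#include <stddef.h>
#define GC_SIZE_MAX ((size_t)-1)
/* Adding slop/extra bytes to a requested size can no longer wrap
 * around to a small value; it saturates at SIZE_MAX and the
 * subsequent allocation fails cleanly. */
#define SIZET_SAT_ADD(a, b) \
    ((a) < GC_SIZE_MAX - (b) ? (a) + (b) : GC_SIZE_MAX)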
gre_print_0(netdissect_options *ndo, const u_char *bp, u_int length)
{
u_int len = length;
uint16_t flags, prot;
/* 16 bits ND_TCHECKed in gre_print() */
flags = EXTRACT_16BITS(bp);
if (ndo->ndo_vflag)
ND_PRINT((ndo, ", Flags [%s]",
bittok2str(gre_flag_values,"none",flags)));
len -= 2;
bp += 2;
ND_TCHECK2(*bp, 2);
if (len < 2)
goto trunc;
prot = EXTRACT_16BITS(bp);
len -= 2;
bp += 2;
if ((flags & GRE_CP) | (flags & GRE_RP)) {
ND_TCHECK2(*bp, 2);
if (len < 2)
goto trunc;
if (ndo->ndo_vflag)
ND_PRINT((ndo, ", sum 0x%x", EXTRACT_16BITS(bp)));
bp += 2;
len -= 2;
ND_TCHECK2(*bp, 2);
if (len < 2)
goto trunc;
ND_PRINT((ndo, ", off 0x%x", EXTRACT_16BITS(bp)));
bp += 2;
len -= 2;
}
if (flags & GRE_KP) {
ND_TCHECK2(*bp, 4);
if (len < 4)
goto trunc;
ND_PRINT((ndo, ", key=0x%x", EXTRACT_32BITS(bp)));
bp += 4;
len -= 4;
}
if (flags & GRE_SP) {
ND_TCHECK2(*bp, 4);
if (len < 4)
goto trunc;
ND_PRINT((ndo, ", seq %u", EXTRACT_32BITS(bp)));
bp += 4;
len -= 4;
}
if (flags & GRE_RP) {
for (;;) {
uint16_t af;
uint8_t sreoff;
uint8_t srelen;
ND_TCHECK2(*bp, 4);
if (len < 4)
goto trunc;
af = EXTRACT_16BITS(bp);
sreoff = *(bp + 2);
srelen = *(bp + 3);
bp += 4;
len -= 4;
if (af == 0 && srelen == 0)
break;
if (!gre_sre_print(ndo, af, sreoff, srelen, bp, len))
goto trunc;
if (len < srelen)
goto trunc;
bp += srelen;
len -= srelen;
}
}
if (ndo->ndo_eflag)
ND_PRINT((ndo, ", proto %s (0x%04x)",
tok2str(ethertype_values,"unknown",prot),
prot));
ND_PRINT((ndo, ", length %u",length));
if (ndo->ndo_vflag < 1)
ND_PRINT((ndo, ": ")); /* put in a colon as protocol demarc */
else
ND_PRINT((ndo, "\n\t")); /* if verbose go multiline */
switch (prot) {
case ETHERTYPE_IP:
ip_print(ndo, bp, len);
break;
case ETHERTYPE_IPV6:
ip6_print(ndo, bp, len);
break;
case ETHERTYPE_MPLS:
mpls_print(ndo, bp, len);
break;
case ETHERTYPE_IPX:
ipx_print(ndo, bp, len);
break;
case ETHERTYPE_ATALK:
atalk_print(ndo, bp, len);
break;
case ETHERTYPE_GRE_ISO:
isoclns_print(ndo, bp, len);
break;
case ETHERTYPE_TEB:
ether_print(ndo, bp, len, ndo->ndo_snapend - bp, NULL, NULL);
break;
default:
ND_PRINT((ndo, "gre-proto-0x%x", prot));
}
return;
trunc:
ND_PRINT((ndo, "%s", tstr));
} | 0 | [
"CWE-125",
"CWE-787"
] | tcpdump | 1dcd10aceabbc03bf571ea32b892c522cbe923de | 220,033,876,619,900,140,000,000,000,000,000,000,000 | 126 | CVE-2017-12897/ISO CLNS: Use ND_TTEST() for the bounds checks in isoclns_print().
This fixes a buffer over-read discovered by Kamil Frankowicz.
Don't pass the remaining caplen - that's too hard to get right, and we
were getting it wrong in at least one case; just use ND_TTEST().
Add a test using the capture file supplied by the reporter(s). |
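The general shape of an ND_TTEST()-style check, written as a standalone helper (tcpdump's macros expand to roughly this; the exact expansion varies by version):
#include <stddef.h>
#include <stdint.h>
/* p..p+len must lie inside the captured data [start, end); written so
 * a hostile len cannot overflow the pointer arithmetic. */
static int in_capture(const uint8_t *p, size_t len,
                      const uint8_t *start, const uint8_t *end)
{
    return p >= start && p <= end && (size_t)(end - p) >= len;
}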
static int no_open(struct inode *inode, struct file *file)
{
return -ENXIO;
} | 0 | [
"CWE-269"
] | linux | 0fa3ecd87848c9c93c2c828ef4c3a8ca36ce46c7 | 304,689,247,519,746,400,000,000,000,000,000,000,000 | 4 | Fix up non-directory creation in SGID directories
sgid directories have special semantics, making newly created files in
the directory belong to the group of the directory, and newly created
subdirectories will also become sgid. This is historically used for
group-shared directories.
But group directories writable by non-group members should not imply
that such non-group members can magically join the group, so make sure
to clear the sgid bit on non-directories for non-members (but remember
that sgid without group execute means "mandatory locking", just to
confuse things even more).
Reported-by: Jann Horn <[email protected]>
Cc: Andy Lutomirski <[email protected]>
Cc: Al Viro <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
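The resulting rule, sketched after the kernel's inode_init_owner() as changed by this commit (a kernel fragment using kernel helpers, not standalone code):
if (dir->i_mode & S_ISGID) {
    inode->i_gid = dir->i_gid;              /* group still inherited */
    if (S_ISDIR(mode))
        mode |= S_ISGID;                    /* subdirectories stay sgid */
    else if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP) &&
             !in_group_p(inode->i_gid) &&
             !capable_wrt_inode_uidgid(dir, CAP_FSETID))
        mode &= ~S_ISGID;                   /* non-member: strip sgid */
}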
//! Return the factorial of n
inline double factorial(const int n) {
if (n<0) return cimg::type<double>::nan();
if (n<2) return 1;
double res = 2;
for (int i = 3; i<=n; ++i) res*=i;
return res;
} | 0 | [
"CWE-125"
] | CImg | 10af1e8c1ad2a58a0a3342a856bae63e8f257abb | 331,591,955,797,229,300,000,000,000,000,000,000,000 | 7 | Fix other issues in 'CImg<T>::load_bmp()'. |
inline T max(T a, T b)
{
const T a_larger = b - a; // negative if a is larger
return select(expand_top_bit(a), a, b);
} | 0 | [
"CWE-200"
] | botan | bcf13fa153a11b3e0ad54e2af6962441cea3adf1 | 128,633,561,606,808,110,000,000,000,000,000,000,000 | 5 | Fixes for CVE-2015-7827 and CVE-2016-2849 |
void intel_pmu_pebs_disable(struct perf_event *event)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;
if (cpuc->n_pebs == cpuc->n_large_pebs &&
cpuc->n_pebs != cpuc->n_pebs_via_pt)
intel_pmu_drain_pebs_buffer();
cpuc->pebs_enabled &= ~(1ULL << hwc->idx);
if ((event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT) &&
(x86_pmu.version < 5))
cpuc->pebs_enabled &= ~(1ULL << (hwc->idx + 32));
else if (event->hw.flags & PERF_X86_EVENT_PEBS_ST)
cpuc->pebs_enabled &= ~(1ULL << 63);
intel_pmu_pebs_via_pt_disable(event);
if (cpuc->enabled)
wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);
hwc->config |= ARCH_PERFMON_EVENTSEL_INT;
} | 0 | [
"CWE-755"
] | linux | d88d05a9e0b6d9356e97129d4ff9942d765f46ea | 255,820,684,251,003,570,000,000,000,000,000,000,000 | 24 | perf/x86/intel: Fix a crash caused by zero PEBS status
A repeatable crash can be triggered by the perf_fuzzer on some Haswell
system.
https://lore.kernel.org/lkml/[email protected]/
For some old CPUs (HSW and earlier), the PEBS status in a PEBS record
may be mistakenly set to 0. To minimize the impact of the defect, the
commit was introduced to try to avoid dropping the PEBS record for some
cases. It adds a check in the intel_pmu_drain_pebs_nhm(), and updates
the local pebs_status accordingly. However, it doesn't correct the PEBS
status in the PEBS record, which may trigger the crash, especially for
the large PEBS.
It's possible that all the PEBS records in a large PEBS have the PEBS
status 0. If so, the first get_next_pebs_record_by_bit() in the
__intel_pmu_pebs_event() returns NULL. The at = NULL. Since it's a large
PEBS, the 'count' parameter must > 1. The second
get_next_pebs_record_by_bit() will crash.
Besides the local pebs_status, correct the PEBS status in the PEBS
record as well.
Fixes: 01330d7288e0 ("perf/x86: Allow zero PEBS status with only single active event")
Reported-by: Vince Weaver <[email protected]>
Suggested-by: Peter Zijlstra (Intel) <[email protected]>
Signed-off-by: Kan Liang <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Cc: [email protected]
Link: https://lkml.kernel.org/r/[email protected] |
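Per the message, the essence of the fix is one assignment (a kernel fragment; the surrounding drain loop is assumed): when hardware left a zero status and exactly one PEBS event is active, write the corrected status back into the record itself, not just into the local variable.
if (!pebs_status && cpuc->pebs_enabled &&
    !(cpuc->pebs_enabled & (cpuc->pebs_enabled - 1)))  /* single bit set? */
    pebs_status = p->status = cpuc->pebs_enabled;      /* fix record too */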
static uint8_t excluded_channels(bitfile *ld, drc_info *drc)
{
uint8_t i, n = 0;
uint8_t num_excl_chan = 7;
for (i = 0; i < 7; i++)
{
drc->exclude_mask[i] = faad_get1bit(ld
DEBUGVAR(1,103,"excluded_channels(): exclude_mask"));
}
n++;
while ((drc->additional_excluded_chns[n-1] = faad_get1bit(ld
DEBUGVAR(1,104,"excluded_channels(): additional_excluded_chns"))) == 1)
{
for (i = num_excl_chan; i < num_excl_chan+7; i++)
{
drc->exclude_mask[i] = faad_get1bit(ld
DEBUGVAR(1,105,"excluded_channels(): exclude_mask"));
}
n++;
num_excl_chan += 7;
}
return n;
} | 1 | [
"CWE-119",
"CWE-787"
] | faad2 | 942c3e0aee748ea6fe97cb2c1aa5893225316174 | 161,374,649,706,340,650,000,000,000,000,000,000,000 | 26 | Fix a couple buffer overflows
https://hackerone.com/reports/502816
https://hackerone.com/reports/507858
https://github.com/videolan/vlc/blob/master/contrib/src/faad2/faad2-fix-overflows.patch |
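A hedged sketch of the missing guard (the MAX_CHANNELS bound is assumed from the surrounding decoder; the shipped patch differs in detail): stop consuming 7-entry groups once another group would overrun the fixed arrays.
/* before each further group of 7 exclude_mask entries: */
if (num_excl_chan + 7 > MAX_CHANNELS)
    return n;   /* refuse input that would write past the arrays */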
static unsigned d_flags_for_inode(struct inode *inode)
{
unsigned add_flags = DCACHE_REGULAR_TYPE;
if (!inode)
return DCACHE_MISS_TYPE;
if (S_ISDIR(inode->i_mode)) {
add_flags = DCACHE_DIRECTORY_TYPE;
if (unlikely(!(inode->i_opflags & IOP_LOOKUP))) {
if (unlikely(!inode->i_op->lookup))
add_flags = DCACHE_AUTODIR_TYPE;
else
inode->i_opflags |= IOP_LOOKUP;
}
goto type_determined;
}
if (unlikely(!(inode->i_opflags & IOP_NOFOLLOW))) {
if (unlikely(inode->i_op->get_link)) {
add_flags = DCACHE_SYMLINK_TYPE;
goto type_determined;
}
inode->i_opflags |= IOP_NOFOLLOW;
}
if (unlikely(!S_ISREG(inode->i_mode)))
add_flags = DCACHE_SPECIAL_TYPE;
type_determined:
if (unlikely(IS_AUTOMOUNT(inode)))
add_flags |= DCACHE_NEED_AUTOMOUNT;
return add_flags;
} | 0 | [
"CWE-362",
"CWE-399"
] | linux | 49d31c2f389acfe83417083e1208422b4091cd9e | 271,981,231,331,327,560,000,000,000,000,000,000,000 | 34 | dentry name snapshots
take_dentry_name_snapshot() takes a safe snapshot of a dentry's name;
if the name is a short one, it gets copied into the caller-supplied
structure, otherwise an extra reference to the external name is grabbed
(those are never modified). In either case the pointer to the stable
string is stored into the same structure.
dentry must be held by the caller of take_dentry_name_snapshot(),
but may be freely dropped afterwards - the snapshot will stay
until destroyed by release_dentry_name_snapshot().
Intended use:
struct name_snapshot s;
take_dentry_name_snapshot(&s, dentry);
...
access s.name
...
release_dentry_name_snapshot(&s);
Replaces fsnotify_oldname_...(), gets used in fsnotify to obtain the name
to pass down with event.
Signed-off-by: Al Viro <[email protected]> |
__xmlParserInputBufferCreateFilename(const char *URI, xmlCharEncoding enc) {
xmlParserInputBufferPtr ret;
int i = 0;
void *context = NULL;
if (xmlInputCallbackInitialized == 0)
xmlRegisterDefaultInputCallbacks();
if (URI == NULL) return(NULL);
/*
* Try to find one of the input accept method accepting that scheme
* Go in reverse to give precedence to user defined handlers.
*/
if (context == NULL) {
for (i = xmlInputCallbackNr - 1;i >= 0;i--) {
if ((xmlInputCallbackTable[i].matchcallback != NULL) &&
(xmlInputCallbackTable[i].matchcallback(URI) != 0)) {
context = xmlInputCallbackTable[i].opencallback(URI);
if (context != NULL) {
break;
}
}
}
}
if (context == NULL) {
return(NULL);
}
/*
* Allocate the Input buffer front-end.
*/
ret = xmlAllocParserInputBuffer(enc);
if (ret != NULL) {
ret->context = context;
ret->readcallback = xmlInputCallbackTable[i].readcallback;
ret->closecallback = xmlInputCallbackTable[i].closecallback;
#ifdef HAVE_ZLIB_H
if ((xmlInputCallbackTable[i].opencallback == xmlGzfileOpen) &&
(strcmp(URI, "-") != 0)) {
#if defined(ZLIB_VERNUM) && ZLIB_VERNUM >= 0x1230
ret->compressed = !gzdirect(context);
#else
if (((z_stream *)context)->avail_in > 4) {
char *cptr, buff4[4];
cptr = (char *) ((z_stream *)context)->next_in;
if (gzread(context, buff4, 4) == 4) {
if (strncmp(buff4, cptr, 4) == 0)
ret->compressed = 0;
else
ret->compressed = 1;
gzrewind(context);
}
}
#endif
}
#endif
#ifdef LIBXML_LZMA_ENABLED
if ((xmlInputCallbackTable[i].opencallback == xmlXzfileOpen) &&
(strcmp(URI, "-") != 0)) {
ret->compressed = __libxml2_xzcompressed(context);
}
#endif
}
else
xmlInputCallbackTable[i].closecallback (context);
return(ret);
} | 0 | [
"CWE-134"
] | libxml2 | 4472c3a5a5b516aaf59b89be602fbce52756c3e9 | 250,738,401,232,973,000,000,000,000,000,000,000,000 | 69 | Fix some format string warnings with possible format string vulnerability
For https://bugzilla.gnome.org/show_bug.cgi?id=761029
Decorate every method in libxml2 with the appropriate
LIBXML_ATTR_FORMAT(fmt,args) macro and add some cleanups
following the reports. |
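On GCC/Clang the macro reduces to the printf format attribute, which lets the compiler flag mismatched or attacker-influenced format strings at build time; a sketch of the definition and one decorated declaration:
#ifndef LIBXML_ATTR_FORMAT
# ifdef __GNUC__
#  define LIBXML_ATTR_FORMAT(fmt, args) \
     __attribute__((__format__(__printf__, fmt, args)))
# else
#  define LIBXML_ATTR_FORMAT(fmt, args)
# endif
#endif
/* argument 2 is the format string, varargs start at argument 3 */
void my_error(void *ctx, const char *msg, ...) LIBXML_ATTR_FORMAT(2, 3);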
Input::readImageData(boost::shared_ptr<IOChannel> in, FileType type)
{
std::auto_ptr<GnashImage> im;
std::auto_ptr<Input> inChannel;
switch (type) {
#ifdef USE_PNG
case GNASH_FILETYPE_PNG:
inChannel = createPngInput(in);
break;
#endif
#ifdef USE_GIF
case GNASH_FILETYPE_GIF:
inChannel = createGifInput(in);
break;
#endif
case GNASH_FILETYPE_JPEG:
inChannel = JpegInput::create(in);
break;
default:
break;
}
if (!inChannel.get()) return im;
const size_t height = inChannel->getHeight();
const size_t width = inChannel->getWidth();
try {
switch (inChannel->imageType()) {
case TYPE_RGB:
im.reset(new ImageRGB(width, height));
break;
case TYPE_RGBA:
im.reset(new ImageRGBA(width, height));
break;
default:
log_error(_("Invalid image returned"));
return im;
}
}
catch (std::bad_alloc& e) {
// This should be caught here because ~JpegInput can also
// throw an exception on stack unwinding and this confuses
// remote catchers.
log_error(_("Out of memory while trying to create %dx%d image"),
width, height);
return im;
}
for (size_t i = 0; i < height; ++i) {
inChannel->readScanline(scanline(*im, i));
}
// The renderers expect RGBA data to be preprocessed. JPEG images are
// never transparent, but the addition of alpha data stored elsewhere
// in the SWF is possible; in that case, the processing happens during
// mergeAlpha().
if (im->type() == TYPE_RGBA) {
processAlpha(im->begin(), width * height);
}
return im;
} | 0 | [
"CWE-189"
] | gnash | bb4dc77eecb6ed1b967e3ecbce3dac6c5e6f1527 | 88,779,772,369,733,990,000,000,000,000,000,000,000 | 64 | Fix crash in GnashImage.cpp |
handle_get_property (GDBusConnection *connection,
const gchar *sender,
const gchar *object_path,
const gchar *interface_name,
const gchar *property_name,
GError **error,
gpointer user_data)
{
A11yBusLauncher *app = user_data;
if (g_strcmp0 (property_name, "IsEnabled") == 0)
return g_variant_new ("b", app->a11y_enabled);
else if (g_strcmp0 (property_name, "ScreenReaderEnabled") == 0)
return g_variant_new ("b", app->screen_reader_enabled);
else
return NULL;
} | 0 | [] | at-spi2-core | c2e87fe00b596dba20c9d57d406ab8faa744b15a | 337,182,814,677,588,150,000,000,000,000,000,000,000 | 17 | Fix inverted logic.
Don't write more into a buffer than it can hold.
https://bugzilla.gnome.org/show_bug.cgi?id=791124 |
static void dump_pagetable(unsigned long address)
{
pgd_t *base = __va(read_cr3() & PHYSICAL_PAGE_MASK);
pgd_t *pgd = base + pgd_index(address);
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
if (bad_address(pgd))
goto bad;
printk("PGD %lx ", pgd_val(*pgd));
if (!pgd_present(*pgd))
goto out;
pud = pud_offset(pgd, address);
if (bad_address(pud))
goto bad;
printk("PUD %lx ", pud_val(*pud));
if (!pud_present(*pud) || pud_large(*pud))
goto out;
pmd = pmd_offset(pud, address);
if (bad_address(pmd))
goto bad;
printk("PMD %lx ", pmd_val(*pmd));
if (!pmd_present(*pmd) || pmd_large(*pmd))
goto out;
pte = pte_offset_kernel(pmd, address);
if (bad_address(pte))
goto bad;
printk("PTE %lx", pte_val(*pte));
out:
printk("\n");
return;
bad:
printk("BAD\n");
} | 0 | [
"CWE-264"
] | linux | 548acf19234dbda5a52d5a8e7e205af46e9da840 | 251,186,358,616,526,800,000,000,000,000,000,000,000 | 43 | x86/mm: Expand the exception table logic to allow new handling options
Huge amounts of help from Andy Lutomirski and Borislav Petkov to
produce this. Andy provided the inspiration to add classes to the
exception table with a clever bit-squeezing trick, Boris pointed
out how much cleaner it would all be if we just had a new field.
Linus Torvalds blessed the expansion with:
' I'd rather not be clever in order to save just a tiny amount of space
in the exception table, which isn't really critical for anybody. '
The third field is another relative function pointer, this one to a
handler that executes the actions.
We start out with three handlers:
1: Legacy - just jumps the to fixup IP
2: Fault - provide the trap number in %ax to the fixup code
3: Cleaned up legacy for the uaccess error hack
Signed-off-by: Tony Luck <[email protected]>
Reviewed-by: Borislav Petkov <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Link: http://lkml.kernel.org/r/f6af78fcbd348cf4939875cfda9c19689b5e50b8.1455732970.git.tony.luck@intel.com
Signed-off-by: Ingo Molnar <[email protected]> |
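The resulting table entry, as described above, is three 32-bit self-relative pointers, which keeps entries position-independent and half the size of absolute 64-bit pointers:
/* arch/x86 shape after this commit (relative offsets, not absolute
 * addresses): */
struct exception_table_entry {
    int insn;      /* faulting instruction, relative to &insn */
    int fixup;     /* fixup landing point, relative to &fixup */
    int handler;   /* ex_handler_* callback, relative to &handler */
};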
page_objects_sort(fz_context *ctx, page_objects *po)
{
int i, j;
int n = po->len;
/* Step 1: Make a heap */
/* Invariant: Valid heap in [0..i), unsorted elements in [i..n) */
for (i = 1; i < n; i++)
{
/* Now bubble backwards to maintain heap invariant */
j = i;
while (j != 0)
{
int tmp;
int k = (j-1)>>1;
if (po->object[k] >= po->object[j])
break;
tmp = po->object[k];
po->object[k] = po->object[j];
po->object[j] = tmp;
j = k;
}
}
/* Step 2: Heap sort */
/* Invariant: valid heap in [0..i), sorted list in [i..n) */
/* Initially: i = n */
for (i = n-1; i > 0; i--)
{
/* Swap the maximum (0th) element from the page_objects into its place
* in the sorted list (position i). */
int tmp = po->object[0];
po->object[0] = po->object[i];
po->object[i] = tmp;
/* Now, the page_objects is invalid because the 0th element is out
* of place. Bubble it until the page_objects is valid. */
j = 0;
while (1)
{
/* Children are k and k+1 */
int k = (j+1)*2-1;
/* If both children out of the page_objects, we're done */
if (k > i-1)
break;
/* If both are in the page_objects, pick the larger one */
if (k < i-1 && po->object[k] < po->object[k+1])
k++;
/* If j is bigger than k (i.e. both of its children),
* we're done */
if (po->object[j] > po->object[k])
break;
tmp = po->object[k];
po->object[k] = po->object[j];
po->object[j] = tmp;
j = k;
}
}
} | 0 | [
"CWE-119"
] | mupdf | 520cc26d18c9ee245b56e9e91f9d4fcae02be5f0 | 13,810,362,733,035,538,000,000,000,000,000,000,000 | 58 | Bug 689699: Avoid buffer overrun.
When cleaning a pdf file, various lists (of pdf_xref_len length) are
defined early on.
If we trigger a repair during the clean, this can cause pdf_xref_len
to increase causing an overrun.
Fix this by watching for changes in the length, and checking accesses
to the list for validity.
This also appears to fix bugs 698700-698703. |
static int hclge_tm_schd_mode_vnet_base_cfg(struct hclge_vport *vport)
{
struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
struct hclge_dev *hdev = vport->back;
int ret;
u8 i;
ret = hclge_tm_pri_schd_mode_cfg(hdev, vport->vport_id);
if (ret)
return ret;
for (i = 0; i < kinfo->num_tc; i++) {
u8 sch_mode = hdev->tm_info.tc_info[i].tc_sch_mode;
ret = hclge_tm_qs_schd_mode_cfg(hdev, vport->qs_offset + i,
sch_mode);
if (ret)
return ret;
}
return 0;
} | 1 | [
"CWE-125"
] | linux | 04f25edb48c441fc278ecc154c270f16966cbb90 | 338,400,725,075,583,130,000,000,000,000,000,000,000 | 22 | net: hns3: add some error checking in hclge_tm module
When hdev->tx_sch_mode is HCLGE_FLAG_VNET_BASE_SCH_MODE,
hclge_tm_schd_mode_vnet_base_cfg() calls hclge_tm_pri_schd_mode_cfg()
with vport->vport_id as pri_id, which is used as an index into
hdev->tm_info.tc_info; this causes an out-of-bounds access
if vport_id is equal to or larger than HNAE3_MAX_TC.
Also, the hardware only supports a maximum speed of HCLGE_ETHER_MAX_RATE.
So this patch adds two checks for above cases.
Fixes: 848440544b41 ("net: hns3: Add support of TX Scheduler & Shaper to HNS3 driver")
Signed-off-by: Yunsheng Lin <[email protected]>
Signed-off-by: Peng Li <[email protected]>
Signed-off-by: Huazhong Tan <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
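Sketches of the two checks the message describes (kernel fragments; the exact placement inside hclge_tm and the 'speed' variable are assumed):
if (vport->vport_id >= HNAE3_MAX_TC)   /* index into tm_info.tc_info */
    return -EINVAL;
if (speed > HCLGE_ETHER_MAX_RATE)      /* hardware rate ceiling */
    return -EINVAL;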
static int tcos_construct_fci(const sc_file_t *file,
u8 *out, size_t *outlen)
{
u8 *p = out;
u8 buf[64];
size_t n;
/* FIXME: possible buffer overflow */
*p++ = 0x6F; /* FCI */
p++;
/* File size */
buf[0] = (file->size >> 8) & 0xFF;
buf[1] = file->size & 0xFF;
sc_asn1_put_tag(0x81, buf, 2, p, 16, &p);
/* File descriptor */
n = 0;
buf[n] = file->shareable ? 0x40 : 0;
switch (file->type) {
case SC_FILE_TYPE_WORKING_EF:
break;
case SC_FILE_TYPE_DF:
buf[0] |= 0x38;
break;
default:
return SC_ERROR_NOT_SUPPORTED;
}
buf[n++] |= file->ef_structure & 7;
if ( (file->ef_structure & 7) > 1) {
/* record structured file */
buf[n++] = 0x41; /* indicate 3rd byte */
buf[n++] = file->record_length;
}
sc_asn1_put_tag(0x82, buf, n, p, 8, &p);
/* File identifier */
buf[0] = (file->id >> 8) & 0xFF;
buf[1] = file->id & 0xFF;
sc_asn1_put_tag(0x83, buf, 2, p, 16, &p);
/* Directory name */
if (file->type == SC_FILE_TYPE_DF) {
if (file->namelen) {
sc_asn1_put_tag(0x84, file->name, file->namelen,
p, 16, &p);
}
else {
/* TCOS needs one, so we use a faked one */
snprintf ((char *) buf, sizeof(buf)-1, "foo-%lu",
(unsigned long) time (NULL));
sc_asn1_put_tag(0x84, buf, strlen ((char *) buf), p, 16, &p);
}
}
/* File descriptor extension */
if (file->prop_attr_len && file->prop_attr) {
n = file->prop_attr_len;
memcpy(buf, file->prop_attr, n);
}
else {
n = 0;
buf[n++] = 0x01; /* not invalidated, permanent */
if (file->type == SC_FILE_TYPE_WORKING_EF)
buf[n++] = 0x00; /* generic data file */
}
sc_asn1_put_tag(0x85, buf, n, p, 16, &p);
/* Security attributes */
if (file->sec_attr_len && file->sec_attr) {
memcpy(buf, file->sec_attr, file->sec_attr_len);
n = file->sec_attr_len;
}
else {
/* no attributes given - fall back to default one */
memcpy (buf+ 0, "\xa4\x00\x00\x00\xff\xff", 6); /* select */
memcpy (buf+ 6, "\xb0\x00\x00\x00\xff\xff", 6); /* read bin */
memcpy (buf+12, "\xd6\x00\x00\x00\xff\xff", 6); /* upd bin */
memcpy (buf+18, "\x60\x00\x00\x00\xff\xff", 6); /* admin grp*/
n = 24;
}
sc_asn1_put_tag(0x86, buf, n, p, sizeof (buf), &p);
/* fixup length of FCI */
out[1] = p - out - 2;
*outlen = p - out;
return 0;
} | 0 | [
"CWE-415",
"CWE-119"
] | OpenSC | 360e95d45ac4123255a4c796db96337f332160ad | 178,475,258,370,357,300,000,000,000,000,000,000,000 | 91 | fixed out of bounds writes
Thanks to Eric Sesterhenn from X41 D-SEC GmbH
for reporting the problems. |
get_data_block (GifContext *context,
unsigned char *buf,
gint *empty_block)
{
if (context->block_count == 0) {
if (!gif_read (context, &context->block_count, 1)) {
return -1;
}
}
if (context->block_count == 0)
if (empty_block) {
*empty_block = TRUE;
return 0;
}
if (!gif_read (context, buf, context->block_count)) {
return -1;
}
return 0;
} | 0 | [] | gdk-pixbuf | f8569bb13e2aa1584dde61ca545144750f7a7c98 | 324,821,776,867,187,350,000,000,000,000,000,000,000 | 23 | GIF: Don't return a partially initialized pixbuf structure
It was found that gdk-pixbuf's GIF image loader routine,
gdk_pixbuf__gif_image_load(), did not properly handle certain return
values from its subroutines. A remote attacker could provide a
specially crafted GIF image which, once opened in an application linked
against gdk-pixbuf, would cause gdk-pixbuf to return a partially
initialized pixbuf structure, possibly with a huge width and height,
leading to termination of that application due to excessive memory use.
The CVE identifier of CVE-2011-2485 has been assigned to this issue. |
MagickExport int CompareStringInfo(const StringInfo *target,
const StringInfo *source)
{
int
status;
assert(target != (StringInfo *) NULL);
assert(target->signature == MagickCoreSignature);
assert(source != (StringInfo *) NULL);
assert(source->signature == MagickCoreSignature);
status=memcmp(target->datum,source->datum,MagickMin(target->length,
source->length));
if (status != 0)
return(status);
if (target->length == source->length)
return(0);
return(target->length < source->length ? -1 : 1);
} | 0 | [
"CWE-190"
] | ImageMagick | be90a5395695f0d19479a5d46b06c678be7f7927 | 174,979,552,977,076,200,000,000,000,000,000,000,000 | 18 | https://github.com/ImageMagick/ImageMagick/issues/1721 |
MagickExport MagickBooleanType SyncImagesSettings(ImageInfo *image_info,
Image *images)
{
Image
*image;
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
assert(images != (Image *) NULL);
assert(images->signature == MagickCoreSignature);
if (images->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
image=images;
for ( ; image != (Image *) NULL; image=GetNextImageInList(image))
(void) SyncImageSettings(image_info,image);
(void) DeleteImageOption(image_info,"page");
return(MagickTrue);
} | 0 | [
"CWE-665"
] | ImageMagick6 | 27b1c74979ac473a430e266ff6c4b645664bc805 | 273,838,736,694,252,750,000,000,000,000,000,000,000 | 18 | https://github.com/ImageMagick/ImageMagick/issues/1522 |
struct condition * compiler_concat_conditions(struct condition *a, u_int16 op, struct condition *b)
{
struct condition *head = a;
/* go to the last condition in 'a' */
while(a->next != NULL)
a = a->next;
/* set the operation */
a->op = op;
/* concatenate the two blocks */
a->next = b;
/* return the head of the conditions */
return head;
} | 0 | [
"CWE-703",
"CWE-125"
] | ettercap | 626dc56686f15f2dda13c48f78c2a666cb6d8506 | 167,399,848,557,463,550,000,000,000,000,000,000,000 | 17 | Exit gracefully in case of corrupted filters (Closes issue #782) |
addCharacterClass(FileInfo *nested, const widechar *name, int length,
CharacterClass **characterClasses,
TranslationTableCharacterAttributes *characterClassAttribute) {
/* Define a character class, whether predefined or user-defined */
CharacterClass *class;
if (*characterClassAttribute) {
if (!(class = malloc(sizeof(*class) + CHARSIZE * (length - 1))))
_lou_outOfMemory();
else {
memset(class, 0, sizeof(*class));
memcpy(class->name, name, CHARSIZE * (class->length = length));
class->attribute = *characterClassAttribute;
*characterClassAttribute <<= 1;
class->next = *characterClasses;
*characterClasses = class;
return class;
}
}
compileError(nested, "character class table overflow.");
return NULL;
} | 0 | [
"CWE-787"
] | liblouis | fb2bfce4ed49ac4656a8f7e5b5526e4838da1dde | 289,034,669,699,913,700,000,000,000,000,000,000,000 | 21 | Fix yet another buffer overflow in the braille table parser
Reported by Henri Salo
Fixes #592 |
int print_embedded_options(PerlIO *stream, char ** options_list, int options_count)
{
int i;
for (i=0; i<options_count; i++)
{
if (options_list[i])
PerlIO_printf(stream,
"Embedded server, parameter[%d]=%s\n",
i, options_list[i]);
}
return 1;
} | 0 | [
"CWE-125"
] | DBD-mysql | 793b72b1a0baa5070adacaac0e12fd995a6fbabe | 135,203,197,064,049,400,000,000,000,000,000,000,000 | 13 | Added Pali's fix for CVE-2016-1249 |
ar6000_alloc_cookie(struct ar6_softc *ar)
{
struct ar_cookie *cookie;
cookie = ar->arCookieList;
if(cookie != NULL)
{
ar->arCookieList = cookie->arc_list_next;
ar->arCookieCount--;
}
return cookie;
} | 0 | [
"CWE-703",
"CWE-264"
] | linux | 550fd08c2cebad61c548def135f67aba284c6162 | 61,903,050,168,055,795,000,000,000,000,000,000,000 | 13 | net: Audit drivers to identify those needing IFF_TX_SKB_SHARING cleared
After the last patch, we are left in a state in which only drivers calling
ether_setup have IFF_TX_SKB_SHARING set (we assume that drivers touching real
hardware call ether_setup for their net_devices and don't hold any state in
their skbs). There are a handful of drivers that violate this assumption, of
course, and they need to be fixed up. This patch identifies those drivers and
marks them as not being able to support the safe transmission of skbs by
clearing the IFF_TX_SKB_SHARING flag in priv_flags.
Signed-off-by: Neil Horman <[email protected]>
CC: Karsten Keil <[email protected]>
CC: "David S. Miller" <[email protected]>
CC: Jay Vosburgh <[email protected]>
CC: Andy Gospodarek <[email protected]>
CC: Patrick McHardy <[email protected]>
CC: Krzysztof Halasa <[email protected]>
CC: "John W. Linville" <[email protected]>
CC: Greg Kroah-Hartman <[email protected]>
CC: Marcel Holtmann <[email protected]>
CC: Johannes Berg <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
int MACH0_(get_bits)(struct MACH0_(obj_t)* bin) {
if (bin) {
int bits = MACH0_(get_bits_from_hdr) (&bin->hdr);
if (bin->hdr.cputype == CPU_TYPE_ARM && bin->entry & 1) {
return 16;
}
return bits;
}
return 32;
} | 0 | [
"CWE-415",
"CWE-125"
] | radare2 | 60208765887f5f008b3b9a883f3addc8bdb9c134 | 51,461,715,102,317,970,000,000,000,000,000,000,000 | 10 | Fix #9970 - heap oobread in mach0 parser (#10026) |
xrdp_mm_setup_mod2(struct xrdp_mm* self)
{
char text[256];
char* name;
char* value;
int i;
int rv;
int key_flags;
int device_flags;
g_memset(text,0,sizeof(char) * 256);
rv = 1;
text[0] = 0;
if (!g_is_wait_obj_set(self->wm->pro_layer->self_term_event))
{
if (self->mod->mod_start(self->mod, self->wm->screen->width,
self->wm->screen->height,
self->wm->screen->bpp) != 0)
{
g_set_wait_obj(self->wm->pro_layer->self_term_event); /* kill session */
}
}
if (!g_is_wait_obj_set(self->wm->pro_layer->self_term_event))
{
if (self->display > 0)
{
if (self->code == 0) /* Xvnc */
{
g_snprintf(text, 255, "%d", 5900 + self->display);
}
else if (self->code == 10) /* X11rdp */
{
g_snprintf(text, 255, "%d", 6200 + self->display);
}
else
{
g_set_wait_obj(self->wm->pro_layer->self_term_event); /* kill session */
}
}
}
if (!g_is_wait_obj_set(self->wm->pro_layer->self_term_event))
{
/* this adds the port to the end of the list, it will already be in
the list as -1
the module should use the last one */
if (g_strlen(text) > 0)
{
list_add_item(self->login_names, (long)g_strdup("port"));
list_add_item(self->login_values, (long)g_strdup(text));
}
/* always set these */
name = self->wm->session->client_info->hostname;
self->mod->mod_set_param(self->mod, "hostname", name);
g_snprintf(text, 255, "%d", self->wm->session->client_info->keylayout);
self->mod->mod_set_param(self->mod, "keylayout", text);
for (i = 0; i < self->login_names->count; i++)
{
name = (char*)list_get_item(self->login_names, i);
value = (char*)list_get_item(self->login_values, i);
self->mod->mod_set_param(self->mod, name, value);
}
/* connect */
if (self->mod->mod_connect(self->mod) == 0)
{
rv = 0;
}
}
if (rv == 0)
{
/* sync modifiers */
key_flags = 0;
device_flags = 0;
if (self->wm->scroll_lock)
{
key_flags |= 1;
}
if (self->wm->num_lock)
{
key_flags |= 2;
}
if (self->wm->caps_lock)
{
key_flags |= 4;
}
if (self->mod != 0)
{
if (self->mod->mod_event != 0)
{
self->mod->mod_event(self->mod, 17, key_flags, device_flags,
key_flags, device_flags);
}
}
}
return rv;
} | 0 | [] | xrdp | d8f9e8310dac362bb9578763d1024178f94f4ecc | 104,917,831,373,081,060,000,000,000,000,000,000,000 | 95 | move temp files from /tmp to /tmp/.xrdp |
MagickExport MagickBooleanType InterpolatePixelInfo(const Image *image,
const CacheView_ *image_view,const PixelInterpolateMethod method,
const double x,const double y,PixelInfo *pixel,ExceptionInfo *exception)
{
MagickBooleanType
status;
double
alpha[16],
gamma;
PixelInfo
pixels[16];
register const Quantum
*p;
register ssize_t
i;
ssize_t
x_offset,
y_offset;
PixelInterpolateMethod
interpolate;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image_view != (CacheView *) NULL);
status=MagickTrue;
x_offset=(ssize_t) floor(ConstrainPixelOffset(x));
y_offset=(ssize_t) floor(ConstrainPixelOffset(y));
interpolate=method;
if (interpolate == UndefinedInterpolatePixel)
interpolate=image->interpolate;
GetPixelInfoPixel(image,(const Quantum *) NULL,pixel);
(void) memset(&pixels,0,sizeof(pixels));
switch (interpolate)
{
case AverageInterpolatePixel: /* nearest 4 neighbours */
case Average9InterpolatePixel: /* nearest 9 neighbours */
case Average16InterpolatePixel: /* nearest 16 neighbours */
{
ssize_t
count;
count=2; /* size of the area to average - default nearest 4 */
if (interpolate == Average9InterpolatePixel)
{
count=3;
x_offset=(ssize_t) (floor(ConstrainPixelOffset(x)+0.5)-1);
y_offset=(ssize_t) (floor(ConstrainPixelOffset(y)+0.5)-1);
}
else if (interpolate == Average16InterpolatePixel)
{
count=4;
x_offset--;
y_offset--;
}
p=GetCacheViewVirtualPixels(image_view,x_offset,y_offset,(size_t) count,
(size_t) count,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
break;
}
count*=count; /* number of pixels - square of size */
for (i=0; i < (ssize_t) count; i++)
{
AlphaBlendPixelInfo(image,p,pixels,alpha);
gamma=PerceptibleReciprocal(alpha[0]);
pixel->red+=gamma*pixels[0].red;
pixel->green+=gamma*pixels[0].green;
pixel->blue+=gamma*pixels[0].blue;
pixel->black+=gamma*pixels[0].black;
pixel->alpha+=pixels[0].alpha;
p += GetPixelChannels(image);
}
gamma=1.0/count; /* average weighting of each pixel in area */
pixel->red*=gamma;
pixel->green*=gamma;
pixel->blue*=gamma;
pixel->black*=gamma;
pixel->alpha*=gamma;
break;
}
case BackgroundInterpolatePixel:
{
*pixel=image->background_color; /* Copy PixelInfo Structure */
break;
}
case BilinearInterpolatePixel:
default:
{
PointInfo
delta,
epsilon;
p=GetCacheViewVirtualPixels(image_view,x_offset,y_offset,2,2,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
break;
}
for (i=0; i < 4L; i++)
AlphaBlendPixelInfo(image,p+i*GetPixelChannels(image),pixels+i,alpha+i);
delta.x=x-x_offset;
delta.y=y-y_offset;
epsilon.x=1.0-delta.x;
epsilon.y=1.0-delta.y;
gamma=((epsilon.y*(epsilon.x*alpha[0]+delta.x*alpha[1])+delta.y*
(epsilon.x*alpha[2]+delta.x*alpha[3])));
gamma=PerceptibleReciprocal(gamma);
pixel->red=gamma*(epsilon.y*(epsilon.x*pixels[0].red+delta.x*
pixels[1].red)+delta.y*(epsilon.x*pixels[2].red+delta.x*pixels[3].red));
pixel->green=gamma*(epsilon.y*(epsilon.x*pixels[0].green+delta.x*
pixels[1].green)+delta.y*(epsilon.x*pixels[2].green+delta.x*
pixels[3].green));
pixel->blue=gamma*(epsilon.y*(epsilon.x*pixels[0].blue+delta.x*
pixels[1].blue)+delta.y*(epsilon.x*pixels[2].blue+delta.x*
pixels[3].blue));
if (image->colorspace == CMYKColorspace)
pixel->black=gamma*(epsilon.y*(epsilon.x*pixels[0].black+delta.x*
pixels[1].black)+delta.y*(epsilon.x*pixels[2].black+delta.x*
pixels[3].black));
gamma=((epsilon.y*(epsilon.x+delta.x)+delta.y*(epsilon.x+delta.x)));
gamma=PerceptibleReciprocal(gamma);
pixel->alpha=gamma*(epsilon.y*(epsilon.x*pixels[0].alpha+delta.x*
pixels[1].alpha)+delta.y*(epsilon.x*pixels[2].alpha+delta.x*
pixels[3].alpha));
break;
}
case BlendInterpolatePixel:
{
p=GetCacheViewVirtualPixels(image_view,x_offset,y_offset,2,2,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
break;
}
for (i=0; i < 4L; i++)
{
GetPixelInfoPixel(image,p+i*GetPixelChannels(image),pixels+i);
AlphaBlendPixelInfo(image,p+i*GetPixelChannels(image),pixels+i,alpha+i);
}
gamma=1.0; /* number of pixels blended together (its variable) */
for (i=0; i <= 1L; i++)
{
if ((y-y_offset) >= 0.75)
{
alpha[i]=alpha[i+2]; /* take right pixels */
pixels[i]=pixels[i+2];
}
else
if ((y-y_offset) > 0.25)
{
gamma=2.0; /* blend both pixels in row */
alpha[i]+=alpha[i+2]; /* add up alpha weights */
pixels[i].red+=pixels[i+2].red;
pixels[i].green+=pixels[i+2].green;
pixels[i].blue+=pixels[i+2].blue;
pixels[i].black+=pixels[i+2].black;
pixels[i].alpha+=pixels[i+2].alpha;
}
}
if ((x-x_offset) >= 0.75)
{
alpha[0]=alpha[1];
pixels[0]=pixels[1];
}
else
if ((x-x_offset) > 0.25)
{
gamma*=2.0; /* blend both rows */
alpha[0]+= alpha[1]; /* add up alpha weights */
pixels[0].red+=pixels[1].red;
pixels[0].green+=pixels[1].green;
pixels[0].blue+=pixels[1].blue;
pixels[0].black+=pixels[1].black;
pixels[0].alpha+=pixels[1].alpha;
}
gamma=1.0/gamma;
alpha[0]=PerceptibleReciprocal(alpha[0]);
pixel->red=alpha[0]*pixels[0].red;
pixel->green=alpha[0]*pixels[0].green; /* divide by sum of alpha */
pixel->blue=alpha[0]*pixels[0].blue;
pixel->black=alpha[0]*pixels[0].black;
pixel->alpha=gamma*pixels[0].alpha; /* divide by number of pixels */
break;
}
case CatromInterpolatePixel:
{
double
cx[4],
cy[4];
p=GetCacheViewVirtualPixels(image_view,x_offset-1,y_offset-1,4,4,
exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
break;
}
for (i=0; i < 16L; i++)
AlphaBlendPixelInfo(image,p+i*GetPixelChannels(image),pixels+i,alpha+i);
CatromWeights((double) (x-x_offset),&cx);
CatromWeights((double) (y-y_offset),&cy);
pixel->red=(cy[0]*(cx[0]*pixels[0].red+cx[1]*pixels[1].red+cx[2]*
pixels[2].red+cx[3]*pixels[3].red)+cy[1]*(cx[0]*pixels[4].red+cx[1]*
pixels[5].red+cx[2]*pixels[6].red+cx[3]*pixels[7].red)+cy[2]*(cx[0]*
pixels[8].red+cx[1]*pixels[9].red+cx[2]*pixels[10].red+cx[3]*
pixels[11].red)+cy[3]*(cx[0]*pixels[12].red+cx[1]*pixels[13].red+cx[2]*
pixels[14].red+cx[3]*pixels[15].red));
pixel->green=(cy[0]*(cx[0]*pixels[0].green+cx[1]*pixels[1].green+cx[2]*
pixels[2].green+cx[3]*pixels[3].green)+cy[1]*(cx[0]*pixels[4].green+
cx[1]*pixels[5].green+cx[2]*pixels[6].green+cx[3]*pixels[7].green)+
cy[2]*(cx[0]*pixels[8].green+cx[1]*pixels[9].green+cx[2]*
pixels[10].green+cx[3]*pixels[11].green)+cy[3]*(cx[0]*
pixels[12].green+cx[1]*pixels[13].green+cx[2]*pixels[14].green+cx[3]*
pixels[15].green));
pixel->blue=(cy[0]*(cx[0]*pixels[0].blue+cx[1]*pixels[1].blue+cx[2]*
pixels[2].blue+cx[3]*pixels[3].blue)+cy[1]*(cx[0]*pixels[4].blue+cx[1]*
pixels[5].blue+cx[2]*pixels[6].blue+cx[3]*pixels[7].blue)+cy[2]*(cx[0]*
pixels[8].blue+cx[1]*pixels[9].blue+cx[2]*pixels[10].blue+cx[3]*
pixels[11].blue)+cy[3]*(cx[0]*pixels[12].blue+cx[1]*pixels[13].blue+
cx[2]*pixels[14].blue+cx[3]*pixels[15].blue));
if (image->colorspace == CMYKColorspace)
pixel->black=(cy[0]*(cx[0]*pixels[0].black+cx[1]*pixels[1].black+cx[2]*
pixels[2].black+cx[3]*pixels[3].black)+cy[1]*(cx[0]*pixels[4].black+
cx[1]*pixels[5].black+cx[2]*pixels[6].black+cx[3]*pixels[7].black)+
cy[2]*(cx[0]*pixels[8].black+cx[1]*pixels[9].black+cx[2]*
pixels[10].black+cx[3]*pixels[11].black)+cy[3]*(cx[0]*
pixels[12].black+cx[1]*pixels[13].black+cx[2]*pixels[14].black+cx[3]*
pixels[15].black));
pixel->alpha=(cy[0]*(cx[0]*pixels[0].alpha+cx[1]*pixels[1].alpha+cx[2]*
pixels[2].alpha+cx[3]*pixels[3].alpha)+cy[1]*(cx[0]*pixels[4].alpha+
cx[1]*pixels[5].alpha+cx[2]*pixels[6].alpha+cx[3]*pixels[7].alpha)+
cy[2]*(cx[0]*pixels[8].alpha+cx[1]*pixels[9].alpha+cx[2]*
pixels[10].alpha+cx[3]*pixels[11].alpha)+cy[3]*(cx[0]*pixels[12].alpha+
cx[1]*pixels[13].alpha+cx[2]*pixels[14].alpha+cx[3]*pixels[15].alpha));
break;
}
case IntegerInterpolatePixel:
{
p=GetCacheViewVirtualPixels(image_view,x_offset,y_offset,1,1,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
break;
}
GetPixelInfoPixel(image,p,pixel);
break;
}
case MeshInterpolatePixel:
{
PointInfo
delta,
luminance;
p=GetCacheViewVirtualPixels(image_view,x_offset,y_offset,2,2,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
break;
}
delta.x=x-x_offset;
delta.y=y-y_offset;
luminance.x=GetPixelLuma(image,p)-(double)
GetPixelLuma(image,p+3*GetPixelChannels(image));
luminance.y=GetPixelLuma(image,p+GetPixelChannels(image))-(double)
GetPixelLuma(image,p+2*GetPixelChannels(image));
AlphaBlendPixelInfo(image,p,pixels+0,alpha+0);
AlphaBlendPixelInfo(image,p+GetPixelChannels(image),pixels+1,alpha+1);
AlphaBlendPixelInfo(image,p+2*GetPixelChannels(image),pixels+2,alpha+2);
AlphaBlendPixelInfo(image,p+3*GetPixelChannels(image),pixels+3,alpha+3);
if (fabs(luminance.x) < fabs(luminance.y))
{
/*
Diagonal 0-3 NW-SE.
*/
if (delta.x <= delta.y)
{
/*
Bottom-left triangle (pixel: 2, diagonal: 0-3).
*/
delta.y=1.0-delta.y;
gamma=MeshInterpolate(&delta,alpha[2],alpha[3],alpha[0]);
gamma=PerceptibleReciprocal(gamma);
pixel->red=gamma*MeshInterpolate(&delta,pixels[2].red,
pixels[3].red,pixels[0].red);
pixel->green=gamma*MeshInterpolate(&delta,pixels[2].green,
pixels[3].green,pixels[0].green);
pixel->blue=gamma*MeshInterpolate(&delta,pixels[2].blue,
pixels[3].blue,pixels[0].blue);
if (image->colorspace == CMYKColorspace)
pixel->black=gamma*MeshInterpolate(&delta,pixels[2].black,
pixels[3].black,pixels[0].black);
gamma=MeshInterpolate(&delta,1.0,1.0,1.0);
pixel->alpha=gamma*MeshInterpolate(&delta,pixels[2].alpha,
pixels[3].alpha,pixels[0].alpha);
}
else
{
/*
              Top-right triangle (pixel: 1, diagonal: 0-3).
*/
delta.x=1.0-delta.x;
gamma=MeshInterpolate(&delta,alpha[1],alpha[0],alpha[3]);
gamma=PerceptibleReciprocal(gamma);
pixel->red=gamma*MeshInterpolate(&delta,pixels[1].red,
pixels[0].red,pixels[3].red);
pixel->green=gamma*MeshInterpolate(&delta,pixels[1].green,
pixels[0].green,pixels[3].green);
pixel->blue=gamma*MeshInterpolate(&delta,pixels[1].blue,
pixels[0].blue,pixels[3].blue);
if (image->colorspace == CMYKColorspace)
pixel->black=gamma*MeshInterpolate(&delta,pixels[1].black,
pixels[0].black,pixels[3].black);
gamma=MeshInterpolate(&delta,1.0,1.0,1.0);
pixel->alpha=gamma*MeshInterpolate(&delta,pixels[1].alpha,
pixels[0].alpha,pixels[3].alpha);
}
}
else
{
/*
Diagonal 1-2 NE-SW.
*/
if (delta.x <= (1.0-delta.y))
{
/*
Top-left triangle (pixel: 0, diagonal: 1-2).
*/
gamma=MeshInterpolate(&delta,alpha[0],alpha[1],alpha[2]);
gamma=PerceptibleReciprocal(gamma);
pixel->red=gamma*MeshInterpolate(&delta,pixels[0].red,
pixels[1].red,pixels[2].red);
pixel->green=gamma*MeshInterpolate(&delta,pixels[0].green,
pixels[1].green,pixels[2].green);
pixel->blue=gamma*MeshInterpolate(&delta,pixels[0].blue,
pixels[1].blue,pixels[2].blue);
if (image->colorspace == CMYKColorspace)
pixel->black=gamma*MeshInterpolate(&delta,pixels[0].black,
pixels[1].black,pixels[2].black);
gamma=MeshInterpolate(&delta,1.0,1.0,1.0);
pixel->alpha=gamma*MeshInterpolate(&delta,pixels[0].alpha,
pixels[1].alpha,pixels[2].alpha);
}
else
{
/*
Bottom-right triangle (pixel: 3, diagonal: 1-2).
*/
delta.x=1.0-delta.x;
delta.y=1.0-delta.y;
gamma=MeshInterpolate(&delta,alpha[3],alpha[2],alpha[1]);
gamma=PerceptibleReciprocal(gamma);
pixel->red=gamma*MeshInterpolate(&delta,pixels[3].red,
pixels[2].red,pixels[1].red);
pixel->green=gamma*MeshInterpolate(&delta,pixels[3].green,
pixels[2].green,pixels[1].green);
pixel->blue=gamma*MeshInterpolate(&delta,pixels[3].blue,
pixels[2].blue,pixels[1].blue);
if (image->colorspace == CMYKColorspace)
pixel->black=gamma*MeshInterpolate(&delta,pixels[3].black,
pixels[2].black,pixels[1].black);
gamma=MeshInterpolate(&delta,1.0,1.0,1.0);
pixel->alpha=gamma*MeshInterpolate(&delta,pixels[3].alpha,
pixels[2].alpha,pixels[1].alpha);
}
}
break;
}
case NearestInterpolatePixel:
{
x_offset=(ssize_t) floor(ConstrainPixelOffset(x)+0.5);
y_offset=(ssize_t) floor(ConstrainPixelOffset(y)+0.5);
p=GetCacheViewVirtualPixels(image_view,x_offset,y_offset,1,1,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
break;
}
GetPixelInfoPixel(image,p,pixel);
break;
}
case SplineInterpolatePixel:
{
double
cx[4],
cy[4];
p=GetCacheViewVirtualPixels(image_view,x_offset-1,y_offset-1,4,4,
exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
break;
}
for (i=0; i < 16L; i++)
AlphaBlendPixelInfo(image,p+i*GetPixelChannels(image),pixels+i,alpha+i);
SplineWeights((double) (x-x_offset),&cx);
SplineWeights((double) (y-y_offset),&cy);
pixel->red=(cy[0]*(cx[0]*pixels[0].red+cx[1]*pixels[1].red+cx[2]*
pixels[2].red+cx[3]*pixels[3].red)+cy[1]*(cx[0]*pixels[4].red+cx[1]*
pixels[5].red+cx[2]*pixels[6].red+cx[3]*pixels[7].red)+cy[2]*(cx[0]*
pixels[8].red+cx[1]*pixels[9].red+cx[2]*pixels[10].red+cx[3]*
pixels[11].red)+cy[3]*(cx[0]*pixels[12].red+cx[1]*pixels[13].red+cx[2]*
pixels[14].red+cx[3]*pixels[15].red));
pixel->green=(cy[0]*(cx[0]*pixels[0].green+cx[1]*pixels[1].green+cx[2]*
pixels[2].green+cx[3]*pixels[3].green)+cy[1]*(cx[0]*pixels[4].green+
cx[1]*pixels[5].green+cx[2]*pixels[6].green+cx[3]*pixels[7].green)+
cy[2]*(cx[0]*pixels[8].green+cx[1]*pixels[9].green+cx[2]*
pixels[10].green+cx[3]*pixels[11].green)+cy[3]*(cx[0]*pixels[12].green+
cx[1]*pixels[13].green+cx[2]*pixels[14].green+cx[3]*pixels[15].green));
pixel->blue=(cy[0]*(cx[0]*pixels[0].blue+cx[1]*pixels[1].blue+cx[2]*
pixels[2].blue+cx[3]*pixels[3].blue)+cy[1]*(cx[0]*pixels[4].blue+cx[1]*
pixels[5].blue+cx[2]*pixels[6].blue+cx[3]*pixels[7].blue)+cy[2]*(cx[0]*
pixels[8].blue+cx[1]*pixels[9].blue+cx[2]*pixels[10].blue+cx[3]*
pixels[11].blue)+cy[3]*(cx[0]*pixels[12].blue+cx[1]*pixels[13].blue+
cx[2]*pixels[14].blue+cx[3]*pixels[15].blue));
if (image->colorspace == CMYKColorspace)
pixel->black=(cy[0]*(cx[0]*pixels[0].black+cx[1]*pixels[1].black+cx[2]*
pixels[2].black+cx[3]*pixels[3].black)+cy[1]*(cx[0]*pixels[4].black+
cx[1]*pixels[5].black+cx[2]*pixels[6].black+cx[3]*pixels[7].black)+
cy[2]*(cx[0]*pixels[8].black+cx[1]*pixels[9].black+cx[2]*
pixels[10].black+cx[3]*pixels[11].black)+cy[3]*(cx[0]*
pixels[12].black+cx[1]*pixels[13].black+cx[2]*pixels[14].black+cx[3]*
pixels[15].black));
pixel->alpha=(cy[0]*(cx[0]*pixels[0].alpha+cx[1]*pixels[1].alpha+cx[2]*
pixels[2].alpha+cx[3]*pixels[3].alpha)+cy[1]*(cx[0]*pixels[4].alpha+
cx[1]*pixels[5].alpha+cx[2]*pixels[6].alpha+cx[3]*pixels[7].alpha)+
cy[2]*(cx[0]*pixels[8].alpha+cx[1]*pixels[9].alpha+cx[2]*
pixels[10].alpha+cx[3]*pixels[11].alpha)+cy[3]*(cx[0]*pixels[12].alpha+
cx[1]*pixels[13].alpha+cx[2]*pixels[14].alpha+cx[3]*pixels[15].alpha));
break;
}
}
return(status);
} | 0 | [
"CWE-190"
] | ImageMagick | 406da3af9e09649cda152663c179902edf5ab3ac | 298,044,264,693,074,470,000,000,000,000,000,000,000 | 441 | https://github.com/ImageMagick/ImageMagick/issues/1732 |
*/
static unsigned int
xmlXPathNodeValHash(xmlNodePtr node) {
int len = 2;
const xmlChar * string = NULL;
xmlNodePtr tmp = NULL;
unsigned int ret = 0;
if (node == NULL)
return(0);
if (node->type == XML_DOCUMENT_NODE) {
tmp = xmlDocGetRootElement((xmlDocPtr) node);
if (tmp == NULL)
node = node->children;
else
node = tmp;
if (node == NULL)
return(0);
}
switch (node->type) {
case XML_COMMENT_NODE:
case XML_PI_NODE:
case XML_CDATA_SECTION_NODE:
case XML_TEXT_NODE:
string = node->content;
if (string == NULL)
return(0);
if (string[0] == 0)
return(0);
return(((unsigned int) string[0]) +
(((unsigned int) string[1]) << 8));
case XML_NAMESPACE_DECL:
string = ((xmlNsPtr)node)->href;
if (string == NULL)
return(0);
if (string[0] == 0)
return(0);
return(((unsigned int) string[0]) +
(((unsigned int) string[1]) << 8));
case XML_ATTRIBUTE_NODE:
tmp = ((xmlAttrPtr) node)->children;
break;
case XML_ELEMENT_NODE:
tmp = node->children;
break;
default:
return(0);
}
while (tmp != NULL) {
switch (tmp->type) {
case XML_CDATA_SECTION_NODE:
case XML_TEXT_NODE:
string = tmp->content;
break;
default:
string = NULL;
break;
}
if ((string != NULL) && (string[0] != 0)) {
if (len == 1) {
return(ret + (((unsigned int) string[0]) << 8));
}
if (string[1] == 0) {
len = 1;
ret = (unsigned int) string[0];
} else {
return(((unsigned int) string[0]) +
(((unsigned int) string[1]) << 8));
}
}
/*
* Skip to next node
*/
if ((tmp->children != NULL) && (tmp->type != XML_DTD_NODE)) {
if (tmp->children->type != XML_ENTITY_DECL) {
tmp = tmp->children;
continue;
}
}
if (tmp == node)
break;
if (tmp->next != NULL) {
tmp = tmp->next;
continue;
}
do {
tmp = tmp->parent;
if (tmp == NULL)
break;
if (tmp == node) {
tmp = NULL;
break;
}
if (tmp->next != NULL) {
tmp = tmp->next;
break;
}
} while (tmp != NULL);
} | 0 | [
"CWE-476"
] | libxml2 | a436374994c47b12d5de1b8b1d191a098fa23594 | 66,021,678,249,954,870,000,000,000,000,000,000,000 | 104 | Fix nullptr deref with XPath logic ops
If the XPath stack is corrupted, for example by a misbehaving extension
function, the "and" and "or" XPath operators could dereference NULL
pointers. Check that the XPath stack isn't empty and optimize the
logic operators slightly.
Closes: https://gitlab.gnome.org/GNOME/libxml2/issues/5
Also see
https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=901817
https://bugzilla.redhat.com/show_bug.cgi?id=1595985
This is CVE-2018-14404.
Thanks to Guy Inbar for the report. |
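To make the fix's shape concrete, here is a minimal standalone C sketch of the guard (toy types, not the libxml2 internals): a binary boolean operator must confirm both operands actually came off the value stack before dereferencing them, since a misbehaving extension function can leave the stack short.

#include <stddef.h>

typedef struct { int boolval; } xpath_obj;

typedef struct {
    xpath_obj **stack;   /* value stack */
    int depth;           /* number of pushed objects */
    int error;
} xpath_ctx;

static xpath_obj *value_pop(xpath_ctx *ctxt) {
    if (ctxt->depth <= 0)
        return NULL;                  /* empty stack: report, don't crash */
    return ctxt->stack[--ctxt->depth];
}

static int eval_and(xpath_ctx *ctxt) {
    xpath_obj *rhs = value_pop(ctxt);
    xpath_obj *lhs = value_pop(ctxt);
    if (lhs == NULL || rhs == NULL) { /* previously dereferenced unchecked */
        ctxt->error = 1;
        return 0;
    }
    return lhs->boolval && rhs->boolval;
}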
int uwsgi_str2_num(char *str) {
int num = 0;
num = 10 * (str[0] - 48);
num += str[1] - 48;
return num;
} | 0 | [
"CWE-119",
"CWE-703",
"CWE-787"
] | uwsgi | cb4636f7c0af2e97a4eef7a3cdcbd85a71247bfe | 4,866,224,974,540,100,600,000,000,000,000,000,000 | 9 | improve uwsgi_expand_path() to sanitize input, avoiding stack corruption and potential security issue |
static mz_bool mz_zip_get_file_modified_time(const char *pFilename,
mz_uint16 *pDOS_time,
mz_uint16 *pDOS_date) {
#ifdef MINIZ_NO_TIME
(void)pFilename;
*pDOS_date = *pDOS_time = 0;
#else
struct MZ_FILE_STAT_STRUCT file_stat;
// On Linux with x86 glibc, this call will fail on large files (>= 0x80000000
// bytes) unless you compiled with _LARGEFILE64_SOURCE. Argh.
if (MZ_FILE_STAT(pFilename, &file_stat) != 0) return MZ_FALSE;
mz_zip_time_to_dos_time(file_stat.st_mtime, pDOS_time, pDOS_date);
#endif // #ifdef MINIZ_NO_TIME
return MZ_TRUE;
} | 0 | [
"CWE-20",
"CWE-190"
] | tinyexr | a685e3332f61cd4e59324bf3f669d36973d64270 | 228,090,321,447,284,770,000,000,000,000,000,000,000 | 15 | Make line_no with too large a value (2**20) invalid. Fixes #124
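A hedged sketch of the bound check the message describes (the helper name and the data_height parameter are assumptions; the 2**20 constant mirrors the message):

#include <stdbool.h>

static bool line_no_valid(int line_no, int data_height) {
    if (line_no < 0 || line_no >= (1 << 20))
        return false;               /* absurd scanline number: malformed file */
    return line_no < data_height;   /* must also index into the image */
}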
struct inode *ilookup5(struct super_block *sb, unsigned long hashval,
int (*test)(struct inode *, void *), void *data)
{
struct inode *inode;
again:
inode = ilookup5_nowait(sb, hashval, test, data);
if (inode) {
wait_on_inode(inode);
if (unlikely(inode_unhashed(inode))) {
iput(inode);
goto again;
}
}
return inode;
} | 0 | [
"CWE-269"
] | linux | 0fa3ecd87848c9c93c2c828ef4c3a8ca36ce46c7 | 109,200,420,900,109,340,000,000,000,000,000,000,000 | 15 | Fix up non-directory creation in SGID directories
sgid directories have special semantics, making newly created files in
the directory belong to the group of the directory, and newly created
subdirectories will also become sgid. This is historically used for
group-shared directories.
But group directories writable by non-group members should not imply
that such non-group members can magically join the group, so make sure
to clear the sgid bit on non-directories for non-members (but remember
that sgid without group execute means "mandatory locking", just to
confuse things even more).
Reported-by: Jann Horn <[email protected]>
Cc: Andy Lutomirski <[email protected]>
Cc: Al Viro <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
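A standalone C sketch of the mode computation described above (plain types instead of kernel ones, and the helper name is made up): inherit sgid on new subdirectories, and clear it on non-directories created by non-members without CAP_FSETID, except when group execute is absent, since sgid without group execute encodes mandatory locking and is left alone.

#include <sys/stat.h>
#include <stdbool.h>

mode_t sgid_child_mode(mode_t dir_mode, mode_t mode,
                       bool in_group, bool cap_fsetid) {
    if (dir_mode & S_ISGID) {
        if (S_ISDIR(mode))
            mode |= S_ISGID;        /* sgid propagates to subdirectories */
        else if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP) &&
                 !in_group && !cap_fsetid)
            mode &= ~S_ISGID;       /* non-members can't mint sgid files */
    }
    return mode;
}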
static int on_header_value(http_parser* self_, const char* at, size_t length)
{
HTTPParser* self = static_cast<HTTPParser*>(self_);
switch (self->header_building_state)
{
case 0:
self->header_value.insert(self->header_value.end(), at, at + length);
break;
case 1:
self->header_building_state = 0;
self->header_value.assign(at, at + length);
break;
}
return 0;
} | 0 | [
"CWE-416"
] | Crow | fba01dc76d6ea940ad7c8392e8f39f9647241d8e | 118,023,509,720,426,670,000,000,000,000,000,000,000 | 15 | Prevent HTTP pipelining which Crow doesn't support. |
void CalendarRegressionTest::test4125892() {
UErrorCode status = U_ZERO_ERROR;
GregorianCalendar *cal = (GregorianCalendar*) Calendar::createInstance(status);
if(U_FAILURE(status)) {
dataerrln("Error creating calendar %s", u_errorName(status));
delete cal;
return;
}
DateFormat *fmt = new SimpleDateFormat(UnicodeString("MMMM d, yyyy G"),status);
if(U_FAILURE(status)) {
dataerrln("Error creating SimpleDateFormat - %s", u_errorName(status));
delete cal;
return;
}
cal->clear();
cal->set(UCAL_ERA, GregorianCalendar::BC);
cal->set(UCAL_YEAR, 81); // 81 BC is a leap year (proleptically)
cal->set(UCAL_MONTH, UCAL_FEBRUARY);
cal->set(UCAL_DATE, 28);
cal->add(UCAL_DATE, 1,status);
if(U_FAILURE(status))
errln("add(DATE,1) failed");
if (cal->get(UCAL_DATE,status) != 29 ||
!cal->isLeapYear(-80)) // -80 == 81 BC
errln("Calendar not proleptic");
delete cal;
delete fmt;
} | 0 | [
"CWE-190"
] | icu | 71dd84d4ffd6600a70e5bca56a22b957e6642bd4 | 25,526,573,066,522,320,000,000,000,000,000,000,000 | 29 | ICU-12504 in ICU4C Persian cal, use int64_t math for one operation to avoid overflow; add tests in C and J
X-SVN-Rev: 40654 |
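A one-operation illustration of the remedy the message names (not the actual ICU expression): promote an operand to int64_t before multiplying so the 32-bit product cannot wrap.

#include <stdint.h>

int64_t scaled_days(int32_t years, int32_t days_per_year) {
    return (int64_t)years * days_per_year;  /* 64-bit math avoids overflow */
}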
static int x509_get_uid( unsigned char **p,
const unsigned char *end,
mbedtls_x509_buf *uid, int n )
{
int ret;
if( *p == end )
return( 0 );
uid->tag = **p;
if( ( ret = mbedtls_asn1_get_tag( p, end, &uid->len,
MBEDTLS_ASN1_CONTEXT_SPECIFIC | MBEDTLS_ASN1_CONSTRUCTED | n ) ) != 0 )
{
if( ret == MBEDTLS_ERR_ASN1_UNEXPECTED_TAG )
return( 0 );
return( ret );
}
uid->p = *p;
*p += uid->len;
return( 0 );
} | 0 | [
"CWE-287",
"CWE-284"
] | mbedtls | d15795acd5074e0b44e71f7ede8bdfe1b48591fc | 122,639,152,589,735,000,000,000,000,000,000,000,000 | 25 | Improve behaviour on fatal errors
If we didn't walk the whole chain, then there may be any kind of errors in the
part of the chain we didn't check, so setting all flags looks like the safe
thing to do. |
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
const TfLiteTensor* diag = GetInput(context, node, kDiagonalTensor);
FillDiagHelper(input, diag, output);
return kTfLiteOk;
} | 1 | [
"CWE-125",
"CWE-787"
] | tensorflow | 1970c2158b1ffa416d159d03c3370b9a462aee35 | 136,972,669,465,364,800,000,000,000,000,000,000,000 | 7 | [tflite]: Insert `nullptr` checks when obtaining tensors.
As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages.
We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`).
PiperOrigin-RevId: 332521299
Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56 |
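A hedged sketch of the guard pattern (get_input below is a stand-in, not the real tflite::GetInput signature): once a tensor getter may return a null pointer, every use site has to check before dereferencing rather than crash.

#include <stddef.h>

typedef struct { const void *data; } tensor_t;

static const tensor_t sample = { "sample" };

/* stand-in getter; index 1 simulates the new failure path */
static const tensor_t *get_input(int index) {
    return index == 0 ? &sample : NULL;
}

int eval_checked(void) {
    const tensor_t *input = get_input(0);
    const tensor_t *diag  = get_input(1);
    if (input == NULL || diag == NULL)
        return -1;               /* report an error instead of crashing */
    /* ... only now is it safe to touch input->data and diag->data ... */
    return 0;
}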
process_pointer_pdu(STREAM s)
{
uint16 message_type;
uint16 x, y;
logger(Protocol, Debug, "%s()", __func__);
in_uint16_le(s, message_type);
in_uint8s(s, 2); /* pad */
switch (message_type)
{
case RDP_POINTER_MOVE:
in_uint16_le(s, x);
in_uint16_le(s, y);
if (s_check(s))
ui_move_pointer(x, y);
break;
case RDP_POINTER_COLOR:
process_colour_pointer_pdu(s);
break;
case RDP_POINTER_CACHED:
process_cached_pointer_pdu(s);
break;
case RDP_POINTER_SYSTEM:
process_system_pointer_pdu(s);
break;
case RDP_POINTER_NEW:
process_new_pointer_pdu(s);
break;
default:
logger(Protocol, Warning,
"process_pointer_pdu(), unhandled message type 0x%x", message_type);
}
} | 0 | [
"CWE-119",
"CWE-125",
"CWE-703",
"CWE-787"
] | rdesktop | 4dca546d04321a610c1835010b5dad85163b65e1 | 192,928,451,169,130,570,000,000,000,000,000,000,000 | 40 | Malicious RDP server security fixes
This commit includes fixes for a set of 21 vulnerabilities in
rdesktop when a malicious RDP server is used.
All vulnerabilities was identified and reported by Eyal Itkin.
* Add rdp_protocol_error function that is used in several fixes
* Refactor of process_bitmap_updates
* Fix possible integer overflow in s_check_rem() on 32bit arch
* Fix memory corruption in process_bitmap_data - CVE-2018-8794
* Fix remote code execution in process_bitmap_data - CVE-2018-8795
* Fix remote code execution in process_plane - CVE-2018-8797
* Fix Denial of Service in mcs_recv_connect_response - CVE-2018-20175
* Fix Denial of Service in mcs_parse_domain_params - CVE-2018-20175
* Fix Denial of Service in sec_parse_crypt_info - CVE-2018-20176
* Fix Denial of Service in sec_recv - CVE-2018-20176
* Fix minor information leak in rdpdr_process - CVE-2018-8791
* Fix Denial of Service in cssp_read_tsrequest - CVE-2018-8792
* Fix remote code execution in cssp_read_tsrequest - CVE-2018-8793
* Fix Denial of Service in process_bitmap_data - CVE-2018-8796
* Fix minor information leak in rdpsnd_process_ping - CVE-2018-8798
* Fix Denial of Service in process_secondary_order - CVE-2018-8799
* Fix remote code execution in ui_clip_handle_data - CVE-2018-8800
* Fix major information leak in ui_clip_handle_data - CVE-2018-20174
* Fix memory corruption in rdp_in_unistr - CVE-2018-20177
* Fix Denial of Service in process_demand_active - CVE-2018-20178
* Fix remote code execution in lspci_process - CVE-2018-20179
* Fix remote code execution in rdpsnddbg_process - CVE-2018-20180
* Fix remote code execution in seamless_process - CVE-2018-20181
* Fix remote code execution in seamless_process_line - CVE-2018-20182 |
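One item above, the s_check_rem() overflow on 32-bit, has a classic shape worth spelling out; this is an illustrative reconstruction, not the rdesktop code, and it assumes the invariant p <= end holds:

#include <stddef.h>
#include <stdbool.h>

struct stream { unsigned char *p, *end; };

static bool s_check_rem_safe(const struct stream *s, size_t n) {
    /* "s->p + n <= s->end" can wrap when n is attacker-controlled;
       comparing against the remaining length cannot. */
    return n <= (size_t)(s->end - s->p);
}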
int fib6_lookup(struct net *net, int oif, struct flowi6 *fl6,
struct fib6_result *res, int flags)
{
int err;
if (net->ipv6.fib6_has_custom_rules) {
struct fib_lookup_arg arg = {
.lookup_ptr = fib6_table_lookup,
.lookup_data = &oif,
.result = res,
.flags = FIB_LOOKUP_NOREF,
};
l3mdev_update_flow(net, flowi6_to_flowi(fl6));
err = fib_rules_lookup(net->ipv6.fib6_rules_ops,
flowi6_to_flowi(fl6), flags, &arg);
} else {
err = fib6_table_lookup(net, net->ipv6.fib6_local_tbl, oif,
fl6, res, flags);
if (err || res->f6i == net->ipv6.fib6_null_entry)
err = fib6_table_lookup(net, net->ipv6.fib6_main_tbl,
oif, fl6, res, flags);
}
return err;
} | 0 | [
"CWE-772",
"CWE-401"
] | linux | ca7a03c4175366a92cee0ccc4fec0038c3266e26 | 75,495,365,219,470,710,000,000,000,000,000,000,000 | 27 | ipv6: do not free rt if FIB_LOOKUP_NOREF is set on suppress rule
Commit 7d9e5f422150 removed references from certain dsts, but accounting
for this never translated down into the fib6 suppression code. This bug
was triggered by WireGuard users who use wg-quick(8), which uses the
"suppress-prefix" directive to ip-rule(8) for routing all of their
internet traffic without routing loops. The test case added here
triggers the reference underflow by making packets evaluate a suppress
rule.
Fixes: 7d9e5f422150 ("ipv6: convert major tx path to use RT6_LOOKUP_F_DST_NOREF")
Signed-off-by: Jason A. Donenfeld <[email protected]>
Acked-by: Wei Wang <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
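The title states the mechanism, so a simplified sketch is reasonably safe to give (kernel types stripped away): the suppress path may only drop a route reference when the lookup actually took one, that is, when FIB_LOOKUP_NOREF is clear.

#define FIB_LOOKUP_NOREF 1

struct route { int refcnt; };

static void rt_put(struct route *rt) { rt->refcnt--; }

static void suppress_route(struct route *rt, unsigned int lookup_flags) {
    if (!(lookup_flags & FIB_LOOKUP_NOREF))
        rt_put(rt);   /* an unconditional put here caused the underflow */
}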
static int ceph_x_build_authorizer(struct ceph_auth_client *ac,
struct ceph_x_ticket_handler *th,
struct ceph_x_authorizer *au)
{
int maxlen;
struct ceph_x_authorize_a *msg_a;
struct ceph_x_authorize_b msg_b;
void *p, *end;
int ret;
int ticket_blob_len =
(th->ticket_blob ? th->ticket_blob->vec.iov_len : 0);
dout("build_authorizer for %s %p\n",
ceph_entity_type_name(th->service), au);
maxlen = sizeof(*msg_a) + sizeof(msg_b) +
ceph_x_encrypt_buflen(ticket_blob_len);
dout(" need len %d\n", maxlen);
if (au->buf && au->buf->alloc_len < maxlen) {
ceph_buffer_put(au->buf);
au->buf = NULL;
}
if (!au->buf) {
au->buf = ceph_buffer_new(maxlen, GFP_NOFS);
if (!au->buf)
return -ENOMEM;
}
au->service = th->service;
au->secret_id = th->secret_id;
msg_a = au->buf->vec.iov_base;
msg_a->struct_v = 1;
msg_a->global_id = cpu_to_le64(ac->global_id);
msg_a->service_id = cpu_to_le32(th->service);
msg_a->ticket_blob.struct_v = 1;
msg_a->ticket_blob.secret_id = cpu_to_le64(th->secret_id);
msg_a->ticket_blob.blob_len = cpu_to_le32(ticket_blob_len);
if (ticket_blob_len) {
memcpy(msg_a->ticket_blob.blob, th->ticket_blob->vec.iov_base,
th->ticket_blob->vec.iov_len);
}
dout(" th %p secret_id %lld %lld\n", th, th->secret_id,
le64_to_cpu(msg_a->ticket_blob.secret_id));
p = msg_a + 1;
p += ticket_blob_len;
end = au->buf->vec.iov_base + au->buf->vec.iov_len;
get_random_bytes(&au->nonce, sizeof(au->nonce));
msg_b.struct_v = 1;
msg_b.nonce = cpu_to_le64(au->nonce);
ret = ceph_x_encrypt(&th->session_key, &msg_b, sizeof(msg_b),
p, end - p);
if (ret < 0)
goto out_buf;
p += ret;
au->buf->vec.iov_len = p - au->buf->vec.iov_base;
dout(" built authorizer nonce %llx len %d\n", au->nonce,
(int)au->buf->vec.iov_len);
BUG_ON(au->buf->vec.iov_len > maxlen);
return 0;
out_buf:
ceph_buffer_put(au->buf);
au->buf = NULL;
return ret;
} | 0 | [
"CWE-399",
"CWE-119"
] | linux | c27a3e4d667fdcad3db7b104f75659478e0c68d8 | 246,426,966,969,060,170,000,000,000,000,000,000,000 | 67 | libceph: do not hard code max auth ticket len
We hard code cephx auth ticket buffer size to 256 bytes. This isn't
enough for any moderate setups and, in case tickets themselves are not
encrypted, leads to buffer overflows (ceph_x_decrypt() errors out, but
ceph_decode_copy() doesn't - it's just a memcpy() wrapper). Since the
buffer is allocated dynamically anyway, allocate it a bit later, at
the point where we know how much is going to be needed.
Fixes: http://tracker.ceph.com/issues/8979
Cc: [email protected]
Signed-off-by: Ilya Dryomov <[email protected]>
Reviewed-by: Sage Weil <[email protected]> |
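A hedged sketch of the size-first allocation pattern the message describes (toy signature, not the ceph structures): read and validate the declared length, then allocate exactly what is needed instead of a fixed 256-byte buffer.

#include <stdlib.h>
#include <string.h>

unsigned char *copy_ticket_blob(const unsigned char *src, size_t declared_len,
                                size_t avail) {
    if (declared_len > avail)           /* declared length must fit the input */
        return NULL;
    unsigned char *buf = malloc(declared_len ? declared_len : 1);
    if (buf == NULL)
        return NULL;
    memcpy(buf, src, declared_len);     /* no fixed-size buffer to overflow */
    return buf;
}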
_gnutls_kx_needs_dh_params (gnutls_kx_algorithm_t algorithm)
{
ssize_t ret = 0;
GNUTLS_KX_ALG_LOOP (ret = p->needs_dh_params);
return ret;
} | 0 | [
"CWE-310"
] | gnutls | 34d87a7c3f12794a3ec2305cd2fdbae152bf2a76 | 281,071,938,565,323,760,000,000,000,000,000,000,000 | 6 | (_gnutls_x509_oid2mac_algorithm): Don't crash trying to strcmp the
NULL OID value in the hash_algorithms array, which happens when the
input OID doesn't match our OIDs for SHA1, MD5, MD2 or RIPEMD160.
Reported by satyakumar <[email protected]>. |
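A minimal sketch matching the description (the table layout is illustrative): skip entries whose OID is NULL before calling strcmp(), so an unmatched input OID can never reach strcmp(NULL, ...).

#include <string.h>

struct alg_entry { const char *oid; int algorithm; };

int oid2mac_algorithm(const struct alg_entry *table, const char *oid) {
    for (const struct alg_entry *p = table; p->algorithm != 0; p++) {
        if (p->oid != NULL && strcmp(p->oid, oid) == 0)
            return p->algorithm;
    }
    return 0;   /* unknown algorithm, not a crash */
}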
http_close (http_t hd, int keep_read_stream)
{
if (!hd)
return;
log_assert (hd->magic == HTTP_CONTEXT_MAGIC);
/* First remove the close notifications for the streams. */
if (hd->fp_read)
es_onclose (hd->fp_read, 0, fp_onclose_notification, hd);
if (hd->fp_write)
es_onclose (hd->fp_write, 0, fp_onclose_notification, hd);
/* Now we can close the streams. */
my_socket_unref (hd->sock, NULL, NULL);
if (hd->fp_read && !keep_read_stream)
es_fclose (hd->fp_read);
if (hd->fp_write)
es_fclose (hd->fp_write);
http_session_unref (hd->session);
hd->magic = 0xdeadbeef;
http_release_parsed_uri (hd->uri);
while (hd->headers)
{
header_t tmp = hd->headers->next;
xfree (hd->headers->value);
xfree (hd->headers);
hd->headers = tmp;
}
xfree (hd->buffer);
xfree (hd);
} | 0 | [
"CWE-352"
] | gnupg | 4a4bb874f63741026bd26264c43bb32b1099f060 | 36,255,248,119,649,250,000,000,000,000,000,000,000 | 32 | dirmngr: Avoid possible CSRF attacks via http redirects.
* dirmngr/http.h (parsed_uri_s): Add fields off_host and off_path.
(http_redir_info_t): New.
* dirmngr/http.c (do_parse_uri): Set new fields.
(same_host_p): New.
(http_prepare_redirect): New.
* dirmngr/t-http-basic.c: New test.
* dirmngr/ks-engine-hkp.c (send_request): Use http_prepare_redirect
instead of the open code.
* dirmngr/ks-engine-http.c (ks_http_fetch): Ditto.
--
With this change a http query will not follow a redirect unless the
Location header gives the same host. If the host is different only
the host and port is taken from the Location header and the original
path and query parts are kept.
Signed-off-by: Werner Koch <[email protected]>
(cherry picked from commit fa1b1eaa4241ff3f0634c8bdf8591cbc7c464144) |
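A simplified sketch of that redirect policy (not the dirmngr implementation; real code also compares scheme and port, and same_host here is a bare strcmp):

#include <stdio.h>
#include <string.h>
#include <stdbool.h>

static bool same_host(const char *a, const char *b) {
    return strcmp(a, b) == 0;
}

/* Build the URL we are willing to follow; returns bytes written or -1. */
int prepare_redirect(char *out, size_t outlen,
                     const char *orig_host, const char *orig_path,
                     const char *loc_host, const char *loc_path) {
    if (same_host(orig_host, loc_host))
        return snprintf(out, outlen, "https://%s%s", loc_host, loc_path);
    /* cross-host: take only host (and port) from Location, keep our path */
    return snprintf(out, outlen, "https://%s%s", loc_host, orig_path);
}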
GF_Err moof_box_read(GF_Box *s, GF_BitStream *bs)
{
return gf_isom_box_array_read(s, bs);
} | 0 | [
"CWE-476",
"CWE-787"
] | gpac | b8f8b202d4fc23eb0ab4ce71ae96536ca6f5d3f8 | 139,864,788,700,003,810,000,000,000,000,000,000,000 | 4 | fixed #1757 |
static ssize_t wolfssl_recv(struct Curl_easy *data,
int num,
char *buf,
size_t buffersize,
CURLcode *curlcode)
{
struct connectdata *conn = data->conn;
struct ssl_connect_data *connssl = &conn->ssl[num];
struct ssl_backend_data *backend = connssl->backend;
char error_buffer[WOLFSSL_MAX_ERROR_SZ];
int buffsize = (buffersize > (size_t)INT_MAX) ? INT_MAX : (int)buffersize;
int nread = SSL_read(backend->handle, buf, buffsize);
if(nread < 0) {
int err = SSL_get_error(backend->handle, nread);
switch(err) {
case SSL_ERROR_ZERO_RETURN: /* no more data */
break;
case SSL_ERROR_WANT_READ:
case SSL_ERROR_WANT_WRITE:
/* there's data pending, re-invoke SSL_read() */
*curlcode = CURLE_AGAIN;
return -1;
default:
failf(data, "SSL read: %s, errno %d",
ERR_error_string(err, error_buffer), SOCKERRNO);
*curlcode = CURLE_RECV_ERROR;
return -1;
}
}
return nread;
} | 0 | [
"CWE-290"
] | curl | b09c8ee15771c614c4bf3ddac893cdb12187c844 | 242,805,741,819,328,400,000,000,000,000,000,000,000 | 33 | vtls: add 'isproxy' argument to Curl_ssl_get/addsessionid()
To make sure we set and extract the correct session.
Reported-by: Mingtao Yang
Bug: https://curl.se/docs/CVE-2021-22890.html
CVE-2021-22890 |
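A toy sketch of why the extra flag matters (the structures are assumptions, not libcurl's): a TLS session cache keyed only by host and port can resume a proxy-leg session on an origin connection and vice versa; folding isproxy into the key separates the two.

#include <string.h>
#include <stdbool.h>

struct session_key { char host[64]; int port; bool isproxy; };

static bool key_match(const struct session_key *a, const struct session_key *b) {
    return a->port == b->port &&
           a->isproxy == b->isproxy &&   /* the added discriminator */
           strcmp(a->host, b->host) == 0;
}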
static struct passwd *check_user(const char *user)
{
#if !defined(__WIN__)
struct passwd *tmp_user_info;
uid_t user_id= geteuid();
// Don't bother if we aren't superuser
if (user_id)
{
if (user)
{
/* Don't give a warning, if real user is same as given with --user */
/* purecov: begin tested */
tmp_user_info= getpwnam(user);
if ((!tmp_user_info || user_id != tmp_user_info->pw_uid) &&
global_system_variables.log_warnings)
sql_print_warning(
"One can only use the --user switch if running as root\n");
/* purecov: end */
}
return NULL;
}
if (!user)
{
if (!opt_bootstrap && !opt_help)
{
sql_print_error("Fatal error: Please read \"Security\" section of the manual to find out how to run mysqld as root!\n");
unireg_abort(1);
}
return NULL;
}
/* purecov: begin tested */
if (!strcmp(user,"root"))
return NULL; // Avoid problem with dynamic libraries
if (!(tmp_user_info= getpwnam(user)))
{
// Allow a numeric uid to be used
const char *pos;
for (pos= user; my_isdigit(mysqld_charset,*pos); pos++) ;
if (*pos) // Not numeric id
goto err;
if (!(tmp_user_info= getpwuid(atoi(user))))
goto err;
}
return tmp_user_info;
/* purecov: end */
err:
sql_print_error("Fatal error: Can't change to run as user '%s' ; Please check that the user exists!\n",user);
unireg_abort(1);
#ifdef PR_SET_DUMPABLE
if (test_flags & TEST_CORE_ON_SIGNAL)
{
/* inform kernel that process is dumpable */
(void) prctl(PR_SET_DUMPABLE, 1);
}
#endif
#endif
return NULL;
} | 0 | [
"CWE-264"
] | mysql-server | 48bd8b16fe382be302c6f0b45931be5aa6f29a0e | 199,657,477,419,856,160,000,000,000,000,000,000,000 | 63 | Bug#24388753: PRIVILEGE ESCALATION USING MYSQLD_SAFE
[This is the 5.5/5.6 version of the bugfix].
The problem was that it was possible to write log files ending
in .ini/.cnf that later could be parsed as an options file.
This made it possible for users to specify startup options
without the permissions to do so.
This patch fixes the problem by disallowing general query log
and slow query log to be written to files ending in .ini and .cnf. |
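An illustrative suffix check in the spirit of the fix (the helper names are made up; the real server may also need to handle case differences):

#include <string.h>
#include <stdbool.h>

static bool ends_with(const char *s, const char *suf) {
    size_t ls = strlen(s), lf = strlen(suf);
    return ls >= lf && strcmp(s + ls - lf, suf) == 0;
}

bool log_filename_allowed(const char *name) {
    /* a log ending in .ini/.cnf could later be parsed as an options file */
    return !ends_with(name, ".ini") && !ends_with(name, ".cnf");
}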
bool Window_funcs_sort::exec(JOIN *join, bool keep_filesort_result)
{
THD *thd= join->thd;
JOIN_TAB *join_tab= join->join_tab + join->total_join_tab_cnt();
/* Sort the table based on the most specific sorting criteria of
the window functions. */
if (create_sort_index(thd, join, join_tab, filesort))
return true;
TABLE *tbl= join_tab->table;
SORT_INFO *filesort_result= join_tab->filesort_result;
bool is_error= runner.exec(thd, tbl, filesort_result);
if (!keep_filesort_result)
{
delete join_tab->filesort_result;
join_tab->filesort_result= NULL;
}
return is_error;
} | 0 | [] | server | ba4927e520190bbad763bb5260ae154f29a61231 | 337,361,263,906,908,860,000,000,000,000,000,000,000 | 22 | MDEV-19398: Assertion `item1->type() == Item::FIELD_ITEM ...
Window Functions code tries to minimize the number of times it
needs to sort the select's resultset by finding "compatible"
OVER (PARTITION BY ... ORDER BY ...) clauses.
This employs compare_order_elements(). That function assumed that
the order expressions are Item_field-derived objects (that refer
to a temp.table). But this is not always the case: one can
construct queries where order expressions are arbitrary item expressions.
Add handling for such expressions: sort them according to the window
specification they appeared in.
This means we cannot detect that two compatible PARTITION BY clauses
that use expressions can share the sorting step.
But at least we won't crash. |
static void do_notify_parent_cldstop(struct task_struct *tsk,
bool for_ptracer, int why)
{
struct siginfo info;
unsigned long flags;
struct task_struct *parent;
struct sighand_struct *sighand;
cputime_t utime, stime;
if (for_ptracer) {
parent = tsk->parent;
} else {
tsk = tsk->group_leader;
parent = tsk->real_parent;
}
info.si_signo = SIGCHLD;
info.si_errno = 0;
/*
* see comment in do_notify_parent() about the following 4 lines
*/
rcu_read_lock();
info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
rcu_read_unlock();
task_cputime(tsk, &utime, &stime);
info.si_utime = cputime_to_clock_t(utime);
info.si_stime = cputime_to_clock_t(stime);
info.si_code = why;
switch (why) {
case CLD_CONTINUED:
info.si_status = SIGCONT;
break;
case CLD_STOPPED:
info.si_status = tsk->signal->group_exit_code & 0x7f;
break;
case CLD_TRAPPED:
info.si_status = tsk->exit_code & 0x7f;
break;
default:
BUG();
}
sighand = parent->sighand;
spin_lock_irqsave(&sighand->siglock, flags);
if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
!(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
__group_send_sig_info(SIGCHLD, &info, parent);
/*
* Even if SIGCHLD is not generated, we must wake up wait4 calls.
*/
__wake_up_parent(tsk, parent);
spin_unlock_irqrestore(&sighand->siglock, flags);
} | 0 | [
"CWE-399"
] | linux | b9e146d8eb3b9ecae5086d373b50fa0c1f3e7f0f | 89,348,323,099,764,310,000,000,000,000,000,000,000 | 56 | kernel/signal.c: stop info leak via the tkill and the tgkill syscalls
This fixes a kernel memory contents leak via the tkill and tgkill syscalls
for compat processes.
This is visible in the siginfo_t->_sifields._rt.si_sigval.sival_ptr field
when handling signals delivered from tkill.
The place of the infoleak:
int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
{
...
put_user_ex(ptr_to_compat(from->si_ptr), &to->si_ptr);
...
}
Signed-off-by: Emese Revfy <[email protected]>
Reviewed-by: PaX Team <[email protected]>
Signed-off-by: Kees Cook <[email protected]>
Cc: Al Viro <[email protected]>
Cc: Oleg Nesterov <[email protected]>
Cc: "Eric W. Biederman" <[email protected]>
Cc: Serge Hallyn <[email protected]>
Cc: <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
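A sketch of the leak shape and the usual remedy (toy struct, not the kernel's siginfo_t): a structure copied to userspace member-by-member can leak stale stack bytes through members and padding that were never written, so clear the whole object before populating it.

#include <string.h>

struct toy_siginfo {
    int si_signo, si_errno, si_code;
    union { long sival_int; void *sival_ptr; } si_value;
};

void fill_siginfo(struct toy_siginfo *info, int signo) {
    memset(info, 0, sizeof(*info));  /* no uninitialized padding or members */
    info->si_signo = signo;
    info->si_errno = 0;
    info->si_code  = 0;
}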
void init(RGWRados *store, struct req_state *s, RGWHandler *h) override {
RGWOp::init(store, s, h);
} | 0 | [
"CWE-770"
] | ceph | ab29bed2fc9f961fe895de1086a8208e21ddaddc | 106,216,626,035,916,800,000,000,000,000,000,000,000 | 3 | rgw: fix issues with 'enforce bounds' patch
The patch to enforce bounds on max-keys/max-uploads/max-parts had a few
issues that would prevent us from compiling it. Instead of changing the
code provided by the submitter, we're addressing them in a separate
commit to maintain the DCO.
Signed-off-by: Joao Eduardo Luis <[email protected]>
Signed-off-by: Abhishek Lekshmanan <[email protected]>
(cherry picked from commit 29bc434a6a81a2e5c5b8cfc4c8d5c82ca5bf538a)
mimic specific fixes:
As the largeish g_conf() change from master isn't in mimic yet, use the g_conf
global structure; also make rgw_op use the value from the req_info ceph context,
as we do for all requests
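The bounds enforcement itself reduces to a small clamp; a hedged sketch (names and defaults are illustrative):

static int clamp_max(int requested, int dflt, int hard_limit) {
    if (requested < 0)
        return dflt;                 /* unset or nonsensical: use default */
    return requested > hard_limit ? hard_limit : requested;
}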
bool Smb4KUnmountJob::createUnmountAction(Smb4KShare *share, Action *action)
{
Q_ASSERT(share);
Q_ASSERT(action);
if (!share || !action)
{
return false;
}
else
{
// Do nothing
}
// Find the umount program.
QString umount;
QStringList paths;
paths << "/bin";
paths << "/sbin";
paths << "/usr/bin";
paths << "/usr/sbin";
paths << "/usr/local/bin";
paths << "/usr/local/sbin";
for ( int i = 0; i < paths.size(); ++i )
{
umount = KGlobal::dirs()->findExe("umount", paths.at(i));
if (!umount.isEmpty())
{
break;
}
else
{
continue;
}
}
if (umount.isEmpty() && !m_silent)
{
Smb4KNotification::commandNotFound("umount");
return false;
}
else
{
// Do nothing
}
QStringList options;
#if defined(Q_OS_LINUX)
if (m_force)
{
options << "-l"; // lazy unmount
}
else
{
// Do nothing
}
#endif
action->setName("net.sourceforge.smb4k.mounthelper.unmount");
action->setHelperID("net.sourceforge.smb4k.mounthelper");
action->addArgument("mh_command", umount);
action->addArgument("mh_url", share->url().url());
action->addArgument("mh_mountpoint", share->canonicalPath());
action->addArgument("mh_options", options);
return true;
} | 1 | [
"CWE-20"
] | smb4k | 71554140bdaede27b95dbe4c9b5a028a83c83cce | 88,554,569,460,871,600,000,000,000,000,000,000,000 | 69 | Find the mount/umount commands in the helper
Instead of trusting what we get passed in
CVE-2017-8849 |
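A plain-C sketch of the hardened lookup (the actual helper is KDE/Qt code, and the directory list is an assumption): the privileged helper resolves umount from fixed trusted directories instead of executing whatever path the unprivileged caller supplied.

#include <stdio.h>
#include <unistd.h>

const char *find_umount(char *buf, size_t buflen) {
    static const char *dirs[] = { "/bin", "/sbin", "/usr/bin", "/usr/sbin" };
    for (size_t i = 0; i < sizeof(dirs) / sizeof(dirs[0]); i++) {
        snprintf(buf, buflen, "%s/umount", dirs[i]);
        if (access(buf, X_OK) == 0)
            return buf;            /* trusted location, not caller input */
    }
    return NULL;
}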
int re_yyget_debug (yyscan_t yyscanner)
{
struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
return yy_flex_debug; | 0 | [
"CWE-476",
"CWE-703",
"CWE-125"
] | yara | 3119b232c9c453c98d8fa8b6ae4e37ba18117cd4 | 225,131,417,393,502,160,000,000,000,000,000,000,000 | 5 | re_lexer: Make reading escape sequences more robust (#586)
* Add test for issue #503
* re_lexer: Make reading escape sequences more robust
This commit fixes parsing incomplete escape sequences at the end of a
regular expression and parsing things like \xxy (invalid hex digits)
which before were silently turned into (char)255.
Close #503
* Update re_lexer.c |
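A standalone sketch of a robust \xNN reader in the spirit of the fix (not the flex lexer itself): fail on truncated sequences and on non-hex digits instead of silently yielding a bogus byte.

#include <ctype.h>

/* Returns the decoded byte (0..255) or -1 on malformed input. */
int read_hex_escape(const char *p, const char *end) {
    if (end - p < 2)                        /* "\x" truncated at pattern end */
        return -1;
    if (!isxdigit((unsigned char)p[0]) || !isxdigit((unsigned char)p[1]))
        return -1;                          /* e.g. "\xxy" used to slip through */
    int hi = isdigit((unsigned char)p[0])
                 ? p[0] - '0' : tolower((unsigned char)p[0]) - 'a' + 10;
    int lo = isdigit((unsigned char)p[1])
                 ? p[1] - '0' : tolower((unsigned char)p[1]) - 'a' + 10;
    return (hi << 4) | lo;
}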
void init_update_queries(void)
{
/* Initialize the server command flags array. */
memset(server_command_flags, 0, sizeof(server_command_flags));
server_command_flags[COM_STATISTICS]= CF_SKIP_QUERY_ID | CF_SKIP_QUESTIONS | CF_SKIP_WSREP_CHECK;
server_command_flags[COM_PING]= CF_SKIP_QUERY_ID | CF_SKIP_QUESTIONS | CF_SKIP_WSREP_CHECK | CF_NO_COM_MULTI;
server_command_flags[COM_QUIT]= CF_SKIP_WSREP_CHECK;
server_command_flags[COM_PROCESS_INFO]= CF_SKIP_WSREP_CHECK;
server_command_flags[COM_PROCESS_KILL]= CF_SKIP_WSREP_CHECK;
server_command_flags[COM_SHUTDOWN]= CF_SKIP_WSREP_CHECK;
server_command_flags[COM_SLEEP]= CF_SKIP_WSREP_CHECK;
server_command_flags[COM_TIME]= CF_SKIP_WSREP_CHECK;
server_command_flags[COM_INIT_DB]= CF_SKIP_WSREP_CHECK;
server_command_flags[COM_END]= CF_SKIP_WSREP_CHECK;
for (uint i= COM_MDB_GAP_BEG; i <= COM_MDB_GAP_END; i++)
{
server_command_flags[i]= CF_SKIP_WSREP_CHECK;
}
/*
COM_QUERY, COM_SET_OPTION and COM_STMT_XXX are allowed to pass the early
COM_xxx filter, they're checked later in mysql_execute_command().
*/
server_command_flags[COM_QUERY]= CF_SKIP_WSREP_CHECK;
server_command_flags[COM_SET_OPTION]= CF_SKIP_WSREP_CHECK;
server_command_flags[COM_STMT_PREPARE]= CF_SKIP_QUESTIONS | CF_SKIP_WSREP_CHECK;
server_command_flags[COM_STMT_EXECUTE]= CF_SKIP_WSREP_CHECK;
server_command_flags[COM_STMT_FETCH]= CF_SKIP_WSREP_CHECK;
server_command_flags[COM_STMT_CLOSE]= CF_SKIP_QUESTIONS | CF_SKIP_WSREP_CHECK;
server_command_flags[COM_STMT_RESET]= CF_SKIP_QUESTIONS | CF_SKIP_WSREP_CHECK;
server_command_flags[COM_STMT_EXECUTE]= CF_SKIP_WSREP_CHECK;
server_command_flags[COM_STMT_SEND_LONG_DATA]= CF_SKIP_WSREP_CHECK;
server_command_flags[COM_REGISTER_SLAVE]= CF_SKIP_WSREP_CHECK;
server_command_flags[COM_MULTI]= CF_SKIP_WSREP_CHECK | CF_NO_COM_MULTI;
/* Initialize the sql command flags array. */
memset(sql_command_flags, 0, sizeof(sql_command_flags));
/*
In general, DDL statements do not generate row events and do not go
through a cache before being written to the binary log. However, the
CREATE TABLE...SELECT is an exception because it may generate row
events. For that reason, the SQLCOM_CREATE_TABLE which represents
a CREATE TABLE, including the CREATE TABLE...SELECT, has the
CF_CAN_GENERATE_ROW_EVENTS flag. The distinction between a regular
CREATE TABLE and the CREATE TABLE...SELECT is made in other parts of
the code, in particular in the Query_log_event's constructor.
*/
sql_command_flags[SQLCOM_CREATE_TABLE]= CF_CHANGES_DATA | CF_REEXECUTION_FRAGILE |
CF_AUTO_COMMIT_TRANS | CF_REPORT_PROGRESS |
CF_CAN_GENERATE_ROW_EVENTS;
sql_command_flags[SQLCOM_CREATE_INDEX]= CF_CHANGES_DATA | CF_AUTO_COMMIT_TRANS |
CF_REPORT_PROGRESS | CF_ADMIN_COMMAND;
sql_command_flags[SQLCOM_ALTER_TABLE]= CF_CHANGES_DATA | CF_WRITE_LOGS_COMMAND |
CF_AUTO_COMMIT_TRANS | CF_REPORT_PROGRESS |
CF_INSERTS_DATA | CF_ADMIN_COMMAND;
sql_command_flags[SQLCOM_TRUNCATE]= CF_CHANGES_DATA | CF_WRITE_LOGS_COMMAND |
CF_AUTO_COMMIT_TRANS;
sql_command_flags[SQLCOM_DROP_TABLE]= CF_CHANGES_DATA | CF_AUTO_COMMIT_TRANS;
sql_command_flags[SQLCOM_LOAD]= CF_CHANGES_DATA | CF_REEXECUTION_FRAGILE |
CF_CAN_GENERATE_ROW_EVENTS | CF_REPORT_PROGRESS |
CF_INSERTS_DATA;
sql_command_flags[SQLCOM_CREATE_DB]= CF_CHANGES_DATA | CF_AUTO_COMMIT_TRANS;
sql_command_flags[SQLCOM_DROP_DB]= CF_CHANGES_DATA | CF_AUTO_COMMIT_TRANS;
sql_command_flags[SQLCOM_ALTER_DB_UPGRADE]= CF_AUTO_COMMIT_TRANS;
sql_command_flags[SQLCOM_ALTER_DB]= CF_CHANGES_DATA | CF_AUTO_COMMIT_TRANS;
sql_command_flags[SQLCOM_RENAME_TABLE]= CF_CHANGES_DATA | CF_AUTO_COMMIT_TRANS |
CF_ADMIN_COMMAND;
sql_command_flags[SQLCOM_DROP_INDEX]= CF_CHANGES_DATA | CF_AUTO_COMMIT_TRANS |
CF_REPORT_PROGRESS | CF_ADMIN_COMMAND;
sql_command_flags[SQLCOM_CREATE_VIEW]= CF_CHANGES_DATA | CF_REEXECUTION_FRAGILE |
CF_AUTO_COMMIT_TRANS;
sql_command_flags[SQLCOM_DROP_VIEW]= CF_CHANGES_DATA | CF_AUTO_COMMIT_TRANS;
sql_command_flags[SQLCOM_CREATE_TRIGGER]= CF_CHANGES_DATA | CF_AUTO_COMMIT_TRANS;
sql_command_flags[SQLCOM_DROP_TRIGGER]= CF_CHANGES_DATA | CF_AUTO_COMMIT_TRANS;
sql_command_flags[SQLCOM_CREATE_EVENT]= CF_CHANGES_DATA | CF_AUTO_COMMIT_TRANS;
sql_command_flags[SQLCOM_ALTER_EVENT]= CF_CHANGES_DATA | CF_AUTO_COMMIT_TRANS;
sql_command_flags[SQLCOM_DROP_EVENT]= CF_CHANGES_DATA | CF_AUTO_COMMIT_TRANS;
sql_command_flags[SQLCOM_UPDATE]= CF_CHANGES_DATA | CF_REEXECUTION_FRAGILE |
CF_CAN_GENERATE_ROW_EVENTS |
CF_OPTIMIZER_TRACE |
CF_CAN_BE_EXPLAINED |
CF_UPDATES_DATA |
CF_PS_ARRAY_BINDING_SAFE;
sql_command_flags[SQLCOM_UPDATE_MULTI]= CF_CHANGES_DATA | CF_REEXECUTION_FRAGILE |
CF_CAN_GENERATE_ROW_EVENTS |
CF_OPTIMIZER_TRACE |
CF_CAN_BE_EXPLAINED |
CF_UPDATES_DATA |
CF_PS_ARRAY_BINDING_SAFE;
sql_command_flags[SQLCOM_INSERT]= CF_CHANGES_DATA | CF_REEXECUTION_FRAGILE |
CF_CAN_GENERATE_ROW_EVENTS |
CF_OPTIMIZER_TRACE |
CF_CAN_BE_EXPLAINED |
CF_INSERTS_DATA |
CF_PS_ARRAY_BINDING_SAFE |
CF_PS_ARRAY_BINDING_OPTIMIZED;
sql_command_flags[SQLCOM_INSERT_SELECT]= CF_CHANGES_DATA | CF_REEXECUTION_FRAGILE |
CF_CAN_GENERATE_ROW_EVENTS |
CF_OPTIMIZER_TRACE |
CF_CAN_BE_EXPLAINED |
CF_INSERTS_DATA;
sql_command_flags[SQLCOM_DELETE]= CF_CHANGES_DATA | CF_REEXECUTION_FRAGILE |
CF_CAN_GENERATE_ROW_EVENTS |
CF_OPTIMIZER_TRACE |
CF_CAN_BE_EXPLAINED |
CF_PS_ARRAY_BINDING_SAFE;
sql_command_flags[SQLCOM_DELETE_MULTI]= CF_CHANGES_DATA | CF_REEXECUTION_FRAGILE |
CF_CAN_GENERATE_ROW_EVENTS |
CF_OPTIMIZER_TRACE |
                                          CF_CAN_BE_EXPLAINED;
sql_command_flags[SQLCOM_REPLACE]= CF_CHANGES_DATA | CF_REEXECUTION_FRAGILE |
CF_CAN_GENERATE_ROW_EVENTS |
CF_OPTIMIZER_TRACE |
CF_CAN_BE_EXPLAINED |
CF_INSERTS_DATA |
CF_PS_ARRAY_BINDING_SAFE |
CF_PS_ARRAY_BINDING_OPTIMIZED;
sql_command_flags[SQLCOM_REPLACE_SELECT]= CF_CHANGES_DATA | CF_REEXECUTION_FRAGILE |
CF_CAN_GENERATE_ROW_EVENTS |
CF_OPTIMIZER_TRACE |
CF_CAN_BE_EXPLAINED |
CF_INSERTS_DATA;
sql_command_flags[SQLCOM_SELECT]= CF_REEXECUTION_FRAGILE |
CF_CAN_GENERATE_ROW_EVENTS |
CF_OPTIMIZER_TRACE |
CF_CAN_BE_EXPLAINED;
// (1) so that subquery is traced when doing "SET @var = (subquery)"
/*
@todo SQLCOM_SET_OPTION should have CF_CAN_GENERATE_ROW_EVENTS
set, because it may invoke a stored function that generates row
events. /Sven
*/
sql_command_flags[SQLCOM_SET_OPTION]= CF_REEXECUTION_FRAGILE |
CF_AUTO_COMMIT_TRANS |
CF_CAN_GENERATE_ROW_EVENTS |
CF_OPTIMIZER_TRACE; // (1)
// (1) so that subquery is traced when doing "DO @var := (subquery)"
sql_command_flags[SQLCOM_DO]= CF_REEXECUTION_FRAGILE |
CF_CAN_GENERATE_ROW_EVENTS |
CF_OPTIMIZER_TRACE; // (1)
sql_command_flags[SQLCOM_SHOW_STATUS_PROC]= CF_STATUS_COMMAND | CF_REEXECUTION_FRAGILE;
sql_command_flags[SQLCOM_SHOW_STATUS]= CF_STATUS_COMMAND | CF_REEXECUTION_FRAGILE;
sql_command_flags[SQLCOM_SHOW_DATABASES]= CF_STATUS_COMMAND | CF_REEXECUTION_FRAGILE;
sql_command_flags[SQLCOM_SHOW_TRIGGERS]= CF_STATUS_COMMAND | CF_REEXECUTION_FRAGILE;
sql_command_flags[SQLCOM_SHOW_EVENTS]= CF_STATUS_COMMAND | CF_REEXECUTION_FRAGILE;
sql_command_flags[SQLCOM_SHOW_OPEN_TABLES]= CF_STATUS_COMMAND | CF_REEXECUTION_FRAGILE;
sql_command_flags[SQLCOM_SHOW_PLUGINS]= CF_STATUS_COMMAND;
sql_command_flags[SQLCOM_SHOW_GENERIC]= CF_STATUS_COMMAND;
sql_command_flags[SQLCOM_SHOW_FIELDS]= CF_STATUS_COMMAND | CF_REEXECUTION_FRAGILE;
sql_command_flags[SQLCOM_SHOW_KEYS]= CF_STATUS_COMMAND | CF_REEXECUTION_FRAGILE;
sql_command_flags[SQLCOM_SHOW_VARIABLES]= CF_STATUS_COMMAND | CF_REEXECUTION_FRAGILE;
sql_command_flags[SQLCOM_SHOW_CHARSETS]= CF_STATUS_COMMAND | CF_REEXECUTION_FRAGILE;
sql_command_flags[SQLCOM_SHOW_COLLATIONS]= CF_STATUS_COMMAND | CF_REEXECUTION_FRAGILE;
sql_command_flags[SQLCOM_SHOW_BINLOGS]= CF_STATUS_COMMAND;
sql_command_flags[SQLCOM_SHOW_SLAVE_HOSTS]= CF_STATUS_COMMAND;
sql_command_flags[SQLCOM_SHOW_BINLOG_EVENTS]= CF_STATUS_COMMAND;
sql_command_flags[SQLCOM_SHOW_STORAGE_ENGINES]= CF_STATUS_COMMAND;
sql_command_flags[SQLCOM_SHOW_AUTHORS]= CF_STATUS_COMMAND;
sql_command_flags[SQLCOM_SHOW_CONTRIBUTORS]= CF_STATUS_COMMAND;
sql_command_flags[SQLCOM_SHOW_PRIVILEGES]= CF_STATUS_COMMAND;
sql_command_flags[SQLCOM_SHOW_WARNS]= CF_STATUS_COMMAND | CF_DIAGNOSTIC_STMT;
sql_command_flags[SQLCOM_SHOW_ERRORS]= CF_STATUS_COMMAND | CF_DIAGNOSTIC_STMT;
sql_command_flags[SQLCOM_SHOW_ENGINE_STATUS]= CF_STATUS_COMMAND;
sql_command_flags[SQLCOM_SHOW_ENGINE_MUTEX]= CF_STATUS_COMMAND;
sql_command_flags[SQLCOM_SHOW_ENGINE_LOGS]= CF_STATUS_COMMAND;
sql_command_flags[SQLCOM_SHOW_EXPLAIN]= CF_STATUS_COMMAND;
sql_command_flags[SQLCOM_SHOW_PROCESSLIST]= CF_STATUS_COMMAND;
sql_command_flags[SQLCOM_SHOW_GRANTS]= CF_STATUS_COMMAND;
sql_command_flags[SQLCOM_SHOW_CREATE_USER]= CF_STATUS_COMMAND;
sql_command_flags[SQLCOM_SHOW_CREATE_DB]= CF_STATUS_COMMAND;
sql_command_flags[SQLCOM_SHOW_CREATE]= CF_STATUS_COMMAND;
sql_command_flags[SQLCOM_SHOW_MASTER_STAT]= CF_STATUS_COMMAND;
sql_command_flags[SQLCOM_SHOW_SLAVE_STAT]= CF_STATUS_COMMAND;
sql_command_flags[SQLCOM_SHOW_CREATE_PROC]= CF_STATUS_COMMAND;
sql_command_flags[SQLCOM_SHOW_CREATE_FUNC]= CF_STATUS_COMMAND;
sql_command_flags[SQLCOM_SHOW_CREATE_TRIGGER]= CF_STATUS_COMMAND;
sql_command_flags[SQLCOM_SHOW_STATUS_FUNC]= CF_STATUS_COMMAND | CF_REEXECUTION_FRAGILE;
sql_command_flags[SQLCOM_SHOW_PROC_CODE]= CF_STATUS_COMMAND;
sql_command_flags[SQLCOM_SHOW_FUNC_CODE]= CF_STATUS_COMMAND;
sql_command_flags[SQLCOM_SHOW_CREATE_EVENT]= CF_STATUS_COMMAND;
sql_command_flags[SQLCOM_SHOW_PROFILES]= CF_STATUS_COMMAND;
sql_command_flags[SQLCOM_SHOW_PROFILE]= CF_STATUS_COMMAND;
sql_command_flags[SQLCOM_BINLOG_BASE64_EVENT]= CF_STATUS_COMMAND | CF_CAN_GENERATE_ROW_EVENTS;
sql_command_flags[SQLCOM_SHOW_TABLES]= (CF_STATUS_COMMAND | CF_SHOW_TABLE_COMMAND | CF_REEXECUTION_FRAGILE);
sql_command_flags[SQLCOM_SHOW_TABLE_STATUS]= (CF_STATUS_COMMAND | CF_SHOW_TABLE_COMMAND | CF_REEXECUTION_FRAGILE);
sql_command_flags[SQLCOM_CREATE_USER]= CF_CHANGES_DATA;
sql_command_flags[SQLCOM_RENAME_USER]= CF_CHANGES_DATA;
sql_command_flags[SQLCOM_DROP_USER]= CF_CHANGES_DATA;
sql_command_flags[SQLCOM_ALTER_USER]= CF_CHANGES_DATA;
sql_command_flags[SQLCOM_CREATE_ROLE]= CF_CHANGES_DATA;
sql_command_flags[SQLCOM_GRANT]= CF_CHANGES_DATA;
sql_command_flags[SQLCOM_GRANT_ROLE]= CF_CHANGES_DATA;
sql_command_flags[SQLCOM_REVOKE]= CF_CHANGES_DATA;
sql_command_flags[SQLCOM_REVOKE_ROLE]= CF_CHANGES_DATA;
sql_command_flags[SQLCOM_OPTIMIZE]= CF_CHANGES_DATA;
sql_command_flags[SQLCOM_CREATE_FUNCTION]= CF_CHANGES_DATA | CF_AUTO_COMMIT_TRANS;
sql_command_flags[SQLCOM_CREATE_PROCEDURE]= CF_CHANGES_DATA | CF_AUTO_COMMIT_TRANS;
sql_command_flags[SQLCOM_CREATE_SPFUNCTION]= CF_CHANGES_DATA | CF_AUTO_COMMIT_TRANS;
sql_command_flags[SQLCOM_DROP_PROCEDURE]= CF_CHANGES_DATA | CF_AUTO_COMMIT_TRANS;
sql_command_flags[SQLCOM_DROP_FUNCTION]= CF_CHANGES_DATA | CF_AUTO_COMMIT_TRANS;
sql_command_flags[SQLCOM_ALTER_PROCEDURE]= CF_CHANGES_DATA | CF_AUTO_COMMIT_TRANS;
sql_command_flags[SQLCOM_ALTER_FUNCTION]= CF_CHANGES_DATA | CF_AUTO_COMMIT_TRANS;
sql_command_flags[SQLCOM_INSTALL_PLUGIN]= CF_CHANGES_DATA | CF_AUTO_COMMIT_TRANS;
sql_command_flags[SQLCOM_UNINSTALL_PLUGIN]= CF_CHANGES_DATA | CF_AUTO_COMMIT_TRANS;
/*
The following is used to preserver CF_ROW_COUNT during the
a CALL or EXECUTE statement, so the value generated by the
last called (or executed) statement is preserved.
See mysql_execute_command() for how CF_ROW_COUNT is used.
*/
/*
(1): without it, in "CALL some_proc((subq))", subquery would not be
traced.
*/
sql_command_flags[SQLCOM_CALL]= CF_REEXECUTION_FRAGILE |
CF_CAN_GENERATE_ROW_EVENTS |
CF_OPTIMIZER_TRACE; // (1)
sql_command_flags[SQLCOM_EXECUTE]= CF_CAN_GENERATE_ROW_EVENTS;
sql_command_flags[SQLCOM_EXECUTE_IMMEDIATE]= CF_CAN_GENERATE_ROW_EVENTS;
sql_command_flags[SQLCOM_COMPOUND]= CF_CAN_GENERATE_ROW_EVENTS;
/*
We don't want to change to statement based replication for these commands
*/
sql_command_flags[SQLCOM_ROLLBACK]|= CF_FORCE_ORIGINAL_BINLOG_FORMAT;
/* We don't want to replicate ALTER TABLE for temp tables in row format */
sql_command_flags[SQLCOM_ALTER_TABLE]|= CF_FORCE_ORIGINAL_BINLOG_FORMAT;
/* We don't want to replicate TRUNCATE for temp tables in row format */
sql_command_flags[SQLCOM_TRUNCATE]|= CF_FORCE_ORIGINAL_BINLOG_FORMAT;
/* We don't want to replicate DROP for temp tables in row format */
sql_command_flags[SQLCOM_DROP_TABLE]|= CF_FORCE_ORIGINAL_BINLOG_FORMAT;
/* We don't want to replicate CREATE/DROP INDEX for temp tables in row format */
sql_command_flags[SQLCOM_CREATE_INDEX]|= CF_FORCE_ORIGINAL_BINLOG_FORMAT;
sql_command_flags[SQLCOM_DROP_INDEX]|= CF_FORCE_ORIGINAL_BINLOG_FORMAT;
/* One can change replication mode with SET */
sql_command_flags[SQLCOM_SET_OPTION]|= CF_FORCE_ORIGINAL_BINLOG_FORMAT;
/*
The following admin table operations are allowed
on log tables.
*/
sql_command_flags[SQLCOM_REPAIR]= CF_WRITE_LOGS_COMMAND | CF_AUTO_COMMIT_TRANS |
CF_REPORT_PROGRESS | CF_ADMIN_COMMAND;
sql_command_flags[SQLCOM_OPTIMIZE]|= CF_WRITE_LOGS_COMMAND | CF_AUTO_COMMIT_TRANS |
CF_REPORT_PROGRESS | CF_ADMIN_COMMAND;
sql_command_flags[SQLCOM_ANALYZE]= CF_WRITE_LOGS_COMMAND | CF_AUTO_COMMIT_TRANS |
CF_REPORT_PROGRESS | CF_ADMIN_COMMAND;
sql_command_flags[SQLCOM_CHECK]= CF_WRITE_LOGS_COMMAND | CF_AUTO_COMMIT_TRANS |
CF_REPORT_PROGRESS | CF_ADMIN_COMMAND;
sql_command_flags[SQLCOM_CHECKSUM]= CF_REPORT_PROGRESS;
sql_command_flags[SQLCOM_CREATE_USER]|= CF_AUTO_COMMIT_TRANS;
sql_command_flags[SQLCOM_ALTER_USER]|= CF_AUTO_COMMIT_TRANS;
sql_command_flags[SQLCOM_DROP_USER]|= CF_AUTO_COMMIT_TRANS;
sql_command_flags[SQLCOM_RENAME_USER]|= CF_AUTO_COMMIT_TRANS;
sql_command_flags[SQLCOM_CREATE_ROLE]|= CF_AUTO_COMMIT_TRANS;
sql_command_flags[SQLCOM_DROP_ROLE]|= CF_AUTO_COMMIT_TRANS;
sql_command_flags[SQLCOM_REVOKE]|= CF_AUTO_COMMIT_TRANS;
sql_command_flags[SQLCOM_REVOKE_ALL]= CF_AUTO_COMMIT_TRANS;
sql_command_flags[SQLCOM_REVOKE_ROLE]|= CF_AUTO_COMMIT_TRANS;
sql_command_flags[SQLCOM_GRANT]|= CF_AUTO_COMMIT_TRANS;
sql_command_flags[SQLCOM_GRANT_ROLE]|= CF_AUTO_COMMIT_TRANS;
sql_command_flags[SQLCOM_FLUSH]= CF_AUTO_COMMIT_TRANS;
sql_command_flags[SQLCOM_RESET]= CF_AUTO_COMMIT_TRANS;
sql_command_flags[SQLCOM_CREATE_SERVER]= CF_AUTO_COMMIT_TRANS;
sql_command_flags[SQLCOM_ALTER_SERVER]= CF_AUTO_COMMIT_TRANS;
sql_command_flags[SQLCOM_DROP_SERVER]= CF_AUTO_COMMIT_TRANS;
/*
The following statements can deal with temporary tables,
so temporary tables should be pre-opened for those statements to
simplify privilege checking.
There are other statements that deal with temporary tables and open
them, but which are not listed here. The thing is that the order of
pre-opening temporary tables for those statements is somewhat custom.
Note that SQLCOM_RENAME_TABLE should not be in this list!
*/
sql_command_flags[SQLCOM_CREATE_TABLE]|= CF_PREOPEN_TMP_TABLES;
sql_command_flags[SQLCOM_CREATE_INDEX]|= CF_PREOPEN_TMP_TABLES;
sql_command_flags[SQLCOM_ALTER_TABLE]|= CF_PREOPEN_TMP_TABLES;
sql_command_flags[SQLCOM_TRUNCATE]|= CF_PREOPEN_TMP_TABLES;
sql_command_flags[SQLCOM_LOAD]|= CF_PREOPEN_TMP_TABLES;
sql_command_flags[SQLCOM_DROP_INDEX]|= CF_PREOPEN_TMP_TABLES;
sql_command_flags[SQLCOM_UPDATE]|= CF_PREOPEN_TMP_TABLES;
sql_command_flags[SQLCOM_UPDATE_MULTI]|= CF_PREOPEN_TMP_TABLES;
sql_command_flags[SQLCOM_INSERT_SELECT]|= CF_PREOPEN_TMP_TABLES;
sql_command_flags[SQLCOM_DELETE]|= CF_PREOPEN_TMP_TABLES;
sql_command_flags[SQLCOM_DELETE_MULTI]|= CF_PREOPEN_TMP_TABLES;
sql_command_flags[SQLCOM_REPLACE_SELECT]|= CF_PREOPEN_TMP_TABLES;
sql_command_flags[SQLCOM_SELECT]|= CF_PREOPEN_TMP_TABLES;
sql_command_flags[SQLCOM_SET_OPTION]|= CF_PREOPEN_TMP_TABLES;
sql_command_flags[SQLCOM_DO]|= CF_PREOPEN_TMP_TABLES;
sql_command_flags[SQLCOM_HA_OPEN]|= CF_PREOPEN_TMP_TABLES;
sql_command_flags[SQLCOM_CALL]|= CF_PREOPEN_TMP_TABLES;
sql_command_flags[SQLCOM_CHECKSUM]|= CF_PREOPEN_TMP_TABLES;
sql_command_flags[SQLCOM_ANALYZE]|= CF_PREOPEN_TMP_TABLES;
sql_command_flags[SQLCOM_CHECK]|= CF_PREOPEN_TMP_TABLES;
sql_command_flags[SQLCOM_OPTIMIZE]|= CF_PREOPEN_TMP_TABLES;
sql_command_flags[SQLCOM_REPAIR]|= CF_PREOPEN_TMP_TABLES;
sql_command_flags[SQLCOM_PRELOAD_KEYS]|= CF_PREOPEN_TMP_TABLES;
sql_command_flags[SQLCOM_ASSIGN_TO_KEYCACHE]|= CF_PREOPEN_TMP_TABLES;
/*
DDL statements that should start with closing opened handlers.
We use this flag only for statements for which open HANDLERs
have to be closed before temporary tables are pre-opened.
*/
sql_command_flags[SQLCOM_CREATE_TABLE]|= CF_HA_CLOSE;
sql_command_flags[SQLCOM_DROP_TABLE]|= CF_HA_CLOSE;
sql_command_flags[SQLCOM_ALTER_TABLE]|= CF_HA_CLOSE;
sql_command_flags[SQLCOM_TRUNCATE]|= CF_HA_CLOSE;
sql_command_flags[SQLCOM_REPAIR]|= CF_HA_CLOSE;
sql_command_flags[SQLCOM_OPTIMIZE]|= CF_HA_CLOSE;
sql_command_flags[SQLCOM_ANALYZE]|= CF_HA_CLOSE;
sql_command_flags[SQLCOM_CHECK]|= CF_HA_CLOSE;
sql_command_flags[SQLCOM_CREATE_INDEX]|= CF_HA_CLOSE;
sql_command_flags[SQLCOM_DROP_INDEX]|= CF_HA_CLOSE;
sql_command_flags[SQLCOM_PRELOAD_KEYS]|= CF_HA_CLOSE;
sql_command_flags[SQLCOM_ASSIGN_TO_KEYCACHE]|= CF_HA_CLOSE;
/*
Mark statements that always are disallowed in read-only
transactions. Note that according to the SQL standard,
even temporary table DDL should be disallowed.
*/
sql_command_flags[SQLCOM_CREATE_TABLE]|= CF_DISALLOW_IN_RO_TRANS;
sql_command_flags[SQLCOM_ALTER_TABLE]|= CF_DISALLOW_IN_RO_TRANS;
sql_command_flags[SQLCOM_DROP_TABLE]|= CF_DISALLOW_IN_RO_TRANS;
sql_command_flags[SQLCOM_RENAME_TABLE]|= CF_DISALLOW_IN_RO_TRANS;
sql_command_flags[SQLCOM_CREATE_INDEX]|= CF_DISALLOW_IN_RO_TRANS;
sql_command_flags[SQLCOM_DROP_INDEX]|= CF_DISALLOW_IN_RO_TRANS;
sql_command_flags[SQLCOM_CREATE_DB]|= CF_DISALLOW_IN_RO_TRANS;
sql_command_flags[SQLCOM_DROP_DB]|= CF_DISALLOW_IN_RO_TRANS;
sql_command_flags[SQLCOM_ALTER_DB_UPGRADE]|= CF_DISALLOW_IN_RO_TRANS;
sql_command_flags[SQLCOM_ALTER_DB]|= CF_DISALLOW_IN_RO_TRANS;
sql_command_flags[SQLCOM_CREATE_VIEW]|= CF_DISALLOW_IN_RO_TRANS;
sql_command_flags[SQLCOM_DROP_VIEW]|= CF_DISALLOW_IN_RO_TRANS;
sql_command_flags[SQLCOM_CREATE_TRIGGER]|= CF_DISALLOW_IN_RO_TRANS;
sql_command_flags[SQLCOM_DROP_TRIGGER]|= CF_DISALLOW_IN_RO_TRANS;
sql_command_flags[SQLCOM_CREATE_EVENT]|= CF_DISALLOW_IN_RO_TRANS;
sql_command_flags[SQLCOM_ALTER_EVENT]|= CF_DISALLOW_IN_RO_TRANS;
sql_command_flags[SQLCOM_DROP_EVENT]|= CF_DISALLOW_IN_RO_TRANS;
sql_command_flags[SQLCOM_CREATE_USER]|= CF_DISALLOW_IN_RO_TRANS;
sql_command_flags[SQLCOM_ALTER_USER]|= CF_DISALLOW_IN_RO_TRANS;
sql_command_flags[SQLCOM_RENAME_USER]|= CF_DISALLOW_IN_RO_TRANS;
sql_command_flags[SQLCOM_DROP_USER]|= CF_DISALLOW_IN_RO_TRANS;
sql_command_flags[SQLCOM_CREATE_SERVER]|= CF_DISALLOW_IN_RO_TRANS;
sql_command_flags[SQLCOM_ALTER_SERVER]|= CF_DISALLOW_IN_RO_TRANS;
sql_command_flags[SQLCOM_DROP_SERVER]|= CF_DISALLOW_IN_RO_TRANS;
sql_command_flags[SQLCOM_CREATE_FUNCTION]|= CF_DISALLOW_IN_RO_TRANS;
sql_command_flags[SQLCOM_CREATE_PROCEDURE]|= CF_DISALLOW_IN_RO_TRANS;
sql_command_flags[SQLCOM_CREATE_SPFUNCTION]|=CF_DISALLOW_IN_RO_TRANS;
sql_command_flags[SQLCOM_DROP_PROCEDURE]|= CF_DISALLOW_IN_RO_TRANS;
sql_command_flags[SQLCOM_DROP_FUNCTION]|= CF_DISALLOW_IN_RO_TRANS;
sql_command_flags[SQLCOM_ALTER_PROCEDURE]|= CF_DISALLOW_IN_RO_TRANS;
sql_command_flags[SQLCOM_ALTER_FUNCTION]|= CF_DISALLOW_IN_RO_TRANS;
sql_command_flags[SQLCOM_TRUNCATE]|= CF_DISALLOW_IN_RO_TRANS;
sql_command_flags[SQLCOM_ALTER_TABLESPACE]|= CF_DISALLOW_IN_RO_TRANS;
sql_command_flags[SQLCOM_REPAIR]|= CF_DISALLOW_IN_RO_TRANS;
sql_command_flags[SQLCOM_OPTIMIZE]|= CF_DISALLOW_IN_RO_TRANS;
sql_command_flags[SQLCOM_GRANT]|= CF_DISALLOW_IN_RO_TRANS;
sql_command_flags[SQLCOM_REVOKE]|= CF_DISALLOW_IN_RO_TRANS;
sql_command_flags[SQLCOM_REVOKE_ALL]|= CF_DISALLOW_IN_RO_TRANS;
sql_command_flags[SQLCOM_INSTALL_PLUGIN]|= CF_DISALLOW_IN_RO_TRANS;
sql_command_flags[SQLCOM_UNINSTALL_PLUGIN]|= CF_DISALLOW_IN_RO_TRANS;
} | 0 | [] | server | ba4927e520190bbad763bb5260ae154f29a61231 | 151,245,943,994,092,660,000,000,000,000,000,000,000 | 378 | MDEV-19398: Assertion `item1->type() == Item::FIELD_ITEM ...
Window Functions code tries to minimize the number of times it
needs to sort the select's resultset by finding "compatible"
OVER (PARTITION BY ... ORDER BY ...) clauses.
This employs compare_order_elements(). That function assumed that
the order expressions are Item_field-derived objects (that refer
to a temp.table). But this is not always the case: one can
construct queries where order expressions are arbitrary item expressions.
Add handling for such expressions: sort them according to the window
specification they appeared in.
This means we cannot detect that two compatible PARTITION BY clauses
that use expressions can share the sorting step.
But at least we won't crash. |
void test_nghttp2_submit_headers_push_reply(void) {
nghttp2_session *session;
nghttp2_session_callbacks callbacks;
my_user_data ud;
nghttp2_stream *stream;
int foo;
memset(&callbacks, 0, sizeof(nghttp2_session_callbacks));
callbacks.send_callback = null_send_callback;
callbacks.on_frame_send_callback = on_frame_send_callback;
CU_ASSERT(0 == nghttp2_session_server_new(&session, &callbacks, &ud));
stream = open_sent_stream2(session, 2, NGHTTP2_STREAM_RESERVED);
CU_ASSERT(0 == nghttp2_submit_headers(session, NGHTTP2_FLAG_NONE, 2, NULL,
resnv, ARRLEN(resnv), &foo));
ud.frame_send_cb_called = 0;
ud.sent_frame_type = 0;
CU_ASSERT(0 == nghttp2_session_send(session));
CU_ASSERT(1 == ud.frame_send_cb_called);
CU_ASSERT(NGHTTP2_HEADERS == ud.sent_frame_type);
CU_ASSERT(NGHTTP2_STREAM_OPENED == stream->state);
CU_ASSERT(&foo == stream->stream_user_data);
nghttp2_session_del(session);
  /* Sending HEADERS from a client against a stream in reserved state is
     an error */
CU_ASSERT(0 == nghttp2_session_client_new(&session, &callbacks, &ud));
open_recv_stream2(session, 2, NGHTTP2_STREAM_RESERVED);
CU_ASSERT(0 == nghttp2_submit_headers(session, NGHTTP2_FLAG_NONE, 2, NULL,
reqnv, ARRLEN(reqnv), NULL));
ud.frame_send_cb_called = 0;
ud.sent_frame_type = 0;
CU_ASSERT(0 == nghttp2_session_send(session));
CU_ASSERT(0 == ud.frame_send_cb_called);
nghttp2_session_del(session);
} | 0 | [] | nghttp2 | 0a6ce87c22c69438ecbffe52a2859c3a32f1620f | 11,445,567,393,767,084,000,000,000,000,000,000,000 | 40 | Add nghttp2_option_set_max_outbound_ack |
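For context on the one-line message above: nghttp2_option_set_max_outbound_ack() bounds how many outgoing ACK-type frames (SETTINGS ACK, PING ACK) may sit in the outbound queue, so a flooding peer cannot drive memory use unbounded. A hedged usage sketch follows — the limit value 1000 is what I believe the library's default to be, so verify it against your nghttp2 version:

#include <nghttp2/nghttp2.h>

/* Create a server session whose outbound ACK queue is capped.
   Callback registration is elided for brevity. */
static nghttp2_session *new_bounded_server(nghttp2_session_callbacks *cbs,
                                           void *user_data)
{
  nghttp2_option *opt;
  nghttp2_session *sess = NULL;
  if (nghttp2_option_new(&opt) != 0)
    return NULL;
  nghttp2_option_set_max_outbound_ack(opt, 1000); /* cap queued ACK frames */
  nghttp2_session_server_new2(&sess, cbs, user_data, opt);
  nghttp2_option_del(opt);
  return sess;
}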
ssize_t vnc_client_io_error(VncState *vs, ssize_t ret, int last_errno)
{
if (ret == 0 || ret == -1) {
if (ret == -1) {
switch (last_errno) {
case EINTR:
case EAGAIN:
#ifdef _WIN32
case WSAEWOULDBLOCK:
#endif
return 0;
default:
break;
}
}
VNC_DEBUG("Closing down client sock: ret %zd, errno %d\n",
ret, ret < 0 ? last_errno : 0);
vnc_disconnect_start(vs);
return 0;
}
return ret;
} | 0 | [] | qemu | 4c65fed8bdf96780735dbdb92a8bd0d6b6526cc3 | 131,768,812,103,268,400,000,000,000,000,000,000,000 | 24 | ui: vnc: avoid floating point exception
While sending 'SetPixelFormat' messages to a VNC server,
the client could set the 'red-max', 'green-max' and 'blue-max'
values to be zero. This leads to a floating point exception in
write_png_palette while doing frame buffer updates.
Reported-by: Lian Yihan <[email protected]>
Signed-off-by: Prasad J Pandit <[email protected]>
Reviewed-by: Gerd Hoffmann <[email protected]>
Signed-off-by: Peter Maydell <[email protected]> |
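A sketch of the kind of guard the fix implies: reject zero colour-max values from a SetPixelFormat message before they can reach a division. The function and its placement are assumptions for illustration, not necessarily the exact QEMU patch:

#include <stdint.h>

/* A zero max would later be used as a divisor (e.g. when building the
   PNG palette), so drop the client instead of dividing by zero. */
static int pixel_format_valid_sketch(uint16_t red_max, uint16_t green_max,
                                     uint16_t blue_max)
{
    return red_max != 0 && green_max != 0 && blue_max != 0;
}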
void regulator_unregister(struct regulator_dev *rdev)
{
if (rdev == NULL)
return;
if (rdev->supply) {
while (rdev->use_count--)
regulator_disable(rdev->supply);
regulator_put(rdev->supply);
}
	mutex_lock(&regulator_list_mutex);
debugfs_remove_recursive(rdev->debugfs);
flush_work(&rdev->disable_work.work);
WARN_ON(rdev->open_count);
unset_regulator_supplies(rdev);
list_del(&rdev->list);
kfree(rdev->constraints);
regulator_ena_gpio_free(rdev);
of_node_put(rdev->dev.of_node);
device_unregister(&rdev->dev);
	mutex_unlock(&regulator_list_mutex);
} | 0 | [
"CWE-416"
] | linux | 60a2362f769cf549dc466134efe71c8bf9fbaaba | 251,324,086,583,771,600,000,000,000,000,000,000,000 | 22 | regulator: core: Fix regulator_ena_gpio_free so it does not access the pin after freeing
After a pin is freed in regulator_ena_gpio_free(), the loop can still
access it. This patch fixes the loop so it no longer touches the pin
after it has been freed.
Signed-off-by: Seung-Woo Kim <[email protected]>
Signed-off-by: Mark Brown <[email protected]> |
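A sketch of the fixed teardown loop, modelled on my reading of the patch; structure and field names are simplified. The key change is returning as soon as the matching pin is freed, rather than letting the loop touch freed memory on the next iteration:

static void regulator_ena_gpio_free_sketch(struct regulator_dev *rdev)
{
	struct regulator_enable_gpio *pin, *n;

	list_for_each_entry_safe(pin, n, &regulator_ena_gpio_list, list) {
		if (pin->gpiod != rdev->ena_pin->gpiod)
			continue;
		if (pin->request_count <= 1) {
			gpiod_put(pin->gpiod);
			list_del(&pin->list);
			kfree(pin);
			rdev->ena_pin = NULL;	/* pin is gone; stop here */
			return;
		}
		pin->request_count--;
	}
}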
UnicodeString::UnicodeString(UChar ch) {
fUnion.fFields.fLengthAndFlags = kLength1 | kShortString;
fUnion.fStackFields.fBuffer[0] = ch;
} | 0 | [
"CWE-190",
"CWE-787"
] | icu | b7d08bc04a4296982fcef8b6b8a354a9e4e7afca | 290,203,878,726,837,900,000,000,000,000,000,000,000 | 4 | ICU-20958 Prevent SEGV_MAPERR in append
See #971 |
template<typename t>
CImg<T>& _quicksort(const long indm, const long indM, CImg<t>& permutations,
const bool is_increasing, const bool is_permutations) {
if (indm<indM) {
const long mid = (indm + indM)/2;
if (is_increasing) {
if ((*this)[indm]>(*this)[mid]) {
cimg::swap((*this)[indm],(*this)[mid]);
if (is_permutations) cimg::swap(permutations[indm],permutations[mid]);
}
if ((*this)[mid]>(*this)[indM]) {
cimg::swap((*this)[indM],(*this)[mid]);
if (is_permutations) cimg::swap(permutations[indM],permutations[mid]);
}
if ((*this)[indm]>(*this)[mid]) {
cimg::swap((*this)[indm],(*this)[mid]);
if (is_permutations) cimg::swap(permutations[indm],permutations[mid]);
}
} else {
if ((*this)[indm]<(*this)[mid]) {
cimg::swap((*this)[indm],(*this)[mid]);
if (is_permutations) cimg::swap(permutations[indm],permutations[mid]);
}
if ((*this)[mid]<(*this)[indM]) {
cimg::swap((*this)[indM],(*this)[mid]);
if (is_permutations) cimg::swap(permutations[indM],permutations[mid]);
}
if ((*this)[indm]<(*this)[mid]) {
cimg::swap((*this)[indm],(*this)[mid]);
if (is_permutations) cimg::swap(permutations[indm],permutations[mid]);
}
}
if (indM - indm>=3) {
const T pivot = (*this)[mid];
long i = indm, j = indM;
if (is_increasing) {
do {
while ((*this)[i]<pivot) ++i;
while ((*this)[j]>pivot) --j;
if (i<=j) {
if (is_permutations) cimg::swap(permutations[i],permutations[j]);
cimg::swap((*this)[i++],(*this)[j--]);
}
} while (i<=j);
} else {
do {
while ((*this)[i]>pivot) ++i;
while ((*this)[j]<pivot) --j;
if (i<=j) {
if (is_permutations) cimg::swap(permutations[i],permutations[j]);
cimg::swap((*this)[i++],(*this)[j--]);
}
} while (i<=j);
}
if (indm<j) _quicksort(indm,j,permutations,is_increasing,is_permutations);
if (i<indM) _quicksort(i,indM,permutations,is_increasing,is_permutations);
}
}
      return *this;
    } | 0 | [
"CWE-125"
] | CImg | 10af1e8c1ad2a58a0a3342a856bae63e8f257abb | 80,076,500,437,109,060,000,000,000,000,000,000,000 | 59 | Fix other issues in 'CImg<T>::load_bmp()'. |
TEST_F(
QuicServerTransportTest,
MigrateToValidatePeerCancelsPendingPathChallenge) {
server->getNonConstConn().transportSettings.disableMigration = false;
auto data = IOBuf::copyBuffer("bad data");
auto packetData = packetToBuf(createStreamPacket(
*clientConnectionId,
*server->getConn().serverConnectionId,
clientNextAppDataPacketNum++,
2,
*data,
0 /* cipherOverhead */,
0 /* largestAcked */));
auto peerAddress = server->getConn().peerAddress;
auto congestionController = server->getConn().congestionController.get();
auto srtt = server->getConn().lossState.srtt;
auto lrtt = server->getConn().lossState.lrtt;
auto rttvar = server->getConn().lossState.rttvar;
auto mrtt = server->getConn().lossState.mrtt;
folly::SocketAddress newPeer("100.101.102.103", 23456);
deliverData(std::move(packetData), false, &newPeer);
EXPECT_EQ(server->getConn().peerAddress, newPeer);
EXPECT_TRUE(server->getConn().pendingEvents.pathChallenge);
EXPECT_FALSE(server->getConn().outstandingPathValidation);
EXPECT_FALSE(server->getConn().pendingEvents.schedulePathValidationTimeout);
EXPECT_FALSE(server->pathValidationTimeout().isScheduled());
EXPECT_EQ(server->getConn().migrationState.previousPeerAddresses.size(), 1);
EXPECT_EQ(
server->getConn().migrationState.previousPeerAddresses.back(),
peerAddress);
EXPECT_EQ(server->getConn().lossState.srtt, 0us);
EXPECT_EQ(server->getConn().lossState.lrtt, 0us);
EXPECT_EQ(server->getConn().lossState.rttvar, 0us);
EXPECT_EQ(server->getConn().lossState.mrtt, kDefaultMinRtt);
EXPECT_NE(server->getConn().congestionController.get(), nullptr);
EXPECT_NE(server->getConn().congestionController.get(), congestionController);
EXPECT_EQ(
server->getConn().migrationState.lastCongestionAndRtt->peerAddress,
clientAddr);
EXPECT_EQ(
server->getConn()
.migrationState.lastCongestionAndRtt->congestionController.get(),
congestionController);
EXPECT_EQ(server->getConn().migrationState.lastCongestionAndRtt->srtt, srtt);
EXPECT_EQ(server->getConn().migrationState.lastCongestionAndRtt->lrtt, lrtt);
EXPECT_EQ(
server->getConn().migrationState.lastCongestionAndRtt->rttvar, rttvar);
EXPECT_EQ(server->getConn().migrationState.lastCongestionAndRtt->mrtt, mrtt);
auto packetData2 = packetToBuf(createStreamPacket(
*clientConnectionId,
*server->getConn().serverConnectionId,
clientNextAppDataPacketNum++,
6,
*data,
0 /* cipherOverhead */,
0 /* largestAcked */));
deliverData(std::move(packetData2), false);
EXPECT_FALSE(server->getConn().pendingEvents.pathChallenge);
EXPECT_FALSE(server->getConn().outstandingPathValidation);
EXPECT_FALSE(server->getConn().pendingEvents.schedulePathValidationTimeout);
EXPECT_FALSE(server->pathValidationTimeout().isScheduled());
EXPECT_EQ(server->getConn().migrationState.previousPeerAddresses.size(), 0);
EXPECT_EQ(server->getConn().lossState.srtt, srtt);
EXPECT_EQ(server->getConn().lossState.lrtt, lrtt);
EXPECT_EQ(server->getConn().lossState.rttvar, rttvar);
EXPECT_EQ(server->getConn().lossState.mrtt, mrtt);
EXPECT_EQ(server->getConn().congestionController.get(), congestionController);
EXPECT_FALSE(server->getConn().migrationState.lastCongestionAndRtt);
} | 0 | [
"CWE-617",
"CWE-703"
] | mvfst | a67083ff4b8dcbb7ee2839da6338032030d712b0 | 297,728,233,446,516,300,000,000,000,000,000,000,000 | 74 | Close connection if we derive an extra 1-rtt write cipher
Summary: Fixes CVE-2021-24029
Reviewed By: mjoras, lnicco
Differential Revision: D26613890
fbshipit-source-id: 19bb2be2c731808144e1a074ece313fba11f1945 |
int r_jwe_advanced_parse_json_t(jwe_t * jwe, json_t * jwe_json, uint32_t parse_flags, int x5u_flags) {
int ret;
  size_t header_len = 0, cypher_key_len = 0, iv_len = 0, index = 0;
char * str_header = NULL;
json_t * j_header = NULL, * j_recipient;
unsigned char * iv = NULL;
if (jwe != NULL && json_is_object(jwe_json)) {
if (json_string_length(json_object_get(jwe_json, "protected")) &&
json_string_length(json_object_get(jwe_json, "iv")) &&
json_string_length(json_object_get(jwe_json, "ciphertext")) &&
json_string_length(json_object_get(jwe_json, "tag"))) {
ret = RHN_OK;
r_jwe_set_cypher_key(jwe, NULL, 0);
r_jwe_set_iv(jwe, NULL, 0);
r_jwe_set_aad(jwe, NULL, 0);
r_jwe_set_payload(jwe, NULL, 0);
o_free(jwe->header_b64url);
jwe->header_b64url = NULL;
o_free(jwe->encrypted_key_b64url);
jwe->encrypted_key_b64url = NULL;
o_free(jwe->iv_b64url);
jwe->iv_b64url = NULL;
o_free(jwe->ciphertext_b64url);
jwe->ciphertext_b64url = NULL;
o_free(jwe->auth_tag_b64url);
jwe->auth_tag_b64url = NULL;
o_free(jwe->aad_b64url);
jwe->aad_b64url = NULL;
json_decref(jwe->j_header);
jwe->j_header = json_object();
json_decref(jwe->j_unprotected_header);
jwe->j_unprotected_header = NULL;
do {
json_decref(jwe->j_json_serialization);
if ((jwe->j_json_serialization = json_deep_copy(jwe_json)) == NULL) {
y_log_message(Y_LOG_LEVEL_ERROR, "r_jwe_parse_json_t - Error setting j_json_serialization");
ret = RHN_ERROR;
break;
}
if (json_object_get(jwe_json, "unprotected") != NULL && r_jwe_set_full_unprotected_header_json_t(jwe, json_object_get(jwe_json, "unprotected")) != RHN_OK) {
y_log_message(Y_LOG_LEVEL_ERROR, "r_jwe_parse_json_t - Error r_jwe_set_full_unprotected_header_json_t");
ret = RHN_ERROR_PARAM;
break;
}
if (!o_base64url_decode((unsigned char *)json_string_value(json_object_get(jwe_json, "protected")), json_string_length(json_object_get(jwe_json, "protected")), NULL, &header_len)) {
y_log_message(Y_LOG_LEVEL_ERROR, "r_jwe_parse_json_t - Error invalid protected base64");
ret = RHN_ERROR_PARAM;
break;
}
if ((str_header = o_malloc(header_len+4)) == NULL) {
y_log_message(Y_LOG_LEVEL_ERROR, "r_jwe_parse_json_t - Error allocating resources for str_header");
ret = RHN_ERROR_PARAM;
break;
}
if (!o_base64url_decode((unsigned char *)json_string_value(json_object_get(jwe_json, "protected")), json_string_length(json_object_get(jwe_json, "protected")), (unsigned char *)str_header, &header_len)) {
y_log_message(Y_LOG_LEVEL_ERROR, "r_jwe_parse_json_t - Error invalid protected base64");
ret = RHN_ERROR_PARAM;
break;
}
str_header[header_len] = '\0';
if ((j_header = json_loads(str_header, JSON_DECODE_ANY, NULL)) == NULL) {
y_log_message(Y_LOG_LEVEL_ERROR, "r_jwe_parse_json_t - Error json_loads str_header");
ret = RHN_ERROR_PARAM;
break;
}
if (r_jwe_extract_header(jwe, j_header, parse_flags, x5u_flags) != RHN_OK) {
y_log_message(Y_LOG_LEVEL_ERROR, "r_jwe_parse_json_t - error extracting header params");
ret = RHN_ERROR_PARAM;
break;
}
json_decref(jwe->j_header);
jwe->j_header = json_incref(j_header);
// Decode iv
if (!o_base64url_decode((unsigned char *)json_string_value(json_object_get(jwe_json, "iv")), json_string_length(json_object_get(jwe_json, "iv")), NULL, &iv_len)) {
y_log_message(Y_LOG_LEVEL_ERROR, "r_jwe_parse_json_t - Error invalid iv base64");
ret = RHN_ERROR_PARAM;
break;
}
if ((iv = o_malloc(iv_len+4)) == NULL) {
y_log_message(Y_LOG_LEVEL_ERROR, "r_jwe_parse_json_t - Error allocating resources for iv");
ret = RHN_ERROR_MEMORY;
break;
}
if (!o_base64url_decode((unsigned char *)json_string_value(json_object_get(jwe_json, "iv")), json_string_length(json_object_get(jwe_json, "iv")), iv, &iv_len)) {
y_log_message(Y_LOG_LEVEL_ERROR, "r_jwe_parse_json_t - Error o_base64url_decode iv");
ret = RHN_ERROR_PARAM;
break;
}
if (r_jwe_set_iv(jwe, iv, iv_len) != RHN_OK) {
y_log_message(Y_LOG_LEVEL_ERROR, "r_jwe_parse_json_t - Error r_jwe_set_iv");
ret = RHN_ERROR;
break;
}
jwe->header_b64url = (unsigned char *)o_strdup(json_string_value(json_object_get(jwe_json, "protected")));
jwe->ciphertext_b64url = (unsigned char *)o_strdup(json_string_value(json_object_get(jwe_json, "ciphertext")));
jwe->auth_tag_b64url = (unsigned char *)o_strdup(json_string_value(json_object_get(jwe_json, "tag")));
jwe->aad_b64url = (unsigned char *)o_strdup(json_string_value(json_object_get(jwe_json, "aad")));
} while (0);
json_decref(j_header);
o_free(str_header);
o_free(iv);
if (ret == RHN_OK) {
if (json_array_size(json_object_get(jwe_json, "recipients"))) {
jwe->token_mode = R_JSON_MODE_GENERAL;
json_array_foreach(json_object_get(jwe_json, "recipients"), index, j_recipient) {
if (!json_is_object(j_recipient)) {
y_log_message(Y_LOG_LEVEL_ERROR, "r_jwe_parse_json_t - Invalid recipient at index %zu, must be a JSON object", index);
ret = RHN_ERROR_PARAM;
break;
} else {
if (!o_base64url_decode((const unsigned char*)json_string_value(json_object_get(j_recipient, "encrypted_key")), json_string_length(json_object_get(j_recipient, "encrypted_key")), NULL, &cypher_key_len)) {
                y_log_message(Y_LOG_LEVEL_ERROR, "r_jwe_parse_json_t - Error at index %zu, invalid encrypted_key base64", index);
ret = RHN_ERROR_PARAM;
break;
}
if (json_object_get(j_recipient, "header") != NULL && !json_is_object(json_object_get(j_recipient, "header"))) {
y_log_message(Y_LOG_LEVEL_ERROR, "r_jwe_parse_json_t - Invalid header at index %zu, must be a JSON object", index);
ret = RHN_ERROR_PARAM;
break;
}
}
}
} else {
jwe->token_mode = R_JSON_MODE_FLATTENED;
jwe->encrypted_key_b64url = (unsigned char *)o_strdup(json_string_value(json_object_get(jwe_json, "encrypted_key")));
if (json_object_get(jwe_json, "header") == NULL || r_jwe_extract_header(jwe, json_object_get(jwe_json, "header"), parse_flags, x5u_flags) == RHN_OK) {
json_object_update_missing(jwe->j_header, json_object_get(jwe_json, "header"));
} else {
y_log_message(Y_LOG_LEVEL_ERROR, "r_jwe_parse_json_t - error extracting header params");
ret = RHN_ERROR_PARAM;
}
}
}
} else {
y_log_message(Y_LOG_LEVEL_ERROR, "r_jwe_parse_json_t - Error invalid content");
ret = RHN_ERROR_PARAM;
}
} else {
y_log_message(Y_LOG_LEVEL_ERROR, "r_jwe_parse_json_t - Error input parameters");
ret = RHN_ERROR_PARAM;
}
return ret;
} | 0 | [
"CWE-787"
] | rhonabwy | b4c2923a1ba4fabf9b55a89244127e153a3e549b | 264,688,282,216,598,800,000,000,000,000,000,000,000 | 156 | Fix buffer overflow on r_jwe_aesgcm_key_unwrap |
SetDeviceIndicators(char *wire,
DeviceIntPtr dev,
unsigned changed,
int num,
int *status_rtrn,
ClientPtr client, xkbExtensionDeviceNotify * ev)
{
xkbDeviceLedsWireDesc *ledWire;
int i;
XkbEventCauseRec cause;
unsigned namec, mapc, statec;
xkbExtensionDeviceNotify ed;
XkbChangesRec changes;
DeviceIntPtr kbd;
memset((char *) &ed, 0, sizeof(xkbExtensionDeviceNotify));
memset((char *) &changes, 0, sizeof(XkbChangesRec));
XkbSetCauseXkbReq(&cause, X_kbSetDeviceInfo, client);
ledWire = (xkbDeviceLedsWireDesc *) wire;
for (i = 0; i < num; i++) {
register int n;
register unsigned bit;
CARD32 *atomWire;
xkbIndicatorMapWireDesc *mapWire;
XkbSrvLedInfoPtr sli;
namec = mapc = statec = 0;
sli = XkbFindSrvLedInfo(dev, ledWire->ledClass, ledWire->ledID,
XkbXI_IndicatorMapsMask);
if (!sli) {
/* SHOULD NEVER HAPPEN!! */
return (char *) ledWire;
}
atomWire = (CARD32 *) &ledWire[1];
if (changed & XkbXI_IndicatorNamesMask) {
namec = sli->namesPresent | ledWire->namesPresent;
memset((char *) sli->names, 0, XkbNumIndicators * sizeof(Atom));
}
if (ledWire->namesPresent) {
sli->namesPresent = ledWire->namesPresent;
memset((char *) sli->names, 0, XkbNumIndicators * sizeof(Atom));
for (n = 0, bit = 1; n < XkbNumIndicators; n++, bit <<= 1) {
if (ledWire->namesPresent & bit) {
sli->names[n] = (Atom) *atomWire;
if (sli->names[n] == None)
ledWire->namesPresent &= ~bit;
atomWire++;
}
}
}
mapWire = (xkbIndicatorMapWireDesc *) atomWire;
if (changed & XkbXI_IndicatorMapsMask) {
mapc = sli->mapsPresent | ledWire->mapsPresent;
sli->mapsPresent = ledWire->mapsPresent;
memset((char *) sli->maps, 0,
XkbNumIndicators * sizeof(XkbIndicatorMapRec));
}
if (ledWire->mapsPresent) {
for (n = 0, bit = 1; n < XkbNumIndicators; n++, bit <<= 1) {
if (ledWire->mapsPresent & bit) {
sli->maps[n].flags = mapWire->flags;
sli->maps[n].which_groups = mapWire->whichGroups;
sli->maps[n].groups = mapWire->groups;
sli->maps[n].which_mods = mapWire->whichMods;
sli->maps[n].mods.mask = mapWire->mods;
sli->maps[n].mods.real_mods = mapWire->realMods;
sli->maps[n].mods.vmods = mapWire->virtualMods;
sli->maps[n].ctrls = mapWire->ctrls;
mapWire++;
}
}
}
if (changed & XkbXI_IndicatorStateMask) {
statec = sli->effectiveState ^ ledWire->state;
sli->explicitState &= ~statec;
sli->explicitState |= (ledWire->state & statec);
}
if (namec)
XkbApplyLedNameChanges(dev, sli, namec, &ed, &changes, &cause);
if (mapc)
XkbApplyLedMapChanges(dev, sli, mapc, &ed, &changes, &cause);
if (statec)
XkbApplyLedStateChanges(dev, sli, statec, &ed, &changes, &cause);
kbd = dev;
if ((sli->flags & XkbSLI_HasOwnState) == 0)
kbd = inputInfo.keyboard;
XkbFlushLedEvents(dev, kbd, sli, &ed, &changes, &cause);
ledWire = (xkbDeviceLedsWireDesc *) mapWire;
}
return (char *) ledWire;
} | 1 | [
"CWE-122"
] | xserver | 87c64fc5b0db9f62f4e361444f4b60501ebf67b9 | 239,920,769,198,937,040,000,000,000,000,000,000,000 | 94 | Fix XkbSetDeviceInfo() and SetDeviceIndicators() heap overflows
ZDI-CAN 11389 / CVE-2020-25712
This vulnerability was discovered by:
Jan-Niklas Sohn working with Trend Micro Zero Day Initiative
Signed-off-by: Matthieu Herrb <[email protected]> |
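The function above walks client-supplied wire data (ledWire, atomWire, mapWire) without checking that each record still fits inside the request, which is where the heap overflow comes from. A hedged sketch of the style of bounds check such a fix needs — the name and exact placement are assumptions, not the literal xserver patch:

/* Before consuming 'needed' bytes at 'wire', make sure the request
   actually contains them. Callers would return BadLength otherwise. */
static int wire_fits_sketch(const char *wire, const char *wire_end,
                            size_t needed)
{
    return wire <= wire_end && (size_t) (wire_end - wire) >= needed;
}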
static int wp_pfn_shared(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
if (vma->vm_ops && vma->vm_ops->pfn_mkwrite) {
int ret;
pte_unmap_unlock(vmf->pte, vmf->ptl);
vmf->flags |= FAULT_FLAG_MKWRITE;
ret = vma->vm_ops->pfn_mkwrite(vmf);
if (ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))
return ret;
return finish_mkwrite_fault(vmf);
}
wp_page_reuse(vmf);
return VM_FAULT_WRITE;
} | 0 | [
"CWE-119"
] | linux | 1be7107fbe18eed3e319a6c3e83c78254b693acb | 119,050,879,517,927,220,000,000,000,000,000,000,000 | 17 | mm: larger stack guard gap, between vmas
Stack guard page is a useful feature to reduce a risk of stack smashing
into a different mapping. We have been using a single page gap which
is sufficient to prevent having stack adjacent to a different mapping.
But this seems to be insufficient in the light of the stack usage in
userspace. E.g. glibc uses as large as 64kB alloca() in many commonly
used functions. Others use constructs like gid_t buffer[NGROUPS_MAX]
which is 256kB or stack strings with MAX_ARG_STRLEN.
This will become especially dangerous for suid binaries and the default
of no limit on the stack size, because those applications can be
tricked to consume a large portion of the stack and a single glibc call
could jump over the guard page. These attacks are not theoretical,
unfortunately.
Make those attacks less probable by increasing the stack guard gap
to 1MB (on systems with 4k pages; but make it depend on the page size
because systems with larger base pages might cap stack allocations in
the PAGE_SIZE units) which should cover larger alloca() and VLA stack
allocations. It is obviously not a full fix because the problem is
somehow inherent, but it should reduce attack space a lot.
One could argue that the gap size should be configurable from userspace,
but that can be done later when somebody finds that the new 1MB is wrong
for some special case applications. For now, add a kernel command line
option (stack_guard_gap) to specify the stack gap size (in page units).
Implementation-wise, first delete all the old code for the stack guard page:
because although we could get away with accounting one extra page in a
stack vma, accounting a larger gap can break userspace - case in point,
a program run with "ulimit -S -v 20000" failed when the 1MB gap was
counted for RLIMIT_AS; similar problems could come with RLIMIT_MLOCK
and strict non-overcommit mode.
Instead of keeping the gap inside the stack vma, maintain the stack guard
gap as a gap between vmas: using vm_start_gap() in place of vm_start
(or vm_end_gap() in place of vm_end if VM_GROWSUP) in just those few
places which need to respect the gap - mainly arch_get_unmapped_area(),
and the vma tree's subtree_gap support for that.
Original-patch-by: Oleg Nesterov <[email protected]>
Original-patch-by: Michal Hocko <[email protected]>
Signed-off-by: Hugh Dickins <[email protected]>
Acked-by: Michal Hocko <[email protected]>
Tested-by: Helge Deller <[email protected]> # parisc
Signed-off-by: Linus Torvalds <[email protected]> |
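Following my reading of the resulting helper, the downward-growing case looks roughly like this; treat it as a sketch rather than the exact upstream code:

/* Effective start of a VMA for gap purposes: a stack that grows down
   is treated as starting stack_guard_gap bytes earlier, clamped at 0
   if the subtraction wraps around. */
static inline unsigned long vm_start_gap_sketch(struct vm_area_struct *vma)
{
	unsigned long vm_start = vma->vm_start;

	if (vma->vm_flags & VM_GROWSDOWN) {
		vm_start -= stack_guard_gap;
		if (vm_start > vma->vm_start)	/* underflow wrapped */
			vm_start = 0;
	}
	return vm_start;
}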
RemoteIo::Impl::Impl(const std::string& url, size_t blockSize)
: path_(url), blockSize_(blockSize), blocksMap_(0), size_(0),
      idx_(0), isMalloced_(false), eof_(false), protocol_(fileProtocol(url)), totalRead_(0)
{
} | 0 | [
"CWE-125"
] | exiv2 | 6e3855aed7ba8bb4731fc4087ca7f9078b2f3d97 | 296,000,372,638,660,500,000,000,000,000,000,000,000 | 5 | Fix https://github.com/Exiv2/exiv2/issues/55 |
static int io_connect(struct io_kiocb *req, const struct io_uring_sqe *sqe,
struct io_kiocb **nxt, bool force_nonblock)
{
#if defined(CONFIG_NET)
struct sockaddr __user *addr;
unsigned file_flags;
int addr_len, ret;
if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
return -EINVAL;
if (sqe->ioprio || sqe->len || sqe->buf_index || sqe->rw_flags)
return -EINVAL;
addr = (struct sockaddr __user *) (unsigned long) READ_ONCE(sqe->addr);
addr_len = READ_ONCE(sqe->addr2);
file_flags = force_nonblock ? O_NONBLOCK : 0;
ret = __sys_connect_file(req->file, addr, addr_len, file_flags);
if (ret == -EAGAIN && force_nonblock)
return -EAGAIN;
if (ret == -ERESTARTSYS)
ret = -EINTR;
if (ret < 0 && (req->flags & REQ_F_LINK))
req->flags |= REQ_F_FAIL_LINK;
io_cqring_add_event(req, ret);
io_put_req_find_next(req, nxt);
return 0;
#else
return -EOPNOTSUPP;
#endif
} | 0 | [] | linux | 181e448d8709e517c9c7b523fcd209f24eb38ca7 | 216,779,484,693,542,300,000,000,000,000,000,000,000 | 31 | io_uring: async workers should inherit the user creds
If we don't inherit the original task creds, then we can confuse users
like fuse that pass creds in the request header. See link below on
identical aio issue.
Link: https://lore.kernel.org/linux-fsdevel/[email protected]/T/#u
Signed-off-by: Jens Axboe <[email protected]> |
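A sketch of the idea, using the kernel cred API as I understand it; the req->work.creds field is illustrative and may not match the structure layout at this commit:

/* Submission side: pin the submitting task's credentials. */
static void io_grab_creds_sketch(struct io_kiocb *req)
{
	req->work.creds = get_current_cred();	/* takes a reference */
}

/* Worker side: execute the request under the submitter's identity. */
static void io_run_as_submitter_sketch(struct io_kiocb *req)
{
	const struct cred *old = override_creds(req->work.creds);
	/* ... perform the deferred I/O on the user's behalf ... */
	revert_creds(old);
	put_cred(req->work.creds);
}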
static int fuse_atomic_open(struct inode *dir, struct dentry *entry,
struct file *file, unsigned flags,
umode_t mode)
{
int err;
struct fuse_conn *fc = get_fuse_conn(dir);
struct dentry *res = NULL;
if (fuse_is_bad(dir))
return -EIO;
if (d_in_lookup(entry)) {
res = fuse_lookup(dir, entry, 0);
if (IS_ERR(res))
return PTR_ERR(res);
if (res)
entry = res;
}
if (!(flags & O_CREAT) || d_really_is_positive(entry))
goto no_open;
/* Only creates */
file->f_mode |= FMODE_CREATED;
if (fc->no_create)
goto mknod;
err = fuse_create_open(dir, entry, file, flags, mode);
if (err == -ENOSYS) {
fc->no_create = 1;
goto mknod;
}
out_dput:
dput(res);
return err;
mknod:
err = fuse_mknod(dir, entry, mode, 0);
if (err)
goto out_dput;
no_open:
return finish_no_open(file, res);
} | 0 | [
"CWE-459"
] | linux | 5d069dbe8aaf2a197142558b6fb2978189ba3454 | 122,852,299,121,347,150,000,000,000,000,000,000,000 | 45 | fuse: fix bad inode
Jan Kara's analysis of the syzbot report (edited):
The reproducer opens a directory on a FUSE filesystem and then attaches a
dnotify mark to the open directory. After that, a fuse_do_getattr() call
finds that attributes returned by the server are inconsistent, and calls
make_bad_inode() which, among other things does:
inode->i_mode = S_IFREG;
This then confuses dnotify which doesn't tear down its structures
properly and eventually crashes.
Avoid calling make_bad_inode() on a live inode: switch to a private flag on
the fuse inode. Also add the test to ops which the bad_inode_ops would
have caught.
This bug goes back to the initial merge of fuse in 2.6.14...
Reported-by: [email protected]
Signed-off-by: Miklos Szeredi <[email protected]>
Tested-by: Jan Kara <[email protected]>
Cc: <[email protected]> |
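fuse_atomic_open() above already shows the call site: fuse_is_bad() replaces the old make_bad_inode() catch-all. A sketch consistent with that check, with the flag name assumed from the call site rather than confirmed against the tree:

/* Mark a live fuse inode as bad without touching i_mode, so dnotify
   and friends keep seeing a consistent inode type. */
static inline void fuse_make_bad_sketch(struct inode *inode)
{
	set_bit(FUSE_I_BAD, &get_fuse_inode(inode)->state);
}

static inline bool fuse_is_bad_sketch(struct inode *inode)
{
	return unlikely(test_bit(FUSE_I_BAD, &get_fuse_inode(inode)->state));
}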