func stringlengths 0–484k | target int64 0–1 | cwe sequencelengths 0–4 | project stringclasses 799 values | commit_id stringlengths 40–40 | hash float64 1,215,700,430,453,689,100,000,000B – 340,281,914,521,452,260,000,000,000,000B | size int64 1–24k | message stringlengths 0–13.3k |
---|---|---|---|---|---|---|---|
Status explain(OperationContext* opCtx,
const OpMsgRequest& opMsgRequest,
ExplainOptions::Verbosity verbosity,
rpc::ReplyBuilderInterface* result) const override {
std::string dbname = opMsgRequest.getDatabase().toString();
const BSONObj& cmdObj = opMsgRequest.body;
// Acquire locks. The RAII object is optional, because in the case
// of a view, the locks need to be released.
boost::optional<AutoGetCollectionForReadCommand> ctx;
ctx.emplace(opCtx,
CommandHelpers::parseNsCollectionRequired(dbname, cmdObj),
AutoGetCollection::ViewMode::kViewsPermitted);
const auto nss = ctx->getNss();
const bool isExplain = true;
auto request = CountRequest::parseFromBSON(nss, cmdObj, isExplain);
if (!request.isOK()) {
return request.getStatus();
}
if (ctx->getView()) {
// Relinquish locks. The aggregation command will re-acquire them.
ctx.reset();
auto viewAggregation = request.getValue().asAggregationCommand();
if (!viewAggregation.isOK()) {
return viewAggregation.getStatus();
}
auto viewAggRequest = AggregationRequest::parseFromBSON(
request.getValue().getNs(), viewAggregation.getValue(), verbosity);
if (!viewAggRequest.isOK()) {
return viewAggRequest.getStatus();
}
return runAggregate(opCtx,
viewAggRequest.getValue().getNamespaceString(),
viewAggRequest.getValue(),
viewAggregation.getValue(),
result);
}
Collection* const collection = ctx->getCollection();
// Prevent chunks from being cleaned up during yields - this allows us to only check the
// version on initial entry into count.
auto rangePreserver =
CollectionShardingState::get(opCtx, nss)->getMetadataForOperation(opCtx);
auto statusWithPlanExecutor =
getExecutorCount(opCtx, collection, request.getValue(), true /*explain*/);
if (!statusWithPlanExecutor.isOK()) {
return statusWithPlanExecutor.getStatus();
}
auto exec = std::move(statusWithPlanExecutor.getValue());
auto bodyBuilder = result->getBodyBuilder();
Explain::explainStages(exec.get(), collection, verbosity, &bodyBuilder);
return Status::OK();
} | 0 | [
"CWE-20"
] | mongo | 722f06f3217c029ef9c50062c8cc775966fd7ead | 212,572,836,034,715,100,000,000,000,000,000,000,000 | 61 | SERVER-38275 ban find explain with UUID |
evdns_base_load_hosts(struct evdns_base *base, const char *hosts_fname)
{
int res;
if (!base)
base = current_base;
EVDNS_LOCK(base);
res = evdns_base_load_hosts_impl(base, hosts_fname);
EVDNS_UNLOCK(base);
return res;
} | 0 | [
"CWE-125"
] | libevent | 96f64a022014a208105ead6c8a7066018449d86d | 109,821,499,767,773,260,000,000,000,000,000,000,000 | 10 | evdns: name_parse(): fix remote stack overread
@asn-the-goblin-slayer:
"the name_parse() function in libevent's DNS code is vulnerable to a buffer overread.
971 if (cp != name_out) {
972 if (cp + 1 >= end) return -1;
973 *cp++ = '.';
974 }
975 if (cp + label_len >= end) return -1;
976 memcpy(cp, packet + j, label_len);
977 cp += label_len;
978 j += label_len;
No check is made against length before the memcpy occurs.
This was found through the Tor bug bounty program and the discovery should be credited to 'Guido Vranken'."
Reproducer for gdb (https://gist.github.com/azat/e4fcf540e9b89ab86d02):
set $PROT_NONE=0x0
set $PROT_READ=0x1
set $PROT_WRITE=0x2
set $MAP_ANONYMOUS=0x20
set $MAP_SHARED=0x01
set $MAP_FIXED=0x10
set $MAP_32BIT=0x40
start
set $length=202
# overread
set $length=2
# allocate with mmap to have a seg fault on page boundary
set $l=(1<<20)*2
p mmap(0, $l, $PROT_READ|$PROT_WRITE, $MAP_ANONYMOUS|$MAP_SHARED|$MAP_32BIT, -1, 0)
set $packet=(char *)$1+$l-$length
# hack the packet
set $packet[0]=63
set $packet[1]='/'
p malloc(sizeof(int))
set $idx=(int *)$2
set $idx[0]=0
set $name_out_len=202
p malloc($name_out_len)
set $name_out=$3
# have WRITE only mapping to fail on read
set $end=$1+$l
p (void *)mmap($end, 1<<12, $PROT_NONE, $MAP_ANONYMOUS|$MAP_SHARED|$MAP_FIXED|$MAP_32BIT, -1, 0)
set $m=$4
p name_parse($packet, $length, $idx, $name_out, $name_out_len)
x/2s (char *)$name_out
Before this patch:
$ gdb -ex 'source gdb' dns-example
$1 = 1073741824
$2 = (void *) 0x633010
$3 = (void *) 0x633030
$4 = (void *) 0x40200000
Program received signal SIGSEGV, Segmentation fault.
__memcpy_sse2_unaligned () at memcpy-sse2-unaligned.S:33
After this patch:
$ gdb -ex 'source gdb' dns-example
$1 = 1073741824
$2 = (void *) 0x633010
$3 = (void *) 0x633030
$4 = (void *) 0x40200000
$5 = -1
0x633030: "/"
0x633032: ""
(gdb) p $m
$6 = (void *) 0x40200000
(gdb) p $1
$7 = 1073741824
(gdb) p/x $1
$8 = 0x40000000
(gdb) quit
P.S. plus drop one condition duplicate.
Fixes: #317 |
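The quoted lines 971–978 check only the output cursor against `end`; nothing bounds `label_len` against the bytes actually received, which is what lets the memcpy read past the packet. A minimal sketch of that missing input-side check follows; the helper name `copy_label` and its parameters are hypothetical stand-ins for the variables quoted in the message, not the upstream patch.

```c
#include <stddef.h>
#include <string.h>

static int copy_label(const char *packet, size_t length, size_t j,
                      size_t label_len, char **cpp, const char *end)
{
    char *cp = *cpp;

    /* output side: room left in name_out for the label? */
    if (cp + label_len >= end)
        return -1;
    /* input side: does the label really lie inside the received packet?
     * This is the check whose absence allowed the remote overread. */
    if (j + label_len > length)
        return -1;

    memcpy(cp, packet + j, label_len);
    *cpp = cp + label_len;
    return 0;
}
```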
explicit SparseFillEmptyRowsOp(OpKernelConstruction* context)
: OpKernel(context) {} | 0 | [
"CWE-476",
"CWE-703"
] | tensorflow | faa76f39014ed3b5e2c158593b1335522e573c7f | 229,708,862,694,855,550,000,000,000,000,000,000,000 | 2 | Fix heap-buffer-overflow issue with `tf.raw_ops.SparseFillEmptyRows`.
PiperOrigin-RevId: 372009178
Change-Id: Ia1a9e9691ecaa072f32fb39a0887b2aabd399210 |
// Insert code instructions for processing scalars.
unsigned int scalar() { // Insert new scalar in memory.
if (mempos>=mem._width) { mem.resize(-200,1,1,1,0); memtype.resize(mem._width,1,1,1,0); }
return mempos++; | 0 | [
"CWE-125"
] | CImg | 10af1e8c1ad2a58a0a3342a856bae63e8f257abb | 303,409,094,442,821,300,000,000,000,000,000,000,000 | 4 | Fix other issues in 'CImg<T>::load_bmp()'. |
do_line()
{
/* Line continuation has already been handled by read_line() */
char *inlptr;
/* Expand any string variables in the current input line */
string_expand_macros();
/* Skip leading whitespace */
inlptr = gp_input_line;
while (isspace((unsigned char) *inlptr))
inlptr++;
/* Leading '!' indicates a shell command that bypasses normal gnuplot
* tokenization and parsing. This doesn't work inside a bracketed clause.
*/
if (is_system(*inlptr)) {
do_system(inlptr + 1);
return (0);
}
/* Strip off trailing comment */
FPRINTF((stderr,"doline( \"%s\" )\n", gp_input_line));
if (strchr(inlptr, '#')) {
num_tokens = scanner(&gp_input_line, &gp_input_line_len);
if (gp_input_line[token[num_tokens].start_index] == '#')
gp_input_line[token[num_tokens].start_index] = NUL;
}
if (inlptr != gp_input_line) {
/* If there was leading whitespace, copy the actual
* command string to the front. use memmove() because
* source and target may overlap */
memmove(gp_input_line, inlptr, strlen(inlptr));
/* Terminate resulting string */
gp_input_line[strlen(inlptr)] = NUL;
}
FPRINTF((stderr, " echo: \"%s\"\n", gp_input_line));
num_tokens = scanner(&gp_input_line, &gp_input_line_len);
/*
* Expand line if necessary to contain a complete bracketed clause {...}
* Insert a ';' after current line and append the next input line.
* NB: This may leave an "else" condition on the next line.
*/
if (curly_brace_count < 0)
int_error(NO_CARET,"Unexpected }");
while (curly_brace_count > 0) {
if (lf_head && lf_head->depth > 0) {
/* This catches the case that we are inside a "load foo" operation
* and therefore requesting interactive input is not an option.
* FIXME: or is it?
*/
int_error(NO_CARET, "Syntax error: missing block terminator }");
}
else if (interactive || noinputfiles) {
/* If we are really in interactive mode and there are unterminated blocks,
* then we want to display a "more>" prompt to get the rest of the block.
* However, there are two more cases that must be dealt here:
* One is when commands are piped to gnuplot - on the command line,
* the other is when commands are piped to gnuplot which is opened
* as a slave process. The test for noinputfiles is for the latter case.
* If we didn't have that test here, unterminated blocks sent via a pipe
* would trigger the error message in the else branch below. */
int retval;
strcat(gp_input_line,";");
retval = read_line("more> ", strlen(gp_input_line));
if (retval)
int_error(NO_CARET, "Syntax error: missing block terminator }");
/* Expand any string variables in the current input line */
string_expand_macros();
num_tokens = scanner(&gp_input_line, &gp_input_line_len);
if (gp_input_line[token[num_tokens].start_index] == '#')
gp_input_line[token[num_tokens].start_index] = NUL;
}
else {
/* Non-interactive mode here means that we got a string from -e.
* Having curly_brace_count > 0 means that there are at least one
* unterminated blocks in the string.
* Likely user error, so we die with an error message. */
int_error(NO_CARET, "Syntax error: missing block terminator }");
}
}
c_token = 0;
while (c_token < num_tokens) {
command();
if (command_exit_requested) {
command_exit_requested = 0; /* yes this is necessary */
return 1;
}
if (iteration_early_exit()) {
c_token = num_tokens;
break;
}
if (c_token < num_tokens) { /* something after command */
if (equals(c_token, ";")) {
c_token++;
} else if (equals(c_token, "{")) {
begin_clause();
} else if (equals(c_token, "}")) {
end_clause();
} else
int_error(c_token, "unexpected or unrecognized token: %s",
token_to_string(c_token));
}
}
/* This check allows event handling inside load/eval/while statements */
check_for_mouse_events();
return (0);
} | 0 | [
"CWE-415"
] | gnuplot | 052cbd17c3cbbc602ee080b2617d32a8417d7563 | 119,698,043,207,143,560,000,000,000,000,000,000,000 | 115 | successive failures of "set print <foo>" could cause double-free
Bug #2312 |
warn_if_hs_unreachable(const edge_connection_t *conn, uint8_t reason)
{
tor_assert(conn);
if (conn->base_.type == CONN_TYPE_EXIT &&
connection_edge_is_rendezvous_stream(conn) &&
(reason == END_STREAM_REASON_CONNECTREFUSED ||
reason == END_STREAM_REASON_TIMEOUT)) {
#define WARN_FAILED_HS_CONNECTION 300
static ratelim_t warn_limit = RATELIM_INIT(WARN_FAILED_HS_CONNECTION);
char *m;
if ((m = rate_limit_log(&warn_limit, approx_time()))) {
log_warn(LD_EDGE, "Onion service connection to %s failed (%s)",
connection_describe_peer(TO_CONN(conn)),
stream_end_reason_to_string(reason));
tor_free(m);
}
}
} | 0 | [
"CWE-532"
] | tor | 80c404c4b79f3bcba3fc4585d4c62a62a04f3ed9 | 248,227,352,570,841,100,000,000,000,000,000,000,000 | 19 | Log warning when connecting to soon-to-be-deprecated v2 onions. |
static int i8042_pm_restore(struct device *dev)
{
return i8042_controller_resume(false);
} | 0 | [
"CWE-476"
] | linux | 340d394a789518018f834ff70f7534fc463d3226 | 21,267,102,081,954,612,000,000,000,000,000,000,000 | 4 | Input: i8042 - fix crash at boot time
The driver checks port->exists twice in i8042_interrupt(), first when
trying to assign temporary "serio" variable, and second time when deciding
whether it should call serio_interrupt(). The value of port->exists may
change between the 2 checks, and we may end up calling serio_interrupt()
with a NULL pointer:
BUG: unable to handle kernel NULL pointer dereference at 0000000000000050
IP: [<ffffffff8150feaf>] _spin_lock_irqsave+0x1f/0x40
PGD 0
Oops: 0002 [#1] SMP
last sysfs file:
CPU 0
Modules linked in:
Pid: 1, comm: swapper Not tainted 2.6.32-358.el6.x86_64 #1 QEMU Standard PC (i440FX + PIIX, 1996)
RIP: 0010:[<ffffffff8150feaf>] [<ffffffff8150feaf>] _spin_lock_irqsave+0x1f/0x40
RSP: 0018:ffff880028203cc0 EFLAGS: 00010082
RAX: 0000000000010000 RBX: 0000000000000000 RCX: 0000000000000000
RDX: 0000000000000282 RSI: 0000000000000098 RDI: 0000000000000050
RBP: ffff880028203cc0 R08: ffff88013e79c000 R09: ffff880028203ee0
R10: 0000000000000298 R11: 0000000000000282 R12: 0000000000000050
R13: 0000000000000000 R14: 0000000000000000 R15: 0000000000000098
FS: 0000000000000000(0000) GS:ffff880028200000(0000) knlGS:0000000000000000
CS: 0010 DS: 0018 ES: 0018 CR0: 000000008005003b
CR2: 0000000000000050 CR3: 0000000001a85000 CR4: 00000000001407f0
DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
DR3: 0000000000000000 DR6: 00000000ffff0ff0 DR7: 0000000000000400
Process swapper (pid: 1, threadinfo ffff88013e79c000, task ffff88013e79b500)
Stack:
ffff880028203d00 ffffffff813de186 ffffffffffffff02 0000000000000000
<d> 0000000000000000 0000000000000000 0000000000000000 0000000000000098
<d> ffff880028203d70 ffffffff813e0162 ffff880028203d20 ffffffff8103b8ac
Call Trace:
<IRQ>
[<ffffffff813de186>] serio_interrupt+0x36/0xa0
[<ffffffff813e0162>] i8042_interrupt+0x132/0x3a0
[<ffffffff8103b8ac>] ? kvm_clock_read+0x1c/0x20
[<ffffffff8103b8b9>] ? kvm_clock_get_cycles+0x9/0x10
[<ffffffff810e1640>] handle_IRQ_event+0x60/0x170
[<ffffffff8103b154>] ? kvm_guest_apic_eoi_write+0x44/0x50
[<ffffffff810e3d8e>] handle_edge_irq+0xde/0x180
[<ffffffff8100de89>] handle_irq+0x49/0xa0
[<ffffffff81516c8c>] do_IRQ+0x6c/0xf0
[<ffffffff8100b9d3>] ret_from_intr+0x0/0x11
[<ffffffff81076f63>] ? __do_softirq+0x73/0x1e0
[<ffffffff8109b75b>] ? hrtimer_interrupt+0x14b/0x260
[<ffffffff8100c1cc>] ? call_softirq+0x1c/0x30
[<ffffffff8100de05>] ? do_softirq+0x65/0xa0
[<ffffffff81076d95>] ? irq_exit+0x85/0x90
[<ffffffff81516d80>] ? smp_apic_timer_interrupt+0x70/0x9b
[<ffffffff8100bb93>] ? apic_timer_interrupt+0x13/0x20
To avoid the issue let's change the second check to test whether serio is
NULL or not.
Also, let's take i8042_lock in i8042_start() and i8042_stop() instead of
trying to be overly smart and using memory barriers.
Signed-off-by: Chen Hong <[email protected]>
[dtor: take lock in i8042_start()/i8042_stop()]
Cc: [email protected]
Signed-off-by: Dmitry Torokhov <[email protected]> |
win_equal_rec(
win_T *next_curwin, /* pointer to current window to be or NULL */
int current, /* do only frame with current window */
frame_T *topfr, /* frame to set size off */
int dir, /* 'v', 'h' or 'b', see win_equal() */
int col, /* horizontal position for frame */
int row, /* vertical position for frame */
int width, /* new width of frame */
int height) /* new height of frame */
{
int n, m;
int extra_sep = 0;
int wincount, totwincount = 0;
frame_T *fr;
int next_curwin_size = 0;
int room = 0;
int new_size;
int has_next_curwin = 0;
int hnc;
if (topfr->fr_layout == FR_LEAF)
{
/* Set the width/height of this frame.
* Redraw when size or position changes */
if (topfr->fr_height != height || topfr->fr_win->w_winrow != row
|| topfr->fr_width != width || topfr->fr_win->w_wincol != col
)
{
topfr->fr_win->w_winrow = row;
frame_new_height(topfr, height, FALSE, FALSE);
topfr->fr_win->w_wincol = col;
frame_new_width(topfr, width, FALSE, FALSE);
redraw_all_later(NOT_VALID);
}
}
else if (topfr->fr_layout == FR_ROW)
{
topfr->fr_width = width;
topfr->fr_height = height;
if (dir != 'v') /* equalize frame widths */
{
/* Compute the maximum number of windows horizontally in this
* frame. */
n = frame_minwidth(topfr, NOWIN);
/* add one for the rightmost window, it doesn't have a separator */
if (col + width == Columns)
extra_sep = 1;
else
extra_sep = 0;
totwincount = (n + extra_sep) / (p_wmw + 1);
has_next_curwin = frame_has_win(topfr, next_curwin);
/*
* Compute width for "next_curwin" window and room available for
* other windows.
* "m" is the minimal width when counting p_wiw for "next_curwin".
*/
m = frame_minwidth(topfr, next_curwin);
room = width - m;
if (room < 0)
{
next_curwin_size = p_wiw + room;
room = 0;
}
else
{
next_curwin_size = -1;
FOR_ALL_FRAMES(fr, topfr->fr_child)
{
/* If 'winfixwidth' set keep the window width if
* possible.
* Watch out for this window being the next_curwin. */
if (frame_fixed_width(fr))
{
n = frame_minwidth(fr, NOWIN);
new_size = fr->fr_width;
if (frame_has_win(fr, next_curwin))
{
room += p_wiw - p_wmw;
next_curwin_size = 0;
if (new_size < p_wiw)
new_size = p_wiw;
}
else
/* These windows don't use up room. */
totwincount -= (n + (fr->fr_next == NULL
? extra_sep : 0)) / (p_wmw + 1);
room -= new_size - n;
if (room < 0)
{
new_size += room;
room = 0;
}
fr->fr_newwidth = new_size;
}
}
if (next_curwin_size == -1)
{
if (!has_next_curwin)
next_curwin_size = 0;
else if (totwincount > 1
&& (room + (totwincount - 2))
/ (totwincount - 1) > p_wiw)
{
/* Can make all windows wider than 'winwidth', spread
* the room equally. */
next_curwin_size = (room + p_wiw
+ (totwincount - 1) * p_wmw
+ (totwincount - 1)) / totwincount;
room -= next_curwin_size - p_wiw;
}
else
next_curwin_size = p_wiw;
}
}
if (has_next_curwin)
--totwincount; /* don't count curwin */
}
FOR_ALL_FRAMES(fr, topfr->fr_child)
{
wincount = 1;
if (fr->fr_next == NULL)
/* last frame gets all that remains (avoid roundoff error) */
new_size = width;
else if (dir == 'v')
new_size = fr->fr_width;
else if (frame_fixed_width(fr))
{
new_size = fr->fr_newwidth;
wincount = 0; /* doesn't count as a sizeable window */
}
else
{
/* Compute the maximum number of windows horiz. in "fr". */
n = frame_minwidth(fr, NOWIN);
wincount = (n + (fr->fr_next == NULL ? extra_sep : 0))
/ (p_wmw + 1);
m = frame_minwidth(fr, next_curwin);
if (has_next_curwin)
hnc = frame_has_win(fr, next_curwin);
else
hnc = FALSE;
if (hnc) /* don't count next_curwin */
--wincount;
if (totwincount == 0)
new_size = room;
else
new_size = (wincount * room + ((unsigned)totwincount >> 1))
/ totwincount;
if (hnc) /* add next_curwin size */
{
next_curwin_size -= p_wiw - (m - n);
new_size += next_curwin_size;
room -= new_size - next_curwin_size;
}
else
room -= new_size;
new_size += n;
}
/* Skip frame that is full width when splitting or closing a
* window, unless equalizing all frames. */
if (!current || dir != 'v' || topfr->fr_parent != NULL
|| (new_size != fr->fr_width)
|| frame_has_win(fr, next_curwin))
win_equal_rec(next_curwin, current, fr, dir, col, row,
new_size, height);
col += new_size;
width -= new_size;
totwincount -= wincount;
}
}
else /* topfr->fr_layout == FR_COL */
{
topfr->fr_width = width;
topfr->fr_height = height;
if (dir != 'h') /* equalize frame heights */
{
/* Compute maximum number of windows vertically in this frame. */
n = frame_minheight(topfr, NOWIN);
/* add one for the bottom window if it doesn't have a statusline */
if (row + height == cmdline_row && p_ls == 0)
extra_sep = 1;
else
extra_sep = 0;
totwincount = (n + extra_sep) / (p_wmh + 1);
has_next_curwin = frame_has_win(topfr, next_curwin);
/*
* Compute height for "next_curwin" window and room available for
* other windows.
* "m" is the minimal height when counting p_wh for "next_curwin".
*/
m = frame_minheight(topfr, next_curwin);
room = height - m;
if (room < 0)
{
/* The room is less then 'winheight', use all space for the
* current window. */
next_curwin_size = p_wh + room;
room = 0;
}
else
{
next_curwin_size = -1;
FOR_ALL_FRAMES(fr, topfr->fr_child)
{
/* If 'winfixheight' set keep the window height if
* possible.
* Watch out for this window being the next_curwin. */
if (frame_fixed_height(fr))
{
n = frame_minheight(fr, NOWIN);
new_size = fr->fr_height;
if (frame_has_win(fr, next_curwin))
{
room += p_wh - p_wmh;
next_curwin_size = 0;
if (new_size < p_wh)
new_size = p_wh;
}
else
/* These windows don't use up room. */
totwincount -= (n + (fr->fr_next == NULL
? extra_sep : 0)) / (p_wmh + 1);
room -= new_size - n;
if (room < 0)
{
new_size += room;
room = 0;
}
fr->fr_newheight = new_size;
}
}
if (next_curwin_size == -1)
{
if (!has_next_curwin)
next_curwin_size = 0;
else if (totwincount > 1
&& (room + (totwincount - 2))
/ (totwincount - 1) > p_wh)
{
/* can make all windows higher than 'winheight',
* spread the room equally. */
next_curwin_size = (room + p_wh
+ (totwincount - 1) * p_wmh
+ (totwincount - 1)) / totwincount;
room -= next_curwin_size - p_wh;
}
else
next_curwin_size = p_wh;
}
}
if (has_next_curwin)
--totwincount; /* don't count curwin */
}
FOR_ALL_FRAMES(fr, topfr->fr_child)
{
wincount = 1;
if (fr->fr_next == NULL)
/* last frame gets all that remains (avoid roundoff error) */
new_size = height;
else if (dir == 'h')
new_size = fr->fr_height;
else if (frame_fixed_height(fr))
{
new_size = fr->fr_newheight;
wincount = 0; /* doesn't count as a sizeable window */
}
else
{
/* Compute the maximum number of windows vert. in "fr". */
n = frame_minheight(fr, NOWIN);
wincount = (n + (fr->fr_next == NULL ? extra_sep : 0))
/ (p_wmh + 1);
m = frame_minheight(fr, next_curwin);
if (has_next_curwin)
hnc = frame_has_win(fr, next_curwin);
else
hnc = FALSE;
if (hnc) /* don't count next_curwin */
--wincount;
if (totwincount == 0)
new_size = room;
else
new_size = (wincount * room + ((unsigned)totwincount >> 1))
/ totwincount;
if (hnc) /* add next_curwin size */
{
next_curwin_size -= p_wh - (m - n);
new_size += next_curwin_size;
room -= new_size - next_curwin_size;
}
else
room -= new_size;
new_size += n;
}
/* Skip frame that is full width when splitting or closing a
* window, unless equalizing all frames. */
if (!current || dir != 'h' || topfr->fr_parent != NULL
|| (new_size != fr->fr_height)
|| frame_has_win(fr, next_curwin))
win_equal_rec(next_curwin, current, fr, dir, col, row,
width, new_size);
row += new_size;
height -= new_size;
totwincount -= wincount;
}
}
} | 0 | [
"CWE-416"
] | vim | ec66c41d84e574baf8009dbc0bd088d2bc5b2421 | 178,067,894,842,255,620,000,000,000,000,000,000,000 | 316 | patch 8.1.2136: using freed memory with autocmd from fuzzer
Problem: using freed memory with autocmd from fuzzer. (Dhiraj Mishra,
Dominique Pelle)
Solution: Avoid using "wp" after autocommands. (closes #5041) |
static void vmxnet3_class_init(ObjectClass *class, void *data)
{
DeviceClass *dc = DEVICE_CLASS(class);
PCIDeviceClass *c = PCI_DEVICE_CLASS(class);
VMXNET3Class *vc = VMXNET3_DEVICE_CLASS(class);
c->realize = vmxnet3_pci_realize;
c->exit = vmxnet3_pci_uninit;
c->vendor_id = PCI_VENDOR_ID_VMWARE;
c->device_id = PCI_DEVICE_ID_VMWARE_VMXNET3;
c->revision = PCI_DEVICE_ID_VMWARE_VMXNET3_REVISION;
c->romfile = "efi-vmxnet3.rom";
c->class_id = PCI_CLASS_NETWORK_ETHERNET;
c->subsystem_vendor_id = PCI_VENDOR_ID_VMWARE;
c->subsystem_id = PCI_DEVICE_ID_VMWARE_VMXNET3;
vc->parent_dc_realize = dc->realize;
dc->realize = vmxnet3_realize;
dc->desc = "VMWare Paravirtualized Ethernet v3";
dc->reset = vmxnet3_qdev_reset;
dc->vmsd = &vmstate_vmxnet3;
dc->props = vmxnet3_properties;
set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
} | 0 | [
"CWE-416"
] | qemu | 6c352ca9b4ee3e1e286ea9e8434bd8e69ac7d0d8 | 326,688,733,519,975,720,000,000,000,000,000,000,000 | 23 | net: vmxnet3: check for device_active before write
Vmxnet3 device emulator does not check if the device is active,
before using it for write. It leads to a use after free issue,
if the vmxnet3_io_bar0_write routine is called after the device is
deactivated. Add check to avoid it.
Reported-by: Li Qiang <[email protected]>
Signed-off-by: Prasad J Pandit <[email protected]>
Acked-by: Dmitry Fleytman <[email protected]>
Signed-off-by: Jason Wang <[email protected]> |
SYSCALL_DEFINE1(timer_getoverrun, timer_t, timer_id)
{
struct k_itimer *timr;
int overrun;
unsigned long flags;
timr = lock_timer(timer_id, &flags);
if (!timr)
return -EINVAL;
overrun = timr->it_overrun_last;
unlock_timer(timr, flags);
return overrun;
} | 1 | [
"CWE-190"
] | linux | 78c9c4dfbf8c04883941445a195276bb4bb92c76 | 304,086,855,164,084,270,000,000,000,000,000,000,000 | 15 | posix-timers: Sanitize overrun handling
The posix timer overrun handling is broken because the forwarding functions
can return a huge number of overruns which does not fit in an int. As a
consequence timer_getoverrun(2) and siginfo::si_overrun can turn into
random number generators.
The k_clock::timer_forward() callbacks return a 64 bit value now. Make
k_itimer::ti_overrun[_last] 64bit as well, so the kernel internal
accounting is correct. Remove the temporary (int) casts.
Add a helper function which clamps the overrun value returned to user space
via timer_getoverrun(2) or siginfo::si_overrun limited to a positive value
between 0 and INT_MAX. INT_MAX is an indicator for user space that the
overrun value has been clamped.
Reported-by: Team OWL337 <[email protected]>
Signed-off-by: Thomas Gleixner <[email protected]>
Acked-by: John Stultz <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Michael Kerrisk <[email protected]>
Link: https://lkml.kernel.org/r/[email protected] |
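A minimal sketch of the clamping the message describes follows. It is not the kernel's actual helper; it only illustrates reducing a 64-bit internal overrun count to the 0..INT_MAX range that timer_getoverrun(2) and siginfo::si_overrun can report, with INT_MAX acting as the saturation indicator.

```c
#include <limits.h>
#include <stdint.h>

static int overrun_to_int(int64_t overrun)
{
    /* negative values are not meaningful to user space */
    if (overrun < 0)
        return 0;
    /* INT_MAX signals that the real overrun count was clamped */
    if (overrun > INT_MAX)
        return INT_MAX;
    return (int)overrun;
}
```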
static bool sssctl_backup_exist(const char **files)
{
int i;
for (i = 0; files[i] != NULL; i++) {
if (sssctl_backup_file_exists(files[i])) {
return true;
}
}
return false;
} | 0 | [
"CWE-78"
] | sssd | 7ab83f97e1cbefb78ece17232185bdd2985f0bbe | 142,185,963,329,527,700,000,000,000,000,000,000,000 | 12 | TOOLS: replace system() with execvp() to avoid execution of user supplied command
:relnote: A flaw was found in SSSD, where the sssctl command was
vulnerable to shell command injection via the logs-fetch and
cache-expire subcommands. This flaw allows an attacker to trick
the root user into running a specially crafted sssctl command,
such as via sudo, to gain root access. The highest threat from this
vulnerability is to confidentiality, integrity, as well as system
availability.
This patch fixes a flaw by replacing system() with execvp().
:fixes: CVE-2021-3621
Reviewed-by: Pavel Březina <[email protected]> |
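A minimal sketch of the system()-to-execvp() change described above, assuming a hypothetical helper that copies a user-influenced log path; it is not sssctl's actual code. The point is that execvp() receives the path as a single argv element, so shell metacharacters in it are never interpreted, unlike system(), which hands the whole string to /bin/sh.

```c
#include <stdlib.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

static int copy_file_noshell(const char *path, const char *dest)
{
    pid_t pid = fork();

    if (pid < 0)
        return -1;
    if (pid == 0) {
        /* each argument is passed verbatim; nothing is shell-parsed */
        char *const argv[] = { "cp", "--", (char *)path, (char *)dest, NULL };

        execvp("cp", argv);
        _exit(127);               /* only reached if execvp failed */
    }

    int status;
    if (waitpid(pid, &status, 0) < 0)
        return -1;
    return WIFEXITED(status) ? WEXITSTATUS(status) : -1;
}
```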
changed_line_abv_curs(void)
{
curwin->w_valid &= ~(VALID_WROW|VALID_WCOL|VALID_VIRTCOL|VALID_CROW
|VALID_CHEIGHT|VALID_TOPLINE);
} | 0 | [
"CWE-122"
] | vim | 777e7c21b7627be80961848ac560cb0a9978ff43 | 99,224,152,036,844,760,000,000,000,000,000,000,000 | 5 | patch 8.2.3564: invalid memory access when scrolling without valid screen
Problem: Invalid memory access when scrolling without a valid screen.
Solution: Do not set VALID_BOTLINE in w_valid. |
apr_byte_t oidc_util_json_validate_cnf_x5t_s256(request_rec *r,
int token_binding_policy, const char *x5t_256_str) {
const char *fingerprint = NULL;
fingerprint = oidc_util_get_client_cert_fingerprint(r);
if (fingerprint == NULL) {
oidc_debug(r, "no certificate (fingerprint) provided");
goto out_err;
}
if (apr_strnatcmp(fingerprint, x5t_256_str) != 0) {
oidc_warn(r,
"fingerprint of provided cert (%s) does not match cnf[\"x5t#S256\"] (%s)",
fingerprint, x5t_256_str);
goto out_err;
}
oidc_debug(r, "fingerprint of provided cert (%s) matches cnf[\"x5t#S256\"]",
fingerprint);
return TRUE;
out_err:
if (token_binding_policy == OIDC_TOKEN_BINDING_POLICY_OPTIONAL)
return TRUE;
if (token_binding_policy == OIDC_TOKEN_BINDING_POLICY_ENFORCED)
return FALSE;
// token_binding_policy == OIDC_TOKEN_BINDING_POLICY_REQURIED
return (fingerprint == NULL);
} | 0 | [
"CWE-79"
] | mod_auth_openidc | 55ea0a085290cd2c8cdfdd960a230cbc38ba8b56 | 255,346,586,227,723,000,000,000,000,000,000,000,000 | 32 | Add a function to escape Javascript characters |
static inline void __init check_numabalancing_enable(void)
{
} | 0 | [
"CWE-388"
] | linux | cf01fb9985e8deb25ccf0ea54d916b8871ae0e62 | 283,952,581,792,305,480,000,000,000,000,000,000,000 | 3 | mm/mempolicy.c: fix error handling in set_mempolicy and mbind.
In the case that compat_get_bitmap fails we do not want to copy the
bitmap to the user as it will contain uninitialized stack data and leak
sensitive data.
Signed-off-by: Chris Salls <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
xmlSprintfElementContent(char *buf ATTRIBUTE_UNUSED,
xmlElementContentPtr content ATTRIBUTE_UNUSED,
int englob ATTRIBUTE_UNUSED) {
} | 0 | [] | libxml2 | 932cc9896ab41475d4aa429c27d9afd175959d74 | 6,759,270,582,994,264,000,000,000,000,000,000,000 | 4 | Fix buffer size checks in xmlSnprintfElementContent
xmlSnprintfElementContent failed to correctly check the available
buffer space in two locations.
Fixes bug 781333 (CVE-2017-9047) and bug 781701 (CVE-2017-9048).
Thanks to Marcel Böhme and Thuan Pham for the report. |
nat_select_range_tuple(struct conntrack *ct, const struct conn *conn,
struct conn *nat_conn)
{
enum { MIN_NAT_EPHEMERAL_PORT = 1024,
MAX_NAT_EPHEMERAL_PORT = 65535 };
uint16_t min_port;
uint16_t max_port;
uint16_t first_port;
uint32_t hash = nat_range_hash(conn, ct->hash_basis);
if ((conn->nat_info->nat_action & NAT_ACTION_SRC) &&
(!(conn->nat_info->nat_action & NAT_ACTION_SRC_PORT))) {
min_port = ntohs(conn->key.src.port);
max_port = ntohs(conn->key.src.port);
first_port = min_port;
} else if ((conn->nat_info->nat_action & NAT_ACTION_DST) &&
(!(conn->nat_info->nat_action & NAT_ACTION_DST_PORT))) {
min_port = ntohs(conn->key.dst.port);
max_port = ntohs(conn->key.dst.port);
first_port = min_port;
} else {
uint16_t deltap = conn->nat_info->max_port - conn->nat_info->min_port;
uint32_t port_index = hash % (deltap + 1);
first_port = conn->nat_info->min_port + port_index;
min_port = conn->nat_info->min_port;
max_port = conn->nat_info->max_port;
}
uint32_t deltaa = 0;
uint32_t address_index;
union ct_addr ct_addr;
memset(&ct_addr, 0, sizeof ct_addr);
union ct_addr max_ct_addr;
memset(&max_ct_addr, 0, sizeof max_ct_addr);
max_ct_addr = conn->nat_info->max_addr;
if (conn->key.dl_type == htons(ETH_TYPE_IP)) {
deltaa = ntohl(conn->nat_info->max_addr.ipv4) -
ntohl(conn->nat_info->min_addr.ipv4);
address_index = hash % (deltaa + 1);
ct_addr.ipv4 = htonl(
ntohl(conn->nat_info->min_addr.ipv4) + address_index);
} else {
deltaa = nat_ipv6_addrs_delta(&conn->nat_info->min_addr.ipv6,
&conn->nat_info->max_addr.ipv6);
/* deltaa must be within 32 bits for full hash coverage. A 64 or
* 128 bit hash is unnecessary and hence not used here. Most code
* is kept common with V4; nat_ipv6_addrs_delta() will do the
* enforcement via max_ct_addr. */
max_ct_addr = conn->nat_info->min_addr;
nat_ipv6_addr_increment(&max_ct_addr.ipv6, deltaa);
address_index = hash % (deltaa + 1);
ct_addr.ipv6 = conn->nat_info->min_addr.ipv6;
nat_ipv6_addr_increment(&ct_addr.ipv6, address_index);
}
uint16_t port = first_port;
bool all_ports_tried = false;
/* For DNAT or for specified port ranges, we don't use ephemeral ports. */
bool ephemeral_ports_tried
= conn->nat_info->nat_action & NAT_ACTION_DST ||
conn->nat_info->nat_action & NAT_ACTION_SRC_PORT
? true : false;
union ct_addr first_addr = ct_addr;
bool pat_enabled = conn->key.nw_proto != IPPROTO_ICMP &&
conn->key.nw_proto != IPPROTO_ICMPV6;
while (true) {
if (conn->nat_info->nat_action & NAT_ACTION_SRC) {
nat_conn->rev_key.dst.addr = ct_addr;
if (pat_enabled) {
nat_conn->rev_key.dst.port = htons(port);
}
} else {
nat_conn->rev_key.src.addr = ct_addr;
if (pat_enabled) {
nat_conn->rev_key.src.port = htons(port);
}
}
bool found = conn_lookup(ct, &nat_conn->rev_key, time_msec(), NULL,
NULL);
if (!found) {
return true;
} else if (pat_enabled && !all_ports_tried) {
if (min_port == max_port) {
all_ports_tried = true;
} else if (port == max_port) {
port = min_port;
} else {
port++;
}
if (port == first_port) {
all_ports_tried = true;
}
} else {
if (memcmp(&ct_addr, &max_ct_addr, sizeof ct_addr)) {
if (conn->key.dl_type == htons(ETH_TYPE_IP)) {
ct_addr.ipv4 = htonl(ntohl(ct_addr.ipv4) + 1);
} else {
nat_ipv6_addr_increment(&ct_addr.ipv6, 1);
}
} else {
ct_addr = conn->nat_info->min_addr;
}
if (!memcmp(&ct_addr, &first_addr, sizeof ct_addr)) {
if (pat_enabled && !ephemeral_ports_tried) {
ephemeral_ports_tried = true;
ct_addr = conn->nat_info->min_addr;
first_addr = ct_addr;
min_port = MIN_NAT_EPHEMERAL_PORT;
max_port = MAX_NAT_EPHEMERAL_PORT;
} else {
break;
}
}
first_port = min_port;
port = first_port;
all_ports_tried = false;
}
}
return false;
} | 0 | [
"CWE-400"
] | ovs | 59b588604b89e85b463984ba08a99badb4fcba15 | 337,517,083,650,545,930,000,000,000,000,000,000,000 | 124 | flow: Support extra padding length.
Although not required, padding can be optionally added until
the packet length is MTU bytes. A packet with extra padding
currently fails sanity checks.
Vulnerability: CVE-2020-35498
Fixes: fa8d9001a624 ("miniflow_extract: Properly handle small IP packets.")
Reported-by: Joakim Hindersson <[email protected]>
Acked-by: Ilya Maximets <[email protected]>
Signed-off-by: Flavio Leitner <[email protected]>
Signed-off-by: Ilya Maximets <[email protected]> |
static NORETURN void die_nicely(const char *err, va_list params)
{
static int zombie;
char message[2 * PATH_MAX];
vsnprintf(message, sizeof(message), err, params);
fputs("fatal: ", stderr);
fputs(message, stderr);
fputc('\n', stderr);
if (!zombie) {
zombie = 1;
write_crash_report(message);
end_packfile();
unkeep_all_packs();
dump_marks();
}
exit(128);
} | 0 | [] | git | 68061e3470210703cb15594194718d35094afdc0 | 182,413,600,959,149,050,000,000,000,000,000,000,000 | 19 | fast-import: disallow "feature export-marks" by default
The fast-import stream command "feature export-marks=<path>" lets the
stream write marks to an arbitrary path. This may be surprising if you
are running fast-import against an untrusted input (which otherwise
cannot do anything except update Git objects and refs).
Let's disallow the use of this feature by default, and provide a
command-line option to re-enable it (you can always just use the
command-line --export-marks as well, but the in-stream version provides
an easy way for exporters to control the process).
This is a backwards-incompatible change, since the default is flipping
to the new, safer behavior. However, since the main users of the
in-stream versions would be import/export-based remote helpers, and
since we trust remote helpers already (which are already running
arbitrary code), we'll pass the new option by default when reading a
remote helper's stream. This should minimize the impact.
Note that the implementation isn't totally simple, as we have to work
around the fact that fast-import doesn't parse its command-line options
until after it has read any "feature" lines from the stream. This is how
it lets command-line options override in-stream. But in our case, it's
important to parse the new --allow-unsafe-features first.
There are three options for resolving this:
1. Do a separate "early" pass over the options. This is easy for us to
do because there are no command-line options that allow the
"unstuck" form (so there's no chance of us mistaking an argument
for an option), though it does introduce a risk of incorrect
parsing later (e.g,. if we convert to parse-options).
2. Move the option parsing phase back to the start of the program, but
teach the stream-reading code never to override an existing value.
This is tricky, because stream "feature" lines override each other
(meaning we'd have to start tracking the source for every option).
3. Accept that we might parse a "feature export-marks" line that is
forbidden, as long we don't _act_ on it until after we've parsed
the command line options.
This would, in fact, work with the current code, but only because
the previous patch fixed the export-marks parser to avoid touching
the filesystem.
So while it works, it does carry risk of somebody getting it wrong
in the future in a rather subtle and unsafe way.
I've gone with option (1) here as simple, safe, and unlikely to cause
regressions.
This fixes CVE-2019-1348.
Signed-off-by: Jeff King <[email protected]> |
static pfunc check_literal(struct jv_parser* p) {
if (p->tokenpos == 0) return 0;
const char* pattern = 0;
int plen;
jv v;
switch (p->tokenbuf[0]) {
case 't': pattern = "true"; plen = 4; v = jv_true(); break;
case 'f': pattern = "false"; plen = 5; v = jv_false(); break;
case 'n': pattern = "null"; plen = 4; v = jv_null(); break;
}
if (pattern) {
if (p->tokenpos != plen) return "Invalid literal";
for (int i=0; i<plen; i++)
if (p->tokenbuf[i] != pattern[i])
return "Invalid literal";
TRY(value(p, v));
} else {
// FIXME: better parser
p->tokenbuf[p->tokenpos] = 0; // FIXME: invalid
char* end = 0;
double d = jvp_strtod(&p->dtoa, p->tokenbuf, &end);
if (end == 0 || *end != 0)
return "Invalid numeric literal";
TRY(value(p, jv_number(d)));
}
p->tokenpos = 0;
return 0;
} | 1 | [
"CWE-119",
"CWE-787"
] | jq | 8eb1367ca44e772963e704a700ef72ae2e12babd | 224,256,725,157,612,950,000,000,000,000,000,000,000 | 29 | Heap buffer overflow in tokenadd() (fix #105)
This was an off-by one: the NUL terminator byte was not allocated on
resize. This was triggered by JSON-encoded numbers longer than 256
bytes. |
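A minimal sketch of the off-by-one fix the message describes, not jq's actual tokenadd(): when the token buffer is grown, one extra byte is reserved so the later `tokenbuf[tokenpos] = 0` in check_literal() stays in bounds. The struct and function names here are hypothetical.

```c
#include <stdlib.h>

struct tokbuf {
    char *buf;
    int pos;
    int len;
};

static int tokadd(struct tokbuf *t, char c)
{
    /* grow so there is always room for this byte plus a trailing NUL,
     * which the caller writes at buf[pos] after the last character */
    if (t->pos + 1 >= t->len) {
        int newlen = t->len * 2 + 256;
        char *nb = realloc(t->buf, newlen);
        if (nb == NULL)
            return -1;
        t->buf = nb;
        t->len = newlen;
    }
    t->buf[t->pos++] = c;
    return 0;
}
```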
static int check_rename_constraints(struct ldb_message *msg,
struct samldb_ctx *ac,
struct ldb_dn *olddn, struct ldb_dn *newdn)
{
struct ldb_context *ldb = ldb_module_get_ctx(ac->module);
struct ldb_dn *dn1, *dn2, *nc_root;
int32_t systemFlags;
bool move_op = false;
bool rename_op = false;
int ret;
/* Skip the checks if old and new DN are the same, or if we have the
* relax control specified or if the returned objects is already
* deleted and needs only to be moved for consistency. */
if (ldb_dn_compare(olddn, newdn) == 0) {
return LDB_SUCCESS;
}
if (ldb_request_get_control(ac->req, LDB_CONTROL_RELAX_OID) != NULL) {
return LDB_SUCCESS;
}
if (ldb_msg_find_attr_as_bool(msg, "isDeleted", false)) {
/*
* check originating request if we are supposed
* to "see" this record in first place.
*/
if (ldb_request_get_control(ac->req, LDB_CONTROL_SHOW_DELETED_OID) == NULL) {
return LDB_ERR_NO_SUCH_OBJECT;
}
return LDB_ERR_UNWILLING_TO_PERFORM;
}
/* Objects under CN=System */
dn1 = ldb_dn_copy(ac, ldb_get_default_basedn(ldb));
if (dn1 == NULL) return ldb_oom(ldb);
if ( ! ldb_dn_add_child_fmt(dn1, "CN=System")) {
talloc_free(dn1);
return LDB_ERR_OPERATIONS_ERROR;
}
if ((ldb_dn_compare_base(dn1, olddn) == 0) &&
(ldb_dn_compare_base(dn1, newdn) != 0)) {
talloc_free(dn1);
ldb_asprintf_errstring(ldb,
"subtree_rename: Cannot move/rename %s. Objects under CN=System have to stay under it!",
ldb_dn_get_linearized(olddn));
return LDB_ERR_OTHER;
}
talloc_free(dn1);
/* LSA objects */
if ((samdb_find_attribute(ldb, msg, "objectClass", "secret") != NULL) ||
(samdb_find_attribute(ldb, msg, "objectClass", "trustedDomain") != NULL)) {
ldb_asprintf_errstring(ldb,
"subtree_rename: Cannot move/rename %s. It's an LSA-specific object!",
ldb_dn_get_linearized(olddn));
return LDB_ERR_UNWILLING_TO_PERFORM;
}
/* subnet objects */
if (samdb_find_attribute(ldb, msg, "objectclass", "subnet") != NULL) {
ret = samldb_verify_subnet(ac, newdn);
if (ret != LDB_SUCCESS) {
return ret;
}
}
/* systemFlags */
dn1 = ldb_dn_get_parent(ac, olddn);
if (dn1 == NULL) return ldb_oom(ldb);
dn2 = ldb_dn_get_parent(ac, newdn);
if (dn2 == NULL) return ldb_oom(ldb);
if (ldb_dn_compare(dn1, dn2) == 0) {
rename_op = true;
} else {
move_op = true;
}
talloc_free(dn1);
talloc_free(dn2);
systemFlags = ldb_msg_find_attr_as_int(msg, "systemFlags", 0);
/* Fetch name context */
ret = dsdb_find_nc_root(ldb, ac, olddn, &nc_root);
if (ret != LDB_SUCCESS) {
return ret;
}
if (ldb_dn_compare(nc_root, ldb_get_schema_basedn(ldb)) == 0) {
if (move_op) {
ldb_asprintf_errstring(ldb,
"subtree_rename: Cannot move %s within schema partition",
ldb_dn_get_linearized(olddn));
return LDB_ERR_UNWILLING_TO_PERFORM;
}
if (rename_op &&
(systemFlags & SYSTEM_FLAG_SCHEMA_BASE_OBJECT) != 0) {
ldb_asprintf_errstring(ldb,
"subtree_rename: Cannot rename %s within schema partition",
ldb_dn_get_linearized(olddn));
return LDB_ERR_UNWILLING_TO_PERFORM;
}
} else if (ldb_dn_compare(nc_root, ldb_get_config_basedn(ldb)) == 0) {
if (move_op &&
(systemFlags & SYSTEM_FLAG_CONFIG_ALLOW_MOVE) == 0) {
/* Here we have to do more: control the
* "ALLOW_LIMITED_MOVE" flag. This means that the
* grand-grand-parents of two objects have to be equal
* in order to perform the move (this is used for
* moving "server" objects in the "sites" container). */
bool limited_move =
systemFlags & SYSTEM_FLAG_CONFIG_ALLOW_LIMITED_MOVE;
if (limited_move) {
dn1 = ldb_dn_copy(ac, olddn);
if (dn1 == NULL) return ldb_oom(ldb);
dn2 = ldb_dn_copy(ac, newdn);
if (dn2 == NULL) return ldb_oom(ldb);
limited_move &= ldb_dn_remove_child_components(dn1, 3);
limited_move &= ldb_dn_remove_child_components(dn2, 3);
limited_move &= ldb_dn_compare(dn1, dn2) == 0;
talloc_free(dn1);
talloc_free(dn2);
}
if (!limited_move
&& ldb_request_get_control(ac->req, DSDB_CONTROL_RESTORE_TOMBSTONE_OID) == NULL) {
ldb_asprintf_errstring(ldb,
"subtree_rename: Cannot move %s to %s in config partition",
ldb_dn_get_linearized(olddn), ldb_dn_get_linearized(newdn));
return LDB_ERR_UNWILLING_TO_PERFORM;
}
}
if (rename_op &&
(systemFlags & SYSTEM_FLAG_CONFIG_ALLOW_RENAME) == 0) {
ldb_asprintf_errstring(ldb,
"subtree_rename: Cannot rename %s to %s within config partition",
ldb_dn_get_linearized(olddn), ldb_dn_get_linearized(newdn));
return LDB_ERR_UNWILLING_TO_PERFORM;
}
} else if (ldb_dn_compare(nc_root, ldb_get_default_basedn(ldb)) == 0) {
if (move_op &&
(systemFlags & SYSTEM_FLAG_DOMAIN_DISALLOW_MOVE) != 0) {
ldb_asprintf_errstring(ldb,
"subtree_rename: Cannot move %s to %s - DISALLOW_MOVE set",
ldb_dn_get_linearized(olddn), ldb_dn_get_linearized(newdn));
return LDB_ERR_UNWILLING_TO_PERFORM;
}
if (rename_op &&
(systemFlags & SYSTEM_FLAG_DOMAIN_DISALLOW_RENAME) != 0) {
ldb_asprintf_errstring(ldb,
"subtree_rename: Cannot rename %s to %s - DISALLOW_RENAME set",
ldb_dn_get_linearized(olddn), ldb_dn_get_linearized(newdn));
return LDB_ERR_UNWILLING_TO_PERFORM;
}
}
talloc_free(nc_root);
return LDB_SUCCESS;
} | 0 | [
"CWE-200"
] | samba | 0a3aa5f908e351201dc9c4d4807b09ed9eedff77 | 68,591,635,878,225,880,000,000,000,000,000,000,000 | 172 | CVE-2022-32746 ldb: Make use of functions for appending to an ldb_message
This aims to minimise usage of the error-prone pattern of searching for
a just-added message element in order to make modifications to it (and
potentially finding the wrong element).
BUG: https://bugzilla.samba.org/show_bug.cgi?id=15009
Signed-off-by: Joseph Sutton <[email protected]> |
static enum rdma_link_layer hns_roce_get_link_layer(struct ib_device *device,
u8 port_num)
{
return IB_LINK_LAYER_ETHERNET;
} | 0 | [
"CWE-665"
] | kernel | 72be029e947510dd6cbbbaf51879622af26e4200 | 253,683,046,923,659,800,000,000,000,000,000,000,000 | 5 | RDMA/hns: Fix init resp when alloc ucontext (bsc#1104427
FATE#326416).
suse-commit: 8e5436bc2806cbe952f043cc995804c188ce047a |
void MonClient::_send_command(MonCommand *r)
{
if (r->is_tell()) {
++r->send_attempts;
if (r->send_attempts > cct->_conf->mon_client_directed_command_retry) {
_finish_command(r, -ENXIO, "mon unavailable");
return;
}
// tell-style command
if (monmap.min_mon_release >= ceph_release_t::octopus) {
if (r->target_con) {
r->target_con->mark_down();
}
if (r->target_rank >= 0) {
if (r->target_rank >= (int)monmap.size()) {
ldout(cct, 10) << " target " << r->target_rank
<< " >= max mon " << monmap.size() << dendl;
_finish_command(r, -ENOENT, "mon rank dne");
return;
}
r->target_con = messenger->connect_to_mon(
monmap.get_addrs(r->target_rank), true /* anon */);
} else {
if (!monmap.contains(r->target_name)) {
ldout(cct, 10) << " target " << r->target_name
<< " not present in monmap" << dendl;
_finish_command(r, -ENOENT, "mon dne");
return;
}
r->target_con = messenger->connect_to_mon(
monmap.get_addrs(r->target_name), true /* anon */);
}
r->target_session.reset(new MonConnection(cct, r->target_con, 0,
&auth_registry));
r->target_session->start(monmap.get_epoch(), entity_name);
r->last_send_attempt = ceph_clock_now();
MCommand *m = new MCommand(monmap.fsid);
m->set_tid(r->tid);
m->cmd = r->cmd;
m->set_data(r->inbl);
r->target_session->queue_command(m);
return;
}
// ugly legacy handling of pre-octopus mons
entity_addr_t peer;
if (active_con) {
peer = active_con->get_con()->get_peer_addr();
}
if (r->target_rank >= 0 &&
r->target_rank != monmap.get_rank(peer)) {
ldout(cct, 10) << __func__ << " " << r->tid << " " << r->cmd
<< " wants rank " << r->target_rank
<< ", reopening session"
<< dendl;
if (r->target_rank >= (int)monmap.size()) {
ldout(cct, 10) << " target " << r->target_rank
<< " >= max mon " << monmap.size() << dendl;
_finish_command(r, -ENOENT, "mon rank dne");
return;
}
_reopen_session(r->target_rank);
return;
}
if (r->target_name.length() &&
r->target_name != monmap.get_name(peer)) {
ldout(cct, 10) << __func__ << " " << r->tid << " " << r->cmd
<< " wants mon " << r->target_name
<< ", reopening session"
<< dendl;
if (!monmap.contains(r->target_name)) {
ldout(cct, 10) << " target " << r->target_name
<< " not present in monmap" << dendl;
_finish_command(r, -ENOENT, "mon dne");
return;
}
_reopen_session(monmap.get_rank(r->target_name));
return;
}
// fall-thru to send 'normal' CLI command
}
// normal CLI command
ldout(cct, 10) << __func__ << " " << r->tid << " " << r->cmd << dendl;
auto m = ceph::make_message<MMonCommand>(monmap.fsid);
m->set_tid(r->tid);
m->cmd = r->cmd;
m->set_data(r->inbl);
_send_mon_message(std::move(m));
return;
} | 0 | [
"CWE-294"
] | ceph | 6c14c2fb5650426285428dfe6ca1597e5ea1d07d | 237,494,055,160,959,770,000,000,000,000,000,000,000 | 95 | mon/MonClient: bring back CEPHX_V2 authorizer challenges
Commit c58c5754dfd2 ("msg/async/ProtocolV1: use AuthServer and
AuthClient") introduced a backwards compatibility issue into msgr1.
To fix it, commit 321548010578 ("mon/MonClient: skip CEPHX_V2
challenge if client doesn't support it") set out to skip authorizer
challenges for peers that don't support CEPHX_V2. However, it
made it so that authorizer challenges are skipped for all peers in
both msgr1 and msgr2 cases, effectively disabling the protection
against replay attacks that was put in place in commit f80b848d3f83
("auth/cephx: add authorizer challenge", CVE-2018-1128).
This is because con->get_features() always returns 0 at that
point. In msgr1 case, the peer shares its features along with the
authorizer, but while they are available in connect_msg.features they
aren't assigned to con until ProtocolV1::open(). In msgr2 case, the
peer doesn't share its features until much later (in CLIENT_IDENT
frame, i.e. after the authentication phase). The result is that
!CEPHX_V2 branch is taken in all cases and replay attack protection
is lost.
Only clusters with cephx_service_require_version set to 2 on the
service daemons would not be silently downgraded. But, since the
default is 1 and there are no reports of looping on BADAUTHORIZER
faults, I'm pretty sure that no one has ever done that. Note that
cephx_require_version set to 2 would have no effect even though it
is supposed to be stronger than cephx_service_require_version
because MonClient::handle_auth_request() didn't check it.
To fix:
- for msgr1, check connect_msg.features (as was done before commit
c58c5754dfd2) and challenge if CEPHX_V2 is supported. Together
with two preceding patches that resurrect proper cephx_* option
handling in msgr1, this covers both "I want old clients to work"
and "I wish to require better authentication" use cases.
- for msgr2, don't check anything and always challenge. CEPHX_V2
predates msgr2, anyone speaking msgr2 must support it.
Signed-off-by: Ilya Dryomov <[email protected]>
(cherry picked from commit 4a82c72e3bdddcb625933e83af8b50a444b961f1) |
__sk_dst_set(struct sock *sk, struct dst_entry *dst)
{
struct dst_entry *old_dst;
sk_tx_queue_clear(sk);
/*
* This can be called while sk is owned by the caller only,
* with no state that can be checked in a rcu_dereference_check() cond
*/
old_dst = rcu_dereference_raw(sk->sk_dst_cache);
rcu_assign_pointer(sk->sk_dst_cache, dst);
dst_release(old_dst);
} | 0 | [
"CWE-400"
] | linux-2.6 | c377411f2494a931ff7facdbb3a6839b1266bcf6 | 203,485,310,745,299,730,000,000,000,000,000,000,000 | 13 | net: sk_add_backlog() take rmem_alloc into account
Current socket backlog limit is not enough to really stop DDOS attacks,
because user thread spend many time to process a full backlog each
round, and user might crazy spin on socket lock.
We should add backlog size and receive_queue size (aka rmem_alloc) to
pace writers, and let user run without being slow down too much.
Introduce a sk_rcvqueues_full() helper, to avoid taking socket lock in
stress situations.
Under huge stress from a multiqueue/RPS enabled NIC, a single flow udp
receiver can now process ~200.000 pps (instead of ~100 pps before the
patch) on a 8 core machine.
Signed-off-by: Eric Dumazet <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
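A minimal sketch of the pacing check the message describes, with hypothetical parameters instead of the kernel's struct sock fields: both the backlog and the memory already charged to the receive queue (rmem_alloc) count against the receive budget before more data is queued.

```c
#include <stdbool.h>

static bool rcvqueues_full(unsigned int backlog_len,
                           unsigned int rmem_alloc,
                           unsigned int rcvbuf_limit)
{
    /* queued-but-unprocessed backlog plus memory already sitting in the
     * receive queue must both fit inside the socket's receive budget */
    return backlog_len + rmem_alloc > rcvbuf_limit;
}
```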
static int set_config(struct usb_composite_dev *cdev,
const struct usb_ctrlrequest *ctrl, unsigned number)
{
struct usb_gadget *gadget = cdev->gadget;
struct usb_configuration *c = NULL;
int result = -EINVAL;
unsigned power = gadget_is_otg(gadget) ? 8 : 100;
int tmp;
if (number) {
list_for_each_entry(c, &cdev->configs, list) {
if (c->bConfigurationValue == number) {
/*
* We disable the FDs of the previous
* configuration only if the new configuration
* is a valid one
*/
if (cdev->config)
reset_config(cdev);
result = 0;
break;
}
}
if (result < 0)
goto done;
} else { /* Zero configuration value - need to reset the config */
if (cdev->config)
reset_config(cdev);
result = 0;
}
DBG(cdev, "%s config #%d: %s\n",
usb_speed_string(gadget->speed),
number, c ? c->label : "unconfigured");
if (!c)
goto done;
usb_gadget_set_state(gadget, USB_STATE_CONFIGURED);
cdev->config = c;
/* Initialize all interfaces by setting them to altsetting zero. */
for (tmp = 0; tmp < MAX_CONFIG_INTERFACES; tmp++) {
struct usb_function *f = c->interface[tmp];
struct usb_descriptor_header **descriptors;
if (!f)
break;
/*
* Record which endpoints are used by the function. This is used
* to dispatch control requests targeted at that endpoint to the
* function's setup callback instead of the current
* configuration's setup callback.
*/
descriptors = function_descriptors(f, gadget->speed);
for (; *descriptors; ++descriptors) {
struct usb_endpoint_descriptor *ep;
int addr;
if ((*descriptors)->bDescriptorType != USB_DT_ENDPOINT)
continue;
ep = (struct usb_endpoint_descriptor *)*descriptors;
addr = ((ep->bEndpointAddress & 0x80) >> 3)
| (ep->bEndpointAddress & 0x0f);
set_bit(addr, f->endpoints);
}
result = f->set_alt(f, tmp, 0);
if (result < 0) {
DBG(cdev, "interface %d (%s/%p) alt 0 --> %d\n",
tmp, f->name, f, result);
reset_config(cdev);
goto done;
}
if (result == USB_GADGET_DELAYED_STATUS) {
DBG(cdev,
"%s: interface %d (%s) requested delayed status\n",
__func__, tmp, f->name);
cdev->delayed_status++;
DBG(cdev, "delayed_status count %d\n",
cdev->delayed_status);
}
}
/* when we return, be sure our power usage is valid */
if (c->MaxPower || (c->bmAttributes & USB_CONFIG_ATT_SELFPOWER))
power = c->MaxPower;
else
power = CONFIG_USB_GADGET_VBUS_DRAW;
if (gadget->speed < USB_SPEED_SUPER)
power = min(power, 500U);
else
power = min(power, 900U);
done:
if (power <= USB_SELF_POWER_VBUS_MAX_DRAW)
usb_gadget_set_selfpowered(gadget);
else
usb_gadget_clear_selfpowered(gadget);
usb_gadget_vbus_draw(gadget, power);
if (result >= 0 && cdev->delayed_status)
result = USB_GADGET_DELAYED_STATUS;
return result;
} | 0 | [
"CWE-476"
] | linux | 75e5b4849b81e19e9efe1654b30d7f3151c33c2c | 157,157,837,839,027,930,000,000,000,000,000,000,000 | 110 | USB: gadget: validate interface OS descriptor requests
Stall the control endpoint in case provided index exceeds array size of
MAX_CONFIG_INTERFACES or when the retrieved function pointer is null.
Signed-off-by: Szymon Heidrich <[email protected]>
Cc: [email protected]
Signed-off-by: Greg Kroah-Hartman <[email protected]> |
ASN1_TIME *X509_time_adj(ASN1_TIME *s, long offset_sec, time_t *in_tm)
{
return X509_time_adj_ex(s, 0, offset_sec, in_tm);
} | 0 | [
"CWE-119"
] | openssl | 370ac320301e28bb615cee80124c042649c95d14 | 329,183,413,789,228,600,000,000,000,000,000,000,000 | 4 | Fix length checks in X509_cmp_time to avoid out-of-bounds reads.
Also tighten X509_cmp_time to reject more than three fractional
seconds in the time; and to reject trailing garbage after the offset.
CVE-2015-1789
Reviewed-by: Viktor Dukhovni <[email protected]>
Reviewed-by: Richard Levitte <[email protected]> |
static int decomp(unsigned char *srcBuf, unsigned char **jpegBuf,
unsigned long *jpegSize, unsigned char *dstBuf, int w, int h,
int subsamp, int jpegQual, char *fileName, int tilew,
int tileh)
{
char tempStr[1024], sizeStr[24] = "\0", qualStr[13] = "\0", *ptr;
FILE *file = NULL;
tjhandle handle = NULL;
int row, col, iter = 0, dstBufAlloc = 0, retval = 0;
double elapsed, elapsedDecode;
int ps = tjPixelSize[pf];
int scaledw = TJSCALED(w, sf);
int scaledh = TJSCALED(h, sf);
int pitch = scaledw * ps;
int ntilesw = (w + tilew - 1) / tilew, ntilesh = (h + tileh - 1) / tileh;
unsigned char *dstPtr, *dstPtr2, *yuvBuf = NULL;
if (jpegQual > 0) {
snprintf(qualStr, 13, "_Q%d", jpegQual);
qualStr[12] = 0;
}
if ((handle = tjInitDecompress()) == NULL)
THROW_TJ("executing tjInitDecompress()");
if (dstBuf == NULL) {
if ((unsigned long long)pitch * (unsigned long long)scaledh >
(unsigned long long)((size_t)-1))
THROW("allocating destination buffer", "Image is too large");
if ((dstBuf = (unsigned char *)malloc((size_t)pitch * scaledh)) == NULL)
THROW_UNIX("allocating destination buffer");
dstBufAlloc = 1;
}
/* Set the destination buffer to gray so we know whether the decompressor
attempted to write to it */
memset(dstBuf, 127, (size_t)pitch * scaledh);
if (doYUV) {
int width = doTile ? tilew : scaledw;
int height = doTile ? tileh : scaledh;
unsigned long yuvSize = tjBufSizeYUV2(width, yuvPad, height, subsamp);
if (yuvSize == (unsigned long)-1)
THROW_TJ("allocating YUV buffer");
if ((yuvBuf = (unsigned char *)malloc(yuvSize)) == NULL)
THROW_UNIX("allocating YUV buffer");
memset(yuvBuf, 127, yuvSize);
}
/* Benchmark */
iter = -1;
elapsed = elapsedDecode = 0.;
while (1) {
int tile = 0;
double start = getTime();
for (row = 0, dstPtr = dstBuf; row < ntilesh;
row++, dstPtr += (size_t)pitch * tileh) {
for (col = 0, dstPtr2 = dstPtr; col < ntilesw;
col++, tile++, dstPtr2 += ps * tilew) {
int width = doTile ? min(tilew, w - col * tilew) : scaledw;
int height = doTile ? min(tileh, h - row * tileh) : scaledh;
if (doYUV) {
double startDecode;
if (tjDecompressToYUV2(handle, jpegBuf[tile], jpegSize[tile], yuvBuf,
width, yuvPad, height, flags) == -1)
THROW_TJ("executing tjDecompressToYUV2()");
startDecode = getTime();
if (tjDecodeYUV(handle, yuvBuf, yuvPad, subsamp, dstPtr2, width,
pitch, height, pf, flags) == -1)
THROW_TJ("executing tjDecodeYUV()");
if (iter >= 0) elapsedDecode += getTime() - startDecode;
} else if (tjDecompress2(handle, jpegBuf[tile], jpegSize[tile],
dstPtr2, width, pitch, height, pf,
flags) == -1)
THROW_TJ("executing tjDecompress2()");
}
}
elapsed += getTime() - start;
if (iter >= 0) {
iter++;
if (elapsed >= benchTime) break;
} else if (elapsed >= warmup) {
iter = 0;
elapsed = elapsedDecode = 0.;
}
}
if (doYUV) elapsed -= elapsedDecode;
if (tjDestroy(handle) == -1) THROW_TJ("executing tjDestroy()");
handle = NULL;
if (quiet) {
printf("%-6s%s",
sigfig((double)(w * h) / 1000000. * (double)iter / elapsed, 4,
tempStr, 1024),
quiet == 2 ? "\n" : " ");
if (doYUV)
printf("%s\n",
sigfig((double)(w * h) / 1000000. * (double)iter / elapsedDecode,
4, tempStr, 1024));
else if (quiet != 2) printf("\n");
} else {
printf("%s --> Frame rate: %f fps\n",
doYUV ? "Decomp to YUV" : "Decompress ", (double)iter / elapsed);
printf(" Throughput: %f Megapixels/sec\n",
(double)(w * h) / 1000000. * (double)iter / elapsed);
if (doYUV) {
printf("YUV Decode --> Frame rate: %f fps\n",
(double)iter / elapsedDecode);
printf(" Throughput: %f Megapixels/sec\n",
(double)(w * h) / 1000000. * (double)iter / elapsedDecode);
}
}
if (!doWrite) goto bailout;
if (sf.num != 1 || sf.denom != 1)
snprintf(sizeStr, 24, "%d_%d", sf.num, sf.denom);
else if (tilew != w || tileh != h)
snprintf(sizeStr, 24, "%dx%d", tilew, tileh);
else snprintf(sizeStr, 24, "full");
if (decompOnly)
snprintf(tempStr, 1024, "%s_%s.%s", fileName, sizeStr, ext);
else
snprintf(tempStr, 1024, "%s_%s%s_%s.%s", fileName, subName[subsamp],
qualStr, sizeStr, ext);
if (tjSaveImage(tempStr, dstBuf, scaledw, 0, scaledh, pf, flags) == -1)
THROW_TJG("saving bitmap");
ptr = strrchr(tempStr, '.');
snprintf(ptr, 1024 - (ptr - tempStr), "-err.%s", ext);
if (srcBuf && sf.num == 1 && sf.denom == 1) {
if (!quiet) printf("Compression error written to %s.\n", tempStr);
if (subsamp == TJ_GRAYSCALE) {
unsigned long index, index2;
for (row = 0, index = 0; row < h; row++, index += pitch) {
for (col = 0, index2 = index; col < w; col++, index2 += ps) {
unsigned long rindex = index2 + tjRedOffset[pf];
unsigned long gindex = index2 + tjGreenOffset[pf];
unsigned long bindex = index2 + tjBlueOffset[pf];
int y = (int)((double)srcBuf[rindex] * 0.299 +
(double)srcBuf[gindex] * 0.587 +
(double)srcBuf[bindex] * 0.114 + 0.5);
if (y > 255) y = 255;
if (y < 0) y = 0;
dstBuf[rindex] = abs(dstBuf[rindex] - y);
dstBuf[gindex] = abs(dstBuf[gindex] - y);
dstBuf[bindex] = abs(dstBuf[bindex] - y);
}
}
} else {
for (row = 0; row < h; row++)
for (col = 0; col < w * ps; col++)
dstBuf[pitch * row + col] =
abs(dstBuf[pitch * row + col] - srcBuf[pitch * row + col]);
}
if (tjSaveImage(tempStr, dstBuf, w, 0, h, pf, flags) == -1)
THROW_TJG("saving bitmap");
}
bailout:
if (file) fclose(file);
if (handle) tjDestroy(handle);
if (dstBuf && dstBufAlloc) free(dstBuf);
if (yuvBuf) free(yuvBuf);
return retval;
} | 0 | [
"CWE-787"
] | libjpeg-turbo | c30b1e72dac76343ef9029833d1561de07d29bad | 133,301,745,161,016,740,000,000,000,000,000,000,000 | 172 | 64-bit tjbench: Fix signed int overflow/segfault
... that occurred when attempting to decompress images with more than
715827882 (2048*1024*1024 / 3) pixels.
Fixes #388 |
int find_slot_by_number(pkcs11_handle_t *h, unsigned int slot_num, unsigned int *slotID)
{
SECMODModule *module = h->module;
int i;
/* if module is null,
* any of the PKCS #11 modules specified in the system config
* is available, find one */
if (module == NULL) {
PK11SlotList *list;
PK11SlotListElement *le;
PK11SlotInfo *slot = NULL;
/* find a slot, we haven't specifically selected a module,
* so find an appropriate one. */
/* get them all */
list = PK11_GetAllTokens(CKM_INVALID_MECHANISM, PR_FALSE, PR_TRUE, NULL);
if (list == NULL) {
return -1;
}
for (le = list->head; le; le = le->next) {
CK_SLOT_INFO slInfo;
SECStatus rv;
slInfo.flags = 0;
rv = PK11_GetSlotInfo(le->slot, &slInfo);
if (rv == SECSuccess && (slInfo.flags & CKF_REMOVABLE_DEVICE)) {
slot = PK11_ReferenceSlot(le->slot);
module = SECMOD_ReferenceModule(PK11_GetModule(le->slot));
break;
}
}
PK11_FreeSlotList(list);
if (slot == NULL) {
return -1;
}
h->slot = slot;
h->module = module;
*slotID = PK11_GetSlotID(slot);
return 0;
}
/*
* we're configured with a specific module, look for a present slot
* on that module. */
if (slot_num == 0) {
/* threaded applications should also acquire the
* DefaultModuleListLock */
for (i=0; i < module->slotCount; i++) {
if (module->slots[i] && PK11_IsPresent(module->slots[i])) {
h->slot = PK11_ReferenceSlot(module->slots[i]);
*slotID = PK11_GetSlotID(h->slot);
return 0;
}
}
}
/* we're configured for a specific module and token, see if it's present */
slot_num--;
if (slot_num < module->slotCount && module->slots &&
module->slots[slot_num] && PK11_IsPresent(module->slots[slot_num])) {
h->slot = PK11_ReferenceSlot(module->slots[slot_num]);
*slotID = PK11_GetSlotID(h->slot);
return 0;
}
return -1;
} | 0 | [] | pam_pkcs11 | cc51b3e2720ea862d500cab2ea517518ff39a497 | 94,012,363,703,252,570,000,000,000,000,000,000,000 | 66 | verify using a nonce from the system, not the card
Thanks to Eric Sesterhenn from X41 D-SEC GmbH
for reporting the problem. |
void (*SSL_CTX_sess_get_remove_cb(SSL_CTX *ctx))(SSL_CTX * ctx,SSL_SESSION *sess)
{
return ctx->remove_session_cb;
} | 0 | [] | openssl | edc032b5e3f3ebb1006a9c89e0ae00504f47966f | 161,261,377,583,169,390,000,000,000,000,000,000,000 | 4 | Add SRP support. |
dump_destroy_nhwindow(win)
winid win UNUSED;
{
return;
} | 0 | [
"CWE-120",
"CWE-269"
] | NetHack | f3def5c0b999478da2d0a8f0b6a7c370a2065f77 | 222,078,772,955,339,300,000,000,000,000,000,000,000 | 5 | command line triggered buffer overruns
Prevent extremely long command line arguments from overflowing local
buffers in raw_printf or config_error_add. The increased buffer
sizes they recently got to deal with long configuration file values
aren't sufficient to handle command line induced overflows.
choose_windows(core): copy and truncate the window_type argument in
case it gets passed to config_error_add().
process_options(unix): report bad values with "%.60s" so that vsprintf
will implicitly truncate when formatted by raw_printf(). |
static int perf_trace_event_perm(struct ftrace_event_call *tp_event,
struct perf_event *p_event)
{
/* The ftrace function trace is allowed only for root. */
if (ftrace_event_is_function(tp_event) &&
perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
return -EPERM;
/* No tracing, just counting, so no obvious leak */
if (!(p_event->attr.sample_type & PERF_SAMPLE_RAW))
return 0;
/* Some events are ok to be traced by non-root users... */
if (p_event->attach_state == PERF_ATTACH_TASK) {
if (tp_event->flags & TRACE_EVENT_FL_CAP_ANY)
return 0;
}
/*
* ...otherwise raw tracepoint data can be a severe data leak,
* only allow root to have these.
*/
if (perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN))
return -EPERM;
return 0;
} | 1 | [
"CWE-284",
"CWE-264"
] | linux | 12ae030d54ef250706da5642fc7697cc60ad0df7 | 76,970,202,480,682,020,000,000,000,000,000,000,000 | 27 | perf/ftrace: Fix paranoid level for enabling function tracer
The current default perf paranoid level is "1" which has
"perf_paranoid_kernel()" return false, and giving any operations that
use it, access to normal users. Unfortunately, this includes function
tracing and normal users should not be allowed to enable function
tracing by default.
The proper level is defined at "-1" (full perf access), which
"perf_paranoid_tracepoint_raw()" will only give access to. Use that
check instead for enabling function tracing.
Reported-by: Dave Jones <[email protected]>
Reported-by: Vince Weaver <[email protected]>
Tested-by: Vince Weaver <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Jiri Olsa <[email protected]>
Cc: Frederic Weisbecker <[email protected]>
Cc: [email protected] # 3.4+
CVE: CVE-2013-2930
Fixes: ced39002f5ea ("ftrace, perf: Add support to use function tracepoint in perf")
Signed-off-by: Steven Rostedt <[email protected]> |
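To make the fix described above concrete, here is a minimal user-space model (an assumption for illustration, not kernel code): at the default sysctl value of 1 quoted in the message, the perf_paranoid_kernel() test used by the buggy code evaluates to false and lets an unprivileged caller enable function tracing, while the perf_paranoid_tracepoint_raw() test the fix switches to correctly denies it.

#include <stdbool.h>
#include <stdio.h>

static int sysctl_perf_event_paranoid = 1;   /* default level at the time of the fix */

/* Thresholds modeled on the kernel helpers named in the message above. */
static bool perf_paranoid_kernel(void)         { return sysctl_perf_event_paranoid > 1; }
static bool perf_paranoid_tracepoint_raw(void) { return sysctl_perf_event_paranoid > -1; }

int main(void)
{
    bool capable_sys_admin = false;            /* unprivileged caller */
    /* Buggy check: does not deny the unprivileged caller at paranoid level 1. */
    bool denied_old = perf_paranoid_kernel() && !capable_sys_admin;
    /* Fixed check: full perf access (level -1) is required instead. */
    bool denied_new = perf_paranoid_tracepoint_raw() && !capable_sys_admin;
    printf("old check denies: %d, new check denies: %d\n", denied_old, denied_new);
    return 0;
}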
cmsNAMEDCOLORLIST* CMSEXPORT cmsDupNamedColorList(const cmsNAMEDCOLORLIST* v)
{
cmsNAMEDCOLORLIST* NewNC;
if (v == NULL) return NULL;
NewNC= cmsAllocNamedColorList(v ->ContextID, v -> nColors, v ->ColorantCount, v ->Prefix, v ->Suffix);
if (NewNC == NULL) return NULL;
// For really large tables we need this
while (NewNC ->Allocated < v ->Allocated)
GrowNamedColorList(NewNC);
memmove(NewNC ->Prefix, v ->Prefix, sizeof(v ->Prefix));
memmove(NewNC ->Suffix, v ->Suffix, sizeof(v ->Suffix));
NewNC ->ColorantCount = v ->ColorantCount;
memmove(NewNC->List, v ->List, v->nColors * sizeof(_cmsNAMEDCOLOR));
NewNC ->nColors = v ->nColors;
return NewNC;
} | 0 | [] | Little-CMS | 886e2f524268efe8a1c3aa838c28e446fda24486 | 237,811,360,955,553,300,000,000,000,000,000,000,000 | 20 | Fixes from coverity check |
has_innodb_buffer_pool_dump()
{
if ((server_flavor == FLAVOR_PERCONA_SERVER ||
server_flavor == FLAVOR_MYSQL) &&
mysql_server_version >= 50603) {
return(true);
}
if (server_flavor == FLAVOR_MARIADB && mysql_server_version >= 10000) {
return(true);
}
msg_ts("Server has no support for innodb_buffer_pool_dump_now");
return(false);
} | 0 | [
"CWE-200"
] | percona-xtrabackup | 7742f875bb289a874246fb4653b7cd9f14b588fe | 315,196,922,836,432,530,000,000,000,000,000,000,000 | 15 | PXB-2722 password is written into xtrabackup_info
https://jira.percona.com/browse/PXB-2722
Analysis:
password passed with -p option is written into backup tool_command in xtrabackup_info
Fix:
mask the password before writing it into xtrabackup_info |
static ssize_t gadget_dev_desc_bcdUSB_store(struct config_item *item,
const char *page, size_t len)
{
u16 bcdUSB;
int ret;
ret = kstrtou16(page, 0, &bcdUSB);
if (ret)
return ret;
ret = is_valid_bcd(bcdUSB);
if (ret)
return ret;
to_gadget_info(item)->cdev.desc.bcdUSB = cpu_to_le16(bcdUSB);
return len;
} | 0 | [
"CWE-125"
] | linux | 15753588bcd4bbffae1cca33c8ced5722477fe1f | 22,713,879,776,148,580,000,000,000,000,000,000,000 | 16 | USB: gadget: fix illegal array access in binding with UDC
FuzzUSB (a variant of syzkaller) found an illegal array access
using an incorrect index while binding a gadget with UDC.
Reference: https://www.spinics.net/lists/linux-usb/msg194331.html
This bug occurs when a size variable used for a buffer
is misused to access its strcpy-ed buffer.
A buffer and its size variable are taken from user input,
and a new buffer is created from them using kstrdup().
Because the original buffer contains a 0 value in the middle,
the kstrdup-ed buffer ends up smaller than the original,
so accessing the kstrdup-ed buffer with the original size variable
triggers a memory access violation.
The fix makes sure there is no zero value inside the buffer
by comparing the strlen() of the original buffer with the size variable,
so that access to the kstrdup-ed buffer is safe.
BUG: KASAN: slab-out-of-bounds in gadget_dev_desc_UDC_store+0x1ba/0x200
drivers/usb/gadget/configfs.c:266
Read of size 1 at addr ffff88806a55dd7e by task syz-executor.0/17208
CPU: 2 PID: 17208 Comm: syz-executor.0 Not tainted 5.6.8 #1
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Bochs 01/01/2011
Call Trace:
__dump_stack lib/dump_stack.c:77 [inline]
dump_stack+0xce/0x128 lib/dump_stack.c:118
print_address_description.constprop.4+0x21/0x3c0 mm/kasan/report.c:374
__kasan_report+0x131/0x1b0 mm/kasan/report.c:506
kasan_report+0x12/0x20 mm/kasan/common.c:641
__asan_report_load1_noabort+0x14/0x20 mm/kasan/generic_report.c:132
gadget_dev_desc_UDC_store+0x1ba/0x200 drivers/usb/gadget/configfs.c:266
flush_write_buffer fs/configfs/file.c:251 [inline]
configfs_write_file+0x2f1/0x4c0 fs/configfs/file.c:283
__vfs_write+0x85/0x110 fs/read_write.c:494
vfs_write+0x1cd/0x510 fs/read_write.c:558
ksys_write+0x18a/0x220 fs/read_write.c:611
__do_sys_write fs/read_write.c:623 [inline]
__se_sys_write fs/read_write.c:620 [inline]
__x64_sys_write+0x73/0xb0 fs/read_write.c:620
do_syscall_64+0x9e/0x510 arch/x86/entry/common.c:294
entry_SYSCALL_64_after_hwframe+0x49/0xbe
Signed-off-by: Kyungtae Kim <[email protected]>
Reported-and-tested-by: Kyungtae Kim <[email protected]>
Cc: Felipe Balbi <[email protected]>
Cc: stable <[email protected]>
Link: https://lore.kernel.org/r/20200510054326.GA19198@pizza01
Signed-off-by: Greg Kroah-Hartman <[email protected]> |
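The following user-space sketch (function and buffer names are invented for illustration, not the configfs code) models the check described above: a caller-supplied length is trusted only if it matches strlen() of the buffer, so a string with an embedded NUL can no longer make later indexing run past the kstrdup()-style copy.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Reject buffers whose reported length disagrees with their string length. */
static char *dup_name_checked(const char *buf, size_t reported_len)
{
    if (strlen(buf) != reported_len)   /* embedded '\0': the copy would be shorter than reported_len */
        return NULL;
    return strdup(buf);                /* user-space stand-in for kstrdup() */
}

int main(void)
{
    const char name[] = { 'u', 'd', 'c', '\0', 'X' };   /* 5 bytes reported, strlen() == 3 */
    char *copy = dup_name_checked(name, sizeof(name));
    printf("%s\n", copy ? copy : "rejected: reported length does not match string length");
    free(copy);
    return 0;
}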
void PutByte(byte inByte)
{
if (counter >= outputLen || validOutput[counter] != inByte)
{
std::cerr << "incorrect output " << counter << ", " << (word16)validOutput[counter] << ", " << (word16)inByte << "\n";
fail = true;
CRYPTOPP_ASSERT(false);
}
counter++;
}
| 0 | [
"CWE-190",
"CWE-125"
] | cryptopp | 07dbcc3d9644b18e05c1776db2a57fe04d780965 | 43,886,040,438,258,380,000,000,000,000,000,000,000 | 10 | Add Inflator::BadDistanceErr exception (Issue 414)
The improved validation and exception clear the Address Sanitizer and Undefined Behavior Sanitizer findings |
static void i2c_ddc_reset(DeviceState *ds)
{
I2CDDCState *s = I2CDDC(ds);
s->firstbyte = false;
s->reg = 0;
} | 0 | [
"CWE-125"
] | qemu | b05b267840515730dbf6753495d5b7bd8b04ad1c | 31,669,133,299,964,760,000,000,000,000,000,000,000 | 7 | i2c-ddc: fix oob read
Suggested-by: Michael Hanselmann <[email protected]>
Signed-off-by: Gerd Hoffmann <[email protected]>
Reviewed-by: Michael Hanselmann <[email protected]>
Reviewed-by: Philippe Mathieu-Daudé <[email protected]>
Message-id: [email protected] |
compliance_failure(void)
{
char *ver="???";
switch(opt.compliance)
{
case CO_GNUPG:
ver="GnuPG";
break;
case CO_RFC4880:
ver="OpenPGP";
break;
case CO_RFC2440:
ver="OpenPGP (older)";
break;
case CO_PGP6:
ver="PGP 6.x";
break;
case CO_PGP7:
ver="PGP 7.x";
break;
case CO_PGP8:
ver="PGP 8.x";
break;
}
log_info(_("this message may not be usable by %s\n"),ver);
opt.compliance=CO_GNUPG;
} | 0 | [
"CWE-20"
] | gnupg | 2183683bd633818dd031b090b5530951de76f392 | 336,078,907,942,235,740,000,000,000,000,000,000,000 | 34 | Use inline functions to convert buffer data to scalars.
* common/host2net.h (buf16_to_ulong, buf16_to_uint): New.
(buf16_to_ushort, buf16_to_u16): New.
(buf32_to_size_t, buf32_to_ulong, buf32_to_uint, buf32_to_u32): New.
--
Commit 91b826a38880fd8a989318585eb502582636ddd8 was not enough to
avoid all sign extension on shift problems. Hanno Böck found a case
with an invalid read due to this problem. To fix that once and for
all almost all uses of "<< 24" and "<< 8" are changed by this patch to
use an inline function from host2net.h.
Signed-off-by: Werner Koch <[email protected]> |
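An illustrative version of one such converter is sketched below; the real definitions live in common/host2net.h and may differ in detail (the body here is an assumption), but the point is the same: cast each byte to an unsigned 32-bit type before shifting, so a byte of 0xff can never be shifted into the sign bit of a plain int.

#include <stdint.h>
#include <stdio.h>

/* Sketch of a buf32_to_u32-style helper (exact body assumed for illustration). */
static inline uint32_t buf32_to_u32(const void *buffer)
{
    const unsigned char *p = buffer;
    return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16)
         | ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}

int main(void)
{
    unsigned char buf[4] = { 0xff, 0x00, 0x00, 0x01 };
    /* "p[0] << 24" on a plain int would overflow into the sign bit;
     * the uint32_t cast keeps the shift well defined. */
    printf("0x%08x\n", (unsigned)buf32_to_u32(buf));
    return 0;
}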
static int __init default_policy_setup(char *str)
{
ima_use_tcb = 1;
return 1;
} | 0 | [
"CWE-284",
"CWE-264"
] | linux | 867c20265459d30a01b021a9c1e81fb4c5832aa9 | 171,380,102,804,479,130,000,000,000,000,000,000,000 | 5 | ima: fix add LSM rule bug
If security_filter_rule_init() doesn't return a rule, then not everything
is as fine as the return code implies.
This bug only occurs when the LSM (eg. SELinux) is disabled at runtime.
Adding an empty LSM rule causes ima_match_rules() to always succeed,
ignoring any remaining rules.
default IMA TCB policy:
# PROC_SUPER_MAGIC
dont_measure fsmagic=0x9fa0
# SYSFS_MAGIC
dont_measure fsmagic=0x62656572
# DEBUGFS_MAGIC
dont_measure fsmagic=0x64626720
# TMPFS_MAGIC
dont_measure fsmagic=0x01021994
# SECURITYFS_MAGIC
dont_measure fsmagic=0x73636673
< LSM specific rule >
dont_measure obj_type=var_log_t
measure func=BPRM_CHECK
measure func=FILE_MMAP mask=MAY_EXEC
measure func=FILE_CHECK mask=MAY_READ uid=0
Thus without the patch, with the boot parameters 'tcb selinux=0', adding
the above 'dont_measure obj_type=var_log_t' rule to the default IMA TCB
measurement policy, would result in nothing being measured. The patch
prevents the default TCB policy from being replaced.
Signed-off-by: Mimi Zohar <[email protected]>
Cc: James Morris <[email protected]>
Acked-by: Serge Hallyn <[email protected]>
Cc: David Safford <[email protected]>
Cc: <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
static int max() { return (int)(~0U>>1); } | 0 | [
"CWE-770"
] | cimg | 619cb58dd90b4e03ac68286c70ed98acbefd1c90 | 24,873,659,725,961,000,000,000,000,000,000,000,000 | 1 | CImg<>::load_bmp() and CImg<>::load_pandore(): Check that dimensions encoded in file does not exceed file size. |
static int init_server_name(SSL *s, unsigned int context)
{
if (s->server) {
s->servername_done = 0;
OPENSSL_free(s->ext.hostname);
s->ext.hostname = NULL;
}
return 1;
} | 0 | [
"CWE-476"
] | openssl | fb9fa6b51defd48157eeb207f52181f735d96148 | 195,940,367,944,925,400,000,000,000,000,000,000,000 | 11 | ssl sigalg extension: fix NULL pointer dereference
As the variable peer_sigalgslen is not cleared on ssl rehandshake, it's
possible to crash an openssl tls secured server remotely by sending a
manipulated hello message in a rehandshake.
On such a manipulated rehandshake, tls1_set_shared_sigalgs() calls
tls12_shared_sigalgs() with the peer_sigalgslen of the previous
handshake, while the peer_sigalgs has been freed.
As a result tls12_shared_sigalgs() walks over the available
peer_sigalgs and tries to access data of a NULL pointer.
This issue was introduced by c589c34e61 (Add support for the TLS 1.3
signature_algorithms_cert extension, 2018-01-11).
Signed-off-by: Peter Kästle <[email protected]>
Signed-off-by: Samuel Sapalski <[email protected]>
CVE-2021-3449
CLA: trivial
Reviewed-by: Tomas Mraz <[email protected]>
Reviewed-by: Paul Dale <[email protected]>
Reviewed-by: Matt Caswell <[email protected]> |
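A minimal sketch of the invariant the fix restores is below (structure and function names are invented for illustration, not the OpenSSL API): whenever the signature-algorithm array is freed, the companion length must be reset in the same place, otherwise a renegotiation will walk peer_sigalgslen entries of a pointer that no longer exists.

#include <stdio.h>
#include <stdlib.h>

struct sigalg_state {
    unsigned short *peer_sigalgs;
    size_t peer_sigalgslen;
};

/* Free the array and reset the length together, so a later handshake
 * cannot iterate a stale count over a NULL pointer. */
static void clear_peer_sigalgs(struct sigalg_state *s)
{
    free(s->peer_sigalgs);
    s->peer_sigalgs = NULL;
    s->peer_sigalgslen = 0;   /* the reset the vulnerable code path was missing */
}

int main(void)
{
    struct sigalg_state s = { malloc(4 * sizeof(unsigned short)), 4 };
    clear_peer_sigalgs(&s);
    printf("len after clear: %zu\n", s.peer_sigalgslen);
    return 0;
}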
static DIR *open_cwd(pid_t pid)
{
char buf[sizeof("/proc/%lu/cwd") + sizeof(long)*3];
sprintf(buf, "/proc/%lu/cwd", (long)pid);
DIR *cwd = opendir(buf);
if (cwd == NULL)
perror_msg("Can't open process's CWD for CompatCore");
return cwd;
} | 0 | [
"CWE-59"
] | abrt | 28ce40d8db91c1926a95f21ef19a980a8af88471 | 176,809,979,104,062,300,000,000,000,000,000,000,000 | 11 | ccpp: check for overflow in abrt coredump path creation
This issue was discovered by Florian Weimer of Red Hat Product Security.
Signed-off-by: Jakub Filak <[email protected]> |
writeRead (const std::string &tempDir,
const Array2D<unsigned int> &pi,
const Array2D<half> &ph,
const Array2D<float> &pf,
int W,
int H,
LineOrder lorder,
Compression comp,
LevelRoundingMode rmode,
int dx, int dy,
int xSize, int ySize)
{
std::string filename = tempDir + "imf_test_scanline_api.exr";
writeRead (pi, ph, pf, filename.c_str(), lorder, W, H,
xSize, ySize, dx, dy, comp, ONE_LEVEL, rmode);
writeRead (pi, ph, pf, filename.c_str(), lorder, W, H,
xSize, ySize, dx, dy, comp, MIPMAP_LEVELS, rmode);
writeRead (pi, ph, pf, filename.c_str(), lorder, W, H,
xSize, ySize, dx, dy, comp, RIPMAP_LEVELS, rmode);
} | 1 | [
"CWE-770"
] | openexr | d80f11f4f55100d007ae80a162bf257ec291612c | 304,466,669,928,447,230,000,000,000,000,000,000,000 | 21 | More efficient handling of filled channels reading tiles with scanline API (#830)
* refactor channel filling in InputFile API with tiled source
Signed-off-by: Peter Hillman <[email protected]>
* handle edge-case of empty framebuffer
Signed-off-by: Peter Hillman <[email protected]> |
GF_Err fiel_box_dump(GF_Box *a, FILE * trace)
{
GF_FieldInfoBox *p = (GF_FieldInfoBox *) a;
gf_isom_box_dump_start(a, "FieldInfoBox", trace);
gf_fprintf(trace, "count=\"%d\" order=\"%d\">\n", p->field_count, p->field_order);
gf_isom_box_dump_done("FieldInfoBox", a, trace);
return GF_OK;
} | 0 | [
"CWE-787"
] | gpac | ea1eca00fd92fa17f0e25ac25652622924a9a6a0 | 267,044,076,493,146,650,000,000,000,000,000,000,000 | 9 | fixed #2138 |
static inline u32 nfsd4_seek_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
{
return (op_encode_hdr_size + 3) * sizeof(__be32);
} | 0 | [
"CWE-20",
"CWE-129"
] | linux | b550a32e60a4941994b437a8d662432a486235a5 | 274,350,724,072,718,580,000,000,000,000,000,000,000 | 4 | nfsd: fix undefined behavior in nfsd4_layout_verify
UBSAN: Undefined behaviour in fs/nfsd/nfs4proc.c:1262:34
shift exponent 128 is too large for 32-bit type 'int'
Depending on compiler+architecture, this may cause the check for
layout_type to succeed for overly large values (which seems to be the
case with amd64). The large value will be later used in de-referencing
nfsd4_layout_ops for function pointers.
Reported-by: Jani Tuovila <[email protected]>
Signed-off-by: Ari Kauppi <[email protected]>
[[email protected]: use LAYOUT_TYPE_MAX instead of 32]
Cc: [email protected]
Reviewed-by: Dan Carpenter <[email protected]>
Reviewed-by: Christoph Hellwig <[email protected]>
Signed-off-by: J. Bruce Fields <[email protected]> |
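Below is a standalone sketch of the bound check described above (the LAYOUT_TYPE_MAX value and the function shape are assumptions, not nfsd code): validating layout_type against the enum bound before using it as a shift count avoids the undefined "shift exponent 128" and also keeps it from later indexing the nfsd4_layout_ops table out of range.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define LAYOUT_TYPE_MAX 6   /* placeholder for the real enum bound */

static bool layout_type_supported(uint32_t layout_type, uint32_t supported_mask)
{
    if (layout_type == 0 || layout_type >= LAYOUT_TYPE_MAX)
        return false;                        /* "1u << 128" would be undefined behaviour */
    return (supported_mask >> layout_type) & 1u;
}

int main(void)
{
    printf("type 128: %d\n", layout_type_supported(128, 0x3e));   /* rejected instead of UB */
    printf("type 1:   %d\n", layout_type_supported(1, 0x3e));     /* accepted */
    return 0;
}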
slot_handle_level(struct kvm *kvm, const struct kvm_memory_slot *memslot,
slot_level_handler fn, int start_level, int end_level,
bool flush_on_yield)
{
return slot_handle_level_range(kvm, memslot, fn, start_level,
end_level, memslot->base_gfn,
memslot->base_gfn + memslot->npages - 1,
flush_on_yield, false);
} | 0 | [
"CWE-476"
] | linux | 9f46c187e2e680ecd9de7983e4d081c3391acc76 | 151,908,768,279,797,020,000,000,000,000,000,000,000 | 9 | KVM: x86/mmu: fix NULL pointer dereference on guest INVPCID
With shadow paging enabled, the INVPCID instruction results in a call
to kvm_mmu_invpcid_gva. If INVPCID is executed with CR0.PG=0, the
invlpg callback is not set and the result is a NULL pointer dereference.
Fix it trivially by checking for mmu->invlpg before every call.
There are other possibilities:
- check for CR0.PG, because KVM (like all Intel processors after P5)
flushes guest TLB on CR0.PG changes so that INVPCID/INVLPG are a
nop with paging disabled
- check for EFER.LMA, because KVM syncs and flushes when switching
MMU contexts outside of 64-bit mode
All of these are tricky, go for the simple solution. This is CVE-2022-1789.
Reported-by: Yongkang Jia <[email protected]>
Cc: [email protected]
Signed-off-by: Paolo Bonzini <[email protected]> |
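The fix itself is "check for mmu->invlpg before every call"; the toy program below (plain user-space C with invented names, not KVM code) shows the shape of that guard around an optional callback.

#include <stdio.h>

struct mmu_ops {
    void (*invlpg)(unsigned long gva);   /* may legitimately be NULL (e.g. CR0.PG=0) */
};

static void invalidate_gva(struct mmu_ops *mmu, unsigned long gva)
{
    if (mmu->invlpg)          /* the NULL check whose absence caused CVE-2022-1789 */
        mmu->invlpg(gva);
}

int main(void)
{
    struct mmu_ops mmu = { 0 };
    invalidate_gva(&mmu, 0x1000);        /* safely a no-op instead of a crash */
    puts("no dereference of a NULL callback");
    return 0;
}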
void LIRGenerator::do_Convert(Convert* x) {
// flags that vary for the different operations and different SSE-settings
bool fixed_input = false, fixed_result = false, round_result = false, needs_stub = false;
switch (x->op()) {
case Bytecodes::_i2l: // fall through
case Bytecodes::_l2i: // fall through
case Bytecodes::_i2b: // fall through
case Bytecodes::_i2c: // fall through
case Bytecodes::_i2s: fixed_input = false; fixed_result = false; round_result = false; needs_stub = false; break;
case Bytecodes::_f2d: fixed_input = UseSSE == 1; fixed_result = false; round_result = false; needs_stub = false; break;
case Bytecodes::_d2f: fixed_input = false; fixed_result = UseSSE == 1; round_result = UseSSE < 1; needs_stub = false; break;
case Bytecodes::_i2f: fixed_input = false; fixed_result = false; round_result = UseSSE < 1; needs_stub = false; break;
case Bytecodes::_i2d: fixed_input = false; fixed_result = false; round_result = false; needs_stub = false; break;
case Bytecodes::_f2i: fixed_input = false; fixed_result = false; round_result = false; needs_stub = true; break;
case Bytecodes::_d2i: fixed_input = false; fixed_result = false; round_result = false; needs_stub = true; break;
case Bytecodes::_l2f: fixed_input = false; fixed_result = UseSSE >= 1; round_result = UseSSE < 1; needs_stub = false; break;
case Bytecodes::_l2d: fixed_input = false; fixed_result = UseSSE >= 2; round_result = UseSSE < 2; needs_stub = false; break;
case Bytecodes::_f2l: fixed_input = true; fixed_result = true; round_result = false; needs_stub = false; break;
case Bytecodes::_d2l: fixed_input = true; fixed_result = true; round_result = false; needs_stub = false; break;
default: ShouldNotReachHere();
}
LIRItem value(x->value(), this);
value.load_item();
LIR_Opr input = value.result();
LIR_Opr result = rlock(x);
// arguments of lir_convert
LIR_Opr conv_input = input;
LIR_Opr conv_result = result;
ConversionStub* stub = NULL;
if (fixed_input) {
conv_input = fixed_register_for(input->type());
__ move(input, conv_input);
}
assert(fixed_result == false || round_result == false, "cannot set both");
if (fixed_result) {
conv_result = fixed_register_for(result->type());
} else if (round_result) {
result = new_register(result->type());
set_vreg_flag(result, must_start_in_memory);
}
if (needs_stub) {
stub = new ConversionStub(x->op(), conv_input, conv_result);
}
__ convert(x->op(), conv_input, conv_result, stub);
if (result != conv_result) {
__ move(conv_result, result);
}
assert(result->is_virtual(), "result must be virtual register");
set_result(x, result);
} | 0 | [] | jdk11u-dev | 9a62b8af48af6c506d2fc4a3482116de26357f16 | 191,508,596,015,360,600,000,000,000,000,000,000,000 | 60 | 8272014: Better array indexing
Backport-of: 937c31d896d05aa24543b74e98a2ea9f05b5d86f |
int32_t cli_bcapi_map_done(struct cli_bc_ctx *ctx , int32_t id)
{
struct cli_map *s = get_hashtab(ctx, id);
if (!s)
return -1;
cli_map_delete(s);
if (id == ctx->nmaps-1) {
ctx->nmaps--;
if (!ctx->nmaps) {
free(ctx->maps);
ctx->maps = NULL;
} else {
s = cli_realloc(ctx->maps, ctx->nmaps*(sizeof(*s)));
if (s)
ctx->maps = s;
}
}
return 0;
} | 0 | [
"CWE-189"
] | clamav-devel | 3d664817f6ef833a17414a4ecea42004c35cc42f | 86,098,910,285,749,980,000,000,000,000,000,000,000 | 19 | fix recursion level crash (bb #3706).
Thanks to Stephane Chazelas for the analysis. |
static int __copy_siginfo_from_user32(int signo, struct kernel_siginfo *to,
const struct compat_siginfo __user *ufrom)
{
struct compat_siginfo from;
if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
return -EFAULT;
from.si_signo = signo;
return post_copy_siginfo_from_user32(to, &from);
} | 0 | [
"CWE-190"
] | linux | d1e7fd6462ca9fc76650fbe6ca800e35b24267da | 124,247,512,926,836,470,000,000,000,000,000,000,000 | 11 | signal: Extend exec_id to 64bits
Replace the 32bit exec_id with a 64bit exec_id to make it impossible
to wrap the exec_id counter. With care an attacker can cause exec_id
wrap and send arbitrary signals to a newly exec'd parent. This
bypasses the signal sending checks if the parent changes their
credentials during exec.
The severity of this problem can be seen from my limited testing
of a 32bit exec_id: it can take as little as 19s to exec 65536 times,
which means that it can take as little as 14 days to wrap a 32bit
exec_id. Adam Zabrocki has succeeded in wrapping the self_exe_id in 7
days. Even my slower timing is within the uptime of a typical server,
which means self_exec_id is simply a speed bump today, and if exec
gets noticeably faster self_exec_id won't even be a speed bump.
Extending self_exec_id to 64bits introduces a problem on 32bit
architectures where reading self_exec_id is no longer atomic and can
take two read instructions, which means that it is possible to hit
a window where the read value of exec_id does not match the written
value. So with very lucky timing after this change this still
remains exploitable.
I have updated the update of exec_id on exec to use WRITE_ONCE
and the read of exec_id in do_notify_parent to use READ_ONCE
to make it clear that there is no locking between these two
locations.
Link: https://lore.kernel.org/kernel-hardening/[email protected]
Fixes: 2.3.23pre2
Cc: [email protected]
Signed-off-by: "Eric W. Biederman" <[email protected]> |
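A quick back-of-the-envelope check of the timings quoted above (a standalone calculation for illustration, not kernel code): at 65536 execs per ~19 seconds, a 32-bit exec_id wraps in roughly two weeks, while a 64-bit counter at the same rate would take on the order of a hundred million years, which is why widening the counter is treated as sufficient.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    double execs_per_second = 65536.0 / 19.0;                 /* rate measured in the message above */
    double wrap32_days  = (((double)UINT32_MAX + 1.0) / execs_per_second) / 86400.0;
    double wrap64_years = ((double)UINT64_MAX / execs_per_second) / (86400.0 * 365.25);
    printf("32-bit exec_id wraps in ~%.1f days\n", wrap32_days);
    printf("64-bit exec_id wraps in ~%.2e years\n", wrap64_years);
    return 0;
}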
MagickPrivate void XColorBrowserWidget(Display *display,XWindows *windows,
const char *action,char *reply)
{
#define CancelButtonText "Cancel"
#define ColornameText "Name:"
#define ColorPatternText "Pattern:"
#define GrabButtonText "Grab"
#define ResetButtonText "Reset"
char
**colorlist,
primary_selection[MagickPathExtent],
reset_pattern[MagickPathExtent],
text[MagickPathExtent];
ExceptionInfo
*exception;
int
x,
y;
int
i;
static char
glob_pattern[MagickPathExtent] = "*";
static MagickStatusType
mask = (MagickStatusType) (CWWidth | CWHeight | CWX | CWY);
Status
status;
unsigned int
height,
text_width,
visible_colors,
width;
size_t
colors,
delay,
state;
XColor
color;
XEvent
event;
XFontStruct
*font_info;
XTextProperty
window_name;
XWidgetInfo
action_info,
cancel_info,
expose_info,
grab_info,
list_info,
mode_info,
north_info,
reply_info,
reset_info,
scroll_info,
selection_info,
slider_info,
south_info,
text_info;
XWindowChanges
window_changes;
/*
Get color list and sort in ascending order.
*/
assert(display != (Display *) NULL);
assert(windows != (XWindows *) NULL);
assert(action != (char *) NULL);
assert(reply != (char *) NULL);
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",action);
XSetCursorState(display,windows,MagickTrue);
XCheckRefreshWindows(display,windows);
(void) CopyMagickString(reset_pattern,"*",MagickPathExtent);
exception=AcquireExceptionInfo();
colorlist=GetColorList(glob_pattern,&colors,exception);
if (colorlist == (char **) NULL)
{
/*
Pattern failed, obtain all the colors.
*/
(void) CopyMagickString(glob_pattern,"*",MagickPathExtent);
colorlist=GetColorList(glob_pattern,&colors,exception);
if (colorlist == (char **) NULL)
{
XNoticeWidget(display,windows,"Unable to obtain colors names:",
glob_pattern);
(void) XDialogWidget(display,windows,action,"Enter color name:",
reply);
return;
}
}
/*
Determine Color Browser widget attributes.
*/
font_info=windows->widget.font_info;
text_width=0;
for (i=0; i < (int) colors; i++)
if (WidgetTextWidth(font_info,colorlist[i]) > text_width)
text_width=WidgetTextWidth(font_info,colorlist[i]);
width=WidgetTextWidth(font_info,(char *) action);
if (WidgetTextWidth(font_info,CancelButtonText) > width)
width=WidgetTextWidth(font_info,CancelButtonText);
if (WidgetTextWidth(font_info,ResetButtonText) > width)
width=WidgetTextWidth(font_info,ResetButtonText);
if (WidgetTextWidth(font_info,GrabButtonText) > width)
width=WidgetTextWidth(font_info,GrabButtonText);
width+=QuantumMargin;
if (WidgetTextWidth(font_info,ColorPatternText) > width)
width=WidgetTextWidth(font_info,ColorPatternText);
if (WidgetTextWidth(font_info,ColornameText) > width)
width=WidgetTextWidth(font_info,ColornameText);
height=(unsigned int) (font_info->ascent+font_info->descent);
/*
Position Color Browser widget.
*/
windows->widget.width=(unsigned int)
(width+MagickMin((int) text_width,(int) MaxTextWidth)+6*QuantumMargin);
windows->widget.min_width=(unsigned int)
(width+MinTextWidth+4*QuantumMargin);
if (windows->widget.width < windows->widget.min_width)
windows->widget.width=windows->widget.min_width;
windows->widget.height=(unsigned int)
((81*height) >> 2)+((13*QuantumMargin) >> 1)+4;
windows->widget.min_height=(unsigned int)
(((23*height) >> 1)+((13*QuantumMargin) >> 1)+4);
if (windows->widget.height < windows->widget.min_height)
windows->widget.height=windows->widget.min_height;
XConstrainWindowPosition(display,&windows->widget);
/*
Map Color Browser widget.
*/
(void) CopyMagickString(windows->widget.name,"Browse and Select a Color",
MagickPathExtent);
status=XStringListToTextProperty(&windows->widget.name,1,&window_name);
if (status != False)
{
XSetWMName(display,windows->widget.id,&window_name);
XSetWMIconName(display,windows->widget.id,&window_name);
(void) XFree((void *) window_name.value);
}
window_changes.width=(int) windows->widget.width;
window_changes.height=(int) windows->widget.height;
window_changes.x=windows->widget.x;
window_changes.y=windows->widget.y;
(void) XReconfigureWMWindow(display,windows->widget.id,windows->widget.screen,
mask,&window_changes);
(void) XMapRaised(display,windows->widget.id);
windows->widget.mapped=MagickFalse;
/*
Respond to X events.
*/
XGetWidgetInfo((char *) NULL,&mode_info);
XGetWidgetInfo((char *) NULL,&slider_info);
XGetWidgetInfo((char *) NULL,&north_info);
XGetWidgetInfo((char *) NULL,&south_info);
XGetWidgetInfo((char *) NULL,&expose_info);
XGetWidgetInfo((char *) NULL,&selection_info);
visible_colors=0;
delay=SuspendTime << 2;
state=UpdateConfigurationState;
do
{
if (state & UpdateConfigurationState)
{
int
id;
/*
Initialize button information.
*/
XGetWidgetInfo(CancelButtonText,&cancel_info);
cancel_info.width=width;
cancel_info.height=(unsigned int) ((3*height) >> 1);
cancel_info.x=(int)
(windows->widget.width-cancel_info.width-QuantumMargin-2);
cancel_info.y=(int)
(windows->widget.height-cancel_info.height-QuantumMargin);
XGetWidgetInfo(action,&action_info);
action_info.width=width;
action_info.height=(unsigned int) ((3*height) >> 1);
action_info.x=cancel_info.x-(cancel_info.width+(QuantumMargin >> 1)+
(action_info.bevel_width << 1));
action_info.y=cancel_info.y;
XGetWidgetInfo(GrabButtonText,&grab_info);
grab_info.width=width;
grab_info.height=(unsigned int) ((3*height) >> 1);
grab_info.x=QuantumMargin;
grab_info.y=((5*QuantumMargin) >> 1)+height;
XGetWidgetInfo(ResetButtonText,&reset_info);
reset_info.width=width;
reset_info.height=(unsigned int) ((3*height) >> 1);
reset_info.x=QuantumMargin;
reset_info.y=grab_info.y+grab_info.height+QuantumMargin;
/*
Initialize reply information.
*/
XGetWidgetInfo(reply,&reply_info);
reply_info.raised=MagickFalse;
reply_info.bevel_width--;
reply_info.width=windows->widget.width-width-((6*QuantumMargin) >> 1);
reply_info.height=height << 1;
reply_info.x=(int) (width+(QuantumMargin << 1));
reply_info.y=action_info.y-reply_info.height-QuantumMargin;
/*
Initialize mode information.
*/
XGetWidgetInfo((char *) NULL,&mode_info);
mode_info.active=MagickTrue;
mode_info.bevel_width=0;
mode_info.width=(unsigned int) (action_info.x-(QuantumMargin << 1));
mode_info.height=action_info.height;
mode_info.x=QuantumMargin;
mode_info.y=action_info.y;
/*
Initialize scroll information.
*/
XGetWidgetInfo((char *) NULL,&scroll_info);
scroll_info.bevel_width--;
scroll_info.width=height;
scroll_info.height=(unsigned int) (reply_info.y-grab_info.y-
(QuantumMargin >> 1));
scroll_info.x=reply_info.x+(reply_info.width-scroll_info.width);
scroll_info.y=grab_info.y-reply_info.bevel_width;
scroll_info.raised=MagickFalse;
scroll_info.trough=MagickTrue;
north_info=scroll_info;
north_info.raised=MagickTrue;
north_info.width-=(north_info.bevel_width << 1);
north_info.height=north_info.width-1;
north_info.x+=north_info.bevel_width;
north_info.y+=north_info.bevel_width;
south_info=north_info;
south_info.y=scroll_info.y+scroll_info.height-scroll_info.bevel_width-
south_info.height;
id=slider_info.id;
slider_info=north_info;
slider_info.id=id;
slider_info.width-=2;
slider_info.min_y=north_info.y+north_info.height+north_info.bevel_width+
slider_info.bevel_width+2;
slider_info.height=scroll_info.height-((slider_info.min_y-
scroll_info.y+1) << 1)+4;
visible_colors=scroll_info.height/(height+(height >> 3));
if (colors > visible_colors)
slider_info.height=(unsigned int)
((visible_colors*slider_info.height)/colors);
slider_info.max_y=south_info.y-south_info.bevel_width-
slider_info.bevel_width-2;
slider_info.x=scroll_info.x+slider_info.bevel_width+1;
slider_info.y=slider_info.min_y;
expose_info=scroll_info;
expose_info.y=slider_info.y;
/*
Initialize list information.
*/
XGetWidgetInfo((char *) NULL,&list_info);
list_info.raised=MagickFalse;
list_info.bevel_width--;
list_info.width=(unsigned int)
(scroll_info.x-reply_info.x-(QuantumMargin >> 1));
list_info.height=scroll_info.height;
list_info.x=reply_info.x;
list_info.y=scroll_info.y;
if (windows->widget.mapped == MagickFalse)
state|=JumpListState;
/*
Initialize text information.
*/
*text='\0';
XGetWidgetInfo(text,&text_info);
text_info.center=MagickFalse;
text_info.width=reply_info.width;
text_info.height=height;
text_info.x=list_info.x-(QuantumMargin >> 1);
text_info.y=QuantumMargin;
/*
Initialize selection information.
*/
XGetWidgetInfo((char *) NULL,&selection_info);
selection_info.center=MagickFalse;
selection_info.width=list_info.width;
selection_info.height=(unsigned int) ((9*height) >> 3);
selection_info.x=list_info.x;
state&=(~UpdateConfigurationState);
}
if (state & RedrawWidgetState)
{
/*
Redraw Color Browser window.
*/
x=QuantumMargin;
y=text_info.y+((text_info.height-height) >> 1)+font_info->ascent;
(void) XDrawString(display,windows->widget.id,
windows->widget.annotate_context,x,y,ColorPatternText,
Extent(ColorPatternText));
(void) CopyMagickString(text_info.text,glob_pattern,MagickPathExtent);
XDrawWidgetText(display,&windows->widget,&text_info);
XDrawBeveledButton(display,&windows->widget,&grab_info);
XDrawBeveledButton(display,&windows->widget,&reset_info);
XDrawBeveledMatte(display,&windows->widget,&list_info);
XDrawBeveledMatte(display,&windows->widget,&scroll_info);
XDrawTriangleNorth(display,&windows->widget,&north_info);
XDrawBeveledButton(display,&windows->widget,&slider_info);
XDrawTriangleSouth(display,&windows->widget,&south_info);
x=QuantumMargin;
y=reply_info.y+((reply_info.height-height) >> 1)+font_info->ascent;
(void) XDrawString(display,windows->widget.id,
windows->widget.annotate_context,x,y,ColornameText,
Extent(ColornameText));
XDrawBeveledMatte(display,&windows->widget,&reply_info);
XDrawMatteText(display,&windows->widget,&reply_info);
XDrawBeveledButton(display,&windows->widget,&action_info);
XDrawBeveledButton(display,&windows->widget,&cancel_info);
XHighlightWidget(display,&windows->widget,BorderOffset,BorderOffset);
selection_info.id=(~0);
state|=RedrawActionState;
state|=RedrawListState;
state&=(~RedrawWidgetState);
}
if (state & UpdateListState)
{
char
**checklist;
size_t
number_colors;
status=XParseColor(display,windows->widget.map_info->colormap,
glob_pattern,&color);
if ((status != False) || (strchr(glob_pattern,'-') != (char *) NULL))
{
/*
Reply is a single color name-- exit.
*/
(void) CopyMagickString(reply,glob_pattern,MagickPathExtent);
(void) CopyMagickString(glob_pattern,reset_pattern,MagickPathExtent);
action_info.raised=MagickFalse;
XDrawBeveledButton(display,&windows->widget,&action_info);
break;
}
/*
Update color list.
*/
checklist=GetColorList(glob_pattern,&number_colors,exception);
if (number_colors == 0)
{
(void) CopyMagickString(glob_pattern,reset_pattern,MagickPathExtent);
(void) XBell(display,0);
}
else
{
for (i=0; i < (int) colors; i++)
colorlist[i]=DestroyString(colorlist[i]);
if (colorlist != (char **) NULL)
colorlist=(char **) RelinquishMagickMemory(colorlist);
colorlist=checklist;
colors=number_colors;
}
/*
Sort color list in ascending order.
*/
slider_info.height=
scroll_info.height-((slider_info.min_y-scroll_info.y+1) << 1)+1;
if (colors > visible_colors)
slider_info.height=(unsigned int)
((visible_colors*slider_info.height)/colors);
slider_info.max_y=south_info.y-south_info.bevel_width-
slider_info.bevel_width-2;
slider_info.id=0;
slider_info.y=slider_info.min_y;
expose_info.y=slider_info.y;
selection_info.id=(~0);
list_info.id=(~0);
state|=RedrawListState;
/*
Redraw color name & reply.
*/
*reply_info.text='\0';
reply_info.cursor=reply_info.text;
(void) CopyMagickString(text_info.text,glob_pattern,MagickPathExtent);
XDrawWidgetText(display,&windows->widget,&text_info);
XDrawMatteText(display,&windows->widget,&reply_info);
XDrawBeveledMatte(display,&windows->widget,&scroll_info);
XDrawTriangleNorth(display,&windows->widget,&north_info);
XDrawBeveledButton(display,&windows->widget,&slider_info);
XDrawTriangleSouth(display,&windows->widget,&south_info);
XHighlightWidget(display,&windows->widget,BorderOffset,BorderOffset);
state&=(~UpdateListState);
}
if (state & JumpListState)
{
/*
Jump scroll to match user color.
*/
list_info.id=(~0);
for (i=0; i < (int) colors; i++)
if (LocaleCompare(colorlist[i],reply) >= 0)
{
list_info.id=LocaleCompare(colorlist[i],reply) == 0 ? i : ~0;
break;
}
if ((i < slider_info.id) ||
(i >= (int) (slider_info.id+visible_colors)))
slider_info.id=i-(visible_colors >> 1);
selection_info.id=(~0);
state|=RedrawListState;
state&=(~JumpListState);
}
if (state & RedrawListState)
{
/*
Determine slider id and position.
*/
if (slider_info.id >= (int) (colors-visible_colors))
slider_info.id=(int) (colors-visible_colors);
if ((slider_info.id < 0) || (colors <= visible_colors))
slider_info.id=0;
slider_info.y=slider_info.min_y;
if (colors != 0)
slider_info.y+=((ssize_t) slider_info.id*(slider_info.max_y-
slider_info.min_y+1)/colors);
if (slider_info.id != selection_info.id)
{
/*
Redraw scroll bar and file names.
*/
selection_info.id=slider_info.id;
selection_info.y=list_info.y+(height >> 3)+2;
for (i=0; i < (int) visible_colors; i++)
{
selection_info.raised=(slider_info.id+i) != list_info.id ?
MagickTrue : MagickFalse;
selection_info.text=(char *) NULL;
if ((slider_info.id+i) < (int) colors)
selection_info.text=colorlist[slider_info.id+i];
XDrawWidgetText(display,&windows->widget,&selection_info);
selection_info.y+=(int) selection_info.height;
}
/*
Update slider.
*/
if (slider_info.y > expose_info.y)
{
expose_info.height=(unsigned int) slider_info.y-expose_info.y;
expose_info.y=slider_info.y-expose_info.height-
slider_info.bevel_width-1;
}
else
{
expose_info.height=(unsigned int) expose_info.y-slider_info.y;
expose_info.y=slider_info.y+slider_info.height+
slider_info.bevel_width+1;
}
XDrawTriangleNorth(display,&windows->widget,&north_info);
XDrawMatte(display,&windows->widget,&expose_info);
XDrawBeveledButton(display,&windows->widget,&slider_info);
XDrawTriangleSouth(display,&windows->widget,&south_info);
expose_info.y=slider_info.y;
}
state&=(~RedrawListState);
}
if (state & RedrawActionState)
{
static char
colorname[MagickPathExtent];
/*
Display the selected color in a drawing area.
*/
color=windows->widget.pixel_info->matte_color;
(void) XParseColor(display,windows->widget.map_info->colormap,
reply_info.text,&windows->widget.pixel_info->matte_color);
XBestPixel(display,windows->widget.map_info->colormap,(XColor *) NULL,
(unsigned int) windows->widget.visual_info->colormap_size,
&windows->widget.pixel_info->matte_color);
mode_info.text=colorname;
(void) FormatLocaleString(mode_info.text,MagickPathExtent,
"#%02x%02x%02x",windows->widget.pixel_info->matte_color.red,
windows->widget.pixel_info->matte_color.green,
windows->widget.pixel_info->matte_color.blue);
XDrawBeveledButton(display,&windows->widget,&mode_info);
windows->widget.pixel_info->matte_color=color;
state&=(~RedrawActionState);
}
/*
Wait for next event.
*/
if (north_info.raised && south_info.raised)
(void) XIfEvent(display,&event,XScreenEvent,(char *) windows);
else
{
/*
Brief delay before advancing scroll bar.
*/
XDelay(display,delay);
delay=SuspendTime;
(void) XCheckIfEvent(display,&event,XScreenEvent,(char *) windows);
if (north_info.raised == MagickFalse)
if (slider_info.id > 0)
{
/*
Move slider up.
*/
slider_info.id--;
state|=RedrawListState;
}
if (south_info.raised == MagickFalse)
if (slider_info.id < (int) colors)
{
/*
Move slider down.
*/
slider_info.id++;
state|=RedrawListState;
}
if (event.type != ButtonRelease)
continue;
}
switch (event.type)
{
case ButtonPress:
{
if (MatteIsActive(slider_info,event.xbutton))
{
/*
Track slider.
*/
slider_info.active=MagickTrue;
break;
}
if (MatteIsActive(north_info,event.xbutton))
if (slider_info.id > 0)
{
/*
Move slider up.
*/
north_info.raised=MagickFalse;
slider_info.id--;
state|=RedrawListState;
break;
}
if (MatteIsActive(south_info,event.xbutton))
if (slider_info.id < (int) colors)
{
/*
Move slider down.
*/
south_info.raised=MagickFalse;
slider_info.id++;
state|=RedrawListState;
break;
}
if (MatteIsActive(scroll_info,event.xbutton))
{
/*
Move slider.
*/
if (event.xbutton.y < slider_info.y)
slider_info.id-=(visible_colors-1);
else
slider_info.id+=(visible_colors-1);
state|=RedrawListState;
break;
}
if (MatteIsActive(list_info,event.xbutton))
{
int
id;
/*
User pressed list matte.
*/
id=slider_info.id+(event.xbutton.y-(list_info.y+(height >> 1))+1)/
selection_info.height;
if (id >= (int) colors)
break;
(void) CopyMagickString(reply_info.text,colorlist[id],
MagickPathExtent);
reply_info.highlight=MagickFalse;
reply_info.marker=reply_info.text;
reply_info.cursor=reply_info.text+Extent(reply_info.text);
XDrawMatteText(display,&windows->widget,&reply_info);
state|=RedrawActionState;
if (id == list_info.id)
{
(void) CopyMagickString(glob_pattern,reply_info.text,
MagickPathExtent);
state|=UpdateListState;
}
selection_info.id=(~0);
list_info.id=id;
state|=RedrawListState;
break;
}
if (MatteIsActive(grab_info,event.xbutton))
{
/*
User pressed Grab button.
*/
grab_info.raised=MagickFalse;
XDrawBeveledButton(display,&windows->widget,&grab_info);
break;
}
if (MatteIsActive(reset_info,event.xbutton))
{
/*
User pressed Reset button.
*/
reset_info.raised=MagickFalse;
XDrawBeveledButton(display,&windows->widget,&reset_info);
break;
}
if (MatteIsActive(mode_info,event.xbutton))
{
/*
User pressed mode button.
*/
if (mode_info.text != (char *) NULL)
(void) CopyMagickString(reply_info.text,mode_info.text,
MagickPathExtent);
(void) CopyMagickString(primary_selection,reply_info.text,
MagickPathExtent);
(void) XSetSelectionOwner(display,XA_PRIMARY,windows->widget.id,
event.xbutton.time);
reply_info.highlight=XGetSelectionOwner(display,XA_PRIMARY) ==
windows->widget.id ? MagickTrue : MagickFalse;
reply_info.marker=reply_info.text;
reply_info.cursor=reply_info.text+Extent(reply_info.text);
XDrawMatteText(display,&windows->widget,&reply_info);
break;
}
if (MatteIsActive(action_info,event.xbutton))
{
/*
User pressed action button.
*/
action_info.raised=MagickFalse;
XDrawBeveledButton(display,&windows->widget,&action_info);
break;
}
if (MatteIsActive(cancel_info,event.xbutton))
{
/*
User pressed Cancel button.
*/
cancel_info.raised=MagickFalse;
XDrawBeveledButton(display,&windows->widget,&cancel_info);
break;
}
if (MatteIsActive(reply_info,event.xbutton) == MagickFalse)
break;
if (event.xbutton.button != Button2)
{
static Time
click_time;
/*
Move text cursor to position of button press.
*/
x=event.xbutton.x-reply_info.x-(QuantumMargin >> 2);
for (i=1; i <= Extent(reply_info.marker); i++)
if (XTextWidth(font_info,reply_info.marker,i) > x)
break;
reply_info.cursor=reply_info.marker+i-1;
if (event.xbutton.time > (click_time+DoubleClick))
reply_info.highlight=MagickFalse;
else
{
/*
Become the XA_PRIMARY selection owner.
*/
(void) CopyMagickString(primary_selection,reply_info.text,
MagickPathExtent);
(void) XSetSelectionOwner(display,XA_PRIMARY,windows->widget.id,
event.xbutton.time);
reply_info.highlight=XGetSelectionOwner(display,XA_PRIMARY) ==
windows->widget.id ? MagickTrue : MagickFalse;
}
XDrawMatteText(display,&windows->widget,&reply_info);
click_time=event.xbutton.time;
break;
}
/*
Request primary selection.
*/
(void) XConvertSelection(display,XA_PRIMARY,XA_STRING,XA_STRING,
windows->widget.id,event.xbutton.time);
break;
}
case ButtonRelease:
{
if (windows->widget.mapped == MagickFalse)
break;
if (north_info.raised == MagickFalse)
{
/*
User released up button.
*/
delay=SuspendTime << 2;
north_info.raised=MagickTrue;
XDrawTriangleNorth(display,&windows->widget,&north_info);
}
if (south_info.raised == MagickFalse)
{
/*
User released down button.
*/
delay=SuspendTime << 2;
south_info.raised=MagickTrue;
XDrawTriangleSouth(display,&windows->widget,&south_info);
}
if (slider_info.active)
{
/*
Stop tracking slider.
*/
slider_info.active=MagickFalse;
break;
}
if (grab_info.raised == MagickFalse)
{
if (event.xbutton.window == windows->widget.id)
if (MatteIsActive(grab_info,event.xbutton))
{
/*
Select a fill color from the X server.
*/
(void) XGetWindowColor(display,windows,reply_info.text,
exception);
reply_info.marker=reply_info.text;
reply_info.cursor=reply_info.text+Extent(reply_info.text);
XDrawMatteText(display,&windows->widget,&reply_info);
state|=RedrawActionState;
}
grab_info.raised=MagickTrue;
XDrawBeveledButton(display,&windows->widget,&grab_info);
}
if (reset_info.raised == MagickFalse)
{
if (event.xbutton.window == windows->widget.id)
if (MatteIsActive(reset_info,event.xbutton))
{
(void) CopyMagickString(glob_pattern,reset_pattern,
MagickPathExtent);
state|=UpdateListState;
}
reset_info.raised=MagickTrue;
XDrawBeveledButton(display,&windows->widget,&reset_info);
}
if (action_info.raised == MagickFalse)
{
if (event.xbutton.window == windows->widget.id)
{
if (MatteIsActive(action_info,event.xbutton))
{
if (*reply_info.text == '\0')
(void) XBell(display,0);
else
state|=ExitState;
}
}
action_info.raised=MagickTrue;
XDrawBeveledButton(display,&windows->widget,&action_info);
}
if (cancel_info.raised == MagickFalse)
{
if (event.xbutton.window == windows->widget.id)
if (MatteIsActive(cancel_info,event.xbutton))
{
*reply_info.text='\0';
state|=ExitState;
}
cancel_info.raised=MagickTrue;
XDrawBeveledButton(display,&windows->widget,&cancel_info);
}
if (MatteIsActive(reply_info,event.xbutton) == MagickFalse)
break;
break;
}
case ClientMessage:
{
/*
If client window delete message, exit.
*/
if (event.xclient.message_type != windows->wm_protocols)
break;
if (*event.xclient.data.l == (int) windows->wm_take_focus)
{
(void) XSetInputFocus(display,event.xclient.window,RevertToParent,
(Time) event.xclient.data.l[1]);
break;
}
if (*event.xclient.data.l != (int) windows->wm_delete_window)
break;
if (event.xclient.window == windows->widget.id)
{
*reply_info.text='\0';
state|=ExitState;
break;
}
break;
}
case ConfigureNotify:
{
/*
Update widget configuration.
*/
if (event.xconfigure.window != windows->widget.id)
break;
if ((event.xconfigure.width == (int) windows->widget.width) &&
(event.xconfigure.height == (int) windows->widget.height))
break;
windows->widget.width=(unsigned int)
MagickMax(event.xconfigure.width,(int) windows->widget.min_width);
windows->widget.height=(unsigned int)
MagickMax(event.xconfigure.height,(int) windows->widget.min_height);
state|=UpdateConfigurationState;
break;
}
case EnterNotify:
{
if (event.xcrossing.window != windows->widget.id)
break;
state&=(~InactiveWidgetState);
break;
}
case Expose:
{
if (event.xexpose.window != windows->widget.id)
break;
if (event.xexpose.count != 0)
break;
state|=RedrawWidgetState;
break;
}
case KeyPress:
{
static char
command[MagickPathExtent];
static int
length;
static KeySym
key_symbol;
/*
Respond to a user key press.
*/
if (event.xkey.window != windows->widget.id)
break;
length=XLookupString((XKeyEvent *) &event.xkey,command,
(int) sizeof(command),&key_symbol,(XComposeStatus *) NULL);
*(command+length)='\0';
if (AreaIsActive(scroll_info,event.xkey))
{
/*
Move slider.
*/
switch ((int) key_symbol)
{
case XK_Home:
case XK_KP_Home:
{
slider_info.id=0;
break;
}
case XK_Up:
case XK_KP_Up:
{
slider_info.id--;
break;
}
case XK_Down:
case XK_KP_Down:
{
slider_info.id++;
break;
}
case XK_Prior:
case XK_KP_Prior:
{
slider_info.id-=visible_colors;
break;
}
case XK_Next:
case XK_KP_Next:
{
slider_info.id+=visible_colors;
break;
}
case XK_End:
case XK_KP_End:
{
slider_info.id=(int) colors;
break;
}
}
state|=RedrawListState;
break;
}
if ((key_symbol == XK_Return) || (key_symbol == XK_KP_Enter))
{
/*
Read new color or glob pattern.
*/
if (*reply_info.text == '\0')
break;
(void) CopyMagickString(glob_pattern,reply_info.text,MagickPathExtent);
state|=UpdateListState;
break;
}
if (key_symbol == XK_Control_L)
{
state|=ControlState;
break;
}
if (state & ControlState)
switch ((int) key_symbol)
{
case XK_u:
case XK_U:
{
/*
Erase the entire line of text.
*/
*reply_info.text='\0';
reply_info.cursor=reply_info.text;
reply_info.marker=reply_info.text;
reply_info.highlight=MagickFalse;
break;
}
default:
break;
}
XEditText(display,&reply_info,key_symbol,command,state);
XDrawMatteText(display,&windows->widget,&reply_info);
state|=JumpListState;
status=XParseColor(display,windows->widget.map_info->colormap,
reply_info.text,&color);
if (status != False)
state|=RedrawActionState;
break;
}
case KeyRelease:
{
static char
command[MagickPathExtent];
static KeySym
key_symbol;
/*
Respond to a user key release.
*/
if (event.xkey.window != windows->widget.id)
break;
(void) XLookupString((XKeyEvent *) &event.xkey,command,
(int) sizeof(command),&key_symbol,(XComposeStatus *) NULL);
if (key_symbol == XK_Control_L)
state&=(~ControlState);
break;
}
case LeaveNotify:
{
if (event.xcrossing.window != windows->widget.id)
break;
state|=InactiveWidgetState;
break;
}
case MapNotify:
{
mask&=(~CWX);
mask&=(~CWY);
break;
}
case MotionNotify:
{
/*
Discard pending button motion events.
*/
while (XCheckMaskEvent(display,ButtonMotionMask,&event)) ;
if (slider_info.active)
{
/*
Move slider matte.
*/
slider_info.y=event.xmotion.y-
((slider_info.height+slider_info.bevel_width) >> 1)+1;
if (slider_info.y < slider_info.min_y)
slider_info.y=slider_info.min_y;
if (slider_info.y > slider_info.max_y)
slider_info.y=slider_info.max_y;
slider_info.id=0;
if (slider_info.y != slider_info.min_y)
slider_info.id=(int) ((colors*(slider_info.y-
slider_info.min_y+1))/(slider_info.max_y-slider_info.min_y+1));
state|=RedrawListState;
break;
}
if (state & InactiveWidgetState)
break;
if (grab_info.raised == MatteIsActive(grab_info,event.xmotion))
{
/*
Grab button status changed.
*/
grab_info.raised=!grab_info.raised;
XDrawBeveledButton(display,&windows->widget,&grab_info);
break;
}
if (reset_info.raised == MatteIsActive(reset_info,event.xmotion))
{
/*
Reset button status changed.
*/
reset_info.raised=!reset_info.raised;
XDrawBeveledButton(display,&windows->widget,&reset_info);
break;
}
if (action_info.raised == MatteIsActive(action_info,event.xmotion))
{
/*
Action button status changed.
*/
action_info.raised=action_info.raised == MagickFalse ?
MagickTrue : MagickFalse;
XDrawBeveledButton(display,&windows->widget,&action_info);
break;
}
if (cancel_info.raised == MatteIsActive(cancel_info,event.xmotion))
{
/*
Cancel button status changed.
*/
cancel_info.raised=cancel_info.raised == MagickFalse ?
MagickTrue : MagickFalse;
XDrawBeveledButton(display,&windows->widget,&cancel_info);
break;
}
break;
}
case SelectionClear:
{
reply_info.highlight=MagickFalse;
XDrawMatteText(display,&windows->widget,&reply_info);
break;
}
case SelectionNotify:
{
Atom
type;
int
format;
unsigned char
*data;
unsigned long
after,
length;
/*
Obtain response from primary selection.
*/
if (event.xselection.property == (Atom) None)
break;
status=XGetWindowProperty(display,event.xselection.requestor,
event.xselection.property,0L,2047L,MagickTrue,XA_STRING,&type,
&format,&length,&after,&data);
if ((status != Success) || (type != XA_STRING) || (format == 32) ||
(length == 0))
break;
if ((Extent(reply_info.text)+length) >= (MagickPathExtent-1))
(void) XBell(display,0);
else
{
/*
Insert primary selection in reply text.
*/
*(data+length)='\0';
XEditText(display,&reply_info,(KeySym) XK_Insert,(char *) data,
state);
XDrawMatteText(display,&windows->widget,&reply_info);
state|=JumpListState;
state|=RedrawActionState;
}
(void) XFree((void *) data);
break;
}
case SelectionRequest:
{
XSelectionEvent
notify;
XSelectionRequestEvent
*request;
if (reply_info.highlight == MagickFalse)
break;
/*
Set primary selection.
*/
request=(&(event.xselectionrequest));
(void) XChangeProperty(request->display,request->requestor,
request->property,request->target,8,PropModeReplace,
(unsigned char *) primary_selection,Extent(primary_selection));
notify.type=SelectionNotify;
notify.send_event=MagickTrue;
notify.display=request->display;
notify.requestor=request->requestor;
notify.selection=request->selection;
notify.target=request->target;
notify.time=request->time;
if (request->property == None)
notify.property=request->target;
else
notify.property=request->property;
(void) XSendEvent(request->display,request->requestor,False,
NoEventMask,(XEvent *) &notify);
}
default:
break;
}
} while ((state & ExitState) == 0);
XSetCursorState(display,windows,MagickFalse);
(void) XWithdrawWindow(display,windows->widget.id,windows->widget.screen);
XCheckRefreshWindows(display,windows);
/*
Free color list.
*/
for (i=0; i < (int) colors; i++)
colorlist[i]=DestroyString(colorlist[i]);
if (colorlist != (char **) NULL)
colorlist=(char **) RelinquishMagickMemory(colorlist);
exception=DestroyExceptionInfo(exception);
if ((*reply == '\0') || (strchr(reply,'-') != (char *) NULL))
return;
status=XParseColor(display,windows->widget.map_info->colormap,reply,&color);
if (status != False)
return;
XNoticeWidget(display,windows,"Color is unknown to X server:",reply);
(void) CopyMagickString(reply,"gray",MagickPathExtent);
} | 1 | [] | ImageMagick | d95735d25a39300dd874f0227c430d5dbb1f83cc | 160,042,081,096,241,500,000,000,000,000,000,000,000 | 1,159 | https://github.com/ImageMagick/ImageMagick/issues/3333 |
const BigInt& EC_Group::get_g_x() const
{
return data().g_x();
} | 0 | [
"CWE-200"
] | botan | 48fc8df51d99f9d8ba251219367b3d629cc848e3 | 167,550,942,579,796,140,000,000,000,000,000,000,000 | 4 | Address DSA/ECDSA side channel |
tiff_unmap_file (thandle_t handle, tdata_t data, toff_t offset)
{
} | 0 | [
"CWE-20"
] | gdk-pixbuf | 3bac204e0d0241a0d68586ece7099e6acf0e9bea | 282,636,363,375,997,680,000,000,000,000,000,000,000 | 3 | Initial stab at getting the focus code to work.
Fri Jun 1 18:54:47 2001 Jonathan Blandford <[email protected]>
* gtk/gtktreeview.c: (gtk_tree_view_focus): Initial stab at
getting the focus code to work.
(gtk_tree_view_class_init): Add a bunch of keybindings.
* gtk/gtktreeviewcolumn.c
(gtk_tree_view_column_set_cell_data_func):
s/GtkCellDataFunc/GtkTreeCellDataFunc.
(_gtk_tree_view_column_set_tree_view): Use "notify::model" instead
of "properties_changed" to help justify the death of the latter
signal. (-:
* tests/testtreefocus.c (main): Let some columns be focussable to
test focus better. |
CImgList<T>& assign(const CImg<t1>& img1, const CImg<t2>& img2, const CImg<t3>& img3, const CImg<t4>& img4,
const CImg<t5>& img5, const CImg<t6>& img6, const CImg<t7>& img7, const CImg<t8>& img8,
const bool is_shared=false) {
assign(8);
_data[0].assign(img1,is_shared); _data[1].assign(img2,is_shared); _data[2].assign(img3,is_shared);
_data[3].assign(img4,is_shared); _data[4].assign(img5,is_shared); _data[5].assign(img6,is_shared);
_data[6].assign(img7,is_shared); _data[7].assign(img8,is_shared);
return *this;
} | 0 | [
"CWE-770"
] | cimg | 619cb58dd90b4e03ac68286c70ed98acbefd1c90 | 213,667,265,230,302,640,000,000,000,000,000,000,000 | 9 | CImg<>::load_bmp() and CImg<>::load_pandore(): Check that dimensions encoded in file does not exceed file size. |
M_WriteFile
( char const* name,
void* source,
int length )
{
int handle;
int count;
handle = open ( name, O_WRONLY | O_CREAT | O_TRUNC | O_BINARY, 0666);
if (handle == -1)
return false;
count = write (handle, source, length);
close (handle);
if (count < length)
return false;
return true;
} | 0 | [
"CWE-120",
"CWE-787"
] | doom-vanille | 8a6d9a02fa991a91ff90ccdc73b5ceabaa6cb9ec | 267,459,861,950,410,400,000,000,000,000,000,000,000 | 21 | Fix buffer overflow in M_LoadDefaults
Too much data will most likely result in a crash or freeze, but you can overwrite the stack, which can be used to achieve arbitrary code execution. (https://twitter.com/notrevenant/status/1268654123903340544)
gst_asf_demux_get_stream_video_format (asf_stream_video_format * fmt,
guint8 ** p_data, guint64 * p_size)
{
if (*p_size < (4 + 4 + 4 + 2 + 2 + 4 + 4 + 4 + 4 + 4 + 4))
return FALSE;
fmt->size = gst_asf_demux_get_uint32 (p_data, p_size);
/* Sanity checks */
if (fmt->size < 40) {
GST_WARNING ("Corrupted asf_stream_video_format (size < 40)");
return FALSE;
}
if ((guint64) fmt->size - 4 > *p_size) {
GST_WARNING ("Corrupted asf_stream_video_format (codec_data is too small)");
return FALSE;
}
fmt->width = gst_asf_demux_get_uint32 (p_data, p_size);
fmt->height = gst_asf_demux_get_uint32 (p_data, p_size);
fmt->planes = gst_asf_demux_get_uint16 (p_data, p_size);
fmt->depth = gst_asf_demux_get_uint16 (p_data, p_size);
fmt->tag = gst_asf_demux_get_uint32 (p_data, p_size);
fmt->image_size = gst_asf_demux_get_uint32 (p_data, p_size);
fmt->xpels_meter = gst_asf_demux_get_uint32 (p_data, p_size);
fmt->ypels_meter = gst_asf_demux_get_uint32 (p_data, p_size);
fmt->num_colors = gst_asf_demux_get_uint32 (p_data, p_size);
fmt->imp_colors = gst_asf_demux_get_uint32 (p_data, p_size);
return TRUE;
} | 0 | [
"CWE-125",
"CWE-787"
] | gst-plugins-ugly | d21017b52a585f145e8d62781bcc1c5fefc7ee37 | 159,263,281,162,772,500,000,000,000,000,000,000,000 | 28 | asfdemux: Check that we have enough data available before parsing bool/uint extended content descriptors
https://bugzilla.gnome.org/show_bug.cgi?id=777955 |
struct cgroup *task_cgroup_from_root(struct task_struct *task,
struct cgroup_root *root)
{
/*
* No need to lock the task - since we hold css_set_lock the
* task can't change groups.
*/
return cset_cgroup_from_root(task_css_set(task), root);
} | 0 | [
"CWE-416"
] | linux | a06247c6804f1a7c86a2e5398a4c1f1db1471848 | 260,064,628,677,006,780,000,000,000,000,000,000,000 | 9 | psi: Fix uaf issue when psi trigger is destroyed while being polled
With a write operation on psi files replacing the old trigger with a new one,
the lifetime of its waitqueue is totally arbitrary. Overwriting an
existing trigger causes its waitqueue to be freed, and a pending poll()
will stumble on trigger->event_wait, which was destroyed.
Fix this by disallowing redefinition of an existing psi trigger. If a write
operation is used on a file descriptor that already has a psi
trigger, the operation will fail with an EBUSY error.
Also bypass a check for psi_disabled in the psi_trigger_destroy as the
flag can be flipped after the trigger is created, leading to a memory
leak.
Fixes: 0e94682b73bf ("psi: introduce psi monitor")
Reported-by: [email protected]
Suggested-by: Linus Torvalds <[email protected]>
Analyzed-by: Eric Biggers <[email protected]>
Signed-off-by: Suren Baghdasaryan <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Reviewed-by: Eric Biggers <[email protected]>
Acked-by: Johannes Weiner <[email protected]>
Cc: [email protected]
Link: https://lore.kernel.org/r/[email protected] |
static void sched_feat_disable(int i)
{
if (static_key_enabled(&sched_feat_keys[i]))
static_key_slow_dec(&sched_feat_keys[i]);
} | 0 | [
"CWE-200"
] | linux | 4efbc454ba68def5ef285b26ebfcfdb605b52755 | 204,625,932,193,311,470,000,000,000,000,000,000,000 | 5 | sched: Fix information leak in sys_sched_getattr()
We're copying the on-stack structure to userspace, but forgot to give
the right number of bytes to copy. This allows the calling process to
obtain up to PAGE_SIZE bytes from the stack (and possibly adjacent
kernel memory).
This fix copies only as much as we actually have on the stack
(attr->size defaults to the size of the struct) and leaves the rest of
the userspace-provided buffer untouched.
Found using kmemcheck + trinity.
Fixes: d50dde5a10f30 ("sched: Add new scheduler syscalls to support an extended scheduling parameters ABI")
Cc: Dario Faggioli <[email protected]>
Cc: Juri Lelli <[email protected]>
Cc: Ingo Molnar <[email protected]>
Signed-off-by: Vegard Nossum <[email protected]>
Signed-off-by: Peter Zijlstra <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Thomas Gleixner <[email protected]> |
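A rough user-space sketch of the rule described above - copy only as many bytes as the kernel-side structure actually holds - with memcpy standing in for copy_to_user() and all names invented for the example.
#include <string.h>
static int copy_attr_out(void *ubuf, size_t usize,
                         const void *kattr, size_t ksize)
{
    /* The bug copied 'usize' bytes out of a 'ksize'-byte stack object,
     * leaking whatever followed it; copying the minimum cannot. */
    size_t n = ksize < usize ? ksize : usize;
    memcpy(ubuf, kattr, n);
    return 0;
}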
xmlGetDtdAttrDesc(xmlDtdPtr dtd, const xmlChar *elem, const xmlChar *name) {
xmlAttributeTablePtr table;
xmlAttributePtr cur;
xmlChar *uqname = NULL, *prefix = NULL;
if (dtd == NULL) return(NULL);
if (dtd->attributes == NULL) return(NULL);
table = (xmlAttributeTablePtr) dtd->attributes;
if (table == NULL)
return(NULL);
uqname = xmlSplitQName2(name, &prefix);
if (uqname != NULL) {
cur = xmlHashLookup3(table, uqname, prefix, elem);
if (prefix != NULL) xmlFree(prefix);
if (uqname != NULL) xmlFree(uqname);
} else
cur = xmlHashLookup3(table, name, NULL, elem);
return(cur);
} | 0 | [] | libxml2 | 932cc9896ab41475d4aa429c27d9afd175959d74 | 21,709,995,283,538,685,000,000,000,000,000,000,000 | 22 | Fix buffer size checks in xmlSnprintfElementContent
xmlSnprintfElementContent failed to correctly check the available
buffer space in two locations.
Fixes bug 781333 (CVE-2017-9047) and bug 781701 (CVE-2017-9048).
Thanks to Marcel Böhme and Thuan Pham for the report. |
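A small sketch of the bounds-checking pattern such a fix restores, assuming a NUL-terminated output buffer; this is illustrative and not the libxml2 code.
#include <string.h>
/* Append 'piece' to 'buf' only if it fits in the space that is actually
 * left, recomputed from the current length on every call. */
static int append_checked(char *buf, size_t size, const char *piece)
{
    size_t len = strlen(buf);
    size_t need = strlen(piece) + 1;            /* include the terminator */
    if (len >= size || size - len < need)
        return -1;                              /* refuse instead of overflowing */
    memcpy(buf + len, piece, need);
    return 0;
}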
static void rtl8139_write_buffer(RTL8139State *s, const void *buf, int size)
{
PCIDevice *d = PCI_DEVICE(s);
if (s->RxBufAddr + size > s->RxBufferSize)
{
int wrapped = MOD2(s->RxBufAddr + size, s->RxBufferSize);
/* write packet data */
if (wrapped && !(s->RxBufferSize < 65536 && rtl8139_RxWrap(s)))
{
DPRINTF(">>> rx packet wrapped in buffer at %d\n", size - wrapped);
if (size > wrapped)
{
pci_dma_write(d, s->RxBuf + s->RxBufAddr,
buf, size-wrapped);
}
/* reset buffer pointer */
s->RxBufAddr = 0;
pci_dma_write(d, s->RxBuf + s->RxBufAddr,
buf + (size-wrapped), wrapped);
s->RxBufAddr = wrapped;
return;
}
}
/* non-wrapping path or overwrapping enabled */
pci_dma_write(d, s->RxBuf + s->RxBufAddr, buf, size);
s->RxBufAddr += size;
} | 0 | [
"CWE-835"
] | qemu | 5311fb805a4403bba024e83886fa0e7572265de4 | 299,244,727,121,129,370,000,000,000,000,000,000,000 | 36 | rtl8139: switch to use qemu_receive_packet() for loopback
This patch switches to use qemu_receive_packet() which can detect
reentrancy and return early.
This is intended to address CVE-2021-3416.
Cc: Prasad J Pandit <[email protected]>
Cc: [email protected]
Buglink: https://bugs.launchpad.net/qemu/+bug/1910826
Reviewed-by: Philippe Mathieu-Daudé <[email protected]>
Signed-off-by: Alexander Bulekov <[email protected]>
Signed-off-by: Jason Wang <[email protected]> |
static const char *urlsection(cmd_parms *cmd, void *mconfig, const char *arg)
{
const char *errmsg;
const char *endp = ap_strrchr_c(arg, '>');
int old_overrides = cmd->override;
char *old_path = cmd->path;
core_dir_config *conf;
ap_regex_t *r = NULL;
const command_rec *thiscmd = cmd->cmd;
ap_conf_vector_t *new_url_conf = ap_create_per_dir_config(cmd->pool);
const char *err = ap_check_cmd_context(cmd, NOT_IN_DIR_CONTEXT);
if (err != NULL) {
return err;
}
if (endp == NULL) {
return unclosed_directive(cmd);
}
arg = apr_pstrndup(cmd->temp_pool, arg, endp - arg);
if (!arg[0]) {
return missing_container_arg(cmd);
}
cmd->path = ap_getword_conf(cmd->pool, &arg);
cmd->override = OR_ALL|ACCESS_CONF;
if (thiscmd->cmd_data) { /* <LocationMatch> */
r = ap_pregcomp(cmd->pool, cmd->path, AP_REG_EXTENDED);
if (!r) {
return "Regex could not be compiled";
}
}
else if (!strcmp(cmd->path, "~")) {
cmd->path = ap_getword_conf(cmd->pool, &arg);
r = ap_pregcomp(cmd->pool, cmd->path, AP_REG_EXTENDED);
if (!r) {
return "Regex could not be compiled";
}
}
/* initialize our config and fetch it */
conf = ap_set_config_vectors(cmd->server, new_url_conf, cmd->path,
&core_module, cmd->pool);
errmsg = ap_walk_config(cmd->directive->first_child, cmd, new_url_conf);
if (errmsg != NULL)
return errmsg;
conf->d = apr_pstrdup(cmd->pool, cmd->path); /* No mangling, please */
conf->d_is_fnmatch = apr_fnmatch_test(conf->d) != 0;
conf->r = r;
if (r) {
conf->refs = apr_array_make(cmd->pool, 8, sizeof(char *));
ap_regname(r, conf->refs, AP_REG_MATCH, 1);
}
ap_add_per_url_conf(cmd->server, new_url_conf);
if (*arg != '\0') {
return apr_pstrcat(cmd->pool, "Multiple ", thiscmd->name,
"> arguments not (yet) supported.", NULL);
}
cmd->path = old_path;
cmd->override = old_overrides;
return NULL;
} | 0 | [] | httpd | ecebcc035ccd8d0e2984fe41420d9e944f456b3c | 242,533,514,496,341,000,000,000,000,000,000,000,000 | 71 | Merged r1734009,r1734231,r1734281,r1838055,r1838079,r1840229,r1876664,r1876674,r1876784,r1879078,r1881620,r1887311,r1888871 from trunk:
*) core: Split ap_create_request() from ap_read_request(). [Graham Leggett]
*) core, h2: common ap_parse_request_line() and ap_check_request_header()
code. [Yann Ylavic]
*) core: Add StrictHostCheck to allow unconfigured hostnames to be
rejected. [Eric Covener]
git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1890245 13f79535-47bb-0310-9956-ffa450edef68 |
int TfLiteIntArrayEqual(const TfLiteIntArray* a, const TfLiteIntArray* b) {
if (a == b) return 1;
if (a == NULL || b == NULL) return 0;
return TfLiteIntArrayEqualsArray(a, b->size, b->data);
} | 0 | [
"CWE-190"
] | tensorflow | 7c8cc4ec69cd348e44ad6a2699057ca88faad3e5 | 68,959,748,762,670,620,000,000,000,000,000,000,000 | 5 | Fix a dangerous integer overflow and a malloc of negative size.
PiperOrigin-RevId: 371254154
Change-Id: I250a98a3df26328770167025670235a963a72da0 |
star_fixup_header (struct tar_sparse_file *file)
{
/* NOTE! st_size was initialized from the header
which actually contains archived size. The following fixes it */
off_t realsize = OFF_FROM_HEADER (current_header->star_in_header.realsize);
file->stat_info->archive_file_size = file->stat_info->stat.st_size;
file->stat_info->stat.st_size = max (0, realsize);
return 0 <= realsize;
} | 0 | [] | tar | c15c42ccd1e2377945fd0414eca1a49294bff454 | 327,694,903,339,996,840,000,000,000,000,000,000,000 | 9 | Fix CVE-2018-20482
* NEWS: Update.
* src/sparse.c (sparse_dump_region): Handle short read condition.
(sparse_extract_region,check_data_region): Fix dumped_size calculation.
Handle short read condition.
(pax_decode_header): Fix dumped_size calculation.
* tests/Makefile.am: Add new testcases.
* tests/testsuite.at: Likewise.
* tests/sptrcreat.at: New file.
* tests/sptrdiff00.at: New file.
* tests/sptrdiff01.at: New file. |
static inline struct page *dev_alloc_page(void)
{
return dev_alloc_pages(0);
} | 0 | [
"CWE-20"
] | linux | 2b16f048729bf35e6c28a40cbfad07239f9dcd90 | 178,195,793,881,014,030,000,000,000,000,000,000,000 | 4 | net: create skb_gso_validate_mac_len()
If you take a GSO skb, and split it into packets, will the MAC
length (L2 + L3 + L4 headers + payload) of those packets be small
enough to fit within a given length?
Move skb_gso_mac_seglen() to skbuff.h with other related functions
like skb_gso_network_seglen() so we can use it, and then create
skb_gso_validate_mac_len to do the full calculation.
Signed-off-by: Daniel Axtens <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
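A toy sketch of the question the new helper answers - will every packet produced from a GSO super-packet stay within a given MAC length? The structure and field names are stand-ins, not the kernel's skb types.
#include <stdbool.h>
#include <stddef.h>
struct fake_gso {
    size_t header_len;   /* L2 + L3 + L4 header bytes prepended to each segment */
    size_t seg_size;     /* payload bytes carried by each full segment */
};
/* The largest resulting packet is one full segment plus all headers, so it
 * is enough to compare that worst case against the limit. */
static bool gso_mac_len_fits(const struct fake_gso *g, size_t limit)
{
    return g->header_len + g->seg_size <= limit;
}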
static ssize_t iowarrior_read(struct file *file, char __user *buffer,
size_t count, loff_t *ppos)
{
struct iowarrior *dev;
int read_idx;
int offset;
dev = file->private_data;
/* verify that the device wasn't unplugged */
if (!dev || !dev->present)
return -ENODEV;
dev_dbg(&dev->interface->dev, "minor %d, count = %zd\n",
dev->minor, count);
/* read count must be packet size (+ time stamp) */
if ((count != dev->report_size)
&& (count != (dev->report_size + 1)))
return -EINVAL;
/* repeat until no buffer overrun in callback handler occur */
do {
atomic_set(&dev->overflow_flag, 0);
if ((read_idx = read_index(dev)) == -1) {
/* queue empty */
if (file->f_flags & O_NONBLOCK)
return -EAGAIN;
else {
//next line will return when there is either new data, or the device is unplugged
int r = wait_event_interruptible(dev->read_wait,
(!dev->present
|| (read_idx =
read_index
(dev)) !=
-1));
if (r) {
//we were interrupted by a signal
return -ERESTART;
}
if (!dev->present) {
//The device was unplugged
return -ENODEV;
}
if (read_idx == -1) {
// Can this happen ???
return 0;
}
}
}
offset = read_idx * (dev->report_size + 1);
if (copy_to_user(buffer, dev->read_queue + offset, count)) {
return -EFAULT;
}
} while (atomic_read(&dev->overflow_flag));
read_idx = ++read_idx == MAX_INTERRUPT_BUFFER ? 0 : read_idx;
atomic_set(&dev->read_idx, read_idx);
return count;
} | 0 | [
"CWE-416"
] | linux | edc4746f253d907d048de680a621e121517f484b | 337,305,622,275,388,720,000,000,000,000,000,000,000 | 61 | USB: iowarrior: fix use-after-free on disconnect
A recent fix addressing a deadlock on disconnect introduced a new bug
by moving the present flag out of the critical section protected by the
driver-data mutex. This could lead to a racing release() freeing the
driver data before disconnect() is done with it.
Due to insufficient locking a related use-after-free could be triggered
also before the above mentioned commit. Specifically, the driver needs
to hold the driver-data mutex also while checking the opened flag at
disconnect().
Fixes: c468a8aa790e ("usb: iowarrior: fix deadlock on disconnect")
Fixes: 946b960d13c1 ("USB: add driver for iowarrior devices.")
Cc: stable <[email protected]> # 2.6.21
Reported-by: [email protected]
Signed-off-by: Johan Hovold <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Greg Kroah-Hartman <[email protected]> |
ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
struct ps_data *ps;
/*
* broadcast/multicast frame
*
* If any of the associated/peer stations is in power save mode,
* the frame is buffered to be sent after DTIM beacon frame.
* This is done either by the hardware or us.
*/
/* powersaving STAs currently only in AP/VLAN/mesh mode */
if (tx->sdata->vif.type == NL80211_IFTYPE_AP ||
tx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
if (!tx->sdata->bss)
return TX_CONTINUE;
ps = &tx->sdata->bss->ps;
} else if (ieee80211_vif_is_mesh(&tx->sdata->vif)) {
ps = &tx->sdata->u.mesh.ps;
} else {
return TX_CONTINUE;
}
/* no buffering for ordered frames */
if (ieee80211_has_order(hdr->frame_control))
return TX_CONTINUE;
if (ieee80211_is_probe_req(hdr->frame_control))
return TX_CONTINUE;
if (ieee80211_hw_check(&tx->local->hw, QUEUE_CONTROL))
info->hw_queue = tx->sdata->vif.cab_queue;
/* no stations in PS mode and no buffered packets */
if (!atomic_read(&ps->num_sta_ps) && skb_queue_empty(&ps->bc_buf))
return TX_CONTINUE;
info->flags |= IEEE80211_TX_CTL_SEND_AFTER_DTIM;
/* device releases frame after DTIM beacon */
if (!ieee80211_hw_check(&tx->local->hw, HOST_BROADCAST_PS_BUFFERING))
return TX_CONTINUE;
/* buffered in mac80211 */
if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER)
purge_old_ps_buffers(tx->local);
if (skb_queue_len(&ps->bc_buf) >= AP_MAX_BC_BUFFER) {
ps_dbg(tx->sdata,
"BC TX buffer full - dropping the oldest frame\n");
ieee80211_free_txskb(&tx->local->hw, skb_dequeue(&ps->bc_buf));
} else
tx->local->total_ps_buffered++;
skb_queue_tail(&ps->bc_buf, tx->skb);
return TX_QUEUED;
} | 0 | [
"CWE-476"
] | linux | bddc0c411a45d3718ac535a070f349be8eca8d48 | 154,962,554,391,871,830,000,000,000,000,000,000,000 | 63 | mac80211: Fix NULL ptr deref for injected rate info
The commit cb17ed29a7a5 ("mac80211: parse radiotap header when selecting Tx
queue") moved the code to validate the radiotap header from
ieee80211_monitor_start_xmit to ieee80211_parse_tx_radiotap. This made it
possible to share more code with the new Tx queue selection code for
injected frames. But at the same time, it now required the call of
ieee80211_parse_tx_radiotap at the beginning of functions which wanted to
handle the radiotap header. And this broke rate parsing in the radiotap
header parser.
The radiotap parser for rates is operating most of the time only on the
data in the actual radiotap header. But for the 802.11a/b/g rates, it must
also know the selected band from the chandef information. But this
information is only written to the ieee80211_tx_info at the end of the
ieee80211_monitor_start_xmit - long after ieee80211_parse_tx_radiotap was
already called. The info->band information was therefore always 0
(NL80211_BAND_2GHZ) when the parser code tried to access it.
For a 5GHz only device, injecting a frame with 802.11a rates would cause a
NULL pointer dereference because local->hw.wiphy->bands[NL80211_BAND_2GHZ]
would most likely have been NULL when the radiotap parser searched for the
correct rate index of the driver.
Cc: [email protected]
Reported-by: Ben Greear <[email protected]>
Fixes: cb17ed29a7a5 ("mac80211: parse radiotap header when selecting Tx queue")
Signed-off-by: Mathy Vanhoef <[email protected]>
[[email protected]: added commit message]
Signed-off-by: Sven Eckelmann <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Johannes Berg <[email protected]> |
qf_parse_fmt_k(regmatch_T *rmp, int midx, qffields_T *fields)
{
if (rmp->startp[midx] == NULL)
return QF_FAIL;
fields->end_col = (int)atol((char *)rmp->startp[midx]);
return QF_OK;
} | 0 | [
"CWE-416"
] | vim | 4f1b083be43f351bc107541e7b0c9655a5d2c0bb | 129,195,799,042,936,500,000,000,000,000,000,000,000 | 7 | patch 9.0.0322: crash when no errors and 'quickfixtextfunc' is set
Problem: Crash when no errors and 'quickfixtextfunc' is set.
Solution: Do not handle errors if there aren't any. |
gamma (half h, float m)
{
//
// Conversion from half to unsigned char pixel data,
// with gamma correction. The conversion is the same
// as in the exrdisplay program's ImageView class,
// except with defog, kneeLow, and kneeHigh fixed
// at 0.0, 0.0, and 5.0 respectively.
//
float x = max (0.f, h * m);
if (x > 1)
x = 1 + knee (x - 1, 0.184874f);
return (unsigned char) (IMATH_NAMESPACE::clamp (Math<float>::pow (x, 0.4545f) * 84.66f,
0.f,
255.f));
} | 0 | [
"CWE-125"
] | openexr | e79d2296496a50826a15c667bf92bdc5a05518b4 | 212,678,898,283,179,000,000,000,000,000,000,000,000 | 19 | fix memory leaks and invalid memory accesses
Signed-off-by: Peter Hillman <[email protected]> |
vcs_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
struct inode *inode = file_inode(file);
struct vc_data *vc;
struct vcs_poll_data *poll;
long pos, read;
int attr, uni_mode, row, col, maxcol, viewed;
unsigned short *org = NULL;
ssize_t ret;
char *con_buf;
con_buf = (char *) __get_free_page(GFP_KERNEL);
if (!con_buf)
return -ENOMEM;
pos = *ppos;
/* Select the proper current console and verify
* sanity of the situation under the console lock.
*/
console_lock();
uni_mode = use_unicode(inode);
attr = use_attributes(inode);
ret = -ENXIO;
vc = vcs_vc(inode, &viewed);
if (!vc)
goto unlock_out;
ret = -EINVAL;
if (pos < 0)
goto unlock_out;
/* we enforce 32-bit alignment for pos and count in unicode mode */
if (uni_mode && (pos | count) & 3)
goto unlock_out;
poll = file->private_data;
if (count && poll)
poll->event = 0;
read = 0;
ret = 0;
while (count) {
char *con_buf0, *con_buf_start;
long this_round, size;
ssize_t orig_count;
long p = pos;
/* Check whether we are above size each round,
* as copy_to_user at the end of this loop
* could sleep.
*/
size = vcs_size(inode);
if (size < 0) {
if (read)
break;
ret = size;
goto unlock_out;
}
if (pos >= size)
break;
if (count > size - pos)
count = size - pos;
this_round = count;
if (this_round > CON_BUF_SIZE)
this_round = CON_BUF_SIZE;
/* Perform the whole read into the local con_buf.
* Then we can drop the console spinlock and safely
* attempt to move it to userspace.
*/
con_buf_start = con_buf0 = con_buf;
orig_count = this_round;
maxcol = vc->vc_cols;
if (uni_mode) {
unsigned int nr;
ret = vc_uniscr_check(vc);
if (ret)
break;
p /= 4;
row = p / vc->vc_cols;
col = p % maxcol;
nr = maxcol - col;
do {
if (nr > this_round/4)
nr = this_round/4;
vc_uniscr_copy_line(vc, con_buf0, viewed,
row, col, nr);
con_buf0 += nr * 4;
this_round -= nr * 4;
row++;
col = 0;
nr = maxcol;
} while (this_round);
} else if (!attr) {
org = screen_pos(vc, p, viewed);
col = p % maxcol;
p += maxcol - col;
while (this_round-- > 0) {
*con_buf0++ = (vcs_scr_readw(vc, org++) & 0xff);
if (++col == maxcol) {
org = screen_pos(vc, p, viewed);
col = 0;
p += maxcol;
}
}
} else {
if (p < HEADER_SIZE) {
size_t tmp_count;
/* clamp header values if they don't fit */
con_buf0[0] = min(vc->vc_rows, 0xFFu);
con_buf0[1] = min(vc->vc_cols, 0xFFu);
getconsxy(vc, con_buf0 + 2);
con_buf_start += p;
this_round += p;
if (this_round > CON_BUF_SIZE) {
this_round = CON_BUF_SIZE;
orig_count = this_round - p;
}
tmp_count = HEADER_SIZE;
if (tmp_count > this_round)
tmp_count = this_round;
/* Advance state pointers and move on. */
this_round -= tmp_count;
p = HEADER_SIZE;
con_buf0 = con_buf + HEADER_SIZE;
/* If this_round >= 0, then p is even... */
} else if (p & 1) {
/* Skip first byte for output if start address is odd
* Update region sizes up/down depending on free
* space in buffer.
*/
con_buf_start++;
if (this_round < CON_BUF_SIZE)
this_round++;
else
orig_count--;
}
if (this_round > 0) {
unsigned short *tmp_buf = (unsigned short *)con_buf0;
p -= HEADER_SIZE;
p /= 2;
col = p % maxcol;
org = screen_pos(vc, p, viewed);
p += maxcol - col;
/* Buffer has even length, so we can always copy
* character + attribute. We do not copy last byte
* to userspace if this_round is odd.
*/
this_round = (this_round + 1) >> 1;
while (this_round) {
*tmp_buf++ = vcs_scr_readw(vc, org++);
this_round --;
if (++col == maxcol) {
org = screen_pos(vc, p, viewed);
col = 0;
p += maxcol;
}
}
}
}
/* Finally, release the console semaphore while we push
* all the data to userspace from our temporary buffer.
*
* AKPM: Even though it's a semaphore, we should drop it because
* the pagefault handling code may want to call printk().
*/
console_unlock();
ret = copy_to_user(buf, con_buf_start, orig_count);
console_lock();
if (ret) {
read += (orig_count - ret);
ret = -EFAULT;
break;
}
buf += orig_count;
pos += orig_count;
read += orig_count;
count -= orig_count;
}
*ppos += read;
if (read)
ret = read;
unlock_out:
console_unlock();
free_page((unsigned long) con_buf);
return ret;
} | 0 | [
"CWE-125"
] | tty | 0c9acb1af77a3cb8707e43f45b72c95266903cee | 297,247,948,997,435,260,000,000,000,000,000,000,000 | 201 | vcs: prevent write access to vcsu devices
Commit d21b0be246bf ("vt: introduce unicode mode for /dev/vcs") guarded
against using devices containing attributes as this is not yet
implemented. It however failed to guard against writes to any devices
as this is also unimplemented.
Reported-by: Or Cohen <[email protected]>
Signed-off-by: Nicolas Pitre <[email protected]>
Cc: <[email protected]> # v4.19+
Cc: Jiri Slaby <[email protected]>
Fixes: d21b0be246bf ("vt: introduce unicode mode for /dev/vcs")
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Greg Kroah-Hartman <[email protected]> |
static inline bool cpu_has_vmx_apicv(void)
{
return cpu_has_vmx_apic_register_virt() &&
cpu_has_vmx_virtual_intr_delivery() &&
cpu_has_vmx_posted_intr();
} | 0 | [] | kvm | a642fc305053cc1c6e47e4f4df327895747ab485 | 25,674,877,048,260,900,000,000,000,000,000,000,000 | 6 | kvm: vmx: handle invvpid vm exit gracefully
On systems with invvpid instruction support (corresponding bit in
IA32_VMX_EPT_VPID_CAP MSR is set) guest invocation of invvpid
causes vm exit, which is currently not handled and results in
propagation of unknown exit to userspace.
Fix this by installing an invvpid vm exit handler.
This is CVE-2014-3646.
Cc: [email protected]
Signed-off-by: Petr Matousek <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]> |
Tensor Tensor::SubSlice(int64_t index) const {
CHECK_GE(dims(), 1); // Crash ok.
CHECK_LE(0, index); // Crash ok.
int64_t dim0_size = shape_.dim_size(0);
CHECK_LE(index, dim0_size); // Crash ok.
Tensor ret;
ret.shape_ = shape_;
ret.shape_.RemoveDim(0);
ret.set_dtype(dtype());
ret.buf_ = nullptr;
if (dim0_size > 0) {
const int64_t elems_per_dim0 = NumElements() / dim0_size;
const int64_t delta = index * elems_per_dim0;
const int64_t num_elems = elems_per_dim0;
if (buf_) {
DataType dt = dtype();
CASES(dt, ret.buf_ = new SubBuffer<T>(buf_, delta, num_elems));
}
}
return ret;
} | 0 | [
"CWE-345"
] | tensorflow | abcced051cb1bd8fb05046ac3b6023a7ebcc4578 | 9,944,327,875,258,772,000,000,000,000,000,000,000 | 21 | Prevent crashes when loading tensor slices with unsupported types.
Also fix the `Tensor(const TensorShape&)` constructor swapping the LOG(FATAL)
messages for the unset and unsupported types.
PiperOrigin-RevId: 392695027
Change-Id: I4beda7db950db951d273e3259a7c8534ece49354 |
encode_NOTE(const struct ofpact_note *note,
enum ofp_version ofp_version OVS_UNUSED, struct ofpbuf *out)
{
size_t start_ofs = out->size;
struct nx_action_note *nan;
put_NXAST_NOTE(out);
out->size = out->size - sizeof nan->note;
ofpbuf_put(out, note->data, note->length);
pad_ofpat(out, start_ofs);
} | 0 | [
"CWE-125"
] | ovs | 9237a63c47bd314b807cda0bd2216264e82edbe8 | 97,417,207,261,621,700,000,000,000,000,000,000,000 | 12 | ofp-actions: Avoid buffer overread in BUNDLE action decoding.
Reported-at: https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=9052
Signed-off-by: Ben Pfaff <[email protected]>
Acked-by: Justin Pettit <[email protected]> |
mail_parser_run (EMailParser *parser,
EMailPartList *part_list,
GCancellable *cancellable)
{
EMailExtensionRegistry *reg;
CamelMimeMessage *message;
EMailPart *mail_part;
GQueue *parsers;
GQueue mail_part_queue = G_QUEUE_INIT;
GList *iter;
GString *part_id;
if (cancellable)
g_object_ref (cancellable);
else
cancellable = g_cancellable_new ();
g_mutex_lock (&parser->priv->mutex);
g_hash_table_insert (parser->priv->ongoing_part_lists, cancellable, part_list);
g_mutex_unlock (&parser->priv->mutex);
message = e_mail_part_list_get_message (part_list);
reg = e_mail_parser_get_extension_registry (parser);
parsers = e_mail_extension_registry_get_for_mime_type (
reg, "application/vnd.evolution.message");
if (parsers == NULL)
parsers = e_mail_extension_registry_get_for_mime_type (
reg, "message/*");
/* No parsers means the internal Evolution parser
* extensions were not loaded. Something is terribly wrong! */
g_return_if_fail (parsers != NULL);
part_id = g_string_new (".message");
mail_part = e_mail_part_new (CAMEL_MIME_PART (message), ".message");
e_mail_part_list_add_part (part_list, mail_part);
g_object_unref (mail_part);
for (iter = parsers->head; iter; iter = iter->next) {
EMailParserExtension *extension;
gboolean message_handled;
if (g_cancellable_is_cancelled (cancellable))
break;
extension = iter->data;
if (!extension)
continue;
message_handled = e_mail_parser_extension_parse (
extension, parser,
CAMEL_MIME_PART (message),
part_id, cancellable, &mail_part_queue);
if (message_handled)
break;
}
while (!g_queue_is_empty (&mail_part_queue)) {
mail_part = g_queue_pop_head (&mail_part_queue);
e_mail_part_list_add_part (part_list, mail_part);
g_object_unref (mail_part);
}
g_mutex_lock (&parser->priv->mutex);
g_hash_table_remove (parser->priv->ongoing_part_lists, cancellable);
g_mutex_unlock (&parser->priv->mutex);
g_clear_object (&cancellable);
g_string_free (part_id, TRUE);
} | 1 | [
"CWE-347"
] | evolution | 9c55a311325f5905d8b8403b96607e46cf343f21 | 15,442,193,154,534,747,000,000,000,000,000,000,000 | 75 | I#120 - Show security bar above message headers
Closes https://gitlab.gnome.org/GNOME/evolution/issues/120 |
ff_layout_commit_pagelist(struct inode *inode, struct list_head *mds_pages,
int how, struct nfs_commit_info *cinfo)
{
return pnfs_generic_commit_pagelist(inode, mds_pages, how, cinfo,
ff_layout_initiate_commit);
} | 0 | [
"CWE-787"
] | linux | ed34695e15aba74f45247f1ee2cf7e09d449f925 | 28,148,689,638,313,914,000,000,000,000,000,000,000 | 6 | pNFS/flexfiles: fix incorrect size check in decode_nfs_fh()
We (adam zabrocki, alexander matrosov, alexander tereshkin, maksym
bazalii) observed the check:
if (fh->size > sizeof(struct nfs_fh))
should not use the size of the nfs_fh struct which includes an extra two
bytes from the size field.
struct nfs_fh {
        unsigned short size;
        unsigned char  data[NFS_MAXFHSIZE];
}
but should determine the size from data[NFS_MAXFHSIZE] so the memcpy
will not write 2 bytes beyond destination. The proposed fix is to
compare against the NFS_MAXFHSIZE directly, as is done elsewhere in fs
code base.
Fixes: d67ae825a59d ("pnfs/flexfiles: Add the FlexFile Layout Driver")
Signed-off-by: Nikola Livic <[email protected]>
Signed-off-by: Dan Carpenter <[email protected]>
Signed-off-by: Trond Myklebust <[email protected]> |
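A sketch of the corrected bound, using a simplified copy of the structure quoted above; the helper name and the constant's value are chosen for the example.
#include <string.h>
#define NFS_MAXFHSIZE 128   /* illustrative value */
struct nfs_fh_like {
    unsigned short size;
    unsigned char  data[NFS_MAXFHSIZE];
};
/* Compare the incoming length against the capacity of data[], not against
 * sizeof(struct ...), which is two bytes larger because of the size field. */
static int fh_copy(struct nfs_fh_like *fh, const unsigned char *src, size_t len)
{
    if (len > NFS_MAXFHSIZE)
        return -1;
    fh->size = (unsigned short)len;
    memcpy(fh->data, src, len);
    return 0;
}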
static int inotify_update_existing_watch(struct fsnotify_group *group,
struct inode *inode,
u32 arg)
{
struct fsnotify_mark *fsn_mark;
struct inotify_inode_mark *i_mark;
__u32 old_mask, new_mask;
__u32 mask;
int add = (arg & IN_MASK_ADD);
int create = (arg & IN_MASK_CREATE);
int ret;
mask = inotify_arg_to_mask(arg);
fsn_mark = fsnotify_find_mark(&inode->i_fsnotify_marks, group);
if (!fsn_mark)
return -ENOENT;
else if (create) {
ret = -EEXIST;
goto out;
}
i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);
spin_lock(&fsn_mark->lock);
old_mask = fsn_mark->mask;
if (add)
fsn_mark->mask |= mask;
else
fsn_mark->mask = mask;
new_mask = fsn_mark->mask;
spin_unlock(&fsn_mark->lock);
if (old_mask != new_mask) {
/* more bits in old than in new? */
int dropped = (old_mask & ~new_mask);
/* more bits in this fsn_mark than the inode's mask? */
int do_inode = (new_mask & ~inode->i_fsnotify_mask);
/* update the inode with this new fsn_mark */
if (dropped || do_inode)
fsnotify_recalc_mask(inode->i_fsnotify_marks);
}
/* return the wd */
ret = i_mark->wd;
out:
/* match the get from fsnotify_find_mark() */
fsnotify_put_mark(fsn_mark);
return ret;
} | 0 | [
"CWE-401"
] | linux-fs | 62c9d2674b31d4c8a674bee86b7edc6da2803aea | 10,661,820,551,041,424,000,000,000,000,000,000,000 | 54 | inotify: Fix fsnotify_mark refcount leak in inotify_update_existing_watch()
Commit 4d97f7d53da7dc83 ("inotify: Add flag IN_MASK_CREATE for
inotify_add_watch()") forgot to call fsnotify_put_mark() with
IN_MASK_CREATE after fsnotify_find_mark()
Fixes: 4d97f7d53da7dc83 ("inotify: Add flag IN_MASK_CREATE for inotify_add_watch()")
Signed-off-by: ZhangXiaoxu <[email protected]>
Signed-off-by: Jan Kara <[email protected]> |
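A reduced sketch of the reference-counting rule the fix restores: every successful find that takes a reference is paired with a put, including on the early-return error path. All types and helpers below are stand-ins, not fsnotify API.
#include <errno.h>
#include <stddef.h>
struct fake_mark { int refcount; };
static struct fake_mark *fake_find(struct fake_mark *m)
{
    if (!m)
        return NULL;
    m->refcount++;            /* the find takes a reference */
    return m;
}
static void fake_put(struct fake_mark *m) { m->refcount--; }
static int fake_update(struct fake_mark *m, int create)
{
    struct fake_mark *found = fake_find(m);
    if (!found)
        return -ENOENT;
    if (create) {
        fake_put(found);      /* the put that was missing before the fix */
        return -EEXIST;
    }
    /* ... update the mask here ... */
    fake_put(found);
    return 0;
}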
dir_policy_permits_address(const tor_addr_t *addr)
{
return addr_policy_permits_tor_addr(addr, 1, dir_policy);
} | 0 | [
"CWE-119"
] | tor | 43414eb98821d3b5c6c65181d7545ce938f82c8e | 227,549,464,853,113,650,000,000,000,000,000,000,000 | 4 | Fix bounds-checking in policy_summarize
Found by piebeer. |
conntrack_clear(struct dp_packet *packet)
{
/* According to pkt_metadata_init(), ct_state == 0 is enough to make all of
* the conntrack fields invalid. */
packet->md.ct_state = 0;
} | 0 | [
"CWE-400"
] | ovs | 53c1b8b166f3dd217bc391d707885f789e9ecc49 | 53,582,976,647,843,150,000,000,000,000,000,000,000 | 6 | flow: Support extra padding length.
Although not required, padding can be optionally added until
the packet length is MTU bytes. A packet with extra padding
currently fails sanity checks.
Vulnerability: CVE-2020-35498
Fixes: fa8d9001a624 ("miniflow_extract: Properly handle small IP packets.")
Reported-by: Joakim Hindersson <[email protected]>
Acked-by: Ilya Maximets <[email protected]>
Signed-off-by: Flavio Leitner <[email protected]>
Signed-off-by: Ilya Maximets <[email protected]> |
asmlinkage long sys_symlinkat(const char __user *oldname,
int newdfd, const char __user *newname)
{
int error = 0;
char * from;
char * to;
struct dentry *dentry;
struct nameidata nd;
from = getname(oldname);
if(IS_ERR(from))
return PTR_ERR(from);
to = getname(newname);
error = PTR_ERR(to);
if (IS_ERR(to))
goto out_putname;
error = do_path_lookup(newdfd, to, LOOKUP_PARENT, &nd);
if (error)
goto out;
dentry = lookup_create(&nd, 0);
error = PTR_ERR(dentry);
if (IS_ERR(dentry))
goto out_unlock;
error = mnt_want_write(nd.path.mnt);
if (error)
goto out_dput;
error = vfs_symlink(nd.path.dentry->d_inode, dentry, from, S_IALLUGO);
mnt_drop_write(nd.path.mnt);
out_dput:
dput(dentry);
out_unlock:
mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
path_put(&nd.path);
out:
putname(to);
out_putname:
putname(from);
return error;
} | 0 | [
"CWE-120"
] | linux-2.6 | d70b67c8bc72ee23b55381bd6a884f4796692f77 | 283,489,915,514,039,870,000,000,000,000,000,000,000 | 41 | [patch] vfs: fix lookup on deleted directory
Lookup can install a child dentry for a deleted directory. This keeps
the directory dentry alive, and the inode pinned in the cache and on
disk, even after all external references have gone away.
This isn't a big problem normally, since memory pressure or umount
will clear out the directory dentry and its children, releasing the
inode. But for UBIFS this causes problems because its orphan area can
overflow.
Fix this by returning ENOENT for all lookups on a S_DEAD directory
before creating a child dentry.
Thanks to Zoltan Sogor for noticing this while testing UBIFS, and
Artem for the excellent analysis of the problem and testing.
Reported-by: Artem Bityutskiy <[email protected]>
Tested-by: Artem Bityutskiy <[email protected]>
Signed-off-by: Miklos Szeredi <[email protected]>
Signed-off-by: Al Viro <[email protected]> |
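A minimal sketch of the rule described in the message - refuse lookups in a directory that is already marked dead, so no new child entries can keep it pinned. The structure and the flag are simplified stand-ins for the VFS types.
#include <errno.h>
#include <stdbool.h>
struct fake_inode { bool dead; };
static int fake_lookup(const struct fake_inode *dir)
{
    if (dir->dead)
        return -ENOENT;   /* deleted directory: fail before creating a child */
    /* ... normal lookup, which may create a child dentry ... */
    return 0;
}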
poppler_document_class_init (PopplerDocumentClass *klass)
{
GObjectClass *gobject_class = G_OBJECT_CLASS (klass);
gobject_class->finalize = poppler_document_finalize;
gobject_class->get_property = poppler_document_get_property;
gobject_class->set_property = poppler_document_set_property;
/**
* PopplerDocument:title:
*
* The document's title or %NULL
*/
g_object_class_install_property (G_OBJECT_CLASS (klass),
PROP_TITLE,
g_param_spec_string ("title",
"Document Title",
"The title of the document",
nullptr,
G_PARAM_READWRITE));
/**
* PopplerDocument:format:
*
* The PDF version as string. See also poppler_document_get_pdf_version_string()
*/
g_object_class_install_property (G_OBJECT_CLASS (klass),
PROP_FORMAT,
g_param_spec_string ("format",
"PDF Format",
"The PDF version of the document",
nullptr,
G_PARAM_READABLE));
/**
* PopplerDocument:format-major:
*
* The PDF major version number. See also poppler_document_get_pdf_version()
*/
g_object_class_install_property (G_OBJECT_CLASS (klass),
PROP_FORMAT_MAJOR,
g_param_spec_uint ("format-major",
"PDF Format Major",
"The PDF major version number of the document",
0, G_MAXUINT, 1,
G_PARAM_READABLE));
/**
* PopplerDocument:format-minor:
*
* The PDF minor version number. See also poppler_document_get_pdf_version()
*/
g_object_class_install_property (G_OBJECT_CLASS (klass),
PROP_FORMAT_MINOR,
g_param_spec_uint ("format-minor",
"PDF Format Minor",
"The PDF minor version number of the document",
0, G_MAXUINT, 0,
G_PARAM_READABLE));
/**
* PopplerDocument:author:
*
* The author of the document
*/
g_object_class_install_property (G_OBJECT_CLASS (klass),
PROP_AUTHOR,
g_param_spec_string ("author",
"Author",
"The author of the document",
nullptr,
G_PARAM_READWRITE));
/**
* PopplerDocument:subject:
*
* The subject of the document
*/
g_object_class_install_property (G_OBJECT_CLASS (klass),
PROP_SUBJECT,
g_param_spec_string ("subject",
"Subject",
"Subjects the document touches",
nullptr,
G_PARAM_READWRITE));
/**
* PopplerDocument:keywords:
*
* The keywords associated to the document
*/
g_object_class_install_property (G_OBJECT_CLASS (klass),
PROP_KEYWORDS,
g_param_spec_string ("keywords",
"Keywords",
"Keywords",
nullptr,
G_PARAM_READWRITE));
/**
* PopplerDocument:creator:
*
* The creator of the document. See also poppler_document_get_creator()
*/
g_object_class_install_property (G_OBJECT_CLASS (klass),
PROP_CREATOR,
g_param_spec_string ("creator",
"Creator",
"The software that created the document",
nullptr,
G_PARAM_READWRITE));
/**
* PopplerDocument:producer:
*
* The producer of the document. See also poppler_document_get_producer()
*/
g_object_class_install_property (G_OBJECT_CLASS (klass),
PROP_PRODUCER,
g_param_spec_string ("producer",
"Producer",
"The software that converted the document",
nullptr,
G_PARAM_READWRITE));
/**
* PopplerDocument:creation-date:
*
* The date the document was created as seconds since the Epoch, or -1
*/
g_object_class_install_property (G_OBJECT_CLASS (klass),
PROP_CREATION_DATE,
g_param_spec_int ("creation-date",
"Creation Date",
"The date and time the document was created",
-1, G_MAXINT, -1,
G_PARAM_READWRITE));
/**
* PopplerDocument:mod-date:
*
* The date the document was most recently modified as seconds since the Epoch, or -1
*/
g_object_class_install_property (G_OBJECT_CLASS (klass),
PROP_MOD_DATE,
g_param_spec_int ("mod-date",
"Modification Date",
"The date and time the document was modified",
-1, G_MAXINT, -1,
G_PARAM_READWRITE));
/**
* PopplerDocument:linearized:
*
* Whether document is linearized. See also poppler_document_is_linearized()
*/
g_object_class_install_property (G_OBJECT_CLASS (klass),
PROP_LINEARIZED,
g_param_spec_boolean ("linearized",
"Fast Web View Enabled",
"Is the document optimized for web viewing?",
FALSE,
G_PARAM_READABLE));
/**
* PopplerDocument:page-layout:
*
* The page layout that should be used when the document is opened
*/
g_object_class_install_property (G_OBJECT_CLASS (klass),
PROP_PAGE_LAYOUT,
g_param_spec_enum ("page-layout",
"Page Layout",
"Initial Page Layout",
POPPLER_TYPE_PAGE_LAYOUT,
POPPLER_PAGE_LAYOUT_UNSET,
G_PARAM_READABLE));
/**
* PopplerDocument:page-mode:
*
* The mode that should be used when the document is opened
*/
g_object_class_install_property (G_OBJECT_CLASS (klass),
PROP_PAGE_MODE,
g_param_spec_enum ("page-mode",
"Page Mode",
"Page Mode",
POPPLER_TYPE_PAGE_MODE,
POPPLER_PAGE_MODE_UNSET,
G_PARAM_READABLE));
/**
* PopplerDocument:viewer-preferences:
*/
g_object_class_install_property (G_OBJECT_CLASS (klass),
PROP_VIEWER_PREFERENCES,
g_param_spec_flags ("viewer-preferences",
"Viewer Preferences",
"Viewer Preferences",
POPPLER_TYPE_VIEWER_PREFERENCES,
POPPLER_VIEWER_PREFERENCES_UNSET,
G_PARAM_READABLE));
/**
* PopplerDocument:permissions:
*
* Flags specifying which operations are permitted when the document is opened
*/
g_object_class_install_property (G_OBJECT_CLASS (klass),
PROP_PERMISSIONS,
g_param_spec_flags ("permissions",
"Permissions",
"Permissions",
POPPLER_TYPE_PERMISSIONS,
POPPLER_PERMISSIONS_FULL,
G_PARAM_READABLE));
/**
* PopplerDocument:metadata:
*
* Document metadata in XML format, or %NULL
*/
g_object_class_install_property (G_OBJECT_CLASS (klass),
PROP_METADATA,
g_param_spec_string ("metadata",
"XML Metadata",
"Embedded XML metadata",
nullptr,
G_PARAM_READABLE));
} | 0 | [
"CWE-476"
] | poppler | f162ecdea0dda5dbbdb45503c1d55d9afaa41d44 | 107,764,113,973,656,930,000,000,000,000,000,000,000 | 231 | Fix crash on missing embedded file
Check whether an embedded file is actually present in the PDF
and show warning in that case.
https://bugs.freedesktop.org/show_bug.cgi?id=106137
https://gitlab.freedesktop.org/poppler/poppler/issues/236 |
njs_iterator_to_array(njs_vm_t *vm, njs_value_t *iterator)
{
int64_t length;
njs_int_t ret;
njs_iterator_args_t args;
njs_memzero(&args, sizeof(njs_iterator_args_t));
ret = njs_object_length(vm, iterator, &length);
if (njs_slow_path(ret != NJS_OK)) {
return NULL;
}
args.data = njs_array_alloc(vm, 1, length, 0);
if (njs_slow_path(args.data == NULL)) {
return NULL;
}
args.value = iterator;
args.to = length;
ret = njs_object_iterate(vm, &args, njs_iterator_to_array_handler);
if (njs_slow_path(ret == NJS_ERROR)) {
njs_mp_free(vm->mem_pool, args.data);
return NULL;
}
return args.data;
} | 1 | [] | njs | c756e23eb09dac519fe161c88587cc034306630f | 107,907,821,832,273,330,000,000,000,000,000,000,000 | 29 | Fixed njs_iterator_to_array() with sparse arrays.
This closes #524 issue on Github. |
static int ntop_redis_get_host_id(lua_State* vm) {
char *host_name;
Redis *redis = ntop->getRedis();
char daybuf[32];
time_t when = time(NULL);
bool new_key;
NetworkInterfaceView *ntop_interface = getCurrentInterface(vm);
ntop->getTrace()->traceEvent(TRACE_INFO, "%s() called", __FUNCTION__);
if(ntop_lua_check(vm, __FUNCTION__, 1, LUA_TSTRING)) return(CONST_LUA_ERROR);
if((host_name = (char*)lua_tostring(vm, 1)) == NULL) return(CONST_LUA_PARAM_ERROR);
strftime(daybuf, sizeof(daybuf), CONST_DB_DAY_FORMAT, localtime(&when));
lua_pushinteger(vm, redis->host_to_id(ntop_interface->getFirst(), daybuf, host_name, &new_key)); /* CHECK */
return(CONST_LUA_OK);
} | 0 | [
"CWE-254"
] | ntopng | 2e0620be3410f5e22c9aa47e261bc5a12be692c6 | 169,991,303,254,570,550,000,000,000,000,000,000,000 | 18 | Added security fix to avoid escalating privileges to non-privileged users
Many thanks to Dolev Farhi for reporting it |
STACK_OF(X509_NAME) *SSL_load_client_CA_file(const char *file)
{
BIO *in;
X509 *x=NULL;
X509_NAME *xn=NULL;
STACK_OF(X509_NAME) *ret = NULL,*sk;
sk=sk_X509_NAME_new(xname_cmp);
in=BIO_new(BIO_s_file_internal());
if ((sk == NULL) || (in == NULL))
{
SSLerr(SSL_F_SSL_LOAD_CLIENT_CA_FILE,ERR_R_MALLOC_FAILURE);
goto err;
}
if (!BIO_read_filename(in,file))
goto err;
for (;;)
{
if (PEM_read_bio_X509(in,&x,NULL,NULL) == NULL)
break;
if (ret == NULL)
{
ret = sk_X509_NAME_new_null();
if (ret == NULL)
{
SSLerr(SSL_F_SSL_LOAD_CLIENT_CA_FILE,ERR_R_MALLOC_FAILURE);
goto err;
}
}
if ((xn=X509_get_subject_name(x)) == NULL) goto err;
/* check for duplicates */
xn=X509_NAME_dup(xn);
if (xn == NULL) goto err;
if (sk_X509_NAME_find(sk,xn) >= 0)
X509_NAME_free(xn);
else
{
sk_X509_NAME_push(sk,xn);
sk_X509_NAME_push(ret,xn);
}
}
if (0)
{
err:
if (ret != NULL) sk_X509_NAME_pop_free(ret,X509_NAME_free);
ret=NULL;
}
if (sk != NULL) sk_X509_NAME_free(sk);
if (in != NULL) BIO_free(in);
if (x != NULL) X509_free(x);
if (ret != NULL)
ERR_clear_error();
return(ret);
} | 0 | [] | openssl | c70a1fee71119a9005b1f304a3bf47694b4a53ac | 279,115,906,387,005,060,000,000,000,000,000,000,000 | 59 | Reorganise supported signature algorithm extension processing.
Only store encoded versions of peer and configured signature algorithms.
Determine shared signature algorithms and cache the result along with NID
equivalents of each algorithm.
(backport from HEAD) |
check_envvar (void)
{
const gchar *envvar;
envvar = g_getenv ("NO_AT_BRIDGE");
if (envvar && atoi (envvar) == 1)
return FALSE;
else
return TRUE;
} | 0 | [] | at-spi2-atk | e4f3eee2e137cd34cd427875365f458c65458164 | 8,332,235,866,577,848,000,000,000,000,000,000,000 | 11 | Use XDG_RUNTIME_DIR to hold sockets, and do not make a world-writable dir
If we use XDG_RUNTIME_DIR, then the directory should be owned by the
appropriate user, so it should not need to be world-writable. Hopefully this
won't break accessibility for administrative apps on some distro.
https://bugzilla.gnome.org/show_bug.cgi?id=678348 |
child_setup (gpointer user_data)
{
GArray *fd_array = user_data;
int i;
/* If no fd_array was specified, don't care. */
if (fd_array == NULL)
return;
/* Otherwise, mark not - close-on-exec all the fds in the array */
for (i = 0; i < fd_array->len; i++)
fcntl (g_array_index (fd_array, int, i), F_SETFD, 0);
} | 0 | [
"CWE-20"
] | flatpak | 902fb713990a8f968ea4350c7c2a27ff46f1a6c4 | 122,296,813,217,615,770,000,000,000,000,000,000,000 | 13 | Use seccomp to filter out TIOCSTI ioctl
This would otherwise let the sandbox add input to the controlling tty. |
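A sketch of how such a filter can be expressed with libseccomp, assuming a default-allow policy in which ioctl(fd, TIOCSTI, ...) is forced to fail; the real sandbox may construct its filter differently.
#include <errno.h>
#include <seccomp.h>
#include <sys/ioctl.h>
/* Deny TIOCSTI so a sandboxed process cannot push bytes into the input
 * queue of its controlling terminal. */
static int deny_tiocsti(void)
{
    scmp_filter_ctx ctx = seccomp_init(SCMP_ACT_ALLOW);
    if (!ctx)
        return -1;
    int rc = seccomp_rule_add(ctx, SCMP_ACT_ERRNO(EPERM), SCMP_SYS(ioctl), 1,
                              SCMP_A1(SCMP_CMP_EQ, (scmp_datum_t)TIOCSTI));
    if (rc == 0)
        rc = seccomp_load(ctx);
    seccomp_release(ctx);
    return rc;
}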
static CURLcode smtp_done(struct connectdata *conn, CURLcode status,
bool premature)
{
struct SessionHandle *data = conn->data;
struct FTP *smtp = data->state.proto.smtp;
CURLcode result = CURLE_OK;
ssize_t bytes_written;
(void)premature;
if(!smtp)
/* When the easy handle is removed from the multi while libcurl is still
* trying to resolve the host name, it seems that the smtp struct is not
* yet initialized, but the removal action calls Curl_done() which calls
* this function. So we simply return success if no smtp pointer is set.
*/
return CURLE_OK;
if(status) {
conn->bits.close = TRUE; /* marked for closure */
result = status; /* use the already set error code */
}
else
/* TODO: make this work even when the socket is EWOULDBLOCK in this
call! */
/* write to socket (send away data) */
result = Curl_write(conn,
conn->writesockfd, /* socket to send to */
SMTP_EOB, /* buffer pointer */
SMTP_EOB_LEN, /* buffer size */
&bytes_written); /* actually sent away */
if(status == CURLE_OK) {
struct smtp_conn *smtpc = &conn->proto.smtpc;
struct pingpong *pp = &smtpc->pp;
pp->response = Curl_tvnow(); /* timeout relative now */
state(conn, SMTP_POSTDATA);
/* run the state-machine
TODO: when the multi interface is used, this _really_ should be using
the smtp_multi_statemach function but we have no general support for
non-blocking DONE operations, not in the multi state machine and with
Curl_done() invokes on several places in the code!
*/
result = smtp_easy_statemach(conn);
}
/* clear these for next connection */
smtp->transfer = FTPTRANSFER_BODY;
return result;
} | 0 | [
"CWE-89"
] | curl | 75ca568fa1c19de4c5358fed246686de8467c238 | 148,338,716,318,075,370,000,000,000,000,000,000,000 | 54 | URL sanitize: reject URLs containing bad data
Protocols (IMAP, POP3 and SMTP) that use the path part of a URL in a
decoded manner now use the new Curl_urldecode() function to reject URLs
with embedded control codes (anything that is or decodes to a byte value
less than 32).
URLs containing such codes could easily otherwise be used to do harm and
allow users to do unintended actions with otherwise innocent tools and
applications. Like for example using a URL like
pop3://pop3.example.com/1%0d%0aDELE%201 when the app wants a URL to get
a mail and instead this would delete one.
This flaw is considered a security vulnerability: CVE-2012-0036
Security advisory at: http://curl.haxx.se/docs/adv_20120124.html
Reported by: Dan Fandrich |
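A small sketch of the sanity rule described above, applied to the already percent-decoded bytes; the function name is invented for the example.
#include <stdbool.h>
#include <stddef.h>
/* Any byte below 0x20 in decoded URL data (CR, LF, NUL, ...) could smuggle
 * an extra protocol command, so the whole string is rejected. */
static bool decoded_is_clean(const unsigned char *s, size_t len)
{
    for (size_t i = 0; i < len; i++)
        if (s[i] < 0x20)
            return false;
    return true;
}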
int ath9k_tx99_send(struct ath_softc *sc, struct sk_buff *skb,
struct ath_tx_control *txctl)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
struct ath_frame_info *fi = get_frame_info(skb);
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_buf *bf;
int padpos, padsize;
padpos = ieee80211_hdrlen(hdr->frame_control);
padsize = padpos & 3;
if (padsize && skb->len > padpos) {
if (skb_headroom(skb) < padsize) {
ath_dbg(common, XMIT,
"tx99 padding failed\n");
return -EINVAL;
}
skb_push(skb, padsize);
memmove(skb->data, skb->data + padsize, padpos);
}
fi->keyix = ATH9K_TXKEYIX_INVALID;
fi->framelen = skb->len + FCS_LEN;
fi->keytype = ATH9K_KEY_TYPE_CLEAR;
bf = ath_tx_setup_buffer(sc, txctl->txq, NULL, skb);
if (!bf) {
ath_dbg(common, XMIT, "tx99 buffer setup failed\n");
return -EINVAL;
}
ath_set_rates(sc->tx99_vif, NULL, bf);
ath9k_hw_set_desc_link(sc->sc_ah, bf->bf_desc, bf->bf_daddr);
ath9k_hw_tx99_start(sc->sc_ah, txctl->txq->axq_qnum);
ath_tx_send_normal(sc, txctl->txq, NULL, skb);
return 0;
} | 0 | [
"CWE-362",
"CWE-241"
] | linux | 21f8aaee0c62708654988ce092838aa7df4d25d8 | 15,461,120,242,469,760,000,000,000,000,000,000,000 | 42 | ath9k: protect tid->sched check
We check tid->sched without a lock taken on ath_tx_aggr_sleep(). That
is a race condition which can result in doing list_del(&tid->list) twice
(the second time with a poisoned list node) and cause a crash like the one shown below:
[424271.637220] BUG: unable to handle kernel paging request at 00100104
[424271.637328] IP: [<f90fc072>] ath_tx_aggr_sleep+0x62/0xe0 [ath9k]
...
[424271.639953] Call Trace:
[424271.639998] [<f90f6900>] ? ath9k_get_survey+0x110/0x110 [ath9k]
[424271.640083] [<f90f6942>] ath9k_sta_notify+0x42/0x50 [ath9k]
[424271.640177] [<f809cfef>] sta_ps_start+0x8f/0x1c0 [mac80211]
[424271.640258] [<c10f730e>] ? free_compound_page+0x2e/0x40
[424271.640346] [<f809e915>] ieee80211_rx_handlers+0x9d5/0x2340 [mac80211]
[424271.640437] [<c112f048>] ? kmem_cache_free+0x1d8/0x1f0
[424271.640510] [<c1345a84>] ? kfree_skbmem+0x34/0x90
[424271.640578] [<c10fc23c>] ? put_page+0x2c/0x40
[424271.640640] [<c1345a84>] ? kfree_skbmem+0x34/0x90
[424271.640706] [<c1345a84>] ? kfree_skbmem+0x34/0x90
[424271.640787] [<f809dde3>] ? ieee80211_rx_handlers_result+0x73/0x1d0 [mac80211]
[424271.640897] [<f80a07a0>] ieee80211_prepare_and_rx_handle+0x520/0xad0 [mac80211]
[424271.641009] [<f809e22d>] ? ieee80211_rx_handlers+0x2ed/0x2340 [mac80211]
[424271.641104] [<c13846ce>] ? ip_output+0x7e/0xd0
[424271.641182] [<f80a1057>] ieee80211_rx+0x307/0x7c0 [mac80211]
[424271.641266] [<f90fa6ee>] ath_rx_tasklet+0x88e/0xf70 [ath9k]
[424271.641358] [<f80a0f2c>] ? ieee80211_rx+0x1dc/0x7c0 [mac80211]
[424271.641445] [<f90f82db>] ath9k_tasklet+0xcb/0x130 [ath9k]
Bug report:
https://bugzilla.kernel.org/show_bug.cgi?id=70551
Reported-and-tested-by: Max Sydorenko <[email protected]>
Cc: [email protected]
Signed-off-by: Stanislaw Gruszka <[email protected]>
Signed-off-by: John W. Linville <[email protected]> |
UpdateWaitHandles(LPHANDLE *handles_ptr, LPDWORD count,
HANDLE io_event, HANDLE exit_event, list_item_t *threads)
{
static DWORD size = 10;
static LPHANDLE handles = NULL;
DWORD pos = 0;
if (handles == NULL)
{
handles = malloc(size * sizeof(HANDLE));
*handles_ptr = handles;
if (handles == NULL)
{
return ERROR_OUTOFMEMORY;
}
}
handles[pos++] = io_event;
if (!threads)
{
handles[pos++] = exit_event;
}
while (threads)
{
if (pos == size)
{
LPHANDLE tmp;
size += 10;
tmp = realloc(handles, size * sizeof(HANDLE));
if (tmp == NULL)
{
size -= 10;
*count = pos;
return ERROR_OUTOFMEMORY;
}
handles = tmp;
*handles_ptr = handles;
}
handles[pos++] = threads->data;
threads = threads->next;
}
*count = pos;
return NO_ERROR;
} | 0 | [
"CWE-415"
] | openvpn | 1394192b210cb3c6624a7419bcf3ff966742e79b | 175,312,727,697,273,400,000,000,000,000,000,000,000 | 47 | Fix potential double-free() in Interactive Service (CVE-2018-9336)
Malformed input data on the service pipe towards the OpenVPN interactive
service (normally used by the OpenVPN GUI to request openvpn instances
from the service) can result in a double free() in the error handling code.
This usually only leads to a process crash (DoS by an unprivileged local
account) but since it could possibly lead to memory corruption if
happening while multiple other threads are active at the same time,
CVE-2018-9336 has been assigned to acknowledge this risk.
Fix by ensuring that sud->directory is set to NULL in GetStartUpData()
for all error cases (thus not being free()ed in FreeStartupData()).
Rewrite control flow to use explicit error label for error exit.
Discovered and reported by Jacob Baines <[email protected]>.
CVE: 2018-9336
Signed-off-by: Gert Doering <[email protected]>
Acked-by: Selva Nair <[email protected]>
Message-Id: <[email protected]>
URL: https://www.mail-archive.com/search?l=mid&[email protected]
Signed-off-by: Gert Doering <[email protected]> |
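A reduced sketch of the rule in the fix - a freed field is set to NULL so a later cleanup pass cannot free it a second time. The structure is a stand-in for the service's startup data, not the OpenVPN type.
#include <stdlib.h>
struct fake_startup {
    char *directory;
    char *options;
};
static void fake_free_startup(struct fake_startup *s)
{
    free(s->directory);
    s->directory = NULL;   /* a second call (or an error-path rerun) is now harmless */
    free(s->options);
    s->options = NULL;
}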
static void sub_remove(struct idr *idp, int shift, int id)
{
struct idr_layer *p = idp->top;
struct idr_layer **pa[MAX_IDR_LEVEL];
struct idr_layer ***paa = &pa[0];
struct idr_layer *to_free;
int n;
*paa = NULL;
*++paa = &idp->top;
while ((shift > 0) && p) {
n = (id >> shift) & IDR_MASK;
__clear_bit(n, &p->bitmap);
*++paa = &p->ary[n];
p = p->ary[n];
shift -= IDR_BITS;
}
n = id & IDR_MASK;
if (likely(p != NULL && test_bit(n, &p->bitmap))){
__clear_bit(n, &p->bitmap);
rcu_assign_pointer(p->ary[n], NULL);
to_free = NULL;
while(*paa && ! --((**paa)->count)){
if (to_free)
free_layer(to_free);
to_free = **paa;
**paa-- = NULL;
}
if (!*paa)
idp->layers = 0;
if (to_free)
free_layer(to_free);
} else
idr_remove_warning(id);
} | 1 | [] | linux | 326cf0f0f308933c10236280a322031f0097205d | 160,423,979,510,555,230,000,000,000,000,000,000,000 | 36 | idr: fix top layer handling
Most functions in idr fail to deal with the high bits when the idr
tree grows to the maximum height.
* idr_get_empty_slot() stops growing idr tree once the depth reaches
MAX_IDR_LEVEL - 1, which is one depth shallower than necessary to
cover the whole range. The function doesn't even notice that it
didn't grow the tree enough and ends up allocating the wrong ID
given sufficiently high @starting_id.
For example, on 64 bit, if the starting id is 0x7fffff01,
idr_get_empty_slot() will grow the tree 5 layer deep, which only
covers the 30 bits and then proceed to allocate as if the bit 30
wasn't specified. It ends up allocating 0x3fffff01 without the bit
30 but still returns 0x7fffff01.
* __idr_remove_all() will not remove anything if the tree is fully
grown.
* idr_find() can't find anything if the tree is fully grown.
* idr_for_each() and idr_get_next() can't iterate anything if the tree
is fully grown.
Fix it by introducing idr_max() which returns the maximum possible ID
given the depth of tree and replacing the id limit checks in all
affected places.
As the idr_layer pointer array pa[] needs to be 1 larger than the
maximum depth, enlarge pa[] arrays by one.
While this plugs the discovered issues, the whole code base is
horrible and in desperate need of a rewrite. It's fragile like hell.
Signed-off-by: Tejun Heo <[email protected]>
Cc: Rusty Russell <[email protected]>
Cc: <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
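A sketch of the idr_max() idea mentioned above - compute the largest ID a tree of a given depth can address and cap it at the positive int range. The constants and names are illustrative, not the kernel's.
#define FAKE_IDR_BITS       8
#define FAKE_MAX_IDR_SHIFT  31   /* IDs are non-negative ints */
static int fake_idr_max(int layers)
{
    int bits = layers * FAKE_IDR_BITS;
    if (bits > FAKE_MAX_IDR_SHIFT)
        bits = FAKE_MAX_IDR_SHIFT;
    return (int)((1U << bits) - 1);   /* highest ID the tree can hold */
}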
void _moddeinit(module_unload_intent_t intent)
{
service_named_unbind_command("chanserv", &cs_flags);
} | 1 | [
"CWE-284"
] | atheme | c597156adc60a45b5f827793cd420945f47bc03b | 4,581,328,154,741,107,000,000,000,000,000,000,000 | 4 | chanserv/flags: make Anope FLAGS compatibility an option
Previously, ChanServ FLAGS behavior could be modified by registering or
dropping the keyword nicks "LIST", "CLEAR", and "MODIFY".
Now, a configuration option is available that when turned on (default),
disables registration of these keyword nicks and enables this
compatibility feature. When turned off, registration of these keyword
nicks is possible, and compatibility to Anope's FLAGS command is
disabled.
Fixes atheme/atheme#397 |
struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg,
struct zone *zone)
{
int nid = zone_to_nid(zone);
int zid = zone_idx(zone);
struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(memcg, nid, zid);
return &mz->reclaim_stat;
} | 0 | [
"CWE-264"
] | linux-2.6 | 1a5a9906d4e8d1976b701f889d8f35d54b928f25 | 321,168,209,584,130,740,000,000,000,000,000,000,000 | 9 | mm: thp: fix pmd_bad() triggering in code paths holding mmap_sem read mode
In some cases it may happen that pmd_none_or_clear_bad() is called with
the mmap_sem hold in read mode. In those cases the huge page faults can
allocate hugepmds under pmd_none_or_clear_bad() and that can trigger a
false positive from pmd_bad() that will not like to see a pmd
materializing as trans huge.
It's not khugepaged causing the problem, khugepaged holds the mmap_sem
in write mode (and all those sites must hold the mmap_sem in read mode
to prevent pagetables to go away from under them, during code review it
seems vm86 mode on 32bit kernels requires that too unless it's
restricted to 1 thread per process or UP builds). The race is only with
the huge pagefaults that can convert a pmd_none() into a
pmd_trans_huge().
Effectively all these pmd_none_or_clear_bad() sites running with
mmap_sem in read mode are somewhat speculative with the page faults, and
the result is always undefined when they run simultaneously. This is
probably why it wasn't common to run into this. For example if the
madvise(MADV_DONTNEED) runs zap_page_range() shortly before the page
fault, the hugepage will not be zapped, if the page fault runs first it
will be zapped.
Altering pmd_bad() not to error out if it finds hugepmds won't be enough
to fix this, because zap_pmd_range would then proceed to call
zap_pte_range (which would be incorrect if the pmd become a
pmd_trans_huge()).
The simplest way to fix this is to read the pmd in the local stack
(regardless of what we read, no need of actual CPU barriers, only
compiler barrier needed), and be sure it is not changing under the code
that computes its value. Even if the real pmd is changing under the
value we hold on the stack, we don't care. If we actually end up in
zap_pte_range it means the pmd was not none already and it was not huge,
and it can't become huge from under us (khugepaged locking explained
above).
All we need is to enforce that there is no way anymore that in a code
path like below, pmd_trans_huge can be false, but pmd_none_or_clear_bad
can run into a hugepmd. The overhead of a barrier() is just a compiler
tweak and should not be measurable (I only added it for THP builds). I
don't exclude different compiler versions may have prevented the race
too by caching the value of *pmd on the stack (that hasn't been
verified, but it wouldn't be impossible considering
pmd_none_or_clear_bad, pmd_bad, pmd_trans_huge, pmd_none are all inlines
and there's no external function called in between pmd_trans_huge and
pmd_none_or_clear_bad).
if (pmd_trans_huge(*pmd)) {
if (next-addr != HPAGE_PMD_SIZE) {
VM_BUG_ON(!rwsem_is_locked(&tlb->mm->mmap_sem));
split_huge_page_pmd(vma->vm_mm, pmd);
} else if (zap_huge_pmd(tlb, vma, pmd, addr))
continue;
/* fall through */
}
if (pmd_none_or_clear_bad(pmd))
Because this race condition could be exercised without special
privileges this was reported in CVE-2012-1179.
The race was identified and fully explained by Ulrich who debugged it.
I'm quoting his accurate explanation below, for reference.
====== start quote =======
mapcount 0 page_mapcount 1
kernel BUG at mm/huge_memory.c:1384!
At some point prior to the panic, a "bad pmd ..." message similar to the
following is logged on the console:
mm/memory.c:145: bad pmd ffff8800376e1f98(80000000314000e7).
The "bad pmd ..." message is logged by pmd_clear_bad() before it clears
the page's PMD table entry.
143 void pmd_clear_bad(pmd_t *pmd)
144 {
-> 145 pmd_ERROR(*pmd);
146 pmd_clear(pmd);
147 }
After the PMD table entry has been cleared, there is an inconsistency
between the actual number of PMD table entries that are mapping the page
and the page's map count (_mapcount field in struct page). When the page
is subsequently reclaimed, __split_huge_page() detects this inconsistency.
1381 if (mapcount != page_mapcount(page))
1382 printk(KERN_ERR "mapcount %d page_mapcount %d\n",
1383 mapcount, page_mapcount(page));
-> 1384 BUG_ON(mapcount != page_mapcount(page));
The root cause of the problem is a race of two threads in a multithreaded
process. Thread B incurs a page fault on a virtual address that has never
been accessed (PMD entry is zero) while Thread A is executing an madvise()
system call on a virtual address within the same 2 MB (huge page) range.
virtual address space
.---------------------.
| |
| |
.-|---------------------|
| | |
| | |<-- B(fault)
| | |
2 MB | |/////////////////////|-.
huge < |/////////////////////| > A(range)
page | |/////////////////////|-'
| | |
| | |
'-|---------------------|
| |
| |
'---------------------'
- Thread A is executing an madvise(..., MADV_DONTNEED) system call
on the virtual address range "A(range)" shown in the picture.
sys_madvise
// Acquire the semaphore in shared mode.
down_read(¤t->mm->mmap_sem)
...
madvise_vma
switch (behavior)
case MADV_DONTNEED:
madvise_dontneed
zap_page_range
unmap_vmas
unmap_page_range
zap_pud_range
zap_pmd_range
//
// Assume that this huge page has never been accessed.
// I.e. content of the PMD entry is zero (not mapped).
//
if (pmd_trans_huge(*pmd)) {
// We don't get here due to the above assumption.
}
//
// Assume that Thread B incurred a page fault and
.---------> // sneaks in here as shown below.
| //
| if (pmd_none_or_clear_bad(pmd))
| {
| if (unlikely(pmd_bad(*pmd)))
| pmd_clear_bad
| {
| pmd_ERROR
| // Log "bad pmd ..." message here.
| pmd_clear
| // Clear the page's PMD entry.
| // Thread B incremented the map count
| // in page_add_new_anon_rmap(), but
| // now the page is no longer mapped
| // by a PMD entry (-> inconsistency).
| }
| }
|
v
- Thread B is handling a page fault on virtual address "B(fault)" shown
in the picture.
...
do_page_fault
__do_page_fault
// Acquire the semaphore in shared mode.
down_read_trylock(&mm->mmap_sem)
...
handle_mm_fault
if (pmd_none(*pmd) && transparent_hugepage_enabled(vma))
// We get here due to the above assumption (PMD entry is zero).
do_huge_pmd_anonymous_page
alloc_hugepage_vma
// Allocate a new transparent huge page here.
...
__do_huge_pmd_anonymous_page
...
spin_lock(&mm->page_table_lock)
...
page_add_new_anon_rmap
// Here we increment the page's map count (starts at -1).
atomic_set(&page->_mapcount, 0)
set_pmd_at
// Here we set the page's PMD entry which will be cleared
// when Thread A calls pmd_clear_bad().
...
spin_unlock(&mm->page_table_lock)
The mmap_sem does not prevent the race because both threads are acquiring
it in shared mode (down_read). Thread B holds the page_table_lock while
the page's map count and PMD table entry are updated. However, Thread A
does not synchronize on that lock.
====== end quote =======
[[email protected]: checkpatch fixes]
Reported-by: Ulrich Obergfell <[email protected]>
Signed-off-by: Andrea Arcangeli <[email protected]>
Acked-by: Johannes Weiner <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: Hugh Dickins <[email protected]>
Cc: Dave Jones <[email protected]>
Acked-by: Larry Woodman <[email protected]>
Acked-by: Rik van Riel <[email protected]>
Cc: <[email protected]> [2.6.38+]
Cc: Mark Salter <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
int EC_POINT_set_Jprojective_coordinates_GFp(const EC_GROUP *group, EC_POINT *point,
const BIGNUM *x, const BIGNUM *y, const BIGNUM *z, BN_CTX *ctx)
{
if (group->meth->point_set_Jprojective_coordinates_GFp == 0)
{
ECerr(EC_F_EC_POINT_SET_JPROJECTIVE_COORDINATES_GFP, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED);
return 0;
}
if (group->meth != point->meth)
{
ECerr(EC_F_EC_POINT_SET_JPROJECTIVE_COORDINATES_GFP, EC_R_INCOMPATIBLE_OBJECTS);
return 0;
}
return group->meth->point_set_Jprojective_coordinates_GFp(group, point, x, y, z, ctx);
} | 0 | [
"CWE-320"
] | openssl | 8aed2a7548362e88e84a7feb795a3a97e8395008 | 168,376,880,658,851,280,000,000,000,000,000,000,000 | 15 | Reserve option to use BN_mod_exp_mont_consttime in ECDSA.
Submitted by Shay Gueron, Intel Corp.
RT: 3149
Reviewed-by: Rich Salz <[email protected]>
(cherry picked from commit f54be179aa4cbbd944728771d7d59ed588158a12) |
static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
int bearer_id, struct sk_buff_head *xmitq)
{
struct tipc_msg *hdr = buf_msg(skb);
int usr = msg_user(hdr);
int mtyp = msg_type(hdr);
u16 oseqno = msg_seqno(hdr);
u16 exp_pkts = msg_msgcnt(hdr);
u16 rcv_nxt, syncpt, dlv_nxt, inputq_len;
int state = n->state;
struct tipc_link *l, *tnl, *pl = NULL;
struct tipc_media_addr *maddr;
int pb_id;
if (trace_tipc_node_check_state_enabled()) {
trace_tipc_skb_dump(skb, false, "skb for node state check");
trace_tipc_node_check_state(n, true, " ");
}
l = n->links[bearer_id].link;
if (!l)
return false;
rcv_nxt = tipc_link_rcv_nxt(l);
if (likely((state == SELF_UP_PEER_UP) && (usr != TUNNEL_PROTOCOL)))
return true;
/* Find parallel link, if any */
for (pb_id = 0; pb_id < MAX_BEARERS; pb_id++) {
if ((pb_id != bearer_id) && n->links[pb_id].link) {
pl = n->links[pb_id].link;
break;
}
}
if (!tipc_link_validate_msg(l, hdr)) {
trace_tipc_skb_dump(skb, false, "PROTO invalid (2)!");
trace_tipc_link_dump(l, TIPC_DUMP_NONE, "PROTO invalid (2)!");
return false;
}
/* Check and update node accessibility if applicable */
if (state == SELF_UP_PEER_COMING) {
if (!tipc_link_is_up(l))
return true;
if (!msg_peer_link_is_up(hdr))
return true;
tipc_node_fsm_evt(n, PEER_ESTABL_CONTACT_EVT);
}
if (state == SELF_DOWN_PEER_LEAVING) {
if (msg_peer_node_is_up(hdr))
return false;
tipc_node_fsm_evt(n, PEER_LOST_CONTACT_EVT);
return true;
}
if (state == SELF_LEAVING_PEER_DOWN)
return false;
/* Ignore duplicate packets */
if ((usr != LINK_PROTOCOL) && less(oseqno, rcv_nxt))
return true;
/* Initiate or update failover mode if applicable */
if ((usr == TUNNEL_PROTOCOL) && (mtyp == FAILOVER_MSG)) {
syncpt = oseqno + exp_pkts - 1;
if (pl && !tipc_link_is_reset(pl)) {
__tipc_node_link_down(n, &pb_id, xmitq, &maddr);
trace_tipc_node_link_down(n, true,
"node link down <- failover!");
tipc_skb_queue_splice_tail_init(tipc_link_inputq(pl),
tipc_link_inputq(l));
}
/* If parallel link was already down, and this happened before
* the tunnel link came up, node failover was never started.
* Ensure that a FAILOVER_MSG is sent to get peer out of
* NODE_FAILINGOVER state, also this node must accept
* TUNNEL_MSGs from peer.
*/
if (n->state != NODE_FAILINGOVER)
tipc_node_link_failover(n, pl, l, xmitq);
/* If pkts arrive out of order, use lowest calculated syncpt */
if (less(syncpt, n->sync_point))
n->sync_point = syncpt;
}
/* Open parallel link when tunnel link reaches synch point */
if ((n->state == NODE_FAILINGOVER) && tipc_link_is_up(l)) {
if (!more(rcv_nxt, n->sync_point))
return true;
tipc_node_fsm_evt(n, NODE_FAILOVER_END_EVT);
if (pl)
tipc_link_fsm_evt(pl, LINK_FAILOVER_END_EVT);
return true;
}
/* No synching needed if only one link */
if (!pl || !tipc_link_is_up(pl))
return true;
/* Initiate synch mode if applicable */
if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG) && (oseqno == 1)) {
if (n->capabilities & TIPC_TUNNEL_ENHANCED)
syncpt = msg_syncpt(hdr);
else
syncpt = msg_seqno(msg_inner_hdr(hdr)) + exp_pkts - 1;
if (!tipc_link_is_up(l))
__tipc_node_link_up(n, bearer_id, xmitq);
if (n->state == SELF_UP_PEER_UP) {
n->sync_point = syncpt;
tipc_link_fsm_evt(l, LINK_SYNCH_BEGIN_EVT);
tipc_node_fsm_evt(n, NODE_SYNCH_BEGIN_EVT);
}
}
/* Open tunnel link when parallel link reaches synch point */
if (n->state == NODE_SYNCHING) {
if (tipc_link_is_synching(l)) {
tnl = l;
} else {
tnl = pl;
pl = l;
}
inputq_len = skb_queue_len(tipc_link_inputq(pl));
dlv_nxt = tipc_link_rcv_nxt(pl) - inputq_len;
if (more(dlv_nxt, n->sync_point)) {
tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT);
tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT);
return true;
}
if (l == pl)
return true;
if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG))
return true;
if (usr == LINK_PROTOCOL)
return true;
return false;
}
return true;
} | 0 | [] | linux | 0217ed2848e8538bcf9172d97ed2eeb4a26041bb | 47,776,099,743,076,800,000,000,000,000,000,000,000 | 143 | tipc: better validate user input in tipc_nl_retrieve_key()
Before calling tipc_aead_key_size(ptr), we need to ensure
we have enough data to dereference ptr->keylen.
We probably also want to make sure tipc_aead_key_size()
won't overflow with malicious ptr->keylen values.
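A hedged sketch of the kind of validation described above; the attribute
constant, the TIPC_AEAD_KEYLEN_MAX bound and the helper's exact shape are
assumptions for illustration only:

static int tipc_nl_retrieve_key(struct nlattr **attrs,
				struct tipc_aead_key **pkey)
{
	struct nlattr *attr = attrs[TIPC_NLA_NODE_KEY];
	struct tipc_aead_key *key;

	if (!attr)
		return -ENODATA;

	/* Enough bytes to even read the fixed header (including keylen)? */
	if (nla_len(attr) < sizeof(*key))
		return -EINVAL;
	key = (struct tipc_aead_key *)nla_data(attr);

	/* Bound keylen before using it in size arithmetic, and verify the
	 * attribute really carries keylen bytes of key material. */
	if (key->keylen > TIPC_AEAD_KEYLEN_MAX ||
	    nla_len(attr) < sizeof(*key) + key->keylen)
		return -EINVAL;

	*pkey = key;
	return 0;
}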
Syzbot reported:
BUG: KMSAN: uninit-value in __tipc_nl_node_set_key net/tipc/node.c:2971 [inline]
BUG: KMSAN: uninit-value in tipc_nl_node_set_key+0x9bf/0x13b0 net/tipc/node.c:3023
CPU: 0 PID: 21060 Comm: syz-executor.5 Not tainted 5.11.0-rc7-syzkaller #0
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
Call Trace:
__dump_stack lib/dump_stack.c:79 [inline]
dump_stack+0x21c/0x280 lib/dump_stack.c:120
kmsan_report+0xfb/0x1e0 mm/kmsan/kmsan_report.c:118
__msan_warning+0x5f/0xa0 mm/kmsan/kmsan_instr.c:197
__tipc_nl_node_set_key net/tipc/node.c:2971 [inline]
tipc_nl_node_set_key+0x9bf/0x13b0 net/tipc/node.c:3023
genl_family_rcv_msg_doit net/netlink/genetlink.c:739 [inline]
genl_family_rcv_msg net/netlink/genetlink.c:783 [inline]
genl_rcv_msg+0x1319/0x1610 net/netlink/genetlink.c:800
netlink_rcv_skb+0x6fa/0x810 net/netlink/af_netlink.c:2494
genl_rcv+0x63/0x80 net/netlink/genetlink.c:811
netlink_unicast_kernel net/netlink/af_netlink.c:1304 [inline]
netlink_unicast+0x11d6/0x14a0 net/netlink/af_netlink.c:1330
netlink_sendmsg+0x1740/0x1840 net/netlink/af_netlink.c:1919
sock_sendmsg_nosec net/socket.c:652 [inline]
sock_sendmsg net/socket.c:672 [inline]
____sys_sendmsg+0xcfc/0x12f0 net/socket.c:2345
___sys_sendmsg net/socket.c:2399 [inline]
__sys_sendmsg+0x714/0x830 net/socket.c:2432
__compat_sys_sendmsg net/compat.c:347 [inline]
__do_compat_sys_sendmsg net/compat.c:354 [inline]
__se_compat_sys_sendmsg+0xa7/0xc0 net/compat.c:351
__ia32_compat_sys_sendmsg+0x4a/0x70 net/compat.c:351
do_syscall_32_irqs_on arch/x86/entry/common.c:79 [inline]
__do_fast_syscall_32+0x102/0x160 arch/x86/entry/common.c:141
do_fast_syscall_32+0x6a/0xc0 arch/x86/entry/common.c:166
do_SYSENTER_32+0x73/0x90 arch/x86/entry/common.c:209
entry_SYSENTER_compat_after_hwframe+0x4d/0x5c
RIP: 0023:0xf7f60549
Code: 03 74 c0 01 10 05 03 74 b8 01 10 06 03 74 b4 01 10 07 03 74 b0 01 10 08 03 74 d8 01 00 00 00 00 00 51 52 55 89 e5 0f 34 cd 80 <5d> 5a 59 c3 90 90 90 90 8d b4 26 00 00 00 00 8d b4 26 00 00 00 00
RSP: 002b:00000000f555a5fc EFLAGS: 00000296 ORIG_RAX: 0000000000000172
RAX: ffffffffffffffda RBX: 0000000000000003 RCX: 0000000020000200
RDX: 0000000000000000 RSI: 0000000000000000 RDI: 0000000000000000
RBP: 0000000000000000 R08: 0000000000000000 R09: 0000000000000000
R10: 0000000000000000 R11: 0000000000000000 R12: 0000000000000000
R13: 0000000000000000 R14: 0000000000000000 R15: 0000000000000000
Uninit was created at:
kmsan_save_stack_with_flags mm/kmsan/kmsan.c:121 [inline]
kmsan_internal_poison_shadow+0x5c/0xf0 mm/kmsan/kmsan.c:104
kmsan_slab_alloc+0x8d/0xe0 mm/kmsan/kmsan_hooks.c:76
slab_alloc_node mm/slub.c:2907 [inline]
__kmalloc_node_track_caller+0xa37/0x1430 mm/slub.c:4527
__kmalloc_reserve net/core/skbuff.c:142 [inline]
__alloc_skb+0x2f8/0xb30 net/core/skbuff.c:210
alloc_skb include/linux/skbuff.h:1099 [inline]
netlink_alloc_large_skb net/netlink/af_netlink.c:1176 [inline]
netlink_sendmsg+0xdbc/0x1840 net/netlink/af_netlink.c:1894
sock_sendmsg_nosec net/socket.c:652 [inline]
sock_sendmsg net/socket.c:672 [inline]
____sys_sendmsg+0xcfc/0x12f0 net/socket.c:2345
___sys_sendmsg net/socket.c:2399 [inline]
__sys_sendmsg+0x714/0x830 net/socket.c:2432
__compat_sys_sendmsg net/compat.c:347 [inline]
__do_compat_sys_sendmsg net/compat.c:354 [inline]
__se_compat_sys_sendmsg+0xa7/0xc0 net/compat.c:351
__ia32_compat_sys_sendmsg+0x4a/0x70 net/compat.c:351
do_syscall_32_irqs_on arch/x86/entry/common.c:79 [inline]
__do_fast_syscall_32+0x102/0x160 arch/x86/entry/common.c:141
do_fast_syscall_32+0x6a/0xc0 arch/x86/entry/common.c:166
do_SYSENTER_32+0x73/0x90 arch/x86/entry/common.c:209
entry_SYSENTER_compat_after_hwframe+0x4d/0x5c
Fixes: e1f32190cf7d ("tipc: add support for AEAD key setting via netlink")
Signed-off-by: Eric Dumazet <[email protected]>
Cc: Tuong Lien <[email protected]>
Cc: Jon Maloy <[email protected]>
Cc: Ying Xue <[email protected]>
Reported-by: syzbot <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
StatusWith<PrivilegeVector> AuthorizationSessionImpl::getPrivilegesForAggregate(
const NamespaceString& nss, const BSONObj& cmdObj, bool isMongos) {
if (!nss.isValid()) {
return Status(ErrorCodes::InvalidNamespace,
mongoutils::str::stream() << "Invalid input namespace, " << nss.ns());
}
PrivilegeVector privileges;
// If this connection does not need to be authenticated (for instance, if auth is disabled),
// returns an empty requirements set.
if (_externalState->shouldIgnoreAuthChecks()) {
return privileges;
}
auto statusWithAggRequest = AggregationRequest::parseFromBSON(nss, cmdObj);
if (!statusWithAggRequest.isOK()) {
return statusWithAggRequest.getStatus();
}
AggregationRequest aggRequest = std::move(statusWithAggRequest.getValue());
const auto& pipeline = aggRequest.getPipeline();
// If the aggregation pipeline is empty, confirm the user is authorized for find on 'nss'.
if (pipeline.empty()) {
Privilege currentPriv =
Privilege(ResourcePattern::forExactNamespace(nss), ActionType::find);
Privilege::addPrivilegeToPrivilegeVector(&privileges, currentPriv);
return privileges;
}
// If the first stage of the pipeline is not an initial source, the pipeline is implicitly
// reading documents from the underlying collection. The client must be authorized to do so.
auto liteParsedDocSource = LiteParsedDocumentSource::parse(aggRequest, pipeline[0]);
if (!liteParsedDocSource->isInitialSource()) {
Privilege currentPriv =
Privilege(ResourcePattern::forExactNamespace(nss), ActionType::find);
Privilege::addPrivilegeToPrivilegeVector(&privileges, currentPriv);
}
// Confirm privileges for the pipeline.
for (auto&& pipelineStage : pipeline) {
liteParsedDocSource = LiteParsedDocumentSource::parse(aggRequest, pipelineStage);
PrivilegeVector currentPrivs = liteParsedDocSource->requiredPrivileges(isMongos);
Privilege::addPrivilegesToPrivilegeVector(&privileges, currentPrivs);
}
return privileges;
} | 0 | [
"CWE-613"
] | mongo | e55d6e2292e5dbe2f97153251d8193d1cc89f5d7 | 254,917,000,525,025,500,000,000,000,000,000,000,000 | 48 | SERVER-38984 Validate unique User ID on UserCache hit |
static ssize_t iwl_dbgfs_rfkill_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
struct iwl_trans *trans = file->private_data;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
char buf[100];
int pos;
pos = scnprintf(buf, sizeof(buf), "debug: %d\nhw: %d\n",
trans_pcie->debug_rfkill,
!(iwl_read32(trans, CSR_GP_CNTRL) &
CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW));
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
} | 0 | [
"CWE-476"
] | linux | 8188a18ee2e48c9a7461139838048363bfce3fef | 77,690,510,614,235,780,000,000,000,000,000,000,000 | 16 | iwlwifi: pcie: fix rb_allocator workqueue allocation
We don't handle failures in the rb_allocator workqueue allocation
correctly. To fix that, move the code earlier so the cleanup is
easier and we don't have to undo all the interrupt allocations in
this case.
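A hedged sketch of the shape of the fix -- allocate the workqueue early,
check the result, and unwind through a trivial error path on failure; the
structure fields and error label here are illustrative assumptions:

	/* Allocate the rb_allocator workqueue before any interrupt setup,
	 * so a failure only needs a simple unwind path. */
	trans_pcie->rba.alloc_wq = alloc_workqueue("rb_allocator",
						   WQ_HIGHPRI | WQ_UNBOUND, 1);
	if (!trans_pcie->rba.alloc_wq) {
		ret = -ENOMEM;
		goto out_free_trans;
	}
	INIT_WORK(&trans_pcie->rba.rx_alloc, iwl_pcie_rx_allocator_work);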
Signed-off-by: Johannes Berg <[email protected]>
Signed-off-by: Luca Coelho <[email protected]> |
static void ixgbe_clear_vmvir(struct ixgbe_adapter *adapter, u32 vf)
{
struct ixgbe_hw *hw = &adapter->hw;
IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), 0);
} | 0 | [
"CWE-20"
] | linux | 63e39d29b3da02e901349f6cd71159818a4737a6 | 13,244,672,732,872,193,000,000,000,000,000,000,000 | 6 | ixgbe: fix large MTU request from VF
Check that the MTU value requested by the VF is in the supported
range of MTUs before attempting to set the VF large packet enable;
otherwise, reject the request. This also avoids unnecessary
register updates in the case of the 82599 controller.
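A hedged sketch of the added bounds check; the constant names and the exact
return value are illustrative assumptions:

	/* Reject out-of-range MTU requests from the VF up front, before
	 * touching any large-packet-enable (jumbo frame) registers. */
	if (max_frame < ETH_MIN_MTU || max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE)
		return -EINVAL;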
Fixes: 872844ddb9e4 ("ixgbe: Enable jumbo frames support w/ SR-IOV")
Co-developed-by: Piotr Skajewski <[email protected]>
Signed-off-by: Piotr Skajewski <[email protected]>
Signed-off-by: Jesse Brandeburg <[email protected]>
Co-developed-by: Mateusz Palczewski <[email protected]>
Signed-off-by: Mateusz Palczewski <[email protected]>
Tested-by: Konrad Jankowski <[email protected]>
Signed-off-by: Tony Nguyen <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
initialize_environment(void)
{
putenv("PGAPPNAME=pg_regress");
if (nolocale)
{
/*
* Clear out any non-C locale settings
*/
unsetenv("LC_COLLATE");
unsetenv("LC_CTYPE");
unsetenv("LC_MONETARY");
unsetenv("LC_NUMERIC");
unsetenv("LC_TIME");
unsetenv("LANG");
/* On Windows the default locale cannot be English, so force it */
#if defined(WIN32) || defined(__CYGWIN__)
putenv("LANG=en");
#endif
}
/*
* Set translation-related settings to English; otherwise psql will
* produce translated messages and produce diffs. (XXX If we ever support
* translation of pg_regress, this needs to be moved elsewhere, where psql
* is actually called.)
*/
unsetenv("LANGUAGE");
unsetenv("LC_ALL");
putenv("LC_MESSAGES=C");
/*
* Set encoding as requested
*/
if (encoding)
doputenv("PGCLIENTENCODING", encoding);
else
unsetenv("PGCLIENTENCODING");
/*
* Set timezone and datestyle for datetime-related tests
*/
putenv("PGTZ=PST8PDT");
putenv("PGDATESTYLE=Postgres, MDY");
/*
* Likewise set intervalstyle to ensure consistent results. This is a bit
* more painful because we must use PGOPTIONS, and we want to preserve the
* user's ability to set other variables through that.
*/
{
const char *my_pgoptions = "-c intervalstyle=postgres_verbose";
const char *old_pgoptions = getenv("PGOPTIONS");
char *new_pgoptions;
if (!old_pgoptions)
old_pgoptions = "";
new_pgoptions = psprintf("PGOPTIONS=%s %s",
old_pgoptions, my_pgoptions);
putenv(new_pgoptions);
}
if (temp_install)
{
/*
* Clear out any environment vars that might cause psql to connect to
* the wrong postmaster, or otherwise behave in nondefault ways. (Note
* we also use psql's -X switch consistently, so that ~/.psqlrc files
* won't mess things up.) Also, set PGPORT to the temp port, and set
* or unset PGHOST depending on whether we are using TCP or Unix
* sockets.
*/
unsetenv("PGDATABASE");
unsetenv("PGUSER");
unsetenv("PGSERVICE");
unsetenv("PGSSLMODE");
unsetenv("PGREQUIRESSL");
unsetenv("PGCONNECT_TIMEOUT");
unsetenv("PGDATA");
if (hostname != NULL)
doputenv("PGHOST", hostname);
else
unsetenv("PGHOST");
unsetenv("PGHOSTADDR");
if (port != -1)
{
char s[16];
sprintf(s, "%d", port);
doputenv("PGPORT", s);
}
/*
* GNU make stores some flags in the MAKEFLAGS environment variable to
* pass arguments to its own children. If we are invoked by make,
* that causes the make invoked by us to think it's part of the make
* task invoking us, and so it tries to communicate with the toplevel
* make. Which fails.
*
* Unset the variable to protect against such problems. We also reset
* MAKELEVEL to be certain the child doesn't notice the make above us.
*/
unsetenv("MAKEFLAGS");
unsetenv("MAKELEVEL");
/*
* Adjust path variables to point into the temp-install tree
*/
bindir = psprintf("%s/install/%s", temp_install, bindir);
libdir = psprintf("%s/install/%s", temp_install, libdir);
datadir = psprintf("%s/install/%s", temp_install, datadir);
/* psql will be installed into temp-install bindir */
psqldir = bindir;
/*
* Set up shared library paths to include the temp install.
*
* LD_LIBRARY_PATH covers many platforms. DYLD_LIBRARY_PATH works on
* Darwin, and maybe other Mach-based systems. LIBPATH is for AIX.
* Windows needs shared libraries in PATH (only those linked into
* executables, not dlopen'ed ones). Feel free to account for others
* as well.
*/
add_to_path("LD_LIBRARY_PATH", ':', libdir);
add_to_path("DYLD_LIBRARY_PATH", ':', libdir);
add_to_path("LIBPATH", ':', libdir);
#if defined(WIN32)
add_to_path("PATH", ';', libdir);
#elif defined(__CYGWIN__)
add_to_path("PATH", ':', libdir);
#endif
}
else
{
const char *pghost;
const char *pgport;
/*
* When testing an existing install, we honor existing environment
* variables, except if they're overridden by command line options.
*/
if (hostname != NULL)
{
doputenv("PGHOST", hostname);
unsetenv("PGHOSTADDR");
}
if (port != -1)
{
char s[16];
sprintf(s, "%d", port);
doputenv("PGPORT", s);
}
if (user != NULL)
doputenv("PGUSER", user);
/*
* Report what we're connecting to
*/
pghost = getenv("PGHOST");
pgport = getenv("PGPORT");
#ifndef HAVE_UNIX_SOCKETS
if (!pghost)
pghost = "localhost";
#endif
if (pghost && pgport)
printf(_("(using postmaster on %s, port %s)\n"), pghost, pgport);
if (pghost && !pgport)
printf(_("(using postmaster on %s, default port)\n"), pghost);
if (!pghost && pgport)
printf(_("(using postmaster on Unix socket, port %s)\n"), pgport);
if (!pghost && !pgport)
printf(_("(using postmaster on Unix socket, default port)\n"));
}
convert_sourcefiles();
load_resultmap();
} | 0 | [
"CWE-119"
] | postgres | 01824385aead50e557ca1af28640460fa9877d51 | 111,273,641,222,411,080,000,000,000,000,000,000,000 | 182 | Prevent potential overruns of fixed-size buffers.
Coverity identified a number of places in which it couldn't prove that a
string being copied into a fixed-size buffer would fit. We believe that
most, perhaps all of these are in fact safe, or are copying data that is
coming from a trusted source so that any overrun is not really a security
issue. Nonetheless it seems prudent to forestall any risk by using
strlcpy() and similar functions.
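For illustration, a hedged before/after of the copy pattern used for the
buffer fixes (the field names are placeholders):

	/* Before: can overrun if the source string exceeds the buffer. */
	strcpy(cell->hostaddr, hostaddr);

	/* After: bounded copy, always NUL-terminated, truncating if needed. */
	strlcpy(cell->hostaddr, hostaddr, sizeof(cell->hostaddr));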
Fixes by Peter Eisentraut and Jozef Mlich based on Coverity reports.
In addition, fix a potential null-pointer-dereference crash in
contrib/chkpass. The crypt(3) function is defined to return NULL on
failure, but chkpass.c didn't check for that before using the result.
The main practical case in which this could be an issue is if libc is
configured to refuse to execute unapproved hashing algorithms (e.g.,
"FIPS mode"). This ideally should've been a separate commit, but
since it touches code adjacent to one of the buffer overrun changes,
I included it in this commit to avoid last-minute merge issues.
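A hedged sketch of the chkpass check (the exact error code and message
wording are assumptions):

	crypt_output = crypt(str, salt);
	if (crypt_output == NULL)
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
				 errmsg("crypt() failed")));
	strlcpy(result->password, crypt_output, sizeof(result->password));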
This issue was reported by Honza Horak.
Security: CVE-2014-0065 for buffer overruns, CVE-2014-0066 for crypt() |
g_make_token_header(gss_OID_const mech,
unsigned int body_size,
unsigned char **buf,
unsigned int totallen)
{
int ret = 0;
unsigned int hdrsize;
unsigned char *p = *buf;
hdrsize = 1 + gssint_der_length_size(mech->length) + mech->length;
*(*buf)++ = HEADER_ID;
if ((ret = gssint_put_der_length(hdrsize + body_size, buf, totallen)))
return (ret);
*(*buf)++ = MECH_OID;
if ((ret = gssint_put_der_length(mech->length, buf,
totallen - (int)(p - *buf))))
return (ret);
TWRITE_STR(*buf, mech->elements, mech->length);
return (0);
} | 0 | [
"CWE-415"
] | krb5 | f18ddf5d82de0ab7591a36e465bc24225776940f | 105,219,876,877,780,000,000,000,000,000,000,000,000 | 22 | Fix double-free in SPNEGO [CVE-2014-4343]
In commit cd7d6b08 ("Verify acceptor's mech in SPNEGO initiator") the
pointer sc->internal_mech became an alias into sc->mech_set->elements,
which should be considered constant for the duration of the SPNEGO
context. So don't free it.
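In code terms the fix amounts to dropping the alias rather than releasing
it; the snippet below is a hedged sketch, not the exact hunk:

	/*
	 * sc->internal_mech merely aliases into sc->mech_set->elements,
	 * which is owned by the SPNEGO context and freed exactly once when
	 * the context itself is released. Freeing it here would turn the
	 * later context teardown into a double free.
	 */
	sc->internal_mech = GSS_C_NO_OID;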
CVE-2014-4343:
In MIT krb5 releases 1.10 and newer, an unauthenticated remote
attacker with the ability to spoof packets appearing to be from a
GSSAPI acceptor can cause a double-free condition in GSSAPI initiators
(clients) which are using the SPNEGO mechanism, by returning a
different underlying mechanism than was proposed by the initiator. At
this stage of the negotiation, the acceptor is unauthenticated, and
the acceptor's response could be spoofed by an attacker with the
ability to inject traffic to the initiator.
Historically, some double-free vulnerabilities can be translated into
remote code execution, though the necessary exploits must be tailored
to the individual application and are usually quite
complicated. Double-frees can also be exploited to cause an
application crash, for a denial of service. However, most GSSAPI
client applications are not vulnerable, as the SPNEGO mechanism is not
used by default (when GSS_C_NO_OID is passed as the mech_type argument
to gss_init_sec_context()). The most common use of SPNEGO is for
HTTP-Negotiate, used in web browsers and other web clients. Most such
clients are believed to not offer HTTP-Negotiate by default, instead
requiring a whitelist of sites for which it may be used to be
configured. If the whitelist is configured to only allow
HTTP-Negotiate over TLS connections ("https://"), a successful
attacker must also spoof the web server's SSL certificate, due to the
way the WWW-Authenticate header is sent in a 401 (Unauthorized)
response message. Unfortunately, many instructions for enabling
HTTP-Negotiate in common web browsers do not include a TLS
requirement.
CVSSv2 Vector: AV:N/AC:H/Au:N/C:C/I:C/A:C/E:POC/RL:OF/RC:C
[[email protected]: CVE summary and CVSSv2 vector]
ticket: 7969 (new)
target_version: 1.12.2
tags: pullup |
rsvg_state_inherit_run (RsvgState * dst, const RsvgState * src,
const InheritanceFunction function, const gboolean inherituninheritables)
{
gint i;
if (function (dst->has_current_color, src->has_current_color))
dst->current_color = src->current_color;
if (function (dst->has_flood_color, src->has_flood_color))
dst->flood_color = src->flood_color;
if (function (dst->has_flood_opacity, src->has_flood_opacity))
dst->flood_opacity = src->flood_opacity;
if (function (dst->has_fill_server, src->has_fill_server)) {
rsvg_paint_server_ref (src->fill);
if (dst->fill)
rsvg_paint_server_unref (dst->fill);
dst->fill = src->fill;
}
if (function (dst->has_fill_opacity, src->has_fill_opacity))
dst->fill_opacity = src->fill_opacity;
if (function (dst->has_fill_rule, src->has_fill_rule))
dst->fill_rule = src->fill_rule;
if (function (dst->has_clip_rule, src->has_clip_rule))
dst->clip_rule = src->clip_rule;
if (function (dst->overflow, src->overflow))
dst->overflow = src->overflow;
if (function (dst->has_stroke_server, src->has_stroke_server)) {
rsvg_paint_server_ref (src->stroke);
if (dst->stroke)
rsvg_paint_server_unref (dst->stroke);
dst->stroke = src->stroke;
}
if (function (dst->has_stroke_opacity, src->has_stroke_opacity))
dst->stroke_opacity = src->stroke_opacity;
if (function (dst->has_stroke_width, src->has_stroke_width))
dst->stroke_width = src->stroke_width;
if (function (dst->has_miter_limit, src->has_miter_limit))
dst->miter_limit = src->miter_limit;
if (function (dst->has_cap, src->has_cap))
dst->cap = src->cap;
if (function (dst->has_join, src->has_join))
dst->join = src->join;
if (function (dst->has_stop_color, src->has_stop_color))
dst->stop_color = src->stop_color;
if (function (dst->has_stop_opacity, src->has_stop_opacity))
dst->stop_opacity = src->stop_opacity;
if (function (dst->has_cond, src->has_cond))
dst->cond_true = src->cond_true;
if (function (dst->has_font_size, src->has_font_size))
dst->font_size = src->font_size;
if (function (dst->has_font_style, src->has_font_style))
dst->font_style = src->font_style;
if (function (dst->has_font_variant, src->has_font_variant))
dst->font_variant = src->font_variant;
if (function (dst->has_font_weight, src->has_font_weight))
dst->font_weight = src->font_weight;
if (function (dst->has_font_stretch, src->has_font_stretch))
dst->font_stretch = src->font_stretch;
if (function (dst->has_font_decor, src->has_font_decor))
dst->font_decor = src->font_decor;
if (function (dst->has_text_dir, src->has_text_dir))
dst->text_dir = src->text_dir;
if (function (dst->has_text_gravity, src->has_text_gravity))
dst->text_gravity = src->text_gravity;
if (function (dst->has_unicode_bidi, src->has_unicode_bidi))
dst->unicode_bidi = src->unicode_bidi;
if (function (dst->has_text_anchor, src->has_text_anchor))
dst->text_anchor = src->text_anchor;
if (function (dst->has_letter_spacing, src->has_letter_spacing))
dst->letter_spacing = src->letter_spacing;
if (function (dst->has_startMarker, src->has_startMarker))
dst->startMarker = src->startMarker;
if (function (dst->has_middleMarker, src->has_middleMarker))
dst->middleMarker = src->middleMarker;
if (function (dst->has_endMarker, src->has_endMarker))
dst->endMarker = src->endMarker;
if (function (dst->has_shape_rendering_type, src->has_shape_rendering_type))
dst->shape_rendering_type = src->shape_rendering_type;
if (function (dst->has_text_rendering_type, src->has_text_rendering_type))
dst->text_rendering_type = src->text_rendering_type;
if (function (dst->has_font_family, src->has_font_family)) {
g_free (dst->font_family); /* font_family is always set to something */
dst->font_family = g_strdup (src->font_family);
}
if (function (dst->has_space_preserve, src->has_space_preserve))
dst->space_preserve = src->space_preserve;
if (function (dst->has_visible, src->has_visible))
dst->visible = src->visible;
if (function (dst->has_lang, src->has_lang)) {
if (dst->has_lang)
g_free (dst->lang);
dst->lang = g_strdup (src->lang);
}
if (src->dash.n_dash > 0 && (function (dst->has_dash, src->has_dash))) {
if (dst->has_dash)
g_free (dst->dash.dash);
dst->dash.dash = g_new (gdouble, src->dash.n_dash);
dst->dash.n_dash = src->dash.n_dash;
for (i = 0; i < src->dash.n_dash; i++)
dst->dash.dash[i] = src->dash.dash[i];
}
if (function (dst->has_dashoffset, src->has_dashoffset)) {
dst->dash.offset = src->dash.offset;
}
if (inherituninheritables) {
dst->clip_path_ref = src->clip_path_ref;
dst->mask = src->mask;
dst->enable_background = src->enable_background;
dst->adobe_blend = src->adobe_blend;
dst->opacity = src->opacity;
dst->filter = src->filter;
dst->comp_op = src->comp_op;
}
} | 1 | [
"CWE-20"
] | librsvg | d1c9191949747f6dcfd207831d15dd4ba00e31f2 | 134,237,730,731,253,370,000,000,000,000,000,000,000 | 121 | state: Store mask as reference
Instead of immediately looking up the mask, store the reference and look
it up on use. |
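A hedged sketch of the idea (the names are placeholders, not librsvg's real
API): keep only the textual reference in the state and resolve it against
the defs table when the mask is actually applied, instead of resolving it
while cascading styles:

	/* cascade time: just remember which mask was referenced */
	g_free (state->mask_ref);
	state->mask_ref = g_strdup (value);

	/* render time: resolve the reference right before use */
	RsvgNode *mask = rsvg_defs_lookup (ctx->defs, state->mask_ref);
	if (mask != NULL)
		rsvg_render_mask (ctx, mask);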
void or_bits(REP_SET *to,REP_SET *from)
{
reg1 uint i;
for (i=0 ; i < to->size_of_bits ; i++)
to->bits[i]|=from->bits[i]; | 0 | [
"CWE-295"
] | mysql-server | b3e9211e48a3fb586e88b0270a175d2348935424 | 265,904,500,929,529,250,000,000,000,000,000,000,000 | 7 | WL#9072: Backport WL#8785 to 5.5 |
static inline NPIdentifierInfo *npidentifier_info_new(void)
{
return NPW_MemNew(NPIdentifierInfo, 1);
} | 0 | [
"CWE-264"
] | nspluginwrapper | 7e4ab8e1189846041f955e6c83f72bc1624e7a98 | 37,943,446,421,343,462,000,000,000,000,000,000,000 | 4 | Support all the new variables added |
static void init_page(page_obj_t *page, const char *name, const char *title)
{
page->name = name;
page->title = title;
} | 0 | [
"CWE-200"
] | libreport | 257578a23d1537a2d235aaa2b1488ee4f818e360 | 110,217,667,205,843,420,000,000,000,000,000,000,000 | 5 | wizard: fix save users changes after reviewing dump dir files
If the user reviewed the dump dir's files during reporting the crash, the
changes was thrown away and original data was passed to the bugzilla bug
report.
report-gtk saves the first text view buffer and then reloads data from the
reported problem directory, which causes that the changes made to those text
views are thrown away.
Function save_text_if_changed(), except of saving text, also reload the files
from dump dir and update gui state from the dump dir. The commit moves the
reloading and updating gui functions away from this function.
Related to rhbz#1270235
Signed-off-by: Matej Habrnal <[email protected]> |
static void mov_update_dts_shift(MOVStreamContext *sc, int duration)
{
if (duration < 0) {
if (duration == INT_MIN) {
av_log(NULL, AV_LOG_WARNING, "mov_update_dts_shift(): dts_shift set to %d\n", INT_MAX);
duration++;
}
sc->dts_shift = FFMAX(sc->dts_shift, -duration);
}
} | 0 | [
"CWE-399",
"CWE-834"
] | FFmpeg | 9cb4eb772839c5e1de2855d126bf74ff16d13382 | 246,832,858,396,549,800,000,000,000,000,000,000,000 | 10 | avformat/mov: Fix DoS in read_tfra()
Fixes: Missing EOF check in loop
No testcase
Found-by: Xiaohei and Wangchu from Alibaba Security Team
Signed-off-by: Michael Niedermayer <[email protected]> |
static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
{
struct dm_md_mempools *p = dm_table_get_md_mempools(t);
if (md->bs) {
/* The md already has necessary mempools. */
if (dm_table_bio_based(t)) {
/*
* Reload bioset because front_pad may have changed
* because a different table was loaded.
*/
bioset_free(md->bs);
md->bs = p->bs;
p->bs = NULL;
}
/*
* There's no need to reload with request-based dm
* because the size of front_pad doesn't change.
* Note for future: If you are to reload bioset,
* prep-ed requests in the queue may refer
* to bio from the old bioset, so you must walk
* through the queue to unprep.
*/
goto out;
}
BUG_ON(!p || md->io_pool || md->bs);
md->io_pool = p->io_pool;
p->io_pool = NULL;
md->bs = p->bs;
p->bs = NULL;
out:
/* mempool bind completed, no longer need any mempools in the table */
dm_table_free_md_mempools(t);
} | 0 | [
"CWE-362"
] | linux | b9a41d21dceadf8104812626ef85dc56ee8a60ed | 111,458,691,362,044,400,000,000,000,000,000,000,000 | 37 | dm: fix race between dm_get_from_kobject() and __dm_destroy()
The following BUG_ON was hit when testing repeat creation and removal of
DM devices:
kernel BUG at drivers/md/dm.c:2919!
CPU: 7 PID: 750 Comm: systemd-udevd Not tainted 4.1.44
Call Trace:
[<ffffffff81649e8b>] dm_get_from_kobject+0x34/0x3a
[<ffffffff81650ef1>] dm_attr_show+0x2b/0x5e
[<ffffffff817b46d1>] ? mutex_lock+0x26/0x44
[<ffffffff811df7f5>] sysfs_kf_seq_show+0x83/0xcf
[<ffffffff811de257>] kernfs_seq_show+0x23/0x25
[<ffffffff81199118>] seq_read+0x16f/0x325
[<ffffffff811de994>] kernfs_fop_read+0x3a/0x13f
[<ffffffff8117b625>] __vfs_read+0x26/0x9d
[<ffffffff8130eb59>] ? security_file_permission+0x3c/0x44
[<ffffffff8117bdb8>] ? rw_verify_area+0x83/0xd9
[<ffffffff8117be9d>] vfs_read+0x8f/0xcf
[<ffffffff81193e34>] ? __fdget_pos+0x12/0x41
[<ffffffff8117c686>] SyS_read+0x4b/0x76
[<ffffffff817b606e>] system_call_fastpath+0x12/0x71
The bug can be easily triggered if an extra delay (e.g. 10ms) is added
between the test of DMF_FREEING & DMF_DELETING and dm_get() in
dm_get_from_kobject().
To fix it, we need to ensure the test of DMF_FREEING & DMF_DELETING and
dm_get() are done in an atomic way, so _minor_lock is used.
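A hedged sketch of the resulting shape of dm_get_from_kobject() (field names
are approximate):

struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
{
	struct mapped_device *md;

	md = container_of(kobj, struct mapped_device, kobj_holder.kobj);

	spin_lock(&_minor_lock);
	/* The flag test and the reference grab are now atomic with respect
	 * to __dm_destroy(), which sets DMF_FREEING under the same lock. */
	if (test_bit(DMF_FREEING, &md->flags) ||
	    test_bit(DMF_DELETING, &md->flags)) {
		md = NULL;
		goto out;
	}
	dm_get(md);
out:
	spin_unlock(&_minor_lock);
	return md;
}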
The other callers of dm_get() have also been checked to be OK: some
callers invoke dm_get() under _minor_lock, some callers invoke it under
_hash_lock, and dm_start_request() invokes it after increasing
md->open_count.
Cc: [email protected]
Signed-off-by: Hou Tao <[email protected]>
Signed-off-by: Mike Snitzer <[email protected]> |
void free_blobs(TABLE *table)
{
uint *ptr, *end;
for (ptr= table->s->blob_field, end=ptr + table->s->blob_fields ;
ptr != end ;
ptr++)
{
/*
Reduced TABLE objects which are used by row-based replication for
type conversion might have some fields missing. Skip freeing BLOB
buffers for such missing fields.
*/
if (table->field[*ptr])
((Field_blob*) table->field[*ptr])->free();
}
} | 0 | [
"CWE-416"
] | server | c02ebf3510850ba78a106be9974c94c3b97d8585 | 94,190,117,326,399,960,000,000,000,000,000,000,000 | 16 | MDEV-24176 Preparations
1. Moved the fix_vcol_exprs() call to open_table():
mysql_alter_table() doesn't do lock_tables(), so it cannot benefit from
fix_vcol_exprs() being called from there. Tests affected: main.default_session
2. Vanilla cleanups and comments. |