func
stringlengths 0
484k
| target
int64 0
1
| cwe
listlengths 0
4
| project
stringclasses 799
values | commit_id
stringlengths 40
40
| hash
float64 1,215,700,430,453,689,100,000,000B
340,281,914,521,452,260,000,000,000,000B
| size
int64 1
24k
| message
stringlengths 0
13.3k
|
---|---|---|---|---|---|---|---|
void CLASS stretch()
{
/*
 * Resample the image to compensate for a non-square pixel aspect ratio,
 * using linear interpolation between the two nearest source rows (or
 * columns).  A no-op for square pixels (pixel_aspect == 1).
 *
 * pixel_aspect < 1: pixels are wider than tall -> grow the image
 * vertically to newdim rows; otherwise grow it horizontally to newdim
 * columns.  The old buffer "image" is replaced by the freshly allocated
 * "img" and freed at the end.
 *
 * NOTE(review): newdim is derived from pixel_aspect, which comes from
 * file metadata; a pathological aspect value could overflow the ushort
 * newdim or the calloc size — presumably validated upstream, verify.
 */
ushort newdim, (*img)[4], *pix0, *pix1;
int row, col, c;
double rc, frac;
if (pixel_aspect == 1) return;
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_STRETCH,0,2);
#endif
#ifdef DCRAW_VERBOSE
if (verbose) fprintf (stderr,_("Stretching the image...\n"));
#endif
if (pixel_aspect < 1) {
/* Vertical stretch: one output row per rc step of pixel_aspect. */
newdim = height / pixel_aspect + 0.5;
img = (ushort (*)[4]) calloc (width*newdim, sizeof *img);
merror (img, "stretch()");
for (rc=row=0; row < newdim; row++, rc+=pixel_aspect) {
/* c = integer source row, frac = interpolation weight toward row c+1.
 * (c is re-assigned as the per-channel index by FORCC below and is
 * recomputed from rc on every iteration of this loop.) */
frac = rc - (c = rc);
pix0 = pix1 = image[c*width];
/* Clamp at the last row: pix1 stays equal to pix0. */
if (c+1 < height) pix1 += width*4;
for (col=0; col < width; col++, pix0+=4, pix1+=4)
FORCC img[row*width+col][c] = pix0[c]*(1-frac) + pix1[c]*frac + 0.5;
}
height = newdim;
} else {
/* Horizontal stretch: step through source columns by 1/pixel_aspect. */
newdim = width * pixel_aspect + 0.5;
img = (ushort (*)[4]) calloc (height*newdim, sizeof *img);
merror (img, "stretch()");
for (rc=col=0; col < newdim; col++, rc+=1/pixel_aspect) {
frac = rc - (c = rc);
pix0 = pix1 = image[c];
/* Clamp at the last column. */
if (c+1 < width) pix1 += 4;
for (row=0; row < height; row++, pix0+=width*4, pix1+=width*4)
FORCC img[row*newdim+col][c] = pix0[c]*(1-frac) + pix1[c]*frac + 0.5;
}
width = newdim;
}
/* Swap the interpolated buffer in place of the original. */
free (image);
image = img;
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_STRETCH,1,2);
#endif
}
| 0 |
[
"CWE-703"
] |
LibRaw
|
11909cc59e712e09b508dda729b99aeaac2b29ad
| 122,835,095,565,617,320,000,000,000,000,000,000,000 | 44 |
cumulated data checks patch
|
// Verifies that enabling HTTP/2 server push (SETTINGS identifier 2) via
// custom_settings_parameters is rejected, both when it is the only custom
// parameter and when it appears alongside colliding named/user-defined
// parameters (where the server-push rejection must win).
TEST_F(HttpConnectionManagerConfigTest, UserDefinedSettingsDisallowServerPush) {
const std::string yaml_string = R"EOF(
codec_type: http2
stat_prefix: my_stat_prefix
route_config:
virtual_hosts:
- name: default
domains:
- "*"
routes:
- match:
prefix: "/"
route:
cluster: fake_cluster
http_filters:
- name: encoder-decoder-buffer-filter
typed_config: {}
http2_protocol_options:
custom_settings_parameters: { identifier: 2, value: 1 }
)EOF";
EXPECT_THROW_WITH_REGEX(
createHttpConnectionManagerConfig(yaml_string), EnvoyException,
"server push is not supported by Envoy and can not be enabled via a SETTINGS parameter.");
// Specify both the server push parameter and colliding named and user defined parameters.
const std::string yaml_string2 = R"EOF(
codec_type: http2
stat_prefix: my_stat_prefix
route_config:
virtual_hosts:
- name: default
domains:
- "*"
routes:
- match:
prefix: "/"
route:
cluster: fake_cluster
http_filters:
- name: encoder-decoder-buffer-filter
typed_config: {}
http2_protocol_options:
hpack_table_size: 2048
max_concurrent_streams: 4096
custom_settings_parameters:
- { identifier: 1, value: 2048 }
- { identifier: 2, value: 1 }
- { identifier: 3, value: 1024 }
)EOF";
// The server push exception is thrown first.
// Fix: the original passed yaml_string here, so the second config (with the
// colliding parameters) was never actually validated.
EXPECT_THROW_WITH_REGEX(
createHttpConnectionManagerConfig(yaml_string2), EnvoyException,
"server push is not supported by Envoy and can not be enabled via a SETTINGS parameter.");
}
| 0 |
[
"CWE-22"
] |
envoy
|
5333b928d8bcffa26ab19bf018369a835f697585
| 173,410,974,943,483,400,000,000,000,000,000,000,000 | 56 |
Implement handling of escaped slash characters in URL path
Fixes: CVE-2021-29492
Signed-off-by: Yan Avlasov <[email protected]>
|
vgetorpeek(int advance)
{
int c, c1;
int keylen;
char_u *s;
mapblock_T *mp;
#ifdef FEAT_LOCALMAP
mapblock_T *mp2;
#endif
mapblock_T *mp_match;
int mp_match_len = 0;
int timedout = FALSE; /* waited for more than 1 second
for mapping to complete */
int mapdepth = 0; /* check for recursive mapping */
int mode_deleted = FALSE; /* set when mode has been deleted */
int local_State;
int mlen;
int max_mlen;
int i;
#ifdef FEAT_CMDL_INFO
int new_wcol, new_wrow;
#endif
#ifdef FEAT_GUI
# ifdef FEAT_MENU
int idx;
# endif
int shape_changed = FALSE; /* adjusted cursor shape */
#endif
int n;
#ifdef FEAT_LANGMAP
int nolmaplen;
#endif
int old_wcol, old_wrow;
int wait_tb_len;
/*
* This function doesn't work very well when called recursively. This may
* happen though, because of:
* 1. The call to add_to_showcmd(). char_avail() is then used to check if
* there is a character available, which calls this function. In that
* case we must return NUL, to indicate no character is available.
* 2. A GUI callback function writes to the screen, causing a
* wait_return().
* Using ":normal" can also do this, but it saves the typeahead buffer,
* thus it should be OK. But don't get a key from the user then.
*/
if (vgetc_busy > 0 && ex_normal_busy == 0)
return NUL;
local_State = get_real_state();
++vgetc_busy;
if (advance)
KeyStuffed = FALSE;
init_typebuf();
start_stuff();
if (advance && typebuf.tb_maplen == 0)
reg_executing = 0;
do
{
/*
* get a character: 1. from the stuffbuffer
*/
if (typeahead_char != 0)
{
c = typeahead_char;
if (advance)
typeahead_char = 0;
}
else
c = read_readbuffers(advance);
if (c != NUL && !got_int)
{
if (advance)
{
/* KeyTyped = FALSE; When the command that stuffed something
* was typed, behave like the stuffed command was typed.
* needed for CTRL-W CTRL-] to open a fold, for example. */
KeyStuffed = TRUE;
}
if (typebuf.tb_no_abbr_cnt == 0)
typebuf.tb_no_abbr_cnt = 1; /* no abbreviations now */
}
else
{
/*
* Loop until we either find a matching mapped key, or we
* are sure that it is not a mapped key.
* If a mapped key sequence is found we go back to the start to
* try re-mapping.
*/
for (;;)
{
long wait_time;
/*
* ui_breakcheck() is slow, don't use it too often when
* inside a mapping. But call it each time for typed
* characters.
*/
if (typebuf.tb_maplen)
line_breakcheck();
else
ui_breakcheck(); /* check for CTRL-C */
keylen = 0;
if (got_int)
{
/* flush all input */
c = inchar(typebuf.tb_buf, typebuf.tb_buflen - 1, 0L);
/*
* If inchar() returns TRUE (script file was active) or we
* are inside a mapping, get out of Insert mode.
* Otherwise we behave like having gotten a CTRL-C.
* As a result typing CTRL-C in insert mode will
* really insert a CTRL-C.
*/
if ((c || typebuf.tb_maplen)
&& (State & (INSERT + CMDLINE)))
c = ESC;
else
c = Ctrl_C;
flush_buffers(FLUSH_INPUT); // flush all typeahead
if (advance)
{
/* Also record this character, it might be needed to
* get out of Insert mode. */
*typebuf.tb_buf = c;
gotchars(typebuf.tb_buf, 1);
}
cmd_silent = FALSE;
break;
}
else if (typebuf.tb_len > 0)
{
/*
* Check for a mappable key sequence.
* Walk through one maphash[] list until we find an
* entry that matches.
*
* Don't look for mappings if:
* - no_mapping set: mapping disabled (e.g. for CTRL-V)
* - maphash_valid not set: no mappings present.
* - typebuf.tb_buf[typebuf.tb_off] should not be remapped
* - in insert or cmdline mode and 'paste' option set
* - waiting for "hit return to continue" and CR or SPACE
* typed
* - waiting for a char with --more--
* - in Ctrl-X mode, and we get a valid char for that mode
*/
mp = NULL;
max_mlen = 0;
c1 = typebuf.tb_buf[typebuf.tb_off];
if (no_mapping == 0 && maphash_valid
&& (no_zero_mapping == 0 || c1 != '0')
&& (typebuf.tb_maplen == 0
|| (p_remap
&& (typebuf.tb_noremap[typebuf.tb_off]
& (RM_NONE|RM_ABBR)) == 0))
&& !(p_paste && (State & (INSERT + CMDLINE)))
&& !(State == HITRETURN && (c1 == CAR || c1 == ' '))
&& State != ASKMORE
&& State != CONFIRM
#ifdef FEAT_INS_EXPAND
&& !((ctrl_x_mode_not_default()
&& vim_is_ctrl_x_key(c1))
|| ((compl_cont_status & CONT_LOCAL)
&& (c1 == Ctrl_N || c1 == Ctrl_P)))
#endif
)
{
#ifdef FEAT_LANGMAP
if (c1 == K_SPECIAL)
nolmaplen = 2;
else
{
LANGMAP_ADJUST(c1,
(State & (CMDLINE | INSERT)) == 0
&& get_real_state() != SELECTMODE);
nolmaplen = 0;
}
#endif
#ifdef FEAT_LOCALMAP
/* First try buffer-local mappings. */
mp = curbuf->b_maphash[MAP_HASH(local_State, c1)];
mp2 = maphash[MAP_HASH(local_State, c1)];
if (mp == NULL)
{
/* There are no buffer-local mappings. */
mp = mp2;
mp2 = NULL;
}
#else
mp = maphash[MAP_HASH(local_State, c1)];
#endif
/*
* Loop until a partly matching mapping is found or
* all (local) mappings have been checked.
* The longest full match is remembered in "mp_match".
* A full match is only accepted if there is no partly
* match, so "aa" and "aaa" can both be mapped.
*/
mp_match = NULL;
mp_match_len = 0;
for ( ; mp != NULL;
#ifdef FEAT_LOCALMAP
mp->m_next == NULL ? (mp = mp2, mp2 = NULL) :
#endif
(mp = mp->m_next))
{
/*
* Only consider an entry if the first character
* matches and it is for the current state.
* Skip ":lmap" mappings if keys were mapped.
*/
if (mp->m_keys[0] == c1
&& (mp->m_mode & local_State)
&& ((mp->m_mode & LANGMAP) == 0
|| typebuf.tb_maplen == 0))
{
#ifdef FEAT_LANGMAP
int nomap = nolmaplen;
int c2;
#endif
/* find the match length of this mapping */
for (mlen = 1; mlen < typebuf.tb_len; ++mlen)
{
#ifdef FEAT_LANGMAP
c2 = typebuf.tb_buf[typebuf.tb_off + mlen];
if (nomap > 0)
--nomap;
else if (c2 == K_SPECIAL)
nomap = 2;
else
LANGMAP_ADJUST(c2, TRUE);
if (mp->m_keys[mlen] != c2)
#else
if (mp->m_keys[mlen] !=
typebuf.tb_buf[typebuf.tb_off + mlen])
#endif
break;
}
/* Don't allow mapping the first byte(s) of a
* multi-byte char. Happens when mapping
* <M-a> and then changing 'encoding'. Beware
* that 0x80 is escaped. */
{
char_u *p1 = mp->m_keys;
char_u *p2 = mb_unescape(&p1);
if (has_mbyte && p2 != NULL
&& MB_BYTE2LEN(c1) > MB_PTR2LEN(p2))
mlen = 0;
}
/*
* Check an entry whether it matches.
* - Full match: mlen == keylen
* - Partly match: mlen == typebuf.tb_len
*/
keylen = mp->m_keylen;
if (mlen == keylen
|| (mlen == typebuf.tb_len
&& typebuf.tb_len < keylen))
{
/*
* If only script-local mappings are
* allowed, check if the mapping starts
* with K_SNR.
*/
s = typebuf.tb_noremap + typebuf.tb_off;
if (*s == RM_SCRIPT
&& (mp->m_keys[0] != K_SPECIAL
|| mp->m_keys[1] != KS_EXTRA
|| mp->m_keys[2]
!= (int)KE_SNR))
continue;
/*
* If one of the typed keys cannot be
* remapped, skip the entry.
*/
for (n = mlen; --n >= 0; )
if (*s++ & (RM_NONE|RM_ABBR))
break;
if (n >= 0)
continue;
if (keylen > typebuf.tb_len)
{
if (!timedout && !(mp_match != NULL
&& mp_match->m_nowait))
{
/* break at a partly match */
keylen = KEYLEN_PART_MAP;
break;
}
}
else if (keylen > mp_match_len)
{
/* found a longer match */
mp_match = mp;
mp_match_len = keylen;
}
}
else
/* No match; may have to check for
* termcode at next character. */
if (max_mlen < mlen)
max_mlen = mlen;
}
}
/* If no partly match found, use the longest full
* match. */
if (keylen != KEYLEN_PART_MAP)
{
mp = mp_match;
keylen = mp_match_len;
}
}
/* Check for match with 'pastetoggle' */
if (*p_pt != NUL && mp == NULL && (State & (INSERT|NORMAL)))
{
for (mlen = 0; mlen < typebuf.tb_len && p_pt[mlen];
++mlen)
if (p_pt[mlen] != typebuf.tb_buf[typebuf.tb_off
+ mlen])
break;
if (p_pt[mlen] == NUL) /* match */
{
/* write chars to script file(s) */
if (mlen > typebuf.tb_maplen)
gotchars(typebuf.tb_buf + typebuf.tb_off
+ typebuf.tb_maplen,
mlen - typebuf.tb_maplen);
del_typebuf(mlen, 0); /* remove the chars */
set_option_value((char_u *)"paste",
(long)!p_paste, NULL, 0);
if (!(State & INSERT))
{
msg_col = 0;
msg_row = Rows - 1;
msg_clr_eos(); /* clear ruler */
}
status_redraw_all();
redraw_statuslines();
showmode();
setcursor();
continue;
}
/* Need more chars for partly match. */
if (mlen == typebuf.tb_len)
keylen = KEYLEN_PART_KEY;
else if (max_mlen < mlen)
/* no match, may have to check for termcode at
* next character */
max_mlen = mlen + 1;
}
if ((mp == NULL || max_mlen >= mp_match_len)
&& keylen != KEYLEN_PART_MAP)
{
int save_keylen = keylen;
/*
* When no matching mapping found or found a
* non-matching mapping that matches at least what the
* matching mapping matched:
* Check if we have a terminal code, when:
* mapping is allowed,
* keys have not been mapped,
* and not an ESC sequence, not in insert mode or
* p_ek is on,
* and when not timed out,
*/
if ((no_mapping == 0 || allow_keys != 0)
&& (typebuf.tb_maplen == 0
|| (p_remap && typebuf.tb_noremap[
typebuf.tb_off] == RM_YES))
&& !timedout)
{
keylen = check_termcode(max_mlen + 1,
NULL, 0, NULL);
/* If no termcode matched but 'pastetoggle'
* matched partially it's like an incomplete key
* sequence. */
if (keylen == 0 && save_keylen == KEYLEN_PART_KEY)
keylen = KEYLEN_PART_KEY;
/*
* When getting a partial match, but the last
* characters were not typed, don't wait for a
* typed character to complete the termcode.
* This helps a lot when a ":normal" command ends
* in an ESC.
*/
if (keylen < 0
&& typebuf.tb_len == typebuf.tb_maplen)
keylen = 0;
}
else
keylen = 0;
if (keylen == 0) /* no matching terminal code */
{
#ifdef AMIGA /* check for window bounds report */
if (typebuf.tb_maplen == 0 && (typebuf.tb_buf[
typebuf.tb_off] & 0xff) == CSI)
{
for (s = typebuf.tb_buf + typebuf.tb_off + 1;
s < typebuf.tb_buf + typebuf.tb_off
+ typebuf.tb_len
&& (VIM_ISDIGIT(*s) || *s == ';'
|| *s == ' ');
++s)
;
if (*s == 'r' || *s == '|') /* found one */
{
del_typebuf((int)(s + 1 -
(typebuf.tb_buf + typebuf.tb_off)), 0);
/* get size and redraw screen */
shell_resized();
continue;
}
if (*s == NUL) /* need more characters */
keylen = KEYLEN_PART_KEY;
}
if (keylen >= 0)
#endif
/* When there was a matching mapping and no
* termcode could be replaced after another one,
* use that mapping (loop around). If there was
* no mapping use the character from the
* typeahead buffer right here. */
if (mp == NULL)
{
/*
* get a character: 2. from the typeahead buffer
*/
c = typebuf.tb_buf[typebuf.tb_off] & 255;
if (advance) /* remove chars from tb_buf */
{
cmd_silent = (typebuf.tb_silent > 0);
if (typebuf.tb_maplen > 0)
KeyTyped = FALSE;
else
{
KeyTyped = TRUE;
/* write char to script file(s) */
gotchars(typebuf.tb_buf
+ typebuf.tb_off, 1);
}
KeyNoremap = typebuf.tb_noremap[
typebuf.tb_off];
del_typebuf(1, 0);
}
break; /* got character, break for loop */
}
}
if (keylen > 0) /* full matching terminal code */
{
#if defined(FEAT_GUI) && defined(FEAT_MENU)
if (typebuf.tb_len >= 2
&& typebuf.tb_buf[typebuf.tb_off] == K_SPECIAL
&& typebuf.tb_buf[typebuf.tb_off + 1]
== KS_MENU)
{
/*
* Using a menu may cause a break in undo!
* It's like using gotchars(), but without
* recording or writing to a script file.
*/
may_sync_undo();
del_typebuf(3, 0);
idx = get_menu_index(current_menu, local_State);
if (idx != MENU_INDEX_INVALID)
{
/*
* In Select mode and a Visual mode menu
* is used: Switch to Visual mode
* temporarily. Append K_SELECT to switch
* back to Select mode.
*/
if (VIsual_active && VIsual_select
&& (current_menu->modes & VISUAL))
{
VIsual_select = FALSE;
(void)ins_typebuf(K_SELECT_STRING,
REMAP_NONE, 0, TRUE, FALSE);
}
ins_typebuf(current_menu->strings[idx],
current_menu->noremap[idx],
0, TRUE,
current_menu->silent[idx]);
}
}
#endif /* FEAT_GUI && FEAT_MENU */
continue; /* try mapping again */
}
/* Partial match: get some more characters. When a
* matching mapping was found use that one. */
if (mp == NULL || keylen < 0)
keylen = KEYLEN_PART_KEY;
else
keylen = mp_match_len;
}
/* complete match */
if (keylen >= 0 && keylen <= typebuf.tb_len)
{
#ifdef FEAT_EVAL
int save_m_expr;
int save_m_noremap;
int save_m_silent;
char_u *save_m_keys;
char_u *save_m_str;
#else
# define save_m_noremap mp->m_noremap
# define save_m_silent mp->m_silent
#endif
/* write chars to script file(s) */
if (keylen > typebuf.tb_maplen)
gotchars(typebuf.tb_buf + typebuf.tb_off
+ typebuf.tb_maplen,
keylen - typebuf.tb_maplen);
cmd_silent = (typebuf.tb_silent > 0);
del_typebuf(keylen, 0); /* remove the mapped keys */
/*
* Put the replacement string in front of mapstr.
* The depth check catches ":map x y" and ":map y x".
*/
if (++mapdepth >= p_mmd)
{
emsg(_("E223: recursive mapping"));
if (State & CMDLINE)
redrawcmdline();
else
setcursor();
flush_buffers(FLUSH_MINIMAL);
mapdepth = 0; /* for next one */
c = -1;
break;
}
/*
* In Select mode and a Visual mode mapping is used:
* Switch to Visual mode temporarily. Append K_SELECT
* to switch back to Select mode.
*/
if (VIsual_active && VIsual_select
&& (mp->m_mode & VISUAL))
{
VIsual_select = FALSE;
(void)ins_typebuf(K_SELECT_STRING, REMAP_NONE,
0, TRUE, FALSE);
}
#ifdef FEAT_EVAL
/* Copy the values from *mp that are used, because
* evaluating the expression may invoke a function
* that redefines the mapping, thereby making *mp
* invalid. */
save_m_expr = mp->m_expr;
save_m_noremap = mp->m_noremap;
save_m_silent = mp->m_silent;
save_m_keys = NULL; /* only saved when needed */
save_m_str = NULL; /* only saved when needed */
/*
* Handle ":map <expr>": evaluate the {rhs} as an
* expression. Also save and restore the command line
* for "normal :".
*/
if (mp->m_expr)
{
int save_vgetc_busy = vgetc_busy;
vgetc_busy = 0;
save_m_keys = vim_strsave(mp->m_keys);
save_m_str = vim_strsave(mp->m_str);
s = eval_map_expr(save_m_str, NUL);
vgetc_busy = save_vgetc_busy;
}
else
#endif
s = mp->m_str;
/*
* Insert the 'to' part in the typebuf.tb_buf.
* If 'from' field is the same as the start of the
* 'to' field, don't remap the first character (but do
* allow abbreviations).
* If m_noremap is set, don't remap the whole 'to'
* part.
*/
if (s == NULL)
i = FAIL;
else
{
int noremap;
if (save_m_noremap != REMAP_YES)
noremap = save_m_noremap;
else if (
#ifdef FEAT_EVAL
STRNCMP(s, save_m_keys != NULL
? save_m_keys : mp->m_keys,
(size_t)keylen)
#else
STRNCMP(s, mp->m_keys, (size_t)keylen)
#endif
!= 0)
noremap = REMAP_YES;
else
noremap = REMAP_SKIP;
i = ins_typebuf(s, noremap,
0, TRUE, cmd_silent || save_m_silent);
#ifdef FEAT_EVAL
if (save_m_expr)
vim_free(s);
#endif
}
#ifdef FEAT_EVAL
vim_free(save_m_keys);
vim_free(save_m_str);
#endif
if (i == FAIL)
{
c = -1;
break;
}
continue;
}
}
/*
* get a character: 3. from the user - handle <Esc> in Insert mode
*/
/*
* Special case: if we get an <ESC> in insert mode and there
* are no more characters at once, we pretend to go out of
* insert mode. This prevents the one second delay after
* typing an <ESC>. If we get something after all, we may
* have to redisplay the mode. That the cursor is in the wrong
* place does not matter.
*/
c = 0;
#ifdef FEAT_CMDL_INFO
new_wcol = curwin->w_wcol;
new_wrow = curwin->w_wrow;
#endif
if ( advance
&& typebuf.tb_len == 1
&& typebuf.tb_buf[typebuf.tb_off] == ESC
&& !no_mapping
&& ex_normal_busy == 0
&& typebuf.tb_maplen == 0
&& (State & INSERT)
&& (p_timeout
|| (keylen == KEYLEN_PART_KEY && p_ttimeout))
&& (c = inchar(typebuf.tb_buf + typebuf.tb_off
+ typebuf.tb_len, 3, 25L)) == 0)
{
colnr_T col = 0, vcol;
char_u *ptr;
if (mode_displayed)
{
unshowmode(TRUE);
mode_deleted = TRUE;
}
#ifdef FEAT_GUI
/* may show a different cursor shape */
if (gui.in_use && State != NORMAL && !cmd_silent)
{
int save_State;
save_State = State;
State = NORMAL;
gui_update_cursor(TRUE, FALSE);
State = save_State;
shape_changed = TRUE;
}
#endif
validate_cursor();
old_wcol = curwin->w_wcol;
old_wrow = curwin->w_wrow;
/* move cursor left, if possible */
if (curwin->w_cursor.col != 0)
{
if (curwin->w_wcol > 0)
{
if (did_ai)
{
/*
* We are expecting to truncate the trailing
* white-space, so find the last non-white
* character -- webb
*/
col = vcol = curwin->w_wcol = 0;
ptr = ml_get_curline();
while (col < curwin->w_cursor.col)
{
if (!VIM_ISWHITE(ptr[col]))
curwin->w_wcol = vcol;
vcol += lbr_chartabsize(ptr, ptr + col,
(colnr_T)vcol);
if (has_mbyte)
col += (*mb_ptr2len)(ptr + col);
else
++col;
}
curwin->w_wrow = curwin->w_cline_row
+ curwin->w_wcol / curwin->w_width;
curwin->w_wcol %= curwin->w_width;
curwin->w_wcol += curwin_col_off();
col = 0; /* no correction needed */
}
else
{
--curwin->w_wcol;
col = curwin->w_cursor.col - 1;
}
}
else if (curwin->w_p_wrap && curwin->w_wrow)
{
--curwin->w_wrow;
curwin->w_wcol = curwin->w_width - 1;
col = curwin->w_cursor.col - 1;
}
if (has_mbyte && col > 0 && curwin->w_wcol > 0)
{
/* Correct when the cursor is on the right halve
* of a double-wide character. */
ptr = ml_get_curline();
col -= (*mb_head_off)(ptr, ptr + col);
if ((*mb_ptr2cells)(ptr + col) > 1)
--curwin->w_wcol;
}
}
setcursor();
out_flush();
#ifdef FEAT_CMDL_INFO
new_wcol = curwin->w_wcol;
new_wrow = curwin->w_wrow;
#endif
curwin->w_wcol = old_wcol;
curwin->w_wrow = old_wrow;
}
if (c < 0)
continue; /* end of input script reached */
/* Allow mapping for just typed characters. When we get here c
* is the number of extra bytes and typebuf.tb_len is 1. */
for (n = 1; n <= c; ++n)
typebuf.tb_noremap[typebuf.tb_off + n] = RM_YES;
typebuf.tb_len += c;
/* buffer full, don't map */
if (typebuf.tb_len >= typebuf.tb_maplen + MAXMAPLEN)
{
timedout = TRUE;
continue;
}
if (ex_normal_busy > 0)
{
#ifdef FEAT_CMDWIN
static int tc = 0;
#endif
/* No typeahead left and inside ":normal". Must return
* something to avoid getting stuck. When an incomplete
* mapping is present, behave like it timed out. */
if (typebuf.tb_len > 0)
{
timedout = TRUE;
continue;
}
/* When 'insertmode' is set, ESC just beeps in Insert
* mode. Use CTRL-L to make edit() return.
* For the command line only CTRL-C always breaks it.
* For the cmdline window: Alternate between ESC and
* CTRL-C: ESC for most situations and CTRL-C to close the
* cmdline window. */
if (p_im && (State & INSERT))
c = Ctrl_L;
#ifdef FEAT_TERMINAL
else if (terminal_is_active())
c = K_CANCEL;
#endif
else if ((State & CMDLINE)
#ifdef FEAT_CMDWIN
|| (cmdwin_type > 0 && tc == ESC)
#endif
)
c = Ctrl_C;
else
c = ESC;
#ifdef FEAT_CMDWIN
tc = c;
#endif
break;
}
/*
* get a character: 3. from the user - update display
*/
/* In insert mode a screen update is skipped when characters
* are still available. But when those available characters
* are part of a mapping, and we are going to do a blocking
* wait here. Need to update the screen to display the
* changed text so far. Also for when 'lazyredraw' is set and
* redrawing was postponed because there was something in the
* input buffer (e.g., termresponse). */
if (((State & INSERT) != 0 || p_lz) && (State & CMDLINE) == 0
&& advance && must_redraw != 0 && !need_wait_return)
{
update_screen(0);
setcursor(); /* put cursor back where it belongs */
}
/*
* If we have a partial match (and are going to wait for more
* input from the user), show the partially matched characters
* to the user with showcmd.
*/
#ifdef FEAT_CMDL_INFO
i = 0;
#endif
c1 = 0;
if (typebuf.tb_len > 0 && advance && !exmode_active)
{
if (((State & (NORMAL | INSERT)) || State == LANGMAP)
&& State != HITRETURN)
{
/* this looks nice when typing a dead character map */
if (State & INSERT
&& ptr2cells(typebuf.tb_buf + typebuf.tb_off
+ typebuf.tb_len - 1) == 1)
{
edit_putchar(typebuf.tb_buf[typebuf.tb_off
+ typebuf.tb_len - 1], FALSE);
setcursor(); /* put cursor back where it belongs */
c1 = 1;
}
#ifdef FEAT_CMDL_INFO
/* need to use the col and row from above here */
old_wcol = curwin->w_wcol;
old_wrow = curwin->w_wrow;
curwin->w_wcol = new_wcol;
curwin->w_wrow = new_wrow;
push_showcmd();
if (typebuf.tb_len > SHOWCMD_COLS)
i = typebuf.tb_len - SHOWCMD_COLS;
while (i < typebuf.tb_len)
(void)add_to_showcmd(typebuf.tb_buf[typebuf.tb_off
+ i++]);
curwin->w_wcol = old_wcol;
curwin->w_wrow = old_wrow;
#endif
}
/* this looks nice when typing a dead character map */
if ((State & CMDLINE)
#if defined(FEAT_CRYPT) || defined(FEAT_EVAL)
&& cmdline_star == 0
#endif
&& ptr2cells(typebuf.tb_buf + typebuf.tb_off
+ typebuf.tb_len - 1) == 1)
{
putcmdline(typebuf.tb_buf[typebuf.tb_off
+ typebuf.tb_len - 1], FALSE);
c1 = 1;
}
}
/*
* get a character: 3. from the user - get it
*/
if (typebuf.tb_len == 0)
// timedout may have been set while waiting for a mapping
// that has a <Nop> RHS.
timedout = FALSE;
if (advance)
{
if (typebuf.tb_len == 0
|| !(p_timeout
|| (p_ttimeout && keylen == KEYLEN_PART_KEY)))
// blocking wait
wait_time = -1L;
else if (keylen == KEYLEN_PART_KEY && p_ttm >= 0)
wait_time = p_ttm;
else
wait_time = p_tm;
}
else
wait_time = 0;
wait_tb_len = typebuf.tb_len;
c = inchar(typebuf.tb_buf + typebuf.tb_off + typebuf.tb_len,
typebuf.tb_buflen - typebuf.tb_off - typebuf.tb_len - 1,
wait_time);
#ifdef FEAT_CMDL_INFO
if (i != 0)
pop_showcmd();
#endif
if (c1 == 1)
{
if (State & INSERT)
edit_unputchar();
if (State & CMDLINE)
unputcmdline();
else
setcursor(); /* put cursor back where it belongs */
}
if (c < 0)
continue; /* end of input script reached */
if (c == NUL) /* no character available */
{
if (!advance)
break;
if (wait_tb_len > 0) /* timed out */
{
timedout = TRUE;
continue;
}
}
else
{ /* allow mapping for just typed characters */
while (typebuf.tb_buf[typebuf.tb_off
+ typebuf.tb_len] != NUL)
typebuf.tb_noremap[typebuf.tb_off
+ typebuf.tb_len++] = RM_YES;
#ifdef HAVE_INPUT_METHOD
/* Get IM status right after getting keys, not after the
* timeout for a mapping (focus may be lost by then). */
vgetc_im_active = im_get_status();
#endif
}
} /* for (;;) */
} /* if (!character from stuffbuf) */
/* if advance is FALSE don't loop on NULs */
} while ((c < 0 && c != K_CANCEL) || (advance && c == NUL));
/*
* The "INSERT" message is taken care of here:
* if we return an ESC to exit insert mode, the message is deleted
* if we don't return an ESC but deleted the message before, redisplay it
*/
if (advance && p_smd && msg_silent == 0 && (State & INSERT))
{
if (c == ESC && !mode_deleted && !no_mapping && mode_displayed)
{
if (typebuf.tb_len && !KeyTyped)
redraw_cmdline = TRUE; /* delete mode later */
else
unshowmode(FALSE);
}
else if (c != ESC && mode_deleted)
{
if (typebuf.tb_len && !KeyTyped)
redraw_cmdline = TRUE; /* show mode later */
else
showmode();
}
}
#ifdef FEAT_GUI
/* may unshow different cursor shape */
if (gui.in_use && shape_changed)
gui_update_cursor(TRUE, FALSE);
#endif
if (timedout && c == ESC)
{
char_u nop_buf[3];
// When recording there will be no timeout. Add a <Nop> after the ESC
// to avoid that it forms a key code with following characters.
nop_buf[0] = K_SPECIAL;
nop_buf[1] = KS_EXTRA;
nop_buf[2] = KE_NOP;
gotchars(nop_buf, 3);
}
--vgetc_busy;
return c;
}
| 0 |
[
"CWE-78"
] |
vim
|
53575521406739cf20bbe4e384d88e7dca11f040
| 338,648,984,780,645,800,000,000,000,000,000,000,000 | 1,002 |
patch 8.1.1365: source command doesn't check for the sandbox
Problem: Source command doesn't check for the sandbox. (Armin Razmjou)
Solution: Check for the sandbox when sourcing a file.
|
rpc_C_GenerateRandom (CK_X_FUNCTION_LIST *self,
p11_rpc_message *msg)
{
/*
 * Server-side RPC stub for PKCS#11 C_GenerateRandom.
 *
 * The BEGIN_CALL / IN_* / PROCESS_CALL / OUT_* / END_CALL macros
 * expand to the demarshal-call-marshal sequence used by every stub in
 * this file: the session handle and the requested byte-buffer length
 * are decoded from "msg", the real C_GenerateRandom is invoked through
 * "self", and the generated bytes are serialized back into the reply.
 *
 * NOTE(review): buffer sizing/overflow checks live inside
 * IN_BYTE_BUFFER / OUT_BYTE_ARRAY, not here — confirm against the
 * macro definitions when auditing allocation behavior.
 */
CK_SESSION_HANDLE session;
CK_BYTE_PTR random_data;
CK_ULONG random_len;
BEGIN_CALL (GenerateRandom);
IN_ULONG (session);
IN_BYTE_BUFFER (random_data, random_len);
PROCESS_CALL ((self, session, random_data, random_len));
OUT_BYTE_ARRAY (random_data, random_len);
END_CALL;
}
| 0 |
[
"CWE-190"
] |
p11-kit
|
5307a1d21a50cacd06f471a873a018d23ba4b963
| 247,422,120,730,431,620,000,000,000,000,000,000,000 | 14 |
Check for arithmetic overflows before allocating
|
/*
 * comedi_write() - file_operations .write handler for a comedi device.
 *
 * Copies user data into the write subdevice's preallocated ring buffer
 * ("async->prealloc_buf") for an ongoing asynchronous command.  Returns
 * the number of bytes accepted, or a negative errno:
 *   -ENODEV  no driver attached, -EIO no write subdevice,
 *   -EACCES  the subdevice is busy with a different file,
 *   -EPIPE   the command stopped with an error before anything was copied,
 *   -EAGAIN  non-blocking and no buffer space, -ERESTARTSYS signal while
 *   waiting, -EFAULT copy_from_user failed.
 */
static ssize_t comedi_write(struct file *file, const char __user *buf,
size_t nbytes, loff_t *offset)
{
struct comedi_subdevice *s;
struct comedi_async *async;
int n, m, count = 0, retval = 0;
DECLARE_WAITQUEUE(wait, current);
const unsigned minor = iminor(file->f_dentry->d_inode);
struct comedi_device_file_info *dev_file_info =
comedi_get_device_file_info(minor);
struct comedi_device *dev = dev_file_info->device;
if (!dev->attached) {
DPRINTK("no driver configured on comedi%i\n", dev->minor);
retval = -ENODEV;
goto done;
}
s = comedi_get_write_subdevice(dev_file_info);
if (s == NULL) {
retval = -EIO;
goto done;
}
async = s->async;
/* Zero-length writes and a subdevice with no running command are no-ops. */
if (!nbytes) {
retval = 0;
goto done;
}
if (!s->busy) {
retval = 0;
goto done;
}
/* Only the file that started the command may feed it data. */
if (s->busy != file) {
retval = -EACCES;
goto done;
}
/* Sleep on the subdevice's wait queue until buffer space is available. */
add_wait_queue(&async->wait_head, &wait);
while (nbytes > 0 && !retval) {
set_current_state(TASK_INTERRUPTIBLE);
if (!(comedi_get_subdevice_runflags(s) & SRF_RUNNING)) {
/* Command ended: report the error only if nothing was copied yet. */
if (count == 0) {
if (comedi_get_subdevice_runflags(s) &
SRF_ERROR) {
retval = -EPIPE;
} else {
retval = 0;
}
do_become_nonbusy(dev, s);
}
break;
}
/* Limit the chunk to the contiguous space up to the buffer end,
 * then to what the ring buffer will actually allocate. */
n = nbytes;
m = n;
if (async->buf_write_ptr + m > async->prealloc_bufsz)
m = async->prealloc_bufsz - async->buf_write_ptr;
comedi_buf_write_alloc(async, async->prealloc_bufsz);
if (m > comedi_buf_write_n_allocated(async))
m = comedi_buf_write_n_allocated(async);
if (m < n)
n = m;
if (n == 0) {
/* No space right now: fail fast, bail on signals, or sleep. */
if (file->f_flags & O_NONBLOCK) {
retval = -EAGAIN;
break;
}
if (signal_pending(current)) {
retval = -ERESTARTSYS;
break;
}
schedule();
/* Re-check ownership after waking up. */
if (!s->busy)
break;
if (s->busy != file) {
retval = -EACCES;
break;
}
continue;
}
m = copy_from_user(async->prealloc_buf + async->buf_write_ptr,
buf, n);
if (m) {
/* Partial copy: commit only the bytes that made it across. */
n -= m;
retval = -EFAULT;
}
comedi_buf_write_free(async, n);
count += n;
nbytes -= n;
buf += n;
break; /* makes device work like a pipe */
}
set_current_state(TASK_RUNNING);
remove_wait_queue(&async->wait_head, &wait);
done:
/* Prefer reporting progress over the error once any data was accepted. */
return count ? count : retval;
}
| 0 |
[
"CWE-200"
] |
linux
|
819cbb120eaec7e014e5abd029260db1ca8c5735
| 50,522,100,896,829,260,000,000,000,000,000,000,000 | 104 |
staging: comedi: fix infoleak to userspace
driver_name and board_name are pointers to strings, not buffers of size
COMEDI_NAMELEN. Copying COMEDI_NAMELEN bytes of a string containing
less than COMEDI_NAMELEN-1 bytes would leak some unrelated bytes.
Signed-off-by: Vasiliy Kulikov <[email protected]>
Cc: stable <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
|
/**
 * xmlDumpElementDecl:
 * @buf:  the XML buffer output
 * @elem: an element declaration table entry
 *
 * Dump the DTD source of a single element declaration into @buf, e.g.
 * "<!ELEMENT prefix:name EMPTY>\n".  Silently returns on NULL args;
 * reports an internal validity error for a corrupted etype.
 *
 * Refactor: the original duplicated the "<!ELEMENT [prefix:]name"
 * emission in all four arms (the MIXED and ELEMENT arms were fully
 * identical); the common part is now emitted once, with only the
 * content-model tail varying by type.  Output is byte-identical.
 */
xmlDumpElementDecl(xmlBufferPtr buf, xmlElementPtr elem) {
    if ((buf == NULL) || (elem == NULL))
        return;
    switch (elem->etype) {
        case XML_ELEMENT_TYPE_EMPTY:
        case XML_ELEMENT_TYPE_ANY:
        case XML_ELEMENT_TYPE_MIXED:
        case XML_ELEMENT_TYPE_ELEMENT:
            /* Common leader: "<!ELEMENT " plus the optionally prefixed name. */
            xmlBufferWriteChar(buf, "<!ELEMENT ");
            if (elem->prefix != NULL) {
                xmlBufferWriteCHAR(buf, elem->prefix);
                xmlBufferWriteChar(buf, ":");
            }
            xmlBufferWriteCHAR(buf, elem->name);
            /* Type-specific content model. */
            if (elem->etype == XML_ELEMENT_TYPE_EMPTY) {
                xmlBufferWriteChar(buf, " EMPTY>\n");
            } else if (elem->etype == XML_ELEMENT_TYPE_ANY) {
                xmlBufferWriteChar(buf, " ANY>\n");
            } else {
                /* MIXED and ELEMENT both serialize their content tree. */
                xmlBufferWriteChar(buf, " ");
                xmlDumpElementContent(buf, elem->content);
                xmlBufferWriteChar(buf, ">\n");
            }
            break;
        default:
            xmlErrValid(NULL, XML_ERR_INTERNAL_ERROR,
                        "Internal: ELEMENT struct corrupted invalid type\n",
                        NULL);
    }
}
| 0 |
[
"CWE-416"
] |
libxml2
|
652dd12a858989b14eed4e84e453059cd3ba340e
| 300,205,370,697,292,400,000,000,000,000,000,000,000 | 50 |
[CVE-2022-23308] Use-after-free of ID and IDREF attributes
If a document is parsed with XML_PARSE_DTDVALID and without
XML_PARSE_NOENT, the value of ID attributes has to be normalized after
potentially expanding entities in xmlRemoveID. Otherwise, later calls
to xmlGetID can return a pointer to previously freed memory.
ID attributes which are empty or contain only whitespace after
entity expansion are affected in a similar way. This is fixed by
not storing such attributes in the ID table.
The test to detect streaming mode when validating against a DTD was
broken. In connection with the defects above, this could result in a
use-after-free when using the xmlReader interface with validation.
Fix detection of streaming mode to avoid similar issues. (This changes
the expected result of a test case. But as far as I can tell, using the
XML reader with XIncludes referencing the root document never worked
properly, anyway.)
All of these issues can result in denial of service. Using xmlReader
with validation could result in disclosure of memory via the error
channel, typically stderr. The security impact of xmlGetID returning
a pointer to freed memory depends on the application. The typical use
case of calling xmlGetID on an unmodified document is not affected.
|
/*
 * Walk over one RLE-compressed color plane without decoding it.
 *
 * Returns the number of source bytes the plane occupies, or -1 when the
 * stream is malformed (a segment overruns the scanline width or the
 * source buffer runs dry).
 */
static INLINE INT32 planar_skip_plane_rle(const BYTE* pSrcData, UINT32 SrcSize, UINT32 nWidth,
                                          UINT32 nHeight)
{
	UINT32 consumed = 0;
	UINT32 row;

	for (row = 0; row < nHeight; row++)
	{
		UINT32 pixels = 0;

		while (pixels < nWidth)
		{
			BYTE control;
			int rawCount;
			int runCount;

			/* Every segment starts with a control byte. */
			if (consumed >= SrcSize)
				return -1;

			control = pSrcData[consumed++];
			runCount = PLANAR_CONTROL_BYTE_RUN_LENGTH(control);
			rawCount = PLANAR_CONTROL_BYTE_RAW_BYTES(control);

			/* RUN values 1 and 2 are escapes for longer runs carrying
			 * no raw bytes (length = rawCount + 16 or + 32). */
			if (runCount == 1)
			{
				runCount = rawCount + 16;
				rawCount = 0;
			}
			else if (runCount == 2)
			{
				runCount = rawCount + 32;
				rawCount = 0;
			}

			consumed += rawCount;
			pixels += rawCount;
			pixels += runCount;

			/* Reject segments that spill past the scanline or buffer. */
			if (pixels > nWidth)
				return -1;
			if (consumed > SrcSize)
				return -1;
		}
	}

	if (consumed > INT32_MAX)
		return -1;

	return (INT32)consumed;
}
| 0 |
[
"CWE-125"
] |
FreeRDP
|
17f547ae11835bb11baa3d045245dc1694866845
| 19,045,850,163,198,990,000,000,000,000,000,000,000 | 48 |
Fixed CVE-2020-11521: Out of bounds write in planar codec.
Thanks to Sunglin and HuanGMz from Knownsec 404
|
/*
  Attach a transaction (TRN) to an Aria handler on behalf of MySQL.

  If the THD has no transaction yet, a new one is created, stored in the
  handlerton-private slot, and registered with the server when running
  inside an explicit transaction (non-autocommit or BEGIN).  The TRN is
  then bound to the table handle; on the first locked table of a
  statement the handler is also registered for statement rollback and a
  new statement is started inside the TRN.

  Returns 0 on success, HA_ERR_OUT_OF_MEM if a TRN could not be created.
*/
static int maria_create_trn_for_mysql(MARIA_HA *info)
{
  THD *thd= ((TABLE*) info->external_ref)->in_use;
  TRN *trn= THD_TRN;
  DBUG_ENTER("maria_create_trn_for_mysql");

  if (!trn)  /* no transaction yet - open it now */
  {
    trn= trnman_new_trn(& thd->transaction->wt);
    if (unlikely(!trn))
      DBUG_RETURN(HA_ERR_OUT_OF_MEM);
    thd_set_ha_data(thd, maria_hton, trn);
    if (thd->variables.option_bits & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN))
      trans_register_ha(thd, TRUE, maria_hton, trn->trid);
  }
  _ma_set_trn_for_table(info, trn);
  /* Zero return means this is the first locked table of the statement. */
  if (!trnman_increment_locked_tables(trn))
  {
    trans_register_ha(thd, FALSE, maria_hton, trn->trid);
    trnman_new_statement(trn);
  }
#ifdef EXTRA_DEBUG
  /* Debug builds: log the query text once per TRN on first write lock. */
  if (info->lock_type == F_WRLCK &&
      ! (trnman_get_flags(trn) & TRN_STATE_INFO_LOGGED))
  {
    trnman_set_flags(trn, trnman_get_flags(trn) | TRN_STATE_INFO_LOGGED |
                     TRN_STATE_TABLES_CAN_CHANGE);
    (void) translog_log_debug_info(trn, LOGREC_DEBUG_INFO_QUERY,
                                   (uchar*) thd->query(),
                                   thd->query_length());
  }
  else
  {
    DBUG_PRINT("info", ("lock_type: %d  trnman_flags: %u",
                        info->lock_type, trnman_get_flags(trn)));
  }
#endif
  DBUG_RETURN(0);
}
| 0 |
[
"CWE-400"
] |
server
|
9e39d0ae44595dbd1570805d97c9c874778a6be8
| 187,788,278,126,230,900,000,000,000,000,000,000,000 | 40 |
MDEV-25787 Bug report: crash on SELECT DISTINCT thousands_blob_fields
fix a debug assert to account for not opened temp tables
|
/*
  Remove the registry entry named by `key` from the global image registry
  splay-tree and return its value, or NULL when no registry has been
  created yet.  The trace-log call is emitted in either case.
*/
MagickExport void *RemoveImageRegistry(const char *key)
{
  void
    *value;

  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",key);
  value=(void *) NULL;
  if (registry != (void *) NULL)
    value=RemoveNodeFromSplayTree(registry,key);
  return(value);
}
| 0 |
[
"CWE-476"
] |
ImageMagick
|
cca91aa1861818342e3d072bb0fad7dc4ffac24a
| 169,307,934,507,850,030,000,000,000,000,000,000,000 | 8 |
https://github.com/ImageMagick/ImageMagick/issues/790
|
/*
  Merge an image sequence into a single canvas according to `method`:

    MergeLayer / default  - canvas grows to the union of all virtual pages.
    FlattenLayer          - canvas is the first image's virtual canvas.
    MosaicLayer           - canvas grows to cover every image's page extent
                            (positive offsets only).
    TrimBoundsLayer       - no compositing; only each image's page geometry
                            is adjusted in place and NULL is returned.

  For all methods except TrimBoundsLayer the images are composited in list
  order onto a freshly-allocated canvas, which is returned (or NULL on
  allocation failure).
*/
MagickExport Image *MergeImageLayers(Image *image,const LayerMethod method,
  ExceptionInfo *exception)
{
#define MergeLayersTag  "Merge/Layers"

  Image
    *canvas;

  MagickBooleanType
    proceed;

  RectangleInfo
    page;

  register const Image
    *next;

  size_t
    number_images,
    height,
    width;

  ssize_t
    scene;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Determine canvas image size, and its virtual canvas size and offset
  */
  page=image->page;
  width=image->columns;
  height=image->rows;
  switch (method)
  {
    case TrimBoundsLayer:
    case MergeLayer:
    default:
    {
      /* Union of all page rectangles: grow left/up by moving the origin,
         grow right/down by extending width/height. */
      next=GetNextImageInList(image);
      for ( ; next != (Image *) NULL;  next=GetNextImageInList(next))
      {
        if (page.x > next->page.x)
          {
            width+=page.x-next->page.x;
            page.x=next->page.x;
          }
        if (page.y > next->page.y)
          {
            height+=page.y-next->page.y;
            page.y=next->page.y;
          }
        if ((ssize_t) width < (next->page.x+(ssize_t) next->columns-page.x))
          width=(size_t) next->page.x+(ssize_t) next->columns-page.x;
        if ((ssize_t) height < (next->page.y+(ssize_t) next->rows-page.y))
          height=(size_t) next->page.y+(ssize_t) next->rows-page.y;
      }
      break;
    }
    case FlattenLayer:
    {
      /* Canvas is the first image's declared virtual canvas, origin 0,0. */
      if (page.width > 0)
        width=page.width;
      if (page.height > 0)
        height=page.height;
      page.x=0;
      page.y=0;
      break;
    }
    case MosaicLayer:
    {
      if (page.width > 0)
        width=page.width;
      if (page.height > 0)
        height=page.height;
      for (next=image; next != (Image *) NULL; next=GetNextImageInList(next))
      {
        if (method == MosaicLayer)
          {
            page.x=next->page.x;
            page.y=next->page.y;
            if ((ssize_t) width < (next->page.x+(ssize_t) next->columns))
              width=(size_t) next->page.x+next->columns;
            if ((ssize_t) height < (next->page.y+(ssize_t) next->rows))
              height=(size_t) next->page.y+next->rows;
          }
      }
      page.width=width;
      page.height=height;
      page.x=0;
      page.y=0;
    }
    break;
  }
  /*
    Set virtual canvas size if not defined.
  */
  if (page.width == 0)
    page.width=page.x < 0 ? width : width+page.x;
  if (page.height == 0)
    page.height=page.y < 0 ? height : height+page.y;
  /*
    Handle "TrimBoundsLayer" method separately to normal 'layer merge'.
  */
  if (method == TrimBoundsLayer)
    {
      /* Shift every frame's page offset and record the common canvas size;
         intentionally returns NULL (caller keeps the modified list). */
      number_images=GetImageListLength(image);
      for (scene=0; scene < (ssize_t) number_images; scene++)
      {
        image->page.x-=page.x;
        image->page.y-=page.y;
        image->page.width=width;
        image->page.height=height;
        proceed=SetImageProgress(image,MergeLayersTag,(MagickOffsetType) scene,
          number_images);
        if (proceed == MagickFalse)
          break;
        image=GetNextImageInList(image);
        if (image == (Image *) NULL)
          break;
      }
      return((Image *) NULL);
    }
  /*
    Create canvas size of width and height, and background color.
  */
  canvas=CloneImage(image,width,height,MagickTrue,exception);
  if (canvas == (Image *) NULL)
    return((Image *) NULL);
  canvas->background_color.alpha_trait=BlendPixelTrait;
  (void) SetImageBackgroundColor(canvas,exception);
  canvas->page=page;
  canvas->dispose=UndefinedDispose;
  /*
    Compose images onto canvas, with progress monitor
  */
  number_images=GetImageListLength(image);
  for (scene=0; scene < (ssize_t) number_images; scene++)
  {
    /* Offsets are relative to the (possibly negative) canvas origin. */
    (void) CompositeImage(canvas,image,image->compose,MagickTrue,image->page.x-
      canvas->page.x,image->page.y-canvas->page.y,exception);
    proceed=SetImageProgress(image,MergeLayersTag,(MagickOffsetType) scene,
      number_images);
    if (proceed == MagickFalse)
      break;
    image=GetNextImageInList(image);
    if (image == (Image *) NULL)
      break;
  }
  return(canvas);
}
| 0 |
[
"CWE-399",
"CWE-369"
] |
ImageMagick
|
ef59bd764f88d893f1219fee8ba696a5d3f8c1c4
| 101,058,300,584,399,940,000,000,000,000,000,000,000 | 155 |
There is a Division by Zero in function OptimizeLayerFrames (#2743)
in file MagickCore/layer.c. cur->ticks_per_seconds can be zero
with a crafted input argument *image. This is similar to
CVE-2019-13454.
|
// Returns a pointer to the first complex128 element backing the proto's
// dcomplex_val repeated field, reinterpreting the underlying storage.
// NOTE(review): assumes dcomplex_val stores interleaved re/im doubles with
// a layout compatible with complex128 — established by callers, not here.
static const complex128* Begin(const TensorProto& proto) {
  return reinterpret_cast<const complex128*>(proto.dcomplex_val().data());
}
| 0 |
[
"CWE-345"
] |
tensorflow
|
abcced051cb1bd8fb05046ac3b6023a7ebcc4578
| 339,996,715,432,405,850,000,000,000,000,000,000,000 | 3 |
Prevent crashes when loading tensor slices with unsupported types.
Also fix the `Tensor(const TensorShape&)` constructor swapping the LOG(FATAL)
messages for the unset and unsupported types.
PiperOrigin-RevId: 392695027
Change-Id: I4beda7db950db951d273e3259a7c8534ece49354
|
/* Release all per-connection Tight-encoding state: end every zlib stream
 * that was actually initialized (opaque != NULL) and free the scratch
 * buffers for each sub-encoding that was compiled in. */
void vnc_tight_clear(VncState *vs)
{
    int i;
    for (i=0; i<ARRAY_SIZE(vs->tight.stream); i++) {
        /* deflateEnd() must only be called on streams deflateInit'ed. */
        if (vs->tight.stream[i].opaque) {
            deflateEnd(&vs->tight.stream[i]);
        }
    }

    buffer_free(&vs->tight.tight);
    buffer_free(&vs->tight.zlib);
    buffer_free(&vs->tight.gradient);
#ifdef CONFIG_VNC_JPEG
    buffer_free(&vs->tight.jpeg);
#endif
#ifdef CONFIG_VNC_PNG
    buffer_free(&vs->tight.png);
#endif
}
| 1 |
[
"CWE-401"
] |
qemu
|
6bf21f3d83e95bcc4ba35a7a07cc6655e8b010b0
| 314,170,862,321,700,970,000,000,000,000,000,000,000 | 19 |
vnc: fix memory leak when vnc disconnect
Currently when qemu receives a vnc connect, it creates a 'VncState' to
represent this connection. In 'vnc_worker_thread_loop' it creates a
local 'VncState'. The connection 'VcnState' and local 'VncState' exchange
data in 'vnc_async_encoding_start' and 'vnc_async_encoding_end'.
In 'zrle_compress_data' it calls 'deflateInit2' to allocate the libz library
opaque data. The 'VncState' used in 'zrle_compress_data' is the local
'VncState'. In 'vnc_zrle_clear' it calls 'deflateEnd' to free the libz
library opaque data. The 'VncState' used in 'vnc_zrle_clear' is the connection
'VncState'. In currently implementation there will be a memory leak when the
vnc disconnect. Following is the asan output backtrack:
Direct leak of 29760 byte(s) in 5 object(s) allocated from:
0 0xffffa67ef3c3 in __interceptor_calloc (/lib64/libasan.so.4+0xd33c3)
1 0xffffa65071cb in g_malloc0 (/lib64/libglib-2.0.so.0+0x571cb)
2 0xffffa5e968f7 in deflateInit2_ (/lib64/libz.so.1+0x78f7)
3 0xaaaacec58613 in zrle_compress_data ui/vnc-enc-zrle.c:87
4 0xaaaacec58613 in zrle_send_framebuffer_update ui/vnc-enc-zrle.c:344
5 0xaaaacec34e77 in vnc_send_framebuffer_update ui/vnc.c:919
6 0xaaaacec5e023 in vnc_worker_thread_loop ui/vnc-jobs.c:271
7 0xaaaacec5e5e7 in vnc_worker_thread ui/vnc-jobs.c:340
8 0xaaaacee4d3c3 in qemu_thread_start util/qemu-thread-posix.c:502
9 0xffffa544e8bb in start_thread (/lib64/libpthread.so.0+0x78bb)
10 0xffffa53965cb in thread_start (/lib64/libc.so.6+0xd55cb)
This is because the opaque allocated in 'deflateInit2' is not freed in
'deflateEnd'. The reason is that the 'deflateEnd' calls 'deflateStateCheck'
and in the latter will check whether 's->strm != strm'(libz's data structure).
This check will be true so in 'deflateEnd' it just return 'Z_STREAM_ERROR' and
not free the data allocated in 'deflateInit2'.
The reason this happens is that the 'VncState' contains the whole 'VncZrle',
so when calling 'deflateInit2', the 's->strm' will be the local address.
So 's->strm != strm' will be true.
To fix this issue, we need to make 'zrle' of 'VncState' to be a pointer.
Then the connection 'VncState' and local 'VncState' exchange mechanism will
work as expection. The 'tight' of 'VncState' has the same issue, let's also turn
it to a pointer.
Reported-by: Ying Fang <[email protected]>
Signed-off-by: Li Qiang <[email protected]>
Message-id: [email protected]
Signed-off-by: Gerd Hoffmann <[email protected]>
|
static BOOL update_read_synchronize(rdpUpdate* update, wStream* s)
{
	WINPR_UNUSED(update);
	/**
	 * The Synchronize Update is an artifact from the
	 * T.128 protocol and should be ignored.
	 */
	/* Only skip the pad2Octets field, with a bounds check. */
	return Stream_SafeSeek(s, 2); /* pad2Octets (2 bytes) */
}
| 0 |
[
"CWE-125"
] |
FreeRDP
|
f8890a645c221823ac133dbf991f8a65ae50d637
| 15,521,892,922,773,363,000,000,000,000,000,000,000 | 9 |
Fixed #6005: Bounds checks in update_read_bitmap_data
|
/*
 * Collect at least |entropy| bits of entropy for |drbg| into a buffer of
 * between |min_len| and |max_len| bytes.
 *
 * If the DRBG is chained to a parent DRBG, the parent is used as the
 * entropy source (its lock is taken while generating); otherwise the
 * system entropy sources are polled.  Prediction resistance is only
 * honoured on the parent path.
 *
 * On success *pout receives the detached buffer (ownership passes to the
 * caller) and the buffer length is returned; on failure 0 is returned.
 */
size_t rand_drbg_get_entropy(RAND_DRBG *drbg,
                             unsigned char **pout,
                             int entropy, size_t min_len, size_t max_len,
                             int prediction_resistance)
{
    size_t ret = 0;
    size_t entropy_available = 0;
    RAND_POOL *pool;

    if (drbg->parent != NULL && drbg->strength > drbg->parent->strength) {
        /*
         * We currently don't support the algorithm from NIST SP 800-90C
         * 10.1.2 to use a weaker DRBG as source
         */
        RANDerr(RAND_F_RAND_DRBG_GET_ENTROPY, RAND_R_PARENT_STRENGTH_TOO_WEAK);
        return 0;
    }

    /* Reuse the caller-supplied seed pool if one is attached to the DRBG. */
    if (drbg->seed_pool != NULL) {
        pool = drbg->seed_pool;
        pool->entropy_requested = entropy;
    } else {
        pool = rand_pool_new(entropy, drbg->secure, min_len, max_len);
        if (pool == NULL)
            return 0;
    }

    if (drbg->parent != NULL) {
        size_t bytes_needed = rand_pool_bytes_needed(pool, 1 /*entropy_factor*/);
        unsigned char *buffer = rand_pool_add_begin(pool, bytes_needed);

        if (buffer != NULL) {
            size_t bytes = 0;

            /*
             * Get random from parent, include our state as additional input.
             * Our lock is already held, but we need to lock our parent before
             * generating bits from it. (Note: taking the lock will be a no-op
             * if locking if drbg->parent->lock == NULL.)
             */
            rand_drbg_lock(drbg->parent);
            if (RAND_DRBG_generate(drbg->parent,
                                   buffer, bytes_needed,
                                   prediction_resistance,
                                   NULL, 0) != 0)
                bytes = bytes_needed;
            /* Snapshot the parent's reseed counter while its lock is held. */
            drbg->reseed_next_counter
                = tsan_load(&drbg->parent->reseed_prop_counter);
            rand_drbg_unlock(drbg->parent);

            rand_pool_add_end(pool, bytes, 8 * bytes);
            entropy_available = rand_pool_entropy_available(pool);
        }

    } else {
        if (prediction_resistance) {
            /*
             * We don't have any entropy sources that comply with the NIST
             * standard to provide prediction resistance (see NIST SP 800-90C,
             * Section 5.4).
             */
            RANDerr(RAND_F_RAND_DRBG_GET_ENTROPY,
                    RAND_R_PREDICTION_RESISTANCE_NOT_SUPPORTED);
            goto err;
        }

        /* Get entropy by polling system entropy sources. */
        entropy_available = rand_pool_acquire_entropy(pool);
    }

    if (entropy_available > 0) {
        ret   = rand_pool_length(pool);
        *pout = rand_pool_detach(pool);
    }

 err:
    /* Only free pools we allocated here; a seed_pool belongs to the caller. */
    if (drbg->seed_pool == NULL)
        rand_pool_free(pool);
    return ret;
}
| 0 |
[
"CWE-330"
] |
openssl
|
1b0fe00e2704b5e20334a16d3c9099d1ba2ef1be
| 269,463,928,343,155,470,000,000,000,000,000,000,000 | 80 |
drbg: ensure fork-safety without using a pthread_atfork handler
When the new OpenSSL CSPRNG was introduced in version 1.1.1,
it was announced in the release notes that it would be fork-safe,
which the old CSPRNG hadn't been.
The fork-safety was implemented using a fork count, which was
incremented by a pthread_atfork handler. Initially, this handler
was enabled by default. Unfortunately, the default behaviour
had to be changed for other reasons in commit b5319bdbd095, so
the new OpenSSL CSPRNG failed to keep its promise.
This commit restores the fork-safety using a different approach.
It replaces the fork count by a fork id, which coincides with
the process id on UNIX-like operating systems and is zero on other
operating systems. It is used to detect when an automatic reseed
after a fork is necessary.
To prevent a future regression, it also adds a test to verify that
the child reseeds after fork.
CVE-2019-1549
Reviewed-by: Paul Dale <[email protected]>
Reviewed-by: Matt Caswell <[email protected]>
(Merged from https://github.com/openssl/openssl/pull/9802)
|
/*
 * bnxt_re_query_device - report device attributes to the RDMA core.
 * @ibdev:   the ib_device being queried
 * @ib_attr: out-parameter, fully overwritten with this device's limits
 * @udata:   unused user data from the uverbs path
 *
 * Translates the cached bnxt_qplib device attributes and PCI identity
 * into the generic ib_device_attr structure.  Unsupported features
 * (EEs, FMRs, multicast, atomics) are reported as zero/none.
 * Always returns 0.
 */
int bnxt_re_query_device(struct ib_device *ibdev,
			 struct ib_device_attr *ib_attr,
			 struct ib_udata *udata)
{
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;

	/* Start from a clean slate so unset fields read as 0. */
	memset(ib_attr, 0, sizeof(*ib_attr));
	memcpy(&ib_attr->fw_ver, dev_attr->fw_ver,
	       min(sizeof(dev_attr->fw_ver),
		   sizeof(ib_attr->fw_ver)));
	bnxt_qplib_get_guid(rdev->netdev->dev_addr,
			    (u8 *)&ib_attr->sys_image_guid);
	ib_attr->max_mr_size = BNXT_RE_MAX_MR_SIZE;
	ib_attr->page_size_cap = BNXT_RE_PAGE_SIZE_4K | BNXT_RE_PAGE_SIZE_2M;

	/* PCI identity of the underlying Ethernet function. */
	ib_attr->vendor_id = rdev->en_dev->pdev->vendor;
	ib_attr->vendor_part_id = rdev->en_dev->pdev->device;
	ib_attr->hw_ver = rdev->en_dev->pdev->subsystem_device;
	ib_attr->max_qp = dev_attr->max_qp;
	ib_attr->max_qp_wr = dev_attr->max_qp_wqes;
	ib_attr->device_cap_flags =
				    IB_DEVICE_CURR_QP_STATE_MOD
				    | IB_DEVICE_RC_RNR_NAK_GEN
				    | IB_DEVICE_SHUTDOWN_PORT
				    | IB_DEVICE_SYS_IMAGE_GUID
				    | IB_DEVICE_LOCAL_DMA_LKEY
				    | IB_DEVICE_RESIZE_MAX_WR
				    | IB_DEVICE_PORT_ACTIVE_EVENT
				    | IB_DEVICE_N_NOTIFY_CQ
				    | IB_DEVICE_MEM_WINDOW
				    | IB_DEVICE_MEM_WINDOW_TYPE_2B
				    | IB_DEVICE_MEM_MGT_EXTENSIONS;
	ib_attr->max_send_sge = dev_attr->max_qp_sges;
	ib_attr->max_recv_sge = dev_attr->max_qp_sges;
	ib_attr->max_sge_rd = dev_attr->max_qp_sges;
	ib_attr->max_cq = dev_attr->max_cq;
	ib_attr->max_cqe = dev_attr->max_cq_wqes;
	ib_attr->max_mr = dev_attr->max_mr;
	ib_attr->max_pd = dev_attr->max_pd;
	ib_attr->max_qp_rd_atom = dev_attr->max_qp_rd_atom;
	ib_attr->max_qp_init_rd_atom = dev_attr->max_qp_init_rd_atom;
	ib_attr->atomic_cap = IB_ATOMIC_NONE;
	ib_attr->masked_atomic_cap = IB_ATOMIC_NONE;

	/* EE (extended endpoint) and RDD features are not supported. */
	ib_attr->max_ee_rd_atom = 0;
	ib_attr->max_res_rd_atom = 0;
	ib_attr->max_ee_init_rd_atom = 0;
	ib_attr->max_ee = 0;
	ib_attr->max_rdd = 0;
	ib_attr->max_mw = dev_attr->max_mw;
	ib_attr->max_raw_ipv6_qp = 0;
	ib_attr->max_raw_ethy_qp = dev_attr->max_raw_ethy_qp;

	/* Multicast is not supported by this device. */
	ib_attr->max_mcast_grp = 0;
	ib_attr->max_mcast_qp_attach = 0;
	ib_attr->max_total_mcast_qp_attach = 0;
	ib_attr->max_ah = dev_attr->max_ah;

	ib_attr->max_fmr = 0;
	ib_attr->max_map_per_fmr = 0;

	ib_attr->max_srq = dev_attr->max_srq;
	ib_attr->max_srq_wr = dev_attr->max_srq_wqes;
	ib_attr->max_srq_sge = dev_attr->max_srq_sges;
	ib_attr->max_fast_reg_page_list_len = MAX_PBL_LVL_1_PGS;

	ib_attr->max_pkeys = 1;
	ib_attr->local_ca_ack_delay = BNXT_RE_DEFAULT_ACK_DELAY;
	return 0;
}
| 0 |
[
"CWE-400",
"CWE-401"
] |
linux
|
4a9d46a9fe14401f21df69cea97c62396d5fb053
| 290,452,764,643,445,120,000,000,000,000,000,000,000 | 71 |
RDMA: Fix goto target to release the allocated memory
In bnxt_re_create_srq(), when ib_copy_to_udata() fails allocated memory
should be released by goto fail.
Fixes: 37cb11acf1f7 ("RDMA/bnxt_re: Add SRQ support for Broadcom adapters")
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Navid Emamdoost <[email protected]>
Reviewed-by: Jason Gunthorpe <[email protected]>
Signed-off-by: Jason Gunthorpe <[email protected]>
|
/*
 * Decide whether `dn` falls inside the search base and scope of the
 * operation described by `pb`.  If the pblock has no parsed target SDN
 * yet, one is built from the original target string and stored back into
 * the pblock (ownership transfers to the pblock).
 *
 * Returns 1 when the DN is in scope, 0 otherwise.
 */
sync_is_active_scope(const Slapi_DN *dn, Slapi_PBlock *pb)
{
    char *origbase = NULL;
    Slapi_DN *base = NULL;
    int scope;

    slapi_pblock_get(pb, SLAPI_ORIGINAL_TARGET_DN, &origbase);
    slapi_pblock_get(pb, SLAPI_SEARCH_TARGET_SDN, &base);
    slapi_pblock_get(pb, SLAPI_SEARCH_SCOPE, &scope);
    if (base == NULL) {
        /* Lazily materialize the SDN and cache it in the pblock. */
        base = slapi_sdn_new_dn_byref(origbase);
        slapi_pblock_set(pb, SLAPI_SEARCH_TARGET_SDN, base);
    }

    return slapi_sdn_scope_test(dn, base, scope) ? 1 : 0;
}
| 0 |
[
"CWE-476"
] |
389-ds-base
|
d7eef2fcfbab2ef8aa6ee0bf60f0a9b16ede66e0
| 289,613,550,355,616,820,000,000,000,000,000,000,000 | 22 |
Issue 4711 - SIGSEV with sync_repl (#4738)
Bug description:
sync_repl sends back entries identified with a unique
identifier that is 'nsuniqueid'. If 'nsuniqueid' is
missing, then it may crash
Fix description:
Check a nsuniqueid is available else returns OP_ERR
relates: https://github.com/389ds/389-ds-base/issues/4711
Reviewed by: Pierre Rogier, James Chapman, William Brown (Thanks!)
Platforms tested: F33
|
/*
 * Called when the cursor enters window "win".
 * Only does something for a prompt-buffer window: restores Insert mode
 * if it was active when the window was last left.
 */
    entering_window(win_T *win)
{
    // Only matters for a prompt window.
    if (!bt_prompt(win->w_buffer))
	return;

    // When switching to a prompt buffer that was in Insert mode, don't stop
    // Insert mode, it may have been set in leaving_window().
    if (win->w_buffer->b_prompt_insert != NUL)
	stop_insert_mode = FALSE;

    // When entering the prompt window restart Insert mode if we were in Insert
    // mode when we left it and not already in Insert mode.
    if ((State & MODE_INSERT) == 0)
	restart_edit = win->w_buffer->b_prompt_insert;
}
| 0 |
[
"CWE-416"
] |
vim
|
28d032cc688ccfda18c5bbcab8b50aba6e18cde5
| 178,255,991,033,639,200,000,000,000,000,000,000,000 | 16 |
patch 8.2.4979: accessing freed memory when line is flushed
Problem: Accessing freed memory when line is flushed.
Solution: Make a copy of the pattern to search for.
|
/* Log which driver backs the given reader slot.  Pure diagnostics; the
   slot number is only echoed, never validated here. */
dump_ccid_reader_status (int slot)
{
  log_info ("reader slot %d: using ccid driver\n", slot);
}
| 0 |
[
"CWE-20"
] |
gnupg
|
2183683bd633818dd031b090b5530951de76f392
| 290,196,204,050,127,270,000,000,000,000,000,000,000 | 4 |
Use inline functions to convert buffer data to scalars.
* common/host2net.h (buf16_to_ulong, buf16_to_uint): New.
(buf16_to_ushort, buf16_to_u16): New.
(buf32_to_size_t, buf32_to_ulong, buf32_to_uint, buf32_to_u32): New.
--
Commit 91b826a38880fd8a989318585eb502582636ddd8 was not enough to
avoid all sign extension on shift problems. Hanno Böck found a case
with an invalid read due to this problem. To fix that once and for
all almost all uses of "<< 24" and "<< 8" are changed by this patch to
use an inline function from host2net.h.
Signed-off-by: Werner Koch <[email protected]>
|
/* Setter for the "htmlify" option.  Parses VAL as a boolean into
   opt.htmlify via cmd_boolean; when HTML-ification is successfully
   switched off, the raw listing file is implicitly kept by forcing
   opt.remove_listing off.  Returns cmd_boolean's parse status.  */
cmd_spec_htmlify (const char *com, const char *val, void *place_ignored _GL_UNUSED)
{
  int ok = cmd_boolean (com, val, &opt.htmlify);

  if (ok)
    {
      if (!opt.htmlify)
        opt.remove_listing = false;
    }

  return ok;
}
| 0 |
[
"CWE-22"
] |
wget
|
18b0979357ed7dc4e11d4f2b1d7e0f5932d82aa7
| 183,914,672,699,750,350,000,000,000,000,000,000,000 | 7 |
CVE-2014-4877: Arbitrary Symlink Access
Wget was susceptible to a symlink attack which could create arbitrary
files, directories or symbolic links and set their permissions when
retrieving a directory recursively through FTP. This commit changes the
default settings in Wget such that Wget no longer creates local symbolic
links, but rather traverses them and retrieves the pointed-to file in
such a retrieval.
The old behaviour can be attained by passing the --retr-symlinks=no
option to the Wget invokation command.
|
/*
 * Record a new open stateid on @state and mark which open modes it
 * covers.  The main stateid is only overwritten when the state is not
 * delegated; the open_stateid copy is always updated.
 * Caller is expected to hold the appropriate state lock (per "_locked").
 */
static void nfs_set_open_stateid_locked(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode)
{
	if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
		memcpy(state->stateid.data, stateid->data, sizeof(state->stateid.data));
	memcpy(state->open_stateid.data, stateid->data, sizeof(state->open_stateid.data));
	switch (fmode) {
		case FMODE_READ:
			set_bit(NFS_O_RDONLY_STATE, &state->flags);
			break;
		case FMODE_WRITE:
			set_bit(NFS_O_WRONLY_STATE, &state->flags);
			break;
		/* Read+write sets only the RDWR bit; other fmode values are
		   ignored by design (no default case). */
		case FMODE_READ|FMODE_WRITE:
			set_bit(NFS_O_RDWR_STATE, &state->flags);
	}
}
| 0 |
[
"CWE-703"
] |
linux
|
dc0b027dfadfcb8a5504f7d8052754bf8d501ab9
| 38,181,591,565,063,460,000,000,000,000,000,000,000 | 16 |
NFSv4: Convert the open and close ops to use fmode
Signed-off-by: Trond Myklebust <[email protected]>
|
/*
** Emit an OP_CursorHint opcode for the cursor used by WHERE-loop pLevel,
** containing the conjunction of all WHERE terms that are safe and useful
** to push down to the cursor (b-tree layer) as a scan hint.
**
** Terms are skipped when they are virtual/already coded, depend on tables
** not yet ready, would be incorrect on the rhs of a LEFT JOIN, duplicate
** the loop's own equality constraints, contain subqueries, or (for index
** scans) reference columns absent from the index.
**
** No-op when the CursorHints optimization is disabled.
*/
static void codeCursorHint(
  struct SrcList_item *pTabItem,  /* FROM clause item */
  WhereInfo *pWInfo,    /* The where clause */
  WhereLevel *pLevel,   /* Which loop to provide hints for */
  WhereTerm *pEndRange  /* Hint this end-of-scan boundary term if not NULL */
){
  Parse *pParse = pWInfo->pParse;
  sqlite3 *db = pParse->db;
  Vdbe *v = pParse->pVdbe;
  Expr *pExpr = 0;          /* Accumulated AND of all hinted terms */
  WhereLoop *pLoop = pLevel->pWLoop;
  int iCur;
  WhereClause *pWC;
  WhereTerm *pTerm;
  int i, j;
  struct CCurHint sHint;
  Walker sWalker;

  if( OptimizationDisabled(db, SQLITE_CursorHints) ) return;
  iCur = pLevel->iTabCur;
  assert( iCur==pWInfo->pTabList->a[pLevel->iFrom].iCursor );
  sHint.iTabCur = iCur;
  sHint.iIdxCur = pLevel->iIdxCur;
  sHint.pIdx = pLoop->u.btree.pIndex;
  memset(&sWalker, 0, sizeof(sWalker));
  sWalker.pParse = pParse;
  sWalker.u.pCCurHint = &sHint;
  pWC = &pWInfo->sWC;
  for(i=0; i<pWC->nTerm; i++){
    pTerm = &pWC->a[i];
    if( pTerm->wtFlags & (TERM_VIRTUAL|TERM_CODED) ) continue;
    if( pTerm->prereqAll & pLevel->notReady ) continue;

    /* Any terms specified as part of the ON(...) clause for any LEFT
    ** JOIN for which the current table is not the rhs are omitted
    ** from the cursor-hint.
    **
    ** If this table is the rhs of a LEFT JOIN, "IS" or "IS NULL" terms
    ** that were specified as part of the WHERE clause must be excluded.
    ** This is to address the following:
    **
    **   SELECT ... t1 LEFT JOIN t2 ON (t1.a=t2.b) WHERE t2.c IS NULL;
    **
    ** Say there is a single row in t2 that matches (t1.a=t2.b), but its
    ** t2.c values is not NULL. If the (t2.c IS NULL) constraint is
    ** pushed down to the cursor, this row is filtered out, causing
    ** SQLite to synthesize a row of NULL values. Which does match the
    ** WHERE clause, and so the query returns a row. Which is incorrect.
    **
    ** For the same reason, WHERE terms such as:
    **
    **   WHERE 1 = (t2.c IS NULL)
    **
    ** are also excluded. See codeCursorHintIsOrFunction() for details.
    */
    if( pTabItem->fg.jointype & JT_LEFT ){
      /* NOTE(review): this inner pExpr deliberately shadows the outer
      ** accumulator; it only aliases pTerm->pExpr within this branch. */
      Expr *pExpr = pTerm->pExpr;
      if( !ExprHasProperty(pExpr, EP_FromJoin)
       || pExpr->iRightJoinTable!=pTabItem->iCursor
      ){
        sWalker.eCode = 0;
        sWalker.xExprCallback = codeCursorHintIsOrFunction;
        sqlite3WalkExpr(&sWalker, pTerm->pExpr);
        if( sWalker.eCode ) continue;
      }
    }else{
      if( ExprHasProperty(pTerm->pExpr, EP_FromJoin) ) continue;
    }

    /* All terms in pWLoop->aLTerm[] except pEndRange are used to initialize
    ** the cursor.  These terms are not needed as hints for a pure range
    ** scan (that has no == terms) so omit them. */
    if( pLoop->u.btree.nEq==0 && pTerm!=pEndRange ){
      for(j=0; j<pLoop->nLTerm && pLoop->aLTerm[j]!=pTerm; j++){}
      if( j<pLoop->nLTerm ) continue;
    }

    /* No subqueries or non-deterministic functions allowed */
    if( sqlite3ExprContainsSubquery(pTerm->pExpr) ) continue;

    /* For an index scan, make sure referenced columns are actually in
    ** the index. */
    if( sHint.pIdx!=0 ){
      sWalker.eCode = 0;
      sWalker.xExprCallback = codeCursorHintCheckExpr;
      sqlite3WalkExpr(&sWalker, pTerm->pExpr);
      if( sWalker.eCode ) continue;
    }

    /* If we survive all prior tests, that means this term is worth hinting */
    pExpr = sqlite3ExprAnd(pParse, pExpr, sqlite3ExprDup(db, pTerm->pExpr, 0));
  }
  if( pExpr!=0 ){
    /* Rewrite column references for the index cursor, then attach the
    ** hint expression to the cursor-open opcode stream. */
    sWalker.xExprCallback = codeCursorHintFixExpr;
    sqlite3WalkExpr(&sWalker, pExpr);
    sqlite3VdbeAddOp4(v, OP_CursorHint,
                      (sHint.pIdx ? sHint.iIdxCur : sHint.iTabCur), 0, 0,
                      (const char*)pExpr, P4_EXPR);
  }
}
| 0 |
[
"CWE-476"
] |
sqlite
|
57f7ece78410a8aae86aa4625fb7556897db384c
| 218,143,655,321,183,450,000,000,000,000,000,000,000 | 100 |
Fix a problem that comes up when using generated columns that evaluate to a
constant in an index and then making use of that index in a join.
FossilOrigin-Name: 8b12e95fec7ce6e0de82a04ca3dfcf1a8e62e233b7382aa28a8a9be6e862b1af
|
/*
 * Pick the next runnable CFS task on this runqueue, or NULL when the
 * fair class has nothing to run.  Descends the group hierarchy, picking
 * an entity per cfs_rq until a leaf (task) entity is reached, then arms
 * the high-resolution preemption tick for the chosen task.
 */
static struct task_struct *pick_next_task_fair(struct rq *rq)
{
	struct task_struct *p;
	struct cfs_rq *cfs_rq = &rq->cfs;
	struct sched_entity *se;

	if (unlikely(!cfs_rq->nr_running))
		return NULL;

	do {
		se = pick_next_entity(cfs_rq);
		/* group_cfs_rq() is NULL for a task entity — loop ends there. */
		cfs_rq = group_cfs_rq(se);
	} while (cfs_rq);

	p = task_of(se);
	hrtick_start_fair(rq, p);

	return p;
}
| 0 |
[] |
linux-2.6
|
6a6029b8cefe0ca7e82f27f3904dbedba3de4e06
| 108,963,055,352,993,180,000,000,000,000,000,000,000 | 19 |
sched: simplify sched_slice()
Use the existing calc_delta_mine() calculation for sched_slice(). This
saves a divide and simplifies the code because we share it with the
other /cfs_rq->load users.
It also improves code size:
text data bss dec hex filename
42659 2740 144 45543 b1e7 sched.o.before
42093 2740 144 44977 afb1 sched.o.after
Signed-off-by: Ingo Molnar <[email protected]>
Signed-off-by: Peter Zijlstra <[email protected]>
|
/*
 * Iteratively walk the directory tree rooted at vdd->rootent (no
 * recursion), generating ISO9660 or Joliet identifiers for each
 * directory's children and sorting them.  Virtual directories without
 * timestamps inherit the archive's birth time.  Directories deeper than
 * vdd->max_depth are not descended into; without Rock Ridge they are
 * remembered as "too deep" for later relocation handling.
 * Returns ARCHIVE_OK or a negative error from the identifier generator.
 */
isoent_traverse_tree(struct archive_write *a, struct vdd* vdd)
{
	struct iso9660 *iso9660 = a->format_data;
	struct isoent *np;
	struct idr idr;
	int depth;
	int r;
	int (*genid)(struct archive_write *, struct isoent *, struct idr *);

	idr_init(iso9660, vdd, &idr);
	np = vdd->rootent;
	depth = 0;
	/* Identifier rules differ between the Joliet and ISO9660 trees. */
	if (vdd->vdd_type == VDD_JOLIET)
		genid = isoent_gen_joliet_identifier;
	else
		genid = isoent_gen_iso9660_identifier;
	do {
		if (np->virtual &&
		    !archive_entry_mtime_is_set(np->file->entry)) {
			/* Set properly times to virtual directory */
			archive_entry_set_mtime(np->file->entry,
			    iso9660->birth_time, 0);
			archive_entry_set_atime(np->file->entry,
			    iso9660->birth_time, 0);
			archive_entry_set_ctime(np->file->entry,
			    iso9660->birth_time, 0);
		}
		if (np->children.first != NULL) {
			if (vdd->vdd_type != VDD_JOLIET &&
			    !iso9660->opt.rr && depth + 1 >= vdd->max_depth) {
				/* Too deep for plain ISO9660 without Rock
				 * Ridge: remember the offender, don't descend. */
				if (np->children.cnt > 0)
					iso9660->directories_too_deep = np;
			} else {
				/* Generate Identifier */
				r = genid(a, np, &idr);
				if (r < 0)
					goto exit_traverse_tree;
				r = isoent_make_sorted_files(a, np, &idr);
				if (r < 0)
					goto exit_traverse_tree;

				if (np->subdirs.first != NULL &&
				    depth + 1 < vdd->max_depth) {
					/* Enter to sub directories. */
					np = np->subdirs.first;
					depth++;
					continue;
				}
			}
		}
		/* Climb back up until a sibling exists; the root is its own
		 * parent, which terminates both loops. */
		while (np != np->parent) {
			if (np->drnext == NULL) {
				/* Return to the parent directory. */
				np = np->parent;
				depth--;
			} else {
				np = np->drnext;
				break;
			}
		}
	} while (np != np->parent);

	r = ARCHIVE_OK;
exit_traverse_tree:
	idr_cleanup(&idr);

	return (r);
}
| 0 |
[
"CWE-190"
] |
libarchive
|
3014e19820ea53c15c90f9d447ca3e668a0b76c6
| 79,575,849,497,665,830,000,000,000,000,000,000,000 | 68 |
Issue 711: Be more careful about verifying filename lengths when writing ISO9660 archives
* Don't cast size_t to int, since this can lead to overflow
on machines where sizeof(int) < sizeof(size_t)
* Check a + b > limit by writing it as
a > limit || b > limit || a + b > limit
to avoid problems when a + b wraps around.
|
/* Accessor: return the content-encryption algorithm of the JWE, or
 * R_JWA_ENC_UNKNOWN when jwe is NULL. */
jwa_enc r_jwe_get_enc(jwe_t * jwe) {
  return (jwe == NULL) ? R_JWA_ENC_UNKNOWN : jwe->enc;
}
| 0 |
[
"CWE-787"
] |
rhonabwy
|
b4c2923a1ba4fabf9b55a89244127e153a3e549b
| 205,353,951,367,598,700,000,000,000,000,000,000,000 | 7 |
Fix buffer overflow on r_jwe_aesgcm_key_unwrap
|
/*
  WL#4435 regression test: prepare and execute a stored procedure that
  returns several result sets, then close the statement WITHOUT fetching,
  repeating several times to catch stale/incorrectly-flushed prepared-
  statement results.  Any failure aborts via the my*/check_* helpers.
*/
static void test_wl4435_2()
{
  MYSQL_STMT *stmt;
  int  i;
  int  rc;
  char query[MAX_TEST_QUERY_LENGTH];

  myheader("test_wl4435_2");
  mct_start_logging("test_wl4435_2");

  /*
    Do a few iterations so that we catch any problem with incorrect
    handling/flushing prepared statement results.
  */

  for (i= 0; i < 10; ++i)
  {
    /*
      Prepare a procedure. That can be moved out of the loop, but it was
      left in the loop for the sake of having as many statements as
      possible.
    */

    rc= mysql_query(mysql, "DROP PROCEDURE IF EXISTS p1");
    myquery(rc);

    rc= mysql_query(mysql,
                    "CREATE PROCEDURE p1()"
                    "BEGIN "
                    "  SELECT 1; "
                    "  SELECT 2, 3 UNION SELECT 4, 5; "
                    "  SELECT 6, 7, 8; "
                    "END");
    myquery(rc);

    /* Invoke a procedure, that returns several result sets. */

    strmov(query, "CALL p1()");
    stmt= mysql_simple_prepare(mysql, query);
    check_stmt(stmt);

    /* Execute! */

    rc= mysql_stmt_execute(stmt);
    check_execute(stmt, rc);

    /* Flush all the results. */

    mysql_stmt_close(stmt);

    /* Clean up. */
    rc= mysql_commit(mysql);
    myquery(rc);

    rc= mysql_query(mysql, "DROP PROCEDURE p1");
    myquery(rc);
  }
  mct_close_log();
}
| 0 |
[
"CWE-416"
] |
mysql-server
|
4797ea0b772d5f4c5889bc552424132806f46e93
| 327,743,632,394,290,500,000,000,000,000,000,000,000 | 59 |
BUG#17512527: LIST HANDLING INCORRECT IN MYSQL_PRUNE_STMT_LIST()
Analysis:
---------
Invalid memory access maybe observed when using prepared statements if:
a) The mysql client connection is lost after statement preparation
is complete and
b) There is at least one statement which is in initialized state but
not prepared yet.
When the client detects a closed connection, it calls end_server()
to shutdown the connection. As part of the clean up, the
mysql_prune_stmt_list() removes the statements which has transitioned
beyond the initialized state and retains only the statements which
are in a initialized state. During this processing, the initialized
statements are moved from 'mysql->stmts' to a temporary 'pruned_list'.
When moving the first 'INIT_DONE' element to the pruned_list,
'element->next' is set to NULL. Hence the rest of the list is never
traversed and the statements which have transitioned beyond the
initialized state are never invalidated.
When the mysql_stmt_close() is called for the statement which is not
invalidated; the statements list is updated in order to remove the
statement. This would end up accessing freed memory(freed by the
mysql_stmt_close() for a previous statement in the list).
Fix:
---
mysql_prune_stmt_list() called list_add() incorrectly to create a
temporary list. The use case of list_add() is to add a single
element to the front of the doubly linked list.
mysql_prune_stmt_list() called list_add() by passing an entire
list as the 'element'.
mysql_prune_stmt_list() now uses list_delete() to remove the
statement which has transitioned beyond the initialized phase.
Thus the statement list would contain only elements where the
the state of the statement is initialized.
Note: Run the test with valgrind-mysqltest and leak-check=full
option to see the invalid memory access.
|
/* seq_file start callback for /proc UDP listings: position the iterator
 * at entry *pos-1, or return SEQ_START_TOKEN for the header row when
 * *pos is 0.  The bucket is reset to the sentinel MAX_UDP_PORTS so the
 * first lookup starts a fresh hash-table scan. */
static void *udp_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct udp_iter_state *state = seq->private;
	state->bucket = MAX_UDP_PORTS;

	return *pos ? udp_get_idx(seq, *pos-1) : SEQ_START_TOKEN;
}
| 0 |
[
"CWE-400"
] |
linux-2.6
|
c377411f2494a931ff7facdbb3a6839b1266bcf6
| 90,152,448,635,463,180,000,000,000,000,000,000,000 | 7 |
net: sk_add_backlog() take rmem_alloc into account
Current socket backlog limit is not enough to really stop DDOS attacks,
because user thread spend many time to process a full backlog each
round, and user might crazy spin on socket lock.
We should add backlog size and receive_queue size (aka rmem_alloc) to
pace writers, and let user run without being slow down too much.
Introduce a sk_rcvqueues_full() helper, to avoid taking socket lock in
stress situations.
Under huge stress from a multiqueue/RPS enabled NIC, a single flow udp
receiver can now process ~200.000 pps (instead of ~100 pps before the
patch) on a 8 core machine.
Signed-off-by: Eric Dumazet <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
/*
 * Load a PRGA (keystream) file into *dest.
 *
 * dest: in/out.  When *dest is NULL a 1501-byte buffer is allocated and
 *       ownership passes to the caller.  At most 1500 bytes are read.
 * file: path of the keystream file; must not be NULL.
 *
 * Side effect: opt.prgalen is set to the number of bytes loaded.
 * Returns 0 on success, 1 on any error (a message is printed).
 *
 * Fixes vs. original: the malloc() result is checked, and ftell()
 * failure / an empty file (size <= 0) is rejected instead of being
 * passed to fread() as a huge size_t.
 */
int read_prga(unsigned char **dest, char *file)
{
    FILE *f;
    long size;

    if(file == NULL) return( 1 );
    if(*dest == NULL) *dest = (unsigned char*) malloc(1501);
    if(*dest == NULL)
    {
        fprintf( stderr, "malloc failed\n" );
        return( 1 );
    }

    f = fopen(file, "r");

    if(f == NULL)
    {
        printf("Error opening %s\n", file);
        return( 1 );
    }

    /* Determine file size; reject ftell() errors and empty files. */
    if( fseek(f, 0, SEEK_END) != 0 || (size = ftell(f)) <= 0 )
    {
        fclose(f);
        fprintf( stderr, "invalid or empty PRGA file\n" );
        return( 1 );
    }
    rewind(f);

    if(size > 1500) size = 1500;

    if( fread( (*dest), size, 1, f ) != 1 )
    {
        fclose(f);
        fprintf( stderr, "fread failed\n" );
        return( 1 );
    }

    opt.prgalen = size;

    fclose(f);
    return( 0 );
}
| 0 |
[
"CWE-787"
] |
aircrack-ng
|
091b153f294b9b695b0b2831e65936438b550d7b
| 137,785,672,738,286,990,000,000,000,000,000,000,000 | 34 |
Aireplay-ng: Fixed tcp_test stack overflow (Closes #14 on GitHub).
git-svn-id: http://svn.aircrack-ng.org/trunk@2417 28c6078b-6c39-48e3-add9-af49d547ecab
|
input_parse_screen(struct input_ctx *ictx, struct screen *s,
screen_write_init_ctx_cb cb, void *arg, u_char *buf, size_t len)
{
struct screen_write_ctx *sctx = &ictx->ctx;
if (len == 0)
return;
screen_write_start_callback(sctx, s, cb, arg);
input_parse(ictx, buf, len);
screen_write_stop(sctx);
}
| 0 |
[
"CWE-787"
] |
tmux
|
a868bacb46e3c900530bed47a1c6f85b0fbe701c
| 195,953,457,782,995,760,000,000,000,000,000,000,000 | 12 |
Do not write after the end of the array and overwrite the stack when
colon-separated SGR sequences contain empty arguments. Reported by Sergey
Nizovtsev.
|
static bool fix_general_log_file(sys_var *self, THD *thd, enum_var_type type)
{
return fix_log(&opt_logname, default_logfile_name, ".log", opt_log,
reopen_general_log);
}
| 0 |
[
"CWE-264"
] |
mysql-server
|
48bd8b16fe382be302c6f0b45931be5aa6f29a0e
| 225,908,184,337,183,470,000,000,000,000,000,000,000 | 5 |
Bug#24388753: PRIVILEGE ESCALATION USING MYSQLD_SAFE
[This is the 5.5/5.6 version of the bugfix].
The problem was that it was possible to write log files ending
in .ini/.cnf that later could be parsed as an options file.
This made it possible for users to specify startup options
without the permissions to do so.
This patch fixes the problem by disallowing general query log
and slow query log to be written to files ending in .ini and .cnf.
|
TEST(QueryProjectionTest, IdInclusionProjectionDoesNotPreserveOtherFields) {
auto proj = createProjection("{}", "{_id: 1}");
ASSERT_FALSE(proj.isFieldRetainedExactly("a"));
}
| 0 |
[
"CWE-732"
] |
mongo
|
cd583b6c4d8aa2364f255992708b9bb54e110cf4
| 228,645,302,678,079,000,000,000,000,000,000,000,000 | 4 |
SERVER-53929 Add stricter parser checks around positional projection
|
bool sys_var_pluginvar::do_check(THD *thd, set_var *var)
{
st_item_value_holder value;
DBUG_ASSERT(!is_readonly());
DBUG_ASSERT(plugin_var->check);
value.value_type= item_value_type;
value.val_str= item_val_str;
value.val_int= item_val_int;
value.val_real= item_val_real;
value.is_unsigned= item_is_unsigned;
value.item= var->value;
return plugin_var->check(thd, plugin_var, &var->save_result, &value);
}
| 0 |
[
"CWE-416"
] |
server
|
c05fd700970ad45735caed3a6f9930d4ce19a3bd
| 92,102,002,285,618,860,000,000,000,000,000,000,000 | 15 |
MDEV-26323 use-after-poison issue of MariaDB server
|
static inline unsigned char *skb_checksum_start(const struct sk_buff *skb)
{
return skb->head + skb->csum_start;
| 0 |
[
"CWE-20"
] |
linux
|
2b16f048729bf35e6c28a40cbfad07239f9dcd90
| 146,038,077,496,983,370,000,000,000,000,000,000,000 | 4 |
net: create skb_gso_validate_mac_len()
If you take a GSO skb, and split it into packets, will the MAC
length (L2 + L3 + L4 headers + payload) of those packets be small
enough to fit within a given length?
Move skb_gso_mac_seglen() to skbuff.h with other related functions
like skb_gso_network_seglen() so we can use it, and then create
skb_gso_validate_mac_len to do the full calculation.
Signed-off-by: Daniel Axtens <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
int store_field(Field *from) { return from->save_in_field(this); }
| 0 |
[
"CWE-416",
"CWE-703"
] |
server
|
08c7ab404f69d9c4ca6ca7a9cf7eec74c804f917
| 209,143,882,155,109,780,000,000,000,000,000,000,000 | 1 |
MDEV-24176 Server crashes after insert in the table with virtual
column generated using date_format() and if()
vcol_info->expr is allocated on expr_arena at parsing stage. Since
expr item is allocated on expr_arena all its containee items must be
allocated on expr_arena too. Otherwise fix_session_expr() will
encounter prematurely freed item.
When table is reopened from cache vcol_info contains stale
expression. We refresh expression via TABLE::vcol_fix_exprs() but
first we must prepare a proper context (Vcol_expr_context) which meets
some requirements:
1. As noted above expr update must be done on expr_arena as there may
be new items created. It was a bug in fix_session_expr_for_read() and
was just not reproduced because of no second refix. Now refix is done
for more cases so it does reproduce. Tests affected: vcol.binlog
2. Also name resolution context must be narrowed to the single table.
Tested by: vcol.update main.default vcol.vcol_syntax gcol.gcol_bugfixes
3. sql_mode must be clean and not fail expr update.
sql_mode such as MODE_NO_BACKSLASH_ESCAPES, MODE_NO_ZERO_IN_DATE, etc
must not affect vcol expression update. If the table was created
successfully any further evaluation must not fail. Tests affected:
main.func_like
Reviewed by: Sergei Golubchik <[email protected]>
|
nma_gconf_connection_new_from_connection (GConfClient *client,
const char *conf_dir,
NMConnection *connection)
{
g_return_val_if_fail (GCONF_IS_CLIENT (client), NULL);
g_return_val_if_fail (conf_dir != NULL, NULL);
g_return_val_if_fail (NM_IS_CONNECTION (connection), NULL);
return (NMAGConfConnection *) g_object_new (NMA_TYPE_GCONF_CONNECTION,
NMA_GCONF_CONNECTION_CLIENT, client,
NMA_GCONF_CONNECTION_DIR, conf_dir,
NM_EXPORTED_CONNECTION_CONNECTION, connection,
NULL);
}
| 0 |
[
"CWE-200"
] |
network-manager-applet
|
8627880e07c8345f69ed639325280c7f62a8f894
| 42,614,409,342,807,740,000,000,000,000,000,000,000 | 14 |
editor: prevent any registration of objects on the system bus
D-Bus access-control is name-based; so requests for a specific name
are allowed/denied based on the rules in /etc/dbus-1/system.d. But
apparently apps still get a non-named service on the bus, and if we
register *any* object even though we don't have a named service,
dbus and dbus-glib will happily proxy signals. Since the connection
editor shouldn't ever expose anything having to do with connections
on any bus, make sure that's the case.
|
static void khugepaged_do_scan(struct page **hpage)
{
unsigned int progress = 0, pass_through_head = 0;
unsigned int pages = khugepaged_pages_to_scan;
barrier(); /* write khugepaged_pages_to_scan to local stack */
while (progress < pages) {
cond_resched();
#ifndef CONFIG_NUMA
if (!*hpage) {
*hpage = alloc_hugepage(khugepaged_defrag());
if (unlikely(!*hpage)) {
count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
break;
}
count_vm_event(THP_COLLAPSE_ALLOC);
}
#else
if (IS_ERR(*hpage))
break;
#endif
if (unlikely(kthread_should_stop() || freezing(current)))
break;
spin_lock(&khugepaged_mm_lock);
if (!khugepaged_scan.mm_slot)
pass_through_head++;
if (khugepaged_has_work() &&
pass_through_head < 2)
progress += khugepaged_scan_mm_slot(pages - progress,
hpage);
else
progress = pages;
spin_unlock(&khugepaged_mm_lock);
}
}
| 0 |
[
"CWE-399"
] |
linux
|
78f11a255749d09025f54d4e2df4fbcb031530e2
| 64,168,602,687,032,930,000,000,000,000,000,000,000 | 39 |
mm: thp: fix /dev/zero MAP_PRIVATE and vm_flags cleanups
The huge_memory.c THP page fault was allowed to run if vm_ops was null
(which would succeed for /dev/zero MAP_PRIVATE, as the f_op->mmap wouldn't
setup a special vma->vm_ops and it would fallback to regular anonymous
memory) but other THP logics weren't fully activated for vmas with vm_file
not NULL (/dev/zero has a not NULL vma->vm_file).
So this removes the vm_file checks so that /dev/zero also can safely use
THP (the other albeit safer approach to fix this bug would have been to
prevent the THP initial page fault to run if vm_file was set).
After removing the vm_file checks, this also makes huge_memory.c stricter
in khugepaged for the DEBUG_VM=y case. It doesn't replace the vm_file
check with a is_pfn_mapping check (but it keeps checking for VM_PFNMAP
under VM_BUG_ON) because for a is_cow_mapping() mapping VM_PFNMAP should
only be allowed to exist before the first page fault, and in turn when
vma->anon_vma is null (so preventing khugepaged registration). So I tend
to think the previous comment saying if vm_file was set, VM_PFNMAP might
have been set and we could still be registered in khugepaged (despite
anon_vma was not NULL to be registered in khugepaged) was too paranoid.
The is_linear_pfn_mapping check is also I think superfluous (as described
by comment) but under DEBUG_VM it is safe to stay.
Addresses https://bugzilla.kernel.org/show_bug.cgi?id=33682
Signed-off-by: Andrea Arcangeli <[email protected]>
Reported-by: Caspar Zhang <[email protected]>
Acked-by: Mel Gorman <[email protected]>
Acked-by: Rik van Riel <[email protected]>
Cc: <[email protected]> [2.6.38.x]
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
BN_ULONG BN_get_word(const BIGNUM *a)
{
if (a->top > 1)
return BN_MASK2;
else if (a->top == 1)
return a->d[0];
/* a->top == 0 */
return 0;
}
| 0 |
[
"CWE-310"
] |
openssl
|
f9b6c0ba4c02497782f801e3c45688f3efaac55c
| 222,490,349,658,244,650,000,000,000,000,000,000,000 | 9 |
Fix for CVE-2014-0076
Fix for the attack described in the paper "Recovering OpenSSL
ECDSA Nonces Using the FLUSH+RELOAD Cache Side-channel Attack"
by Yuval Yarom and Naomi Benger. Details can be obtained from:
http://eprint.iacr.org/2014/140
Thanks to Yuval Yarom and Naomi Benger for discovering this
flaw and to Yuval Yarom for supplying a fix.
(cherry picked from commit 2198be3483259de374f91e57d247d0fc667aef29)
Conflicts:
CHANGES
|
int hclge_pfc_rx_stats_get(struct hclge_dev *hdev, u64 *stats)
{
return hclge_pfc_stats_get(hdev, HCLGE_OPC_QUERY_PFC_RX_PKT_CNT, stats);
}
| 0 |
[
"CWE-125"
] |
linux
|
04f25edb48c441fc278ecc154c270f16966cbb90
| 117,836,242,690,763,220,000,000,000,000,000,000,000 | 4 |
net: hns3: add some error checking in hclge_tm module
When hdev->tx_sch_mode is HCLGE_FLAG_VNET_BASE_SCH_MODE, the
hclge_tm_schd_mode_vnet_base_cfg calls hclge_tm_pri_schd_mode_cfg
with vport->vport_id as pri_id, which is used as index for
hdev->tm_info.tc_info, it will cause out of bound access issue
if vport_id is equal to or larger than HNAE3_MAX_TC.
Also hardware only support maximum speed of HCLGE_ETHER_MAX_RATE.
So this patch adds two checks for above cases.
Fixes: 848440544b41 ("net: hns3: Add support of TX Scheduler & Shaper to HNS3 driver")
Signed-off-by: Yunsheng Lin <[email protected]>
Signed-off-by: Peng Li <[email protected]>
Signed-off-by: Huazhong Tan <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
PHP_FUNCTION(imageistruecolor)
{
zval *IM;
gdImagePtr im;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "r", &IM) == FAILURE) {
return;
}
ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd);
RETURN_BOOL(im->trueColor);
}
| 0 |
[
"CWE-703",
"CWE-189"
] |
php-src
|
2938329ce19cb8c4197dec146c3ec887c6f61d01
| 306,228,812,210,749,000,000,000,000,000,000,000,000 | 13 |
Fixed bug #66356 (Heap Overflow Vulnerability in imagecrop())
And also fixed the bug: arguments are altered after some calls
|
void ipc_rcu_getref(void *ptr)
{
container_of(ptr, struct ipc_rcu_hdr, data)->refcount++;
}
| 1 |
[
"CWE-703",
"CWE-189"
] |
linux
|
6062a8dc0517bce23e3c2f7d2fea5e22411269a3
| 238,549,685,646,292,100,000,000,000,000,000,000,000 | 4 |
ipc,sem: fine grained locking for semtimedop
Introduce finer grained locking for semtimedop, to handle the common case
of a program wanting to manipulate one semaphore from an array with
multiple semaphores.
If the call is a semop manipulating just one semaphore in an array with
multiple semaphores, only take the lock for that semaphore itself.
If the call needs to manipulate multiple semaphores, or another caller is
in a transaction that manipulates multiple semaphores, the sem_array lock
is taken, as well as all the locks for the individual semaphores.
On a 24 CPU system, performance numbers with the semop-multi
test with N threads and N semaphores, look like this:
vanilla Davidlohr's Davidlohr's + Davidlohr's +
threads patches rwlock patches v3 patches
10 610652 726325 1783589 2142206
20 341570 365699 1520453 1977878
30 288102 307037 1498167 2037995
40 290714 305955 1612665 2256484
50 288620 312890 1733453 2650292
60 289987 306043 1649360 2388008
70 291298 306347 1723167 2717486
80 290948 305662 1729545 2763582
90 290996 306680 1736021 2757524
100 292243 306700 1773700 3059159
[[email protected]: do not call sem_lock when bogus sma]
[[email protected]: make refcounter atomic]
Signed-off-by: Rik van Riel <[email protected]>
Suggested-by: Linus Torvalds <[email protected]>
Acked-by: Davidlohr Bueso <[email protected]>
Cc: Chegu Vinod <[email protected]>
Cc: Jason Low <[email protected]>
Reviewed-by: Michel Lespinasse <[email protected]>
Cc: Peter Hurley <[email protected]>
Cc: Stanislav Kinsbursky <[email protected]>
Tested-by: Emmanuel Benisty <[email protected]>
Tested-by: Sedat Dilek <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
mrb_io_sysseek(mrb_state *mrb, mrb_value io)
{
struct mrb_io *fptr;
off_t pos;
mrb_int offset, whence = -1;
mrb_get_args(mrb, "i|i", &offset, &whence);
if (whence < 0) {
whence = 0;
}
fptr = (struct mrb_io *)mrb_get_datatype(mrb, io, &mrb_io_type);
pos = lseek(fptr->fd, (off_t)offset, (int)whence);
if (pos == -1) {
mrb_sys_fail(mrb, "sysseek");
}
if (pos > MRB_INT_MAX) {
#ifndef MRB_WITHOUT_FLOAT
return mrb_float_value(mrb, (mrb_float)pos);
#else
mrb_raise(mrb, E_IO_ERROR, "sysseek reached too far for MRB_WITHOUT_FLOAT");
#endif
} else {
return mrb_fixnum_value(pos);
}
}
| 0 |
[
"CWE-416",
"CWE-787"
] |
mruby
|
b51b21fc63c9805862322551387d9036f2b63433
| 102,127,989,853,418,600,000,000,000,000,000,000,000 | 26 |
Fix `use after free in File#initilialize_copy`; fix #4001
The bug and the fix were reported by https://hackerone.com/pnoltof
|
usage(void)
{
static const char *msg[] =
{
"Usage: test_tparm [options] [capability] [value1 [value2 [...]]]",
"",
"Print all distinct combinations of given capability.",
"",
"Options:",
" -T TERM override $TERM; this may be a comma-separated list or \"-\"",
" to read a list from standard-input",
" -a if capability is given, test all combinations of values",
" -r NUM repeat tests NUM times",
" -v show values and results",
};
unsigned n;
for (n = 0; n < SIZEOF(msg); ++n) {
fprintf(stderr, "%s\n", msg[n]);
}
ExitProgram(EXIT_FAILURE);
}
| 0 |
[] |
ncurses
|
790a85dbd4a81d5f5d8dd02a44d84f01512ef443
| 137,734,356,371,160,130,000,000,000,000,000,000,000 | 21 |
ncurses 6.2 - patch 20200531
+ correct configure version-check/warnng for g++ to allow for 10.x
+ re-enable "bel" in konsole-base (report by Nia Huang)
+ add linux-s entry (patch by Alexandre Montaron).
+ drop long-obsolete convert_configure.pl
+ add test/test_parm.c, for checking tparm changes.
+ improve parameter-checking for tparm, adding function _nc_tiparm() to
handle the most-used case, which accepts only numeric parameters
(report/testcase by "puppet-meteor").
+ use a more conservative estimate of the buffer-size in lib_tparm.c's
save_text() and save_number(), in case the sprintf() function
passes-through unexpected characters from a format specifier
(report/testcase by "puppet-meteor").
+ add a check for end-of-string in cvtchar to handle a malformed
string in infotocap (report/testcase by "puppet-meteor").
|
void explain(OperationContext* opCtx,
ExplainOptions::Verbosity verbosity,
BSONObjBuilder* result) override {
uassertStatusOK(_command->explain(opCtx, *_request, verbosity, result));
}
| 0 |
[
"CWE-20"
] |
mongo
|
d315547544d7146b93a8e6e94cc4b88cd0d19c95
| 129,189,214,099,070,740,000,000,000,000,000,000,000 | 5 |
SERVER-38275 ban explain with UUID
|
changeDependencyFor(Oid classId, Oid objectId,
Oid refClassId, Oid oldRefObjectId,
Oid newRefObjectId)
{
long count = 0;
Relation depRel;
ScanKeyData key[2];
SysScanDesc scan;
HeapTuple tup;
ObjectAddress objAddr;
ObjectAddress depAddr;
bool oldIsPinned;
bool newIsPinned;
depRel = table_open(DependRelationId, RowExclusiveLock);
/*
* Check to see if either oldRefObjectId or newRefObjectId is pinned.
* Pinned objects should not have any dependency entries pointing to them,
* so in these cases we should add or remove a pg_depend entry, or do
* nothing at all, rather than update an entry as in the normal case.
*/
objAddr.classId = refClassId;
objAddr.objectId = oldRefObjectId;
objAddr.objectSubId = 0;
oldIsPinned = isObjectPinned(&objAddr, depRel);
objAddr.objectId = newRefObjectId;
newIsPinned = isObjectPinned(&objAddr, depRel);
if (oldIsPinned)
{
table_close(depRel, RowExclusiveLock);
/*
* If both are pinned, we need do nothing. However, return 1 not 0,
* else callers will think this is an error case.
*/
if (newIsPinned)
return 1;
/*
* There is no old dependency record, but we should insert a new one.
* Assume a normal dependency is wanted.
*/
depAddr.classId = classId;
depAddr.objectId = objectId;
depAddr.objectSubId = 0;
recordDependencyOn(&depAddr, &objAddr, DEPENDENCY_NORMAL);
return 1;
}
/* There should be existing dependency record(s), so search. */
ScanKeyInit(&key[0],
Anum_pg_depend_classid,
BTEqualStrategyNumber, F_OIDEQ,
ObjectIdGetDatum(classId));
ScanKeyInit(&key[1],
Anum_pg_depend_objid,
BTEqualStrategyNumber, F_OIDEQ,
ObjectIdGetDatum(objectId));
scan = systable_beginscan(depRel, DependDependerIndexId, true,
NULL, 2, key);
while (HeapTupleIsValid((tup = systable_getnext(scan))))
{
Form_pg_depend depform = (Form_pg_depend) GETSTRUCT(tup);
if (depform->refclassid == refClassId &&
depform->refobjid == oldRefObjectId)
{
if (newIsPinned)
CatalogTupleDelete(depRel, &tup->t_self);
else
{
/* make a modifiable copy */
tup = heap_copytuple(tup);
depform = (Form_pg_depend) GETSTRUCT(tup);
depform->refobjid = newRefObjectId;
CatalogTupleUpdate(depRel, &tup->t_self, tup);
heap_freetuple(tup);
}
count++;
}
}
systable_endscan(scan);
table_close(depRel, RowExclusiveLock);
return count;
}
| 0 |
[
"CWE-94"
] |
postgres
|
7e92f78abe80e4b30e648a40073abb59057e21f8
| 229,541,559,643,104,740,000,000,000,000,000,000,000 | 100 |
In extensions, don't replace objects not belonging to the extension.
Previously, if an extension script did CREATE OR REPLACE and there was
an existing object not belonging to the extension, it would overwrite
the object and adopt it into the extension. This is problematic, first
because the overwrite is probably unintentional, and second because we
didn't change the object's ownership. Thus a hostile user could create
an object in advance of an expected CREATE EXTENSION command, and would
then have ownership rights on an extension object, which could be
modified for trojan-horse-type attacks.
Hence, forbid CREATE OR REPLACE of an existing object unless it already
belongs to the extension. (Note that we've always forbidden replacing
an object that belongs to some other extension; only the behavior for
previously-free-standing objects changes here.)
For the same reason, also fail CREATE IF NOT EXISTS when there is
an existing object that doesn't belong to the extension.
Our thanks to Sven Klemm for reporting this problem.
Security: CVE-2022-2625
|
static OPJ_BOOL opj_j2k_write_cod( opj_j2k_t *p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager )
{
opj_cp_t *l_cp = 00;
opj_tcp_t *l_tcp = 00;
OPJ_UINT32 l_code_size,l_remaining_size;
OPJ_BYTE * l_current_data = 00;
/* preconditions */
assert(p_j2k != 00);
assert(p_manager != 00);
assert(p_stream != 00);
l_cp = &(p_j2k->m_cp);
l_tcp = &l_cp->tcps[p_j2k->m_current_tile_number];
l_code_size = 9 + opj_j2k_get_SPCod_SPCoc_size(p_j2k,p_j2k->m_current_tile_number,0);
l_remaining_size = l_code_size;
if (l_code_size > p_j2k->m_specific_param.m_encoder.m_header_tile_data_size) {
OPJ_BYTE *new_header_tile_data = (OPJ_BYTE *) opj_realloc(p_j2k->m_specific_param.m_encoder.m_header_tile_data, l_code_size);
if (! new_header_tile_data) {
opj_free(p_j2k->m_specific_param.m_encoder.m_header_tile_data);
p_j2k->m_specific_param.m_encoder.m_header_tile_data = NULL;
p_j2k->m_specific_param.m_encoder.m_header_tile_data_size = 0;
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to write COD marker\n");
return OPJ_FALSE;
}
p_j2k->m_specific_param.m_encoder.m_header_tile_data = new_header_tile_data;
p_j2k->m_specific_param.m_encoder.m_header_tile_data_size = l_code_size;
}
l_current_data = p_j2k->m_specific_param.m_encoder.m_header_tile_data;
opj_write_bytes(l_current_data,J2K_MS_COD,2); /* COD */
l_current_data += 2;
opj_write_bytes(l_current_data,l_code_size-2,2); /* L_COD */
l_current_data += 2;
opj_write_bytes(l_current_data,l_tcp->csty,1); /* Scod */
++l_current_data;
opj_write_bytes(l_current_data,l_tcp->prg,1); /* SGcod (A) */
++l_current_data;
opj_write_bytes(l_current_data,l_tcp->numlayers,2); /* SGcod (B) */
l_current_data+=2;
opj_write_bytes(l_current_data,l_tcp->mct,1); /* SGcod (C) */
++l_current_data;
l_remaining_size -= 9;
if (! opj_j2k_write_SPCod_SPCoc(p_j2k,p_j2k->m_current_tile_number,0,l_current_data,&l_remaining_size,p_manager)) {
opj_event_msg(p_manager, EVT_ERROR, "Error writing COD marker\n");
return OPJ_FALSE;
}
if (l_remaining_size != 0) {
opj_event_msg(p_manager, EVT_ERROR, "Error writing COD marker\n");
return OPJ_FALSE;
}
if (opj_stream_write_data(p_stream,p_j2k->m_specific_param.m_encoder.m_header_tile_data,l_code_size,p_manager) != l_code_size) {
return OPJ_FALSE;
}
return OPJ_TRUE;
}
| 0 |
[
"CWE-416"
] |
openjpeg
|
940100c28ae28931722290794889cf84a92c5f6f
| 282,360,588,974,069,700,000,000,000,000,000,000,000 | 70 |
Fix potential use-after-free in opj_j2k_write_mco function
Fixes #563
|
jbig2_image_compose_opt_XNOR(const uint8_t *s, uint8_t *d, int early, int late, uint8_t mask, uint8_t rightmask, uint32_t bytewidth, uint32_t h, uint32_t shift, uint32_t dstride, uint32_t sstride)
{
if (early || late)
template_image_compose_opt(s, d, early, late, mask, rightmask, bytewidth, h, shift, dstride, sstride, JBIG2_COMPOSE_XNOR);
else
template_image_compose_opt(s, d, 0, 0, mask, rightmask, bytewidth, h, shift, dstride, sstride, JBIG2_COMPOSE_XNOR);
}
| 0 |
[
"CWE-787"
] |
jbig2dec
|
0726320a4b55078e9d8deb590e477d598b3da66e
| 269,326,375,636,593,900,000,000,000,000,000,000,000 | 7 |
Fix OSS-Fuzz issue 20332: buffer overflow in jbig2_image_compose.
With extreme values of x/y/w/h we can get overflow. Test for this
and exit safely.
Thanks for OSS-Fuzz for reporting.
|
static int vcpu_run(struct kvm_vcpu *vcpu)
{
int r;
vcpu->arch.l1tf_flush_l1d = true;
for (;;) {
/*
* If another guest vCPU requests a PV TLB flush in the middle
* of instruction emulation, the rest of the emulation could
* use a stale page translation. Assume that any code after
* this point can start executing an instruction.
*/
vcpu->arch.at_instruction_boundary = false;
if (kvm_vcpu_running(vcpu)) {
r = vcpu_enter_guest(vcpu);
} else {
r = vcpu_block(vcpu);
}
if (r <= 0)
break;
kvm_clear_request(KVM_REQ_UNBLOCK, vcpu);
if (kvm_xen_has_pending_events(vcpu))
kvm_xen_inject_pending_events(vcpu);
if (kvm_cpu_has_pending_timer(vcpu))
kvm_inject_pending_timer_irqs(vcpu);
if (dm_request_for_irq_injection(vcpu) &&
kvm_vcpu_ready_for_interrupt_injection(vcpu)) {
r = 0;
vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
++vcpu->stat.request_irq_exits;
break;
}
if (__xfer_to_guest_mode_work_pending()) {
kvm_vcpu_srcu_read_unlock(vcpu);
r = xfer_to_guest_mode_handle_work(vcpu);
kvm_vcpu_srcu_read_lock(vcpu);
if (r)
return r;
}
}
return r;
}
| 0 |
[
"CWE-703"
] |
linux
|
6cd88243c7e03845a450795e134b488fc2afb736
| 124,530,075,821,709,700,000,000,000,000,000,000,000 | 49 |
KVM: x86: do not report a vCPU as preempted outside instruction boundaries
If a vCPU is outside guest mode and is scheduled out, it might be in the
process of making a memory access. A problem occurs if another vCPU uses
the PV TLB flush feature during the period when the vCPU is scheduled
out, and a virtual address has already been translated but has not yet
been accessed, because this is equivalent to using a stale TLB entry.
To avoid this, only report a vCPU as preempted if sure that the guest
is at an instruction boundary. A rescheduling request will be delivered
to the host physical CPU as an external interrupt, so for simplicity
consider any vmexit *not* instruction boundary except for external
interrupts.
It would in principle be okay to report the vCPU as preempted also
if it is sleeping in kvm_vcpu_block(): a TLB flush IPI will incur the
vmentry/vmexit overhead unnecessarily, and optimistic spinning is
also unlikely to succeed. However, leave it for later because right
now kvm_vcpu_check_block() is doing memory accesses. Even
though the TLB flush issue only applies to virtual memory address,
it's very much preferrable to be conservative.
Reported-by: Jann Horn <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
|
glob3(Char *pathbuf, Char *pathbuf_last, Char *pathend, Char *pathend_last,
Char *pattern, Char *restpattern, Char *restpattern_last, glob_t *pglob,
struct glob_lim *limitp, int recursion)
{
struct dirent *dp;
DIR *dirp;
int err;
char buf[PATH_MAX];
/*
* The readdirfunc declaration can't be prototyped, because it is
* assigned, below, to two functions which are prototyped in glob.h
* and dirent.h as taking pointers to differently typed opaque
* structures.
*/
struct dirent *(*readdirfunc)(void *);
if (pathend > pathend_last) {
return 1;
}
*pathend = EOS;
errno = 0;
if (recursion >= pglob->gl_maxdepth) {
return GLOB_NOSPACE;
}
if ((dirp = g_opendir(pathbuf, pglob)) == NULL) {
/* TODO: don't call for ENOENT or ENOTDIR? */
if (pglob->gl_errfunc) {
if (g_Ctoc(pathbuf, buf, sizeof(buf))) {
return GLOB_ABORTED;
}
if (pglob->gl_errfunc(buf, errno) ||
pglob->gl_flags & GLOB_ERR) {
return GLOB_ABORTED;
}
}
return 0;
}
err = 0;
/* Search directory for matching names. */
if (pglob->gl_flags & GLOB_ALTDIRFUNC) {
readdirfunc = pglob->gl_readdir;
} else {
readdirfunc = (struct dirent *(*)(void *))readdir;
}
while ((dp = (*readdirfunc)(dirp))) {
unsigned char *sc;
Char *dc;
if (limitp->glim_readdir++ >= pglob->gl_maxfiles) {
errno = 0;
*pathend++ = SEP;
*pathend = EOS;
err = GLOB_NOSPACE;
break;
}
/* Initial DOT must be matched literally. */
if (dp->d_name[0] == DOT && *pattern != DOT) {
continue;
}
dc = pathend;
sc = (unsigned char *) dp->d_name;
while (dc < pathend_last && (*dc++ = *sc++) != EOS)
;
if (dc >= pathend_last) {
*dc = EOS;
err = 1;
break;
}
if (!match(pathend, pattern, restpattern, pglob->gl_maxdepth)) {
*pathend = EOS;
continue;
}
err = glob2(pathbuf, pathbuf_last, --dc, pathend_last,
restpattern, restpattern_last, pglob, limitp, recursion);
if (err) {
break;
}
}
if (pglob->gl_flags & GLOB_ALTDIRFUNC) {
(*pglob->gl_closedir)(dirp);
} else {
closedir(dirp);
}
return err;
}
| 0 |
[] |
pure-ftpd
|
0627004e23a24108785dc1506c5767392b90f807
| 141,439,633,488,974,880,000,000,000,000,000,000,000 | 92 |
BSD glob(): check max pattern length after having initialized pglob
|
int dn_dev_ioctl(unsigned int cmd, void __user *arg)
{
char buffer[DN_IFREQ_SIZE];
struct ifreq *ifr = (struct ifreq *)buffer;
struct sockaddr_dn *sdn = (struct sockaddr_dn *)&ifr->ifr_addr;
struct dn_dev *dn_db;
struct net_device *dev;
struct dn_ifaddr *ifa = NULL;
struct dn_ifaddr __rcu **ifap = NULL;
int ret = 0;
if (copy_from_user(ifr, arg, DN_IFREQ_SIZE))
return -EFAULT;
ifr->ifr_name[IFNAMSIZ-1] = 0;
dev_load(&init_net, ifr->ifr_name);
switch (cmd) {
case SIOCGIFADDR:
break;
case SIOCSIFADDR:
if (!capable(CAP_NET_ADMIN))
return -EACCES;
if (sdn->sdn_family != AF_DECnet)
return -EINVAL;
break;
default:
return -EINVAL;
}
rtnl_lock();
if ((dev = __dev_get_by_name(&init_net, ifr->ifr_name)) == NULL) {
ret = -ENODEV;
goto done;
}
if ((dn_db = rtnl_dereference(dev->dn_ptr)) != NULL) {
for (ifap = &dn_db->ifa_list;
(ifa = rtnl_dereference(*ifap)) != NULL;
ifap = &ifa->ifa_next)
if (strcmp(ifr->ifr_name, ifa->ifa_label) == 0)
break;
}
if (ifa == NULL && cmd != SIOCSIFADDR) {
ret = -EADDRNOTAVAIL;
goto done;
}
switch (cmd) {
case SIOCGIFADDR:
*((__le16 *)sdn->sdn_nodeaddr) = ifa->ifa_local;
goto rarok;
case SIOCSIFADDR:
if (!ifa) {
if ((ifa = dn_dev_alloc_ifa()) == NULL) {
ret = -ENOBUFS;
break;
}
memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
} else {
if (ifa->ifa_local == dn_saddr2dn(sdn))
break;
dn_dev_del_ifa(dn_db, ifap, 0);
}
ifa->ifa_local = ifa->ifa_address = dn_saddr2dn(sdn);
ret = dn_dev_set_ifa(dev, ifa);
}
done:
rtnl_unlock();
return ret;
rarok:
if (copy_to_user(arg, ifr, DN_IFREQ_SIZE))
ret = -EFAULT;
goto done;
}
| 0 |
[
"CWE-264"
] |
net
|
90f62cf30a78721641e08737bda787552428061e
| 39,678,347,158,337,687,000,000,000,000,000,000,000 | 81 |
net: Use netlink_ns_capable to verify the permisions of netlink messages
It is possible by passing a netlink socket to a more privileged
executable and then to fool that executable into writing to the socket
data that happens to be valid netlink message to do something that
privileged executable did not intend to do.
To keep this from happening replace bare capable and ns_capable calls
with netlink_capable, netlink_net_calls and netlink_ns_capable calls.
Which act the same as the previous calls except they verify that the
opener of the socket had the desired permissions as well.
Reported-by: Andy Lutomirski <[email protected]>
Signed-off-by: "Eric W. Biederman" <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
struct net_device *dev)
{
struct netdev_notifier_info info;
netdev_notifier_info_init(&info, dev);
return nb->notifier_call(nb, val, &info);
}
| 0 |
[
"CWE-400",
"CWE-703"
] |
linux
|
fac8e0f579695a3ecbc4d3cac369139d7f819971
| 238,888,066,351,036,340,000,000,000,000,000,000,000 | 8 |
tunnels: Don't apply GRO to multiple layers of encapsulation.
When drivers express support for TSO of encapsulated packets, they
only mean that they can do it for one layer of encapsulation.
Supporting additional levels would mean updating, at a minimum,
more IP length fields and they are unaware of this.
No encapsulation device expresses support for handling offloaded
encapsulated packets, so we won't generate these types of frames
in the transmit path. However, GRO doesn't have a check for
multiple levels of encapsulation and will attempt to build them.
UDP tunnel GRO actually does prevent this situation but it only
handles multiple UDP tunnels stacked on top of each other. This
generalizes that solution to prevent any kind of tunnel stacking
that would cause problems.
Fixes: bf5a755f ("net-gre-gro: Add GRE support to the GRO stack")
Signed-off-by: Jesse Gross <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
gdAlphaBlend (int dst, int src)
{
return (((((gdAlphaTransparent - gdTrueColorGetAlpha (src)) *
gdTrueColorGetRed (src) / gdAlphaMax) +
(gdTrueColorGetAlpha (src) *
gdTrueColorGetRed (dst)) / gdAlphaMax) << 16) +
((((gdAlphaTransparent - gdTrueColorGetAlpha (src)) *
gdTrueColorGetGreen (src) / gdAlphaMax) +
(gdTrueColorGetAlpha (src) *
gdTrueColorGetGreen (dst)) / gdAlphaMax) << 8) +
(((gdAlphaTransparent - gdTrueColorGetAlpha (src)) *
gdTrueColorGetBlue (src) / gdAlphaMax) +
(gdTrueColorGetAlpha (src) *
gdTrueColorGetBlue (dst)) / gdAlphaMax));
}
| 0 |
[
"CWE-119"
] |
php-src
|
feba44546c27b0158f9ac20e72040a224b918c75
| 328,393,245,086,412,630,000,000,000,000,000,000,000 | 15 |
Fixed bug #22965 (Crash in gd lib's ImageFillToBorder()).
|
bool save_in_param(THD *thd, Item_param *param)
{
// It should not be possible to have "EXECUTE .. USING DEFAULT(a)"
DBUG_ASSERT(arg == NULL);
param->set_default();
return false;
}
| 0 |
[
"CWE-617"
] |
server
|
2e7891080667c59ac80f788eef4d59d447595772
| 118,986,326,919,875,400,000,000,000,000,000,000,000 | 7 |
MDEV-25635 Assertion failure when pushing from HAVING into WHERE of view
This bug could manifest itself after pushing a where condition over a
mergeable derived table / view / CTE DT into a grouping view / derived
table / CTE V whose item list contained set functions with constant
arguments such as MIN(2), SUM(1) etc. In such cases the field references
used in the condition pushed into the view V that correspond set functions
are wrapped into Item_direct_view_ref wrappers. Due to a wrong implementation
of the virtual method const_item() for the class Item_direct_view_ref the
wrapped set functions with constant arguments could be erroneously taken
for constant items. This could lead to a wrong result set returned by the
main select query in 10.2. In 10.4 where a possibility of pushing condition
from HAVING into WHERE had been added this could cause a crash.
Approved by Sergey Petrunya <[email protected]>
|
/*
 * Print the <character-string> rdata field `rdf` into `output` as a quoted
 * string.  The wire format is a one-byte length prefix followed by that many
 * bytes of data.  Both the presence of the length byte and the fact that the
 * rdf really holds the declared number of bytes are validated up front, so a
 * truncated or malformed rdf yields LDNS_STATUS_WIRE_RDATA_ERR instead of an
 * out-of-bounds read.
 *
 * NOTE(review): the return type is declared on the line preceding this chunk
 * and is not visible here.
 */
ldns_rdf2buffer_str_str(ldns_buffer *output, const ldns_rdf *rdf)
{
        /* need at least the length byte itself */
        if(ldns_rdf_size(rdf) < 1) {
                return LDNS_STATUS_WIRE_RDATA_ERR;
        }
        /* the declared string length must actually fit inside the rdf */
        if((int)ldns_rdf_size(rdf) < ldns_rdf_data(rdf)[0] + 1) {
                return LDNS_STATUS_WIRE_RDATA_ERR;
        }
        ldns_buffer_printf(output, "\"");
        /* emit the length-prefixed character data */
        ldns_characters2buffer_str(output,
                        ldns_rdf_data(rdf)[0], ldns_rdf_data(rdf) + 1);
        ldns_buffer_printf(output, "\"");
        return ldns_buffer_status(output);
}
| 0 |
[
"CWE-415"
] |
ldns
|
070b4595981f48a21cc6b4f5047fdc2d09d3da91
| 7,200,047,130,738,233,000,000,000,000,000,000,000 | 14 |
CAA and URI
|
/*
  Store `quantum` into the slot of `pixel` that corresponds to `channel`,
  looked up through the image's channel map.  A channel whose traits are
  UndefinedPixelTrait is not present in the pixel layout, so it is left
  untouched.
*/
static inline void SetPixelChannel(const Image *magick_restrict image,
  const PixelChannel channel,const Quantum quantum,
  Quantum *magick_restrict pixel)
{
  if (image->channel_map[channel].traits == UndefinedPixelTrait)
    return;
  pixel[image->channel_map[channel].offset]=quantum;
}
| 0 |
[
"CWE-20",
"CWE-125"
] |
ImageMagick
|
8187d2d8fd010d2d6b1a3a8edd935beec404dddc
| 189,046,922,701,832,170,000,000,000,000,000,000,000 | 7 |
https://github.com/ImageMagick/ImageMagick/issues/1610
|
/* Callback invoked for every value-in-range hit found by the `aav`
 * (value-pointer) analysis: record a reference from `from` (the location
 * holding the pointer) to `to` (the address it points at), either as r2
 * commands printed to the console (`asterisk` mode) or directly into the
 * analysis database.
 *
 * Architecture quirks handled up front:
 *  - ARM: an odd target address signals Thumb mode; the real address is
 *    to & ~1 and a 16-bit hint is recorded there.  Even targets instead
 *    get a bits hint at the source location, taken from asm.bits after
 *    seeking there.
 *  - MIPS: pointers must be 4-byte aligned; anything else is treated as
 *    a false positive and dropped.
 *
 * `count` is currently unused.
 */
void _CbInRangeAav(RCore *core, ut64 from, ut64 to, int vsize, bool asterisk, int count) {
	bool isarm = archIsArmOrThumb (core);
	if (isarm) {
		if (to & 1) {
			// .dword 0x000080b9 in reality is 0x000080b8
			to--;
			r_anal_hint_set_bits (core->anal, to, 16);
			// can we assume is gonna be always a function?
		} else {
			r_core_seek_archbits (core, from);
			ut64 bits = r_config_get_i (core->config, "asm.bits");
			r_anal_hint_set_bits (core->anal, from, bits);
		}
	} else {
		bool ismips = archIsMips (core);
		if (ismips) {
			if (from % 4 || to % 4) {
				eprintf ("False positive\n");
				return;
			}
		}
	}
	if (asterisk) {
		// emit the equivalent r2 commands instead of mutating state
		r_cons_printf ("ax 0x%"PFMT64x " 0x%"PFMT64x "\n", to, from);
		r_cons_printf ("Cd %d @ 0x%"PFMT64x "\n", vsize, from);
		r_cons_printf ("f+ aav.0x%08"PFMT64x "= 0x%08"PFMT64x, to, to);
	} else {
#if 1
		// direct API path: xref + data metadata + an aav.* flag at the target
		r_anal_xrefs_set (core->anal, from, to, R_ANAL_REF_TYPE_NULL);
		r_meta_add (core->anal, 'd', from, from + vsize, NULL);
		if (!r_flag_get_at (core->flags, to, false)) {
			char *name = r_str_newf ("aav.0x%08"PFMT64x, to);
			r_flag_set (core->flags, name, to, vsize);
			free (name);
		}
#else
		r_core_cmdf (core, "ax 0x%"PFMT64x " 0x%"PFMT64x, to, from);
		r_core_cmdf (core, "Cd %d @ 0x%"PFMT64x, vsize, from);
		r_core_cmdf (core, "f+ aav.0x%08"PFMT64x "= 0x%08"PFMT64x, to, to);
#endif
	}
}
| 0 |
[
"CWE-125",
"CWE-787"
] |
radare2
|
a1bc65c3db593530775823d6d7506a457ed95267
| 180,687,368,999,889,530,000,000,000,000,000,000,000 | 42 |
Fix #12375 - Crash in bd+ao (#12382)
|
/*
 * Stub implementation of payload LZ compression: reports no output
 * (*max_size = 0) and returns GF_NOT_SUPPORTED.  `data` and `data_len`
 * are accepted for signature compatibility with the real implementation
 * but are not touched.
 * NOTE(review): presumably this variant is built when compression support
 * is compiled out — confirm against the configuration guards around this
 * definition.
 */
GF_Err gf_lz_compress_payload(u8 **data, u32 data_len, u32 *max_size)
{
	*max_size = 0;
	return GF_NOT_SUPPORTED;
}
| 0 |
[
"CWE-787"
] |
gpac
|
ea1eca00fd92fa17f0e25ac25652622924a9a6a0
| 295,282,339,554,501,300,000,000,000,000,000,000,000 | 5 |
fixed #2138
|
/*
 * psi_memstall_enter - account the current task as stalled on memory
 * @flags: out parameter receiving the task's previous in_memstall state;
 *         presumably consumed by the matching psi_memstall_leave() so
 *         nested sections do not clear the flag early — confirm there.
 *
 * No-op when PSI is disabled or when the task is already marked as
 * memstalled.  Otherwise the runqueue lock is taken so that setting
 * ->in_memstall and the psi_task_change() accounting happen atomically
 * with respect to scheduler state changes (see the comment below about
 * racing with CPU migration).
 */
void psi_memstall_enter(unsigned long *flags)
{
	struct rq_flags rf;
	struct rq *rq;

	if (static_branch_likely(&psi_disabled))
		return;

	/* remember prior state; already-stalled tasks need no re-accounting */
	*flags = current->in_memstall;
	if (*flags)
		return;
	/*
	 * in_memstall setting & accounting needs to be atomic wrt
	 * changes to the task's scheduling state, otherwise we can
	 * race with CPU migration.
	 */
	rq = this_rq_lock_irq(&rf);

	current->in_memstall = 1;
	psi_task_change(current, 0, TSK_MEMSTALL | TSK_MEMSTALL_RUNNING);

	rq_unlock_irq(rq, &rf);
}
| 0 |
[
"CWE-416"
] |
linux
|
a06247c6804f1a7c86a2e5398a4c1f1db1471848
| 332,855,886,813,972,850,000,000,000,000,000,000,000 | 23 |
psi: Fix uaf issue when psi trigger is destroyed while being polled
With write operation on psi files replacing old trigger with a new one,
the lifetime of its waitqueue is totally arbitrary. Overwriting an
existing trigger causes its waitqueue to be freed and pending poll()
will stumble on trigger->event_wait which was destroyed.
Fix this by disallowing to redefine an existing psi trigger. If a write
operation is used on a file descriptor with an already existing psi
trigger, the operation will fail with EBUSY error.
Also bypass a check for psi_disabled in the psi_trigger_destroy as the
flag can be flipped after the trigger is created, leading to a memory
leak.
Fixes: 0e94682b73bf ("psi: introduce psi monitor")
Reported-by: [email protected]
Suggested-by: Linus Torvalds <[email protected]>
Analyzed-by: Eric Biggers <[email protected]>
Signed-off-by: Suren Baghdasaryan <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Reviewed-by: Eric Biggers <[email protected]>
Acked-by: Johannes Weiner <[email protected]>
Cc: [email protected]
Link: https://lore.kernel.org/r/[email protected]
|
/*
 * Reset the global, prefix, per-thread and per-item statistics counters.
 * The global `stats` structure and the prefix stats are cleared while
 * STATS_LOCK() is held; the per-thread and per-item counters are reset
 * after the global lock is dropped (NOTE(review): those helpers are
 * assumed to synchronize internally — they run outside STATS_LOCK()).
 */
static void stats_reset(void) {
    STATS_LOCK();
    stats.total_items = 0;
    stats.total_conns = 0;
    stats.rejected_conns = 0;
    stats.evictions = 0;
    stats.reclaimed = 0;
    stats.listen_disabled_num = 0;
    stats_prefix_clear();
    STATS_UNLOCK();
    threadlocal_stats_reset();
    item_stats_reset();
}
| 0 |
[
"CWE-189"
] |
memcached
|
6695ccbc525c36d693aaa3e8337b36aa0c784424
| 110,926,514,592,246,360,000,000,000,000,000,000,000 | 12 |
Fix segfault on specially crafted packet.
|
/*
 * clone_file_range implementation for ocfs2: reflink `len` bytes from
 * `file_in` at offset `pos_in` to `file_out` at offset `pos_out`.
 * Thin wrapper delegating to ocfs2_reflink_remap_range().
 * NOTE(review): the final `false` argument presumably distinguishes
 * clone from dedupe in the shared remap helper — confirm against the
 * callee's signature.
 */
static int ocfs2_file_clone_range(struct file *file_in,
				  loff_t pos_in,
				  struct file *file_out,
				  loff_t pos_out,
				  u64 len)
{
	return ocfs2_reflink_remap_range(file_in, pos_in, file_out, pos_out,
					 len, false);
}
| 0 |
[
"CWE-401"
] |
linux
|
28f5a8a7c033cbf3e32277f4cc9c6afd74f05300
| 170,649,176,540,896,600,000,000,000,000,000,000,000 | 9 |
ocfs2: should wait dio before inode lock in ocfs2_setattr()
we should wait dio requests to finish before inode lock in
ocfs2_setattr(), otherwise the following deadlock will happen:
process 1 process 2 process 3
truncate file 'A' end_io of writing file 'A' receiving the bast messages
ocfs2_setattr
ocfs2_inode_lock_tracker
ocfs2_inode_lock_full
inode_dio_wait
__inode_dio_wait
-->waiting for all dio
requests finish
dlm_proxy_ast_handler
dlm_do_local_bast
ocfs2_blocking_ast
ocfs2_generic_handle_bast
set OCFS2_LOCK_BLOCKED flag
dio_end_io
dio_bio_end_aio
dio_complete
ocfs2_dio_end_io
ocfs2_dio_end_io_write
ocfs2_inode_lock
__ocfs2_cluster_lock
ocfs2_wait_for_mask
-->waiting for OCFS2_LOCK_BLOCKED
flag to be cleared, that is waiting
for 'process 1' unlocking the inode lock
inode_dio_end
-->here dec the i_dio_count, but will never
be called, so a deadlock happened.
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Alex Chen <[email protected]>
Reviewed-by: Jun Piao <[email protected]>
Reviewed-by: Joseph Qi <[email protected]>
Acked-by: Changwei Ge <[email protected]>
Cc: Mark Fasheh <[email protected]>
Cc: Joel Becker <[email protected]>
Cc: Junxiao Bi <[email protected]>
Cc: <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
/*
 * Free every ocfs2_unwritten_extent queued on `head`.  Each entry is
 * removed from the local list, then — under the inode's ip_lock — from
 * the per-inode ue_ip_node list, before being freed; the lock keeps the
 * per-inode list consistent for any concurrent accessors of that list.
 */
static void ocfs2_free_unwritten_list(struct inode *inode,
				      struct list_head *head)
{
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct ocfs2_unwritten_extent *ue = NULL, *tmp = NULL;

	/* _safe variant: entries are deleted while walking */
	list_for_each_entry_safe(ue, tmp, head, ue_node) {
		list_del(&ue->ue_node);
		spin_lock(&oi->ip_lock);
		list_del(&ue->ue_ip_node);
		spin_unlock(&oi->ip_lock);
		kfree(ue);
	}
}
| 0 |
[
"CWE-362"
] |
linux
|
3e4c56d41eef5595035872a2ec5a483f42e8917f
| 238,619,070,474,479,230,000,000,000,000,000,000,000 | 14 |
ocfs2: ip_alloc_sem should be taken in ocfs2_get_block()
ip_alloc_sem should be taken in ocfs2_get_block() when reading file in
DIRECT mode to prevent concurrent access to extent tree with
ocfs2_dio_end_io_write(), which may cause BUGON in the following
situation:
read file 'A' end_io of writing file 'A'
vfs_read
__vfs_read
ocfs2_file_read_iter
generic_file_read_iter
ocfs2_direct_IO
__blockdev_direct_IO
do_blockdev_direct_IO
do_direct_IO
get_more_blocks
ocfs2_get_block
ocfs2_extent_map_get_blocks
ocfs2_get_clusters
ocfs2_get_clusters_nocache()
ocfs2_search_extent_list
return the index of record which
contains the v_cluster, that is
v_cluster > rec[i]->e_cpos.
ocfs2_dio_end_io
ocfs2_dio_end_io_write
down_write(&oi->ip_alloc_sem);
ocfs2_mark_extent_written
ocfs2_change_extent_flag
ocfs2_split_extent
...
--> modify the rec[i]->e_cpos, resulting
in v_cluster < rec[i]->e_cpos.
BUG_ON(v_cluster < le32_to_cpu(rec->e_cpos))
[[email protected]: v3]
Link: http://lkml.kernel.org/r/[email protected]
Link: http://lkml.kernel.org/r/[email protected]
Fixes: c15471f79506 ("ocfs2: fix sparse file & data ordering issue in direct io")
Signed-off-by: Alex Chen <[email protected]>
Reviewed-by: Jun Piao <[email protected]>
Reviewed-by: Joseph Qi <[email protected]>
Reviewed-by: Gang He <[email protected]>
Acked-by: Changwei Ge <[email protected]>
Cc: Mark Fasheh <[email protected]>
Cc: Joel Becker <[email protected]>
Cc: Junxiao Bi <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
/*
 * Forward a libxml parser diagnostic to PHP's error reporting at severity
 * `level`.  `ctx` is the xmlParserCtxtPtr libxml passed to the handler;
 * the message is augmented with the input file name (when the input came
 * from a file) or "Entity", plus the current line number.  Diagnostics
 * with no parser or input context are silently dropped.
 */
static void php_libxml_ctx_error_level(int level, void *ctx, const char *msg TSRMLS_DC)
{
	xmlParserCtxtPtr parser;

	parser = (xmlParserCtxtPtr) ctx;

	if (parser != NULL && parser->input != NULL) {
		if (parser->input->filename) {
			php_error_docref(NULL TSRMLS_CC, level, "%s in %s, line: %d", msg, parser->input->filename, parser->input->line);
		} else {
			php_error_docref(NULL TSRMLS_CC, level, "%s in Entity, line: %d", msg, parser->input->line);
		}
	}
}
| 0 |
[
"CWE-200"
] |
php-src
|
8e76d0404b7f664ee6719fd98f0483f0ac4669d6
| 268,944,880,877,181,200,000,000,000,000,000,000,000 | 14 |
Fixed external entity loading
|
/*
 * Release the rule blob(s) attached to a chain that is being destroyed.
 * The two generation pointers may alias the same blob when there is no
 * pending update, so the second kvfree is only issued when they differ.
 * rules_next should already be NULL at this point (committed or aborted,
 * per the comment below); the WARN_ON_ONCE flags the unexpected case and
 * the trailing kvfree then avoids leaking it anyway.
 */
static void nf_tables_chain_free_chain_rules(struct nft_chain *chain)
{
	struct nft_rule **g0 = rcu_dereference_raw(chain->rules_gen_0);
	struct nft_rule **g1 = rcu_dereference_raw(chain->rules_gen_1);

	if (g0 != g1)
		kvfree(g1);
	kvfree(g0);

	/* should be NULL either via abort or via successful commit */
	WARN_ON_ONCE(chain->rules_next);
	kvfree(chain->rules_next);
}
| 0 |
[
"CWE-665"
] |
linux
|
ad9f151e560b016b6ad3280b48e42fa11e1a5440
| 75,593,388,871,948,210,000,000,000,000,000,000,000 | 13 |
netfilter: nf_tables: initialize set before expression setup
nft_set_elem_expr_alloc() needs an initialized set if expression sets on
the NFT_EXPR_GC flag. Move set fields initialization before expression
setup.
[4512935.019450] ==================================================================
[4512935.019456] BUG: KASAN: null-ptr-deref in nft_set_elem_expr_alloc+0x84/0xd0 [nf_tables]
[4512935.019487] Read of size 8 at addr 0000000000000070 by task nft/23532
[4512935.019494] CPU: 1 PID: 23532 Comm: nft Not tainted 5.12.0-rc4+ #48
[...]
[4512935.019502] Call Trace:
[4512935.019505] dump_stack+0x89/0xb4
[4512935.019512] ? nft_set_elem_expr_alloc+0x84/0xd0 [nf_tables]
[4512935.019536] ? nft_set_elem_expr_alloc+0x84/0xd0 [nf_tables]
[4512935.019560] kasan_report.cold.12+0x5f/0xd8
[4512935.019566] ? nft_set_elem_expr_alloc+0x84/0xd0 [nf_tables]
[4512935.019590] nft_set_elem_expr_alloc+0x84/0xd0 [nf_tables]
[4512935.019615] nf_tables_newset+0xc7f/0x1460 [nf_tables]
Reported-by: [email protected]
Fixes: 65038428b2c6 ("netfilter: nf_tables: allow to specify stateful expression in set definition")
Signed-off-by: Pablo Neira Ayuso <[email protected]>
|
Handle(RegistrationMap::const_iterator it) : it_(it) {}
| 0 |
[] |
envoy
|
2c60632d41555ec8b3d9ef5246242be637a2db0f
| 313,844,712,534,194,400,000,000,000,000,000,000,000 | 1 |
http: header map security fixes for duplicate headers (#197)
Previously header matching did not match on all headers for
non-inline headers. This patch changes the default behavior to
always logically match on all headers. Multiple individual
headers will be logically concatenated with ',' similar to what
is done with inline headers. This makes the behavior effectively
consistent. This behavior can be temporary reverted by setting
the runtime value "envoy.reloadable_features.header_match_on_all_headers"
to "false".
Targeted fixes have been additionally performed on the following
extensions which make them consider all duplicate headers by default as
a comma concatenated list:
1) Any extension using CEL matching on headers.
2) The header to metadata filter.
3) The JWT filter.
4) The Lua filter.
Like primary header matching used in routing, RBAC, etc. this behavior
can be disabled by setting the runtime value
"envoy.reloadable_features.header_match_on_all_headers" to false.
Finally, the setCopy() header map API previously only set the first
header in the case of duplicate non-inline headers. setCopy() now
behaves similiarly to the other set*() APIs and replaces all found
headers with a single value. This may have had security implications
in the extauth filter which uses this API. This behavior can be disabled
by setting the runtime value
"envoy.reloadable_features.http_set_copy_replace_all_headers" to false.
Fixes https://github.com/envoyproxy/envoy-setec/issues/188
Signed-off-by: Matt Klein <[email protected]>
|
// Entry point invoked by the codec once a request's header block is fully
// decoded.  `end_stream` is true when the request consists of headers only.
//
// Processing order matters and is roughly:
//  1. Latch the scoped route configuration (when scoped RDS is in use).
//  2. Shed load early when the overload manager says to stop accepting.
//  3. Handle protocol quirks: Expect: 100-continue, HTTP/1.0 acceptance,
//     missing Host, implicit Connection/Proxy-Connection close semantics.
//  4. Validate invariants: Host present, header size under the configured
//     limit, :path present / relative / normalizable.
//  5. Sanitize remote-address and tracing headers (first pass only).
//  6. Resolve the route, build the filter chain, configure the per-route
//     idle timer and tracing, then start filter-chain header decoding.
void ConnectionManagerImpl::ActiveStream::decodeHeaders(HeaderMapPtr&& headers, bool end_stream) {
  ScopeTrackerScopeState scope(this,
                               connection_manager_.read_callbacks_->connection().dispatcher());
  request_headers_ = std::move(headers);

  // For Admin thread, we don't use routeConfigProvider or SRDS route provider.
  if (dynamic_cast<Server::Admin*>(&connection_manager_.config_) == nullptr &&
      connection_manager_.config_.scopedRouteConfigProvider() != nullptr) {
    ASSERT(snapped_route_config_ == nullptr,
           "Route config already latched to the active stream when scoped RDS is enabled.");
    // We need to snap snapped_route_config_ here as it's used in mutateRequestHeaders later.
    snapScopedRouteConfig();
  }
  if (Http::Headers::get().MethodValues.Head ==
      request_headers_->Method()->value().getStringView()) {
    is_head_request_ = true;
  }
  ENVOY_STREAM_LOG(debug, "request headers complete (end_stream={}):\n{}", *this, end_stream,
                   *request_headers_);

  // We end the decode here only if the request is header only. If we convert the request to a
  // header only, the stream will be marked as done once a subsequent decodeData/decodeTrailers is
  // called with end_stream=true.
  maybeEndDecode(end_stream);

  // Drop new requests when overloaded as soon as we have decoded the headers.
  if (connection_manager_.overload_stop_accepting_requests_ref_ ==
      Server::OverloadActionState::Active) {
    // In this one special case, do not create the filter chain. If there is a risk of memory
    // overload it is more important to avoid unnecessary allocation than to create the filters.
    state_.created_filter_chain_ = true;
    connection_manager_.stats_.named_.downstream_rq_overload_close_.inc();
    sendLocalReply(Grpc::Common::hasGrpcContentType(*request_headers_),
                   Http::Code::ServiceUnavailable, "envoy overloaded", nullptr, is_head_request_,
                   absl::nullopt, StreamInfo::ResponseCodeDetails::get().Overload);
    return;
  }

  if (!connection_manager_.config_.proxy100Continue() && request_headers_->Expect() &&
      request_headers_->Expect()->value() == Headers::get().ExpectValues._100Continue.c_str()) {
    // Note in the case Envoy is handling 100-Continue complexity, it skips the filter chain
    // and sends the 100-Continue directly to the encoder.
    chargeStats(continueHeader());
    response_encoder_->encode100ContinueHeaders(continueHeader());
    // Remove the Expect header so it won't be handled again upstream.
    request_headers_->removeExpect();
  }

  connection_manager_.user_agent_.initializeFromHeaders(
      *request_headers_, connection_manager_.stats_.prefix_, connection_manager_.stats_.scope_);

  // Make sure we are getting a codec version we support.
  Protocol protocol = connection_manager_.codec_->protocol();
  if (protocol == Protocol::Http10) {
    // Assume this is HTTP/1.0. This is fine for HTTP/0.9 but this code will also affect any
    // requests with non-standard version numbers (0.9, 1.3), basically anything which is not
    // HTTP/1.1.
    //
    // The protocol may have shifted in the HTTP/1.0 case so reset it.
    stream_info_.protocol(protocol);
    if (!connection_manager_.config_.http1Settings().accept_http_10_) {
      // Send "Upgrade Required" if HTTP/1.0 support is not explicitly configured on.
      sendLocalReply(false, Code::UpgradeRequired, "", nullptr, is_head_request_, absl::nullopt,
                     StreamInfo::ResponseCodeDetails::get().LowVersion);
      return;
    } else {
      // HTTP/1.0 defaults to single-use connections. Make sure the connection
      // will be closed unless Keep-Alive is present.
      state_.saw_connection_close_ = true;
      if (request_headers_->Connection() &&
          absl::EqualsIgnoreCase(request_headers_->Connection()->value().getStringView(),
                                 Http::Headers::get().ConnectionValues.KeepAlive)) {
        state_.saw_connection_close_ = false;
      }
    }
  }

  if (!request_headers_->Host()) {
    if ((protocol == Protocol::Http10) &&
        !connection_manager_.config_.http1Settings().default_host_for_http_10_.empty()) {
      // Add a default host if configured to do so.
      request_headers_->insertHost().value(
          connection_manager_.config_.http1Settings().default_host_for_http_10_);
    } else {
      // Require host header. For HTTP/1.1 Host has already been translated to :authority.
      sendLocalReply(Grpc::Common::hasGrpcContentType(*request_headers_), Code::BadRequest, "",
                     nullptr, is_head_request_, absl::nullopt,
                     StreamInfo::ResponseCodeDetails::get().MissingHost);
      return;
    }
  }

  ASSERT(connection_manager_.config_.maxRequestHeadersKb() > 0);
  // NOTE(review): byteSize() is recomputed from scratch on every call in this
  // version; the upstream fix caches the byte size inside HeaderMap.
  if (request_headers_->byteSize() > (connection_manager_.config_.maxRequestHeadersKb() * 1024)) {
    sendLocalReply(Grpc::Common::hasGrpcContentType(*request_headers_),
                   Code::RequestHeaderFieldsTooLarge, "", nullptr, is_head_request_, absl::nullopt,
                   StreamInfo::ResponseCodeDetails::get().RequestHeadersTooLarge);
    return;
  }

  // Currently we only support relative paths at the application layer. We expect the codec to have
  // broken the path into pieces if applicable. NOTE: Currently the HTTP/1.1 codec only does this
  // when the allow_absolute_url flag is enabled on the HCM.
  // https://tools.ietf.org/html/rfc7230#section-5.3 We also need to check for the existence of
  // :path because CONNECT does not have a path, and we don't support that currently.
  if (!request_headers_->Path() || request_headers_->Path()->value().getStringView().empty() ||
      request_headers_->Path()->value().getStringView()[0] != '/') {
    const bool has_path =
        request_headers_->Path() && !request_headers_->Path()->value().getStringView().empty();
    connection_manager_.stats_.named_.downstream_rq_non_relative_path_.inc();
    sendLocalReply(Grpc::Common::hasGrpcContentType(*request_headers_), Code::NotFound, "", nullptr,
                   is_head_request_, absl::nullopt,
                   has_path ? StreamInfo::ResponseCodeDetails::get().AbsolutePath
                            : StreamInfo::ResponseCodeDetails::get().MissingPath);
    return;
  }

  // Path sanitization should happen before any path access other than the above sanity check.
  if (!ConnectionManagerUtility::maybeNormalizePath(*request_headers_,
                                                    connection_manager_.config_)) {
    sendLocalReply(Grpc::Common::hasGrpcContentType(*request_headers_), Code::BadRequest, "",
                   nullptr, is_head_request_, absl::nullopt,
                   StreamInfo::ResponseCodeDetails::get().PathNormalizationFailed);
    return;
  }

  if (protocol == Protocol::Http11 && request_headers_->Connection() &&
      absl::EqualsIgnoreCase(request_headers_->Connection()->value().getStringView(),
                             Http::Headers::get().ConnectionValues.Close)) {
    state_.saw_connection_close_ = true;
  }
  // Note: Proxy-Connection is not a standard header, but is supported here
  // since it is supported by http-parser the underlying parser for http
  // requests.
  if (protocol != Protocol::Http2 && !state_.saw_connection_close_ &&
      request_headers_->ProxyConnection() &&
      absl::EqualsIgnoreCase(request_headers_->ProxyConnection()->value().getStringView(),
                             Http::Headers::get().ConnectionValues.Close)) {
    state_.saw_connection_close_ = true;
  }

  if (!state_.is_internally_created_) { // Only sanitize headers on first pass.
    // Modify the downstream remote address depending on configuration and headers.
    stream_info_.setDownstreamRemoteAddress(ConnectionManagerUtility::mutateRequestHeaders(
        *request_headers_, connection_manager_.read_callbacks_->connection(),
        connection_manager_.config_, *snapped_route_config_, connection_manager_.random_generator_,
        connection_manager_.local_info_));
  }
  ASSERT(stream_info_.downstreamRemoteAddress() != nullptr);

  ASSERT(!cached_route_);
  refreshCachedRoute();

  if (!state_.is_internally_created_) { // Only mutate tracing headers on first pass.
    ConnectionManagerUtility::mutateTracingRequestHeader(
        *request_headers_, connection_manager_.runtime_, connection_manager_.config_,
        cached_route_.value().get());
  }

  const bool upgrade_rejected = createFilterChain() == false;

  // TODO if there are no filters when starting a filter iteration, the connection manager
  // should return 404. The current returns no response if there is no router filter.
  if (protocol == Protocol::Http11 && hasCachedRoute()) {
    if (upgrade_rejected) {
      // Do not allow upgrades if the route does not support it.
      connection_manager_.stats_.named_.downstream_rq_ws_on_non_ws_route_.inc();
      sendLocalReply(Grpc::Common::hasGrpcContentType(*request_headers_), Code::Forbidden, "",
                     nullptr, is_head_request_, absl::nullopt,
                     StreamInfo::ResponseCodeDetails::get().UpgradeFailed);
      return;
    }
    // Allow non websocket requests to go through websocket enabled routes.
  }

  if (hasCachedRoute()) {
    const Router::RouteEntry* route_entry = cached_route_.value()->routeEntry();
    if (route_entry != nullptr && route_entry->idleTimeout()) {
      idle_timeout_ms_ = route_entry->idleTimeout().value();
      if (idle_timeout_ms_.count()) {
        // If we have a route-level idle timeout but no global stream idle timeout, create a timer.
        if (stream_idle_timer_ == nullptr) {
          stream_idle_timer_ =
              connection_manager_.read_callbacks_->connection().dispatcher().createTimer(
                  [this]() -> void { onIdleTimeout(); });
        }
      } else if (stream_idle_timer_ != nullptr) {
        // If we had a global stream idle timeout but the route-level idle timeout is set to zero
        // (to override), we disable the idle timer.
        stream_idle_timer_->disableTimer();
        stream_idle_timer_ = nullptr;
      }
    }
  }

  // Check if tracing is enabled at all.
  if (connection_manager_.config_.tracingConfig()) {
    traceRequest();
  }

  decodeHeaders(nullptr, *request_headers_, end_stream);

  // Reset it here for both global and overridden cases.
  resetIdleTimer();
}
| 1 |
[
"CWE-400",
"CWE-703"
] |
envoy
|
afc39bea36fd436e54262f150c009e8d72db5014
| 165,885,183,581,622,710,000,000,000,000,000,000,000 | 205 |
Track byteSize of HeaderMap internally.
Introduces a cached byte size updated internally in HeaderMap. The value
is stored as an optional, and is cleared whenever a non-const pointer or
reference to a HeaderEntry is accessed. The cached value can be set with
refreshByteSize() which performs an iteration over the HeaderMap to sum
the size of each key and value in the HeaderMap.
Signed-off-by: Asra Ali <[email protected]>
|
/*
 * Finish a binary-protocol SASL_AUTH / SASL_STEP request once the full body
 * (mechanism name + challenge data) has been read into c->item.
 *
 * The mechanism name (keylen bytes) is copied into a NUL-terminated stack
 * buffer; the remaining vlen bytes of the body are the client challenge.
 * sasl_server_start()/sasl_server_step() drive the exchange:
 *   SASL_OK       -> respond "Authenticated", bump auth_cmds
 *   SASL_CONTINUE -> respond AUTH_CONTINUE carrying the server challenge
 *   anything else -> AUTH_ERROR, bump both auth_cmds and auth_errors
 *
 * NOTE(review): this (pre-fix) version derives "authenticated" purely from
 * the SASL library's implicit state; the upstream fix for the auth bypass
 * records the authentication state explicitly on the connection.
 * NOTE(review): `mech` is a VLA sized by the client-supplied keylen —
 * confirm keylen is bounded by earlier request validation.
 */
static void process_bin_complete_sasl_auth(conn *c) {
    assert(settings.sasl);
    const char *out = NULL;
    unsigned int outlen = 0;

    assert(c->item);
    init_sasl_conn(c);

    int nkey = c->binary_header.request.keylen;
    int vlen = c->binary_header.request.bodylen - nkey;

    char mech[nkey+1];
    memcpy(mech, ITEM_key((item*)c->item), nkey);
    mech[nkey] = 0x00;

    if (settings.verbose)
        fprintf(stderr, "mech: ``%s'' with %d bytes of data\n", mech, vlen);

    const char *challenge = vlen == 0 ? NULL : ITEM_data((item*) c->item);

    int result=-1;

    switch (c->cmd) {
    case PROTOCOL_BINARY_CMD_SASL_AUTH:
        result = sasl_server_start(c->sasl_conn, mech,
                                   challenge, vlen,
                                   &out, &outlen);
        break;
    case PROTOCOL_BINARY_CMD_SASL_STEP:
        result = sasl_server_step(c->sasl_conn,
                                  challenge, vlen,
                                  &out, &outlen);
        break;
    default:
        assert(false); /* CMD should be one of the above */
        /* This code is pretty much impossible, but makes the compiler
           happier */
        if (settings.verbose) {
            fprintf(stderr, "Unhandled command %d with challenge %s\n",
                    c->cmd, challenge);
        }
        break;
    }

    /* the staged request body is no longer needed */
    item_unlink(c->item);

    if (settings.verbose) {
        fprintf(stderr, "sasl result code:  %d\n", result);
    }

    switch(result) {
    case SASL_OK:
        write_bin_response(c, "Authenticated", 0, 0, strlen("Authenticated"));
        pthread_mutex_lock(&c->thread->stats.mutex);
        c->thread->stats.auth_cmds++;
        pthread_mutex_unlock(&c->thread->stats.mutex);
        break;
    case SASL_CONTINUE:
        add_bin_header(c, PROTOCOL_BINARY_RESPONSE_AUTH_CONTINUE, 0, 0, outlen);
        if(outlen > 0) {
            add_iov(c, out, outlen);
        }
        conn_set_state(c, conn_mwrite);
        c->write_and_go = conn_new_cmd;
        break;
    default:
        if (settings.verbose)
            fprintf(stderr, "Unknown sasl response:  %d\n", result);
        write_bin_error(c, PROTOCOL_BINARY_RESPONSE_AUTH_ERROR, 0);
        pthread_mutex_lock(&c->thread->stats.mutex);
        c->thread->stats.auth_cmds++;
        c->thread->stats.auth_errors++;
        pthread_mutex_unlock(&c->thread->stats.mutex);
    }
}
| 1 |
[
"CWE-287"
] |
memcached
|
87c1cf0f20be20608d3becf854e9cf0910f4ad32
| 285,427,073,575,557,700,000,000,000,000,000,000,000 | 75 |
explicitly record sasl auth states
It was previously possible to bypass authentication due to implicit
state management. Now we explicitly consider ourselves
unauthenticated on any new connections and authentication attempts.
bug316
Signed-off-by: Dustin Sallings <[email protected]>
|
/*
 * Allocate an ecma string of `size` bytes whose character count is `length`,
 * choosing the cheapest container that fits:
 *   - ASCII container when every byte is one character (length == size) and
 *     the size fits the 8-bit size field (size <= UINT8_MAX + 1);
 *   - short UTF-8 container while size and length fit in 16 bits;
 *   - long container otherwise.
 * The caller receives the writable data area through *data_p and is expected
 * to fill in exactly `size` bytes.
 *
 * NOTE(review): the return type is declared on the line preceding this chunk
 * and is not visible here (historically ecma_string_t *).
 */
ecma_new_ecma_string_from_utf8_buffer (lit_utf8_size_t length, /**< length of the buffer */
                                       lit_utf8_size_t size, /**< size of the buffer */
                                       lit_utf8_byte_t **data_p) /**< [out] pointer to the start of the string buffer */
{
  if (JERRY_LIKELY (size <= UINT16_MAX))
  {
    if (JERRY_LIKELY (length == size) && size <= (UINT8_MAX + 1))
    {
      /* pure-ASCII fast path: no separate length field needed */
      ecma_string_t *string_desc_p;
      string_desc_p = (ecma_string_t *) ecma_alloc_string_buffer (size + ECMA_ASCII_STRING_HEADER_SIZE);
      string_desc_p->refs_and_container = ECMA_STRING_CONTAINER_HEAP_ASCII_STRING | ECMA_STRING_REF_ONE;
      ECMA_ASCII_STRING_SET_SIZE (string_desc_p, size);

      *data_p = ECMA_ASCII_STRING_GET_BUFFER (string_desc_p);
      return (ecma_string_t *) string_desc_p;
    }

    /* short container: 16-bit size and length fields */
    ecma_short_string_t *string_desc_p;
    string_desc_p = (ecma_short_string_t *) ecma_alloc_string_buffer (size + sizeof (ecma_short_string_t));
    string_desc_p->header.refs_and_container = ECMA_STRING_CONTAINER_HEAP_UTF8_STRING | ECMA_STRING_REF_ONE;
    string_desc_p->size = (uint16_t) size;
    string_desc_p->length = (uint16_t) length;
    *data_p = ECMA_SHORT_STRING_GET_BUFFER (string_desc_p);
    return (ecma_string_t *) string_desc_p;
  }

  /* long container: full-width size/length plus an explicit data pointer */
  ecma_long_string_t *long_string_p;
  long_string_p = (ecma_long_string_t *) ecma_alloc_string_buffer (size + sizeof (ecma_long_string_t));
  long_string_p->header.refs_and_container = ECMA_STRING_CONTAINER_LONG_OR_EXTERNAL_STRING | ECMA_STRING_REF_ONE;
  long_string_p->string_p = ECMA_LONG_STRING_BUFFER_START (long_string_p);
  long_string_p->size = size;
  long_string_p->length = length;
  *data_p = ECMA_LONG_STRING_BUFFER_START (long_string_p);
  return (ecma_string_t *) long_string_p;
} /* ecma_new_ecma_string_from_utf8_buffer */
| 0 |
[
"CWE-416"
] |
jerryscript
|
3bcd48f72d4af01d1304b754ef19fe1a02c96049
| 166,492,176,408,787,200,000,000,000,000,000,000,000 | 37 |
Improve parse_identifier (#4691)
Ascii string length is no longer computed during string allocation.
JerryScript-DCO-1.0-Signed-off-by: Daniel Batiz [email protected]
|
/*
 * Insert one character, given as "charlen" bytes in "buf", at the cursor
 * position in the current line.  In Replace mode the character overwrites
 * existing text; in Virtual Replace mode it may consume several screen
 * cells' worth of old text (or none, for a TAB) and pad with spaces.
 * Replaced bytes are pushed on the replace stack so <BS> can restore them.
 * The cursor is advanced past the inserted character unless 'revins' is
 * active outside Replace mode.
 *
 * NOTE(review): the return type is declared on the line preceding this
 * chunk and is not visible here.
 */
ins_char_bytes(char_u *buf, int charlen)
{
    int		c = buf[0];
    int		newlen;		// nr of bytes inserted
    int		oldlen;		// nr of bytes deleted (0 when not replacing)
    char_u	*p;
    char_u	*newp;
    char_u	*oldp;
    int		linelen;	// length of old line including NUL
    colnr_T	col;
    linenr_T	lnum = curwin->w_cursor.lnum;
    int		i;

    // Break tabs if needed.
    if (virtual_active() && curwin->w_cursor.coladd > 0)
	coladvance_force(getviscol());

    col = curwin->w_cursor.col;
    oldp = ml_get(lnum);
    linelen = (int)STRLEN(oldp) + 1;

    // The lengths default to the values for when not replacing.
    oldlen = 0;
    newlen = charlen;

    if (State & REPLACE_FLAG)
    {
	if (State & VREPLACE_FLAG)
	{
	    colnr_T	new_vcol = 0;	// init for GCC
	    colnr_T	vcol;
	    int		old_list;

	    // Disable 'list' temporarily, unless 'cpo' contains the 'L' flag.
	    // Returns the old value of list, so when finished,
	    // curwin->w_p_list should be set back to this.
	    old_list = curwin->w_p_list;
	    if (old_list && vim_strchr(p_cpo, CPO_LISTWM) == NULL)
		curwin->w_p_list = FALSE;

	    // In virtual replace mode each character may replace one or more
	    // characters (zero if it's a TAB).  Count the number of bytes to
	    // be deleted to make room for the new character, counting screen
	    // cells.  May result in adding spaces to fill a gap.
	    getvcol(curwin, &curwin->w_cursor, NULL, &vcol, NULL);
	    new_vcol = vcol + chartabsize(buf, vcol);
	    while (oldp[col + oldlen] != NUL && vcol < new_vcol)
	    {
		vcol += chartabsize(oldp + col + oldlen, vcol);
		// Don't need to remove a TAB that takes us to the right
		// position.
		if (vcol > new_vcol && oldp[col + oldlen] == TAB)
		    break;
		oldlen += (*mb_ptr2len)(oldp + col + oldlen);
		// Deleted a bit too much, insert spaces.
		if (vcol > new_vcol)
		    newlen += vcol - new_vcol;
	    }
	    curwin->w_p_list = old_list;
	}
	else if (oldp[col] != NUL)
	{
	    // normal replace
	    oldlen = (*mb_ptr2len)(oldp + col);
	}

	// Push the replaced bytes onto the replace stack, so that they can be
	// put back when BS is used.  The bytes of a multi-byte character are
	// done the other way around, so that the first byte is popped off
	// first (it tells the byte length of the character).
	replace_push(NUL);
	for (i = 0; i < oldlen; ++i)
	{
	    if (has_mbyte)
		i += replace_push_mb(oldp + col + i) - 1;
	    else
		replace_push(oldp[col + i]);
	}
    }

    newp = alloc(linelen + newlen - oldlen);
    if (newp == NULL)
	return;

    // Copy bytes before the cursor.
    if (col > 0)
	mch_memmove(newp, oldp, (size_t)col);

    // Copy bytes after the changed character(s).
    p = newp + col;
    if (linelen > col + oldlen)
	mch_memmove(p + newlen, oldp + col + oldlen,
					    (size_t)(linelen - col - oldlen));

    // Insert or overwrite the new character.
    mch_memmove(p, buf, charlen);
    i = charlen;

    // Fill with spaces when necessary.
    while (i < newlen)
	p[i++] = ' ';

    // Replace the line in the buffer.
    ml_replace(lnum, newp, FALSE);

    // mark the buffer as changed and prepare for displaying
    inserted_bytes(lnum, col, newlen - oldlen);

    // If we're in Insert or Replace mode and 'showmatch' is set, then briefly
    // show the match for right parens and braces.
    if (p_sm && (State & MODE_INSERT)
	    && msg_silent == 0
	    && !ins_compl_active())
    {
	if (has_mbyte)
	    showmatch(mb_ptr2char(buf));
	else
	    showmatch(c);
    }

#ifdef FEAT_RIGHTLEFT
    if (!p_ri || (State & REPLACE_FLAG))
#endif
    {
	// Normal insert: move cursor right
	curwin->w_cursor.col += charlen;
    }

    // TODO: should try to update w_row here, to avoid recomputing it later.
}
| 0 |
[
"CWE-120"
] |
vim
|
7ce5b2b590256ce53d6af28c1d203fb3bc1d2d97
| 165,001,631,251,496,540,000,000,000,000,000,000,000 | 131 |
patch 8.2.4969: changing text in Visual mode may cause invalid memory access
Problem: Changing text in Visual mode may cause invalid memory access.
Solution: Check the Visual position after making a change.
|
/*
 * Prepare the SCM (socket control message) state for a sendmsg() on an
 * AF_UNIX socket: snapshot the sender's pid and credentials into the
 * scm_cookie, fetch peer security data for datagram sockets, then parse
 * any ancillary data via __scm_send() (skipped when there is none).
 *
 * NOTE(review): this version captures credentials unconditionally on
 * every send; the upstream fix (16e57262) makes SCM_CREDENTIALS opt-in
 * to avoid cross-CPU pid/cred refcount traffic per message.
 */
static __inline__ int scm_send(struct socket *sock, struct msghdr *msg,
			       struct scm_cookie *scm)
{
	scm_set_cred(scm, task_tgid(current), current_cred());
	scm->fp = NULL;
	unix_get_peersec_dgram(sock, scm);
	if (msg->msg_controllen <= 0)
		return 0;
	return __scm_send(sock, msg, scm);
}
| 1 |
[] |
linux-2.6
|
16e5726269611b71c930054ffe9b858c1cea88eb
| 81,501,451,115,101,620,000,000,000,000,000,000,000 | 10 |
af_unix: dont send SCM_CREDENTIALS by default
Since commit 7361c36c5224 (af_unix: Allow credentials to work across
user and pid namespaces) af_unix performance dropped a lot.
This is because we now take a reference on pid and cred in each write(),
and release them in read(), usually done from another process,
eventually from another cpu. This triggers false sharing.
# Events: 154K cycles
#
# Overhead Command Shared Object Symbol
# ........ ....... .................. .........................
#
10.40% hackbench [kernel.kallsyms] [k] put_pid
8.60% hackbench [kernel.kallsyms] [k] unix_stream_recvmsg
7.87% hackbench [kernel.kallsyms] [k] unix_stream_sendmsg
6.11% hackbench [kernel.kallsyms] [k] do_raw_spin_lock
4.95% hackbench [kernel.kallsyms] [k] unix_scm_to_skb
4.87% hackbench [kernel.kallsyms] [k] pid_nr_ns
4.34% hackbench [kernel.kallsyms] [k] cred_to_ucred
2.39% hackbench [kernel.kallsyms] [k] unix_destruct_scm
2.24% hackbench [kernel.kallsyms] [k] sub_preempt_count
1.75% hackbench [kernel.kallsyms] [k] fget_light
1.51% hackbench [kernel.kallsyms] [k]
__mutex_lock_interruptible_slowpath
1.42% hackbench [kernel.kallsyms] [k] sock_alloc_send_pskb
This patch includes SCM_CREDENTIALS information in a af_unix message/skb
only if requested by the sender, [man 7 unix for details how to include
ancillary data using sendmsg() system call]
Note: This might break buggy applications that expected SCM_CREDENTIAL
from an unaware write() system call, and receiver not using SO_PASSCRED
socket option.
If SOCK_PASSCRED is set on source or destination socket, we still
include credentials for mere write() syscalls.
Performance boost in hackbench : more than 50% gain on a 16 thread
machine (2 quad-core cpus, 2 threads per core)
hackbench 20 thread 2000
4.228 sec instead of 9.102 sec
Signed-off-by: Eric Dumazet <[email protected]>
Acked-by: Tim Chen <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
// Initialize the input-file state record.
//   deleteStream                - whether this object owns the underlying
//                                 stream and must delete it on teardown
//   numThreads                  - worker thread count (presumably used for
//                                 parallel reading — confirm at use sites)
//   reconstructChunkOffsetTable - whether a missing/damaged chunk offset
//                                 table should be rebuilt on open
Data (bool deleteStream, int numThreads, bool reconstructChunkOffsetTable):
    InputStreamMutex(),
    deleteStream (deleteStream),
    numThreads (numThreads),
    reconstructChunkOffsetTable(reconstructChunkOffsetTable)
{
}
| 0 |
[
"CWE-94",
"CWE-787"
] |
openexr
|
8b5370c688a7362673c3a5256d93695617a4cd9a
| 208,550,500,327,876,700,000,000,000,000,000,000,000 | 7 |
Fix #491, issue with part number range check reconstructing chunk offset table
The chunk offset was incorrectly testing for a part number that was the
same size (i.e. an invalid index)
Signed-off-by: Kimball Thurston <[email protected]>
|
/*
 * Mark an inode reclaimable: set the reclaim tag on its entry in the
 * per-AG inode radix tree, propagate the tag to the per-AG structures,
 * and flag the inode itself as XFS_IRECLAIMABLE.
 *
 * Lock order: pag->pag_ici_lock, then ip->i_flags_lock — both are held
 * across the tag/flag updates so the radix-tree tag and the inode flag
 * cannot be observed in an inconsistent state.
 */
xfs_inode_set_reclaim_tag(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_perag	*pag;

	/* Look up (and reference) the AG that owns this inode number. */
	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	spin_lock(&pag->pag_ici_lock);
	spin_lock(&ip->i_flags_lock);

	radix_tree_tag_set(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, ip->i_ino),
			   XFS_ICI_RECLAIM_TAG);
	xfs_perag_set_reclaim_tag(pag);
	__xfs_iflags_set(ip, XFS_IRECLAIMABLE);

	spin_unlock(&ip->i_flags_lock);
	spin_unlock(&pag->pag_ici_lock);
	xfs_perag_put(pag);	/* drop the AG reference taken above */
}
| 0 |
[
"CWE-476"
] |
linux
|
afca6c5b2595fc44383919fba740c194b0b76aff
| 102,621,945,854,949,950,000,000,000,000,000,000,000 | 19 |
xfs: validate cached inodes are free when allocated
A recent fuzzed filesystem image cached random dcache corruption
when the reproducer was run. This often showed up as panics in
lookup_slow() on a null inode->i_ops pointer when doing pathwalks.
BUG: unable to handle kernel NULL pointer dereference at 0000000000000000
....
Call Trace:
lookup_slow+0x44/0x60
walk_component+0x3dd/0x9f0
link_path_walk+0x4a7/0x830
path_lookupat+0xc1/0x470
filename_lookup+0x129/0x270
user_path_at_empty+0x36/0x40
path_listxattr+0x98/0x110
SyS_listxattr+0x13/0x20
do_syscall_64+0xf5/0x280
entry_SYSCALL_64_after_hwframe+0x42/0xb7
but had many different failure modes including deadlocks trying to
lock the inode that was just allocated or KASAN reports of
use-after-free violations.
The cause of the problem was a corrupt INOBT on a v4 fs where the
root inode was marked as free in the inobt record. Hence when we
allocated an inode, it chose the root inode to allocate, found it in
the cache and re-initialised it.
We recently fixed a similar inode allocation issue caused by inobt
record corruption problem in xfs_iget_cache_miss() in commit
ee457001ed6c ("xfs: catch inode allocation state mismatch
corruption"). This change adds similar checks to the cache-hit path
to catch it, and turns the reproducer into a corruption shutdown
situation.
Reported-by: Wen Xu <[email protected]>
Signed-Off-By: Dave Chinner <[email protected]>
Reviewed-by: Christoph Hellwig <[email protected]>
Reviewed-by: Carlos Maiolino <[email protected]>
Reviewed-by: Darrick J. Wong <[email protected]>
[darrick: fix typos in comment]
Signed-off-by: Darrick J. Wong <[email protected]>
|
// Report whether this handle refers to a PDF integer object.
// dereference() first resolves the handle (presumably fetching the target
// of an indirect reference — confirm in dereference()) so the type check
// runs against the concrete object.
QPDFObjectHandle::isInteger()
{
    dereference();
    return QPDFObjectTypeAccessor<QPDF_Integer>::check(obj.getPointer());
}
| 0 |
[
"CWE-835"
] |
qpdf
|
afe0242b263a9e1a8d51dd81e42ab6de2e5127eb
| 312,608,874,952,455,840,000,000,000,000,000,000,000 | 5 |
Handle object ID 0 (fixes #99)
This is CVE-2017-9208.
The QPDF library uses object ID 0 internally as a sentinel to
represent a direct object, but prior to this fix, was not blocking
handling of 0 0 obj or 0 0 R as a special case. Creating an object in
the file with 0 0 obj could cause various infinite loops. The PDF spec
doesn't allow for object 0. Having qpdf handle object 0 might be a
better fix, but changing all the places in the code that assumes objid
== 0 means direct would be risky.
|
/*
 * Peek at the header of the next chunk within the packet currently being
 * processed, without consuming it.
 *
 * Returns a pointer to the next chunk header inside the in-progress
 * packet, or NULL when no packet is in progress or the current chunk is
 * the last one in its packet (singleton, end-of-packet, or discarded).
 */
struct sctp_chunkhdr *sctp_inq_peek(struct sctp_inq *queue)
{
	struct sctp_chunk *chunk;
	sctp_chunkhdr_t *ch = NULL;

	chunk = queue->in_progress;
	/* Fix: guard against no packet being in progress — the original
	 * dereferenced a possibly-NULL in_progress pointer below. */
	if (chunk == NULL)
		return NULL;

	/* If there is no more chunks in this packet, say so */
	if (chunk->singleton ||
	    chunk->end_of_packet ||
	    chunk->pdiscard)
		return NULL;

	ch = (sctp_chunkhdr_t *)chunk->chunk_end;

	return ch;
}
| 0 |
[] |
linux
|
196d67593439b03088913227093e374235596e33
| 93,351,896,487,047,140,000,000,000,000,000,000,000 | 16 |
sctp: Add support to per-association statistics via a new SCTP_GET_ASSOC_STATS call
The current SCTP stack is lacking a mechanism to have per association
statistics. This is an implementation modeled after OpenSolaris'
SCTP_GET_ASSOC_STATS.
Userspace part will follow on lksctp if/when there is a general ACK on
this.
V4:
- Move ipackets++ before q->immediate.func() for consistency reasons
- Move sctp_max_rto() at the end of sctp_transport_update_rto() to avoid
returning bogus RTO values
- return asoc->rto_min when max_obs_rto value has not changed
V3:
- Increase ictrlchunks in sctp_assoc_bh_rcv() as well
- Move ipackets++ to sctp_inq_push()
- return 0 when no rto updates took place since the last call
V2:
- Implement partial retrieval of stat struct to cope for future expansion
- Kill the rtxpackets counter as it cannot be precise anyway
- Rename outseqtsns to outofseqtsns to make it clearer that these are out
of sequence unexpected TSNs
- Move asoc->ipackets++ under a lock to avoid potential miscounts
- Fold asoc->opackets++ into the already existing asoc check
- Kill unneeded (q->asoc) test when increasing rtxchunks
- Do not count octrlchunks if sending failed (SCTP_XMIT_OK != 0)
- Don't count SHUTDOWNs as SACKs
- Move SCTP_GET_ASSOC_STATS to the private space API
- Adjust the len check in sctp_getsockopt_assoc_stats() to allow for
future struct growth
- Move association statistics in their own struct
- Update idupchunks when we send a SACK with dup TSNs
- return min_rto in max_rto when RTO has not changed. Also return the
transport when max_rto last changed.
Signed-off: Michele Baldessari <[email protected]>
Acked-by: Vlad Yasevich <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
// Destructor for a MovieHeaderBox ('mvhd'). A NULL box is a no-op.
void mvhd_del(GF_Box *s)
{
	GF_MovieHeaderBox *mvhd = (GF_MovieHeaderBox *)s;

	if (mvhd != NULL)
		gf_free(mvhd);
}
| 0 |
[
"CWE-400",
"CWE-401"
] |
gpac
|
d2371b4b204f0a3c0af51ad4e9b491144dd1225c
| 307,729,151,636,646,450,000,000,000,000,000,000,000 | 6 |
prevent dref memleak on invalid input (#1183)
|
// Forwarding helper: undo any SET STATEMENT variable overrides recorded
// on the main LEX for the current statement.
void restore_set_statement_var()
{
  main_lex.restore_set_statement_var();
}
| 0 |
[
"CWE-416"
] |
server
|
4681b6f2d8c82b4ec5cf115e83698251963d80d5
| 176,083,745,892,414,760,000,000,000,000,000,000,000 | 4 |
MDEV-26281 ASAN use-after-poison when complex conversion is involved in blob
the bug was that in_vector array in Item_func_in was allocated in the
statement arena, not in the table->expr_arena.
revert part of the 5acd391e8b2d. Instead, change the arena correctly
in fix_all_session_vcol_exprs().
Remove TABLE_ARENA, that was introduced in 5acd391e8b2d to force
item tree changes to be rolled back (because they were allocated in the
wrong arena and didn't persist. now they do)
|
// Record each token in `l` as seen, and additionally mark it in `v`
// when it is one of the policy-statement keys (Sid, Effect,
// Principal/NotPrincipal, Action/NotAction, Resource/NotResource,
// Condition) or one of the principal kinds (AWS, Federated, Service,
// CanonicalUser).
void set(std::initializer_list<TokenID> l) {
  // The key mask is loop-invariant; compute the 13-way OR once instead
  // of re-evaluating it for every token in the list.
  const auto key_mask = dex(TokenID::Sid) | dex(TokenID::Effect) |
                        dex(TokenID::Principal) | dex(TokenID::NotPrincipal) |
                        dex(TokenID::Action) | dex(TokenID::NotAction) |
                        dex(TokenID::Resource) | dex(TokenID::NotResource) |
                        dex(TokenID::Condition) | dex(TokenID::AWS) |
                        dex(TokenID::Federated) | dex(TokenID::Service) |
                        dex(TokenID::CanonicalUser);
  for (auto in : l) {
    const auto bit = dex(in);
    seen |= bit;
    if (bit & key_mask) {
      v |= bit;
    }
  }
}
| 0 |
[
"CWE-617"
] |
ceph
|
b3118cabb8060a8cc6a01c4e8264cb18e7b1745a
| 247,639,114,942,158,370,000,000,000,000,000,000,000 | 14 |
rgw: Remove assertions in IAM Policy
A couple of them could be triggered by user input.
Signed-off-by: Adam C. Emerson <[email protected]>
|
/* Set the gamma used for HDR-to-LDR conversion; the value is stored as
   its reciprocal (1/gamma) in stbi__h2l_gamma_i. */
STBIDEF void stbi_hdr_to_ldr_gamma(float gamma) { stbi__h2l_gamma_i = 1/gamma; }
| 0 |
[
"CWE-787"
] |
stb
|
5ba0baaa269b3fd681828e0e3b3ac0f1472eaf40
| 294,281,572,191,649,500,000,000,000,000,000,000,000 | 1 |
stb_image: Reject fractional JPEG component subsampling ratios
The component resamplers are not written to support this and I've
never seen it happen in a real (non-crafted) JPEG file so I'm
fine rejecting this as outright corrupt.
Fixes issue #1178.
|
/*
 * Expand a user-configured command "format" against the given selection
 * data/range and run the result as a child process of the terminal.
 *
 * tokenizeFormat() splits the format into an argv whose backing storage
 * is argv[0] ("blob"); each token is then replaced by its expanded copy,
 * and freeArgv() releases both the blob and the expansions afterwards.
 */
reallyExecFormatted(Widget w, char *format, char *data, CELL *start, CELL *finish)
{
    XtermWidget xw;

    /* Only act when the widget is actually an xterm widget. */
    if ((xw = getXtermWidget(w)) != 0) {
	char **argv;

	if ((argv = tokenizeFormat(format)) != 0) {
	    char *blob = argv[0];	/* original allocation, freed below */
	    int argc;

	    for (argc = 0; argv[argc] != 0; ++argc) {
		argv[argc] = expandFormat(xw, argv[argc], data, start, finish);
	    }
	    executeCommand(TScreenOf(xw)->pid, argv);
	    freeArgv(blob, argv);
	}
    }
}
| 0 |
[
"CWE-399"
] |
xterm-snapshots
|
82ba55b8f994ab30ff561a347b82ea340ba7075c
| 329,753,789,280,130,630,000,000,000,000,000,000,000 | 19 |
snapshot of project "xterm", label xterm-365d
|
/* Make p a copy of q (size and contents). Returns 1 on success, 0 when
 * resizing p (allocation) fails. */
static unsigned uivector_copy(uivector* p, const uivector* q)
{
  size_t n = q->size;
  if(!uivector_resize(p, n)) return 0;
  while(n--) p->data[n] = q->data[n];
  return 1;
}
| 0 |
[
"CWE-401"
] |
FreeRDP
|
9fee4ae076b1ec97b97efb79ece08d1dab4df29a
| 165,503,475,279,442,660,000,000,000,000,000,000,000 | 7 |
Fixed #5645: realloc return handling
|
/*
 * Return the overlay entry's upper dentry (may be NULL if no upper layer
 * copy exists yet).
 *
 * The pointer is read once with ACCESS_ONCE and followed by a dependent
 * read barrier so loads through the returned dentry are ordered against
 * the store in ovl_dentry_update() (see the comment below).
 */
static struct dentry *ovl_upperdentry_dereference(struct ovl_entry *oe)
{
	struct dentry *upperdentry = ACCESS_ONCE(oe->__upperdentry);
	/*
	 * Make sure to order reads to upperdentry wrt ovl_dentry_update()
	 */
	smp_read_barrier_depends();
	return upperdentry;
}
| 0 |
[
"CWE-284",
"CWE-264"
] |
linux
|
69c433ed2ecd2d3264efd7afec4439524b319121
| 246,219,064,398,654,900,000,000,000,000,000,000,000 | 9 |
fs: limit filesystem stacking depth
Add a simple read-only counter to super_block that indicates how deep this
is in the stack of filesystems. Previously ecryptfs was the only stackable
filesystem and it explicitly disallowed multiple layers of itself.
Overlayfs, however, can be stacked recursively and also may be stacked
on top of ecryptfs or vice versa.
To limit the kernel stack usage we must limit the depth of the
filesystem stack. Initially the limit is set to 2.
Signed-off-by: Miklos Szeredi <[email protected]>
|
// Crash diagnostics for mysqltest: dump the buffers most likely to be
// involved in the failure (the command read buffer and, for the current
// connection, its name and — in embedded builds — the in-flight query),
// then attempt a stack backtrace.
static void dump_backtrace(void)
{
  struct st_connection *conn= cur_con;

  // my_safe_puts_stderr takes an explicit length, so dumping the whole
  // array is bounded even if the buffer is not NUL-terminated.
  fprintf(stderr, "read_command_buf (%p): ", read_command_buf);
  my_safe_puts_stderr(read_command_buf, sizeof(read_command_buf));

  if (conn)
  {
    fprintf(stderr, "conn->name (%p): ", conn->name);
    my_safe_puts_stderr(conn->name, conn->name_len);
#ifdef EMBEDDED_LIBRARY
    fprintf(stderr, "conn->cur_query (%p): ", conn->cur_query);
    my_safe_puts_stderr(conn->cur_query, conn->cur_query_len);
#endif
  }
  fputs("Attempting backtrace...\n", stderr);
  my_print_stacktrace(NULL, my_thread_stack_size);
}
| 0 |
[
"CWE-284",
"CWE-295"
] |
mysql-server
|
3bd5589e1a5a93f9c224badf983cd65c45215390
| 181,847,327,929,621,600,000,000,000,000,000,000,000 | 19 |
WL#6791 : Redefine client --ssl option to imply enforced encryption
# Changed the meaning of the --ssl=1 option of all client binaries
to mean force ssl, not try ssl and fail over to eunecrypted
# Added a new MYSQL_OPT_SSL_ENFORCE mysql_options()
option to specify that an ssl connection is required.
# Added a new macro SSL_SET_OPTIONS() to the client
SSL handling headers that sets all the relevant SSL options at
once.
# Revamped all of the current native clients to use the new macro
# Removed some Windows line endings.
# Added proper handling of the new option into the ssl helper
headers.
# If SSL is mandatory assume that the media is secure enough
for the sha256 plugin to do unencrypted password exchange even
before establishing a connection.
# Set the default ssl cipher to DHE-RSA-AES256-SHA if none is
specified.
# updated test cases that require a non-default cipher to spawn
a mysql command line tool binary since mysqltest has no support
for specifying ciphers.
# updated the replication slave connection code to always enforce
SSL if any of the SSL config options is present.
# test cases added and updated.
# added a mysql_get_option() API to return mysql_options()
values. Used the new API inside the sha256 plugin.
# Fixed compilation warnings because of unused variables.
# Fixed test failures (mysql_ssl and bug13115401)
# Fixed whitespace issues.
# Fully implemented the mysql_get_option() function.
# Added a test case for mysql_get_option()
# fixed some trailing whitespace issues
# fixed some uint/int warnings in mysql_client_test.c
# removed shared memory option from non-windows get_options
tests
# moved MYSQL_OPT_LOCAL_INFILE to the uint options
|
// Copy the slice [start, start + len) of src's sample data into dest,
// optionally marking the result as a looping sample spanning the whole
// slice.  Bails out (leaving dest untouched) when src has no data or the
// requested slice does not fit inside src.
//
// NOTE(review): the copy below indexes pSample8 and memcpy's `len`
// units directly — this assumes 8-bit (byte-sized) sample frames;
// confirm callers never pass 16-bit samples here.
static void ConvertLoopSlice(ModSample &src, ModSample &dest, SmpLength start, SmpLength len, bool loop)
{
	if(!src.HasSampleData()
		|| start >= src.nLength
		|| src.nLength - start < len)
	{
		return;
	}

	dest.FreeSample();
	dest = src;		// copy sample metadata
	dest.nLength = len;
	dest.pSample = nullptr;	// don't share src's buffer; allocate our own

	if(!dest.AllocateSample())
	{
		return;
	}

	// only preserve cue points if the target sample length is the same
	if(len != src.nLength)
		MemsetZero(dest.cues);
	std::memcpy(dest.pSample8, src.pSample8 + start, len);
	dest.uFlags.set(CHN_LOOP, loop);
	if(loop)
	{
		dest.nLoopStart = 0;
		dest.nLoopEnd = len;
	} else
	{
		dest.nLoopStart = 0;
		dest.nLoopEnd = 0;
	}
}
| 0 |
[
"CWE-125"
] |
openmpt
|
b60b322cf9f0ffa624018f1bb9783edf0dc908c3
| 286,214,831,279,603,750,000,000,000,000,000,000,000 | 35 |
[Sec] STP: Possible out-of-bounds memory read with malformed STP files (caught with afl-fuzz).
Patch-by: sagamusix
(originally committed as part of r9568)
git-svn-id: https://source.openmpt.org/svn/openmpt/branches/OpenMPT-1.27@9576 56274372-70c3-4bfc-bfc3-4c3a0b034d27
|
/*
 * Drain TX descriptors from queue `qidx`: accumulate the fragments of the
 * current packet, and on the end-of-packet descriptor parse/send it,
 * report status, complete the descriptor, and reset per-packet state.
 *
 * s->skip_current_tx_pkt marks a packet that could not be assembled; its
 * remaining descriptors are still consumed but the packet is dropped and
 * counted as an error.
 */
static void vmxnet3_process_tx_queue(VMXNET3State *s, int qidx)
{
    struct Vmxnet3_TxDesc txd;
    uint32_t txd_idx;
    uint32_t data_len;
    hwaddr data_pa;

    for (;;) {
        if (!vmxnet3_pop_next_tx_descr(s, qidx, &txd, &txd_idx)) {
            break;  /* ring empty — nothing more to transmit */
        }

        vmxnet3_dump_tx_descr(&txd);

        if (!s->skip_current_tx_pkt) {
            /* len == 0 encodes the maximum buffer size. */
            data_len = (txd.len > 0) ? txd.len : VMXNET3_MAX_TX_BUF_SIZE;
            data_pa = txd.addr;

            if (!net_tx_pkt_add_raw_fragment(s->tx_pkt,
                                             data_pa,
                                             data_len)) {
                s->skip_current_tx_pkt = true;  /* drop rest of this packet */
            }
        }

        if (s->tx_sop) {
            /* First descriptor of a packet carries the metadata. */
            vmxnet3_tx_retrieve_metadata(s, &txd);
            s->tx_sop = false;
        }

        if (txd.eop) {
            if (!s->skip_current_tx_pkt && net_tx_pkt_parse(s->tx_pkt)) {
                if (s->needs_vlan) {
                    net_tx_pkt_setup_vlan_header(s->tx_pkt, s->tci);
                }

                vmxnet3_send_packet(s, qidx);
            } else {
                vmxnet3_on_tx_done_update_stats(s, qidx,
                                                VMXNET3_PKT_STATUS_ERROR);
            }

            vmxnet3_complete_packet(s, qidx, txd_idx);
            s->tx_sop = true;
            s->skip_current_tx_pkt = false;
            net_tx_pkt_reset(s->tx_pkt);
        }
    }
}
| 0 |
[] |
qemu
|
d05dcd94aee88728facafb993c7280547eb4d645
| 57,182,715,500,997,560,000,000,000,000,000,000,000 | 49 |
net: vmxnet3: validate configuration values during activate (CVE-2021-20203)
While activating device in vmxnet3_acticate_device(), it does not
validate guest supplied configuration values against predefined
minimum - maximum limits. This may lead to integer overflow or
OOB access issues. Add checks to avoid it.
Fixes: CVE-2021-20203
Buglink: https://bugs.launchpad.net/qemu/+bug/1913873
Reported-by: Gaoning Pan <[email protected]>
Signed-off-by: Prasad J Pandit <[email protected]>
Signed-off-by: Jason Wang <[email protected]>
|
/*
 * Fetch the cursor image for `resource_id` from the virgl renderer and
 * copy it into the scanout's current cursor.  The data is discarded when
 * the renderer-reported dimensions do not match the cursor we already
 * have, which keeps the memcpy below from overrunning
 * current_cursor->data.
 */
static void update_cursor_data_virgl(VirtIOGPU *g,
                                     struct virtio_gpu_scanout *s,
                                     uint32_t resource_id)
{
    uint32_t width, height;
    uint32_t pixels, *data;

    data = virgl_renderer_get_cursor_data(resource_id, &width, &height);
    if (!data) {
        return;
    }

    if (width != s->current_cursor->width ||
        height != s->current_cursor->height) {
        free(data);     /* dimension mismatch: drop renderer data */
        return;
    }

    pixels = s->current_cursor->width * s->current_cursor->height;
    memcpy(s->current_cursor->data, data, pixels * sizeof(uint32_t));
    free(data);
}
| 0 |
[
"CWE-772",
"CWE-401"
] |
qemu
|
dd248ed7e204ee8a1873914e02b8b526e8f1b80d
| 141,069,397,515,348,610,000,000,000,000,000,000,000 | 22 |
virtio-gpu: fix memory leak in set scanout
In virtio_gpu_set_scanout function, when creating the 'rect'
its refcount is set to 2, by pixman_image_create_bits and
qemu_create_displaysurface_pixman function. This can lead
a memory leak issues. This patch avoid this issue.
Signed-off-by: Li Qiang <[email protected]>
Reviewed-by: Marc-André Lureau <[email protected]>
Message-id: [email protected]
Signed-off-by: Gerd Hoffmann <[email protected]>
|
// Push tunnel failure handler: report a generic "Cache Error" through
// the common push-error path.
HttpTransact::HandlePushTunnelFailure(State* s)
{
  HandlePushError(s, "Cache Error");
}
| 0 |
[
"CWE-119"
] |
trafficserver
|
8b5f0345dade6b2822d9b52c8ad12e63011a5c12
| 249,275,576,896,300,140,000,000,000,000,000,000,000 | 4 |
Fix the internal buffer sizing. Thanks to Sudheer for helping isolating this bug
|
/*
 * Report a TKIP Michael-MIC failure to user space as a wireless custom
 * event (consumed e.g. by hostapd/wpa_supplicant).
 *
 * In AP mode the `keyid` argument carries the AID of the offending STA
 * in its upper bits (aid = keyid >> 2), which is resolved to a MAC
 * address for the event string.  In STA mode the low two bits of keyid
 * are the key index and `ismcast` distinguishes group vs pairwise keys.
 */
ar6000_tkip_micerr_event(struct ar6_softc *ar, u8 keyid, bool ismcast)
{
    static const char *tag = "MLME-MICHAELMICFAILURE.indication";
    char buf[128];
    union iwreq_data wrqu;

    /*
     * For AP case, keyid will have aid of STA which sent pkt with
     * MIC error. Use this aid to get MAC & send it to hostapd.
     */
    if (ar->arNetworkType == AP_NETWORK) {
        sta_t *s = ieee80211_find_conn_for_aid(ar, (keyid >> 2));
        if(!s){
            A_PRINTF("AP TKIP MIC error received from Invalid aid / STA not found =%d\n", keyid);
            return;
        }
        A_PRINTF("AP TKIP MIC error received from aid=%d\n", keyid);
        snprintf(buf,sizeof(buf), "%s addr=%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x",
                 tag, s->mac[0],s->mac[1],s->mac[2],s->mac[3],s->mac[4],s->mac[5]);
    } else {
        /* STA mode: also notify cfg80211 before emitting the WEXT event. */
        ar6k_cfg80211_tkip_micerr_event(ar, keyid, ismcast);
        A_PRINTF("AR6000 TKIP MIC error received for keyid %d %scast\n",
                 keyid & 0x3, ismcast ? "multi": "uni");
        snprintf(buf, sizeof(buf), "%s(keyid=%d %sicast)", tag, keyid & 0x3,
                 ismcast ? "mult" : "un");
    }

    memset(&wrqu, 0, sizeof(wrqu));
    wrqu.data.length = strlen(buf);
    wireless_send_event(ar->arNetDev, IWEVCUSTOM, &wrqu, buf);
}
| 0 |
[
"CWE-703",
"CWE-264"
] |
linux
|
550fd08c2cebad61c548def135f67aba284c6162
| 217,088,296,811,287,430,000,000,000,000,000,000,000 | 33 |
net: Audit drivers to identify those needing IFF_TX_SKB_SHARING cleared
After the last patch, We are left in a state in which only drivers calling
ether_setup have IFF_TX_SKB_SHARING set (we assume that drivers touching real
hardware call ether_setup for their net_devices and don't hold any state in
their skbs. There are a handful of drivers that violate this assumption of
course, and need to be fixed up. This patch identifies those drivers, and marks
them as not being able to support the safe transmission of skbs by clearning the
IFF_TX_SKB_SHARING flag in priv_flags
Signed-off-by: Neil Horman <[email protected]>
CC: Karsten Keil <[email protected]>
CC: "David S. Miller" <[email protected]>
CC: Jay Vosburgh <[email protected]>
CC: Andy Gospodarek <[email protected]>
CC: Patrick McHardy <[email protected]>
CC: Krzysztof Halasa <[email protected]>
CC: "John W. Linville" <[email protected]>
CC: Greg Kroah-Hartman <[email protected]>
CC: Marcel Holtmann <[email protected]>
CC: Johannes Berg <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
/* Windows-only regression test: checkout of a branch containing a path
 * that reaches ".git" via a backslash separator must fail.  No-op on
 * POSIX, where backslash is not a path separator. */
void test_checkout_nasty__dotgit_backslash_path(void)
{
#ifdef GIT_WIN32
	test_checkout_fails("refs/heads/dotgit_backslash_path", ".git/foobar");
#endif
}
| 0 |
[
"CWE-20",
"CWE-706"
] |
libgit2
|
3f7851eadca36a99627ad78cbe56a40d3776ed01
| 189,628,567,389,801,300,000,000,000,000,000,000,000 | 6 |
Disallow NTFS Alternate Data Stream attacks, even on Linux/macOS
A little-known feature of NTFS is that it offers to store metadata in
so-called "Alternate Data Streams" (inspired by Apple's "resource
forks") that are copied together with the file they are associated with.
These Alternate Data Streams can be accessed via `<file name>:<stream
name>:<stream type>`.
Directories, too, have Alternate Data Streams, and they even have a
default stream type `$INDEX_ALLOCATION`. Which means that `abc/` and
`abc::$INDEX_ALLOCATION/` are actually equivalent.
This is of course another attack vector on the Git directory that we
definitely want to prevent.
On Windows, we already do this incidentally, by disallowing colons in
file/directory names.
While it looks as if files'/directories' Alternate Data Streams are not
accessible in the Windows Subsystem for Linux, and neither via
CIFS/SMB-mounted network shares in Linux, it _is_ possible to access
them on SMB-mounted network shares on macOS.
Therefore, let's go the extra mile and prevent this particular attack
_everywhere_. To keep things simple, let's just disallow *any* Alternate
Data Stream of `.git`.
This is libgit2's variant of CVE-2019-1352.
Signed-off-by: Johannes Schindelin <[email protected]>
|
// Identifier string for this kernel (conv2d backward-filter).
static string name() { return "ConvBwdFilter"; }
| 0 |
[
"CWE-369",
"CWE-787"
] |
tensorflow
|
c570e2ecfc822941335ad48f6e10df4e21f11c96
| 46,661,509,395,925,060,000,000,000,000,000,000,000 | 1 |
Fix issues in Conv2DBackpropFilter.
PiperOrigin-RevId: 369772454
Change-Id: I49b465f2ae2ce91def61b56cea8000197d5177d8
|
/*
 * Transition a zone to the Offline state.
 *
 * Per the zoned-namespace state machine only Read Only -> Offline is a
 * legal transition; Offline -> Offline is an idempotent no-op.  Every
 * other source state yields NVME_ZONE_INVAL_TRANSITION.
 */
static uint16_t nvme_offline_zone(NvmeNamespace *ns, NvmeZone *zone,
                                  NvmeZoneState state, NvmeRequest *req)
{
    switch (state) {
    case NVME_ZONE_STATE_READ_ONLY:
        nvme_assign_zone_state(ns, zone, NVME_ZONE_STATE_OFFLINE);
        /* fall through */
    case NVME_ZONE_STATE_OFFLINE:
        return NVME_SUCCESS;
    default:
        return NVME_ZONE_INVAL_TRANSITION;
    }
}
| 0 |
[] |
qemu
|
736b01642d85be832385063f278fe7cd4ffb5221
| 93,998,031,785,145,370,000,000,000,000,000,000,000 | 13 |
hw/nvme: fix CVE-2021-3929
This fixes CVE-2021-3929 "locally" by denying DMA to the iomem of the
device itself. This still allows DMA to MMIO regions of other devices
(e.g. doing P2P DMA to the controller memory buffer of another NVMe
device).
Fixes: CVE-2021-3929
Reported-by: Qiuhao Li <[email protected]>
Reviewed-by: Keith Busch <[email protected]>
Reviewed-by: Philippe Mathieu-Daudé <[email protected]>
Signed-off-by: Klaus Jensen <[email protected]>
|
/*
 * Driver callback: record new link-quality statistics for a spied-on
 * address and, when a signal-level threshold is crossed, emit a
 * threshold-spy event to user space.
 *
 * Events are rate-limited by hysteresis: one event when the level rises
 * above the high threshold, one when it falls below the low threshold,
 * tracked per spy slot in spy_thr_under[].
 */
void wireless_spy_update(struct net_device *	dev,
			 unsigned char *	address,
			 struct iw_quality *	wstats)
{
	struct iw_spy_data *	spydata = get_spydata(dev);
	int			i;
	int			match = -1;

	/* Make sure driver is not buggy or using the old API */
	if(!spydata)
		return;

#ifdef WE_SPY_DEBUG
	printk(KERN_DEBUG "wireless_spy_update() :  offset %ld, spydata %p, address %02X:%02X:%02X:%02X:%02X:%02X\n", dev->wireless_handlers->spy_offset, spydata, address[0], address[1], address[2], address[3], address[4], address[5]);
#endif	/* WE_SPY_DEBUG */

	/* Update all records that match */
	for(i = 0; i < spydata->spy_number; i++)
		if(!memcmp(address, spydata->spy_address[i], ETH_ALEN)) {
			memcpy(&(spydata->spy_stat[i]), wstats,
			       sizeof(struct iw_quality));
			match = i;	/* remember last matching slot */
		}

	/* Generate an event if we cross the spy threshold.
	 * To avoid event storms, we have a simple hysteresis : we generate
	 * event only when we go under the low threshold or above the
	 * high threshold. */
	if(match >= 0) {
		if(spydata->spy_thr_under[match]) {
			/* Was below: fire only when rising past the high mark. */
			if(wstats->level > spydata->spy_thr_high.level) {
				spydata->spy_thr_under[match] = 0;
				iw_send_thrspy_event(dev, spydata,
						     address, wstats);
			}
		} else {
			/* Was above: fire only when dropping past the low mark. */
			if(wstats->level < spydata->spy_thr_low.level) {
				spydata->spy_thr_under[match] = 1;
				iw_send_thrspy_event(dev, spydata,
						     address, wstats);
			}
		}
	}
}
| 0 |
[
"CWE-200"
] |
linux-2.6
|
9ef1d4c7c7aca1cd436612b6ca785b726ffb8ed8
| 140,616,274,045,403,100,000,000,000,000,000,000,000 | 44 |
[NETLINK]: Missing initializations in dumped data
Mostly missing initialization of padding fields of 1 or 2 bytes length,
two instances of uninitialized nlmsgerr->msg of 16 bytes length.
Signed-off-by: Patrick McHardy <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
/*
 * Build the 16-byte IV for an 802.15.4 security operation.
 *
 * Layout (byte index):
 *   [0]      flags: L' = L - 1 = 1 (CCM length-field encoding)
 *   [1..8]   source address, byte-swapped from its little-endian form
 *   [9..12]  frame counter, byte-swapped likewise
 *   [13]     security level
 *   [14..15] counter bytes, initialized to 0, 1
 */
static void llsec_geniv(u8 iv[16], __le64 addr,
			const struct ieee802154_sechdr *sec)
{
	__be64 addr_bytes = (__force __be64) swab64((__force u64) addr);
	__be32 frame_counter = (__force __be32) swab32((__force u32) sec->frame_counter);

	iv[0] = 1; /* L' = L - 1 = 1 */
	memcpy(iv + 1, &addr_bytes, sizeof(addr_bytes));
	memcpy(iv + 9, &frame_counter, sizeof(frame_counter));
	iv[13] = sec->level;
	iv[14] = 0;
	iv[15] = 1;
}
| 0 |
[
"CWE-416"
] |
linux
|
1165affd484889d4986cf3b724318935a0b120d8
| 112,983,015,007,639,160,000,000,000,000,000,000,000 | 13 |
net: mac802154: Fix general protection fault
syzbot found general protection fault in crypto_destroy_tfm()[1].
It was caused by wrong clean up loop in llsec_key_alloc().
If one of the tfm array members is in IS_ERR() range it will
cause general protection fault in clean up function [1].
Call Trace:
crypto_free_aead include/crypto/aead.h:191 [inline] [1]
llsec_key_alloc net/mac802154/llsec.c:156 [inline]
mac802154_llsec_key_add+0x9e0/0xcc0 net/mac802154/llsec.c:249
ieee802154_add_llsec_key+0x56/0x80 net/mac802154/cfg.c:338
rdev_add_llsec_key net/ieee802154/rdev-ops.h:260 [inline]
nl802154_add_llsec_key+0x3d3/0x560 net/ieee802154/nl802154.c:1584
genl_family_rcv_msg_doit+0x228/0x320 net/netlink/genetlink.c:739
genl_family_rcv_msg net/netlink/genetlink.c:783 [inline]
genl_rcv_msg+0x328/0x580 net/netlink/genetlink.c:800
netlink_rcv_skb+0x153/0x420 net/netlink/af_netlink.c:2502
genl_rcv+0x24/0x40 net/netlink/genetlink.c:811
netlink_unicast_kernel net/netlink/af_netlink.c:1312 [inline]
netlink_unicast+0x533/0x7d0 net/netlink/af_netlink.c:1338
netlink_sendmsg+0x856/0xd90 net/netlink/af_netlink.c:1927
sock_sendmsg_nosec net/socket.c:654 [inline]
sock_sendmsg+0xcf/0x120 net/socket.c:674
____sys_sendmsg+0x6e8/0x810 net/socket.c:2350
___sys_sendmsg+0xf3/0x170 net/socket.c:2404
__sys_sendmsg+0xe5/0x1b0 net/socket.c:2433
do_syscall_64+0x2d/0x70 arch/x86/entry/common.c:46
entry_SYSCALL_64_after_hwframe+0x44/0xae
Signed-off-by: Pavel Skripkin <[email protected]>
Reported-by: [email protected]
Acked-by: Alexander Aring <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Stefan Schmidt <[email protected]>
|
/* Validate a rule's IPv6 match: reject it when flags or invflags carry
 * any bit outside the defined masks. */
ip6_checkentry(const struct ip6t_ip6 *ipv6)
{
	return !(ipv6->flags & ~IP6T_F_MASK) &&
	       !(ipv6->invflags & ~IP6T_INV_MASK);
}
| 0 |
[
"CWE-476"
] |
linux
|
57ebd808a97d7c5b1e1afb937c2db22beba3c1f8
| 276,750,326,886,506,300,000,000,000,000,000,000,000 | 9 |
netfilter: add back stackpointer size checks
The rationale for removing the check is only correct for rulesets
generated by ip(6)tables.
In iptables, a jump can only occur to a user-defined chain, i.e.
because we size the stack based on number of user-defined chains we
cannot exceed stack size.
However, the underlying binary format has no such restriction,
and the validation step only ensures that the jump target is a
valid rule start point.
IOW, its possible to build a rule blob that has no user-defined
chains but does contain a jump.
If this happens, no jump stack gets allocated and crash occurs
because no jumpstack was allocated.
Fixes: 7814b6ec6d0d6 ("netfilter: xtables: don't save/restore jumpstack offset")
Reported-by: [email protected]
Signed-off-by: Florian Westphal <[email protected]>
Signed-off-by: Pablo Neira Ayuso <[email protected]>
|
/* Store the NameID and all SAML attributes from `assertion` into the
 * session cache entry.
 *
 * Walks assertion->AttributeStatement -> Attribute -> AttributeValue,
 * concatenating the XML dump of each value's child nodes and appending
 * the result to the session under the attribute's name.  Also sets the
 * session expiry from MellonSessionLength (or the 86400 s default) and
 * lets the assertion's own conditions shorten it.
 *
 * Returns OK on success, or the error from am_cache_env_append().
 * Malformed nodes are logged and skipped rather than treated as fatal.
 */
static int add_attributes(am_cache_entry_t *session, request_rec *r,
                          const char *name_id, LassoSaml2Assertion *assertion)
{
    am_dir_cfg_rec *dir_cfg;
    GList *atr_stmt_itr;
    LassoSaml2AttributeStatement *atr_stmt;
    GList *atr_itr;
    LassoSaml2Attribute *attribute;
    GList *value_itr;
    LassoSaml2AttributeValue *value;
    GList *any_itr;
    char *content;
    char *dump;
    int ret;

    dir_cfg = am_get_dir_cfg(r);

    /* Set expires to whatever is set by MellonSessionLength. */
    if(dir_cfg->session_length == -1) {
        /* -1 means "use default. The current default is 86400 seconds. */
        am_cache_update_expires(session, apr_time_now()
                                + apr_time_make(86400, 0));
    } else {
        am_cache_update_expires(session, apr_time_now()
                                + apr_time_make(dir_cfg->session_length, 0));
    }

    /* Save session information. */
    ret = am_cache_env_append(session, "NAME_ID", name_id);
    if(ret != OK) {
        return ret;
    }

    /* Update expires timestamp of session. */
    am_handle_session_expire(r, session, assertion);

    /* assertion->AttributeStatement is a list of
     * LassoSaml2AttributeStatement objects.
     */
    for(atr_stmt_itr = g_list_first(assertion->AttributeStatement);
        atr_stmt_itr != NULL;
        atr_stmt_itr = g_list_next(atr_stmt_itr)) {
        atr_stmt = atr_stmt_itr->data;
        if (!LASSO_IS_SAML2_ATTRIBUTE_STATEMENT(atr_stmt)) {
            ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
                          "Wrong type of AttributeStatement node.");
            continue;
        }

        /* atr_stmt->Attribute is list of LassoSaml2Attribute objects. */
        for(atr_itr = g_list_first(atr_stmt->Attribute);
            atr_itr != NULL;
            atr_itr = g_list_next(atr_itr)) {
            attribute = atr_itr->data;
            if (!LASSO_IS_SAML2_ATTRIBUTE(attribute)) {
                ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
                              "Wrong type of Attribute node.");
                continue;
            }

            /* attribute->AttributeValue is a list of
             * LassoSaml2AttributeValue objects.
             */
            for(value_itr = g_list_first(attribute->AttributeValue);
                value_itr != NULL;
                value_itr = g_list_next(value_itr)) {
                value = value_itr->data;
                if (!LASSO_IS_SAML2_ATTRIBUTE_VALUE(value)) {
                    ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
                                  "Wrong type of AttributeValue node.");
                    continue;
                }

                /* value->any is a list with the child nodes of the
                 * AttributeValue element.
                 *
                 * We assume that the list contains a single text node.
                 */
                if(value->any == NULL) {
                    ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r,
                                  "AttributeValue element was empty.");
                    continue;
                }

                content = "";
                for (any_itr = g_list_first(value->any);
                     any_itr != NULL;
                     any_itr = g_list_next(any_itr)) {
                    /* Verify that this is a LassoNode object. */
                    if(!LASSO_NODE(any_itr->data)) {
                        ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r,
                                      "AttributeValue element contained an "
                                      " element which wasn't a Node.");
                        continue;
                    }
                    dump = lasso_node_dump(LASSO_NODE(any_itr->data));
                    if (!dump) {
                        ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r,
                                      "AttributeValue content dump failed.");
                        continue;
                    }
                    /* Use the request pool, no need to free results */
                    content = apr_pstrcat(r->pool, content, dump, NULL);
                    g_free(dump);   /* dump was allocated by lasso, not APR */
                }

                /* Decode and save the attribute. */
                ret = am_cache_env_append(session, attribute->Name, content);
                if(ret != OK) {
                    return ret;
                }
            }
        }
    }

    return OK;
}
| 0 |
[] |
mod_auth_mellon
|
6bdda9170a8f1757dabc5b109958657417728018
| 230,311,186,236,541,470,000,000,000,000,000,000,000 | 120 |
Fix segmentation fault when receiving badly formed logout message.
If the logout message is badly formed, we won't get the entityID in
`logout->parent.remote_providerID`. If we call `apr_hash_get()` with a
null pointer, it will cause a segmentation fault.
Add a check to validate that the entityID is correctly set.
|
// Dispatch a broadcasted element-wise int8 maximum.  Generic (arbitrary)
// broadcasts fall back to the slow reference implementation; the common
// five-fold broadcast shapes go through the optimized
// BinaryBroadcastFiveFold path with elementwise/scalar kernels.
inline void BroadcastMaximumDispatch(const ArithmeticParams& params,
                                     const RuntimeShape& input1_shape,
                                     const int8* input1_data,
                                     const RuntimeShape& input2_shape,
                                     const int8* input2_data,
                                     const RuntimeShape& output_shape,
                                     int8* output_data, Op op) {
  if (params.broadcast_category == BroadcastableOpCategory::kGenericBroadcast) {
    return reference_ops::MaximumMinimumBroadcastSlow(
        input1_shape, input1_data, input2_shape, input2_data, output_shape,
        output_data, op);
  }

  BinaryBroadcastFiveFold(params, input1_shape, input1_data, input2_shape,
                          input2_data, output_shape, output_data,
                          MaximumElementwise, MaximumScalarBroadcast);
}
| 0 |
[
"CWE-476",
"CWE-369"
] |
tensorflow
|
15691e456c7dc9bd6be203b09765b063bf4a380c
| 112,155,216,766,907,970,000,000,000,000,000,000,000 | 17 |
Prevent dereferencing of null pointers in TFLite's `add.cc`.
PiperOrigin-RevId: 387244946
Change-Id: I56094233327fbd8439b92e1dbb1262176e00eeb9
|
static int triclr(jas_iccprof_t *iccprof, int op, jas_cmpxformseq_t **retpxformseq)
{
int i;
jas_iccattrval_t *trcs[3];
jas_iccattrval_t *cols[3];
jas_cmshapmat_t *shapmat;
jas_cmpxform_t *pxform;
jas_cmpxformseq_t *pxformseq;
jas_cmreal_t mat[3][4];
jas_cmshapmatlut_t lut;
pxform = 0;
pxformseq = 0;
for (i = 0; i < 3; ++i) {
trcs[i] = 0;
cols[i] = 0;
}
jas_cmshapmatlut_init(&lut);
if (!(trcs[0] = jas_iccprof_getattr(iccprof, JAS_ICC_TAG_REDTRC)) ||
!(trcs[1] = jas_iccprof_getattr(iccprof, JAS_ICC_TAG_GRNTRC)) ||
!(trcs[2] = jas_iccprof_getattr(iccprof, JAS_ICC_TAG_BLUTRC)) ||
!(cols[0] = jas_iccprof_getattr(iccprof, JAS_ICC_TAG_REDMATCOL)) ||
!(cols[1] = jas_iccprof_getattr(iccprof, JAS_ICC_TAG_GRNMATCOL)) ||
!(cols[2] = jas_iccprof_getattr(iccprof, JAS_ICC_TAG_BLUMATCOL)))
goto error;
for (i = 0; i < 3; ++i) {
if (trcs[i]->type != JAS_ICC_TYPE_CURV ||
cols[i]->type != JAS_ICC_TYPE_XYZ)
goto error;
}
if (!(pxform = jas_cmpxform_createshapmat()))
goto error;
pxform->numinchans = 3;
pxform->numoutchans = 3;
shapmat = &pxform->data.shapmat;
if (!(pxformseq = jas_cmpxformseq_create()))
goto error;
if (jas_cmpxformseq_insertpxform(pxformseq, -1, pxform))
goto error;
shapmat->mono = 0;
shapmat->useluts = 1;
shapmat->usemat = 1;
if (!op) {
shapmat->order = 0;
for (i = 0; i < 3; ++i) {
shapmat->mat[0][i] = cols[i]->data.xyz.x / 65536.0;
shapmat->mat[1][i] = cols[i]->data.xyz.y / 65536.0;
shapmat->mat[2][i] = cols[i]->data.xyz.z / 65536.0;
}
for (i = 0; i < 3; ++i)
shapmat->mat[i][3] = 0.0;
for (i = 0; i < 3; ++i) {
if (jas_cmshapmatlut_set(&shapmat->luts[i], &trcs[i]->data.curv))
goto error;
}
} else {
shapmat->order = 1;
for (i = 0; i < 3; ++i) {
mat[0][i] = cols[i]->data.xyz.x / 65536.0;
mat[1][i] = cols[i]->data.xyz.y / 65536.0;
mat[2][i] = cols[i]->data.xyz.z / 65536.0;
}
for (i = 0; i < 3; ++i)
mat[i][3] = 0.0;
if (jas_cmshapmat_invmat(shapmat->mat, mat))
goto error;
for (i = 0; i < 3; ++i) {
jas_cmshapmatlut_init(&lut);
if (jas_cmshapmatlut_set(&lut, &trcs[i]->data.curv))
goto error;
if (jas_cmshapmatlut_invert(&shapmat->luts[i], &lut, lut.size))
goto error;
jas_cmshapmatlut_cleanup(&lut);
}
}
for (i = 0; i < 3; ++i) {
jas_iccattrval_destroy(trcs[i]);
jas_iccattrval_destroy(cols[i]);
}
jas_cmpxform_destroy(pxform);
*retpxformseq = pxformseq;
return 0;
error:
for (i = 0; i < 3; ++i) {
if (trcs[i]) {
jas_iccattrval_destroy(trcs[i]);
}
if (cols[i]) {
jas_iccattrval_destroy(cols[i]);
}
}
if (pxformseq) {
jas_cmpxformseq_destroy(pxformseq);
}
if (pxform) {
jas_cmpxform_destroy(pxform);
}
return -1;
}
| 0 |
[
"CWE-189"
] |
jasper
|
3c55b399c36ef46befcb21e4ebc4799367f89684
| 71,757,627,067,769,850,000,000,000,000,000,000,000 | 103 |
At many places in the code, jas_malloc or jas_recalloc was being
invoked with the size argument being computed in a manner that would not
allow integer overflow to be detected. Now, these places in the code
have been modified to use special-purpose memory allocation functions
(e.g., jas_alloc2, jas_alloc3, jas_realloc2) that check for overflow.
This should fix many security problems.
|
static char *pdf_readstring(const char *q0, int len, const char *key, unsigned *slen)
{
char *s, *s0;
const char *start, *q, *end;
if (slen)
*slen = 0;
q = pdf_getdict(q0, &len, key);
if (!q)
return NULL;
if (*q == '(') {
int paren = 1;
start = ++q;
for (;paren > 0 && len > 0; q++,len--) {
switch (*q) {
case '(':
paren++;
break;
case ')':
paren--;
break;
case '\\':
q++;
len--;
break;
default:
break;
}
}
q--;
len = q - start;
s0 = s = cli_malloc(len + 1);
if (!s)
return NULL;
end = start + len;
for (q = start;q < end;q++) {
if (*q != '\\') {
*s++ = *q;
} else {
q++;
switch (*q) {
case 'n':
*s++ = '\n';
break;
case 'r':
*s++ = '\r';
break;
case 't':
*s++ = '\t';
break;
case 'b':
*s++ = '\b';
break;
case 'f':
*s++ = '\f';
break;
case '(':/* fall-through */
case ')':/* fall-through */
case '\\':
*s++ = *q;
break;
case '\n':
/* ignore */
break;
case '\r':
/* ignore */
if (q+1 < end && q[1] == '\n')
q++;
break;
case '0':
case '1':
case '2':
case '3':
case '4':
case '5':
case '6':
case '7':
case '8':
case '9':
/* octal escape */
if (q+2 < end)
q++;
*s++ = 64*(q[0] - '0')+
8*(q[1] - '0')+
(q[2] - '0');
break;
default:
/* ignore */
q--;
break;
}
}
}
*s++ = '\0';
if (slen)
*slen = s - s0 - 1;
return s0;
}
if (*q == '<') {
start = ++q;
q = memchr(q+1, '>', len);
if (!q)
return NULL;
s = cli_malloc((q - start)/2 + 1);
cli_hex2str_to(start, s, q - start);
s[(q-start)/2] = '\0';
if (slen)
*slen = (q - start)/2;
return s;
}
cli_dbgmsg("cli_pdf: %s is invalid string in dict\n", key);
return NULL;
}
| 0 |
[
"CWE-119",
"CWE-189",
"CWE-79"
] |
clamav-devel
|
24ff855c82d3f5c62bc5788a5776cefbffce2971
| 69,984,198,765,142,940,000,000,000,000,000,000,000 | 112 |
pdf: bb #7053
|
enum_field_types binlog_type() const { return MYSQL_TYPE_DATETIME2; }
| 0 |
[
"CWE-416",
"CWE-703"
] |
server
|
08c7ab404f69d9c4ca6ca7a9cf7eec74c804f917
| 138,378,750,268,416,300,000,000,000,000,000,000,000 | 1 |
MDEV-24176 Server crashes after insert in the table with virtual
column generated using date_format() and if()
vcol_info->expr is allocated on expr_arena at parsing stage. Since
expr item is allocated on expr_arena all its containee items must be
allocated on expr_arena too. Otherwise fix_session_expr() will
encounter prematurely freed item.
When table is reopened from cache vcol_info contains stale
expression. We refresh expression via TABLE::vcol_fix_exprs() but
first we must prepare a proper context (Vcol_expr_context) which meets
some requirements:
1. As noted above expr update must be done on expr_arena as there may
be new items created. It was a bug in fix_session_expr_for_read() and
was just not reproduced because of no second refix. Now refix is done
for more cases so it does reproduce. Tests affected: vcol.binlog
2. Also name resolution context must be narrowed to the single table.
Tested by: vcol.update main.default vcol.vcol_syntax gcol.gcol_bugfixes
3. sql_mode must be clean and not fail expr update.
sql_mode such as MODE_NO_BACKSLASH_ESCAPES, MODE_NO_ZERO_IN_DATE, etc
must not affect vcol expression update. If the table was created
successfully any further evaluation must not fail. Tests affected:
main.func_like
Reviewed by: Sergei Golubchik <[email protected]>
|
REDIS_STATIC quicklistNode *quicklistCreateNode(void) {
quicklistNode *node;
node = zmalloc(sizeof(*node));
node->zl = NULL;
node->count = 0;
node->sz = 0;
node->next = node->prev = NULL;
node->encoding = QUICKLIST_NODE_ENCODING_RAW;
node->container = QUICKLIST_NODE_CONTAINER_ZIPLIST;
node->recompress = 0;
return node;
}
| 0 |
[
"CWE-190"
] |
redis
|
f6a40570fa63d5afdd596c78083d754081d80ae3
| 43,940,213,420,367,150,000,000,000,000,000,000,000 | 12 |
Fix ziplist and listpack overflows and truncations (CVE-2021-32627, CVE-2021-32628)
- fix possible heap corruption in ziplist and listpack resulting by trying to
allocate more than the maximum size of 4GB.
- prevent ziplist (hash and zset) from reaching size of above 1GB, will be
converted to HT encoding, that's not a useful size.
- prevent listpack (stream) from reaching size of above 1GB.
- XADD will start a new listpack if the new record may cause the previous
listpack to grow over 1GB.
- XADD will respond with an error if a single stream record is over 1GB
- List type (ziplist in quicklist) was truncating strings that were over 4GB,
now it'll respond with an error.
|
static void php_build_argv(char *s, zval *track_vars_array TSRMLS_DC)
{
zval *arr, *argc, *tmp;
int count = 0;
char *ss, *space;
if (!(SG(request_info).argc || track_vars_array)) {
return;
}
ALLOC_INIT_ZVAL(arr);
array_init(arr);
/* Prepare argv */
if (SG(request_info).argc) { /* are we in cli sapi? */
int i;
for (i = 0; i < SG(request_info).argc; i++) {
ALLOC_ZVAL(tmp);
Z_TYPE_P(tmp) = IS_STRING;
Z_STRLEN_P(tmp) = strlen(SG(request_info).argv[i]);
Z_STRVAL_P(tmp) = estrndup(SG(request_info).argv[i], Z_STRLEN_P(tmp));
INIT_PZVAL(tmp);
if (zend_hash_next_index_insert(Z_ARRVAL_P(arr), &tmp, sizeof(zval *), NULL) == FAILURE) {
if (Z_TYPE_P(tmp) == IS_STRING) {
efree(Z_STRVAL_P(tmp));
}
}
}
} else if (s && *s) {
ss = s;
while (ss) {
space = strchr(ss, '+');
if (space) {
*space = '\0';
}
/* auto-type */
ALLOC_ZVAL(tmp);
Z_TYPE_P(tmp) = IS_STRING;
Z_STRLEN_P(tmp) = strlen(ss);
Z_STRVAL_P(tmp) = estrndup(ss, Z_STRLEN_P(tmp));
INIT_PZVAL(tmp);
count++;
if (zend_hash_next_index_insert(Z_ARRVAL_P(arr), &tmp, sizeof(zval *), NULL) == FAILURE) {
if (Z_TYPE_P(tmp) == IS_STRING) {
efree(Z_STRVAL_P(tmp));
}
}
if (space) {
*space = '+';
ss = space + 1;
} else {
ss = space;
}
}
}
/* prepare argc */
ALLOC_INIT_ZVAL(argc);
if (SG(request_info).argc) {
Z_LVAL_P(argc) = SG(request_info).argc;
} else {
Z_LVAL_P(argc) = count;
}
Z_TYPE_P(argc) = IS_LONG;
if (SG(request_info).argc) {
Z_ADDREF_P(arr);
Z_ADDREF_P(argc);
zend_hash_update(&EG(symbol_table), "argv", sizeof("argv"), &arr, sizeof(zval *), NULL);
zend_hash_update(&EG(symbol_table), "argc", sizeof("argc"), &argc, sizeof(zval *), NULL);
}
if (track_vars_array) {
Z_ADDREF_P(arr);
Z_ADDREF_P(argc);
zend_hash_update(Z_ARRVAL_P(track_vars_array), "argv", sizeof("argv"), &arr, sizeof(zval *), NULL);
zend_hash_update(Z_ARRVAL_P(track_vars_array), "argc", sizeof("argc"), &argc, sizeof(zval *), NULL);
}
zval_ptr_dtor(&arr);
zval_ptr_dtor(&argc);
}
| 1 |
[
"CWE-601"
] |
php-src
|
98b9dfaec95e6f910f125ed172cdbd25abd006ec
| 17,600,917,477,970,302,000,000,000,000,000,000,000 | 80 |
Fix for HTTP_PROXY issue.
The following changes are made:
- _SERVER/_ENV only has HTTP_PROXY if the local environment has it,
and only one from the environment.
- getenv('HTTP_PROXY') only returns one from the local environment
- getenv has optional second parameter, telling it to only consider
local environment
|
inline RequestNote *getRequestNote(request_rec *r) {
void *pointer = 0;
apr_pool_userdata_get(&pointer, "Phusion Passenger", r->pool);
if (pointer != NULL) {
RequestNote *note = (RequestNote *) pointer;
if (OXT_LIKELY(note->enabled)) {
return note;
} else {
return 0;
}
} else {
return 0;
}
}
| 0 |
[
"CWE-59"
] |
passenger
|
9dda49f4a3ebe9bafc48da1bd45799f30ce19566
| 225,303,727,733,539,000,000,000,000,000,000,000,000 | 14 |
Fixed a problem with graceful web server restarts.
This problem was introduced in 4.0.6 during the attempt to fix issue #910.
|
BGD_DECLARE(gdImagePtr) gdImageCropThreshold(gdImagePtr im, const unsigned int color, const float threshold)
{
const int width = gdImageSX(im);
const int height = gdImageSY(im);
int x,y;
int match;
gdRect crop;
crop.x = 0;
crop.y = 0;
crop.width = 0;
crop.height = 0;
/* Pierre: crop everything sounds bad */
if (threshold > 100.0) {
return NULL;
}
/* TODO: Add gdImageGetRowPtr and works with ptr at the row level
* for the true color and palette images
* new formats will simply work with ptr
*/
match = 1;
for (y = 0; match && y < height; y++) {
for (x = 0; match && x < width; x++) {
match = (gdColorMatch(im, color, gdImageGetPixel(im, x,y), threshold)) > 0;
}
}
/* Pierre
* Nothing to do > bye
* Duplicate the image?
*/
if (y == height - 1) {
return NULL;
}
crop.y = y -1;
match = 1;
for (y = height - 1; match && y >= 0; y--) {
for (x = 0; match && x < width; x++) {
match = (gdColorMatch(im, color, gdImageGetPixel(im, x, y), threshold)) > 0;
}
}
if (y == 0) {
crop.height = height - crop.y + 1;
} else {
crop.height = y - crop.y + 2;
}
match = 1;
for (x = 0; match && x < width; x++) {
for (y = 0; match && y < crop.y + crop.height - 1; y++) {
match = (gdColorMatch(im, color, gdImageGetPixel(im, x,y), threshold)) > 0;
}
}
crop.x = x - 1;
match = 1;
for (x = width - 1; match && x >= 0; x--) {
for (y = 0; match && y < crop.y + crop.height - 1; y++) {
match = (gdColorMatch(im, color, gdImageGetPixel(im, x,y), threshold)) > 0;
}
}
crop.width = x - crop.x + 2;
return gdImageCrop(im, &crop);
}
| 1 |
[
"CWE-20"
] |
libgd
|
1ccfe21e14c4d18336f9da8515cd17db88c3de61
| 276,255,854,978,816,880,000,000,000,000,000,000,000 | 70 |
fix php 72494, invalid color index not handled, can lead to crash
|
bool cephx_build_service_ticket_reply(CephContext *cct,
CryptoKey& principal_secret,
vector<CephXSessionAuthInfo> ticket_info_vec,
bool should_encrypt_ticket,
CryptoKey& ticket_enc_key,
bufferlist& reply)
{
__u8 service_ticket_reply_v = 1;
::encode(service_ticket_reply_v, reply);
uint32_t num = ticket_info_vec.size();
::encode(num, reply);
ldout(cct, 10) << "build_service_ticket_reply encoding " << num
<< " tickets with secret " << principal_secret << dendl;
for (vector<CephXSessionAuthInfo>::iterator ticket_iter = ticket_info_vec.begin();
ticket_iter != ticket_info_vec.end();
++ticket_iter) {
CephXSessionAuthInfo& info = *ticket_iter;
::encode(info.service_id, reply);
__u8 service_ticket_v = 1;
::encode(service_ticket_v, reply);
CephXServiceTicket msg_a;
msg_a.session_key = info.session_key;
msg_a.validity = info.validity;
std::string error;
if (encode_encrypt(cct, msg_a, principal_secret, reply, error)) {
ldout(cct, -1) << "error encoding encrypted: " << error << dendl;
return false;
}
bufferlist service_ticket_bl;
CephXTicketBlob blob;
if (!cephx_build_service_ticket_blob(cct, info, blob)) {
return false;
}
::encode(blob, service_ticket_bl);
ldout(cct, 30) << "service_ticket_blob is ";
service_ticket_bl.hexdump(*_dout);
*_dout << dendl;
::encode((__u8)should_encrypt_ticket, reply);
if (should_encrypt_ticket) {
if (encode_encrypt(cct, service_ticket_bl, ticket_enc_key, reply, error)) {
ldout(cct, -1) << "error encoding encrypted ticket: " << error << dendl;
return false;
}
} else {
::encode(service_ticket_bl, reply);
}
}
return true;
}
| 0 |
[
"CWE-287",
"CWE-284"
] |
ceph
|
5ead97120e07054d80623dada90a5cc764c28468
| 311,672,726,420,566,040,000,000,000,000,000,000,000 | 56 |
auth/cephx: add authorizer challenge
Allow the accepting side of a connection to reject an initial authorizer
with a random challenge. The connecting side then has to respond with an
updated authorizer proving they are able to decrypt the service's challenge
and that the new authorizer was produced for this specific connection
instance.
The accepting side requires this challenge and response unconditionally
if the client side advertises they have the feature bit. Servers wishing
to require this improved level of authentication simply have to require
the appropriate feature.
Signed-off-by: Sage Weil <[email protected]>
(cherry picked from commit f80b848d3f830eb6dba50123e04385173fa4540b)
# Conflicts:
# src/auth/Auth.h
# src/auth/cephx/CephxProtocol.cc
# src/auth/cephx/CephxProtocol.h
# src/auth/none/AuthNoneProtocol.h
# src/msg/Dispatcher.h
# src/msg/async/AsyncConnection.cc
- const_iterator
- ::decode vs decode
- AsyncConnection ctor arg noise
- get_random_bytes(), not cct->random()
|
Subsets and Splits
CWE 416 & 19
The query filters records related to specific CWEs (Common Weakness Enumerations), providing a basic overview of entries with these vulnerabilities but without deeper analysis.
CWE Frequency in Train Set
Counts the occurrences of each CWE (Common Weakness Enumeration) in the dataset, providing a basic distribution but limited insight.