func (string, lengths 0-484k) | target (int64, 0-1) | cwe (list, lengths 0-4) | project (string, 799 classes) | commit_id (string, length 40) | hash (float64, 1,215,700,430,453,689,100,000,000B-340,281,914,521,452,260,000,000,000,000B) | size (int64, 1-24k) | message (string, lengths 0-13.3k)
---|---|---|---|---|---|---|---|
eval5(char_u **arg, typval_T *rettv, evalarg_T *evalarg)
{
/*
* Get the first expression.
*/
if (eval6(arg, rettv, evalarg) == FAIL)
return FAIL;
/*
* Repeat computing, until no '<<' or '>>' is following.
*/
for (;;)
{
char_u *p;
int getnext;
exprtype_T type;
int evaluate;
typval_T var2;
int vim9script;
p = eval_next_non_blank(*arg, evalarg, &getnext);
if (p[0] == '<' && p[1] == '<')
type = EXPR_LSHIFT;
else if (p[0] == '>' && p[1] == '>')
type = EXPR_RSHIFT;
else
return OK;
// Handle a bitwise left or right shift operator
if (rettv->v_type != VAR_NUMBER)
{
// left operand should be a number
emsg(_(e_bitshift_ops_must_be_number));
clear_tv(rettv);
return FAIL;
}
evaluate = evalarg == NULL ? 0 : (evalarg->eval_flags & EVAL_EVALUATE);
vim9script = in_vim9script();
if (getnext)
{
*arg = eval_next_line(*arg, evalarg);
p = *arg;
}
else if (evaluate && vim9script && !VIM_ISWHITE(**arg))
{
error_white_both(*arg, 2);
clear_tv(rettv);
return FAIL;
}
/*
* Get the second variable.
*/
if (evaluate && vim9script && !IS_WHITE_OR_NUL(p[2]))
{
error_white_both(p, 2);
clear_tv(rettv);
return FAIL;
}
*arg = skipwhite_and_linebreak(p + 2, evalarg);
if (eval6(arg, &var2, evalarg) == FAIL)
{
clear_tv(rettv);
return FAIL;
}
if (var2.v_type != VAR_NUMBER || var2.vval.v_number < 0)
{
// right operand should be a positive number
if (var2.v_type != VAR_NUMBER)
emsg(_(e_bitshift_ops_must_be_number));
else
emsg(_(e_bitshift_ops_must_be_postive));
clear_tv(rettv);
clear_tv(&var2);
return FAIL;
}
if (evaluate)
{
if (var2.vval.v_number > MAX_LSHIFT_BITS)
// shifting more bits than we have always results in zero
rettv->vval.v_number = 0;
else if (type == EXPR_LSHIFT)
rettv->vval.v_number =
(uvarnumber_T)rettv->vval.v_number << var2.vval.v_number;
else
rettv->vval.v_number =
(uvarnumber_T)rettv->vval.v_number >> var2.vval.v_number;
}
clear_tv(&var2);
}
return OK;
} | 0 | [
"CWE-476"
] | vim | 79481367a457951aabd9501b510fd7e3eb29c3d8 | 16,414,140,799,211,954,000,000,000,000,000,000,000 | 97 | patch 8.2.5169: nested :source may use NULL pointer
Problem: Nested :source may use NULL pointer.
Solution: Do not use the NULL pointer. |
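The `MAX_LSHIFT_BITS` guard in `eval5()` above matters because C leaves shifts by the operand width or more undefined. Below is a minimal standalone sketch of the same guard, assuming a 64-bit number type (Vim's `varnumber_T` is typically 64-bit, which would make `MAX_LSHIFT_BITS` 63; the constant's exact definition may differ):
#include <stdint.h>
#include <stdio.h>

#define MAX_LSHIFT_BITS 63  /* assumed: width - 1 of a 64-bit number type */

static uint64_t safe_lshift(uint64_t value, uint64_t bits)
{
    /* Shifting by >= the type width is undefined behavior in C,
     * so treat over-long shifts as "all bits shifted out" -> 0. */
    if (bits > MAX_LSHIFT_BITS)
        return 0;
    return value << bits;
}

int main(void)
{
    printf("%llu\n", (unsigned long long)safe_lshift(1, 3));   /* 8 */
    printf("%llu\n", (unsigned long long)safe_lshift(1, 200)); /* 0, not UB */
    return 0;
}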
static int __init srpt_init_module(void)
{
int ret;
ret = -EINVAL;
if (srp_max_req_size < MIN_MAX_REQ_SIZE) {
pr_err("invalid value %d for kernel module parameter"
" srp_max_req_size -- must be at least %d.\n",
srp_max_req_size, MIN_MAX_REQ_SIZE);
goto out;
}
if (srpt_srq_size < MIN_SRPT_SRQ_SIZE
|| srpt_srq_size > MAX_SRPT_SRQ_SIZE) {
pr_err("invalid value %d for kernel module parameter"
" srpt_srq_size -- must be in the range [%d..%d].\n",
srpt_srq_size, MIN_SRPT_SRQ_SIZE, MAX_SRPT_SRQ_SIZE);
goto out;
}
ret = target_register_template(&srpt_template);
if (ret)
goto out;
ret = ib_register_client(&srpt_client);
if (ret) {
pr_err("couldn't register IB client\n");
goto out_unregister_target;
}
return 0;
out_unregister_target:
target_unregister_template(&srpt_template);
out:
return ret;
} | 0 | [
"CWE-200",
"CWE-476"
] | linux | 51093254bf879bc9ce96590400a87897c7498463 | 266,957,527,551,022,360,000,000,000,000,000,000,000 | 37 | IB/srpt: Simplify srpt_handle_tsk_mgmt()
Let the target core check task existence instead of the SRP target
driver. Additionally, let the target core check the validity of the
task management request instead of the ib_srpt driver.
This patch fixes the following kernel crash:
BUG: unable to handle kernel NULL pointer dereference at 0000000000000001
IP: [<ffffffffa0565f37>] srpt_handle_new_iu+0x6d7/0x790 [ib_srpt]
Oops: 0002 [#1] SMP
Call Trace:
[<ffffffffa05660ce>] srpt_process_completion+0xde/0x570 [ib_srpt]
[<ffffffffa056669f>] srpt_compl_thread+0x13f/0x160 [ib_srpt]
[<ffffffff8109726f>] kthread+0xcf/0xe0
[<ffffffff81613cfc>] ret_from_fork+0x7c/0xb0
Signed-off-by: Bart Van Assche <[email protected]>
Fixes: 3e4f574857ee ("ib_srpt: Convert TMR path to target_submit_tmr")
Tested-by: Alex Estrin <[email protected]>
Reviewed-by: Christoph Hellwig <[email protected]>
Cc: Nicholas Bellinger <[email protected]>
Cc: Sagi Grimberg <[email protected]>
Cc: stable <[email protected]>
Signed-off-by: Doug Ledford <[email protected]> |
xmlNodeDumpOutput(xmlOutputBufferPtr buf, xmlDocPtr doc, xmlNodePtr cur,
int level, int format, const char *encoding)
{
xmlSaveCtxt ctxt;
#ifdef LIBXML_HTML_ENABLED
xmlDtdPtr dtd;
int is_xhtml = 0;
#endif
xmlInitParser();
if ((buf == NULL) || (cur == NULL)) return;
if (encoding == NULL)
encoding = "UTF-8";
memset(&ctxt, 0, sizeof(ctxt));
ctxt.doc = doc;
ctxt.buf = buf;
ctxt.level = level;
ctxt.format = format ? 1 : 0;
ctxt.encoding = (const xmlChar *) encoding;
xmlSaveCtxtInit(&ctxt);
ctxt.options |= XML_SAVE_AS_XML;
#ifdef LIBXML_HTML_ENABLED
dtd = xmlGetIntSubset(doc);
if (dtd != NULL) {
is_xhtml = xmlIsXHTML(dtd->SystemID, dtd->ExternalID);
if (is_xhtml < 0)
is_xhtml = 0;
}
if (is_xhtml)
xhtmlNodeDumpOutput(&ctxt, cur);
else
#endif
xmlNodeDumpOutputInternal(&ctxt, cur);
} | 0 | [
"CWE-502"
] | libxml2 | c97750d11bb8b6f3303e7131fe526a61ac65bcfd | 73,806,460,904,504,780,000,000,000,000,000,000,000 | 39 | Avoid an out of bound access when serializing malformed strings
For https://bugzilla.gnome.org/show_bug.cgi?id=766414
* xmlsave.c: xmlBufAttrSerializeTxtContent() if an attribute value
is not UTF-8 be more careful when serializing it as we may do an
out of bound access as a result. |
void exec_command_done_array(ExecCommand *c, size_t n) {
size_t i;
for (i = 0; i < n; i++)
exec_command_done(c+i);
} | 0 | [
"CWE-269"
] | systemd | f69567cbe26d09eac9d387c0be0fc32c65a83ada | 194,978,776,682,525,770,000,000,000,000,000,000,000 | 6 | core: expose SUID/SGID restriction as new unit setting RestrictSUIDSGID= |
store_tabletStylusLower(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
struct aiptek *aiptek = dev_get_drvdata(dev);
int new_button = map_str_to_val(stylus_button_map, buf, count);
if (new_button == AIPTEK_INVALID_VALUE)
return -EINVAL;
aiptek->newSetting.stylusButtonLower = new_button;
return count;
} | 0 | [
"CWE-476",
"CWE-401"
] | linux | 8e20cf2bce122ce9262d6034ee5d5b76fbb92f96 | 94,984,491,110,213,970,000,000,000,000,000,000,000 | 11 | Input: aiptek - fix crash on detecting device without endpoints
The aiptek driver crashes in aiptek_probe() when a specially crafted USB
device without endpoints is detected. This fix adds a check that the device
has proper configuration expected by the driver. Also an error return value
is changed to more matching one in one of the error paths.
Reported-by: Ralf Spenneberg <[email protected]>
Signed-off-by: Vladis Dronov <[email protected]>
Signed-off-by: Dmitry Torokhov <[email protected]> |
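The commit message says the fix adds a check that the device presents the configuration the driver expects before it is used. A hedged in-kernel sketch of such a guard at the top of a USB probe routine follows; `example_probe` is a hypothetical name, and the real driver's exact check and error code may differ:
#include <linux/usb.h>

/* Sketch: reject interfaces with no endpoints before any code
 * dereferences endpoint[0]; a crafted device can omit endpoints. */
static int example_probe(struct usb_interface *intf,
			 const struct usb_device_id *id)
{
	if (intf->cur_altsetting->desc.bNumEndpoints < 1)
		return -ENODEV;	/* not the hardware this driver expects */

	/* ... intf->cur_altsetting->endpoint[0] is now safe to inspect ... */
	return 0;
}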
static void t1_close_font_file(const char *close_name_suffix)
{
t1_log(close_name_suffix);
t1_close();
cur_file_name = NULL;
} | 0 | [
"CWE-119"
] | texlive-source | 6ed0077520e2b0da1fd060c7f88db7b2e6068e4c | 76,597,195,508,991,590,000,000,000,000,000,000,000 | 6 | writet1 protection against buffer overflow
git-svn-id: svn://tug.org/texlive/trunk/Build/source@48697 c570f23f-e606-0410-a88d-b1316a301751 |
_rsvg_handle_allow_load (RsvgHandle *handle,
const char *uri,
GError **error)
{
RsvgHandlePrivate *priv = handle->priv;
GFile *base;
char *path, *dir;
char *scheme = NULL, *cpath = NULL, *cdir = NULL;
g_assert (handle->priv->load_policy == RSVG_LOAD_POLICY_STRICT);
scheme = g_uri_parse_scheme (uri);
/* Not a valid URI */
if (scheme == NULL)
goto deny;
/* Allow loads of data: from any location */
if (g_str_equal (scheme, "data"))
goto allow;
/* No base to compare to? */
if (priv->base_gfile == NULL)
goto deny;
/* Deny loads from differing URI schemes */
if (!g_file_has_uri_scheme (priv->base_gfile, scheme))
goto deny;
/* resource: is allowed to load anything from other resources */
if (g_str_equal (scheme, "resource"))
goto allow;
/* Non-file: isn't allowed to load anything */
if (!g_str_equal (scheme, "file"))
goto deny;
base = g_file_get_parent (priv->base_gfile);
if (base == NULL)
goto deny;
dir = g_file_get_path (base);
g_object_unref (base);
cdir = realpath (dir, NULL);
g_free (dir);
if (cdir == NULL)
goto deny;
path = g_filename_from_uri (uri, NULL, NULL);
if (path == NULL)
goto deny;
cpath = realpath (path, NULL);
g_free (path);
if (cpath == NULL)
goto deny;
/* Now check that @cpath is below @cdir */
if (!g_str_has_prefix (cpath, cdir) ||
cpath[strlen (cdir)] != G_DIR_SEPARATOR)
goto deny;
/* Allow load! */
allow:
g_free (scheme);
free (cpath);
free (cdir);
return TRUE;
deny:
g_free (scheme);
free (cpath);
free (cdir);
g_set_error (error, G_IO_ERROR, G_IO_ERROR_PERMISSION_DENIED,
"File may not link to URI \"%s\"", uri);
return FALSE;
} | 0 | [] | librsvg | a51919f7e1ca9c535390a746fbf6e28c8402dc61 | 257,015,802,366,990,320,000,000,000,000,000,000 | 81 | rsvg: Add rsvg_acquire_node()
This function does proper recursion checks when looking up resources
from URLs and thereby helps avoiding infinite loops when cyclic
references span multiple types of elements. |
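The commit adds recursion checks so that cyclic references cannot loop forever during resource lookup. A minimal sketch of the general technique, a depth counter threaded through the lookup, with hypothetical names (`acquire_node`, `MAX_DEPTH`), not librsvg's actual API:
#include <stdio.h>

#define MAX_DEPTH 256  /* assumed limit; the real bound is a design choice */

/* Sketch: each nested resource lookup increments depth; a cycle that
 * spans several element types still trips the same counter. */
static int acquire_node(const char *ref, int depth)
{
    if (depth > MAX_DEPTH) {
        fprintf(stderr, "circular reference detected at '%s'\n", ref);
        return -1;
    }
    /* ... resolve 'ref'; for each reference it contains, call
     * acquire_node(child_ref, depth + 1) ... */
    return 0;
}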
sds sdsMakeRoomFor(sds s, size_t addlen) {
void *sh, *newsh;
size_t avail = sdsavail(s);
size_t len, newlen;
char type, oldtype = s[-1] & SDS_TYPE_MASK;
int hdrlen;
/* Return ASAP if there is enough space left. */
if (avail >= addlen) return s;
len = sdslen(s);
sh = (char*)s-sdsHdrSize(oldtype);
newlen = (len+addlen);
assert(newlen > len); /* Catch size_t overflow */
if (newlen < SDS_MAX_PREALLOC)
newlen *= 2;
else
newlen += SDS_MAX_PREALLOC;
type = sdsReqType(newlen);
/* Don't use type 5: the user is appending to the string and type 5 is
* not able to remember empty space, so sdsMakeRoomFor() must be called
* at every appending operation. */
if (type == SDS_TYPE_5) type = SDS_TYPE_8;
hdrlen = sdsHdrSize(type);
assert(hdrlen + newlen + 1 > len); /* Catch size_t overflow */
if (oldtype==type) {
newsh = s_realloc(sh, hdrlen+newlen+1);
if (newsh == NULL) return NULL;
s = (char*)newsh+hdrlen;
} else {
/* Since the header size changes, need to move the string forward,
* and can't use realloc */
newsh = s_malloc(hdrlen+newlen+1);
if (newsh == NULL) return NULL;
memcpy((char*)newsh+hdrlen, s, len+1);
s_free(sh);
s = (char*)newsh+hdrlen;
s[-1] = type;
sdssetlen(s, len);
}
sdssetalloc(s, newlen);
return s;
} | 0 | [
"CWE-190"
] | redis | c992857618db99776917f10bf4f2345a5fdc78b0 | 125,064,884,014,930,770,000,000,000,000,000,000,000 | 46 | Fix integer overflow (CVE-2021-21309). (#8522)
On 32-bit systems, setting the proto-max-bulk-len config parameter to a high value may result with integer overflow and a subsequent heap overflow when parsing an input bulk (CVE-2021-21309).
This fix has two parts:
Set a reasonable limit to the config parameter.
Add additional checks to prevent the problem in other potential but unknown code paths.
(cherry picked from commit d32f2e9999ce003bad0bd2c3bca29f64dcce4433) |
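`sdsMakeRoomFor()` above catches `size_t` wraparound with assertions such as `assert(newlen > len)`; the CVE fix also bounds the config value itself. A small standalone sketch of the detection idiom, checking an unsigned addition after the fact:
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

/* Sketch: unsigned overflow is well-defined wraparound, so an addition
 * that produced a smaller result than an operand must have wrapped. */
static size_t checked_add(size_t a, size_t b)
{
    size_t sum = a + b;
    assert(sum >= a && "size_t overflow");
    return sum;
}

int main(void)
{
    printf("%zu\n", checked_add(100, 28)); /* 128 */
    /* checked_add((size_t)-1, 2) would trip the assertion */
    return 0;
}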
void lex_end_nops(LEX *lex)
{
DBUG_ENTER("lex_end_nops");
sp_head::destroy(lex->sphead);
lex->sphead= NULL;
/* Reset LEX_MASTER_INFO */
lex->mi.reset(lex->sql_command == SQLCOM_CHANGE_MASTER);
delete_dynamic(&lex->delete_gtid_domain);
DBUG_VOID_RETURN;
} | 0 | [
"CWE-476"
] | server | 3a52569499e2f0c4d1f25db1e81617a9d9755400 | 248,913,329,645,306,970,000,000,000,000,000,000,000 | 12 | MDEV-25636: Bug report: abortion in sql/sql_parse.cc:6294
The assertion failure was caused by this query
select /*id=1*/ from t1
where
col= ( select /*id=2*/ from ... where corr_cond1
union
select /*id=4*/ from ... where corr_cond2)
Here,
- select with id=2 was correlated due to corr_cond1.
- select with id=4 was initially correlated due to corr_cond2, but then
the optimizer optimized away the correlation, making the select with id=4
uncorrelated.
However, since select with id=2 remained correlated, the execution had to
re-compute the whole UNION. When it tried to execute select with id=4, it
hit an assertion (join buffer already free'd).
This is because select with id=4 has freed its execution structures after
it has been executed once. The select is uncorrelated, so it did not expect
it would need to be executed for the second time.
Fixed this by adding this logic in
st_select_lex::optimize_unflattened_subqueries():
If a member of a UNION is correlated, mark all its members as
correlated, so that they are prepared to be executed multiple times. |
static int magicmouse_raw_event(struct hid_device *hdev,
struct hid_report *report, u8 *data, int size)
{
struct magicmouse_sc *msc = hid_get_drvdata(hdev);
struct input_dev *input = msc->input;
int x = 0, y = 0, ii, clicks = 0, npoints;
switch (data[0]) {
case TRACKPAD_REPORT_ID:
/* Expect four bytes of prefix, and N*9 bytes of touch data. */
if (size < 4 || ((size - 4) % 9) != 0)
return 0;
npoints = (size - 4) / 9;
if (npoints > 15) {
hid_warn(hdev, "invalid size value (%d) for TRACKPAD_REPORT_ID\n",
size);
return 0;
}
msc->ntouches = 0;
for (ii = 0; ii < npoints; ii++)
magicmouse_emit_touch(msc, ii, data + ii * 9 + 4);
clicks = data[1];
/* The following bits provide a device specific timestamp. They
* are unused here.
*
* ts = data[1] >> 6 | data[2] << 2 | data[3] << 10;
*/
break;
case MOUSE_REPORT_ID:
/* Expect six bytes of prefix, and N*8 bytes of touch data. */
if (size < 6 || ((size - 6) % 8) != 0)
return 0;
npoints = (size - 6) / 8;
if (npoints > 15) {
hid_warn(hdev, "invalid size value (%d) for MOUSE_REPORT_ID\n",
size);
return 0;
}
msc->ntouches = 0;
for (ii = 0; ii < npoints; ii++)
magicmouse_emit_touch(msc, ii, data + ii * 8 + 6);
/* When emulating three-button mode, it is important
* to have the current touch information before
* generating a click event.
*/
x = (int)(((data[3] & 0x0c) << 28) | (data[1] << 22)) >> 22;
y = (int)(((data[3] & 0x30) << 26) | (data[2] << 22)) >> 22;
clicks = data[3];
/* The following bits provide a device specific timestamp. They
* are unused here.
*
* ts = data[3] >> 6 | data[4] << 2 | data[5] << 10;
*/
break;
case DOUBLE_REPORT_ID:
/* Sometimes the trackpad sends two touch reports in one
* packet.
*/
magicmouse_raw_event(hdev, report, data + 2, data[1]);
magicmouse_raw_event(hdev, report, data + 2 + data[1],
size - 2 - data[1]);
break;
default:
return 0;
}
if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE) {
magicmouse_emit_buttons(msc, clicks & 3);
input_report_rel(input, REL_X, x);
input_report_rel(input, REL_Y, y);
} else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */
input_report_key(input, BTN_MOUSE, clicks & 1);
input_mt_report_pointer_emulation(input, true);
}
input_sync(input);
return 1;
} | 0 | [
"CWE-119",
"CWE-787"
] | linux | c54def7bd64d7c0b6993336abcffb8444795bf38 | 147,857,904,346,825,620,000,000,000,000,000,000,000 | 82 | HID: magicmouse: sanity check report size in raw_event() callback
The report passed to us from transport driver could potentially be
arbitrarily large, therefore we better sanity-check it so that
magicmouse_emit_touch() gets only valid values of raw_id.
Cc: [email protected]
Reported-by: Steven Vittitoe <[email protected]>
Signed-off-by: Jiri Kosina <[email protected]> |
int ssl3_send_next_proto(SSL *s)
{
unsigned int len, padding_len;
unsigned char *d;
if (s->state == SSL3_ST_CW_NEXT_PROTO_A)
{
len = s->next_proto_negotiated_len;
padding_len = 32 - ((len + 2) % 32);
d = (unsigned char *)s->init_buf->data;
d[4] = len;
memcpy(d + 5, s->next_proto_negotiated, len);
d[5 + len] = padding_len;
memset(d + 6 + len, 0, padding_len);
*(d++)=SSL3_MT_NEXT_PROTO;
l2n3(2 + len + padding_len, d);
s->state = SSL3_ST_CW_NEXT_PROTO_B;
s->init_num = 4 + 2 + len + padding_len;
s->init_off = 0;
}
return ssl3_do_write(s, SSL3_RT_HANDSHAKE);
} | 0 | [] | openssl | ee2ffc279417f15fef3b1073c7dc81a908991516 | 323,008,693,603,566,900,000,000,000,000,000,000,000 | 23 | Add Next Protocol Negotiation. |
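The padding computation in `ssl3_send_next_proto()` rounds the message body (length byte + protocol name + padding-length byte + padding) up to a multiple of 32, and `padding_len = 32 - ((len + 2) % 32)` is always in 1..32, so the body length `2 + len + padding_len` is a non-zero multiple of 32. A worked check of that arithmetic:
#include <stdio.h>

int main(void)
{
    /* The +2 accounts for the length byte and the padding-length byte. */
    unsigned int lens[] = { 8, 30, 32 };
    for (unsigned int i = 0; i < 3; i++) {
        unsigned int len = lens[i];
        unsigned int padding_len = 32 - ((len + 2) % 32);
        printf("len=%u padding=%u body=%u\n",
               len, padding_len, 2 + len + padding_len);
    }
    return 0; /* prints bodies 32, 64, 64 */
}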
static zval *phar_rename_archive(phar_archive_data **sphar, char *ext, zend_bool compress TSRMLS_DC) /* {{{ */
{
const char *oldname = NULL;
phar_archive_data *phar = *sphar;
char *oldpath = NULL;
char *basename = NULL, *basepath = NULL;
char *newname = NULL, *newpath = NULL;
zval *ret, arg1;
zend_class_entry *ce;
char *error;
const char *pcr_error;
int ext_len = ext ? strlen(ext) : 0;
int oldname_len;
phar_archive_data **pphar = NULL;
php_stream_statbuf ssb;
if (!ext) {
if (phar->is_zip) {
if (phar->is_data) {
ext = "zip";
} else {
ext = "phar.zip";
}
} else if (phar->is_tar) {
switch (phar->flags) {
case PHAR_FILE_COMPRESSED_GZ:
if (phar->is_data) {
ext = "tar.gz";
} else {
ext = "phar.tar.gz";
}
break;
case PHAR_FILE_COMPRESSED_BZ2:
if (phar->is_data) {
ext = "tar.bz2";
} else {
ext = "phar.tar.bz2";
}
break;
default:
if (phar->is_data) {
ext = "tar";
} else {
ext = "phar.tar";
}
}
} else {
switch (phar->flags) {
case PHAR_FILE_COMPRESSED_GZ:
ext = "phar.gz";
break;
case PHAR_FILE_COMPRESSED_BZ2:
ext = "phar.bz2";
break;
default:
ext = "phar";
}
}
} else if (phar_path_check(&ext, &ext_len, &pcr_error) > pcr_is_ok) {
if (phar->is_data) {
zend_throw_exception_ex(spl_ce_BadMethodCallException, 0 TSRMLS_CC, "data phar converted from \"%s\" has invalid extension %s", phar->fname, ext);
} else {
zend_throw_exception_ex(spl_ce_BadMethodCallException, 0 TSRMLS_CC, "phar converted from \"%s\" has invalid extension %s", phar->fname, ext);
}
return NULL;
}
if (ext[0] == '.') {
++ext;
}
oldpath = estrndup(phar->fname, phar->fname_len);
oldname = zend_memrchr(phar->fname, '/', phar->fname_len);
++oldname;
oldname_len = strlen(oldname);
basename = estrndup(oldname, oldname_len);
spprintf(&newname, 0, "%s.%s", strtok(basename, "."), ext);
efree(basename);
basepath = estrndup(oldpath, (strlen(oldpath) - oldname_len));
phar->fname_len = spprintf(&newpath, 0, "%s%s", basepath, newname);
phar->fname = newpath;
phar->ext = newpath + phar->fname_len - strlen(ext) - 1;
efree(basepath);
efree(newname);
if (PHAR_G(manifest_cached) && SUCCESS == zend_hash_find(&cached_phars, newpath, phar->fname_len, (void **) &pphar)) {
efree(oldpath);
zend_throw_exception_ex(spl_ce_BadMethodCallException, 0 TSRMLS_CC, "Unable to add newly converted phar \"%s\" to the list of phars, new phar name is in phar.cache_list", phar->fname);
return NULL;
}
if (SUCCESS == zend_hash_find(&(PHAR_GLOBALS->phar_fname_map), newpath, phar->fname_len, (void **) &pphar)) {
if ((*pphar)->fname_len == phar->fname_len && !memcmp((*pphar)->fname, phar->fname, phar->fname_len)) {
if (!zend_hash_num_elements(&phar->manifest)) {
(*pphar)->is_tar = phar->is_tar;
(*pphar)->is_zip = phar->is_zip;
(*pphar)->is_data = phar->is_data;
(*pphar)->flags = phar->flags;
(*pphar)->fp = phar->fp;
phar->fp = NULL;
phar_destroy_phar_data(phar TSRMLS_CC);
*sphar = NULL;
phar = *pphar;
*sphar = NULL;
phar->refcount++;
newpath = oldpath;
goto its_ok;
}
}
efree(oldpath);
zend_throw_exception_ex(spl_ce_BadMethodCallException, 0 TSRMLS_CC, "Unable to add newly converted phar \"%s\" to the list of phars, a phar with that name already exists", phar->fname);
return NULL;
}
its_ok:
if (SUCCESS == php_stream_stat_path(newpath, &ssb)) {
zend_throw_exception_ex(spl_ce_BadMethodCallException, 0 TSRMLS_CC, "phar \"%s\" exists and must be unlinked prior to conversion", newpath);
efree(oldpath);
return NULL;
}
if (!phar->is_data) {
if (SUCCESS != phar_detect_phar_fname_ext(newpath, phar->fname_len, (const char **) &(phar->ext), &(phar->ext_len), 1, 1, 1 TSRMLS_CC)) {
efree(oldpath);
zend_throw_exception_ex(spl_ce_BadMethodCallException, 0 TSRMLS_CC, "phar \"%s\" has invalid extension %s", phar->fname, ext);
return NULL;
}
if (phar->alias) {
if (phar->is_temporary_alias) {
phar->alias = NULL;
phar->alias_len = 0;
} else {
phar->alias = estrndup(newpath, strlen(newpath));
phar->alias_len = strlen(newpath);
phar->is_temporary_alias = 1;
zend_hash_update(&(PHAR_GLOBALS->phar_alias_map), newpath, phar->fname_len, (void*)&phar, sizeof(phar_archive_data*), NULL);
}
}
} else {
if (SUCCESS != phar_detect_phar_fname_ext(newpath, phar->fname_len, (const char **) &(phar->ext), &(phar->ext_len), 0, 1, 1 TSRMLS_CC)) {
efree(oldpath);
zend_throw_exception_ex(spl_ce_BadMethodCallException, 0 TSRMLS_CC, "data phar \"%s\" has invalid extension %s", phar->fname, ext);
return NULL;
}
phar->alias = NULL;
phar->alias_len = 0;
}
if ((!pphar || phar == *pphar) && SUCCESS != zend_hash_update(&(PHAR_GLOBALS->phar_fname_map), newpath, phar->fname_len, (void*)&phar, sizeof(phar_archive_data*), NULL)) {
efree(oldpath);
zend_throw_exception_ex(spl_ce_BadMethodCallException, 0 TSRMLS_CC, "Unable to add newly converted phar \"%s\" to the list of phars", phar->fname);
return NULL;
}
phar_flush(phar, 0, 0, 1, &error TSRMLS_CC);
if (error) {
zend_throw_exception_ex(spl_ce_BadMethodCallException, 0 TSRMLS_CC, "%s", error);
efree(error);
efree(oldpath);
return NULL;
}
efree(oldpath);
if (phar->is_data) {
ce = phar_ce_data;
} else {
ce = phar_ce_archive;
}
MAKE_STD_ZVAL(ret);
if (SUCCESS != object_init_ex(ret, ce)) {
zval_dtor(ret);
zend_throw_exception_ex(spl_ce_BadMethodCallException, 0 TSRMLS_CC, "Unable to instantiate phar object when converting archive \"%s\"", phar->fname);
return NULL;
}
INIT_PZVAL(&arg1);
ZVAL_STRINGL(&arg1, phar->fname, phar->fname_len, 0);
zend_call_method_with_1_params(&ret, ce, &ce->constructor, "__construct", NULL, &arg1);
return ret;
} | 0 | [
"CWE-119"
] | php-src | 13ad4d3e971807f9a58ab5933182907dc2958539 | 208,161,561,763,122,300,000,000,000,000,000,000,000 | 197 | Fix bug #71354 - remove UMR when size is 0 |
static int screen_width() {
DEVMODE mode;
mode.dmSize = sizeof(DEVMODE);
mode.dmDriverExtra = 0;
EnumDisplaySettings(0,ENUM_CURRENT_SETTINGS,&mode);
return (int)mode.dmPelsWidth;
} | 0 | [
"CWE-770"
] | cimg | 619cb58dd90b4e03ac68286c70ed98acbefd1c90 | 21,781,803,715,525,990,000,000,000,000,000,000,000 | 7 | CImg<>::load_bmp() and CImg<>::load_pandore(): Check that dimensions encoded in file do not exceed file size. |
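The CImg fix checks that dimensions read from a BMP/Pandore header cannot imply more pixel data than the file actually contains. A hedged standalone sketch of that validation with overflow-safe multiplication; the names are illustrative, not CImg's API:
#include <stdint.h>
#include <stdio.h>

/* Sketch: verify width*height*bytes_per_pixel fits in the bytes that
 * remain in the file, dividing instead of multiplying so the check
 * itself cannot overflow. */
static int dims_fit_file(uint64_t width, uint64_t height,
                         uint64_t bpp, uint64_t remaining_bytes)
{
    if (width == 0 || height == 0 || bpp == 0)
        return 0;
    if (width > remaining_bytes / bpp)
        return 0;
    /* width * bpp <= remaining_bytes here, so this cannot overflow. */
    if (height > remaining_bytes / (width * bpp))
        return 0;
    return 1;
}

int main(void)
{
    printf("%d\n", dims_fit_file(100, 100, 3, 30000));          /* 1 */
    printf("%d\n", dims_fit_file(1u << 20, 1u << 20, 4, 1024)); /* 0 */
    return 0;
}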
static int xennet_remove(struct xenbus_device *dev)
{
struct netfront_info *info = dev_get_drvdata(&dev->dev);
xennet_bus_close(dev);
xennet_disconnect_backend(info);
if (info->netdev->reg_state == NETREG_REGISTERED)
unregister_netdev(info->netdev);
if (info->queues) {
rtnl_lock();
xennet_destroy_queues(info);
rtnl_unlock();
}
xennet_free_netdev(info->netdev);
return 0;
} | 0 | [] | linux | f63c2c2032c2e3caad9add3b82cc6e91c376fd26 | 92,325,619,925,859,320,000,000,000,000,000,000,000 | 19 | xen-netfront: restore __skb_queue_tail() positioning in xennet_get_responses()
The commit referenced below moved the invocation past the "next" label,
without any explanation. In fact this allows misbehaving backends undue
control over the domain the frontend runs in, as earlier detected errors
require the skb to not be freed (it may be retained for later processing
via xennet_move_rx_slot(), or it may simply be unsafe to have it freed).
This is CVE-2022-33743 / XSA-405.
Fixes: 6c5aa6fc4def ("xen networking: add basic XDP support for xen-netfront")
Signed-off-by: Jan Beulich <[email protected]>
Reviewed-by: Juergen Gross <[email protected]>
Signed-off-by: Juergen Gross <[email protected]> |
zend_object_iterator *zend_generator_get_iterator(zend_class_entry *ce, zval *object, int by_ref) /* {{{ */
{
zend_object_iterator *iterator;
zend_generator *generator = (zend_generator*)Z_OBJ_P(object);
if (!generator->execute_data) {
zend_throw_exception(NULL, "Cannot traverse an already closed generator", 0);
return NULL;
}
if (UNEXPECTED(by_ref) && !(generator->execute_data->func->op_array.fn_flags & ZEND_ACC_RETURN_REFERENCE)) {
zend_throw_exception(NULL, "You can only iterate a generator by-reference if it declared that it yields by-reference", 0);
return NULL;
}
iterator = generator->iterator = emalloc(sizeof(zend_object_iterator));
zend_iterator_init(iterator);
iterator->funcs = &zend_generator_iterator_functions;
ZVAL_COPY(&iterator->data, object);
return iterator;
} | 0 | [] | php-src | 83e2b9e2202da6cc25bdaac67a58022b90be88e7 | 265,422,378,934,385,440,000,000,000,000,000,000,000 | 24 | Fixed bug #76946 |
static int msg_cache_commit (IMAP_DATA* idata, HEADER* h)
{
char id[_POSIX_PATH_MAX];
if (!idata || !h)
return -1;
idata->bcache = msg_cache_open (idata);
snprintf (id, sizeof (id), "%u-%u", idata->uid_validity, HEADER_DATA(h)->uid);
return mutt_bcache_commit (idata->bcache, id);
} | 0 | [
"CWE-787"
] | mutt | 3287534daa3beac68e2e83ca4b4fe8a3148ff870 | 247,614,405,393,164,000,000,000,000,000,000,000,000 | 12 | Don't overflow tmp in msg_parse_fetch.
Ensure INTERNALDATE and RFC822.SIZE field sizes fit temp buffer.
Thanks to Jeriko One for the bug report and patch, which this patch is
based upon. |
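The mutt fix ensures the INTERNALDATE and RFC822.SIZE fields cannot overrun the fixed temp buffer while parsing a FETCH response. A minimal sketch of the pattern, copying a server-supplied field only after checking its length against the destination (hypothetical names, not mutt's parser):
#include <stdio.h>
#include <string.h>

/* Sketch: reject a server-supplied field that would not fit, instead
 * of copying first and truncating (or overflowing) later. */
static int copy_field(char *dst, size_t dstlen,
                      const char *src, size_t srclen)
{
    if (srclen >= dstlen)
        return -1;           /* would overflow dst: treat as parse error */
    memcpy(dst, src, srclen);
    dst[srclen] = '\0';
    return 0;
}

int main(void)
{
    char tmp[16];
    printf("%d\n", copy_field(tmp, sizeof tmp, "17-Jul-1996", 11));  /* 0 */
    printf("%d\n", copy_field(tmp, sizeof tmp,
                              "far-too-long-internaldate-field", 31)); /* -1 */
    return 0;
}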
static uint32 GetFineLinearSlideDownTable(const CSoundFile *sndFile, uint32 i) { MPT_ASSERT(i < CountOf(FineLinearSlideDownTable)); return sndFile->m_playBehaviour[kHertzInLinearMode] ? FineLinearSlideDownTable[i] : FineLinearSlideUpTable[i]; } | 0 | [
"CWE-125"
] | openmpt | 7ebf02af2e90f03e0dbd0e18b8b3164f372fb97c | 189,033,187,464,133,260,000,000,000,000,000,000,000 | 1 | [Fix] Possible out-of-bounds read when computing length of some IT files with pattern loops (OpenMPT: formats that are converted to IT, libopenmpt: IT/ITP/MO3), caught with afl-fuzz.
git-svn-id: https://source.openmpt.org/svn/openmpt/trunk/OpenMPT@10027 56274372-70c3-4bfc-bfc3-4c3a0b034d27 |
static int p54u_bulk_msg(struct p54u_priv *priv, unsigned int ep,
void *data, size_t len)
{
int alen;
return usb_bulk_msg(priv->udev, usb_sndbulkpipe(priv->udev, ep),
data, len, &alen, 2000);
} | 0 | [
"CWE-416"
] | linux | 6e41e2257f1094acc37618bf6c856115374c6922 | 22,021,680,975,378,740,000,000,000,000,000,000,000 | 7 | p54usb: Fix race between disconnect and firmware loading
The syzbot fuzzer found a bug in the p54 USB wireless driver. The
issue involves a race between disconnect and the firmware-loader
callback routine, and it has several aspects.
One big problem is that when the firmware can't be loaded, the
callback routine tries to unbind the driver from the USB _device_ (by
calling device_release_driver) instead of from the USB _interface_ to
which it is actually bound (by calling usb_driver_release_interface).
The race involves access to the private data structure. The driver's
disconnect handler waits for a completion that is signalled by the
firmware-loader callback routine. As soon as the completion is
signalled, you have to assume that the private data structure may have
been deallocated by the disconnect handler -- even if the firmware was
loaded without errors. However, the callback routine does access the
private data several times after that point.
Another problem is that, in order to ensure that the USB device
structure hasn't been freed when the callback routine runs, the driver
takes a reference to it. This isn't good enough any more, because now
that the callback routine calls usb_driver_release_interface, it has
to ensure that the interface structure hasn't been freed.
Finally, the driver takes an unnecessary reference to the USB device
structure in the probe function and drops the reference in the
disconnect handler. This extra reference doesn't accomplish anything,
because the USB core already guarantees that a device structure won't
be deallocated while a driver is still bound to any of its interfaces.
To fix these problems, this patch makes the following changes:
Call usb_driver_release_interface() rather than
device_release_driver().
Don't signal the completion until after the important
information has been copied out of the private data structure,
and don't refer to the private data at all thereafter.
Lock udev (the interface's parent) before unbinding the driver
instead of locking udev->parent.
During the firmware loading process, take a reference to the
USB interface instead of the USB device.
Don't take an unnecessary reference to the device during probe
(and then don't drop it during disconnect).
Signed-off-by: Alan Stern <[email protected]>
Reported-and-tested-by: [email protected]
CC: <[email protected]>
Acked-by: Christian Lamparter <[email protected]>
Signed-off-by: Kalle Valo <[email protected]> |
DEFUN (clear_ip_bgp_external,
clear_ip_bgp_external_cmd,
"clear ip bgp external",
CLEAR_STR
IP_STR
BGP_STR
"Clear all external peers\n")
{
return bgp_clear_vty (vty, NULL, 0, 0, clear_external, BGP_CLEAR_SOFT_NONE, NULL);
} | 0 | [
"CWE-125"
] | frr | 6d58272b4cf96f0daa846210dd2104877900f921 | 15,677,840,312,072,416,000,000,000,000,000,000,000 | 10 | [bgpd] cleanup, compact and consolidate capability parsing code
2007-07-26 Paul Jakma <[email protected]>
* (general) Clean up and compact capability parsing slightly.
Consolidate validation of length and logging of generic TLV, and
memcpy of capability data, thus removing such from cap specifc
code (not always present or correct).
* bgp_open.h: Add structures for the generic capability TLV header
and for the data formats of the various specific capabilities we
support. Hence remove the badly named, or else misdefined, struct
capability.
* bgp_open.c: (bgp_capability_vty_out) Use struct capability_mp_data.
Do the length checks *before* memcpy()'ing based on that length
(stored capability - should have been validated anyway on input,
but..).
(bgp_afi_safi_valid_indices) new function to validate (afi,safi)
which is about to be used as index into arrays, consolidates
several instances of same, at least one of which appeared to be
incomplete..
(bgp_capability_mp) Much condensed.
(bgp_capability_orf_entry) New, process one ORF entry
(bgp_capability_orf) Condensed. Fixed to process all ORF entries.
(bgp_capability_restart) Condensed, and fixed to use a
cap-specific type, rather than abusing capability_mp.
(struct message capcode_str) added to aid generic logging.
(size_t cap_minsizes[]) added to aid generic validation of
capability length field.
(bgp_capability_parse) Generic logging and validation of TLV
consolidated here. Code compacted as much as possible.
* bgp_packet.c: (bgp_open_receive) Capability parsers now use
streams, so no more need here to manually fudge the input stream
getp.
(bgp_capability_msg_parse) use struct capability_mp_data. Validate
lengths /before/ memcpy. Use bgp_afi_safi_valid_indices.
(bgp_capability_receive) Exported for use by test harness.
* bgp_vty.c: (bgp_show_summary) fix conversion warning
(bgp_show_peer) ditto
* bgp_debug.h: Fix storage 'extern' after type 'const'.
* lib/log.c: (mes_lookup) warning about code not being in
same-number array slot should be debug, not warning. E.g. BGP
has several discontiguous number spaces, allocating from
different parts of a space is not uncommon (e.g. IANA
assigned versus vendor-assigned code points in some number
space). |
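The changelog above describes consolidating capability TLV handling: every TLV's length field is validated against both the remaining buffer and a per-capability minimum (`cap_minsizes[]`) before any memcpy. A standalone sketch of that loop, assuming a simple 2-byte code/length header and illustrative minimum sizes; the real BGP encoding and table contents may differ:
#include <stddef.h>
#include <stdint.h>

/* Illustrative per-code minimum value lengths (assumed, not Quagga's). */
static const size_t cap_minsizes[] = { [1] = 4, [2] = 0, [64] = 2 };

/* Sketch: walk code/length TLVs, validating each length against the
 * bytes that remain and the per-code minimum before touching the value. */
static int parse_caps(const uint8_t *buf, size_t len)
{
    size_t off = 0;
    while (off + 2 <= len) {               /* need code + length bytes */
        uint8_t code = buf[off];
        uint8_t tlen = buf[off + 1];
        if (off + 2 + tlen > len)
            return -1;                     /* TLV runs past the buffer */
        if (code < sizeof cap_minsizes / sizeof cap_minsizes[0]
            && tlen < cap_minsizes[code])
            return -1;                     /* shorter than spec allows */
        /* ... safe to read tlen bytes at buf + off + 2 ... */
        off += 2 + (size_t)tlen;
    }
    return off == len ? 0 : -1;            /* trailing garbage is an error */
}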
xmlDumpEntitiesTable(xmlBufferPtr buf, xmlEntitiesTablePtr table) {
xmlHashScan(table, xmlDumpEntityDeclScan, buf);
} | 0 | [
"CWE-787"
] | libxml2 | bf22713507fe1fc3a2c4b525cf0a88c2dc87a3a2 | 136,266,802,265,345,060,000,000,000,000,000,000,000 | 3 | Validate UTF8 in xmlEncodeEntities
Code is currently assuming UTF-8 without validating. Truncated UTF-8
input can cause out-of-bounds array access.
Adds further checks to partial fix in 50f06b3e.
Fixes #178 |
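The fix validates UTF-8 instead of assuming it, because a truncated multi-byte sequence at the end of a buffer would otherwise send the serializer reading past the end. A minimal standalone validator sketch (not libxml2's actual routine) that rejects sequences that would run off the buffer; note it checks lengths and continuation bytes only, whereas a full validator must also reject overlong encodings and surrogates:
#include <stddef.h>

static int utf8_lengths_ok(const unsigned char *s, size_t len)
{
    size_t i = 0;
    while (i < len) {
        size_t n;
        if (s[i] < 0x80)                n = 1;
        else if ((s[i] & 0xE0) == 0xC0) n = 2;
        else if ((s[i] & 0xF0) == 0xE0) n = 3;
        else if ((s[i] & 0xF8) == 0xF0) n = 4;
        else return 0;          /* stray continuation or invalid lead byte */
        if (i + n > len)
            return 0;           /* truncated sequence at buffer end */
        for (size_t k = 1; k < n; k++)
            if ((s[i + k] & 0xC0) != 0x80)
                return 0;       /* not a continuation byte */
        i += n;
    }
    return 1;
}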
static inline int security_bprm_set_creds(struct linux_binprm *bprm)
{
return cap_bprm_set_creds(bprm);
} | 0 | [] | linux-2.6 | ee18d64c1f632043a02e6f5ba5e045bb26a5465f | 15,023,589,374,489,210,000,000,000,000,000,000,000 | 4 | KEYS: Add a keyctl to install a process's session keyring on its parent [try #6]
Add a keyctl to install a process's session keyring onto its parent. This
replaces the parent's session keyring. Because the COW credential code does
not permit one process to change another process's credentials directly, the
change is deferred until userspace next starts executing again. Normally this
will be after a wait*() syscall.
To support this, three new security hooks have been provided:
cred_alloc_blank() to allocate unset security creds, cred_transfer() to fill in
the blank security creds and key_session_to_parent() - which asks the LSM if
the process may replace its parent's session keyring.
The replacement may only happen if the process has the same ownership details
as its parent, and the process has LINK permission on the session keyring, and
the session keyring is owned by the process, and the LSM permits it.
Note that this requires alteration to each architecture's notify_resume path.
This has been done for all arches barring blackfin, m68k* and xtensa, all of
which need assembly alteration to support TIF_NOTIFY_RESUME. This allows the
replacement to be performed at the point the parent process resumes userspace
execution.
This allows the userspace AFS pioctl emulation to fully emulate newpag() and
the VIOCSETTOK and VIOCSETTOK2 pioctls, all of which require the ability to
alter the parent process's PAG membership. However, since kAFS doesn't use
PAGs per se, but rather dumps the keys into the session keyring, the session
keyring of the parent must be replaced if, for example, VIOCSETTOK is passed
the newpag flag.
This can be tested with the following program:
#include <stdio.h>
#include <stdlib.h>
#include <keyutils.h>
#define KEYCTL_SESSION_TO_PARENT 18
#define OSERROR(X, S) do { if ((long)(X) == -1) { perror(S); exit(1); } } while(0)
int main(int argc, char **argv)
{
key_serial_t keyring, key;
long ret;
keyring = keyctl_join_session_keyring(argv[1]);
OSERROR(keyring, "keyctl_join_session_keyring");
key = add_key("user", "a", "b", 1, keyring);
OSERROR(key, "add_key");
ret = keyctl(KEYCTL_SESSION_TO_PARENT);
OSERROR(ret, "KEYCTL_SESSION_TO_PARENT");
return 0;
}
Compiled and linked with -lkeyutils, you should see something like:
[dhowells@andromeda ~]$ keyctl show
Session Keyring
-3 --alswrv 4043 4043 keyring: _ses
355907932 --alswrv 4043 -1 \_ keyring: _uid.4043
[dhowells@andromeda ~]$ /tmp/newpag
[dhowells@andromeda ~]$ keyctl show
Session Keyring
-3 --alswrv 4043 4043 keyring: _ses
1055658746 --alswrv 4043 4043 \_ user: a
[dhowells@andromeda ~]$ /tmp/newpag hello
[dhowells@andromeda ~]$ keyctl show
Session Keyring
-3 --alswrv 4043 4043 keyring: hello
340417692 --alswrv 4043 4043 \_ user: a
Where the test program creates a new session keyring, sticks a user key named
'a' into it and then installs it on its parent.
Signed-off-by: David Howells <[email protected]>
Signed-off-by: James Morris <[email protected]> |
void __fastcall TCustomDialog::AddComboBox(TCustomCombo * Combo, TLabel * Label, TStrings * Items, bool OneLine)
{
AddEditLikeControl(Combo, Label, OneLine);
SetUpComboBox(Combo, Items, OneLine);
}
| 0 | [
"CWE-787"
] | winscp | faa96e8144e6925a380f94a97aa382c9427f688d | 227,410,921,870,224,170,000,000,000,000,000,000,000 | 6 | Bug 1943: Prevent loading session settings that can lead to remote code execution from handled URLs
https://winscp.net/tracker/1943
(cherry picked from commit ec584f5189a856cd79509f754722a6898045c5e0)
Source commit: 0f4be408b3f01132b00682da72d925d6c4ee649b |
bool OSDService::should_share_map(entity_name_t name, Connection *con,
epoch_t epoch, const OSDMapRef& osdmap,
const epoch_t *sent_epoch_p)
{
dout(20) << "should_share_map "
<< name << " " << con->get_peer_addr()
<< " " << epoch << dendl;
// does client have old map?
if (name.is_client()) {
bool message_sendmap = epoch < osdmap->get_epoch();
if (message_sendmap && sent_epoch_p) {
dout(20) << "client session last_sent_epoch: "
<< *sent_epoch_p
<< " versus osdmap epoch " << osdmap->get_epoch() << dendl;
if (*sent_epoch_p < osdmap->get_epoch()) {
return true;
} // else we don't need to send it out again
}
}
if (con->get_messenger() == osd->cluster_messenger &&
con != osd->cluster_messenger->get_loopback_connection() &&
osdmap->is_up(name.num()) &&
(osdmap->get_cluster_addr(name.num()) == con->get_peer_addr() ||
osdmap->get_hb_back_addr(name.num()) == con->get_peer_addr())) {
// remember
epoch_t has = MAX(get_peer_epoch(name.num()), epoch);
// share?
if (has < osdmap->get_epoch()) {
dout(10) << name << " " << con->get_peer_addr()
<< " has old map " << epoch << " < "
<< osdmap->get_epoch() << dendl;
return true;
}
}
return false;
} | 0 | [
"CWE-287",
"CWE-284"
] | ceph | 5ead97120e07054d80623dada90a5cc764c28468 | 150,327,181,157,215,340,000,000,000,000,000,000,000 | 40 | auth/cephx: add authorizer challenge
Allow the accepting side of a connection to reject an initial authorizer
with a random challenge. The connecting side then has to respond with an
updated authorizer proving they are able to decrypt the service's challenge
and that the new authorizer was produced for this specific connection
instance.
The accepting side requires this challenge and response unconditionally
if the client side advertises they have the feature bit. Servers wishing
to require this improved level of authentication simply have to require
the appropriate feature.
Signed-off-by: Sage Weil <[email protected]>
(cherry picked from commit f80b848d3f830eb6dba50123e04385173fa4540b)
# Conflicts:
# src/auth/Auth.h
# src/auth/cephx/CephxProtocol.cc
# src/auth/cephx/CephxProtocol.h
# src/auth/none/AuthNoneProtocol.h
# src/msg/Dispatcher.h
# src/msg/async/AsyncConnection.cc
- const_iterator
- ::decode vs decode
- AsyncConnection ctor arg noise
- get_random_bytes(), not cct->random() |
nfsd4_secinfo_no_name(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_secinfo_no_name *sin)
{
__be32 err;
switch (sin->sin_style) {
case NFS4_SECINFO_STYLE4_CURRENT_FH:
break;
case NFS4_SECINFO_STYLE4_PARENT:
err = nfsd4_do_lookupp(rqstp, &cstate->current_fh);
if (err)
return err;
break;
default:
return nfserr_inval;
}
sin->sin_exp = exp_get(cstate->current_fh.fh_export);
fh_put(&cstate->current_fh);
return nfs_ok;
} | 0 | [
"CWE-20",
"CWE-129"
] | linux | b550a32e60a4941994b437a8d662432a486235a5 | 116,616,341,690,783,040,000,000,000,000,000,000,000 | 21 | nfsd: fix undefined behavior in nfsd4_layout_verify
UBSAN: Undefined behaviour in fs/nfsd/nfs4proc.c:1262:34
shift exponent 128 is too large for 32-bit type 'int'
Depending on compiler+architecture, this may cause the check for
layout_type to succeed for overly large values (which seems to be the
case with amd64). The large value will be later used in de-referencing
nfsd4_layout_ops for function pointers.
Reported-by: Jani Tuovila <[email protected]>
Signed-off-by: Ari Kauppi <[email protected]>
[[email protected]: use LAYOUT_TYPE_MAX instead of 32]
Cc: [email protected]
Reviewed-by: Dan Carpenter <[email protected]>
Reviewed-by: Christoph Hellwig <[email protected]>
Signed-off-by: J. Bruce Fields <[email protected]> |
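The UBSAN report quoted above is about shifting a 32-bit `int` by an attacker-supplied exponent: once the exponent reaches the type width the result is undefined, and on amd64 it often wraps modulo 64, so a value like 128 can make a bitmask test pass. A small sketch of the guard the fix describes, bounding the untrusted value before shifting (`LAYOUT_TYPE_MAX` here is an assumed placeholder value):
#include <stdio.h>

#define LAYOUT_TYPE_MAX 6   /* assumed: one past the highest valid type */

/* Sketch: validate the untrusted exponent first; shifting a 32-bit
 * value by >= 32 is undefined behavior, not merely a zero result. */
static int layout_supported(unsigned int layout_type, unsigned int mask)
{
    if (layout_type >= LAYOUT_TYPE_MAX)
        return 0;
    return (mask & (1u << layout_type)) != 0;
}

int main(void)
{
    printf("%d\n", layout_supported(1, 0x2));   /* 1 */
    printf("%d\n", layout_supported(128, 0x2)); /* 0, no UB */
    return 0;
}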
void smtp_server_connection_register_rcpt_param(
struct smtp_server_connection *conn, const char *param)
{
param = p_strdup(conn->pool, param);
if (!array_is_created(&conn->rcpt_param_extensions)) {
p_array_init(&conn->rcpt_param_extensions, conn->pool, 8);
array_push_back(&conn->rcpt_param_extensions, ¶m);
} else {
unsigned int count = array_count(&conn->rcpt_param_extensions);
i_assert(count > 0);
array_idx_set(&conn->rcpt_param_extensions,
count - 1, ¶m);
}
array_append_zero(&conn->rcpt_param_extensions);
} | 0 | [
"CWE-77"
] | core | 321c339756f9b2b98fb7326359d1333adebb5295 | 20,372,476,059,086,181,000,000,000,000,000,000,000 | 17 | lib-smtp: smtp-server-connection - Fix STARTTLS command injection vulnerability.
The input handler kept reading more commands even though the input was locked by
the STARTTLS command, thereby causing it to read the command pipelined beyond
STARTTLS. This causes a STARTTLS command injection vulnerability. |
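The vulnerability class here is plaintext command injection: bytes pipelined after STARTTLS in the same plaintext read must be discarded, or they get replayed as if they had arrived over the encrypted channel. A minimal sketch of the defensive step, with hypothetical structure and field names rather than Dovecot's actual internals:
#include <stddef.h>

/* Hypothetical input-buffer state for the sketch. */
struct conn {
    char   inbuf[4096];
    size_t inbuf_used;   /* bytes read but not yet parsed */
    int    tls_started;
};

/* Sketch: once STARTTLS is accepted, any bytes already sitting in the
 * plaintext buffer were sent before the handshake and must not be
 * interpreted as post-TLS commands. */
static void start_tls(struct conn *c)
{
    c->inbuf_used = 0;   /* drop pipelined plaintext */
    c->tls_started = 1;
    /* ... hand the raw socket to the TLS layer here ... */
}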
GF_Err adkm_on_child_box(GF_Box *s, GF_Box *a, Bool is_rem)
{
GF_AdobeDRMKeyManagementSystemBox *ptr = (GF_AdobeDRMKeyManagementSystemBox *)s;
switch (a->type) {
case GF_ISOM_BOX_TYPE_AHDR:
BOX_FIELD_ASSIGN(header, GF_AdobeDRMHeaderBox)
break;
case GF_ISOM_BOX_TYPE_ADAF:
BOX_FIELD_ASSIGN(au_format, GF_AdobeDRMAUFormatBox)
break;
}
return GF_OK;
} | 0 | [
"CWE-703"
] | gpac | f19668964bf422cf5a63e4dbe1d3c6c75edadcbb | 202,165,889,999,168,800,000,000,000,000,000,000,000 | 13 | fixed #1879 |
static inline void del_timer_wait_running(struct timer_list *timer) { } | 0 | [
"CWE-200",
"CWE-330"
] | linux | f227e3ec3b5cad859ad15666874405e8c1bbc1d4 | 256,336,763,285,541,400,000,000,000,000,000,000,000 | 1 | random32: update the net random state on interrupt and activity
This modifies the first 32 bits out of the 128 bits of a random CPU's
net_rand_state on interrupt or CPU activity to complicate remote
observations that could lead to guessing the network RNG's internal
state.
Note that depending on some network devices' interrupt rate moderation
or binding, this re-seeding might happen on every packet or even almost
never.
In addition, with NOHZ some CPUs might not even get timer interrupts,
leaving their local state rarely updated, while they are running
networked processes making use of the random state. For this reason, we
also perform this update in update_process_times() in order to at least
update the state when there is user or system activity, since it's the
only case we care about.
Reported-by: Amit Klein <[email protected]>
Suggested-by: Linus Torvalds <[email protected]>
Cc: Eric Dumazet <[email protected]>
Cc: "Jason A. Donenfeld" <[email protected]>
Cc: Andy Lutomirski <[email protected]>
Cc: Kees Cook <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: <[email protected]>
Signed-off-by: Willy Tarreau <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
setpassfilter(argv)
char **argv;
{
pcap_t *pc;
int ret = 1;
pc = pcap_open_dead(DLT_PPP_PPPD, 65535);
if (pcap_compile(pc, &pass_filter, *argv, 1, netmask) == -1) {
option_error("error in pass-filter expression: %s\n",
pcap_geterr(pc));
ret = 0;
}
pcap_close(pc);
return ret;
} | 0 | [
"CWE-415",
"CWE-119"
] | ppp | 7658e8257183f062dc01f87969c140707c7e52cb | 83,642,470,002,862,795,000,000,000,000,000,000,000 | 16 | pppd: Eliminate potential integer overflow in option parsing
When we are reading in a word from an options file, we maintain a count
of the length we have seen so far in 'len', which is an int. When len
exceeds MAXWORDLEN - 1 (i.e. 1023) we cease storing characters in the
buffer but we continue to increment len. Since len is an int, it will
wrap around to -2147483648 after it reaches 2147483647. At that point
our test of (len < MAXWORDLEN-1) will succeed and we will start writing
characters to memory again.
This may enable an attacker to overwrite the heap and thereby corrupt
security-relevant variables. For this reason it has been assigned a
CVE identifier, CVE-2014-3158.
This fixes the bug by ceasing to increment len once it reaches MAXWORDLEN.
Reported-by: Lee Campbell <[email protected]>
Signed-off-by: Paul Mackerras <[email protected]> |
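The overflow described above comes from incrementing `len` forever while only the first MAXWORDLEN-1 bytes are stored: after 2^31-1 characters `len` wraps negative, the `len < MAXWORDLEN-1` test passes again, and writes resume at a negative offset. A standalone sketch of the corrected loop shape, which saturates the counter at the cap (the caller is assumed to supply `char word[MAXWORDLEN]`):
#include <stdio.h>

#define MAXWORDLEN 1024

/* Sketch: saturate len at MAXWORDLEN instead of counting unboundedly,
 * so an attacker-length word can never wrap the counter back into
 * the valid range. */
static int read_word(FILE *f, char *word)
{
    int c, len = 0;
    while ((c = fgetc(f)) != EOF && c != ' ' && c != '\n') {
        if (len < MAXWORDLEN - 1)
            word[len] = (char)c;
        if (len < MAXWORDLEN)
            len++;               /* was: len++ unconditionally */
    }
    word[len < MAXWORDLEN ? len : MAXWORDLEN - 1] = '\0';
    return len;
}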
bool SNC_io_parser<EW>::
read_sedge(SHalfedge_handle seh) {
bool OK = true;
int index;
#ifdef CGAL_NEF_NATURAL_COORDINATE_INPUT
typename K::RT a,b,c,d;
#endif
in >> index;
OK = OK && test_string("{");
in >> index;
seh->twin() = SEdge_of[index];
OK = OK && test_string(",");
in >> index;
seh->sprev() = SEdge_of[index];
OK = OK && test_string(",");
in >> index;
seh->snext() = SEdge_of[index];
OK = OK && test_string(",");
in >> index;
seh->source() = Edge_of[index];
OK = OK && test_string(",");
in >> index;
seh->incident_sface() = SFace_of[index];
OK = OK && test_string(",");
in >> index;
seh->prev() = SEdge_of[index];
OK = OK && test_string(",");
in >> index;
seh->next() = SEdge_of[index];
OK = OK && test_string(",");
in >> index;
seh->facet() = Halffacet_of[index];
OK = OK && test_string("|");
#ifdef CGAL_NEF_NATURAL_COORDINATE_INPUT
in >> a >> b >> c >> d;
seh->circle() = Sphere_circle(Plane_3(a,b,c,d));
#else
seh->circle() =
Geometry_io<typename K::Kernel_tag, Kernel>::
template read_plane<Kernel, K>(in);
#endif
OK = OK && test_string("}");
in >> seh->mark();
return OK;
} | 1 | [
"CWE-125"
] | cgal | 5a1ab45058112f8647c14c02f58905ecc597ec76 | 39,770,160,928,380,027,000,000,000,000,000,000,000 | 49 | Fix Nef_3 |
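This sample is labeled vulnerable (target 1, CWE-125): each `in >> index` result is used directly to subscript `SEdge_of`, `Edge_of`, and the other lookup tables, so a crafted file carrying a negative or out-of-range index reads out of bounds. A standalone sketch of the missing validation step, with illustrative names rather than CGAL's API:
#include <stdio.h>

/* Sketch: never use a deserialized index before checking it against
 * the table it subscripts; a bad value in the input file must fail
 * the parse, not drive the array access. */
static int read_index(FILE *in, long count, long *out)
{
    long index;
    if (fscanf(in, "%ld", &index) != 1)
        return 0;
    if (index < 0 || index >= count)
        return 0;            /* out-of-range index in input file */
    *out = index;
    return 1;
}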
void Curl_free_request_state(struct Curl_easy *data)
{
Curl_safefree(data->req.protop);
Curl_safefree(data->req.newurl);
} | 0 | [
"CWE-119"
] | curl | 9b5e12a5491d2e6b68e0c88ca56f3a9ef9fba400 | 68,385,860,137,123,870,000,000,000,000,000,000,000 | 5 | url: fix alignment of ssl_backend_data struct
- Align the array of ssl_backend_data on a max 32 byte boundary.
8 is likely to be ok but I went with 32 for posterity should one of
the ssl_backend_data structs change to contain a larger sized variable
in the future.
Prior to this change (since dev 70f1db3, release 7.56) the connectdata
structure was undersized by 4 bytes in 32-bit builds with ssl enabled
because long long * was mistakenly used for alignment instead of
long long, with the intention being an 8 byte boundary. Also long long
may not be an available type.
The undersized connectdata could lead to oob read/write past the end in
what was expected to be the last 4 bytes of the connection's secondary
socket https proxy ssl_backend_data struct (the secondary socket in a
connection is used by ftp, others?).
Closes https://github.com/curl/curl/issues/2093
CVE-2017-8818
Bug: https://curl.haxx.se/docs/adv_2017-af0a.html |
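The curl bug was writing `long long *` where `long long` was meant: a pointer's size and alignment are not those of the widest scalar, so the flexible backend area came out 4 bytes short on 32-bit builds. A tiny sketch of the difference, plus the C11 way to state an explicit boundary (curl itself, targeting older C, used struct padding rather than `alignas`):
#include <stdalign.h>
#include <stdio.h>

int main(void)
{
    /* On a typical 32-bit target: sizeof(long long *) == 4 but
     * sizeof(long long) == 8, so the two are not interchangeable
     * when sizing or aligning a struct. */
    printf("sizeof(long long)   = %zu\n", sizeof(long long));
    printf("sizeof(long long *) = %zu\n", sizeof(long long *));

    /* C11 lets the requirement be stated directly. */
    struct backend_area { alignas(32) unsigned char data[64]; };
    printf("alignof = %zu\n", alignof(struct backend_area)); /* 32 */
    return 0;
}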
deep_count_more_files_callback (GObject *source_object,
GAsyncResult *res,
gpointer user_data)
{
DeepCountState *state;
NautilusDirectory *directory;
GList *files, *l;
GFileInfo *info;
state = user_data;
if (state->directory == NULL) {
/* Operation was cancelled. Bail out */
deep_count_state_free (state);
return;
}
directory = nautilus_directory_ref (state->directory);
g_assert (directory->details->deep_count_in_progress != NULL);
g_assert (directory->details->deep_count_in_progress == state);
files = g_file_enumerator_next_files_finish (state->enumerator,
res, NULL);
for (l = files; l != NULL; l = l->next) {
info = l->data;
deep_count_one (state, info);
g_object_unref (info);
}
if (files == NULL) {
g_file_enumerator_close_async (state->enumerator, 0, NULL, NULL, NULL);
g_object_unref (state->enumerator);
state->enumerator = NULL;
deep_count_next_dir (state);
} else {
g_file_enumerator_next_files_async (state->enumerator,
DIRECTORY_LOAD_ITEMS_PER_CALLBACK,
G_PRIORITY_LOW,
state->cancellable,
deep_count_more_files_callback,
state);
}
g_list_free (files);
nautilus_directory_unref (directory);
} | 0 | [] | nautilus | 7632a3e13874a2c5e8988428ca913620a25df983 | 216,541,454,087,301,430,000,000,000,000,000,000,000 | 50 | Check for trusted desktop file launchers.
2009-02-24 Alexander Larsson <[email protected]>
* libnautilus-private/nautilus-directory-async.c:
Check for trusted desktop file launchers.
* libnautilus-private/nautilus-file-private.h:
* libnautilus-private/nautilus-file.c:
* libnautilus-private/nautilus-file.h:
Add nautilus_file_is_trusted_link.
Allow unsetting of custom display name.
* libnautilus-private/nautilus-mime-actions.c:
Display dialog when trying to launch a non-trusted desktop file.
svn path=/trunk/; revision=15003 |
static int ringbuf_map_get_next_key(struct bpf_map *map, void *key,
void *next_key)
{
return -ENOTSUPP;
} | 0 | [
"CWE-787"
] | bpf | 4b81ccebaeee885ab1aa1438133f2991e3a2b6ea | 290,760,647,587,037,650,000,000,000,000,000,000,000 | 5 | bpf, ringbuf: Deny reserve of buffers larger than ringbuf
A BPF program might try to reserve a buffer larger than the ringbuf size.
If the consumer pointer is way ahead of the producer, that would be
successfully reserved, allowing the BPF program to read or write out of
the ringbuf allocated area.
Reported-by: Ryota Shiga (Flatt Security)
Fixes: 457f44363a88 ("bpf: Implement BPF ring buffer and verifier support for it")
Signed-off-by: Thadeu Lima de Souza Cascardo <[email protected]>
Signed-off-by: Daniel Borkmann <[email protected]>
Acked-by: Andrii Nakryiko <[email protected]>
Acked-by: Alexei Starovoitov <[email protected]> |
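The fix described above bounds the requested reservation by the ring buffer's capacity before any producer/consumer arithmetic. A userspace toy sketch of the same check, not the kernel's bpf_ringbuf code; like the kernel, it assumes the data area is mapped twice back-to-back so a record may wrap the end:
#include <stddef.h>
#include <stdint.h>

struct ringbuf {
    uint8_t *data;       /* assumed double-mapped, capacity = mask + 1 */
    size_t   mask;       /* capacity - 1; capacity is a power of two */
    size_t   producer;   /* monotonically increasing byte counters */
    size_t   consumer;
};

/* Sketch: a request larger than the whole buffer can otherwise appear
 * to "fit" when the consumer is far behind, handing the caller a
 * region that extends past the allocation. */
static void *ringbuf_reserve(struct ringbuf *rb, size_t size)
{
    if (size > rb->mask + 1)
        return NULL;                 /* larger than the ring itself */
    if (rb->producer + size - rb->consumer > rb->mask + 1)
        return NULL;                 /* no room right now */
    void *rec = rb->data + (rb->producer & rb->mask);
    rb->producer += size;
    return rec;
}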
void delete_run_files(pid_t pid) {
delete_sandbox_run_file(pid);
delete_bandwidth_run_file(pid);
delete_network_run_file(pid);
delete_name_run_file(pid);
delete_x11_run_file(pid);
delete_profile_run_file(pid);
} | 0 | [
"CWE-269",
"CWE-94"
] | firejail | 27cde3d7d1e4e16d4190932347c7151dc2a84c50 | 286,918,065,086,137,600,000,000,000,000,000,000,000 | 8 | fixing CVE-2022-31214 |
__getcwd_generic (char *buf, size_t size)
{
/* Lengths of big file name components and entire file names, and a
deep level of file name nesting. These numbers are not upper
bounds; they are merely large values suitable for initial
allocations, designed to be large enough for most real-world
uses. */
enum
{
BIG_FILE_NAME_COMPONENT_LENGTH = 255,
BIG_FILE_NAME_LENGTH = MIN (4095, PATH_MAX - 1),
DEEP_NESTING = 100
};
#if HAVE_OPENAT_SUPPORT
int fd = AT_FDCWD;
bool fd_needs_closing = false;
#else
char dots[DEEP_NESTING * sizeof ".." + BIG_FILE_NAME_COMPONENT_LENGTH + 1];
char *dotlist = dots;
size_t dotsize = sizeof dots;
size_t dotlen = 0;
#endif
DIR *dirstream = NULL;
dev_t rootdev, thisdev;
ino_t rootino, thisino;
char *dir;
register char *dirp;
struct __stat64_t64 st;
size_t allocated = size;
size_t used;
/* A size of 1 byte is never useful. */
if (allocated == 1)
{
__set_errno (ERANGE);
return NULL;
}
#if HAVE_MINIMALLY_WORKING_GETCWD
/* If AT_FDCWD is not defined, the algorithm below is O(N**2) and
this is much slower than the system getcwd (at least on
GNU/Linux). So trust the system getcwd's results unless they
look suspicious.
Use the system getcwd even if we have openat support, since the
system getcwd works even when a parent is unreadable, while the
openat-based approach does not.
But on AIX 5.1..7.1, the system getcwd is not even minimally
working: If the current directory name is slightly longer than
PATH_MAX, it omits the first directory component and returns
this wrong result with errno = 0. */
# undef getcwd
dir = getcwd_system (buf, size);
if (dir || (size && errno == ERANGE))
return dir;
/* Solaris getcwd (NULL, 0) fails with errno == EINVAL, but it has
internal magic that lets it work even if an ancestor directory is
inaccessible, which is better in many cases. So in this case try
again with a buffer that's almost always big enough. */
if (errno == EINVAL && buf == NULL && size == 0)
{
char big_buffer[BIG_FILE_NAME_LENGTH + 1];
dir = getcwd_system (big_buffer, sizeof big_buffer);
if (dir)
return strdup (dir);
}
# if HAVE_PARTLY_WORKING_GETCWD
/* The system getcwd works, except it sometimes fails when it
shouldn't, setting errno to ERANGE, ENAMETOOLONG, or ENOENT. */
if (errno != ERANGE && errno != ENAMETOOLONG && errno != ENOENT)
return NULL;
# endif
#endif
if (size == 0)
{
if (buf != NULL)
{
__set_errno (EINVAL);
return NULL;
}
allocated = BIG_FILE_NAME_LENGTH + 1;
}
if (buf == NULL)
{
dir = malloc (allocated);
if (dir == NULL)
return NULL;
}
else
dir = buf;
dirp = dir + allocated;
*--dirp = '\0';
if (__lstat64_time64 (".", &st) < 0)
goto lose;
thisdev = st.st_dev;
thisino = st.st_ino;
if (__lstat64_time64 ("/", &st) < 0)
goto lose;
rootdev = st.st_dev;
rootino = st.st_ino;
while (!(thisdev == rootdev && thisino == rootino))
{
struct dirent64 *d;
dev_t dotdev;
ino_t dotino;
bool mount_point;
int parent_status;
size_t dirroom;
size_t namlen;
bool use_d_ino = true;
/* Look at the parent directory. */
#if HAVE_OPENAT_SUPPORT
fd = __openat64 (fd, "..", O_RDONLY);
if (fd < 0)
goto lose;
fd_needs_closing = true;
parent_status = __fstat64_time64 (fd, &st);
#else
dotlist[dotlen++] = '.';
dotlist[dotlen++] = '.';
dotlist[dotlen] = '\0';
parent_status = __lstat64_time64 (dotlist, &st);
#endif
if (parent_status != 0)
goto lose;
if (dirstream && __closedir (dirstream) != 0)
{
dirstream = NULL;
goto lose;
}
/* Figure out if this directory is a mount point. */
dotdev = st.st_dev;
dotino = st.st_ino;
mount_point = dotdev != thisdev;
/* Search for the last directory. */
#if HAVE_OPENAT_SUPPORT
dirstream = __fdopendir (fd);
if (dirstream == NULL)
goto lose;
fd_needs_closing = false;
#else
dirstream = __opendir (dotlist);
if (dirstream == NULL)
goto lose;
dotlist[dotlen++] = '/';
#endif
for (;;)
{
/* Clear errno to distinguish EOF from error if readdir returns
NULL. */
__set_errno (0);
d = __readdir64 (dirstream);
/* When we've iterated through all directory entries without finding
one with a matching d_ino, rewind the stream and consider each
name again, but this time, using lstat. This is necessary in a
chroot on at least one system (glibc-2.3.6 + linux 2.6.12), where
.., ../.., ../../.., etc. all had the same device number, yet the
d_ino values for entries in / did not match those obtained
via lstat. */
if (d == NULL && errno == 0 && use_d_ino)
{
use_d_ino = false;
__rewinddir (dirstream);
d = __readdir64 (dirstream);
}
if (d == NULL)
{
if (errno == 0)
/* EOF on dirstream, which can mean e.g., that the current
directory has been removed. */
__set_errno (ENOENT);
goto lose;
}
if (d->d_name[0] == '.' &&
(d->d_name[1] == '\0' ||
(d->d_name[1] == '.' && d->d_name[2] == '\0')))
continue;
if (use_d_ino)
{
bool match = (MATCHING_INO (d, thisino) || mount_point);
if (! match)
continue;
}
{
int entry_status;
#if HAVE_OPENAT_SUPPORT
entry_status = __fstatat64_time64 (fd, d->d_name, &st,
AT_SYMLINK_NOFOLLOW);
#else
/* Compute size needed for this file name, or for the file
name ".." in the same directory, whichever is larger.
Room for ".." might be needed the next time through
the outer loop. */
size_t name_alloc = _D_ALLOC_NAMLEN (d);
size_t filesize = dotlen + MAX (sizeof "..", name_alloc);
if (filesize < dotlen)
goto memory_exhausted;
if (dotsize < filesize)
{
/* My, what a deep directory tree you have, Grandma. */
size_t newsize = MAX (filesize, dotsize * 2);
size_t i;
if (newsize < dotsize)
goto memory_exhausted;
if (dotlist != dots)
free (dotlist);
dotlist = malloc (newsize);
if (dotlist == NULL)
goto lose;
dotsize = newsize;
i = 0;
do
{
dotlist[i++] = '.';
dotlist[i++] = '.';
dotlist[i++] = '/';
}
while (i < dotlen);
}
memcpy (dotlist + dotlen, d->d_name, _D_ALLOC_NAMLEN (d));
entry_status = __lstat64_time64 (dotlist, &st);
#endif
/* We don't fail here if we cannot stat() a directory entry.
This can happen when (network) file systems fail. If this
entry is in fact the one we are looking for we will find
out soon as we reach the end of the directory without
having found anything. */
if (entry_status == 0 && S_ISDIR (st.st_mode)
&& st.st_dev == thisdev && st.st_ino == thisino)
break;
}
}
dirroom = dirp - dir;
namlen = _D_EXACT_NAMLEN (d);
if (dirroom <= namlen)
{
if (size != 0)
{
__set_errno (ERANGE);
goto lose;
}
else
{
char *tmp;
size_t oldsize = allocated;
allocated += MAX (allocated, namlen);
if (allocated < oldsize
|| ! (tmp = realloc (dir, allocated)))
goto memory_exhausted;
/* Move current contents up to the end of the buffer.
This is guaranteed to be non-overlapping. */
dirp = memcpy (tmp + allocated - (oldsize - dirroom),
tmp + dirroom,
oldsize - dirroom);
dir = tmp;
}
}
dirp -= namlen;
memcpy (dirp, d->d_name, namlen);
*--dirp = '/';
thisdev = dotdev;
thisino = dotino;
}
if (dirstream && __closedir (dirstream) != 0)
{
dirstream = NULL;
goto lose;
}
if (dirp == &dir[allocated - 1])
*--dirp = '/';
#if ! HAVE_OPENAT_SUPPORT
if (dotlist != dots)
free (dotlist);
#endif
used = dir + allocated - dirp;
memmove (dir, dirp, used);
if (size == 0)
/* Ensure that the buffer is only as large as necessary. */
buf = (used < allocated ? realloc (dir, used) : dir);
if (buf == NULL)
/* Either buf was NULL all along, or 'realloc' failed but
we still have the original string. */
buf = dir;
return buf;
memory_exhausted:
__set_errno (ENOMEM);
lose:
{
int save = errno;
if (dirstream)
__closedir (dirstream);
#if HAVE_OPENAT_SUPPORT
if (fd_needs_closing)
__close_nocancel_nostatus (fd);
#else
if (dotlist != dots)
free (dotlist);
#endif
if (buf == NULL)
free (dir);
__set_errno (save);
}
return NULL;
} | 0 | [
"CWE-284"
] | glibc | 23e0e8f5f1fb5ed150253d986ecccdc90c2dcd5e | 32,913,813,816,445,095,000,000,000,000,000,000,000 | 340 | getcwd: Set errno to ERANGE for size == 1 (CVE-2021-3999)
No valid path returned by getcwd would fit into 1 byte, so reject the
size early and return NULL with errno set to ERANGE. This change is
prompted by CVE-2021-3999, which describes a single byte buffer
underflow and overflow when all of the following conditions are met:
- The buffer size (i.e. the second argument of getcwd) is 1 byte
- The current working directory is too long
- '/' is also mounted on the current working directory
Sequence of events:
- In sysdeps/unix/sysv/linux/getcwd.c, the syscall returns ENAMETOOLONG
because the linux kernel checks for name length before it checks
buffer size
- The code falls back to the generic getcwd in sysdeps/posix
- In the generic func, the buf[0] is set to '\0' on line 250
- this while loop on line 262 is bypassed:
while (!(thisdev == rootdev && thisino == rootino))
since the rootfs (/) is bind mounted onto the directory and the flow
goes on to line 449, where it puts a '/' in the byte before the
buffer.
- Finally on line 458, it moves 2 bytes (the underflowed byte and the
'\0') to the buf[0] and buf[1], resulting in a 1 byte buffer overflow.
- buf is returned on line 469 and errno is not set.
This resolves BZ #28769.
Reviewed-by: Andreas Schwab <[email protected]>
Reviewed-by: Adhemerval Zanella <[email protected]>
Signed-off-by: Qualys Security Advisory <[email protected]>
Signed-off-by: Siddhesh Poyarekar <[email protected]> |
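A minimal user-space sketch of the early rejection this message describes; the helper name and exact placement are illustrative, not glibc's:

#include <errno.h>
#include <stddef.h>

/* Any valid result is at least "/" plus a terminating NUL, so a
   1-byte buffer can never succeed. Failing early with ERANGE means
   the code that underflowed is never reached for this case. */
static char *reject_tiny_getcwd_buf(char *buf, size_t size)
{
    if (size == 1) {          /* cannot even hold "/" plus the NUL */
        errno = ERANGE;
        return NULL;
    }
    return buf;               /* caller continues the directory walk */
}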
static int ax25_ctl_ioctl(const unsigned int cmd, void __user *arg)
{
struct ax25_ctl_struct ax25_ctl;
ax25_digi digi;
ax25_dev *ax25_dev;
ax25_cb *ax25;
unsigned int k;
int ret = 0;
if (copy_from_user(&ax25_ctl, arg, sizeof(ax25_ctl)))
return -EFAULT;
if ((ax25_dev = ax25_addr_ax25dev(&ax25_ctl.port_addr)) == NULL)
return -ENODEV;
if (ax25_ctl.digi_count > AX25_MAX_DIGIS)
return -EINVAL;
if (ax25_ctl.arg > ULONG_MAX / HZ && ax25_ctl.cmd != AX25_KILL)
return -EINVAL;
digi.ndigi = ax25_ctl.digi_count;
for (k = 0; k < digi.ndigi; k++)
digi.calls[k] = ax25_ctl.digi_addr[k];
if ((ax25 = ax25_find_cb(&ax25_ctl.source_addr, &ax25_ctl.dest_addr, &digi, ax25_dev->dev)) == NULL)
return -ENOTCONN;
switch (ax25_ctl.cmd) {
case AX25_KILL:
ax25_send_control(ax25, AX25_DISC, AX25_POLLON, AX25_COMMAND);
#ifdef CONFIG_AX25_DAMA_SLAVE
if (ax25_dev->dama.slave && ax25->ax25_dev->values[AX25_VALUES_PROTOCOL] == AX25_PROTO_DAMA_SLAVE)
ax25_dama_off(ax25);
#endif
ax25_disconnect(ax25, ENETRESET);
break;
case AX25_WINDOW:
if (ax25->modulus == AX25_MODULUS) {
if (ax25_ctl.arg < 1 || ax25_ctl.arg > 7)
goto einval_put;
} else {
if (ax25_ctl.arg < 1 || ax25_ctl.arg > 63)
goto einval_put;
}
ax25->window = ax25_ctl.arg;
break;
case AX25_T1:
if (ax25_ctl.arg < 1 || ax25_ctl.arg > ULONG_MAX / HZ)
goto einval_put;
ax25->rtt = (ax25_ctl.arg * HZ) / 2;
ax25->t1 = ax25_ctl.arg * HZ;
break;
case AX25_T2:
if (ax25_ctl.arg < 1 || ax25_ctl.arg > ULONG_MAX / HZ)
goto einval_put;
ax25->t2 = ax25_ctl.arg * HZ;
break;
case AX25_N2:
if (ax25_ctl.arg < 1 || ax25_ctl.arg > 31)
goto einval_put;
ax25->n2count = 0;
ax25->n2 = ax25_ctl.arg;
break;
case AX25_T3:
if (ax25_ctl.arg > ULONG_MAX / HZ)
goto einval_put;
ax25->t3 = ax25_ctl.arg * HZ;
break;
case AX25_IDLE:
if (ax25_ctl.arg > ULONG_MAX / (60 * HZ))
goto einval_put;
ax25->idle = ax25_ctl.arg * 60 * HZ;
break;
case AX25_PACLEN:
if (ax25_ctl.arg < 16 || ax25_ctl.arg > 65535)
goto einval_put;
ax25->paclen = ax25_ctl.arg;
break;
default:
goto einval_put;
}
out_put:
ax25_dev_put(ax25_dev);
ax25_cb_put(ax25);
return ret;
einval_put:
ret = -EINVAL;
goto out_put;
} | 1 | [
"CWE-416"
] | linux | 87563a043cef044fed5db7967a75741cc16ad2b1 | 288,150,873,176,406,500,000,000,000,000,000,000,000 | 101 | ax25: fix reference count leaks of ax25_dev
The previous commit d01ffb9eee4a ("ax25: add refcount in ax25_dev
to avoid UAF bugs") introduces refcount into ax25_dev, but there
are reference leak paths in ax25_ctl_ioctl(), ax25_fwd_ioctl(),
ax25_rt_add(), ax25_rt_del() and ax25_rt_opt().
This patch uses ax25_dev_put() and adjusts the position of
ax25_addr_ax25dev() to fix reference count leaks of ax25_dev.
Fixes: d01ffb9eee4a ("ax25: add refcount in ax25_dev to avoid UAF bugs")
Signed-off-by: Duoming Zhou <[email protected]>
Reviewed-by: Dan Carpenter <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Jakub Kicinski <[email protected]> |
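A kernel-context sketch of the discipline this fix applies: every path that obtained a reference through ax25_addr_ax25dev() must drop it with ax25_dev_put() before returning (the handler's real work is elided):

/* Kernel context assumed; only the refcount handling is shown. */
static int ax25_ctl_refcount_sketch(struct ax25_ctl_struct *ctl)
{
    ax25_dev *dev = ax25_addr_ax25dev(&ctl->port_addr); /* takes a ref */

    if (dev == NULL)
        return -ENODEV;
    if (ctl->digi_count > AX25_MAX_DIGIS) {
        ax25_dev_put(dev);   /* error paths must drop the ref too */
        return -EINVAL;
    }
    /* ... the real handler's work goes here ... */
    ax25_dev_put(dev);       /* normal exit drops it as well */
    return 0;
}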
static inline unsigned short ReadPropertyUnsignedShort(const EndianType endian,
const unsigned char *buffer)
{
unsigned short
value;
if (endian == LSBEndian)
{
value=(unsigned short) ((buffer[1] << 8) | buffer[0]);
return((unsigned short) (value & 0xffff));
}
value=(unsigned short) ((((unsigned char *) buffer)[0] << 8) |
((unsigned char *) buffer)[1]);
return((unsigned short) (value & 0xffff));
} | 1 | [
"CWE-190",
"CWE-125"
] | ImageMagick | d8ab7f046587f2e9f734b687ba7e6e10147c294b | 282,251,191,333,977,860,000,000,000,000,000,000,000 | 15 | Improve checking of EXIF profile to prevent integer overflow (bug report from Ibrahim el-sayed) |
HiiGetLanguages (
IN CONST EFI_HII_STRING_PROTOCOL *This,
IN EFI_HII_HANDLE PackageList,
IN OUT CHAR8 *Languages,
IN OUT UINTN *LanguagesSize
)
{
LIST_ENTRY *Link;
HII_DATABASE_PRIVATE_DATA *Private;
HII_DATABASE_RECORD *DatabaseRecord;
HII_DATABASE_PACKAGE_LIST_INSTANCE *PackageListNode;
HII_STRING_PACKAGE_INSTANCE *StringPackage;
UINTN ResultSize;
if (This == NULL || LanguagesSize == NULL || PackageList == NULL) {
return EFI_INVALID_PARAMETER;
}
if (*LanguagesSize != 0 && Languages == NULL) {
return EFI_INVALID_PARAMETER;
}
if (!IsHiiHandleValid (PackageList)) {
return EFI_NOT_FOUND;
}
Private = HII_STRING_DATABASE_PRIVATE_DATA_FROM_THIS (This);
PackageListNode = NULL;
for (Link = Private->DatabaseList.ForwardLink; Link != &Private->DatabaseList; Link = Link->ForwardLink) {
DatabaseRecord = CR (Link, HII_DATABASE_RECORD, DatabaseEntry, HII_DATABASE_RECORD_SIGNATURE);
if (DatabaseRecord->Handle == PackageList) {
PackageListNode = DatabaseRecord->PackageList;
break;
}
}
if (PackageListNode == NULL) {
return EFI_NOT_FOUND;
}
//
// Search the languages in the specified packagelist.
//
ResultSize = 0;
for (Link = PackageListNode->StringPkgHdr.ForwardLink;
Link != &PackageListNode->StringPkgHdr;
Link = Link->ForwardLink
) {
StringPackage = CR (Link, HII_STRING_PACKAGE_INSTANCE, StringEntry, HII_STRING_PACKAGE_SIGNATURE);
ResultSize += AsciiStrSize (StringPackage->StringPkgHdr->Language);
if (ResultSize <= *LanguagesSize) {
AsciiStrCpyS (Languages, *LanguagesSize / sizeof (CHAR8), StringPackage->StringPkgHdr->Language);
Languages += AsciiStrSize (StringPackage->StringPkgHdr->Language);
*(Languages - 1) = L';';
}
}
if (ResultSize == 0) {
return EFI_NOT_FOUND;
}
if (*LanguagesSize < ResultSize) {
*LanguagesSize = ResultSize;
return EFI_BUFFER_TOO_SMALL;
}
*(Languages - 1) = 0;
return EFI_SUCCESS;
}
| 0 | [] | edk2 | 764e8ba1389a617639d79d2c4f0d53f4ea4a7387 | 79,192,582,179,902,140,000,000,000,000,000,000,000 | 66 | MdeModulePkg/String.c: Zero memory before free (CVE-2019-14558)
REF: https://bugzilla.tianocore.org/show_bug.cgi?id=1611
Cc: Liming Gao <[email protected]>
Cc: Eric Dong <[email protected]>
Cc: Jian J Wang <[email protected]>
Signed-off-by: Dandan Bi <[email protected]>
Reviewed-by: Eric Dong <[email protected]>
Reviewed-by: Jian J Wang <[email protected]> |
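A plain-C sketch of the scrub-before-free hardening this message refers to; EDK2 itself would use ZeroMem() and FreePool(), and production code needs a zeroing primitive the compiler cannot optimize away:

#include <stdlib.h>
#include <string.h>

static void free_scrubbed(void *buf, size_t len)
{
    if (buf == NULL)
        return;
    memset(buf, 0, len);   /* clear contents so freed memory cannot
                              leak stale data to a later allocation */
    free(buf);
}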
static inline __s32 inotify_find_watch(struct inotify_handle *ih, struct inode *inode,
struct inotify_watch **watchp)
{
return -EOPNOTSUPP;
} | 0 | [
"CWE-362"
] | linux-2.6 | 8f7b0ba1c853919b85b54774775f567f30006107 | 196,476,253,226,070,730,000,000,000,000,000,000,000 | 5 | Fix inotify watch removal/umount races
Inotify watch removals suck violently.
To kick the watch out we need (in this order) inode->inotify_mutex and
ih->mutex. That's fine if we have a hold on inode; however, for all
other cases we need to make damn sure we don't race with umount. We can
*NOT* just grab a reference to a watch - inotify_unmount_inodes() will
happily sail past it and we'll end with reference to inode potentially
outliving its superblock.
Ideally we just want to grab an active reference to superblock if we
can; that will make sure we won't go into inotify_umount_inodes() until
we are done. Cleanup is just deactivate_super().
However, that leaves a messy case - what if we *are* racing with
umount() and active references to superblock can't be acquired anymore?
We can bump ->s_count, grab ->s_umount, which will almost certainly wait
until the superblock is shut down and the watch in question is pining
for fjords. That's fine, but there is a problem - we might have hit the
window between ->s_active getting to 0 / ->s_count - below S_BIAS (i.e.
the moment when superblock is past the point of no return and is heading
for shutdown) and the moment when deactivate_super() acquires
->s_umount.
We could just do drop_super() yield() and retry, but that's rather
antisocial and this stuff is luser-triggerable. OTOH, having grabbed
->s_umount and having found that we'd got there first (i.e. that
->s_root is non-NULL) we know that we won't race with
inotify_umount_inodes().
So we could grab a reference to watch and do the rest as above, just
with drop_super() instead of deactivate_super(), right? Wrong. We had
to drop ih->mutex before we could grab ->s_umount. So the watch
could've been gone already.
That still can be dealt with - we need to save watch->wd, do idr_find()
and compare its result with our pointer. If they match, we either have
the damn thing still alive or we'd lost not one but two races at once,
the watch had been killed and a new one got created with the same ->wd
at the same address. That couldn't have happened in inotify_destroy(),
but inotify_rm_wd() could run into that. Still, "new one got created"
is not a problem - we have every right to kill it or leave it alone,
whatever's more convenient.
So we can use idr_find(...) == watch && watch->inode->i_sb == sb as
"grab it and kill it" check. If it's been our original watch, we are
fine, if it's a newcomer - nevermind, just pretend that we'd won the
race and kill the fscker anyway; we are safe since we know that its
superblock won't be going away.
And yes, this is far beyond mere "not very pretty"; so's the entire
concept of inotify to start with.
Signed-off-by: Al Viro <[email protected]>
Acked-by: Greg KH <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
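A kernel-context sketch of the final re-validation test quoted in this message; the caller is assumed to have already bumped sb->s_count, dropped ih->mutex, taken down_read(&sb->s_umount), and seen sb->s_root != NULL (locking and cleanup elided):

static int watch_still_ours(struct inotify_handle *ih,
                            struct super_block *sb,
                            struct inotify_watch *watch, s32 wd)
{
    /* The same wd must still map to the same watch on the same sb;
       if a newcomer reused the wd, killing it is still safe. */
    return idr_find(&ih->idr, wd) == watch && watch->inode->i_sb == sb;
}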
static int sqlite_handle_begin(pdo_dbh_t *dbh TSRMLS_DC)
{
pdo_sqlite_db_handle *H = (pdo_sqlite_db_handle *)dbh->driver_data;
char *errmsg = NULL;
if (sqlite3_exec(H->db, "BEGIN", NULL, NULL, &errmsg) != SQLITE_OK) {
pdo_sqlite_error(dbh);
if (errmsg)
sqlite3_free(errmsg);
return 0;
}
return 1;
} | 0 | [
"CWE-264"
] | php-src | 055ecbc62878e86287d742c7246c21606cee8183 | 222,267,785,866,113,750,000,000,000,000,000,000,000 | 13 | Improve check for :memory: pseudo-filename in SQLite
dirserv_pick_cached_dir_obj(cached_dir_t *cache_src,
cached_dir_t *auth_src,
time_t dirty, cached_dir_t *(*regenerate)(void),
const char *name,
authority_type_t auth_type)
{
or_options_t *options = get_options();
int authority = (auth_type == V1_AUTHORITY && authdir_mode_v1(options)) ||
(auth_type == V2_AUTHORITY && authdir_mode_v2(options));
if (!authority || authdir_mode_bridge(options)) {
return cache_src;
} else {
/* We're authoritative. */
if (regenerate != NULL) {
if (dirty && dirty + DIR_REGEN_SLACK_TIME < time(NULL)) {
if (!(auth_src = regenerate())) {
log_err(LD_BUG, "Couldn't generate %s?", name);
exit(1);
}
} else {
log_info(LD_DIRSERV, "The %s is still clean; reusing.", name);
}
}
return auth_src ? auth_src : cache_src;
}
} | 0 | [
"CWE-264"
] | tor | 00fffbc1a15e2696a89c721d0c94dc333ff419ef | 86,482,405,461,628,280,000,000,000,000,000,000,000 | 27 | Don't give the Guard flag to relays without the CVE-2011-2768 fix |
static MagickBooleanType IsITUFaxImage(const Image *image)
{
const StringInfo
*profile;
const unsigned char
*datum;
profile=GetImageProfile(image,"8bim");
if (profile == (const StringInfo *) NULL)
return(MagickFalse);
if (GetStringInfoLength(profile) < 5)
return(MagickFalse);
datum=GetStringInfoDatum(profile);
if ((datum[0] == 0x47) && (datum[1] == 0x33) && (datum[2] == 0x46) &&
(datum[3] == 0x41) && (datum[4] == 0x58))
return(MagickTrue);
return(MagickFalse);
} | 0 | [
"CWE-416"
] | ImageMagick | 39f226a9c137f547e12afde972eeba7551124493 | 182,732,393,898,537,900,000,000,000,000,000,000,000 | 19 | https://github.com/ImageMagick/ImageMagick/issues/1641 |
int lxc_wait_for_pid_status(pid_t pid)
{
int status, ret;
again:
ret = waitpid(pid, &status, 0);
if (ret == -1) {
if (errno == EINTR)
goto again;
return -1;
}
if (ret != pid)
goto again;
return status;
} | 0 | [
"CWE-59",
"CWE-61"
] | lxc | 592fd47a6245508b79fe6ac819fe6d3b2c1289be | 159,072,435,703,666,000,000,000,000,000,000,000,000 | 15 | CVE-2015-1335: Protect container mounts against symlinks
When a container starts up, lxc sets up the container's initial fstree
by doing a bunch of mounting, guided by the container configuration
file. The container config is owned by the admin or user on the host,
so we do not try to guard against bad entries. However, since the
mount target is in the container, it's possible that the container admin
could divert the mount with symbolic links. This could bypass proper
container startup (i.e. confinement of a root-owned container by the
restrictive apparmor policy, by diverting the required write to
/proc/self/attr/current), or bypass the (path-based) apparmor policy
by diverting, say, /proc to /mnt in the container.
To prevent this,
1. do not allow mounts to paths containing symbolic links
2. do not allow bind mounts from relative paths containing symbolic
links.
Details:
Define safe_mount which ensures that the container has not inserted any
symbolic links into any mount targets for mounts to be done during
container setup.
The host's mount path may contain symbolic links. As it is under the
control of the administrator, that's ok. So safe_mount begins the check
for symbolic links after the rootfs->mount, by opening that directory.
It opens each directory along the path using openat() relative to the
parent directory using O_NOFOLLOW. When the target is reached, it
mounts onto /proc/self/fd/<targetfd>.
Use safe_mount() in mount_entry(), when mounting container proc,
and when needed. In particular, safe_mount() need not be used in
any case where:
1. the mount is done in the container's namespace
2. the mount is for the container's rootfs
3. the mount is relative to a tmpfs or proc/sysfs which we have
just safe_mount()ed ourselves
Since we were using proc/net as a temporary placeholder for /proc/sys/net
during container startup, and proc/net is a symbolic link, use proc/tty
instead.
Update the lxc.container.conf manpage with details about the new
restrictions.
Finally, add a testcase to test some symbolic link possibilities.
Reported-by: Roman Fiedler
Signed-off-by: Serge Hallyn <[email protected]>
Acked-by: Stéphane Graber <[email protected]> |
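A user-space sketch of the safe_mount() idea this message describes (error handling abbreviated; the real lxc helper does more): every path component is opened with O_NOFOLLOW, so a symlink fails with ELOOP, and the mount targets /proc/self/fd/<fd> so the checked directory cannot be swapped for a symlink afterwards:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/mount.h>

static int safe_mount_sketch(const char *src, int root_fd,
                             const char *dst_rel, const char *fstype,
                             unsigned long flags)
{
    char path[4096], proc[64];
    char *comp, *save;
    int fd = -1, next, ret;

    snprintf(path, sizeof(path), "%s", dst_rel);
    for (comp = strtok_r(path, "/", &save); comp != NULL;
         comp = strtok_r(NULL, "/", &save)) {
        next = openat(fd < 0 ? root_fd : fd, comp,
                      O_RDONLY | O_NOFOLLOW | O_CLOEXEC);
        if (fd >= 0)
            close(fd);
        if (next < 0)
            return -1;          /* missing component or a symlink */
        fd = next;
    }
    if (fd < 0)
        return -1;
    snprintf(proc, sizeof(proc), "/proc/self/fd/%d", fd);
    ret = mount(src, proc, fstype, flags, NULL);
    close(fd);
    return ret;
}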
Status setFromString(const std::string& str) {
std::vector<std::string> strList;
splitStringDelim(str, &strList, ',');
std::vector<UserName> out;
for (const auto& nameStr : strList) {
auto swUserName = UserName::parse(nameStr);
if (!swUserName.isOK()) {
return swUserName.getStatus();
}
out.push_back(std::move(swUserName.getValue()));
}
auto status = _checkForSystemUser(out);
if (!status.isOK()) {
return status;
}
auto authzManager = _authzManager;
if (!authzManager) {
return Status::OK();
}
{
stdx::lock_guard<stdx::mutex> lk(_mutex);
_userNames = std::move(out);
}
authzManager->invalidateUserCache(Client::getCurrent()->getOperationContext());
return Status::OK();
} | 0 | [
"CWE-613"
] | mongo | e55d6e2292e5dbe2f97153251d8193d1cc89f5d7 | 238,192,706,991,968,800,000,000,000,000,000,000,000 | 31 | SERVER-38984 Validate unique User ID on UserCache hit |
int main(int argc, char** argv)
{
int sck;
int dis;
struct sockaddr_un sa;
size_t len;
char* p;
char* display;
if (argc != 1)
{
printf("xrdp disconnect utility\n");
printf("run with no parameters to disconnect you xrdp session\n");
return 0;
}
display = getenv("DISPLAY");
if (display == 0)
{
printf("display not set\n");
return 1;
}
dis = strtol(display + 1, &p, 10);
memset(&sa, 0, sizeof(sa));
sa.sun_family = AF_UNIX;
sprintf(sa.sun_path, "/tmp/xrdp_disconnect_display_%d", dis);
if (access(sa.sun_path, F_OK) != 0)
{
printf("not in an xrdp session\n");
return 1;
}
sck = socket(PF_UNIX, SOCK_DGRAM, 0);
len = sizeof(sa);
if (sendto(sck, "sig", 4, 0, (struct sockaddr*)&sa, len) > 0)
{
printf("message sent ok\n");
}
return 0;
} | 1 | [] | xrdp | d8f9e8310dac362bb9578763d1024178f94f4ecc | 256,290,203,844,973,900,000,000,000,000,000,000,000 | 39 | move temp files from /tmp to /tmp/.xrdp |
static u8 tcm_to_srp_tsk_mgmt_status(const int tcm_mgmt_status)
{
switch (tcm_mgmt_status) {
case TMR_FUNCTION_COMPLETE:
return SRP_TSK_MGMT_SUCCESS;
case TMR_FUNCTION_REJECTED:
return SRP_TSK_MGMT_FUNC_NOT_SUPP;
}
return SRP_TSK_MGMT_FAILED;
} | 0 | [
"CWE-200",
"CWE-476"
] | linux | 51093254bf879bc9ce96590400a87897c7498463 | 148,853,436,718,485,500,000,000,000,000,000,000,000 | 10 | IB/srpt: Simplify srpt_handle_tsk_mgmt()
Let the target core check task existence instead of the SRP target
driver. Additionally, let the target core check the validity of the
task management request instead of the ib_srpt driver.
This patch fixes the following kernel crash:
BUG: unable to handle kernel NULL pointer dereference at 0000000000000001
IP: [<ffffffffa0565f37>] srpt_handle_new_iu+0x6d7/0x790 [ib_srpt]
Oops: 0002 [#1] SMP
Call Trace:
[<ffffffffa05660ce>] srpt_process_completion+0xde/0x570 [ib_srpt]
[<ffffffffa056669f>] srpt_compl_thread+0x13f/0x160 [ib_srpt]
[<ffffffff8109726f>] kthread+0xcf/0xe0
[<ffffffff81613cfc>] ret_from_fork+0x7c/0xb0
Signed-off-by: Bart Van Assche <[email protected]>
Fixes: 3e4f574857ee ("ib_srpt: Convert TMR path to target_submit_tmr")
Tested-by: Alex Estrin <[email protected]>
Reviewed-by: Christoph Hellwig <[email protected]>
Cc: Nicholas Bellinger <[email protected]>
Cc: Sagi Grimberg <[email protected]>
Cc: stable <[email protected]>
Signed-off-by: Doug Ledford <[email protected]> |
my_bool mariadb_get_infov(MYSQL *mysql, enum mariadb_value value, void *arg, ...)
{
va_list ap;
va_start(ap, arg);
switch(value) {
case MARIADB_MAX_ALLOWED_PACKET:
*((size_t *)arg)= (size_t)max_allowed_packet;
break;
case MARIADB_NET_BUFFER_LENGTH:
*((size_t *)arg)= (size_t)net_buffer_length;
break;
case MARIADB_CONNECTION_ERROR_ID:
if (!mysql)
goto error;
*((unsigned int *)arg)= mysql->net.last_errno;
break;
case MARIADB_CONNECTION_ERROR:
if (!mysql)
goto error;
*((char **)arg)= mysql->net.last_error;
break;
case MARIADB_CONNECTION_SQLSTATE:
if (!mysql)
goto error;
*((char **)arg)= mysql->net.sqlstate;
break;
case MARIADB_CONNECTION_TLS_VERSION:
#ifdef HAVE_TLS
if (mysql && mysql->net.pvio && mysql->net.pvio->ctls)
*((char **)arg)= (char *)ma_pvio_tls_get_protocol_version(mysql->net.pvio->ctls);
else
#endif
goto error;
break;
case MARIADB_CONNECTION_TLS_VERSION_ID:
#ifdef HAVE_TLS
if (mysql && mysql->net.pvio && mysql->net.pvio->ctls)
*((unsigned int *)arg)= ma_pvio_tls_get_protocol_version_id(mysql->net.pvio->ctls);
else
#endif
goto error;
break;
case MARIADB_TLS_LIBRARY:
#ifdef HAVE_TLS
*((const char **)arg)= tls_library_version;
#else
*((const char **)arg)= "Off";
#endif
break;
case MARIADB_CLIENT_VERSION:
*((const char **)arg)= MARIADB_CLIENT_VERSION_STR;
break;
case MARIADB_CLIENT_VERSION_ID:
*((size_t *)arg)= MARIADB_VERSION_ID;
break;
case MARIADB_CONNECTION_SERVER_VERSION:
if (mysql)
*((char **)arg)= mysql->server_version;
else
goto error;
break;
case MARIADB_CONNECTION_SERVER_TYPE:
if (mysql)
*((const char **)arg)= mariadb_connection(mysql) ? "MariaDB" : "MySQL";
else
goto error;
break;
case MARIADB_CONNECTION_SERVER_VERSION_ID:
if (mysql)
*((size_t *)arg)= mariadb_server_version_id(mysql);
else
goto error;
break;
case MARIADB_CONNECTION_PROTOCOL_VERSION_ID:
if (mysql)
*((unsigned int *)arg)= mysql->protocol_version;
else
goto error;
break;
case MARIADB_CONNECTION_MARIADB_CHARSET_INFO:
if (mysql)
mariadb_get_charset_info(mysql, (MY_CHARSET_INFO *)arg);
else
goto error;
break;
case MARIADB_CONNECTION_SOCKET:
if (mysql)
*((my_socket *)arg)= mariadb_get_socket(mysql);
else
goto error;
break;
case MARIADB_CONNECTION_TYPE:
if (mysql && mysql->net.pvio)
*((int *)arg)= (int)mysql->net.pvio->type;
else
goto error;
break;
case MARIADB_CONNECTION_ASYNC_TIMEOUT_MS:
if (mysql && mysql->options.extension && mysql->options.extension->async_context)
*((unsigned int *)arg)= mysql->options.extension->async_context->timeout_value;
break;
case MARIADB_CONNECTION_ASYNC_TIMEOUT:
if (mysql && mysql->options.extension && mysql->options.extension->async_context)
{
unsigned int timeout= mysql->options.extension->async_context->timeout_value;
if (timeout > UINT_MAX - 999)
*((unsigned int *)arg)= (timeout - 1)/1000 + 1;
else
*((unsigned int *)arg)= (timeout+999)/1000;
}
break;
case MARIADB_CHARSET_NAME:
{
char *name;
name= va_arg(ap, char *);
if (name)
*((MARIADB_CHARSET_INFO **)arg)= (MARIADB_CHARSET_INFO *)mysql_find_charset_name(name);
else
goto error;
}
break;
case MARIADB_CHARSET_ID:
{
unsigned int nr;
nr= va_arg(ap, unsigned int);
*((MARIADB_CHARSET_INFO **)arg)= (MARIADB_CHARSET_INFO *)mysql_find_charset_nr(nr);
}
break;
case MARIADB_CONNECTION_SSL_CIPHER:
#ifdef HAVE_TLS
if (mysql && mysql->net.pvio && mysql->net.pvio->ctls)
*((char **)arg)= (char *)ma_pvio_tls_cipher(mysql->net.pvio->ctls);
else
#endif
goto error;
break;
case MARIADB_CLIENT_ERRORS:
*((char ***)arg)= (char **)client_errors;
break;
case MARIADB_CONNECTION_INFO:
if (mysql)
*((char **)arg)= (char *)mysql->info;
else
goto error;
break;
case MARIADB_CONNECTION_PVIO_TYPE:
if (mysql && mysql->net.pvio)
*((unsigned int *)arg)= (unsigned int)mysql->net.pvio->type;
else
goto error;
break;
case MARIADB_CONNECTION_SCHEMA:
if (mysql)
*((char **)arg)= mysql->db;
else
goto error;
break;
case MARIADB_CONNECTION_USER:
if (mysql)
*((char **)arg)= mysql->user;
else
goto error;
break;
case MARIADB_CONNECTION_PORT:
if (mysql)
*((unsigned int *)arg)= mysql->port;
else
goto error;
break;
case MARIADB_CONNECTION_UNIX_SOCKET:
if (mysql)
*((char **)arg)= mysql->unix_socket;
else
goto error;
break;
case MARIADB_CONNECTION_HOST:
if (mysql)
*((char **)arg)= mysql->host;
else
goto error;
break;
case MARIADB_CONNECTION_SERVER_STATUS:
if (mysql)
*((unsigned int *)arg)= mysql->server_status;
else
goto error;
break;
case MARIADB_CONNECTION_SERVER_CAPABILITIES:
if (mysql)
*((unsigned long *)arg)= mysql->server_capabilities;
else
goto error;
break;
case MARIADB_CONNECTION_EXTENDED_SERVER_CAPABILITIES:
if (mysql)
*((unsigned long *)arg)= mysql->extension->mariadb_server_capabilities;
else
goto error;
break;
case MARIADB_CONNECTION_CLIENT_CAPABILITIES:
if (mysql)
*((unsigned long *)arg)= mysql->client_flag;
else
goto error;
break;
default:
va_end(ap);
return(-1);
}
va_end(ap);
return(0);
error:
va_end(ap);
return(-1);
} | 0 | [
"CWE-20"
] | mariadb-connector-c | 2759b87d72926b7c9b5426437a7c8dd15ff57945 | 169,671,727,087,038,340,000,000,000,000,000,000,000 | 217 | sanity checks for client-supplied OK packet content
reported by Matthias Kaiser, Apple Information Security |
lvs_syncd_handler(vector_t *strvec)
{
unsigned val;
size_t i;
if (global_data->lvs_syncd.ifname) {
report_config_error(CONFIG_GENERAL_ERROR, "lvs_sync_daemon has already been specified as %s %s - ignoring", global_data->lvs_syncd.ifname, global_data->lvs_syncd.vrrp_name);
return;
}
if (vector_size(strvec) < 3) {
report_config_error(CONFIG_GENERAL_ERROR, "lvs_sync_daemon requires interface, VRRP instance");
return;
}
if (strlen(strvec_slot(strvec, 1)) >= IP_VS_IFNAME_MAXLEN) {
report_config_error(CONFIG_GENERAL_ERROR, "lvs_sync_daemon interface name '%s' too long - ignoring", FMT_STR_VSLOT(strvec, 1));
return;
}
if (strlen(strvec_slot(strvec, 2)) >= IP_VS_IFNAME_MAXLEN) {
report_config_error(CONFIG_GENERAL_ERROR, "lvs_sync_daemon vrrp interface name '%s' too long - ignoring", FMT_STR_VSLOT(strvec, 2));
return;
}
global_data->lvs_syncd.ifname = set_value(strvec);
global_data->lvs_syncd.vrrp_name = MALLOC(strlen(strvec_slot(strvec, 2)) + 1);
if (!global_data->lvs_syncd.vrrp_name)
return;
strcpy(global_data->lvs_syncd.vrrp_name, strvec_slot(strvec, 2));
/* This is maintained for backwards compatibility, prior to adding "id" option */
if (vector_size(strvec) >= 4 && isdigit(FMT_STR_VSLOT(strvec, 3)[0])) {
report_config_error(CONFIG_GENERAL_ERROR, "Please use keyword \"id\" before lvs_sync_daemon syncid value");
if (!read_unsigned_strvec(strvec, 3, &val, 0, 255, false))
report_config_error(CONFIG_GENERAL_ERROR, "Invalid syncid (%s) - defaulting to vrid", FMT_STR_VSLOT(strvec, 3));
else
global_data->lvs_syncd.syncid = val;
i = 4;
}
else
i = 3;
for ( ; i < vector_size(strvec); i++) {
if (!strcmp(strvec_slot(strvec, i), "id")) {
if (i == vector_size(strvec) - 1) {
report_config_error(CONFIG_GENERAL_ERROR, "No value specified for lvs_sync_daemon id, defaulting to vrid");
continue;
}
if (!read_unsigned_strvec(strvec, i + 1, &val, 0, 255, false))
report_config_error(CONFIG_GENERAL_ERROR, "Invalid syncid (%s) - defaulting to vrid", FMT_STR_VSLOT(strvec, i+1));
else
global_data->lvs_syncd.syncid = val;
i++; /* skip over value */
continue;
}
#ifdef _HAVE_IPVS_SYNCD_ATTRIBUTES_
if (!strcmp(strvec_slot(strvec, i), "maxlen")) {
if (i == vector_size(strvec) - 1) {
report_config_error(CONFIG_GENERAL_ERROR, "No value specified for lvs_sync_daemon maxlen - ignoring");
continue;
}
if (!read_unsigned_strvec(strvec, i + 1, &val, 0, 65535 - 20 - 8, false))
report_config_error(CONFIG_GENERAL_ERROR, "Invalid lvs_sync_daemon maxlen (%s) - ignoring", FMT_STR_VSLOT(strvec, i+1));
else
global_data->lvs_syncd.sync_maxlen = (uint16_t)val;
i++; /* skip over value */
continue;
}
if (!strcmp(strvec_slot(strvec, i), "port")) {
if (i == vector_size(strvec) - 1) {
report_config_error(CONFIG_GENERAL_ERROR, "No value specified for lvs_sync_daemon port - ignoring");
continue;
}
if (!read_unsigned_strvec(strvec, i + 1, &val, 0, 65535, false))
report_config_error(CONFIG_GENERAL_ERROR, "Invalid lvs_sync_daemon port (%s) - ignoring", FMT_STR_VSLOT(strvec, i+1));
else
global_data->lvs_syncd.mcast_port = (uint16_t)val;
i++; /* skip over value */
continue;
}
if (!strcmp(strvec_slot(strvec, i), "ttl")) {
if (i == vector_size(strvec) - 1) {
report_config_error(CONFIG_GENERAL_ERROR, "No value specified for lvs_sync_daemon ttl - ignoring");
continue;
}
if (!read_unsigned_strvec(strvec, i + 1, &val, 0, 255, false))
report_config_error(CONFIG_GENERAL_ERROR, "Invalid lvs_sync_daemon ttl (%s) - ignoring", FMT_STR_VSLOT(strvec, i+1));
else
global_data->lvs_syncd.mcast_ttl = (uint8_t)val;
i++; /* skip over value */
continue;
}
if (!strcmp(strvec_slot(strvec, i), "group")) {
if (i == vector_size(strvec) - 1) {
report_config_error(CONFIG_GENERAL_ERROR, "No value specified for lvs_sync_daemon group - ignoring");
continue;
}
if (inet_stosockaddr(strvec_slot(strvec, i+1), NULL, &global_data->lvs_syncd.mcast_group) < 0)
report_config_error(CONFIG_GENERAL_ERROR, "Invalid lvs_sync_daemon group (%s) - ignoring", FMT_STR_VSLOT(strvec, i+1));
if ((global_data->lvs_syncd.mcast_group.ss_family == AF_INET && !IN_MULTICAST(htonl(((struct sockaddr_in *)&global_data->lvs_syncd.mcast_group)->sin_addr.s_addr))) ||
(global_data->lvs_syncd.mcast_group.ss_family == AF_INET6 && !IN6_IS_ADDR_MULTICAST(&((struct sockaddr_in6 *)&global_data->lvs_syncd.mcast_group)->sin6_addr))) {
report_config_error(CONFIG_GENERAL_ERROR, "lvs_sync_daemon group address %s is not multicast - ignoring", FMT_STR_VSLOT(strvec, i+1));
global_data->lvs_syncd.mcast_group.ss_family = AF_UNSPEC;
}
i++; /* skip over value */
continue;
}
#endif
report_config_error(CONFIG_GENERAL_ERROR, "Unknown option %s specified for lvs_sync_daemon", FMT_STR_VSLOT(strvec, i));
}
} | 0 | [
"CWE-200"
] | keepalived | c6247a9ef2c7b33244ab1d3aa5d629ec49f0a067 | 100,019,062,761,506,020,000,000,000,000,000,000,000 | 116 | Add command line and configuration option to set umask
Issue #1048 identified that files created by keepalived are created
with mode 0666. This commit changes the default to 0644, and also
allows the umask to be specified in the configuration or as a command
line option.
Signed-off-by: Quentin Armitage <[email protected]> |
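A minimal sketch of the configurable-umask idea; the validation below is illustrative, not keepalived's actual option parser:

#include <stdlib.h>
#include <sys/stat.h>

static int set_umask_option(const char *arg)
{
    char *end;
    long mask = strtol(arg, &end, 8);   /* umask is given in octal */

    if (*end != '\0' || mask < 0 || mask > 0777)
        return -1;                      /* reject malformed values */
    umask((mode_t)mask);                /* applies to files created later */
    return 0;
}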
ibytestream::ibytestream(Sirikata::DecoderReader *p, unsigned int byte_offset,
const Sirikata::JpegAllocator<uint8_t> &alloc)
: parent(p) {
bytes_read = byte_offset;
} | 0 | [
"CWE-1187"
] | lepton | 82167c144a322cc956da45407f6dce8d4303d346 | 195,448,183,390,127,730,000,000,000,000,000,000,000 | 5 | fix #87 : always check that threads_required set up the appropriate number of threads---fire off nop functions on unused threads for consistency |
static struct user_namespace *netns_owner(struct ns_common *ns)
{
return to_net_ns(ns)->user_ns;
} | 0 | [
"CWE-416"
] | linux | 21b5944350052d2583e82dd59b19a9ba94a007f0 | 227,082,250,324,104,040,000,000,000,000,000,000,000 | 4 | net: Fix double free and memory corruption in get_net_ns_by_id()
(I can trivially verify that that idr_remove in cleanup_net happens
after the network namespace count has dropped to zero --EWB)
Function get_net_ns_by_id() does not check for net::count
after it has found a peer in netns_ids idr.
It may dereference a peer, after its count has already been
finaly decremented. This leads to double free and memory
corruption:
put_net(peer) rtnl_lock()
atomic_dec_and_test(&peer->count) [count=0] ...
__put_net(peer) get_net_ns_by_id(net, id)
spin_lock(&cleanup_list_lock)
list_add(&net->cleanup_list, &cleanup_list)
spin_unlock(&cleanup_list_lock)
queue_work() peer = idr_find(&net->netns_ids, id)
| get_net(peer) [count=1]
| ...
| (use after final put)
v ...
cleanup_net() ...
spin_lock(&cleanup_list_lock) ...
list_replace_init(&cleanup_list, ..) ...
spin_unlock(&cleanup_list_lock) ...
... ...
... put_net(peer)
... atomic_dec_and_test(&peer->count) [count=0]
... spin_lock(&cleanup_list_lock)
... list_add(&net->cleanup_list, &cleanup_list)
... spin_unlock(&cleanup_list_lock)
... queue_work()
... rtnl_unlock()
rtnl_lock() ...
for_each_net(tmp) { ...
id = __peernet2id(tmp, peer) ...
spin_lock_irq(&tmp->nsid_lock) ...
idr_remove(&tmp->netns_ids, id) ...
... ...
net_drop_ns() ...
net_free(peer) ...
} ...
|
v
cleanup_net()
...
(Second free of peer)
Also, put_net() on the right cpu may reorder with left's cpu
list_replace_init(&cleanup_list, ..), and then cleanup_list
will be corrupted.
Since cleanup_net() is executed in worker thread, while
put_net(peer) can happen everywhere, there should be
enough time for concurrent get_net_ns_by_id() to pick
the peer up, and the race does not seem to be unlikely.
The patch fixes the problem in standard way.
(Also, there is possible problem in peernet2id_alloc(), which requires
check for net::count under nsid_lock and maybe_get_net(peer), but
in current stable kernel it's used under rtnl_lock() and it has to be
safe. Openvswitch has begun to use peernet2id_alloc(), and possibly it should
be fixed too. Since this is not in a stable kernel yet, I'll send
a separate message to netdev@ later).
Cc: Nicolas Dichtel <[email protected]>
Signed-off-by: Kirill Tkhai <[email protected]>
Fixes: 0c7aecd4bde4 "netns: add rtnl cmd to add and get peer netns ids"
Reviewed-by: Andrey Ryabinin <[email protected]>
Reviewed-by: "Eric W. Biederman" <[email protected]>
Signed-off-by: Eric W. Biederman <[email protected]>
Reviewed-by: Eric Dumazet <[email protected]>
Acked-by: Nicolas Dichtel <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
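A kernel-context sketch of the standard fix this message applies: hand out a peer only if its refcount is still nonzero, via maybe_get_net() under RCU, instead of an unconditional get_net():

struct net *get_net_ns_by_id_sketch(struct net *net, int id)
{
    struct net *peer;

    if (id < 0)
        return NULL;
    rcu_read_lock();
    peer = idr_find(&net->netns_ids, id);
    if (peer)
        peer = maybe_get_net(peer);   /* NULL if count already hit 0 */
    rcu_read_unlock();
    return peer;
}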
_XimProtoGetIMValues(
XIM xim,
XIMArg *arg)
{
Xim im = (Xim)xim;
register XIMArg *p;
register int n;
CARD8 *buf;
CARD16 *buf_s;
INT16 len;
CARD32 reply32[BUFSIZE/4];
char *reply = (char *)reply32;
XPointer preply = NULL;
int buf_size;
int ret_code;
char *makeid_name;
char *decode_name;
CARD16 *data = NULL;
INT16 data_len = 0;
#ifndef XIM_CONNECTABLE
if (!IS_SERVER_CONNECTED(im))
return arg->name;
#else
if (!IS_SERVER_CONNECTED(im)) {
if (IS_CONNECTABLE(im)) {
if (!_XimConnectServer(im)) {
return _XimDelayModeGetIMValues(im, arg);
}
} else {
return arg->name;
}
}
#endif /* XIM_CONNECTABLE */
for (n = 0, p = arg; p->name; p++)
n++;
if (!n)
return (char *)NULL;
buf_size = sizeof(CARD16) * n;
buf_size += XIM_HEADER_SIZE
+ sizeof(CARD16)
+ sizeof(INT16)
+ XIM_PAD(buf_size);
if (!(buf = Xcalloc(buf_size, 1)))
return arg->name;
buf_s = (CARD16 *)&buf[XIM_HEADER_SIZE];
makeid_name = _XimMakeIMAttrIDList(im, im->core.im_resources,
im->core.im_num_resources, arg,
&buf_s[2], &len, XIM_GETIMVALUES);
if (len) {
buf_s[0] = im->private.proto.imid; /* imid */
buf_s[1] = len; /* length of im-attr-id */
XIM_SET_PAD(&buf_s[2], len); /* pad */
len += sizeof(CARD16) /* sizeof imid */
+ sizeof(INT16); /* sizeof length of attr */
_XimSetHeader((XPointer)buf, XIM_GET_IM_VALUES, 0, &len);
if (!(_XimWrite(im, len, (XPointer)buf))) {
Xfree(buf);
return arg->name;
}
_XimFlush(im);
Xfree(buf);
buf_size = BUFSIZE;
ret_code = _XimRead(im, &len, (XPointer)reply, buf_size,
_XimGetIMValuesCheck, 0);
if(ret_code == XIM_TRUE) {
preply = reply;
} else if(ret_code == XIM_OVERFLOW) {
if(len <= 0) {
preply = reply;
} else {
buf_size = len;
preply = Xmalloc(buf_size);
ret_code = _XimRead(im, &len, preply, buf_size,
_XimGetIMValuesCheck, 0);
if(ret_code != XIM_TRUE) {
Xfree(preply);
return arg->name;
}
}
} else
return arg->name;
buf_s = (CARD16 *)((char *)preply + XIM_HEADER_SIZE);
if (*((CARD8 *)preply) == XIM_ERROR) {
_XimProcError(im, 0, (XPointer)&buf_s[3]);
if(reply != preply)
Xfree(preply);
return arg->name;
}
data = &buf_s[2];
data_len = buf_s[1];
}
decode_name = _XimDecodeIMATTRIBUTE(im, im->core.im_resources,
im->core.im_num_resources, data, data_len,
arg, XIM_GETIMVALUES);
if (reply != preply)
Xfree(preply);
if (decode_name)
return decode_name;
else
return makeid_name;
} | 0 | [
"CWE-190"
] | libx11 | 1a566c9e00e5f35c1f9e7f3d741a02e5170852b2 | 28,229,397,813,910,910,000,000,000,000,000,000,000 | 110 | Zero out buffers in functions
It looks like uninitialized stack or heap memory can leak
out via padding bytes.
Signed-off-by: Matthieu Herrb <[email protected]>
Reviewed-by: Matthieu Herrb <[email protected]> |
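An Xlib-context sketch of the hardening: allocate request buffers zero-initialized so struct padding can never carry stale stack or heap bytes onto the wire (the same pattern is visible in the function above):

buf = Xcalloc(buf_size, 1);   /* zeroed, unlike a bare Xmalloc(buf_size) */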
void CLASS samsung2_load_raw()
{
static const ushort tab[14] = {0x304, 0x307, 0x206, 0x205, 0x403, 0x600, 0x709,
0x80a, 0x90b, 0xa0c, 0xa0d, 0x501, 0x408, 0x402};
ushort huff[1026], vpred[2][2] = {{0, 0}, {0, 0}}, hpred[2];
int i, c, n, row, col, diff;
huff[0] = 10;
for (n = i = 0; i < 14; i++)
FORC(1024 >> (tab[i] >> 8)) huff[++n] = tab[i];
getbits(-1);
for (row = 0; row < raw_height; row++)
{
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
for (col = 0; col < raw_width; col++)
{
diff = ljpeg_diff(huff);
if (col < 2)
hpred[col] = vpred[row & 1][col] += diff;
else
hpred[col & 1] += diff;
RAW(row, col) = hpred[col & 1];
if (hpred[col & 1] >> tiff_bps)
derror();
}
}
} | 0 | [
"CWE-476",
"CWE-119"
] | LibRaw | d7c3d2cb460be10a3ea7b32e9443a83c243b2251 | 310,899,651,109,105,530,000,000,000,000,000,000,000 | 29 | Secunia SA75000 advisory: several buffer overruns |
void gnutls_x509_key_purpose_deinit(gnutls_x509_key_purposes_t p)
{
key_purposes_deinit(p);
gnutls_free(p);
} | 0 | [] | gnutls | d6972be33264ecc49a86cd0958209cd7363af1e9 | 11,152,735,264,234,518,000,000,000,000,000,000,000 | 5 | eliminated double-free in the parsing of dist points
Reported by Robert Święcki. |
hfinfo_char_value_format_display(int display, char buf[7], guint32 value)
{
char *ptr = &buf[6];
static const gchar hex_digits[16] =
{ '0', '1', '2', '3', '4', '5', '6', '7',
'8', '9', 'a', 'b', 'c', 'd', 'e', 'f' };
*ptr = '\0';
*(--ptr) = '\'';
/* Properly format value */
if (g_ascii_isprint(value)) {
/*
* Printable, so just show the character, and, if it needs
* to be escaped, escape it.
*/
*(--ptr) = value;
if (value == '\\' || value == '\'')
*(--ptr) = '\\';
} else {
/*
* Non-printable; show it as an escape sequence.
*/
switch (value) {
case '\0':
/*
* Show a NUL with only one digit.
*/
*(--ptr) = '0';
break;
case '\a':
*(--ptr) = 'a';
break;
case '\b':
*(--ptr) = 'b';
break;
case '\f':
*(--ptr) = 'f';
break;
case '\n':
*(--ptr) = 'n';
break;
case '\r':
*(--ptr) = 'r';
break;
case '\t':
*(--ptr) = 't';
break;
case '\v':
*(--ptr) = 'v';
break;
default:
switch (FIELD_DISPLAY(display)) {
case BASE_OCT:
*(--ptr) = (value & 0x7) + '0';
value >>= 3;
*(--ptr) = (value & 0x7) + '0';
value >>= 3;
*(--ptr) = (value & 0x7) + '0';
break;
case BASE_HEX:
*(--ptr) = hex_digits[value & 0x0F];
value >>= 4;
*(--ptr) = hex_digits[value & 0x0F];
*(--ptr) = 'x';
break;
default:
g_assert_not_reached();
}
}
*(--ptr) = '\\';
}
*(--ptr) = '\'';
return ptr;
} | 0 | [
"CWE-401"
] | wireshark | a9fc769d7bb4b491efb61c699d57c9f35269d871 | 216,749,810,885,394,730,000,000,000,000,000,000,000 | 86 | epan: Fix a memory leak.
Make sure _proto_tree_add_bits_ret_val allocates a bits array using the
packet scope, otherwise we leak memory. Fixes #17032. |
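A Wireshark-context sketch of the allocation-scope fix: scratch memory tied to the packet scope is released automatically when dissection of the packet finishes, so nothing leaks even on error paths:

#include <epan/wmem/wmem.h>

static guint8 *alloc_packet_scratch(size_t length)
{
    /* Freed by wmem when the packet scope is torn down;
       no explicit g_free() is needed. */
    return (guint8 *)wmem_alloc0(wmem_packet_scope(), length);
}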
GF_Err hinf_AddBox(GF_Box *s, GF_Box *a)
{
GF_MAXRBox *maxR;
GF_HintInfoBox *hinf = (GF_HintInfoBox *)s;
u32 i;
switch (a->type) {
case GF_ISOM_BOX_TYPE_MAXR:
i=0;
while ((maxR = (GF_MAXRBox *)gf_list_enum(hinf->other_boxes, &i))) {
if ((maxR->type==GF_ISOM_BOX_TYPE_MAXR) && (maxR->granularity == ((GF_MAXRBox *)a)->granularity))
return GF_ISOM_INVALID_FILE;
}
break;
}
return gf_isom_box_add_default(s, a);
} | 0 | [
"CWE-400",
"CWE-401"
] | gpac | d2371b4b204f0a3c0af51ad4e9b491144dd1225c | 204,374,378,212,002,700,000,000,000,000,000,000,000 | 16 | prevent dref memleak on invalid input (#1183) |
/**
\param align Appending alignment.
\param[in,out] XYZ Contains the XYZ coordinates at start / exit of the function.
\param exit_on_anykey Exit function when any key is pressed.
**/
const CImgList<T>& display(const char *const title=0, const bool display_info=true,
const char axis='x', const float align=0,
unsigned int *const XYZ=0, const bool exit_on_anykey=false) const { | 0 | [
"CWE-125"
] | CImg | 10af1e8c1ad2a58a0a3342a856bae63e8f257abb | 135,320,808,744,916,700,000,000,000,000,000,000,000 | 7 | Fix other issues in 'CImg<T>::load_bmp()'. |
static int lg_dinovo_mapping(struct hid_input *hi, struct hid_usage *usage,
unsigned long **bit, int *max)
{
if ((usage->hid & HID_USAGE_PAGE) != HID_UP_LOGIVENDOR)
return 0;
switch (usage->hid & HID_USAGE) {
case 0x00d: lg_map_key_clear(KEY_MEDIA); break;
default:
return 0;
}
return 1;
} | 0 | [
"CWE-119",
"CWE-787"
] | linux | 4ab25786c87eb20857bbb715c3ae34ec8fd6a214 | 130,431,400,262,320,800,000,000,000,000,000,000,000 | 15 | HID: fix a couple of off-by-ones
There are a few very theoretical off-by-one bugs in report descriptor size
checking when performing a pre-parsing fixup. Fix those.
Cc: [email protected]
Reported-by: Ben Hawkes <[email protected]>
Reviewed-by: Benjamin Tissoires <[email protected]>
Signed-off-by: Jiri Kosina <[email protected]> |
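A kernel-context sketch of the off-by-one class being fixed: a report-descriptor fixup that reads rdesc[i] must require *rsize >= i + 1; the index and byte values below are hypothetical:

if (*rsize >= 91 && rdesc[90] == 0x35)   /* highest index read, plus 1 */
        rdesc[90] = 0x39;                /* illustrative patched byte */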
void mnt_pin(struct vfsmount *mnt)
{
lock_mount_hash();
real_mount(mnt)->mnt_pinned++;
unlock_mount_hash();
} | 0 | [
"CWE-269"
] | user-namespace | a6138db815df5ee542d848318e5dae681590fccd | 310,148,427,782,653,080,000,000,000,000,000,000,000 | 6 | mnt: Only change user settable mount flags in remount
Kenton Varda <[email protected]> discovered that by remounting a
read-only bind mount read-only in a user namespace the
MNT_LOCK_READONLY bit would be cleared, allowing an unprivileged user
to the remount a read-only mount read-write.
Correct this by replacing the mask of mount flags to preserve
with a mask of mount flags that may be changed, and preserve
all others. This ensures that any future bugs with this mask and
remount will fail in an easy to detect way where new mount flags
simply won't change.
Cc: [email protected]
Acked-by: Serge E. Hallyn <[email protected]>
Signed-off-by: "Eric W. Biederman" <[email protected]> |
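A kernel-context sketch of the inverted-mask idea: instead of whitelisting flags to preserve, preserve everything the user may not change, so MNT_LOCK_READONLY and any future lock flags survive a remount by default:

/* In do_remount(), before applying the new flags (kernel context): */
mnt_flags |= mnt->mnt.mnt_flags & ~MNT_USER_SETTABLE_MASK;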
rb_str_buf_cat(str, ptr, len)
VALUE str;
const char *ptr;
long len;
{
long capa, total;
if (len == 0) return str;
if (len < 0) {
rb_raise(rb_eArgError, "negative string size (or size too big)");
}
rb_str_modify(str);
if (FL_TEST(str, STR_ASSOC)) {
FL_UNSET(str, STR_ASSOC);
capa = RSTRING(str)->aux.capa = RSTRING(str)->len;
}
else {
capa = RSTRING(str)->aux.capa;
}
total = RSTRING(str)->len+len;
if (capa <= total) {
while (total > capa) {
capa = (capa + 1) * 2;
}
RESIZE_CAPA(str, capa);
}
memcpy(RSTRING(str)->ptr + RSTRING(str)->len, ptr, len);
RSTRING(str)->len = total;
RSTRING(str)->ptr[total] = '\0'; /* sentinel */
return str;
} | 0 | [
"CWE-20"
] | ruby | e926ef5233cc9f1035d3d51068abe9df8b5429da | 244,958,716,966,193,000,000,000,000,000,000,000,000 | 32 | * random.c (rb_genrand_int32, rb_genrand_real), intern.h: Export.
* string.c (rb_str_tmp_new), intern.h: New function.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/branches/ruby_1_8@16014 b2dd03c8-39d4-4d8f-98ff-823fe69b080e |
static int apply_color_transform(WebPContext *s)
{
ImageContext *img, *cimg;
int x, y, cx, cy;
uint8_t *p, *cp;
img = &s->image[IMAGE_ROLE_ARGB];
cimg = &s->image[IMAGE_ROLE_COLOR_TRANSFORM];
for (y = 0; y < img->frame->height; y++) {
for (x = 0; x < img->frame->width; x++) {
cx = x >> cimg->size_reduction;
cy = y >> cimg->size_reduction;
cp = GET_PIXEL(cimg->frame, cx, cy);
p = GET_PIXEL(img->frame, x, y);
p[1] += color_transform_delta(cp[3], p[2]);
p[3] += color_transform_delta(cp[2], p[2]) +
color_transform_delta(cp[1], p[1]);
}
}
return 0;
} | 0 | [
"CWE-119",
"CWE-787"
] | FFmpeg | 6b5d3fb26fb4be48e4966e4b1d97c2165538d4ef | 211,977,978,536,524,970,000,000,000,000,000,000,000 | 23 | avcodec/webp: Always set pix_fmt
Fixes: out of array access
Fixes: 1434/clusterfuzz-testcase-minimized-6314998085189632
Fixes: 1435/clusterfuzz-testcase-minimized-6483783723253760
Found-by: continuous fuzzing process https://github.com/google/oss-fuzz/tree/master/targets/ffmpeg
Reviewed-by: "Ronald S. Bultje" <[email protected]>
Signed-off-by: Michael Niedermayer <[email protected]> |
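An FFmpeg-context sketch of the fix idea: the pixel format is assigned unconditionally before any frame buffer is requested, so per-pixel code never runs against a stale or mismatched layout:

avctx->pix_fmt = AV_PIX_FMT_ARGB;    /* always, on every code path */
ret = ff_get_buffer(avctx, p, 0);    /* only after the format is set */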
CtPtr ProtocolV1::handle_message_front(char *buffer, int r) {
ldout(cct, 20) << __func__ << " r=" << r << dendl;
if (r < 0) {
ldout(cct, 1) << __func__ << " read message front failed" << dendl;
return _fault();
}
ldout(cct, 20) << __func__ << " got front " << front.length() << dendl;
return read_message_middle();
} | 0 | [
"CWE-294"
] | ceph | 6c14c2fb5650426285428dfe6ca1597e5ea1d07d | 22,510,778,078,433,137,000,000,000,000,000,000,000 | 12 | mon/MonClient: bring back CEPHX_V2 authorizer challenges
Commit c58c5754dfd2 ("msg/async/ProtocolV1: use AuthServer and
AuthClient") introduced a backwards compatibility issue into msgr1.
To fix it, commit 321548010578 ("mon/MonClient: skip CEPHX_V2
challenge if client doesn't support it") set out to skip authorizer
challenges for peers that don't support CEPHX_V2. However, it
made it so that authorizer challenges are skipped for all peers in
both msgr1 and msgr2 cases, effectively disabling the protection
against replay attacks that was put in place in commit f80b848d3f83
("auth/cephx: add authorizer challenge", CVE-2018-1128).
This is because con->get_features() always returns 0 at that
point. In msgr1 case, the peer shares its features along with the
authorizer, but while they are available in connect_msg.features they
aren't assigned to con until ProtocolV1::open(). In msgr2 case, the
peer doesn't share its features until much later (in CLIENT_IDENT
frame, i.e. after the authentication phase). The result is that
!CEPHX_V2 branch is taken in all cases and replay attack protection
is lost.
Only clusters with cephx_service_require_version set to 2 on the
service daemons would not be silently downgraded. But, since the
default is 1 and there are no reports of looping on BADAUTHORIZER
faults, I'm pretty sure that no one has ever done that. Note that
cephx_require_version set to 2 would have no effect even though it
is supposed to be stronger than cephx_service_require_version
because MonClient::handle_auth_request() didn't check it.
To fix:
- for msgr1, check connect_msg.features (as was done before commit
c58c5754dfd2) and challenge if CEPHX_V2 is supported. Together
with two preceding patches that resurrect proper cephx_* option
handling in msgr1, this covers both "I want old clients to work"
and "I wish to require better authentication" use cases.
- for msgr2, don't check anything and always challenge. CEPHX_V2
predates msgr2, anyone speaking msgr2 must support it.
Signed-off-by: Ilya Dryomov <[email protected]>
(cherry picked from commit 4a82c72e3bdddcb625933e83af8b50a444b961f1) |
static void hns_xgmac_get_pausefrm_cfg(void *mac_drv, u32 *rx_en, u32 *tx_en)
{
struct mac_driver *drv = (struct mac_driver *)mac_drv;
u32 pause_ctrl;
pause_ctrl = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_CTRL_REG);
*rx_en = dsaf_get_bit(pause_ctrl, XGMAC_PAUSE_CTL_RX_B);
*tx_en = dsaf_get_bit(pause_ctrl, XGMAC_PAUSE_CTL_TX_B);
} | 0 | [
"CWE-119",
"CWE-703"
] | linux | 412b65d15a7f8a93794653968308fc100f2aa87c | 150,744,141,725,437,010,000,000,000,000,000,000,000 | 9 | net: hns: fix ethtool_get_strings overflow in hns driver
hns_get_sset_count() returns HNS_NET_STATS_CNT and the data space allocated
is not enough for ethtool_get_strings(), which will cause random memory
corruption.
When SLAB and DEBUG_SLAB are both enabled, memory corruptions like the
the following can be observed without this patch:
[ 43.115200] Slab corruption (Not tainted): Acpi-ParseExt start=ffff801fb0b69030, len=80
[ 43.115206] Redzone: 0x9f911029d006462/0x5f78745f31657070.
[ 43.115208] Last user: [<5f7272655f746b70>](0x5f7272655f746b70)
[ 43.115214] 010: 70 70 65 31 5f 74 78 5f 70 6b 74 00 6b 6b 6b 6b ppe1_tx_pkt.kkkk
[ 43.115217] 030: 70 70 65 31 5f 74 78 5f 70 6b 74 5f 6f 6b 00 6b ppe1_tx_pkt_ok.k
[ 43.115218] Next obj: start=ffff801fb0b69098, len=80
[ 43.115220] Redzone: 0x706d655f6f666966/0x9f911029d74e35b.
[ 43.115229] Last user: [<ffff0000084b11b0>](acpi_os_release_object+0x28/0x38)
[ 43.115231] 000: 74 79 00 6b 6b 6b 6b 6b 70 70 65 31 5f 74 78 5f ty.kkkkkppe1_tx_
[ 43.115232] 010: 70 6b 74 5f 65 72 72 5f 63 73 75 6d 5f 66 61 69 pkt_err_csum_fai
Signed-off-by: Timmy Li <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
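A standalone sketch of the invariant the hns fix restores: get_strings() must emit exactly as many names as get_sset_count() reported, since ethtool sizes the user buffer from that count (32 mirrors the kernel's ETH_GSTRING_LEN):

#include <string.h>

#define STAT_NAME_LEN 32

static const char stat_names[][STAT_NAME_LEN] = {
    "rx_packets", "tx_packets", "rx_errors",
};

static int sketch_get_sset_count(void)
{
    return (int)(sizeof(stat_names) / sizeof(stat_names[0]));
}

static void sketch_get_strings(char *buf)
{
    int i, n = sketch_get_sset_count();

    for (i = 0; i < n; i++)   /* never writes past n * STAT_NAME_LEN */
        memcpy(buf + i * STAT_NAME_LEN, stat_names[i], STAT_NAME_LEN);
}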
err_gettext(const char *str)
{
#ifdef ENABLE_NLS
if (in_error_recursion_trouble())
return str;
else
return gettext(str);
#else
return str;
#endif
} | 0 | [
"CWE-89"
] | postgres | 2b3a8b20c2da9f39ffecae25ab7c66974fbc0d3b | 10,688,349,985,067,283,000,000,000,000,000,000,000 | 11 | Be more careful to not lose sync in the FE/BE protocol.
If any error occurred while we were in the middle of reading a protocol
message from the client, we could lose sync, and incorrectly try to
interpret a part of another message as a new protocol message. That will
usually lead to an "invalid frontend message" error that terminates the
connection. However, this is a security issue because an attacker might
be able to deliberately cause an error, inject a Query message in what's
supposed to be just user data, and have the server execute it.
We were quite careful to not have CHECK_FOR_INTERRUPTS() calls or other
operations that could ereport(ERROR) in the middle of processing a message,
but a query cancel interrupt or statement timeout could nevertheless cause
it to happen. Also, the V2 fastpath and COPY handling were not so careful.
It's very difficult to recover in the V2 COPY protocol, so we will just
terminate the connection on error. In practice, that's what happened
previously anyway, as we lost protocol sync.
To fix, add a new variable in pqcomm.c, PqCommReadingMsg, that is set
whenever we're in the middle of reading a message. When it's set, we cannot
safely ERROR out and continue running, because we might've read only part
of a message. PqCommReadingMsg acts somewhat similarly to critical sections
in that if an error occurs while it's set, the error handler will force the
connection to be terminated, as if the error was FATAL. It's not
implemented by promoting ERROR to FATAL in elog.c, like ERROR is promoted
to PANIC in critical sections, because we want to be able to use
PG_TRY/CATCH to recover and regain protocol sync. pq_getmessage() takes
advantage of that to prevent an OOM error from terminating the connection.
To prevent unnecessary connection terminations, add a holdoff mechanism
similar to HOLD/RESUME_INTERRUPTS() that can be used hold off query cancel
interrupts, but still allow die interrupts. The rules on which interrupts
are processed when are now a bit more complicated, so refactor
ProcessInterrupts() and the calls to it in signal handlers so that the
signal handlers always call it if ImmediateInterruptOK is set, and
ProcessInterrupts() can decide to not do anything if the other conditions
are not met.
Reported by Emil Lenngren. Patch reviewed by Noah Misch and Andres Freund.
Backpatch to all supported versions.
Security: CVE-2015-0244 |
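A standalone sketch of the guard this message introduces: while a protocol message is being read, a "recoverable" error is not actually recoverable, because part of the message may already be consumed and sync lost. The names mirror the message; the real logic lives in pqcomm.c and elog.c:

#include <stdbool.h>

static bool PqCommReadingMsg = false;

static void pq_startmsgread_sketch(void) { PqCommReadingMsg = true; }
static void pq_endmsgread_sketch(void)   { PqCommReadingMsg = false; }

static bool error_must_kill_connection(void)
{
    /* An ereport(ERROR) raised mid-message must be promoted to a
       connection-terminating error instead of continuing. */
    return PqCommReadingMsg;
}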
plperl_func_handler(PG_FUNCTION_ARGS)
{
plperl_proc_desc *prodesc;
SV *perlret;
Datum retval = 0;
ReturnSetInfo *rsi;
ErrorContextCallback pl_error_context;
if (SPI_connect() != SPI_OK_CONNECT)
elog(ERROR, "could not connect to SPI manager");
prodesc = compile_plperl_function(fcinfo->flinfo->fn_oid, false, false);
current_call_data->prodesc = prodesc;
increment_prodesc_refcount(prodesc);
/* Set a callback for error reporting */
pl_error_context.callback = plperl_exec_callback;
pl_error_context.previous = error_context_stack;
pl_error_context.arg = prodesc->proname;
error_context_stack = &pl_error_context;
rsi = (ReturnSetInfo *) fcinfo->resultinfo;
if (prodesc->fn_retisset)
{
/* Check context before allowing the call to go through */
if (!rsi || !IsA(rsi, ReturnSetInfo) ||
(rsi->allowedModes & SFRM_Materialize) == 0 ||
rsi->expectedDesc == NULL)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("set-valued function called in context that "
"cannot accept a set")));
}
activate_interpreter(prodesc->interp);
perlret = plperl_call_perl_func(prodesc, fcinfo);
/************************************************************
* Disconnect from SPI manager and then create the return
* values datum (if the input function does a palloc for it
* this must not be allocated in the SPI memory context
* because SPI_finish would free it).
************************************************************/
if (SPI_finish() != SPI_OK_FINISH)
elog(ERROR, "SPI_finish() failed");
if (prodesc->fn_retisset)
{
SV *sav;
/*
* If the Perl function returned an arrayref, we pretend that it
* called return_next() for each element of the array, to handle old
* SRFs that didn't know about return_next(). Any other sort of return
* value is an error, except undef which means return an empty set.
*/
sav = get_perl_array_ref(perlret);
if (sav)
{
int i = 0;
SV **svp = 0;
AV *rav = (AV *) SvRV(sav);
while ((svp = av_fetch(rav, i, FALSE)) != NULL)
{
plperl_return_next(*svp);
i++;
}
}
else if (SvOK(perlret))
{
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
errmsg("set-returning PL/Perl function must return "
"reference to array or use return_next")));
}
rsi->returnMode = SFRM_Materialize;
if (current_call_data->tuple_store)
{
rsi->setResult = current_call_data->tuple_store;
rsi->setDesc = current_call_data->ret_tdesc;
}
retval = (Datum) 0;
}
else
{
retval = plperl_sv_to_datum(perlret,
prodesc->result_oid,
-1,
fcinfo,
&prodesc->result_in_func,
prodesc->result_typioparam,
&fcinfo->isnull);
if (fcinfo->isnull && rsi && IsA(rsi, ReturnSetInfo))
rsi->isDone = ExprEndResult;
}
/* Restore the previous error callback */
error_context_stack = pl_error_context.previous;
SvREFCNT_dec(perlret);
return retval;
} | 0 | [
"CWE-264"
] | postgres | 537cbd35c893e67a63c59bc636c3e888bd228bc7 | 172,900,307,007,990,400,000,000,000,000,000,000,000 | 108 | Prevent privilege escalation in explicit calls to PL validators.
The primary role of PL validators is to be called implicitly during
CREATE FUNCTION, but they are also normal functions that a user can call
explicitly. Add a permissions check to each validator to ensure that a
user cannot use explicit validator calls to achieve things he could not
otherwise achieve. Back-patch to 8.4 (all supported versions).
Non-core procedural language extensions ought to make the same two-line
change to their own validators.
Andres Freund, reviewed by Tom Lane and Noah Misch.
Security: CVE-2014-0061 |
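A PostgreSQL-context sketch of the two-line change the message prescribes for each validator; CheckFunctionValidatorAccess() is the helper this commit series adds, and the validator body is elided:

#include "postgres.h"
#include "fmgr.h"

Datum
plsample_validator_sketch(PG_FUNCTION_ARGS)
{
    Oid funcoid = PG_GETARG_OID(0);

    if (!CheckFunctionValidatorAccess(fcinfo->flinfo->fn_oid, funcoid))
        PG_RETURN_VOID();   /* explicit call without the right to validate */

    /* ... normal validation of funcoid ... */
    PG_RETURN_VOID();
}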
static inline void nested_release_evmcs(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
if (!vmx->nested.hv_evmcs)
return;
kunmap(vmx->nested.hv_evmcs_page);
kvm_release_page_dirty(vmx->nested.hv_evmcs_page);
vmx->nested.hv_evmcs_vmptr = -1ull;
vmx->nested.hv_evmcs_page = NULL;
vmx->nested.hv_evmcs = NULL;
} | 0 | [
"CWE-863"
] | kvm | acff78477b9b4f26ecdf65733a4ed77fe837e9dc | 319,555,060,615,269,200,000,000,000,000,000,000,000 | 13 | KVM: x86: nVMX: close leak of L0's x2APIC MSRs (CVE-2019-3887)
The nested_vmx_prepare_msr_bitmap() function doesn't directly guard the
x2APIC MSR intercepts with the "virtualize x2APIC mode" MSR. As a
result, we discovered the potential for a buggy or malicious L1 to get
access to L0's x2APIC MSRs, via an L2, as follows.
1. L1 executes WRMSR(IA32_SPEC_CTRL, 1). This causes the spec_ctrl
variable, in nested_vmx_prepare_msr_bitmap() to become true.
2. L1 disables "virtualize x2APIC mode" in VMCS12.
3. L1 enables "APIC-register virtualization" in VMCS12.
Now, KVM will set VMCS02's x2APIC MSR intercepts from VMCS12, and then
set "virtualize x2APIC mode" to 0 in VMCS02. Oops.
This patch closes the leak by explicitly guarding VMCS02's x2APIC MSR
intercepts with VMCS12's "virtualize x2APIC mode" control.
The scenario outlined above and fix prescribed here, were verified with
a related patch in kvm-unit-tests titled "Add leak scenario to
virt_x2apic_mode_test".
Note, it looks like this issue may have been introduced inadvertently
during a merge---see 15303ba5d1cd.
Signed-off-by: Marc Orr <[email protected]>
Reviewed-by: Jim Mattson <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]> |
static inline int xfrm_route_forward(struct sk_buff *skb, unsigned short family)
{
struct net *net = dev_net(skb->dev);
return !net->xfrm.policy_count[XFRM_POLICY_OUT] ||
(skb_dst(skb)->flags & DST_NOXFRM) ||
__xfrm_route_forward(skb, family);
} | 0 | [
"CWE-416"
] | linux | dbb2483b2a46fbaf833cfb5deb5ed9cace9c7399 | 178,999,006,892,464,040,000,000,000,000,000,000,000 | 8 | xfrm: clean up xfrm protocol checks
In commit 6a53b7593233 ("xfrm: check id proto in validate_tmpl()")
I introduced a check for xfrm protocol, but according to Herbert
IPSEC_PROTO_ANY should only be used as a wildcard for lookup, so
it should be removed from validate_tmpl().
And, IPSEC_PROTO_ANY is expected to only match 3 IPSec-specific
protocols, this is why xfrm_state_flush() could still miss
IPPROTO_ROUTING, which leads that those entries are left in
net->xfrm.state_all before exit net. Fix this by replacing
IPSEC_PROTO_ANY with zero.
This patch also extracts the check from validate_tmpl() to
xfrm_id_proto_valid() and uses it in parse_ipsecrequest().
With this, no other protocols should be added into xfrm.
Fixes: 6a53b7593233 ("xfrm: check id proto in validate_tmpl()")
Reported-by: [email protected]
Cc: Steffen Klassert <[email protected]>
Cc: Herbert Xu <[email protected]>
Signed-off-by: Cong Wang <[email protected]>
Acked-by: Herbert Xu <[email protected]>
Signed-off-by: Steffen Klassert <[email protected]> |
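A user-space sketch of the extracted helper named in this message; the kernel version additionally accepts IPPROTO_ROUTING and IPPROTO_DSTOPTS when IPv6 is enabled:

#include <stdbool.h>
#include <netinet/in.h>

static bool xfrm_id_proto_valid_sketch(unsigned char proto)
{
    switch (proto) {
    case IPPROTO_AH:     /* the IPsec-specific protocols */
    case IPPROTO_ESP:
    case IPPROTO_COMP:
        return true;
    default:
        return false;    /* anything else is not a valid xfrm id proto */
    }
}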
static void vc_uniscr_set(struct vc_data *vc, struct uni_screen *new_uniscr)
{
vc_uniscr_free(vc->vc_uni_screen);
vc->vc_uni_screen = new_uniscr;
} | 0 | [
"CWE-125"
] | linux | 3c4e0dff2095c579b142d5a0693257f1c58b4804 | 113,892,631,699,186,880,000,000,000,000,000,000,000 | 5 | vt: Disable KD_FONT_OP_COPY
It's buggy:
On Fri, Nov 06, 2020 at 10:30:08PM +0800, Minh Yuan wrote:
> We recently discovered a slab-out-of-bounds read in fbcon in the latest
> kernel ( v5.10-rc2 for now ). The root cause of this vulnerability is that
> "fbcon_do_set_font" did not handle "vc->vc_font.data" and
> "vc->vc_font.height" correctly, and the patch
> <https://lkml.org/lkml/2020/9/27/223> for VT_RESIZEX can't handle this
> issue.
>
> Specifically, we use KD_FONT_OP_SET to set a small font.data for tty6, and
> use KD_FONT_OP_SET again to set a large font.height for tty1. After that,
> we use KD_FONT_OP_COPY to assign tty6's vc_font.data to tty1's vc_font.data
> in "fbcon_do_set_font", while tty1 retains the original larger
> height. Obviously, this will cause an out-of-bounds read, because we can
> access a smaller vc_font.data with a larger vc_font.height.
Further there was only one user ever.
- Android's loadfont, busybox and console-tools only ever use OP_GET
and OP_SET
- fbset documentation only mentions the kernel cmdline font: option,
not anything else.
- systemd used OP_COPY before release 232 published in Nov 2016
Now unfortunately the crucial report seems to have gone down with
gmane, and the commit message doesn't say much. But the pull request
hints at OP_COPY being broken
https://github.com/systemd/systemd/pull/3651
So in other words, this never worked, and the only project which
foolishly ever tried to use it, realized that rather quickly too.
Instead of trying to fix security issues here on dead code by adding
missing checks, fix the entire thing by removing the functionality.
Note that systemd code using the OP_COPY function ignored the return
value, so it doesn't matter what we're doing here really - just in
case a lone server somewhere happens to be extremely unlucky and
running an affected old version of systemd. The relevant code from
font_copy_to_all_vcs() in systemd was:
/* copy font from active VT, where the font was uploaded to */
cfo.op = KD_FONT_OP_COPY;
cfo.height = vcs.v_active-1; /* tty1 == index 0 */
(void) ioctl(vcfd, KDFONTOP, &cfo);
Note this just disables the ioctl, garbage collecting the now unused
callbacks is left for -next.
v2: Tetsuo found the old mail, which allowed me to find it on another
archive. Add the link too.
Acked-by: Peilin Ye <[email protected]>
Reported-by: Minh Yuan <[email protected]>
References: https://lists.freedesktop.org/archives/systemd-devel/2016-June/036935.html
References: https://github.com/systemd/systemd/pull/3651
Cc: Greg KH <[email protected]>
Cc: Peilin Ye <[email protected]>
Cc: Tetsuo Handa <[email protected]>
Signed-off-by: Daniel Vetter <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Greg Kroah-Hartman <[email protected]> |
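Since the resolution was removal rather than repair, the change itself is tiny. A hedged sketch of the disabling inside the KDFONTOP dispatch (context abbreviated):

/* In the KDFONTOP switch: refuse the copy operation outright, since no
 * working user of it ever existed. */
case KD_FONT_OP_COPY:
	return -EINVAL;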
MagickExport void ConvertLCHabToRGB(const double luma,const double chroma,
const double hue,Quantum *red,Quantum *green,Quantum *blue)
{
double
X,
Y,
Z;
/*
Convert LCHab to RGB colorspace.
*/
assert(red != (Quantum *) NULL);
assert(green != (Quantum *) NULL);
assert(blue != (Quantum *) NULL);
ConvertLCHabToXYZ(100.0*luma,255.0*(chroma-0.5),360.0*hue,&X,&Y,&Z);
ConvertXYZToRGB(X,Y,Z,red,green,blue);
} | 0 | [
"CWE-369"
] | ImageMagick6 | 90255f0834eead08d59f46b0bda7b1580451cc0f | 332,141,419,501,292,640,000,000,000,000,000,000,000 | 17 | https://github.com/ImageMagick/ImageMagick/issues/3077 |
int kvm_arch_hardware_enable(void *garbage)
{
struct kvm *kvm;
struct kvm_vcpu *vcpu;
int i;
int ret;
u64 local_tsc;
u64 max_tsc = 0;
bool stable, backwards_tsc = false;
kvm_shared_msr_cpu_online();
ret = kvm_x86_ops->hardware_enable(garbage);
if (ret != 0)
return ret;
local_tsc = native_read_tsc();
stable = !check_tsc_unstable();
list_for_each_entry(kvm, &vm_list, vm_list) {
kvm_for_each_vcpu(i, vcpu, kvm) {
if (!stable && vcpu->cpu == smp_processor_id())
set_bit(KVM_REQ_CLOCK_UPDATE, &vcpu->requests);
if (stable && vcpu->arch.last_host_tsc > local_tsc) {
backwards_tsc = true;
if (vcpu->arch.last_host_tsc > max_tsc)
max_tsc = vcpu->arch.last_host_tsc;
}
}
}
/*
* Sometimes, even reliable TSCs go backwards. This happens on
* platforms that reset TSC during suspend or hibernate actions, but
* maintain synchronization. We must compensate. Fortunately, we can
* detect that condition here, which happens early in CPU bringup,
* before any KVM threads can be running. Unfortunately, we can't
* bring the TSCs fully up to date with real time, as we aren't yet far
* enough into CPU bringup that we know how much real time has actually
* elapsed; our helper function, get_kernel_ns() will be using boot
* variables that haven't been updated yet.
*
* So we simply find the maximum observed TSC above, then record the
* adjustment to TSC in each VCPU. When the VCPU later gets loaded,
* the adjustment will be applied. Note that we accumulate
* adjustments, in case multiple suspend cycles happen before some VCPU
* gets a chance to run again. In the event that no KVM threads get a
* chance to run, we will miss the entire elapsed period, as we'll have
* reset last_host_tsc, so VCPUs will not have the TSC adjusted and may
* lose cycle time. This isn't too big a deal, since the loss will be
* uniform across all VCPUs (not to mention the scenario is extremely
* unlikely). It is possible that a second hibernate recovery happens
* much faster than a first, causing the observed TSC here to be
* smaller; this would require additional padding adjustment, which is
* why we set last_host_tsc to the local tsc observed here.
*
* N.B. - this code below runs only on platforms with reliable TSC,
* as that is the only way backwards_tsc is set above. Also note
* that this runs for ALL vcpus, which is not a bug; all VCPUs should
* have the same delta_cyc adjustment applied if backwards_tsc
* is detected. Note further, this adjustment is only done once,
* as we reset last_host_tsc on all VCPUs to stop this from being
* called multiple times (one for each physical CPU bringup).
*
* Platforms with unreliable TSCs don't have to deal with this, they
* will be compensated by the logic in vcpu_load, which sets the TSC to
* catchup mode. This will catchup all VCPUs to real time, but cannot
* guarantee that they stay in perfect synchronization.
*/
if (backwards_tsc) {
u64 delta_cyc = max_tsc - local_tsc;
list_for_each_entry(kvm, &vm_list, vm_list) {
kvm_for_each_vcpu(i, vcpu, kvm) {
vcpu->arch.tsc_offset_adjustment += delta_cyc;
vcpu->arch.last_host_tsc = local_tsc;
set_bit(KVM_REQ_MASTERCLOCK_UPDATE,
&vcpu->requests);
}
/*
* We have to disable TSC offset matching.. if you were
* booting a VM while issuing an S4 host suspend....
* you may have some problem. Solving this issue is
* left as an exercise to the reader.
*/
kvm->arch.last_tsc_nsec = 0;
kvm->arch.last_tsc_write = 0;
}
}
return 0;
} | 0 | [
"CWE-119"
] | kvm | c300aa64ddf57d9c5d9c898a64b36877345dd4a9 | 305,503,957,380,218,860,000,000,000,000,000,000,000 | 90 | KVM: x86: fix for buffer overflow in handling of MSR_KVM_SYSTEM_TIME (CVE-2013-1796)
If the guest sets the GPA of the time_page so that the request to update the
time straddles a page then KVM will write onto an incorrect page. The
write is done by using kmap_atomic to get a pointer to the page for the time
structure and then performing a memcpy to that page starting at an offset
that the guest controls. Well behaved guests always provide a 32-byte aligned
address, however a malicious guest could use this to corrupt host kernel
memory.
Tested: Tested against kvmclock unit test.
Signed-off-by: Andrew Honig <[email protected]>
Signed-off-by: Marcelo Tosatti <[email protected]> |
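A hedged illustration of the guard the message implies; the helper name here is invented and the upstream fix may validate the address differently:

/* Illustrative only: refuse a guest-registered system-time GPA whose
 * pvclock structure would straddle a page, so the later
 * kmap_atomic()+memcpy() can never write past the mapped page. */
static bool kvm_system_time_gpa_ok(u64 msr_data)
{
	gpa_t gpa = msr_data & ~1ULL;	/* low bit is the enable flag */

	return offset_in_page(gpa) +
	       sizeof(struct pvclock_vcpu_time_info) <= PAGE_SIZE;
}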
static bool is_ctx_reg(struct bpf_verifier_env *env, int regno)
{
const struct bpf_reg_state *reg = cur_regs(env) + regno;
return reg->type == PTR_TO_CTX;
} | 0 | [
"CWE-125"
] | linux | b799207e1e1816b09e7a5920fbb2d5fcf6edd681 | 63,662,825,948,430,520,000,000,000,000,000,000,000 | 6 | bpf: 32-bit RSH verification must truncate input before the ALU op
When I wrote commit 468f6eafa6c4 ("bpf: fix 32-bit ALU op verification"), I
assumed that, in order to emulate 64-bit arithmetic with 32-bit logic, it
is sufficient to just truncate the output to 32 bits; and so I just moved
the register size coercion that used to be at the start of the function to
the end of the function.
That assumption is true for almost every op, but not for 32-bit right
shifts, because those can propagate information towards the least
significant bit. Fix it by always truncating inputs for 32-bit ops to 32
bits.
Also get rid of the coerce_reg_to_size() after the ALU op, since that has
no effect.
Fixes: 468f6eafa6c4 ("bpf: fix 32-bit ALU op verification")
Acked-by: Daniel Borkmann <[email protected]>
Signed-off-by: Jann Horn <[email protected]>
Signed-off-by: Daniel Borkmann <[email protected]> |
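The arithmetic point is easy to demonstrate outside the verifier. A small self-contained example (ordinary C, not verifier code) of why truncating only the output of a 32-bit right shift differs from truncating the inputs first:

#include <stdint.h>

/* Wrong: high 32 bits of x leak into the result through the shift. */
static uint32_t rsh32_truncate_output(uint64_t x, unsigned int n)
{
	return (uint32_t)(x >> n);
}

/* Right: truncate the input before the operation, as the fix does for
 * the verifier's tracked value ranges. */
static uint32_t rsh32_truncate_input(uint64_t x, unsigned int n)
{
	return (uint32_t)x >> n;
}
/* For x = 0x100000000, n = 1: output-truncation yields 0x80000000,
 * input-truncation yields 0. */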
sctp_disposition_t sctp_sf_cookie_echoed_err(const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
sctp_errhdr_t *err;
if (!sctp_vtag_verify(chunk, asoc))
return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
/* Make sure that the ERROR chunk has a valid length.
* The parameter walking depends on this as well.
*/
if (!sctp_chunk_length_valid(chunk, sizeof(sctp_operr_chunk_t)))
return sctp_sf_violation_chunklen(ep, asoc, type, arg,
commands);
/* Process the error here */
/* FUTURE FIXME: When PR-SCTP related and other optional
* parms are emitted, this will have to change to handle multiple
* errors.
*/
sctp_walk_errors(err, chunk->chunk_hdr) {
if (SCTP_ERROR_STALE_COOKIE == err->cause)
return sctp_sf_do_5_2_6_stale(ep, asoc, type,
arg, commands);
}
/* It is possible to have malformed error causes, and that
* will cause us to end the walk early. However, since
* we are discarding the packet, there should be no adverse
* effects.
*/
return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
} | 0 | [
"CWE-20"
] | linux-2.6 | ba0166708ef4da7eeb61dd92bbba4d5a749d6561 | 100,516,642,136,689,600,000,000,000,000,000,000,000 | 37 | sctp: Fix kernel panic while process protocol violation parameter
Since sctp_sf_abort_violation() needs its 'arg' parameter to be of type
'struct sctp_chunk', it reads the chunk type and chunk length from
the chunk_hdr member of the chunk. But sctp_sf_violation_paramlen() was
always called with a 'struct sctp_paramhdr' typed parameter, which was
then passed on to sctp_sf_abort_violation(). This may cause a kernel panic.
sctp_sf_violation_paramlen()
|-- sctp_sf_abort_violation()
|-- sctp_make_abort_violation()
This patch fixes the problem. It also fixes two places that called
sctp_sf_violation_paramlen() with the wrong parameter type.
Signed-off-by: Wei Yongjun <[email protected]>
Signed-off-by: Vlad Yasevich <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
static int ZEND_FASTCALL ZEND_IS_NOT_IDENTICAL_SPEC_TMP_VAR_HANDLER(ZEND_OPCODE_HANDLER_ARGS)
{
zend_op *opline = EX(opline);
zend_free_op free_op1, free_op2;
zval *result = &EX_T(opline->result.u.var).tmp_var;
is_identical_function(result,
_get_zval_ptr_tmp(&opline->op1, EX(Ts), &free_op1 TSRMLS_CC),
_get_zval_ptr_var(&opline->op2, EX(Ts), &free_op2 TSRMLS_CC) TSRMLS_CC);
Z_LVAL_P(result) = !Z_LVAL_P(result);
zval_dtor(free_op1.var);
if (free_op2.var) {zval_ptr_dtor(&free_op2.var);};
ZEND_VM_NEXT_OPCODE();
} | 0 | [] | php-src | ce96fd6b0761d98353761bf78d5bfb55291179fd | 194,245,800,088,683,630,000,000,000,000,000,000,000 | 14 | - fix #39863, do not accept paths with NULL in them. See http://news.php.net/php.internals/50191, trunk will have the patch later (adding a macro and/or changing (some) APIs. Patch by Rasmus |
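A hedged sketch of the underlying test; PHP's actual macro and API names vary across versions, so only the core check is shown:

#include <string.h>

/* A length-counted path embedding '\0' must be rejected before it
 * reaches char*-based filesystem calls, where everything after the NUL
 * would be silently dropped (e.g. "safe.txt\0.php"). */
static int path_contains_nul(const char *path, size_t len)
{
	return memchr(path, '\0', len) != NULL;
}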
Datum
has_sequence_privilege_id_id(PG_FUNCTION_ARGS)
{
Oid roleid = PG_GETARG_OID(0);
Oid sequenceoid = PG_GETARG_OID(1);
text *priv_type_text = PG_GETARG_TEXT_P(2);
AclMode mode;
AclResult aclresult;
char relkind;
mode = convert_sequence_priv_string(priv_type_text);
relkind = get_rel_relkind(sequenceoid);
if (relkind == '\0')
PG_RETURN_NULL();
else if (relkind != RELKIND_SEQUENCE)
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("\"%s\" is not a sequence",
get_rel_name(sequenceoid))));
aclresult = pg_class_aclcheck(sequenceoid, roleid, mode);
PG_RETURN_BOOL(aclresult == ACLCHECK_OK);
} | 0 | [
"CWE-264"
] | postgres | fea164a72a7bfd50d77ba5fb418d357f8f2bb7d0 | 37,996,429,806,248,237,000,000,000,000,000,000,000 | 23 | Shore up ADMIN OPTION restrictions.
Granting a role without ADMIN OPTION is supposed to prevent the grantee
from adding or removing members from the granted role. Issuing SET ROLE
before the GRANT bypassed that, because the role itself had an implicit
right to add or remove members. Plug that hole by recognizing that
implicit right only when the session user matches the current role.
Additionally, do not recognize it during a security-restricted operation
or during execution of a SECURITY DEFINER function. The restriction on
SECURITY DEFINER is not security-critical. However, it seems best for a
user testing his own SECURITY DEFINER function to see the same behavior
others will see. Back-patch to 8.4 (all supported versions).
The SQL standards do not conflate roles and users as PostgreSQL does;
only SQL roles have members, and only SQL users initiate sessions. An
application using PostgreSQL users and roles as SQL users and roles will
never attempt to grant membership in the role that is the session user,
so the implicit right to add or remove members will never arise.
The security impact was mostly that a role member could revoke access
from others, contrary to the wishes of his own grantor. Unapproved role
member additions are less notable, because the member can still largely
achieve that by creating a view or a SECURITY DEFINER function.
Reviewed by Andres Freund and Tom Lane. Reported, independently, by
Jonas Sundman and Noah Misch.
Security: CVE-2014-0060 |
static void
trad_enc_update_keys(struct trad_enc_ctx *ctx, uint8_t c)
{
uint8_t t;
#define CRC32(c, b) (crc32(c ^ 0xffffffffUL, &b, 1) ^ 0xffffffffUL)
ctx->keys[0] = CRC32(ctx->keys[0], c);
ctx->keys[1] = (ctx->keys[1] + (ctx->keys[0] & 0xff)) * 134775813L + 1;
t = (ctx->keys[1] >> 24) & 0xff;
ctx->keys[2] = CRC32(ctx->keys[2], t);
#undef CRC32
} | 0 | [
"CWE-20"
] | libarchive | d0331e8e5b05b475f20b1f3101fe1ad772d7e7e7 | 78,282,238,078,551,310,000,000,000,000,000,000,000 | 11 | Issue #656: Fix CVE-2016-1541, VU#862384
When reading OS X metadata entries in Zip archives that were stored
without compression, libarchive would use the uncompressed entry size
to allocate a buffer but would use the compressed entry size to limit
the amount of data copied into that buffer. Since the compressed
and uncompressed sizes are provided by data in the archive itself,
an attacker could manipulate these values to write data beyond
the end of the allocated buffer.
This fix provides three new checks to guard against such
manipulation and to make libarchive generally more robust when
handling this type of entry:
1. If an OS X metadata entry is stored without compression,
abort the entire archive if the compressed and uncompressed
data sizes do not match.
2. When sanity-checking the size of an OS X metadata entry,
abort this entry if either the compressed or uncompressed
size is larger than 4MB.
3. When copying data into the allocated buffer, check the copy
size against both the compressed entry size and uncompressed
entry size. |
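Checks 1 and 2 condense into a small predicate, while check 3 belongs at the copy site. A hedged sketch with approximated names (not the literal patch):

#include <stdint.h>

/* Illustrative: validate a Zip OS X metadata entry before allocating or
 * copying; compression method 0 means "stored" (uncompressed). */
static int macos_metadata_sane(uint64_t comp_size, uint64_t uncomp_size,
    int compression_method)
{
	const uint64_t limit = 4 * 1024 * 1024;	/* 4MB cap (check 2) */

	if (compression_method == 0 && comp_size != uncomp_size)
		return 0;	/* check 1: stored sizes must agree */
	if (comp_size > limit || uncomp_size > limit)
		return 0;
	return 1;
}
/* Check 3: the copy loop additionally clamps each memcpy() to the
 * minimum of the remaining compressed and uncompressed sizes. */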
static int __ip_tun_to_nlattr(struct sk_buff *skb,
const struct ip_tunnel_key *output,
const void *tun_opts, int swkey_tun_opts_len,
unsigned short tun_proto, u8 mode)
{
if (output->tun_flags & TUNNEL_KEY &&
nla_put_be64(skb, OVS_TUNNEL_KEY_ATTR_ID, output->tun_id,
OVS_TUNNEL_KEY_ATTR_PAD))
return -EMSGSIZE;
if (mode & IP_TUNNEL_INFO_BRIDGE)
return nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_IPV4_INFO_BRIDGE)
? -EMSGSIZE : 0;
switch (tun_proto) {
case AF_INET:
if (output->u.ipv4.src &&
nla_put_in_addr(skb, OVS_TUNNEL_KEY_ATTR_IPV4_SRC,
output->u.ipv4.src))
return -EMSGSIZE;
if (output->u.ipv4.dst &&
nla_put_in_addr(skb, OVS_TUNNEL_KEY_ATTR_IPV4_DST,
output->u.ipv4.dst))
return -EMSGSIZE;
break;
case AF_INET6:
if (!ipv6_addr_any(&output->u.ipv6.src) &&
nla_put_in6_addr(skb, OVS_TUNNEL_KEY_ATTR_IPV6_SRC,
&output->u.ipv6.src))
return -EMSGSIZE;
if (!ipv6_addr_any(&output->u.ipv6.dst) &&
nla_put_in6_addr(skb, OVS_TUNNEL_KEY_ATTR_IPV6_DST,
&output->u.ipv6.dst))
return -EMSGSIZE;
break;
}
if (output->tos &&
nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TOS, output->tos))
return -EMSGSIZE;
if (nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TTL, output->ttl))
return -EMSGSIZE;
if ((output->tun_flags & TUNNEL_DONT_FRAGMENT) &&
nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT))
return -EMSGSIZE;
if ((output->tun_flags & TUNNEL_CSUM) &&
nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_CSUM))
return -EMSGSIZE;
if (output->tp_src &&
nla_put_be16(skb, OVS_TUNNEL_KEY_ATTR_TP_SRC, output->tp_src))
return -EMSGSIZE;
if (output->tp_dst &&
nla_put_be16(skb, OVS_TUNNEL_KEY_ATTR_TP_DST, output->tp_dst))
return -EMSGSIZE;
if ((output->tun_flags & TUNNEL_OAM) &&
nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_OAM))
return -EMSGSIZE;
if (swkey_tun_opts_len) {
if (output->tun_flags & TUNNEL_GENEVE_OPT &&
nla_put(skb, OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS,
swkey_tun_opts_len, tun_opts))
return -EMSGSIZE;
else if (output->tun_flags & TUNNEL_VXLAN_OPT &&
vxlan_opt_to_nlattr(skb, tun_opts, swkey_tun_opts_len))
return -EMSGSIZE;
else if (output->tun_flags & TUNNEL_ERSPAN_OPT &&
nla_put(skb, OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS,
swkey_tun_opts_len, tun_opts))
return -EMSGSIZE;
}
return 0;
} | 0 | [
"CWE-362",
"CWE-787"
] | linux | cefa91b2332d7009bc0be5d951d6cbbf349f90f8 | 246,952,300,186,833,600,000,000,000,000,000,000,000 | 72 | openvswitch: fix OOB access in reserve_sfa_size()
Given a sufficiently large number of actions, while copying and
reserving memory for a new action of a new flow, if next_offset is
greater than MAX_ACTIONS_BUFSIZE, the function reserve_sfa_size() does
not return -EMSGSIZE as expected, but it allocates MAX_ACTIONS_BUFSIZE
bytes increasing actions_len by req_size. This can then lead to an OOB
write access, especially when further actions need to be copied.
Fix it by rearranging the flow action size check.
KASAN splat below:
==================================================================
BUG: KASAN: slab-out-of-bounds in reserve_sfa_size+0x1ba/0x380 [openvswitch]
Write of size 65360 at addr ffff888147e4001c by task handler15/836
CPU: 1 PID: 836 Comm: handler15 Not tainted 5.18.0-rc1+ #27
...
Call Trace:
<TASK>
dump_stack_lvl+0x45/0x5a
print_report.cold+0x5e/0x5db
? __lock_text_start+0x8/0x8
? reserve_sfa_size+0x1ba/0x380 [openvswitch]
kasan_report+0xb5/0x130
? reserve_sfa_size+0x1ba/0x380 [openvswitch]
kasan_check_range+0xf5/0x1d0
memcpy+0x39/0x60
reserve_sfa_size+0x1ba/0x380 [openvswitch]
__add_action+0x24/0x120 [openvswitch]
ovs_nla_add_action+0xe/0x20 [openvswitch]
ovs_ct_copy_action+0x29d/0x1130 [openvswitch]
? __kernel_text_address+0xe/0x30
? unwind_get_return_address+0x56/0xa0
? create_prof_cpu_mask+0x20/0x20
? ovs_ct_verify+0xf0/0xf0 [openvswitch]
? prep_compound_page+0x198/0x2a0
? __kasan_check_byte+0x10/0x40
? kasan_unpoison+0x40/0x70
? ksize+0x44/0x60
? reserve_sfa_size+0x75/0x380 [openvswitch]
__ovs_nla_copy_actions+0xc26/0x2070 [openvswitch]
? __zone_watermark_ok+0x420/0x420
? validate_set.constprop.0+0xc90/0xc90 [openvswitch]
? __alloc_pages+0x1a9/0x3e0
? __alloc_pages_slowpath.constprop.0+0x1da0/0x1da0
? unwind_next_frame+0x991/0x1e40
? __mod_node_page_state+0x99/0x120
? __mod_lruvec_page_state+0x2e3/0x470
? __kasan_kmalloc_large+0x90/0xe0
ovs_nla_copy_actions+0x1b4/0x2c0 [openvswitch]
ovs_flow_cmd_new+0x3cd/0xb10 [openvswitch]
...
Cc: [email protected]
Fixes: f28cd2af22a0 ("openvswitch: fix flow actions reallocation")
Signed-off-by: Paolo Valerio <[email protected]>
Acked-by: Eelco Chaudron <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
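A hedged sketch of the rearranged bound check inside reserve_sfa_size() (simplified from the surrounding code):

/* Reject before clamping: a next_offset beyond the ceiling must fail
 * with -EMSGSIZE instead of silently shrinking the allocation while
 * actions_len still grows by req_size. */
new_acts_size = max(next_offset, ksize(acts) * 2);
if (new_acts_size > MAX_ACTIONS_BUFSIZE) {
	if (next_offset > MAX_ACTIONS_BUFSIZE)
		return ERR_PTR(-EMSGSIZE);
	new_acts_size = MAX_ACTIONS_BUFSIZE;
}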
static inline u32 tcp_wnd_end(const struct tcp_sock *tp)
{
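	/* Right edge of the send window: snd_una plus the advertised window. */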
return tp->snd_una + tp->snd_wnd;
} | 0 | [
"CWE-416",
"CWE-269"
] | linux | bb1fceca22492109be12640d49f5ea5a544c6bb4 | 66,456,230,419,607,550,000,000,000,000,000,000,000 | 4 | tcp: fix use after free in tcp_xmit_retransmit_queue()
When tcp_sendmsg() allocates a fresh and empty skb, it puts it at the
tail of the write queue using tcp_add_write_queue_tail()
Then it attempts to copy user data into this fresh skb.
If the copy fails, we undo the work and remove the fresh skb.
Unfortunately, this undo lacks the change done to tp->highest_sack and
we can leave a dangling pointer (to a freed skb)
Later, tcp_xmit_retransmit_queue() can dereference this pointer and
access freed memory. For regular kernels where memory is not unmapped,
this might cause SACK bugs because tcp_highest_sack_seq() is buggy,
returning garbage instead of tp->snd_nxt, but with various debug
features like CONFIG_DEBUG_PAGEALLOC, this can crash the kernel.
This bug was found by Marco Grassi thanks to syzkaller.
Fixes: 6859d49475d4 ("[TCP]: Abstract tp->highest_sack accessing & point to next skb")
Reported-by: Marco Grassi <[email protected]>
Signed-off-by: Eric Dumazet <[email protected]>
Cc: Ilpo Järvinen <[email protected]>
Cc: Yuchung Cheng <[email protected]>
Cc: Neal Cardwell <[email protected]>
Acked-by: Neal Cardwell <[email protected]>
Reviewed-by: Cong Wang <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
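A hedged sketch of the shape of the repair; upstream places the reset where the write queue is unlinked or emptied, and may advance rather than clear the cached pointer:

/* Illustrative: never leave tp->highest_sack pointing at an skb that the
 * failed-copy undo path is about to unlink and free. */
static void tcp_unlink_write_queue_safe(struct sk_buff *skb, struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (tp->highest_sack == skb)
		tp->highest_sack = NULL;
	tcp_unlink_write_queue(skb, sk);
}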
static void __exit ecryptfs_exit(void)
{
int rc;
rc = ecryptfs_destroy_crypto();
if (rc)
printk(KERN_ERR "Failure whilst attempting to destroy crypto; "
"rc = [%d]\n", rc);
ecryptfs_release_messaging();
ecryptfs_destroy_kthread();
do_sysfs_unregistration();
unregister_filesystem(&ecryptfs_fs_type);
ecryptfs_free_kmem_caches();
} | 0 | [
"CWE-284",
"CWE-264"
] | linux | 69c433ed2ecd2d3264efd7afec4439524b319121 | 53,530,936,873,311,420,000,000,000,000,000,000,000 | 14 | fs: limit filesystem stacking depth
Add a simple read-only counter to super_block that indicates how deep this
is in the stack of filesystems. Previously ecryptfs was the only stackable
filesystem and it explicitly disallowed multiple layers of itself.
Overlayfs, however, can be stacked recursively and also may be stacked
on top of ecryptfs or vice versa.
To limit the kernel stack usage we must limit the depth of the
filesystem stack. Initially the limit is set to 2.
Signed-off-by: Miklos Szeredi <[email protected]> |
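A hedged sketch of how a stacking filesystem consumes the counter at mount time, close to the eCryptfs hunk of this patch (locals abbreviated):

/* Inherit the lower filesystem's depth plus one and refuse to exceed
 * the global limit (initially 2). */
s->s_stack_depth = path.dentry->d_sb->s_stack_depth + 1;
if (s->s_stack_depth > FILESYSTEM_MAX_STACK_DEPTH) {
	pr_err("eCryptfs: maximum fs stacking depth exceeded\n");
	rc = -EINVAL;
	goto out_free;
}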
GF_Err stsd_box_read(GF_Box *s, GF_BitStream *bs)
{
ISOM_DECREASE_SIZE(s, 4)
gf_bs_read_u32(bs);
return gf_isom_box_array_read_ex(s, bs, GF_ISOM_BOX_TYPE_STSD);
} | 0 | [
"CWE-476",
"CWE-787"
] | gpac | b8f8b202d4fc23eb0ab4ce71ae96536ca6f5d3f8 | 127,422,303,642,203,730,000,000,000,000,000,000,000 | 7 | fixed #1757 |
static void
ews_connection_schedule_queue_message (EEwsConnection *cnc,
SoupMessage *message,
SoupSessionCallback callback,
gpointer user_data)
{
EwsScheduleData *sd;
GSource *source;
g_return_if_fail (E_IS_EWS_CONNECTION (cnc));
g_return_if_fail (SOUP_IS_MESSAGE (message));
sd = g_new0 (EwsScheduleData, 1);
sd->cnc = g_object_ref (cnc);
sd->message = g_object_ref (message);
sd->op = EWS_SCHEDULE_OP_QUEUE_MESSAGE;
sd->queue_callback = callback;
sd->queue_user_data = user_data;
source = g_idle_source_new ();
g_source_set_priority (source, G_PRIORITY_DEFAULT);
g_source_set_callback (source, ews_connection_scheduled_cb, sd, NULL);
g_source_attach (source, cnc->priv->soup_context);
g_source_unref (source);
} | 0 | [
"CWE-295"
] | evolution-ews | 915226eca9454b8b3e5adb6f2fff9698451778de | 204,423,609,411,915,220,000,000,000,000,000,000,000 | 24 | I#27 - SSL Certificates are not validated
This depends on https://gitlab.gnome.org/GNOME/evolution-data-server/commit/6672b8236139bd6ef41ecb915f4c72e2a052dba5 too.
Closes https://gitlab.gnome.org/GNOME/evolution-ews/issues/27 |
static int opj_j2k_initialise_4K_poc(opj_poc_t *POC, int numres)
{
POC[0].tile = 1;
POC[0].resno0 = 0;
POC[0].compno0 = 0;
POC[0].layno1 = 1;
POC[0].resno1 = (OPJ_UINT32)(numres - 1);
POC[0].compno1 = 3;
POC[0].prg1 = OPJ_CPRL;
POC[1].tile = 1;
POC[1].resno0 = (OPJ_UINT32)(numres - 1);
POC[1].compno0 = 0;
POC[1].layno1 = 1;
POC[1].resno1 = (OPJ_UINT32)numres;
POC[1].compno1 = 3;
POC[1].prg1 = OPJ_CPRL;
return 2;
} | 0 | [
"CWE-416",
"CWE-787"
] | openjpeg | 4241ae6fbbf1de9658764a80944dc8108f2b4154 | 328,152,533,937,005,870,000,000,000,000,000,000,000 | 18 | Fix assertion in debug mode / heap-based buffer overflow in opj_write_bytes_LE for Cinema profiles with numresolutions = 1 (#985) |
void AuthorizationManagerImpl::_updateCacheGeneration_inlock() {
_cacheGeneration = OID::gen();
} | 0 | [
"CWE-613"
] | mongo | 6dfb92b1299de04677d0bd2230e89a52eb01003c | 61,973,710,250,695,030,000,000,000,000,000,000,000 | 3 | SERVER-38984 Validate unique User ID on UserCache hit
(cherry picked from commit e55d6e2292e5dbe2f97153251d8193d1cc89f5d7) |
static ut64 bbJump(RAnalFunction *fcn, ut64 addr) {
RListIter *iter;
RAnalBlock *bb;
r_list_foreach (fcn->bbs, iter, bb) {
if (R_BETWEEN (bb->addr, addr, bb->addr + bb->size - 1)) {
return bb->jump;
}
}
return UT64_MAX;
} | 0 | [
"CWE-415",
"CWE-703"
] | radare2 | cb8b683758edddae2d2f62e8e63a738c39f92683 | 112,497,130,772,092,520,000,000,000,000,000,000,000 | 10 | Fix #16303 - c->table_query double free (#16318) |
std::map<std::string, std::string> GetAddedFileSystemPaths(
content::WebContents* web_contents) {
auto* pref_service = GetPrefService(web_contents);
const base::Value* file_system_paths_value =
pref_service->GetDictionary(prefs::kDevToolsFileSystemPaths);
std::map<std::string, std::string> result;
if (file_system_paths_value) {
const base::DictionaryValue* file_system_paths_dict;
file_system_paths_value->GetAsDictionary(&file_system_paths_dict);
for (auto it : file_system_paths_dict->DictItems()) {
std::string type =
it.second.is_string() ? it.second.GetString() : std::string();
result[it.first] = type;
}
}
return result;
} | 0 | [] | electron | e9fa834757f41c0b9fe44a4dffe3d7d437f52d34 | 69,548,695,681,342,700,000,000,000,000,000,000,000 | 18 | fix: ensure ElectronBrowser mojo service is only bound to appropriate render frames (#33344)
* fix: ensure ElectronBrowser mojo service is only bound to authorized render frames
Notes: no-notes
* refactor: extract electron API IPC to its own mojo interface
* fix: just check main frame not primary main frame
Co-authored-by: Samuel Attard <[email protected]>
Co-authored-by: Samuel Attard <[email protected]> |
static int vmci_transport_recv_connecting_client_negotiate(
struct sock *sk,
struct vmci_transport_packet *pkt)
{
int err;
struct vsock_sock *vsk;
struct vmci_handle handle;
struct vmci_qp *qpair;
u32 attach_sub_id;
u32 detach_sub_id;
bool is_local;
u32 flags;
bool old_proto = true;
bool old_pkt_proto;
u16 version;
vsk = vsock_sk(sk);
handle = VMCI_INVALID_HANDLE;
attach_sub_id = VMCI_INVALID_ID;
detach_sub_id = VMCI_INVALID_ID;
/* If we have gotten here then we should be past the point where old
* linux vsock could have sent the bogus rst.
*/
vsk->sent_request = false;
vsk->ignore_connecting_rst = false;
/* Verify that we're OK with the proposed queue pair size */
if (pkt->u.size < vmci_trans(vsk)->queue_pair_min_size ||
pkt->u.size > vmci_trans(vsk)->queue_pair_max_size) {
err = -EINVAL;
goto destroy;
}
/* At this point we know the CID the peer is using to talk to us. */
if (vsk->local_addr.svm_cid == VMADDR_CID_ANY)
vsk->local_addr.svm_cid = pkt->dg.dst.context;
/* Setup the notify ops to be the highest supported version that both
* the server and the client support.
*/
if (vmci_transport_old_proto_override(&old_pkt_proto)) {
old_proto = old_pkt_proto;
} else {
if (pkt->type == VMCI_TRANSPORT_PACKET_TYPE_NEGOTIATE)
old_proto = true;
else if (pkt->type == VMCI_TRANSPORT_PACKET_TYPE_NEGOTIATE2)
old_proto = false;
}
if (old_proto)
version = VSOCK_PROTO_INVALID;
else
version = pkt->proto;
if (!vmci_transport_proto_to_notify_struct(sk, &version, old_proto)) {
err = -EINVAL;
goto destroy;
}
/* Subscribe to attach and detach events first.
*
* XXX We attach once for each queue pair created for now so it is easy
* to find the socket (it's provided), but later we should only
* subscribe once and add a way to lookup sockets by queue pair handle.
*/
err = vmci_event_subscribe(VMCI_EVENT_QP_PEER_ATTACH,
vmci_transport_peer_attach_cb,
sk, &attach_sub_id);
if (err < VMCI_SUCCESS) {
err = vmci_transport_error_to_vsock_error(err);
goto destroy;
}
err = vmci_event_subscribe(VMCI_EVENT_QP_PEER_DETACH,
vmci_transport_peer_detach_cb,
sk, &detach_sub_id);
if (err < VMCI_SUCCESS) {
err = vmci_transport_error_to_vsock_error(err);
goto destroy;
}
/* Make VMCI select the handle for us. */
handle = VMCI_INVALID_HANDLE;
is_local = vsk->remote_addr.svm_cid == vsk->local_addr.svm_cid;
flags = is_local ? VMCI_QPFLAG_LOCAL : 0;
err = vmci_transport_queue_pair_alloc(&qpair,
&handle,
pkt->u.size,
pkt->u.size,
vsk->remote_addr.svm_cid,
flags,
vmci_transport_is_trusted(
vsk,
vsk->
remote_addr.svm_cid));
if (err < 0)
goto destroy;
err = vmci_transport_send_qp_offer(sk, handle);
if (err < 0) {
err = vmci_transport_error_to_vsock_error(err);
goto destroy;
}
vmci_trans(vsk)->qp_handle = handle;
vmci_trans(vsk)->qpair = qpair;
vmci_trans(vsk)->produce_size = vmci_trans(vsk)->consume_size =
pkt->u.size;
vmci_trans(vsk)->attach_sub_id = attach_sub_id;
vmci_trans(vsk)->detach_sub_id = detach_sub_id;
vmci_trans(vsk)->notify_ops->process_negotiate(sk);
return 0;
destroy:
if (attach_sub_id != VMCI_INVALID_ID)
vmci_event_unsubscribe(attach_sub_id);
if (detach_sub_id != VMCI_INVALID_ID)
vmci_event_unsubscribe(detach_sub_id);
if (!vmci_handle_is_invalid(handle))
vmci_qpair_detach(&qpair);
return err;
} | 0 | [
"CWE-20",
"CWE-269"
] | linux | f3d3342602f8bcbf37d7c46641cb9bca7618eb1c | 32,232,281,856,347,805,000,000,000,000,000,000,000 | 134 | net: rework recvmsg handler msg_name and msg_namelen logic
This patch now always passes msg->msg_namelen as 0. recvmsg handlers must
set msg_namelen to the proper size <= sizeof(struct sockaddr_storage)
to return msg_name to the user.
This prevents numerous uninitialized memory leaks we had in the
recvmsg handlers and makes it harder for new code to accidentally leak
uninitialized memory.
Optimize for the case recvfrom is called with NULL as address. We don't
need to copy the address at all, so set it to NULL before invoking the
recvmsg handler. We can do so, because all the recvmsg handlers must
cope with the case a plain read() is called on them. read() also sets
msg_name to NULL.
Also document these changes in include/linux/net.h as suggested by David
Miller.
Changes since RFC:
Set msg->msg_name = NULL if user specified a NULL in msg_name but had a
non-null msg_namelen in verify_iovec/verify_compat_iovec. This doesn't
affect sendto as it would bail out earlier while trying to copy-in the
address. It also more naturally reflects the logic by the callers of
verify_iovec.
With this change in place I could remove "
if (!uaddr || msg_sys->msg_namelen == 0)
msg->msg_name = NULL
".
This change does not alter the user visible error logic as we ignore
msg_namelen as long as msg_name is NULL.
Also remove two unnecessary curly brackets in ___sys_recvmsg and change
comments to netdev style.
Cc: David Miller <[email protected]>
Suggested-by: Eric Dumazet <[email protected]>
Signed-off-by: Hannes Frederic Sowa <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
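A hedged sketch of the handler-side contract after this change (handler name invented; the signature matches that era's proto_ops):

/* msg->msg_namelen now arrives as 0; a handler sets it only when it
 * actually fills in msg->msg_name, which may be NULL when the caller
 * did not ask for an address (plain read(), recvfrom with NULL). */
static int example_recvmsg(struct kiocb *iocb, struct socket *sock,
			   struct msghdr *msg, size_t len, int flags)
{
	/* ... copy the payload ... */
	if (msg->msg_name) {
		DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name);
		/* ... fill *sin from the packet ... */
		msg->msg_namelen = sizeof(*sin);
	}
	return 0;
}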
static int cma_listen_on_all(struct rdma_id_private *id_priv)
{
struct rdma_id_private *to_destroy;
struct cma_device *cma_dev;
int ret;
mutex_lock(&lock);
list_add_tail(&id_priv->list, &listen_any_list);
list_for_each_entry(cma_dev, &dev_list, list) {
ret = cma_listen_on_dev(id_priv, cma_dev, &to_destroy);
if (ret) {
/* Prevent racing with cma_process_remove() */
if (to_destroy)
list_del_init(&to_destroy->list);
goto err_listen;
}
}
mutex_unlock(&lock);
return 0;
err_listen:
_cma_cancel_listens(id_priv);
mutex_unlock(&lock);
if (to_destroy)
rdma_destroy_id(&to_destroy->id);
return ret;
} | 0 | [
"CWE-416"
] | linux | bc0bdc5afaa740d782fbf936aaeebd65e5c2921d | 164,406,797,403,097,060,000,000,000,000,000,000,000 | 27 | RDMA/cma: Do not change route.addr.src_addr.ss_family
If the state is not idle then rdma_bind_addr() will immediately fail and
no change to global state should happen.
For instance if the state is already RDMA_CM_LISTEN then this will corrupt
the src_addr and would cause the test in cma_cancel_operation():
if (cma_any_addr(cma_src_addr(id_priv)) && !id_priv->cma_dev)
To view a mangled src_addr, eg with a IPv6 loopback address but an IPv4
family, failing the test.
This would manifest as this trace from syzkaller:
BUG: KASAN: use-after-free in __list_add_valid+0x93/0xa0 lib/list_debug.c:26
Read of size 8 at addr ffff8881546491e0 by task syz-executor.1/32204
CPU: 1 PID: 32204 Comm: syz-executor.1 Not tainted 5.12.0-rc8-syzkaller #0
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
Call Trace:
__dump_stack lib/dump_stack.c:79 [inline]
dump_stack+0x141/0x1d7 lib/dump_stack.c:120
print_address_description.constprop.0.cold+0x5b/0x2f8 mm/kasan/report.c:232
__kasan_report mm/kasan/report.c:399 [inline]
kasan_report.cold+0x7c/0xd8 mm/kasan/report.c:416
__list_add_valid+0x93/0xa0 lib/list_debug.c:26
__list_add include/linux/list.h:67 [inline]
list_add_tail include/linux/list.h:100 [inline]
cma_listen_on_all drivers/infiniband/core/cma.c:2557 [inline]
rdma_listen+0x787/0xe00 drivers/infiniband/core/cma.c:3751
ucma_listen+0x16a/0x210 drivers/infiniband/core/ucma.c:1102
ucma_write+0x259/0x350 drivers/infiniband/core/ucma.c:1732
vfs_write+0x28e/0xa30 fs/read_write.c:603
ksys_write+0x1ee/0x250 fs/read_write.c:658
do_syscall_64+0x2d/0x70 arch/x86/entry/common.c:46
entry_SYSCALL_64_after_hwframe+0x44/0xae
Which is indicating that an rdma_id_private was destroyed without doing
cma_cancel_listens().
Instead of trying to re-use the src_addr memory to indirectly create an
any address, build one explicitly on the stack and bind to that as any
other normal flow would do.
Link: https://lore.kernel.org/r/[email protected]
Cc: [email protected]
Fixes: 732d41c545bb ("RDMA/cma: Make the locking for automatic state transition more clear")
Reported-by: [email protected]
Tested-by: Hao Sun <[email protected]>
Reviewed-by: Leon Romanovsky <[email protected]>
Signed-off-by: Jason Gunthorpe <[email protected]> |
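A hedged sketch of the described replacement inside rdma_listen() (details such as the port copy abbreviated):

/* Build the wildcard address in a local instead of mutating
 * id->route.addr.src_addr, which the listening state may still read. */
struct sockaddr_storage zero_sock = {};

zero_sock.ss_family = id->route.addr.src_addr.ss_family;
/* ... copy the port (and, for IPv6, the scope id) into zero_sock ... */
ret = rdma_bind_addr(id, (struct sockaddr *)&zero_sock);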
static VALUE
dup_obj_with_new_start(VALUE obj, double sg)
{
volatile VALUE dup = dup_obj(obj);
{
get_d1(dup);
set_sg(dat, sg);
}
return dup;
} | 0 | [] | date | 3959accef8da5c128f8a8e2fd54e932a4fb253b0 | 197,556,792,020,431,700,000,000,000,000,000,000,000 | 9 | Add length limit option for methods that parses date strings
`Date.parse` now raises an ArgumentError when a given date string is
longer than 128. You can configure the limit by giving `limit` keyword
arguments like `Date.parse(str, limit: 1000)`. If you pass `limit: nil`,
the limit is disabled.
Not only `Date.parse` but also the following methods are changed.
* Date._parse
* Date.parse
* DateTime.parse
* Date._iso8601
* Date.iso8601
* DateTime.iso8601
* Date._rfc3339
* Date.rfc3339
* DateTime.rfc3339
* Date._xmlschema
* Date.xmlschema
* DateTime.xmlschema
* Date._rfc2822
* Date.rfc2822
* DateTime.rfc2822
* Date._rfc822
* Date.rfc822
* DateTime.rfc822
* Date._jisx0301
* Date.jisx0301
* DateTime.jisx0301 |
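A hedged sketch of what such a guard looks like in the C extension (the real helper in ext/date may differ in detail):

/* Reject oversized input before the potentially expensive parser runs;
 * a nil limit disables the check entirely. */
static void check_limit(VALUE str, VALUE lim)
{
	if (NIL_P(lim))
		return;
	if (RSTRING_LEN(str) > NUM2LONG(lim))
		rb_raise(rb_eArgError,
			 "string length (%ld) exceeds the limit %ld",
			 RSTRING_LEN(str), NUM2LONG(lim));
}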
static void unix_sock_destructor(struct sock *sk)
{
struct unix_sock *u = unix_sk(sk);
skb_queue_purge(&sk->sk_receive_queue);
WARN_ON(atomic_read(&sk->sk_wmem_alloc));
WARN_ON(!sk_unhashed(sk));
WARN_ON(sk->sk_socket);
if (!sock_flag(sk, SOCK_DEAD)) {
printk(KERN_INFO "Attempt to release alive unix socket: %p\n", sk);
return;
}
if (u->addr)
unix_release_addr(u->addr);
atomic_long_dec(&unix_nr_socks);
local_bh_disable();
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
local_bh_enable();
#ifdef UNIX_REFCNT_DEBUG
printk(KERN_DEBUG "UNIX %p is destroyed, %ld are still alive.\n", sk,
atomic_long_read(&unix_nr_socks));
#endif
} | 0 | [] | linux-2.6 | 16e5726269611b71c930054ffe9b858c1cea88eb | 288,793,615,722,939,300,000,000,000,000,000,000,000 | 26 | af_unix: dont send SCM_CREDENTIALS by default
Since commit 7361c36c5224 (af_unix: Allow credentials to work across
user and pid namespaces) af_unix performance dropped a lot.
This is because we now take a reference on pid and cred in each write(),
and release them in read(), usually done from another process,
eventually from another cpu. This triggers false sharing.
# Events: 154K cycles
#
# Overhead Command Shared Object Symbol
# ........ ....... .................. .........................
#
10.40% hackbench [kernel.kallsyms] [k] put_pid
8.60% hackbench [kernel.kallsyms] [k] unix_stream_recvmsg
7.87% hackbench [kernel.kallsyms] [k] unix_stream_sendmsg
6.11% hackbench [kernel.kallsyms] [k] do_raw_spin_lock
4.95% hackbench [kernel.kallsyms] [k] unix_scm_to_skb
4.87% hackbench [kernel.kallsyms] [k] pid_nr_ns
4.34% hackbench [kernel.kallsyms] [k] cred_to_ucred
2.39% hackbench [kernel.kallsyms] [k] unix_destruct_scm
2.24% hackbench [kernel.kallsyms] [k] sub_preempt_count
1.75% hackbench [kernel.kallsyms] [k] fget_light
1.51% hackbench [kernel.kallsyms] [k]
__mutex_lock_interruptible_slowpath
1.42% hackbench [kernel.kallsyms] [k] sock_alloc_send_pskb
This patch includes SCM_CREDENTIALS information in an af_unix message/skb
only if requested by the sender [see man 7 unix for details on how to
include ancillary data using the sendmsg() system call].
Note: This might break buggy applications that expected SCM_CREDENTIAL
from an unaware write() system call, and receiver not using SO_PASSCRED
socket option.
If SOCK_PASSCRED is set on source or destination socket, we still
include credentials for mere write() syscalls.
Performance boost in hackbench : more than 50% gain on a 16 thread
machine (2 quad-core cpus, 2 threads per core)
hackbench 20 thread 2000
4.228 sec instead of 9.102 sec
Signed-off-by: Eric Dumazet <[email protected]>
Acked-by: Tim Chen <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
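A hedged sketch of the opt-in test; the original patch routes this through a maybe_add_creds() helper on the send path:

/* Attach SCM_CREDENTIALS only when one side asked for them, instead of
 * taking pid/cred references on every write(). */
static bool unix_passcred_enabled(const struct socket *sock,
				  const struct sock *other)
{
	return test_bit(SOCK_PASSCRED, &sock->flags) ||
	       (other->sk_socket &&
		test_bit(SOCK_PASSCRED, &other->sk_socket->flags));
}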
static int
calc_enc_length (gnutls_session_t session, int data_size,
int hash_size, uint8_t * pad, int random_pad,
cipher_type_t block_algo, uint16_t blocksize)
{
uint8_t rnd;
int length, ret;
*pad = 0;
switch (block_algo)
{
case CIPHER_STREAM:
length = data_size + hash_size;
break;
case CIPHER_BLOCK:
ret = _gnutls_rnd (GNUTLS_RND_NONCE, &rnd, 1);
if (ret < 0)
{
gnutls_assert ();
return ret;
}
/* make rnd a multiple of blocksize */
if (session->security_parameters.version == GNUTLS_SSL3 ||
random_pad == 0)
{
rnd = 0;
}
else
{
rnd = (rnd / blocksize) * blocksize;
/* subtract one blocksize so that the one-byte pad
 * computed below cannot wrap around to 0.
 */
if (rnd > blocksize)
rnd -= blocksize;
}
length = data_size + hash_size;
*pad = (uint8_t) (blocksize - (length % blocksize)) + rnd;
length += *pad;
if (_gnutls_version_has_explicit_iv
(session->security_parameters.version))
length += blocksize; /* for the IV */
break;
default:
gnutls_assert ();
return GNUTLS_E_INTERNAL_ERROR;
}
return length;
} | 0 | [
"CWE-310"
] | gnutls | 422214868061370aeeb0ac9cd0f021a5c350a57d | 216,178,467,878,641,900,000,000,000,000,000,000,000 | 56 | better check decrypted data. |
GF_Err csgp_box_size(GF_Box *s)
{
u32 i, bits;
GF_CompactSampleGroupBox *ptr = (GF_CompactSampleGroupBox*)s;
u32 pattern_size = get_size_by_code( ((ptr->flags>>4) & 0x3) );
u32 scount_size = get_size_by_code( ((ptr->flags>>2) & 0x3) );
u32 index_size = get_size_by_code( (ptr->flags & 0x3) );
ptr->size += 12; // v, flags, grouping_type, pattern_length
if (ptr->flags & (1<<6))
ptr->size+=4;
ptr->size += ptr->pattern_count * (pattern_size + scount_size) / 8;
bits=0;
for (i=0; i<ptr->pattern_count; i++)
bits += ptr->patterns[i].length * index_size;
ptr->size += bits/8;
if (bits % 8) ptr->size++;
return GF_OK;
} | 0 | [
"CWE-787"
] | gpac | 77510778516803b7f7402d7423c6d6bef50254c3 | 263,600,547,192,747,460,000,000,000,000,000,000,000 | 20 | fixed #2255 |
static int
save_reduce(PicklerObject *self, PyObject *args, PyObject *obj)
{
PyObject *callable;
PyObject *argtup;
PyObject *state = NULL;
PyObject *listitems = Py_None;
PyObject *dictitems = Py_None;
PickleState *st = _Pickle_GetGlobalState();
Py_ssize_t size;
int use_newobj = 0, use_newobj_ex = 0;
const char reduce_op = REDUCE;
const char build_op = BUILD;
const char newobj_op = NEWOBJ;
const char newobj_ex_op = NEWOBJ_EX;
size = PyTuple_Size(args);
if (size < 2 || size > 5) {
PyErr_SetString(st->PicklingError, "tuple returned by "
"__reduce__ must contain 2 through 5 elements");
return -1;
}
if (!PyArg_UnpackTuple(args, "save_reduce", 2, 5,
&callable, &argtup, &state, &listitems, &dictitems))
return -1;
if (!PyCallable_Check(callable)) {
PyErr_SetString(st->PicklingError, "first item of the tuple "
"returned by __reduce__ must be callable");
return -1;
}
if (!PyTuple_Check(argtup)) {
PyErr_SetString(st->PicklingError, "second item of the tuple "
"returned by __reduce__ must be a tuple");
return -1;
}
if (state == Py_None)
state = NULL;
if (listitems == Py_None)
listitems = NULL;
else if (!PyIter_Check(listitems)) {
PyErr_Format(st->PicklingError, "fourth element of the tuple "
"returned by __reduce__ must be an iterator, not %s",
Py_TYPE(listitems)->tp_name);
return -1;
}
if (dictitems == Py_None)
dictitems = NULL;
else if (!PyIter_Check(dictitems)) {
PyErr_Format(st->PicklingError, "fifth element of the tuple "
"returned by __reduce__ must be an iterator, not %s",
Py_TYPE(dictitems)->tp_name);
return -1;
}
if (self->proto >= 2) {
PyObject *name;
_Py_IDENTIFIER(__name__);
if (_PyObject_LookupAttrId(callable, &PyId___name__, &name) < 0) {
return -1;
}
if (name != NULL && PyUnicode_Check(name)) {
_Py_IDENTIFIER(__newobj_ex__);
use_newobj_ex = _PyUnicode_EqualToASCIIId(
name, &PyId___newobj_ex__);
if (!use_newobj_ex) {
_Py_IDENTIFIER(__newobj__);
use_newobj = _PyUnicode_EqualToASCIIId(name, &PyId___newobj__);
}
}
Py_XDECREF(name);
}
if (use_newobj_ex) {
PyObject *cls;
PyObject *args;
PyObject *kwargs;
if (PyTuple_GET_SIZE(argtup) != 3) {
PyErr_Format(st->PicklingError,
"length of the NEWOBJ_EX argument tuple must be "
"exactly 3, not %zd", PyTuple_GET_SIZE(argtup));
return -1;
}
cls = PyTuple_GET_ITEM(argtup, 0);
if (!PyType_Check(cls)) {
PyErr_Format(st->PicklingError,
"first item from NEWOBJ_EX argument tuple must "
"be a class, not %.200s", Py_TYPE(cls)->tp_name);
return -1;
}
args = PyTuple_GET_ITEM(argtup, 1);
if (!PyTuple_Check(args)) {
PyErr_Format(st->PicklingError,
"second item from NEWOBJ_EX argument tuple must "
"be a tuple, not %.200s", Py_TYPE(args)->tp_name);
return -1;
}
kwargs = PyTuple_GET_ITEM(argtup, 2);
if (!PyDict_Check(kwargs)) {
PyErr_Format(st->PicklingError,
"third item from NEWOBJ_EX argument tuple must "
"be a dict, not %.200s", Py_TYPE(kwargs)->tp_name);
return -1;
}
if (self->proto >= 4) {
if (save(self, cls, 0) < 0 ||
save(self, args, 0) < 0 ||
save(self, kwargs, 0) < 0 ||
_Pickler_Write(self, &newobj_ex_op, 1) < 0) {
return -1;
}
}
else {
PyObject *newargs;
PyObject *cls_new;
Py_ssize_t i;
_Py_IDENTIFIER(__new__);
newargs = PyTuple_New(PyTuple_GET_SIZE(args) + 2);
if (newargs == NULL)
return -1;
cls_new = _PyObject_GetAttrId(cls, &PyId___new__);
if (cls_new == NULL) {
Py_DECREF(newargs);
return -1;
}
PyTuple_SET_ITEM(newargs, 0, cls_new);
Py_INCREF(cls);
PyTuple_SET_ITEM(newargs, 1, cls);
for (i = 0; i < PyTuple_GET_SIZE(args); i++) {
PyObject *item = PyTuple_GET_ITEM(args, i);
Py_INCREF(item);
PyTuple_SET_ITEM(newargs, i + 2, item);
}
callable = PyObject_Call(st->partial, newargs, kwargs);
Py_DECREF(newargs);
if (callable == NULL)
return -1;
newargs = PyTuple_New(0);
if (newargs == NULL) {
Py_DECREF(callable);
return -1;
}
if (save(self, callable, 0) < 0 ||
save(self, newargs, 0) < 0 ||
_Pickler_Write(self, &reduce_op, 1) < 0) {
Py_DECREF(newargs);
Py_DECREF(callable);
return -1;
}
Py_DECREF(newargs);
Py_DECREF(callable);
}
}
else if (use_newobj) {
PyObject *cls;
PyObject *newargtup;
PyObject *obj_class;
int p;
/* Sanity checks. */
if (PyTuple_GET_SIZE(argtup) < 1) {
PyErr_SetString(st->PicklingError, "__newobj__ arglist is empty");
return -1;
}
cls = PyTuple_GET_ITEM(argtup, 0);
if (!PyType_Check(cls)) {
PyErr_SetString(st->PicklingError, "args[0] from "
"__newobj__ args is not a type");
return -1;
}
if (obj != NULL) {
obj_class = get_class(obj);
p = obj_class != cls; /* true iff a problem */
Py_DECREF(obj_class);
if (p) {
PyErr_SetString(st->PicklingError, "args[0] from "
"__newobj__ args has the wrong class");
return -1;
}
}
/* XXX: These calls to save() are prone to infinite recursion. Imagine
what happens if the value returned by the __reduce__() method of
some extension type contains another object of the same type. Ouch!
Here is a quick example, that I ran into, to illustrate what I
mean:
>>> import pickle, copyreg
>>> copyreg.dispatch_table.pop(complex)
>>> pickle.dumps(1+2j)
Traceback (most recent call last):
...
RecursionError: maximum recursion depth exceeded
Removing the complex class from copyreg.dispatch_table made the
__reduce_ex__() method emit another complex object:
>>> (1+1j).__reduce_ex__(2)
(<function __newobj__ at 0xb7b71c3c>,
(<class 'complex'>, (1+1j)), None, None, None)
Thus when save() was called on newargstup (the 2nd item) recursion
ensued. Of course, the bug was in the complex class which had a
broken __getnewargs__() that emitted another complex object. But,
the point here is that it is quite easy to end up with a broken reduce
function. */
/* Save the class and its __new__ arguments. */
if (save(self, cls, 0) < 0)
return -1;
newargtup = PyTuple_GetSlice(argtup, 1, PyTuple_GET_SIZE(argtup));
if (newargtup == NULL)
return -1;
p = save(self, newargtup, 0);
Py_DECREF(newargtup);
if (p < 0)
return -1;
/* Add NEWOBJ opcode. */
if (_Pickler_Write(self, &newobj_op, 1) < 0)
return -1;
}
else { /* Not using NEWOBJ. */
if (save(self, callable, 0) < 0 ||
save(self, argtup, 0) < 0 ||
_Pickler_Write(self, &reduce_op, 1) < 0)
return -1;
}
/* obj can be NULL when save_reduce() is used directly. A NULL obj means
the caller does not want to memoize the object. Not particularly useful,
but that is to mimic the behavior of save_reduce() in pickle.py when
obj is None. */
if (obj != NULL) {
/* If the object is already in the memo, this means it is
recursive. In this case, throw away everything we put on the
stack, and fetch the object back from the memo. */
if (PyMemoTable_Get(self->memo, obj)) {
const char pop_op = POP;
if (_Pickler_Write(self, &pop_op, 1) < 0)
return -1;
if (memo_get(self, obj) < 0)
return -1;
return 0;
}
else if (memo_put(self, obj) < 0)
return -1;
}
if (listitems && batch_list(self, listitems) < 0)
return -1;
if (dictitems && batch_dict(self, dictitems) < 0)
return -1;
if (state) {
if (save(self, state, 0) < 0 ||
_Pickler_Write(self, &build_op, 1) < 0)
return -1;
}
return 0;
} | 0 | [
"CWE-190",
"CWE-369"
] | cpython | a4ae828ee416a66d8c7bf5ee71d653c2cc6a26dd | 323,747,327,772,955,570,000,000,000,000,000,000,000 | 282 | closes bpo-34656: Avoid relying on signed overflow in _pickle memos. (GH-9261) |
static inline void __ptep_modify_prot_commit(struct mm_struct *mm,
unsigned long addr,
pte_t *ptep, pte_t pte)
{
/*
* The pte is non-present, so there's no hardware state to
* preserve.
*/
set_pte_at(mm, addr, ptep, pte);
} | 0 | [
"CWE-264"
] | linux-2.6 | 1a5a9906d4e8d1976b701f889d8f35d54b928f25 | 135,348,320,654,434,180,000,000,000,000,000,000,000 | 10 | mm: thp: fix pmd_bad() triggering in code paths holding mmap_sem read mode
In some cases it may happen that pmd_none_or_clear_bad() is called with
the mmap_sem hold in read mode. In those cases the huge page faults can
allocate hugepmds under pmd_none_or_clear_bad() and that can trigger a
false positive from pmd_bad() that will not like to see a pmd
materializing as trans huge.
It's not khugepaged causing the problem, khugepaged holds the mmap_sem
in write mode (and all those sites must hold the mmap_sem in read mode
to prevent pagetables to go away from under them, during code review it
seems vm86 mode on 32bit kernels requires that too unless it's
restricted to 1 thread per process or UP builds). The race is only with
the huge pagefaults that can convert a pmd_none() into a
pmd_trans_huge().
Effectively all these pmd_none_or_clear_bad() sites running with
mmap_sem in read mode are somewhat speculative with the page faults, and
the result is always undefined when they run simultaneously. This is
probably why it wasn't common to run into this. For example if the
madvise(MADV_DONTNEED) runs zap_page_range() shortly before the page
fault, the hugepage will not be zapped, if the page fault runs first it
will be zapped.
Altering pmd_bad() not to error out if it finds hugepmds won't be enough
to fix this, because zap_pmd_range would then proceed to call
zap_pte_range (which would be incorrect if the pmd become a
pmd_trans_huge()).
The simplest way to fix this is to read the pmd in the local stack
(regardless of what we read, no need of actual CPU barriers, only
compiler barrier needed), and be sure it is not changing under the code
that computes its value. Even if the real pmd is changing under the
value we hold on the stack, we don't care. If we actually end up in
zap_pte_range it means the pmd was not none already and it was not huge,
and it can't become huge from under us (khugepaged locking explained
above).
All we need is to enforce that there is no way anymore that in a code
path like below, pmd_trans_huge can be false, but pmd_none_or_clear_bad
can run into a hugepmd. The overhead of a barrier() is just a compiler
tweak and should not be measurable (I only added it for THP builds). I
don't exclude different compiler versions may have prevented the race
too by caching the value of *pmd on the stack (that hasn't been
verified, but it wouldn't be impossible considering
pmd_none_or_clear_bad, pmd_bad, pmd_trans_huge, pmd_none are all inlines
and there's no external function called in between pmd_trans_huge and
pmd_none_or_clear_bad).
if (pmd_trans_huge(*pmd)) {
if (next-addr != HPAGE_PMD_SIZE) {
VM_BUG_ON(!rwsem_is_locked(&tlb->mm->mmap_sem));
split_huge_page_pmd(vma->vm_mm, pmd);
} else if (zap_huge_pmd(tlb, vma, pmd, addr))
continue;
/* fall through */
}
if (pmd_none_or_clear_bad(pmd))
Because this race condition could be exercised without special
privileges this was reported in CVE-2012-1179.
The race was identified and fully explained by Ulrich who debugged it.
I'm quoting his accurate explanation below, for reference.
====== start quote =======
mapcount 0 page_mapcount 1
kernel BUG at mm/huge_memory.c:1384!
At some point prior to the panic, a "bad pmd ..." message similar to the
following is logged on the console:
mm/memory.c:145: bad pmd ffff8800376e1f98(80000000314000e7).
The "bad pmd ..." message is logged by pmd_clear_bad() before it clears
the page's PMD table entry.
143 void pmd_clear_bad(pmd_t *pmd)
144 {
-> 145 pmd_ERROR(*pmd);
146 pmd_clear(pmd);
147 }
After the PMD table entry has been cleared, there is an inconsistency
between the actual number of PMD table entries that are mapping the page
and the page's map count (_mapcount field in struct page). When the page
is subsequently reclaimed, __split_huge_page() detects this inconsistency.
1381 if (mapcount != page_mapcount(page))
1382 printk(KERN_ERR "mapcount %d page_mapcount %d\n",
1383 mapcount, page_mapcount(page));
-> 1384 BUG_ON(mapcount != page_mapcount(page));
The root cause of the problem is a race of two threads in a multithreaded
process. Thread B incurs a page fault on a virtual address that has never
been accessed (PMD entry is zero) while Thread A is executing an madvise()
system call on a virtual address within the same 2 MB (huge page) range.
virtual address space
.---------------------.
| |
| |
.-|---------------------|
| | |
| | |<-- B(fault)
| | |
2 MB | |/////////////////////|-.
huge < |/////////////////////| > A(range)
page | |/////////////////////|-'
| | |
| | |
'-|---------------------|
| |
| |
'---------------------'
- Thread A is executing an madvise(..., MADV_DONTNEED) system call
on the virtual address range "A(range)" shown in the picture.
sys_madvise
// Acquire the semaphore in shared mode.
down_read(¤t->mm->mmap_sem)
...
madvise_vma
switch (behavior)
case MADV_DONTNEED:
madvise_dontneed
zap_page_range
unmap_vmas
unmap_page_range
zap_pud_range
zap_pmd_range
//
// Assume that this huge page has never been accessed.
// I.e. content of the PMD entry is zero (not mapped).
//
if (pmd_trans_huge(*pmd)) {
// We don't get here due to the above assumption.
}
//
// Assume that Thread B incurred a page fault and
.---------> // sneaks in here as shown below.
| //
| if (pmd_none_or_clear_bad(pmd))
| {
| if (unlikely(pmd_bad(*pmd)))
| pmd_clear_bad
| {
| pmd_ERROR
| // Log "bad pmd ..." message here.
| pmd_clear
| // Clear the page's PMD entry.
| // Thread B incremented the map count
| // in page_add_new_anon_rmap(), but
| // now the page is no longer mapped
| // by a PMD entry (-> inconsistency).
| }
| }
|
v
- Thread B is handling a page fault on virtual address "B(fault)" shown
in the picture.
...
do_page_fault
__do_page_fault
// Acquire the semaphore in shared mode.
down_read_trylock(&mm->mmap_sem)
...
handle_mm_fault
if (pmd_none(*pmd) && transparent_hugepage_enabled(vma))
// We get here due to the above assumption (PMD entry is zero).
do_huge_pmd_anonymous_page
alloc_hugepage_vma
// Allocate a new transparent huge page here.
...
__do_huge_pmd_anonymous_page
...
spin_lock(&mm->page_table_lock)
...
page_add_new_anon_rmap
// Here we increment the page's map count (starts at -1).
atomic_set(&page->_mapcount, 0)
set_pmd_at
// Here we set the page's PMD entry which will be cleared
// when Thread A calls pmd_clear_bad().
...
spin_unlock(&mm->page_table_lock)
The mmap_sem does not prevent the race because both threads are acquiring
it in shared mode (down_read). Thread B holds the page_table_lock while
the page's map count and PMD table entry are updated. However, Thread A
does not synchronize on that lock.
====== end quote =======
[[email protected]: checkpatch fixes]
Reported-by: Ulrich Obergfell <[email protected]>
Signed-off-by: Andrea Arcangeli <[email protected]>
Acked-by: Johannes Weiner <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: Hugh Dickins <[email protected]>
Cc: Dave Jones <[email protected]>
Acked-by: Larry Woodman <[email protected]>
Acked-by: Rik van Riel <[email protected]>
Cc: <[email protected]> [2.6.38+]
Cc: Mark Salter <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
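The fix condenses into one helper. A sketch in the spirit of the commit (the upstream version carries longer comments and THP-conditional compilation):

static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd)
{
	pmd_t pmdval = *pmd;	/* rely on the compiler for one read */

	/* Compiler barrier: pin pmdval in a register or stack slot so
	 * it cannot be re-read while a huge-page fault changes *pmd. */
	barrier();
	if (pmd_none(pmdval))
		return 1;
	if (unlikely(pmd_bad(pmdval))) {
		if (!pmd_trans_huge(pmdval))
			pmd_clear_bad(pmd);
		return 1;
	}
	return 0;
}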
TEST_F(HttpConnectionManagerImplTest, FilterShouldUseSantizedPath) {
setup(false, "");
// Enable path sanitizer
normalize_path_ = true;
const std::string original_path = "/x/%2E%2e/z";
const std::string normalized_path = "/z";
auto* filter = new MockStreamFilter();
EXPECT_CALL(filter_factory_, createFilterChain(_))
.WillOnce(Invoke([&](FilterChainFactoryCallbacks& callbacks) -> void {
callbacks.addStreamDecoderFilter(StreamDecoderFilterSharedPtr{filter});
}));
EXPECT_CALL(*filter, decodeComplete());
EXPECT_CALL(*filter, decodeHeaders(_, true))
.WillRepeatedly(Invoke([&](RequestHeaderMap& header_map, bool) -> FilterHeadersStatus {
EXPECT_EQ(normalized_path, header_map.getPathValue());
return FilterHeadersStatus::StopIteration;
}));
EXPECT_CALL(*filter, setDecoderFilterCallbacks(_));
EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status {
decoder_ = &conn_manager_->newStream(response_encoder_);
RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{
{":authority", "host"}, {":path", original_path}, {":method", "GET"}}};
decoder_->decodeHeaders(std::move(headers), true);
return Http::okStatus();
}));
// Kick off the incoming data.
Buffer::OwnedImpl fake_input("1234");
conn_manager_->onData(fake_input, false);
EXPECT_CALL(*filter, onStreamComplete());
EXPECT_CALL(*filter, onDestroy());
filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose);
} | 0 | [
"CWE-22"
] | envoy | 5333b928d8bcffa26ab19bf018369a835f697585 | 86,124,743,079,514,600,000,000,000,000,000,000,000 | 39 | Implement handling of escaped slash characters in URL path
Fixes: CVE-2021-29492
Signed-off-by: Yan Avlasov <[email protected]> |
mrb_obj_ivar_get(mrb_state *mrb, mrb_value self)
{
mrb_sym iv_name;
mrb_get_args(mrb, "n", &iv_name);
mrb_iv_check(mrb, iv_name);
return mrb_iv_get(mrb, self, iv_name);
} | 0 | [
"CWE-824"
] | mruby | b64ce17852b180dfeea81cf458660be41a78974d | 163,255,244,137,731,220,000,000,000,000,000,000,000 | 8 | Should not call `initialize_copy` for `TT_ICLASS`; fix #4027
Since `TT_ICLASS` is an internal object that should never be revealed
to the Ruby world.
T value() const { return value_; } | 0 | [
"CWE-134",
"CWE-119",
"CWE-787"
] | fmt | 8cf30aa2be256eba07bb1cefb998c52326e846e7 | 37,468,925,708,608,720,000,000,000,000,000,000,000 | 1 | Fix segfault on complex pointer formatting (#642) |
void xfrm_policy_hash_rebuild(struct net *net)
{
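	/* Defer the actual rebuild to the per-namespace hash-threshold worker. */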
schedule_work(&net->xfrm.policy_hthresh.work);
} | 0 | [
"CWE-125"
] | ipsec | 7bab09631c2a303f87a7eb7e3d69e888673b9b7e | 89,141,420,671,692,640,000,000,000,000,000,000,000 | 4 | xfrm: policy: check policy direction value
The 'dir' parameter in xfrm_migrate() is a user-controlled byte which is used
as an array index. This can lead to an out-of-bound access, kernel lockup and
DoS. Add a check for the 'dir' value.
This fixes CVE-2017-11600.
References: https://bugzilla.redhat.com/show_bug.cgi?id=1474928
Fixes: 80c9abaabf42 ("[XFRM]: Extension for dynamic update of endpoint address(es)")
Cc: <[email protected]> # v2.6.21-rc1
Reported-by: "bo Zhang" <[email protected]>
Signed-off-by: Vladis Dronov <[email protected]>
Signed-off-by: Steffen Klassert <[email protected]> |
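The added guard is a single early check in xfrm_migrate() (sketch):

/* 'dir' arrives from a netlink attribute and indexes fixed-size
 * per-direction arrays; reject anything out of range. */
if (dir >= XFRM_POLICY_MAX)
	return -EINVAL;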
static int ip_vs_genl_fill_service(struct sk_buff *skb,
struct ip_vs_service *svc)
{
struct nlattr *nl_service;
struct ip_vs_flags flags = { .flags = svc->flags,
.mask = ~0 };
nl_service = nla_nest_start(skb, IPVS_CMD_ATTR_SERVICE);
if (!nl_service)
return -EMSGSIZE;
NLA_PUT_U16(skb, IPVS_SVC_ATTR_AF, svc->af);
if (svc->fwmark) {
NLA_PUT_U32(skb, IPVS_SVC_ATTR_FWMARK, svc->fwmark);
} else {
NLA_PUT_U16(skb, IPVS_SVC_ATTR_PROTOCOL, svc->protocol);
NLA_PUT(skb, IPVS_SVC_ATTR_ADDR, sizeof(svc->addr), &svc->addr);
NLA_PUT_U16(skb, IPVS_SVC_ATTR_PORT, svc->port);
}
NLA_PUT_STRING(skb, IPVS_SVC_ATTR_SCHED_NAME, svc->scheduler->name);
NLA_PUT(skb, IPVS_SVC_ATTR_FLAGS, sizeof(flags), &flags);
NLA_PUT_U32(skb, IPVS_SVC_ATTR_TIMEOUT, svc->timeout / HZ);
NLA_PUT_U32(skb, IPVS_SVC_ATTR_NETMASK, svc->netmask);
if (ip_vs_genl_fill_stats(skb, IPVS_SVC_ATTR_STATS, &svc->stats))
goto nla_put_failure;
nla_nest_end(skb, nl_service);
return 0;
nla_put_failure:
nla_nest_cancel(skb, nl_service);
return -EMSGSIZE;
} | 0 | [
"CWE-119",
"CWE-787"
] | linux | 04bcef2a83f40c6db24222b27a52892cba39dffb | 289,864,502,554,089,900,000,000,000,000,000,000,000 | 37 | ipvs: Add boundary check on ioctl arguments
The ipvs code has a nifty system for doing the size of ioctl command
copies; it defines an array with values into which it indexes the cmd
to find the right length.
Unfortunately, the ipvs code forgot to check if the cmd was in the
range that the array provides, allowing for an index outside of the
array, which then gives a "garbage" result into the length, which
then gets used for copying into a stack buffer.
Fix this by adding sanity checks on these as well as the copy size.
[ [email protected]: adjusted limit to IP_VS_SO_GET_MAX ]
Signed-off-by: Arjan van de Ven <[email protected]>
Acked-by: Julian Anastasov <[email protected]>
Signed-off-by: Simon Horman <[email protected]>
Signed-off-by: Patrick McHardy <[email protected]> |
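
Per the message, two bounds were missing: the command must stay inside the length-lookup array, and the resulting copy length must fit the caller's buffer. A sketch under assumed constants — GET_CMDID_MAX and the table contents are placeholders, not the kernel's values:

#include <errno.h>
#include <stddef.h>

#define GET_CMDID_MAX 7     /* assumed index of the last valid GET command */

static const size_t get_arglen[GET_CMDID_MAX + 1] = {
    /* per-command argument sizes would be filled in here */ 0
};

static int check_get_copy(unsigned int cmdid, size_t user_len, size_t bufsize)
{
    if (cmdid > GET_CMDID_MAX)
        return -EINVAL;     /* the missing range check on the index */
    size_t copylen = get_arglen[cmdid];
    if (copylen > bufsize || copylen > user_len)
        return -EINVAL;     /* copy must fit both stack buffer and input */
    return 0;
}
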
unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
unsigned long length, low_limit, high_limit, gap_start, gap_end;
/* Adjust search length to account for worst case alignment overhead */
length = info->length + info->align_mask;
if (length < info->length)
return -ENOMEM;
/*
* Adjust search limits by the desired length.
* See implementation comment at top of unmapped_area().
*/
gap_end = info->high_limit;
if (gap_end < length)
return -ENOMEM;
high_limit = gap_end - length;
if (info->low_limit > high_limit)
return -ENOMEM;
low_limit = info->low_limit + length;
/* Check highest gap, which does not precede any rbtree node */
gap_start = mm->highest_vm_end;
if (gap_start <= high_limit)
goto found_highest;
/* Check if rbtree root looks promising */
if (RB_EMPTY_ROOT(&mm->mm_rb))
return -ENOMEM;
vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb);
if (vma->rb_subtree_gap < length)
return -ENOMEM;
while (true) {
/* Visit right subtree if it looks promising */
gap_start = vma->vm_prev ? vm_end_gap(vma->vm_prev) : 0;
if (gap_start <= high_limit && vma->vm_rb.rb_right) {
struct vm_area_struct *right =
rb_entry(vma->vm_rb.rb_right,
struct vm_area_struct, vm_rb);
if (right->rb_subtree_gap >= length) {
vma = right;
continue;
}
}
check_current:
/* Check if current node has a suitable gap */
gap_end = vm_start_gap(vma);
if (gap_end < low_limit)
return -ENOMEM;
if (gap_start <= high_limit && gap_end - gap_start >= length)
goto found;
/* Visit left subtree if it looks promising */
if (vma->vm_rb.rb_left) {
struct vm_area_struct *left =
rb_entry(vma->vm_rb.rb_left,
struct vm_area_struct, vm_rb);
if (left->rb_subtree_gap >= length) {
vma = left;
continue;
}
}
/* Go back up the rbtree to find next candidate node */
while (true) {
struct rb_node *prev = &vma->vm_rb;
if (!rb_parent(prev))
return -ENOMEM;
vma = rb_entry(rb_parent(prev),
struct vm_area_struct, vm_rb);
if (prev == vma->vm_rb.rb_right) {
gap_start = vma->vm_prev ?
vm_end_gap(vma->vm_prev) : 0;
goto check_current;
}
}
}
found:
/* We found a suitable gap. Clip it with the original high_limit. */
if (gap_end > info->high_limit)
gap_end = info->high_limit;
found_highest:
/* Compute highest gap address at the desired alignment */
gap_end -= info->length;
gap_end -= (gap_end - info->align_offset) & info->align_mask;
VM_BUG_ON(gap_end < info->low_limit);
VM_BUG_ON(gap_end < gap_start);
return gap_end;
} | 0 | [
"CWE-119"
] | linux | 1be7107fbe18eed3e319a6c3e83c78254b693acb | 221,394,055,306,240,400,000,000,000,000,000,000,000 | 97 | mm: larger stack guard gap, between vmas
Stack guard page is a useful feature to reduce a risk of stack smashing
into a different mapping. We have been using a single page gap which
is sufficient to prevent having stack adjacent to a different mapping.
But this seems to be insufficient in the light of the stack usage in
userspace. E.g. glibc uses alloca() allocations as large as 64kB in many
commonly used functions. Others use constructs like gid_t buffer[NGROUPS_MAX]
which is 256kB or stack strings with MAX_ARG_STRLEN.
This will become especially dangerous for suid binaries and the default
no limit for the stack size limit because those applications can be
tricked to consume a large portion of the stack and a single glibc call
could jump over the guard page. These attacks are not theoretical,
unfortunately.
Make those attacks less probable by increasing the stack guard gap
to 1MB (on systems with 4k pages; but make it depend on the page size
because systems with larger base pages might cap stack allocations in
the PAGE_SIZE units) which should cover larger alloca() and VLA stack
allocations. It is obviously not a full fix because the problem is
somehow inherent, but it should reduce attack space a lot.
One could argue that the gap size should be configurable from userspace,
but that can be done later when somebody finds that the new 1MB is wrong
for some special case applications. For now, add a kernel command line
option (stack_guard_gap) to specify the stack gap size (in page units).
Implementation wise, first delete all the old code for stack guard page:
because although we could get away with accounting one extra page in a
stack vma, accounting a larger gap can break userspace - case in point,
a program run with "ulimit -S -v 20000" failed when the 1MB gap was
counted for RLIMIT_AS; similar problems could come with RLIMIT_MLOCK
and strict non-overcommit mode.
Instead of keeping gap inside the stack vma, maintain the stack guard
gap as a gap between vmas: using vm_start_gap() in place of vm_start
(or vm_end_gap() in place of vm_end if VM_GROWSUP) in just those few
places which need to respect the gap - mainly arch_get_unmapped_area(),
and and the vma tree's subtree_gap support for that.
Original-patch-by: Oleg Nesterov <[email protected]>
Original-patch-by: Michal Hocko <[email protected]>
Signed-off-by: Hugh Dickins <[email protected]>
Acked-by: Michal Hocko <[email protected]>
Tested-by: Helge Deller <[email protected]> # parisc
Signed-off-by: Linus Torvalds <[email protected]> |
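
The function above leans on vm_start_gap()/vm_end_gap(), which the message introduces: the guard gap lives between vmas rather than inside the stack vma. A self-contained sketch of those helpers with simplified stand-in types — the real ones operate on struct vm_area_struct:

/* Sketch of the gap helpers: the guard is empty address space between
 * vmas, not a page inside the stack vma. The struct and stack_guard_gap
 * below are simplified stand-ins for the kernel's vm_area_struct and
 * its command-line-tunable gap size. */
static unsigned long stack_guard_gap = 256UL << 12;  /* 1MB with 4kB pages */

struct vma { unsigned long vm_start, vm_end, vm_flags; };
#define VM_GROWSDOWN 0x1UL
#define VM_GROWSUP   0x2UL

static unsigned long vm_start_gap(const struct vma *v)
{
    unsigned long start = v->vm_start;
    if (v->vm_flags & VM_GROWSDOWN) {
        start -= stack_guard_gap;
        if (start > v->vm_start)    /* wrapped below zero: clamp */
            start = 0;
    }
    return start;
}

static unsigned long vm_end_gap(const struct vma *v)
{
    unsigned long end = v->vm_end;
    if (v->vm_flags & VM_GROWSUP) {
        end += stack_guard_gap;
        if (end < v->vm_end)        /* wrapped past max: clamp */
            end = ~0UL;
    }
    return end;
}
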
static void fastrpc_release(struct dma_buf *dmabuf)
{
struct fastrpc_buf *buffer = dmabuf->priv;
fastrpc_buf_free(buffer);
} | 0 | [
"CWE-400",
"CWE-401"
] | linux | fc739a058d99c9297ef6bfd923b809d85855b9a9 | 58,225,058,070,565,970,000,000,000,000,000,000,000 | 6 | misc: fastrpc: prevent memory leak in fastrpc_dma_buf_attach
In fastrpc_dma_buf_attach, if dma_get_sgtable fails, the allocated memory
for the attachment 'a' should be released.
Signed-off-by: Navid Emamdoost <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Greg Kroah-Hartman <[email protected]> |
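
The leak the message describes is the classic missed error path: an object is allocated, a later setup step fails, and the function returns without freeing. A sketch with stand-in types — build_table() substitutes for the driver's dma_get_sgtable() call:

#include <errno.h>
#include <stdlib.h>

struct attachment { void *table; };

/* Stand-in for the driver's dma_get_sgtable(); here it simply fails so
 * the error path below is exercised. */
static void *build_table(void) { return NULL; }

/* The fix's shape: free the attachment on the setup-failure path
 * instead of leaking it. */
static int attach(struct attachment **out)
{
    struct attachment *a = calloc(1, sizeof(*a));
    if (!a)
        return -ENOMEM;
    a->table = build_table();
    if (!a->table) {
        free(a);            /* release 'a' before bailing out */
        return -EINVAL;
    }
    *out = a;
    return 0;
}
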
static struct sock *udp_v6_lookup(struct in6_addr *saddr, u16 sport,
struct in6_addr *daddr, u16 dport, int dif)
{
struct sock *sk, *result = NULL;
struct hlist_node *node;
unsigned short hnum = ntohs(dport);
int badness = -1;
read_lock(&udp_hash_lock);
sk_for_each(sk, node, &udp_hash[hnum & (UDP_HTABLE_SIZE - 1)]) {
struct inet_sock *inet = inet_sk(sk);
if (inet->num == hnum && sk->sk_family == PF_INET6) {
struct ipv6_pinfo *np = inet6_sk(sk);
int score = 0;
if (inet->dport) {
if (inet->dport != sport)
continue;
score++;
}
if (!ipv6_addr_any(&np->rcv_saddr)) {
if (!ipv6_addr_equal(&np->rcv_saddr, daddr))
continue;
score++;
}
if (!ipv6_addr_any(&np->daddr)) {
if (!ipv6_addr_equal(&np->daddr, saddr))
continue;
score++;
}
if (sk->sk_bound_dev_if) {
if (sk->sk_bound_dev_if != dif)
continue;
score++;
}
if(score == 4) {
result = sk;
break;
} else if(score > badness) {
result = sk;
badness = score;
}
}
}
if (result)
sock_hold(result);
read_unlock(&udp_hash_lock);
return result;
} | 0 | [
"CWE-476"
] | linux-2.6 | 1e0c14f49d6b393179f423abbac47f85618d3d46 | 143,582,848,034,732,530,000,000,000,000,000,000,000 | 49 | [UDP]: Fix MSG_PROBE crash
UDP tracks corking status through the pending variable. The
IP layer also tracks it through the socket write queue. It
is possible for the two to get out of sync when MSG_PROBE is
used.
This patch changes UDP to check the write queue to ensure
that the two stay in sync.
Signed-off-by: Herbert Xu <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
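
A sketch of the synchronization idea in the message — trust the write queue rather than the 'pending' flag alone, so MSG_PROBE cannot leave the two views disagreeing. The types are simplified stand-ins, not kernel structures:

#include <stdbool.h>

/* Simplified stand-ins: 'pending' is UDP's corking flag, 'writequeue'
 * the IP layer's view of queued data. */
struct wq { int len; };
struct udp_sock { int pending; struct wq writequeue; };

/* The fix's idea: consider the socket corked only when both views
 * agree, so a MSG_PROBE that set 'pending' without queueing data
 * cannot leave the state inconsistent. */
static bool udp_is_corked(const struct udp_sock *up)
{
    return up->pending && up->writequeue.len > 0;
}
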
mono_image_typedef_or_ref_full (MonoDynamicImage *assembly, MonoType *type, gboolean try_typespec)
{
MonoDynamicTable *table;
guint32 *values;
guint32 token, scope, enclosing;
MonoClass *klass;
/* if the type requires a typespec, we must try that first*/
if (try_typespec && (token = create_typespec (assembly, type)))
return token;
token = GPOINTER_TO_UINT (g_hash_table_lookup (assembly->typeref, type));
if (token)
return token;
klass = mono_class_from_mono_type (type);
if (!klass)
klass = mono_class_from_mono_type (type);
/*
* If it's in the same module and not a generic type parameter:
*/
if ((klass->image == &assembly->image) && (type->type != MONO_TYPE_VAR) &&
(type->type != MONO_TYPE_MVAR)) {
MonoReflectionTypeBuilder *tb = mono_class_get_ref_info (klass);
token = MONO_TYPEDEFORREF_TYPEDEF | (tb->table_idx << MONO_TYPEDEFORREF_BITS);
mono_g_hash_table_insert (assembly->tokens, GUINT_TO_POINTER (token), mono_class_get_ref_info (klass));
return token;
}
if (klass->nested_in) {
enclosing = mono_image_typedef_or_ref_full (assembly, &klass->nested_in->byval_arg, FALSE);
/* get the typeref idx of the enclosing type */
enclosing >>= MONO_TYPEDEFORREF_BITS;
scope = (enclosing << MONO_RESOLTION_SCOPE_BITS) | MONO_RESOLTION_SCOPE_TYPEREF;
} else {
scope = resolution_scope_from_image (assembly, klass->image);
}
table = &assembly->tables [MONO_TABLE_TYPEREF];
if (assembly->save) {
alloc_table (table, table->rows + 1);
values = table->values + table->next_idx * MONO_TYPEREF_SIZE;
values [MONO_TYPEREF_SCOPE] = scope;
values [MONO_TYPEREF_NAME] = string_heap_insert (&assembly->sheap, klass->name);
values [MONO_TYPEREF_NAMESPACE] = string_heap_insert (&assembly->sheap, klass->name_space);
}
token = MONO_TYPEDEFORREF_TYPEREF | (table->next_idx << MONO_TYPEDEFORREF_BITS); /* typeref */
g_hash_table_insert (assembly->typeref, type, GUINT_TO_POINTER(token));
table->next_idx ++;
mono_g_hash_table_insert (assembly->tokens, GUINT_TO_POINTER (token), mono_class_get_ref_info (klass));
return token;
} | 0 | [
"CWE-20"
] | mono | 65292a69c837b8a5f7a392d34db63de592153358 | 184,686,333,474,368,420,000,000,000,000,000,000,000 | 50 | Handle invalid instantiation of generic methods.
* verify.c: Add new function to internal verifier API to check
method instantiations.
* reflection.c (mono_reflection_bind_generic_method_parameters):
Check the instantiation before returning it.
Fixes #655847 |
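
A loose sketch of the verifier check the message adds — validating a generic instantiation before handing the bound method back. The arity and argument checks are assumptions about what "check the instantiation" entails; mono's actual verifier API differs:

#include <stdbool.h>
#include <stddef.h>

/* Assumed shape of a generic instantiation: an arity plus an array of
 * type arguments. Mono's real structures differ. */
struct ginst { size_t type_argc; void **type_argv; };

/* Sketch of "check the instantiation before returning it": verify the
 * arity matches and no type argument is missing. */
static bool instantiation_is_valid(const struct ginst *inst, size_t expected_argc)
{
    if (inst == NULL || inst->type_argc != expected_argc)
        return false;
    for (size_t i = 0; i < inst->type_argc; i++)
        if (inst->type_argv[i] == NULL)
            return false;   /* incomplete instantiation: reject */
    return true;
}
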
static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb)
{
dma_free_coherent(&acb->pdev->dev, acb->uncache_size, acb->dma_coherent, acb->dma_coherent_handle);
} | 0 | [
"CWE-119",
"CWE-787"
] | linux | 7bc2b55a5c030685b399bb65b6baa9ccc3d1f167 | 166,614,029,105,963,050,000,000,000,000,000,000,000 | 4 | scsi: arcmsr: Buffer overflow in arcmsr_iop_message_xfer()
We need to put an upper bound on "user_len" so the memcpy() doesn't
overflow.
Cc: <[email protected]>
Reported-by: Marco Grassi <[email protected]>
Signed-off-by: Dan Carpenter <[email protected]>
Reviewed-by: Tomas Henzl <[email protected]>
Signed-off-by: Martin K. Petersen <[email protected]> |
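
The fix the message asks for is an upper bound on the user-controlled length before memcpy(). A sketch with an assumed buffer capacity — MSG_BUF_SIZE is a placeholder, not the driver's real constant:

#include <errno.h>
#include <string.h>

#define MSG_BUF_SIZE 1032   /* assumed capacity; not the driver's constant */

/* Sketch of the bound the message calls for: clamp the caller-supplied
 * length against the destination before memcpy(), instead of trusting it. */
static int copy_user_message(char *dst, const char *src, unsigned int user_len)
{
    if (user_len > MSG_BUF_SIZE)
        return -EINVAL;     /* refuse rather than overflow */
    memcpy(dst, src, user_len);
    return 0;
}
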