| func (string, length 0 to 484k) | target (int64, 0 to 1) | cwe (list, length 0 to 4) | project (string, 799 distinct values) | commit_id (string, length 40) | hash (float64, 1,215,700,430,453,689,100,000,000B to 340,281,914,521,452,260,000,000,000,000B) | size (int64, 1 to 24k) | message (string, length 0 to 13.3k) |
|---|---|---|---|---|---|---|---|
njs_function_frame_save(njs_vm_t *vm, njs_frame_t *frame, u_char *pc)
{
size_t value_count, n;
njs_value_t *start, *end, *p, **new, *value, **local;
njs_function_t *function;
njs_native_frame_t *active, *native;
*frame = *vm->active_frame;
frame->previous_active_frame = NULL;
native = &frame->native;
native->size = 0;
native->free = NULL;
native->free_size = 0;
active = &vm->active_frame->native;
value_count = njs_function_frame_value_count(active);
function = active->function;
new = (njs_value_t **) ((u_char *) native + NJS_FRAME_SIZE);
value = (njs_value_t *) (new + value_count
+ function->u.lambda->temp);
native->arguments = value;
native->arguments_offset = value + (function->args_offset - 1);
native->local = new + njs_function_frame_args_count(active);
native->temp = new + value_count;
native->pc = pc;
start = njs_function_frame_values(active, &end);
p = native->arguments;
while (start < end) {
*p = *start++;
*new++ = p++;
}
/* Move all arguments. */
p = native->arguments;
local = native->local + function->args_offset;
for (n = 0; n < function->args_count; n++) {
if (!njs_is_valid(p)) {
njs_set_undefined(p);
}
*local++ = p++;
}
return NJS_OK;
}
| 0 |
[
"CWE-416"
] |
njs
|
ad48705bf1f04b4221a5f5b07715ac48b3160d53
| 323,737,482,605,642,830,000,000,000,000,000,000,000 | 55 |
Fixed frame allocation from an awaited frame.
njs_function_frame_save() is used to save the awaited frame when the "await"
instruction is encountered. The saving was done as a memcpy() of the
existing runtime frame.
njs_function_frame_alloc() is used to allocate a new function frame; this
function first tries to use spare preallocated memory from the previous
frame. Previously, this function might result in a use-after-free
when invoked from a restored frame saved with njs_function_frame_save(),
because njs_function_frame_save() left pointers to the spare memory of
the original frame, which may already be freed by the time the saved
frame is restored.
The fix is to erase fields for the spare memory from the saved frame.
This closes #469 issue on Github.
|
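The record above (njs, CWE-416) describes a saved frame that kept pointers into the original frame's spare preallocated memory, which could already be freed when the saved frame was restored. As a rough illustration only, here is a minimal, self-contained C sketch of that pattern and of the described fix (erasing the spare-memory fields on save); the `frame_t` type and function names are hypothetical, not the njs API.

```c
#include <stddef.h>

/* Hypothetical frame type (not the njs API): the spare region described
 * by free/free_size belongs to the live frame, not to copies of it. */
typedef struct {
    unsigned char *free;      /* start of spare preallocated memory */
    size_t         free_size; /* bytes left in the spare region */
    /* ... other saved state ... */
} frame_t;

/* Buggy save: a plain struct copy leaves the saved frame pointing at the
 * original's spare memory, which may be gone by the time it is restored. */
static void frame_save_buggy(frame_t *dst, const frame_t *src)
{
    *dst = *src;
}

/* Fixed save, mirroring the described fix: erase the spare-memory fields
 * so a later allocation from the restored frame cannot reuse freed memory. */
static void frame_save_fixed(frame_t *dst, const frame_t *src)
{
    *dst = *src;
    dst->free = NULL;
    dst->free_size = 0;
}
```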
if (!subtype) subtype = "mp4";
if (out_subtype) *out_subtype = subtype;
if (!mux_ext) mux_ext = "mp4";
if (out_ext) *out_ext = mux_ext;
}
static void dasher_update_rep(GF_DasherCtx *ctx, GF_DashStream *ds)
{
char szCodec[RFC6381_CODEC_NAME_SIZE_MAX];
//Outputs are not yet connected, derive mime from init segment extension
if (!ds->rep->mime_type) {
const char *subtype = NULL;
dasher_get_mime_and_ext(ctx, ds, &subtype, NULL);
if (ds->stream_type==GF_STREAM_VISUAL)
gf_dynstrcat(&ds->rep->mime_type, "video/", NULL);
else if (ds->stream_type==GF_STREAM_AUDIO)
gf_dynstrcat(&ds->rep->mime_type, "audio/", NULL);
else
gf_dynstrcat(&ds->rep->mime_type, "application/", NULL);
gf_dynstrcat(&ds->rep->mime_type, subtype, NULL);
}
ds->rep->bandwidth = ds->bitrate;
if (ds->stream_type==GF_STREAM_VISUAL) {
ds->rep->width = ds->width;
ds->rep->height = ds->height;
if (!ds->rep->sar) {
GF_SAFEALLOC(ds->rep->sar, GF_MPD_Fractional);
}
if (ds->rep->sar) {
ds->rep->sar->num = ds->sar.num;
ds->rep->sar->den = ds->sar.den;
}
if (ds->fps.num && ds->fps.den) {
if (!ds->rep->framerate) {
GF_SAFEALLOC(ds->rep->framerate, GF_MPD_Fractional);
}
if (ds->rep->framerate) {
ds->rep->framerate->num = ds->fps.num;
ds->rep->framerate->den = ds->fps.den;
gf_media_get_reduced_frame_rate(&ds->rep->framerate->num, &ds->rep->framerate->den);
}
}
}
else if (ds->stream_type==GF_STREAM_AUDIO) {
Bool use_cicp = GF_FALSE;
Bool use_dolbyx = GF_FALSE;
GF_MPD_Descriptor *desc;
char value[256];
ds->rep->samplerate = ds->sr;
if (ds->nb_surround || ds->nb_lfe) use_cicp = GF_TRUE;
if ((ds->codec_id==GF_CODECID_MHAS) || (ds->codec_id==GF_CODECID_MPHA)) use_cicp = GF_TRUE;
if ((ds->codec_id==GF_CODECID_DTS_LBR) || (ds->codec_id==GF_CODECID_DTS_CA) || (ds->codec_id==GF_CODECID_DTS_HD_HR)
|| (ds->codec_id==GF_CODECID_DTS_HD_MASTER) || (ds->codec_id==GF_CODECID_DTS_X))
use_cicp = GF_TRUE;
if ((ds->codec_id==GF_CODECID_AC3) || (ds->codec_id==GF_CODECID_EAC3)) {
//if regular MPEG-DASH, use CICP, otherwise use Dolby signaling
if (ctx->profile > GF_DASH_PROFILE_FULL) {
use_dolbyx = GF_TRUE;
}
}
if (use_dolbyx) {
u32 cicp_layout = 0;
if (ds->ch_layout)
cicp_layout = gf_audio_fmt_get_cicp_from_layout(ds->ch_layout);
if (!cicp_layout)
cicp_layout = gf_audio_fmt_get_cicp_layout(ds->nb_ch, ds->nb_surround, ds->nb_lfe);
sprintf(value, "%X", gf_audio_fmt_get_dolby_chanmap(cicp_layout) );
desc = gf_mpd_descriptor_new(NULL, "tag:dolby.com,2014:dash:audio_channel_configuration:2011", value);
}
else if (!use_cicp) {
sprintf(value, "%d", ds->nb_ch);
desc = gf_mpd_descriptor_new(NULL, "urn:mpeg:dash:23003:3:audio_channel_configuration:2011", value);
} else {
sprintf(value, "%d", gf_audio_fmt_get_cicp_layout(ds->nb_ch, ds->nb_surround, ds->nb_lfe));
desc = gf_mpd_descriptor_new(NULL, "urn:mpeg:mpegB:cicp:ChannelConfiguration", value);
}
gf_mpd_del_list(ds->rep->audio_channels, gf_mpd_descriptor_free, GF_TRUE);
gf_list_add(ds->rep->audio_channels, desc);
} else {
}
dasher_get_rfc_6381_codec_name(ctx, ds, szCodec, ((ctx->bs_switch==DASHER_BS_SWITCH_INBAND) || (ctx->bs_switch==DASHER_BS_SWITCH_INBAND_PPS)) ? GF_TRUE : GF_FALSE, GF_TRUE);
if (ds->rep->codecs) gf_free(ds->rep->codecs);
ds->rep->codecs = gf_strdup(szCodec);
if (ds->interlaced) ds->rep->scan_type = GF_MPD_SCANTYPE_INTERLACED;
else {
//profiles forcing scanType=progressive for progressive
switch (ctx->profile) {
case GF_DASH_PROFILE_HBBTV_1_5_ISOBMF_LIVE:
ds->rep->scan_type = GF_MPD_SCANTYPE_PROGRESSIVE;
break;
}
| 0 |
[
"CWE-787"
] |
gpac
|
ea1eca00fd92fa17f0e25ac25652622924a9a6a0
| 220,255,204,634,035,300,000,000,000,000,000,000,000 | 105 |
fixed #2138
|
h2_task *h2_task_create(conn_rec *slave, int stream_id,
const h2_request *req, h2_mplx *m,
h2_bucket_beam *input,
apr_interval_time_t timeout,
apr_size_t output_max_mem)
{
apr_pool_t *pool;
h2_task *task;
ap_assert(slave);
ap_assert(req);
apr_pool_create(&pool, slave->pool);
task = apr_pcalloc(pool, sizeof(h2_task));
if (task == NULL) {
return NULL;
}
task->id = "000";
task->stream_id = stream_id;
task->c = slave;
task->mplx = m;
task->pool = pool;
task->request = req;
task->timeout = timeout;
task->input.beam = input;
task->output.max_buffer = output_max_mem;
return task;
}
| 0 |
[
"CWE-444"
] |
mod_h2
|
825de6a46027b2f4c30d7ff5a0c8b852d639c207
| 308,392,728,500,596,950,000,000,000,000,000,000,000 | 29 |
* Fixed keepalives counter on slave connections.
|
acl_find_proxy_user(const char *user, const char *host, const char *ip,
const char *authenticated_as, bool *proxy_used)
{
uint i;
/* if the proxied and proxy user are the same return OK */
DBUG_ENTER("acl_find_proxy_user");
DBUG_PRINT("info", ("user=%s host=%s ip=%s authenticated_as=%s",
user, host, ip, authenticated_as));
if (!strcmp(authenticated_as, user))
{
DBUG_PRINT ("info", ("user is the same as authenticated_as"));
DBUG_RETURN (NULL);
}
*proxy_used= TRUE;
for (i=0; i < acl_proxy_users.elements; i++)
{
ACL_PROXY_USER *proxy= dynamic_element(&acl_proxy_users, i,
ACL_PROXY_USER *);
if (proxy->matches(host, user, ip, authenticated_as))
DBUG_RETURN(proxy);
}
DBUG_RETURN(NULL);
}
| 0 |
[] |
mysql-server
|
25d1b7e03b9b375a243fabdf0556c063c7282361
| 56,161,848,883,896,500,000,000,000,000,000,000,000 | 26 |
Bug #22722946: integer overflow may lead to wrong results in get_56_lenc_string
|
void rtnl_unlock(void)
{
rtnl_shunlock();
netdev_run_todo();
}
| 0 |
[
"CWE-200"
] |
linux-2.6
|
9ef1d4c7c7aca1cd436612b6ca785b726ffb8ed8
| 30,908,929,663,895,880,000,000,000,000,000,000,000 | 6 |
[NETLINK]: Missing initializations in dumped data
Mostly missing initialization of padding fields of 1 or 2 bytes length,
two instances of uninitialized nlmsgerr->msg of 16 bytes length.
Signed-off-by: Patrick McHardy <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
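The commit message above concerns uninitialized padding bytes being copied out in dumped netlink data (CWE-200). Below is a generic, hedged C sketch of the mitigation pattern, zeroing the whole object before filling its fields so compiler-inserted padding never carries stale memory to another domain; the struct and helper are made up for illustration and are not kernel code.

```c
#include <string.h>

/* Hypothetical wire-format struct with implicit padding between fields. */
struct wire_msg {
    unsigned char  type;   /* padding typically follows here */
    unsigned int   value;
    unsigned short flags;  /* trailing padding on many ABIs */
};

/* Zero the whole object before filling it, so compiler-inserted padding
 * never leaks stale stack or heap contents to the receiver. */
static void fill_msg(struct wire_msg *m, unsigned char type,
                     unsigned int value, unsigned short flags)
{
    memset(m, 0, sizeof(*m));
    m->type  = type;
    m->value = value;
    m->flags = flags;
}
```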
static int txtwrite_get_param(gx_device *dev, char *Param, void *list)
{
gx_device_txtwrite_t *const tdev = (gx_device_txtwrite_t *) dev;
gs_param_list * plist = (gs_param_list *)list;
bool bool_T = true;
if (strcmp(Param, "OutputFile") == 0) {
gs_param_string ofns;
ofns.data = (const byte *)tdev->fname,
ofns.size = strlen(tdev->fname),
ofns.persistent = false;
return param_write_string(plist, "OutputFile", &ofns);
}
if (strcmp(Param, "WantsToUnicode") == 0) {
return param_write_bool(plist, "WantsToUnicode", &bool_T);
}
if (strcmp(Param, "PreserveTrMode") == 0) {
return param_write_bool(plist, "PreserveTrMode", &bool_T);
}
if (strcmp(Param, "HighLevelDevice") == 0) {
return param_write_bool(plist, "HighLevelDevice", &bool_T);
}
return_error(gs_error_undefined);
}
| 0 |
[
"CWE-476"
] |
ghostpdl
|
407c98a38c3a6ac1681144ed45cc2f4fc374c91f
| 274,680,477,151,557,300,000,000,000,000,000,000,000 | 25 |
txtwrite - guard against using GS_NO_GLYPH to retrieve Unicode values
Bug 701822 "Segmentation fault at psi/iname.c:296 in names_index_ref"
Avoid using a glyph with the value GS_NO_GLYPH to retrieve a glyph
name or Unicode code point from the glyph ID, as this is not a valid
ID.
|
cTValue *lj_debug_frame(lua_State *L, int level, int *size)
{
cTValue *frame, *nextframe, *bot = tvref(L->stack);
/* Traverse frames backwards. */
for (nextframe = frame = L->base-1; frame > bot; ) {
if (frame_gc(frame) == obj2gco(L))
level++; /* Skip dummy frames. See lj_meta_call(). */
if (level-- == 0) {
*size = (int)(nextframe - frame);
return frame; /* Level found. */
}
nextframe = frame;
if (frame_islua(frame)) {
frame = frame_prevl(frame);
} else {
if (frame_isvarg(frame))
level++; /* Skip vararg pseudo-frame. */
frame = frame_prevd(frame);
}
}
*size = level;
return NULL; /* Level not found. */
}
| 0 |
[
"CWE-125"
] |
LuaJIT
|
e296f56b825c688c3530a981dc6b495d972f3d01
| 273,942,778,566,226,600,000,000,000,000,000,000,000 | 23 |
Call error function on rethrow after trace exit.
|
DECLAREContigPutFunc(putRGBAAcontig8bittile)
{
int samplesperpixel = img->samplesperpixel;
(void) x; (void) y;
fromskew *= samplesperpixel;
for( ; h > 0; --h) {
UNROLL8(w, NOP,
*cp++ = PACK4(pp[0], pp[1], pp[2], pp[3]);
pp += samplesperpixel);
cp += toskew;
pp += fromskew;
}
}
| 0 |
[
"CWE-787"
] |
libtiff
|
4bb584a35f87af42d6cf09d15e9ce8909a839145
| 51,988,378,302,548,330,000,000,000,000,000,000,000 | 14 |
RGBA interface: fix integer overflow potentially causing write heap buffer overflow, especially on 32 bit builds. Fixes https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=16443. Credit to OSS Fuzz
|
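The libtiff record above concerns an integer overflow in the RGBA interface that could lead to a heap write overflow, especially on 32-bit builds. Below is a minimal sketch, not the actual libtiff patch, of the usual guard: check that width * height * 4 fits in size_t before allocating.

```c
#include <stdint.h>
#include <stdlib.h>

/* Allocate a w*h RGBA buffer, refusing sizes whose byte count would not
 * fit in size_t (easy to hit on 32-bit builds where size_t is 32 bits). */
static uint8_t *alloc_rgba(uint32_t w, uint32_t h)
{
    if (w == 0 || h == 0)
        return NULL;
    if (h > SIZE_MAX / 4 / (size_t)w)   /* would w * h * 4 overflow? */
        return NULL;
    return malloc((size_t)w * h * 4);
}
```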
static void usb_set_lpm_parameters(struct usb_device *udev)
{
struct usb_hub *hub;
unsigned int port_to_port_delay;
unsigned int udev_u1_del;
unsigned int udev_u2_del;
unsigned int hub_u1_del;
unsigned int hub_u2_del;
if (!udev->lpm_capable || udev->speed != USB_SPEED_SUPER)
return;
hub = usb_hub_to_struct_hub(udev->parent);
/* It doesn't take time to transition the roothub into U0, since it
* doesn't have an upstream link.
*/
if (!hub)
return;
udev_u1_del = udev->bos->ss_cap->bU1devExitLat;
udev_u2_del = le16_to_cpu(udev->bos->ss_cap->bU2DevExitLat);
hub_u1_del = udev->parent->bos->ss_cap->bU1devExitLat;
hub_u2_del = le16_to_cpu(udev->parent->bos->ss_cap->bU2DevExitLat);
usb_set_lpm_mel(udev, &udev->u1_params, udev_u1_del,
hub, &udev->parent->u1_params, hub_u1_del);
usb_set_lpm_mel(udev, &udev->u2_params, udev_u2_del,
hub, &udev->parent->u2_params, hub_u2_del);
/*
* Appendix C, section C.2.2.2, says that there is a slight delay from
* when the parent hub notices the downstream port is trying to
* transition to U0 to when the hub initiates a U0 transition on its
* upstream port. The section says the delays are tPort2PortU1EL and
* tPort2PortU2EL, but it doesn't define what they are.
*
* The hub chapter, sections 10.4.2.4 and 10.4.2.5 seem to be talking
* about the same delays. Use the maximum delay calculations from those
* sections. For U1, it's tHubPort2PortExitLat, which is 1us max. For
* U2, it's tHubPort2PortExitLat + U2DevExitLat - U1DevExitLat. I
* assume the device exit latencies they are talking about are the hub
* exit latencies.
*
* What do we do if the U2 exit latency is less than the U1 exit
* latency? It's possible, although not likely...
*/
port_to_port_delay = 1;
usb_set_lpm_pel(udev, &udev->u1_params, udev_u1_del,
hub, &udev->parent->u1_params, hub_u1_del,
port_to_port_delay);
if (hub_u2_del > hub_u1_del)
port_to_port_delay = 1 + hub_u2_del - hub_u1_del;
else
port_to_port_delay = 1 + hub_u1_del;
usb_set_lpm_pel(udev, &udev->u2_params, udev_u2_del,
hub, &udev->parent->u2_params, hub_u2_del,
port_to_port_delay);
/* Now that we've got PEL, calculate SEL. */
usb_set_lpm_sel(udev, &udev->u1_params);
usb_set_lpm_sel(udev, &udev->u2_params);
}
| 0 |
[
"CWE-703"
] |
linux
|
e50293ef9775c5f1cf3fcc093037dd6a8c5684ea
| 337,352,646,062,237,370,000,000,000,000,000,000,000 | 66 |
USB: fix invalid memory access in hub_activate()
Commit 8520f38099cc ("USB: change hub initialization sleeps to
delayed_work") changed the hub_activate() routine to make part of it
run in a workqueue. However, the commit failed to take a reference to
the usb_hub structure or to lock the hub interface while doing so. As
a result, if a hub is plugged in and quickly unplugged before the work
routine can run, the routine will try to access memory that has been
deallocated. Or, if the hub is unplugged while the routine is
running, the memory may be deallocated while it is in active use.
This patch fixes the problem by taking a reference to the usb_hub at
the start of hub_activate() and releasing it at the end (when the work
is finished), and by locking the hub interface while the work routine
is running. It also adds a check at the start of the routine to see
if the hub has already been disconnected, in which case nothing should be
done.
Signed-off-by: Alan Stern <[email protected]>
Reported-by: Alexandru Cornea <[email protected]>
Tested-by: Alexandru Cornea <[email protected]>
Fixes: 8520f38099cc ("USB: change hub initialization sleeps to delayed_work")
CC: <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
|
static int nf_tables_fill_chain_info(struct sk_buff *skb, struct net *net,
u32 portid, u32 seq, int event, u32 flags,
int family, const struct nft_table *table,
const struct nft_chain *chain)
{
struct nlmsghdr *nlh;
struct nfgenmsg *nfmsg;
event |= NFNL_SUBSYS_NFTABLES << 8;
nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct nfgenmsg), flags);
if (nlh == NULL)
goto nla_put_failure;
nfmsg = nlmsg_data(nlh);
nfmsg->nfgen_family = family;
nfmsg->version = NFNETLINK_V0;
nfmsg->res_id = htons(net->nft.base_seq & 0xffff);
if (nla_put_string(skb, NFTA_CHAIN_TABLE, table->name))
goto nla_put_failure;
if (nla_put_be64(skb, NFTA_CHAIN_HANDLE, cpu_to_be64(chain->handle)))
goto nla_put_failure;
if (nla_put_string(skb, NFTA_CHAIN_NAME, chain->name))
goto nla_put_failure;
if (chain->flags & NFT_BASE_CHAIN) {
const struct nft_base_chain *basechain = nft_base_chain(chain);
const struct nf_hook_ops *ops = &basechain->ops[0];
struct nlattr *nest;
nest = nla_nest_start(skb, NFTA_CHAIN_HOOK);
if (nest == NULL)
goto nla_put_failure;
if (nla_put_be32(skb, NFTA_HOOK_HOOKNUM, htonl(ops->hooknum)))
goto nla_put_failure;
if (nla_put_be32(skb, NFTA_HOOK_PRIORITY, htonl(ops->priority)))
goto nla_put_failure;
nla_nest_end(skb, nest);
if (nla_put_be32(skb, NFTA_CHAIN_POLICY,
htonl(basechain->policy)))
goto nla_put_failure;
if (nla_put_string(skb, NFTA_CHAIN_TYPE, basechain->type->name))
goto nla_put_failure;
if (nft_dump_stats(skb, nft_base_chain(chain)->stats))
goto nla_put_failure;
}
if (nla_put_be32(skb, NFTA_CHAIN_USE, htonl(chain->use)))
goto nla_put_failure;
return nlmsg_end(skb, nlh);
nla_put_failure:
nlmsg_trim(skb, nlh);
return -1;
}
| 0 |
[
"CWE-19"
] |
nf
|
a2f18db0c68fec96631c10cad9384c196e9008ac
| 150,682,087,438,753,160,000,000,000,000,000,000,000 | 59 |
netfilter: nf_tables: fix flush ruleset chain dependencies
Jumping between chains doesn't mix well with flush ruleset. Rules
from a different chain and set elements may still refer to us.
[ 353.373791] ------------[ cut here ]------------
[ 353.373845] kernel BUG at net/netfilter/nf_tables_api.c:1159!
[ 353.373896] invalid opcode: 0000 [#1] SMP
[ 353.373942] Modules linked in: intel_powerclamp uas iwldvm iwlwifi
[ 353.374017] CPU: 0 PID: 6445 Comm: 31c3.nft Not tainted 3.18.0 #98
[ 353.374069] Hardware name: LENOVO 5129CTO/5129CTO, BIOS 6QET47WW (1.17 ) 07/14/2010
[...]
[ 353.375018] Call Trace:
[ 353.375046] [<ffffffff81964c31>] ? nf_tables_commit+0x381/0x540
[ 353.375101] [<ffffffff81949118>] nfnetlink_rcv+0x3d8/0x4b0
[ 353.375150] [<ffffffff81943fc5>] netlink_unicast+0x105/0x1a0
[ 353.375200] [<ffffffff8194438e>] netlink_sendmsg+0x32e/0x790
[ 353.375253] [<ffffffff818f398e>] sock_sendmsg+0x8e/0xc0
[ 353.375300] [<ffffffff818f36b9>] ? move_addr_to_kernel.part.20+0x19/0x70
[ 353.375357] [<ffffffff818f44f9>] ? move_addr_to_kernel+0x19/0x30
[ 353.375410] [<ffffffff819016d2>] ? verify_iovec+0x42/0xd0
[ 353.375459] [<ffffffff818f3e10>] ___sys_sendmsg+0x3f0/0x400
[ 353.375510] [<ffffffff810615fa>] ? native_sched_clock+0x2a/0x90
[ 353.375563] [<ffffffff81176697>] ? acct_account_cputime+0x17/0x20
[ 353.375616] [<ffffffff8110dc78>] ? account_user_time+0x88/0xa0
[ 353.375667] [<ffffffff818f4bbd>] __sys_sendmsg+0x3d/0x80
[ 353.375719] [<ffffffff81b184f4>] ? int_check_syscall_exit_work+0x34/0x3d
[ 353.375776] [<ffffffff818f4c0d>] SyS_sendmsg+0xd/0x20
[ 353.375823] [<ffffffff81b1826d>] system_call_fastpath+0x16/0x1b
Release objects in this order: rules -> sets -> chains -> tables, to
make sure no references to chains are held anymore.
Reported-by: Asbjoern Sloth Toennesen <[email protected]>
Signed-off-by: Pablo Neira Ayuso <[email protected]>
|
int gg_change_status_descr(struct gg_session *sess, int status, const char *descr)
{
struct gg_new_status80 p;
char *gen_descr = NULL;
int descr_len = 0;
int descr_null_len = 0;
int res;
gg_debug_session(sess, GG_DEBUG_FUNCTION, "** gg_change_status_descr(%p, %d, \"%s\");\n", sess, status, descr);
if (!sess) {
errno = EFAULT;
return -1;
}
if (sess->state != GG_STATE_CONNECTED) {
errno = ENOTCONN;
return -1;
}
sess->status = status;
if (descr != NULL && sess->encoding != GG_ENCODING_UTF8) {
descr = gen_descr = gg_encoding_convert(descr, GG_ENCODING_CP1250, GG_ENCODING_UTF8, -1, -1);
if (!gen_descr)
return -1;
}
if (descr) {
descr_len = strlen(descr);
if (descr_len > GG_STATUS_DESCR_MAXSIZE)
descr_len = GG_STATUS_DESCR_MAXSIZE;
/* XXX remember not to truncate in the middle of a UTF-8 character */
} else {
descr = "";
}
p.status = gg_fix32(status);
p.flags = gg_fix32(sess->status_flags);
p.description_size = gg_fix32(descr_len);
if (sess->protocol_version >= GG_PROTOCOL_110) {
p.flags = gg_fix32(0x00000014);
descr_null_len = 1;
}
res = gg_send_packet(sess, GG_NEW_STATUS80,
&p, sizeof(p),
descr, descr_len,
"\x00", descr_null_len,
NULL);
free(gen_descr);
if (GG_S_NA(status)) {
sess->state = GG_STATE_DISCONNECTING;
sess->timeout = GG_TIMEOUT_DISCONNECT;
}
return res;
}
| 0 |
[
"CWE-310"
] |
libgadu
|
23644f1fb8219031b3cac93289a588b05f90226b
| 62,167,640,491,990,440,000,000,000,000,000,000,000 | 64 |
Fix for limiting the description length.
|
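The libgadu record above truncates a status description to GG_STATUS_DESCR_MAXSIZE, and its comment notes that the cut must not land in the middle of a UTF-8 sequence. A small, self-contained sketch of that idea (hypothetical helper, not libgadu's code): back up from the cut point while it sits on a UTF-8 continuation byte.

```c
#include <stddef.h>

/* Truncate a UTF-8 string to at most `max` bytes without splitting a
 * multi-byte sequence: while the byte at the cut point is a continuation
 * byte (10xxxxxx), the cut falls inside a character, so back up.
 * Returns the adjusted length. Assumes max < len when truncating. */
static size_t utf8_truncate(const char *s, size_t len, size_t max)
{
    if (len <= max)
        return len;
    len = max;
    while (len > 0 && ((unsigned char)s[len] & 0xC0) == 0x80)
        len--;
    return len;
}
```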
int ImagingLibTiffInit(ImagingCodecState state, int fp, uint32 offset) {
TIFFSTATE *clientstate = (TIFFSTATE *)state->context;
TRACE(("initing libtiff\n"));
TRACE(("filepointer: %d \n", fp));
TRACE(("State: count %d, state %d, x %d, y %d, ystep %d\n", state->count, state->state,
state->x, state->y, state->ystep));
TRACE(("State: xsize %d, ysize %d, xoff %d, yoff %d \n", state->xsize, state->ysize,
state->xoff, state->yoff));
TRACE(("State: bits %d, bytes %d \n", state->bits, state->bytes));
TRACE(("State: context %p \n", state->context));
clientstate->loc = 0;
clientstate->size = 0;
clientstate->data = 0;
clientstate->fp = fp;
clientstate->ifd = offset;
clientstate->eof = 0;
return 1;
}
| 0 |
[
"CWE-190",
"CWE-787"
] |
Pillow
|
4e2def2539ec13e53a82e06c4b3daf00454100c4
| 61,100,325,867,220,010,000,000,000,000,000,000,000 | 21 |
Overflow checks for realloc for tiff decoding
|
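The Pillow record above is summarized only as "Overflow checks for realloc for tiff decoding" (CWE-190/787). As a generic sketch of that class of check, and not the actual Pillow patch: reject a count * item_size computation that would wrap before passing it to realloc().

```c
#include <stdint.h>
#include <stdlib.h>

/* Grow a buffer to hold `count` items of `item_size` bytes, rejecting
 * requests whose byte count would wrap before calling realloc(). */
static void *grow_buffer(void *buf, size_t count, size_t item_size)
{
    if (item_size != 0 && count > SIZE_MAX / item_size)
        return NULL;   /* count * item_size would overflow size_t */
    return realloc(buf, count * item_size);
}
```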
lookup_cset(ushort nrc_code, uchar csmask, bool enabled)
{
static struct {
ushort design;
uchar cstype; // 1: 94-character set, 2: 96-character set, 3: both
bool free; // does not need NRC enabling
uchar cs;
} csdesignations[] = {
{'B', 1, 1, CSET_ASCII}, // ASCII
{'A', 3, 1, CSET_GBCHR}, // UK Latin-1
{'0', 1, 1, CSET_LINEDRW}, // DEC Special Line Drawing
{'>', 1, 1, CSET_TECH}, // DEC Technical
{'U', 1, 1, CSET_OEM}, // OEM Codepage 437
{'<', 1, 1, CSET_DECSUPP}, // DEC User-preferred Supplemental (VT200)
{CPAIR('%', '5'), 1, 1, CSET_DECSPGR}, // DEC Supplementary (VT300)
// definitions for NRC support:
{'4', 1, 0, CSET_NL}, // Dutch
{'C', 1, 0, CSET_FI}, // Finnish
{'5', 1, 0, CSET_FI}, // Finnish
{'R', 1, 0, CSET_FR}, // French
{'f', 1, 0, CSET_FR}, // French
{'Q', 1, 0, CSET_CA}, // French Canadian (VT200, VT300)
{'9', 1, 0, CSET_CA}, // French Canadian (VT200, VT300)
{'K', 1, 0, CSET_DE}, // German
{'Y', 1, 0, CSET_IT}, // Italian
{'`', 1, 0, CSET_NO}, // Norwegian/Danish
{'E', 1, 0, CSET_NO}, // Norwegian/Danish
{'6', 1, 0, CSET_NO}, // Norwegian/Danish
{CPAIR('%', '6'), 1, 0, CSET_PT}, // Portuguese (VT300)
{'Z', 1, 0, CSET_ES}, // Spanish
{'H', 1, 0, CSET_SE}, // Swedish
{'7', 1, 0, CSET_SE}, // Swedish
{'=', 1, 0, CSET_CH}, // Swiss
// 96-character sets (xterm 336)
{'L', 2, 1, CSET_ISO_Latin_Cyrillic},
{'F', 2, 1, CSET_ISO_Greek_Supp},
{'H', 2, 1, CSET_ISO_Hebrew},
{'M', 2, 1, CSET_ISO_Latin_5},
{CPAIR('"', '?'), 1, 1, CSET_DEC_Greek_Supp},
{CPAIR('"', '4'), 1, 1, CSET_DEC_Hebrew_Supp},
{CPAIR('%', '0'), 1, 1, CSET_DEC_Turkish_Supp},
{CPAIR('&', '4'), 1, 1, CSET_DEC_Cyrillic},
{CPAIR('"', '>'), 1, 0, CSET_NRCS_Greek},
{CPAIR('%', '='), 1, 0, CSET_NRCS_Hebrew},
{CPAIR('%', '2'), 1, 0, CSET_NRCS_Turkish},
};
for (uint i = 0; i < lengthof(csdesignations); i++)
if (csdesignations[i].design == nrc_code
&& (csdesignations[i].cstype & csmask)
&& (csdesignations[i].free || enabled)
)
{
return csdesignations[i].cs;
}
return 0;
}
| 0 |
[
"CWE-703",
"CWE-770"
] |
mintty
|
bd52109993440b6996760aaccb66e68e782762b9
| 247,441,707,269,293,270,000,000,000,000,000,000,000 | 56 |
tame some window operations, just in case
|
hook_print_exec (struct t_gui_buffer *buffer, struct t_gui_line *line)
{
struct t_hook *ptr_hook, *next_hook;
char *prefix_no_color, *message_no_color;
int tags_match, tag_found, i, j;
if (!line->data->message || !line->data->message[0])
return;
prefix_no_color = (line->data->prefix) ?
gui_color_decode (line->data->prefix, NULL) : NULL;
message_no_color = gui_color_decode (line->data->message, NULL);
if (!message_no_color)
{
if (prefix_no_color)
free (prefix_no_color);
return;
}
hook_exec_start ();
ptr_hook = weechat_hooks[HOOK_TYPE_PRINT];
while (ptr_hook)
{
next_hook = ptr_hook->next_hook;
if (!ptr_hook->deleted
&& !ptr_hook->running
&& (!HOOK_PRINT(ptr_hook, buffer)
|| (buffer == HOOK_PRINT(ptr_hook, buffer)))
&& (!HOOK_PRINT(ptr_hook, message)
|| !HOOK_PRINT(ptr_hook, message)[0]
|| string_strcasestr (prefix_no_color, HOOK_PRINT(ptr_hook, message))
|| string_strcasestr (message_no_color, HOOK_PRINT(ptr_hook, message))))
{
/* check if tags match */
if (HOOK_PRINT(ptr_hook, tags_array))
{
/* if there are tags in message printed */
if (line->data->tags_array)
{
tags_match = 1;
for (i = 0; i < HOOK_PRINT(ptr_hook, tags_count); i++)
{
/* search for tag in message */
tag_found = 0;
for (j = 0; j < line->data->tags_count; j++)
{
if (string_strcasecmp (HOOK_PRINT(ptr_hook, tags_array)[i],
line->data->tags_array[j]) == 0)
{
tag_found = 1;
break;
}
}
/* tag was asked by hook but not found in message? */
if (!tag_found)
{
tags_match = 0;
break;
}
}
}
else
tags_match = 0;
}
else
tags_match = 1;
/* run callback */
if (tags_match)
{
ptr_hook->running = 1;
(void) (HOOK_PRINT(ptr_hook, callback))
(ptr_hook->callback_data, buffer, line->data->date,
line->data->tags_count,
(const char **)line->data->tags_array,
(int)line->data->displayed, (int)line->data->highlight,
(HOOK_PRINT(ptr_hook, strip_colors)) ? prefix_no_color : line->data->prefix,
(HOOK_PRINT(ptr_hook, strip_colors)) ? message_no_color : line->data->message);
ptr_hook->running = 0;
}
}
ptr_hook = next_hook;
}
if (prefix_no_color)
free (prefix_no_color);
if (message_no_color)
free (message_no_color);
hook_exec_end ();
}
| 0 |
[
"CWE-20"
] |
weechat
|
efb795c74fe954b9544074aafcebb1be4452b03a
| 182,408,608,960,722,100,000,000,000,000,000,000,000 | 95 |
core: do not call shell to execute command in hook_process (fix security problem when a plugin/script gives untrusted command) (bug #37764)
|
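The weechat record above removes the shell from hook_process command execution so untrusted commands are not interpreted by /bin/sh. A hedged, generic C sketch of that approach, not weechat's implementation: run the program with fork() and execvp() on an explicit argument vector, so shell metacharacters in arguments cannot inject extra commands.

```c
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

/* Run a program with an explicit argument vector; no shell is involved.
 * Returns the child's wait status, or -1 on error. */
static int run_command(char *const argv[])
{
    pid_t pid = fork();
    int status;

    if (pid < 0)
        return -1;
    if (pid == 0) {                 /* child */
        execvp(argv[0], argv);
        _exit(127);                 /* exec failed */
    }
    if (waitpid(pid, &status, 0) < 0)
        return -1;
    return status;
}
```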
GF_Err elng_box_size(GF_Box *s)
{
GF_ExtendedLanguageBox *ptr = (GF_ExtendedLanguageBox *)s;
if (ptr->extended_language) {
ptr->size += strlen(ptr->extended_language)+1;
}
return GF_OK;
}
| 0 |
[
"CWE-787"
] |
gpac
|
388ecce75d05e11fc8496aa4857b91245007d26e
| 198,071,997,898,998,240,000,000,000,000,000,000,000 | 9 |
fixed #1587
|
static struct bpf_prog *bpf_prog_kallsyms_find(unsigned long addr)
{
struct latch_tree_node *n;
if (!bpf_jit_kallsyms_enabled())
return NULL;
n = latch_tree_find((void *)addr, &bpf_tree, &bpf_tree_ops);
return n ?
container_of(n, struct bpf_prog_aux, ksym_tnode)->prog :
NULL;
}
| 0 |
[
"CWE-120"
] |
linux
|
050fad7c4534c13c8eb1d9c2ba66012e014773cb
| 91,508,063,362,169,620,000,000,000,000,000,000,000 | 12 |
bpf: fix truncated jump targets on heavy expansions
Recently during testing, I ran into the following panic:
[ 207.892422] Internal error: Accessing user space memory outside uaccess.h routines: 96000004 [#1] SMP
[ 207.901637] Modules linked in: binfmt_misc [...]
[ 207.966530] CPU: 45 PID: 2256 Comm: test_verifier Tainted: G W 4.17.0-rc3+ #7
[ 207.974956] Hardware name: FOXCONN R2-1221R-A4/C2U4N_MB, BIOS G31FB18A 03/31/2017
[ 207.982428] pstate: 60400005 (nZCv daif +PAN -UAO)
[ 207.987214] pc : bpf_skb_load_helper_8_no_cache+0x34/0xc0
[ 207.992603] lr : 0xffff000000bdb754
[ 207.996080] sp : ffff000013703ca0
[ 207.999384] x29: ffff000013703ca0 x28: 0000000000000001
[ 208.004688] x27: 0000000000000001 x26: 0000000000000000
[ 208.009992] x25: ffff000013703ce0 x24: ffff800fb4afcb00
[ 208.015295] x23: ffff00007d2f5038 x22: ffff00007d2f5000
[ 208.020599] x21: fffffffffeff2a6f x20: 000000000000000a
[ 208.025903] x19: ffff000009578000 x18: 0000000000000a03
[ 208.031206] x17: 0000000000000000 x16: 0000000000000000
[ 208.036510] x15: 0000ffff9de83000 x14: 0000000000000000
[ 208.041813] x13: 0000000000000000 x12: 0000000000000000
[ 208.047116] x11: 0000000000000001 x10: ffff0000089e7f18
[ 208.052419] x9 : fffffffffeff2a6f x8 : 0000000000000000
[ 208.057723] x7 : 000000000000000a x6 : 00280c6160000000
[ 208.063026] x5 : 0000000000000018 x4 : 0000000000007db6
[ 208.068329] x3 : 000000000008647a x2 : 19868179b1484500
[ 208.073632] x1 : 0000000000000000 x0 : ffff000009578c08
[ 208.078938] Process test_verifier (pid: 2256, stack limit = 0x0000000049ca7974)
[ 208.086235] Call trace:
[ 208.088672] bpf_skb_load_helper_8_no_cache+0x34/0xc0
[ 208.093713] 0xffff000000bdb754
[ 208.096845] bpf_test_run+0x78/0xf8
[ 208.100324] bpf_prog_test_run_skb+0x148/0x230
[ 208.104758] sys_bpf+0x314/0x1198
[ 208.108064] el0_svc_naked+0x30/0x34
[ 208.111632] Code: 91302260 f9400001 f9001fa1 d2800001 (29500680)
[ 208.117717] ---[ end trace 263cb8a59b5bf29f ]---
The program itself which caused this had a long jump over the whole
instruction sequence where all of the inner instructions required
heavy expansions into multiple BPF instructions. Additionally, I also
had BPF hardening enabled which requires once more rewrites of all
constant values in order to blind them. Each time we rewrite insns,
bpf_adj_branches() would need to potentially adjust branch targets
which cross the patchlet boundary to accommodate for the additional
delta. Eventually that lead to the case where the target offset could
not fit into insn->off's upper 0x7fff limit anymore where then offset
wraps around becoming negative (in s16 universe), or vice versa
depending on the jump direction.
Therefore it becomes necessary to detect and reject any such occasions
in a generic way for native eBPF and cBPF to eBPF migrations. For
the latter we can simply check bounds in the bpf_convert_filter()'s
BPF_EMIT_JMP helper macro and bail out once we surpass limits. The
bpf_patch_insn_single() for native eBPF (and cBPF to eBPF in case
of subsequent hardening) is a bit more complex in that we need to
detect such truncations before hitting the bpf_prog_realloc(). Thus
the latter is split into an extra pass to probe problematic offsets
on the original program in order to fail early. With that in place
and carefully tested I no longer hit the panic and the rewrites are
rejected properly. The above example panic I've seen on bpf-next,
though the issue itself is generic in that a guard against this issue
in bpf seems more appropriate in this case.
Signed-off-by: Daniel Borkmann <[email protected]>
Acked-by: Martin KaFai Lau <[email protected]>
Signed-off-by: Alexei Starovoitov <[email protected]>
|
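The bpf record above describes branch offsets overflowing their signed 16-bit field after heavy instruction expansion, so the rewrite has to be rejected rather than allowed to wrap. A minimal sketch of such a range check, using made-up names rather than the kernel's insn structures:

```c
#include <stdbool.h>
#include <stdint.h>

/* Check whether a branch offset, after growing by `delta` inserted
 * instructions, still fits the signed 16-bit offset field. If it does
 * not, the rewrite must be rejected instead of silently wrapping. */
static bool branch_offset_fits(int32_t off, int32_t delta)
{
    int32_t new_off = off + delta;

    return new_off >= INT16_MIN && new_off <= INT16_MAX;
}
```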
DEFUN (neighbor_interface,
neighbor_interface_cmd,
NEIGHBOR_CMD "interface WORD",
NEIGHBOR_STR
NEIGHBOR_ADDR_STR
"Interface\n"
"Interface name\n")
{
return peer_interface_vty (vty, argv[0], argv[1]);
}
| 0 |
[
"CWE-125"
] |
frr
|
6d58272b4cf96f0daa846210dd2104877900f921
| 277,820,719,439,074,140,000,000,000,000,000,000,000 | 10 |
[bgpd] cleanup, compact and consolidate capability parsing code
2007-07-26 Paul Jakma <[email protected]>
* (general) Clean up and compact capability parsing slightly.
Consolidate validation of length and logging of generic TLV, and
memcpy of capability data, thus removing such from cap specifc
code (not always present or correct).
* bgp_open.h: Add structures for the generic capability TLV header
and for the data formats of the various specific capabilities we
support. Hence remove the badly named, or else misdefined, struct
capability.
* bgp_open.c: (bgp_capability_vty_out) Use struct capability_mp_data.
Do the length checks *before* memcpy()'ing based on that length
(stored capability - should have been validated anyway on input,
but..).
(bgp_afi_safi_valid_indices) new function to validate (afi,safi)
which is about to be used as index into arrays, consolidates
several instances of same, at least one of which appeared to be
incomplete..
(bgp_capability_mp) Much condensed.
(bgp_capability_orf_entry) New, process one ORF entry
(bgp_capability_orf) Condensed. Fixed to process all ORF entries.
(bgp_capability_restart) Condensed, and fixed to use a
cap-specific type, rather than abusing capability_mp.
(struct message capcode_str) added to aid generic logging.
(size_t cap_minsizes[]) added to aid generic validation of
capability length field.
(bgp_capability_parse) Generic logging and validation of TLV
consolidated here. Code compacted as much as possible.
* bgp_packet.c: (bgp_open_receive) Capability parsers now use
streams, so no more need here to manually fudge the input stream
getp.
(bgp_capability_msg_parse) use struct capability_mp_data. Validate
lengths /before/ memcpy. Use bgp_afi_safi_valid_indices.
(bgp_capability_receive) Exported for use by test harness.
* bgp_vty.c: (bgp_show_summary) fix conversion warning
(bgp_show_peer) ditto
* bgp_debug.h: Fix storage 'extern' after type 'const'.
* lib/log.c: (mes_lookup) warning about code not being in
same-number array slot should be debug, not warning. E.g. BGP
has several discontigious number spaces, allocating from
different parts of a space is not uncommon (e.g. IANA
assigned versus vendor-assigned code points in some number
space).
|
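The frr changelog above stresses doing the length checks before memcpy()'ing capability data based on an advertised length. A rough, generic sketch of that validation order (hypothetical helper, not bgpd code):

```c
#include <stddef.h>
#include <string.h>

/* Copy a TLV's data only after validating the advertised length against
 * both the minimum for that capability and the bytes actually present,
 * i.e. do the length checks before the memcpy based on that length. */
static int copy_capability(void *dst, size_t dst_size,
                           const unsigned char *data, size_t data_len,
                           size_t advertised_len, size_t min_len)
{
    if (advertised_len < min_len)
        return -1;                 /* shorter than the capability allows */
    if (advertised_len > data_len || advertised_len > dst_size)
        return -1;                 /* longer than what we actually have  */
    memcpy(dst, data, advertised_len);
    return 0;
}
```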
_pblock_assert_pb_intplugin(Slapi_PBlock *pblock)
{
if (pblock->pb_intplugin == NULL) {
pblock->pb_intplugin = (slapi_pblock_intplugin *)slapi_ch_calloc(1, sizeof(slapi_pblock_intplugin));
}
}
| 0 |
[
"CWE-415"
] |
389-ds-base
|
a3c298f8140d3e4fa1bd5a670f1bb965a21a9b7b
| 98,072,460,038,094,360,000,000,000,000,000,000,000 | 6 |
Issue 5218 - double-free of the virtual attribute context in persistent search (#5219)
description:
A search is processed by a worker using a private pblock.
If the search is persistent, the worker spawns a thread
and duplicates its private pblock so that the spawned
thread can continue to process the persistent search.
The worker then ends the initial search, reinitializes (frees) its private pblock,
and returns to monitoring the wait_queue.
When the persistent search completes, it frees the duplicated
pblock.
The problem is that the private pblock and the duplicated pblock
refer to the same structure (pb_vattr_context).
That can lead to a double free
Fix:
When cloning the pblock (slapi_pblock_clone), make sure
to transfer the references inside the original (private)
pblock to the target (cloned) one.
That includes the pb_vattr_context pointer.
Reviewed by: Mark Reynolds, James Chapman, Pierre Rogier (Thanks !)
Co-authored-by: Mark Reynolds <[email protected]>
|
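The 389-ds-base record above fixes a double free by transferring the pb_vattr_context reference from the original pblock to the clone. A tiny generic sketch of that ownership-transfer idea with a made-up block_t type, not the actual slapi_pblock code:

```c
#include <stdlib.h>

/* Hypothetical block holding an owned context pointer. */
typedef struct {
    void *vattr_ctx;   /* owned: exactly one block may free it */
    /* ... other fields ... */
} block_t;

/* Clone a block and transfer ownership of the context to the clone,
 * mirroring the described fix: the source drops its reference so the
 * context cannot be freed twice when both blocks are destroyed. */
static void block_clone(block_t *dst, block_t *src)
{
    *dst = *src;
    src->vattr_ctx = NULL;   /* src no longer owns the context */
}

static void block_destroy(block_t *b)
{
    free(b->vattr_ctx);      /* safe: only the current owner holds it */
    b->vattr_ctx = NULL;
}
```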
void fx_TypedArray_prototype_length_get(txMachine* the)
{
txSlot* instance = fxCheckTypedArrayInstance(the, mxThis);
txSlot* dispatch = instance->next;
txSlot* view = dispatch->next;
txSlot* buffer = view->next;
txU2 shift = dispatch->value.typedArray.dispatch->shift;
mxResult->kind = XS_INTEGER_KIND;
mxResult->value.integer = fxGetDataViewSize(the, view, buffer) >> shift;
}
| 0 |
[
"CWE-125"
] |
moddable
|
135aa9a4a6a9b49b60aa730ebc3bcc6247d75c45
| 255,206,662,233,585,140,000,000,000,000,000,000,000 | 10 |
XS: #896
|
static bool aio_read_events(struct kioctx *ctx, long min_nr, long nr,
struct io_event __user *event, long *i)
{
long ret = aio_read_events_ring(ctx, event + *i, nr - *i);
if (ret > 0)
*i += ret;
if (unlikely(atomic_read(&ctx->dead)))
ret = -EINVAL;
if (!*i)
*i = ret;
return ret < 0 || *i >= min_nr;
}
| 0 |
[
"CWE-399"
] |
linux
|
d558023207e008a4476a3b7bb8706b2a2bf5d84f
| 253,976,114,520,181,400,000,000,000,000,000,000,000 | 16 |
aio: prevent double free in ioctx_alloc
ioctx_alloc() calls aio_setup_ring() to allocate a ring. If aio_setup_ring()
fails to do so it would call aio_free_ring() before returning, but
ioctx_alloc() would call aio_free_ring() again causing a double free of
the ring.
This is easily reproducible from userspace.
Signed-off-by: Sasha Levin <[email protected]>
Signed-off-by: Benjamin LaHaise <[email protected]>
|
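The aio record above is a classic double free: the helper frees the ring on failure and the caller frees it again. A hedged, self-contained sketch of the ownership convention that avoids it (hypothetical names, not the kernel aio code): whichever function cleans up on failure must be the only one that does.

```c
#include <stdlib.h>

/* Hypothetical ring setup: on failure it releases anything it allocated
 * and reports failure; the caller must then NOT release it again. */
static int ring_setup(void **ring)
{
    *ring = malloc(4096);
    if (!*ring)
        return -1;
    /* ... further setup; on any later error the helper itself would do:
     *     free(*ring); *ring = NULL; return -1;                        */
    return 0;
}

static void *ctx_alloc(void)
{
    void *ring = NULL;

    if (ring_setup(&ring) < 0) {
        /* Do not free `ring` here: ring_setup() already cleaned up on
         * failure. Freeing it again is the double free described above. */
        return NULL;
    }
    return ring;
}
```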
rsock_s_accept_nonblock(VALUE klass, VALUE ex, rb_io_t *fptr,
struct sockaddr *sockaddr, socklen_t *len)
{
int fd2;
rb_io_set_nonblock(fptr);
fd2 = cloexec_accept(fptr->fd, (struct sockaddr*)sockaddr, len, 1);
if (fd2 < 0) {
int e = errno;
switch (e) {
case EAGAIN:
#if defined(EWOULDBLOCK) && EWOULDBLOCK != EAGAIN
case EWOULDBLOCK:
#endif
case ECONNABORTED:
#if defined EPROTO
case EPROTO:
#endif
if (ex == Qfalse)
return sym_wait_readable;
rb_readwrite_syserr_fail(RB_IO_WAIT_READABLE, e, "accept(2) would block");
}
rb_syserr_fail(e, "accept(2)");
}
rb_update_max_fd(fd2);
return rsock_init_sock(rb_obj_alloc(klass), fd2);
}
| 0 |
[
"CWE-908"
] |
ruby
|
61b7f86248bd121be2e83768be71ef289e8e5b90
| 200,882,426,523,874,550,000,000,000,000,000,000,000 | 27 |
ext/socket/init.c: do not return uninitialized buffer
Resize string buffer only if some data is received in
BasicSocket#read_nonblock and some methods.
Co-Authored-By: Samuel Williams <[email protected]>
|
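The ruby record above (CWE-908) resizes the string buffer only when data was actually received, so read_nonblock never returns uninitialized bytes. A generic C sketch of the same idea, not Ruby's implementation: shrink the buffer to the number of bytes read before handing it back.

```c
#include <stdlib.h>
#include <unistd.h>

/* Read up to `cap` bytes into a freshly allocated buffer and shrink it
 * to the length actually received, so the caller never sees bytes that
 * were allocated but never written. Returns NULL on error or EOF. */
static char *read_some(int fd, size_t cap, size_t *len)
{
    char *buf = malloc(cap);
    ssize_t n;

    *len = 0;
    if (!buf)
        return NULL;

    n = read(fd, buf, cap);
    if (n <= 0) {
        free(buf);
        return NULL;
    }

    char *shrunk = realloc(buf, (size_t)n);  /* trim to received size */
    *len = (size_t)n;
    return shrunk ? shrunk : buf;
}
```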
static inline Quantum GetPixelReadMask(const Image *magick_restrict image,
const Quantum *magick_restrict pixel)
{
if (image->channel_map[ReadMaskPixelChannel].traits == UndefinedPixelTrait)
return((Quantum) QuantumRange);
return(pixel[image->channel_map[ReadMaskPixelChannel].offset]);
}
| 0 |
[
"CWE-20",
"CWE-125"
] |
ImageMagick
|
8187d2d8fd010d2d6b1a3a8edd935beec404dddc
| 49,658,906,808,131,260,000,000,000,000,000,000,000 | 7 |
https://github.com/ImageMagick/ImageMagick/issues/1610
|
parse_response (http_t hd)
{
char *line, *p, *p2;
size_t maxlen, len;
cookie_t cookie = hd->read_cookie;
const char *s;
/* Delete old header lines. */
while (hd->headers)
{
header_t tmp = hd->headers->next;
xfree (hd->headers->value);
xfree (hd->headers);
hd->headers = tmp;
}
/* Wait for the status line. */
do
{
maxlen = MAX_LINELEN;
len = es_read_line (hd->fp_read, &hd->buffer, &hd->buffer_size, &maxlen);
line = hd->buffer;
if (!line)
return gpg_err_code_from_syserror (); /* Out of core. */
if (!maxlen)
return GPG_ERR_TRUNCATED; /* Line has been truncated. */
if (!len)
return GPG_ERR_EOF;
if (opt_debug || (hd->flags & HTTP_FLAG_LOG_RESP))
log_debug_with_string (line, "http.c:response:\n");
}
while (!*line);
if ((p = strchr (line, '/')))
*p++ = 0;
if (!p || strcmp (line, "HTTP"))
return 0; /* Assume http 0.9. */
if ((p2 = strpbrk (p, " \t")))
{
*p2++ = 0;
p2 += strspn (p2, " \t");
}
if (!p2)
return 0; /* Also assume http 0.9. */
p = p2;
/* TODO: Add HTTP version number check. */
if ((p2 = strpbrk (p, " \t")))
*p2++ = 0;
if (!isdigit ((unsigned int)p[0]) || !isdigit ((unsigned int)p[1])
|| !isdigit ((unsigned int)p[2]) || p[3])
{
/* Malformed HTTP status code - assume http 0.9. */
hd->is_http_0_9 = 1;
hd->status_code = 200;
return 0;
}
hd->status_code = atoi (p);
/* Skip all the header lines and wait for the empty line. */
do
{
maxlen = MAX_LINELEN;
len = es_read_line (hd->fp_read, &hd->buffer, &hd->buffer_size, &maxlen);
line = hd->buffer;
if (!line)
return gpg_err_code_from_syserror (); /* Out of core. */
/* Note, that we can silently ignore truncated lines. */
if (!len)
return GPG_ERR_EOF;
/* Trim line endings of empty lines. */
if ((*line == '\r' && line[1] == '\n') || *line == '\n')
*line = 0;
if (opt_debug || (hd->flags & HTTP_FLAG_LOG_RESP))
log_info ("http.c:RESP: '%.*s'\n",
(int)strlen(line)-(*line&&line[1]?2:0),line);
if (*line)
{
gpg_err_code_t ec = store_header (hd, line);
if (ec)
return ec;
}
}
while (len && *line);
cookie->content_length_valid = 0;
if (!(hd->flags & HTTP_FLAG_IGNORE_CL))
{
s = http_get_header (hd, "Content-Length");
if (s)
{
cookie->content_length_valid = 1;
cookie->content_length = string_to_u64 (s);
}
}
return 0;
}
| 0 |
[
"CWE-352"
] |
gnupg
|
4a4bb874f63741026bd26264c43bb32b1099f060
| 63,536,374,777,970,940,000,000,000,000,000,000,000 | 99 |
dirmngr: Avoid possible CSRF attacks via http redirects.
* dirmngr/http.h (parsed_uri_s): Add fields off_host and off_path.
(http_redir_info_t): New.
* dirmngr/http.c (do_parse_uri): Set new fields.
(same_host_p): New.
(http_prepare_redirect): New.
* dirmngr/t-http-basic.c: New test.
* dirmngr/ks-engine-hkp.c (send_request): Use http_prepare_redirect
instead of the open code.
* dirmngr/ks-engine-http.c (ks_http_fetch): Ditto.
--
With this change a http query will not follow a redirect unless the
Location header gives the same host. If the host is different only
the host and port is taken from the Location header and the original
path and query parts are kept.
Signed-off-by: Werner Koch <[email protected]>
(cherry picked from commit fa1b1eaa4241ff3f0634c8bdf8591cbc7c464144)
|
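The dirmngr record above only follows a redirect verbatim when the Location header points at the same host; otherwise only host and port are taken and the original path and query are kept. As a small hedged fragment, not GnuPG's http_prepare_redirect(): the core same-host comparison (hostnames compare case-insensitively).

```c
#include <stdbool.h>
#include <strings.h>

/* Same-host check used before following a redirect target verbatim;
 * a cross-host Location would otherwise let a redirector steer the
 * client's request (the CSRF-style issue described above). */
static bool same_host(const char *orig_host, const char *redir_host)
{
    return orig_host && redir_host
        && strcasecmp(orig_host, redir_host) == 0;
}
```

When the hosts differ, the safer behaviour described in the message is to reuse only the redirect's host and port while keeping the original path and query.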
int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
unsigned long rx_ring_ref, unsigned int tx_evtchn,
unsigned int rx_evtchn)
{
struct task_struct *task;
int err = -ENOMEM;
BUG_ON(vif->tx_irq);
BUG_ON(vif->task);
BUG_ON(vif->dealloc_task);
err = xenvif_map_frontend_rings(vif, tx_ring_ref, rx_ring_ref);
if (err < 0)
goto err;
init_waitqueue_head(&vif->wq);
init_waitqueue_head(&vif->dealloc_wq);
if (tx_evtchn == rx_evtchn) {
/* feature-split-event-channels == 0 */
err = bind_interdomain_evtchn_to_irqhandler(
vif->domid, tx_evtchn, xenvif_interrupt, 0,
vif->dev->name, vif);
if (err < 0)
goto err_unmap;
vif->tx_irq = vif->rx_irq = err;
disable_irq(vif->tx_irq);
} else {
/* feature-split-event-channels == 1 */
snprintf(vif->tx_irq_name, sizeof(vif->tx_irq_name),
"%s-tx", vif->dev->name);
err = bind_interdomain_evtchn_to_irqhandler(
vif->domid, tx_evtchn, xenvif_tx_interrupt, 0,
vif->tx_irq_name, vif);
if (err < 0)
goto err_unmap;
vif->tx_irq = err;
disable_irq(vif->tx_irq);
snprintf(vif->rx_irq_name, sizeof(vif->rx_irq_name),
"%s-rx", vif->dev->name);
err = bind_interdomain_evtchn_to_irqhandler(
vif->domid, rx_evtchn, xenvif_rx_interrupt, 0,
vif->rx_irq_name, vif);
if (err < 0)
goto err_tx_unbind;
vif->rx_irq = err;
disable_irq(vif->rx_irq);
}
task = kthread_create(xenvif_kthread_guest_rx,
(void *)vif, "%s-guest-rx", vif->dev->name);
if (IS_ERR(task)) {
pr_warn("Could not allocate kthread for %s\n", vif->dev->name);
err = PTR_ERR(task);
goto err_rx_unbind;
}
vif->task = task;
task = kthread_create(xenvif_dealloc_kthread,
(void *)vif, "%s-dealloc", vif->dev->name);
if (IS_ERR(task)) {
pr_warn("Could not allocate kthread for %s\n", vif->dev->name);
err = PTR_ERR(task);
goto err_rx_unbind;
}
vif->dealloc_task = task;
rtnl_lock();
if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN)
dev_set_mtu(vif->dev, ETH_DATA_LEN);
netdev_update_features(vif->dev);
netif_carrier_on(vif->dev);
if (netif_running(vif->dev))
xenvif_up(vif);
rtnl_unlock();
wake_up_process(vif->task);
wake_up_process(vif->dealloc_task);
return 0;
err_rx_unbind:
unbind_from_irqhandler(vif->rx_irq, vif);
vif->rx_irq = 0;
err_tx_unbind:
unbind_from_irqhandler(vif->tx_irq, vif);
vif->tx_irq = 0;
err_unmap:
xenvif_unmap_frontend_rings(vif);
err:
module_put(THIS_MODULE);
return err;
}
| 0 |
[
"CWE-399"
] |
net-next
|
e9d8b2c2968499c1f96563e6522c56958d5a1d0d
| 217,128,505,337,453,050,000,000,000,000,000,000,000 | 96 |
xen-netback: disable rogue vif in kthread context
When netback discovers frontend is sending malformed packet it will
disables the interface which serves that frontend.
However, disabling a network interface involves taking a mutex, which
cannot be done in softirq context, so we need to defer this process to
kthread context.
This patch does the following:
1. introduce a flag to indicate the interface is disabled.
2. check that flag in TX path, don't do any work if it's true.
3. check that flag in RX path, turn off that interface if it's true.
The reason to disable it in RX path is because RX uses kthread. After
this change the behavior of netback is still consistent -- it won't do
any TX work for a rogue frontend, and the interface will be eventually
turned off.
Also change a "continue" to "break" after xenvif_fatal_tx_err, as it
doesn't make sense to continue processing packets if frontend is rogue.
This is a fix for XSA-90.
Reported-by: Török Edwin <[email protected]>
Signed-off-by: Wei Liu <[email protected]>
Cc: Ian Campbell <[email protected]>
Reviewed-by: David Vrabel <[email protected]>
Acked-by: Ian Campbell <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
void ha_data_partition_destroy(HA_DATA_PARTITION* ha_part_data)
{
if (ha_part_data)
{
mysql_mutex_destroy(&ha_part_data->LOCK_auto_inc);
}
}
| 0 |
[] |
mysql-server
|
be901b60ae59c93848c829d1b0b2cb523ab8692e
| 58,086,128,628,934,110,000,000,000,000,000,000,000 | 7 |
Bug#26390632: CREATE TABLE CAN CAUSE MYSQL TO EXIT.
Analysis
========
CREATE TABLE of InnoDB table with a partition name
which exceeds the path limit can cause the server
to exit.
During the preparation of the partition name,
there was no check to identify whether the complete
path name for partition exceeds the max supported
path length, causing the server to exit during
subsequent processing.
Fix
===
During the preparation of partition name, check and report
an error if the partition path name exceeds the maximum path
name limit.
This is a 5.5 patch.
|
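The MySQL record above adds a check that the full partition path fits within the supported path length during name preparation. A generic sketch of that kind of check (the "#P#" separator mirrors MySQL's partition file naming, but treat the helper as hypothetical, not server code):

```c
#include <limits.h>
#include <stdio.h>

#ifndef PATH_MAX
#define PATH_MAX 4096
#endif

/* Build "<table_dir>#P#<partition>" into buf, failing if the full path
 * would not fit instead of letting later code run with a bogus name. */
static int build_partition_path(char *buf, size_t bufsize,
                                const char *table_dir, const char *part)
{
    int n = snprintf(buf, bufsize, "%s#P#%s", table_dir, part);

    if (n < 0 || (size_t)n >= bufsize)
        return -1;      /* name exceeds the supported path length */
    return 0;
}
```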
bool CBlock::AcceptBlock()
{
// Check for duplicate
uint256 hash = GetHash();
if (mapBlockIndex.count(hash))
return error("AcceptBlock() : block already in mapBlockIndex");
// Get prev block index
map<uint256, CBlockIndex*>::iterator mi = mapBlockIndex.find(hashPrevBlock);
if (mi == mapBlockIndex.end())
return DoS(10, error("AcceptBlock() : prev block not found"));
CBlockIndex* pindexPrev = (*mi).second;
int nHeight = pindexPrev->nHeight+1;
// Check proof of work
if (nBits != GetNextWorkRequired(pindexPrev, this))
return DoS(100, error("AcceptBlock() : incorrect proof of work"));
// Check timestamp against prev
if (GetBlockTime() <= pindexPrev->GetMedianTimePast())
return error("AcceptBlock() : block's timestamp is too early");
// Check that all transactions are finalized
BOOST_FOREACH(const CTransaction& tx, vtx)
if (!tx.IsFinal(nHeight, GetBlockTime()))
return DoS(10, error("AcceptBlock() : contains a non-final transaction"));
// Check that the block chain matches the known block chain up to a checkpoint
if (!Checkpoints::CheckBlock(nHeight, hash))
return DoS(100, error("AcceptBlock() : rejected by checkpoint lockin at %d", nHeight));
// Write block to history file
if (!CheckDiskSpace(::GetSerializeSize(*this, SER_DISK)))
return error("AcceptBlock() : out of disk space");
unsigned int nFile = -1;
unsigned int nBlockPos = 0;
if (!WriteToDisk(nFile, nBlockPos))
return error("AcceptBlock() : WriteToDisk failed");
if (!AddToBlockIndex(nFile, nBlockPos))
return error("AcceptBlock() : AddToBlockIndex failed");
// Relay inventory, but don't relay old inventory during initial block download
if (hashBestChain == hash)
CRITICAL_BLOCK(cs_vNodes)
BOOST_FOREACH(CNode* pnode, vNodes)
if (nBestHeight > (pnode->nStartingHeight != -1 ? pnode->nStartingHeight - 2000 : 140700))
pnode->PushInventory(CInv(MSG_BLOCK, hash));
return true;
}
| 0 |
[
"CWE-16",
"CWE-787"
] |
bitcoin
|
a206b0ea12eb4606b93323268fc81a4f1f952531
| 324,917,679,935,261,220,000,000,000,000,000,000,000 | 50 |
Do not allow overwriting unspent transactions (BIP 30)
Introduce the following network rule:
* a block is not valid if it contains a transaction whose hash
already exists in the block chain, unless all that transaction's
outputs were already spent before said block.
Warning: this is effectively a network rule change, with potential
risk for forking the block chain. Leaving this unfixed carries the
same risk however, for attackers that can cause a reorganisation
in part of the network.
Thanks to Russell O'Connor and Ben Reeves.
|
static int selinux_task_setscheduler(struct task_struct *p)
{
int rc;
rc = cap_task_setscheduler(p);
if (rc)
return rc;
return current_has_perm(p, PROCESS__SETSCHED);
}
| 0 |
[
"CWE-264"
] |
linux
|
259e5e6c75a910f3b5e656151dc602f53f9d7548
| 104,602,752,386,925,370,000,000,000,000,000,000,000 | 10 |
Add PR_{GET,SET}_NO_NEW_PRIVS to prevent execve from granting privs
With this change, calling
prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)
disables privilege granting operations at execve-time. For example, a
process will not be able to execute a setuid binary to change their uid
or gid if this bit is set. The same is true for file capabilities.
Additionally, LSM_UNSAFE_NO_NEW_PRIVS is defined to ensure that
LSMs respect the requested behavior.
To determine if the NO_NEW_PRIVS bit is set, a task may call
prctl(PR_GET_NO_NEW_PRIVS, 0, 0, 0, 0);
It returns 1 if set and 0 if it is not set. If any of the arguments are
non-zero, it will return -1 and set errno to -EINVAL.
(PR_SET_NO_NEW_PRIVS behaves similarly.)
This functionality is desired for the proposed seccomp filter patch
series. By using PR_SET_NO_NEW_PRIVS, it allows a task to modify the
system call behavior for itself and its child tasks without being
able to impact the behavior of a more privileged task.
Another potential use is making certain privileged operations
unprivileged. For example, chroot may be considered "safe" if it cannot
affect privileged tasks.
Note, this patch causes execve to fail when PR_SET_NO_NEW_PRIVS is
set and AppArmor is in use. It is fixed in a subsequent patch.
Signed-off-by: Andy Lutomirski <[email protected]>
Signed-off-by: Will Drewry <[email protected]>
Acked-by: Eric Paris <[email protected]>
Acked-by: Kees Cook <[email protected]>
v18: updated change desc
v17: using new define values as per 3.4
Signed-off-by: James Morris <[email protected]>
|
polite_directory_format (name)
char *name;
{
char *home;
int l;
home = get_string_value ("HOME");
l = home ? strlen (home) : 0;
if (l > 1 && strncmp (home, name, l) == 0 && (!name[l] || name[l] == '/'))
{
strncpy (tdir + 1, name + l, sizeof(tdir) - 2);
tdir[0] = '~';
tdir[sizeof(tdir) - 1] = '\0';
return (tdir);
}
else
return (name);
}
| 0 |
[] |
bash
|
955543877583837c85470f7fb8a97b7aa8d45e6c
| 280,390,978,852,053,630,000,000,000,000,000,000,000 | 18 |
bash-4.4-rc2 release
|
void ip6_flush_pending_frames(struct sock *sk)
{
struct sk_buff *skb;
while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL) {
if (skb_dst(skb))
IP6_INC_STATS(sock_net(sk), ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_OUTDISCARDS);
kfree_skb(skb);
}
ip6_cork_release(inet_sk(sk), inet6_sk(sk));
}
| 0 |
[
"CWE-119",
"CWE-401"
] |
linux
|
2811ebac2521ceac84f2bdae402455baa6a7fb47
| 252,202,455,033,463,900,000,000,000,000,000,000,000 | 13 |
ipv6: udp packets following an UFO enqueued packet need also be handled by UFO
In the following scenario the socket is corked:
If the first UDP packet is larger then the mtu we try to append it to the
write queue via ip6_ufo_append_data. A following packet, which is smaller
than the mtu would be appended to the already queued up gso-skb via
plain ip6_append_data. This causes random memory corruptions.
In ip6_ufo_append_data we also have to be careful to not queue up the
same skb multiple times. So setup the gso frame only when no first skb
is available.
This also fixes a shortcoming where we add the current packet's length to
cork->length but return early because of a packet > mtu with dontfrag set
(instead of sutracting it again).
Found with trinity.
Cc: YOSHIFUJI Hideaki <[email protected]>
Signed-off-by: Hannes Frederic Sowa <[email protected]>
Reported-by: Dmitry Vyukov <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
dissect_notify_field(tvbuff_t *tvb, int offset, packet_info *pinfo,
proto_tree *tree, dcerpc_info *di, guint8 *drep, guint16 type,
guint16 *data)
{
guint16 field;
const char *str;
offset = dissect_ndr_uint16(
tvb, offset, pinfo, NULL, di, drep,
hf_notify_field, &field);
switch(type) {
case PRINTER_NOTIFY_TYPE:
str = val_to_str_ext_const(field, &printer_notify_option_data_vals_ext,
"Unknown");
break;
case JOB_NOTIFY_TYPE:
str = val_to_str_ext_const(field, &job_notify_option_data_vals_ext,
"Unknown");
break;
default:
str = "Unknown notify type";
break;
}
proto_tree_add_uint_format_value(tree, hf_notify_field, tvb, offset - 2, 2, field, "%s (%d)", str, field);
if (data)
*data = field;
return offset;
}
| 0 |
[
"CWE-399"
] |
wireshark
|
b4d16b4495b732888e12baf5b8a7e9bf2665e22b
| 102,233,499,052,384,400,000,000,000,000,000,000,000 | 32 |
SPOOLSS: Try to avoid an infinite loop.
Use tvb_reported_length_remaining in dissect_spoolss_uint16uni. Make
sure our offset always increments in dissect_spoolss_keybuffer.
Change-Id: I7017c9685bb2fa27161d80a03b8fca4ef630e793
Reviewed-on: https://code.wireshark.org/review/14687
Reviewed-by: Gerald Combs <[email protected]>
Petri-Dish: Gerald Combs <[email protected]>
Tested-by: Petri Dish Buildbot <[email protected]>
Reviewed-by: Michael Mann <[email protected]>
|
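The wireshark record above fixes a potential infinite loop by making sure the dissection offset always increments. A generic sketch, not the SPOOLSS dissector itself: treat "no forward progress" as a stop condition.

```c
#include <stddef.h>

/* Hypothetical per-record parser: returns the offset just past the
 * record, or the same offset on malformed input. */
static size_t parse_record(const unsigned char *buf, size_t len, size_t off)
{
    size_t rec_len = buf[off];            /* 1-byte length prefix (sketch) */

    if (off + 1 + rec_len > len)
        return off;                       /* malformed: signal no progress */
    /* ... dissect buf[off + 1 .. off + rec_len] ... */
    return off + 1 + rec_len;
}

static void walk_records(const unsigned char *buf, size_t len)
{
    size_t off = 0;

    while (off < len) {
        size_t next = parse_record(buf, len, off);

        if (next <= off)   /* no progress: stop instead of spinning forever */
            break;
        off = next;
    }
}
```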
static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct tg3 *tp = netdev_priv(dev);
if (tg3_flag(tp, USE_PHYLIB)) {
struct phy_device *phydev;
if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
return -EAGAIN;
phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
return phy_ethtool_gset(phydev, cmd);
}
cmd->supported = (SUPPORTED_Autoneg);
if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
cmd->supported |= (SUPPORTED_1000baseT_Half |
SUPPORTED_1000baseT_Full);
if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
cmd->supported |= (SUPPORTED_100baseT_Half |
SUPPORTED_100baseT_Full |
SUPPORTED_10baseT_Half |
SUPPORTED_10baseT_Full |
SUPPORTED_TP);
cmd->port = PORT_TP;
} else {
cmd->supported |= SUPPORTED_FIBRE;
cmd->port = PORT_FIBRE;
}
cmd->advertising = tp->link_config.advertising;
if (tg3_flag(tp, PAUSE_AUTONEG)) {
if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
cmd->advertising |= ADVERTISED_Pause;
} else {
cmd->advertising |= ADVERTISED_Pause |
ADVERTISED_Asym_Pause;
}
} else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
cmd->advertising |= ADVERTISED_Asym_Pause;
}
}
if (netif_running(dev) && tp->link_up) {
ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
cmd->duplex = tp->link_config.active_duplex;
cmd->lp_advertising = tp->link_config.rmt_adv;
if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
cmd->eth_tp_mdix = ETH_TP_MDI_X;
else
cmd->eth_tp_mdix = ETH_TP_MDI;
}
} else {
ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
cmd->duplex = DUPLEX_UNKNOWN;
cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
}
cmd->phy_address = tp->phy_addr;
cmd->transceiver = XCVR_INTERNAL;
cmd->autoneg = tp->link_config.autoneg;
cmd->maxtxpkt = 0;
cmd->maxrxpkt = 0;
return 0;
}
| 0 |
[
"CWE-476",
"CWE-119"
] |
linux
|
715230a44310a8cf66fbfb5a46f9a62a9b2de424
| 331,825,999,119,031,400,000,000,000,000,000,000,000 | 65 |
tg3: fix length overflow in VPD firmware parsing
Commit 184b89044fb6e2a74611dafa69b1dce0d98612c6 ("tg3: Use VPD fw version
when present") introduced VPD parsing that contained a potential length
overflow.
Limit the hardware's reported firmware string length (max 255 bytes) to
stay inside the driver's firmware string length (32 bytes). On overflow,
truncate the formatted firmware string instead of potentially overwriting
portions of the tg3 struct.
http://cansecwest.com/slides/2013/PrivateCore%20CSW%202013.pdf
Signed-off-by: Kees Cook <[email protected]>
Reported-by: Oded Horovitz <[email protected]>
Reported-by: Brad Spengler <[email protected]>
Cc: [email protected]
Cc: Matt Carlson <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
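The tg3 record above limits the hardware-reported VPD firmware string (up to 255 bytes) to the driver's 32-byte field, truncating rather than overwriting adjacent memory. A minimal generic sketch of that clamp (FW_VER_LEN and the helper are illustrative, not the tg3 code):

```c
#include <string.h>

#define FW_VER_LEN 32   /* fixed-size destination, as in the description */

/* Copy a device-reported string (up to 255 bytes) into a fixed 32-byte
 * field, truncating rather than writing past the destination. */
static void copy_fw_version(char dst[FW_VER_LEN],
                            const char *src, size_t src_len)
{
    size_t n = src_len < FW_VER_LEN - 1 ? src_len : FW_VER_LEN - 1;

    memcpy(dst, src, n);
    dst[n] = '\0';
}
```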
static struct page *go7007_snd_pcm_page(struct snd_pcm_substream *substream,
unsigned long offset)
{
return vmalloc_to_page(substream->runtime->dma_area + offset);
}
| 0 |
[
"CWE-401"
] |
linux
|
9453264ef58638ce8976121ac44c07a3ef375983
| 30,668,262,265,222,840,000,000,000,000,000,000,000 | 5 |
media: go7007: fix a miss of snd_card_free
go7007_snd_init() misses a snd_card_free() in an error path.
Add the missed call to fix it.
Signed-off-by: Chuhong Yuan <[email protected]>
Signed-off-by: Hans Verkuil <[email protected]>
Signed-off-by: Mauro Carvalho Chehab <[email protected]>
|
static int nf_tables_dump_chains(struct sk_buff *skb,
struct netlink_callback *cb)
{
const struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
unsigned int idx = 0, s_idx = cb->args[0];
struct net *net = sock_net(skb->sk);
int family = nfmsg->nfgen_family;
struct nftables_pernet *nft_net;
const struct nft_table *table;
const struct nft_chain *chain;
rcu_read_lock();
nft_net = nft_pernet(net);
cb->seq = nft_net->base_seq;
list_for_each_entry_rcu(table, &nft_net->tables, list) {
if (family != NFPROTO_UNSPEC && family != table->family)
continue;
list_for_each_entry_rcu(chain, &table->chains, list) {
if (idx < s_idx)
goto cont;
if (idx > s_idx)
memset(&cb->args[1], 0,
sizeof(cb->args) - sizeof(cb->args[0]));
if (!nft_is_active(net, chain))
continue;
if (nf_tables_fill_chain_info(skb, net,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
NFT_MSG_NEWCHAIN,
NLM_F_MULTI,
table->family, table,
chain) < 0)
goto done;
nl_dump_check_consistent(cb, nlmsg_hdr(skb));
cont:
idx++;
}
}
done:
rcu_read_unlock();
cb->args[0] = idx;
return skb->len;
}
| 0 |
[
"CWE-665"
] |
linux
|
ad9f151e560b016b6ad3280b48e42fa11e1a5440
| 194,385,613,131,153,300,000,000,000,000,000,000,000 | 46 |
netfilter: nf_tables: initialize set before expression setup
nft_set_elem_expr_alloc() needs an initialized set if the expression sets
the NFT_EXPR_GC flag. Move set fields initialization before expression
setup.
[4512935.019450] ==================================================================
[4512935.019456] BUG: KASAN: null-ptr-deref in nft_set_elem_expr_alloc+0x84/0xd0 [nf_tables]
[4512935.019487] Read of size 8 at addr 0000000000000070 by task nft/23532
[4512935.019494] CPU: 1 PID: 23532 Comm: nft Not tainted 5.12.0-rc4+ #48
[...]
[4512935.019502] Call Trace:
[4512935.019505] dump_stack+0x89/0xb4
[4512935.019512] ? nft_set_elem_expr_alloc+0x84/0xd0 [nf_tables]
[4512935.019536] ? nft_set_elem_expr_alloc+0x84/0xd0 [nf_tables]
[4512935.019560] kasan_report.cold.12+0x5f/0xd8
[4512935.019566] ? nft_set_elem_expr_alloc+0x84/0xd0 [nf_tables]
[4512935.019590] nft_set_elem_expr_alloc+0x84/0xd0 [nf_tables]
[4512935.019615] nf_tables_newset+0xc7f/0x1460 [nf_tables]
Reported-by: [email protected]
Fixes: 65038428b2c6 ("netfilter: nf_tables: allow to specify stateful expression in set definition")
Signed-off-by: Pablo Neira Ayuso <[email protected]>
|
PamData(BareosSocket* UA_sock, const std::string& passwd)
: UA_sock_(UA_sock), passwd_(passwd)
{
}
| 0 |
[
"CWE-284"
] |
bareos
|
abe462037388635193f3b5b71575f32596c3b69d
| 39,740,262,239,800,915,000,000,000,000,000,000,000 | 4 |
dir: check account authorization during PAM login
Fixes CVE-2022-24755
Previously, when a user logged in via PAM, Bareos only checked for
authentication (i.e. the "auth" section in PAM). No authorization checks
were made (the "account" section in PAM). This patch adds the proper
check.
This will break existing PAM configuration!
|
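For context, checking both PAM stacks from a client looks roughly like the sketch below. This is not Bareos code; the "bareos" service name and the silent conversation handler are assumptions for illustration (link with -lpam).

#include <security/pam_appl.h>
#include <stddef.h>

static int quiet_conv(int n, const struct pam_message **msg,
                      struct pam_response **resp, void *data)
{
    (void)n; (void)msg; (void)resp; (void)data;
    return PAM_CONV_ERR;   /* a real client would prompt for the password here */
}

int pam_login_ok(const char *user)
{
    const struct pam_conv conv = { quiet_conv, NULL };
    pam_handle_t *pamh = NULL;
    int rc = pam_start("bareos", user, &conv, &pamh);

    if (rc == PAM_SUCCESS)
        rc = pam_authenticate(pamh, 0);   /* "auth": is the password right? */
    if (rc == PAM_SUCCESS)
        rc = pam_acct_mgmt(pamh, 0);      /* "account": is the account allowed? */

    if (pamh)
        pam_end(pamh, rc);
    return rc == PAM_SUCCESS;
}

The vulnerability was exactly the missing pam_acct_mgmt() step: e.g. an expired or otherwise disallowed account could still authenticate successfully.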
find_func(char_u *name, int is_global, cctx_T *cctx)
{
ufunc_T *fp = find_func_even_dead(name, is_global, cctx);
if (fp != NULL && (fp->uf_flags & FC_DEAD) == 0)
return fp;
return NULL;
}
| 0 |
[
"CWE-416"
] |
vim
|
9c23f9bb5fe435b28245ba8ac65aa0ca6b902c04
| 175,108,774,200,087,160,000,000,000,000,000,000,000 | 8 |
patch 8.2.3902: Vim9: double free with nested :def function
Problem: Vim9: double free with nested :def function.
Solution: Pass "line_to_free" from compile_def_function() and make sure
cmdlinep is valid.
|
static void ide_atapi_cmd_reply(IDEState *s, int size, int max_size)
{
if (size > max_size)
size = max_size;
s->lba = -1; /* no sector read */
s->packet_transfer_size = size;
s->io_buffer_size = size; /* dma: send the reply data as one chunk */
s->elementary_transfer_size = 0;
if (s->atapi_dma) {
block_acct_start(blk_get_stats(s->blk), &s->acct, size,
BLOCK_ACCT_READ);
s->status = READY_STAT | SEEK_STAT | DRQ_STAT;
ide_start_dma(s, ide_atapi_cmd_read_dma_cb);
} else {
s->status = READY_STAT | SEEK_STAT;
s->io_buffer_index = 0;
ide_atapi_cmd_reply_end(s);
}
}
| 0 |
[
"CWE-125"
] |
qemu
|
813212288970c39b1800f63e83ac6e96588095c6
| 255,525,820,484,585,630,000,000,000,000,000,000,000 | 20 |
ide: atapi: assert that the buffer pointer is in range
A case was reported where s->io_buffer_index can be out of range.
The report skimped on the details but it seems to be triggered
by s->lba == -1 on the READ/READ CD paths (e.g. by sending an
ATAPI command with LBA = 0xFFFFFFFF). For now paper over it
with assertions. The first one ensures that there is no overflow
when incrementing s->io_buffer_index, the second checks for the
buffer overrun.
Note that the buffer overrun is only a read, so I am not sure
if the assertion failure is actually less harmful than the overrun.
Signed-off-by: Paolo Bonzini <[email protected]>
Message-id: [email protected]
Reviewed-by: Kevin Wolf <[email protected]>
Signed-off-by: Peter Maydell <[email protected]>
|
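As a user-space rendering of the two checks the commit describes (field names simplified, constants invented), the idea is: no overflow when advancing the index, and the resulting access stays inside io_buffer.

#include <assert.h>
#include <stdint.h>
#include <string.h>

#define IO_BUFFER_SIZE 2048

struct state {
    uint8_t io_buffer[IO_BUFFER_SIZE];
    size_t  io_buffer_index;
};

static void copy_reply_chunk(struct state *s, const uint8_t *src, size_t len)
{
    /* The increment must not run past the buffer ... */
    assert(len <= IO_BUFFER_SIZE - s->io_buffer_index);
    memcpy(s->io_buffer + s->io_buffer_index, src, len);
    s->io_buffer_index += len;
    /* ... and the index must still be in range afterwards. */
    assert(s->io_buffer_index <= IO_BUFFER_SIZE);
}

int main(void)
{
    struct state s = { .io_buffer_index = 0 };
    uint8_t reply[16] = { 0 };

    copy_reply_chunk(&s, reply, sizeof(reply));
    return 0;
}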
static int perf_pmu_commit_txn(struct pmu *pmu)
{
unsigned int flags = __this_cpu_read(nop_txn_flags);
__this_cpu_write(nop_txn_flags, 0);
if (flags & ~PERF_PMU_TXN_ADD)
return 0;
perf_pmu_enable(pmu);
return 0;
}
| 0 |
[
"CWE-416",
"CWE-362"
] |
linux
|
12ca6ad2e3a896256f086497a7c7406a547ee373
| 103,510,831,374,188,410,000,000,000,000,000,000,000 | 12 |
perf: Fix race in swevent hash
There's a race on CPU unplug where we free the swevent hash array
while it can still have events on. This will result in a
use-after-free which is BAD.
Simply do not free the hash array on unplug. This leaves the thing
around and no use-after-free takes place.
When the last swevent dies, we do a for_each_possible_cpu() iteration
anyway to clean these up, at which time we'll free it, so no leakage
will occur.
Reported-by: Sasha Levin <[email protected]>
Tested-by: Sasha Levin <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Cc: Arnaldo Carvalho de Melo <[email protected]>
Cc: Frederic Weisbecker <[email protected]>
Cc: Jiri Olsa <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Stephane Eranian <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: Vince Weaver <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
|
int __init mdio_bus_init(void)
{
int ret;
ret = class_register(&mdio_bus_class);
if (!ret) {
ret = bus_register(&mdio_bus_type);
if (ret)
class_unregister(&mdio_bus_class);
}
return ret;
}
| 0 |
[
"CWE-416"
] |
linux
|
6ff7b060535e87c2ae14dd8548512abfdda528fb
| 314,797,474,457,549,300,000,000,000,000,000,000,000 | 13 |
mdio_bus: Fix use-after-free on device_register fails
KASAN has found a use-after-free in fixed_mdio_bus_init.
Commit 0c692d07842a ("drivers/net/phy/mdio_bus.c: call
put_device on device_register() failure") calls put_device()
when device_register() fails, giving up the last reference
to the device and allowing mdiobus_release to be executed,
kfreeing the bus. However, in most drivers mdiobus_free is
called to free the bus when mdiobus_register fails. A
use-after-free occurs when the bus is accessed again; this
patch reverts that change to let mdiobus_free free the bus.
KASAN report details as below:
BUG: KASAN: use-after-free in mdiobus_free+0x85/0x90 drivers/net/phy/mdio_bus.c:482
Read of size 4 at addr ffff8881dc824d78 by task syz-executor.0/3524
CPU: 1 PID: 3524 Comm: syz-executor.0 Not tainted 5.0.0-rc7+ #45
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.10.2-1ubuntu1 04/01/2014
Call Trace:
__dump_stack lib/dump_stack.c:77 [inline]
dump_stack+0xfa/0x1ce lib/dump_stack.c:113
print_address_description+0x65/0x270 mm/kasan/report.c:187
kasan_report+0x149/0x18d mm/kasan/report.c:317
mdiobus_free+0x85/0x90 drivers/net/phy/mdio_bus.c:482
fixed_mdio_bus_init+0x283/0x1000 [fixed_phy]
? 0xffffffffc0e40000
? 0xffffffffc0e40000
? 0xffffffffc0e40000
do_one_initcall+0xfa/0x5ca init/main.c:887
do_init_module+0x204/0x5f6 kernel/module.c:3460
load_module+0x66b2/0x8570 kernel/module.c:3808
__do_sys_finit_module+0x238/0x2a0 kernel/module.c:3902
do_syscall_64+0x147/0x600 arch/x86/entry/common.c:290
entry_SYSCALL_64_after_hwframe+0x49/0xbe
RIP: 0033:0x462e99
Code: f7 d8 64 89 02 b8 ff ff ff ff c3 66 0f 1f 44 00 00 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 c7 c1 bc ff ff ff f7 d8 64 89 01 48
RSP: 002b:00007f6215c19c58 EFLAGS: 00000246 ORIG_RAX: 0000000000000139
RAX: ffffffffffffffda RBX: 000000000073bf00 RCX: 0000000000462e99
RDX: 0000000000000000 RSI: 0000000020000080 RDI: 0000000000000003
RBP: 00007f6215c19c70 R08: 0000000000000000 R09: 0000000000000000
R10: 0000000000000000 R11: 0000000000000246 R12: 00007f6215c1a6bc
R13: 00000000004bcefb R14: 00000000006f7030 R15: 0000000000000004
Allocated by task 3524:
set_track mm/kasan/common.c:85 [inline]
__kasan_kmalloc.constprop.3+0xa0/0xd0 mm/kasan/common.c:496
kmalloc include/linux/slab.h:545 [inline]
kzalloc include/linux/slab.h:740 [inline]
mdiobus_alloc_size+0x54/0x1b0 drivers/net/phy/mdio_bus.c:143
fixed_mdio_bus_init+0x163/0x1000 [fixed_phy]
do_one_initcall+0xfa/0x5ca init/main.c:887
do_init_module+0x204/0x5f6 kernel/module.c:3460
load_module+0x66b2/0x8570 kernel/module.c:3808
__do_sys_finit_module+0x238/0x2a0 kernel/module.c:3902
do_syscall_64+0x147/0x600 arch/x86/entry/common.c:290
entry_SYSCALL_64_after_hwframe+0x49/0xbe
Freed by task 3524:
set_track mm/kasan/common.c:85 [inline]
__kasan_slab_free+0x130/0x180 mm/kasan/common.c:458
slab_free_hook mm/slub.c:1409 [inline]
slab_free_freelist_hook mm/slub.c:1436 [inline]
slab_free mm/slub.c:2986 [inline]
kfree+0xe1/0x270 mm/slub.c:3938
device_release+0x78/0x200 drivers/base/core.c:919
kobject_cleanup lib/kobject.c:662 [inline]
kobject_release lib/kobject.c:691 [inline]
kref_put include/linux/kref.h:67 [inline]
kobject_put+0x146/0x240 lib/kobject.c:708
put_device+0x1c/0x30 drivers/base/core.c:2060
__mdiobus_register+0x483/0x560 drivers/net/phy/mdio_bus.c:382
fixed_mdio_bus_init+0x26b/0x1000 [fixed_phy]
do_one_initcall+0xfa/0x5ca init/main.c:887
do_init_module+0x204/0x5f6 kernel/module.c:3460
load_module+0x66b2/0x8570 kernel/module.c:3808
__do_sys_finit_module+0x238/0x2a0 kernel/module.c:3902
do_syscall_64+0x147/0x600 arch/x86/entry/common.c:290
entry_SYSCALL_64_after_hwframe+0x49/0xbe
The buggy address belongs to the object at ffff8881dc824c80
which belongs to the cache kmalloc-2k of size 2048
The buggy address is located 248 bytes inside of
2048-byte region [ffff8881dc824c80, ffff8881dc825480)
The buggy address belongs to the page:
page:ffffea0007720800 count:1 mapcount:0 mapping:ffff8881f6c02800 index:0x0 compound_mapcount: 0
flags: 0x2fffc0000010200(slab|head)
raw: 02fffc0000010200 0000000000000000 0000000500000001 ffff8881f6c02800
raw: 0000000000000000 00000000800f000f 00000001ffffffff 0000000000000000
page dumped because: kasan: bad access detected
Memory state around the buggy address:
ffff8881dc824c00: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
ffff8881dc824c80: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
>ffff8881dc824d00: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
^
ffff8881dc824d80: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
ffff8881dc824e00: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
Fixes: 0c692d07842a ("drivers/net/phy/mdio_bus.c: call put_device on device_register() failure")
Signed-off-by: YueHaibing <[email protected]>
Reviewed-by: Andrew Lunn <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
static void test_logs()
{
MYSQL_STMT *stmt;
MYSQL_BIND my_bind[2];
char data[255];
ulong length;
int rc;
short id;
myheader("test_logs");
rc= mysql_query(mysql, "DROP TABLE IF EXISTS test_logs");
myquery(rc);
rc= mysql_query(mysql, "CREATE TABLE test_logs(id smallint, name varchar(20))");
myquery(rc);
my_stpcpy((char *)data, "INSERT INTO test_logs VALUES(?, ?)");
stmt= mysql_simple_prepare(mysql, data);
check_stmt(stmt);
/*
We need to memset bind structure because mysql_stmt_bind_param checks all
its members.
*/
memset(my_bind, 0, sizeof(my_bind));
my_bind[0].buffer_type= MYSQL_TYPE_SHORT;
my_bind[0].buffer= (void *)&id;
my_bind[1].buffer_type= MYSQL_TYPE_STRING;
my_bind[1].buffer= (void *)&data;
my_bind[1].buffer_length= 255;
my_bind[1].length= &length;
id= 9876;
length= (ulong)(my_stpcpy((char *)data, "MySQL - Open Source Database")- data);
rc= mysql_stmt_bind_param(stmt, my_bind);
check_execute(stmt, rc);
rc= mysql_stmt_execute(stmt);
check_execute(stmt, rc);
my_stpcpy((char *)data, "'");
length= 1;
rc= mysql_stmt_execute(stmt);
check_execute(stmt, rc);
my_stpcpy((char *)data, "\"");
length= 1;
rc= mysql_stmt_execute(stmt);
check_execute(stmt, rc);
length= (ulong)(my_stpcpy((char *)data, "my\'sql\'")-data);
rc= mysql_stmt_execute(stmt);
check_execute(stmt, rc);
length= (ulong)(my_stpcpy((char *)data, "my\"sql\"")-data);
rc= mysql_stmt_execute(stmt);
check_execute(stmt, rc);
mysql_stmt_close(stmt);
my_stpcpy((char *)data, "INSERT INTO test_logs VALUES(20, 'mysql')");
stmt= mysql_simple_prepare(mysql, data);
check_stmt(stmt);
rc= mysql_stmt_execute(stmt);
check_execute(stmt, rc);
rc= mysql_stmt_execute(stmt);
check_execute(stmt, rc);
mysql_stmt_close(stmt);
my_stpcpy((char *)data, "SELECT * FROM test_logs WHERE id=?");
stmt= mysql_simple_prepare(mysql, data);
check_stmt(stmt);
rc= mysql_stmt_bind_param(stmt, my_bind);
check_execute(stmt, rc);
rc= mysql_stmt_execute(stmt);
check_execute(stmt, rc);
my_bind[1].buffer_length= 255;
rc= mysql_stmt_bind_result(stmt, my_bind);
check_execute(stmt, rc);
rc= mysql_stmt_fetch(stmt);
check_execute(stmt, rc);
if (!opt_silent)
{
fprintf(stdout, "id : %d\n", id);
fprintf(stdout, "name : %s(%ld)\n", data, length);
}
DIE_UNLESS(id == 9876);
DIE_UNLESS(length == 19 || length == 20); /* Due to VARCHAR(20) */
DIE_UNLESS(is_prefix(data, "MySQL - Open Source") == 1);
rc= mysql_stmt_fetch(stmt);
check_execute(stmt, rc);
if (!opt_silent)
fprintf(stdout, "\n name : %s(%ld)", data, length);
DIE_UNLESS(length == 1);
DIE_UNLESS(strcmp(data, "'") == 0);
rc= mysql_stmt_fetch(stmt);
check_execute(stmt, rc);
if (!opt_silent)
fprintf(stdout, "\n name : %s(%ld)", data, length);
DIE_UNLESS(length == 1);
DIE_UNLESS(strcmp(data, "\"") == 0);
rc= mysql_stmt_fetch(stmt);
check_execute(stmt, rc);
if (!opt_silent)
fprintf(stdout, "\n name : %s(%ld)", data, length);
DIE_UNLESS(length == 7);
DIE_UNLESS(strcmp(data, "my\'sql\'") == 0);
rc= mysql_stmt_fetch(stmt);
check_execute(stmt, rc);
if (!opt_silent)
fprintf(stdout, "\n name : %s(%ld)", data, length);
DIE_UNLESS(length == 7);
/*DIE_UNLESS(strcmp(data, "my\"sql\"") == 0); */
rc= mysql_stmt_fetch(stmt);
DIE_UNLESS(rc == MYSQL_NO_DATA);
mysql_stmt_close(stmt);
rc= mysql_query(mysql, "DROP TABLE test_logs");
myquery(rc);
}
| 0 |
[
"CWE-284",
"CWE-295"
] |
mysql-server
|
3bd5589e1a5a93f9c224badf983cd65c45215390
| 334,085,134,190,499,560,000,000,000,000,000,000,000 | 150 |
WL#6791 : Redefine client --ssl option to imply enforced encryption
# Changed the meaning of the --ssl=1 option of all client binaries
to mean force ssl, not try ssl and fail over to unencrypted
# Added a new MYSQL_OPT_SSL_ENFORCE mysql_options()
option to specify that an ssl connection is required.
# Added a new macro SSL_SET_OPTIONS() to the client
SSL handling headers that sets all the relevant SSL options at
once.
# Revamped all of the current native clients to use the new macro
# Removed some Windows line endings.
# Added proper handling of the new option into the ssl helper
headers.
# If SSL is mandatory assume that the media is secure enough
for the sha256 plugin to do unencrypted password exchange even
before establishing a connection.
# Set the default ssl cipher to DHE-RSA-AES256-SHA if none is
specified.
# updated test cases that require a non-default cipher to spawn
a mysql command line tool binary since mysqltest has no support
for specifying ciphers.
# updated the replication slave connection code to always enforce
SSL if any of the SSL config options is present.
# test cases added and updated.
# added a mysql_get_option() API to return mysql_options()
values. Used the new API inside the sha256 plugin.
# Fixed compilation warnings because of unused variables.
# Fixed test failures (mysql_ssl and bug13115401)
# Fixed whitespace issues.
# Fully implemented the mysql_get_option() function.
# Added a test case for mysql_get_option()
# fixed some trailing whitespace issues
# fixed some uint/int warnings in mysql_client_test.c
# removed shared memory option from non-windows get_options
tests
# moved MYSQL_OPT_LOCAL_INFILE to the uint options
|
vips_foreign_load_gif_file_dispose( GObject *gobject )
{
VipsForeignLoadGifFile *file = (VipsForeignLoadGifFile *) gobject;
VIPS_FREEF( fclose, file->fp );
G_OBJECT_CLASS( vips_foreign_load_gif_file_parent_class )->
dispose( gobject );
}
| 0 |
[
"CWE-416"
] |
libvips
|
ce684dd008532ea0bf9d4a1d89bacb35f4a83f4d
| 133,589,534,874,887,130,000,000,000,000,000,000,000 | 9 |
fetch map after DGifGetImageDesc()
Earlier refactoring broke GIF map fetch.
|
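The point of the libvips fix is that a frame's local colormap is only valid after DGifGetImageDesc() has parsed the image descriptor. A standalone sketch of that ordering, assuming giflib 5.1's API (this is not libvips code; compile with -lgif):

#include <gif_lib.h>
#include <stdio.h>

int print_first_frame_colors(const char *path)
{
    int error = 0;
    GifFileType *gif = DGifOpenFileName(path, &error);
    GifRecordType rec;

    if (!gif)
        return -1;

    do {
        if (DGifGetRecordType(gif, &rec) == GIF_ERROR)
            break;
        if (rec == IMAGE_DESC_RECORD_TYPE) {
            if (DGifGetImageDesc(gif) == GIF_ERROR)
                break;
            /* Only now is gif->Image.ColorMap meaningful. */
            ColorMapObject *map = gif->Image.ColorMap ?
                                  gif->Image.ColorMap : gif->SColorMap;
            printf("colors: %d\n", map ? map->ColorCount : 0);
            break;
        }
    } while (rec != TERMINATE_RECORD_TYPE);

    DGifCloseFile(gif, &error);
    return 0;
}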
static int cap_dentry_open(struct file *file, const struct cred *cred)
{
return 0;
}
| 0 |
[] |
linux-2.6
|
ee18d64c1f632043a02e6f5ba5e045bb26a5465f
| 164,004,430,873,485,770,000,000,000,000,000,000,000 | 4 |
KEYS: Add a keyctl to install a process's session keyring on its parent [try #6]
Add a keyctl to install a process's session keyring onto its parent. This
replaces the parent's session keyring. Because the COW credential code does
not permit one process to change another process's credentials directly, the
change is deferred until userspace next starts executing again. Normally this
will be after a wait*() syscall.
To support this, three new security hooks have been provided:
cred_alloc_blank() to allocate unset security creds, cred_transfer() to fill in
the blank security creds and key_session_to_parent() - which asks the LSM if
the process may replace its parent's session keyring.
The replacement may only happen if the process has the same ownership details
as its parent, and the process has LINK permission on the session keyring, and
the session keyring is owned by the process, and the LSM permits it.
Note that this requires alteration to each architecture's notify_resume path.
This has been done for all arches barring blackfin, m68k* and xtensa, all of
which need assembly alteration to support TIF_NOTIFY_RESUME. This allows the
replacement to be performed at the point the parent process resumes userspace
execution.
This allows the userspace AFS pioctl emulation to fully emulate newpag() and
the VIOCSETTOK and VIOCSETTOK2 pioctls, all of which require the ability to
alter the parent process's PAG membership. However, since kAFS doesn't use
PAGs per se, but rather dumps the keys into the session keyring, the session
keyring of the parent must be replaced if, for example, VIOCSETTOK is passed
the newpag flag.
This can be tested with the following program:
#include <stdio.h>
#include <stdlib.h>
#include <keyutils.h>
#define KEYCTL_SESSION_TO_PARENT 18
#define OSERROR(X, S) do { if ((long)(X) == -1) { perror(S); exit(1); } } while(0)
int main(int argc, char **argv)
{
key_serial_t keyring, key;
long ret;
keyring = keyctl_join_session_keyring(argv[1]);
OSERROR(keyring, "keyctl_join_session_keyring");
key = add_key("user", "a", "b", 1, keyring);
OSERROR(key, "add_key");
ret = keyctl(KEYCTL_SESSION_TO_PARENT);
OSERROR(ret, "KEYCTL_SESSION_TO_PARENT");
return 0;
}
Compiled and linked with -lkeyutils, you should see something like:
[dhowells@andromeda ~]$ keyctl show
Session Keyring
-3 --alswrv 4043 4043 keyring: _ses
355907932 --alswrv 4043 -1 \_ keyring: _uid.4043
[dhowells@andromeda ~]$ /tmp/newpag
[dhowells@andromeda ~]$ keyctl show
Session Keyring
-3 --alswrv 4043 4043 keyring: _ses
1055658746 --alswrv 4043 4043 \_ user: a
[dhowells@andromeda ~]$ /tmp/newpag hello
[dhowells@andromeda ~]$ keyctl show
Session Keyring
-3 --alswrv 4043 4043 keyring: hello
340417692 --alswrv 4043 4043 \_ user: a
Where the test program creates a new session keyring, sticks a user key named
'a' into it and then installs it on its parent.
Signed-off-by: David Howells <[email protected]>
Signed-off-by: James Morris <[email protected]>
|
static int sock_bindtoindex_locked(struct sock *sk, int ifindex)
{
int ret = -ENOPROTOOPT;
#ifdef CONFIG_NETDEVICES
struct net *net = sock_net(sk);
/* Sorry... */
ret = -EPERM;
if (sk->sk_bound_dev_if && !ns_capable(net->user_ns, CAP_NET_RAW))
goto out;
ret = -EINVAL;
if (ifindex < 0)
goto out;
sk->sk_bound_dev_if = ifindex;
if (sk->sk_prot->rehash)
sk->sk_prot->rehash(sk);
sk_dst_reset(sk);
ret = 0;
out:
#endif
return ret;
}
| 0 |
[] |
net
|
35306eb23814444bd4021f8a1c3047d3cb0c8b2b
| 246,108,182,943,332,840,000,000,000,000,000,000,000 | 27 |
af_unix: fix races in sk_peer_pid and sk_peer_cred accesses
Jann Horn reported that SO_PEERCRED and SO_PEERGROUPS implementations
are racy, as af_unix can concurrently change sk_peer_pid and sk_peer_cred.
In order to fix this issue, this patch adds a new spinlock that needs
to be used whenever these fields are read or written.
Jann also pointed out that l2cap_sock_get_peer_pid_cb() is currently
reading sk->sk_peer_pid which makes no sense, as this field
is only possibly set by AF_UNIX sockets.
We will have to clean this in a separate patch.
This could be done by reverting b48596d1dc25 "Bluetooth: L2CAP: Add get_peer_pid callback"
or implementing what was truly expected.
Fixes: 109f6e39fa07 ("af_unix: Allow SO_PEERCRED to work across namespaces.")
Signed-off-by: Eric Dumazet <[email protected]>
Reported-by: Jann Horn <[email protected]>
Cc: Eric W. Biederman <[email protected]>
Cc: Luiz Augusto von Dentz <[email protected]>
Cc: Marcel Holtmann <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
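A user-space analogue of the af_unix fix, using a POSIX spinlock so that the paired peer-credential fields can never be observed half-updated. The struct and field names are invented, not the kernel's (link with -lpthread).

#include <pthread.h>
#include <stdio.h>
#include <sys/types.h>

struct peer_cred { pid_t pid; uid_t uid; };

struct sock_like {
    pthread_spinlock_t peer_lock;   /* the new dedicated lock */
    struct peer_cred   peer;
};

static void sock_init(struct sock_like *sk)
{
    pthread_spin_init(&sk->peer_lock, PTHREAD_PROCESS_PRIVATE);
    sk->peer.pid = 0;
    sk->peer.uid = 0;
}

static void set_peer(struct sock_like *sk, pid_t pid, uid_t uid)
{
    pthread_spin_lock(&sk->peer_lock);
    sk->peer.pid = pid;              /* both fields change under the lock */
    sk->peer.uid = uid;
    pthread_spin_unlock(&sk->peer_lock);
}

static struct peer_cred get_peer(struct sock_like *sk)
{
    struct peer_cred c;

    pthread_spin_lock(&sk->peer_lock);
    c = sk->peer;                    /* consistent snapshot of both fields */
    pthread_spin_unlock(&sk->peer_lock);
    return c;
}

int main(void)
{
    struct sock_like sk;

    sock_init(&sk);
    set_peer(&sk, 1234, 1000);
    printf("pid=%ld uid=%ld\n", (long)get_peer(&sk).pid, (long)get_peer(&sk).uid);
    return 0;
}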
information_schema_numeric_attributes() const
{
uint32 prec= type_limits_int()->precision();
return Information_schema_numeric_attributes(prec, 0);
}
| 0 |
[
"CWE-416",
"CWE-703"
] |
server
|
08c7ab404f69d9c4ca6ca7a9cf7eec74c804f917
| 300,059,232,527,173,430,000,000,000,000,000,000,000 | 5 |
MDEV-24176 Server crashes after insert in the table with virtual
column generated using date_format() and if()
vcol_info->expr is allocated on expr_arena at parsing stage. Since
expr item is allocated on expr_arena all its containee items must be
allocated on expr_arena too. Otherwise fix_session_expr() will
encounter prematurely freed item.
When the table is reopened from cache, vcol_info contains a stale
expression. We refresh the expression via TABLE::vcol_fix_exprs(), but
first we must prepare a proper context (Vcol_expr_context) which meets
some requirements:
1. As noted above expr update must be done on expr_arena as there may
be new items created. It was a bug in fix_session_expr_for_read() and
was just not reproduced because of no second refix. Now refix is done
for more cases so it does reproduce. Tests affected: vcol.binlog
2. Also name resolution context must be narrowed to the single table.
Tested by: vcol.update main.default vcol.vcol_syntax gcol.gcol_bugfixes
3. sql_mode must be clean and not fail expr update.
sql_mode such as MODE_NO_BACKSLASH_ESCAPES, MODE_NO_ZERO_IN_DATE, etc
must not affect vcol expression update. If the table was created
successfully any further evaluation must not fail. Tests affected:
main.func_like
Reviewed by: Sergei Golubchik <[email protected]>
|
static ssize_t dp8393x_receive(NetClientState *nc, const uint8_t * buf,
size_t pkt_size)
{
dp8393xState *s = qemu_get_nic_opaque(nc);
int packet_type;
uint32_t available, address;
int width, rx_len, padded_len;
uint32_t checksum;
int size;
s->regs[SONIC_RCR] &= ~(SONIC_RCR_PRX | SONIC_RCR_LBK | SONIC_RCR_FAER |
SONIC_RCR_CRCR | SONIC_RCR_LPKT | SONIC_RCR_BC | SONIC_RCR_MC);
if (s->last_rba_is_full) {
return pkt_size;
}
rx_len = pkt_size + sizeof(checksum);
if (s->regs[SONIC_DCR] & SONIC_DCR_DW) {
width = 2;
padded_len = ((rx_len - 1) | 3) + 1;
} else {
width = 1;
padded_len = ((rx_len - 1) | 1) + 1;
}
if (padded_len > dp8393x_rbwc(s) * 2) {
DPRINTF("oversize packet, pkt_size is %d\n", pkt_size);
s->regs[SONIC_ISR] |= SONIC_ISR_RBAE;
dp8393x_update_irq(s);
s->regs[SONIC_RCR] |= SONIC_RCR_LPKT;
goto done;
}
packet_type = dp8393x_receive_filter(s, buf, pkt_size);
if (packet_type < 0) {
DPRINTF("packet not for netcard\n");
return -1;
}
/* Check for EOL */
if (s->regs[SONIC_LLFA] & SONIC_DESC_EOL) {
/* Are we still in resource exhaustion? */
size = sizeof(uint16_t) * 1 * width;
address = dp8393x_crda(s) + sizeof(uint16_t) * 5 * width;
address_space_read(&s->as, address, MEMTXATTRS_UNSPECIFIED,
s->data, size);
s->regs[SONIC_LLFA] = dp8393x_get(s, width, 0);
if (s->regs[SONIC_LLFA] & SONIC_DESC_EOL) {
/* Still EOL ; stop reception */
return -1;
}
/* Link has been updated by host */
/* Clear in_use */
size = sizeof(uint16_t) * width;
address = dp8393x_crda(s) + sizeof(uint16_t) * 6 * width;
dp8393x_put(s, width, 0, 0);
address_space_rw(&s->as, address, MEMTXATTRS_UNSPECIFIED,
(uint8_t *)s->data, size, 1);
/* Move to next descriptor */
s->regs[SONIC_CRDA] = s->regs[SONIC_LLFA];
s->regs[SONIC_ISR] |= SONIC_ISR_PKTRX;
}
/* Save current position */
s->regs[SONIC_TRBA1] = s->regs[SONIC_CRBA1];
s->regs[SONIC_TRBA0] = s->regs[SONIC_CRBA0];
/* Calculate the ethernet checksum */
checksum = cpu_to_le32(crc32(0, buf, pkt_size));
/* Put packet into RBA */
DPRINTF("Receive packet at %08x\n", dp8393x_crba(s));
address = dp8393x_crba(s);
address_space_write(&s->as, address, MEMTXATTRS_UNSPECIFIED,
buf, pkt_size);
address += pkt_size;
/* Put frame checksum into RBA */
address_space_write(&s->as, address, MEMTXATTRS_UNSPECIFIED,
&checksum, sizeof(checksum));
address += sizeof(checksum);
/* Pad short packets to keep pointers aligned */
if (rx_len < padded_len) {
size = padded_len - rx_len;
address_space_rw(&s->as, address, MEMTXATTRS_UNSPECIFIED,
(uint8_t *)"\xFF\xFF\xFF", size, 1);
address += size;
}
s->regs[SONIC_CRBA1] = address >> 16;
s->regs[SONIC_CRBA0] = address & 0xffff;
available = dp8393x_rbwc(s);
available -= padded_len >> 1;
s->regs[SONIC_RBWC1] = available >> 16;
s->regs[SONIC_RBWC0] = available & 0xffff;
/* Update status */
if (dp8393x_rbwc(s) < s->regs[SONIC_EOBC]) {
s->regs[SONIC_RCR] |= SONIC_RCR_LPKT;
}
s->regs[SONIC_RCR] |= packet_type;
s->regs[SONIC_RCR] |= SONIC_RCR_PRX;
if (s->loopback_packet) {
s->regs[SONIC_RCR] |= SONIC_RCR_LBK;
s->loopback_packet = 0;
}
/* Write status to memory */
DPRINTF("Write status at %08x\n", dp8393x_crda(s));
dp8393x_put(s, width, 0, s->regs[SONIC_RCR]); /* status */
dp8393x_put(s, width, 1, rx_len); /* byte count */
dp8393x_put(s, width, 2, s->regs[SONIC_TRBA0]); /* pkt_ptr0 */
dp8393x_put(s, width, 3, s->regs[SONIC_TRBA1]); /* pkt_ptr1 */
dp8393x_put(s, width, 4, s->regs[SONIC_RSC]); /* seq_no */
size = sizeof(uint16_t) * 5 * width;
address_space_write(&s->as, dp8393x_crda(s),
MEMTXATTRS_UNSPECIFIED,
s->data, size);
/* Check link field */
size = sizeof(uint16_t) * width;
address_space_read(&s->as,
dp8393x_crda(s) + sizeof(uint16_t) * 5 * width,
MEMTXATTRS_UNSPECIFIED, s->data, size);
s->regs[SONIC_LLFA] = dp8393x_get(s, width, 0);
if (s->regs[SONIC_LLFA] & SONIC_DESC_EOL) {
/* EOL detected */
s->regs[SONIC_ISR] |= SONIC_ISR_RDE;
} else {
/* Clear in_use */
size = sizeof(uint16_t) * width;
address = dp8393x_crda(s) + sizeof(uint16_t) * 6 * width;
dp8393x_put(s, width, 0, 0);
address_space_write(&s->as, address, MEMTXATTRS_UNSPECIFIED,
s->data, size);
/* Move to next descriptor */
s->regs[SONIC_CRDA] = s->regs[SONIC_LLFA];
s->regs[SONIC_ISR] |= SONIC_ISR_PKTRX;
}
dp8393x_update_irq(s);
s->regs[SONIC_RSC] = (s->regs[SONIC_RSC] & 0xff00) |
((s->regs[SONIC_RSC] + 1) & 0x00ff);
done:
if (s->regs[SONIC_RCR] & SONIC_RCR_LPKT) {
if (s->regs[SONIC_RRP] == s->regs[SONIC_RWP]) {
/* Stop packet reception */
s->last_rba_is_full = true;
} else {
/* Read next resource */
dp8393x_do_read_rra(s);
}
}
return pkt_size;
}
| 0 |
[] |
qemu
|
915976bd98a9286efe6f2e573cb4f1360603adf9
| 128,650,500,510,750,280,000,000,000,000,000,000,000 | 164 |
hw/net/dp8393x: fix integer underflow in dp8393x_do_transmit_packets()
An integer underflow could occur during packet transmission due to 'tx_len' not
being updated if SONIC_TFC register is set to zero. Check for negative 'tx_len'
when removing existing FCS.
RHBZ: https://bugzilla.redhat.com/show_bug.cgi?id=1899722
Signed-off-by: Mauro Matteo Cascella <[email protected]>
Reported-by: Gaoning Pan <[email protected]>
Acked-by: Jason Wang <[email protected]>
Message-id: [email protected]
Signed-off-by: Peter Maydell <[email protected]>
|
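The transmit-side fix referenced in the message boils down to a sign check before the FCS is stripped. A trivial, hypothetical illustration (the constant and function are not the device model's):

#include <stdio.h>

#define FCS_LEN 4

/* Returns the payload length, or -1 if the descriptor is malformed. */
static int strip_fcs(int tx_len)
{
    tx_len -= FCS_LEN;
    if (tx_len < 0)
        return -1;      /* would have underflowed: refuse the frame */
    return tx_len;
}

int main(void)
{
    printf("%d\n", strip_fcs(0));    /* -1, not a huge bogus length */
    printf("%d\n", strip_fcs(64));   /* 60 */
    return 0;
}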
const opaque* Certificate::get_buffer() const
{
if (cert_)
return cert_->get_buffer();
return NULL;
}
| 0 |
[] |
mysql-server
|
b9768521bdeb1a8069c7b871f4536792b65fd79b
| 111,601,229,628,048,680,000,000,000,000,000,000,000 | 7 |
Updated yassl to yassl-2.3.8
(cherry picked from commit 7f9941eab55ed672bfcccd382dafbdbcfdc75aaa)
|
cmd_spec_secure_protocol (const char *com, const char *val, void *place)
{
static const struct decode_item choices[] = {
{ "auto", secure_protocol_auto },
{ "sslv2", secure_protocol_sslv2 },
{ "sslv3", secure_protocol_sslv3 },
{ "tlsv1", secure_protocol_tlsv1 },
{ "tlsv1_1", secure_protocol_tlsv1_1 },
{ "tlsv1_2", secure_protocol_tlsv1_2 },
{ "tlsv1_3", secure_protocol_tlsv1_3 },
{ "pfs", secure_protocol_pfs },
};
int ok = decode_string (val, choices, countof (choices), place);
if (!ok)
fprintf (stderr, _("%s: %s: Invalid value %s.\n"), exec_name, com, quote (val));
return ok;
}
| 0 |
[
"CWE-200"
] |
wget
|
c125d24762962d91050d925fbbd9e6f30b2302f8
| 2,571,910,513,156,151,800,000,000,000,000,000,000 | 17 |
Don't use extended attributes (--xattr) by default
* src/init.c (defaults): Set enable_xattr to false by default
* src/main.c (print_help): Reverse option logic of --xattr
* doc/wget.texi: Add description for --xattr
Users may not be aware that the origin URL and Referer are saved
including credentials, and possibly access tokens within
the urls.
|
ofputil_encode_ofp11_packet_in(const struct ofputil_packet_in *pin)
{
struct ofp11_packet_in *opi;
struct ofpbuf *msg;
msg = ofpraw_alloc_xid(OFPRAW_OFPT11_PACKET_IN, OFP11_VERSION,
htonl(0), pin->packet_len);
opi = ofpbuf_put_zeros(msg, sizeof *opi);
opi->buffer_id = htonl(UINT32_MAX);
opi->in_port = ofputil_port_to_ofp11(
pin->flow_metadata.flow.in_port.ofp_port);
opi->in_phy_port = opi->in_port;
opi->total_len = htons(pin->packet_len);
opi->reason = encode_packet_in_reason(pin->reason, OFP11_VERSION);
opi->table_id = pin->table_id;
return msg;
}
| 0 |
[
"CWE-772"
] |
ovs
|
77ad4225d125030420d897c873e4734ac708c66b
| 30,640,472,592,935,133,000,000,000,000,000,000,000 | 18 |
ofp-util: Fix memory leaks on error cases in ofputil_decode_group_mod().
Found by libFuzzer.
Reported-by: Bhargava Shastry <[email protected]>
Signed-off-by: Ben Pfaff <[email protected]>
Acked-by: Justin Pettit <[email protected]>
|
String* Item_func_get_system_var::val_str(String* str)
{
THD *thd= current_thd;
if (cache_present && thd->query_id == used_query_id)
{
if (cache_present & GET_SYS_VAR_CACHE_STRING)
{
null_value= cached_null_value;
return null_value ? NULL : &cached_strval;
}
else if (cache_present & GET_SYS_VAR_CACHE_LONG)
{
null_value= cached_null_value;
if (!null_value)
cached_strval.set (cached_llval, collation.collation);
cache_present|= GET_SYS_VAR_CACHE_STRING;
return null_value ? NULL : &cached_strval;
}
else if (cache_present & GET_SYS_VAR_CACHE_DOUBLE)
{
null_value= cached_null_value;
if (!null_value)
cached_strval.set_real (cached_dval, decimals, collation.collation);
cache_present|= GET_SYS_VAR_CACHE_STRING;
return null_value ? NULL : &cached_strval;
}
}
str= var->val_str(&cached_strval, thd, var_type, &component);
cache_present|= GET_SYS_VAR_CACHE_STRING;
used_query_id= thd->query_id;
cached_null_value= null_value= !str;
return str;
}
| 0 |
[
"CWE-120"
] |
server
|
eca207c46293bc72dd8d0d5622153fab4d3fccf1
| 194,980,110,859,250,530,000,000,000,000,000,000,000 | 35 |
MDEV-25317 Assertion `scale <= precision' failed in decimal_bin_size And Assertion `scale >= 0 && precision > 0 && scale <= precision' failed in decimal_bin_size_inline/decimal_bin_size.
Precision should be kept below DECIMAL_MAX_SCALE for computations.
It can be bigger in Item_decimal. I'd fix this too, but it changes the
existing behaviour, so it is problematic to fix.
|
TPMA_LOCALITY_Marshal(TPMA_LOCALITY *source, BYTE **buffer, INT32 *size)
{
UINT16 written = 0;
written += UINT8_Marshal((UINT8 *)source, buffer, size); /* libtpms changed */
return written;
}
| 0 |
[
"CWE-787"
] |
libtpms
|
3ef9b26cb9f28bd64d738bff9505a20d4eb56acd
| 24,410,870,977,964,300,000,000,000,000,000,000,000 | 6 |
tpm2: Add maxSize parameter to TPM2B_Marshal for sanity checks
Add maxSize parameter to TPM2B_Marshal and assert on it checking
the size of the data intended to be marshaled versus the maximum
buffer size.
Signed-off-by: Stefan Berger <[email protected]>
|
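An illustrative, simplified version of a size-checked marshal helper in the spirit of the change above; the types and the assert-based policy are assumptions, not libtpms' real definitions.

#include <assert.h>
#include <stdint.h>
#include <string.h>

typedef struct {
    uint16_t size;
    uint8_t  buffer[128];
} TPM2B_LIKE;

static uint16_t tpm2b_marshal(const TPM2B_LIKE *src, uint8_t **buf,
                              int32_t *remaining, uint16_t max_size)
{
    uint16_t total = (uint16_t)(sizeof(src->size) + src->size);

    assert(src->size <= max_size);          /* the added sanity check */
    assert(*remaining >= (int32_t)total);   /* the usual space check */

    (*buf)[0] = (uint8_t)(src->size >> 8);  /* big-endian length prefix */
    (*buf)[1] = (uint8_t)(src->size & 0xff);
    memcpy(*buf + 2, src->buffer, src->size);

    *buf += total;
    *remaining -= total;
    return total;
}

int main(void)
{
    TPM2B_LIKE t = { .size = 3, .buffer = { 1, 2, 3 } };
    uint8_t out[16], *p = out;
    int32_t left = sizeof(out);

    return tpm2b_marshal(&t, &p, &left, sizeof(t.buffer)) == 5 ? 0 : 1;
}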
static void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
struct sched_entity *se, int cpu, int add,
struct sched_entity *parent)
{
struct rq *rq = cpu_rq(cpu);
tg->cfs_rq[cpu] = cfs_rq;
init_cfs_rq(cfs_rq, rq);
cfs_rq->tg = tg;
if (add)
list_add(&cfs_rq->leaf_cfs_rq_list, &rq->leaf_cfs_rq_list);
tg->se[cpu] = se;
/* se could be NULL for init_task_group */
if (!se)
return;
if (!parent)
se->cfs_rq = &rq->cfs;
else
se->cfs_rq = parent->my_q;
se->my_q = cfs_rq;
se->load.weight = tg->shares;
se->load.inv_weight = div64_64(1ULL<<32, se->load.weight);
se->parent = parent;
}
| 0 |
[] |
linux-2.6
|
8f1bc385cfbab474db6c27b5af1e439614f3025c
| 195,003,333,031,269,300,000,000,000,000,000,000,000 | 26 |
sched: fair: weight calculations
In order to level the hierarchy, we need to calculate load based on the
root view. That is, each task's load is in the same unit.
      A
     / \
    B   1
   / \
  2   3

To compute 1's load we do:

    weight(1) / rq_weight(A)

To compute 2's load we do:

    (weight(2) / rq_weight(B)) * (weight(B) / rq_weight(A))

This yields load fractions in comparable units.
The consequence is that it changes virtual time. We used to have:

    vtime_{i} = time_{i} / weight_{i}

    vtime = \Sum vtime_{i} = time / rq_weight.
But with the new way of load calculation we get that vtime equals time.
Signed-off-by: Peter Zijlstra <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
|
bool JOIN::rollup_init()
{
uint i,j;
Item **ref_array;
tmp_table_param.quick_group= 0; // Can't create groups in tmp table
rollup.state= ROLLUP::STATE_INITED;
/*
Create pointers to the different sum function groups
These are updated by rollup_make_fields()
*/
tmp_table_param.group_parts= send_group_parts;
Item_null_result **null_items=
static_cast<Item_null_result**>(thd->alloc(sizeof(Item*)*send_group_parts));
rollup.null_items= Item_null_array(null_items, send_group_parts);
rollup.ref_pointer_arrays=
static_cast<Ref_ptr_array*>
(thd->alloc((sizeof(Ref_ptr_array) +
all_fields.elements * sizeof(Item*)) * send_group_parts));
rollup.fields=
static_cast<List<Item>*>(thd->alloc(sizeof(List<Item>) * send_group_parts));
if (!null_items || !rollup.ref_pointer_arrays || !rollup.fields)
return true;
ref_array= (Item**) (rollup.ref_pointer_arrays+send_group_parts);
/*
Prepare space for field list for the different levels
These will be filled up in rollup_make_fields()
*/
for (i= 0 ; i < send_group_parts ; i++)
{
rollup.null_items[i]= new (thd->mem_root) Item_null_result(thd);
List<Item> *rollup_fields= &rollup.fields[i];
rollup_fields->empty();
rollup.ref_pointer_arrays[i]= Ref_ptr_array(ref_array, all_fields.elements);
ref_array+= all_fields.elements;
}
for (i= 0 ; i < send_group_parts; i++)
{
for (j=0 ; j < fields_list.elements ; j++)
rollup.fields[i].push_back(rollup.null_items[i], thd->mem_root);
}
List_iterator<Item> it(all_fields);
Item *item;
while ((item= it++))
{
ORDER *group_tmp;
bool found_in_group= 0;
for (group_tmp= group_list; group_tmp; group_tmp= group_tmp->next)
{
if (*group_tmp->item == item)
{
item->maybe_null= 1;
item->in_rollup= 1;
found_in_group= 1;
break;
}
}
if (item->type() == Item::FUNC_ITEM && !found_in_group)
{
bool changed= FALSE;
if (change_group_ref(thd, (Item_func *) item, group_list, &changed))
return 1;
/*
We have to prevent creation of a field in a temporary table for
an expression that contains GROUP BY attributes.
Marking the expression item as 'with_sum_func' will ensure this.
*/
if (changed)
item->with_sum_func= 1;
}
}
return 0;
}
| 0 |
[
"CWE-89"
] |
server
|
5ba77222e9fe7af8ff403816b5338b18b342053c
| 112,784,307,462,427,890,000,000,000,000,000,000,000 | 81 |
MDEV-21028 Server crashes in Query_arena::set_query_arena upon SELECT from view
if the view has algorithm=temptable it is not updatable,
so DEFAULT() for its fields is meaningless,
and thus it's NULL or 0/'' for NOT NULL columns.
|
gimp_layer_mode_from_psp_blend_mode (PSPBlendModes mode)
{
switch (mode)
{
case PSP_BLEND_NORMAL:
return GIMP_LAYER_MODE_NORMAL_LEGACY;
case PSP_BLEND_DARKEN:
return GIMP_LAYER_MODE_DARKEN_ONLY_LEGACY;
case PSP_BLEND_LIGHTEN:
return GIMP_LAYER_MODE_LIGHTEN_ONLY_LEGACY;
case PSP_BLEND_HUE:
return GIMP_LAYER_MODE_HSV_HUE_LEGACY;
case PSP_BLEND_SATURATION:
return GIMP_LAYER_MODE_HSV_SATURATION_LEGACY;
case PSP_BLEND_COLOR:
return GIMP_LAYER_MODE_HSL_COLOR_LEGACY;
case PSP_BLEND_LUMINOSITY:
return GIMP_LAYER_MODE_HSV_VALUE_LEGACY; /* ??? */
case PSP_BLEND_MULTIPLY:
return GIMP_LAYER_MODE_MULTIPLY_LEGACY;
case PSP_BLEND_SCREEN:
return GIMP_LAYER_MODE_SCREEN_LEGACY;
case PSP_BLEND_DISSOLVE:
return GIMP_LAYER_MODE_DISSOLVE;
case PSP_BLEND_OVERLAY:
return GIMP_LAYER_MODE_OVERLAY;
case PSP_BLEND_HARD_LIGHT:
return GIMP_LAYER_MODE_HARDLIGHT_LEGACY;
case PSP_BLEND_SOFT_LIGHT:
return GIMP_LAYER_MODE_SOFTLIGHT_LEGACY;
case PSP_BLEND_DIFFERENCE:
return GIMP_LAYER_MODE_DIFFERENCE_LEGACY;
case PSP_BLEND_DODGE:
return GIMP_LAYER_MODE_DODGE_LEGACY;
case PSP_BLEND_BURN:
return GIMP_LAYER_MODE_BURN_LEGACY;
case PSP_BLEND_EXCLUSION:
return -1; /* ??? */
case PSP_BLEND_ADJUST:
return -1; /* ??? */
case PSP_BLEND_TRUE_HUE:
return -1; /* ??? */
case PSP_BLEND_TRUE_SATURATION:
return -1; /* ??? */
case PSP_BLEND_TRUE_COLOR:
return -1; /* ??? */
case PSP_BLEND_TRUE_LIGHTNESS:
return -1; /* ??? */
}
return -1;
}
| 0 |
[
"CWE-125"
] |
gimp
|
eb2980683e6472aff35a3117587c4f814515c74d
| 58,069,443,748,428,550,000,000,000,000,000,000,000 | 72 |
Bug 790853 - (CVE-2017-17787) heap overread in psp importer.
As with any external data, we have to check that strings being read at fixed
length are properly nul-terminated.
|
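The check the fix adds can be summarized in a few lines: after reading a fixed-length string from an untrusted file, reject it unless a terminating NUL is present inside the bytes actually read. A generic sketch, not the PSP loader's code; the file name is a placeholder.

#include <stdio.h>
#include <string.h>

static int read_fixed_string(FILE *f, char *dst, size_t len)
{
    if (fread(dst, 1, len, f) != len)
        return -1;                        /* short read */
    if (memchr(dst, '\0', len) == NULL)
        return -1;                        /* no terminator: treat as corrupt */
    return 0;
}

int main(void)
{
    char name[64];
    FILE *f = fopen("image.psp", "rb");

    if (!f)
        return 1;
    if (read_fixed_string(f, name, sizeof(name)) == 0)
        printf("name: %s\n", name);
    fclose(f);
    return 0;
}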
sc_oberthur_get_certificate_authority(struct sc_pkcs15_der *der, int *out_authority)
{
#ifdef ENABLE_OPENSSL
X509 *x;
BUF_MEM buf_mem;
BIO *bio = NULL;
BASIC_CONSTRAINTS *bs = NULL;
if (!der)
return SC_ERROR_INVALID_ARGUMENTS;
buf_mem.data = malloc(der->len);
if (!buf_mem.data)
return SC_ERROR_OUT_OF_MEMORY;
memcpy(buf_mem.data, der->value, der->len);
buf_mem.max = buf_mem.length = der->len;
bio = BIO_new(BIO_s_mem());
if (!bio) {
free(buf_mem.data);
return SC_ERROR_OUT_OF_MEMORY;
}
BIO_set_mem_buf(bio, &buf_mem, BIO_NOCLOSE);
x = d2i_X509_bio(bio, 0);
free(buf_mem.data);
BIO_free(bio);
if (!x)
return SC_ERROR_INVALID_DATA;
bs = (BASIC_CONSTRAINTS *)X509_get_ext_d2i(x, NID_basic_constraints, NULL, NULL);
if (out_authority)
*out_authority = (bs && bs->ca);
X509_free(x);
return SC_SUCCESS;
#else
return SC_ERROR_NOT_SUPPORTED;
#endif
}
| 0 |
[] |
OpenSC
|
5d4daf6c92e4668f5458f380f3cacea3e879d91a
| 112,716,169,816,050,400,000,000,000,000,000,000,000 | 42 |
oberthur: One more overlooked buffer overflow
Thanks oss-fuzz
https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=32202
|
dns_zone_getprivatetype(dns_zone_t *zone) {
REQUIRE(DNS_ZONE_VALID(zone));
return (zone->privatetype);
}
| 0 |
[
"CWE-327"
] |
bind9
|
f09352d20a9d360e50683cd1d2fc52ccedcd77a0
| 43,478,874,869,189,930,000,000,000,000,000,000,000 | 4 |
Update keyfetch_done compute_tag check
If in keyfetch_done the compute_tag fails (because for example the
algorithm is not supported), don't crash, but instead ignore the
key.
|
nautilus_file_get_by_uri (const char *uri)
{
GFile *location;
NautilusFile *file;
location = g_file_new_for_uri (uri);
file = nautilus_file_get_internal (location, TRUE);
g_object_unref (location);
return file;
}
| 0 |
[] |
nautilus
|
7632a3e13874a2c5e8988428ca913620a25df983
| 338,663,860,213,622,400,000,000,000,000,000,000,000 | 11 |
Check for trusted desktop file launchers.
2009-02-24 Alexander Larsson <[email protected]>
* libnautilus-private/nautilus-directory-async.c:
Check for trusted desktop file launchers.
* libnautilus-private/nautilus-file-private.h:
* libnautilus-private/nautilus-file.c:
* libnautilus-private/nautilus-file.h:
Add nautilus_file_is_trusted_link.
Allow unsetting of custom display name.
* libnautilus-private/nautilus-mime-actions.c:
Display dialog when trying to launch a non-trusted desktop file.
svn path=/trunk/; revision=15003
|
static int push_insn(int t, int w, int e, struct bpf_verifier_env *env)
{
if (e == FALLTHROUGH && insn_state[t] >= (DISCOVERED | FALLTHROUGH))
return 0;
if (e == BRANCH && insn_state[t] >= (DISCOVERED | BRANCH))
return 0;
if (w < 0 || w >= env->prog->len) {
verbose_linfo(env, t, "%d: ", t);
verbose(env, "jump out of range from insn %d to %d\n", t, w);
return -EINVAL;
}
if (e == BRANCH)
/* mark branch target for state pruning */
env->explored_states[w] = STATE_LIST_MARK;
if (insn_state[w] == 0) {
/* tree-edge */
insn_state[t] = DISCOVERED | e;
insn_state[w] = DISCOVERED;
if (cur_stack >= env->prog->len)
return -E2BIG;
insn_stack[cur_stack++] = w;
return 1;
} else if ((insn_state[w] & 0xF0) == DISCOVERED) {
verbose_linfo(env, t, "%d: ", t);
verbose_linfo(env, w, "%d: ", w);
verbose(env, "back-edge from insn %d to %d\n", t, w);
return -EINVAL;
} else if (insn_state[w] == EXPLORED) {
/* forward- or cross-edge */
insn_state[t] = DISCOVERED | e;
} else {
verbose(env, "insn state internal bug\n");
return -EFAULT;
}
return 0;
}
| 0 |
[
"CWE-703",
"CWE-189"
] |
linux
|
979d63d50c0c0f7bc537bf821e056cc9fe5abd38
| 76,652,363,834,385,830,000,000,000,000,000,000,000 | 40 |
bpf: prevent out of bounds speculation on pointer arithmetic
Jann reported that the original commit back in b2157399cc98
("bpf: prevent out-of-bounds speculation") was not sufficient
to stop CPU from speculating out of bounds memory access:
While b2157399cc98 only focussed on masking array map access
for unprivileged users for tail calls and data access such
that the user provided index gets sanitized from BPF program
and syscall side, there is still a more generic form affected
from BPF programs that applies to most maps that hold user
data in relation to dynamic map access when dealing with
unknown scalars or "slow" known scalars as access offset, for
example:
- Load a map value pointer into R6
- Load an index into R7
- Do a slow computation (e.g. with a memory dependency) that
loads a limit into R8 (e.g. load the limit from a map for
high latency, then mask it to make the verifier happy)
- Exit if R7 >= R8 (mispredicted branch)
- Load R0 = R6[R7]
- Load R0 = R6[R0]
For unknown scalars there are two options in the BPF verifier
where we could derive knowledge from in order to guarantee
safe access to the memory: i) While </>/<=/>= variants won't
allow to derive any lower or upper bounds from the unknown
scalar where it would be safe to add it to the map value
pointer, it is possible through ==/!= test however. ii) another
option is to transform the unknown scalar into a known scalar,
for example, through ALU ops combination such as R &= <imm>
followed by R |= <imm> or any similar combination where the
original information from the unknown scalar would be destroyed
entirely leaving R with a constant. The initial slow load still
precedes the latter ALU ops on that register, so the CPU
executes speculatively from that point. Once we have the known
scalar, any compare operation would work then. A third option
only involving registers with known scalars could be crafted
as described in [0] where a CPU port (e.g. Slow Int unit)
would be filled with many dependent computations such that
the subsequent condition depending on its outcome has to wait
for evaluation on its execution port and thereby executing
speculatively if the speculated code can be scheduled on a
different execution port, or any other form of mistraining
as described in [1], for example. Given this is not limited
to only unknown scalars, not only map but also stack access
is affected since both is accessible for unprivileged users
and could potentially be used for out of bounds access under
speculation.
In order to prevent any of these cases, the verifier is now
sanitizing pointer arithmetic on the offset such that any
out of bounds speculation would be masked in a way where the
pointer arithmetic result in the destination register will
stay unchanged, meaning offset masked into zero similar as
in array_index_nospec() case. With regards to implementation,
there are three options that were considered: i) new insn
for sanitation, ii) push/pop insn and sanitation as inlined
BPF, iii) reuse of ax register and sanitation as inlined BPF.
Option i) has the downside that we end up using from reserved
bits in the opcode space, but also that we would require
each JIT to emit masking as native arch opcodes meaning
mitigation would have slow adoption till everyone implements
it eventually which is counter-productive. Option ii) and iii)
have both in common that a temporary register is needed in
order to implement the sanitation as inlined BPF since we
are not allowed to modify the source register. While a push /
pop insn in ii) would be useful to have in any case, it
requires once again that every JIT needs to implement it
first. While possible, amount of changes needed would also
be unsuitable for a -stable patch. Therefore, the path which
has fewer changes, less BPF instructions for the mitigation
and does not require anything to be changed in the JITs is
option iii) which this work is pursuing. The ax register is
already mapped to a register in all JITs (modulo arm32 where
it's mapped to stack as various other BPF registers there)
and used in constant blinding for JITs-only so far. It can
be reused for verifier rewrites under certain constraints.
The interpreter's tmp "register" has therefore been remapped
into extending the register set with hidden ax register and
reusing that for a number of instructions that needed the
prior temporary variable internally (e.g. div, mod). This
allows for zero increase in stack space usage in the interpreter,
and enables (restricted) generic use in rewrites otherwise as
long as such a patchlet does not make use of these instructions.
The sanitation mask is dynamic and relative to the offset the
map value or stack pointer currently holds.
There are various cases that need to be taken under consideration
for the masking, e.g. such operation could look as follows:
ptr += val or val += ptr or ptr -= val. Thus, the value to be
sanitized could reside either in source or in destination
register, and the limit is different depending on whether
the ALU op is addition or subtraction and depending on the
current known and bounded offset. The limit is derived as
follows: limit := max_value_size - (smin_value + off). For
subtraction: limit := umax_value + off. This holds because
we do not allow any pointer arithmetic that would
temporarily go out of bounds or would have an unknown
value with mixed signed bounds where it is unclear at
verification time whether the actual runtime value would
be either negative or positive. For example, we have a
derived map pointer value with constant offset and bounded
one, so limit based on smin_value works because the verifier
requires that statically analyzed arithmetic on the pointer
must be in bounds, and thus it checks if resulting
smin_value + off and umax_value + off is still within map
value bounds at time of arithmetic in addition to time of
access. Similarly, for the case of stack access we derive
the limit as follows: MAX_BPF_STACK + off for subtraction
and -off for the case of addition where off := ptr_reg->off +
ptr_reg->var_off.value. Subtraction is a special case for
the masking which can be in form of ptr += -val, ptr -= -val,
or ptr -= val. In the first two cases where we know that
the value is negative, we need to temporarily negate the
value in order to do the sanitation on a positive value
where we later swap the ALU op, and restore original source
register if the value was in source.
The sanitation of pointer arithmetic alone is still not fully
sufficient as is, since a scenario like the following could
happen ...
PTR += 0x1000 (e.g. K-based imm)
PTR -= BIG_NUMBER_WITH_SLOW_COMPARISON
PTR += 0x1000
PTR -= BIG_NUMBER_WITH_SLOW_COMPARISON
[...]
... which under speculation could end up as ...
PTR += 0x1000
PTR -= 0 [ truncated by mitigation ]
PTR += 0x1000
PTR -= 0 [ truncated by mitigation ]
[...]
... and therefore still access out of bounds. To prevent such
case, the verifier is also analyzing safety for potential out
of bounds access under speculative execution. Meaning, it is
also simulating pointer access under truncation. We therefore
"branch off" and push the current verification state after the
ALU operation with known 0 to the verification stack for later
analysis. Given the current path analysis succeeded it is
likely that the one under speculation can be pruned. In any
case, it is also subject to existing complexity limits and
therefore anything beyond this point will be rejected. In
terms of pruning, it needs to be ensured that the verification
state from speculative execution simulation must never prune
a non-speculative execution path, therefore, we mark verifier
state accordingly at the time of push_stack(). If verifier
detects out of bounds access under speculative execution from
one of the possible paths that includes a truncation, it will
reject such program.
Given we mask every reg-based pointer arithmetic for
unprivileged programs, we've been looking into how it could
affect real-world programs in terms of size increase. As the
majority of programs are targeted for privileged-only use
case, we've unconditionally enabled masking (with its alu
restrictions on top of it) for privileged programs for the
sake of testing in order to check i) whether they get rejected
in its current form, and ii) by how much the number of
instructions and size will increase. We've tested this by
using Katran, Cilium and test_l4lb from the kernel selftests.
For Katran we've evaluated balancer_kern.o, Cilium bpf_lxc.o
and an older test object bpf_lxc_opt_-DUNKNOWN.o and l4lb
we've used test_l4lb.o as well as test_l4lb_noinline.o. We
found that none of the programs got rejected by the verifier
with this change, and that impact is rather minimal to none.
balancer_kern.o had 13,904 bytes (1,738 insns) xlated and
7,797 bytes JITed before and after the change. Most complex
program in bpf_lxc.o had 30,544 bytes (3,817 insns) xlated
and 18,538 bytes JITed before and after and none of the other
tail call programs in bpf_lxc.o had any changes either. For
the older bpf_lxc_opt_-DUNKNOWN.o object we found a small
increase from 20,616 bytes (2,576 insns) and 12,536 bytes JITed
before to 20,664 bytes (2,582 insns) and 12,558 bytes JITed
after the change. Other programs from that object file had
similar small increase. Both test_l4lb.o had no change and
remained at 6,544 bytes (817 insns) xlated and 3,401 bytes
JITed and for test_l4lb_noinline.o constant at 5,080 bytes
(634 insns) xlated and 3,313 bytes JITed. This can be explained
in that LLVM typically optimizes stack based pointer arithmetic
by using K-based operations and that use of dynamic map access
is not overly frequent. However, in future we may decide to
optimize the algorithm further under known guarantees from
branch and value speculation. Latter seems also unclear in
terms of prediction heuristics that today's CPUs apply as well
as whether there could be collisions in e.g. the predictor's
Value History/Pattern Table for triggering out of bounds access,
thus masking is performed unconditionally at this point but could
be subject to relaxation later on. We were generally also
brainstorming various other approaches for mitigation, but the
blocker was always lack of available registers at runtime and/or
overhead for runtime tracking of limits belonging to a specific
pointer. Thus, we found this to be minimally intrusive under
given constraints.
With that in place, a simple example with sanitized access on
unprivileged load at post-verification time looks as follows:
# bpftool prog dump xlated id 282
[...]
28: (79) r1 = *(u64 *)(r7 +0)
29: (79) r2 = *(u64 *)(r7 +8)
30: (57) r1 &= 15
31: (79) r3 = *(u64 *)(r0 +4608)
32: (57) r3 &= 1
33: (47) r3 |= 1
34: (2d) if r2 > r3 goto pc+19
35: (b4) (u32) r11 = (u32) 20479 |
36: (1f) r11 -= r2 | Dynamic sanitation for pointer
37: (4f) r11 |= r2 | arithmetic with registers
38: (87) r11 = -r11 | containing bounded or known
39: (c7) r11 s>>= 63 | scalars in order to prevent
40: (5f) r11 &= r2 | out of bounds speculation.
41: (0f) r4 += r11 |
42: (71) r4 = *(u8 *)(r4 +0)
43: (6f) r4 <<= r1
[...]
For the case where the scalar sits in the destination register
as opposed to the source register, the following code is emitted
for the above example:
[...]
16: (b4) (u32) r11 = (u32) 20479
17: (1f) r11 -= r2
18: (4f) r11 |= r2
19: (87) r11 = -r11
20: (c7) r11 s>>= 63
21: (5f) r2 &= r11
22: (0f) r2 += r0
23: (61) r0 = *(u32 *)(r2 +0)
[...]
JIT blinding example with non-conflicting use of r10:
[...]
d5: je 0x0000000000000106 _
d7: mov 0x0(%rax),%edi |
da: mov $0xf153246,%r10d | Index load from map value and
e0: xor $0xf153259,%r10 | (const blinded) mask with 0x1f.
e7: and %r10,%rdi |_
ea: mov $0x2f,%r10d |
f0: sub %rdi,%r10 | Sanitized addition. Both use r10
f3: or %rdi,%r10 | but do not interfere with each
f6: neg %r10 | other. (Neither do these instructions
f9: sar $0x3f,%r10 | interfere with the use of ax as temp
fd: and %r10,%rdi | in interpreter.)
100: add %rax,%rdi |_
103: mov 0x0(%rdi),%eax
[...]
Tested that it fixes Jann's reproducer, and also checked that test_verifier
and test_progs suite with interpreter, JIT and JIT with hardening enabled
on x86-64 and arm64 runs successfully.
[0] Speculose: Analyzing the Security Implications of Speculative
Execution in CPUs, Giorgi Maisuradze and Christian Rossow,
https://arxiv.org/pdf/1801.04084.pdf
[1] A Systematic Evaluation of Transient Execution Attacks and
Defenses, Claudio Canella, Jo Van Bulck, Michael Schwarz,
Moritz Lipp, Benjamin von Berg, Philipp Ortner, Frank Piessens,
Dmitry Evtyushkin, Daniel Gruss,
https://arxiv.org/pdf/1811.05441.pdf
Fixes: b2157399cc98 ("bpf: prevent out-of-bounds speculation")
Reported-by: Jann Horn <[email protected]>
Signed-off-by: Daniel Borkmann <[email protected]>
Acked-by: Alexei Starovoitov <[email protected]>
Signed-off-by: Alexei Starovoitov <[email protected]>
|
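The masking sequence shown in the xlated dump above (r11 = limit; r11 -= val; r11 |= val; r11 = -r11; r11 s>>= 63; r11 &= val) can be replayed in plain C. The sketch below assumes an arithmetic right shift on signed integers, as the BPF instruction provides; it is an illustration of the idea, not the verifier's code.

#include <stdint.h>
#include <stdio.h>

/* Returns val unchanged when 0 <= val <= limit, and 0 otherwise, without
 * a conditional branch that the CPU could mispredict. */
static uint64_t sanitize_offset(uint64_t val, uint64_t limit)
{
    int64_t mask = (int64_t)(limit - val);  /* negative iff val > limit */
    mask |= (int64_t)val;                   /* also negative if bit 63 of val is set */
    mask = -mask;
    mask >>= 63;                            /* arithmetic shift: 0 or all ones */
    return val & (uint64_t)mask;
}

int main(void)
{
    printf("%llu\n", (unsigned long long)sanitize_offset(16, 0x2f));    /* 16 */
    printf("%llu\n", (unsigned long long)sanitize_offset(4096, 0x2f));  /* 0  */
    return 0;
}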
add_dconf_locks_to_list (GString *s,
DConfClient *client,
const char *dir)
{
g_auto(GStrv) locks = NULL;
int i;
locks = dconf_client_list_locks (client, dir, NULL);
for (i = 0; locks[i]; i++)
{
g_string_append (s, locks[i]);
g_string_append_c (s, '\n');
}
}
| 0 |
[
"CWE-668"
] |
flatpak
|
cd2142888fc4c199723a0dfca1f15ea8788a5483
| 207,239,085,434,638,600,000,000,000,000,000,000,000 | 14 |
Don't expose /proc when running apply_extra
As shown by CVE-2019-5736, it is sometimes possible for the sandbox
app to access outside files using /proc/self/exe. This is not
typically an issue for flatpak as the sandbox runs as the user which
has no permissions to e.g. modify the host files.
However, when installing apps using extra-data into the system repo
we *do* actually run a sandbox as root. So, in this case we disable mounting
/proc in the sandbox, which will neuter attacks like this.
|
static MagickBooleanType ExpandHeap(size_t size)
{
DataSegmentInfo
*segment_info;
MagickBooleanType
mapped;
ssize_t
i;
void
*block;
size_t
blocksize;
void
*segment;
blocksize=((size+12*sizeof(size_t))+SegmentSize-1) & -SegmentSize;
assert(memory_pool.number_segments < MaxSegments);
segment=MapBlob(-1,IOMode,0,blocksize);
mapped=segment != (void *) NULL ? MagickTrue : MagickFalse;
if (segment == (void *) NULL)
segment=(void *) memory_methods.acquire_memory_handler(blocksize);
if (segment == (void *) NULL)
return(MagickFalse);
segment_info=(DataSegmentInfo *) free_segments;
free_segments=segment_info->next;
segment_info->mapped=mapped;
segment_info->length=blocksize;
segment_info->allocation=segment;
segment_info->bound=(char *) segment+blocksize;
i=(ssize_t) memory_pool.number_segments-1;
for ( ; (i >= 0) && (memory_pool.segments[i]->allocation > segment); i--)
memory_pool.segments[i+1]=memory_pool.segments[i];
memory_pool.segments[i+1]=segment_info;
memory_pool.number_segments++;
size=blocksize-12*sizeof(size_t);
block=(char *) segment_info->allocation+4*sizeof(size_t);
*BlockHeader(block)=size | PreviousBlockBit;
*BlockFooter(block,size)=size;
InsertFreeBlock(block,AllocationPolicy(size));
block=NextBlock(block);
assert(block < segment_info->bound);
*BlockHeader(block)=2*sizeof(size_t);
*BlockHeader(NextBlock(block))=PreviousBlockBit;
return(MagickTrue);
}
| 0 |
[
"CWE-369"
] |
ImageMagick
|
70aa86f5d5d8aa605a918ed51f7574f433a18482
| 338,017,268,251,156,100,000,000,000,000,000,000,000 | 50 |
possible divide by zero + clear buffers
|
//! Return a reference to the maximum pixel value of the instance list.
/**
**/
T& max() {
bool is_all_empty = true;
T *ptr_max = 0;
cimglist_for(*this,l) if (!_data[l].is_empty()) {
ptr_max = _data[l]._data;
is_all_empty = false;
break;
}
if (is_all_empty)
throw CImgInstanceException(_cimglist_instance
"max(): %s.",
_data?"List of empty images":"Empty instance",
cimglist_instance);
T max_value = *ptr_max;
cimglist_for(*this,l) {
const CImg<T>& img = _data[l];
| 0 |
[
"CWE-119",
"CWE-787"
] |
CImg
|
ac8003393569aba51048c9d67e1491559877b1d1
| 231,150,140,077,345,660,000,000,000,000,000,000,000 | 20 |
.
|
}
static JSValue js_sys_basename(JSContext *ctx, JSValueConst this_val, int argc, JSValueConst *argv)
{
return js_sys_file_opt(ctx, this_val, argc, argv, OPT_FILEBASENAME);
| 0 |
[
"CWE-787"
] |
gpac
|
ea1eca00fd92fa17f0e25ac25652622924a9a6a0
| 69,204,953,597,633,070,000,000,000,000,000,000,000 | 4 |
fixed #2138
|
int Http2Handler::write_tls() {
auto loop = sessions_->get_loop();
ERR_clear_error();
for (;;) {
if (wb_.rleft() > 0) {
auto rv = SSL_write(ssl_, wb_.pos, wb_.rleft());
if (rv <= 0) {
auto err = SSL_get_error(ssl_, rv);
switch (err) {
case SSL_ERROR_WANT_READ:
// renegotiation started
return -1;
case SSL_ERROR_WANT_WRITE:
ev_io_start(sessions_->get_loop(), &wev_);
return 0;
default:
return -1;
}
}
wb_.drain(rv);
continue;
}
wb_.reset();
if (fill_wb() != 0) {
return -1;
}
if (wb_.rleft() == 0) {
break;
}
}
if (wb_.rleft() == 0) {
ev_io_stop(loop, &wev_);
} else {
ev_io_start(loop, &wev_);
}
if (nghttp2_session_want_read(session_) == 0 &&
nghttp2_session_want_write(session_) == 0 && wb_.rleft() == 0) {
return -1;
}
return 0;
}
| 0 |
[] |
nghttp2
|
95efb3e19d174354ca50c65d5d7227d92bcd60e1
| 252,792,144,972,110,450,000,000,000,000,000,000,000 | 48 |
Don't read too greedily
|
void *raw_seq_start(struct seq_file *seq, loff_t *pos)
{
struct raw_iter_state *state = raw_seq_private(seq);
read_lock(&state->h->lock);
return *pos ? raw_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}
| 0 |
[
"CWE-362"
] |
linux-2.6
|
f6d8bd051c391c1c0458a30b2a7abcd939329259
| 162,498,420,221,386,230,000,000,000,000,000,000,000 | 7 |
inet: add RCU protection to inet->opt
We lack proper synchronization to manipulate inet->opt ip_options
The problem is that ip_make_skb() calls ip_setup_cork() and
ip_setup_cork() possibly makes a copy of ipc->opt (struct ip_options),
without any protection against another thread manipulating inet->opt.
Another thread can change inet->opt pointer and free old one under us.
Use RCU to protect inet->opt (changed to inet->inet_opt).
Instead of handling atomic refcounts, just copy ip_options when
necessary, to avoid cache line dirtying.
We can't insert an rcu_head in struct ip_options since it's included in
skb->cb[], so this patch is large because I had to introduce a new
ip_options_rcu structure.
Signed-off-by: Eric Dumazet <[email protected]>
Cc: Herbert Xu <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
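Note (illustration, not part of the record above): the fix copies ip_options per use instead of sharing a mutable pointer. A minimal user-space sketch of that copy-on-use idea, using a plain mutex instead of RCU; the opts_blob type and function names are assumptions.

#include <pthread.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical shared option blob, analogous to ip_options. */
struct opts_blob {
    size_t len;
    unsigned char data[40];
};

static struct opts_blob *g_opts;                 /* shared, may be swapped and freed */
static pthread_mutex_t g_opts_lock = PTHREAD_MUTEX_INITIALIZER;

/* Take a private snapshot so later updates/frees of g_opts cannot
 * affect an in-flight user (the kernel patch achieves this with RCU
 * plus copying ip_options into the cork). */
static struct opts_blob *opts_snapshot(void)
{
    struct opts_blob *copy = NULL;

    pthread_mutex_lock(&g_opts_lock);
    if (g_opts) {
        copy = malloc(sizeof(*copy));
        if (copy)
            memcpy(copy, g_opts, sizeof(*copy));
    }
    pthread_mutex_unlock(&g_opts_lock);
    return copy;                                  /* caller frees */
}

/* Writer: swap in new options; snapshots taken earlier remain valid
 * private copies even after the old blob is freed. */
static void opts_replace(struct opts_blob *new_opts)
{
    struct opts_blob *old;

    pthread_mutex_lock(&g_opts_lock);
    old = g_opts;
    g_opts = new_opts;
    pthread_mutex_unlock(&g_opts_lock);
    free(old);
}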
MoreWindows()
{
char *m = "No other window.";
if (windows && (fore == 0 || windows->w_next))
return 1;
if (fore == 0)
{
Msg(0, "No window available");
return 0;
}
Msg(0, m, fore->w_number); /* other arg for nethack */
return 0;
}
| 0 |
[] |
screen
|
c5db181b6e017cfccb8d7842ce140e59294d9f62
| 99,134,718,724,212,470,000,000,000,000,000,000,000 | 13 |
ansi: add support for xterm OSC 11
It allows for getting and setting the background color. Notably, Vim uses
OSC 11 to learn whether it's running on a light or dark colored terminal
and choose a color scheme accordingly.
Tested with gnome-terminal and xterm. When called with "?" argument the
current background color is returned:
$ echo -ne "\e]11;?\e\\"
$ 11;rgb:2323/2727/2929
Signed-off-by: Lubomir Rintel <[email protected]>
(cherry picked from commit 7059bff20a28778f9d3acf81cad07b1388d02309)
Signed-off-by: Amadeusz Sławiński <[email protected]
|
int blkid_partition_set_type_string(blkid_partition par,
const unsigned char *type, size_t len)
{
set_string((unsigned char *) par->typestr,
sizeof(par->typestr), type, len);
return 0;
}
| 0 |
[] |
util-linux
|
50d1594c2e6142a3b51d2143c74027480df082e0
| 303,890,318,507,291,300,000,000,000,000,000,000,000 | 7 |
libblkid: avoid non-empty recursion in EBR
This is an extension to the patch 7164a1c34d18831ac61c6744ad14ce916d389b3f.
We also need to detect non-empty recursion in the EBR chain. It's
possible to create standard valid logical partitions where the last one points back into the EBR chain. In this case all offsets will be non-empty.
Unfortunately, it's valid to create logical partitions that are not in
the "disk order" (sorted by start offset). So a link somewhere back is
valid, but such a link cannot point to an already existing partition
(otherwise we would see recursion).
This patch forces libblkid to ignore duplicate logical partitions; the
duplicate chain segment is interpreted as a non-data segment, and after 100
iterations with non-data segments it will break the loop -- no memory
is allocated in this case by the loop.
is allocated in this case by the loop.
Addresses: https://bugzilla.redhat.com/show_bug.cgi?id=1349536
References: http://seclists.org/oss-sec/2016/q3/40
Signed-off-by: Karel Zak <[email protected]>
|
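Note (illustration, not the libblkid code): a sketch of the duplicate-offset handling the message describes -- already-seen EBR offsets count as non-data and the walk gives up after a bounded number of useless hops. The chain representation and names are made up.

#include <stddef.h>
#include <stdint.h>

#define EBR_MAX_NONDATA 100   /* give up after this many useless hops */
#define EBR_MAX_SEEN    128   /* illustrative cap on tracked offsets */

/* Hypothetical callback returning the next EBR offset in the chain,
 * or 0 when the chain ends. */
typedef uint64_t (*ebr_next_fn)(uint64_t cur, void *ctx);

static int ebr_seen(const uint64_t *seen, size_t n, uint64_t off)
{
    size_t i;
    for (i = 0; i < n; i++)
        if (seen[i] == off)
            return 1;
    return 0;
}

/* Walk the extended-partition chain; duplicate offsets are ignored
 * (counted as non-data) so a malicious link back into the chain cannot
 * make us loop or allocate forever.  Returns the number of unique EBRs. */
static size_t ebr_walk(uint64_t start, ebr_next_fn next, void *ctx)
{
    uint64_t seen[EBR_MAX_SEEN];
    size_t nseen = 0, nondata = 0;
    uint64_t cur = start;

    while (cur != 0 && nondata < EBR_MAX_NONDATA) {
        if (ebr_seen(seen, nseen, cur) || nseen == EBR_MAX_SEEN)
            nondata++;                    /* duplicate: no data, no allocation */
        else
            seen[nseen++] = cur;          /* first time we meet this EBR */
        cur = next(cur, ctx);
    }
    return nseen;
}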
xmlHashUpdateEntry(xmlHashTablePtr table, const xmlChar *name,
void *userdata, xmlHashDeallocator f) {
return(xmlHashUpdateEntry3(table, name, NULL, NULL, userdata, f));
}
| 0 |
[
"CWE-399"
] |
libxml2
|
8973d58b7498fa5100a876815476b81fd1a2412a
| 190,435,372,248,164,770,000,000,000,000,000,000,000 | 4 |
Add hash randomization to hash and dict structures
Following http://www.ocert.org/advisories/ocert-2011-003.html
it seems that having hash randomization might be a good idea
when using XML with untrusted data
* configure.in: lookup for rand, srand and time
* dict.c: add randomization to dictionaries hash tables
* hash.c: add randomization to normal hash tables
|
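Note (illustration, not the libxml2 code): a sketch of per-table hash seeding as described above; the seed source and mixing function are assumptions.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

/* Per-table random seed: two tables hash the same key to different
 * buckets, so an attacker cannot precompute colliding keys offline. */
struct htable {
    unsigned seed;
    size_t   size;
};

static void htable_init(struct htable *t, size_t size)
{
    t->size = size;
    t->seed = (unsigned)rand();          /* illustrative; libxml2 uses its own source */
}

static size_t htable_bucket(const struct htable *t, const char *key)
{
    uint32_t h = t->seed;
    while (*key)
        h = h * 31u + (unsigned char)*key++;   /* simple mixing, seed folded in */
    return h % t->size;
}

int main(void)
{
    struct htable a, b;

    srand((unsigned)time(NULL));
    htable_init(&a, 256);
    htable_init(&b, 256);
    /* The same key usually lands in different buckets in the two tables. */
    printf("%zu %zu\n", htable_bucket(&a, "entity"), htable_bucket(&b, "entity"));
    return 0;
}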
gsicc_set_device_blackptcomp(gx_device *dev, gsicc_blackptcomp_t blackptcomp,
gsicc_profile_types_t profile_type)
{
int code = 0;
cmm_dev_profile_t *profile_struct;
if (dev->procs.get_profile == NULL) {
profile_struct = dev->icc_struct;
} else {
code = dev_proc(dev, get_profile)(dev, &profile_struct);
}
if (profile_struct == NULL)
return 0;
profile_struct->rendercond[profile_type].black_point_comp = blackptcomp;
return code;
}
| 0 |
[] |
ghostpdl
|
6d444c273da5499a4cd72f21cb6d4c9a5256807d
| 317,427,496,095,838,600,000,000,000,000,000,000,000 | 16 |
Bug 697178: Add a file permissions callback
For the rare occasions when the graphics library directly opens a file
(currently for reading), this allows us to apply any restrictions on
file access normally applied in the interpreter.
|
NTSTATUS set_ea(connection_struct *conn, files_struct *fsp,
const struct smb_filename *smb_fname, struct ea_list *ea_list)
{
char *fname = NULL;
if (!lp_ea_support(SNUM(conn))) {
return NT_STATUS_EAS_NOT_SUPPORTED;
}
/* For now setting EAs on streams isn't supported. */
fname = smb_fname->base_name;
for (;ea_list; ea_list = ea_list->next) {
int ret;
fstring unix_ea_name;
fstrcpy(unix_ea_name, "user."); /* All EA's must start with user. */
fstrcat(unix_ea_name, ea_list->ea.name);
canonicalize_ea_name(conn, fsp, fname, unix_ea_name);
DEBUG(10,("set_ea: ea_name %s ealen = %u\n", unix_ea_name, (unsigned int)ea_list->ea.value.length));
if (samba_private_attr_name(unix_ea_name)) {
DEBUG(10,("set_ea: ea name %s is a private Samba name.\n", unix_ea_name));
return NT_STATUS_ACCESS_DENIED;
}
if (ea_list->ea.value.length == 0) {
/* Remove the attribute. */
if (fsp && (fsp->fh->fd != -1)) {
DEBUG(10,("set_ea: deleting ea name %s on "
"file %s by file descriptor.\n",
unix_ea_name, fsp_str_dbg(fsp)));
ret = SMB_VFS_FREMOVEXATTR(fsp, unix_ea_name);
} else {
DEBUG(10,("set_ea: deleting ea name %s on file %s.\n",
unix_ea_name, fname));
ret = SMB_VFS_REMOVEXATTR(conn, fname, unix_ea_name);
}
#ifdef ENOATTR
/* Removing a non existent attribute always succeeds. */
if (ret == -1 && errno == ENOATTR) {
DEBUG(10,("set_ea: deleting ea name %s didn't exist - succeeding by default.\n",
unix_ea_name));
ret = 0;
}
#endif
} else {
if (fsp && (fsp->fh->fd != -1)) {
DEBUG(10,("set_ea: setting ea name %s on file "
"%s by file descriptor.\n",
unix_ea_name, fsp_str_dbg(fsp)));
ret = SMB_VFS_FSETXATTR(fsp, unix_ea_name,
ea_list->ea.value.data, ea_list->ea.value.length, 0);
} else {
DEBUG(10,("set_ea: setting ea name %s on file %s.\n",
unix_ea_name, fname));
ret = SMB_VFS_SETXATTR(conn, fname, unix_ea_name,
ea_list->ea.value.data, ea_list->ea.value.length, 0);
}
}
if (ret == -1) {
#ifdef ENOTSUP
if (errno == ENOTSUP) {
return NT_STATUS_EAS_NOT_SUPPORTED;
}
#endif
return map_nt_error_from_unix(errno);
}
}
return NT_STATUS_OK;
}
| 0 |
[
"CWE-22"
] |
samba
|
bd269443e311d96ef495a9db47d1b95eb83bb8f4
| 227,219,070,662,655,280,000,000,000,000,000,000,000 | 75 |
Fix bug 7104 - "wide links" and "unix extensions" are incompatible.
Change parameter "wide links" to default to "no".
Ensure "wide links = no" if "unix extensions = yes" on a share.
Fix man pages to refect this.
Remove "within share" checks for a UNIX symlink set - even if
widelinks = no. The server will not follow that link anyway.
Correct DEBUG message in check_reduced_name() to add missing "\n"
so it's really clear when a path is being denied as it's outside
the enclosing share path.
Jeremy.
|
static apr_status_t create_namebased_scoreboard(apr_pool_t *pool,
const char *fname)
{
#if APR_HAS_SHARED_MEMORY
apr_status_t rv;
/* The shared memory file must not exist before we create the
* segment. */
apr_shm_remove(fname, pool); /* ignore errors */
rv = apr_shm_create(&ap_scoreboard_shm, scoreboard_size, fname, pool);
if (rv != APR_SUCCESS) {
ap_log_error(APLOG_MARK, APLOG_CRIT, rv, ap_server_conf, APLOGNO(00001)
"unable to create or access scoreboard \"%s\" "
"(name-based shared memory failure)", fname);
return rv;
}
#endif /* APR_HAS_SHARED_MEMORY */
return APR_SUCCESS;
}
| 0 |
[
"CWE-476"
] |
httpd
|
fa7b2a5250e54363b3a6c8ac3aaa7de4e8da9b2e
| 277,471,925,369,697,750,000,000,000,000,000,000,000 | 20 |
Merge r1878092 from trunk:
Fix a NULL pointer dereference
* server/scoreboard.c (ap_increment_counts): In certain cases, such as some
invalid requests, r->method might be NULL here. r->method_number defaults
to M_GET and hence is M_GET in these cases.
Submitted by: rpluem
Reviewed by: covener, ylavic, jfclere
git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1893051 13f79535-47bb-0310-9956-ffa450edef68
|
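Note (illustration, not the httpd patch): a sketch of the NULL-guard pattern the message describes -- fall back to method_number when r->method is NULL. Names and the M_GET value are illustrative.

#include <stddef.h>
#include <string.h>

#define M_GET 0   /* illustrative; matches the "defaults to M_GET" note above */

struct request {
    const char *method;        /* may be NULL for certain invalid requests */
    int         method_number; /* always valid, defaults to M_GET */
};

/* Count a request without dereferencing a possibly-NULL method string. */
static void increment_counts(unsigned long *get_count, const struct request *r)
{
    if ((r->method != NULL && strcmp(r->method, "GET") == 0) ||
        (r->method == NULL && r->method_number == M_GET))
        (*get_count)++;
}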
struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc,
struct sctp_chunk *asconf)
{
sctp_addiphdr_t *hdr;
union sctp_addr_param *addr_param;
sctp_addip_param_t *asconf_param;
struct sctp_chunk *asconf_ack;
__be16 err_code;
int length = 0;
int chunk_len;
__u32 serial;
int all_param_pass = 1;
chunk_len = ntohs(asconf->chunk_hdr->length) - sizeof(sctp_chunkhdr_t);
hdr = (sctp_addiphdr_t *)asconf->skb->data;
serial = ntohl(hdr->serial);
/* Skip the addiphdr and store a pointer to address parameter. */
length = sizeof(sctp_addiphdr_t);
addr_param = (union sctp_addr_param *)(asconf->skb->data + length);
chunk_len -= length;
/* Skip the address parameter and store a pointer to the first
* asconf parameter.
*/
length = ntohs(addr_param->v4.param_hdr.length);
asconf_param = (sctp_addip_param_t *)((void *)addr_param + length);
chunk_len -= length;
/* create an ASCONF_ACK chunk.
* Based on the definitions of parameters, we know that the size of
* ASCONF_ACK parameters are less than or equal to the twice of ASCONF
* parameters.
*/
asconf_ack = sctp_make_asconf_ack(asoc, serial, chunk_len * 2);
if (!asconf_ack)
goto done;
/* Process the TLVs contained within the ASCONF chunk. */
while (chunk_len > 0) {
err_code = sctp_process_asconf_param(asoc, asconf,
asconf_param);
/* ADDIP 4.1 A7)
* If an error response is received for a TLV parameter,
* all TLVs with no response before the failed TLV are
* considered successful if not reported. All TLVs after
* the failed response are considered unsuccessful unless
* a specific success indication is present for the parameter.
*/
if (SCTP_ERROR_NO_ERROR != err_code)
all_param_pass = 0;
if (!all_param_pass)
sctp_add_asconf_response(asconf_ack,
asconf_param->crr_id, err_code,
asconf_param);
/* ADDIP 4.3 D11) When an endpoint receiving an ASCONF to add
* an IP address sends an 'Out of Resource' in its response, it
* MUST also fail any subsequent add or delete requests bundled
* in the ASCONF.
*/
if (SCTP_ERROR_RSRC_LOW == err_code)
goto done;
/* Move to the next ASCONF param. */
length = ntohs(asconf_param->param_hdr.length);
asconf_param = (sctp_addip_param_t *)((void *)asconf_param +
length);
chunk_len -= length;
}
done:
asoc->peer.addip_serial++;
/* If we are sending a new ASCONF_ACK hold a reference to it in assoc
* after freeing the reference to old asconf ack if any.
*/
if (asconf_ack) {
sctp_chunk_hold(asconf_ack);
list_add_tail(&asconf_ack->transmitted_list,
&asoc->asconf_ack_list);
}
return asconf_ack;
}
| 0 |
[
"CWE-20"
] |
linux-2.6
|
ba0166708ef4da7eeb61dd92bbba4d5a749d6561
| 30,443,121,781,322,960,000,000,000,000,000,000,000 | 87 |
sctp: Fix kernel panic while processing a protocol violation parameter
Since the call to sctp_sf_abort_violation() needs a parameter 'arg' of
type 'struct sctp_chunk', it will read the chunk type and chunk length from
the chunk_hdr member of that chunk. But sctp_sf_violation_paramlen() is
always called with a parameter of type 'struct sctp_paramhdr', which is then
passed to sctp_sf_abort_violation(). This may cause a kernel panic.
sctp_sf_violation_paramlen()
|-- sctp_sf_abort_violation()
|-- sctp_make_abort_violation()
This patch fixes this problem. It also fixes two places which called
sctp_sf_violation_paramlen() with the wrong parameter type.
Signed-off-by: Wei Yongjun <[email protected]>
Signed-off-by: Vlad Yasevich <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
PHP_MINIT_FUNCTION(spl_directory)
{
REGISTER_SPL_STD_CLASS_EX(SplFileInfo, spl_filesystem_object_new, spl_SplFileInfo_functions);
memcpy(&spl_filesystem_object_handlers, zend_get_std_object_handlers(), sizeof(zend_object_handlers));
spl_filesystem_object_handlers.clone_obj = spl_filesystem_object_clone;
spl_filesystem_object_handlers.cast_object = spl_filesystem_object_cast;
spl_filesystem_object_handlers.get_debug_info = spl_filesystem_object_get_debug_info;
spl_ce_SplFileInfo->serialize = zend_class_serialize_deny;
spl_ce_SplFileInfo->unserialize = zend_class_unserialize_deny;
REGISTER_SPL_SUB_CLASS_EX(DirectoryIterator, SplFileInfo, spl_filesystem_object_new, spl_DirectoryIterator_functions);
zend_class_implements(spl_ce_DirectoryIterator TSRMLS_CC, 1, zend_ce_iterator);
REGISTER_SPL_IMPLEMENTS(DirectoryIterator, SeekableIterator);
spl_ce_DirectoryIterator->get_iterator = spl_filesystem_dir_get_iterator;
REGISTER_SPL_SUB_CLASS_EX(FilesystemIterator, DirectoryIterator, spl_filesystem_object_new, spl_FilesystemIterator_functions);
REGISTER_SPL_CLASS_CONST_LONG(FilesystemIterator, "CURRENT_MODE_MASK", SPL_FILE_DIR_CURRENT_MODE_MASK);
REGISTER_SPL_CLASS_CONST_LONG(FilesystemIterator, "CURRENT_AS_PATHNAME", SPL_FILE_DIR_CURRENT_AS_PATHNAME);
REGISTER_SPL_CLASS_CONST_LONG(FilesystemIterator, "CURRENT_AS_FILEINFO", SPL_FILE_DIR_CURRENT_AS_FILEINFO);
REGISTER_SPL_CLASS_CONST_LONG(FilesystemIterator, "CURRENT_AS_SELF", SPL_FILE_DIR_CURRENT_AS_SELF);
REGISTER_SPL_CLASS_CONST_LONG(FilesystemIterator, "KEY_MODE_MASK", SPL_FILE_DIR_KEY_MODE_MASK);
REGISTER_SPL_CLASS_CONST_LONG(FilesystemIterator, "KEY_AS_PATHNAME", SPL_FILE_DIR_KEY_AS_PATHNAME);
REGISTER_SPL_CLASS_CONST_LONG(FilesystemIterator, "FOLLOW_SYMLINKS", SPL_FILE_DIR_FOLLOW_SYMLINKS);
REGISTER_SPL_CLASS_CONST_LONG(FilesystemIterator, "KEY_AS_FILENAME", SPL_FILE_DIR_KEY_AS_FILENAME);
REGISTER_SPL_CLASS_CONST_LONG(FilesystemIterator, "NEW_CURRENT_AND_KEY", SPL_FILE_DIR_KEY_AS_FILENAME|SPL_FILE_DIR_CURRENT_AS_FILEINFO);
REGISTER_SPL_CLASS_CONST_LONG(FilesystemIterator, "OTHER_MODE_MASK", SPL_FILE_DIR_OTHERS_MASK);
REGISTER_SPL_CLASS_CONST_LONG(FilesystemIterator, "SKIP_DOTS", SPL_FILE_DIR_SKIPDOTS);
REGISTER_SPL_CLASS_CONST_LONG(FilesystemIterator, "UNIX_PATHS", SPL_FILE_DIR_UNIXPATHS);
spl_ce_FilesystemIterator->get_iterator = spl_filesystem_tree_get_iterator;
REGISTER_SPL_SUB_CLASS_EX(RecursiveDirectoryIterator, FilesystemIterator, spl_filesystem_object_new, spl_RecursiveDirectoryIterator_functions);
REGISTER_SPL_IMPLEMENTS(RecursiveDirectoryIterator, RecursiveIterator);
memcpy(&spl_filesystem_object_check_handlers, &spl_filesystem_object_handlers, sizeof(zend_object_handlers));
spl_filesystem_object_check_handlers.get_method = spl_filesystem_object_get_method_check;
#ifdef HAVE_GLOB
REGISTER_SPL_SUB_CLASS_EX(GlobIterator, FilesystemIterator, spl_filesystem_object_new_check, spl_GlobIterator_functions);
REGISTER_SPL_IMPLEMENTS(GlobIterator, Countable);
#endif
REGISTER_SPL_SUB_CLASS_EX(SplFileObject, SplFileInfo, spl_filesystem_object_new_check, spl_SplFileObject_functions);
REGISTER_SPL_IMPLEMENTS(SplFileObject, RecursiveIterator);
REGISTER_SPL_IMPLEMENTS(SplFileObject, SeekableIterator);
REGISTER_SPL_CLASS_CONST_LONG(SplFileObject, "DROP_NEW_LINE", SPL_FILE_OBJECT_DROP_NEW_LINE);
REGISTER_SPL_CLASS_CONST_LONG(SplFileObject, "READ_AHEAD", SPL_FILE_OBJECT_READ_AHEAD);
REGISTER_SPL_CLASS_CONST_LONG(SplFileObject, "SKIP_EMPTY", SPL_FILE_OBJECT_SKIP_EMPTY);
REGISTER_SPL_CLASS_CONST_LONG(SplFileObject, "READ_CSV", SPL_FILE_OBJECT_READ_CSV);
REGISTER_SPL_SUB_CLASS_EX(SplTempFileObject, SplFileObject, spl_filesystem_object_new_check, spl_SplTempFileObject_functions);
return SUCCESS;
}
| 1 |
[
"CWE-190"
] |
php-src
|
7245bff300d3fa8bacbef7897ff080a6f1c23eba
| 203,253,567,879,192,100,000,000,000,000,000,000,000 | 56 |
Fix bug #72262 - do not overflow int
|
void ElectronBrowserClient::RenderProcessReady(
content::RenderProcessHost* host) {
if (delegate_) {
static_cast<api::App*>(delegate_)->RenderProcessReady(host);
}
}
| 0 |
[] |
electron
|
e9fa834757f41c0b9fe44a4dffe3d7d437f52d34
| 294,831,232,417,712,280,000,000,000,000,000,000,000 | 6 |
fix: ensure ElectronBrowser mojo service is only bound to appropriate render frames (#33344)
* fix: ensure ElectronBrowser mojo service is only bound to authorized render frames
Notes: no-notes
* refactor: extract electron API IPC to its own mojo interface
* fix: just check main frame not primary main frame
Co-authored-by: Samuel Attard <[email protected]>
Co-authored-by: Samuel Attard <[email protected]>
|
build_attribute_list_for_copy (GFile *file,
GFileCopyFlags flags,
char **out_attributes,
GCancellable *cancellable,
GError **error)
{
gboolean ret = FALSE;
GFileAttributeInfoList *attributes = NULL, *namespaces = NULL;
GString *s = NULL;
gboolean first;
int i;
gboolean copy_all_attributes;
gboolean skip_perms;
copy_all_attributes = flags & G_FILE_COPY_ALL_METADATA;
skip_perms = (flags & G_FILE_COPY_TARGET_DEFAULT_PERMS) != 0;
/* Ignore errors here, if the target supports no attributes there is
* nothing to copy. We still honor the cancellable though.
*/
attributes = g_file_query_settable_attributes (file, cancellable, NULL);
if (g_cancellable_set_error_if_cancelled (cancellable, error))
goto out;
namespaces = g_file_query_writable_namespaces (file, cancellable, NULL);
if (g_cancellable_set_error_if_cancelled (cancellable, error))
goto out;
if (attributes == NULL && namespaces == NULL)
goto out;
first = TRUE;
s = g_string_new ("");
if (attributes)
{
for (i = 0; i < attributes->n_infos; i++)
{
if (should_copy (&attributes->infos[i], copy_all_attributes, skip_perms))
{
if (first)
first = FALSE;
else
g_string_append_c (s, ',');
g_string_append (s, attributes->infos[i].name);
}
}
}
if (namespaces)
{
for (i = 0; i < namespaces->n_infos; i++)
{
if (should_copy (&namespaces->infos[i], copy_all_attributes, FALSE))
{
if (first)
first = FALSE;
else
g_string_append_c (s, ',');
g_string_append (s, namespaces->infos[i].name);
g_string_append (s, "::*");
}
}
}
ret = TRUE;
*out_attributes = g_string_free (s, FALSE);
s = NULL;
out:
if (s)
g_string_free (s, TRUE);
if (attributes)
g_file_attribute_info_list_unref (attributes);
if (namespaces)
g_file_attribute_info_list_unref (namespaces);
return ret;
}
| 0 |
[
"CWE-362"
] |
glib
|
d8f8f4d637ce43f8699ba94c9b7648beda0ca174
| 312,337,898,686,092,170,000,000,000,000,000,000,000 | 80 |
gfile: Limit access to files when copying
file_copy_fallback creates new files with default permissions and
sets the correct permissions after the operation is finished. This
might make the files accessible to more users during
the operation than expected. Use G_FILE_CREATE_PRIVATE for the new
files to limit access to those files.
|
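Note (illustration, not the GIO implementation): a POSIX sketch of the intent behind G_FILE_CREATE_PRIVATE -- keep the destination at mode 0600 while data is being written and only widen permissions once the copy has finished.

#include <fcntl.h>
#include <sys/stat.h>
#include <unistd.h>

/* Copy src to dst; dst is 0600 while data is being written so other
 * users cannot read a half-copied file, then receives src's mode. */
static int copy_private(const char *src, const char *dst)
{
    char buf[4096];
    struct stat st;
    ssize_t n;
    int in, out;

    in = open(src, O_RDONLY);
    if (in < 0 || fstat(in, &st) != 0)
        goto fail_in;

    out = open(dst, O_WRONLY | O_CREAT | O_EXCL, 0600); /* private during copy */
    if (out < 0)
        goto fail_in;

    while ((n = read(in, buf, sizeof(buf))) > 0)
        if (write(out, buf, (size_t)n) != n)
            goto fail_out;
    if (n < 0)
        goto fail_out;

    if (fchmod(out, st.st_mode & 07777) != 0)   /* widen only at the end */
        goto fail_out;

    close(in);
    return close(out);

fail_out:
    close(out);
    unlink(dst);
fail_in:
    if (in >= 0)
        close(in);
    return -1;
}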
static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
uint64_t val, unsigned size,
MemTxAttrs attrs)
{
MemTxResult res;
check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
switch (size) {
case 1:
address_space_stb(&address_space_memory, addr, val, attrs, &res);
break;
case 2:
address_space_stw(&address_space_memory, addr, val, attrs, &res);
break;
case 4:
address_space_stl(&address_space_memory, addr, val, attrs, &res);
break;
default: abort();
}
return res;
}
| 0 |
[] |
qemu
|
e4a511f8cc6f4a46d409fb5c9f72c38ba45f8d83
| 317,336,615,801,535,900,000,000,000,000,000,000,000 | 21 |
exec: clamp accesses against the MemoryRegionSection
Because the clamping was done against the MemoryRegion,
address_space_rw was effectively broken if a write spanned
multiple sections that are not linear in underlying memory
(with the memory not being under an IOMMU).
This is visible with the MIPS rc4030 IOMMU, which is implemented
as a series of alias memory regions that point to the actual RAM.
Tested-by: Hervé Poussineau <[email protected]>
Tested-by: Mark Cave-Ayland <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
|
static void hns_xgmac_get_info(void *mac_drv, struct mac_info *mac_info)
{
struct mac_driver *drv = (struct mac_driver *)mac_drv;
u32 pause_time, pause_ctrl, port_mode, ctrl_val;
ctrl_val = dsaf_read_dev(drv, XGMAC_MAC_CONTROL_REG);
mac_info->pad_and_crc_en = dsaf_get_bit(ctrl_val, XGMAC_CTL_TX_PAD_B);
mac_info->auto_neg = 0;
pause_time = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_TIME_REG);
mac_info->tx_pause_time = pause_time;
port_mode = dsaf_read_dev(drv, XGMAC_PORT_MODE_REG);
mac_info->port_en = dsaf_get_field(port_mode, XGMAC_PORT_MODE_TX_M,
XGMAC_PORT_MODE_TX_S) &&
dsaf_get_field(port_mode, XGMAC_PORT_MODE_RX_M,
XGMAC_PORT_MODE_RX_S);
mac_info->duplex = 1;
mac_info->speed = MAC_SPEED_10000;
pause_ctrl = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_CTRL_REG);
mac_info->rx_pause_en = dsaf_get_bit(pause_ctrl, XGMAC_PAUSE_CTL_RX_B);
mac_info->tx_pause_en = dsaf_get_bit(pause_ctrl, XGMAC_PAUSE_CTL_TX_B);
}
| 0 |
[
"CWE-119",
"CWE-703"
] |
linux
|
412b65d15a7f8a93794653968308fc100f2aa87c
| 208,771,005,836,156,850,000,000,000,000,000,000,000 | 24 |
net: hns: fix ethtool_get_strings overflow in hns driver
hns_get_sset_count() returns HNS_NET_STATS_CNT and the data space allocated
is not enough for ethtool_get_strings(), which will cause random memory
corruption.
When SLAB and DEBUG_SLAB are both enabled, memory corruptions like the
the following can be observed without this patch:
[ 43.115200] Slab corruption (Not tainted): Acpi-ParseExt start=ffff801fb0b69030, len=80
[ 43.115206] Redzone: 0x9f911029d006462/0x5f78745f31657070.
[ 43.115208] Last user: [<5f7272655f746b70>](0x5f7272655f746b70)
[ 43.115214] 010: 70 70 65 31 5f 74 78 5f 70 6b 74 00 6b 6b 6b 6b ppe1_tx_pkt.kkkk
[ 43.115217] 030: 70 70 65 31 5f 74 78 5f 70 6b 74 5f 6f 6b 00 6b ppe1_tx_pkt_ok.k
[ 43.115218] Next obj: start=ffff801fb0b69098, len=80
[ 43.115220] Redzone: 0x706d655f6f666966/0x9f911029d74e35b.
[ 43.115229] Last user: [<ffff0000084b11b0>](acpi_os_release_object+0x28/0x38)
[ 43.115231] 000: 74 79 00 6b 6b 6b 6b 6b 70 70 65 31 5f 74 78 5f ty.kkkkkppe1_tx_
[ 43.115232] 010: 70 6b 74 5f 65 72 72 5f 63 73 75 6d 5f 66 61 69 pkt_err_csum_fai
Signed-off-by: Timmy Li <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
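Note (illustration, not the hns driver): a sketch of keeping get_sset_count() and get_strings() in sync by deriving both from a single table, so the strings can never outgrow the buffer sized by the count.

#include <stdio.h>
#include <string.h>

#define ETH_GSTRING_LEN 32
#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Single source of truth for the statistic names. */
static const char stat_names[][ETH_GSTRING_LEN] = {
    "rx_packets", "tx_packets", "rx_errors", "tx_errors",
};

/* Both callbacks use the same table, so the buffer the caller
 * allocates from get_sset_count() always fits what get_strings()
 * writes. */
static int get_sset_count(void)
{
    return (int)ARRAY_SIZE(stat_names);
}

static void get_strings(char *data)
{
    size_t i;
    for (i = 0; i < ARRAY_SIZE(stat_names); i++)
        memcpy(data + i * ETH_GSTRING_LEN, stat_names[i], ETH_GSTRING_LEN);
}

int main(void)
{
    char buf[ARRAY_SIZE(stat_names) * ETH_GSTRING_LEN];

    get_strings(buf);
    printf("%d stats, first: %s\n", get_sset_count(), buf);
    return 0;
}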
int input_handler_for_each_handle(struct input_handler *handler, void *data,
int (*fn)(struct input_handle *, void *))
{
struct input_handle *handle;
int retval = 0;
rcu_read_lock();
list_for_each_entry_rcu(handle, &handler->h_list, h_node) {
retval = fn(handle, data);
if (retval)
break;
}
rcu_read_unlock();
return retval;
}
| 0 |
[
"CWE-703",
"CWE-787"
] |
linux
|
cb222aed03d798fc074be55e59d9a112338ee784
| 119,692,698,824,444,200,000,000,000,000,000,000,000 | 18 |
Input: add safety guards to input_set_keycode()
If we happen to have garbage in an input device's keycode table, with values
that are too big, we'll end up doing clear_bit() with an offset way outside of
our bitmaps, damaging other objects within the input device or even outside of
it. Let's add sanity checks to the returned old keycodes.
Reported-by: [email protected]
Reported-by: [email protected]
Link: https://lore.kernel.org/r/20191207212757.GA245964@dtor-ws
Signed-off-by: Dmitry Torokhov <[email protected]>
|
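Note (illustration, not the input core code): a sketch of the sanity check the message describes -- validate an old keycode before using it as a bit index. Sizes and names are simplified.

#include <limits.h>
#include <stddef.h>

#define KEY_MAX   0x2ff
#define KEY_CNT   (KEY_MAX + 1)
#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

static unsigned long keybit[BITS_TO_LONGS(KEY_CNT)];

static void clear_bit_checked(unsigned int code)
{
    /* Garbage in the keycode table must not index outside the bitmap. */
    if (code > KEY_MAX)
        return;
    keybit[code / BITS_PER_LONG] &= ~(1UL << (code % BITS_PER_LONG));
}

/* Replace the keycode at one slot of a device's keycode table. */
static int set_keycode(unsigned int *keycode_table, size_t slots,
                       size_t index, unsigned int new_code)
{
    unsigned int old;

    if (index >= slots || new_code > KEY_MAX)
        return -1;

    old = keycode_table[index];
    keycode_table[index] = new_code;

    clear_bit_checked(old);                 /* safe even if 'old' was garbage */
    keybit[new_code / BITS_PER_LONG] |= 1UL << (new_code % BITS_PER_LONG);
    return 0;
}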
static int date_period_initialize(timelib_time **st, timelib_time **et, timelib_rel_time **d, long *recurrences, /*const*/ char *format, int format_length TSRMLS_DC)
{
timelib_time *b = NULL, *e = NULL;
timelib_rel_time *p = NULL;
int r = 0;
int retval = 0;
struct timelib_error_container *errors;
timelib_strtointerval(format, format_length, &b, &e, &p, &r, &errors);
if (errors->error_count > 0) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Unknown or bad format (%s)", format);
retval = FAILURE;
} else {
*st = b;
*et = e;
*d = p;
*recurrences = r;
retval = SUCCESS;
}
timelib_error_container_dtor(errors);
return retval;
| 0 |
[] |
php-src
|
c377f1a715476934133f3254d1e0d4bf3743e2d2
| 338,446,713,339,901,650,000,000,000,000,000,000,000 | 23 |
Fix bug #68942 (Use after free vulnerability in unserialize() with DateTimeZone)
|
void cpu_exec_realizefn(CPUState *cpu)
{
CPUClass *cc = CPU_GET_CLASS(cpu);
cc->tcg_initialize(cpu->uc);
tlb_init(cpu);
}
| 0 |
[
"CWE-476"
] |
unicorn
|
3d3deac5e6d38602b689c4fef5dac004f07a2e63
| 106,893,530,514,118,800,000,000,000,000,000,000,000 | 7 |
Fix crash when mapping a large memory region and calling uc_close
|
evdns_nameserver_add(unsigned long int address) {
if (!current_base)
current_base = evdns_base_new(NULL, 0);
return evdns_base_nameserver_add(current_base, address);
}
| 0 |
[
"CWE-125"
] |
libevent
|
96f64a022014a208105ead6c8a7066018449d86d
| 106,306,328,572,096,150,000,000,000,000,000,000,000 | 5 |
evdns: name_parse(): fix remote stack overread
@asn-the-goblin-slayer:
"the name_parse() function in libevent's DNS code is vulnerable to a buffer overread.
971 if (cp != name_out) {
972 if (cp + 1 >= end) return -1;
973 *cp++ = '.';
974 }
975 if (cp + label_len >= end) return -1;
976 memcpy(cp, packet + j, label_len);
977 cp += label_len;
978 j += label_len;
No check is made against length before the memcpy occurs.
This was found through the Tor bug bounty program and the discovery should be credited to 'Guido Vranken'."
Reproducer for gdb (https://gist.github.com/azat/e4fcf540e9b89ab86d02):
set $PROT_NONE=0x0
set $PROT_READ=0x1
set $PROT_WRITE=0x2
set $MAP_ANONYMOUS=0x20
set $MAP_SHARED=0x01
set $MAP_FIXED=0x10
set $MAP_32BIT=0x40
start
set $length=202
# overread
set $length=2
# allocate with mmap to have a seg fault on page boundary
set $l=(1<<20)*2
p mmap(0, $l, $PROT_READ|$PROT_WRITE, $MAP_ANONYMOUS|$MAP_SHARED|$MAP_32BIT, -1, 0)
set $packet=(char *)$1+$l-$length
# hack the packet
set $packet[0]=63
set $packet[1]='/'
p malloc(sizeof(int))
set $idx=(int *)$2
set $idx[0]=0
set $name_out_len=202
p malloc($name_out_len)
set $name_out=$3
# have WRITE only mapping to fail on read
set $end=$1+$l
p (void *)mmap($end, 1<<12, $PROT_NONE, $MAP_ANONYMOUS|$MAP_SHARED|$MAP_FIXED|$MAP_32BIT, -1, 0)
set $m=$4
p name_parse($packet, $length, $idx, $name_out, $name_out_len)
x/2s (char *)$name_out
Before this patch:
$ gdb -ex 'source gdb' dns-example
$1 = 1073741824
$2 = (void *) 0x633010
$3 = (void *) 0x633030
$4 = (void *) 0x40200000
Program received signal SIGSEGV, Segmentation fault.
__memcpy_sse2_unaligned () at memcpy-sse2-unaligned.S:33
After this patch:
$ gdb -ex 'source gdb' dns-example
$1 = 1073741824
$2 = (void *) 0x633010
$3 = (void *) 0x633030
$4 = (void *) 0x40200000
$5 = -1
0x633030: "/"
0x633032: ""
(gdb) p $m
$6 = (void *) 0x40200000
(gdb) p $1
$7 = 1073741824
(gdb) p/x $1
$8 = 0x40000000
(gdb) quit
P.S. plus drop one condition duplicate.
Fixes: #317
|
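Note (illustration, not the libevent code): a standalone sketch of the added bounds check -- never memcpy a DNS label without first checking that it fits in the remaining output space.

#include <stddef.h>
#include <string.h>

/* Copy one DNS label of label_len bytes from 'src' into the output
 * name buffer, appending a '.' separator when needed.  Returns the
 * new write position, or NULL on overflow. */
static char *append_label(char *cp, char *name_out, char *end,
                          const unsigned char *src, size_t label_len)
{
    if (cp != name_out) {
        if (cp + 1 >= end)
            return NULL;
        *cp++ = '.';
    }
    /* The previously missing check: the label must fit (leaving room
     * for the terminating NUL) before we copy it. */
    if (label_len >= (size_t)(end - cp))
        return NULL;
    memcpy(cp, src, label_len);
    return cp + label_len;
}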
static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh)
{
struct net *net = sock_net(skb->sk);
struct ndmsg *ndm;
struct nlattr *tb[NDA_MAX+1];
struct net_device *dev;
u8 *addr;
u16 vid;
int err;
err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL);
if (err < 0)
return err;
ndm = nlmsg_data(nlh);
if (ndm->ndm_ifindex == 0) {
pr_info("PF_BRIDGE: RTM_NEWNEIGH with invalid ifindex\n");
return -EINVAL;
}
dev = __dev_get_by_index(net, ndm->ndm_ifindex);
if (dev == NULL) {
pr_info("PF_BRIDGE: RTM_NEWNEIGH with unknown ifindex\n");
return -ENODEV;
}
if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) {
pr_info("PF_BRIDGE: RTM_NEWNEIGH with invalid address\n");
return -EINVAL;
}
addr = nla_data(tb[NDA_LLADDR]);
err = fdb_vid_parse(tb[NDA_VLAN], &vid);
if (err)
return err;
err = -EOPNOTSUPP;
/* Support fdb on master device the net/bridge default case */
if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) &&
(dev->priv_flags & IFF_BRIDGE_PORT)) {
struct net_device *br_dev = netdev_master_upper_dev_get(dev);
const struct net_device_ops *ops = br_dev->netdev_ops;
err = ops->ndo_fdb_add(ndm, tb, dev, addr, vid,
nlh->nlmsg_flags);
if (err)
goto out;
else
ndm->ndm_flags &= ~NTF_MASTER;
}
/* Embedded bridge, macvlan, and any other device support */
if ((ndm->ndm_flags & NTF_SELF)) {
if (dev->netdev_ops->ndo_fdb_add)
err = dev->netdev_ops->ndo_fdb_add(ndm, tb, dev, addr,
vid,
nlh->nlmsg_flags);
else
err = ndo_dflt_fdb_add(ndm, tb, dev, addr, vid,
nlh->nlmsg_flags);
if (!err) {
rtnl_fdb_notify(dev, addr, vid, RTM_NEWNEIGH,
ndm->ndm_state);
ndm->ndm_flags &= ~NTF_SELF;
}
}
out:
return err;
}
| 0 |
[
"CWE-200"
] |
net
|
5f8e44741f9f216e33736ea4ec65ca9ac03036e6
| 158,017,476,159,645,800,000,000,000,000,000,000,000 | 72 |
net: fix infoleak in rtnetlink
The stack object “map” has a total size of 32 bytes. Its last 4
bytes are padding generated by compiler. These padding bytes are
not initialized and sent out via “nla_put”.
Signed-off-by: Kangjie Lu <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
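Note (user-space illustration): the usual fix for this kind of infoleak is to zero the whole stack object before filling it, so compiler-inserted padding is defined before the bytes are copied out (nla_put in the kernel). The struct layout below is made up.

#include <stdio.h>
#include <string.h>

/* 4 trailing padding bytes on most 64-bit ABIs (28 bytes of members
 * rounded up to an 8-byte-aligned size of 32). */
struct rtnl_link_ifmap_like {
    unsigned long long mem_start;
    unsigned long long mem_end;
    unsigned long long base_addr;
    unsigned short     irq;
    unsigned char      dma;
    unsigned char      port;
};

static void fill_map(struct rtnl_link_ifmap_like *map)
{
    memset(map, 0, sizeof(*map));   /* the fix: no uninitialized padding */
    map->mem_start = 0x1000;
    map->mem_end   = 0x2000;
    map->base_addr = 0x1000;
    map->irq       = 11;
    map->dma       = 0;
    map->port      = 0;
}

int main(void)
{
    struct rtnl_link_ifmap_like map;
    unsigned char wire[sizeof(map)];

    fill_map(&map);
    /* Anything copied out verbatim now carries zeroed padding instead
     * of stack garbage. */
    memcpy(wire, &map, sizeof(map));
    printf("copied %zu bytes\n", sizeof(wire));
    return 0;
}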
nautilus_file_can_write (NautilusFile *file)
{
g_return_val_if_fail (NAUTILUS_IS_FILE (file), FALSE);
return file->details->can_write;
}
| 0 |
[] |
nautilus
|
7632a3e13874a2c5e8988428ca913620a25df983
| 34,534,487,206,971,854,000,000,000,000,000,000,000 | 6 |
Check for trusted desktop file launchers.
2009-02-24 Alexander Larsson <[email protected]>
* libnautilus-private/nautilus-directory-async.c:
Check for trusted desktop file launchers.
* libnautilus-private/nautilus-file-private.h:
* libnautilus-private/nautilus-file.c:
* libnautilus-private/nautilus-file.h:
Add nautilus_file_is_trusted_link.
Allow unsetting of custom display name.
* libnautilus-private/nautilus-mime-actions.c:
Display dialog when trying to launch a non-trusted desktop file.
svn path=/trunk/; revision=15003
|
static int virtio_net_post_load_device(void *opaque, int version_id)
{
VirtIONet *n = opaque;
VirtIODevice *vdev = VIRTIO_DEVICE(n);
int i, link_down;
trace_virtio_net_post_load_device();
virtio_net_set_mrg_rx_bufs(n, n->mergeable_rx_bufs,
virtio_vdev_has_feature(vdev,
VIRTIO_F_VERSION_1),
virtio_vdev_has_feature(vdev,
VIRTIO_NET_F_HASH_REPORT));
/* MAC_TABLE_ENTRIES may be different from the saved image */
if (n->mac_table.in_use > MAC_TABLE_ENTRIES) {
n->mac_table.in_use = 0;
}
if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
n->curr_guest_offloads = virtio_net_supported_guest_offloads(n);
}
/*
* curr_guest_offloads will be later overwritten by the
* virtio_set_features_nocheck call done from the virtio_load.
* Here we make sure it is preserved and restored accordingly
* in the virtio_net_post_load_virtio callback.
*/
n->saved_guest_offloads = n->curr_guest_offloads;
virtio_net_set_queue_pairs(n);
/* Find the first multicast entry in the saved MAC filter */
for (i = 0; i < n->mac_table.in_use; i++) {
if (n->mac_table.macs[i * ETH_ALEN] & 1) {
break;
}
}
n->mac_table.first_multi = i;
/* nc.link_down can't be migrated, so infer link_down according
* to link status bit in n->status */
link_down = (n->status & VIRTIO_NET_S_LINK_UP) == 0;
for (i = 0; i < n->max_queue_pairs; i++) {
qemu_get_subqueue(n->nic, i)->link_down = link_down;
}
if (virtio_vdev_has_feature(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE) &&
virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) {
qemu_announce_timer_reset(&n->announce_timer, migrate_announce_params(),
QEMU_CLOCK_VIRTUAL,
virtio_net_announce_timer, n);
if (n->announce_timer.round) {
timer_mod(n->announce_timer.tm,
qemu_clock_get_ms(n->announce_timer.type));
} else {
qemu_announce_timer_del(&n->announce_timer, false);
}
}
if (n->rss_data.enabled) {
n->rss_data.enabled_software_rss = n->rss_data.populate_hash;
if (!n->rss_data.populate_hash) {
if (!virtio_net_attach_epbf_rss(n)) {
if (get_vhost_net(qemu_get_queue(n->nic)->peer)) {
warn_report("Can't post-load eBPF RSS for vhost");
} else {
warn_report("Can't post-load eBPF RSS - "
"fallback to software RSS");
n->rss_data.enabled_software_rss = true;
}
}
}
trace_virtio_net_rss_enable(n->rss_data.hash_types,
n->rss_data.indirections_len,
sizeof(n->rss_data.key));
} else {
trace_virtio_net_rss_disable();
}
return 0;
}
| 0 |
[
"CWE-703"
] |
qemu
|
abe300d9d894f7138e1af7c8e9c88c04bfe98b37
| 197,219,713,116,423,670,000,000,000,000,000,000,000 | 82 |
virtio-net: fix map leaking on error during receive
Commit bedd7e93d0196 ("virtio-net: fix use after unmap/free for sg")
tries to fix the use after free of the sg by caching the virtqueue
elements in an array and unmap them at once after receiving the
packets, But it forgot to unmap the cached elements on error which
will lead to leaking of mapping and other unexpected results.
Fixing this by detaching the cached elements on error. This addresses
CVE-2022-26353.
Reported-by: Victor Tom <[email protected]>
Cc: [email protected]
Fixes: CVE-2022-26353
Fixes: bedd7e93d0196 ("virtio-net: fix use after unmap/free for sg")
Reviewed-by: Michael S. Tsirkin <[email protected]>
Signed-off-by: Jason Wang <[email protected]>
|
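Note (illustration, not QEMU code): a generic sketch of the cleanup-on-error pattern the fix applies -- everything cached so far must be released on the failure path, not only on success. The resource type is hypothetical.

#include <stdlib.h>

struct elem { void *map; size_t len; };

/* Hypothetical per-element setup/teardown. */
static int elem_map(struct elem *e, size_t len)
{
    e->map = malloc(len);
    e->len = len;
    return e->map ? 0 : -1;
}

static void elem_unmap(struct elem *e)
{
    free(e->map);
    e->map = NULL;
}

/* Map up to n elements; on any failure, unwind every element cached so
 * far instead of leaking the mappings (the bug fixed above). */
static int map_batch(struct elem *elems, size_t n, size_t len)
{
    size_t i, j;

    for (i = 0; i < n; i++) {
        if (elem_map(&elems[i], len) != 0) {
            for (j = 0; j < i; j++)
                elem_unmap(&elems[j]);   /* error path releases the cache */
            return -1;
        }
    }
    return 0;
}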
static int ntop_stats_get_minute_samplings_interval(lua_State *vm) {
time_t epoch_start, epoch_end;
int ifid;
NetworkInterface* iface;
StatsManager *sm;
struct statsManagerRetrieval retvals;
ntop->getTrace()->traceEvent(TRACE_DEBUG, "%s() called", __FUNCTION__);
if(ntop_lua_check(vm, __FUNCTION__, 1, LUA_TNUMBER)) return(CONST_LUA_ERROR);
ifid = lua_tointeger(vm, 1);
if(ifid < 0)
return(CONST_LUA_ERROR);
if(ntop_lua_check(vm, __FUNCTION__, 2, LUA_TNUMBER)) return(CONST_LUA_ERROR);
epoch_start = lua_tointeger(vm, 2);
if(epoch_start < 0)
return(CONST_LUA_ERROR);
if(ntop_lua_check(vm, __FUNCTION__, 3, LUA_TNUMBER)) return(CONST_LUA_ERROR);
epoch_end = lua_tointeger(vm, 3);
if(epoch_end < 0)
return(CONST_LUA_ERROR);
if(!(iface = ntop->getNetworkInterface(ifid)) ||
!(sm = iface->getStatsManager()))
return (CONST_LUA_ERROR);
if(sm->retrieveMinuteStatsInterval(epoch_start, epoch_end, &retvals))
return(CONST_LUA_ERROR);
lua_newtable(vm);
for (unsigned i = 0 ; i < retvals.rows.size() ; i++)
lua_push_str_table_entry(vm, retvals.rows[i].c_str(), (char*)"");
return(CONST_LUA_OK);
}
| 0 |
[
"CWE-476"
] |
ntopng
|
01f47e04fd7c8d54399c9e465f823f0017069f8f
| 275,186,866,153,812,640,000,000,000,000,000,000,000 | 37 |
Security fix: prevents empty host from being used
|
}
void remove_systems_tracks(GF_ISOFile *file)
{
u32 i, count;
count = gf_isom_get_track_count(file);
if (count==1) return;
/*force PL rewrite*/
gf_isom_set_pl_indication(file, GF_ISOM_PL_VISUAL, 0);
gf_isom_set_pl_indication(file, GF_ISOM_PL_AUDIO, 0);
gf_isom_set_pl_indication(file, GF_ISOM_PL_OD, 1); /*the lib always remove IOD when no profiles are specified..*/
for (i=0; i<gf_isom_get_track_count(file); i++) {
switch (gf_isom_get_media_type(file, i+1)) {
case GF_ISOM_MEDIA_VISUAL:
case GF_ISOM_MEDIA_AUXV:
case GF_ISOM_MEDIA_PICT:
case GF_ISOM_MEDIA_AUDIO:
case GF_ISOM_MEDIA_TEXT:
case GF_ISOM_MEDIA_SUBT:
gf_isom_remove_track_from_root_od(file, i+1);
check_media_profile(file, i+1);
break;
		/*only remove real systems tracks (eg, dealing with scene description & presentation)
but keep meta & all unknown tracks*/
case GF_ISOM_MEDIA_SCENE:
switch (gf_isom_get_media_subtype(file, i+1, 1)) {
case GF_ISOM_MEDIA_DIMS:
gf_isom_remove_track_from_root_od(file, i+1);
continue;
default:
break;
}
case GF_ISOM_MEDIA_OD:
case GF_ISOM_MEDIA_OCR:
case GF_ISOM_MEDIA_MPEGJ:
gf_isom_remove_track(file, i+1);
i--;
break;
default:
break;
}
}
/*none required*/
if (!gf_isom_get_pl_indication(file, GF_ISOM_PL_AUDIO)) gf_isom_set_pl_indication(file, GF_ISOM_PL_AUDIO, 0xFF);
if (!gf_isom_get_pl_indication(file, GF_ISOM_PL_VISUAL)) gf_isom_set_pl_indication(file, GF_ISOM_PL_VISUAL, 0xFF);
gf_isom_set_pl_indication(file, GF_ISOM_PL_OD, 0xFF);
gf_isom_set_pl_indication(file, GF_ISOM_PL_SCENE, 0xFF);
gf_isom_set_pl_indication(file, GF_ISOM_PL_GRAPHICS, 0xFF);
gf_isom_set_pl_indication(file, GF_ISOM_PL_INLINE, 0);
| 0 |
[
"CWE-787"
] |
gpac
|
4e56ad72ac1afb4e049a10f2d99e7512d7141f9d
| 301,881,102,428,342,900,000,000,000,000,000,000,000 | 52 |
fixed #2216
|
bool make_directory(const std::string& dirname)
{
error_code ec;
bool created = bfs::create_directory(path(dirname), ec);
if (ec) {
ERR_FS << "Failed to create directory " << dirname << ": " << ec.message() << '\n';
}
return created;
}
| 0 |
[
"CWE-200"
] |
wesnoth
|
f8914468182e8d0a1551b430c0879ba236fe4d6d
| 74,258,234,297,968,150,000,000,000,000,000,000,000 | 9 |
Disallow inclusion of .pbl files from WML (bug #23504)
Note that this will also cause Lua wesnoth.have_file() to return false
on .pbl files.
|
parse_blockcode(struct buf *ob, struct sd_markdown *rndr, uint8_t *data, size_t size)
{
size_t beg, end, pre;
struct buf *work = 0;
work = rndr_newbuf(rndr, BUFFER_BLOCK);
beg = 0;
while (beg < size) {
for (end = beg + 1; end < size && data[end - 1] != '\n'; end++) {};
pre = prefix_code(data + beg, end - beg);
if (pre)
beg += pre; /* skipping prefix */
else if (!is_empty(data + beg, end - beg))
/* non-empty non-prefixed line breaks the pre */
break;
if (beg < end) {
/* verbatim copy to the working buffer,
escaping entities */
if (is_empty(data + beg, end - beg))
bufputc(work, '\n');
else bufput(work, data + beg, end - beg);
}
beg = end;
}
while (work->size && work->data[work->size - 1] == '\n')
work->size -= 1;
bufputc(work, '\n');
if (rndr->cb.blockcode)
rndr->cb.blockcode(ob, work, NULL, rndr->opaque);
rndr_popbuf(rndr, BUFFER_BLOCK);
return beg;
}
| 0 |
[] |
redcarpet
|
e5a10516d07114d582d13b9125b733008c61c242
| 242,052,726,280,661,100,000,000,000,000,000,000,000 | 39 |
Avoid rewinding previous inline when auto-linking
When a bit like "[email protected]" is processed, first the emphasis is
rendered, then the 1 is output verbatim. When the `@` is encountered,
Redcarpet tries to find the "local part" of the address and stops when
it encounters an invalid char (i.e. here the `!`).
The problem is that when it searches for the local part, Redcarpet
rewinds the characters but here, the emphasis is already rendered so
the previous HTML tag is rewinded as well and is not correctly closed.
|
struct ucounts *get_ucounts(struct ucounts *ucounts)
{
if (!get_ucounts_or_wrap(ucounts)) {
put_ucounts(ucounts);
ucounts = NULL;
}
return ucounts;
}
| 0 |
[
"CWE-416"
] |
linux
|
f9d87929d451d3e649699d0f1d74f71f77ad38f5
| 297,360,051,480,295,550,000,000,000,000,000,000,000 | 8 |
ucount: Make get_ucount a safe get_user replacement
When the ucount code was refactored to create get_ucount it was missed
that some of the contexts in which a rlimit is kept elevated can be
the only reference to the user/ucount in the system.
Ordinary ucount references exist in places that also have a reference
to the user namespace, but in POSIX message queues, the SysV shm code,
and the SIGPENDING code there is no independent user namespace
reference.
Inspection of the user_namespace shows no instance of circular
references between struct ucounts and the user_namespace. So
hold a reference from struct ucounts to its user_namespace to
resolve this problem.
Link: https://lore.kernel.org/lkml/[email protected]/
Reported-by: Qian Cai <[email protected]>
Reported-by: Mathias Krause <[email protected]>
Tested-by: Mathias Krause <[email protected]>
Reviewed-by: Mathias Krause <[email protected]>
Reviewed-by: Alexey Gladkov <[email protected]>
Fixes: d64696905554 ("Reimplement RLIMIT_SIGPENDING on top of ucounts")
Fixes: 6e52a9f0532f ("Reimplement RLIMIT_MSGQUEUE on top of ucounts")
Fixes: d7c9e99aee48 ("Reimplement RLIMIT_MEMLOCK on top of ucounts")
Cc: [email protected]
Signed-off-by: "Eric W. Biederman" <[email protected]>
|
int converse(pam_handle_t *pamh, int nargs, const struct pam_message *message,
struct pam_response **response) {
int retval;
struct pam_conv *conv;
if ((retval = pam_get_item(pamh, PAM_CONV, (const void **) &conv)) == PAM_SUCCESS) {
retval = conv->conv(nargs, &message, response, conv->appdata_ptr);
if (retval != PAM_SUCCESS) {
_pam_log(LOG_ERR, "(pam_tacplus) converse returned %d", retval);
_pam_log(LOG_ERR, "that is: %s", pam_strerror(pamh, retval));
}
} else {
_pam_log(LOG_ERR, "(pam_tacplus) converse failed to get pam_conv");
}
return retval;
}
| 0 |
[
"CWE-532"
] |
pam_tacplus
|
4a9852c31c2fd0c0e72fbb689a586aabcfb11cb0
| 183,456,824,789,591,400,000,000,000,000,000,000,000 | 19 |
pam: don't leak TACACS+ secret to journald
If journald is used for syslog, the journal is going to store everything,
even DEBUG-loglevel messages. Pre-journald, system-wide DEBUG-level logging
could be avoided and is not affected in all cases.
With journald present, it's probably safer to no longer log sensitive
details at DEBUG level.
Fix #149
Signed-off-by: Daniel Gollub <[email protected]>
|
MagickExport const char *GetMagickProperty(ImageInfo *image_info,
Image *image,const char *property,ExceptionInfo *exception)
{
char
value[MagickPathExtent];
const char
*string;
assert(property[0] != '\0');
assert(image != (Image *) NULL || image_info != (ImageInfo *) NULL );
if (property[1] == '\0') /* single letter property request */
return(GetMagickPropertyLetter(image_info,image,*property,exception));
if ((image != (Image *) NULL) && (image->debug != MagickFalse))
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
else
if ((image_info != (ImageInfo *) NULL) &&
(image_info->debug != MagickFalse))
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s","no-images");
  *value='\0';           /* formatted string */
string=(char *) NULL; /* constant string reference */
switch (*property)
{
case 'b':
{
if (LocaleCompare("basename",property) == 0)
{
WarnNoImageReturn("\"%%[%s]\"",property);
GetPathComponent(image->magick_filename,BasePath,value);
if (*value == '\0')
string="";
break;
}
if (LocaleCompare("bit-depth",property) == 0)
{
WarnNoImageReturn("\"%%[%s]\"",property);
(void) FormatLocaleString(value,MagickPathExtent,"%.20g",(double)
GetImageDepth(image,exception));
break;
}
break;
}
case 'c':
{
if (LocaleCompare("channels",property) == 0)
{
WarnNoImageReturn("\"%%[%s]\"",property);
/* FUTURE: return actual image channels */
(void) FormatLocaleString(value,MagickPathExtent,"%s",
CommandOptionToMnemonic(MagickColorspaceOptions,(ssize_t)
image->colorspace));
LocaleLower(value);
if( image->alpha_trait != UndefinedPixelTrait )
(void) ConcatenateMagickString(value,"a",MagickPathExtent);
break;
}
if (LocaleCompare("colorspace",property) == 0)
{
WarnNoImageReturn("\"%%[%s]\"",property);
string=CommandOptionToMnemonic(MagickColorspaceOptions,(ssize_t)
image->colorspace);
break;
}
if (LocaleCompare("compose",property) == 0)
{
WarnNoImageReturn("\"%%[%s]\"",property);
string=CommandOptionToMnemonic(MagickComposeOptions,(ssize_t)
image->compose);
break;
}
if (LocaleCompare("compression",property) == 0)
{
WarnNoImageReturn("\"%%[%s]\"",property);
string=CommandOptionToMnemonic(MagickCompressOptions,(ssize_t)
image->compression);
break;
}
if (LocaleCompare("copyright",property) == 0)
{
(void) CopyMagickString(value,GetMagickCopyright(),MagickPathExtent);
break;
}
break;
}
case 'd':
{
if (LocaleCompare("depth",property) == 0)
{
WarnNoImageReturn("\"%%[%s]\"",property);
(void) FormatLocaleString(value,MagickPathExtent,"%.20g",(double)
image->depth);
break;
}
if (LocaleCompare("directory",property) == 0)
{
WarnNoImageReturn("\"%%[%s]\"",property);
GetPathComponent(image->magick_filename,HeadPath,value);
if (*value == '\0')
string="";
break;
}
break;
}
case 'e':
{
if (LocaleCompare("entropy",property) == 0)
{
double
entropy;
WarnNoImageReturn("\"%%[%s]\"",property);
(void) GetImageEntropy(image,&entropy,exception);
(void) FormatLocaleString(value,MagickPathExtent,"%.*g",
GetMagickPrecision(),entropy);
break;
}
if (LocaleCompare("extension",property) == 0)
{
WarnNoImageReturn("\"%%[%s]\"",property);
GetPathComponent(image->magick_filename,ExtensionPath,value);
if (*value == '\0')
string="";
break;
}
break;
}
case 'g':
{
if (LocaleCompare("gamma",property) == 0)
{
WarnNoImageReturn("\"%%[%s]\"",property);
(void) FormatLocaleString(value,MagickPathExtent,"%.*g",
GetMagickPrecision(),image->gamma);
break;
}
break;
}
case 'h':
{
if (LocaleCompare("height",property) == 0)
{
WarnNoImageReturn("\"%%[%s]\"",property);
(void) FormatLocaleString(value,MagickPathExtent,"%.20g",
image->magick_rows != 0 ? (double) image->magick_rows : 256.0);
break;
}
break;
}
case 'i':
{
if (LocaleCompare("input",property) == 0)
{
WarnNoImageReturn("\"%%[%s]\"",property);
string=image->filename;
break;
}
if (LocaleCompare("interlace",property) == 0)
{
WarnNoImageReturn("\"%%[%s]\"",property);
string=CommandOptionToMnemonic(MagickInterlaceOptions,(ssize_t)
image->interlace);
break;
}
break;
}
case 'k':
{
if (LocaleCompare("kurtosis",property) == 0)
{
double
kurtosis,
skewness;
WarnNoImageReturn("\"%%[%s]\"",property);
(void) GetImageKurtosis(image,&kurtosis,&skewness,exception);
(void) FormatLocaleString(value,MagickPathExtent,"%.*g",
GetMagickPrecision(),kurtosis);
break;
}
break;
}
case 'm':
{
if (LocaleCompare("magick",property) == 0)
{
WarnNoImageReturn("\"%%[%s]\"",property);
string=image->magick;
break;
}
if ((LocaleCompare("maxima",property) == 0) ||
(LocaleCompare("max",property) == 0))
{
double
maximum,
minimum;
WarnNoImageReturn("\"%%[%s]\"",property);
(void) GetImageRange(image,&minimum,&maximum,exception);
(void) FormatLocaleString(value,MagickPathExtent,"%.*g",
GetMagickPrecision(),maximum);
break;
}
if (LocaleCompare("mean",property) == 0)
{
double
mean,
standard_deviation;
WarnNoImageReturn("\"%%[%s]\"",property);
(void) GetImageMean(image,&mean,&standard_deviation,exception);
(void) FormatLocaleString(value,MagickPathExtent,"%.*g",
GetMagickPrecision(),mean);
break;
}
if ((LocaleCompare("minima",property) == 0) ||
(LocaleCompare("min",property) == 0))
{
double
maximum,
minimum;
WarnNoImageReturn("\"%%[%s]\"",property);
(void) GetImageRange(image,&minimum,&maximum,exception);
(void) FormatLocaleString(value,MagickPathExtent,"%.*g",
GetMagickPrecision(),minimum);
break;
}
break;
}
case 'o':
{
if (LocaleCompare("opaque",property) == 0)
{
WarnNoImageReturn("\"%%[%s]\"",property);
string=CommandOptionToMnemonic(MagickBooleanOptions,(ssize_t)
IsImageOpaque(image,exception));
break;
}
if (LocaleCompare("orientation",property) == 0)
{
WarnNoImageReturn("\"%%[%s]\"",property);
string=CommandOptionToMnemonic(MagickOrientationOptions,(ssize_t)
image->orientation);
break;
}
if (LocaleCompare("output",property) == 0)
{
WarnNoImageInfoReturn("\"%%[%s]\"",property);
(void) CopyMagickString(value,image_info->filename,MagickPathExtent);
break;
}
break;
}
case 'p':
{
if (LocaleCompare("page",property) == 0)
{
WarnNoImageReturn("\"%%[%s]\"",property);
(void) FormatLocaleString(value,MagickPathExtent,"%.20gx%.20g",
(double) image->page.width,(double) image->page.height);
break;
}
#if defined(MAGICKCORE_LCMS_DELEGATE)
if (LocaleCompare("profile:icc",property) == 0 ||
LocaleCompare("profile:icm",property) == 0)
{
#if !defined(LCMS_VERSION) || (LCMS_VERSION < 2000)
#define cmsUInt32Number DWORD
#endif
const StringInfo
*profile;
cmsHPROFILE
icc_profile;
WarnNoImageReturn("\"%%[%s]\"",property);
profile=GetImageProfile(image,property+8);
if (profile == (StringInfo *) NULL)
break;
icc_profile=cmsOpenProfileFromMem(GetStringInfoDatum(profile),
(cmsUInt32Number) GetStringInfoLength(profile));
if (icc_profile != (cmsHPROFILE *) NULL)
{
#if defined(LCMS_VERSION) && (LCMS_VERSION < 2000)
string=cmsTakeProductName(icc_profile);
#else
(void) cmsGetProfileInfoASCII(icc_profile,cmsInfoDescription,
"en","US",value,MagickPathExtent);
#endif
(void) cmsCloseProfile(icc_profile);
}
}
#endif
if (LocaleCompare("profiles",property) == 0)
{
const char
*name;
WarnNoImageReturn("\"%%[%s]\"",property);
ResetImageProfileIterator(image);
name=GetNextImageProfile(image);
if (name != (char *) NULL)
{
(void) CopyMagickString(value,name,MagickPathExtent);
name=GetNextImageProfile(image);
while (name != (char *) NULL)
{
ConcatenateMagickString(value,",",MagickPathExtent);
ConcatenateMagickString(value,name,MagickPathExtent);
name=GetNextImageProfile(image);
}
}
break;
}
break;
}
case 'q':
{
if (LocaleCompare("quality",property) == 0)
{
WarnNoImageReturn("\"%%[%s]\"",property);
(void) FormatLocaleString(value,MagickPathExtent,"%.20g",(double)
image->quality);
break;
}
break;
}
case 'r':
{
if (LocaleCompare("resolution.x",property) == 0)
{
WarnNoImageReturn("\"%%[%s]\"",property);
(void) FormatLocaleString(value,MagickPathExtent,"%g",
image->resolution.x);
break;
}
if (LocaleCompare("resolution.y",property) == 0)
{
WarnNoImageReturn("\"%%[%s]\"",property);
(void) FormatLocaleString(value,MagickPathExtent,"%g",
image->resolution.y);
break;
}
break;
}
case 's':
{
if (LocaleCompare("scene",property) == 0)
{
WarnNoImageInfoReturn("\"%%[%s]\"",property);
if (image_info->number_scenes != 0)
(void) FormatLocaleString(value,MagickPathExtent,"%.20g",(double)
image_info->scene);
else {
WarnNoImageReturn("\"%%[%s]\"",property);
(void) FormatLocaleString(value,MagickPathExtent,"%.20g",(double)
image->scene);
}
break;
}
if (LocaleCompare("scenes",property) == 0)
{
          /* FUTURE: equivalent to %n? */
WarnNoImageReturn("\"%%[%s]\"",property);
(void) FormatLocaleString(value,MagickPathExtent,"%.20g",(double)
GetImageListLength(image));
break;
}
if (LocaleCompare("size",property) == 0)
{
WarnNoImageReturn("\"%%[%s]\"",property);
(void) FormatMagickSize(GetBlobSize(image),MagickFalse,"B",
MagickPathExtent,value);
break;
}
if (LocaleCompare("skewness",property) == 0)
{
double
kurtosis,
skewness;
WarnNoImageReturn("\"%%[%s]\"",property);
(void) GetImageKurtosis(image,&kurtosis,&skewness,exception);
(void) FormatLocaleString(value,MagickPathExtent,"%.*g",
GetMagickPrecision(),skewness);
break;
}
if (LocaleCompare("standard-deviation",property) == 0)
{
double
mean,
standard_deviation;
WarnNoImageReturn("\"%%[%s]\"",property);
(void) GetImageMean(image,&mean,&standard_deviation,exception);
(void) FormatLocaleString(value,MagickPathExtent,"%.*g",
GetMagickPrecision(),standard_deviation);
break;
}
break;
}
case 't':
{
if (LocaleCompare("type",property) == 0)
{
WarnNoImageReturn("\"%%[%s]\"",property);
string=CommandOptionToMnemonic(MagickTypeOptions,(ssize_t)
IdentifyImageType(image,exception));
break;
}
break;
}
case 'u':
{
if (LocaleCompare("unique",property) == 0)
{
WarnNoImageInfoReturn("\"%%[%s]\"",property);
string=image_info->unique;
break;
}
if (LocaleCompare("units",property) == 0)
{
WarnNoImageReturn("\"%%[%s]\"",property);
string=CommandOptionToMnemonic(MagickResolutionOptions,(ssize_t)
image->units);
break;
}
break;
}
case 'v':
{
if (LocaleCompare("version",property) == 0)
{
string=GetMagickVersion((size_t *) NULL);
break;
}
break;
}
case 'w':
{
if (LocaleCompare("width",property) == 0)
{
WarnNoImageReturn("\"%%[%s]\"",property);
(void) FormatLocaleString(value,MagickPathExtent,"%.20g",(double)
(image->magick_columns != 0 ? image->magick_columns : 256));
break;
}
break;
}
}
if (string != (char *) NULL)
return(string);
if (*value != '\0')
{
/*
Create a cloned copy of result, that will get cleaned up, eventually.
*/
if (image != (Image *) NULL)
{
(void) SetImageArtifact(image,"get-property",value);
return(GetImageArtifact(image,"get-property"));
}
else
{
(void) SetImageOption(image_info,"get-property",value);
return(GetImageOption(image_info,"get-property"));
}
}
return((char *) NULL);
}
| 0 |
[
"CWE-476"
] |
ImageMagick
|
2c75f301d9ac84f91071393b02d8c88c8341c91c
| 298,695,879,603,344,400,000,000,000,000,000,000,000 | 471 |
https://github.com/ImageMagick/ImageMagick/issues/1225
|
struct dump_dir *dd_opendir(const char *dir, int flags)
{
struct dump_dir *dd = dd_init();
dir = dd->dd_dirname = rm_trailing_slashes(dir);
dd->dd_fd = open(dir, O_DIRECTORY | O_NOFOLLOW);
struct stat stat_buf;
if (dd->dd_fd < 0)
goto cant_access;
if (fstat(dd->dd_fd, &stat_buf) != 0)
goto cant_access;
/* & 0666 should remove the executable bit */
dd->mode = (stat_buf.st_mode & 0666);
errno = 0;
if (dd_lock(dd, WAIT_FOR_OTHER_PROCESS_USLEEP, flags) < 0)
{
if ((flags & DD_OPEN_READONLY) && errno == EACCES)
{
/* Directory is not writable. If it seems to be readable,
* return "read only" dd, not NULL
*
* Does the directory have 'x' flag?
*/
if (faccessat(dd->dd_fd, ".", R_OK, AT_SYMLINK_NOFOLLOW) == 0)
{
if(dd_check(dd) != NULL)
{
dd_close(dd);
dd = NULL;
}
return dd;
}
}
if (errno == EISDIR)
{
/* EISDIR: dd_lock can lock the dir, but it sees no time file there,
* even after it retried many times. It must be an ordinary directory!
*
* Without this check, e.g. abrt-action-print happily prints any current
* directory when run without arguments, because its option -d DIR
* defaults to "."!
*/
error_msg("'%s' is not a problem directory", dir);
}
else
{
cant_access:
if (errno == ENOENT || errno == ENOTDIR)
{
if (!(flags & DD_FAIL_QUIETLY_ENOENT))
error_msg("'%s' does not exist", dir);
}
else
{
if (!(flags & DD_FAIL_QUIETLY_EACCES))
perror_msg("Can't access '%s'", dir);
}
}
dd_close(dd);
return NULL;
}
dd->dd_uid = (uid_t)-1L;
dd->dd_gid = (gid_t)-1L;
if (geteuid() == 0)
{
/* In case caller would want to create more files, he'll need uid:gid */
if (fstat(dd->dd_fd, &stat_buf) != 0)
{
error_msg("Can't stat '%s'", dir);
dd_close(dd);
return NULL;
}
dd->dd_uid = stat_buf.st_uid;
dd->dd_gid = stat_buf.st_gid;
}
return dd;
}
| 0 |
[
"CWE-20"
] |
libreport
|
1951e7282043dfe1268d492aea056b554baedb75
| 100,512,214,272,332,670,000,000,000,000,000,000,000 | 81 |
lib: fix races in dump directory handling code
Florian Weimer <[email protected]>:
dd_opendir() should keep a file handle (opened with O_DIRECTORY) and
use openat() and similar functions to access files in it.
...
The file system manipulation functions should guard against hard
links (check that link count is <= 1, just as in the user coredump
code in abrt-hook-ccpp), possibly after opening the file
with O_PATH first to avoid side effects on open/close.
Related: #1214745
Signed-off-by: Jakub Filak <[email protected]>
|
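Note (illustration, not the libreport implementation): a POSIX sketch of the hardening suggested above -- keep a directory fd opened with O_DIRECTORY|O_NOFOLLOW, access entries via openat(), and refuse regular files with more than one hard link.

#include <fcntl.h>
#include <sys/stat.h>
#include <unistd.h>

/* Open dirname once; all later file access goes through this fd so a
 * concurrently swapped path component cannot redirect us. */
static int dd_open_dirfd(const char *dirname)
{
    return open(dirname, O_DIRECTORY | O_NOFOLLOW | O_RDONLY);
}

/* Open a problem-directory element relative to dir_fd, rejecting
 * symlinks and multi-linked regular files. */
static int dd_open_item(int dir_fd, const char *name)
{
    struct stat st;
    int fd = openat(dir_fd, name, O_RDONLY | O_NOFOLLOW | O_NOCTTY);

    if (fd < 0)
        return -1;
    if (fstat(fd, &st) != 0 ||
        !S_ISREG(st.st_mode) ||
        st.st_nlink > 1) {        /* hard-link guard, as suggested above */
        close(fd);
        return -1;
    }
    return fd;
}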
NOEXPORT char *cifs_server(CLI *c, SERVICE_OPTIONS *opt, const PHASE phase) {
uint8_t buffer[128];
uint8_t response_access_denied[5] = {0x83, 0, 0, 1, 0x81};
uint8_t response_use_ssl[5] = {0x83, 0, 0, 1, 0x8e};
uint16_t len;
(void)opt; /* squash the unused parameter warning */
if(phase!=PROTOCOL_EARLY)
return NULL;
s_read(c, c->local_rfd.fd, buffer, 4) ;/* NetBIOS header */
len=(uint16_t)(((uint16_t)(buffer[2])<<8)|buffer[3]);
if(len>sizeof buffer-4) {
s_log(LOG_ERR, "Received block too long");
throw_exception(c, 1);
}
s_read(c, c->local_rfd.fd, buffer+4, len);
if(buffer[0]!=0x81) { /* NB_SSN_REQUEST */
s_log(LOG_ERR, "Client did not send session setup");
s_write(c, c->local_wfd.fd, response_access_denied, 5);
throw_exception(c, 1);
}
s_write(c, c->local_wfd.fd, response_use_ssl, 5);
return NULL;
}
| 0 |
[
"CWE-295"
] |
stunnel
|
ebad9ddc4efb2635f37174c9d800d06206f1edf9
| 200,158,411,398,085,000,000,000,000,000,000,000,000 | 24 |
stunnel-5.57
|
kex_start_rekex(struct ssh *ssh)
{
if (ssh->kex == NULL) {
error("%s: no kex", __func__);
return SSH_ERR_INTERNAL_ERROR;
}
if (ssh->kex->done == 0) {
error("%s: requested twice", __func__);
return SSH_ERR_INTERNAL_ERROR;
}
ssh->kex->done = 0;
return kex_send_kexinit(ssh);
}
| 0 |
[
"CWE-522",
"CWE-399"
] |
openssh-portable
|
ec165c392ca54317dbe3064a8c200de6531e89ad
| 208,502,306,869,181,050,000,000,000,000,000,000,000 | 13 |
upstream commit
Unregister the KEXINIT handler after message has been
received. Otherwise an unauthenticated peer can repeat the KEXINIT and cause
allocation of up to 128MB -- until the connection is closed. Reported by
shilei-c at 360.cn
Upstream-ID: 43649ae12a27ef94290db16d1a98294588b75c05
|
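Note (illustration, not the OpenSSH dispatch code): a generic sketch of "unregister the handler after the message has been received" -- a repeated, unsolicited KEXINIT then hits the unexpected-message path instead of allocating again. Types and names are assumptions.

#include <stddef.h>

#define MSG_KEXINIT 20
#define MSG_MAX     256

struct conn;
typedef int (*msg_handler)(struct conn *c);

static msg_handler dispatch[MSG_MAX];

static int handle_kexinit(struct conn *c)
{
    (void)c;
    /* ... parse the peer proposal, allocate negotiation state ... */

    /* Unregister ourselves: a repeated KEXINIT now falls through to
     * the "unexpected message" path instead of allocating again. */
    dispatch[MSG_KEXINIT] = NULL;
    return 0;
}

static int dispatch_msg(struct conn *c, unsigned char type)
{
    if (dispatch[type] == NULL)
        return -1;                /* unexpected or already-consumed message */
    return dispatch[type](c);
}

static void kex_start(void)
{
    dispatch[MSG_KEXINIT] = handle_kexinit;   /* re-armed for each rekeying */
}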
static filter_pred_fn_t select_comparison_fn(enum filter_op_ids op,
int field_size, int field_is_signed)
{
filter_pred_fn_t fn = NULL;
int pred_func_index = -1;
switch (op) {
case OP_EQ:
case OP_NE:
break;
default:
if (WARN_ON_ONCE(op < PRED_FUNC_START))
return NULL;
pred_func_index = op - PRED_FUNC_START;
if (WARN_ON_ONCE(pred_func_index > PRED_FUNC_MAX))
return NULL;
}
switch (field_size) {
case 8:
if (pred_func_index < 0)
fn = filter_pred_64;
else if (field_is_signed)
fn = pred_funcs_s64[pred_func_index];
else
fn = pred_funcs_u64[pred_func_index];
break;
case 4:
if (pred_func_index < 0)
fn = filter_pred_32;
else if (field_is_signed)
fn = pred_funcs_s32[pred_func_index];
else
fn = pred_funcs_u32[pred_func_index];
break;
case 2:
if (pred_func_index < 0)
fn = filter_pred_16;
else if (field_is_signed)
fn = pred_funcs_s16[pred_func_index];
else
fn = pred_funcs_u16[pred_func_index];
break;
case 1:
if (pred_func_index < 0)
fn = filter_pred_8;
else if (field_is_signed)
fn = pred_funcs_s8[pred_func_index];
else
fn = pred_funcs_u8[pred_func_index];
break;
}
return fn;
}
| 0 |
[
"CWE-787"
] |
linux
|
70303420b5721c38998cf987e6b7d30cc62d4ff1
| 299,399,228,391,643,040,000,000,000,000,000,000,000 | 55 |
tracing: Check for no filter when processing event filters
Syzkaller detected an out-of-bounds issue with the events filter code,
specifically here:
prog[N].pred = NULL; /* #13 */
prog[N].target = 1; /* TRUE */
prog[N+1].pred = NULL;
prog[N+1].target = 0; /* FALSE */
-> prog[N-1].target = N;
prog[N-1].when_to_branch = false;
As that's the first reference to a "N-1" index, it appears that the code got
here with N = 0, which means the filter parser found no filter to parse
(which shouldn't ever happen, but apparently it did).
Add a new error to the parsing code that will check to make sure that N is
not zero before going into this part of the code. If N = 0, then -EINVAL is
returned, and a error message is added to the filter.
Cc: [email protected]
Fixes: 80765597bc587 ("tracing: Rewrite filter logic to be simpler and faster")
Reported-by: air icy <[email protected]>
bugzilla url: https://bugzilla.kernel.org/show_bug.cgi?id=200019
Signed-off-by: Steven Rostedt (VMware) <[email protected]>
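The guard described above is small: before the terminating entries are written and prog[N-1] is backpatched, the code has to confirm that at least one predicate was actually emitted. A hedged, user-space sketch of that shape (struct and function names are illustrative, not the kernel's):
#include <errno.h>
#include <stdbool.h>
#include <stddef.h>
struct pred_entry {
	void *pred;
	int target;
	bool when_to_branch;
};
/* Sketch of the backpatch step with the added guard; the real code
 * lives in the kernel's filter parser and uses different names.  The
 * point is simply that prog[n - 1] must never be touched when the
 * parser produced no predicates at all. */
static int finish_program(struct pred_entry *prog, size_t n)
{
	if (n == 0)             /* no filter was parsed: refuse it */
		return -EINVAL; /* the kernel fix also records a parse error */
	prog[n].pred = NULL;            /* #13 */
	prog[n].target = 1;             /* TRUE */
	prog[n + 1].pred = NULL;
	prog[n + 1].target = 0;         /* FALSE */
	prog[n - 1].target = (int)n;    /* safe: n >= 1 here */
	prog[n - 1].when_to_branch = false;
	return 0;
}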
|
virtual const String *const_ptr_string() const { return NULL; }
| 0 |
[
"CWE-617"
] |
server
|
807945f2eb5fa22e6f233cc17b85a2e141efe2c8
| 126,119,353,519,060,280,000,000,000,000,000,000,000 | 1 |
MDEV-26402: A SEGV in Item_field::used_tables/update_depend_map_for_order...
When doing condition pushdown from HAVING into WHERE,
Item_equal::create_pushable_equalities() calls
item->set_extraction_flag(IMMUTABLE_FL) for constant items.
Then, Item::cleanup_excluding_immutables_processor() checks for this flag
to see if it should call item->cleanup() or leave the item as-is.
The failure happens when a constant item has a non-constant one inside it,
like:
(tbl.col=0 AND impossible_cond)
item->walk(cleanup_excluding_immutables_processor) works in a bottom-up
way so it
1. will call Item_func_eq(tbl.col=0)->cleanup()
2. will not call Item_cond_and->cleanup (as the AND is constant)
This creates an item tree where a fixed Item has an un-fixed Item inside
it which eventually causes an assertion failure.
Fixed by introducing this rule: instead of just calling
item->set_extraction_flag(IMMUTABLE_FL);
we call Item::walk() to set the flag for all sub-items of the item.
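The essence of the fix is that the immutability flag has to cover every node of a constant subtree, not only its root, so the later cleanup pass makes one consistent decision for the whole tree. MariaDB does this through Item::walk() with a flag-setting processor; the plain-C sketch below only illustrates the recursive marking idea and is not the server's code.
#include <stddef.h>
/* Illustrative stand-in for an item tree node. */
struct node {
	unsigned flags;
	struct node *left;
	struct node *right;
};
#define FLAG_IMMUTABLE 0x1u
/* Mark the whole subtree, mirroring what walking the item tree with a
 * set-flag processor achieves: no fixed node is left holding an
 * un-fixed child after cleanup. */
static void mark_immutable(struct node *n)
{
	if (n == NULL)
		return;
	n->flags |= FLAG_IMMUTABLE;
	mark_immutable(n->left);
	mark_immutable(n->right);
}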
|
int util_resolve_sys_link(struct udev *udev, char *syspath, size_t size)
{
char link_target[UTIL_PATH_SIZE];
int len;
int i;
int back;
len = readlink(syspath, link_target, sizeof(link_target));
if (len <= 0)
return -1;
link_target[len] = '\0';
dbg(udev, "path link '%s' points to '%s'\n", syspath, link_target);
for (back = 0; strncmp(&link_target[back * 3], "../", 3) == 0; back++)
;
dbg(udev, "base '%s', tail '%s', back %i\n", syspath, &link_target[back * 3], back);
for (i = 0; i <= back; i++) {
char *pos = strrchr(syspath, '/');
if (pos == NULL)
return -1;
pos[0] = '\0';
}
dbg(udev, "after moving back '%s'\n", syspath);
util_strlcat(syspath, "/", size);
util_strlcat(syspath, &link_target[back * 3], size);
return 0;
}
| 0 |
[
"CWE-120"
] |
udev
|
662c3110803bd8c1aedacc36788e6fd028944314
| 245,499,895,849,437,480,000,000,000,000,000,000,000 | 29 |
path_encode: fix max length calculation
Sebastian Krahmer wrote:
> it should reserve 4 times not 3 times len :)
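The factor matters because an escaped byte is written out as '\', 'x' and two hex digits, i.e. four output characters, so a worst-case input needs len * 4 (plus the terminator) in the destination. A hedged user-space sketch of that calculation (function name and character whitelist are illustrative, not udev's path_encode):
#include <ctype.h>
#include <stdio.h>
#include <string.h>
static int encode_path(const char *src, char *dst, size_t dst_size)
{
	size_t len = strlen(src);
	size_t j = 0;
	/* Reserve 4 bytes per input byte: every byte may expand to "\xNN". */
	if (dst_size < len * 4 + 1)
		return -1;
	for (size_t i = 0; i < len; i++) {
		unsigned char c = (unsigned char)src[i];
		if (isalnum(c) || c == '/' || c == '_' || c == '-' || c == '.')
			dst[j++] = (char)c;
		else
			j += (size_t)sprintf(dst + j, "\\x%02x", c); /* 4 chars */
	}
	dst[j] = '\0';
	return 0;
}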
|
static double mp_list_spectrum(_cimg_math_parser& mp) {
const unsigned int ind = (unsigned int)cimg::mod((int)_mp_arg(2),mp.listin.width());
return (double)mp.listin[ind]._spectrum;
}
| 0 |
[
"CWE-125"
] |
CImg
|
10af1e8c1ad2a58a0a3342a856bae63e8f257abb
| 55,355,188,191,583,660,000,000,000,000,000,000,000 | 4 |
Fix other issues in 'CImg<T>::load_bmp()'.
|
void CClient::Con_RconAuth(IConsole::IResult *pResult, void *pUserData)
{
CClient *pSelf = (CClient *)pUserData;
pSelf->RconAuth("", pResult->GetString(0));
}
| 0 |
[
"CWE-119",
"CWE-787"
] |
teeworlds
|
ff254722a2683867fcb3e67569ffd36226c4bc62
| 213,207,702,694,551,900,000,000,000,000,000,000,000 | 5 |
added some checks to snap handling
|
static void NOINLINE retrieve_file_data(FILE *dfp)
{
#if ENABLE_FEATURE_WGET_STATUSBAR || ENABLE_FEATURE_WGET_TIMEOUT
# if ENABLE_FEATURE_WGET_TIMEOUT
unsigned second_cnt = G.timeout_seconds;
# endif
struct pollfd polldata;
polldata.fd = fileno(dfp);
polldata.events = POLLIN | POLLPRI;
#endif
if (!(option_mask32 & WGET_OPT_QUIET)) {
if (G.output_fd == 1)
fprintf(stderr, "writing to stdout\n");
else
fprintf(stderr, "saving to '%s'\n", G.fname_out);
}
progress_meter(PROGRESS_START);
if (G.chunked)
goto get_clen;
/* Loops only if chunked */
while (1) {
#if ENABLE_FEATURE_WGET_STATUSBAR || ENABLE_FEATURE_WGET_TIMEOUT
/* Must use nonblocking I/O, otherwise fread will loop
* and *block* until it reads full buffer,
* which messes up progress bar and/or timeout logic.
* Because of nonblocking I/O, we need to dance
* very carefully around EAGAIN. See explanation at
* clearerr() calls.
*/
ndelay_on(polldata.fd);
#endif
while (1) {
int n;
unsigned rdsz;
#if ENABLE_FEATURE_WGET_STATUSBAR || ENABLE_FEATURE_WGET_TIMEOUT
/* fread internally uses read loop, which in our case
* is usually exited when we get EAGAIN.
* In this case, libc sets error marker on the stream.
* Need to clear it before next fread to avoid possible
* rare false positive ferror below. Rare because usually
* fread gets more than zero bytes, and we don't fall
* into if (n <= 0) ...
*/
clearerr(dfp);
#endif
errno = 0;
rdsz = sizeof(G.wget_buf);
if (G.got_clen) {
if (G.content_len < (off_t)sizeof(G.wget_buf)) {
if ((int)G.content_len <= 0)
break;
rdsz = (unsigned)G.content_len;
}
}
n = fread(G.wget_buf, 1, rdsz, dfp);
if (n > 0) {
xwrite(G.output_fd, G.wget_buf, n);
#if ENABLE_FEATURE_WGET_STATUSBAR
G.transferred += n;
#endif
if (G.got_clen) {
G.content_len -= n;
if (G.content_len == 0)
break;
}
#if ENABLE_FEATURE_WGET_TIMEOUT
second_cnt = G.timeout_seconds;
#endif
goto bump;
}
/* n <= 0.
* man fread:
* If error occurs, or EOF is reached, the return value
* is a short item count (or zero).
* fread does not distinguish between EOF and error.
*/
if (errno != EAGAIN) {
if (ferror(dfp)) {
progress_meter(PROGRESS_END);
bb_simple_perror_msg_and_die(bb_msg_read_error);
}
break; /* EOF, not error */
}
#if ENABLE_FEATURE_WGET_STATUSBAR || ENABLE_FEATURE_WGET_TIMEOUT
/* It was EAGAIN. There is no data. Wait up to one second
* then abort if timed out, or update the bar and try reading again.
*/
if (safe_poll(&polldata, 1, 1000) == 0) {
# if ENABLE_FEATURE_WGET_TIMEOUT
if (second_cnt != 0 && --second_cnt == 0) {
progress_meter(PROGRESS_END);
bb_simple_error_msg_and_die("download timed out");
}
# endif
/* We used to loop back to poll here,
* but there is no great harm in letting fread
* to try reading anyway.
*/
}
#endif
bump:
/* Need to do it _every_ second for "stalled" indicator
* to be shown properly.
*/
progress_meter(PROGRESS_BUMP);
} /* while (reading data) */
#if ENABLE_FEATURE_WGET_STATUSBAR || ENABLE_FEATURE_WGET_TIMEOUT
clearerr(dfp);
ndelay_off(polldata.fd); /* else fgets can get very unhappy */
#endif
if (!G.chunked)
break;
/* Each chunk ends with "\r\n" - eat it */
fgets_trim_sanitize(dfp, NULL);
get_clen:
/* chunk size format is "HEXNUM[;name[=val]]\r\n" */
fgets_trim_sanitize(dfp, NULL);
errno = 0;
G.content_len = STRTOOFF(G.wget_buf, NULL, 16);
/*
* Had a bug with inputs like "ffffffff0001f400"
* smashing the heap later. Ensure >= 0.
*/
if (G.content_len < 0 || errno)
bb_error_msg_and_die("bad chunk length '%s'", G.wget_buf);
if (G.content_len == 0)
break; /* all done! */
G.got_clen = 1;
/*
* Note that fgets may result in some data being buffered in dfp.
* We loop back to fread, which will retrieve this data.
* Also note that code has to be arranged so that fread
* is done _before_ one-second poll wait - poll doesn't know
* about stdio buffering and can result in spurious one second waits!
*/
}
/* Draw full bar and free its resources */
G.chunked = 0; /* makes it show 100% even for chunked download */
G.got_clen = 1; /* makes it show 100% even for download of (formerly) unknown size */
progress_meter(PROGRESS_END);
if (G.content_len != 0) {
bb_simple_perror_msg_and_die("connection closed prematurely");
/* GNU wget says "DATE TIME (NN MB/s) - Connection closed at byte NNN. Retrying." */
}
/* If -c failed, we restart from the beginning,
* but we do not truncate file then, we do it only now, at the end.
* This lets user to ^C if his 99% complete 10 GB file download
* failed to restart *without* losing the almost complete file.
*/
{
off_t pos = lseek(G.output_fd, 0, SEEK_CUR);
if (pos != (off_t)-1)
ftruncate(G.output_fd, pos);
}
if (!(option_mask32 & WGET_OPT_QUIET)) {
if (G.output_fd == 1)
fprintf(stderr, "written to stdout\n");
else
fprintf(stderr, "'%s' saved\n", G.fname_out);
}
}
| 0 |
[
"CWE-295"
] |
busybox
|
45fa3f18adf57ef9d743038743d9c90573aeeb91
| 77,556,847,261,536,980,000,000,000,000,000,000,000 | 174 |
wget: implement TLS verification with ENABLE_FEATURE_WGET_OPENSSL
When ENABLE_FEATURE_WGET_OPENSSL is enabled, correctly implement TLS
verification by default. And only ignore verification errors, if
--no-check-certificate was passed.
Also note, that previously OPENSSL implementation did not implement
TLS verification, nor printed any warning messages that verification
was not performed.
Bug-Ubuntu: https://bugs.launchpad.net/bugs/1879533
CVE-2018-1000500
Signed-off-by: Dimitri John Ledkov <[email protected]>
Signed-off-by: Denys Vlasenko <[email protected]>
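busybox's -openssl code path drives an external helper, so the sketch below only shows the general library-level pattern the message describes: verification is requested up front, and a failed check is tolerated only when the user explicitly passed --no-check-certificate. The calls used (SSL_CTX_set_verify, SSL_get_verify_result) are standard OpenSSL API, not busybox code.
#include <openssl/ssl.h>
#include <openssl/x509_vfy.h>
#include <stdio.h>
/* Request peer verification by default, using the system CA store. */
static void setup_tls_ctx(SSL_CTX *ctx)
{
	SSL_CTX_set_verify(ctx, SSL_VERIFY_PEER, NULL);
	SSL_CTX_set_default_verify_paths(ctx);
}
/* After the handshake: fail hard unless --no-check-certificate was given. */
static int check_peer_cert(SSL *ssl, int no_check_certificate)
{
	long res = SSL_get_verify_result(ssl); /* X509_V_OK on success */
	if (res == X509_V_OK)
		return 0;
	if (no_check_certificate) {
		fprintf(stderr, "warning: certificate verification failed (%ld), "
				"continuing as requested\n", res);
		return 0;
	}
	fprintf(stderr, "certificate verification failed (%ld)\n", res);
	return -1;
}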
|
static bool ldm_parse_guid (const u8 *src, u8 *dest)
{
static const int size[] = { 4, 2, 2, 2, 6 };
int i, j, v;
if (src[8] != '-' || src[13] != '-' ||
src[18] != '-' || src[23] != '-')
return false;
for (j = 0; j < 5; j++, src++)
for (i = 0; i < size[j]; i++, src+=2, *dest++ = v)
if ((v = ldm_parse_hexbyte (src)) < 0)
return false;
return true;
}
| 0 |
[
"CWE-119",
"CWE-787"
] |
linux
|
cae13fe4cc3f24820ffb990c09110626837e85d4
| 327,190,549,819,801,940,000,000,000,000,000,000,000 | 16 |
Fix for buffer overflow in ldm_frag_add not sufficient
As Ben Hutchings discovered [1], the patch for CVE-2011-1017 (buffer
overflow in ldm_frag_add) is not sufficient. The original patch in
commit c340b1d64000 ("fs/partitions/ldm.c: fix oops caused by corrupted
partition table") does not consider that, for subsequent fragments,
previously allocated memory is used.
[1] http://lkml.org/lkml/2011/5/6/407
Reported-by: Ben Hutchings <[email protected]>
Signed-off-by: Timo Warns <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
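The insufficiency was that the size check effectively covered only the first fragment, while later fragments were copied into a buffer sized from that first one with no check of their own. A hedged, generic sketch of a per-fragment bounds check (structure and names are illustrative, not fs/partitions/ldm.c):
#include <stddef.h>
#include <string.h>
struct frag_buf {
	size_t size;   /* bytes allocated for the reassembled data */
	size_t chunk;  /* per-fragment payload size assumed at alloc time */
	unsigned char *data;
};
/* Every fragment, not just the first, must be validated against the
 * buffer that was sized when the first fragment arrived. */
static int frag_add(struct frag_buf *f, size_t num,
		    const unsigned char *payload, size_t len)
{
	if (len > f->chunk)
		return -1;      /* oversized fragment */
	if (f->chunk == 0 || num >= f->size / f->chunk)
		return -1;      /* slot lies outside the buffer */
	if (num * f->chunk + len > f->size)
		return -1;      /* payload would run past the end */
	memcpy(f->data + num * f->chunk, payload, len);
	return 0;
}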
|
static int maps_open(struct inode *inode, struct file *file)
{
return do_maps_open(inode, file, &proc_pid_maps_op);
}
| 0 |
[
"CWE-264"
] |
linux-2.6
|
1a5a9906d4e8d1976b701f889d8f35d54b928f25
| 325,613,268,312,979,400,000,000,000,000,000,000,000 | 4 |
mm: thp: fix pmd_bad() triggering in code paths holding mmap_sem read mode
In some cases it may happen that pmd_none_or_clear_bad() is called with
the mmap_sem hold in read mode. In those cases the huge page faults can
allocate hugepmds under pmd_none_or_clear_bad() and that can trigger a
false positive from pmd_bad() that will not like to see a pmd
materializing as trans huge.
It's not khugepaged causing the problem, khugepaged holds the mmap_sem
in write mode (and all those sites must hold the mmap_sem in read mode
to prevent pagetables to go away from under them, during code review it
seems vm86 mode on 32bit kernels requires that too unless it's
restricted to 1 thread per process or UP builds). The race is only with
the huge pagefaults that can convert a pmd_none() into a
pmd_trans_huge().
Effectively all these pmd_none_or_clear_bad() sites running with
mmap_sem in read mode are somewhat speculative with the page faults, and
the result is always undefined when they run simultaneously. This is
probably why it wasn't common to run into this. For example if the
madvise(MADV_DONTNEED) runs zap_page_range() shortly before the page
fault, the hugepage will not be zapped, if the page fault runs first it
will be zapped.
Altering pmd_bad() not to error out if it finds hugepmds won't be enough
to fix this, because zap_pmd_range would then proceed to call
zap_pte_range (which would be incorrect if the pmd become a
pmd_trans_huge()).
The simplest way to fix this is to read the pmd in the local stack
(regardless of what we read, no need of actual CPU barriers, only
compiler barrier needed), and be sure it is not changing under the code
that computes its value. Even if the real pmd is changing under the
value we hold on the stack, we don't care. If we actually end up in
zap_pte_range it means the pmd was not none already and it was not huge,
and it can't become huge from under us (khugepaged locking explained
above).
All we need is to enforce that there is no way anymore that in a code
path like below, pmd_trans_huge can be false, but pmd_none_or_clear_bad
can run into a hugepmd. The overhead of a barrier() is just a compiler
tweak and should not be measurable (I only added it for THP builds). I
don't exclude different compiler versions may have prevented the race
too by caching the value of *pmd on the stack (that hasn't been
verified, but it wouldn't be impossible considering
pmd_none_or_clear_bad, pmd_bad, pmd_trans_huge, pmd_none are all inlines
and there's no external function called in between pmd_trans_huge and
pmd_none_or_clear_bad).
if (pmd_trans_huge(*pmd)) {
if (next-addr != HPAGE_PMD_SIZE) {
VM_BUG_ON(!rwsem_is_locked(&tlb->mm->mmap_sem));
split_huge_page_pmd(vma->vm_mm, pmd);
} else if (zap_huge_pmd(tlb, vma, pmd, addr))
continue;
/* fall through */
}
if (pmd_none_or_clear_bad(pmd))
Because this race condition could be exercised without special
privileges this was reported in CVE-2012-1179.
The race was identified and fully explained by Ulrich who debugged it.
I'm quoting his accurate explanation below, for reference.
====== start quote =======
mapcount 0 page_mapcount 1
kernel BUG at mm/huge_memory.c:1384!
At some point prior to the panic, a "bad pmd ..." message similar to the
following is logged on the console:
mm/memory.c:145: bad pmd ffff8800376e1f98(80000000314000e7).
The "bad pmd ..." message is logged by pmd_clear_bad() before it clears
the page's PMD table entry.
143 void pmd_clear_bad(pmd_t *pmd)
144 {
-> 145 pmd_ERROR(*pmd);
146 pmd_clear(pmd);
147 }
After the PMD table entry has been cleared, there is an inconsistency
between the actual number of PMD table entries that are mapping the page
and the page's map count (_mapcount field in struct page). When the page
is subsequently reclaimed, __split_huge_page() detects this inconsistency.
1381 if (mapcount != page_mapcount(page))
1382 printk(KERN_ERR "mapcount %d page_mapcount %d\n",
1383 mapcount, page_mapcount(page));
-> 1384 BUG_ON(mapcount != page_mapcount(page));
The root cause of the problem is a race of two threads in a multithreaded
process. Thread B incurs a page fault on a virtual address that has never
been accessed (PMD entry is zero) while Thread A is executing an madvise()
system call on a virtual address within the same 2 MB (huge page) range.
virtual address space
.---------------------.
| |
| |
.-|---------------------|
| | |
| | |<-- B(fault)
| | |
2 MB | |/////////////////////|-.
huge < |/////////////////////| > A(range)
page | |/////////////////////|-'
| | |
| | |
'-|---------------------|
| |
| |
'---------------------'
- Thread A is executing an madvise(..., MADV_DONTNEED) system call
on the virtual address range "A(range)" shown in the picture.
sys_madvise
// Acquire the semaphore in shared mode.
down_read(&current->mm->mmap_sem)
...
madvise_vma
switch (behavior)
case MADV_DONTNEED:
madvise_dontneed
zap_page_range
unmap_vmas
unmap_page_range
zap_pud_range
zap_pmd_range
//
// Assume that this huge page has never been accessed.
// I.e. content of the PMD entry is zero (not mapped).
//
if (pmd_trans_huge(*pmd)) {
// We don't get here due to the above assumption.
}
//
// Assume that Thread B incurred a page fault and
.---------> // sneaks in here as shown below.
| //
| if (pmd_none_or_clear_bad(pmd))
| {
| if (unlikely(pmd_bad(*pmd)))
| pmd_clear_bad
| {
| pmd_ERROR
| // Log "bad pmd ..." message here.
| pmd_clear
| // Clear the page's PMD entry.
| // Thread B incremented the map count
| // in page_add_new_anon_rmap(), but
| // now the page is no longer mapped
| // by a PMD entry (-> inconsistency).
| }
| }
|
v
- Thread B is handling a page fault on virtual address "B(fault)" shown
in the picture.
...
do_page_fault
__do_page_fault
// Acquire the semaphore in shared mode.
down_read_trylock(&mm->mmap_sem)
...
handle_mm_fault
if (pmd_none(*pmd) && transparent_hugepage_enabled(vma))
// We get here due to the above assumption (PMD entry is zero).
do_huge_pmd_anonymous_page
alloc_hugepage_vma
// Allocate a new transparent huge page here.
...
__do_huge_pmd_anonymous_page
...
spin_lock(&mm->page_table_lock)
...
page_add_new_anon_rmap
// Here we increment the page's map count (starts at -1).
atomic_set(&page->_mapcount, 0)
set_pmd_at
// Here we set the page's PMD entry which will be cleared
// when Thread A calls pmd_clear_bad().
...
spin_unlock(&mm->page_table_lock)
The mmap_sem does not prevent the race because both threads are acquiring
it in shared mode (down_read). Thread B holds the page_table_lock while
the page's map count and PMD table entry are updated. However, Thread A
does not synchronize on that lock.
====== end quote =======
[[email protected]: checkpatch fixes]
Reported-by: Ulrich Obergfell <[email protected]>
Signed-off-by: Andrea Arcangeli <[email protected]>
Acked-by: Johannes Weiner <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: Hugh Dickins <[email protected]>
Cc: Dave Jones <[email protected]>
Acked-by: Larry Woodman <[email protected]>
Acked-by: Rik van Riel <[email protected]>
Cc: <[email protected]> [2.6.38+]
Cc: Mark Salter <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
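The remedy the message spells out — read the pmd once onto the local stack, keep the compiler from re-reading it, and run the none/bad/huge checks on that snapshot — has roughly the shape below. This is a kernel-style sketch for illustration only; it relies on kernel helpers (pmd_none, pmd_trans_huge, pmd_clear_bad, barrier) and is not standalone-compilable, nor is it the literal patch.
/* Kernel-style sketch: snapshot *pmd so a concurrent huge-page fault
 * cannot change the value between the checks. */
static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd)
{
	pmd_t pmdval = *pmd;    /* single read onto the local stack */
	/* Compiler barrier only: keep the checks below operating on the
	 * snapshot instead of re-reading *pmd (no CPU barrier is
	 * needed, per the commit message). */
	barrier();
	if (pmd_none(pmdval) || pmd_trans_huge(pmdval))
		return 1;       /* caller skips this pmd */
	if (unlikely(pmd_bad(pmdval))) {
		pmd_clear_bad(pmd);
		return 1;
	}
	return 0;
}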
|
static int ethtool_get_ringparam(struct net_device *dev, void __user *useraddr)
{
struct ethtool_ringparam ringparam = { .cmd = ETHTOOL_GRINGPARAM };
if (!dev->ethtool_ops->get_ringparam)
return -EOPNOTSUPP;
dev->ethtool_ops->get_ringparam(dev, &ringparam);
if (copy_to_user(useraddr, &ringparam, sizeof(ringparam)))
return -EFAULT;
return 0;
}
| 0 |
[
"CWE-190"
] |
linux-2.6
|
db048b69037e7fa6a7d9e95a1271a50dc08ae233
| 165,353,094,731,487,140,000,000,000,000,000,000,000 | 13 |
ethtool: Fix potential kernel buffer overflow in ETHTOOL_GRXCLSRLALL
On a 32-bit machine, info.rule_cnt >= 0x40000000 leads to integer
overflow and the buffer may be smaller than needed. Since
ETHTOOL_GRXCLSRLALL is unprivileged, this can presumably be used for at
least denial of service.
Signed-off-by: Ben Hutchings <[email protected]>
Cc: [email protected]
Signed-off-by: David S. Miller <[email protected]>
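The overflow in the related ETHTOOL_GRXCLSRLALL path comes from multiplying a user-controlled rule count by the element size on a 32-bit size_t; the cure is to bound the count before multiplying. A hedged user-space sketch of that guard (the limit constant is a stand-in, not the kernel's KMALLOC_MAX_SIZE):
#include <stdint.h>
#include <stdlib.h>
#define MAX_ALLOC_BYTES ((size_t)1 << 22)   /* stand-in allocation cap */
/* Reject counts that would wrap the size computation on 32-bit before
 * any buffer is sized from them. */
static uint32_t *alloc_rule_locs(uint32_t rule_cnt)
{
	if (rule_cnt > MAX_ALLOC_BYTES / sizeof(uint32_t))
		return NULL;
	return calloc(rule_cnt, sizeof(uint32_t));
}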
|