func (string, len 0–484k) | target (int64, 0–1) | cwe (list, len 0–4) | project (string, 799 classes) | commit_id (string, len 40) | hash (float64) | size (int64, 1–24k) | message (string, len 0–13.3k)
---|---|---|---|---|---|---|---|
__releases(proto_list_lock)
{
read_unlock(&proto_list_lock);
} | 0 | [
"CWE-264"
]
| linux-2.6 | df0bca049d01c0ee94afb7cd5dfd959541e6c8da | 135,929,625,619,426,350,000,000,000,000,000,000,000 | 4 | net: 4-byte kernel memory disclosure in SO_BSDCOMPAT getsockopt, try #2
In function sock_getsockopt() located in net/core/sock.c, optval v.val
is not correctly initialized and is returned directly to userland when
the SO_BSDCOMPAT option is requested.
This dummy code should trigger the bug:
int main(void)
{
unsigned char buf[4] = { 0, 0, 0, 0 };
int len;
int sock;
sock = socket(33, 2, 2);
getsockopt(sock, 1, SO_BSDCOMPAT, &buf, &len);
printf("%x%x%x%x\n", buf[0], buf[1], buf[2], buf[3]);
close(sock);
}
Here is a patch that fixes this bug by initializing v.val just after its
declaration.
Signed-off-by: Clément Lecigne <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
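The bug above is the classic uninitialized-stack-copy pattern. A minimal userspace sketch of the fix idea (hypothetical names, not the kernel patch verbatim): clear the multi-purpose value at declaration so no option handler can hand stale stack bytes back to the caller.

```c
#include <string.h>

union optval { int vint; long vlong; char vbuf[16]; };

int get_opt(int optname, union optval *out)
{
    union optval v;
    memset(&v, 0, sizeof(v));   /* the fix: no uninitialized bytes remain */
    if (optname == 1)
        v.vint = 42;            /* handlers that forget to set v return 0s */
    memcpy(out, &v, sizeof(v));
    return 0;
}
```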
static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
{
int id;
struct rb_node **p = &client->handles.rb_node;
struct rb_node *parent = NULL;
struct ion_handle *entry;
id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL);
if (id < 0)
return id;
handle->id = id;
while (*p) {
parent = *p;
entry = rb_entry(parent, struct ion_handle, node);
if (handle->buffer < entry->buffer)
p = &(*p)->rb_left;
else if (handle->buffer > entry->buffer)
p = &(*p)->rb_right;
else
WARN(1, "%s: buffer already found.", __func__);
}
rb_link_node(&handle->node, parent, p);
rb_insert_color(&handle->node, &client->handles);
return 0;
} | 0 | [
"CWE-416",
"CWE-284"
]
| linux | 9590232bb4f4cc824f3425a6e1349afbe6d6d2b7 | 278,646,139,151,979,470,000,000,000,000,000,000,000 | 30 | staging/android/ion : fix a race condition in the ion driver
There is a use-after-free problem in the ion driver.
This is caused by a race condition in the ion_ioctl()
function.
A handle has ref count of 1 and two tasks on different
cpus calls ION_IOC_FREE simultaneously.
cpu 0: ion_handle_get_by_id() (ref == 2)
cpu 1: ion_handle_get_by_id() (ref == 3)
cpu 0: ion_free() (ref == 2)
cpu 0: ion_handle_put() (ref == 1)
cpu 1: ion_free() (ref == 0, so ion_handle_destroy() is called and the handle is freed)
cpu 1: ion_handle_put() is called and it decreases the slub's next free pointer
The problem is detected as an unaligned access in the
spin lock functions since it uses load exclusive
instruction. In some cases it corrupts the slub's
free pointer which causes a mis-aligned access to the
next free pointer. (kmalloc returns a pointer like
ffffc0745b4580aa). And it causes lots of other
hard-to-debug problems.
This symptom is caused since the first member in the
ion_handle structure is the reference count and the
ion driver decrements the reference after it has been
freed.
To fix this problem client->lock mutex is extended
to protect all the codes that uses the handle.
Signed-off-by: Eun Taik Lee <[email protected]>
Reviewed-by: Laura Abbott <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]> |
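The commit message describes extending client->lock over all uses of the handle. A simplified pthread sketch of that pattern, with assumed names rather than the ion driver's real structures: the lookup, the refcount decrement, and the free all happen inside one critical section, so no other CPU can observe a freed handle.

```c
#include <pthread.h>
#include <stdlib.h>

struct handle { int ref; };
struct client { pthread_mutex_t lock; struct handle *h; };

void handle_free(struct client *c)
{
    pthread_mutex_lock(&c->lock);    /* extended critical section */
    if (c->h && --c->h->ref == 0) {  /* get, use and put under one lock */
        free(c->h);
        c->h = NULL;
    }
    pthread_mutex_unlock(&c->lock);
}
```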
longlong Item_func_regexp_instr::val_int()
{
DBUG_ASSERT(fixed == 1);
if ((null_value= re.recompile(args[1])))
return 0;
if ((null_value= re.exec(args[0], 0, 1)))
return 0;
return re.match() ? re.subpattern_start(0) + 1 : 0;
} | 0 | [
"CWE-617"
]
| server | 807945f2eb5fa22e6f233cc17b85a2e141efe2c8 | 499,478,238,632,674,400,000,000,000,000,000,000 | 11 | MDEV-26402: A SEGV in Item_field::used_tables/update_depend_map_for_order...
When doing condition pushdown from HAVING into WHERE,
Item_equal::create_pushable_equalities() calls
item->set_extraction_flag(IMMUTABLE_FL) for constant items.
Then, Item::cleanup_excluding_immutables_processor() checks for this flag
to see if it should call item->cleanup() or leave the item as-is.
The failure happens when a constant item has a non-constant one inside it,
like:
(tbl.col=0 AND impossible_cond)
item->walk(cleanup_excluding_immutables_processor) works in a bottom-up
way so it
1. will call Item_func_eq(tbl.col=0)->cleanup()
2. will not call Item_cond_and->cleanup (as the AND is constant)
This creates an item tree where a fixed Item has an un-fixed Item inside
it which eventually causes an assertion failure.
Fixed by introducing this rule: instead of just calling
item->set_extraction_flag(IMMUTABLE_FL);
we call Item::walk() to set the flag for all sub-items of the item. |
SMBOWFencrypt (uschar passwd[16], uschar * c8, uschar p24[24])
{
uschar p21[21];
memset (p21, '\0', 21);
memcpy (p21, passwd, 16);
E_P24 (p21, c8, p24);
} | 0 | [
"CWE-125"
]
| exim | 57aa14b216432be381b6295c312065b2fd034f86 | 110,213,754,648,394,840,000,000,000,000,000,000,000 | 9 | Fix SPA authenticator, checking client-supplied data before using it. Bug 2571 |
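The fix referenced here (Bug 2571) is about checking client-supplied data before using it. A generic sketch of that pattern, with illustrative names rather than exim's: validate a client-declared offset/length pair against the real packet size before any memcpy.

```c
#include <stddef.h>
#include <stdint.h>
#include <string.h>

int read_blob(const uint8_t *pkt, size_t pktlen,
              uint32_t off, uint32_t len, uint8_t *dst, size_t dstlen)
{
    if (off > pktlen || len > pktlen - off || len > dstlen)
        return -1;              /* reject before the out-of-bounds read */
    memcpy(dst, pkt + off, len);
    return 0;
}
```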
cudnnRNNAlgo_t ToCudnnRNNAlgo(absl::optional<dnn::AlgorithmDesc> algorithm) {
if (!algorithm.has_value()) {
return CUDNN_RNN_ALGO_STANDARD;
}
cudnnRNNAlgo_t algo = static_cast<cudnnRNNAlgo_t>(algorithm->algo_id());
switch (algo) {
case CUDNN_RNN_ALGO_STANDARD:
case CUDNN_RNN_ALGO_PERSIST_STATIC:
case CUDNN_RNN_ALGO_PERSIST_DYNAMIC:
return algo;
default:
LOG(FATAL) << "Unsupported Cudnn RNN algorithm: " << algorithm->algo_id();
}
} | 0 | [
"CWE-20"
]
| tensorflow | 14755416e364f17fb1870882fa778c7fec7f16e3 | 120,517,771,390,533,060,000,000,000,000,000,000,000 | 14 | Prevent CHECK-fail in LSTM/GRU with zero-length input.
PiperOrigin-RevId: 346239181
Change-Id: I5f233dbc076aab7bb4e31ba24f5abd4eaf99ea4f |
virDomainNetCreatePort(virConnectPtr conn,
virDomainDefPtr dom,
virDomainNetDefPtr iface,
unsigned int flags)
{
virErrorPtr save_err;
g_autoptr(virNetwork) net = NULL;
g_autoptr(virNetworkPortDef) portdef = NULL;
g_autoptr(virNetworkPort) port = NULL;
g_autofree char *portxml = NULL;
if (!(net = virNetworkLookupByName(conn, iface->data.network.name)))
return -1;
if (flags & VIR_NETWORK_PORT_CREATE_RECLAIM) {
char uuidstr[VIR_UUID_STRING_BUFLEN];
char macstr[VIR_MAC_STRING_BUFLEN];
virUUIDFormat(iface->data.network.portid, uuidstr);
virMacAddrFormat(&iface->mac, macstr);
/* if the port is already registered, then we are done */
if (virUUIDIsValid(iface->data.network.portid) &&
(port = virNetworkPortLookupByUUID(net, iface->data.network.portid))) {
VIR_DEBUG("network: %s domain: %s mac: %s port: %s - already registered, skipping",
iface->data.network.name, dom->name, macstr, uuidstr);
return 0;
}
/* otherwise we need to create a new port */
VIR_DEBUG("network: %s domain: %s mac: %s port: %s - not found, reclaiming",
iface->data.network.name, dom->name, macstr, uuidstr);
if (!(portdef = virDomainNetDefActualToNetworkPort(dom, iface)))
return -1;
} else {
if (!(portdef = virDomainNetDefToNetworkPort(dom, iface)))
return -1;
}
if (!(portxml = virNetworkPortDefFormat(portdef)))
return -1;
/* prepare to re-use portdef */
virNetworkPortDefFree(portdef);
portdef = NULL;
if (!(port = virNetworkPortCreateXML(net, portxml, flags)))
return -1;
/* prepare to re-use portxml */
VIR_FREE(portxml);
if (!(portxml = virNetworkPortGetXMLDesc(port, 0)) ||
!(portdef = virNetworkPortDefParseString(portxml)) ||
virDomainNetDefActualFromNetworkPort(iface, portdef) < 0) {
virErrorPreserveLast(&save_err);
virNetworkPortDelete(port, 0);
virErrorRestore(&save_err);
return -1;
}
virNetworkPortGetUUID(port, iface->data.network.portid);
return 0;
} | 0 | [
"CWE-212"
]
| libvirt | a5b064bf4b17a9884d7d361733737fb614ad8979 | 38,214,688,881,180,170,000,000,000,000,000,000,000 | 64 | conf: Don't format http cookies unless VIR_DOMAIN_DEF_FORMAT_SECURE is used
Starting with 3b076391befc3fe72deb0c244ac6c2b4c100b410
(v6.1.0-122-g3b076391be) we support http cookies. Since they may contain
somewhat sensitive information we should not format them into the XML
unless VIR_DOMAIN_DEF_FORMAT_SECURE is asserted.
Reported-by: Han Han <[email protected]>
Signed-off-by: Peter Krempa <[email protected]>
Reviewed-by: Erik Skultety <[email protected]> |
SetDefaultRuleset(uchar *pszName)
{
ruleset_t *pRuleset;
DEFiRet;
assert(pszName != NULL);
CHKiRet(GetRuleset(&pRuleset, pszName));
pDfltRuleset = pRuleset;
dbgprintf("default rule set changed to %p: '%s'\n", pRuleset, pszName);
finalize_it:
RETiRet;
} | 0 | [
"CWE-772",
"CWE-401"
]
| rsyslog | 1ef709cc97d54f74d3fdeb83788cc4b01f4c6a2a | 176,171,218,185,640,260,000,000,000,000,000,000,000 | 13 | bugfix: fixed a memory leak and potential abort condition
this could happen if multiple rulesets were used and some output batches
contained messages belonging to more than one ruleset.
fixes: http://bugzilla.adiscon.com/show_bug.cgi?id=226
fixes: http://bugzilla.adiscon.com/show_bug.cgi?id=218 |
GF_Err ainf_box_write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
GF_AssetInformationBox *ptr = (GF_AssetInformationBox *) s;
e = gf_isom_full_box_write(s, bs);
if (e) return e;
gf_bs_write_u32(bs, ptr->profile_version);
if (ptr->APID)
gf_bs_write_data(bs, ptr->APID, (u32) strlen(ptr->APID) );
gf_bs_write_u8(bs, 0);
return GF_OK; | 0 | [
"CWE-787"
]
| gpac | 388ecce75d05e11fc8496aa4857b91245007d26e | 206,255,682,233,034,750,000,000,000,000,000,000,000 | 13 | fixed #1587 |
static int encode_readdir(struct xdr_stream *xdr, const struct nfs4_readdir_arg *readdir, struct rpc_rqst *req)
{
uint32_t attrs[2] = {
FATTR4_WORD0_RDATTR_ERROR|FATTR4_WORD0_FILEID,
FATTR4_WORD1_MOUNTED_ON_FILEID,
};
__be32 *p;
RESERVE_SPACE(12+NFS4_VERIFIER_SIZE+20);
WRITE32(OP_READDIR);
WRITE64(readdir->cookie);
WRITEMEM(readdir->verifier.data, NFS4_VERIFIER_SIZE);
WRITE32(readdir->count >> 1); /* We're not doing readdirplus */
WRITE32(readdir->count);
WRITE32(2);
/* Switch to mounted_on_fileid if the server supports it */
if (readdir->bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID)
attrs[0] &= ~FATTR4_WORD0_FILEID;
else
attrs[1] &= ~FATTR4_WORD1_MOUNTED_ON_FILEID;
WRITE32(attrs[0] & readdir->bitmask[0]);
WRITE32(attrs[1] & readdir->bitmask[1]);
dprintk("%s: cookie = %Lu, verifier = %08x:%08x, bitmap = %08x:%08x\n",
__func__,
(unsigned long long)readdir->cookie,
((u32 *)readdir->verifier.data)[0],
((u32 *)readdir->verifier.data)[1],
attrs[0] & readdir->bitmask[0],
attrs[1] & readdir->bitmask[1]);
return 0;
} | 0 | [
"CWE-703"
]
| linux | dc0b027dfadfcb8a5504f7d8052754bf8d501ab9 | 64,852,255,317,490,340,000,000,000,000,000,000,000 | 32 | NFSv4: Convert the open and close ops to use fmode
Signed-off-by: Trond Myklebust <[email protected]> |
static int instantiate_vlan(struct lxc_handler *handler, struct lxc_netdev *netdev)
{
char peer[IFNAMSIZ];
int err;
static uint16_t vlan_cntr = 0;
if (!netdev->link) {
ERROR("no link specified for vlan netdev");
return -1;
}
err = snprintf(peer, sizeof(peer), "vlan%d-%d", netdev->priv.vlan_attr.vid, vlan_cntr++);
if (err >= sizeof(peer)) {
ERROR("peer name too long");
return -1;
}
err = lxc_vlan_create(netdev->link, peer, netdev->priv.vlan_attr.vid);
if (err) {
ERROR("failed to create vlan interface '%s' on '%s' : %s",
peer, netdev->link, strerror(-err));
return -1;
}
netdev->ifindex = if_nametoindex(peer);
if (!netdev->ifindex) {
ERROR("failed to retrieve the ifindex for %s", peer);
lxc_netdev_delete_by_name(peer);
return -1;
}
DEBUG("instantiated vlan '%s', ifindex is '%d'", " vlan1000",
netdev->ifindex);
return 0;
} | 0 | [
"CWE-59",
"CWE-61"
]
| lxc | 592fd47a6245508b79fe6ac819fe6d3b2c1289be | 139,062,175,363,635,360,000,000,000,000,000,000,000 | 36 | CVE-2015-1335: Protect container mounts against symlinks
When a container starts up, lxc sets up the container's inital fstree
by doing a bunch of mounting, guided by the container configuration
file. The container config is owned by the admin or user on the host,
so we do not try to guard against bad entries. However, since the
mount target is in the container, it's possible that the container admin
could divert the mount with symbolic links. This could bypass proper
container startup (i.e. confinement of a root-owned container by the
restrictive apparmor policy, by diverting the required write to
/proc/self/attr/current), or bypass the (path-based) apparmor policy
by diverting, say, /proc to /mnt in the container.
To prevent this,
1. do not allow mounts to paths containing symbolic links
2. do not allow bind mounts from relative paths containing symbolic
links.
Details:
Define safe_mount which ensures that the container has not inserted any
symbolic links into any mount targets for mounts to be done during
container setup.
The host's mount path may contain symbolic links. As it is under the
control of the administrator, that's ok. So safe_mount begins the check
for symbolic links after the rootfs->mount, by opening that directory.
It opens each directory along the path using openat() relative to the
parent directory using O_NOFOLLOW. When the target is reached, it
mounts onto /proc/self/fd/<targetfd>.
Use safe_mount() in mount_entry(), when mounting container proc,
and when needed. In particular, safe_mount() need not be used in
any case where:
1. the mount is done in the container's namespace
2. the mount is for the container's rootfs
3. the mount is relative to a tmpfs or proc/sysfs which we have
just safe_mount()ed ourselves
Since we were using proc/net as a temporary placeholder for /proc/sys/net
during container startup, and proc/net is a symbolic link, use proc/tty
instead.
Update the lxc.container.conf manpage with details about the new
restrictions.
Finally, add a testcase to test some symbolic link possibilities.
Reported-by: Roman Fiedler
Signed-off-by: Serge Hallyn <[email protected]>
Acked-by: Stéphane Graber <[email protected]> |
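A condensed sketch of the safe_mount idea described above (simplified names, compressed error handling): walk the mount target one component at a time with openat() + O_NOFOLLOW, so that any symlink planted by the container admin fails with ELOOP, and mount over /proc/self/fd/<fd> at the end.

```c
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int open_nofollow(const char *base, char *rel)   /* rel is modified */
{
    int fd = open(base, O_RDONLY | O_DIRECTORY);
    if (fd < 0)
        return -1;
    for (char *tok = strtok(rel, "/"); tok; tok = strtok(NULL, "/")) {
        int next = openat(fd, tok, O_RDONLY | O_NOFOLLOW);
        close(fd);
        if (next < 0)
            return -1;          /* ELOOP here means a symlink component */
        fd = next;
    }
    return fd;                  /* mount onto /proc/self/fd/<fd> */
}
```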
static zend_object_value sqlite_object_new_db(zend_class_entry *class_type TSRMLS_DC)
{
zend_object_value retval;
sqlite_object_new(class_type, &sqlite_object_handlers_db, &retval TSRMLS_CC);
return retval;
} | 0 | []
| php-src | ce96fd6b0761d98353761bf78d5bfb55291179fd | 7,906,729,547,216,835,000,000,000,000,000,000,000 | 7 | - fix #39863, do not accept paths with NULL in them. See http://news.php.net/php.internals/50191, trunk will have the patch later (adding a macro and/or changing (some) APIs. Patch by Rasmus |
agoo_ws_ping(agooCon c) {
agooRes res;
if (NULL == (res = agoo_res_create(c))) {
agoo_log_cat(&agoo_error_cat, "Memory allocation of response failed on connection %llu.", (unsigned long long)c->id);
} else {
res->close = false;
res->con_kind = AGOO_CON_WS;
res->ping = true;
agoo_con_res_append(c, res);
}
} | 0 | [
"CWE-444",
"CWE-61"
]
| agoo | 23d03535cf7b50d679a60a953a0cae9519a4a130 | 122,105,252,515,525,300,000,000,000,000,000,000,000 | 12 | Remote addr (#99)
* REMOTE_ADDR added
* Ready for merge |
static av_cold int init(AVFilterContext *ctx)
{
GradFunContext *s = ctx->priv;
s->thresh = (1 << 15) / s->strength;
s->radius = av_clip((s->radius + 1) & ~1, 4, 32);
s->blur_line = ff_gradfun_blur_line_c;
s->filter_line = ff_gradfun_filter_line_c;
if (ARCH_X86)
ff_gradfun_init_x86(s);
av_log(ctx, AV_LOG_VERBOSE, "threshold:%.2f radius:%d\n", s->strength, s->radius);
return 0;
} | 0 | [
"CWE-119",
"CWE-787"
]
| FFmpeg | e43a0a232dbf6d3c161823c2e07c52e76227a1bc | 283,829,998,682,379,280,000,000,000,000,000,000,000 | 17 | avfilter: fix plane validity checks
Fixes out of array accesses
Signed-off-by: Michael Niedermayer <[email protected]> |
DEFUN (no_ip_extcommunity_list_expanded_all,
no_ip_extcommunity_list_expanded_all_cmd,
"no ip extcommunity-list <100-500>",
NO_STR
IP_STR
EXTCOMMUNITY_LIST_STR
"Extended Community list number (expanded)\n")
{
return extcommunity_list_unset_vty (vty, argc, argv, EXTCOMMUNITY_LIST_EXPANDED);
} | 0 | [
"CWE-125"
]
| frr | 6d58272b4cf96f0daa846210dd2104877900f921 | 46,182,908,580,713,470,000,000,000,000,000,000,000 | 10 | [bgpd] cleanup, compact and consolidate capability parsing code
2007-07-26 Paul Jakma <[email protected]>
* (general) Clean up and compact capability parsing slightly.
Consolidate validation of length and logging of generic TLV, and
memcpy of capability data, thus removing such from cap specifc
code (not always present or correct).
* bgp_open.h: Add structures for the generic capability TLV header
and for the data formats of the various specific capabilities we
support. Hence remove the badly named, or else misdefined, struct
capability.
* bgp_open.c: (bgp_capability_vty_out) Use struct capability_mp_data.
Do the length checks *before* memcpy()'ing based on that length
(stored capability - should have been validated anyway on input,
but..).
(bgp_afi_safi_valid_indices) new function to validate (afi,safi)
which is about to be used as index into arrays, consolidates
several instances of same, at least one of which appeared to be
incomplete..
(bgp_capability_mp) Much condensed.
(bgp_capability_orf_entry) New, process one ORF entry
(bgp_capability_orf) Condensed. Fixed to process all ORF entries.
(bgp_capability_restart) Condensed, and fixed to use a
cap-specific type, rather than abusing capability_mp.
(struct message capcode_str) added to aid generic logging.
(size_t cap_minsizes[]) added to aid generic validation of
capability length field.
(bgp_capability_parse) Generic logging and validation of TLV
consolidated here. Code compacted as much as possible.
* bgp_packet.c: (bgp_open_receive) Capability parsers now use
streams, so no more need here to manually fudge the input stream
getp.
(bgp_capability_msg_parse) use struct capability_mp_data. Validate
lengths /before/ memcpy. Use bgp_afi_safi_valid_indices.
(bgp_capability_receive) Exported for use by test harness.
* bgp_vty.c: (bgp_show_summary) fix conversion warning
(bgp_show_peer) ditto
* bgp_debug.h: Fix storage 'extern' after type 'const'.
* lib/log.c: (mes_lookup) warning about code not being in
same-number array slot should be debug, not warning. E.g. BGP
has several discontigious number spaces, allocating from
different parts of a space is not uncommon (e.g. IANA
assigned versus vendor-assigned code points in some number
space). |
unsigned int DNP3_Base::CalcCRC(int len, const u_char* data)
{
unsigned int crc = 0x0000;
for ( int i = 0; i < len; i++ )
{
unsigned int index = (crc ^ data[i]) & 0xFF;
crc = crc_table[index] ^ (crc >> 8);
}
return ~crc & 0xFFFF;
} | 0 | [
"CWE-119",
"CWE-787"
]
| bro | 6cedd67c381ff22fde653adf02ee31caf66c81a0 | 99,160,708,653,708,200,000,000,000,000,000,000,000 | 12 | DNP3: fix reachable assertion and buffer over-read/overflow.
A DNP3 packet using a link layer header that specifies a zero length can
trigger an assertion failure if assertions are enabled. Assertions are
enabled unless Bro is compiled with the NDEBUG preprocessor macro
defined. The default configuration of Bro will define this macro and so
disables assertions, but using the --enable-debug option in the
configure script will enable assertions. When assertions are disabled,
or also for certain length values, the DNP3 parser may attempt to pass a
negative value as the third argument to memcpy (number of bytes to copy)
and result in a buffer over-read or overflow.
Reported by Travis Emmert. |
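A minimal sketch of the guard this commit implies (illustrative names, not bro's parser): treat the header-declared length as unsigned and reject values smaller than the fixed header size, so the byte count handed to memcpy can never be computed from a negative intermediate.

```c
#include <stdint.h>
#include <string.h>

#define HDR_LEN 5

int copy_payload(const uint8_t *frame, uint8_t declared_len,
                 uint8_t *out, size_t outlen)
{
    if (declared_len < HDR_LEN)          /* would underflow below */
        return -1;
    size_t payload = (size_t)declared_len - HDR_LEN;
    if (payload > outlen)
        return -1;
    memcpy(out, frame + HDR_LEN, payload);
    return 0;
}
```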
lyd_free(struct lyd_node *node)
{
lyd_free_internal_r(node, 1);
} | 0 | [
"CWE-119"
]
| libyang | 32fb4993bc8bb49e93e84016af3c10ea53964be5 | 39,885,179,240,272,110,000,000,000,000,000,000,000 | 4 | schema tree BUGFIX do not check features while still resolving schema
Fixes #723 |
static int veth_open(struct net_device *dev)
{
struct veth_priv *priv;
priv = netdev_priv(dev);
if (priv->peer == NULL)
return -ENOTCONN;
if (priv->peer->flags & IFF_UP) {
netif_carrier_on(dev);
netif_carrier_on(priv->peer);
}
return 0;
} | 0 | [
"CWE-399"
]
| linux | 6ec82562ffc6f297d0de36d65776cff8e5704867 | 1,260,902,808,558,385,200,000,000,000,000,000,000 | 14 | veth: Dont kfree_skb() after dev_forward_skb()
In case of congestion, netif_rx() frees the skb, so we must assume
dev_forward_skb() also consume skb.
Bug introduced by commit 445409602c092
(veth: move loopback logic to common location)
We must change dev_forward_skb() to always consume skb, and veth to not
double free it.
Bug report : http://marc.info/?l=linux-netdev&m=127310770900442&w=3
Reported-by: Martín Ferrari <[email protected]>
Signed-off-by: Eric Dumazet <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
S_ssc_and(pTHX_ const RExC_state_t *pRExC_state, regnode_ssc *ssc,
const regnode_charclass *and_with)
{
/* Accumulate into SSC 'ssc' its 'AND' with 'and_with', which is either
* another SSC or a regular ANYOF class. Can create false positives. */
SV* anded_cp_list;
U8 anded_flags;
PERL_ARGS_ASSERT_SSC_AND;
assert(is_ANYOF_SYNTHETIC(ssc));
/* 'and_with' is used as-is if it too is an SSC; otherwise have to extract
* the code point inversion list and just the relevant flags */
if (is_ANYOF_SYNTHETIC(and_with)) {
anded_cp_list = ((regnode_ssc *)and_with)->invlist;
anded_flags = ANYOF_FLAGS(and_with);
/* XXX This is a kludge around what appears to be deficiencies in the
* optimizer. If we make S_ssc_anything() add in the WARN_SUPER flag,
* there are paths through the optimizer where it doesn't get weeded
* out when it should. And if we don't make some extra provision for
* it like the code just below, it doesn't get added when it should.
* This solution is to add it only when AND'ing, which is here, and
* only when what is being AND'ed is the pristine, original node
* matching anything. Thus it is like adding it to ssc_anything() but
* only when the result is to be AND'ed. Probably the same solution
* could be adopted for the same problem we have with /l matching,
* which is solved differently in S_ssc_init(), and that would lead to
* fewer false positives than that solution has. But if this solution
* creates bugs, the consequences are only that a warning isn't raised
* that should be; while the consequences for having /l bugs is
* incorrect matches */
if (ssc_is_anything((regnode_ssc *)and_with)) {
anded_flags |= ANYOF_SHARED_d_MATCHES_ALL_NON_UTF8_NON_ASCII_non_d_WARN_SUPER;
}
}
else {
anded_cp_list = get_ANYOF_cp_list_for_ssc(pRExC_state, and_with);
if (OP(and_with) == ANYOFD) {
anded_flags = ANYOF_FLAGS(and_with) & ANYOF_COMMON_FLAGS;
}
else {
anded_flags = ANYOF_FLAGS(and_with)
&( ANYOF_COMMON_FLAGS
|ANYOF_SHARED_d_MATCHES_ALL_NON_UTF8_NON_ASCII_non_d_WARN_SUPER
|ANYOF_SHARED_d_UPPER_LATIN1_UTF8_STRING_MATCHES_non_d_RUNTIME_USER_PROP);
if (ANYOFL_UTF8_LOCALE_REQD(ANYOF_FLAGS(and_with))) {
anded_flags &=
ANYOFL_SHARED_UTF8_LOCALE_fold_HAS_MATCHES_nonfold_REQD;
}
}
}
ANYOF_FLAGS(ssc) &= anded_flags;
/* Below, C1 is the list of code points in 'ssc'; P1, its posix classes.
* C2 is the list of code points in 'and-with'; P2, its posix classes.
* 'and_with' may be inverted. When not inverted, we have the situation of
* computing:
* (C1 | P1) & (C2 | P2)
* = (C1 & (C2 | P2)) | (P1 & (C2 | P2))
* = ((C1 & C2) | (C1 & P2)) | ((P1 & C2) | (P1 & P2))
* <= ((C1 & C2) | P2)) | ( P1 | (P1 & P2))
* <= ((C1 & C2) | P1 | P2)
* Alternatively, the last few steps could be:
* = ((C1 & C2) | (C1 & P2)) | ((P1 & C2) | (P1 & P2))
* <= ((C1 & C2) | C1 ) | ( C2 | (P1 & P2))
* <= (C1 | C2 | (P1 & P2))
* We favor the second approach if either P1 or P2 is non-empty. This is
* because these components are a barrier to doing optimizations, as what
* they match cannot be known until the moment of matching as they are
* dependent on the current locale, 'AND"ing them likely will reduce or
* eliminate them.
* But we can do better if we know that C1,P1 are in their initial state (a
* frequent occurrence), each matching everything:
* (<everything>) & (C2 | P2) = C2 | P2
* Similarly, if C2,P2 are in their initial state (again a frequent
* occurrence), the result is a no-op
* (C1 | P1) & (<everything>) = C1 | P1
*
* Inverted, we have
* (C1 | P1) & ~(C2 | P2) = (C1 | P1) & (~C2 & ~P2)
* = (C1 & (~C2 & ~P2)) | (P1 & (~C2 & ~P2))
* <= (C1 & ~C2) | (P1 & ~P2)
* */
if ((ANYOF_FLAGS(and_with) & ANYOF_INVERT)
&& ! is_ANYOF_SYNTHETIC(and_with))
{
unsigned int i;
ssc_intersection(ssc,
anded_cp_list,
FALSE /* Has already been inverted */
);
/* If either P1 or P2 is empty, the intersection will be also; can skip
* the loop */
if (! (ANYOF_FLAGS(and_with) & ANYOF_MATCHES_POSIXL)) {
ANYOF_POSIXL_ZERO(ssc);
}
else if (ANYOF_POSIXL_SSC_TEST_ANY_SET(ssc)) {
/* Note that the Posix class component P from 'and_with' actually
* looks like:
* P = Pa | Pb | ... | Pn
* where each component is one posix class, such as in [\w\s].
* Thus
* ~P = ~(Pa | Pb | ... | Pn)
* = ~Pa & ~Pb & ... & ~Pn
* <= ~Pa | ~Pb | ... | ~Pn
* The last is something we can easily calculate, but unfortunately
* is likely to have many false positives. We could do better
* in some (but certainly not all) instances if two classes in
* P have known relationships. For example
* :lower: <= :alpha: <= :alnum: <= \w <= :graph: <= :print:
* So
* :lower: & :print: = :lower:
* And similarly for classes that must be disjoint. For example,
* since \s and \w can have no elements in common based on rules in
* the POSIX standard,
* \w & ^\S = nothing
* Unfortunately, some vendor locales do not meet the Posix
* standard, in particular almost everything by Microsoft.
* The loop below just changes e.g., \w into \W and vice versa */
regnode_charclass_posixl temp;
int add = 1; /* To calculate the index of the complement */
ANYOF_POSIXL_ZERO(&temp);
for (i = 0; i < ANYOF_MAX; i++) {
assert(i % 2 != 0
|| ! ANYOF_POSIXL_TEST((regnode_charclass_posixl*) and_with, i)
|| ! ANYOF_POSIXL_TEST((regnode_charclass_posixl*) and_with, i + 1));
if (ANYOF_POSIXL_TEST((regnode_charclass_posixl*) and_with, i)) {
ANYOF_POSIXL_SET(&temp, i + add);
}
add = 0 - add; /* 1 goes to -1; -1 goes to 1 */
}
ANYOF_POSIXL_AND(&temp, ssc);
} /* else ssc already has no posixes */
} /* else: Not inverted. This routine is a no-op if 'and_with' is an SSC
in its initial state */
else if (! is_ANYOF_SYNTHETIC(and_with)
|| ! ssc_is_cp_posixl_init(pRExC_state, (regnode_ssc *)and_with))
{
/* But if 'ssc' is in its initial state, the result is just 'and_with';
* copy it over 'ssc' */
if (ssc_is_cp_posixl_init(pRExC_state, ssc)) {
if (is_ANYOF_SYNTHETIC(and_with)) {
StructCopy(and_with, ssc, regnode_ssc);
}
else {
ssc->invlist = anded_cp_list;
ANYOF_POSIXL_ZERO(ssc);
if (ANYOF_FLAGS(and_with) & ANYOF_MATCHES_POSIXL) {
ANYOF_POSIXL_OR((regnode_charclass_posixl*) and_with, ssc);
}
}
}
else if (ANYOF_POSIXL_SSC_TEST_ANY_SET(ssc)
|| (ANYOF_FLAGS(and_with) & ANYOF_MATCHES_POSIXL))
{
/* One or the other of P1, P2 is non-empty. */
if (ANYOF_FLAGS(and_with) & ANYOF_MATCHES_POSIXL) {
ANYOF_POSIXL_AND((regnode_charclass_posixl*) and_with, ssc);
}
ssc_union(ssc, anded_cp_list, FALSE);
}
else { /* P1 = P2 = empty */
ssc_intersection(ssc, anded_cp_list, FALSE);
}
}
} | 0 | [
"CWE-125"
]
| perl5 | 43b2f4ef399e2fd7240b4eeb0658686ad95f8e62 | 201,140,371,035,449,380,000,000,000,000,000,000,000 | 178 | regcomp.c: Convert some strchr to memchr
This allows things to work properly in the face of embedded NULs.
See the branch merge message for more information. |
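A tiny self-contained demo of why strchr is swapped for memchr: strchr stops at the first NUL, so anything after an embedded NUL is invisible to it, while memchr honours an explicit length.

```c
#include <stdio.h>
#include <string.h>

int main(void)
{
    const char buf[] = "a\0xb";                    /* embedded NUL at [1] */
    printf("strchr: %p\n", (void *)strchr(buf, 'x'));      /* NULL */
    printf("memchr: %p\n", memchr(buf, 'x', sizeof buf));  /* &buf[2] */
    return 0;
}
```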
acquire_caps (uid_t uid)
{
struct __user_cap_header_struct hdr;
struct __user_cap_data_struct data;
/* Tell kernel not clear capabilities when dropping root */
if (prctl (PR_SET_KEEPCAPS, 1, 0, 0, 0) < 0)
g_error ("prctl(PR_SET_KEEPCAPS) failed");
/* Drop root uid, but retain the required permitted caps */
if (setuid (uid) < 0)
g_error ("unable to drop privs");
memset (&hdr, 0, sizeof(hdr));
hdr.version = _LINUX_CAPABILITY_VERSION;
/* Drop all non-require capabilities */
data.effective = REQUIRED_CAPS;
data.permitted = REQUIRED_CAPS;
data.inheritable = 0;
if (capset (&hdr, &data) < 0)
g_error ("capset failed");
} | 1 | []
| gvfs | d7d362995aa0cb8905c8d5c2a2a4c305d2ffff80 | 291,445,721,614,561,270,000,000,000,000,000,000,000 | 23 | admin: Use fsuid to ensure correct file ownership
Files created over admin backend should be owned by root, but they are
owned by the user itself. This is because the daemon drops the uid to
make dbus connection work. Use fsuid and euid to fix this issue.
Closes: https://gitlab.gnome.org/GNOME/gvfs/issues/21 |
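A minimal Linux-only sketch of the mechanism the commit describes: setfsuid() changes only the uid used for filesystem access checks and new-file ownership, leaving the euid (and hence the D-Bus connection identity) untouched. The function name is illustrative.

```c
#define _GNU_SOURCE
#include <sys/fsuid.h>
#include <fcntl.h>
#include <unistd.h>

int create_as_root(const char *path)
{
    int old = setfsuid(0);      /* files created now are owned by root */
    int fd = open(path, O_CREAT | O_WRONLY | O_EXCL, 0644);
    setfsuid(old);              /* restore the previous fsuid */
    return fd;
}
```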
EXPORTED unsigned mailbox_should_archive(struct mailbox *mailbox,
const struct index_record *record,
void *rock)
{
int archive_after = config_getduration(IMAPOPT_ARCHIVE_AFTER, 'd');
time_t cutoff = time(0) - archive_after;
if (rock) cutoff = *((time_t *)rock);
int64_t archive_size = config_getbytesize(IMAPOPT_ARCHIVE_MAXSIZE, 'K');
if (archive_size < 0) archive_size = 0;
size_t maxsize = archive_size;
int keepflagged = config_getswitch(IMAPOPT_ARCHIVE_KEEPFLAGGED);
/* never pull messages back from the archives */
if (record->internal_flags & FLAG_INTERNAL_ARCHIVED)
return 1;
/* first check if we're archiving anything */
if (!config_getswitch(IMAPOPT_ARCHIVE_ENABLED))
return 0;
/* always archive big messages */
if (record->size >= maxsize)
return 1;
/* archive everything in DELETED mailboxes */
if (mboxname_isdeletedmailbox(mailbox_name(mailbox), NULL))
return 1;
/* Calendar and Addressbook are small files and need to be hot */
switch (mbtype_isa(mailbox_mbtype(mailbox))) {
case MBTYPE_ADDRESSBOOK:
return 0;
case MBTYPE_CALENDAR:
return 0;
}
/* don't archive flagged messages */
if (keepflagged && (record->system_flags & FLAG_FLAGGED))
return 0;
/* archive all other old messages */
if (record->internaldate <= cutoff)
return 1;
/* and don't archive anything else! */
return 0;
} | 0 | []
| cyrus-imapd | 1d6d15ee74e11a9bd745e80be69869e5fb8d64d6 | 275,485,451,876,022,500,000,000,000,000,000,000,000 | 49 | mailbox.c/reconstruct.c: Add mailbox_mbentry_from_path() |
GnashImage::GnashImage(size_t width, size_t height, ImageType type,
ImageLocation location)
:
_type(type),
_location(location),
_width(width),
_height(height)
{
// Constructed from external input, so restrict dimensions to avoid
// overflow in size calculations
if (!checkValidSize(_width, _height, channels())) {
throw std::bad_alloc();
}
_data.reset(new value_type[size()]);
} | 0 | [
"CWE-189"
]
| gnash | bb4dc77eecb6ed1b967e3ecbce3dac6c5e6f1527 | 238,311,351,291,753,060,000,000,000,000,000,000,000 | 15 | Fix crash in GnashImage.cpp |
f_diff_hlID(typval_T *argvars UNUSED, typval_T *rettv UNUSED)
{
#ifdef FEAT_DIFF
linenr_T lnum = tv_get_lnum(argvars);
static linenr_T prev_lnum = 0;
static varnumber_T changedtick = 0;
static int fnum = 0;
static int change_start = 0;
static int change_end = 0;
static hlf_T hlID = (hlf_T)0;
int filler_lines;
int col;
if (lnum < 0) /* ignore type error in {lnum} arg */
lnum = 0;
if (lnum != prev_lnum
|| changedtick != CHANGEDTICK(curbuf)
|| fnum != curbuf->b_fnum)
{
/* New line, buffer, change: need to get the values. */
filler_lines = diff_check(curwin, lnum);
if (filler_lines < 0)
{
if (filler_lines == -1)
{
change_start = MAXCOL;
change_end = -1;
if (diff_find_change(curwin, lnum, &change_start, &change_end))
hlID = HLF_ADD; /* added line */
else
hlID = HLF_CHD; /* changed line */
}
else
hlID = HLF_ADD; /* added line */
}
else
hlID = (hlf_T)0;
prev_lnum = lnum;
changedtick = CHANGEDTICK(curbuf);
fnum = curbuf->b_fnum;
}
if (hlID == HLF_CHD || hlID == HLF_TXD)
{
col = tv_get_number(&argvars[1]) - 1; /* ignore type error in {col} */
if (col >= change_start && col <= change_end)
hlID = HLF_TXD; /* changed text */
else
hlID = HLF_CHD; /* changed line */
}
rettv->vval.v_number = hlID == (hlf_T)0 ? 0 : (int)hlID;
#endif
} | 0 | [
"CWE-78"
]
| vim | 8c62a08faf89663e5633dc5036cd8695c80f1075 | 14,099,653,410,612,404,000,000,000,000,000,000,000 | 53 | patch 8.1.0881: can execute shell commands in rvim through interfaces
Problem: Can execute shell commands in rvim through interfaces.
Solution: Disable using interfaces in restricted mode. Allow for writing
file with writefile(), histadd() and a few others. |
static netdev_features_t gso_features_check(const struct sk_buff *skb,
struct net_device *dev,
netdev_features_t features)
{
u16 gso_segs = skb_shinfo(skb)->gso_segs;
if (gso_segs > dev->gso_max_segs)
return features & ~NETIF_F_GSO_MASK;
/* Support for GSO partial features requires software
* intervention before we can actually process the packets
* so we need to strip support for any partial features now
* and we can pull them back in after we have partially
* segmented the frame.
*/
if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL))
features &= ~dev->gso_partial_features;
/* Make sure to clear the IPv4 ID mangling feature if the
* IPv4 header has the potential to be fragmented.
*/
if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
struct iphdr *iph = skb->encapsulation ?
inner_ip_hdr(skb) : ip_hdr(skb);
if (!(iph->frag_off & htons(IP_DF)))
features &= ~NETIF_F_TSO_MANGLEID;
}
return features; | 0 | [
"CWE-476"
]
| linux | 0ad646c81b2182f7fa67ec0c8c825e0ee165696d | 198,534,003,605,468,160,000,000,000,000,000,000,000 | 31 | tun: call dev_get_valid_name() before register_netdevice()
register_netdevice() could fail early when we have an invalid
dev name, in which case ->ndo_uninit() is not called. For tun
device, this is a problem because a timer etc. are already
initialized and it expects ->ndo_uninit() to clean them up.
We could move these initializations into a ->ndo_init() so
that register_netdevice() knows better, however this is still
complicated due to the logic in tun_detach().
Therefore, I choose to just call dev_get_valid_name() before
register_netdevice(), which is quicker and much easier to audit.
And for this specific case, it is already enough.
Fixes: 96442e42429e ("tuntap: choose the txq based on rxq")
Reported-by: Dmitry Alexeev <[email protected]>
Cc: Jason Wang <[email protected]>
Cc: "Michael S. Tsirkin" <[email protected]>
Signed-off-by: Cong Wang <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
static int devicenrange(i_ctx_t * i_ctx_p, ref *space, float *ptr)
{
int i, limit, code;
PS_colour_space_t *cspace;
ref altspace;
code = array_get(imemory, space, 1, &altspace);
if (code < 0)
return code;
code = get_space_object(i_ctx_p, &altspace, &cspace);
if (code < 0)
return code;
code = cspace->numcomponents(i_ctx_p, &altspace, &limit);
if (code < 0)
return code;
for (i = 0;i < limit * 2;i+=2) {
ptr[i] = 0;
ptr[i+1] = 1;
}
return 0;
} | 0 | []
| ghostpdl | b326a71659b7837d3acde954b18bda1a6f5e9498 | 291,951,713,801,285,300,000,000,000,000,000,000,000 | 25 | Bug 699655: Properly check the return value....
...when getting a value from a dictionary |
irc_server_valid (struct t_irc_server *server)
{
struct t_irc_server *ptr_server;
if (!server)
return 0;
for (ptr_server = irc_servers; ptr_server;
ptr_server = ptr_server->next_server)
{
if (ptr_server == server)
return 1;
}
/* server not found */
return 0;
} | 0 | [
"CWE-20"
]
| weechat | c265cad1c95b84abfd4e8d861f25926ef13b5d91 | 73,362,635,685,188,730,000,000,000,000,000,000,000 | 17 | Fix verification of SSL certificates by calling gnutls verify callback (patch #7459) |
build_attribute_subpkt(PKT_user_id *uid,byte type,
const void *buf,u32 buflen,
const void *header,u32 headerlen)
{
byte *attrib;
int idx;
if(1+headerlen+buflen>8383)
idx=5;
else if(1+headerlen+buflen>191)
idx=2;
else
idx=1;
/* realloc uid->attrib_data to the right size */
uid->attrib_data=xrealloc(uid->attrib_data,
uid->attrib_len+idx+1+headerlen+buflen);
attrib=&uid->attrib_data[uid->attrib_len];
if(idx==5)
{
attrib[0]=255;
attrib[1]=(1+headerlen+buflen) >> 24;
attrib[2]=(1+headerlen+buflen) >> 16;
attrib[3]=(1+headerlen+buflen) >> 8;
attrib[4]=1+headerlen+buflen;
}
else if(idx==2)
{
attrib[0]=(1+headerlen+buflen-192) / 256 + 192;
attrib[1]=(1+headerlen+buflen-192) % 256;
}
else
attrib[0]=1+headerlen+buflen; /* Good luck finding a JPEG this small! */
attrib[idx++]=type;
/* Tack on our data at the end */
if(headerlen>0)
memcpy(&attrib[idx],header,headerlen);
memcpy(&attrib[idx+headerlen],buf,buflen);
uid->attrib_len+=idx+headerlen+buflen;
} | 0 | [
"CWE-20"
]
| gnupg | 2183683bd633818dd031b090b5530951de76f392 | 10,847,158,697,819,342,000,000,000,000,000,000,000 | 46 | Use inline functions to convert buffer data to scalars.
* common/host2net.h (buf16_to_ulong, buf16_to_uint): New.
(buf16_to_ushort, buf16_to_u16): New.
(buf32_to_size_t, buf32_to_ulong, buf32_to_uint, buf32_to_u32): New.
--
Commit 91b826a38880fd8a989318585eb502582636ddd8 was not enough to
avoid all sign extension on shift problems. Hanno Böck found a case
with an invalid read due to this problem. To fix that once and for
all almost all uses of "<< 24" and "<< 8" are changed by this patch to
use an inline function from host2net.h.
Signed-off-by: Werner Koch <[email protected]> |
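A sketch of the host2net.h idea from the commit: widen each byte to an unsigned type before shifting, so a value like 0x80 can never be promoted to int and sign-extended by "<< 24".

```c
#include <stdint.h>

static inline uint32_t buf32_to_u32(const void *buffer)
{
    const unsigned char *p = buffer;
    return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16)
         | ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}
```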
UtilityTupleDescriptor(Node *parsetree)
{
switch (nodeTag(parsetree))
{
case T_FetchStmt:
{
FetchStmt *stmt = (FetchStmt *) parsetree;
Portal portal;
if (stmt->ismove)
return NULL;
portal = GetPortalByName(stmt->portalname);
if (!PortalIsValid(portal))
return NULL; /* not our business to raise error */
return CreateTupleDescCopy(portal->tupDesc);
}
case T_ExecuteStmt:
{
ExecuteStmt *stmt = (ExecuteStmt *) parsetree;
PreparedStatement *entry;
entry = FetchPreparedStatement(stmt->name, false);
if (!entry)
return NULL; /* not our business to raise error */
return FetchPreparedStatementResultDesc(entry);
}
case T_ExplainStmt:
return ExplainResultDesc((ExplainStmt *) parsetree);
case T_VariableShowStmt:
{
VariableShowStmt *n = (VariableShowStmt *) parsetree;
return GetPGVariableResultDesc(n->name);
}
default:
return NULL;
}
} | 0 | [
"CWE-362"
]
| postgres | 5f173040e324f6c2eebb90d86cf1b0cdb5890f0a | 336,477,183,267,624,400,000,000,000,000,000,000,000 | 42 | Avoid repeated name lookups during table and index DDL.
If the name lookups come to different conclusions due to concurrent
activity, we might perform some parts of the DDL on a different table
than other parts. At least in the case of CREATE INDEX, this can be
used to cause the permissions checks to be performed against a
different table than the index creation, allowing for a privilege
escalation attack.
This changes the calling convention for DefineIndex, CreateTrigger,
transformIndexStmt, transformAlterTableStmt, CheckIndexCompatible
(in 9.2 and newer), and AlterTable (in 9.1 and older). In addition,
CheckRelationOwnership is removed in 9.2 and newer and the calling
convention is changed in older branches. A field has also been added
to the Constraint node (FkConstraint in 8.4). Third-party code calling
these functions or using the Constraint node will require updating.
Report by Andres Freund. Patch by Robert Haas and Andres Freund,
reviewed by Tom Lane.
Security: CVE-2014-0062 |
option_filesystem_cb (const gchar *option_name,
const gchar *value,
gpointer data,
GError **error)
{
FlatpakContext *context = data;
g_autofree char *fs = NULL;
FlatpakFilesystemMode mode;
if (!flatpak_context_parse_filesystem (value, &fs, &mode, error))
return FALSE;
flatpak_context_take_filesystem (context, g_steal_pointer (&fs), mode);
return TRUE;
} | 0 | [
"CWE-94",
"CWE-74"
]
| flatpak | 6e5ae7a109cdfa9735ea7ccbd8cb79f9e8d3ae8b | 115,843,018,839,073,260,000,000,000,000,000,000,000 | 15 | context: Add --env-fd option
This allows environment variables to be added to the context without
making their values visible to processes running under a different uid,
which might be significant if the variable's value is a token or some
other secret value.
Signed-off-by: Simon McVittie <[email protected]>
Part-of: https://github.com/flatpak/flatpak/security/advisories/GHSA-4ppf-fxf6-vxg2 |
const char* avahi_server_get_host_name(AvahiServer *s) {
assert(s);
return s->host_name;
} | 0 | [
"CWE-399"
]
| avahi | 3093047f1aa36bed8a37fa79004bf0ee287929f4 | 46,157,198,167,218,490,000,000,000,000,000,000,000 | 5 | Don't get confused by UDP packets with a source port that is zero
This is a fix for rhbz 475394.
Problem identified by Hugo Dias. |
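A minimal sketch of the check this commit adds (illustrative, not avahi's actual code): drop inbound UDP packets whose source port is zero rather than replying to them, since a reply would target an invalid port.

```c
#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdbool.h>

bool source_port_valid(const struct sockaddr_in *from)
{
    return ntohs(from->sin_port) != 0;
}
```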
int esp6_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
{
u8 *iv;
int alen;
void *tmp;
int ivlen;
int assoclen;
int extralen;
struct page *page;
struct ip_esp_hdr *esph;
struct aead_request *req;
struct crypto_aead *aead;
struct scatterlist *sg, *dsg;
struct esp_output_extra *extra;
int err = -ENOMEM;
assoclen = sizeof(struct ip_esp_hdr);
extralen = 0;
if (x->props.flags & XFRM_STATE_ESN) {
extralen += sizeof(*extra);
assoclen += sizeof(__be32);
}
aead = x->data;
alen = crypto_aead_authsize(aead);
ivlen = crypto_aead_ivsize(aead);
tmp = esp_alloc_tmp(aead, esp->nfrags + 2, extralen);
if (!tmp)
goto error;
extra = esp_tmp_extra(tmp);
iv = esp_tmp_iv(aead, tmp, extralen);
req = esp_tmp_req(aead, iv);
sg = esp_req_sg(aead, req);
if (esp->inplace)
dsg = sg;
else
dsg = &sg[esp->nfrags];
esph = esp_output_set_esn(skb, x, esp->esph, extra);
esp->esph = esph;
sg_init_table(sg, esp->nfrags);
err = skb_to_sgvec(skb, sg,
(unsigned char *)esph - skb->data,
assoclen + ivlen + esp->clen + alen);
if (unlikely(err < 0))
goto error_free;
if (!esp->inplace) {
int allocsize;
struct page_frag *pfrag = &x->xfrag;
allocsize = ALIGN(skb->data_len, L1_CACHE_BYTES);
spin_lock_bh(&x->lock);
if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
spin_unlock_bh(&x->lock);
goto error_free;
}
skb_shinfo(skb)->nr_frags = 1;
page = pfrag->page;
get_page(page);
/* replace page frags in skb with new page */
__skb_fill_page_desc(skb, 0, page, pfrag->offset, skb->data_len);
pfrag->offset = pfrag->offset + allocsize;
spin_unlock_bh(&x->lock);
sg_init_table(dsg, skb_shinfo(skb)->nr_frags + 1);
err = skb_to_sgvec(skb, dsg,
(unsigned char *)esph - skb->data,
assoclen + ivlen + esp->clen + alen);
if (unlikely(err < 0))
goto error_free;
}
if ((x->props.flags & XFRM_STATE_ESN))
aead_request_set_callback(req, 0, esp_output_done_esn, skb);
else
aead_request_set_callback(req, 0, esp_output_done, skb);
aead_request_set_crypt(req, sg, dsg, ivlen + esp->clen, iv);
aead_request_set_ad(req, assoclen);
memset(iv, 0, ivlen);
memcpy(iv + ivlen - min(ivlen, 8), (u8 *)&esp->seqno + 8 - min(ivlen, 8),
min(ivlen, 8));
ESP_SKB_CB(skb)->tmp = tmp;
err = crypto_aead_encrypt(req);
switch (err) {
case -EINPROGRESS:
goto error;
case -ENOSPC:
err = NET_XMIT_DROP;
break;
case 0:
if ((x->props.flags & XFRM_STATE_ESN))
esp_output_restore_header(skb);
esp_output_encap_csum(skb);
}
if (sg != dsg)
esp_ssg_unref(x, tmp);
if (!err && x->encap && x->encap->encap_type == TCP_ENCAP_ESPINTCP)
err = esp_output_tail_tcp(x, skb);
error_free:
kfree(tmp);
error:
return err;
} | 0 | [
"CWE-787"
]
| linux | ebe48d368e97d007bfeb76fcb065d6cfc4c96645 | 284,248,386,054,812,640,000,000,000,000,000,000,000 | 121 | esp: Fix possible buffer overflow in ESP transformation
The maximum message size that can be sent is bigger than
the maximum size that skb_page_frag_refill can allocate.
So it is possible to write beyond the allocated buffer.
Fix this by doing a fallback to COW in that case.
v2:
Avoid get_order() costs as suggested by Linus Torvalds.
Fixes: cac2661c53f3 ("esp4: Avoid skb_cow_data whenever possible")
Fixes: 03e2a30f6a27 ("esp6: Avoid skb_cow_data whenever possible")
Reported-by: valis <[email protected]>
Signed-off-by: Steffen Klassert <[email protected]> |
void vncws_encode_frame(Buffer *output, const void *payload,
const size_t payload_size)
{
size_t header_size = 0;
unsigned char opcode = WS_OPCODE_BINARY_FRAME;
union {
char buf[WS_HEAD_MAX_LEN];
WsHeader ws;
} header;
if (!payload_size) {
return;
}
header.ws.b0 = 0x80 | (opcode & 0x0f);
if (payload_size <= 125) {
header.ws.b1 = (uint8_t)payload_size;
header_size = 2;
} else if (payload_size < 65536) {
header.ws.b1 = 0x7e;
header.ws.u.s16.l16 = cpu_to_be16((uint16_t)payload_size);
header_size = 4;
} else {
header.ws.b1 = 0x7f;
header.ws.u.s64.l64 = cpu_to_be64(payload_size);
header_size = 10;
}
buffer_reserve(output, header_size + payload_size);
buffer_append(output, header.buf, header_size);
buffer_append(output, payload, payload_size);
} | 0 | []
| qemu | a2bebfd6e09d285aa793cae3fb0fc3a39a9fee6e | 92,084,608,555,199,980,000,000,000,000,000,000,000 | 32 | CVE-2015-1779: incrementally decode websocket frames
The logic for decoding websocket frames wants to fully
decode the frame header and payload, before allowing the
VNC server to see any of the payload data. There is no
size limit on websocket payloads, so this allows a
malicious network client to consume 2^64 bytes in memory
in QEMU. It can trigger this denial of service before
the VNC server even performs any authentication.
The fix is to decode the header, and then incrementally
decode the payload data as it is needed. With this fix
the websocket decoder will allow at most 4k of data to
be buffered before decoding and processing payload.
Signed-off-by: Daniel P. Berrange <[email protected]>
[ kraxel: fix frequent spurious disconnects, suggested by Peter Maydell ]
@@ -361,7 +361,7 @@ int vncws_decode_frame_payload(Buffer *input,
- *payload_size = input->offset;
+ *payload_size = *payload_remain;
[ kraxel: fix 32bit build ]
@@ -306,7 +306,7 @@ struct VncState
- uint64_t ws_payload_remain;
+ size_t ws_payload_remain;
Signed-off-by: Gerd Hoffmann <[email protected]> |
menu_item_select_cb (GtkMenuItem *proxy,
FrWindow *window)
{
GtkAction *action;
char *message;
action = gtk_activatable_get_related_action (GTK_ACTIVATABLE (proxy));
g_return_if_fail (action != NULL);
g_object_get (G_OBJECT (action), "tooltip", &message, NULL);
if (message) {
gtk_statusbar_push (GTK_STATUSBAR (window->priv->statusbar),
window->priv->help_message_cid, message);
g_free (message);
}
} | 0 | [
"CWE-22"
]
| file-roller | b147281293a8307808475e102a14857055f81631 | 38,223,821,131,793,760,000,000,000,000,000,000,000 | 16 | libarchive: sanitize filenames before extracting |
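A sketch of filename sanitizing in the spirit of this fix (file-roller's actual logic may differ): reject absolute paths and any ".." component so extraction cannot escape the destination directory.

```c
#include <stdbool.h>
#include <string.h>

bool entry_name_is_safe(const char *name)
{
    if (name == NULL || name[0] == '\0' || name[0] == '/')
        return false;                       /* empty or absolute path */
    for (const char *p = name; *p; ) {
        size_t seg = strcspn(p, "/");
        if (seg == 2 && p[0] == '.' && p[1] == '.')
            return false;                   /* ".." component */
        p += seg;
        while (*p == '/')
            p++;
    }
    return true;
}
```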
TEST_F(HttpConnectionManagerImplTest, DownstreamProtocolError) {
InSequence s;
setup(false, "");
EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status {
conn_manager_->newStream(response_encoder_);
return codecProtocolError("protocol error");
}));
EXPECT_CALL(response_encoder_.stream_, removeCallbacks(_));
EXPECT_CALL(filter_factory_, createFilterChain(_)).Times(0);
// A protocol exception should result in reset of the streams followed by a remote or local close
// depending on whether the downstream client closes the connection prior to the delayed close
// timer firing.
EXPECT_CALL(filter_callbacks_.connection_,
close(Network::ConnectionCloseType::FlushWriteAndDelay));
// Kick off the incoming data.
Buffer::OwnedImpl fake_input("1234");
conn_manager_->onData(fake_input, false);
} | 0 | [
"CWE-400"
]
| envoy | 0e49a495826ea9e29134c1bd54fdeb31a034f40c | 308,466,224,303,516,150,000,000,000,000,000,000,000 | 22 | http/2: add stats and stream flush timeout (#139)
This commit adds a new stream flush timeout to guard against a
remote server that does not open window once an entire stream has
been buffered for flushing. Additional stats have also been added
to better understand the codecs view of active streams as well as
amount of data buffered.
Signed-off-by: Matt Klein <[email protected]> |
static void call_trans2ioctl(connection_struct *conn,
struct smb_request *req,
char **pparams, int total_params,
char **ppdata, int total_data,
unsigned int max_data_bytes)
{
char *pdata = *ppdata;
files_struct *fsp = file_fsp(req, SVAL(req->vwv+15, 0));
/* check for an invalid fid before proceeding */
if (!fsp) {
reply_nterror(req, NT_STATUS_INVALID_HANDLE);
return;
}
if ((SVAL(req->vwv+16, 0) == LMCAT_SPL)
&& (SVAL(req->vwv+17, 0) == LMFUNC_GETJOBID)) {
*ppdata = (char *)SMB_REALLOC(*ppdata, 32);
if (*ppdata == NULL) {
reply_nterror(req, NT_STATUS_NO_MEMORY);
return;
}
pdata = *ppdata;
/* NOTE - THIS IS ASCII ONLY AT THE MOMENT - NOT SURE IF OS/2
CAN ACCEPT THIS IN UNICODE. JRA. */
SSVAL(pdata,0,fsp->rap_print_jobid); /* Job number */
srvstr_push(pdata, req->flags2, pdata + 2,
global_myname(), 15,
STR_ASCII|STR_TERMINATE); /* Our NetBIOS name */
srvstr_push(pdata, req->flags2, pdata+18,
lp_servicename(SNUM(conn)), 13,
STR_ASCII|STR_TERMINATE); /* Service name */
send_trans2_replies(conn, req, *pparams, 0, *ppdata, 32,
max_data_bytes);
return;
}
DEBUG(2,("Unknown TRANS2_IOCTL\n"));
reply_nterror(req, NT_STATUS_NOT_IMPLEMENTED);
} | 0 | [
"CWE-22"
]
| samba | bd269443e311d96ef495a9db47d1b95eb83bb8f4 | 226,157,039,236,281,000,000,000,000,000,000,000,000 | 43 | Fix bug 7104 - "wide links" and "unix extensions" are incompatible.
Change parameter "wide links" to default to "no".
Ensure "wide links = no" if "unix extensions = yes" on a share.
Fix man pages to reflect this.
Remove "within share" checks for a UNIX symlink set - even if
widelinks = no. The server will not follow that link anyway.
Correct DEBUG message in check_reduced_name() to add missing "\n"
so it's really clear when a path is being denied as it's outside
the enclosing share path.
Jeremy. |
cliprdr_send_data_request(uint32 format)
{
uint8 buffer[4];
DEBUG_CLIPBOARD(("cliprdr_send_data_request\n"));
buf_out_uint32(buffer, format);
cliprdr_send_packet(CLIPRDR_DATA_REQUEST, CLIPRDR_REQUEST, buffer, sizeof(buffer));
} | 0 | [
"CWE-787"
]
| rdesktop | 766ebcf6f23ccfe8323ac10242ae6e127d4505d2 | 201,681,094,162,943,800,000,000,000,000,000,000,000 | 8 | Malicious RDP server security fixes
This commit includes fixes for a set of 21 vulnerabilities in
rdesktop when a malicious RDP server is used.
All vulnerabilities was identified and reported by Eyal Itkin.
* Add rdp_protocol_error function that is used in several fixes
* Refactor of process_bitmap_updates
* Fix possible integer overflow in s_check_rem() on 32bit arch
* Fix memory corruption in process_bitmap_data - CVE-2018-8794
* Fix remote code execution in process_bitmap_data - CVE-2018-8795
* Fix remote code execution in process_plane - CVE-2018-8797
* Fix Denial of Service in mcs_recv_connect_response - CVE-2018-20175
* Fix Denial of Service in mcs_parse_domain_params - CVE-2018-20175
* Fix Denial of Service in sec_parse_crypt_info - CVE-2018-20176
* Fix Denial of Service in sec_recv - CVE-2018-20176
* Fix minor information leak in rdpdr_process - CVE-2018-8791
* Fix Denial of Service in cssp_read_tsrequest - CVE-2018-8792
* Fix remote code execution in cssp_read_tsrequest - CVE-2018-8793
* Fix Denial of Service in process_bitmap_data - CVE-2018-8796
* Fix minor information leak in rdpsnd_process_ping - CVE-2018-8798
* Fix Denial of Service in process_secondary_order - CVE-2018-8799
* Fix remote code execution in in ui_clip_handle_data - CVE-2018-8800
* Fix major information leak in ui_clip_handle_data - CVE-2018-20174
* Fix memory corruption in rdp_in_unistr - CVE-2018-20177
* Fix Denial of Service in process_demand_active - CVE-2018-20178
* Fix remote code execution in lspci_process - CVE-2018-20179
* Fix remote code execution in rdpsnddbg_process - CVE-2018-20180
* Fix remote code execution in seamless_process - CVE-2018-20181
* Fix remote code execution in seamless_process_line - CVE-2018-20182 |
text_to_screenline(win_T *wp, char_u *text, int col)
{
int off = (int)(current_ScreenLine - ScreenLines);
if (has_mbyte)
{
int cells;
int u8c, u8cc[MAX_MCO];
int i;
int idx;
int c_len;
char_u *p;
# ifdef FEAT_ARABIC
int prev_c = 0; // previous Arabic character
int prev_c1 = 0; // first composing char for prev_c
# endif
# ifdef FEAT_RIGHTLEFT
if (wp->w_p_rl)
idx = off;
else
# endif
idx = off + col;
// Store multibyte characters in ScreenLines[] et al. correctly.
for (p = text; *p != NUL; )
{
cells = (*mb_ptr2cells)(p);
c_len = (*mb_ptr2len)(p);
if (col + cells > wp->w_width
# ifdef FEAT_RIGHTLEFT
- (wp->w_p_rl ? col : 0)
# endif
)
break;
ScreenLines[idx] = *p;
if (enc_utf8)
{
u8c = utfc_ptr2char(p, u8cc);
if (*p < 0x80 && u8cc[0] == 0)
{
ScreenLinesUC[idx] = 0;
#ifdef FEAT_ARABIC
prev_c = u8c;
#endif
}
else
{
#ifdef FEAT_ARABIC
if (p_arshape && !p_tbidi && ARABIC_CHAR(u8c))
{
// Do Arabic shaping.
int pc, pc1, nc;
int pcc[MAX_MCO];
int firstbyte = *p;
// The idea of what is the previous and next
// character depends on 'rightleft'.
if (wp->w_p_rl)
{
pc = prev_c;
pc1 = prev_c1;
nc = utf_ptr2char(p + c_len);
prev_c1 = u8cc[0];
}
else
{
pc = utfc_ptr2char(p + c_len, pcc);
nc = prev_c;
pc1 = pcc[0];
}
prev_c = u8c;
u8c = arabic_shape(u8c, &firstbyte, &u8cc[0],
pc, pc1, nc);
ScreenLines[idx] = firstbyte;
}
else
prev_c = u8c;
#endif
// Non-BMP character: display as ? or fullwidth ?.
ScreenLinesUC[idx] = u8c;
for (i = 0; i < Screen_mco; ++i)
{
ScreenLinesC[i][idx] = u8cc[i];
if (u8cc[i] == 0)
break;
}
}
if (cells > 1)
ScreenLines[idx + 1] = 0;
}
else if (enc_dbcs == DBCS_JPNU && *p == 0x8e)
// double-byte single width character
ScreenLines2[idx] = p[1];
else if (cells > 1)
// double-width character
ScreenLines[idx + 1] = p[1];
col += cells;
idx += cells;
p += c_len;
}
}
else
{
int len = (int)STRLEN(text);
if (len > wp->w_width - col)
len = wp->w_width - col;
if (len > 0)
{
#ifdef FEAT_RIGHTLEFT
if (wp->w_p_rl)
mch_memmove(current_ScreenLine, text, len);
else
#endif
mch_memmove(current_ScreenLine + col, text, len);
col += len;
}
}
return col;
} | 0 | [
"CWE-122"
]
| vim | 826bfe4bbd7594188e3d74d2539d9707b1c6a14b | 13,864,643,736,561,974,000,000,000,000,000,000,000 | 122 | patch 8.2.3487: illegal memory access if buffer name is very long
Problem: Illegal memory access if buffer name is very long.
Solution: Make sure not to go over the end of the buffer. |
isdn_net_open(struct net_device *dev)
{
int i;
struct net_device *p;
struct in_device *in_dev;
/* moved here from isdn_net_reset, because only the master has an
interface associated which is supposed to be started. BTW:
we need to call netif_start_queue, not netif_wake_queue here */
netif_start_queue(dev);
isdn_net_reset(dev);
/* Fill in the MAC-level header (not needed, but for compatibility... */
for (i = 0; i < ETH_ALEN - sizeof(u32); i++)
dev->dev_addr[i] = 0xfc;
if ((in_dev = dev->ip_ptr) != NULL) {
/*
* Any address will do - we take the first
*/
struct in_ifaddr *ifa = in_dev->ifa_list;
if (ifa != NULL)
memcpy(dev->dev_addr + 2, &ifa->ifa_local, 4);
}
/* If this interface has slaves, start them also */
p = MASTER_TO_SLAVE(dev);
if (p) {
while (p) {
isdn_net_reset(p);
p = MASTER_TO_SLAVE(p);
}
}
isdn_lock_drivers();
return 0;
} | 0 | [
"CWE-119"
]
| linux | 9f5af546e6acc30f075828cb58c7f09665033967 | 83,028,527,577,754,190,000,000,000,000,000,000,000 | 35 | isdn/i4l: fix buffer overflow
This fixes a potential buffer overflow in isdn_net.c caused by an
unbounded strcpy.
[ ISDN seems to be effectively unmaintained, and the I4L driver in
particular is long deprecated, but in case somebody uses this..
- Linus ]
Signed-off-by: Jiten Thakkar <[email protected]>
Signed-off-by: Annie Cherkaev <[email protected]>
Cc: Karsten Keil <[email protected]>
Cc: Kees Cook <[email protected]>
Cc: [email protected]
Signed-off-by: Linus Torvalds <[email protected]> |
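A generic sketch of the unbounded-strcpy fix pattern (not the isdn_net.c patch itself): always copy with an explicit destination size and guaranteed NUL termination; snprintf is used here as a portable stand-in for strlcpy.

```c
#include <stdio.h>

#define IFNAME_MAX 16

int set_ifname(char dst[IFNAME_MAX], const char *src)
{
    int n = snprintf(dst, IFNAME_MAX, "%s", src);
    return (n < 0 || n >= IFNAME_MAX) ? -1 : 0;   /* -1: would truncate */
}
```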
copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *vma,
unsigned long addr, int *rss)
{
unsigned long vm_flags = vma->vm_flags;
pte_t pte = *src_pte;
struct page *page;
/* pte contains position in swap or file, so copy. */
if (unlikely(!pte_present(pte))) {
swp_entry_t entry = pte_to_swp_entry(pte);
if (likely(!non_swap_entry(entry))) {
if (swap_duplicate(entry) < 0)
return entry.val;
/* make sure dst_mm is on swapoff's mmlist. */
if (unlikely(list_empty(&dst_mm->mmlist))) {
spin_lock(&mmlist_lock);
if (list_empty(&dst_mm->mmlist))
list_add(&dst_mm->mmlist,
&src_mm->mmlist);
spin_unlock(&mmlist_lock);
}
rss[MM_SWAPENTS]++;
} else if (is_migration_entry(entry)) {
page = migration_entry_to_page(entry);
rss[mm_counter(page)]++;
if (is_write_migration_entry(entry) &&
is_cow_mapping(vm_flags)) {
/*
* COW mappings require pages in both
* parent and child to be set to read.
*/
make_migration_entry_read(&entry);
pte = swp_entry_to_pte(entry);
if (pte_swp_soft_dirty(*src_pte))
pte = pte_swp_mksoft_dirty(pte);
set_pte_at(src_mm, addr, src_pte, pte);
}
}
goto out_set_pte;
}
/*
* If it's a COW mapping, write protect it both
* in the parent and the child
*/
if (is_cow_mapping(vm_flags)) {
ptep_set_wrprotect(src_mm, addr, src_pte);
pte = pte_wrprotect(pte);
}
/*
* If it's a shared mapping, mark it clean in
* the child
*/
if (vm_flags & VM_SHARED)
pte = pte_mkclean(pte);
pte = pte_mkold(pte);
page = vm_normal_page(vma, addr, pte);
if (page) {
get_page(page);
page_dup_rmap(page, false);
rss[mm_counter(page)]++;
}
out_set_pte:
set_pte_at(dst_mm, addr, dst_pte, pte);
return 0;
} | 0 | [
"CWE-119"
]
| linux | 1be7107fbe18eed3e319a6c3e83c78254b693acb | 277,769,896,500,764,830,000,000,000,000,000,000,000 | 74 | mm: larger stack guard gap, between vmas
Stack guard page is a useful feature to reduce a risk of stack smashing
into a different mapping. We have been using a single page gap which
is sufficient to prevent having stack adjacent to a different mapping.
But this seems to be insufficient in the light of the stack usage in
userspace. E.g. glibc uses as large as 64kB alloca() in many commonly
used functions. Others use constructs liks gid_t buffer[NGROUPS_MAX]
which is 256kB or stack strings with MAX_ARG_STRLEN.
This will become especially dangerous for suid binaries run with the
default unlimited stack size limit, because those applications can be
tricked to consume a large portion of the stack and a single glibc call
could jump over the guard page. These attacks are not theoretical,
unfortunately.
Make those attacks less probable by increasing the stack guard gap
to 1MB (on systems with 4k pages; but make it depend on the page size
because systems with larger base pages might cap stack allocations in
the PAGE_SIZE units) which should cover larger alloca() and VLA stack
allocations. It is obviously not a full fix because the problem is
somehow inherent, but it should reduce attack space a lot.
One could argue that the gap size should be configurable from userspace,
but that can be done later when somebody finds that the new 1MB is wrong
for some special case applications. For now, add a kernel command line
option (stack_guard_gap) to specify the stack gap size (in page units).
Implementation wise, first delete all the old code for stack guard page:
because although we could get away with accounting one extra page in a
stack vma, accounting a larger gap can break userspace - case in point,
a program run with "ulimit -S -v 20000" failed when the 1MB gap was
counted for RLIMIT_AS; similar problems could come with RLIMIT_MLOCK
and strict non-overcommit mode.
Instead of keeping gap inside the stack vma, maintain the stack guard
gap as a gap between vmas: using vm_start_gap() in place of vm_start
(or vm_end_gap() in place of vm_end if VM_GROWSUP) in just those few
places which need to respect the gap - mainly arch_get_unmapped_area(),
and the vma tree's subtree_gap support for that.
Original-patch-by: Oleg Nesterov <[email protected]>
Original-patch-by: Michal Hocko <[email protected]>
Signed-off-by: Hugh Dickins <[email protected]>
Acked-by: Michal Hocko <[email protected]>
Tested-by: Helge Deller <[email protected]> # parisc
Signed-off-by: Linus Torvalds <[email protected]> |
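A sketch of the gap-aware helper the message describes, modelled on the description above; treat the clamping details and the default gap value as assumptions.

/* Assumed default: 256 pages of gap (1MB with 4k pages),
 * tunable via the stack_guard_gap= command line option. */
unsigned long stack_guard_gap = 256UL << PAGE_SHIFT;

static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
{
        unsigned long vm_start = vma->vm_start;

        if (vma->vm_flags & VM_GROWSDOWN) {
                vm_start -= stack_guard_gap;
                if (vm_start > vma->vm_start)   /* clamp on underflow */
                        vm_start = 0;
        }
        return vm_start;
}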
static int sqlite_free_persistent(zend_rsrc_list_entry *le, void *ptr TSRMLS_DC)
{
return le->ptr == ptr ? ZEND_HASH_APPLY_REMOVE : ZEND_HASH_APPLY_KEEP;
} | 0 | []
| php-src | ce96fd6b0761d98353761bf78d5bfb55291179fd | 301,405,374,113,054,630,000,000,000,000,000,000,000 | 4 | - fix #39863, do not accept paths with NULL in them. See http://news.php.net/php.internals/50191, trunk will have the patch later (adding a macro and/or changing (some) APIs. Patch by Rasmus |
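A standalone sketch of the embedded-NUL test such a macro can reduce to; the helper name is hypothetical.

#include <stddef.h>
#include <string.h>

/* A path with an embedded NUL makes strlen() disagree with the
 * length PHP tracks for the string, so the check is one compare. */
static int path_contains_nul(const char *path, size_t known_len)
{
        return strlen(path) != known_len;
}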
gnutls_x509_crt_get_extension_by_oid2(gnutls_x509_crt_t cert,
const char *oid, int indx,
gnutls_datum_t *output,
unsigned int *critical)
{
int ret;
if (cert == NULL) {
gnutls_assert();
return GNUTLS_E_INVALID_REQUEST;
}
if ((ret =
_gnutls_x509_crt_get_extension(cert, oid, indx, output,
critical)) < 0) {
gnutls_assert();
return ret;
}
if (output->size == 0 || output->data == NULL) {
gnutls_assert();
return GNUTLS_E_REQUESTED_DATA_NOT_AVAILABLE;
}
return 0;
} | 0 | [
"CWE-295"
]
| gnutls | 6e76e9b9fa845b76b0b9a45f05f4b54a052578ff | 196,298,717,517,966,240,000,000,000,000,000,000,000 | 26 | on certificate import check whether the two signature algorithms match |
static inline uint32_t read_dword(LSIState *s, uint32_t addr)
{
uint32_t buf;
pci_dma_read(PCI_DEVICE(s), addr, &buf, 4);
return cpu_to_le32(buf);
} | 0 | [
"CWE-835"
]
| qemu | de594e47659029316bbf9391efb79da0a1a08e08 | 132,451,071,638,306,470,000,000,000,000,000,000,000 | 7 | scsi: lsi: exit infinite loop while executing script (CVE-2019-12068)
When executing script in lsi_execute_script(), the LSI scsi adapter
emulator advances 's->dsp' index to read next opcode. This can lead
to an infinite loop if the next opcode is empty. Move the existing
loop exit after 10k iterations so that it covers no-op opcodes as
well.
Reported-by: Bugs SysSec <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
Signed-off-by: Prasad J Pandit <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]> |
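A sketch of the instruction budget described above; the dispatch loop is heavily simplified and the structure names are placeholders.

#define LSI_MAX_INSN 10000          /* budget from the message above */

static void execute_script(LSIState *s)
{
        int insn_processed = 0;

        for (;;) {
                if (++insn_processed > LSI_MAX_INSN) {
                        /* runaway SCRIPTS program (possibly all no-ops):
                         * raise an error instead of spinning forever */
                        break;
                }
                /* ... fetch the opcode at s->dsp, advance, dispatch ... */
        }
}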
static int io_sq_thread(void *data)
{
struct io_ring_ctx *ctx = data;
struct mm_struct *cur_mm = NULL;
const struct cred *old_cred;
mm_segment_t old_fs;
DEFINE_WAIT(wait);
unsigned inflight;
unsigned long timeout;
int ret;
complete(&ctx->completions[1]);
old_fs = get_fs();
set_fs(USER_DS);
old_cred = override_creds(ctx->creds);
ret = timeout = inflight = 0;
while (!kthread_should_park()) {
unsigned int to_submit;
if (inflight) {
unsigned nr_events = 0;
if (ctx->flags & IORING_SETUP_IOPOLL) {
/*
* inflight is the count of the maximum possible
* entries we submitted, but it can be smaller
* if we dropped some of them. If we don't have
* poll entries available, then we know that we
* have nothing left to poll for. Reset the
* inflight count to zero in that case.
*/
mutex_lock(&ctx->uring_lock);
if (!list_empty(&ctx->poll_list))
__io_iopoll_check(ctx, &nr_events, 0);
else
inflight = 0;
mutex_unlock(&ctx->uring_lock);
} else {
/*
* Normal IO, just pretend everything completed.
* We don't have to poll completions for that.
*/
nr_events = inflight;
}
inflight -= nr_events;
if (!inflight)
timeout = jiffies + ctx->sq_thread_idle;
}
to_submit = io_sqring_entries(ctx);
/*
* If submit got -EBUSY, flag us as needing the application
* to enter the kernel to reap and flush events.
*/
if (!to_submit || ret == -EBUSY) {
/*
* We're polling. If we're within the defined idle
* period, then let us spin without work before going
* to sleep. The exception is if we got EBUSY doing
* more IO, we should wait for the application to
* reap events and wake us up.
*/
if (inflight ||
(!time_after(jiffies, timeout) && ret != -EBUSY &&
!percpu_ref_is_dying(&ctx->refs))) {
cond_resched();
continue;
}
/*
* Drop cur_mm before scheduling, we can't hold it for
* long periods (or over schedule()). Do this before
* adding ourselves to the waitqueue, as the unuse/drop
* may sleep.
*/
if (cur_mm) {
unuse_mm(cur_mm);
mmput(cur_mm);
cur_mm = NULL;
}
prepare_to_wait(&ctx->sqo_wait, &wait,
TASK_INTERRUPTIBLE);
/* Tell userspace we may need a wakeup call */
ctx->rings->sq_flags |= IORING_SQ_NEED_WAKEUP;
/* make sure to read SQ tail after writing flags */
smp_mb();
to_submit = io_sqring_entries(ctx);
if (!to_submit || ret == -EBUSY) {
if (kthread_should_park()) {
finish_wait(&ctx->sqo_wait, &wait);
break;
}
if (signal_pending(current))
flush_signals(current);
schedule();
finish_wait(&ctx->sqo_wait, &wait);
ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
continue;
}
finish_wait(&ctx->sqo_wait, &wait);
ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
}
mutex_lock(&ctx->uring_lock);
ret = io_submit_sqes(ctx, to_submit, NULL, -1, &cur_mm, true);
mutex_unlock(&ctx->uring_lock);
if (ret > 0)
inflight += ret;
}
set_fs(old_fs);
if (cur_mm) {
unuse_mm(cur_mm);
mmput(cur_mm);
}
revert_creds(old_cred);
kthread_parkme();
return 0;
} | 0 | []
| linux | ff002b30181d30cdfbca316dadd099c3ca0d739c | 303,206,574,972,735,670,000,000,000,000,000,000,000 | 130 | io_uring: grab ->fs as part of async preparation
This passes it in to io-wq, so it assumes the right fs_struct when
executing async work that may need to do lookups.
Cc: [email protected] # 5.3+
Signed-off-by: Jens Axboe <[email protected]> |
static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
{
return subpool_inode(file_inode(vma->vm_file));
} | 0 | [
"CWE-703"
]
| linux | 5af10dfd0afc559bb4b0f7e3e8227a1578333995 | 58,314,276,716,898,870,000,000,000,000,000,000,000 | 4 | userfaultfd: hugetlbfs: remove superfluous page unlock in VM_SHARED case
huge_add_to_page_cache->add_to_page_cache implicitly unlocks the page
before returning in case of errors.
The error returned was -EEXIST by running UFFDIO_COPY on a non-hole
offset of a VM_SHARED hugetlbfs mapping. It was a userland bug that
triggered it and the kernel must cope with it returning -EEXIST from
ioctl(UFFDIO_COPY) as expected.
page dumped because: VM_BUG_ON_PAGE(!PageLocked(page))
kernel BUG at mm/filemap.c:964!
invalid opcode: 0000 [#1] SMP
CPU: 1 PID: 22582 Comm: qemu-system-x86 Not tainted 4.11.11-300.fc26.x86_64 #1
RIP: unlock_page+0x4a/0x50
Call Trace:
hugetlb_mcopy_atomic_pte+0xc0/0x320
mcopy_atomic+0x96f/0xbe0
userfaultfd_ioctl+0x218/0xe90
do_vfs_ioctl+0xa5/0x600
SyS_ioctl+0x79/0x90
entry_SYSCALL_64_fastpath+0x1a/0xa9
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Andrea Arcangeli <[email protected]>
Tested-by: Maxime Coquelin <[email protected]>
Reviewed-by: Mike Kravetz <[email protected]>
Cc: "Dr. David Alan Gilbert" <[email protected]>
Cc: Mike Rapoport <[email protected]>
Cc: Alexey Perevalov <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
static int io_read_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
bool force_nonblock)
{
ssize_t ret;
ret = io_prep_rw(req, sqe, force_nonblock);
if (ret)
return ret;
if (unlikely(!(req->file->f_mode & FMODE_READ)))
return -EBADF;
/* either don't need iovec imported or already have it */
if (!req->io || req->flags & REQ_F_NEED_CLEANUP)
return 0;
return io_rw_prep_async(req, READ, force_nonblock);
} | 0 | []
| linux | 0f2122045b946241a9e549c2a76cea54fa58a7ff | 310,626,837,242,500,530,000,000,000,000,000,000,000 | 17 | io_uring: don't rely on weak ->files references
Grab actual references to the files_struct. To avoid circular references
issues due to this, we add a per-task note that keeps track of what
io_uring contexts a task has used. When the tasks execs or exits its
assigned files, we cancel requests based on this tracking.
With that, we can grab proper references to the files table, and no
longer need to rely on stashing away ring_fd and ring_file to check
if the ring_fd may have been closed.
Cc: [email protected] # v5.5+
Reviewed-by: Pavel Begunkov <[email protected]>
Signed-off-by: Jens Axboe <[email protected]> |
void ff_thread_await_progress2(AVCodecContext *avctx, int field, int thread, int shift)
{
} | 0 | [
"CWE-703"
]
| FFmpeg | e5c7229999182ad1cef13b9eca050dba7a5a08da | 263,611,473,706,517,360,000,000,000,000,000,000,000 | 3 | avcodec/utils: set AVFrame format unconditional
Fixes inconsistency and out of array accesses
Fixes: 10cdd7e63e7f66e3e66273939e0863dd-asan_heap-oob_1a4ff32_7078_cov_4056274555_mov_h264_aac__mp4box_frag.mp4
Found-by: Mateusz "j00ru" Jurczyk and Gynvael Coldwind
Signed-off-by: Michael Niedermayer <[email protected]> |
static int kvm_set_wallclock(unsigned long now)
{
return -1;
} | 0 | []
| linux | 95ef1e52922cf75b1ea2eae54ef886f2cc47eecb | 255,783,013,211,174,730,000,000,000,000,000,000,000 | 4 | KVM guest: prevent tracing recursion with kvmclock
Prevent tracing of preempt_disable() in get_cpu_var() in
kvm_clock_read(). When CONFIG_DEBUG_PREEMPT is enabled,
preempt_disable/enable() are traced and this causes the function_graph
tracer to go into an infinite recursion. By open coding the
preempt_disable() around the get_cpu_var(), we can use the notrace
version which prevents preempt_disable/enable() from being traced and
prevents the recursion.
Based on a similar patch for Xen from Jeremy Fitzhardinge.
Tested-by: Gleb Natapov <[email protected]>
Acked-by: Steven Rostedt <[email protected]>
Signed-off-by: Avi Kivity <[email protected]> |
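A sketch of the open-coded read path the message describes, assuming the era-appropriate __get_cpu_var() helper.

static cycle_t kvm_clock_read(void)
{
        struct pvclock_vcpu_time_info *src;
        cycle_t ret;

        preempt_disable_notrace();      /* not traced, so no recursion */
        src = &__get_cpu_var(hv_clock);
        ret = pvclock_clocksource_read(src);
        preempt_enable_notrace();
        return ret;
}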
processCollectClass(struct module_qstate* qstate, int id)
{
struct iter_qstate* iq = (struct iter_qstate*)qstate->minfo[id];
struct module_qstate* subq;
/* If qchase.qclass == 0 then send out queries for all classes.
* Otherwise, do nothing (wait for all answers to arrive and the
* processClassResponse to put them together, and that moves us
* towards the Finished state when done. */
if(iq->qchase.qclass == 0) {
uint16_t c = 0;
iq->qchase.qclass = LDNS_RR_CLASS_ANY;
while(iter_get_next_root(qstate->env->hints,
qstate->env->fwds, &c)) {
/* generate query for this class */
log_nametypeclass(VERB_ALGO, "spawn collect query",
qstate->qinfo.qname, qstate->qinfo.qtype, c);
if(!generate_sub_request(qstate->qinfo.qname,
qstate->qinfo.qname_len, qstate->qinfo.qtype,
c, qstate, id, iq, INIT_REQUEST_STATE,
FINISHED_STATE, &subq,
(int)!(qstate->query_flags&BIT_CD))) {
errinf(qstate, "could not generate class ANY"
" lookup query");
return error_response(qstate, id,
LDNS_RCODE_SERVFAIL);
}
/* ignore subq, no special init required */
iq->num_current_queries ++;
if(c == 0xffff)
break;
else c++;
}
/* if no roots are configured at all, return */
if(iq->num_current_queries == 0) {
verbose(VERB_ALGO, "No root hints or fwds, giving up "
"on qclass ANY");
return error_response(qstate, id, LDNS_RCODE_REFUSED);
}
/* return false, wait for queries to return */
}
/* if woke up here because of an answer, wait for more answers */
return 0;
} | 1 | [
"CWE-400"
]
| unbound | ba0f382eee814e56900a535778d13206b86b6d49 | 193,134,661,771,536,720,000,000,000,000,000,000,000 | 43 | - CVE-2020-12662 Unbound can be tricked into amplifying an incoming
query into a large number of queries directed to a target.
- CVE-2020-12663 Malformed answers from upstream name servers can be
used to make Unbound unresponsive. |
static int mov_metadata_hmmt(MOVContext *c, AVIOContext *pb, unsigned len)
{
int i, n_hmmt;
if (len < 2)
return 0;
if (c->ignore_chapters)
return 0;
n_hmmt = avio_rb32(pb);
if (n_hmmt > len / 4)
return AVERROR_INVALIDDATA;
for (i = 0; i < n_hmmt && !pb->eof_reached; i++) {
int moment_time = avio_rb32(pb);
avpriv_new_chapter(c->fc, i, av_make_q(1, 1000), moment_time, AV_NOPTS_VALUE, NULL);
}
if (avio_feof(pb))
return AVERROR_INVALIDDATA;
return 0;
} | 0 | [
"CWE-703"
]
| FFmpeg | c953baa084607dd1d84c3bfcce3cf6a87c3e6e05 | 218,119,021,931,195,700,000,000,000,000,000,000,000 | 20 | avformat/mov: Check count sums in build_open_gop_key_points()
Fixes: ffmpeg.md
Fixes: Out of array access
Fixes: CVE-2022-2566
Found-by: Andy Nguyen <[email protected]>
Found-by: 3pvd <[email protected]>
Reviewed-by: Andy Nguyen <[email protected]>
Signed-off-by: Michael Niedermayer <[email protected]> |
int Field_string::cmp(const uchar *a_ptr, const uchar *b_ptr)
{
uint a_len, b_len;
if (field_charset->mbmaxlen != 1)
{
uint char_len= field_length/field_charset->mbmaxlen;
a_len= my_charpos(field_charset, a_ptr, a_ptr + field_length, char_len);
b_len= my_charpos(field_charset, b_ptr, b_ptr + field_length, char_len);
}
else
a_len= b_len= field_length;
/*
We have to remove end space to be able to compare multi-byte-characters
like in latin_de 'ae' and 0xe4
*/
return field_charset->coll->strnncollsp(field_charset,
a_ptr, a_len,
b_ptr, b_len);
} | 0 | [
"CWE-120"
]
| server | eca207c46293bc72dd8d0d5622153fab4d3fccf1 | 320,203,053,133,589,600,000,000,000,000,000,000,000 | 20 | MDEV-25317 Assertion `scale <= precision' failed in decimal_bin_size And Assertion `scale >= 0 && precision > 0 && scale <= precision' failed in decimal_bin_size_inline/decimal_bin_size.
Precision should be kept below DECIMAL_MAX_SCALE for computations.
It can be bigger in Item_decimal. I'd fix this too but it changes the
existing behaviour, so it is problematic to fix.
dnsc_shared_secrets_sizefunc(void *k, void* ATTR_UNUSED(d))
{
struct shared_secret_cache_key* ssk = (struct shared_secret_cache_key*)k;
size_t key_size = sizeof(struct shared_secret_cache_key)
+ lock_get_mem(&ssk->entry.lock);
size_t data_size = crypto_box_BEFORENMBYTES;
(void)ssk; /* otherwise ssk is unused if no threading, or fixed locksize */
return key_size + data_size;
} | 0 | [
"CWE-190"
]
| unbound | 02080f6b180232f43b77f403d0c038e9360a460f | 147,422,929,026,783,210,000,000,000,000,000,000,000 | 9 | - Fix Integer Overflows in Size Calculations,
reported by X41 D-Sec. |
static int cma_addr_cmp(const struct sockaddr *src, const struct sockaddr *dst)
{
if (src->sa_family != dst->sa_family)
return -1;
switch (src->sa_family) {
case AF_INET:
return ((struct sockaddr_in *)src)->sin_addr.s_addr !=
((struct sockaddr_in *)dst)->sin_addr.s_addr;
case AF_INET6: {
struct sockaddr_in6 *src_addr6 = (struct sockaddr_in6 *)src;
struct sockaddr_in6 *dst_addr6 = (struct sockaddr_in6 *)dst;
bool link_local;
if (ipv6_addr_cmp(&src_addr6->sin6_addr,
&dst_addr6->sin6_addr))
return 1;
link_local = ipv6_addr_type(&dst_addr6->sin6_addr) &
IPV6_ADDR_LINKLOCAL;
/* Link local must match their scope_ids */
return link_local ? (src_addr6->sin6_scope_id !=
dst_addr6->sin6_scope_id) :
0;
}
default:
return ib_addr_cmp(&((struct sockaddr_ib *) src)->sib_addr,
&((struct sockaddr_ib *) dst)->sib_addr);
}
} | 0 | [
"CWE-416"
]
| linux | bc0bdc5afaa740d782fbf936aaeebd65e5c2921d | 163,954,656,772,900,410,000,000,000,000,000,000,000 | 30 | RDMA/cma: Do not change route.addr.src_addr.ss_family
If the state is not idle then rdma_bind_addr() will immediately fail and
no change to global state should happen.
For instance if the state is already RDMA_CM_LISTEN then this will corrupt
the src_addr and would cause the test in cma_cancel_operation():
if (cma_any_addr(cma_src_addr(id_priv)) && !id_priv->cma_dev)
To view a mangled src_addr, e.g. with an IPv6 loopback address but an IPv4
family, failing the test.
This would manifest as this trace from syzkaller:
BUG: KASAN: use-after-free in __list_add_valid+0x93/0xa0 lib/list_debug.c:26
Read of size 8 at addr ffff8881546491e0 by task syz-executor.1/32204
CPU: 1 PID: 32204 Comm: syz-executor.1 Not tainted 5.12.0-rc8-syzkaller #0
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
Call Trace:
__dump_stack lib/dump_stack.c:79 [inline]
dump_stack+0x141/0x1d7 lib/dump_stack.c:120
print_address_description.constprop.0.cold+0x5b/0x2f8 mm/kasan/report.c:232
__kasan_report mm/kasan/report.c:399 [inline]
kasan_report.cold+0x7c/0xd8 mm/kasan/report.c:416
__list_add_valid+0x93/0xa0 lib/list_debug.c:26
__list_add include/linux/list.h:67 [inline]
list_add_tail include/linux/list.h:100 [inline]
cma_listen_on_all drivers/infiniband/core/cma.c:2557 [inline]
rdma_listen+0x787/0xe00 drivers/infiniband/core/cma.c:3751
ucma_listen+0x16a/0x210 drivers/infiniband/core/ucma.c:1102
ucma_write+0x259/0x350 drivers/infiniband/core/ucma.c:1732
vfs_write+0x28e/0xa30 fs/read_write.c:603
ksys_write+0x1ee/0x250 fs/read_write.c:658
do_syscall_64+0x2d/0x70 arch/x86/entry/common.c:46
entry_SYSCALL_64_after_hwframe+0x44/0xae
Which is indicating that an rdma_id_private was destroyed without doing
cma_cancel_listens().
Instead of trying to re-use the src_addr memory to indirectly create an
any address build one explicitly on the stack and bind to that as any
other normal flow would do.
Link: https://lore.kernel.org/r/[email protected]
Cc: [email protected]
Fixes: 732d41c545bb ("RDMA/cma: Make the locking for automatic state transition more clear")
Reported-by: [email protected]
Tested-by: Hao Sun <[email protected]>
Reviewed-by: Leon Romanovsky <[email protected]>
Signed-off-by: Jason Gunthorpe <[email protected]> |
static int __init hostap_init(void)
{
if (init_net.proc_net != NULL) {
hostap_proc = proc_mkdir("hostap", init_net.proc_net);
if (!hostap_proc)
printk(KERN_WARNING "Failed to mkdir "
"/proc/net/hostap\n");
} else
hostap_proc = NULL;
return 0;
} | 0 | [
"CWE-703",
"CWE-264"
]
| linux | 550fd08c2cebad61c548def135f67aba284c6162 | 36,973,322,604,120,920,000,000,000,000,000,000,000 | 12 | net: Audit drivers to identify those needing IFF_TX_SKB_SHARING cleared
After the last patch, We are left in a state in which only drivers calling
ether_setup have IFF_TX_SKB_SHARING set (we assume that drivers touching real
hardware call ether_setup for their net_devices and don't hold any state in
their skbs. There are a handful of drivers that violate this assumption of
course, and need to be fixed up. This patch identifies those drivers, and marks
them as not being able to support the safe transmission of skbs by clearning the
IFF_TX_SKB_SHARING flag in priv_flags
Signed-off-by: Neil Horman <[email protected]>
CC: Karsten Keil <[email protected]>
CC: "David S. Miller" <[email protected]>
CC: Jay Vosburgh <[email protected]>
CC: Andy Gospodarek <[email protected]>
CC: Patrick McHardy <[email protected]>
CC: Krzysztof Halasa <[email protected]>
CC: "John W. Linville" <[email protected]>
CC: Greg Kroah-Hartman <[email protected]>
CC: Marcel Holtmann <[email protected]>
CC: Johannes Berg <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
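The per-driver change reduces to one line after setup; a sketch with a hypothetical driver setup function:

static void my_wifi_setup(struct net_device *dev)  /* hypothetical */
{
        ether_setup(dev);
        /* this driver keeps state in transmitted skbs, so opt out
         * of skb sharing as the message describes */
        dev->priv_flags &= ~IFF_TX_SKB_SHARING;
}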
static int rtreeClose(sqlite3_vtab_cursor *cur){
Rtree *pRtree = (Rtree *)(cur->pVtab);
int ii;
RtreeCursor *pCsr = (RtreeCursor *)cur;
assert( pRtree->nCursor>0 );
freeCursorConstraints(pCsr);
sqlite3_finalize(pCsr->pReadAux);
sqlite3_free(pCsr->aPoint);
for(ii=0; ii<RTREE_CACHE_SZ; ii++) nodeRelease(pRtree, pCsr->aNode[ii]);
sqlite3_free(pCsr);
pRtree->nCursor--;
nodeBlobReset(pRtree);
return SQLITE_OK;
} | 0 | [
"CWE-125"
]
| sqlite | e41fd72acc7a06ce5a6a7d28154db1ffe8ba37a8 | 58,845,876,798,019,010,000,000,000,000,000,000,000 | 14 | Enhance the rtreenode() function of rtree (used for testing) so that it
uses the newer sqlite3_str object for better performance and improved
error reporting.
FossilOrigin-Name: 90acdbfce9c088582d5165589f7eac462b00062bbfffacdcc786eb9cf3ea5377 |
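A sketch of the sqlite3_str pattern the message refers to; the wrapper function is hypothetical.

#include "sqlite3.h"

static void node_to_text(sqlite3_context *ctx, int x, int y)
{
        sqlite3_str *p = sqlite3_str_new(sqlite3_context_db_handle(ctx));

        sqlite3_str_appendf(p, "{%d %d}", x, y);    /* grows as needed */
        /* sqlite3_str_finish() returns NULL on OOM, which simply
         * yields a NULL SQL result here */
        sqlite3_result_text(ctx, sqlite3_str_finish(p), -1, sqlite3_free);
}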
static CharDriverState *qemu_chr_open_stdio(const char *id,
ChardevBackend *backend,
ChardevReturn *ret,
Error **errp)
{
ChardevStdio *opts = backend->u.stdio.data;
CharDriverState *chr;
struct sigaction act;
ChardevCommon *common = qapi_ChardevStdio_base(opts);
if (is_daemonized()) {
error_setg(errp, "cannot use stdio with -daemonize");
return NULL;
}
if (stdio_in_use) {
error_setg(errp, "cannot use stdio by multiple character devices");
return NULL;
}
stdio_in_use = true;
old_fd0_flags = fcntl(0, F_GETFL);
tcgetattr(0, &oldtty);
qemu_set_nonblock(0);
atexit(term_exit);
memset(&act, 0, sizeof(act));
act.sa_handler = term_stdio_handler;
sigaction(SIGCONT, &act, NULL);
chr = qemu_chr_open_fd(0, 1, common, errp);
if (!chr) {
return NULL;
}
chr->chr_close = qemu_chr_close_stdio;
chr->chr_set_echo = qemu_chr_set_echo_stdio;
if (opts->has_signal) {
stdio_allow_signal = opts->signal;
}
qemu_chr_set_echo_stdio(chr, false);
return chr;
} | 0 | [
"CWE-416"
]
| qemu | a4afa548fc6dd9842ed86639b4d37d4d1c4ad480 | 33,622,303,447,547,420,000,000,000,000,000,000,000 | 43 | char: move front end handlers in CharBackend
Since the hanlders are associated with a CharBackend, rather than the
CharDriverState, it is more appropriate to store in CharBackend. This
avoids the handler copy dance in qemu_chr_fe_set_handlers() then
mux_chr_update_read_handler(), by storing the CharBackend pointer
directly.
Also a mux CharDriver should go through mux->backends[focused], since
chr->be will stay NULL. Before that, it was possible to call
chr->handler by mistake with surprising results, for ex through
qemu_chr_be_can_write(), which would result in calling the last set
handler front end, not the one with focus.
Signed-off-by: Marc-André Lureau <[email protected]>
Message-Id: <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]> |
String *Item_int_func::val_str(String *str)
{
DBUG_ASSERT(fixed == 1);
longlong nr=val_int();
if (null_value)
return 0;
str->set_int(nr, unsigned_flag, collation.collation);
return str;
} | 0 | [
"CWE-120"
]
| server | eca207c46293bc72dd8d0d5622153fab4d3fccf1 | 205,969,981,250,039,400,000,000,000,000,000,000,000 | 9 | MDEV-25317 Assertion `scale <= precision' failed in decimal_bin_size And Assertion `scale >= 0 && precision > 0 && scale <= precision' failed in decimal_bin_size_inline/decimal_bin_size.
Precision should be kept below DECIMAL_MAX_SCALE for computations.
It can be bigger in Item_decimal. I'd fix this too but it changes the
existing behaviour, so it is problematic to fix.
Item *get_copy(THD *thd)
{ return get_item_copy<Item_float>(thd, this); } | 0 | [
"CWE-617"
]
| server | 807945f2eb5fa22e6f233cc17b85a2e141efe2c8 | 229,938,990,045,098,900,000,000,000,000,000,000,000 | 2 | MDEV-26402: A SEGV in Item_field::used_tables/update_depend_map_for_order...
When doing condition pushdown from HAVING into WHERE,
Item_equal::create_pushable_equalities() calls
item->set_extraction_flag(IMMUTABLE_FL) for constant items.
Then, Item::cleanup_excluding_immutables_processor() checks for this flag
to see if it should call item->cleanup() or leave the item as-is.
The failure happens when a constant item has a non-constant one inside it,
like:
(tbl.col=0 AND impossible_cond)
item->walk(cleanup_excluding_immutables_processor) works in a bottom-up
way so it
1. will call Item_func_eq(tbl.col=0)->cleanup()
2. will not call Item_cond_and->cleanup (as the AND is constant)
This creates an item tree where a fixed Item has an un-fixed Item inside
it which eventually causes an assertion failure.
Fixed by introducing this rule: instead of just calling
item->set_extraction_flag(IMMUTABLE_FL);
we call Item::walk() to set the flag for all sub-items of the item. |
gdm_session_start_reauthentication (GdmSession *session,
GPid pid_of_caller,
uid_t uid_of_caller)
{
GdmSessionConversation *conversation = session->priv->session_conversation;
g_return_if_fail (conversation != NULL);
conversation->reauth_pid_of_caller = pid_of_caller;
gdm_dbus_worker_call_start_reauthentication (conversation->worker_proxy,
(int) pid_of_caller,
(int) uid_of_caller,
conversation->worker_cancellable,
(GAsyncReadyCallback) on_reauthentication_started_cb,
conversation);
} | 0 | []
| gdm | 05e5fc24b0f803098c1d05dae86f5eb05bd0c2a4 | 17,748,221,505,379,371,000,000,000,000,000,000,000 | 17 | session: Cancel worker proxy async ops when freeing conversations
We need to cancel ongoing async ops for worker proxies when freeing
conversations or we'll crash when the completion handler runs and we
access free'd memory.
https://bugzilla.gnome.org/show_bug.cgi?id=758032 |
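A sketch of the teardown side this change implies, assuming a free_conversation()-style helper.

static void free_conversation(GdmSessionConversation *conversation)
{
        /* cancel pending worker proxy calls first, so completion
         * callbacks never run against freed memory */
        g_cancellable_cancel(conversation->worker_cancellable);
        g_clear_object(&conversation->worker_cancellable);
        /* ... release the rest of the conversation ... */
}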
bool CommandHelpers::isHelpRequest(const BSONElement& helpElem) {
return !helpElem.eoo() && helpElem.trueValue();
} | 0 | [
"CWE-20"
]
| mongo | 722f06f3217c029ef9c50062c8cc775966fd7ead | 116,774,139,945,178,530,000,000,000,000,000,000,000 | 3 | SERVER-38275 ban find explain with UUID |
void btrfs_release_disk_super(struct btrfs_super_block *super)
{
struct page *page = virt_to_page(super);
put_page(page);
} | 0 | [
"CWE-476",
"CWE-703"
]
| linux | e4571b8c5e9ffa1e85c0c671995bd4dcc5c75091 | 237,195,792,713,963,050,000,000,000,000,000,000,000 | 6 | btrfs: fix NULL pointer dereference when deleting device by invalid id
[BUG]
It's easy to trigger NULL pointer dereference, just by removing a
non-existing device id:
# mkfs.btrfs -f -m single -d single /dev/test/scratch1 \
/dev/test/scratch2
# mount /dev/test/scratch1 /mnt/btrfs
# btrfs device remove 3 /mnt/btrfs
Then we have the following kernel NULL pointer dereference:
BUG: kernel NULL pointer dereference, address: 0000000000000000
#PF: supervisor read access in kernel mode
#PF: error_code(0x0000) - not-present page
PGD 0 P4D 0
Oops: 0000 [#1] PREEMPT SMP NOPTI
CPU: 9 PID: 649 Comm: btrfs Not tainted 5.14.0-rc3-custom+ #35
Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 0.0.0 02/06/2015
RIP: 0010:btrfs_rm_device+0x4de/0x6b0 [btrfs]
btrfs_ioctl+0x18bb/0x3190 [btrfs]
? lock_is_held_type+0xa5/0x120
? find_held_lock.constprop.0+0x2b/0x80
? do_user_addr_fault+0x201/0x6a0
? lock_release+0xd2/0x2d0
? __x64_sys_ioctl+0x83/0xb0
__x64_sys_ioctl+0x83/0xb0
do_syscall_64+0x3b/0x90
entry_SYSCALL_64_after_hwframe+0x44/0xae
[CAUSE]
Commit a27a94c2b0c7 ("btrfs: Make btrfs_find_device_by_devspec return
btrfs_device directly") moves the "missing" device path check into
btrfs_rm_device().
But btrfs_rm_device() itself can have case where it only receives
@devid, with NULL as @device_path.
In that case, calling strcmp() on NULL will trigger the NULL pointer
dereference.
Before that commit, we handle the "missing" case inside
btrfs_find_device_by_devspec(), which will not check @device_path at all
if @devid is provided, thus no way to trigger the bug.
[FIX]
Before calling strcmp(), also make sure @device_path is not NULL.
Fixes: a27a94c2b0c7 ("btrfs: Make btrfs_find_device_by_devspec return btrfs_device directly")
CC: [email protected] # 5.4+
Reported-by: butt3rflyh4ck <[email protected]>
Reviewed-by: Anand Jain <[email protected]>
Signed-off-by: Qu Wenruo <[email protected]>
Reviewed-by: David Sterba <[email protected]>
Signed-off-by: David Sterba <[email protected]> |
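The guard the fix describes is a single short-circuit before the strcmp(); a sketch, with the flag name invented for illustration:

bool by_devid;

/* device_path may legitimately be NULL when removing by devid,
 * so test it before handing it to strcmp() */
by_devid = (!device_path || strcmp(device_path, "missing") == 0);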
const opaque* SSL_SESSION::GetSecret() const
{
return master_secret_;
} | 0 | [
"CWE-254"
]
| mysql-server | e7061f7e5a96c66cb2e0bf46bec7f6ff35801a69 | 175,921,893,673,337,300,000,000,000,000,000,000,000 | 4 | Bug #22738607: YASSL FUNCTION X509_NAME_GET_INDEX_BY_NID IS NOT WORKING AS EXPECTED. |
static int set_user(uid_t new_ruid, int dumpclear)
{
struct user_struct *new_user;
new_user = alloc_uid(new_ruid);
if (!new_user)
return -EAGAIN;
if (atomic_read(&new_user->processes) >=
current->signal->rlim[RLIMIT_NPROC].rlim_cur &&
new_user != &root_user) {
free_uid(new_user);
return -EAGAIN;
}
switch_uid(new_user);
if (dumpclear) {
current->mm->dumpable = suid_dumpable;
smp_wmb();
}
current->uid = new_ruid;
return 0;
} | 0 | [
"CWE-20"
]
| linux-2.6 | 9926e4c74300c4b31dee007298c6475d33369df0 | 229,406,115,716,147,220,000,000,000,000,000,000,000 | 24 | CPU time limit patch / setrlimit(RLIMIT_CPU, 0) cheat fix
As discovered here today, the change in Kernel 2.6.17 intended to inhibit
users from setting RLIMIT_CPU to 0 (as that is equivalent to unlimited) by
"cheating" and setting it to 1 in such a case, does not make a difference,
as the check is done in the wrong place (too late), and only applies to the
profiling code.
On all systems I checked running kernels above 2.6.17, no matter what the
hard and soft CPU time limits were before, a user could escape them by
issuing in the shell (sh/bash/zsh) "ulimit -t 0", and then the user's
process was not ever killed.
Attached is a trivial patch to fix that. Simply moving the check to a
slightly earlier location (specifically, before the line that actually
assigns the limit - *old_rlim = new_rlim), does the trick.
Do note that at least the zsh (but not ash, dash, or bash) shell has the
problem of "caching" the limits set by the ulimit command, so when running
zsh the fix will not immediately be evident - after entering "ulimit -t 0",
"ulimit -a" will show "-t: cpu time (seconds) 0", even though the actual
limit as returned by getrlimit(...) will be 1. It can be verified by
opening a subshell (which will not have the values of the parent shell in
cache) and checking in it, or just by running a CPU intensive command like
"echo '65536^1048576' | bc" and verifying that it dumps core after one
second.
Regardless of whether that is a misfeature in the shell, perhaps it would
be better to return -EINVAL from setrlimit in such a case instead of
cheating and setting to 1, as that does not really reflect the actual state
of the process anymore. I do not however know what the ground for that
decision was in the original 2.6.17 change, and whether there would be any
"backward" compatibility issues, so I preferred not to touch that right
now.
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
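A sketch of the reordering the patch describes; surrounding locking and permission checks are omitted.

/* cheat before committing, not after: RLIMIT_CPU == 0 means
 * "unlimited" to the tick accounting, so bump it to 1 first */
if (resource == RLIMIT_CPU && new_rlim.rlim_cur == 0)
        new_rlim.rlim_cur = 1;

*old_rlim = new_rlim;   /* only now does the limit take effect */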
*/
static inline int skb_padto(struct sk_buff *skb, unsigned int len)
{
unsigned int size = skb->len;
if (likely(size >= len))
return 0;
return skb_pad(skb, len - size); | 0 | [
"CWE-20"
]
| linux | 2b16f048729bf35e6c28a40cbfad07239f9dcd90 | 127,084,443,560,187,060,000,000,000,000,000,000,000 | 7 | net: create skb_gso_validate_mac_len()
If you take a GSO skb, and split it into packets, will the MAC
length (L2 + L3 + L4 headers + payload) of those packets be small
enough to fit within a given length?
Move skb_gso_mac_seglen() to skbuff.h with other related functions
like skb_gso_network_seglen() so we can use it, and then create
skb_gso_validate_mac_len to do the full calculation.
Signed-off-by: Daniel Axtens <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
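A sketch of the per-segment MAC length helper the message introduces, modelled on the description; the validation walk over the resulting segments is omitted.

/* MAC length of one resulting segment: L2 + L3 headers plus
 * everything the transport-level seglen already accounts for */
static inline unsigned int skb_gso_mac_seglen(const struct sk_buff *skb)
{
        unsigned int hdr_len = skb_transport_header(skb) -
                               skb_mac_header(skb);

        return hdr_len + skb_gso_transport_seglen(skb);
}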
static void reds_channel_init_auth_caps(RedLinkInfo *link, RedChannel *channel)
{
RedsState *reds = link->reds;
if (reds->config->sasl_enabled && !link->skip_auth) {
channel->set_common_cap(SPICE_COMMON_CAP_AUTH_SASL);
} else {
channel->set_common_cap(SPICE_COMMON_CAP_AUTH_SPICE);
}
} | 0 | []
| spice | ca5bbc5692e052159bce1a75f55dc60b36078749 | 49,385,519,036,000,620,000,000,000,000,000,000,000 | 9 | With OpenSSL 1.1: Disable client-initiated renegotiation.
Fixes issue #49
Fixes BZ#1904459
Signed-off-by: Julien Ropé <[email protected]>
Reported-by: BlackKD
Acked-by: Frediano Ziglio <[email protected]> |
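With OpenSSL 1.1.0h and later this is a single option bit; a sketch, guarded for older headers:

#include <openssl/ssl.h>

static void disable_client_renegotiation(SSL *ssl)
{
#ifdef SSL_OP_NO_RENEGOTIATION
        SSL_set_options(ssl, SSL_OP_NO_RENEGOTIATION);
#endif
}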
int X509V3_add_value_bool_nf(char *name, int asn1_bool,
STACK_OF(CONF_VALUE) **extlist)
{
if(asn1_bool) return X509V3_add_value(name, "TRUE", extlist);
return 1;
} | 0 | []
| openssl | a70da5b3ecc3160368529677006801c58cb369db | 99,222,073,824,290,800,000,000,000,000,000,000,000 | 6 | New functions to check a hostname email or IP address against a
certificate. Add options to s_client, s_server and x509 utilities
to print results of checks. |
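A sketch of calling the new host check, with flags left at their defaults:

#include <openssl/x509v3.h>

/* 1 = match, 0 = mismatch, negative = internal error */
static int cert_matches_host(X509 *cert, const char *host)
{
        return X509_check_host(cert, host, 0, 0, NULL);
}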
CopyGetInt16(CopyState cstate, int16 *val)
{
uint16 buf;
if (CopyGetData(cstate, &buf, sizeof(buf), sizeof(buf)) != sizeof(buf))
{
*val = 0; /* suppress compiler warning */
return false;
}
*val = (int16) ntohs(buf);
return true;
} | 0 | [
"CWE-209"
]
| postgres | 804b6b6db4dcfc590a468e7be390738f9f7755fb | 300,973,407,494,743,540,000,000,000,000,000,000,000 | 12 | Fix column-privilege leak in error-message paths
While building error messages to return to the user,
BuildIndexValueDescription, ExecBuildSlotValueDescription and
ri_ReportViolation would happily include the entire key or entire row in
the result returned to the user, even if the user didn't have access to
view all of the columns being included.
Instead, include only those columns which the user is providing or which
the user has select rights on. If the user does not have any rights
to view the table or any of the columns involved then no detail is
provided and a NULL value is returned from BuildIndexValueDescription
and ExecBuildSlotValueDescription. Note that, for key cases, the user
must have access to all of the columns for the key to be shown; a
partial key will not be returned.
Further, in master only, do not return any data for cases where row
security is enabled on the relation and row security should be applied
for the user. This required a bit of refactoring and moving of things
around related to RLS- note the addition of utils/misc/rls.c.
Back-patch all the way, as column-level privileges are now in all
supported versions.
This has been assigned CVE-2014-8161, but since the issue and the patch
have already been publicized on pgsql-hackers, there's no point in trying
to hide this commit. |
static void state(struct connectdata *conn, smtpstate newstate)
{
struct smtp_conn *smtpc = &conn->proto.smtpc;
#if defined(DEBUGBUILD) && !defined(CURL_DISABLE_VERBOSE_STRINGS)
/* for debug purposes */
static const char * const names[] = {
"STOP",
"SERVERGREET",
"EHLO",
"HELO",
"STARTTLS",
"UPGRADETLS",
"AUTH",
"COMMAND",
"MAIL",
"RCPT",
"DATA",
"POSTDATA",
"QUIT",
/* LAST */
};
if(smtpc->state != newstate)
infof(conn->data, "SMTP %p state change from %s to %s\n",
(void *)smtpc, names[smtpc->state], names[newstate]);
#endif
smtpc->state = newstate;
} | 0 | [
"CWE-200",
"CWE-119",
"CWE-787"
]
| curl | ba1dbd78e5f1ed67c1b8d37ac89d90e5e330b628 | 202,229,985,325,251,170,000,000,000,000,000,000,000 | 29 | smtp: use the upload buffer size for scratch buffer malloc
... not the read buffer size, as that can be set smaller and thus cause
a buffer overflow! CVE-2018-0500
Reported-by: Peter Wu
Bug: https://curl.haxx.se/docs/adv_2018-70a2.html |
static inline void ipv6_addr_set_v4mapped(const __be32 addr,
struct in6_addr *v4mapped)
{
ipv6_addr_set(v4mapped,
0, 0,
htonl(0x0000FFFF),
addr);
} | 0 | [
"CWE-416",
"CWE-284",
"CWE-264"
]
| linux | 45f6fad84cc305103b28d73482b344d7f5b76f39 | 170,285,532,644,666,630,000,000,000,000,000,000,000 | 8 | ipv6: add complete rcu protection around np->opt
This patch addresses multiple problems :
UDP/RAW sendmsg() need to get a stable struct ipv6_txoptions
while socket is not locked : Other threads can change np->opt
concurrently. Dmitry posted a syzkaller
(http://github.com/google/syzkaller) program demonstrating
use-after-free.
Starting with TCP/DCCP lockless listeners, tcp_v6_syn_recv_sock()
and dccp_v6_request_recv_sock() also need to use RCU protection
to dereference np->opt once (before calling ipv6_dup_options())
This patch adds full RCU protection to np->opt
Reported-by: Dmitry Vyukov <[email protected]>
Signed-off-by: Eric Dumazet <[email protected]>
Acked-by: Hannes Frederic Sowa <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
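A sketch of the accessor pattern the message describes: take a counted reference under rcu_read_lock() instead of dereferencing np->opt bare. The refcnt field is part of the change, so treat its exact name as an assumption.

static inline struct ipv6_txoptions *txopt_get(const struct ipv6_pinfo *np)
{
        struct ipv6_txoptions *opt;

        rcu_read_lock();
        opt = rcu_dereference(np->opt);
        if (opt && !atomic_inc_not_zero(&opt->refcnt))
                opt = NULL;             /* lost the race with free */
        rcu_read_unlock();
        return opt;
}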
METHODDEF(void) _cimg_jpeg_error_exit(j_common_ptr cinfo) {
_cimg_error_ptr c_err = (_cimg_error_ptr) cinfo->err; // Return control to the setjmp point
(*cinfo->err->format_message)(cinfo,c_err->message);
jpeg_destroy(cinfo); // Clean memory and temp files.
longjmp(c_err->setjmp_buffer,1); | 0 | [
"CWE-125"
]
| CImg | 10af1e8c1ad2a58a0a3342a856bae63e8f257abb | 261,248,059,678,677,450,000,000,000,000,000,000,000 | 6 | Fix other issues in 'CImg<T>::load_bmp()'. |
**/
CImg<T>& set_linear_atXY(const T& value, const float fx, const float fy=0, const int z=0, const int c=0,
const bool is_added=false) {
const int
x = (int)fx - (fx>=0?0:1), nx = x + 1,
y = (int)fy - (fy>=0?0:1), ny = y + 1;
const float
dx = fx - x,
dy = fy - y;
if (z>=0 && z<depth() && c>=0 && c<spectrum()) {
if (y>=0 && y<height()) {
if (x>=0 && x<width()) {
const float w1 = (1 - dx)*(1 - dy), w2 = is_added?1:(1 - w1);
(*this)(x,y,z,c) = (T)(w1*value + w2*(*this)(x,y,z,c));
}
if (nx>=0 && nx<width()) {
const float w1 = dx*(1 - dy), w2 = is_added?1:(1 - w1);
(*this)(nx,y,z,c) = (T)(w1*value + w2*(*this)(nx,y,z,c));
}
}
if (ny>=0 && ny<height()) {
if (x>=0 && x<width()) {
const float w1 = (1 - dx)*dy, w2 = is_added?1:(1 - w1);
(*this)(x,ny,z,c) = (T)(w1*value + w2*(*this)(x,ny,z,c));
}
if (nx>=0 && nx<width()) {
const float w1 = dx*dy, w2 = is_added?1:(1 - w1);
(*this)(nx,ny,z,c) = (T)(w1*value + w2*(*this)(nx,ny,z,c));
}
}
}
return *this; | 0 | [
"CWE-125"
]
| CImg | 10af1e8c1ad2a58a0a3342a856bae63e8f257abb | 180,658,073,666,774,000,000,000,000,000,000,000,000 | 32 | Fix other issues in 'CImg<T>::load_bmp()'. |
static void print_section(char *level, char *text, u8 *addr,
unsigned int length)
{
metadata_access_enable();
print_hex_dump(level, text, DUMP_PREFIX_ADDRESS, 16, 1, addr,
length, 1);
metadata_access_disable();
} | 0 | []
| linux | fd4d9c7d0c71866ec0c2825189ebd2ce35bd95b8 | 195,012,006,594,441,600,000,000,000,000,000,000,000 | 8 | mm: slub: add missing TID bump in kmem_cache_alloc_bulk()
When kmem_cache_alloc_bulk() attempts to allocate N objects from a percpu
freelist of length M, and N > M > 0, it will first remove the M elements
from the percpu freelist, then call ___slab_alloc() to allocate the next
element and repopulate the percpu freelist. ___slab_alloc() can re-enable
IRQs via allocate_slab(), so the TID must be bumped before ___slab_alloc()
to properly commit the freelist head change.
Fix it by unconditionally bumping c->tid when entering the slowpath.
Cc: [email protected]
Fixes: ebe909e0fdb3 ("slub: improve bulk alloc strategy")
Signed-off-by: Jann Horn <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
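The fix itself is one line at the top of the bulk-alloc slowpath; a sketch in context, with the surrounding call copied from the description rather than verified line-for-line:

/* commit the freelist head change before ___slab_alloc(), which
 * may re-enable IRQs via allocate_slab() */
c->tid = next_tid(c->tid);
local_irq_enable();

/* slowpath allocates the next element and refills the freelist */
p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE, _RET_IP_, c);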
int arch_reinit_sched_domains(void)
{
int err;
get_online_cpus();
detach_destroy_domains(&cpu_online_map);
err = arch_init_sched_domains(&cpu_online_map);
put_online_cpus();
return err;
} | 0 | []
| linux-2.6 | 8f1bc385cfbab474db6c27b5af1e439614f3025c | 242,000,963,930,085,350,000,000,000,000,000,000,000 | 11 | sched: fair: weight calculations
In order to level the hierarchy, we need to calculate load based on the
root view. That is, each task's load is in the same unit.
              A
             / \
            B   1
           / \
          2   3
To compute 1's load we do:
	load(1) = weight(1) / rq_weight(A)
To compute 2's load we do:
	load(2) = (weight(2) / rq_weight(B)) * (weight(B) / rq_weight(A))
This yields load fractions in comparable units.
The consequence is that it changes virtual time. We used to have:
	vtime_i = time_i / weight_i
	vtime = \Sum vtime_i = time / rq_weight.
But with the new way of load calculation we get that vtime equals time.
Signed-off-by: Peter Zijlstra <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]> |
static int ok_pkt(git_pkt **out, const char *line, size_t len)
{
git_pkt_ok *pkt;
const char *ptr;
size_t alloc_len;
pkt = git__malloc(sizeof(*pkt));
GITERR_CHECK_ALLOC(pkt);
pkt->type = GIT_PKT_OK;
line += 3; /* skip "ok " */
if (!(ptr = strchr(line, '\n'))) {
giterr_set(GITERR_NET, "Invalid packet line");
git__free(pkt);
return -1;
}
len = ptr - line;
GITERR_CHECK_ALLOC_ADD(&alloc_len, len, 1);
pkt->ref = git__malloc(alloc_len);
GITERR_CHECK_ALLOC(pkt->ref);
memcpy(pkt->ref, line, len);
pkt->ref[len] = '\0';
*out = (git_pkt *)pkt;
return 0;
} | 0 | [
"CWE-119",
"CWE-787"
]
| libgit2 | 66e3774d279672ee51c3b54545a79d20d1ada834 | 51,013,830,246,277,780,000,000,000,000,000,000,000 | 29 | smart_pkt: verify packet length exceeds PKT_LEN_SIZE
Each packet line in the Git protocol is prefixed by a four-byte
length of how much data will follow, which we parse in
`git_pkt_parse_line`. The transmitted length can either be equal
to zero in case of a flush packet or has to be at least of length
four, as it also includes the encoded length itself. Not
checking this may result in a buffer overflow as we directly pass
the length to functions which accept a `size_t` length as
parameter.
Fix the issue by verifying that non-flush packets have at least a
length of `PKT_LEN_SIZE`. |
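A sketch of the added guard in the line parser; the error call follows the old giterr API visible elsewhere in this record.

#define PKT_LEN_SIZE 4

/* a non-flush packet must at least cover its own length prefix */
if (len != 0 && len < PKT_LEN_SIZE) {
        giterr_set(GITERR_NET, "Invalid packet length");
        return -1;
}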
struct hd_struct *disk_map_sector_rcu(struct gendisk *disk, sector_t sector)
{
struct disk_part_tbl *ptbl;
struct hd_struct *part;
int i;
ptbl = rcu_dereference(disk->part_tbl);
part = rcu_dereference(ptbl->last_lookup);
if (part && sector_in_part(part, sector))
return part;
for (i = 1; i < ptbl->len; i++) {
part = rcu_dereference(ptbl->part[i]);
if (part && sector_in_part(part, sector)) {
rcu_assign_pointer(ptbl->last_lookup, part);
return part;
}
}
return &disk->part0;
} | 0 | [
"CWE-416"
]
| linux-stable | 77da160530dd1dc94f6ae15a981f24e5f0021e84 | 282,519,538,324,722,900,000,000,000,000,000,000,000 | 22 | block: fix use-after-free in seq file
I got a KASAN report of use-after-free:
==================================================================
BUG: KASAN: use-after-free in klist_iter_exit+0x61/0x70 at addr ffff8800b6581508
Read of size 8 by task trinity-c1/315
=============================================================================
BUG kmalloc-32 (Not tainted): kasan: bad access detected
-----------------------------------------------------------------------------
Disabling lock debugging due to kernel taint
INFO: Allocated in disk_seqf_start+0x66/0x110 age=144 cpu=1 pid=315
___slab_alloc+0x4f1/0x520
__slab_alloc.isra.58+0x56/0x80
kmem_cache_alloc_trace+0x260/0x2a0
disk_seqf_start+0x66/0x110
traverse+0x176/0x860
seq_read+0x7e3/0x11a0
proc_reg_read+0xbc/0x180
do_loop_readv_writev+0x134/0x210
do_readv_writev+0x565/0x660
vfs_readv+0x67/0xa0
do_preadv+0x126/0x170
SyS_preadv+0xc/0x10
do_syscall_64+0x1a1/0x460
return_from_SYSCALL_64+0x0/0x6a
INFO: Freed in disk_seqf_stop+0x42/0x50 age=160 cpu=1 pid=315
__slab_free+0x17a/0x2c0
kfree+0x20a/0x220
disk_seqf_stop+0x42/0x50
traverse+0x3b5/0x860
seq_read+0x7e3/0x11a0
proc_reg_read+0xbc/0x180
do_loop_readv_writev+0x134/0x210
do_readv_writev+0x565/0x660
vfs_readv+0x67/0xa0
do_preadv+0x126/0x170
SyS_preadv+0xc/0x10
do_syscall_64+0x1a1/0x460
return_from_SYSCALL_64+0x0/0x6a
CPU: 1 PID: 315 Comm: trinity-c1 Tainted: G B 4.7.0+ #62
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Ubuntu-1.8.2-1ubuntu1 04/01/2014
ffffea0002d96000 ffff880119b9f918 ffffffff81d6ce81 ffff88011a804480
ffff8800b6581500 ffff880119b9f948 ffffffff8146c7bd ffff88011a804480
ffffea0002d96000 ffff8800b6581500 fffffffffffffff4 ffff880119b9f970
Call Trace:
[<ffffffff81d6ce81>] dump_stack+0x65/0x84
[<ffffffff8146c7bd>] print_trailer+0x10d/0x1a0
[<ffffffff814704ff>] object_err+0x2f/0x40
[<ffffffff814754d1>] kasan_report_error+0x221/0x520
[<ffffffff8147590e>] __asan_report_load8_noabort+0x3e/0x40
[<ffffffff83888161>] klist_iter_exit+0x61/0x70
[<ffffffff82404389>] class_dev_iter_exit+0x9/0x10
[<ffffffff81d2e8ea>] disk_seqf_stop+0x3a/0x50
[<ffffffff8151f812>] seq_read+0x4b2/0x11a0
[<ffffffff815f8fdc>] proc_reg_read+0xbc/0x180
[<ffffffff814b24e4>] do_loop_readv_writev+0x134/0x210
[<ffffffff814b4c45>] do_readv_writev+0x565/0x660
[<ffffffff814b8a17>] vfs_readv+0x67/0xa0
[<ffffffff814b8de6>] do_preadv+0x126/0x170
[<ffffffff814b92ec>] SyS_preadv+0xc/0x10
This problem can occur in the following situation:
open()
- pread()
- .seq_start()
- iter = kmalloc() // succeeds
- seqf->private = iter
- .seq_stop()
- kfree(seqf->private)
- pread()
- .seq_start()
- iter = kmalloc() // fails
- .seq_stop()
- class_dev_iter_exit(seqf->private) // boom! old pointer
As the comment in disk_seqf_stop() says, stop is called even if start
failed, so we need to reinitialise the private pointer to NULL when seq
iteration stops.
An alternative would be to set the private pointer to NULL when the
kmalloc() in disk_seqf_start() fails.
Cc: [email protected]
Signed-off-by: Vegard Nossum <[email protected]>
Acked-by: Tejun Heo <[email protected]>
Signed-off-by: Jens Axboe <[email protected]> |
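A sketch of the fix the message implies for disk_seqf_stop(): release the iterator and clear the private pointer, since stop runs even after a failed start.

static void disk_seqf_stop(struct seq_file *seqf, void *v)
{
        struct class_dev_iter *iter = seqf->private;

        /* stop is called even after start failed, but we still
         * need to clean up iter and reset the stale pointer */
        if (iter) {
                class_dev_iter_exit(iter);
                kfree(iter);
                seqf->private = NULL;
        }
}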
void vhdx_guid_generate(MSGUID *guid)
{
uuid_t uuid;
assert(guid != NULL);
uuid_generate(uuid);
memcpy(guid, uuid, sizeof(MSGUID));
} | 0 | [
"CWE-835"
]
| qemu | 1d7678dec4761acdc43439da6ceda41a703ba1a6 | 36,188,618,128,791,430,000,000,000,000,000,000,000 | 8 | vhdx: Bounds checking for block_size and logical_sector_size (CVE-2014-0148)
Other variables (e.g. sectors_per_block) are calculated using these
variables, and if not range-checked illegal values could be obtained
causing infinite loops and other potential issues when calculating
BAT entries.
The 1.00 VHDX spec requires BlockSize to be min 1MB, max 256MB.
LogicalSectorSize is required to be either 512 or 4096 bytes.
Reported-by: Kevin Wolf <[email protected]>
Signed-off-by: Jeff Cody <[email protected]>
Signed-off-by: Kevin Wolf <[email protected]>
Reviewed-by: Max Reitz <[email protected]>
Signed-off-by: Stefan Hajnoczi <[email protected]> |
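A sketch of the spec-range checks described above; the variable names are assumptions.

/* VHDX 1.00: BlockSize in [1 MB, 256 MB], sector size 512 or 4096 */
if (block_size < (1u << 20) || block_size > (256u << 20))
        return -EINVAL;
if (logical_sector_size != 512 && logical_sector_size != 4096)
        return -EINVAL;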
ecma_free_string_list (jmem_cpointer_t string_list_cp) /**< string list */
{
while (string_list_cp != JMEM_CP_NULL)
{
ecma_lit_storage_item_t *string_list_p = JMEM_CP_GET_NON_NULL_POINTER (ecma_lit_storage_item_t, string_list_cp);
for (int i = 0; i < ECMA_LIT_STORAGE_VALUE_COUNT; i++)
{
if (string_list_p->values[i] != JMEM_CP_NULL)
{
ecma_string_t *string_p = JMEM_CP_GET_NON_NULL_POINTER (ecma_string_t,
string_list_p->values[i]);
JERRY_ASSERT (ECMA_STRING_IS_REF_EQUALS_TO_ONE (string_p));
ecma_destroy_ecma_string (string_p);
}
}
jmem_cpointer_t next_item_cp = string_list_p->next_cp;
jmem_pools_free (string_list_p, sizeof (ecma_lit_storage_item_t));
string_list_cp = next_item_cp;
}
} /* ecma_free_string_list */ | 0 | [
"CWE-416"
]
| jerryscript | 3bcd48f72d4af01d1304b754ef19fe1a02c96049 | 155,415,067,331,398,420,000,000,000,000,000,000,000 | 23 | Improve parse_identifier (#4691)
Ascii string length is no longer computed during string allocation.
JerryScript-DCO-1.0-Signed-off-by: Daniel Batiz [email protected] |
static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va, struct vm_area_struct *vma)
{
/*
* If the source page was a PFN mapping, we don't have
* a "struct page" for it. We do a best-effort copy by
* just copying from the original user address. If that
* fails, we just zero-fill it. Live with it.
*/
if (unlikely(!src)) {
void *kaddr = kmap_atomic(dst, KM_USER0);
void __user *uaddr = (void __user *)(va & PAGE_MASK);
/*
* This really shouldn't fail, because the page is there
* in the page tables. But it might just be unreadable,
* in which case we just give up and fill the result with
* zeroes.
*/
if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE))
memset(kaddr, 0, PAGE_SIZE);
kunmap_atomic(kaddr, KM_USER0);
flush_dcache_page(dst);
} else
copy_user_highpage(dst, src, va, vma);
} | 0 | [
"CWE-20"
]
| linux-2.6 | 89f5b7da2a6bad2e84670422ab8192382a5aeb9f | 72,715,450,633,227,250,000,000,000,000,000,000,000 | 25 | Reinstate ZERO_PAGE optimization in 'get_user_pages()' and fix XIP
KAMEZAWA Hiroyuki and Oleg Nesterov point out that since the commit
557ed1fa2620dc119adb86b34c614e152a629a80 ("remove ZERO_PAGE") removed
the ZERO_PAGE from the VM mappings, any users of get_user_pages() will
generally now populate the VM with real empty pages needlessly.
We used to get the ZERO_PAGE when we did the "handle_mm_fault()", but
since fault handling no longer uses ZERO_PAGE for new anonymous pages,
we now need to handle that special case in follow_page() instead.
In particular, the removal of ZERO_PAGE effectively removed the core
file writing optimization where we would skip writing pages that had not
been populated at all, and increased memory pressure a lot by allocating
all those useless newly zeroed pages.
This reinstates the optimization by making the unmapped PTE case the
same as for a non-existent page table, which already did this correctly.
While at it, this also fixes the XIP case for follow_page(), where the
caller could not differentiate between the case of a page that simply
could not be used (because it had no "struct page" associated with it)
and a page that just wasn't mapped.
We do that by simply returning an error pointer for pages that could not
be turned into a "struct page *". The error is arbitrarily picked to be
EFAULT, since that was what get_user_pages() already used for the
equivalent IO-mapped page case.
[ Also removed an impossible test for pte_offset_map_lock() failing:
that's not how that function works ]
Acked-by: Oleg Nesterov <[email protected]>
Acked-by: Nick Piggin <[email protected]>
Cc: KAMEZAWA Hiroyuki <[email protected]>
Cc: Hugh Dickins <[email protected]>
Cc: Andrew Morton <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Roland McGrath <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
variable_remove_pattern (value, pattern, patspec, quoted)
char *value, *pattern;
int patspec, quoted;
{
char *tword;
tword = remove_pattern (value, pattern, patspec);
return (tword);
} | 0 | []
| bash | 955543877583837c85470f7fb8a97b7aa8d45e6c | 198,437,858,819,725,670,000,000,000,000,000,000,000 | 10 | bash-4.4-rc2 release |
struct mg_connection *mg_listen(struct mg_mgr *mgr, const char *url,
mg_event_handler_t fn, void *fn_data) {
struct mg_connection *c = NULL;
bool is_udp = strncmp(url, "udp:", 4) == 0;
struct mg_addr addr;
SOCKET fd = mg_open_listener(url, &addr);
if (fd == INVALID_SOCKET) {
LOG(LL_ERROR, ("Failed: %s, errno %d", url, MG_SOCK_ERRNO));
} else if ((c = alloc_conn(mgr, 0, fd)) == NULL) {
LOG(LL_ERROR, ("OOM %s", url));
closesocket(fd);
} else {
memcpy(&c->peer, &addr, sizeof(struct mg_addr));
c->fd = S2PTR(fd);
c->is_listening = 1;
c->is_udp = is_udp;
LIST_ADD_HEAD(struct mg_connection, &mgr->conns, c);
c->fn = fn;
c->fn_data = fn_data;
mg_call(c, MG_EV_OPEN, NULL);
LOG(LL_DEBUG,
("%lu accepting on %s (port %u)", c->id, url, mg_ntohs(c->peer.port)));
}
return c;
} | 0 | [
"CWE-552"
]
| mongoose | c65c8fdaaa257e0487ab0aaae9e8f6b439335945 | 95,354,537,592,979,270,000,000,000,000,000,000,000 | 25 | Protect against the directory traversal in mg_upload() |
static void io_sq_thread_drop_mm(void)
{
struct mm_struct *mm = current->mm;
if (mm) {
kthread_unuse_mm(mm);
mmput(mm);
}
} | 0 | []
| linux | 0f2122045b946241a9e549c2a76cea54fa58a7ff | 129,412,980,564,976,240,000,000,000,000,000,000,000 | 9 | io_uring: don't rely on weak ->files references
Grab actual references to the files_struct. To avoid circular references
issues due to this, we add a per-task note that keeps track of what
io_uring contexts a task has used. When the tasks execs or exits its
assigned files, we cancel requests based on this tracking.
With that, we can grab proper references to the files table, and no
longer need to rely on stashing away ring_fd and ring_file to check
if the ring_fd may have been closed.
Cc: [email protected] # v5.5+
Reviewed-by: Pavel Begunkov <[email protected]>
Signed-off-by: Jens Axboe <[email protected]> |
json_t *json_web_config(const char *url)
{
struct data_buffer all_data = {NULL, 0};
char curl_err_str[CURL_ERROR_SIZE];
long timeout = 60;
json_error_t err;
json_t *val;
CURL *curl;
int rc;
memset(&err, 0, sizeof(err));
curl = curl_easy_init();
if (unlikely(!curl))
quithere(1, "CURL initialisation failed");
curl_easy_setopt(curl, CURLOPT_TIMEOUT, timeout);
curl_easy_setopt(curl, CURLOPT_NOSIGNAL, 1);
curl_easy_setopt(curl, CURLOPT_URL, url);
curl_easy_setopt(curl, CURLOPT_ENCODING, "");
curl_easy_setopt(curl, CURLOPT_FAILONERROR, 1);
curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, all_data_cb);
curl_easy_setopt(curl, CURLOPT_WRITEDATA, &all_data);
curl_easy_setopt(curl, CURLOPT_ERRORBUFFER, curl_err_str);
curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1);
curl_easy_setopt(curl, CURLOPT_USE_SSL, CURLUSESSL_TRY);
val = NULL;
rc = curl_easy_perform(curl);
curl_easy_cleanup(curl);
if (rc) {
applog(LOG_ERR, "HTTP config request of '%s' failed: %s", url, curl_err_str);
goto c_out;
}
if (!all_data.buf) {
applog(LOG_ERR, "Empty config data received from '%s'", url);
goto c_out;
}
val = JSON_LOADS(all_data.buf, &err);
if (!val) {
applog(LOG_ERR, "JSON config decode of '%s' failed(%d): %s", url,
err.line, err.text);
}
databuf_free(&all_data);
c_out:
return val;
} | 0 | [
"CWE-119",
"CWE-787"
]
| cgminer | e1c5050734123973b99d181c45e74b2cbb00272e | 95,741,638,538,268,440,000,000,000,000,000,000,000 | 52 | Do some random sanity checking for stratum message parsing |
xfs_save_resvblks(struct xfs_mount *mp)
{
uint64_t resblks = 0;
mp->m_resblks_save = mp->m_resblks;
xfs_reserve_blocks(mp, &resblks, NULL);
} | 0 | [
"CWE-416"
]
| linux | c9fbd7bbc23dbdd73364be4d045e5d3612cf6e82 | 89,255,054,659,370,920,000,000,000,000,000,000,000 | 7 | xfs: clear sb->s_fs_info on mount failure
We recently had an oops reported on a 4.14 kernel in
xfs_reclaim_inodes_count() where sb->s_fs_info pointed to garbage
and so the m_perag_tree lookup walked into lala land.
Essentially, the machine was under memory pressure when the mount
was being run, xfs_fs_fill_super() failed after allocating the
xfs_mount and attaching it to sb->s_fs_info. It then cleaned up and
freed the xfs_mount, but the sb->s_fs_info field still pointed to
the freed memory. Hence when the superblock shrinker then ran
it fell off the bad pointer.
With the superblock shrinker problem fixed at the VFS level, this
stale s_fs_info pointer is still a problem - we use it
unconditionally in ->put_super when the superblock is being torn
down, and hence we can still trip over it after a ->fill_super
call failure. Hence we need to clear s_fs_info if
xfs-fs_fill_super() fails, and we need to check if it's valid in
the places it can potentially be dereferenced after a ->fill_super
failure.
Signed-Off-By: Dave Chinner <[email protected]>
Reviewed-by: Darrick J. Wong <[email protected]>
Signed-off-by: Darrick J. Wong <[email protected]> |
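A sketch of the failure-path detach; the label and free call are placeholders for the real cleanup sequence.

out_free:
        sb->s_fs_info = NULL;   /* shrinker / ->put_super must see NULL */
        kfree(mp);
        return error;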
Bool Track_IsMPEG4Stream(u32 HandlerType)
{
switch (HandlerType) {
case GF_ISOM_MEDIA_VISUAL:
case GF_ISOM_MEDIA_AUXV:
case GF_ISOM_MEDIA_PICT:
case GF_ISOM_MEDIA_AUDIO:
case GF_ISOM_MEDIA_SUBPIC:
case GF_ISOM_MEDIA_OD:
case GF_ISOM_MEDIA_OCR:
case GF_ISOM_MEDIA_SCENE:
case GF_ISOM_MEDIA_MPEG7:
case GF_ISOM_MEDIA_OCI:
case GF_ISOM_MEDIA_IPMP:
case GF_ISOM_MEDIA_MPEGJ:
case GF_ISOM_MEDIA_ESM:
return 1;
/*Timedtext is NOT an MPEG-4 stream*/
default:
/*consider xxsm as MPEG-4 handlers*/
if ( (((HandlerType>>8) & 0xFF)== 's') && ((HandlerType& 0xFF)== 'm'))
return 1;
return 0;
}
} | 0 | [
"CWE-476"
]
| gpac | df8fffd839fe5ae9acd82d26fd48280a397411d9 | 169,517,736,791,926,060,000,000,000,000,000,000,000 | 25 | fixed #1736 |
assegment_data_new (int num)
{
return (XMALLOC (MTYPE_AS_SEG_DATA, ASSEGMENT_DATA_SIZE (num, 1)));
} | 0 | [
"CWE-20"
]
| quagga | 7a42b78be9a4108d98833069a88e6fddb9285008 | 338,732,462,534,426,700,000,000,000,000,000,000,000 | 4 | bgpd: Fix AS_PATH size calculation for long paths
If you have an AS_PATH with more entries
than can be written into a single
AS_SEGMENT (at most AS_SEGMENT_MAX entries)
that the AS_PATH needs to be broken up was not
correctly calculating the size of the resulting
message. This patch addresses this issue. |
void gf_vvc_parse_sei(char *buffer, u32 nal_size, VVCState *vvc)
{
gf_hevc_vvc_parse_sei(buffer, nal_size, NULL, vvc);
} | 0 | [
"CWE-190"
]
| gpac | 0cd19f4db70615d707e0e6202933c2ea0c1d36df | 249,844,114,108,060,160,000,000,000,000,000,000,000 | 4 | fixed #2067 |
date_s__httpdate(int argc, VALUE *argv, VALUE klass)
{
VALUE str, opt;
rb_scan_args(argc, argv, "1:", &str, &opt);
check_limit(str, opt);
return date__httpdate(str);
} | 0 | []
| date | 3959accef8da5c128f8a8e2fd54e932a4fb253b0 | 128,772,328,111,442,300,000,000,000,000,000,000,000 | 9 | Add length limit option for methods that parses date strings
`Date.parse` now raises an ArgumentError when a given date string is
longer than 128. You can configure the limit by giving a `limit` keyword
argument like `Date.parse(str, limit: 1000)`. If you pass `limit: nil`,
the limit is disabled.
Not only `Date.parse` but also the following methods are changed.
* Date._parse
* Date.parse
* DateTime.parse
* Date._iso8601
* Date.iso8601
* DateTime.iso8601
* Date._rfc3339
* Date.rfc3339
* DateTime.rfc3339
* Date._xmlschema
* Date.xmlschema
* DateTime.xmlschema
* Date._rfc2822
* Date.rfc2822
* DateTime.rfc2822
* Date._rfc822
* Date.rfc822
* DateTime.rfc822
* Date._jisx0301
* Date.jisx0301
* DateTime.jisx0301 |
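A hedged C sketch of the guard this change adds in the extension, assuming Ruby's C API and the default limit of 128; the real option plumbing and error message may differ:

/* Sketch: reject oversized input before any parsing work happens. */
static void
check_limit(VALUE str, VALUE limit)
{
    if (NIL_P(limit))        /* limit: nil disables the check entirely */
        return;
    if (RSTRING_LEN(str) > NUM2LONG(limit))
        rb_raise(rb_eArgError,
                 "string length (%ld) exceeds the limit %ld",
                 RSTRING_LEN(str), NUM2LONG(limit));
}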
static int _gdImagePngCtxEx(gdImagePtr im, gdIOCtx * outfile, int level)
{
int i, j, bit_depth = 0, interlace_type;
int width = im->sx;
int height = im->sy;
int colors = im->colorsTotal;
int *open = im->open;
int mapping[gdMaxColors]; /* mapping[gd_index] == png_index */
png_byte trans_values[256];
png_color_16 trans_rgb_value;
png_color palette[gdMaxColors];
png_structp png_ptr;
png_infop info_ptr;
volatile int transparent = im->transparent;
volatile int remap = FALSE;
#ifdef PNG_SETJMP_SUPPORTED
jmpbuf_wrapper jbw;
#endif
int ret = 0;
/* width or height of value 0 is invalid in IHDR;
see http://www.w3.org/TR/PNG-Chunks.html */
if (width == 0 || height ==0) return 1;
#ifdef PNG_SETJMP_SUPPORTED
png_ptr = png_create_write_struct (PNG_LIBPNG_VER_STRING,
&jbw, gdPngErrorHandler,
NULL);
#else
png_ptr = png_create_write_struct (PNG_LIBPNG_VER_STRING, NULL, NULL, NULL);
#endif
if (png_ptr == NULL) {
gd_error("gd-png error: cannot allocate libpng main struct\n");
return 1;
}
info_ptr = png_create_info_struct (png_ptr);
if (info_ptr == NULL) {
gd_error("gd-png error: cannot allocate libpng info struct\n");
png_destroy_write_struct (&png_ptr, (png_infopp) NULL);
return 1;
}
#ifdef PNG_SETJMP_SUPPORTED
if (setjmp(jbw.jmpbuf)) {
gd_error("gd-png error: setjmp returns error condition\n");
png_destroy_write_struct (&png_ptr, &info_ptr);
return 1;
}
#endif
png_set_write_fn (png_ptr, (void *) outfile, gdPngWriteData,
gdPngFlushData);
/* This is best for palette images, and libpng defaults to it for
palette images anyway, so we don't need to do it explicitly.
What to ideally do for truecolor images depends, alas, on the image.
gd is intentionally imperfect and doesn't spend a lot of time
fussing with such things. */
/* Faster if this is uncommented, but may produce larger truecolor files.
Wait for gdImagePngCtxEx. */
#if 0
png_set_filter (png_ptr, 0, PNG_FILTER_NONE);
#endif
/* 2.0.12: this is finally a parameter */
png_set_compression_level (png_ptr, level);
#ifdef PNG_pHYs_SUPPORTED
/* 2.1.0: specify the resolution */
png_set_pHYs(png_ptr, info_ptr, DPI2DPM(im->res_x), DPI2DPM(im->res_y),
PNG_RESOLUTION_METER);
#endif
/* can set this to a smaller value without compromising compression if all
* image data is 16K or less; will save some decoder memory [min == 8] */
/* png_set_compression_window_bits(png_ptr, 15); */
if (!im->trueColor) {
if (transparent >= im->colorsTotal ||
(transparent >= 0 && open[transparent]))
transparent = -1;
}
if (!im->trueColor) {
for (i = 0; i < gdMaxColors; ++i)
mapping[i] = -1;
}
if (!im->trueColor) {
/* count actual number of colors used (colorsTotal == high-water mark) */
colors = 0;
for (i = 0; i < im->colorsTotal; ++i) {
if (!open[i]) {
mapping[i] = colors;
++colors;
}
}
if (colors == 0) {
gd_error("gd-png error: no colors in palette\n");
ret = 1;
goto bail;
}
if (colors < im->colorsTotal) {
remap = TRUE;
}
if (colors <= 2)
bit_depth = 1;
else if (colors <= 4)
bit_depth = 2;
else if (colors <= 16)
bit_depth = 4;
else
bit_depth = 8;
}
interlace_type = im->interlace ? PNG_INTERLACE_ADAM7 : PNG_INTERLACE_NONE;
if (im->trueColor) {
if (im->saveAlphaFlag) {
png_set_IHDR (png_ptr, info_ptr, width, height, 8,
PNG_COLOR_TYPE_RGB_ALPHA, interlace_type,
PNG_COMPRESSION_TYPE_DEFAULT,
PNG_FILTER_TYPE_DEFAULT);
} else {
png_set_IHDR (png_ptr, info_ptr, width, height, 8,
PNG_COLOR_TYPE_RGB, interlace_type,
PNG_COMPRESSION_TYPE_DEFAULT,
PNG_FILTER_TYPE_DEFAULT);
}
} else {
png_set_IHDR (png_ptr, info_ptr, width, height, bit_depth,
PNG_COLOR_TYPE_PALETTE, interlace_type,
PNG_COMPRESSION_TYPE_DEFAULT, PNG_FILTER_TYPE_DEFAULT);
}
if (im->trueColor && (!im->saveAlphaFlag) && (transparent >= 0)) {
/* 2.0.9: fixed by Thomas Winzig */
trans_rgb_value.red = gdTrueColorGetRed (im->transparent);
trans_rgb_value.green = gdTrueColorGetGreen (im->transparent);
trans_rgb_value.blue = gdTrueColorGetBlue (im->transparent);
png_set_tRNS (png_ptr, info_ptr, 0, 0, &trans_rgb_value);
}
if (!im->trueColor) {
/* Oy veh. Remap the PNG palette to put the
entries with interesting alpha channel
values first. This minimizes the size
of the tRNS chunk and thus the size
of the PNG file as a whole. */
int tc = 0;
int i;
int j;
int k;
for (i = 0; (i < im->colorsTotal); i++) {
if ((!im->open[i]) && (im->alpha[i] != gdAlphaOpaque)) {
tc++;
}
}
if (tc) {
#if 0
for (i = 0; (i < im->colorsTotal); i++) {
trans_values[i] = 255 -
((im->alpha[i] << 1) + (im->alpha[i] >> 6));
}
png_set_tRNS (png_ptr, info_ptr, trans_values, 256, NULL);
#endif
if (!remap) {
remap = TRUE;
}
/* (Semi-)transparent indexes come up from the bottom
of the list of real colors; opaque
indexes come down from the top */
j = 0;
k = colors - 1;
for (i = 0; (i < im->colorsTotal); i++) {
if (!im->open[i]) {
if (im->alpha[i] != gdAlphaOpaque) {
/* Andrew Hull: >> 6, not >> 7! (gd 2.0.5) */
trans_values[j] = 255 -
((im->alpha[i] << 1) + (im->alpha[i] >> 6));
mapping[i] = j++;
} else {
mapping[i] = k--;
}
}
}
png_set_tRNS (png_ptr, info_ptr, trans_values, tc, NULL);
}
}
/* convert palette to libpng layout */
if (!im->trueColor) {
if (remap)
for (i = 0; i < im->colorsTotal; ++i) {
if (mapping[i] < 0)
continue;
palette[mapping[i]].red = im->red[i];
palette[mapping[i]].green = im->green[i];
palette[mapping[i]].blue = im->blue[i];
}
else
for (i = 0; i < colors; ++i) {
palette[i].red = im->red[i];
palette[i].green = im->green[i];
palette[i].blue = im->blue[i];
}
png_set_PLTE (png_ptr, info_ptr, palette, colors);
}
/* write out the PNG header info (everything up to first IDAT) */
png_write_info (png_ptr, info_ptr);
/* make sure < 8-bit images are packed into pixels as tightly as possible */
png_set_packing (png_ptr);
/* This code allocates a set of row buffers and copies the gd image data
* into them only in the case that remapping is necessary; in gd 1.3 and
* later, the im->pixels array is laid out identically to libpng's row
* pointers and can be passed to png_write_image() function directly.
* The remapping case could be accomplished with less memory for non-
* interlaced images, but interlacing causes some serious complications. */
if (im->trueColor) {
/* performance optimizations by Phong Tran */
int channels = im->saveAlphaFlag ? 4 : 3;
/* Our little 7-bit alpha channel trick costs us a bit here. */
png_bytep *row_pointers;
unsigned char *pOutputRow;
int **ptpixels = im->tpixels;
int *pThisRow;
unsigned char a;
int thisPixel;
png_bytep *prow_pointers;
int saveAlphaFlag = im->saveAlphaFlag;
if (overflow2(sizeof (png_bytep), height)) {
ret = 1;
goto bail;
}
row_pointers = gdMalloc (sizeof (png_bytep) * height);
if (row_pointers == NULL) {
gd_error("gd-png error: unable to allocate row_pointers\n");
ret = 1;
goto bail;
}
prow_pointers = row_pointers;
for (j = 0; j < height; ++j) {
if (overflow2(width, channels) || ((*prow_pointers =
(png_bytep) gdMalloc (width * channels)) == NULL)) {
gd_error("gd-png error: unable to allocate rows\n");
for (i = 0; i < j; ++i)
gdFree (row_pointers[i]);
/* 2.0.29: memory leak TBB */
gdFree(row_pointers);
ret = 1;
goto bail;
}
pOutputRow = *prow_pointers++;
pThisRow = *ptpixels++;
for (i = 0; i < width; ++i) {
thisPixel = *pThisRow++;
*pOutputRow++ = gdTrueColorGetRed (thisPixel);
*pOutputRow++ = gdTrueColorGetGreen (thisPixel);
*pOutputRow++ = gdTrueColorGetBlue (thisPixel);
if (saveAlphaFlag) {
/* convert the 7-bit alpha channel to an 8-bit alpha channel.
We do a little bit-flipping magic, repeating the MSB
as the LSB, to ensure that 0 maps to 0 and
127 maps to 255. We also have to invert to match
PNG's convention in which 255 is opaque. */
a = gdTrueColorGetAlpha (thisPixel);
/* Andrew Hull: >> 6, not >> 7! (gd 2.0.5) */
*pOutputRow++ = 255 - ((a << 1) + (a >> 6));
}
}
}
png_write_image (png_ptr, row_pointers);
png_write_end (png_ptr, info_ptr);
for (j = 0; j < height; ++j)
gdFree (row_pointers[j]);
gdFree (row_pointers);
} else {
if (remap) {
png_bytep *row_pointers;
if (overflow2(sizeof (png_bytep), height)) {
ret = 1;
goto bail;
}
row_pointers = gdMalloc (sizeof (png_bytep) * height);
if (row_pointers == NULL) {
gd_error("gd-png error: unable to allocate row_pointers\n");
ret = 1;
goto bail;
}
for (j = 0; j < height; ++j) {
if ((row_pointers[j] = (png_bytep) gdMalloc (width)) == NULL) {
gd_error("gd-png error: unable to allocate rows\n");
for (i = 0; i < j; ++i)
gdFree (row_pointers[i]);
/* TBB: memory leak */
gdFree (row_pointers);
ret = 1;
goto bail;
}
for (i = 0; i < width; ++i)
row_pointers[j][i] = mapping[im->pixels[j][i]];
}
png_write_image (png_ptr, row_pointers);
png_write_end (png_ptr, info_ptr);
for (j = 0; j < height; ++j)
gdFree (row_pointers[j]);
gdFree (row_pointers);
} else {
png_write_image (png_ptr, im->pixels);
png_write_end (png_ptr, info_ptr);
}
}
/* 1.6.3: maybe we should give that memory BACK! TBB */
bail:
png_destroy_write_struct (&png_ptr, &info_ptr);
return ret;
} | 0 | [
"CWE-415"
]
| libgd | 56ce6ef068b954ad28379e83cca04feefc51320c | 12,526,495,744,097,817,000,000,000,000,000,000,000 | 322 | Fix #381: libgd double-free vulnerability
The issue is that `gdImagePngCtxEx` (which is called by `gdImagePngPtr`
and the other PNG output functions to do the real work) does not return
whether it succeeded or failed, so this is not checked in
`gdImagePngPtr` and the function wrongly assumes everything is okay,
which is not, in this case, because the palette image contains no
palette entries.
We can't change the signature of `gdImagePngCtxEx` for API
compatibility reasons, so we introduce the static helper
`_gdImagePngCtxEx` which returns success respective failure, so
`gdImagePngPtr` and `gdImagePngPtrEx` can check the return value. We
leave it solely to libpng for now to report warnings regarding the
failing write.
CVE-2017-6362
(cherry picked from commit 2207e3c88a06a5c42230907554ab1e9f2ec021ea) |
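For illustration, a hedged sketch of how a caller can use the new status from _gdImagePngCtxEx(); this mirrors the shape of gdImagePngPtrEx() but is not the verbatim upstream wrapper:

/* Sketch: extract the buffer only on success, so a failed write (e.g. a
 * palette with no entries) can no longer end in a double free. */
void *gdImagePngPtrEx(gdImagePtr im, int *size, int level)
{
	void *rv = NULL;
	gdIOCtx *out = gdNewDynamicCtx(2048, NULL);
	if (out == NULL)
		return NULL;
	if (!_gdImagePngCtxEx(im, out, level))	/* 0 means success */
		rv = gdDPExtractData(out, size);
	out->gd_free(out);
	return rv;
}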
static inline void skb_reset_inner_network_header(struct sk_buff *skb)
{
skb->inner_network_header = skb->data - skb->head;
} | 0 | [
"CWE-20"
]
| linux | 2b16f048729bf35e6c28a40cbfad07239f9dcd90 | 145,400,733,668,729,600,000,000,000,000,000,000,000 | 4 | net: create skb_gso_validate_mac_len()
If you take a GSO skb, and split it into packets, will the MAC
length (L2 + L3 + L4 headers + payload) of those packets be small
enough to fit within a given length?
Move skb_gso_mac_seglen() to skbuff.h with other related functions
like skb_gso_network_seglen() so we can use it, and then create
skb_gso_validate_mac_len to do the full calculation.
Signed-off-by: Daniel Axtens <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
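A hedged sketch of the helper pair the message introduces; the GSO_BY_FRAGS corner case is omitted and skb_gso_transport_seglen() is assumed available, so treat this as an outline rather than the exact kernel code:

/* Sketch: MAC length (L2 + L3 + L4 headers + payload) of one segment. */
static unsigned int gso_mac_seglen(const struct sk_buff *skb)
{
	unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb);

	return hdr_len + skb_gso_transport_seglen(skb);
}

/* Sketch: would every packet split out of this GSO skb fit in @len? */
static bool gso_validate_mac_len(const struct sk_buff *skb, unsigned int len)
{
	return gso_mac_seglen(skb) <= len;
}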
*/
PHP_METHOD(DatePeriod, __construct)
{
php_period_obj *dpobj;
php_date_obj *dateobj;
php_interval_obj *intobj;
zval *start, *end = NULL, *interval;
long recurrences = 0, options = 0;
char *isostr = NULL;
int isostr_len = 0;
timelib_time *clone;
zend_error_handling error_handling;
zend_replace_error_handling(EH_THROW, NULL, &error_handling TSRMLS_CC);
if (zend_parse_parameters_ex(ZEND_PARSE_PARAMS_QUIET, ZEND_NUM_ARGS() TSRMLS_CC, "OOl|l", &start, date_ce_interface, &interval, date_ce_interval, &recurrences, &options) == FAILURE) {
if (zend_parse_parameters_ex(ZEND_PARSE_PARAMS_QUIET, ZEND_NUM_ARGS() TSRMLS_CC, "OOO|l", &start, date_ce_interface, &interval, date_ce_interval, &end, date_ce_interface, &options) == FAILURE) {
if (zend_parse_parameters_ex(ZEND_PARSE_PARAMS_QUIET, ZEND_NUM_ARGS() TSRMLS_CC, "s|l", &isostr, &isostr_len, &options) == FAILURE) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "This constructor accepts either (DateTimeInterface, DateInterval, int) OR (DateTimeInterface, DateInterval, DateTime) OR (string) as arguments.");
zend_restore_error_handling(&error_handling TSRMLS_CC);
return;
}
}
}
dpobj = zend_object_store_get_object(getThis() TSRMLS_CC);
dpobj->current = NULL;
if (isostr) {
date_period_initialize(&(dpobj->start), &(dpobj->end), &(dpobj->interval), &recurrences, isostr, isostr_len TSRMLS_CC);
if (dpobj->start == NULL) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "The ISO interval '%s' did not contain a start date.", isostr);
}
if (dpobj->interval == NULL) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "The ISO interval '%s' did not contain an interval.", isostr);
}
if (dpobj->end == NULL && recurrences == 0) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "The ISO interval '%s' did not contain an end date or a recurrence count.", isostr);
}
if (dpobj->start) {
timelib_update_ts(dpobj->start, NULL);
}
if (dpobj->end) {
timelib_update_ts(dpobj->end, NULL);
}
dpobj->start_ce = date_ce_date;
} else {
/* init */
intobj = (php_interval_obj *) zend_object_store_get_object(interval TSRMLS_CC);
/* start date */
dateobj = (php_date_obj *) zend_object_store_get_object(start TSRMLS_CC);
clone = timelib_time_ctor();
memcpy(clone, dateobj->time, sizeof(timelib_time));
if (dateobj->time->tz_abbr) {
clone->tz_abbr = strdup(dateobj->time->tz_abbr);
}
if (dateobj->time->tz_info) {
clone->tz_info = dateobj->time->tz_info;
}
dpobj->start = clone;
dpobj->start_ce = Z_OBJCE_P(start);
/* interval */
dpobj->interval = timelib_rel_time_clone(intobj->diff);
/* end date */
if (end) {
dateobj = (php_date_obj *) zend_object_store_get_object(end TSRMLS_CC);
clone = timelib_time_clone(dateobj->time);
dpobj->end = clone;
}
}
/* options */
dpobj->include_start_date = !(options & PHP_DATE_PERIOD_EXCLUDE_START_DATE);
/* recurrences */
dpobj->recurrences = recurrences + dpobj->include_start_date;
dpobj->initialized = 1;
zend_restore_error_handling(&error_handling TSRMLS_CC); | 0 | []
| php-src | c377f1a715476934133f3254d1e0d4bf3743e2d2 | 158,727,037,129,595,360,000,000,000,000,000,000,000 | 83 | Fix bug #68942 (Use after free vulnerability in unserialize() with DateTimeZone) |
do_device_not_available(struct pt_regs *regs, long error_code)
{
unsigned long cr0;
RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
#ifdef CONFIG_MATH_EMULATION
if (!boot_cpu_has(X86_FEATURE_FPU) && (read_cr0() & X86_CR0_EM)) {
struct math_emu_info info = { };
cond_local_irq_enable(regs);
info.regs = regs;
math_emulate(&info);
return;
}
#endif
/* This should not happen. */
cr0 = read_cr0();
if (WARN(cr0 & X86_CR0_TS, "CR0.TS was set")) {
/* Try to fix it up and carry on. */
write_cr0(cr0 & ~X86_CR0_TS);
} else {
/*
* Something terrible happened, and we're better off trying
* to kill the task than getting stuck in a never-ending
* loop of #NM faults.
*/
die("unexpected #NM exception", regs, error_code);
}
} | 0 | [
"CWE-362",
"CWE-284"
]
| linux | d8ba61ba58c88d5207c1ba2f7d9a2280e7d03be9 | 281,371,547,443,769,830,000,000,000,000,000,000,000 | 32 | x86/entry/64: Don't use IST entry for #BP stack
There's nothing IST-worthy about #BP/int3. We don't allow kprobes
in the small handful of places in the kernel that run at CPL0 with
an invalid stack, and 32-bit kernels have used normal interrupt
gates for #BP forever.
Furthermore, we don't allow kprobes in places that have usergs while
in kernel mode, so "paranoid" is also unnecessary.
Signed-off-by: Andy Lutomirski <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
Signed-off-by: Thomas Gleixner <[email protected]>
Cc: [email protected] |
static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
{
struct rdma_id_private *id_priv;
struct cma_multicast *mc = multicast->context;
struct rdma_cm_event event;
int ret;
id_priv = mc->id_priv;
if (cma_disable_callback(id_priv, RDMA_CM_ADDR_BOUND) &&
cma_disable_callback(id_priv, RDMA_CM_ADDR_RESOLVED))
return 0;
if (!status)
status = cma_set_qkey(id_priv, be32_to_cpu(multicast->rec.qkey));
mutex_lock(&id_priv->qp_mutex);
if (!status && id_priv->id.qp)
status = ib_attach_mcast(id_priv->id.qp, &multicast->rec.mgid,
be16_to_cpu(multicast->rec.mlid));
mutex_unlock(&id_priv->qp_mutex);
memset(&event, 0, sizeof event);
event.status = status;
event.param.ud.private_data = mc->context;
if (!status) {
event.event = RDMA_CM_EVENT_MULTICAST_JOIN;
ib_init_ah_from_mcmember(id_priv->id.device,
id_priv->id.port_num, &multicast->rec,
&event.param.ud.ah_attr);
event.param.ud.qp_num = 0xFFFFFF;
event.param.ud.qkey = be32_to_cpu(multicast->rec.qkey);
} else
event.event = RDMA_CM_EVENT_MULTICAST_ERROR;
ret = id_priv->id.event_handler(&id_priv->id, &event);
if (ret) {
cma_exch(id_priv, RDMA_CM_DESTROYING);
mutex_unlock(&id_priv->handler_mutex);
rdma_destroy_id(&id_priv->id);
return 0;
}
mutex_unlock(&id_priv->handler_mutex);
return 0;
} | 0 | [
"CWE-20"
]
| linux | b2853fd6c2d0f383dbdf7427e263eb576a633867 | 292,606,384,515,373,540,000,000,000,000,000,000,000 | 44 | IB/core: Don't resolve passive side RoCE L2 address in CMA REQ handler
The code that resolves the passive side source MAC within the rdma_cm
connection request handler was both redundant and buggy, so remove it.
It was redundant since later, when an RC QP is modified to RTR state,
the resolution will take place in the ib_core module. It was buggy
because this callback also deals with UD SIDR exchange, for which we
incorrectly looked at the REQ member of the CM event and dereferenced
a random value.
Fixes: dd5f03beb4f7 ("IB/core: Ethernet L2 attributes in verbs/cm structures")
Signed-off-by: Moni Shoua <[email protected]>
Signed-off-by: Or Gerlitz <[email protected]>
Signed-off-by: Roland Dreier <[email protected]> |
enum Item_result result_type() const { return ROW_RESULT; } | 0 | []
| mysql-server | f7316aa0c9a3909fc7498e7b95d5d3af044a7e21 | 275,259,926,947,090,420,000,000,000,000,000,000,000 | 1 | Bug#26361149 MYSQL SERVER CRASHES AT: COL IN(IFNULL(CONST,
COL), NAME_CONST('NAME', NULL))
Backport of Bug#19143243 fix.
NAME_CONST item can return NULL_ITEM type in case of incorrect arguments.
NULL_ITEM has special processing in Item_func_in function.
In Item_func_in::fix_length_and_dec an array of possible comparators is
created. Since NAME_CONST function has NULL_ITEM type, corresponding
array element is empty. Then NAME_CONST is wrapped to ITEM_CACHE.
ITEM_CACHE cannot return the proper type (NULL_ITEM) in Item_func_in::val_int(),
so the NULL_ITEM ends up being compared with an empty comparator.
The fix is to disable the caching of Item_name_const item. |
static void snd_timer_check_master(struct snd_timer_instance *master)
{
struct snd_timer_instance *slave, *tmp;
/* check all pending slaves */
list_for_each_entry_safe(slave, tmp, &snd_timer_slave_list, open_list) {
if (slave->slave_class == master->slave_class &&
slave->slave_id == master->slave_id) {
list_move_tail(&slave->open_list, &master->slave_list_head);
spin_lock_irq(&slave_active_lock);
slave->master = master;
slave->timer = master->timer;
if (slave->flags & SNDRV_TIMER_IFLG_RUNNING)
list_add_tail(&slave->active_list,
&master->slave_active_head);
spin_unlock_irq(&slave_active_lock);
}
}
} | 1 | [
"CWE-20",
"CWE-200",
"CWE-362"
]
| linux | b5a663aa426f4884c71cd8580adae73f33570f0d | 64,563,709,718,376,220,000,000,000,000,000,000,000 | 19 | ALSA: timer: Harden slave timer list handling
A slave timer instance might be still accessible in a racy way while
operating the master instance, as it lacks locking. Since the
master operation is mostly protected with timer->lock, we should cope
with it while changing the slave instance, too. Also, some linked
lists (active_list and ack_list) of slave instances aren't unlinked
immediately at stopping or closing, and this may lead to unexpected
accesses.
This patch tries to address these issues. It adds spin lock of
timer->lock (either from master or slave, which is equivalent) in a
few places. For avoiding a deadlock, we ensure that the global
slave_active_lock is always locked at first before each timer lock.
Also, ack and active_list of slave instances are properly unlinked at
snd_timer_stop() and snd_timer_close().
Last but not least, remove the superfluous call of _snd_timer_stop()
when removing slave links. This is a no-op, and calling it may confuse
readers wrt locking. Further cleanup will follow in a later patch.
Actually we've got reports of use-after-free by syzkaller fuzzer, and
this hopefully fixes these issues.
Reported-by: Dmitry Vyukov <[email protected]>
Cc: <[email protected]>
Signed-off-by: Takashi Iwai <[email protected]> |
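A hedged fragment showing the lock ordering and the immediate unlinking the message establishes; declarations and error handling are elided, and a master timer is assumed to be attached:

/* Sketch: stopping a slave instance — global lock first, then the
 * (master's) timer lock, and unlink from both lists right away. */
spin_lock_irqsave(&slave_active_lock, flags);
spin_lock(&timeri->timer->lock);
timeri->flags &= ~SNDRV_TIMER_IFLG_RUNNING;
list_del_init(&timeri->ack_list);	/* no deferred unlink ... */
list_del_init(&timeri->active_list);	/* ... that racers could still see */
spin_unlock(&timeri->timer->lock);
spin_unlock_irqrestore(&slave_active_lock, flags);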
int ath6kl_wmi_connect_cmd(struct wmi *wmi, u8 if_idx,
enum network_type nw_type,
enum dot11_auth_mode dot11_auth_mode,
enum auth_mode auth_mode,
enum ath6kl_crypto_type pairwise_crypto,
u8 pairwise_crypto_len,
enum ath6kl_crypto_type group_crypto,
u8 group_crypto_len, int ssid_len, u8 *ssid,
u8 *bssid, u16 channel, u32 ctrl_flags,
u8 nw_subtype)
{
struct sk_buff *skb;
struct wmi_connect_cmd *cc;
int ret;
ath6kl_dbg(ATH6KL_DBG_WMI,
"wmi connect bssid %pM freq %d flags 0x%x ssid_len %d "
"type %d dot11_auth %d auth %d pairwise %d group %d\n",
bssid, channel, ctrl_flags, ssid_len, nw_type,
dot11_auth_mode, auth_mode, pairwise_crypto, group_crypto);
ath6kl_dbg_dump(ATH6KL_DBG_WMI, NULL, "ssid ", ssid, ssid_len);
wmi->traffic_class = 100;
if ((pairwise_crypto == NONE_CRYPT) && (group_crypto != NONE_CRYPT))
return -EINVAL;
if ((pairwise_crypto != NONE_CRYPT) && (group_crypto == NONE_CRYPT))
return -EINVAL;
skb = ath6kl_wmi_get_new_buf(sizeof(struct wmi_connect_cmd));
if (!skb)
return -ENOMEM;
cc = (struct wmi_connect_cmd *) skb->data;
if (ssid_len)
memcpy(cc->ssid, ssid, ssid_len);
cc->ssid_len = ssid_len;
cc->nw_type = nw_type;
cc->dot11_auth_mode = dot11_auth_mode;
cc->auth_mode = auth_mode;
cc->prwise_crypto_type = pairwise_crypto;
cc->prwise_crypto_len = pairwise_crypto_len;
cc->grp_crypto_type = group_crypto;
cc->grp_crypto_len = group_crypto_len;
cc->ch = cpu_to_le16(channel);
cc->ctrl_flags = cpu_to_le32(ctrl_flags);
cc->nw_subtype = nw_subtype;
if (bssid != NULL)
memcpy(cc->bssid, bssid, ETH_ALEN);
ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_CONNECT_CMDID,
NO_SYNC_WMIFLAG);
return ret;
} | 0 | [
"CWE-125"
]
| linux | 5d6751eaff672ea77642e74e92e6c0ac7f9709ab | 47,954,215,372,230,550,000,000,000,000,000,000,000 | 59 | ath6kl: add some bounds checking
The "ev->traffic_class" and "reply->ac" variables come from the network
and they're used as an offset into the wmi->stream_exist_for_ac[] array.
Those variables are u8 so they can be 0-255 but the stream_exist_for_ac[]
array only has WMM_NUM_AC (4) elements. We need to add a couple bounds
checks to prevent array overflows.
I also modified one existing check from "if (traffic_class > 3) {" to
"if (traffic_class >= WMM_NUM_AC) {" just to make them all consistent.
Fixes: bdcd81707973 (" Add ath6kl cleaned up driver")
Signed-off-by: Dan Carpenter <[email protected]>
Signed-off-by: Kalle Valo <[email protected]> |
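The added checks amount to a one-line guard before each array access; a hedged sketch, with reply->ac standing in for either wire-supplied index:

/* Sketch: stream_exist_for_ac[] has only WMM_NUM_AC (4) entries, so an
 * unvalidated u8 (0-255) from the network must be rejected first. */
if (reply->ac >= WMM_NUM_AC)
	return -EINVAL;
wmi->stream_exist_for_ac[reply->ac] = 0;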
static void put_ifaddrmsg(struct nlmsghdr *nlh, u8 prefixlen, u32 flags,
u8 scope, int ifindex)
{
struct ifaddrmsg *ifm;
ifm = nlmsg_data(nlh);
ifm->ifa_family = AF_INET6;
ifm->ifa_prefixlen = prefixlen;
ifm->ifa_flags = flags;
ifm->ifa_scope = scope;
ifm->ifa_index = ifindex;
} | 0 | [
"CWE-20"
]
| linux | 77751427a1ff25b27d47a4c36b12c3c8667855ac | 34,703,470,981,595,024,000,000,000,000,000,000,000 | 12 | ipv6: addrconf: validate new MTU before applying it
Currently we don't check if the new MTU is valid or not and this allows
one to configure an MTU smaller than the minimum allowed by RFCs, or even
bigger than the interface's own MTU, which is a problem as it may lead to packet
drops.
If you have a daemon like NetworkManager running, this may be exploited
by remote attackers by forging RA packets with an invalid MTU, possibly
leading to a DoS. (NetworkManager currently only validates for values
too small, but not for too big ones.)
The fix is just to make sure the new value is valid. That is, between
IPV6_MIN_MTU and interface's MTU.
Note that similar check is already performed at
ndisc_router_discovery(), for when kernel itself parses the RA.
Signed-off-by: Marcelo Ricardo Leitner <[email protected]>
Signed-off-by: Sabrina Dubroca <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
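A hedged sketch of the validation, mirroring the check the message says ndisc_router_discovery() already performs; the field names follow the usual inet6_dev layout:

/* Sketch: apply a new MTU only if it is within the valid range. */
if (mtu < IPV6_MIN_MTU || mtu > idev->dev->mtu)
	return -EINVAL;	/* below the RFC minimum, or above the link MTU */
idev->cnf.mtu6 = mtu;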
void llhttp_reset(llhttp_t* parser) {
llhttp_type_t type = parser->type;
const llhttp_settings_t* settings = parser->settings;
void* data = parser->data;
uint8_t lenient_flags = parser->lenient_flags;
llhttp__internal_init(parser);
parser->type = type;
parser->settings = (void*) settings;
parser->data = data;
parser->lenient_flags = lenient_flags;
} | 0 | [
"CWE-444"
]
| node | af488f8dc82d69847992ea1cd2f53dc8082b3b91 | 43,150,630,936,262,160,000,000,000,000,000,000,000 | 13 | deps: update llhttp to 6.0.4
Refs: https://hackerone.com/reports/1238099
Refs: https://hackerone.com/reports/1238709
Refs: https://github.com/nodejs-private/llhttp-private/pull/6
Refs: https://github.com/nodejs-private/llhttp-private/pull/5
CVE-ID: CVE-2021-22959
CVE-ID: CVE-2021-22960
PR-URL: https://github.com/nodejs-private/node-private/pull/284
Reviewed-By: Akshay K <[email protected]>
Reviewed-By: James M Snell <[email protected]>
Reviewed-By: Robert Nagy <[email protected]> |
static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
unsigned long arg)
{
struct hfi1_filedata *fd = fp->private_data;
struct hfi1_ctxtdata *uctxt = fd->uctxt;
int ret = 0;
int uval = 0;
hfi1_cdbg(IOCTL, "IOCTL recv: 0x%x", cmd);
if (cmd != HFI1_IOCTL_ASSIGN_CTXT &&
cmd != HFI1_IOCTL_GET_VERS &&
!uctxt)
return -EINVAL;
switch (cmd) {
case HFI1_IOCTL_ASSIGN_CTXT:
ret = assign_ctxt(fd, arg, _IOC_SIZE(cmd));
break;
case HFI1_IOCTL_CTXT_INFO:
ret = get_ctxt_info(fd, arg, _IOC_SIZE(cmd));
break;
case HFI1_IOCTL_USER_INFO:
ret = get_base_info(fd, arg, _IOC_SIZE(cmd));
break;
case HFI1_IOCTL_CREDIT_UPD:
if (uctxt)
sc_return_credits(uctxt->sc);
break;
case HFI1_IOCTL_TID_UPDATE:
ret = user_exp_rcv_setup(fd, arg, _IOC_SIZE(cmd));
break;
case HFI1_IOCTL_TID_FREE:
ret = user_exp_rcv_clear(fd, arg, _IOC_SIZE(cmd));
break;
case HFI1_IOCTL_TID_INVAL_READ:
ret = user_exp_rcv_invalid(fd, arg, _IOC_SIZE(cmd));
break;
case HFI1_IOCTL_RECV_CTRL:
ret = manage_rcvq(uctxt, fd->subctxt, arg);
break;
case HFI1_IOCTL_POLL_TYPE:
if (get_user(uval, (int __user *)arg))
return -EFAULT;
uctxt->poll_type = (typeof(uctxt->poll_type))uval;
break;
case HFI1_IOCTL_ACK_EVENT:
ret = user_event_ack(uctxt, fd->subctxt, arg);
break;
case HFI1_IOCTL_SET_PKEY:
ret = set_ctxt_pkey(uctxt, arg);
break;
case HFI1_IOCTL_CTXT_RESET:
ret = ctxt_reset(uctxt);
break;
case HFI1_IOCTL_GET_VERS:
uval = HFI1_USER_SWVERSION;
if (put_user(uval, (int __user *)arg))
return -EFAULT;
break;
default:
return -EINVAL;
}
return ret;
} | 0 | [
"CWE-416"
]
| linux | 3d2a9d642512c21a12d19b9250e7a835dcb41a79 | 244,830,912,804,411,750,000,000,000,000,000,000,000 | 78 | IB/hfi1: Ensure correct mm is used at all times
Two earlier bug fixes have created a security problem in the hfi1
driver. One fix aimed to solve an issue where current->mm was not valid
when closing the hfi1 cdev. It attempted to do this by saving a cached
value of the current->mm pointer at file open time. This is a problem if
another process with access to the FD calls in via write() or ioctl() to
pin pages via the hfi driver. The other fix tried to solve a use after
free by taking a reference on the mm.
To fix this correctly we use the existing cached value of the mm in the
mmu notifier. Now we can check in the insert, evict, etc. routines that
current->mm matched what the notifier was registered for. If not, then
don't allow access. The registration of the mmu notifier will save the mm
pointer.
Since in do_exit() exit_mm() is called before exit_files(), which
would call our close routine, a reference is needed on the mm. We rely on
the mmgrab done by the registration of the notifier, whereas before it was
explicit. The mmu notifier deregistration happens when the user context is
torn down, the creation of which triggered the registration.
Also of note is we do not do any explicit work to protect the interval
tree notifier. It doesn't seem that this is going to be needed since we
aren't actually doing anything with current->mm. The interval tree
notifier stuff still has a FIXME noted from a previous commit that will be
addressed in a follow on patch.
Cc: <[email protected]>
Fixes: e0cf75deab81 ("IB/hfi1: Fix mm_struct use after free")
Fixes: 3faa3d9a308e ("IB/hfi1: Make use of mm consistent")
Link: https://lore.kernel.org/r/[email protected]
Suggested-by: Jann Horn <[email protected]>
Reported-by: Jason Gunthorpe <[email protected]>
Reviewed-by: Ira Weiny <[email protected]>
Reviewed-by: Mike Marciniszyn <[email protected]>
Signed-off-by: Dennis Dalessandro <[email protected]>
Signed-off-by: Jason Gunthorpe <[email protected]> |
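The core of the fix is a comparison against the mm cached at notifier-registration time; a hedged fragment, with fd->mm as a hypothetical name for that cached pointer:

/* Sketch: reject pin/insert/evict work from any task whose mm is not
 * the one the mmu notifier was registered for. */
if (current->mm != fd->mm)	/* fd->mm: hypothetical cached pointer */
	return -EPERM;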
static double mp_exp(_cimg_math_parser& mp) {
return std::exp(_mp_arg(2));
} | 0 | [
"CWE-770"
]
| cimg | 619cb58dd90b4e03ac68286c70ed98acbefd1c90 | 224,890,397,064,489,750,000,000,000,000,000,000,000 | 3 | CImg<>::load_bmp() and CImg<>::load_pandore(): Check that dimensions encoded in file does not exceed file size. |
static int rbd_header_from_disk(struct rbd_device *rbd_dev,
struct rbd_image_header_ondisk *ondisk)
{
struct rbd_image_header *header = &rbd_dev->header;
bool first_time = header->object_prefix == NULL;
struct ceph_snap_context *snapc;
char *object_prefix = NULL;
char *snap_names = NULL;
u64 *snap_sizes = NULL;
u32 snap_count;
int ret = -ENOMEM;
u32 i;
/* Allocate this now to avoid having to handle failure below */
if (first_time) {
object_prefix = kstrndup(ondisk->object_prefix,
sizeof(ondisk->object_prefix),
GFP_KERNEL);
if (!object_prefix)
return -ENOMEM;
}
/* Allocate the snapshot context and fill it in */
snap_count = le32_to_cpu(ondisk->snap_count);
snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
if (!snapc)
goto out_err;
snapc->seq = le64_to_cpu(ondisk->snap_seq);
if (snap_count) {
struct rbd_image_snap_ondisk *snaps;
u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);
/* We'll keep a copy of the snapshot names... */
if (snap_names_len > (u64)SIZE_MAX)
goto out_2big;
snap_names = kmalloc(snap_names_len, GFP_KERNEL);
if (!snap_names)
goto out_err;
/* ...as well as the array of their sizes. */
snap_sizes = kmalloc_array(snap_count,
sizeof(*header->snap_sizes),
GFP_KERNEL);
if (!snap_sizes)
goto out_err;
/*
* Copy the names, and fill in each snapshot's id
* and size.
*
* Note that rbd_dev_v1_header_info() guarantees the
* ondisk buffer we're working with has
* snap_names_len bytes beyond the end of the
* snapshot id array, this memcpy() is safe.
*/
memcpy(snap_names, &ondisk->snaps[snap_count], snap_names_len);
snaps = ondisk->snaps;
for (i = 0; i < snap_count; i++) {
snapc->snaps[i] = le64_to_cpu(snaps[i].id);
snap_sizes[i] = le64_to_cpu(snaps[i].image_size);
}
}
/* We won't fail any more, fill in the header */
if (first_time) {
header->object_prefix = object_prefix;
header->obj_order = ondisk->options.order;
rbd_init_layout(rbd_dev);
} else {
ceph_put_snap_context(header->snapc);
kfree(header->snap_names);
kfree(header->snap_sizes);
}
/* The remaining fields always get updated (when we refresh) */
header->image_size = le64_to_cpu(ondisk->image_size);
header->snapc = snapc;
header->snap_names = snap_names;
header->snap_sizes = snap_sizes;
return 0;
out_2big:
ret = -EIO;
out_err:
kfree(snap_sizes);
kfree(snap_names);
ceph_put_snap_context(snapc);
kfree(object_prefix);
return ret;
} | 0 | [
"CWE-863"
]
| linux | f44d04e696feaf13d192d942c4f14ad2e117065a | 118,353,025,166,956,530,000,000,000,000,000,000,000 | 96 | rbd: require global CAP_SYS_ADMIN for mapping and unmapping
It turns out that currently we rely only on sysfs attribute
permissions:
$ ll /sys/bus/rbd/{add*,remove*}
--w------- 1 root root 4096 Sep 3 20:37 /sys/bus/rbd/add
--w------- 1 root root 4096 Sep 3 20:37 /sys/bus/rbd/add_single_major
--w------- 1 root root 4096 Sep 3 20:37 /sys/bus/rbd/remove
--w------- 1 root root 4096 Sep 3 20:38 /sys/bus/rbd/remove_single_major
This means that images can be mapped and unmapped (i.e. block devices
can be created and deleted) by a UID 0 process even after it drops all
privileges or by any process with CAP_DAC_OVERRIDE in its user namespace
as long as UID 0 is mapped into that user namespace.
Be consistent with other virtual block devices (loop, nbd, dm, md, etc)
and require CAP_SYS_ADMIN in the initial user namespace for mapping and
unmapping, and also for dumping the configuration string and refreshing
the image header.
Cc: [email protected]
Signed-off-by: Ilya Dryomov <[email protected]>
Reviewed-by: Jeff Layton <[email protected]> |
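A hedged sketch of the gate added to each sysfs handler; the store callback shown is illustrative of the pattern rather than the verbatim patch:

/* Sketch: mapping is now privileged in the initial user namespace,
 * regardless of the sysfs file's mode bits. */
static ssize_t add_store(struct bus_type *bus, const char *buf, size_t count)
{
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	return do_rbd_add(bus, buf, count);
}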
PolyPurge(string, class) /* returns pointer to a purged copy */
register char *string;
register char class;
{
register char *ptr;
static char area[STRINGSIZE];
ptr = area;
while (*string)
{
if (!MatchClass(class, *string))
{
*(ptr++) = *string;
}
string++;
}
*ptr = '\0';
return (area);
} | 0 | [
"CWE-787"
]
| cracklib | 33d7fa4585247cd2247a1ffa032ad245836c6edb | 32,875,944,373,962,710,000,000,000,000,000,000,000 | 18 | Fix a buffer overflow processing long words
A buffer overflow processing long words has been discovered. This commit
applies the patch from
https://build.opensuse.org/package/view_file/Base:System/cracklib/0004-overflow-processing-long-words.patch
by Howard Guo.
See https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=835386 and
http://www.openwall.com/lists/oss-security/2016/08/23/8 |
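Applied to the PolyPurge() copy loop shown above, the fix is to bound the copy by the fixed destination buffer; a hedged sketch:

/* Sketch: never write past area[STRINGSIZE - 1]; the last slot is
 * reserved for the terminating NUL, and overlong input is truncated. */
ptr = area;
while (*string)
{
    if (!MatchClass(class, *string))
    {
        if ((ptr - area) >= STRINGSIZE - 1)
            break;              /* truncate instead of overflowing */
        *(ptr++) = *string;
    }
    string++;
}
*ptr = '\0';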
GF_Err rtp_hnti_box_read(GF_Box *s, GF_BitStream *bs)
{
u32 length;
GF_RTPBox *ptr = (GF_RTPBox *)s;
if (ptr == NULL) return GF_BAD_PARAM;
ISOM_DECREASE_SIZE(ptr, 4)
ptr->subType = gf_bs_read_u32(bs);
length = (u32) (ptr->size);
//sdp text has no delimiter !!!
ptr->sdpText = (char*)gf_malloc(sizeof(char) * (length+1));
if (!ptr->sdpText) return GF_OUT_OF_MEM;
gf_bs_read_data(bs, ptr->sdpText, length);
ptr->sdpText[length] = 0;
return GF_OK;
} | 0 | [
"CWE-787"
]
| gpac | 388ecce75d05e11fc8496aa4857b91245007d26e | 290,858,008,049,364,950,000,000,000,000,000,000,000 | 18 | fixed #1587 |