| func (string, lengths 0-484k) | target (int64, 0-1) | cwe (list, lengths 0-4) | project (string, 799 classes) | commit_id (string, length 40) | hash (float64, ~1.22e24-3.40e29) | size (int64, 1-24k) | message (string, lengths 0-13.3k) |
---|---|---|---|---|---|---|---|
static inline struct pppox_sock *get_item_by_addr(struct net *net,
struct sockaddr_pppox *sp)
{
struct net_device *dev;
struct pppoe_net *pn;
struct pppox_sock *pppox_sock = NULL;
int ifindex;
rcu_read_lock();
dev = dev_get_by_name_rcu(net, sp->sa_addr.pppoe.dev);
if (dev) {
ifindex = dev->ifindex;
pn = pppoe_pernet(net);
pppox_sock = get_item(pn, sp->sa_addr.pppoe.sid,
sp->sa_addr.pppoe.remote, ifindex);
}
rcu_read_unlock();
return pppox_sock;
} | 0 | [
"CWE-20",
"CWE-269"
]
| linux | f3d3342602f8bcbf37d7c46641cb9bca7618eb1c | 319,255,135,461,605,380,000,000,000,000,000,000,000 | 20 | net: rework recvmsg handler msg_name and msg_namelen logic
This patch now always passes msg->msg_namelen as 0. recvmsg handlers must
set msg_namelen to the proper size <= sizeof(struct sockaddr_storage)
to return msg_name to the user.
This prevents numerous uninitialized memory leaks we had in the
recvmsg handlers and makes it harder for new code to accidentally leak
uninitialized memory.
Optimize for the case recvfrom is called with NULL as address. We don't
need to copy the address at all, so set it to NULL before invoking the
recvmsg handler. We can do so, because all the recvmsg handlers must
cope with the case a plain read() is called on them. read() also sets
msg_name to NULL.
Also document these changes in include/linux/net.h as suggested by David
Miller.
Changes since RFC:
Set msg->msg_name = NULL if user specified a NULL in msg_name but had a
non-null msg_namelen in verify_iovec/verify_compat_iovec. This doesn't
affect sendto as it would bail out earlier while trying to copy-in the
address. It also more naturally reflects the logic by the callers of
verify_iovec.
With this change in place I could remove "
if (!uaddr || msg_sys->msg_namelen == 0)
msg->msg_name = NULL
".
This change does not alter the user visible error logic as we ignore
msg_namelen as long as msg_name is NULL.
Also remove two unnecessary curly brackets in ___sys_recvmsg and change
comments to netdev style.
Cc: David Miller <[email protected]>
Suggested-by: Eric Dumazet <[email protected]>
Signed-off-by: Hannes Frederic Sowa <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
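The commit message above describes a convention: a recvmsg handler receives msg_namelen as 0, must tolerate a NULL msg_name (as plain read() passes), and must set msg_namelen to the real address length, never larger than sizeof(struct sockaddr_storage). As a hedged illustration only (a userspace sketch, not the kernel patch), the pattern looks like this:

```c
#include <stddef.h>
#include <string.h>
#include <sys/socket.h>

/* Illustrative sketch of the convention the commit describes. */
static void fill_msg_name(struct msghdr *msg,
                          const struct sockaddr *addr, socklen_t addr_len)
{
    if (msg->msg_name == NULL) {
        /* Caller did not ask for the source address (e.g. read()). */
        msg->msg_namelen = 0;
        return;
    }
    if (addr_len > sizeof(struct sockaddr_storage))
        addr_len = sizeof(struct sockaddr_storage);
    memcpy(msg->msg_name, addr, addr_len);
    msg->msg_namelen = addr_len;   /* report only initialized bytes */
}
```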
void IntegrationCodecClient::ConnectionCallbacks::onEvent(Network::ConnectionEvent event) {
parent_.last_connection_event_ = event;
if (event == Network::ConnectionEvent::Connected) {
parent_.connected_ = true;
parent_.connection_->dispatcher().exit();
} else if (event == Network::ConnectionEvent::RemoteClose) {
parent_.disconnected_ = true;
parent_.connection_->dispatcher().exit();
} else {
parent_.disconnected_ = true;
}
} | 0 | [
"CWE-400",
"CWE-703"
]
| envoy | afc39bea36fd436e54262f150c009e8d72db5014 | 54,561,940,117,509,115,000,000,000,000,000,000,000 | 12 | Track byteSize of HeaderMap internally.
Introduces a cached byte size updated internally in HeaderMap. The value
is stored as an optional, and is cleared whenever a non-const pointer or
reference to a HeaderEntry is accessed. The cached value can be set with
refreshByteSize() which performs an iteration over the HeaderMap to sum
the size of each key and value in the HeaderMap.
Signed-off-by: Asra Ali <[email protected]> |
_equalCreateSeqStmt(const CreateSeqStmt *a, const CreateSeqStmt *b)
{
COMPARE_NODE_FIELD(sequence);
COMPARE_NODE_FIELD(options);
COMPARE_SCALAR_FIELD(ownerId);
return true;
} | 0 | [
"CWE-362"
]
| postgres | 5f173040e324f6c2eebb90d86cf1b0cdb5890f0a | 282,005,967,977,358,900,000,000,000,000,000,000,000 | 8 | Avoid repeated name lookups during table and index DDL.
If the name lookups come to different conclusions due to concurrent
activity, we might perform some parts of the DDL on a different table
than other parts. At least in the case of CREATE INDEX, this can be
used to cause the permissions checks to be performed against a
different table than the index creation, allowing for a privilege
escalation attack.
This changes the calling convention for DefineIndex, CreateTrigger,
transformIndexStmt, transformAlterTableStmt, CheckIndexCompatible
(in 9.2 and newer), and AlterTable (in 9.1 and older). In addition,
CheckRelationOwnership is removed in 9.2 and newer and the calling
convention is changed in older branches. A field has also been added
to the Constraint node (FkConstraint in 8.4). Third-party code calling
these functions or using the Constraint node will require updating.
Report by Andres Freund. Patch by Robert Haas and Andres Freund,
reviewed by Tom Lane.
Security: CVE-2014-0062 |
pimv2_check_checksum(netdissect_options *ndo, const u_char *bp,
const u_char *bp2, u_int len)
{
const struct ip *ip;
u_int cksum;
if (!ND_TTEST2(bp[0], len)) {
/* We don't have all the data. */
return (UNVERIFIED);
}
ip = (const struct ip *)bp2;
if (IP_V(ip) == 4) {
struct cksum_vec vec[1];
vec[0].ptr = bp;
vec[0].len = len;
cksum = in_cksum(vec, 1);
return (cksum ? INCORRECT : CORRECT);
} else if (IP_V(ip) == 6) {
const struct ip6_hdr *ip6;
ip6 = (const struct ip6_hdr *)bp2;
cksum = nextproto6_cksum(ndo, ip6, bp, len, len, IPPROTO_PIM);
return (cksum ? INCORRECT : CORRECT);
} else {
return (UNVERIFIED);
}
} | 0 | [
"CWE-125",
"CWE-787"
]
| tcpdump | 6fca58f5f9c96749a575f52e20598ad43f5bdf30 | 184,716,189,859,883,260,000,000,000,000,000,000,000 | 28 | CVE-2017-12996/PIMv2: Make sure PIM TLVs have the right length.
We do bounds checks based on the TLV length, so if the TLV's length is
too short, and we don't check for that, we could end up fetching data
past the end of the TLV - including past the length of the captured data
in the packet.
This fixes a buffer over-read discovered by Forcepoint's security
researchers Otto Airamo & Antti Levomäki.
Add tests using the capture files supplied by the reporter(s). |
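The commit message explains that later bounds checks are driven by the TLV's advertised length, so a too-short length lets reads run past the TLV. As a hedged illustration (hypothetical struct and names, not tcpdump's code), a validation of that invariant might look like:

```c
#include <stdint.h>
#include <stddef.h>
#include <string.h>

struct tlv_hdr {
    uint16_t type;
    uint16_t len;   /* length of the value, in bytes */
};

/* Return nonzero only if the TLV's advertised length is both long enough
 * for its type and contained within the captured data. */
static int tlv_ok(const uint8_t *p, size_t caplen, size_t min_value_len)
{
    struct tlv_hdr h;
    if (caplen < sizeof(h))
        return 0;                       /* truncated capture */
    memcpy(&h, p, sizeof(h));
    if (h.len < min_value_len)
        return 0;                       /* TLV too short for its type */
    if ((size_t)h.len > caplen - sizeof(h))
        return 0;                       /* TLV runs past captured data */
    return 1;
}
```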
struct rpc_task *rpc_run_task(const struct rpc_task_setup *task_setup_data)
{
struct rpc_task *task;
task = rpc_new_task(task_setup_data);
if (IS_ERR(task))
goto out;
rpc_task_set_client(task, task_setup_data->rpc_client);
rpc_task_set_rpc_message(task, task_setup_data->rpc_message);
if (task->tk_action == NULL)
rpc_call_start(task);
atomic_inc(&task->tk_count);
rpc_execute(task);
out:
return task;
} | 0 | [
"CWE-400",
"CWE-399",
"CWE-703"
]
| linux | 0b760113a3a155269a3fba93a409c640031dd68f | 222,566,660,238,824,570,000,000,000,000,000,000,000 | 19 | NLM: Don't hang forever on NLM unlock requests
If the NLM daemon is killed on the NFS server, we can currently end up
hanging forever on an 'unlock' request, instead of aborting. Basically,
if the rpcbind request fails, or the server keeps returning garbage, we
really want to quit instead of retrying.
Tested-by: Vasily Averin <[email protected]>
Signed-off-by: Trond Myklebust <[email protected]>
Cc: [email protected] |
mlx5_tx_mseg_memcpy(uint8_t *pdst,
struct mlx5_txq_local *__rte_restrict loc,
unsigned int len,
unsigned int must,
unsigned int olx __rte_unused)
{
struct rte_mbuf *mbuf;
unsigned int part, dlen, copy = 0;
uint8_t *psrc;
MLX5_ASSERT(len);
do {
/* Allow zero length packets, must check first. */
dlen = rte_pktmbuf_data_len(loc->mbuf);
if (dlen <= loc->mbuf_off) {
/* Exhausted packet, just free. */
mbuf = loc->mbuf;
loc->mbuf = mbuf->next;
rte_pktmbuf_free_seg(mbuf);
loc->mbuf_off = 0;
MLX5_ASSERT(loc->mbuf_nseg > 1);
MLX5_ASSERT(loc->mbuf);
--loc->mbuf_nseg;
if (loc->mbuf->ol_flags & PKT_TX_DYNF_NOINLINE) {
unsigned int diff;
if (copy >= must) {
/*
* We already copied the minimal
* requested amount of data.
*/
return copy;
}
diff = must - copy;
if (diff <= rte_pktmbuf_data_len(loc->mbuf)) {
/*
* Copy only the minimal required
* part of the data buffer. Limit amount
* of data to be copied to the length of
* available space.
*/
len = RTE_MIN(len, diff);
}
}
continue;
}
dlen -= loc->mbuf_off;
psrc = rte_pktmbuf_mtod_offset(loc->mbuf, uint8_t *,
loc->mbuf_off);
part = RTE_MIN(len, dlen);
rte_memcpy(pdst, psrc, part);
copy += part;
loc->mbuf_off += part;
len -= part;
if (!len) {
if (loc->mbuf_off >= rte_pktmbuf_data_len(loc->mbuf)) {
loc->mbuf_off = 0;
/* Exhausted packet, just free. */
mbuf = loc->mbuf;
loc->mbuf = mbuf->next;
rte_pktmbuf_free_seg(mbuf);
loc->mbuf_off = 0;
MLX5_ASSERT(loc->mbuf_nseg >= 1);
--loc->mbuf_nseg;
}
return copy;
}
pdst += part;
} while (true);
} | 0 | []
| dpdk-stable | ef311075d21b4f68c8ccfc46a00cda7c2a0bf4cc | 233,348,992,156,016,000,000,000,000,000,000,000,000 | 70 | net/mlx5: fix Rx queue recovery mechanism
The local variables are getting inconsistent in data receiving routines
after queue error recovery.
Receive queue consumer index is getting wrong, need to reset one to the
size of the queue (as RQ was fully replenished in recovery procedure).
In MPRQ case, also the local consumed strd variable should be reset.
CVE-2022-28199
Fixes: 88c0733 ("net/mlx5: extend Rx completion with error handling")
Signed-off-by: Alexander Kozyrev <[email protected]>
Signed-off-by: Matan Azrad <[email protected]> |
int qeth_core_load_discipline(struct qeth_card *card,
enum qeth_discipline_id discipline)
{
int rc = 0;
mutex_lock(&qeth_mod_mutex);
switch (discipline) {
case QETH_DISCIPLINE_LAYER3:
card->discipline = try_then_request_module(
symbol_get(qeth_l3_discipline), "qeth_l3");
break;
case QETH_DISCIPLINE_LAYER2:
card->discipline = try_then_request_module(
symbol_get(qeth_l2_discipline), "qeth_l2");
break;
}
if (!card->discipline) {
dev_err(&card->gdev->dev, "There is no kernel module to "
"support discipline %d\n", discipline);
rc = -EINVAL;
}
mutex_unlock(&qeth_mod_mutex);
return rc;
} | 0 | [
"CWE-200",
"CWE-119"
]
| linux | 6fb392b1a63ae36c31f62bc3fc8630b49d602b62 | 217,747,342,291,477,300,000,000,000,000,000,000,000 | 23 | qeth: avoid buffer overflow in snmp ioctl
Check user-defined length in snmp ioctl request and allow request
only if it fits into a qeth command buffer.
Signed-off-by: Ursula Braun <[email protected]>
Signed-off-by: Frank Blaschka <[email protected]>
Reviewed-by: Heiko Carstens <[email protected]>
Reported-by: Nico Golde <[email protected]>
Reported-by: Fabian Yamaguchi <[email protected]>
Cc: <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
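The fix described above validates a user-supplied length against the command buffer before copying. As a hedged illustration (the buffer and header sizes below are assumed placeholders, not the qeth driver's actual constants):

```c
#include <stdint.h>
#include <string.h>
#include <errno.h>

#define CMD_BUF_SIZE 4096   /* assumed command-buffer size */
#define SNMP_HDR_LEN 16     /* assumed fixed header length */

/* Reject requests that cannot fit into one command buffer. */
static int snmp_copy_request(uint8_t *cmd_buf, const uint8_t *user_data,
                             size_t user_len)
{
    if (user_len > CMD_BUF_SIZE - SNMP_HDR_LEN)
        return -EINVAL;
    memcpy(cmd_buf + SNMP_HDR_LEN, user_data, user_len);
    return 0;
}
```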
static void tcp_set_rto(struct sock *sk)
{
const struct tcp_sock *tp = tcp_sk(sk);
/* Old crap is replaced with new one. 8)
*
* More seriously:
* 1. If rtt variance happened to be less 50msec, it is hallucination.
* It cannot be less due to utterly erratic ACK generation made
* at least by solaris and freebsd. "Erratic ACKs" has _nothing_
* to do with delayed acks, because at cwnd>2 true delack timeout
* is invisible. Actually, Linux-2.4 also generates erratic
* ACKs in some circumstances.
*/
inet_csk(sk)->icsk_rto = __tcp_set_rto(tp);
/* 2. Fixups made earlier cannot be right.
* If we do not estimate RTO correctly without them,
* all the algo is pure shit and should be replaced
* with correct one. It is exactly, which we pretend to do.
*/
/* NOTE: clamping at TCP_RTO_MIN is not required, current algo
* guarantees that rto is higher.
*/
tcp_bound_rto(sk);
} | 0 | [
"CWE-703",
"CWE-189"
]
| linux | 8b8a321ff72c785ed5e8b4cf6eda20b35d427390 | 147,723,130,516,680,300,000,000,000,000,000,000,000 | 26 | tcp: fix zero cwnd in tcp_cwnd_reduction
Patch 3759824da87b ("tcp: PRR uses CRB mode by default and SS mode
conditionally") introduced a bug that cwnd may become 0 when both
inflight and sndcnt are 0 (cwnd = inflight + sndcnt). This may lead
to a div-by-zero if the connection starts another cwnd reduction
phase by setting tp->prior_cwnd to the current cwnd (0) in
tcp_init_cwnd_reduction().
To prevent this we skip PRR operation when nothing is acked or
sacked. Then cwnd must be positive in all cases as long as ssthresh
is positive:
1) The proportional reduction mode
inflight > ssthresh > 0
2) The reduction bound mode
a) inflight == ssthresh > 0
b) inflight < ssthresh
sndcnt > 0 since newly_acked_sacked > 0 and inflight < ssthresh
Therefore in all cases inflight and sndcnt can not both be 0.
We check invalid tp->prior_cwnd to avoid potential div0 bugs.
In reality this bug is triggered only with a sequence of less common
events. For example, the connection is terminating an ECN-triggered
cwnd reduction with an inflight 0, then it receives reordered/old
ACKs or DSACKs from prior transmission (which acks nothing). Or the
connection is in fast recovery stage that marks everything lost,
but fails to retransmit due to local issues, then receives data
packets from other end which acks nothing.
Fixes: 3759824da87b ("tcp: PRR uses CRB mode by default and SS mode conditionally")
Reported-by: Oleksandr Natalenko <[email protected]>
Signed-off-by: Yuchung Cheng <[email protected]>
Signed-off-by: Neal Cardwell <[email protected]>
Signed-off-by: Eric Dumazet <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
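The commit message's argument is that cwnd = inflight + sndcnt stays positive once PRR is skipped when nothing was acked or sacked and a zero prior_cwnd is rejected. A hedged sketch of that guard (simplified field and function names, not the kernel's tcp_cwnd_reduction()):

```c
#include <stdint.h>

struct tcp_state {
    uint32_t snd_ssthresh;   /* slow-start threshold */
    uint32_t prior_cwnd;     /* cwnd when the reduction began */
    uint32_t prr_delivered;  /* packets delivered during reduction */
    uint32_t prr_out;        /* packets sent during reduction */
    uint32_t snd_cwnd;       /* congestion window */
};

static void cwnd_reduction(struct tcp_state *tp,
                           uint32_t newly_acked_sacked, uint32_t inflight)
{
    uint64_t dividend;
    uint32_t quota, sndcnt;

    /* The guard from the commit message: with nothing acked or sacked,
     * or an invalid prior_cwnd of 0, skip PRR entirely. This keeps
     * cwnd = inflight + sndcnt positive and avoids a division by zero. */
    if (newly_acked_sacked == 0 || tp->prior_cwnd == 0)
        return;

    tp->prr_delivered += newly_acked_sacked;
    if (inflight > tp->snd_ssthresh) {
        /* Proportional mode: send in proportion to ssthresh/prior_cwnd. */
        dividend = (uint64_t)tp->snd_ssthresh * tp->prr_delivered +
                   tp->prior_cwnd - 1;
        quota = (uint32_t)(dividend / tp->prior_cwnd);
        sndcnt = quota > tp->prr_out ? quota - tp->prr_out : 0;
    } else {
        /* Reduction-bound mode: keep pace with what was delivered. */
        sndcnt = newly_acked_sacked;
    }
    tp->snd_cwnd = inflight + sndcnt;
}
```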
static int __init ipddp_init_module(void)
{
dev_ipddp = ipddp_init();
return PTR_ERR_OR_ZERO(dev_ipddp);
} | 0 | [
"CWE-200"
]
| linux | 9824dfae5741275473a23a7ed5756c7b6efacc9d | 93,616,037,596,146,050,000,000,000,000,000,000,000 | 5 | net/appletalk: fix minor pointer leak to userspace in SIOCFINDIPDDPRT
Fields ->dev and ->next of struct ipddp_route may be copied to
userspace on the SIOCFINDIPDDPRT ioctl. This is only accessible
to CAP_NET_ADMIN though. Let's manually copy the relevant fields
instead of using memcpy().
BugLink: http://blog.infosectcbr.com.au/2018/09/linux-kernel-infoleaks.html
Cc: Jann Horn <[email protected]>
Signed-off-by: Willy Tarreau <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
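The fix pattern described above is to copy only the fields userspace needs instead of memcpy()ing a whole kernel struct whose pointer fields would leak. A hedged illustration with simplified stand-in structs (not the actual ipddp_route layout):

```c
#include <string.h>

struct route_kernel {
    void *dev;                  /* kernel pointer: must not reach userspace */
    struct route_kernel *next;  /* kernel pointer: must not reach userspace */
    unsigned short net;
    unsigned char  node;
};

struct route_user {
    unsigned short net;
    unsigned char  node;
};

static void route_to_user(struct route_user *dst,
                          const struct route_kernel *src)
{
    memset(dst, 0, sizeof(*dst));   /* no uninitialized padding escapes */
    dst->net  = src->net;
    dst->node = src->node;          /* pointers deliberately not copied */
}
```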
static void uas_add_work(struct uas_cmd_info *cmdinfo)
{
struct scsi_pointer *scp = (void *)cmdinfo;
struct scsi_cmnd *cmnd = container_of(scp, struct scsi_cmnd, SCp);
struct uas_dev_info *devinfo = cmnd->device->hostdata;
lockdep_assert_held(&devinfo->lock);
cmdinfo->state |= IS_IN_WORK_LIST;
schedule_work(&devinfo->work);
} | 0 | [
"CWE-125"
]
| linux | 786de92b3cb26012d3d0f00ee37adf14527f35c4 | 302,478,863,041,495,970,000,000,000,000,000,000,000 | 10 | USB: uas: fix bug in handling of alternate settings
The uas driver has a subtle bug in the way it handles alternate
settings. The uas_find_uas_alt_setting() routine returns an
altsetting value (the bAlternateSetting number in the descriptor), but
uas_use_uas_driver() then treats that value as an index to the
intf->altsetting array, which it isn't.
Normally this doesn't cause any problems because the various
alternate settings have bAlternateSetting values 0, 1, 2, ..., so the
value is equal to the index in the array. But this is not guaranteed,
and Andrey Konovalov used the syzkaller fuzzer with KASAN to get a
slab-out-of-bounds error by violating this assumption.
This patch fixes the bug by making uas_find_uas_alt_setting() return a
pointer to the altsetting entry rather than either the value or the
index. Pointers are less subject to misinterpretation.
Signed-off-by: Alan Stern <[email protected]>
Reported-by: Andrey Konovalov <[email protected]>
Tested-by: Andrey Konovalov <[email protected]>
CC: Oliver Neukum <[email protected]>
CC: <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]> |
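The bug class described above is treating bAlternateSetting (an identifier) as an array index. A hedged sketch of the fixed approach, returning a pointer to the matching entry so the two can never be confused (simplified structs, not the USB core's):

```c
#include <stddef.h>

struct altsetting {
    int bAlternateSetting;   /* an ID from the descriptor, not an index */
};

static const struct altsetting *
find_alt(const struct altsetting *alts, int n, int wanted)
{
    for (int i = 0; i < n; i++)
        if (alts[i].bAlternateSetting == wanted)
            return &alts[i];   /* a pointer is immune to index confusion */
    return NULL;
}
```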
static PyObject *checkPassword(PyObject *self, PyObject *args)
{
const char *user = NULL;
const char *pswd = NULL;
const char *service = NULL;
const char *default_realm = NULL;
const int verify = 1;
int result = 0;
if (!PyArg_ParseTuple(args, "ssssb", &user, &pswd, &service, &default_realm, &verify))
return NULL;
result = authenticate_user_krb5pwd(user, pswd, service, default_realm, verify);
if (result)
return Py_INCREF(Py_True), Py_True;
else
return NULL;
} | 0 | [
"CWE-287"
]
| pykerberos | 02d13860b25fab58e739f0e000bed0067b7c6f9c | 173,919,310,497,026,300,000,000,000,000,000,000,000 | 19 | adding KDC verification support (enabled by default) |
read_config_file(void)
{
init_data(conf_file, global_init_keywords);
} | 0 | [
"CWE-59",
"CWE-61"
]
| keepalived | 04f2d32871bb3b11d7dc024039952f2fe2750306 | 33,618,046,669,416,904,000,000,000,000,000,000,000 | 4 | When opening files for write, ensure they aren't symbolic links
Issue #1048 identified that if, for example, a non privileged user
created a symbolic link from /etc/keepalvied.data to /etc/passwd,
writing to /etc/keepalived.data (which could be invoked via DBus)
would cause /etc/passwd to be overwritten.
This commit stops keepalived writing to pathnames where the ultimate
component is a symbolic link, by setting O_NOFOLLOW whenever opening
a file for writing.
This might break some setups, where, for example, /etc/keepalived.data
was a symbolic link to /home/fred/keepalived.data. If this was the case,
instead create a symbolic link from /home/fred/keepalived.data to
/tmp/keepalived.data, so that the file is still accessible via
/home/fred/keepalived.data.
There doesn't appear to be a way around this backward incompatibility,
since even checking if the pathname is a symbolic link prior to opening
for writing would create a race condition.
Signed-off-by: Quentin Armitage <[email protected]> |
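The approach described above is to pass O_NOFOLLOW when opening for write, so the open fails if the final path component is a symbolic link, with no check-then-open race. A minimal hedged sketch (not keepalived's actual helper):

```c
#include <fcntl.h>
#include <errno.h>
#include <stdio.h>

/* Refuse to write through a symlink: O_NOFOLLOW makes open() fail with
 * ELOOP if the last path component is a symbolic link. */
static int open_for_write_nofollow(const char *path)
{
    int fd = open(path, O_WRONLY | O_CREAT | O_TRUNC | O_NOFOLLOW, 0644);
    if (fd < 0 && errno == ELOOP)
        fprintf(stderr, "%s: refusing to write through a symlink\n", path);
    return fd;
}
```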
static inline void barrier_wait(atomic_t *b)
{
while (atomic_read(b) == 0)
asm ("pause");
mfence();
} | 0 | [
"CWE-269"
]
| coreboot | afb7a814783cda12f5b72167163b9109ee1d15a7 | 156,448,160,062,183,890,000,000,000,000,000,000,000 | 6 | cpu/x86/smm: Introduce SMM module loader version 2
Xeon-SP Skylake Scalable Processor can have 36 CPU threads (18 cores).
Current coreboot SMM is unable to handle more than ~32 CPU threads.
This patch introduces a version 2 of the SMM module loader which
addresses this problem. Having two versions of the SMM module loader
prevents any issues to current projects. Future Xeon-SP products will
be using this version of the SMM loader. Subsequent patches will
enable board specific functionality for Xeon-SP.
The reason for moving to version 2 is the state save area begins to
encroach upon the SMI handling code when more than 32 CPU threads are
in the system. This can cause system hangs, reboots, etc. The second
change is related to staggered entry points with simple near jumps. In
the current loader, near jumps will not work because the CPU is jumping
within the same code segment. In version 2, "far" address jumps are
necessary therefore protected mode must be enabled first. The SMM
layout and how the CPUs are staggered are documented in the code.
By making the modifications above, this allows the smm module loader to
expand easily as more CPU threads are added.
TEST=build for Tiogapass platform under OCP mainboard. Enable the
following in Kconfig.
select CPU_INTEL_COMMON_SMM
select SOC_INTEL_COMMON_BLOCK_SMM
select SMM_TSEG
select HAVE_SMI_HANDLER
select ACPI_INTEL_HARDWARE_SLEEP_VALUES
Debug console will show all 36 cores relocated. Further tested by
generating SMI's to port 0xb2 using XDP/ITP HW debugger and ensured all
cores entering and exiting SMM properly. In addition, booted to Linux
5.4 kernel and observed no issues during mp init.
Change-Id: I00a23a5f2a46110536c344254868390dbb71854c
Signed-off-by: Rocky Phagura <[email protected]>
Reviewed-on: https://review.coreboot.org/c/coreboot/+/43684
Tested-by: build bot (Jenkins) <[email protected]>
Reviewed-by: Angel Pons <[email protected]> |
int ethtool_op_set_tso(struct net_device *dev, u32 data)
{
if (data)
dev->features |= NETIF_F_TSO;
else
dev->features &= ~NETIF_F_TSO;
return 0;
} | 0 | [
"CWE-190"
]
| linux-2.6 | db048b69037e7fa6a7d9e95a1271a50dc08ae233 | 38,045,927,300,178,263,000,000,000,000,000,000,000 | 9 | ethtool: Fix potential kernel buffer overflow in ETHTOOL_GRXCLSRLALL
On a 32-bit machine, info.rule_cnt >= 0x40000000 leads to integer
overflow and the buffer may be smaller than needed. Since
ETHTOOL_GRXCLSRLALL is unprivileged, this can presumably be used for at
least denial of service.
Signed-off-by: Ben Hutchings <[email protected]>
Cc: [email protected]
Signed-off-by: David S. Miller <[email protected]> |
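The overflow described above arises because count * element-size wraps on a 32-bit machine once count >= 0x40000000, yielding an undersized buffer. A hedged sketch of the check (simplified names, not the kernel's ethtool code):

```c
#include <stdint.h>
#include <stdlib.h>

static void *alloc_rule_locs(uint32_t rule_cnt)
{
    /* On 32-bit, rule_cnt * sizeof(uint32_t) wraps for rule_cnt >= 2^30,
     * so reject such counts instead of allocating a too-small buffer. */
    if (rule_cnt > SIZE_MAX / sizeof(uint32_t))
        return NULL;
    return malloc(rule_cnt * sizeof(uint32_t));
}
```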
void tcp_disable_fack(struct tcp_sock *tp)
{
/* RFC3517 uses different metric in lost marker => reset on change */
if (tcp_is_fack(tp))
tp->lost_skb_hint = NULL;
tp->rx_opt.sack_ok &= ~TCP_FACK_ENABLED;
} | 0 | [
"CWE-703",
"CWE-189"
]
| linux | 8b8a321ff72c785ed5e8b4cf6eda20b35d427390 | 116,989,082,265,233,600,000,000,000,000,000,000,000 | 7 | tcp: fix zero cwnd in tcp_cwnd_reduction
Patch 3759824da87b ("tcp: PRR uses CRB mode by default and SS mode
conditionally") introduced a bug that cwnd may become 0 when both
inflight and sndcnt are 0 (cwnd = inflight + sndcnt). This may lead
to a div-by-zero if the connection starts another cwnd reduction
phase by setting tp->prior_cwnd to the current cwnd (0) in
tcp_init_cwnd_reduction().
To prevent this we skip PRR operation when nothing is acked or
sacked. Then cwnd must be positive in all cases as long as ssthresh
is positive:
1) The proportional reduction mode
inflight > ssthresh > 0
2) The reduction bound mode
a) inflight == ssthresh > 0
b) inflight < ssthresh
sndcnt > 0 since newly_acked_sacked > 0 and inflight < ssthresh
Therefore in all cases inflight and sndcnt can not both be 0.
We check invalid tp->prior_cwnd to avoid potential div0 bugs.
In reality this bug is triggered only with a sequence of less common
events. For example, the connection is terminating an ECN-triggered
cwnd reduction with an inflight 0, then it receives reordered/old
ACKs or DSACKs from prior transmission (which acks nothing). Or the
connection is in fast recovery stage that marks everything lost,
but fails to retransmit due to local issues, then receives data
packets from other end which acks nothing.
Fixes: 3759824da87b ("tcp: PRR uses CRB mode by default and SS mode conditionally")
Reported-by: Oleksandr Natalenko <[email protected]>
Signed-off-by: Yuchung Cheng <[email protected]>
Signed-off-by: Neal Cardwell <[email protected]>
Signed-off-by: Eric Dumazet <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
void ESTreeIRGen::emitRestElement(
bool declInit,
ESTree::RestElementNode *rest,
hermes::irgen::ESTreeIRGen::IteratorRecord iteratorRecord,
hermes::AllocStackInst *iteratorDone,
SharedExceptionHandler *handler) {
// 13.3.3.8 BindingRestElement:...BindingIdentifier
auto *notDoneBlock = Builder.createBasicBlock(Builder.getFunction());
auto *newValueBlock = Builder.createBasicBlock(Builder.getFunction());
auto *doneBlock = Builder.createBasicBlock(Builder.getFunction());
llvh::Optional<LReference> lref;
if (canCreateLRefWithoutSideEffects(rest->_argument)) {
lref = createLRef(rest->_argument, declInit);
} else {
emitTryWithSharedHandler(handler, [this, &lref, rest, declInit]() {
lref = createLRef(rest->_argument, declInit);
});
}
auto *A = Builder.createAllocArrayInst({}, 0);
auto *n = Builder.createAllocStackInst(genAnonymousLabelName("n"));
// n = 0.
Builder.createStoreStackInst(Builder.getLiteralPositiveZero(), n);
Builder.createCondBranchInst(
Builder.createLoadStackInst(iteratorDone), doneBlock, notDoneBlock);
// notDoneBlock:
Builder.setInsertionBlock(notDoneBlock);
auto *stepValue = emitIteratorNext(iteratorRecord);
auto *stepDone = emitIteratorComplete(iteratorRecord);
Builder.createStoreStackInst(stepDone, iteratorDone);
Builder.createCondBranchInst(stepDone, doneBlock, newValueBlock);
// newValueBlock:
Builder.setInsertionBlock(newValueBlock);
auto *nVal = Builder.createLoadStackInst(n);
nVal->setType(Type::createNumber());
// A[n] = stepValue;
// Unfortunately this can throw because our arrays can have limited range.
// The spec doesn't specify what to do in this case, but the reasonable thing
// to do is to what we would if this was a for-of loop doing the same thing.
// See section BindingRestElement:...BindingIdentifier, step f and g:
// https://www.ecma-international.org/ecma-262/9.0/index.html#sec-destructuring-binding-patterns-runtime-semantics-iteratorbindinginitialization
emitTryWithSharedHandler(handler, [this, stepValue, A, nVal]() {
Builder.createStorePropertyInst(stepValue, A, nVal);
});
// ++n;
auto add = Builder.createBinaryOperatorInst(
nVal, Builder.getLiteralNumber(1), BinaryOperatorInst::OpKind::AddKind);
add->setType(Type::createNumber());
Builder.createStoreStackInst(add, n);
Builder.createBranchInst(notDoneBlock);
// doneBlock:
Builder.setInsertionBlock(doneBlock);
if (lref->canStoreWithoutSideEffects()) {
lref->emitStore(A);
} else {
emitTryWithSharedHandler(handler, [&lref, A]() { lref->emitStore(A); });
}
} | 0 | [
"CWE-125",
"CWE-787"
]
| hermes | 091835377369c8fd5917d9b87acffa721ad2a168 | 269,548,412,858,814,000,000,000,000,000,000,000,000 | 65 | Correctly restore whether or not a function is an inner generator
Summary:
If a generator was large enough to be lazily compiled, we would lose
that information when reconstituting the function's context. This meant
the function was generated as a regular function instead of a generator.
#utd-hermes-ignore-android
Reviewed By: tmikov
Differential Revision: D23580247
fbshipit-source-id: af5628bf322cbdc7c7cdfbb5f8d0756328518ea1 |
static int cbs_av1_ref_tile_data(CodedBitstreamContext *ctx,
CodedBitstreamUnit *unit,
GetBitContext *gbc,
AV1RawTileData *td)
{
int pos;
pos = get_bits_count(gbc);
if (pos >= 8 * unit->data_size) {
av_log(ctx->log_ctx, AV_LOG_ERROR, "Bitstream ended before "
"any data in tile group (%d bits read).\n", pos);
return AVERROR_INVALIDDATA;
}
// Must be byte-aligned at this point.
av_assert0(pos % 8 == 0);
td->data_ref = av_buffer_ref(unit->data_ref);
if (!td->data_ref)
return AVERROR(ENOMEM);
td->data = unit->data + pos / 8;
td->data_size = unit->data_size - pos / 8;
return 0;
} | 0 | [
"CWE-20",
"CWE-129"
]
| FFmpeg | b97a4b658814b2de8b9f2a3bce491c002d34de31 | 299,775,878,508,230,370,000,000,000,000,000,000,000 | 25 | cbs_av1: Fix reading of overlong uvlc codes
The specification allows 2^32-1 to be encoded as any number of zeroes
greater than 31, followed by a one. This previously failed because the
trace code would overflow the array containing the string representation
of the bits if there were more than 63 zeroes. Fix that by splitting the
trace output into batches, and at the same time move it out of the default
path.
(While this seems likely to be a specification error, libaom does support
it so we probably should as well.)
From a test case by keval shah <[email protected]>.
Reviewed-by: Michael Niedermayer <[email protected]> |
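Per the commit message, the spec (and libaom) allow 2^32-1 to be encoded as any number of zeroes greater than 31 followed by a one. A hedged sketch of a uvlc reader tolerating such overlong encodings, over a simplified bit reader rather than FFmpeg's GetBitContext:

```c
#include <stdint.h>
#include <stddef.h>

struct bitreader { const uint8_t *buf; size_t len; size_t pos; /* bits */ };

static int get_bit(struct bitreader *br)
{
    if (br->pos >= 8 * br->len)
        return -1;                                  /* bitstream ended */
    int bit = (br->buf[br->pos >> 3] >> (7 - (br->pos & 7))) & 1;
    br->pos++;
    return bit;
}

static int read_uvlc(struct bitreader *br, uint32_t *out)
{
    uint32_t lz = 0, value = 0;
    int bit;
    while ((bit = get_bit(br)) == 0)
        lz++;                        /* any number of zeroes is legal */
    if (bit < 0)
        return -1;
    if (lz >= 32) {                  /* overlong encoding: value is 2^32-1 */
        *out = UINT32_MAX;
        return 0;
    }
    for (uint32_t i = 0; i < lz; i++) {
        if ((bit = get_bit(br)) < 0)
            return -1;
        value = (value << 1) | (uint32_t)bit;
    }
    *out = value + (((uint32_t)1 << lz) - 1);
    return 0;
}
```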
int unit_set_description(Unit *u, const char *description) {
int r;
assert(u);
r = free_and_strdup(&u->description, empty_to_null(description));
if (r < 0)
return r;
if (r > 0)
unit_add_to_dbus_queue(u);
return 0;
} | 0 | [
"CWE-269"
]
| systemd | bf65b7e0c9fc215897b676ab9a7c9d1c688143ba | 310,604,409,849,524,470,000,000,000,000,000,000,000 | 13 | core: imply NNP and SUID/SGID restriction for DynamicUser=yes service
Let's be safe, rather than sorry. This way DynamicUser=yes services can
neither take benefit of, nor create SUID/SGID binaries.
Given that DynamicUser= is a recent addition only we should be able to
get away with turning this on, even though this is strictly speaking a
binary compatibility breakage. |
TPML_CCA_Marshal(TPML_CCA *source, BYTE **buffer, INT32 *size)
{
UINT16 written = 0;
UINT32 i;
written += UINT32_Marshal(&source->count, buffer, size);
for (i = 0 ; i < source->count ; i++) {
written += TPMA_CC_Marshal(&source->commandAttributes[i], buffer, size);
}
return written;
} | 0 | [
"CWE-787"
]
| libtpms | 3ef9b26cb9f28bd64d738bff9505a20d4eb56acd | 37,251,068,084,047,003,000,000,000,000,000,000,000 | 11 | tpm2: Add maxSize parameter to TPM2B_Marshal for sanity checks
Add maxSize parameter to TPM2B_Marshal and assert on it checking
the size of the data intended to be marshaled versus the maximum
buffer size.
Signed-off-by: Stefan Berger <[email protected]> |
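The sanity check described above asserts that the data to be marshaled fits the fixed-size destination. A hedged sketch with simplified types (not libtpms' actual TPM2B_Marshal):

```c
#include <assert.h>
#include <stdint.h>
#include <string.h>

typedef struct {
    uint16_t size;
    uint8_t  buffer[64];   /* fixed example capacity */
} TPM2B;

/* Marshal the 2-byte size (big-endian) followed by the payload, asserting
 * that the payload does not exceed the caller-provided maximum. */
static uint16_t tpm2b_marshal(const TPM2B *src, uint16_t max_size,
                              uint8_t **buf)
{
    assert(src->size <= max_size);   /* the added sanity check */
    (*buf)[0] = (uint8_t)(src->size >> 8);
    (*buf)[1] = (uint8_t)(src->size & 0xff);
    memcpy(*buf + 2, src->buffer, src->size);
    *buf += 2u + src->size;
    return (uint16_t)(2u + src->size);
}
```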
static void forlist (LexState *ls, TString *indexname) {
/* forlist -> NAME {,NAME} IN explist forbody */
FuncState *fs = ls->fs;
expdesc e;
int nvars = 5; /* gen, state, control, toclose, 'indexname' */
int line;
int base = fs->freereg;
/* create control variables */
new_localvarliteral(ls, "(for state)");
new_localvarliteral(ls, "(for state)");
new_localvarliteral(ls, "(for state)");
new_localvarliteral(ls, "(for state)");
/* create declared variables */
new_localvar(ls, indexname);
while (testnext(ls, ',')) {
new_localvar(ls, str_checkname(ls));
nvars++;
}
checknext(ls, TK_IN);
line = ls->linenumber;
adjust_assign(ls, 4, explist(ls, &e), &e);
adjustlocalvars(ls, 4); /* control variables */
marktobeclosed(fs); /* last control var. must be closed */
luaK_checkstack(fs, 3); /* extra space to call generator */
forbody(ls, base, line, nvars - 4, 1);
} | 0 | [
"CWE-125"
]
| lua | 1f3c6f4534c6411313361697d98d1145a1f030fa | 231,449,442,859,072,380,000,000,000,000,000,000,000 | 26 | Bug: Lua can generate wrong code when _ENV is <const> |
bool CModule::OnWebPreRequest(CWebSock& WebSock, const CString& sPageName) {
return false;
} | 0 | [
"CWE-20",
"CWE-264"
]
| znc | 8de9e376ce531fe7f3c8b0aa4876d15b479b7311 | 310,769,440,550,804,100,000,000,000,000,000,000,000 | 3 | Fix remote code execution and privilege escalation vulnerability.
To trigger this, need to have a user already.
Thanks for Jeriko One <[email protected]> for finding and reporting this.
CVE-2019-12816 |
void bpf_prog_array_free(struct bpf_prog_array __rcu *progs)
{
if (!progs ||
progs == (struct bpf_prog_array __rcu *)&empty_prog_array.hdr)
return;
kfree_rcu(progs, rcu);
} | 0 | [
"CWE-120"
]
| linux | 050fad7c4534c13c8eb1d9c2ba66012e014773cb | 280,560,310,348,020,900,000,000,000,000,000,000,000 | 7 | bpf: fix truncated jump targets on heavy expansions
Recently during testing, I ran into the following panic:
[ 207.892422] Internal error: Accessing user space memory outside uaccess.h routines: 96000004 [#1] SMP
[ 207.901637] Modules linked in: binfmt_misc [...]
[ 207.966530] CPU: 45 PID: 2256 Comm: test_verifier Tainted: G W 4.17.0-rc3+ #7
[ 207.974956] Hardware name: FOXCONN R2-1221R-A4/C2U4N_MB, BIOS G31FB18A 03/31/2017
[ 207.982428] pstate: 60400005 (nZCv daif +PAN -UAO)
[ 207.987214] pc : bpf_skb_load_helper_8_no_cache+0x34/0xc0
[ 207.992603] lr : 0xffff000000bdb754
[ 207.996080] sp : ffff000013703ca0
[ 207.999384] x29: ffff000013703ca0 x28: 0000000000000001
[ 208.004688] x27: 0000000000000001 x26: 0000000000000000
[ 208.009992] x25: ffff000013703ce0 x24: ffff800fb4afcb00
[ 208.015295] x23: ffff00007d2f5038 x22: ffff00007d2f5000
[ 208.020599] x21: fffffffffeff2a6f x20: 000000000000000a
[ 208.025903] x19: ffff000009578000 x18: 0000000000000a03
[ 208.031206] x17: 0000000000000000 x16: 0000000000000000
[ 208.036510] x15: 0000ffff9de83000 x14: 0000000000000000
[ 208.041813] x13: 0000000000000000 x12: 0000000000000000
[ 208.047116] x11: 0000000000000001 x10: ffff0000089e7f18
[ 208.052419] x9 : fffffffffeff2a6f x8 : 0000000000000000
[ 208.057723] x7 : 000000000000000a x6 : 00280c6160000000
[ 208.063026] x5 : 0000000000000018 x4 : 0000000000007db6
[ 208.068329] x3 : 000000000008647a x2 : 19868179b1484500
[ 208.073632] x1 : 0000000000000000 x0 : ffff000009578c08
[ 208.078938] Process test_verifier (pid: 2256, stack limit = 0x0000000049ca7974)
[ 208.086235] Call trace:
[ 208.088672] bpf_skb_load_helper_8_no_cache+0x34/0xc0
[ 208.093713] 0xffff000000bdb754
[ 208.096845] bpf_test_run+0x78/0xf8
[ 208.100324] bpf_prog_test_run_skb+0x148/0x230
[ 208.104758] sys_bpf+0x314/0x1198
[ 208.108064] el0_svc_naked+0x30/0x34
[ 208.111632] Code: 91302260 f9400001 f9001fa1 d2800001 (29500680)
[ 208.117717] ---[ end trace 263cb8a59b5bf29f ]---
The program itself which caused this had a long jump over the whole
instruction sequence where all of the inner instructions required
heavy expansions into multiple BPF instructions. Additionally, I also
had BPF hardening enabled which requires once more rewrites of all
constant values in order to blind them. Each time we rewrite insns,
bpf_adj_branches() would need to potentially adjust branch targets
which cross the patchlet boundary to accommodate for the additional
delta. Eventually that lead to the case where the target offset could
not fit into insn->off's upper 0x7fff limit anymore where then offset
wraps around becoming negative (in s16 universe), or vice versa
depending on the jump direction.
Therefore it becomes necessary to detect and reject any such occasions
in a generic way for native eBPF and cBPF to eBPF migrations. For
the latter we can simply check bounds in the bpf_convert_filter()'s
BPF_EMIT_JMP helper macro and bail out once we surpass limits. The
bpf_patch_insn_single() for native eBPF (and cBPF to eBPF in case
of subsequent hardening) is a bit more complex in that we need to
detect such truncations before hitting the bpf_prog_realloc(). Thus
the latter is split into an extra pass to probe problematic offsets
on the original program in order to fail early. With that in place
and carefully tested I no longer hit the panic and the rewrites are
rejected properly. The above example panic I've seen on bpf-next,
though the issue itself is generic in that a guard against this issue
in bpf seems more appropriate in this case.
Signed-off-by: Daniel Borkmann <[email protected]>
Acked-by: Martin KaFai Lau <[email protected]>
Signed-off-by: Alexei Starovoitov <[email protected]> |
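The core of the fix described above is detecting when a patched-in expansion pushes a relative jump target outside the signed 16-bit insn->off field. A hedged, simplified sketch of that check (not the actual bpf_adj_branches() logic):

```c
#include <stdint.h>

/* Adjust a jump offset by delta_insns inserted instructions; reject the
 * program if the new offset no longer fits in the s16 insn->off field. */
static int adjust_off(int16_t old_off, int delta_insns, int16_t *new_off)
{
    int32_t off = (int32_t)old_off + delta_insns;
    if (off < INT16_MIN || off > INT16_MAX)
        return -1;                    /* truncated jump target: reject */
    *new_off = (int16_t)off;
    return 0;
}
```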
poppler_document_get_n_pages (PopplerDocument *document)
{
g_return_val_if_fail (POPPLER_IS_DOCUMENT (document), 0);
return document->doc->getNumPages();
} | 0 | [
"CWE-476"
]
| poppler | f162ecdea0dda5dbbdb45503c1d55d9afaa41d44 | 182,165,827,250,931,680,000,000,000,000,000,000,000 | 6 | Fix crash on missing embedded file
Check whether an embedded file is actually present in the PDF
and show warning in that case.
https://bugs.freedesktop.org/show_bug.cgi?id=106137
https://gitlab.freedesktop.org/poppler/poppler/issues/236 |
NCURSES_SP_NAME(vid_attr) (NCURSES_SP_DCLx
attr_t newmode,
NCURSES_PAIRS_T pair_arg,
void *opts)
{
T((T_CALLED("vid_attr(%s,%d)"), _traceattr(newmode), (int) pair_arg));
returnCode(NCURSES_SP_NAME(vid_puts) (NCURSES_SP_ARGx
newmode,
pair_arg,
opts,
NCURSES_SP_NAME(_nc_putchar)));
} | 0 | []
| ncurses | 790a85dbd4a81d5f5d8dd02a44d84f01512ef443 | 52,569,121,979,729,670,000,000,000,000,000,000,000 | 12 | ncurses 6.2 - patch 20200531
+ correct configure version-check/warnng for g++ to allow for 10.x
+ re-enable "bel" in konsole-base (report by Nia Huang)
+ add linux-s entry (patch by Alexandre Montaron).
+ drop long-obsolete convert_configure.pl
+ add test/test_parm.c, for checking tparm changes.
+ improve parameter-checking for tparm, adding function _nc_tiparm() to
handle the most-used case, which accepts only numeric parameters
(report/testcase by "puppet-meteor").
+ use a more conservative estimate of the buffer-size in lib_tparm.c's
save_text() and save_number(), in case the sprintf() function
passes-through unexpected characters from a format specifier
(report/testcase by "puppet-meteor").
+ add a check for end-of-string in cvtchar to handle a malformed
string in infotocap (report/testcase by "puppet-meteor"). |
mailimf_delivering_info_parse(const char * message, size_t length,
size_t * indx,
struct mailimf_delivering_info ** result)
{
size_t cur_token;
clist * list;
struct mailimf_delivering_info * delivering_info;
int r;
int res;
cur_token = * indx;
r = mailimf_struct_multiple_parse(message, length, &cur_token,
&list,
(mailimf_struct_parser *)
mailimf_trace_resent_fields_parse,
(mailimf_struct_destructor *)
mailimf_trace_resent_fields_free);
if (r != MAILIMF_NO_ERROR) {
res = r;
goto err;
}
delivering_info = mailimf_delivering_info_new(list);
if (delivering_info == NULL) {
res = MAILIMF_ERROR_MEMORY;
goto free_list;
}
* result = delivering_info;
* indx = cur_token;
return MAILIMF_NO_ERROR;
free_list:
clist_foreach(list, (clist_func) mailimf_trace_resent_fields_free, NULL);
clist_free(list);
err:
return res;
} | 0 | [
"CWE-476"
]
| libetpan | 1fe8fbc032ccda1db9af66d93016b49c16c1f22d | 2,631,111,281,626,740,600,000,000,000,000,000,000 | 40 | Fixed crash #274 |
struct r_bin_pe_lib_t* PE_(r_bin_pe_get_libs)(struct PE_(r_bin_pe_obj_t)* bin) {
if (!bin) {
return NULL;
}
struct r_bin_pe_lib_t* libs = NULL;
PE_(image_import_directory) * curr_import_dir = NULL;
PE_(image_delay_import_directory) * curr_delay_import_dir = NULL;
PE_DWord name_off = 0;
SdbHash* lib_map = NULL;
ut64 off; //cache value
int index = 0;
int len = 0;
int max_libs = 20;
libs = calloc (max_libs + 1, sizeof(struct r_bin_pe_lib_t));
if (!libs) {
r_sys_perror ("malloc (libs)");
return NULL;
}
if (bin->import_directory_offset + bin->import_directory_size > bin->size) {
bprintf ("import directory offset bigger than file\n");
goto out_error;
}
lib_map = sdb_ht_new ();
off = bin->import_directory_offset;
if (off < bin->size && off > 0) {
void* last = NULL;
// normal imports
if (off + sizeof (PE_(image_import_directory)) > bin->size) {
goto out_error;
}
curr_import_dir = (PE_(image_import_directory)*)(bin->b->buf + off);
last = (char*) curr_import_dir + bin->import_directory_size;
while ((void*) (curr_import_dir + 1) <= last && (
curr_import_dir->FirstThunk || curr_import_dir->Name ||
curr_import_dir->TimeDateStamp || curr_import_dir->Characteristics ||
curr_import_dir->ForwarderChain)) {
name_off = bin_pe_rva_to_paddr (bin, curr_import_dir->Name);
len = r_buf_read_at (bin->b, name_off, (ut8*) libs[index].name, PE_STRING_LENGTH);
if (!libs[index].name[0]) { // minimum string length
goto next;
}
if (len < 2 || libs[index].name[0] == 0) { // minimum string length
bprintf ("Warning: read (libs - import dirs) %d\n", len);
break;
}
libs[index].name[len - 1] = '\0';
r_str_case (libs[index].name, 0);
if (!sdb_ht_find (lib_map, libs[index].name, NULL)) {
sdb_ht_insert (lib_map, libs[index].name, "a");
libs[index++].last = 0;
if (index >= max_libs) {
libs = realloc (libs, (max_libs * 2) * sizeof (struct r_bin_pe_lib_t));
if (!libs) {
r_sys_perror ("realloc (libs)");
goto out_error;
}
max_libs *= 2;
}
}
next:
curr_import_dir++;
}
}
off = bin->delay_import_directory_offset;
if (off < bin->size && off > 0) {
if (off + sizeof(PE_(image_delay_import_directory)) > bin->size) {
goto out_error;
}
curr_delay_import_dir = (PE_(image_delay_import_directory)*)(bin->b->buf + off);
while (curr_delay_import_dir->Name != 0 && curr_delay_import_dir->DelayImportNameTable != 0) {
name_off = bin_pe_rva_to_paddr (bin, curr_delay_import_dir->Name);
if (name_off > bin->size || name_off + PE_STRING_LENGTH > bin->size) {
goto out_error;
}
len = r_buf_read_at (bin->b, name_off, (ut8*) libs[index].name, PE_STRING_LENGTH);
if (len != PE_STRING_LENGTH) {
bprintf ("Warning: read (libs - delay import dirs)\n");
break;
}
libs[index].name[len - 1] = '\0';
r_str_case (libs[index].name, 0);
if (!sdb_ht_find (lib_map, libs[index].name, NULL)) {
sdb_ht_insert (lib_map, libs[index].name, "a");
libs[index++].last = 0;
if (index >= max_libs) {
libs = realloc (libs, (max_libs * 2) * sizeof (struct r_bin_pe_lib_t));
if (!libs) {
sdb_ht_free (lib_map);
r_sys_perror ("realloc (libs)");
return NULL;
}
max_libs *= 2;
}
}
curr_delay_import_dir++;
if ((const ut8*) (curr_delay_import_dir + 1) >= (const ut8*) (bin->b->buf + bin->size)) {
break;
}
}
}
sdb_ht_free (lib_map);
libs[index].last = 1;
return libs;
out_error:
sdb_ht_free (lib_map);
free (libs);
return NULL;
} | 0 | [
"CWE-125"
]
| radare2 | 4e1cf0d3e6f6fe2552a269def0af1cd2403e266c | 312,969,512,166,390,400,000,000,000,000,000,000,000 | 109 | Fix crash in pe |
CString CWebSock::GetSkinPath(const CString& sSkinName) {
CString sRet = CZNC::Get().GetZNCPath() + "/webskins/" + sSkinName;
if (!CFile::IsDir(sRet)) {
sRet = CZNC::Get().GetCurPath() + "/webskins/" + sSkinName;
if (!CFile::IsDir(sRet)) {
sRet = CString(_SKINDIR_) + "/" + sSkinName;
}
}
return sRet + "/";
} | 1 | [
"CWE-22"
]
| znc | a4a5aeeb17d32937d8c7d743dae9a4cc755ce773 | 235,465,360,297,438,940,000,000,000,000,000,000,000 | 13 | Don't let web skin name ../../../../ access files outside of usual skins directories.
Thanks for Jeriko One <[email protected]> for finding and reporting this. |
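The function above concatenates the user-supplied skin name into filesystem paths, which is what lets "../../../../" escape the skins directories. A hedged sketch of the kind of input validation the fix implies (not ZNC's actual code):

```c
#include <string.h>

/* Accept only skin names that cannot traverse out of the skins
 * directories: no path separators and no ".." components. */
static int skin_name_ok(const char *name)
{
    return name[0] != '\0' &&
           strchr(name, '/')  == NULL &&
           strchr(name, '\\') == NULL &&
           strstr(name, "..") == NULL;
}
```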
get_curve_name (int curve)
{
if (curve == CURVE_NIST_P256)
return "NIST P-256";
else if (curve == CURVE_NIST_P384)
return "NIST P-384";
else if (curve == CURVE_NIST_P521)
return "NIST P-521";
else if (curve == CURVE_SEC_P256K1)
return "secp256k1";
else if (curve == CURVE_ED25519)
return "Ed25519";
else
return "unknown";
} | 0 | [
"CWE-20"
]
| gnupg | 2183683bd633818dd031b090b5530951de76f392 | 310,358,001,286,967,200,000,000,000,000,000,000,000 | 15 | Use inline functions to convert buffer data to scalars.
* common/host2net.h (buf16_to_ulong, buf16_to_uint): New.
(buf16_to_ushort, buf16_to_u16): New.
(buf32_to_size_t, buf32_to_ulong, buf32_to_uint, buf32_to_u32): New.
--
Commit 91b826a38880fd8a989318585eb502582636ddd8 was not enough to
avoid all sign extension on shift problems. Hanno Böck found a case
with an invalid read due to this problem. To fix that once and for
all almost all uses of "<< 24" and "<< 8" are changed by this patch to
use an inline function from host2net.h.
Signed-off-by: Werner Koch <[email protected]> |
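The pattern the commit introduces builds multi-byte values from unsigned bytes so that "b[0] << 24" can never be sign-extended through int. A hedged stand-in for the kind of inline function common/host2net.h provides (not a copy of it):

```c
#include <stdint.h>

/* Assemble a big-endian 32-bit value; the uint32_t casts keep every
 * shift unsigned, avoiding undefined sign extension of b[0] << 24. */
static inline uint32_t buf32_to_u32(const unsigned char *b)
{
    return ((uint32_t)b[0] << 24) | ((uint32_t)b[1] << 16) |
           ((uint32_t)b[2] << 8)  |  (uint32_t)b[3];
}
```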
static int mce_end(int order)
{
int ret = -1;
u64 timeout = (u64)mca_cfg.monarch_timeout * NSEC_PER_USEC;
if (!timeout)
goto reset;
if (order < 0)
goto reset;
/*
* Allow others to run.
*/
atomic_inc(&mce_executing);
if (order == 1) {
/* CHECKME: Can this race with a parallel hotplug? */
int cpus = num_online_cpus();
/*
* Monarch: Wait for everyone to go through their scanning
* loops.
*/
while (atomic_read(&mce_executing) <= cpus) {
if (mce_timed_out(&timeout,
"Timeout: Monarch CPU unable to finish machine check processing"))
goto reset;
ndelay(SPINUNIT);
}
mce_reign();
barrier();
ret = 0;
} else {
/*
* Subject: Wait for Monarch to finish.
*/
while (atomic_read(&mce_executing) != 0) {
if (mce_timed_out(&timeout,
"Timeout: Monarch CPU did not finish machine check processing"))
goto reset;
ndelay(SPINUNIT);
}
/*
* Don't reset anything. That's done by the Monarch.
*/
return 0;
}
/*
* Reset all global state.
*/
reset:
atomic_set(&global_nwo, 0);
atomic_set(&mce_callin, 0);
barrier();
/*
* Let others run again.
*/
atomic_set(&mce_executing, 0);
return ret;
} | 0 | [
"CWE-362"
]
| linux | b3b7c4795ccab5be71f080774c45bbbcc75c2aaf | 206,493,610,761,966,680,000,000,000,000,000,000,000 | 64 | x86/MCE: Serialize sysfs changes
The check_interval file in
/sys/devices/system/machinecheck/machinecheck<cpu number>
directory is a global timer value for MCE polling. If it is changed by one
CPU, mce_restart() broadcasts the event to other CPUs to delete and restart
the MCE polling timer and __mcheck_cpu_init_timer() reinitializes the
mce_timer variable.
If more than one CPU writes a specific value to the check_interval file
concurrently, mce_timer is not protected from such concurrent accesses and
all kinds of explosions happen. Since only root can write to those sysfs
variables, the issue is not a big deal security-wise.
However, concurrent writes to these configuration variables is void of
reason so the proper thing to do is to serialize the access with a mutex.
Boris:
- Make store_int_with_restart() use device_store_ulong() to filter out
negative intervals
- Limit min interval to 1 second
- Correct locking
- Massage commit message
Signed-off-by: Seunghun Han <[email protected]>
Signed-off-by: Borislav Petkov <[email protected]>
Signed-off-by: Thomas Gleixner <[email protected]>
Cc: Greg Kroah-Hartman <[email protected]>
Cc: Tony Luck <[email protected]>
Cc: linux-edac <[email protected]>
Cc: [email protected]
Link: http://lkml.kernel.org/r/[email protected] |
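The fix described above serializes writes to the shared timer configuration with a mutex and clamps the minimum interval. A hedged userspace analogy using pthreads (the kernel uses its own mutex and timer machinery):

```c
#include <pthread.h>

static pthread_mutex_t cfg_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long check_interval;

/* Serialize concurrent stores so two writers cannot race the timer
 * reinitialization; clamp to the 1-second minimum the commit adds. */
static void store_check_interval(unsigned long new_val)
{
    if (new_val < 1)
        new_val = 1;
    pthread_mutex_lock(&cfg_lock);
    check_interval = new_val;        /* timer restart would go here */
    pthread_mutex_unlock(&cfg_lock);
}
```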
static MSUSB_PIPE_DESCRIPTOR** msusb_mspipes_read(wStream* s, UINT32 NumberOfPipes)
{
UINT32 pnum;
MSUSB_PIPE_DESCRIPTOR** MsPipes;
if (Stream_GetRemainingCapacity(s) < 12 * NumberOfPipes)
return NULL;
MsPipes = (MSUSB_PIPE_DESCRIPTOR**)calloc(NumberOfPipes, sizeof(MSUSB_PIPE_DESCRIPTOR*));
if (!MsPipes)
return NULL;
for (pnum = 0; pnum < NumberOfPipes; pnum++)
{
MSUSB_PIPE_DESCRIPTOR* MsPipe = msusb_mspipe_new();
if (!MsPipe)
goto out_error;
Stream_Read_UINT16(s, MsPipe->MaximumPacketSize);
Stream_Seek(s, 2);
Stream_Read_UINT32(s, MsPipe->MaximumTransferSize);
Stream_Read_UINT32(s, MsPipe->PipeFlags);
/* Already set to zero by memset
MsPipe->PipeHandle = 0;
MsPipe->bEndpointAddress = 0;
MsPipe->bInterval = 0;
MsPipe->PipeType = 0;
MsPipe->InitCompleted = 0;
*/
MsPipes[pnum] = MsPipe;
}
return MsPipes;
out_error:
for (pnum = 0; pnum < NumberOfPipes; pnum++)
free(MsPipes[pnum]);
free(MsPipes);
return NULL;
} | 1 | [
"CWE-190"
]
| FreeRDP | 9f77fc3dd2394373e1be753952b00dafa1a9b7da | 43,522,950,749,896,040,000,000,000,000,000,000,000 | 43 | Fixed int overflow in msusb_mspipes_read
Thanks to hac425 |
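The overflow in the function above is in the capacity check itself: 12 * NumberOfPipes can wrap, making the check pass for a huge pipe count. A hedged sketch of an overflow-safe equivalent (dividing instead of multiplying; not FreeRDP's actual fix):

```c
#include <stdint.h>

/* Equivalent to remaining >= 12 * pipes, but the division form cannot
 * overflow for any pipe count. */
static int have_capacity(uint64_t remaining, uint32_t pipes)
{
    return pipes <= remaining / 12;
}
```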
lyp_deviate_apply_ext(struct lys_deviate *dev, struct lys_node *target, LYEXT_SUBSTMT substmt, struct lys_ext *extdef)
{
struct ly_ctx *ctx;
int m, n;
struct lys_ext_instance *new;
void *reallocated;
/* LY_DEVIATE_ADD and LY_DEVIATE_RPL are very similar so they are implement the same way - in replacing,
* there can be some extension instances in the target, in case of adding, there should not be any so we
* will be just adding. */
ctx = target->module->ctx; /* shortcut */
m = n = -1;
while ((m = lys_ext_iter(dev->ext, dev->ext_size, m + 1, substmt)) != -1) {
/* deviate and its substatements include extensions, copy them to the target, replacing the previous
* extensions if any. In case of deviating extension itself, we have to deviate only the same type
* of the extension as specified in the deviation */
if (substmt == LYEXT_SUBSTMT_SELF && dev->ext[m]->def != extdef) {
continue;
}
if (substmt == LYEXT_SUBSTMT_SELF && dev->mod == LY_DEVIATE_ADD) {
/* in case of adding extension, we will be replacing only the inherited extensions */
do {
n = lys_ext_iter(target->ext, target->ext_size, n + 1, substmt);
} while (n != -1 && (target->ext[n]->def != extdef || !(target->ext[n]->flags & LYEXT_OPT_INHERIT)));
} else {
/* get the index of the extension to replace in the target node */
do {
n = lys_ext_iter(target->ext, target->ext_size, n + 1, substmt);
/* if we are applying extension deviation, we have to deviate only the same type of the extension */
} while (n != -1 && substmt == LYEXT_SUBSTMT_SELF && target->ext[n]->def != extdef);
}
if (n == -1) {
/* nothing to replace, we are going to add it - reallocate */
new = malloc(sizeof **target->ext);
LY_CHECK_ERR_RETURN(!new, LOGMEM(ctx), EXIT_FAILURE);
reallocated = realloc(target->ext, (target->ext_size + 1) * sizeof *target->ext);
LY_CHECK_ERR_RETURN(!reallocated, LOGMEM(ctx); free(new), EXIT_FAILURE);
target->ext = reallocated;
target->ext_size++;
n = target->ext_size - 1;
} else {
/* replacing - the original set of extensions is actually backuped together with the
* node itself, so we are supposed only to free the allocated data here ... */
lys_extension_instances_free(ctx, target->ext[n]->ext, target->ext[n]->ext_size, NULL);
lydict_remove(ctx, target->ext[n]->arg_value);
free(target->ext[n]);
/* and prepare the new structure */
new = malloc(sizeof **target->ext);
LY_CHECK_ERR_RETURN(!new, LOGMEM(ctx), EXIT_FAILURE);
}
/* common part for adding and replacing - fill the newly created / replaced cell */
target->ext[n] = new;
target->ext[n]->def = dev->ext[m]->def;
target->ext[n]->arg_value = lydict_insert(ctx, dev->ext[m]->arg_value, 0);
target->ext[n]->flags = 0;
target->ext[n]->parent = target;
target->ext[n]->parent_type = LYEXT_PAR_NODE;
target->ext[n]->insubstmt = substmt;
target->ext[n]->insubstmt_index = dev->ext[m]->insubstmt_index;
target->ext[n]->ext_size = dev->ext[m]->ext_size;
lys_ext_dup(ctx, target->module, dev->ext[m]->ext, dev->ext[m]->ext_size, target, LYEXT_PAR_NODE,
&target->ext[n]->ext, 1, NULL);
target->ext[n]->nodetype = LYS_EXT;
target->ext[n]->module = target->module;
target->ext[n]->priv = NULL;
/* TODO cover complex extension instances */
}
/* remove the rest of extensions belonging to the original substatement in the target node,
* due to possible reverting of the deviation effect, they are actually not removed, just moved
* to the backup of the original node when the original node is backuped, here we just have to
* free the replaced / deleted originals */
while ((n = lys_ext_iter(target->ext, target->ext_size, n + 1, substmt)) != -1) {
if (substmt == LYEXT_SUBSTMT_SELF) {
/* if we are applying extension deviation, we are going to remove only
* - the same type of the extension in case of replacing
* - the same type of the extension which was inherited in case of adding
* note - delete deviation is covered in lyp_deviate_del_ext */
if (target->ext[n]->def != extdef ||
(dev->mod == LY_DEVIATE_ADD && !(target->ext[n]->flags & LYEXT_OPT_INHERIT))) {
/* keep this extension */
continue;
}
}
/* remove the item */
lyp_ext_instance_rm(ctx, &target->ext, &target->ext_size, n);
--n;
}
return EXIT_SUCCESS;
} | 0 | [
"CWE-787"
]
| libyang | f6d684ade99dd37b21babaa8a856f64faa1e2e0d | 293,292,288,734,943,770,000,000,000,000,000,000,000 | 100 | parser BUGFIX long identity name buffer overflow
STRING_OVERFLOW (CWE-120) |
static int serial_ir_open(struct rc_dev *rcdev)
{
unsigned long flags;
/* initialize timestamp */
serial_ir.lastkt = ktime_get();
spin_lock_irqsave(&hardware[type].lock, flags);
/* Set DLAB 0. */
soutp(UART_LCR, sinp(UART_LCR) & (~UART_LCR_DLAB));
soutp(UART_IER, sinp(UART_IER) | UART_IER_MSI);
spin_unlock_irqrestore(&hardware[type].lock, flags);
return 0;
} | 0 | [
"CWE-416"
]
| linux | 56cd26b618855c9af48c8301aa6754ced8dd0beb | 132,757,864,396,392,280,000,000,000,000,000,000,000 | 18 | media: serial_ir: Fix use-after-free in serial_ir_init_module
Syzkaller report this:
BUG: KASAN: use-after-free in sysfs_remove_file_ns+0x5f/0x70 fs/sysfs/file.c:468
Read of size 8 at addr ffff8881dc7ae030 by task syz-executor.0/6249
CPU: 1 PID: 6249 Comm: syz-executor.0 Not tainted 5.0.0-rc8+ #3
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.10.2-1ubuntu1 04/01/2014
Call Trace:
__dump_stack lib/dump_stack.c:77 [inline]
dump_stack+0xfa/0x1ce lib/dump_stack.c:113
print_address_description+0x65/0x270 mm/kasan/report.c:187
kasan_report+0x149/0x18d mm/kasan/report.c:317
? 0xffffffffc1728000
sysfs_remove_file_ns+0x5f/0x70 fs/sysfs/file.c:468
sysfs_remove_file include/linux/sysfs.h:519 [inline]
driver_remove_file+0x40/0x50 drivers/base/driver.c:122
remove_bind_files drivers/base/bus.c:585 [inline]
bus_remove_driver+0x186/0x220 drivers/base/bus.c:725
driver_unregister+0x6c/0xa0 drivers/base/driver.c:197
serial_ir_init_module+0x169/0x1000 [serial_ir]
do_one_initcall+0xfa/0x5ca init/main.c:887
do_init_module+0x204/0x5f6 kernel/module.c:3460
load_module+0x66b2/0x8570 kernel/module.c:3808
__do_sys_finit_module+0x238/0x2a0 kernel/module.c:3902
do_syscall_64+0x147/0x600 arch/x86/entry/common.c:290
entry_SYSCALL_64_after_hwframe+0x49/0xbe
RIP: 0033:0x462e99
Code: f7 d8 64 89 02 b8 ff ff ff ff c3 66 0f 1f 44 00 00 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 c7 c1 bc ff ff ff f7 d8 64 89 01 48
RSP: 002b:00007f9450132c58 EFLAGS: 00000246 ORIG_RAX: 0000000000000139
RAX: ffffffffffffffda RBX: 000000000073bf00 RCX: 0000000000462e99
RDX: 0000000000000000 RSI: 0000000020000100 RDI: 0000000000000003
RBP: 00007f9450132c70 R08: 0000000000000000 R09: 0000000000000000
R10: 0000000000000000 R11: 0000000000000246 R12: 00007f94501336bc
R13: 00000000004bcefa R14: 00000000006f6fb0 R15: 0000000000000004
Allocated by task 6249:
set_track mm/kasan/common.c:85 [inline]
__kasan_kmalloc.constprop.3+0xa0/0xd0 mm/kasan/common.c:495
kmalloc include/linux/slab.h:545 [inline]
kzalloc include/linux/slab.h:740 [inline]
bus_add_driver+0xc0/0x610 drivers/base/bus.c:651
driver_register+0x1bb/0x3f0 drivers/base/driver.c:170
serial_ir_init_module+0xe8/0x1000 [serial_ir]
do_one_initcall+0xfa/0x5ca init/main.c:887
do_init_module+0x204/0x5f6 kernel/module.c:3460
load_module+0x66b2/0x8570 kernel/module.c:3808
__do_sys_finit_module+0x238/0x2a0 kernel/module.c:3902
do_syscall_64+0x147/0x600 arch/x86/entry/common.c:290
entry_SYSCALL_64_after_hwframe+0x49/0xbe
Freed by task 6249:
set_track mm/kasan/common.c:85 [inline]
__kasan_slab_free+0x130/0x180 mm/kasan/common.c:457
slab_free_hook mm/slub.c:1430 [inline]
slab_free_freelist_hook mm/slub.c:1457 [inline]
slab_free mm/slub.c:3005 [inline]
kfree+0xe1/0x270 mm/slub.c:3957
kobject_cleanup lib/kobject.c:662 [inline]
kobject_release lib/kobject.c:691 [inline]
kref_put include/linux/kref.h:67 [inline]
kobject_put+0x146/0x240 lib/kobject.c:708
bus_remove_driver+0x10e/0x220 drivers/base/bus.c:732
driver_unregister+0x6c/0xa0 drivers/base/driver.c:197
serial_ir_init_module+0x14c/0x1000 [serial_ir]
do_one_initcall+0xfa/0x5ca init/main.c:887
do_init_module+0x204/0x5f6 kernel/module.c:3460
load_module+0x66b2/0x8570 kernel/module.c:3808
__do_sys_finit_module+0x238/0x2a0 kernel/module.c:3902
do_syscall_64+0x147/0x600 arch/x86/entry/common.c:290
entry_SYSCALL_64_after_hwframe+0x49/0xbe
The buggy address belongs to the object at ffff8881dc7ae000
which belongs to the cache kmalloc-256 of size 256
The buggy address is located 48 bytes inside of
256-byte region [ffff8881dc7ae000, ffff8881dc7ae100)
The buggy address belongs to the page:
page:ffffea000771eb80 count:1 mapcount:0 mapping:ffff8881f6c02e00 index:0x0
flags: 0x2fffc0000000200(slab)
raw: 02fffc0000000200 ffffea0007d14800 0000000400000002 ffff8881f6c02e00
raw: 0000000000000000 00000000800c000c 00000001ffffffff 0000000000000000
page dumped because: kasan: bad access detected
Memory state around the buggy address:
ffff8881dc7adf00: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
ffff8881dc7adf80: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
>ffff8881dc7ae000: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
^
ffff8881dc7ae080: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
ffff8881dc7ae100: fc fc fc fc fc fc fc fc 00 00 00 00 00 00 00 00
There are already cleanup handlings in serial_ir_init error path,
no need to call serial_ir_exit do it again in serial_ir_init_module,
otherwise will trigger a use-after-free issue.
Fixes: fa5dc29c1fcc ("[media] lirc_serial: move out of staging and rename to serial_ir")
Reported-by: Hulk Robot <[email protected]>
Signed-off-by: YueHaibing <[email protected]>
Signed-off-by: Sean Young <[email protected]>
Signed-off-by: Mauro Carvalho Chehab <[email protected]> |
static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
u32 __user *uaddr2, int nr_wake, int nr_requeue,
u32 *cmpval, int requeue_pi)
{
union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
int drop_count = 0, task_count = 0, ret;
struct futex_pi_state *pi_state = NULL;
struct futex_hash_bucket *hb1, *hb2;
struct futex_q *this, *next;
DEFINE_WAKE_Q(wake_q);
if (requeue_pi) {
/*
* Requeue PI only works on two distinct uaddrs. This
* check is only valid for private futexes. See below.
*/
if (uaddr1 == uaddr2)
return -EINVAL;
/*
* requeue_pi requires a pi_state, try to allocate it now
* without any locks in case it fails.
*/
if (refill_pi_state_cache())
return -ENOMEM;
/*
* requeue_pi must wake as many tasks as it can, up to nr_wake
* + nr_requeue, since it acquires the rt_mutex prior to
* returning to userspace, so as to not leave the rt_mutex with
* waiters and no owner. However, second and third wake-ups
* cannot be predicted as they involve race conditions with the
* first wake and a fault while looking up the pi_state. Both
* pthread_cond_signal() and pthread_cond_broadcast() should
* use nr_wake=1.
*/
if (nr_wake != 1)
return -EINVAL;
}
retry:
ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, VERIFY_READ);
if (unlikely(ret != 0))
goto out;
ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2,
requeue_pi ? VERIFY_WRITE : VERIFY_READ);
if (unlikely(ret != 0))
goto out_put_key1;
/*
* The check above which compares uaddrs is not sufficient for
* shared futexes. We need to compare the keys:
*/
if (requeue_pi && match_futex(&key1, &key2)) {
ret = -EINVAL;
goto out_put_keys;
}
hb1 = hash_futex(&key1);
hb2 = hash_futex(&key2);
retry_private:
hb_waiters_inc(hb2);
double_lock_hb(hb1, hb2);
if (likely(cmpval != NULL)) {
u32 curval;
ret = get_futex_value_locked(&curval, uaddr1);
if (unlikely(ret)) {
double_unlock_hb(hb1, hb2);
hb_waiters_dec(hb2);
ret = get_user(curval, uaddr1);
if (ret)
goto out_put_keys;
if (!(flags & FLAGS_SHARED))
goto retry_private;
put_futex_key(&key2);
put_futex_key(&key1);
goto retry;
}
if (curval != *cmpval) {
ret = -EAGAIN;
goto out_unlock;
}
}
if (requeue_pi && (task_count - nr_wake < nr_requeue)) {
/*
* Attempt to acquire uaddr2 and wake the top waiter. If we
* intend to requeue waiters, force setting the FUTEX_WAITERS
* bit. We force this here where we are able to easily handle
* faults rather in the requeue loop below.
*/
ret = futex_proxy_trylock_atomic(uaddr2, hb1, hb2, &key1,
&key2, &pi_state, nr_requeue);
/*
* At this point the top_waiter has either taken uaddr2 or is
* waiting on it. If the former, then the pi_state will not
* exist yet, look it up one more time to ensure we have a
* reference to it. If the lock was taken, ret contains the
* vpid of the top waiter task.
* If the lock was not taken, we have pi_state and an initial
* refcount on it. In case of an error we have nothing.
*/
if (ret > 0) {
WARN_ON(pi_state);
drop_count++;
task_count++;
/*
* If we acquired the lock, then the user space value
* of uaddr2 should be vpid. It cannot be changed by
* the top waiter as it is blocked on hb2 lock if it
* tries to do so. If something fiddled with it behind
* our back the pi state lookup might unearth it. So
* we rather use the known value than rereading and
* handing potential crap to lookup_pi_state.
*
* If that call succeeds then we have pi_state and an
* initial refcount on it.
*/
ret = lookup_pi_state(uaddr2, ret, hb2, &key2, &pi_state);
}
switch (ret) {
case 0:
/* We hold a reference on the pi state. */
break;
/* If the above failed, then pi_state is NULL */
case -EFAULT:
double_unlock_hb(hb1, hb2);
hb_waiters_dec(hb2);
put_futex_key(&key2);
put_futex_key(&key1);
ret = fault_in_user_writeable(uaddr2);
if (!ret)
goto retry;
goto out;
case -EAGAIN:
/*
* Two reasons for this:
* - Owner is exiting and we just wait for the
* exit to complete.
* - The user space value changed.
*/
double_unlock_hb(hb1, hb2);
hb_waiters_dec(hb2);
put_futex_key(&key2);
put_futex_key(&key1);
cond_resched();
goto retry;
default:
goto out_unlock;
}
}
plist_for_each_entry_safe(this, next, &hb1->chain, list) {
if (task_count - nr_wake >= nr_requeue)
break;
if (!match_futex(&this->key, &key1))
continue;
/*
		 * FUTEX_WAIT_REQUEUE_PI and FUTEX_CMP_REQUEUE_PI should always
* be paired with each other and no other futex ops.
*
* We should never be requeueing a futex_q with a pi_state,
* which is awaiting a futex_unlock_pi().
*/
if ((requeue_pi && !this->rt_waiter) ||
(!requeue_pi && this->rt_waiter) ||
this->pi_state) {
ret = -EINVAL;
break;
}
/*
* Wake nr_wake waiters. For requeue_pi, if we acquired the
* lock, we already woke the top_waiter. If not, it will be
* woken by futex_unlock_pi().
*/
if (++task_count <= nr_wake && !requeue_pi) {
mark_wake_futex(&wake_q, this);
continue;
}
/* Ensure we requeue to the expected futex for requeue_pi. */
if (requeue_pi && !match_futex(this->requeue_pi_key, &key2)) {
ret = -EINVAL;
break;
}
/*
* Requeue nr_requeue waiters and possibly one more in the case
* of requeue_pi if we couldn't acquire the lock atomically.
*/
if (requeue_pi) {
/*
* Prepare the waiter to take the rt_mutex. Take a
* refcount on the pi_state and store the pointer in
* the futex_q object of the waiter.
*/
get_pi_state(pi_state);
this->pi_state = pi_state;
ret = rt_mutex_start_proxy_lock(&pi_state->pi_mutex,
this->rt_waiter,
this->task);
if (ret == 1) {
/*
* We got the lock. We do neither drop the
* refcount on pi_state nor clear
* this->pi_state because the waiter needs the
* pi_state for cleaning up the user space
* value. It will drop the refcount after
* doing so.
*/
requeue_pi_wake_futex(this, &key2, hb2);
drop_count++;
continue;
} else if (ret) {
/*
* rt_mutex_start_proxy_lock() detected a
* potential deadlock when we tried to queue
* that waiter. Drop the pi_state reference
* which we took above and remove the pointer
* to the state from the waiters futex_q
* object.
*/
this->pi_state = NULL;
put_pi_state(pi_state);
/*
* We stop queueing more waiters and let user
* space deal with the mess.
*/
break;
}
}
requeue_futex(this, hb1, hb2, &key2);
drop_count++;
}
/*
* We took an extra initial reference to the pi_state either
* in futex_proxy_trylock_atomic() or in lookup_pi_state(). We
* need to drop it here again.
*/
put_pi_state(pi_state);
out_unlock:
double_unlock_hb(hb1, hb2);
wake_up_q(&wake_q);
hb_waiters_dec(hb2);
/*
* drop_futex_key_refs() must be called outside the spinlocks. During
* the requeue we moved futex_q's from the hash bucket at key1 to the
* one at key2 and updated their key pointer. We no longer need to
* hold the references to key1.
*/
while (--drop_count >= 0)
drop_futex_key_refs(&key1);
out_put_keys:
put_futex_key(&key2);
out_put_key1:
put_futex_key(&key1);
out:
return ret ? ret : task_count;
} | 0 | [
"CWE-416"
]
| linux | 48fb6f4db940e92cfb16cd878cddd59ea6120d06 | 196,265,007,509,844,400,000,000,000,000,000,000,000 | 275 | futex: Remove unnecessary warning from get_futex_key
Commit 65d8fc777f6d ("futex: Remove requirement for lock_page() in
get_futex_key()") removed an unnecessary lock_page() with the
side-effect that page->mapping needed to be treated very carefully.
Two defensive warnings were added in case any assumption was missed and
the first warning assumed a correct application would not alter a
mapping backing a futex key. Since merging, it has not triggered for
any unexpected case but Mark Rutland reported the following bug
triggering due to the first warning.
kernel BUG at kernel/futex.c:679!
Internal error: Oops - BUG: 0 [#1] PREEMPT SMP
Modules linked in:
CPU: 0 PID: 3695 Comm: syz-executor1 Not tainted 4.13.0-rc3-00020-g307fec773ba3 #3
Hardware name: linux,dummy-virt (DT)
task: ffff80001e271780 task.stack: ffff000010908000
PC is at get_futex_key+0x6a4/0xcf0 kernel/futex.c:679
LR is at get_futex_key+0x6a4/0xcf0 kernel/futex.c:679
pc : [<ffff00000821ac14>] lr : [<ffff00000821ac14>] pstate: 80000145
The fact that it's a bug instead of a warning was due to an unrelated
arm64 problem, but the warning itself triggered because the underlying
mapping changed.
This is an application issue but from a kernel perspective it's a
recoverable situation and the warning is unnecessary so this patch
removes the warning. The warning may potentially be triggered with the
following test program from Mark although it may be necessary to adjust
NR_FUTEX_THREADS to be a value smaller than the number of CPUs in the
system.
#include <linux/futex.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <sys/time.h>
#include <unistd.h>
#define NR_FUTEX_THREADS 16
pthread_t threads[NR_FUTEX_THREADS];
void *mem;
#define MEM_PROT (PROT_READ | PROT_WRITE)
#define MEM_SIZE 65536
static int futex_wrapper(int *uaddr, int op, int val,
const struct timespec *timeout,
int *uaddr2, int val3)
{
	return syscall(SYS_futex, uaddr, op, val, timeout, uaddr2, val3);
}
void *poll_futex(void *unused)
{
for (;;) {
futex_wrapper(mem, FUTEX_CMP_REQUEUE_PI, 1, NULL, mem + 4, 1);
}
}
int main(int argc, char *argv[])
{
int i;
mem = mmap(NULL, MEM_SIZE, MEM_PROT,
MAP_SHARED | MAP_ANONYMOUS, -1, 0);
printf("Mapping @ %p\n", mem);
printf("Creating futex threads...\n");
for (i = 0; i < NR_FUTEX_THREADS; i++)
pthread_create(&threads[i], NULL, poll_futex, NULL);
printf("Flipping mapping...\n");
for (;;) {
mmap(mem, MEM_SIZE, MEM_PROT,
MAP_FIXED | MAP_SHARED | MAP_ANONYMOUS, -1, 0);
}
return 0;
}
Reported-and-tested-by: Mark Rutland <[email protected]>
Signed-off-by: Mel Gorman <[email protected]>
Acked-by: Peter Zijlstra (Intel) <[email protected]>
Cc: [email protected] # 4.7+
Signed-off-by: Linus Torvalds <[email protected]> |
int au1100fb_drv_suspend(struct platform_device *dev, pm_message_t state)
{
struct au1100fb_device *fbdev = platform_get_drvdata(dev);
if (!fbdev)
return 0;
/* Save the clock source state */
sys_clksrc = au_readl(SYS_CLKSRC);
/* Blank the LCD */
au1100fb_fb_blank(VESA_POWERDOWN, &fbdev->info);
/* Stop LCD clocking */
au_writel(sys_clksrc & ~SYS_CS_ML_MASK, SYS_CLKSRC);
memcpy(&fbregs, fbdev->regs, sizeof(struct au1100fb_regs));
return 0;
} | 0 | [
"CWE-119",
"CWE-189",
"CWE-703"
]
| linux | 7314e613d5ff9f0934f7a0f74ed7973b903315d1 | 315,886,287,021,191,520,000,000,000,000,000,000,000 | 20 | Fix a few incorrectly checked [io_]remap_pfn_range() calls
Nico Golde reports a few straggling uses of [io_]remap_pfn_range() that
really should use the vm_iomap_memory() helper. This trivially converts
two of them to the helper, and comments about why the third one really
needs to continue to use remap_pfn_range(), and adds the missing size
check.
Reported-by: Nico Golde <[email protected]>
Cc: [email protected]
Signed-off-by: Linus Torvalds <[email protected]>. |
xfs_buf_delwri_submit_nowait(
struct list_head *buffer_list)
{
LIST_HEAD (io_list);
return __xfs_buf_delwri_submit(buffer_list, &io_list, false);
} | 0 | [
"CWE-20",
"CWE-703"
]
| linux | eb178619f930fa2ba2348de332a1ff1c66a31424 | 281,064,678,151,091,000,000,000,000,000,000,000,000 | 6 | xfs: fix _xfs_buf_find oops on blocks beyond the filesystem end
When _xfs_buf_find is passed an out of range address, it will fail
to find a relevant struct xfs_perag and oops with a null
dereference. This can happen when trying to walk a filesystem with a
metadata inode that has a partially corrupted extent map (i.e. the
block number returned is corrupt, but is otherwise intact) and we
try to read from the corrupted block address.
In this case, just fail the lookup. If it is readahead being issued,
it will simply not be done, but if it is real read that fails we
will get an error being reported. Ideally this case should result
in an EFSCORRUPTED error being reported, but we cannot return an
error through xfs_buf_read() or xfs_buf_get() so this lookup failure
may result in ENOMEM or EIO errors being reported instead.
Signed-off-by: Dave Chinner <[email protected]>
Reviewed-by: Brian Foster <[email protected]>
Reviewed-by: Ben Myers <[email protected]>
Signed-off-by: Ben Myers <[email protected]> |
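The guard the commit describes reduces to a range check on the block number before the per-AG lookup. A minimal sketch of that check with simplified names (the real code compares against the mount's block count):

/* Sketch: reject a block address past the end of the filesystem before
 * the per-AG lookup that would otherwise dereference a NULL perag. */
static int blkno_in_range(unsigned long long blkno,
			  unsigned long long fs_blocks)
{
	return blkno < fs_blocks;	/* lookup may proceed only if true */
}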
void testCrash_MakeOwner_Bug20080207() {
// Testcase by Adrian Manrique
UriParserStateA state;
UriUriA sourceUri;
state.uri = &sourceUri;
const char * const sourceUriString = "http://user:[email protected]:80/";
if (uriParseUriA(&state, sourceUriString) != 0) {
TEST_ASSERT(false);
}
if (uriNormalizeSyntaxA(&sourceUri) != 0) {
TEST_ASSERT(false);
}
uriFreeUriMembersA(&sourceUri);
TEST_ASSERT(true);
} | 0 | [
"CWE-787"
]
| uriparser | 864f5d4c127def386dd5cc926ad96934b297f04e | 24,526,670,958,411,420,000,000,000,000,000,000,000 | 15 | UriQuery.c: Fix out-of-bounds-write in ComposeQuery and ...Ex
Reported by Google Autofuzz team |
static void BROTLI_NOINLINE WrapRingBuffer(BrotliDecoderState* s) {
if (s->should_wrap_ringbuffer) {
memcpy(s->ringbuffer, s->ringbuffer_end, (size_t)s->pos);
s->should_wrap_ringbuffer = 0;
}
} | 0 | [
"CWE-120"
]
| brotli | 223d80cfbec8fd346e32906c732c8ede21f0cea6 | 278,472,039,825,782,750,000,000,000,000,000,000,000 | 6 | Update (#826)
* IMPORTANT: decoder: fix potential overflow when input chunk is >2GiB
* simplify max Huffman table size calculation
* eliminate symbol duplicates (static arrays in .h files)
* minor combing in research/ code |
struct dentry *debugfs_create_file_size(const char *name, umode_t mode,
struct dentry *parent, void *data,
const struct file_operations *fops,
loff_t file_size)
{
struct dentry *de = debugfs_create_file(name, mode, parent, data, fops);
if (de)
d_inode(de)->i_size = file_size;
return de;
} | 0 | [
"CWE-362",
"CWE-399"
]
| linux | 49d31c2f389acfe83417083e1208422b4091cd9e | 162,513,033,132,052,280,000,000,000,000,000,000,000 | 11 | dentry name snapshots
take_dentry_name_snapshot() takes a safe snapshot of dentry name;
if the name is a short one, it gets copied into caller-supplied
structure, otherwise an extra reference to external name is grabbed
(those are never modified). In either case the pointer to stable
string is stored into the same structure.
dentry must be held by the caller of take_dentry_name_snapshot(),
but may be freely dropped afterwards - the snapshot will stay
until destroyed by release_dentry_name_snapshot().
Intended use:
struct name_snapshot s;
take_dentry_name_snapshot(&s, dentry);
...
access s.name
...
release_dentry_name_snapshot(&s);
Replaces fsnotify_oldname_...(), gets used in fsnotify to obtain the name
to pass down with event.
Signed-off-by: Al Viro <[email protected]> |
bool isValidStringChar(char c) {
/*
* The difference between the character restriction here and that mentioned
* in section 3.7 of version 6 of the Structured Headers draft is that this
* function accepts \ and DQUOTE characters. These characters are allowed
* as long as they are present as a part of an escape sequence, which is
* checked for in the parseString() function in the StructuredHeadersBuffer.
*/
return c >= 0x20 && c <= 0x7E;
} | 0 | [
"CWE-787"
]
| proxygen | 2f07985bef9fbae124cc63e5c0272e32da4fdaec | 326,475,945,737,725,470,000,000,000,000,000,000,000 | 10 | Fix SEGV in StructuredHeaders::decodeBase64
Summary:
The existing code can potentially cause a SEGV due to an out of bounds write.
This fixes CVE-2019-11921.
Reviewed By: knekritz
Differential Revision: D12983120
fbshipit-source-id: 1d48063595c8d518fd8afcbc941de260af7e37fd |
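The overflow class fixed here comes from writing base64 output without first sizing it from the input. A self-contained C sketch of the capacity check (hypothetical helper, not proxygen's API):

#include <stddef.h>

/* Sketch: an n-byte base64 input decodes to at most (n / 4) * 3 + 2
 * bytes; refuse to decode into anything smaller. */
static int b64_fits(size_t in_len, size_t out_cap)
{
	size_t max_out = (in_len / 4) * 3 + 2;	/* conservative bound */
	return out_cap >= max_out;
}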
AlterTableLookupRelation(AlterTableStmt *stmt, LOCKMODE lockmode)
{
return RangeVarGetRelidExtended(stmt->relation, lockmode, stmt->missing_ok, false,
RangeVarCallbackForAlterRelation,
(void *) stmt);
} | 0 | [
"CWE-362"
]
| postgres | 5f173040e324f6c2eebb90d86cf1b0cdb5890f0a | 88,831,067,740,222,600,000,000,000,000,000,000,000 | 6 | Avoid repeated name lookups during table and index DDL.
If the name lookups come to different conclusions due to concurrent
activity, we might perform some parts of the DDL on a different table
than other parts. At least in the case of CREATE INDEX, this can be
used to cause the permissions checks to be performed against a
different table than the index creation, allowing for a privilege
escalation attack.
This changes the calling convention for DefineIndex, CreateTrigger,
transformIndexStmt, transformAlterTableStmt, CheckIndexCompatible
(in 9.2 and newer), and AlterTable (in 9.1 and older). In addition,
CheckRelationOwnership is removed in 9.2 and newer and the calling
convention is changed in older branches. A field has also been added
to the Constraint node (FkConstraint in 8.4). Third-party code calling
these functions or using the Constraint node will require updating.
Report by Andres Freund. Patch by Robert Haas and Andres Freund,
reviewed by Tom Lane.
Security: CVE-2014-0062 |
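The pattern the patch enforces is resolve-once: look the name up, lock the result, confirm the name still maps to the same OID, and run every later check against that OID. A self-contained sketch with hypothetical stand-ins for the catalog machinery:

typedef unsigned int Oid;

/* Hypothetical stand-ins for the catalog lookup and lock manager. */
static Oid  catalog_mapping = 42;
static Oid  lookup_name(const char *n)     { (void)n; return catalog_mapping; }
static void lock_object(Oid o, int mode)   { (void)o; (void)mode; }
static void unlock_object(Oid o, int mode) { (void)o; (void)mode; }

/* Sketch: never re-run the name lookup for permission checks; retry
 * until the locked OID and the name agree, then use only the OID. */
static Oid resolve_and_lock(const char *name, int lockmode)
{
	for (;;) {
		Oid oid = lookup_name(name);

		lock_object(oid, lockmode);
		if (lookup_name(name) == oid)
			return oid;		/* stable: checks use this OID */
		unlock_object(oid, lockmode);	/* concurrent DDL: retry */
	}
}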
vhost_scsi_do_evt_work(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
{
struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
struct virtio_scsi_event *event = &evt->event;
struct virtio_scsi_event __user *eventp;
unsigned out, in;
int head, ret;
if (!vq->private_data) {
vs->vs_events_missed = true;
return;
}
again:
vhost_disable_notify(&vs->dev, vq);
head = vhost_get_vq_desc(vq, vq->iov,
ARRAY_SIZE(vq->iov), &out, &in,
NULL, NULL);
if (head < 0) {
vs->vs_events_missed = true;
return;
}
if (head == vq->num) {
if (vhost_enable_notify(&vs->dev, vq))
goto again;
vs->vs_events_missed = true;
return;
}
if ((vq->iov[out].iov_len != sizeof(struct virtio_scsi_event))) {
vq_err(vq, "Expecting virtio_scsi_event, got %zu bytes\n",
vq->iov[out].iov_len);
vs->vs_events_missed = true;
return;
}
if (vs->vs_events_missed) {
event->event |= cpu_to_vhost32(vq, VIRTIO_SCSI_T_EVENTS_MISSED);
vs->vs_events_missed = false;
}
eventp = vq->iov[out].iov_base;
ret = __copy_to_user(eventp, event, sizeof(*event));
if (!ret)
vhost_add_used_and_signal(&vs->dev, vq, head, 0);
else
vq_err(vq, "Faulted on vhost_scsi_send_event\n");
} | 0 | [
"CWE-200",
"CWE-119"
]
| linux | 59c816c1f24df0204e01851431d3bab3eb76719c | 235,575,015,623,840,670,000,000,000,000,000,000,000 | 48 | vhost/scsi: potential memory corruption
This code in vhost_scsi_make_tpg() is confusing because we limit "tpgt"
to UINT_MAX, but the data type of "tpg->tport_tpgt" is a u16.
I looked at the context and it turns out that in
vhost_scsi_set_endpoint(), "tpg->tport_tpgt" is used as an offset into
the vs_tpg[] array which has VHOST_SCSI_MAX_TARGET (256) elements, so
anything higher than 255 is invalid. I have made that the limit now.
In vhost_scsi_send_evt() we mask away values higher than 255, but now
that the limit has changed, we don't need the mask.
Signed-off-by: Dan Carpenter <[email protected]>
Signed-off-by: Nicholas Bellinger <[email protected]> |
GF_Err gf_isom_base_sample_entry_read(GF_SampleEntryBox *ptr, GF_BitStream *bs)
{
gf_bs_read_data(bs, ptr->reserved, 6);
ptr->dataReferenceIndex = gf_bs_read_u16(bs);
if (!ptr->dataReferenceIndex) {
GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[ISO file] dataReferenceIndex set to 0 in sample entry, overriding to 1\n"));
ptr->dataReferenceIndex = 1;
}
return GF_OK;
} | 0 | [
"CWE-476",
"CWE-401"
]
| gpac | 328c6d682698fdb9878dbb4f282963d42c538c01 | 274,717,076,381,297,540,000,000,000,000,000,000,000 | 10 | fixed #1756 |
ieee80211_tdls_add_supp_channels(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb)
{
/*
* Add possible channels for TDLS. These are channels that are allowed
* to be active.
*/
u8 subband_cnt;
u8 *pos = skb_put(skb, 2);
*pos++ = WLAN_EID_SUPPORTED_CHANNELS;
/*
* 5GHz and 2GHz channels numbers can overlap. Ignore this for now, as
* this doesn't happen in real world scenarios.
*/
/* 2GHz, with 5MHz spacing */
subband_cnt = ieee80211_tdls_add_subband(sdata, skb, 2412, 2472, 5);
/* 5GHz, with 20MHz spacing */
subband_cnt += ieee80211_tdls_add_subband(sdata, skb, 5000, 5825, 20);
/* length */
*pos = 2 * subband_cnt;
} | 0 | []
| linux | 79c92ca42b5a3e0ea172ea2ce8df8e125af237da | 63,948,655,804,658,955,000,000,000,000,000,000,000 | 26 | mac80211: handle deauthentication/disassociation from TDLS peer
When receiving a deauthentication/disassociation frame from a TDLS
peer, a station should not disconnect the current AP, but only
disable the current TDLS link if it's enabled.
Without this change, a TDLS issue can be reproduced by following the
steps as below:
1. STA-1 and STA-2 are connected to AP, bidirection traffic is running
between STA-1 and STA-2.
2. Set up TDLS link between STA-1 and STA-2, stay for a while, then
teardown TDLS link.
3. Repeat step #2 and monitor the connection between STA and AP.
During the test, one STA may send a deauthentication/disassociation
frame to another, after TDLS teardown, with reason code 6/7, which
means: Class 2/3 frame received from nonassociated STA.
On receiving this frame, the receiver STA will disconnect from the
current AP and then reconnect. This is not the expected behavior; the
purpose of this frame should be to disable the TDLS link, not the link
with the AP.
Cc: [email protected]
Signed-off-by: Yu Wang <[email protected]>
Signed-off-by: Johannes Berg <[email protected]> |
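The fix's logic is a dispatch on the frame's sender before any state is torn down. A self-contained sketch with hypothetical types (not the mac80211 structures):

/* Hypothetical stand-ins; mac80211 uses sta_info and link data. */
struct sta  { int is_tdls_peer; };
struct link { int associated; };

static void tdls_teardown_link(struct sta *s) { s->is_tdls_peer = 0; }
static void disassociate(struct link *l)      { l->associated = 0; }

/* Sketch: on deauth/disassoc, tear down only the TDLS link when the
 * sender is a TDLS peer; only frames from the AP drop the association. */
static void handle_deauth(struct sta *sender, struct link *ap_link)
{
	if (sender->is_tdls_peer) {
		tdls_teardown_link(sender);	/* disable TDLS link only */
		return;
	}
	disassociate(ap_link);	/* the frame really came from the AP */
}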
pkinit_pkcs7type2oid(pkinit_plg_crypto_context cryptoctx, int pkcs7_type)
{
int nid;
switch (pkcs7_type) {
case CMS_SIGN_CLIENT:
return cryptoctx->id_pkinit_authData;
case CMS_SIGN_DRAFT9:
/*
* Delay creating this OID until we know we need it.
* It shadows an existing OpenSSL oid. If it
* is created too early, it breaks things like
* the use of pkcs12 (which uses pkcs7 structures).
* We need this shadow version because our code
* depends on the "other" type to be unknown to the
* OpenSSL code.
*/
if (cryptoctx->id_pkinit_authData9 == NULL) {
pkiDebug("%s: Creating shadow instance of pkcs7-data oid\n",
__FUNCTION__);
nid = OBJ_create("1.2.840.113549.1.7.1", "id-pkcs7-data",
"PKCS7 data");
if (nid == NID_undef)
return NULL;
cryptoctx->id_pkinit_authData9 = OBJ_nid2obj(nid);
}
return cryptoctx->id_pkinit_authData9;
case CMS_SIGN_SERVER:
return cryptoctx->id_pkinit_DHKeyData;
case CMS_ENVEL_SERVER:
return cryptoctx->id_pkinit_rkeyData;
default:
return NULL;
}
} | 0 | [
"CWE-476"
]
| krb5 | f249555301940c6df3a2cdda13b56b5674eebc2e | 271,745,596,901,925,000,000,000,000,000,000,000,000 | 36 | PKINIT null pointer deref [CVE-2013-1415]
Don't dereference a null pointer when cleaning up.
The KDC plugin for PKINIT can dereference a null pointer when a
malformed packet causes processing to terminate early, leading to
a crash of the KDC process. An attacker would need to have a valid
PKINIT certificate or have observed a successful PKINIT authentication,
or an unauthenticated attacker could execute the attack if anonymous
PKINIT is enabled.
CVSSv2 vector: AV:N/AC:M/Au:N/C:N/I:N/A:C/E:P/RL:O/RC:C
This is a minimal commit for pullup; style fixes in a followup.
[[email protected]: reformat and edit commit message]
(cherry picked from commit c773d3c775e9b2d88bcdff5f8a8ba88d7ec4e8ed)
ticket: 7570
version_fixed: 1.11.1
status: resolved |
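The defect class is an unguarded cleanup on an early-exit parse path. A minimal self-contained sketch of the defensive shape:

#include <stdlib.h>

/* Sketch: a cleanup routine reached after a partial parse must tolerate
 * fields the parser never populated. */
struct parsed {
	char *a;
	char *b;	/* still NULL if parsing stopped early */
};

static void parsed_free(struct parsed *p)
{
	if (p == NULL)
		return;		/* guard the container itself */
	free(p->a);		/* free(NULL) is defined to be a no-op */
	free(p->b);
}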
mz_zip_reader_filename_less(const mz_zip_array *pCentral_dir_array,
const mz_zip_array *pCentral_dir_offsets,
mz_uint l_index, mz_uint r_index) {
const mz_uint8 *pL = &MZ_ZIP_ARRAY_ELEMENT(
pCentral_dir_array, mz_uint8,
MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32,
l_index)),
*pE;
const mz_uint8 *pR = &MZ_ZIP_ARRAY_ELEMENT(
pCentral_dir_array, mz_uint8,
MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32, r_index));
mz_uint l_len = MZ_READ_LE16(pL + MZ_ZIP_CDH_FILENAME_LEN_OFS),
r_len = MZ_READ_LE16(pR + MZ_ZIP_CDH_FILENAME_LEN_OFS);
mz_uint8 l = 0, r = 0;
pL += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
pR += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
pE = pL + MZ_MIN(l_len, r_len);
while (pL < pE) {
if ((l = MZ_TOLOWER(*pL)) != (r = MZ_TOLOWER(*pR))) break;
pL++;
pR++;
}
return (pL == pE) ? (l_len < r_len) : (l < r);
} | 0 | [
"CWE-20",
"CWE-190"
]
| tinyexr | a685e3332f61cd4e59324bf3f669d36973d64270 | 286,748,953,311,985,520,000,000,000,000,000,000,000 | 24 | Make line_no with too large value(2**20) invalid. Fixes #124 |
void am_cache_unlock(server_rec *s, am_cache_entry_t *entry)
{
am_mod_cfg_rec *mod_cfg;
/* Update access time. */
entry->access = apr_time_now();
mod_cfg = am_get_mod_cfg(s);
apr_global_mutex_unlock(mod_cfg->lock);
} | 0 | [
"CWE-79"
]
| mod_auth_mellon | 7af21c53da7bb1de024274ee6da30bc22316a079 | 247,825,824,885,168,300,000,000,000,000,000,000,000 | 10 | Fix Cross-Site Session Transfer vulnerability
mod_auth_mellon did not verify that the site the session was created
for was the same site as the site the user accessed. This allows an
attacker with access to one web site on a server to use the same
session to get access to a different site running on the same server.
This patch fixes this vulnerability by storing the cookie parameters
used when creating the session in the session, and verifying those
parameters when the session is loaded.
Thanks to François Kooman for reporting this vulnerability.
This vulnerability has been assigned CVE-2017-6807. |
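The fix described above reduces to one comparison: record the cookie parameters (effectively, which site the session was created for) inside the session, and refuse to load the session when they differ. A self-contained sketch with hypothetical field names:

#include <string.h>

/* Sketch: a session records the cookie parameters it was created under;
 * loading it for a different site/vhost must fail. */
struct session {
	char cookie_params[256];	/* stored at creation time */
};

static int session_valid_for(const struct session *s,
			     const char *current_params)
{
	return strcmp(s->cookie_params, current_params) == 0;
}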
static int __swap_duplicate(swp_entry_t entry, unsigned char usage)
{
struct swap_info_struct *p;
unsigned long offset, type;
unsigned char count;
unsigned char has_cache;
int err = -EINVAL;
if (non_swap_entry(entry))
goto out;
type = swp_type(entry);
if (type >= nr_swapfiles)
goto bad_file;
p = swap_info[type];
offset = swp_offset(entry);
spin_lock(&swap_lock);
if (unlikely(offset >= p->max))
goto unlock_out;
count = p->swap_map[offset];
has_cache = count & SWAP_HAS_CACHE;
count &= ~SWAP_HAS_CACHE;
err = 0;
if (usage == SWAP_HAS_CACHE) {
/* set SWAP_HAS_CACHE if there is no cache and entry is used */
if (!has_cache && count)
has_cache = SWAP_HAS_CACHE;
else if (has_cache) /* someone else added cache */
err = -EEXIST;
else /* no users remaining */
err = -ENOENT;
} else if (count || has_cache) {
if ((count & ~COUNT_CONTINUED) < SWAP_MAP_MAX)
count += usage;
else if ((count & ~COUNT_CONTINUED) > SWAP_MAP_MAX)
err = -EINVAL;
else if (swap_count_continued(p, offset, count))
count = COUNT_CONTINUED;
else
err = -ENOMEM;
} else
err = -ENOENT; /* unused swap entry */
p->swap_map[offset] = count | has_cache;
unlock_out:
spin_unlock(&swap_lock);
out:
return err;
bad_file:
printk(KERN_ERR "swap_dup: %s%08lx\n", Bad_file, entry.val);
goto out;
} | 0 | [
"CWE-264"
]
| linux-2.6 | 1a5a9906d4e8d1976b701f889d8f35d54b928f25 | 308,047,913,808,004,700,000,000,000,000,000,000,000 | 60 | mm: thp: fix pmd_bad() triggering in code paths holding mmap_sem read mode
In some cases it may happen that pmd_none_or_clear_bad() is called with
the mmap_sem hold in read mode. In those cases the huge page faults can
allocate hugepmds under pmd_none_or_clear_bad() and that can trigger a
false positive from pmd_bad() that will not like to see a pmd
materializing as trans huge.
It's not khugepaged causing the problem, khugepaged holds the mmap_sem
in write mode (and all those sites must hold the mmap_sem in read mode
to prevent pagetables to go away from under them, during code review it
seems vm86 mode on 32bit kernels requires that too unless it's
restricted to 1 thread per process or UP builds). The race is only with
the huge pagefaults that can convert a pmd_none() into a
pmd_trans_huge().
Effectively all these pmd_none_or_clear_bad() sites running with
mmap_sem in read mode are somewhat speculative with the page faults, and
the result is always undefined when they run simultaneously. This is
probably why it wasn't common to run into this. For example if the
madvise(MADV_DONTNEED) runs zap_page_range() shortly before the page
fault, the hugepage will not be zapped, if the page fault runs first it
will be zapped.
Altering pmd_bad() not to error out if it finds hugepmds won't be enough
to fix this, because zap_pmd_range would then proceed to call
zap_pte_range (which would be incorrect if the pmd become a
pmd_trans_huge()).
The simplest way to fix this is to read the pmd in the local stack
(regardless of what we read, no need of actual CPU barriers, only
compiler barrier needed), and be sure it is not changing under the code
that computes its value. Even if the real pmd is changing under the
value we hold on the stack, we don't care. If we actually end up in
zap_pte_range it means the pmd was not none already and it was not huge,
and it can't become huge from under us (khugepaged locking explained
above).
All we need is to enforce that there is no way anymore that in a code
path like below, pmd_trans_huge can be false, but pmd_none_or_clear_bad
can run into a hugepmd. The overhead of a barrier() is just a compiler
tweak and should not be measurable (I only added it for THP builds). I
don't exclude different compiler versions may have prevented the race
too by caching the value of *pmd on the stack (that hasn't been
verified, but it wouldn't be impossible considering
pmd_none_or_clear_bad, pmd_bad, pmd_trans_huge, pmd_none are all inlines
and there's no external function called in between pmd_trans_huge and
pmd_none_or_clear_bad).
if (pmd_trans_huge(*pmd)) {
if (next-addr != HPAGE_PMD_SIZE) {
VM_BUG_ON(!rwsem_is_locked(&tlb->mm->mmap_sem));
split_huge_page_pmd(vma->vm_mm, pmd);
} else if (zap_huge_pmd(tlb, vma, pmd, addr))
continue;
/* fall through */
}
if (pmd_none_or_clear_bad(pmd))
Because this race condition could be exercised without special
privileges this was reported in CVE-2012-1179.
The race was identified and fully explained by Ulrich who debugged it.
I'm quoting his accurate explanation below, for reference.
====== start quote =======
mapcount 0 page_mapcount 1
kernel BUG at mm/huge_memory.c:1384!
At some point prior to the panic, a "bad pmd ..." message similar to the
following is logged on the console:
mm/memory.c:145: bad pmd ffff8800376e1f98(80000000314000e7).
The "bad pmd ..." message is logged by pmd_clear_bad() before it clears
the page's PMD table entry.
143 void pmd_clear_bad(pmd_t *pmd)
144 {
-> 145 pmd_ERROR(*pmd);
146 pmd_clear(pmd);
147 }
After the PMD table entry has been cleared, there is an inconsistency
between the actual number of PMD table entries that are mapping the page
and the page's map count (_mapcount field in struct page). When the page
is subsequently reclaimed, __split_huge_page() detects this inconsistency.
1381 if (mapcount != page_mapcount(page))
1382 printk(KERN_ERR "mapcount %d page_mapcount %d\n",
1383 mapcount, page_mapcount(page));
-> 1384 BUG_ON(mapcount != page_mapcount(page));
The root cause of the problem is a race of two threads in a multithreaded
process. Thread B incurs a page fault on a virtual address that has never
been accessed (PMD entry is zero) while Thread A is executing an madvise()
system call on a virtual address within the same 2 MB (huge page) range.
virtual address space
.---------------------.
| |
| |
.-|---------------------|
| | |
| | |<-- B(fault)
| | |
2 MB | |/////////////////////|-.
huge < |/////////////////////| > A(range)
page | |/////////////////////|-'
| | |
| | |
'-|---------------------|
| |
| |
'---------------------'
- Thread A is executing an madvise(..., MADV_DONTNEED) system call
on the virtual address range "A(range)" shown in the picture.
sys_madvise
// Acquire the semaphore in shared mode.
    down_read(&current->mm->mmap_sem)
...
madvise_vma
switch (behavior)
case MADV_DONTNEED:
madvise_dontneed
zap_page_range
unmap_vmas
unmap_page_range
zap_pud_range
zap_pmd_range
//
// Assume that this huge page has never been accessed.
// I.e. content of the PMD entry is zero (not mapped).
//
if (pmd_trans_huge(*pmd)) {
// We don't get here due to the above assumption.
}
//
// Assume that Thread B incurred a page fault and
.---------> // sneaks in here as shown below.
| //
| if (pmd_none_or_clear_bad(pmd))
| {
| if (unlikely(pmd_bad(*pmd)))
| pmd_clear_bad
| {
| pmd_ERROR
| // Log "bad pmd ..." message here.
| pmd_clear
| // Clear the page's PMD entry.
| // Thread B incremented the map count
| // in page_add_new_anon_rmap(), but
| // now the page is no longer mapped
| // by a PMD entry (-> inconsistency).
| }
| }
|
v
- Thread B is handling a page fault on virtual address "B(fault)" shown
in the picture.
...
do_page_fault
__do_page_fault
// Acquire the semaphore in shared mode.
down_read_trylock(&mm->mmap_sem)
...
handle_mm_fault
if (pmd_none(*pmd) && transparent_hugepage_enabled(vma))
// We get here due to the above assumption (PMD entry is zero).
do_huge_pmd_anonymous_page
alloc_hugepage_vma
// Allocate a new transparent huge page here.
...
__do_huge_pmd_anonymous_page
...
spin_lock(&mm->page_table_lock)
...
page_add_new_anon_rmap
// Here we increment the page's map count (starts at -1).
atomic_set(&page->_mapcount, 0)
set_pmd_at
// Here we set the page's PMD entry which will be cleared
// when Thread A calls pmd_clear_bad().
...
spin_unlock(&mm->page_table_lock)
The mmap_sem does not prevent the race because both threads are acquiring
it in shared mode (down_read). Thread B holds the page_table_lock while
the page's map count and PMD table entry are updated. However, Thread A
does not synchronize on that lock.
====== end quote =======
[[email protected]: checkpatch fixes]
Reported-by: Ulrich Obergfell <[email protected]>
Signed-off-by: Andrea Arcangeli <[email protected]>
Acked-by: Johannes Weiner <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: Hugh Dickins <[email protected]>
Cc: Dave Jones <[email protected]>
Acked-by: Larry Woodman <[email protected]>
Acked-by: Rik van Riel <[email protected]>
Cc: <[email protected]> [2.6.38+]
Cc: Mark Salter <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
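The fix's core mechanism is reading the pmd once into a local, with a compiler barrier so every subsequent check sees the same snapshot. A simplified sketch of the helper this patch introduces (kernel context; the real one in include/asm-generic/pgtable.h handles a few more cases):

/* Sketch: snapshot *pmd into a local so all checks see one consistent
 * value even if a concurrent huge-page fault rewrites the entry. */
static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd)
{
	pmd_t pmdval = *pmd;

	barrier();	/* compiler-only: forbid re-reads of *pmd */
	if (pmd_none(pmdval) || pmd_trans_huge(pmdval))
		return 1;	/* nothing to do, or a hugepmd appeared */
	if (unlikely(pmd_bad(pmdval))) {
		pmd_clear_bad(pmd);
		return 1;
	}
	return 0;
}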
void sps_range_extension::dump(int fd) const
{
FILE* fh;
if (fd==1) fh=stdout;
else if (fd==2) fh=stderr;
else { return; }
LOG0("----------------- SPS-range-extension -----------------\n");
LOG1("transform_skip_rotation_enabled_flag : %d\n", transform_skip_rotation_enabled_flag);
LOG1("transform_skip_context_enabled_flag : %d\n", transform_skip_context_enabled_flag);
LOG1("implicit_rdpcm_enabled_flag : %d\n", implicit_rdpcm_enabled_flag);
LOG1("explicit_rdpcm_enabled_flag : %d\n", explicit_rdpcm_enabled_flag);
LOG1("extended_precision_processing_flag : %d\n", extended_precision_processing_flag);
LOG1("intra_smoothing_disabled_flag : %d\n", intra_smoothing_disabled_flag);
LOG1("high_precision_offsets_enabled_flag : %d\n", high_precision_offsets_enabled_flag);
LOG1("persistent_rice_adaptation_enabled_flag : %d\n", persistent_rice_adaptation_enabled_flag);
LOG1("cabac_bypass_alignment_enabled_flag : %d\n", cabac_bypass_alignment_enabled_flag);
} | 0 | [
"CWE-787"
]
| libde265 | 8e89fe0e175d2870c39486fdd09250b230ec10b8 | 191,782,345,028,106,800,000,000,000,000,000,000,000 | 18 | error on out-of-range cpb_cnt_minus1 (oss-fuzz issue 27590) |
static int kvm_guest_time_update(struct kvm_vcpu *v)
{
unsigned long flags, this_tsc_khz;
struct kvm_vcpu_arch *vcpu = &v->arch;
struct kvm_arch *ka = &v->kvm->arch;
s64 kernel_ns, max_kernel_ns;
u64 tsc_timestamp, host_tsc;
struct pvclock_vcpu_time_info guest_hv_clock;
u8 pvclock_flags;
bool use_master_clock;
kernel_ns = 0;
host_tsc = 0;
/*
* If the host uses TSC clock, then passthrough TSC as stable
* to the guest.
*/
spin_lock(&ka->pvclock_gtod_sync_lock);
use_master_clock = ka->use_master_clock;
if (use_master_clock) {
host_tsc = ka->master_cycle_now;
kernel_ns = ka->master_kernel_ns;
}
spin_unlock(&ka->pvclock_gtod_sync_lock);
/* Keep irq disabled to prevent changes to the clock */
local_irq_save(flags);
this_tsc_khz = __get_cpu_var(cpu_tsc_khz);
if (unlikely(this_tsc_khz == 0)) {
local_irq_restore(flags);
kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
return 1;
}
if (!use_master_clock) {
host_tsc = native_read_tsc();
kernel_ns = get_kernel_ns();
}
tsc_timestamp = kvm_x86_ops->read_l1_tsc(v, host_tsc);
/*
* We may have to catch up the TSC to match elapsed wall clock
* time for two reasons, even if kvmclock is used.
* 1) CPU could have been running below the maximum TSC rate
* 2) Broken TSC compensation resets the base at each VCPU
* entry to avoid unknown leaps of TSC even when running
* again on the same CPU. This may cause apparent elapsed
* time to disappear, and the guest to stand still or run
* very slowly.
*/
if (vcpu->tsc_catchup) {
u64 tsc = compute_guest_tsc(v, kernel_ns);
if (tsc > tsc_timestamp) {
adjust_tsc_offset_guest(v, tsc - tsc_timestamp);
tsc_timestamp = tsc;
}
}
local_irq_restore(flags);
if (!vcpu->pv_time_enabled)
return 0;
/*
* Time as measured by the TSC may go backwards when resetting the base
* tsc_timestamp. The reason for this is that the TSC resolution is
* higher than the resolution of the other clock scales. Thus, many
* possible measurments of the TSC correspond to one measurement of any
* other clock, and so a spread of values is possible. This is not a
* problem for the computation of the nanosecond clock; with TSC rates
* around 1GHZ, there can only be a few cycles which correspond to one
* nanosecond value, and any path through this code will inevitably
* take longer than that. However, with the kernel_ns value itself,
* the precision may be much lower, down to HZ granularity. If the
* first sampling of TSC against kernel_ns ends in the low part of the
* range, and the second in the high end of the range, we can get:
*
* (TSC - offset_low) * S + kns_old > (TSC - offset_high) * S + kns_new
*
* As the sampling errors potentially range in the thousands of cycles,
* it is possible such a time value has already been observed by the
* guest. To protect against this, we must compute the system time as
* observed by the guest and ensure the new system time is greater.
*/
max_kernel_ns = 0;
if (vcpu->hv_clock.tsc_timestamp) {
max_kernel_ns = vcpu->last_guest_tsc -
vcpu->hv_clock.tsc_timestamp;
max_kernel_ns = pvclock_scale_delta(max_kernel_ns,
vcpu->hv_clock.tsc_to_system_mul,
vcpu->hv_clock.tsc_shift);
max_kernel_ns += vcpu->last_kernel_ns;
}
if (unlikely(vcpu->hw_tsc_khz != this_tsc_khz)) {
kvm_get_time_scale(NSEC_PER_SEC / 1000, this_tsc_khz,
&vcpu->hv_clock.tsc_shift,
&vcpu->hv_clock.tsc_to_system_mul);
vcpu->hw_tsc_khz = this_tsc_khz;
}
/* with a master <monotonic time, tsc value> tuple,
* pvclock clock reads always increase at the (scaled) rate
* of guest TSC - no need to deal with sampling errors.
*/
if (!use_master_clock) {
if (max_kernel_ns > kernel_ns)
kernel_ns = max_kernel_ns;
}
/* With all the info we got, fill in the values */
vcpu->hv_clock.tsc_timestamp = tsc_timestamp;
vcpu->hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset;
vcpu->last_kernel_ns = kernel_ns;
vcpu->last_guest_tsc = tsc_timestamp;
/*
* The interface expects us to write an even number signaling that the
* update is finished. Since the guest won't see the intermediate
* state, we just increase by 2 at the end.
*/
vcpu->hv_clock.version += 2;
if (unlikely(kvm_read_guest_cached(v->kvm, &vcpu->pv_time,
&guest_hv_clock, sizeof(guest_hv_clock))))
return 0;
/* retain PVCLOCK_GUEST_STOPPED if set in guest copy */
pvclock_flags = (guest_hv_clock.flags & PVCLOCK_GUEST_STOPPED);
if (vcpu->pvclock_set_guest_stopped_request) {
pvclock_flags |= PVCLOCK_GUEST_STOPPED;
vcpu->pvclock_set_guest_stopped_request = false;
}
/* If the host uses TSC clocksource, then it is stable */
if (use_master_clock)
pvclock_flags |= PVCLOCK_TSC_STABLE_BIT;
vcpu->hv_clock.flags = pvclock_flags;
kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
&vcpu->hv_clock,
sizeof(vcpu->hv_clock));
return 0;
} | 0 | [
"CWE-399"
]
| kvm | 0b79459b482e85cb7426aa7da683a9f2c97aeae1 | 335,423,565,433,096,800,000,000,000,000,000,000,000 | 146 | KVM: x86: Convert MSR_KVM_SYSTEM_TIME to use gfn_to_hva_cache functions (CVE-2013-1797)
There is a potential use after free issue with the handling of
MSR_KVM_SYSTEM_TIME. If the guest specifies a GPA in a movable or removable
memory such as frame buffers then KVM might continue to write to that
address even after it's removed via KVM_SET_USER_MEMORY_REGION. KVM pins
the page in memory so it's unlikely to cause an issue, but if the user
space component re-purposes the memory previously used for the guest, then
the guest will be able to corrupt that memory.
Tested: Tested against kvmclock unit test
Signed-off-by: Andrew Honig <[email protected]>
Signed-off-by: Marcelo Tosatti <[email protected]> |
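The conversion the commit describes moves the MSR-write side from pinning a page to (re)initializing a gfn-to-hva cache, which is what lets kvm_write_guest_cached() in the function above revalidate the address after memslot changes. A sketch of that handler hunk, consistent with the field names visible above (vcpu->arch.pv_time, pv_time_enabled) but an approximation rather than the exact patch:

/* Sketch: inside the MSR_KVM_SYSTEM_TIME case of the wrmsr handler. */
vcpu->arch.pv_time_enabled = false;
if (data & 1) {		/* bit 0 set: clock enabled by the guest */
	if (!kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.pv_time,
				       data & ~1ULL,
				       sizeof(struct pvclock_vcpu_time_info)))
		vcpu->arch.pv_time_enabled = true;
}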
apply_stored_configuration_at_startup (GsdXrandrManager *manager, guint32 timestamp)
{
GError *my_error;
gboolean success;
char *backup_filename;
char *intended_filename;
backup_filename = gnome_rr_config_get_backup_filename ();
intended_filename = gnome_rr_config_get_intended_filename ();
/* 1. See if there was a "saved" configuration. If there is one, it means
* that the user had selected to change the display configuration, but the
* machine crashed. In that case, we'll apply *that* configuration and save it on top of the
* "intended" one.
*/
my_error = NULL;
success = apply_configuration_from_filename (manager, backup_filename, FALSE, timestamp, &my_error);
if (success) {
/* The backup configuration existed, and could be applied
* successfully, so we must restore it on top of the
* failed/intended one.
*/
restore_backup_configuration (manager, backup_filename, intended_filename, timestamp);
goto out;
}
if (!g_error_matches (my_error, G_FILE_ERROR, G_FILE_ERROR_NOENT)) {
/* Epic fail: there (probably) was a backup configuration, but
* we could not apply it. The only thing we can do is delete
* the backup configuration. Let's hope that the user doesn't
* get left with an unusable display...
*/
unlink (backup_filename);
goto out;
}
/* 2. There was no backup configuration! This means we are
* good. Apply the intended configuration instead.
*/
apply_intended_configuration (manager, intended_filename, timestamp);
out:
if (my_error)
g_error_free (my_error);
g_free (backup_filename);
g_free (intended_filename);
} | 0 | []
| gnome-settings-daemon | be513b3c7d80d0b7013d79ce46d7eeca929705cc | 278,448,968,385,709,640,000,000,000,000,000,000,000 | 53 | Implement autoconfiguration of the outputs
This is similar in spirit to 'xrandr --auto', but we disfavor selecting clone modes.
Instead, we lay out the outputs left-to-right.
Signed-off-by: Federico Mena Quintero <[email protected]> |
static void __exit snd_mem_exit(void)
{
remove_proc_entry(SND_MEM_PROC_FILE, NULL);
free_all_reserved_pages();
if (snd_allocated_pages > 0)
printk(KERN_ERR "snd-malloc: Memory leak? pages not freed = %li\n", snd_allocated_pages);
} | 0 | []
| linux-2.6 | ccec6e2c4a74adf76ed4e2478091a311b1806212 | 250,622,172,965,917,600,000,000,000,000,000,000,000 | 7 | Convert snd-page-alloc proc file to use seq_file
Use seq_file for the proc file read/write of snd-page-alloc module.
This automatically fixes bugs in the old proc code.
Signed-off-by: Takashi Iwai <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
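For reference, the standard single-record seq_file conversion looks like the sketch below (generic shape for 2.6-era kernels, not the actual snd-page-alloc code; newer kernels replace struct file_operations here with struct proc_ops):

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

/* Sketch: a minimal seq_file-backed read-only proc entry. */
static int demo_show(struct seq_file *m, void *v)
{
	seq_printf(m, "pages allocated: %li\n", 0L);
	return 0;
}

static int demo_open(struct inode *inode, struct file *file)
{
	return single_open(file, demo_show, NULL);
}

static const struct file_operations demo_fops = {
	.owner   = THIS_MODULE,
	.open    = demo_open,
	.read    = seq_read,	/* seq_file handles buffering/offsets */
	.llseek  = seq_lseek,
	.release = single_release,
};

/* registration: proc_create("demo", 0444, NULL, &demo_fops); */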
static zend_object_value sqlite_object_new_query(zend_class_entry *class_type TSRMLS_DC)
{
zend_object_value retval;
sqlite_object_new(class_type, &sqlite_object_handlers_query, &retval TSRMLS_CC);
return retval;
} | 0 | []
| php-src | ce96fd6b0761d98353761bf78d5bfb55291179fd | 70,313,401,198,443,730,000,000,000,000,000,000,000 | 7 | - fix #39863, do not accept paths with NULL in them. See http://news.php.net/php.internals/50191, trunk will have the patch later (adding a macro and/or changing (some) APIs. Patch by Rasmus |
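Rejecting paths with an embedded NUL is a single memchr() once the path length is carried explicitly. A minimal sketch:

#include <string.h>

/* Sketch: a path received with an explicit length must not contain an
 * embedded NUL, or later C-string APIs silently truncate it. */
static int path_is_clean(const char *path, size_t len)
{
	return memchr(path, '\0', len) == NULL;
}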
static void connection_read_for_eos(connection * const con) {
!con->is_ssl_sock
? connection_read_for_eos_plain(con)
: connection_read_for_eos_ssl(con);
} | 0 | [
"CWE-703"
]
| lighttpd1.4 | b03b86f47b0d5a553137f081fadc482b4af1372d | 117,154,304,726,964,970,000,000,000,000,000,000,000 | 5 | [core] fix merging large headers across mult reads (fixes #3059)
(thx mitd)
x-ref:
"Connections stuck in Close_Wait causing 100% cpu usage"
https://redmine.lighttpd.net/issues/3059 |
parser_parse_object_method (parser_context_t *context_p) /**< context */
{
context_p->source_p--;
context_p->column--;
uint16_t function_literal_index = lexer_construct_function_object (context_p, (PARSER_FUNCTION_CLOSURE
| PARSER_ALLOW_SUPER
| PARSER_IS_METHOD));
parser_emit_cbc_literal (context_p,
CBC_PUSH_LITERAL,
function_literal_index);
context_p->last_cbc.literal_type = LEXER_FUNCTION_LITERAL;
lexer_next_token (context_p);
} /* parser_parse_object_method */ | 0 | [
"CWE-416"
]
| jerryscript | 3bcd48f72d4af01d1304b754ef19fe1a02c96049 | 6,422,911,588,710,815,000,000,000,000,000,000,000 | 16 | Improve parse_identifier (#4691)
Ascii string length is no longer computed during string allocation.
JerryScript-DCO-1.0-Signed-off-by: Daniel Batiz <[email protected]> |
static int bnxt_re_modify_shadow_qp(struct bnxt_re_dev *rdev,
struct bnxt_re_qp *qp1_qp,
int qp_attr_mask)
{
struct bnxt_re_qp *qp = rdev->qp1_sqp;
int rc = 0;
if (qp_attr_mask & IB_QP_STATE) {
qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
qp->qplib_qp.state = qp1_qp->qplib_qp.state;
}
if (qp_attr_mask & IB_QP_PKEY_INDEX) {
qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
qp->qplib_qp.pkey_index = qp1_qp->qplib_qp.pkey_index;
}
if (qp_attr_mask & IB_QP_QKEY) {
qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
/* Using a Random QKEY */
qp->qplib_qp.qkey = 0x81818181;
}
if (qp_attr_mask & IB_QP_SQ_PSN) {
qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
qp->qplib_qp.sq.psn = qp1_qp->qplib_qp.sq.psn;
}
rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
if (rc)
dev_err(rdev_to_dev(rdev),
"Failed to modify Shadow QP for QP1");
return rc;
} | 0 | [
"CWE-400",
"CWE-401"
]
| linux | 4a9d46a9fe14401f21df69cea97c62396d5fb053 | 20,753,068,831,571,100,000,000,000,000,000,000,000 | 32 | RDMA: Fix goto target to release the allocated memory
In bnxt_re_create_srq(), when ib_copy_to_udata() fails allocated memory
should be released by goto fail.
Fixes: 37cb11acf1f7 ("RDMA/bnxt_re: Add SRQ support for Broadcom adapters")
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Navid Emamdoost <[email protected]>
Reviewed-by: Jason Gunthorpe <[email protected]>
Signed-off-by: Jason Gunthorpe <[email protected]> |
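The missing release is an instance of the staged-unwind idiom: every acquisition after the first gets a goto target that frees exactly what was acquired before it. A self-contained user-space sketch:

#include <stdlib.h>

/* Sketch: each failure jumps to the label that releases what has been
 * acquired so far -- forgetting one goto leaks the earlier allocation. */
static int setup(char **out_a, char **out_b)
{
	char *a = malloc(32);
	if (!a)
		return -1;
	char *b = malloc(32);
	if (!b)
		goto free_a;	/* the release the buggy path skipped */
	*out_a = a;
	*out_b = b;
	return 0;
free_a:
	free(a);
	return -1;
}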
Item_ref::Item_ref(TABLE_LIST *view_arg, Item **item,
const char *field_name_arg, bool alias_name_used_arg)
:Item_ident(view_arg, field_name_arg),
result_field(NULL), ref(item), reference_trough_name(0)
{
alias_name_used= alias_name_used_arg;
/*
This constructor is used to create some internal references over fixed items
*/
if ((set_properties_only= (ref && *ref && (*ref)->fixed)))
set_properties();
} | 0 | []
| server | b000e169562697aa072600695d4f0c0412f94f4f | 24,938,656,298,501,530,000,000,000,000,000,000,000 | 12 | Bug#26361149 MYSQL SERVER CRASHES AT: COL IN(IFNULL(CONST, COL), NAME_CONST('NAME', NULL))
based on:
commit f7316aa0c9a
Author: Ajo Robert <[email protected]>
Date: Thu Aug 24 17:03:21 2017 +0530
Bug#26361149 MYSQL SERVER CRASHES AT: COL IN(IFNULL(CONST,
COL), NAME_CONST('NAME', NULL))
Backport of Bug#19143243 fix.
NAME_CONST item can return NULL_ITEM type in case of incorrect arguments.
NULL_ITEM has special processing in Item_func_in function.
In Item_func_in::fix_length_and_dec an array of possible comparators is
created. Since NAME_CONST function has NULL_ITEM type, corresponding
array element is empty. Then NAME_CONST is wrapped to ITEM_CACHE.
ITEM_CACHE can not return proper type(NULL_ITEM) in Item_func_in::val_int(),
so the NULL_ITEM is attempted compared with an empty comparator.
The fix is to disable the caching of Item_name_const item. |
cifs_cleanup_volume_info(struct smb_vol *volume_info)
{
if (!volume_info)
return;
cleanup_volume_info_contents(volume_info);
kfree(volume_info);
} | 0 | [
"CWE-703",
"CWE-189"
]
| linux | 1fc29bacedeabb278080e31bb9c1ecb49f143c3b | 30,358,380,241,370,665,000,000,000,000,000,000,000 | 7 | cifs: fix off-by-one bug in build_unc_path_to_root
commit 839db3d10a (cifs: fix up handling of prefixpath= option) changed
the code such that the vol->prepath no longer contained a leading
delimiter and then fixed up the places that accessed that field to
account for that change.
One spot in build_unc_path_to_root was missed however. When doing the
pointer addition on pos, that patch failed to account for the fact that
we had already incremented "pos" by one when adding the length of the
prepath. This caused a buffer overrun by one byte.
This patch fixes the problem by correcting the handling of "pos".
Cc: <[email protected]> # v3.8+
Reported-by: Marcus Moeller <[email protected]>
Reported-by: Ken Fallon <[email protected]>
Signed-off-by: Jeff Layton <[email protected]>
Signed-off-by: Steve French <[email protected]> |
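Off-by-one pointer walks like this are easiest to see in a reduced form. A hedged sketch of the corrected arithmetic (simplified, not the actual build_unc_path_to_root):

#include <string.h>

/* Sketch: the prefix path is stored without its leading delimiter, so
 * we write the delimiter ourselves and advance 'pos' by exactly what
 * was written -- pplen + 1 bytes, not one more. */
static void append_prepath(char *pos, const char *prepath, size_t pplen)
{
	if (pplen) {
		*pos = '/';
		memcpy(pos + 1, prepath, pplen);
		pos += pplen + 1;
	}
	*pos = '\0';	/* terminator lands inside the buffer */
}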
GF_Err metx_on_child_box(GF_Box *s, GF_Box *a)
{
GF_MetaDataSampleEntryBox *ptr = (GF_MetaDataSampleEntryBox *)s;
switch (a->type) {
case GF_ISOM_BOX_TYPE_SINF:
return GF_OK;
case GF_ISOM_BOX_TYPE_TXTC:
//we allow the config box on metx
if (ptr->config) ERROR_ON_DUPLICATED_BOX(a, ptr)
ptr->config = (GF_TextConfigBox *)a;
break;
}
	return GF_OK;
} | 0 | [
"CWE-787"
]
| gpac | 388ecce75d05e11fc8496aa4857b91245007d26e | 172,584,933,818,248,870,000,000,000,000,000,000,000 | 14 | fixed #1587 |
HiiGetString (
IN CONST EFI_HII_STRING_PROTOCOL *This,
IN CONST CHAR8 *Language,
IN EFI_HII_HANDLE PackageList,
IN EFI_STRING_ID StringId,
OUT EFI_STRING String,
IN OUT UINTN *StringSize,
OUT EFI_FONT_INFO **StringFontInfo OPTIONAL
)
{
EFI_STATUS Status;
LIST_ENTRY *Link;
HII_DATABASE_PRIVATE_DATA *Private;
HII_DATABASE_RECORD *DatabaseRecord;
HII_DATABASE_PACKAGE_LIST_INSTANCE *PackageListNode;
HII_STRING_PACKAGE_INSTANCE *StringPackage;
if (This == NULL || Language == NULL || StringId < 1 || StringSize == NULL || PackageList == NULL) {
return EFI_INVALID_PARAMETER;
}
if (String == NULL && *StringSize != 0) {
return EFI_INVALID_PARAMETER;
}
if (!IsHiiHandleValid (PackageList)) {
return EFI_NOT_FOUND;
}
Private = HII_STRING_DATABASE_PRIVATE_DATA_FROM_THIS (This);
PackageListNode = NULL;
for (Link = Private->DatabaseList.ForwardLink; Link != &Private->DatabaseList; Link = Link->ForwardLink) {
DatabaseRecord = CR (Link, HII_DATABASE_RECORD, DatabaseEntry, HII_DATABASE_RECORD_SIGNATURE);
if (DatabaseRecord->Handle == PackageList) {
PackageListNode = DatabaseRecord->PackageList;
break;
}
}
if (PackageListNode != NULL) {
//
// First search: to match the StringId in the specified language.
//
for (Link = PackageListNode->StringPkgHdr.ForwardLink;
Link != &PackageListNode->StringPkgHdr;
Link = Link->ForwardLink
) {
StringPackage = CR (Link, HII_STRING_PACKAGE_INSTANCE, StringEntry, HII_STRING_PACKAGE_SIGNATURE);
if (HiiCompareLanguage (StringPackage->StringPkgHdr->Language, (CHAR8 *) Language)) {
Status = GetStringWorker (Private, StringPackage, StringId, String, StringSize, StringFontInfo);
if (Status != EFI_NOT_FOUND) {
return Status;
}
}
}
//
// Second search: to match the StringId in other available languages if exist.
//
for (Link = PackageListNode->StringPkgHdr.ForwardLink;
Link != &PackageListNode->StringPkgHdr;
Link = Link->ForwardLink
) {
StringPackage = CR (Link, HII_STRING_PACKAGE_INSTANCE, StringEntry, HII_STRING_PACKAGE_SIGNATURE);
Status = GetStringWorker (Private, StringPackage, StringId, NULL, NULL, NULL);
if (!EFI_ERROR (Status)) {
return EFI_INVALID_LANGUAGE;
}
}
}
return EFI_NOT_FOUND;
} | 0 | []
| edk2 | 764e8ba1389a617639d79d2c4f0d53f4ea4a7387 | 123,133,027,356,585,920,000,000,000,000,000,000,000 | 73 | MdeModulePkg/String.c: Zero memory before free (CVE-2019-14558)
REF: https://bugzilla.tianocore.org/show_bug.cgi?id=1611
Cc: Liming Gao <[email protected]>
Cc: Eric Dong <[email protected]>
Cc: Jian J Wang <[email protected]>
Signed-off-by: Dandan Bi <[email protected]>
Reviewed-by: Eric Dong <[email protected]>
Reviewed-by: Jian J Wang <[email protected]> |
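The fix's idiom generalizes to any pool buffer that held string or key material: wipe it before returning it to the pool. A sketch using the standard EDK2 library calls (the wrapper name is hypothetical):

#include <Uefi.h>
#include <Library/BaseMemoryLib.h>        // ZeroMem
#include <Library/MemoryAllocationLib.h>  // FreePool

/* Sketch: scrub a pool buffer holding sensitive data before freeing it,
 * so the contents cannot leak through a later reallocation. */
VOID
SafeFreeBuffer (
  IN OUT VOID   *Buffer,
  IN     UINTN  BufferSize
  )
{
  if (Buffer != NULL) {
    ZeroMem (Buffer, BufferSize);
    FreePool (Buffer);
  }
}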
long SSL_CTX_set_options(SSL_CTX*, long)
{
    // TODO:
return SSL_SUCCESS;
} | 0 | [
"CWE-254"
]
| mysql-server | e7061f7e5a96c66cb2e0bf46bec7f6ff35801a69 | 264,325,290,497,302,150,000,000,000,000,000,000,000 | 5 | Bug #22738607: YASSL FUNCTION X509_NAME_GET_INDEX_BY_NID IS NOT WORKING AS EXPECTED. |
void TransposerBase::setRate(double newRate)
{
rate = newRate;
}
| 0 | [
"CWE-617"
]
| soundtouch | 107f2c5d201a4dfea1b7f15c5957ff2ac9e5f260 | 51,580,371,970,602,160,000,000,000,000,000,000,000 | 4 | Replaced illegal-number-of-channel assertions with run-time exception |
Field_string::unpack(uchar *to, const uchar *from, const uchar *from_end,
uint param_data)
{
uint from_length, length;
/*
Compute the declared length of the field on the master. This is
used to decide if one or two bytes should be read as length.
*/
if (param_data)
from_length= (((param_data >> 4) & 0x300) ^ 0x300) + (param_data & 0x00ff);
else
from_length= field_length;
DBUG_PRINT("debug",
("param_data: 0x%x, field_length: %u, from_length: %u",
param_data, field_length, from_length));
/*
Compute the actual length of the data by reading one or two bits
(depending on the declared field length on the master).
*/
if (from_length > 255)
{
if (from + 2 > from_end)
return 0;
length= uint2korr(from);
from+= 2;
}
else
{
if (from + 1 > from_end)
return 0;
length= (uint) *from++;
}
if (from + length > from_end || length > field_length)
return 0;
memcpy(to, from, length);
// Pad the string with the pad character of the fields charset
field_charset->cset->fill(field_charset, (char*) to + length, field_length - length, field_charset->pad_char);
return from+length;
} | 0 | [
"CWE-416",
"CWE-703"
]
| server | 08c7ab404f69d9c4ca6ca7a9cf7eec74c804f917 | 119,044,554,163,770,050,000,000,000,000,000,000,000 | 42 | MDEV-24176 Server crashes after insert in the table with virtual
column generated using date_format() and if()
vcol_info->expr is allocated on expr_arena at parsing stage. Since
the expr item is allocated on expr_arena, all its contained items must be
allocated on expr_arena too. Otherwise fix_session_expr() will
encounter a prematurely freed item.
When table is reopened from cache vcol_info contains stale
expression. We refresh expression via TABLE::vcol_fix_exprs() but
first we must prepare a proper context (Vcol_expr_context) which meets
some requirements:
1. As noted above expr update must be done on expr_arena as there may
be new items created. It was a bug in fix_session_expr_for_read() and
was just not reproduced because of no second refix. Now refix is done
for more cases so it does reproduce. Tests affected: vcol.binlog
2. Also name resolution context must be narrowed to the single table.
Tested by: vcol.update main.default vcol.vcol_syntax gcol.gcol_bugfixes
3. sql_mode must be clean and not fail expr update.
sql_mode such as MODE_NO_BACKSLASH_ESCAPES, MODE_NO_ZERO_IN_DATE, etc
must not affect vcol expression update. If the table was created
successfully any further evaluation must not fail. Tests affected:
main.func_like
Reviewed by: Sergei Golubchik <[email protected]> |
TEST_F(EncryptionUtilTest, aes_test_by_case) {
std::string case_1 = "9qYx8l1601oWHEVCREAqZg=="; // base64 for encrypted "hello, doris"
std::string source_1 = "hello, doris";
std::string case_2 = "nP/db4j4yqMjXv/pItaOVA=="; // base64 for encrypted "doris test"
std::string source_2 = "doris test";
std::unique_ptr<char[]> encrypt_1(new char[case_1.length()]);
int length_1 = base64_decode(case_1.c_str(), case_1.length(), encrypt_1.get());
std::unique_ptr<char[]> decrypted_1(new char[case_1.length()]);
int ret_code = EncryptionUtil::decrypt(AES_128_ECB, (unsigned char*)encrypt_1.get(), length_1,
(unsigned char*)_aes_key.c_str(), _aes_key.length(),
nullptr, true, (unsigned char*)decrypted_1.get());
ASSERT_TRUE(ret_code > 0);
std::string decrypted_content_1(decrypted_1.get(), ret_code);
ASSERT_EQ(source_1, decrypted_content_1);
std::unique_ptr<char[]> encrypt_2(new char[case_2.length()]);
int length_2 = base64_decode(case_2.c_str(), case_2.length(), encrypt_2.get());
std::unique_ptr<char[]> decrypted_2(new char[case_2.length()]);
ret_code = EncryptionUtil::decrypt(AES_128_ECB, (unsigned char*)encrypt_2.get(), length_2,
(unsigned char*)_aes_key.c_str(), _aes_key.length(), nullptr,
true, (unsigned char*)decrypted_2.get());
ASSERT_TRUE(ret_code > 0);
std::string decrypted_content_2(decrypted_2.get(), ret_code);
ASSERT_EQ(source_2, decrypted_content_2);
} | 0 | [
"CWE-200"
]
| incubator-doris | 246ac4e37aa4da6836b7850cb990f02d1c3725a3 | 1,405,454,677,843,115,000,000,000,000,000,000,000 | 26 | [fix] fix a bug of encryption function with iv may return wrong result (#8277) |
SAPI_API void sapi_unregister_post_entry(sapi_post_entry *post_entry TSRMLS_DC)
{
if (SG(sapi_started) && EG(in_execution)) {
return;
}
zend_hash_del(&SG(known_post_content_types), post_entry->content_type,
post_entry->content_type_len+1);
} | 0 | [
"CWE-190",
"CWE-79"
]
| php-src | 996faf964bba1aec06b153b370a7f20d3dd2bb8b | 146,449,432,928,087,720,000,000,000,000,000,000,000 | 8 | Update header handling to RFC 7230 |
TORRENT_TEST(dict_find_funs)
{
// a: int
// b: string
// c: list
// d: dict
char b[] = "d1:ai1e1:b3:foo1:cli1ei2ee1:dd1:xi1eee";
bdecode_node e;
error_code ec;
int ret = bdecode(b, b + sizeof(b)-1, e, ec);
TEST_EQUAL(ret, 0);
printf("%s\n", print_entry(e).c_str());
TEST_EQUAL(e.type(), bdecode_node::dict_t);
// dict_find_int*
TEST_EQUAL(e.dict_find_int_value("a"), 1);
TEST_EQUAL(e.dict_find_int("a").type(), bdecode_node::int_t);
TEST_EQUAL(e.dict_find_int_value("b", -10), -10);
TEST_EQUAL(e.dict_find_int_value("x", -10), -10);
TEST_EQUAL(e.dict_find_int("b").type(), bdecode_node::none_t);
TEST_EQUAL(e.dict_find_int("x").type(), bdecode_node::none_t);
// dict_find_string*
TEST_EQUAL(e.dict_find_string_value("b"), "foo");
TEST_EQUAL(e.dict_find_string("b").type(), bdecode_node::string_t);
TEST_EQUAL(e.dict_find_string_value("c", "blah"), "blah");
TEST_EQUAL(e.dict_find_string_value("x", "blah"), "blah");
TEST_EQUAL(e.dict_find_string("c").type(), bdecode_node::none_t);
TEST_EQUAL(e.dict_find_string("x").type(), bdecode_node::none_t);
// dict_find_list
TEST_CHECK(e.dict_find_list("c"));
TEST_EQUAL(e.dict_find_list("c").list_size(), 2);
TEST_EQUAL(e.dict_find_list("c").list_int_value_at(0), 1);
TEST_EQUAL(e.dict_find_list("c").list_int_value_at(1), 2);
TEST_CHECK(!e.dict_find_list("d"));
// dict_find_dict
TEST_CHECK(e.dict_find_dict("d"));
TEST_EQUAL(e.dict_find_dict("d").dict_find_int_value("x"), 1);
TEST_EQUAL(e.dict_find_dict("d").dict_find_int_value("y", -10), -10);
TEST_CHECK(!e.dict_find_dict("c"));
// variants taking std::string
TEST_EQUAL(e.dict_find_dict(std::string("d")).dict_find_int_value("x"), 1);
TEST_CHECK(!e.dict_find_dict(std::string("c")));
TEST_CHECK(!e.dict_find_dict(std::string("x")));
TEST_EQUAL(e.dict_size(), 4);
TEST_EQUAL(e.dict_size(), 4);
// dict_at
TEST_EQUAL(e.dict_at(0).first, "a");
TEST_EQUAL(e.dict_at(0).second.int_value(), 1);
TEST_EQUAL(e.dict_at(1).first, "b");
TEST_EQUAL(e.dict_at(1).second.string_value(), "foo");
TEST_EQUAL(e.dict_at(2).first, "c");
TEST_EQUAL(e.dict_at(2).second.type(), bdecode_node::list_t);
TEST_EQUAL(e.dict_at(3).first, "d");
TEST_EQUAL(e.dict_at(3).second.type(), bdecode_node::dict_t);
} | 0 | [
"CWE-125"
]
| libtorrent | ec30a5e9ec703afb8abefba757c6d401303b53db | 287,798,366,255,439,550,000,000,000,000,000,000,000 | 67 | fix out-of-bounds read in bdecode
Fixes #2099 |
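The overread class fixed here is consuming a decoded length field without checking it against the end of the buffer. A minimal C sketch of the guard:

#include <stddef.h>

/* Sketch: before reading 'len' bytes of string payload, verify the span
 * fits inside [cur, end) -- the missing check behind bdecode overreads.
 * Assumes cur <= end as a parser invariant. */
static int can_read(const char *cur, const char *end, size_t len)
{
	return (size_t)(end - cur) >= len;
}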
static int nested_vmx_check_io_bitmap_controls(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12)
{
if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
return 0;
if (!page_address_valid(vcpu, vmcs12->io_bitmap_a) ||
!page_address_valid(vcpu, vmcs12->io_bitmap_b))
return -EINVAL;
return 0;
} | 0 | [
"CWE-284"
]
| linux | 727ba748e110b4de50d142edca9d6a9b7e6111d8 | 281,728,170,329,043,560,000,000,000,000,000,000,000 | 12 | kvm: nVMX: Enforce cpl=0 for VMX instructions
VMX instructions executed inside an L1 VM will always trigger a VM exit
even when executed with cpl 3. This means we must perform the
privilege check in software.
Fixes: 70f3aac964ae ("kvm: nVMX: Remove superfluous VMX instruction fault checks")
Cc: [email protected]
Signed-off-by: Felix Wilhelm <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]> |
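The fix this message describes amounts to an explicit software CPL test before emulating any VMX instruction. A minimal sketch of that pattern, assuming KVM-style helpers vmx_get_cpl() and kvm_inject_gp() (both exist in the KVM tree, but treat this as an illustration, not the verbatim patch):

/* Sketch: refuse VMX instruction emulation unless the guest runs at CPL 0. */
static int nested_vmx_check_permission_sketch(struct kvm_vcpu *vcpu)
{
	if (vmx_get_cpl(vcpu)) {		/* CPL != 0: guest user mode */
		kvm_inject_gp(vcpu, 0);		/* inject #GP(0), as hardware would */
		return 0;			/* caller skips the emulation */
	}
	return 1;				/* privileged, proceed */
}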
ProcXkbGetKbdByName(ClientPtr client)
{
DeviceIntPtr dev;
DeviceIntPtr tmpd;
DeviceIntPtr master;
xkbGetKbdByNameReply rep = { 0 };
xkbGetMapReply mrep = { 0 };
xkbGetCompatMapReply crep = { 0 };
xkbGetIndicatorMapReply irep = { 0 };
xkbGetNamesReply nrep = { 0 };
xkbGetGeometryReply grep = { 0 };
XkbComponentNamesRec names = { 0 };
XkbDescPtr xkb, new;
XkbEventCauseRec cause;
unsigned char *str;
char mapFile[PATH_MAX];
unsigned len;
unsigned fwant, fneed, reported;
int status;
Bool geom_changed;
XkbSrvLedInfoPtr old_sli;
XkbSrvLedInfoPtr sli;
Mask access_mode = DixGetAttrAccess | DixManageAccess;
REQUEST(xkbGetKbdByNameReq);
REQUEST_AT_LEAST_SIZE(xkbGetKbdByNameReq);
if (!(client->xkbClientFlags & _XkbClientInitialized))
return BadAccess;
CHK_KBD_DEVICE(dev, stuff->deviceSpec, client, access_mode);
master = GetMaster(dev, MASTER_KEYBOARD);
xkb = dev->key->xkbInfo->desc;
status = Success;
str = (unsigned char *) &stuff[1];
if (GetComponentSpec(&str, TRUE, &status)) /* keymap, unsupported */
return BadMatch;
names.keycodes = GetComponentSpec(&str, TRUE, &status);
names.types = GetComponentSpec(&str, TRUE, &status);
names.compat = GetComponentSpec(&str, TRUE, &status);
names.symbols = GetComponentSpec(&str, TRUE, &status);
names.geometry = GetComponentSpec(&str, TRUE, &status);
if (status != Success)
return status;
len = str - ((unsigned char *) stuff);
if ((XkbPaddedSize(len) / 4) != stuff->length)
return BadLength;
CHK_MASK_LEGAL(0x01, stuff->want, XkbGBN_AllComponentsMask);
CHK_MASK_LEGAL(0x02, stuff->need, XkbGBN_AllComponentsMask);
if (stuff->load)
fwant = XkbGBN_AllComponentsMask;
else
fwant = stuff->want | stuff->need;
if ((!names.compat) &&
(fwant & (XkbGBN_CompatMapMask | XkbGBN_IndicatorMapMask))) {
names.compat = Xstrdup("%");
}
if ((!names.types) && (fwant & (XkbGBN_TypesMask))) {
names.types = Xstrdup("%");
}
if ((!names.symbols) && (fwant & XkbGBN_SymbolsMask)) {
names.symbols = Xstrdup("%");
}
geom_changed = ((names.geometry != NULL) &&
(strcmp(names.geometry, "%") != 0));
if ((!names.geometry) && (fwant & XkbGBN_GeometryMask)) {
names.geometry = Xstrdup("%");
geom_changed = FALSE;
}
memset(mapFile, 0, PATH_MAX);
rep.type = X_Reply;
rep.deviceID = dev->id;
rep.sequenceNumber = client->sequence;
rep.length = 0;
rep.minKeyCode = xkb->min_key_code;
rep.maxKeyCode = xkb->max_key_code;
rep.loaded = FALSE;
fwant =
XkbConvertGetByNameComponents(TRUE, stuff->want) | XkmVirtualModsMask;
fneed = XkbConvertGetByNameComponents(TRUE, stuff->need);
rep.reported = XkbConvertGetByNameComponents(FALSE, fwant | fneed);
if (stuff->load) {
fneed |= XkmKeymapRequired;
fwant |= XkmKeymapLegal;
}
if ((fwant | fneed) & XkmSymbolsMask) {
fneed |= XkmKeyNamesIndex | XkmTypesIndex;
fwant |= XkmIndicatorsIndex;
}
/* We pass dev in here so we can get the old names out if needed. */
rep.found = XkbDDXLoadKeymapByNames(dev, &names, fwant, fneed, &new,
mapFile, PATH_MAX);
rep.newKeyboard = FALSE;
rep.pad1 = rep.pad2 = rep.pad3 = rep.pad4 = 0;
stuff->want |= stuff->need;
if (new == NULL)
rep.reported = 0;
else {
if (stuff->load)
rep.loaded = TRUE;
if (stuff->load ||
((rep.reported & XkbGBN_SymbolsMask) && (new->compat))) {
XkbChangesRec changes;
memset(&changes, 0, sizeof(changes));
XkbUpdateDescActions(new,
new->min_key_code, XkbNumKeys(new), &changes);
}
if (new->map == NULL)
rep.reported &= ~(XkbGBN_SymbolsMask | XkbGBN_TypesMask);
else if (rep.reported & (XkbGBN_SymbolsMask | XkbGBN_TypesMask)) {
mrep.type = X_Reply;
mrep.deviceID = dev->id;
mrep.sequenceNumber = client->sequence;
mrep.length =
((SIZEOF(xkbGetMapReply) - SIZEOF(xGenericReply)) >> 2);
mrep.minKeyCode = new->min_key_code;
mrep.maxKeyCode = new->max_key_code;
mrep.present = 0;
mrep.totalSyms = mrep.totalActs =
mrep.totalKeyBehaviors = mrep.totalKeyExplicit =
mrep.totalModMapKeys = mrep.totalVModMapKeys = 0;
if (rep.reported & (XkbGBN_TypesMask | XkbGBN_ClientSymbolsMask)) {
mrep.present |= XkbKeyTypesMask;
mrep.firstType = 0;
mrep.nTypes = mrep.totalTypes = new->map->num_types;
}
else {
mrep.firstType = mrep.nTypes = 0;
mrep.totalTypes = 0;
}
if (rep.reported & XkbGBN_ClientSymbolsMask) {
mrep.present |= (XkbKeySymsMask | XkbModifierMapMask);
mrep.firstKeySym = mrep.firstModMapKey = new->min_key_code;
mrep.nKeySyms = mrep.nModMapKeys = XkbNumKeys(new);
}
else {
mrep.firstKeySym = mrep.firstModMapKey = 0;
mrep.nKeySyms = mrep.nModMapKeys = 0;
}
if (rep.reported & XkbGBN_ServerSymbolsMask) {
mrep.present |= XkbAllServerInfoMask;
mrep.virtualMods = ~0;
mrep.firstKeyAct = mrep.firstKeyBehavior =
mrep.firstKeyExplicit = new->min_key_code;
mrep.nKeyActs = mrep.nKeyBehaviors =
mrep.nKeyExplicit = XkbNumKeys(new);
mrep.firstVModMapKey = new->min_key_code;
mrep.nVModMapKeys = XkbNumKeys(new);
}
else {
mrep.virtualMods = 0;
mrep.firstKeyAct = mrep.firstKeyBehavior =
mrep.firstKeyExplicit = 0;
mrep.nKeyActs = mrep.nKeyBehaviors = mrep.nKeyExplicit = 0;
}
XkbComputeGetMapReplySize(new, &mrep);
rep.length += SIZEOF(xGenericReply) / 4 + mrep.length;
}
if (new->compat == NULL)
rep.reported &= ~XkbGBN_CompatMapMask;
else if (rep.reported & XkbGBN_CompatMapMask) {
crep.type = X_Reply;
crep.deviceID = dev->id;
crep.sequenceNumber = client->sequence;
crep.length = 0;
crep.groups = XkbAllGroupsMask;
crep.firstSI = 0;
crep.nSI = crep.nTotalSI = new->compat->num_si;
XkbComputeGetCompatMapReplySize(new->compat, &crep);
rep.length += SIZEOF(xGenericReply) / 4 + crep.length;
}
if (new->indicators == NULL)
rep.reported &= ~XkbGBN_IndicatorMapMask;
else if (rep.reported & XkbGBN_IndicatorMapMask) {
irep.type = X_Reply;
irep.deviceID = dev->id;
irep.sequenceNumber = client->sequence;
irep.length = 0;
irep.which = XkbAllIndicatorsMask;
XkbComputeGetIndicatorMapReplySize(new->indicators, &irep);
rep.length += SIZEOF(xGenericReply) / 4 + irep.length;
}
if (new->names == NULL)
rep.reported &= ~(XkbGBN_OtherNamesMask | XkbGBN_KeyNamesMask);
else if (rep.reported & (XkbGBN_OtherNamesMask | XkbGBN_KeyNamesMask)) {
nrep.type = X_Reply;
nrep.deviceID = dev->id;
nrep.sequenceNumber = client->sequence;
nrep.length = 0;
nrep.minKeyCode = new->min_key_code;
nrep.maxKeyCode = new->max_key_code;
if (rep.reported & XkbGBN_OtherNamesMask) {
nrep.which = XkbAllNamesMask;
if (new->map != NULL)
nrep.nTypes = new->map->num_types;
else
nrep.nTypes = 0;
nrep.nKTLevels = 0;
nrep.groupNames = XkbAllGroupsMask;
nrep.virtualMods = XkbAllVirtualModsMask;
nrep.indicators = XkbAllIndicatorsMask;
nrep.nRadioGroups = new->names->num_rg;
}
else {
nrep.which = 0;
nrep.nTypes = 0;
nrep.nKTLevels = 0;
nrep.groupNames = 0;
nrep.virtualMods = 0;
nrep.indicators = 0;
nrep.nRadioGroups = 0;
}
if (rep.reported & XkbGBN_KeyNamesMask) {
nrep.which |= XkbKeyNamesMask;
nrep.firstKey = new->min_key_code;
nrep.nKeys = XkbNumKeys(new);
nrep.nKeyAliases = new->names->num_key_aliases;
if (nrep.nKeyAliases)
nrep.which |= XkbKeyAliasesMask;
}
else {
nrep.which &= ~(XkbKeyNamesMask | XkbKeyAliasesMask);
nrep.firstKey = nrep.nKeys = 0;
nrep.nKeyAliases = 0;
}
XkbComputeGetNamesReplySize(new, &nrep);
rep.length += SIZEOF(xGenericReply) / 4 + nrep.length;
}
if (new->geom == NULL)
rep.reported &= ~XkbGBN_GeometryMask;
else if (rep.reported & XkbGBN_GeometryMask) {
grep.type = X_Reply;
grep.deviceID = dev->id;
grep.sequenceNumber = client->sequence;
grep.length = 0;
grep.found = TRUE;
grep.pad = 0;
grep.widthMM = grep.heightMM = 0;
grep.nProperties = grep.nColors = grep.nShapes = 0;
grep.nSections = grep.nDoodads = 0;
grep.baseColorNdx = grep.labelColorNdx = 0;
XkbComputeGetGeometryReplySize(new->geom, &grep, None);
rep.length += SIZEOF(xGenericReply) / 4 + grep.length;
}
}
reported = rep.reported;
if (client->swapped) {
swaps(&rep.sequenceNumber);
swapl(&rep.length);
swaps(&rep.found);
swaps(&rep.reported);
}
WriteToClient(client, SIZEOF(xkbGetKbdByNameReply), &rep);
if (reported & (XkbGBN_SymbolsMask | XkbGBN_TypesMask))
XkbSendMap(client, new, &mrep);
if (reported & XkbGBN_CompatMapMask)
XkbSendCompatMap(client, new->compat, &crep);
if (reported & XkbGBN_IndicatorMapMask)
XkbSendIndicatorMap(client, new->indicators, &irep);
if (reported & (XkbGBN_KeyNamesMask | XkbGBN_OtherNamesMask))
XkbSendNames(client, new, &nrep);
if (reported & XkbGBN_GeometryMask)
XkbSendGeometry(client, new->geom, &grep, FALSE);
if (rep.loaded) {
XkbDescPtr old_xkb;
xkbNewKeyboardNotify nkn;
old_xkb = xkb;
xkb = new;
dev->key->xkbInfo->desc = xkb;
new = old_xkb; /* so it'll get freed automatically */
XkbCopyControls(xkb, old_xkb);
nkn.deviceID = nkn.oldDeviceID = dev->id;
nkn.minKeyCode = new->min_key_code;
nkn.maxKeyCode = new->max_key_code;
nkn.oldMinKeyCode = xkb->min_key_code;
nkn.oldMaxKeyCode = xkb->max_key_code;
nkn.requestMajor = XkbReqCode;
nkn.requestMinor = X_kbGetKbdByName;
nkn.changed = XkbNKN_KeycodesMask;
if (geom_changed)
nkn.changed |= XkbNKN_GeometryMask;
XkbSendNewKeyboardNotify(dev, &nkn);
/* Update the map and LED info on the device itself, as well as
* any slaves if it's an MD, or its MD if it's an SD and was the
* last device used on that MD. */
for (tmpd = inputInfo.devices; tmpd; tmpd = tmpd->next) {
if (tmpd != dev && GetMaster(tmpd, MASTER_KEYBOARD) != dev &&
(tmpd != master || dev != master->lastSlave))
continue;
if (tmpd != dev)
XkbDeviceApplyKeymap(tmpd, xkb);
if (tmpd->kbdfeed && tmpd->kbdfeed->xkb_sli) {
old_sli = tmpd->kbdfeed->xkb_sli;
tmpd->kbdfeed->xkb_sli = NULL;
sli = XkbAllocSrvLedInfo(tmpd, tmpd->kbdfeed, NULL, 0);
if (sli) {
sli->explicitState = old_sli->explicitState;
sli->effectiveState = old_sli->effectiveState;
}
tmpd->kbdfeed->xkb_sli = sli;
XkbFreeSrvLedInfo(old_sli);
}
}
}
if ((new != NULL) && (new != xkb)) {
XkbFreeKeyboard(new, XkbAllComponentsMask, TRUE);
new = NULL;
}
XkbFreeComponentNames(&names, FALSE);
XkbSetCauseXkbReq(&cause, X_kbGetKbdByName, client);
XkbUpdateAllDeviceIndicators(NULL, &cause);
return Success;
} | 0 | [
"CWE-119"
]
| xserver | f7cd1276bbd4fe3a9700096dec33b52b8440788d | 132,579,639,302,546,820,000,000,000,000,000,000,000 | 329 | Correct bounds checking in XkbSetNames()
CVE-2020-14345 / ZDI 11428
This vulnerability was discovered by:
Jan-Niklas Sohn working with Trend Micro Zero Day Initiative
Signed-off-by: Matthieu Herrb <[email protected]> |
static void mmlistcheck(GWindow gw, struct gmenuitem *mi, GEvent *UNUSED(e)) {
FontView *fv = (FontView *) GDrawGetUserData(gw);
int i, base, j;
MMSet *mm = fv->b.sf->mm;
SplineFont *sub;
GMenuItem2 *mml;
for ( i=0; mmlist[i].mid!=MID_ChangeMMBlend; ++i );
base = i+2;
if ( mm==NULL )
mml = mmlist;
else {
mml = calloc(base+mm->instance_count+2,sizeof(GMenuItem2));
memcpy(mml,mmlist,sizeof(mmlist));
mml[base-1].ti.fg = mml[base-1].ti.bg = COLOR_DEFAULT;
mml[base-1].ti.line = true;
for ( j = 0, i=base; j<mm->instance_count+1; ++i, ++j ) {
if ( j==0 )
sub = mm->normal;
else
sub = mm->instances[j-1];
mml[i].ti.text = uc_copy(sub->fontname);
mml[i].ti.checkable = true;
mml[i].ti.checked = sub==fv->b.sf;
mml[i].ti.userdata = sub;
mml[i].invoke = FVMenuShowSubFont;
mml[i].ti.fg = mml[i].ti.bg = COLOR_DEFAULT;
}
}
GMenuItemArrayFree(mi->sub);
mi->sub = GMenuItem2ArrayCopy(mml,NULL);
if ( mml!=mmlist ) {
for ( i=base; mml[i].ti.text!=NULL; ++i )
free( mml[i].ti.text);
free(mml);
}
for ( mi = mi->sub; mi->ti.text!=NULL || mi->ti.line ; ++mi ) {
switch ( mi->mid ) {
case MID_CreateMM:
mi->ti.disabled = false;
break;
case MID_MMInfo: case MID_MMValid: case MID_BlendToNew:
mi->ti.disabled = mm==NULL;
break;
case MID_ChangeMMBlend:
mi->ti.disabled = mm==NULL || mm->apple;
break;
}
}
} | 0 | [
"CWE-119",
"CWE-787"
]
| fontforge | 626f751752875a0ddd74b9e217b6f4828713573c | 24,470,522,913,516,400,000,000,000,000,000,000,000 | 51 | Warn users before discarding their unsaved scripts (#3852)
* Warn users before discarding their unsaved scripts
This closes #3846. |
static void io_queue_async_work(struct io_kiocb *req)
{
struct io_kiocb *link;
/* init ->work of the whole link before punting */
io_prep_async_link(req);
link = __io_queue_async_work(req);
if (link)
io_queue_linked_timeout(link);
} | 0 | []
| linux | 0f2122045b946241a9e549c2a76cea54fa58a7ff | 269,185,534,999,894,820,000,000,000,000,000,000,000 | 11 | io_uring: don't rely on weak ->files references
Grab actual references to the files_struct. To avoid circular references
issues due to this, we add a per-task note that keeps track of what
io_uring contexts a task has used. When the tasks execs or exits its
assigned files, we cancel requests based on this tracking.
With that, we can grab proper references to the files table, and no
longer need to rely on stashing away ring_fd and ring_file to check
if the ring_fd may have been closed.
Cc: [email protected] # v5.5+
Reviewed-by: Pavel Begunkov <[email protected]>
Signed-off-by: Jens Axboe <[email protected]> |
void CLASS parse_makernote_0xc634(int base, int uptag, unsigned dng_writer)
{
/*placeholder */
} | 1 | [
"CWE-119"
]
| LibRaw | 8303e74b0567806dd5f16fc39aab70fe928de1a2 | 127,582,534,343,492,500,000,000,000,000,000,000,000 | 4 | processCanonCameraInfo possible buffer overrun on damaged file |
ldbm_config_allidsthreshold_set(void *arg, void *value, char *errorbuf __attribute__((unused)), int phase __attribute__((unused)), int apply)
{
struct ldbminfo *li = (struct ldbminfo *)arg;
int retval = LDAP_SUCCESS;
int val = (int)((uintptr_t)value);
/* Do whatever we can to make sure the data is ok. */
/* Catch attempts to configure a stupidly low allidsthreshold */
if ((val > -1) && (val < 100)) {
val = 100;
}
if (apply) {
li->li_allidsthreshold = val;
}
return retval;
} | 0 | [
"CWE-399",
"CWE-203"
]
| 389-ds-base | cc0f69283abc082488824702dae485b8eae938bc | 63,195,776,619,849,180,000,000,000,000,000,000,000 | 19 | Issue 4480 - Unexpected info returned to ldap request (#4491)
Bug description:
If the bind entry does not exist, the bind result info
reports 'No such entry'. It should not reveal
whether the target entry exists.
Fix description:
Does not return any additional information during a bind
relates: https://github.com/389ds/389-ds-base/issues/4480
Reviewed by: William Brown, Viktor Ashirov, Mark Reynolds (thank you all)
Platforms tested: F31 |
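The principle behind this fix -- a failed bind must not disclose whether the target entry exists -- can be shown with a self-contained toy function. LDAP_SUCCESS (0) and LDAP_INVALID_CREDENTIALS (49) are the standard LDAP result codes; the function itself is invented for illustration:

#include <stdbool.h>

#define LDAP_SUCCESS             0
#define LDAP_INVALID_CREDENTIALS 49

/* Both "no such entry" and "bad password" collapse into one result,
 * so a client cannot probe which bind DNs exist. */
static int bind_result(bool entry_exists, bool password_ok)
{
	if (entry_exists && password_ok)
		return LDAP_SUCCESS;
	return LDAP_INVALID_CREDENTIALS;
}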
static int check_map_prog_compatibility(struct bpf_verifier_env *env,
struct bpf_map *map,
struct bpf_prog *prog)
{
/* Make sure that BPF_PROG_TYPE_PERF_EVENT programs only use
* preallocated hash maps, since doing memory allocation
* in overflow_handler can crash depending on where nmi got
* triggered.
*/
if (prog->type == BPF_PROG_TYPE_PERF_EVENT) {
if (!check_map_prealloc(map)) {
verbose(env, "perf_event programs can only use preallocated hash map\n");
return -EINVAL;
}
if (map->inner_map_meta &&
!check_map_prealloc(map->inner_map_meta)) {
verbose(env, "perf_event programs can only use preallocated inner hash map\n");
return -EINVAL;
}
}
if ((bpf_prog_is_dev_bound(prog->aux) || bpf_map_is_dev_bound(map)) &&
!bpf_offload_prog_map_match(prog, map)) {
verbose(env, "offload device mismatch between prog and map\n");
return -EINVAL;
}
return 0;
} | 0 | [
"CWE-125"
]
| linux | b799207e1e1816b09e7a5920fbb2d5fcf6edd681 | 321,797,853,712,842,700,000,000,000,000,000,000,000 | 30 | bpf: 32-bit RSH verification must truncate input before the ALU op
When I wrote commit 468f6eafa6c4 ("bpf: fix 32-bit ALU op verification"), I
assumed that, in order to emulate 64-bit arithmetic with 32-bit logic, it
is sufficient to just truncate the output to 32 bits; and so I just moved
the register size coercion that used to be at the start of the function to
the end of the function.
That assumption is true for almost every op, but not for 32-bit right
shifts, because those can propagate information towards the least
significant bit. Fix it by always truncating inputs for 32-bit ops to 32
bits.
Also get rid of the coerce_reg_to_size() after the ALU op, since that has
no effect.
Fixes: 468f6eafa6c4 ("bpf: fix 32-bit ALU op verification")
Acked-by: Daniel Borkmann <[email protected]>
Signed-off-by: Jann Horn <[email protected]>
Signed-off-by: Daniel Borkmann <[email protected]> |
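The reasoning above -- a 32-bit right shift propagates high bits toward the least significant bit, so truncating only the output is insufficient -- can be verified with plain C. The value below is made up; this demonstrates the arithmetic, it is not verifier code:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t reg = 0xdeadbeef00000000ULL;	/* only the low 32 bits should matter */
	/* Wrong emulation: shift the full 64-bit value, truncate afterwards. */
	uint32_t wrong = (uint32_t)(reg >> 8);			/* 0xef000000 */
	/* Correct emulation: truncate the input first, then shift. */
	uint32_t right = (uint32_t)((uint32_t)reg >> 8);	/* 0x00000000 */
	printf("wrong=0x%08x right=0x%08x\n", wrong, right);	/* they differ */
	return 0;
}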
xfs_alloc_query_range(
struct xfs_btree_cur *cur,
struct xfs_alloc_rec_incore *low_rec,
struct xfs_alloc_rec_incore *high_rec,
xfs_alloc_query_range_fn fn,
void *priv)
{
union xfs_btree_irec low_brec;
union xfs_btree_irec high_brec;
struct xfs_alloc_query_range_info query;
ASSERT(cur->bc_btnum == XFS_BTNUM_BNO);
low_brec.a = *low_rec;
high_brec.a = *high_rec;
query.priv = priv;
query.fn = fn;
return xfs_btree_query_range(cur, &low_brec, &high_brec,
xfs_alloc_query_range_helper, &query);
} | 0 | [
"CWE-400",
"CWE-703",
"CWE-835"
]
| linux | d0c7feaf87678371c2c09b3709400be416b2dc62 | 168,394,714,816,106,950,000,000,000,000,000,000,000 | 19 | xfs: add agf freeblocks verify in xfs_agf_verify
We recently used a fuzzer (hydra) to test XFS and automatically generated
tmp.img (XFS v5 format, but with some wrong metadata).
xfs_repair information(just one AG):
agf_freeblks 0, counted 3224 in ag 0
agf_longest 536874136, counted 3224 in ag 0
sb_fdblocks 613, counted 3228
Test as follows:
mount tmp.img tmpdir
cp file1M tmpdir
sync
In 4.19-stable, sync gets stuck; the reason is:
xfs_mountfs
  xfs_check_summary_counts
    if ((!xfs_sb_version_haslazysbcount(&mp->m_sb) ||
         XFS_LAST_UNMOUNT_WAS_CLEAN(mp)) &&
        !xfs_fs_has_sickness(mp, XFS_SICK_FS_COUNTERS))
            return 0;  --> just returns; incore sb_fdblocks is still 613
  xfs_initialize_perag_data
cp file1M tmpdir  --> ok (writes the file to the pagecache)
sync              --> stuck (writing the pagecache to disk)
xfs_map_blocks
  xfs_iomap_write_allocate
    while (count_fsb != 0) {
      nimaps = 0;
      while (nimaps == 0) {            --> endless loop
        nimaps = 1;
        xfs_bmapi_write(..., &nimaps)  --> nimaps becomes 0 again
          xfs_bmapi_write
            xfs_bmap_alloc
              xfs_bmap_btalloc
                xfs_alloc_vextent
                  xfs_alloc_fix_freelist
                    xfs_alloc_space_available  --> fails (agf_freeblks is 0)
In linux-next, sync does not get stuck, because commit c2b3164320b5 ("xfs:
use the latest extent at writeback delalloc conversion time") removed
the above while loop; dmesg shows:
[ 55.250114] XFS (loop0): page discard on page ffffea0008bc7380, inode 0x1b0c, offset 0.
Users do not know why this page is discarded. The better solution is:
1. Like xfs_repair, make sure sb_fdblocks is equal to the counted value
(xfs_initialize_perag_data does this, but it is not called on this mount)
2. Add an agf verify step which, on failure, tells users to run repair
This patch uses the second solution.
Signed-off-by: Zheng Bin <[email protected]>
Signed-off-by: Ren Xudong <[email protected]>
Reviewed-by: Darrick J. Wong <[email protected]>
Signed-off-by: Darrick J. Wong <[email protected]> |
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
auto* params =
reinterpret_cast<TfLiteSpaceToDepthParams*>(node->builtin_data);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
#define TF_LITE_SPACE_TO_DEPTH(type, scalar) \
tflite::SpaceToDepthParams op_params; \
op_params.block_size = params->block_size; \
type::SpaceToDepth(op_params, GetTensorShape(input), \
GetTensorData<scalar>(input), GetTensorShape(output), \
GetTensorData<scalar>(output))
switch (input->type) { // Already know in/out types are same.
case kTfLiteFloat32:
if (kernel_type == kReference) {
TF_LITE_SPACE_TO_DEPTH(reference_ops, float);
} else {
TF_LITE_SPACE_TO_DEPTH(optimized_ops, float);
}
break;
case kTfLiteUInt8:
if (kernel_type == kReference) {
TF_LITE_SPACE_TO_DEPTH(reference_ops, uint8_t);
} else {
TF_LITE_SPACE_TO_DEPTH(optimized_ops, uint8_t);
}
break;
case kTfLiteInt8:
if (kernel_type == kReference) {
TF_LITE_SPACE_TO_DEPTH(reference_ops, int8_t);
} else {
TF_LITE_SPACE_TO_DEPTH(optimized_ops, int8_t);
}
break;
case kTfLiteInt32:
if (kernel_type == kReference) {
TF_LITE_SPACE_TO_DEPTH(reference_ops, int32_t);
} else {
TF_LITE_SPACE_TO_DEPTH(optimized_ops, int32_t);
}
break;
case kTfLiteInt64:
if (kernel_type == kReference) {
TF_LITE_SPACE_TO_DEPTH(reference_ops, int64_t);
} else {
TF_LITE_SPACE_TO_DEPTH(optimized_ops, int64_t);
}
break;
default:
context->ReportError(context, "Type '%s' not currently supported.",
TfLiteTypeGetName(input->type));
return kTfLiteError;
}
#undef TF_LITE_SPACE_TO_DEPTH
return kTfLiteOk;
} | 0 | [
"CWE-369"
]
| tensorflow | 0d45ea1ca641b21b73bcf9c00e0179cda284e7e7 | 80,459,535,602,141,320,000,000,000,000,000,000,000 | 61 | Prevent one more div by 0 in TFLite
PiperOrigin-RevId: 370800114
Change-Id: I6b956aeb8c458cc6f514408d2e89ffacfe249e57 |
static void php_ldap_escape_map_set_chars(zend_bool *map, const char *chars, const int charslen, char escape)
{
int i = 0;
while (i < charslen) {
map[(unsigned char) chars[i++]] = escape;
}
} | 0 | [
"CWE-476"
]
| php-src | 49782c54994ecca2ef2a061063bd5a7079c43527 | 243,772,527,997,346,500,000,000,000,000,000,000,000 | 7 | Fix bug #76248 - Malicious LDAP-Server Response causes Crash |
static int effective_prio(struct task_struct *p)
{
p->normal_prio = normal_prio(p);
/*
* If we are RT tasks or we were boosted to RT priority,
* keep the priority unchanged. Otherwise, update priority
* to the normal priority:
*/
if (!rt_prio(p->prio))
return p->normal_prio;
return p->prio;
} | 0 | []
| linux-2.6 | 8f1bc385cfbab474db6c27b5af1e439614f3025c | 324,466,293,269,738,170,000,000,000,000,000,000,000 | 12 | sched: fair: weight calculations
In order to level the hierarchy, we need to calculate load based on the
root view. That is, each task's load is in the same unit.
Consider a hierarchy in which group A contains group B and task 1,
and group B contains tasks 2 and 3.
To compute 1's load we do:
    load(1) = weight(1) / rq_weight(A)
To compute 2's load we do:
    load(2) = (weight(2) / rq_weight(B)) * (weight(B) / rq_weight(A))
This yields load fractions in comparable units.
The consequence is that it changes virtual time. We used to have:
    vtime_{i} = time_{i} / weight_{i}
vtime = \Sum vtime_{i} = time / rq_weight.
But with the new way of load calculation we get that vtime equals time.
Signed-off-by: Peter Zijlstra <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]> |
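To make the levelled-load formulas concrete, here is a small stand-alone computation with invented weights (three tasks of weight 1024 and group B of weight 2048); only the arithmetic mirrors the message:

#include <stdio.h>

int main(void)
{
	double w1 = 1024, w2 = 1024, w3 = 1024, wB = 2048;
	double rqA = wB + w1;			/* root runqueue weight: B plus task 1 */
	double rqB = w2 + w3;			/* B's runqueue weight: tasks 2 and 3 */
	double load1 = w1 / rqA;		/* task 1 seen from the root */
	double load2 = (w2 / rqB) * (wB / rqA);	/* task 2 levelled to the root */

	printf("load(1)=%.3f load(2)=%.3f\n", load1, load2);	/* both print 0.333 */
	return 0;
}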
explicit operator bool() const {
return ok();
} | 0 | [
"CWE-613"
]
| mongo | e55d6e2292e5dbe2f97153251d8193d1cc89f5d7 | 121,871,608,683,492,000,000,000,000,000,000,000,000 | 3 | SERVER-38984 Validate unique User ID on UserCache hit |
static int xfrm_alloc_replay_state_esn(struct xfrm_replay_state_esn **replay_esn,
struct xfrm_replay_state_esn **preplay_esn,
struct nlattr *rta)
{
struct xfrm_replay_state_esn *p, *pp, *up;
unsigned int klen, ulen;
if (!rta)
return 0;
up = nla_data(rta);
klen = xfrm_replay_state_esn_len(up);
ulen = nla_len(rta) >= (int)klen ? klen : sizeof(*up);
p = kzalloc(klen, GFP_KERNEL);
if (!p)
return -ENOMEM;
pp = kzalloc(klen, GFP_KERNEL);
if (!pp) {
kfree(p);
return -ENOMEM;
}
memcpy(p, up, ulen);
memcpy(pp, up, ulen);
*replay_esn = p;
*preplay_esn = pp;
return 0;
} | 0 | [
"CWE-125"
]
| linux | b805d78d300bcf2c83d6df7da0c818b0fee41427 | 49,089,244,385,927,650,000,000,000,000,000,000,000 | 32 | xfrm: policy: Fix out-of-bound array accesses in __xfrm_policy_unlink
UBSAN report this:
UBSAN: Undefined behaviour in net/xfrm/xfrm_policy.c:1289:24
index 6 is out of range for type 'unsigned int [6]'
CPU: 1 PID: 0 Comm: swapper/1 Not tainted 4.4.162-514.55.6.9.x86_64+ #13
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.10.2-1ubuntu1 04/01/2014
0000000000000000 1466cf39b41b23c9 ffff8801f6b07a58 ffffffff81cb35f4
0000000041b58ab3 ffffffff83230f9c ffffffff81cb34e0 ffff8801f6b07a80
ffff8801f6b07a20 1466cf39b41b23c9 ffffffff851706e0 ffff8801f6b07ae8
Call Trace:
<IRQ> [<ffffffff81cb35f4>] __dump_stack lib/dump_stack.c:15 [inline]
<IRQ> [<ffffffff81cb35f4>] dump_stack+0x114/0x1a0 lib/dump_stack.c:51
[<ffffffff81d94225>] ubsan_epilogue+0x12/0x8f lib/ubsan.c:164
[<ffffffff81d954db>] __ubsan_handle_out_of_bounds+0x16e/0x1b2 lib/ubsan.c:382
[<ffffffff82a25acd>] __xfrm_policy_unlink+0x3dd/0x5b0 net/xfrm/xfrm_policy.c:1289
[<ffffffff82a2e572>] xfrm_policy_delete+0x52/0xb0 net/xfrm/xfrm_policy.c:1309
[<ffffffff82a3319b>] xfrm_policy_timer+0x30b/0x590 net/xfrm/xfrm_policy.c:243
[<ffffffff813d3927>] call_timer_fn+0x237/0x990 kernel/time/timer.c:1144
[<ffffffff813d8e7e>] __run_timers kernel/time/timer.c:1218 [inline]
[<ffffffff813d8e7e>] run_timer_softirq+0x6ce/0xb80 kernel/time/timer.c:1401
[<ffffffff8120d6f9>] __do_softirq+0x299/0xe10 kernel/softirq.c:273
[<ffffffff8120e676>] invoke_softirq kernel/softirq.c:350 [inline]
[<ffffffff8120e676>] irq_exit+0x216/0x2c0 kernel/softirq.c:391
[<ffffffff82c5edab>] exiting_irq arch/x86/include/asm/apic.h:652 [inline]
[<ffffffff82c5edab>] smp_apic_timer_interrupt+0x8b/0xc0 arch/x86/kernel/apic/apic.c:926
[<ffffffff82c5c985>] apic_timer_interrupt+0xa5/0xb0 arch/x86/entry/entry_64.S:735
<EOI> [<ffffffff81188096>] ? native_safe_halt+0x6/0x10 arch/x86/include/asm/irqflags.h:52
[<ffffffff810834d7>] arch_safe_halt arch/x86/include/asm/paravirt.h:111 [inline]
[<ffffffff810834d7>] default_idle+0x27/0x430 arch/x86/kernel/process.c:446
[<ffffffff81085f05>] arch_cpu_idle+0x15/0x20 arch/x86/kernel/process.c:437
[<ffffffff8132abc3>] default_idle_call+0x53/0x90 kernel/sched/idle.c:92
[<ffffffff8132b32d>] cpuidle_idle_call kernel/sched/idle.c:156 [inline]
[<ffffffff8132b32d>] cpu_idle_loop kernel/sched/idle.c:251 [inline]
[<ffffffff8132b32d>] cpu_startup_entry+0x60d/0x9a0 kernel/sched/idle.c:299
[<ffffffff8113e119>] start_secondary+0x3c9/0x560 arch/x86/kernel/smpboot.c:245
The issue is triggered like this:
xfrm_add_policy
  --> verify_newpolicy_info   // checks the user-provided index against XFRM_POLICY_MAX
                              // in my case the index is 0x6E6BB6, so it passes the check
  --> xfrm_policy_construct   // copies the user's policy and sets xfrm_policy_timer
  --> xfrm_policy_insert
    --> __xfrm_policy_link    // uses the original dir, in my case 2
    --> xfrm_gen_index        // generates the policy index, here 0x6E6BB6
then xfrm_policy_timer is fired:
xfrm_policy_timer
  --> xfrm_policy_id2dir      // gets dir from (policy index & 7), in my case 6
  --> xfrm_policy_delete
  --> __xfrm_policy_unlink    // accesses policy_count[dir], triggering the out-of-range access
Add an xfrm_policy_id2dir check in verify_newpolicy_info to make sure the computed
dir is valid, fixing the issue.
Reported-by: Hulk Robot <[email protected]>
Fixes: e682adf021be ("xfrm: Try to honor policy index if it's supplied by user")
Signed-off-by: YueHaibing <[email protected]>
Acked-by: Herbert Xu <[email protected]>
Signed-off-by: Steffen Klassert <[email protected]> |
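The out-of-range arithmetic is easy to reproduce in isolation. In the kernel, XFRM_POLICY_MAX is 3 (in, out, fwd), so per-direction arrays have 6 slots; everything else below is simplified for illustration:

#include <stdio.h>

#define XFRM_POLICY_MAX 3			/* in, out, fwd */

static int xfrm_policy_id2dir(unsigned int index)
{
	return index & 7;
}

int main(void)
{
	unsigned int index = 0x6E6BB6;		/* user-supplied index from the report */
	int dir = xfrm_policy_id2dir(index);	/* 0x6E6BB6 & 7 == 6 */

	printf("dir=%d, valid range is 0..%d\n",
	       dir, XFRM_POLICY_MAX * 2 - 1);	/* 6 falls outside 0..5 */
	return 0;
}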
int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
struct fpu *fpu = &target->thread.fpu;
int ret;
if (!boot_cpu_has(X86_FEATURE_FXSR))
return -ENODEV;
fpu__activate_fpstate_write(fpu);
fpstate_sanitize_xstate(fpu);
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&fpu->state.fxsave, 0, -1);
/*
* mxcsr reserved bits must be masked to zero for security reasons.
*/
fpu->state.fxsave.mxcsr &= mxcsr_feature_mask;
/*
* update the header bits in the xsave header, indicating the
* presence of FP and SSE state.
*/
if (boot_cpu_has(X86_FEATURE_XSAVE))
fpu->state.xsave.header.xfeatures |= XFEATURE_MASK_FPSSE;
return ret;
} | 0 | [
"CWE-200"
]
| linux | 814fb7bb7db5433757d76f4c4502c96fc53b0b5e | 227,683,517,522,798,230,000,000,000,000,000,000,000 | 30 | x86/fpu: Don't let userspace set bogus xcomp_bv
On x86, userspace can use the ptrace() or rt_sigreturn() system calls to
set a task's extended state (xstate) or "FPU" registers. ptrace() can
set them for another task using the PTRACE_SETREGSET request with
NT_X86_XSTATE, while rt_sigreturn() can set them for the current task.
In either case, registers can be set to any value, but the kernel
assumes that the XSAVE area itself remains valid in the sense that the
CPU can restore it.
However, in the case where the kernel is using the uncompacted xstate
format (which it does whenever the XSAVES instruction is unavailable),
it was possible for userspace to set the xcomp_bv field in the
xstate_header to an arbitrary value. However, all bits in that field
are reserved in the uncompacted case, so when switching to a task with
nonzero xcomp_bv, the XRSTOR instruction failed with a #GP fault. This
caused the WARN_ON_FPU(err) in copy_kernel_to_xregs() to be hit. In
addition, since the error is otherwise ignored, the FPU registers from
the task previously executing on the CPU were leaked.
Fix the bug by checking that the user-supplied value of xcomp_bv is 0 in
the uncompacted case, and returning an error otherwise.
The reason for validating xcomp_bv rather than simply overwriting it
with 0 is that we want userspace to see an error if it (incorrectly)
provides an XSAVE area in compacted format rather than in uncompacted
format.
Note that as before, in case of error we clear the task's FPU state.
This is perhaps non-ideal, especially for PTRACE_SETREGSET; it might be
better to return an error before changing anything. But it seems the
"clear on error" behavior is fine for now, and it's a little tricky to
do otherwise because it would mean we couldn't simply copy the full
userspace state into kernel memory in one __copy_from_user().
This bug was found by syzkaller, which hit the above-mentioned
WARN_ON_FPU():
WARNING: CPU: 1 PID: 0 at ./arch/x86/include/asm/fpu/internal.h:373 __switch_to+0x5b5/0x5d0
CPU: 1 PID: 0 Comm: swapper/1 Not tainted 4.13.0 #453
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Bochs 01/01/2011
task: ffff9ba2bc8e42c0 task.stack: ffffa78cc036c000
RIP: 0010:__switch_to+0x5b5/0x5d0
RSP: 0000:ffffa78cc08bbb88 EFLAGS: 00010082
RAX: 00000000fffffffe RBX: ffff9ba2b8bf2180 RCX: 00000000c0000100
RDX: 00000000ffffffff RSI: 000000005cb10700 RDI: ffff9ba2b8bf36c0
RBP: ffffa78cc08bbbd0 R08: 00000000929fdf46 R09: 0000000000000001
R10: 0000000000000000 R11: 0000000000000000 R12: ffff9ba2bc8e42c0
R13: 0000000000000000 R14: ffff9ba2b8bf3680 R15: ffff9ba2bf5d7b40
FS: 00007f7e5cb10700(0000) GS:ffff9ba2bf400000(0000) knlGS:0000000000000000
CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
CR2: 00000000004005cc CR3: 0000000079fd5000 CR4: 00000000001406e0
Call Trace:
Code: 84 00 00 00 00 00 e9 11 fd ff ff 0f ff 66 0f 1f 84 00 00 00 00 00 e9 e7 fa ff ff 0f ff 66 0f 1f 84 00 00 00 00 00 e9 c2 fa ff ff <0f> ff 66 0f 1f 84 00 00 00 00 00 e9 d4 fc ff ff 66 66 2e 0f 1f
Here is a C reproducer. The expected behavior is that the program spin
forever with no output. However, on a buggy kernel running on a
processor with the "xsave" feature but without the "xsaves" feature
(e.g. Sandy Bridge through Broadwell for Intel), within a second or two
the program reports that the xmm registers were corrupted, i.e. were not
restored correctly. With CONFIG_X86_DEBUG_FPU=y it also hits the above
kernel warning.
#define _GNU_SOURCE
#include <stdbool.h>
#include <inttypes.h>
#include <linux/elf.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/uio.h>
#include <sys/wait.h>
#include <unistd.h>
int main(void)
{
	int pid = fork();
	uint64_t xstate[512];
	struct iovec iov = { .iov_base = xstate, .iov_len = sizeof(xstate) };

	if (pid == 0) {
		bool tracee = true;
		for (int i = 0; i < sysconf(_SC_NPROCESSORS_ONLN) && tracee; i++)
			tracee = (fork() != 0);
		uint32_t xmm0[4] = { [0 ... 3] = tracee ? 0x00000000 : 0xDEADBEEF };

		asm volatile("   movdqu %0, %%xmm0\n"
			     "   mov %0, %%rbx\n"
			     "1: movdqu %%xmm0, %0\n"
			     "   mov %0, %%rax\n"
			     "   cmp %%rax, %%rbx\n"
			     "   je 1b\n"
			     : "+m" (xmm0) : : "rax", "rbx", "xmm0");

		printf("BUG: xmm registers corrupted! tracee=%d, xmm0=%08X%08X%08X%08X\n",
		       tracee, xmm0[0], xmm0[1], xmm0[2], xmm0[3]);
	} else {
		usleep(100000);
		ptrace(PTRACE_ATTACH, pid, 0, 0);
		wait(NULL);
		ptrace(PTRACE_GETREGSET, pid, NT_X86_XSTATE, &iov);
		xstate[65] = -1;
		ptrace(PTRACE_SETREGSET, pid, NT_X86_XSTATE, &iov);
		ptrace(PTRACE_CONT, pid, 0, 0);
		wait(NULL);
	}
	return 1;
}
Note: the program only tests for the bug using the ptrace() system call.
The bug can also be reproduced using the rt_sigreturn() system call, but
only when called from a 32-bit program, since for 64-bit programs the
kernel restores the FPU state from the signal frame by doing XRSTOR
directly from userspace memory (with proper error checking).
Reported-by: Dmitry Vyukov <[email protected]>
Signed-off-by: Eric Biggers <[email protected]>
Reviewed-by: Kees Cook <[email protected]>
Reviewed-by: Rik van Riel <[email protected]>
Acked-by: Dave Hansen <[email protected]>
Cc: <[email protected]> [v3.17+]
Cc: Andrew Morton <[email protected]>
Cc: Andy Lutomirski <[email protected]>
Cc: Andy Lutomirski <[email protected]>
Cc: Borislav Petkov <[email protected]>
Cc: Eric Biggers <[email protected]>
Cc: Fenghua Yu <[email protected]>
Cc: Kevin Hao <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Michael Halcrow <[email protected]>
Cc: Oleg Nesterov <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: Wanpeng Li <[email protected]>
Cc: Yu-cheng Yu <[email protected]>
Cc: [email protected]
Fixes: 0b29643a5843 ("x86/xsaves: Change compacted format xsave area header")
Link: http://lkml.kernel.org/r/[email protected]
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]> |
catch_exception(except_T *excp)
{
excp->caught = caught_stack;
caught_stack = excp;
set_vim_var_string(VV_EXCEPTION, (char_u *)excp->value, -1);
if (*excp->throw_name != NUL)
{
if (excp->throw_lnum != 0)
vim_snprintf((char *)IObuff, IOSIZE, _("%s, line %ld"),
excp->throw_name, (long)excp->throw_lnum);
else
vim_snprintf((char *)IObuff, IOSIZE, "%s", excp->throw_name);
set_vim_var_string(VV_THROWPOINT, IObuff, -1);
}
else
// throw_name not set on an exception from a command that was typed.
set_vim_var_string(VV_THROWPOINT, NULL, -1);
if (p_verbose >= 13 || debug_break_level > 0)
{
int save_msg_silent = msg_silent;
if (debug_break_level > 0)
msg_silent = FALSE; // display messages
else
verbose_enter();
++no_wait_return;
if (debug_break_level > 0 || *p_vfile == NUL)
msg_scroll = TRUE; // always scroll up, don't overwrite
smsg(_("Exception caught: %s"), excp->value);
msg_puts("\n"); // don't overwrite this either
if (debug_break_level > 0 || *p_vfile == NUL)
cmdline_row = msg_row;
--no_wait_return;
if (debug_break_level > 0)
msg_silent = save_msg_silent;
else
verbose_leave();
}
} | 0 | [
"CWE-787"
]
| vim | 96b9bf8f74af8abf1e30054f996708db7dc285be | 284,289,959,635,267,480,000,000,000,000,000,000,000 | 42 | patch 9.0.0577: buffer underflow with unexpected :finally
Problem: Buffer underflow with unexpected :finally.
Solution: Check CSF_TRY can be found. |
void LinkResolver::resolve_invokehandle(CallInfo& result, constantPoolHandle pool, int index, TRAPS) {
assert(EnableInvokeDynamic, "");
// This guy is reached from InterpreterRuntime::resolve_invokehandle.
KlassHandle resolved_klass;
Symbol* method_name = NULL;
Symbol* method_signature = NULL;
KlassHandle current_klass;
resolve_pool(resolved_klass, method_name, method_signature, current_klass, pool, index, CHECK);
if (TraceMethodHandles) {
ResourceMark rm(THREAD);
tty->print_cr("resolve_invokehandle %s %s", method_name->as_C_string(), method_signature->as_C_string());
}
resolve_handle_call(result, resolved_klass, method_name, method_signature, current_klass, true, CHECK);
} | 0 | []
| jdk8u | f14e35d20e1a4d0f507f05838844152f2242c6d3 | 12,277,529,880,664,623,000,000,000,000,000,000,000 | 14 | 8281866: Enhance MethodHandle invocations
Reviewed-by: andrew
Backport-of: d974d9da365f787f67971d88c79371c8b0769f75 |
static int have_ask_password(void) {
_cleanup_closedir_ DIR *dir;
dir = opendir("/run/systemd/ask-password");
if (!dir) {
if (errno == ENOENT)
return false;
else
return -errno;
}
for (;;) {
struct dirent *de;
errno = 0;
de = readdir(dir);
if (!de && errno > 0)
return -errno;
if (!de)
return false;
if (startswith(de->d_name, "ask."))
return true;
}
} | 0 | [
"CWE-20"
]
| systemd | 531ac2b2349da02acc9c382849758e07eb92b020 | 43,323,699,978,655,160,000,000,000,000,000,000,000 | 25 | If the notification message length is 0, ignore the message (#4237)
Fixes #4234.
Signed-off-by: Jorge Niedbalski <[email protected]> |
gss_inquire_context(
OM_uint32 *minor_status,
gss_ctx_id_t context_handle,
gss_name_t *src_name,
gss_name_t *targ_name,
OM_uint32 *lifetime_rec,
gss_OID *mech_type,
OM_uint32 *ctx_flags,
int *locally_initiated,
int *opened)
{
gss_union_ctx_id_t ctx;
gss_mechanism mech;
OM_uint32 status, temp_minor;
gss_OID actual_mech;
gss_name_t localTargName = NULL, localSourceName = NULL;
status = val_inq_ctx_args(minor_status,
context_handle,
src_name, targ_name,
lifetime_rec,
mech_type, ctx_flags,
locally_initiated, opened);
if (status != GSS_S_COMPLETE)
return (status);
/*
* select the approprate underlying mechanism routine and
* call it.
*/
ctx = (gss_union_ctx_id_t) context_handle;
if (ctx->internal_ctx_id == GSS_C_NO_CONTEXT)
return (GSS_S_NO_CONTEXT);
mech = gssint_get_mechanism (ctx->mech_type);
if (!mech || !mech->gss_inquire_context || !mech->gss_display_name ||
!mech->gss_release_name) {
return (GSS_S_UNAVAILABLE);
}
status = mech->gss_inquire_context(
minor_status,
ctx->internal_ctx_id,
(src_name ? &localSourceName : NULL),
(targ_name ? &localTargName : NULL),
lifetime_rec,
&actual_mech,
ctx_flags,
locally_initiated,
opened);
if (status != GSS_S_COMPLETE) {
map_error(minor_status, mech);
return status;
}
/* need to convert names */
if (src_name) {
if (localSourceName) {
status = gssint_convert_name_to_union_name(minor_status, mech,
localSourceName, src_name);
if (status != GSS_S_COMPLETE) {
if (localTargName)
mech->gss_release_name(&temp_minor, &localTargName);
return (status);
}
} else {
*src_name = GSS_C_NO_NAME;
}
}
if (targ_name) {
if (localTargName) {
status = gssint_convert_name_to_union_name(minor_status, mech,
localTargName, targ_name);
if (status != GSS_S_COMPLETE) {
if (src_name)
(void) gss_release_name(&temp_minor, src_name);
return (status);
}
}
else {
*targ_name = GSS_C_NO_NAME;
}
}
if (mech_type)
*mech_type = gssint_get_public_oid(actual_mech);
return(GSS_S_COMPLETE);
} | 0 | [
"CWE-415"
]
| krb5 | 56f7b1bc95a2a3eeb420e069e7655fb181ade5cf | 47,209,513,824,349,620,000,000,000,000,000,000,000 | 96 | Preserve GSS context on init/accept failure
After gss_init_sec_context() or gss_accept_sec_context() has created a
context, don't delete the mechglue context on failures from subsequent
calls, even if the mechanism deletes the mech-specific context (which
is allowed by RFC 2744 but not preferred). Check for union contexts
with no mechanism context in each GSS function which accepts a
gss_ctx_id_t.
CVE-2017-11462:
RFC 2744 permits a GSS-API implementation to delete an existing
security context on a second or subsequent call to
gss_init_sec_context() or gss_accept_sec_context() if the call results
in an error. This API behavior has been found to be dangerous,
leading to the possibility of memory errors in some callers. For
safety, GSS-API implementations should instead preserve existing
security contexts on error until the caller deletes them.
All versions of MIT krb5 prior to this change may delete acceptor
contexts on error. Versions 1.13.4 through 1.13.7, 1.14.1 through
1.14.5, and 1.15 through 1.15.1 may also delete initiator contexts on
error.
ticket: 8598 (new)
target_version: 1.15-next
target_version: 1.14-next
tags: pullup |
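For applications, the safe pattern implied by this change is to treat the context handle as live until the caller itself deletes it. A hedged sketch with error handling abbreviated; the argument list follows the standard gss_init_sec_context() prototype, and the wrapper function is invented:

#include <gssapi/gssapi.h>

/* Sketch, not part of the patch: on error, explicitly delete whatever
 * context the mechglue handed back instead of assuming it was freed. */
static void init_ctx_once(gss_name_t target, gss_buffer_t in, gss_buffer_t out)
{
	OM_uint32 maj, min;
	gss_ctx_id_t ctx = GSS_C_NO_CONTEXT;

	maj = gss_init_sec_context(&min, GSS_C_NO_CREDENTIAL, &ctx, target,
				   GSS_C_NO_OID, 0, 0,
				   GSS_C_NO_CHANNEL_BINDINGS, in,
				   NULL, out, NULL, NULL);
	if (GSS_ERROR(maj) && ctx != GSS_C_NO_CONTEXT)
		(void)gss_delete_sec_context(&min, &ctx, GSS_C_NO_BUFFER);
}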
imapx_untagged_recent (CamelIMAPXServer *is,
GInputStream *input_stream,
GCancellable *cancellable,
GError **error)
{
CamelIMAPXMailbox *mailbox;
guint32 recent;
g_return_val_if_fail (CAMEL_IS_IMAPX_SERVER (is), FALSE);
mailbox = camel_imapx_server_ref_pending_or_selected (is);
if (mailbox == NULL) {
g_warning ("%s: No mailbox available", G_STRFUNC);
return TRUE;
}
recent = (guint32) is->priv->context->id;
c (is->priv->tagprefix, "%s: updating mailbox '%s' recent: %d ~> %d\n", G_STRFUNC,
camel_imapx_mailbox_get_name (mailbox),
camel_imapx_mailbox_get_recent (mailbox),
recent);
camel_imapx_mailbox_set_recent (mailbox, recent);
g_object_unref (mailbox);
return TRUE;
} | 0 | []
| evolution-data-server | f26a6f672096790d0bbd76903db4c9a2e44f116b | 148,833,331,871,325,210,000,000,000,000,000,000,000 | 30 | [IMAPx] 'STARTTLS not supported' error ignored
When a user has set up the STARTTLS encryption method but the server doesn't
support it, an error should be shown to the user instead of falling back to an
insecure connection. There were two bugs in the existing code which
prevented this error from being raised and the failure from being properly
reported. This had been filed at:
https://bugzilla.redhat.com/show_bug.cgi?id=1334842 |
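The intended behaviour can be sketched as a guard at connect time. server_has_starttls, the surrounding function, and the error domain/code are placeholders rather than the real CamelIMAPX symbols; only g_set_error() and the security-method enum value come from GLib/Camel:

/* Hedged sketch: STARTTLS was requested but the server lacks the
 * capability -> report an error instead of continuing in plaintext. */
if (method == CAMEL_NETWORK_SECURITY_METHOD_STARTTLS_ON_STANDARD_PORT &&
    !server_has_starttls) {
	g_set_error(error, CAMEL_ERROR, CAMEL_ERROR_GENERIC,
		    "Failed to connect: server does not support STARTTLS");
	return FALSE;
}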
static CURLcode smtp_state_authlogin_resp(struct connectdata *conn,
int smtpcode,
smtpstate instate)
{
CURLcode result = CURLE_OK;
struct SessionHandle *data = conn->data;
size_t len = 0;
char *authuser = NULL;
(void)instate; /* no use for this yet */
if(smtpcode != 334) {
failf(data, "Access denied: %d", smtpcode);
result = CURLE_LOGIN_DENIED;
}
else {
result = smtp_auth_login_user(conn, &authuser, &len);
if(!result) {
if(authuser) {
result = Curl_pp_sendf(&conn->proto.smtpc.pp, "%s", authuser);
if(!result)
state(conn, SMTP_AUTHPASSWD);
}
Curl_safefree(authuser);
}
}
return result;
} | 0 | [
"CWE-89"
]
| curl | 75ca568fa1c19de4c5358fed246686de8467c238 | 182,387,262,356,993,320,000,000,000,000,000,000,000 | 31 | URL sanitize: reject URLs containing bad data
Protocols (IMAP, POP3 and SMTP) that use the path part of a URL in a
decoded manner now use the new Curl_urldecode() function to reject URLs
with embedded control codes (anything that is or decodes to a byte value
less than 32).
URLs containing such codes could easily otherwise be used to do harm and
allow users to do unintended actions with otherwise innocent tools and
applications. Like for example using a URL like
pop3://pop3.example.com/1%0d%0aDELE%201 when the app wants a URL to get
a mail and instead this would delete one.
This flaw is considered a security vulnerability: CVE-2012-0036
Security advisory at: http://curl.haxx.se/docs/adv_20120124.html
Reported by: Dan Fandrich |
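A toy model of the sanitization rule -- any byte that percent-decodes below 0x20 fails the whole URL -- is below. This is not curl's actual Curl_urldecode(), just the same rejection idea in stand-alone C:

#include <ctype.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

static bool decode_reject_ctrl(const char *in, char *out, size_t outlen)
{
	size_t o = 0;

	while (*in && o + 1 < outlen) {
		unsigned char c = (unsigned char)*in++;

		if (c == '%' && isxdigit((unsigned char)in[0]) &&
		    isxdigit((unsigned char)in[1])) {
			char hex[3] = { in[0], in[1], 0 };
			c = (unsigned char)strtoul(hex, NULL, 16);
			in += 2;
		}
		if (c < 0x20)	/* embedded CR, LF, etc. -> reject the URL */
			return false;
		out[o++] = (char)c;
	}
	out[o] = '\0';
	return true;
}

int main(void)
{
	char buf[64];

	/* The path from the advisory example decodes to "1\r\nDELE 1". */
	printf("%d\n", decode_reject_ctrl("1%0d%0aDELE%201", buf, sizeof buf)); /* 0 */
	printf("%d\n", decode_reject_ctrl("1", buf, sizeof buf));		/* 1 */
	return 0;
}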
CImgDisplay& set_button() {
_button = 0;
_is_event = true;
#if cimg_display==1
pthread_cond_broadcast(&cimg::X11_attr().wait_event);
#elif cimg_display==2
SetEvent(cimg::Win32_attr().wait_event);
#endif
return *this;
} | 0 | [
"CWE-770"
]
| cimg | 619cb58dd90b4e03ac68286c70ed98acbefd1c90 | 102,090,774,807,424,750,000,000,000,000,000,000,000 | 10 | CImg<>::load_bmp() and CImg<>::load_pandore(): Check that dimensions encoded in file do not exceed file size. |
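A hedged sketch of the kind of validation the commit title describes; the function name, fields, and formula are illustrative rather than CImg's internals. The 64-bit multiply matters so the size check cannot itself overflow:

#include <stdbool.h>
#include <stdint.h>

static bool bmp_dims_plausible(uint32_t width, uint32_t height,
			       uint32_t bytes_per_pixel, uint64_t file_size)
{
	uint64_t need = (uint64_t)width * height * bytes_per_pixel;

	return need != 0 && need <= file_size;	/* declared pixel data must fit */
}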
log_cachednxdomain (const char *dn)
{
string ("cached nxdomain ");
name (dn);
line ();
} | 0 | [
"CWE-362"
]
| ndjbdns | 847523271f3966cf4618c5689b905703c41dec1c | 291,112,348,415,271,640,000,000,000,000,000,000,000 | 7 | Merge identical outgoing requests.
This patch fixes dnscache to combine identical client queries into a
single outgoing request, thus securing the server from possible cache
poisoning attacks. This fixes one of the cache poisoning vulnerabilities
reported by Mr Mark Johnson
-> https://bugzilla.redhat.com/show_bug.cgi?id=838965.
Nonetheless, the original patch for this issue was created by
Mr Jeff King -> http://www.your.org/dnscache/
Sincere thanks to Mr Mark for reporting this issue and Mr Jeff for
creating the patch and releasing it under public domain. |
static void usb_parse_ssp_isoc_endpoint_companion(struct device *ddev,
int cfgno, int inum, int asnum, struct usb_host_endpoint *ep,
unsigned char *buffer, int size)
{
struct usb_ssp_isoc_ep_comp_descriptor *desc;
/*
* The SuperSpeedPlus Isoc endpoint companion descriptor immediately
* follows the SuperSpeed Endpoint Companion descriptor
*/
desc = (struct usb_ssp_isoc_ep_comp_descriptor *) buffer;
if (desc->bDescriptorType != USB_DT_SSP_ISOC_ENDPOINT_COMP ||
size < USB_DT_SSP_ISOC_EP_COMP_SIZE) {
dev_warn(ddev, "Invalid SuperSpeedPlus isoc endpoint companion"
"for config %d interface %d altsetting %d ep %d.\n",
cfgno, inum, asnum, ep->desc.bEndpointAddress);
return;
}
memcpy(&ep->ssp_isoc_ep_comp, desc, USB_DT_SSP_ISOC_EP_COMP_SIZE);
} | 0 | [
"CWE-125"
]
| linux | 1c0edc3633b56000e18d82fc241e3995ca18a69e | 24,944,507,796,294,340,000,000,000,000,000,000,000 | 20 | USB: core: fix out-of-bounds access bug in usb_get_bos_descriptor()
Andrey used the syzkaller fuzzer to find an out-of-bounds memory
access in usb_get_bos_descriptor(). The code wasn't checking that the
next usb_dev_cap_header structure could fit into the remaining buffer
space.
This patch fixes the error and also reduces the bNumDeviceCaps field
in the header to match the actual number of capabilities found, in
cases where there are fewer than expected.
Reported-by: Andrey Konovalov <[email protected]>
Signed-off-by: Alan Stern <[email protected]>
Tested-by: Andrey Konovalov <[email protected]>
CC: <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]> |
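The fix pattern described -- check that each capability header fits in the remaining buffer before reading it -- looks roughly like this stand-alone sketch; the struct is a minimal stand-in for the kernel's usb_dev_cap_header:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct usb_dev_cap_header {	/* illustrative stand-in */
	uint8_t bLength;
	uint8_t bDescriptorType;
	uint8_t bDevCapabilityType;
} __attribute__((packed));

static bool cap_header_fits(const unsigned char *buf, size_t remaining)
{
	const struct usb_dev_cap_header *h = (const void *)buf;

	if (remaining < sizeof(*h))	/* the header itself must fit first */
		return false;
	return h->bLength >= sizeof(*h) && h->bLength <= remaining;
}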
void print_plane(std::ostream& out, const CGAL::Plane_3<R>& p) {
print_plane_impl(out, Nef_3_internal::get_plane(p), typename Fraction_traits<typename R::FT>::Decompose() );
} | 0 | [
"CWE-125"
]
| cgal | 5a1ab45058112f8647c14c02f58905ecc597ec76 | 78,117,937,874,330,040,000,000,000,000,000,000,000 | 3 | Fix Nef_3 |
move_group_shares(struct task_group *tg, struct sched_domain *sd,
int scpu, int dcpu)
{
while (tg) {
__move_group_shares(tg, sd, scpu, dcpu);
tg = tg->parent;
}
} | 0 | []
| linux-2.6 | 8f1bc385cfbab474db6c27b5af1e439614f3025c | 75,471,065,383,893,770,000,000,000,000,000,000,000 | 8 | sched: fair: weight calculations
In order to level the hierarchy, we need to calculate load based on the
root view. That is, each task's load is in the same unit.
Consider a hierarchy in which group A contains group B and task 1,
and group B contains tasks 2 and 3.
To compute 1's load we do:
    load(1) = weight(1) / rq_weight(A)
To compute 2's load we do:
    load(2) = (weight(2) / rq_weight(B)) * (weight(B) / rq_weight(A))
This yields load fractions in comparable units.
The consequence is that it changes virtual time. We used to have:
    vtime_{i} = time_{i} / weight_{i}
vtime = \Sum vtime_{i} = time / rq_weight.
But with the new way of load calculation we get that vtime equals time.
Signed-off-by: Peter Zijlstra <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]> |
static int mov_write_int8_metadata(AVFormatContext *s, AVIOContext *pb,
const char *name, const char *tag,
int len)
{
AVDictionaryEntry *t = NULL;
uint8_t num;
int size = 24 + len;
if (len != 1 && len != 4)
return -1;
if (!(t = av_dict_get(s->metadata, tag, NULL, 0)))
return 0;
num = atoi(t->value);
avio_wb32(pb, size);
ffio_wfourcc(pb, name);
avio_wb32(pb, size - 8);
ffio_wfourcc(pb, "data");
avio_wb32(pb, 0x15);
avio_wb32(pb, 0);
if (len==4) avio_wb32(pb, num);
else avio_w8 (pb, num);
return size;
} | 0 | [
"CWE-369"
]
| FFmpeg | 2c0e98a0b478284bdff6d7a4062522605a8beae5 | 36,019,284,044,931,920,000,000,000,000,000,000,000 | 26 | avformat/movenc: Write version 2 of audio atom if channels is not known
The version 1 needs the channel count and would divide by 0
Fixes: division by 0
Fixes: fpe_movenc.c_1108_1.ogg
Fixes: fpe_movenc.c_1108_2.ogg
Fixes: fpe_movenc.c_1108_3.wav
Found-by: #CHEN HONGXU# <[email protected]>
Signed-off-by: Michael Niedermayer <[email protected]>
(cherry picked from commit fa19fbcf712a6a6cc5a5cfdc3254a97b9bce6582)
Signed-off-by: Michael Niedermayer <[email protected]> |
long do_mount(char *dev_name, char *dir_name, char *type_page,
unsigned long flags, void *data_page)
{
struct nameidata nd;
int retval = 0;
int mnt_flags = 0;
/* Discard magic */
if ((flags & MS_MGC_MSK) == MS_MGC_VAL)
flags &= ~MS_MGC_MSK;
/* Basic sanity checks */
if (!dir_name || !*dir_name || !memchr(dir_name, 0, PAGE_SIZE))
return -EINVAL;
if (dev_name && !memchr(dev_name, 0, PAGE_SIZE))
return -EINVAL;
if (data_page)
((char *)data_page)[PAGE_SIZE - 1] = 0;
/* Separate the per-mountpoint flags */
if (flags & MS_NOSUID)
mnt_flags |= MNT_NOSUID;
if (flags & MS_NODEV)
mnt_flags |= MNT_NODEV;
if (flags & MS_NOEXEC)
mnt_flags |= MNT_NOEXEC;
if (flags & MS_NOATIME)
mnt_flags |= MNT_NOATIME;
if (flags & MS_NODIRATIME)
mnt_flags |= MNT_NODIRATIME;
if (flags & MS_RELATIME)
mnt_flags |= MNT_RELATIME;
flags &= ~(MS_NOSUID | MS_NOEXEC | MS_NODEV | MS_ACTIVE |
MS_NOATIME | MS_NODIRATIME | MS_RELATIME);
/* ... and get the mountpoint */
retval = path_lookup(dir_name, LOOKUP_FOLLOW, &nd);
if (retval)
return retval;
retval = security_sb_mount(dev_name, &nd, type_page, flags, data_page);
if (retval)
goto dput_out;
if (flags & MS_REMOUNT)
retval = do_remount(&nd, flags & ~MS_REMOUNT, mnt_flags,
data_page);
else if (flags & MS_BIND)
retval = do_loopback(&nd, dev_name, flags & MS_REC);
else if (flags & (MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
retval = do_change_type(&nd, flags);
else if (flags & MS_MOVE)
retval = do_move_mount(&nd, dev_name);
else
retval = do_new_mount(&nd, type_page, flags, mnt_flags,
dev_name, data_page);
dput_out:
path_release(&nd);
return retval;
} | 0 | [
"CWE-269"
]
| linux-2.6 | ee6f958291e2a768fd727e7a67badfff0b67711a | 174,737,263,451,788,120,000,000,000,000,000,000,000 | 63 | check privileges before setting mount propagation
There's a missing check for CAP_SYS_ADMIN in do_change_type().
Signed-off-by: Miklos Szeredi <[email protected]>
Cc: Al Viro <[email protected]>
Cc: Christoph Hellwig <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
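The missing check named above is a one-line guard at the top of do_change_type(); a sketch in the style of the actual fix (simplified, not the verbatim diff):

/* Sketch: changing mount propagation is a privileged operation. */
static int do_change_type(struct nameidata *nd, int flag)
{
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	/* ... existing MS_SHARED/MS_PRIVATE/MS_SLAVE/MS_UNBINDABLE logic ... */
	return 0;
}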
zzip_disk_new(void)
{
ZZIP_DISK *disk = malloc(sizeof(ZZIP_DISK));
if (! disk)
return disk; /* ENOMEM */
zzip_disk_init(disk, 0, 0);
return disk;
} | 0 | []
| zziplib | 72ec933663f738d8e166979aa7fd5590b2104a07 | 46,901,075,305,164,330,000,000,000,000,000,000,000 | 8 | need to check on endbuf for stored files #15 |
SPICE_CONSTRUCTOR_FUNC(quic_global_init)
{
family_init(&family_8bpc, 8, DEFmaxclen);
family_init(&family_5bpc, 5, DEFmaxclen);
} | 0 | []
| spice-common | 762e0abae36033ccde658fd52d3235887b60862d | 33,174,693,692,331,597,000,000,000,000,000,000,000 | 5 | quic: Check we have some data to start decoding quic image
All paths already pass some data to quic_decode_begin, but check it in
that function anyway; it is not an expensive test.
Checking for non-zero is enough: all other words will potentially be
read by calling more_io_words, but we need at least one word to avoid a
potential initial buffer overflow or dereferencing an invalid pointer.
Signed-off-by: Frediano Ziglio <[email protected]>
Acked-by: Uri Lublin <[email protected]> |
TEST(RoleParsingTest, BuildRoleBSON) {
RoleGraph graph;
RoleName roleA("roleA", "dbA");
RoleName roleB("roleB", "dbB");
RoleName roleC("roleC", "dbC");
RoleName roleD("roleD", "dbD");
ActionSet actions;
actions.addAction(ActionType::find);
actions.addAction(ActionType::insert);
SharedRestrictionDocument restrictions = uassertStatusOK(parseAuthenticationRestriction(
BSON_ARRAY(BSON("clientSource" << BSON_ARRAY("127.0.0.1")))));
ASSERT_OK(graph.createRole(roleA));
ASSERT_OK(graph.createRole(roleB));
ASSERT_OK(graph.createRole(roleC));
ASSERT_OK(graph.createRole(roleD));
ASSERT_OK(graph.addRoleToRole(roleA, roleC));
ASSERT_OK(graph.addRoleToRole(roleA, roleB));
ASSERT_OK(graph.addRoleToRole(roleA, roleD));
ASSERT_OK(graph.addRoleToRole(roleB, roleC));
ASSERT_OK(graph.addPrivilegeToRole(
roleA, Privilege(ResourcePattern::forAnyNormalResource(), actions)));
ASSERT_OK(graph.addPrivilegeToRole(
roleB, Privilege(ResourcePattern::forExactNamespace(NamespaceString("dbB.foo")), actions)));
ASSERT_OK(
graph.addPrivilegeToRole(roleC, Privilege(ResourcePattern::forClusterResource(), actions)));
ASSERT_OK(graph.replaceRestrictionsForRole(roleD, restrictions));
ASSERT_OK(graph.recomputePrivilegeData());
// Role A
mutablebson::Document doc;
ASSERT_OK(RoleGraph::getBSONForRole(&graph, roleA, doc.root()));
BSONObj roleDoc = doc.getObject();
ASSERT_EQUALS("dbA.roleA", roleDoc["_id"].String());
ASSERT_EQUALS("roleA", roleDoc["role"].String());
ASSERT_EQUALS("dbA", roleDoc["db"].String());
ASSERT_TRUE(roleDoc["authenticationRestrictions"].Array().empty());
std::vector<BSONElement> privs = roleDoc["privileges"].Array();
ASSERT_EQUALS(1U, privs.size());
ASSERT_EQUALS("", privs[0].Obj()["resource"].Obj()["db"].String());
ASSERT_EQUALS("", privs[0].Obj()["resource"].Obj()["collection"].String());
ASSERT(privs[0].Obj()["resource"].Obj()["cluster"].eoo());
std::vector<BSONElement> actionElements = privs[0].Obj()["actions"].Array();
ASSERT_EQUALS(2U, actionElements.size());
ASSERT_EQUALS("find", actionElements[0].String());
ASSERT_EQUALS("insert", actionElements[1].String());
std::vector<BSONElement> roles = roleDoc["roles"].Array();
ASSERT_EQUALS(3U, roles.size());
ASSERT_EQUALS("roleC", roles[0].Obj()["role"].String());
ASSERT_EQUALS("dbC", roles[0].Obj()["db"].String());
ASSERT_EQUALS("roleB", roles[1].Obj()["role"].String());
ASSERT_EQUALS("dbB", roles[1].Obj()["db"].String());
// Role B
doc.reset();
ASSERT_OK(RoleGraph::getBSONForRole(&graph, roleB, doc.root()));
roleDoc = doc.getObject();
ASSERT_EQUALS("dbB.roleB", roleDoc["_id"].String());
ASSERT_EQUALS("roleB", roleDoc["role"].String());
ASSERT_EQUALS("dbB", roleDoc["db"].String());
ASSERT_TRUE(roleDoc["authenticationRestrictions"].Array().empty());
privs = roleDoc["privileges"].Array();
ASSERT_EQUALS(1U, privs.size());
ASSERT_EQUALS("dbB", privs[0].Obj()["resource"].Obj()["db"].String());
ASSERT_EQUALS("foo", privs[0].Obj()["resource"].Obj()["collection"].String());
ASSERT(privs[0].Obj()["resource"].Obj()["cluster"].eoo());
actionElements = privs[0].Obj()["actions"].Array();
ASSERT_EQUALS(2U, actionElements.size());
ASSERT_EQUALS("find", actionElements[0].String());
ASSERT_EQUALS("insert", actionElements[1].String());
roles = roleDoc["roles"].Array();
ASSERT_EQUALS(1U, roles.size());
ASSERT_EQUALS("roleC", roles[0].Obj()["role"].String());
ASSERT_EQUALS("dbC", roles[0].Obj()["db"].String());
// Role C
doc.reset();
ASSERT_OK(RoleGraph::getBSONForRole(&graph, roleC, doc.root()));
roleDoc = doc.getObject();
ASSERT_EQUALS("dbC.roleC", roleDoc["_id"].String());
ASSERT_EQUALS("roleC", roleDoc["role"].String());
ASSERT_EQUALS("dbC", roleDoc["db"].String());
ASSERT_TRUE(roleDoc["authenticationRestrictions"].Array().empty());
privs = roleDoc["privileges"].Array();
ASSERT_EQUALS(1U, privs.size());
ASSERT(privs[0].Obj()["resource"].Obj()["cluster"].Bool());
ASSERT(privs[0].Obj()["resource"].Obj()["db"].eoo());
ASSERT(privs[0].Obj()["resource"].Obj()["collection"].eoo());
actionElements = privs[0].Obj()["actions"].Array();
ASSERT_EQUALS(2U, actionElements.size());
ASSERT_EQUALS("find", actionElements[0].String());
ASSERT_EQUALS("insert", actionElements[1].String());
roles = roleDoc["roles"].Array();
ASSERT_EQUALS(0U, roles.size());
// Role D
doc.reset();
ASSERT_OK(RoleGraph::getBSONForRole(&graph, roleD, doc.root()));
roleDoc = doc.getObject();
ASSERT_EQUALS("dbD.roleD", roleDoc["_id"].String());
ASSERT_EQUALS("roleD", roleDoc["role"].String());
ASSERT_EQUALS("dbD", roleDoc["db"].String());
ASSERT_FALSE(roleDoc["authenticationRestrictions"].Array().empty());
auto restrictionObj = BSONArray(roleDoc["authenticationRestrictions"].Obj());
SharedRestrictionDocument parsedRestrictions =
uassertStatusOK(parseAuthenticationRestriction(restrictionObj));
ASSERT_EQ(restrictions->toString(), parsedRestrictions->toString());
privs = roleDoc["privileges"].Array();
ASSERT_TRUE(privs.empty());
roles = roleDoc["roles"].Array();
ASSERT_EQUALS(0U, roles.size());
} | 0 | [
"CWE-863"
]
| mongo | 521e56b407ac72bc69a97a24d1253f51a5b6e81b | 94,389,210,562,029,700,000,000,000,000,000,000,000 | 131 | SERVER-45472 Ensure RoleGraph can serialize authentication restrictions to BSON |
static int fat_zeroed_cluster(struct inode *dir, sector_t blknr, int nr_used,
struct buffer_head **bhs, int nr_bhs)
{
struct super_block *sb = dir->i_sb;
sector_t last_blknr = blknr + MSDOS_SB(sb)->sec_per_clus;
int err, i, n;
/* Zeroing the unused blocks on this cluster */
blknr += nr_used;
n = nr_used;
while (blknr < last_blknr) {
bhs[n] = sb_getblk(sb, blknr);
if (!bhs[n]) {
err = -ENOMEM;
goto error;
}
memset(bhs[n]->b_data, 0, sb->s_blocksize);
set_buffer_uptodate(bhs[n]);
mark_buffer_dirty(bhs[n]);
n++;
blknr++;
if (n == nr_bhs) {
if (IS_DIRSYNC(dir)) {
err = fat_sync_bhs(bhs, n);
if (err)
goto error;
}
for (i = 0; i < n; i++)
brelse(bhs[i]);
n = 0;
}
}
if (IS_DIRSYNC(dir)) {
err = fat_sync_bhs(bhs, n);
if (err)
goto error;
}
for (i = 0; i < n; i++)
brelse(bhs[i]);
return 0;
error:
for (i = 0; i < n; i++)
bforget(bhs[i]);
return err;
} | 0 | []
| linux-2.6 | c483bab099cb89e92b7cad94a52fcdaf37e56657 | 173,474,113,211,646,430,000,000,000,000,000,000,000 | 48 | fat: fix VFAT compat ioctls on 64-bit systems
If you compile and run the below test case in an msdos or vfat directory on
an x86-64 system with -m32 you'll get garbage in the kernel_dirent struct
followed by a SIGSEGV.
The patch fixes this.
Reported and initial fix by Bart Oldeman
#include <sys/types.h>
#include <sys/ioctl.h>
#include <dirent.h>
#include <stdio.h>
#include <unistd.h>
#include <fcntl.h>
struct kernel_dirent {
long d_ino;
long d_off;
unsigned short d_reclen;
char d_name[256]; /* We must not include limits.h! */
};
#define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct kernel_dirent [2])
#define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct kernel_dirent [2])
int main(void)
{
int fd = open(".", O_RDONLY);
struct kernel_dirent de[2];
while (1) {
int i = ioctl(fd, VFAT_IOCTL_READDIR_BOTH, (long)de);
if (i == -1) break;
if (de[0].d_reclen == 0) break;
printf("SFN: reclen=%2d off=%d ino=%d, %-12s",
de[0].d_reclen, de[0].d_off, de[0].d_ino, de[0].d_name);
if (de[1].d_reclen)
printf("\tLFN: reclen=%2d off=%d ino=%d, %s",
de[1].d_reclen, de[1].d_off, de[1].d_ino, de[1].d_name);
printf("\n");
}
return 0;
}
Signed-off-by: Bart Oldeman <[email protected]>
Signed-off-by: OGAWA Hirofumi <[email protected]>
Cc: <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
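For context on the message above: struct kernel_dirent stores d_ino and d_off as long, which is 4 bytes in an ILP32 (-m32) process but 8 bytes in an LP64 kernel, so the two sides of the ioctl disagree about where d_reclen and d_name live — hence the garbage and SIGSEGV. A minimal user-space sketch of that layout mismatch (the struct is copied from the test case above; this illustrates the bug, it is not the kernel's actual compat fix):
#include <cstdio>
#include <cstddef>
// Mirrors the kernel_dirent from the test case above. `long` is 4 bytes
// on ILP32 (-m32) but 8 bytes on LP64, so d_reclen and d_name land at
// different offsets on each side of the compat ioctl boundary.
struct kernel_dirent {
    long d_ino;
    long d_off;
    unsigned short d_reclen;
    char d_name[256];
};
int main(void)
{
    std::printf("sizeof(long)=%zu sizeof(kernel_dirent)=%zu\n",
                sizeof(long), sizeof(kernel_dirent));
    std::printf("offsetof(d_reclen)=%zu offsetof(d_name)=%zu\n",
                offsetof(kernel_dirent, d_reclen),
                offsetof(kernel_dirent, d_name));
    return 0;
}
Compiling once with -m32 and once without shows the two incompatible layouts a compat ioctl handler has to translate between.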
static struct page *kvm_pfn_to_page(pfn_t pfn)
{
if (is_error_noslot_pfn(pfn))
return KVM_ERR_PTR_BAD_PAGE;
if (kvm_is_mmio_pfn(pfn)) {
WARN_ON(1);
return KVM_ERR_PTR_BAD_PAGE;
}
return pfn_to_page(pfn);
} | 0 | [
"CWE-399"
]
| linux | e40f193f5bb022e927a57a4f5d5194e4f12ddb74 | 318,609,091,273,439,500,000,000,000,000,000,000,000 | 12 | KVM: Fix iommu map/unmap to handle memory slot moves
The iommu integration into memory slots expects memory slots to be
added or removed and doesn't handle the move case. We can unmap
slots from the iommu after we mark them invalid and map them before
installing the final memslot array. Also re-order the kmemdup vs
map so we don't leave iommu mappings if we get ENOMEM.
Reviewed-by: Gleb Natapov <[email protected]>
Signed-off-by: Alex Williamson <[email protected]>
Signed-off-by: Marcelo Tosatti <[email protected]> |
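The ordering described in the message above — unmap from the iommu after the slot is marked invalid, duplicate the memslot array before mapping, and map before the final array is installed, so an allocation failure leaves no stale iommu mapping — can be sketched as below. All type and function names here are hypothetical stand-ins, not the actual KVM API:
#include <cstdlib>
#include <cstring>
struct memslot { unsigned long base_gfn; unsigned long npages; };
// Hypothetical stand-ins for the iommu and memslot operations.
static void iommu_unmap_slot(const memslot *) {}
static int  iommu_map_slot(const memslot *) { return 0; }
static void install_memslots(memslot *) {}
// Sketch of a slot move: the old mapping is torn down while the slot is
// invalid, and both the array copy and the new mapping happen *before*
// the updated array is committed, so -ENOMEM cannot leave a stale
// iommu mapping behind.
static int move_slot(memslot *slots, const memslot *old_slot,
                     const memslot *new_slot, size_t nslots)
{
    iommu_unmap_slot(old_slot);            // slot already marked invalid
    memslot *copy = static_cast<memslot *>(
        std::malloc(nslots * sizeof(memslot))); // kmemdup analogue
    if (!copy)
        return -1;                         // ENOMEM: nothing mapped yet
    std::memcpy(copy, slots, nslots * sizeof(memslot));
    if (iommu_map_slot(new_slot)) {        // map before installing
        std::free(copy);
        return -1;
    }
    install_memslots(copy);                // commit the final array
    return 0;
}
int main()
{
    memslot slots[2] = {{0, 16}, {16, 16}};
    memslot moved = {32, 16};
    return move_slot(slots, &slots[1], &moved, 2) ? 1 : 0;
}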
int LibRaw::unpack(void)
{
CHECK_ORDER_HIGH(LIBRAW_PROGRESS_LOAD_RAW);
CHECK_ORDER_LOW(LIBRAW_PROGRESS_IDENTIFY);
try {
RUN_CALLBACK(LIBRAW_PROGRESS_LOAD_RAW,0,2);
if (O.shot_select >= P1.raw_count)
return LIBRAW_REQUEST_FOR_NONEXISTENT_IMAGE;
if(!load_raw)
return LIBRAW_UNSPECIFIED_ERROR;
if (O.use_camera_matrix && C.cmatrix[0][0] > 0.25)
{
memcpy (C.rgb_cam, C.cmatrix, sizeof (C.cmatrix));
IO.raw_color = 0;
}
// already allocated ?
if(imgdata.image)
{
free(imgdata.image);
imgdata.image = 0;
}
if (libraw_internal_data.unpacker_data.meta_length)
{
libraw_internal_data.internal_data.meta_data =
(char *) malloc (libraw_internal_data.unpacker_data.meta_length);
merror (libraw_internal_data.internal_data.meta_data, "LibRaw::unpack()");
}
ID.input->seek(libraw_internal_data.unpacker_data.data_offset, SEEK_SET);
int save_document_mode = O.document_mode;
O.document_mode = 0;
libraw_decoder_info_t decoder_info;
get_decoder_info(&decoder_info);
int save_iwidth = S.iwidth, save_iheight = S.iheight, save_shrink = IO.shrink;
int rwidth = S.raw_width, rheight = S.raw_height;
if( !IO.fuji_width)
{
// adjust non-Fuji allocation
if(rwidth < S.width + S.left_margin)
rwidth = S.width + S.left_margin;
if(rheight < S.height + S.top_margin)
rheight = S.height + S.top_margin;
}
if(decoder_info.decoder_flags & LIBRAW_DECODER_FLATFIELD)
{
imgdata.rawdata.raw_alloc = malloc(rwidth*rheight*sizeof(imgdata.rawdata.raw_image[0]));
imgdata.rawdata.raw_image = (ushort*) imgdata.rawdata.raw_alloc;
}
else if (decoder_info.decoder_flags & LIBRAW_DECODER_4COMPONENT)
{
S.iwidth = S.width;
S.iheight= S.height;
IO.shrink = 0;
imgdata.rawdata.raw_alloc = calloc(rwidth*rheight,sizeof(*imgdata.rawdata.color_image));
imgdata.rawdata.color_image = (ushort(*)[4]) imgdata.rawdata.raw_alloc;
}
else if (decoder_info.decoder_flags & LIBRAW_DECODER_LEGACY)
{
// sRAW and Foveon only, so extra buffer size is just 1/4
// Legacy converters do not support half mode!
S.iwidth = S.width;
S.iheight= S.height;
IO.shrink = 0;
// allocate image as temporary buffer, sized iwidth*iheight
imgdata.rawdata.raw_alloc = calloc(S.iwidth*S.iheight,sizeof(*imgdata.image));
imgdata.image = (ushort (*)[4]) imgdata.rawdata.raw_alloc;
}
(this->*load_raw)();
// recover saved
if( decoder_info.decoder_flags & LIBRAW_DECODER_LEGACY)
{
imgdata.image = 0;
imgdata.rawdata.color_image = (ushort (*)[4]) imgdata.rawdata.raw_alloc;
}
// calculate channel maximum
{
for(int c=0;c<4;c++) C.channel_maximum[c] = 0;
if(decoder_info.decoder_flags & LIBRAW_DECODER_LEGACY)
{
for(int rc = 0; rc < S.iwidth*S.iheight; rc++)
{
if(C.channel_maximum[0]<imgdata.rawdata.color_image[rc][0])
C.channel_maximum[0]=imgdata.rawdata.color_image[rc][0];
if(C.channel_maximum[1]<imgdata.rawdata.color_image[rc][1])
C.channel_maximum[1]=imgdata.rawdata.color_image[rc][1];
if(C.channel_maximum[2]<imgdata.rawdata.color_image[rc][2])
C.channel_maximum[2]=imgdata.rawdata.color_image[rc][2];
if(C.channel_maximum[3]<imgdata.rawdata.color_image[rc][3])
C.channel_maximum[3]=imgdata.rawdata.color_image[rc][3];
}
}
else if(decoder_info.decoder_flags & LIBRAW_DECODER_4COMPONENT)
{
for(int row = S.top_margin; row < S.height+S.top_margin; row++)
for(int col = S.left_margin; col < S.width+S.left_margin; col++)
{
int rc = row*S.raw_width+col;
if(C.channel_maximum[0]<imgdata.rawdata.color_image[rc][0])
C.channel_maximum[0]=imgdata.rawdata.color_image[rc][0];
if(C.channel_maximum[1]<imgdata.rawdata.color_image[rc][1])
C.channel_maximum[1]=imgdata.rawdata.color_image[rc][1];
if(C.channel_maximum[2]<imgdata.rawdata.color_image[rc][2])
C.channel_maximum[2]=imgdata.rawdata.color_image[rc][2];
if(C.channel_maximum[3]<imgdata.rawdata.color_image[rc][3])
C.channel_maximum[3]=imgdata.rawdata.color_image[rc][4];
}
}
else if (decoder_info.decoder_flags & LIBRAW_DECODER_FLATFIELD)
{
for(int row = 0; row < S.height; row++)
{
int colors[4];
for (int xx=0;xx<4;xx++)
colors[xx] = COLOR(row,xx);
for(int col = 0; col < S.width; col++)
{
int cc = colors[col&3];
if(C.channel_maximum[cc]
< imgdata.rawdata.raw_image[(row+S.top_margin)*S.raw_width
+(col+S.left_margin)])
C.channel_maximum[cc] =
imgdata.rawdata.raw_image[(row+S.top_margin)*S.raw_width
+(col+S.left_margin)];
}
}
}
}
// recover image sizes
S.iwidth = save_iwidth;
S.iheight = save_iheight;
IO.shrink = save_shrink;
// phase-one black
if(imgdata.rawdata.ph1_black)
C.ph1_black = imgdata.rawdata.ph1_black;
O.document_mode = save_document_mode;
// adjust black to possible maximum
unsigned int i = C.cblack[3];
unsigned int c;
for(c=0;c<3;c++)
if (i > C.cblack[c]) i = C.cblack[c];
for (c=0;c<4;c++)
C.cblack[c] -= i;
C.black += i;
// Save color,sizes and internal data into raw_image fields
memmove(&imgdata.rawdata.color,&imgdata.color,sizeof(imgdata.color));
memmove(&imgdata.rawdata.sizes,&imgdata.sizes,sizeof(imgdata.sizes));
memmove(&imgdata.rawdata.iparams,&imgdata.idata,sizeof(imgdata.idata));
memmove(&imgdata.rawdata.ioparams,&libraw_internal_data.internal_output_params,sizeof(libraw_internal_data.internal_output_params));
SET_PROC_FLAG(LIBRAW_PROGRESS_LOAD_RAW);
RUN_CALLBACK(LIBRAW_PROGRESS_LOAD_RAW,1,2);
return 0;
}
catch ( LibRaw_exceptions err) {
EXCEPTION_HANDLER(err);
}
catch (std::exception ee) {
EXCEPTION_HANDLER(LIBRAW_EXCEPTION_IO_CORRUPT);
}
} | 1 | [
"CWE-399"
]
| LibRaw | c14ae36d28e80139b2f31b5d9d7623db3b597a3a | 326,628,891,785,204,370,000,000,000,000,000,000,000 | 177 | fixed error handling for broken full-color images |
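One defect visible in the function above (labelled vulnerable, target=1): in the 4COMPONENT channel-maximum loop, the fourth channel compares against color_image[rc][3] but assigns from color_image[rc][4], an out-of-bounds read past the last valid index of a 4-element pixel. A bounds-safe sketch of that per-pixel update, looping over the channel index instead of unrolling it by hand (an illustration, not the project's actual fix):
// Hypothetical helper: update per-channel maxima for one 4-component
// pixel; the index stays in [0, 3], so the [rc][4] overrun cannot occur.
static inline void update_channel_maxima(unsigned short maximum[4],
                                         const unsigned short pixel[4])
{
    for (int c = 0; c < 4; c++)
        if (maximum[c] < pixel[c])
            maximum[c] = pixel[c];
}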
MagickExport size_t GetOptimalKernelWidth(const double radius,
const double sigma)
{
return(GetOptimalKernelWidth1D(radius,sigma));
} | 0 | [
"CWE-369"
]
| ImageMagick6 | 90255f0834eead08d59f46b0bda7b1580451cc0f | 316,962,461,598,389,660,000,000,000,000,000,000,000 | 5 | https://github.com/ImageMagick/ImageMagick/issues/3077 |
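The row above tags GetOptimalKernelWidth with CWE-369 (divide by zero); the function itself only forwards to GetOptimalKernelWidth1D, so the unguarded division presumably occurs there when sigma is zero. A guarded sketch of the same idea — the width formula and fallback here are illustrative assumptions, not ImageMagick's actual fix:
#include <cmath>
#include <cstddef>
// Illustrative kernel-width helper: clamp sigma away from zero before it
// is used as a divisor, so a zero-sigma request degenerates to width 1
// instead of dividing by zero.
static size_t optimal_kernel_width_1d(double radius, double sigma)
{
    if (radius > 0.0)
        return 2 * static_cast<size_t>(radius) + 1;
    if (std::fabs(sigma) < 1.0e-12)   // the CWE-369 guard: sigma divides below
        return 1;
    const double gamma = 1.0 / (2.0 * sigma * sigma);
    // Grow the width until the Gaussian tail falls below a small epsilon.
    size_t width = 1;
    while (std::exp(-gamma * ((width + 1) / 2.0) * ((width + 1) / 2.0)) > 1.0e-6)
        width += 2;
    return width;
}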
int HttpFile::save(const std::string &path) const
{
return implPtr_->save(path);
} | 0 | [
"CWE-552"
]
| drogon | 3c785326c63a34aa1799a639ae185bc9453cb447 | 21,682,483,173,228,894,000,000,000,000,000,000,000 | 4 | Prevent malformed upload path causing arbitrary write (#1174) |
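The drogon row above (CWE-552) concerns HttpFile::save writing an upload to a caller-influenced path, where a malformed name (e.g. one containing ../ components or an absolute path) could escape the intended upload directory. A defensive sketch of the canonicalize-then-prefix-check pattern using std::filesystem — an illustration of the mitigation class, not drogon's actual patch:
#include <filesystem>
#include <string>
namespace fs = std::filesystem;
// Resolve `name` under `uploadRoot` and refuse any result that escapes
// the root (via ../ components, symlink-free lexical tricks, or an
// absolute path, which operator/ would substitute wholesale).
static bool safe_upload_path(const fs::path &uploadRoot,
                             const std::string &name, fs::path &out)
{
    fs::path candidate = fs::weakly_canonical(uploadRoot / name);
    fs::path root = fs::weakly_canonical(uploadRoot);
    // The candidate is safe only if root is a component-wise prefix of it.
    auto rb = root.begin(), re = root.end();
    auto cb = candidate.begin(), ce = candidate.end();
    for (; rb != re; ++rb, ++cb)
        if (cb == ce || *cb != *rb)
            return false;
    out = candidate;
    return true;
}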