| func (string, 0-484k chars) | target (int64, 0-1) | cwe (sequence, 0-4 entries) | project (string, 799 classes) | commit_id (string, 40 chars) | hash (float64) | size (int64, 1-24k) | message (string, 0-13.3k chars) |
|---|---|---|---|---|---|---|---|
static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb)
{
return (struct dev_kfree_skb_cb *)skb->cb;
} | 0 | [
"CWE-400",
"CWE-703"
] | linux | fac8e0f579695a3ecbc4d3cac369139d7f819971 | 309,196,688,279,253,170,000,000,000,000,000,000,000 | 4 | tunnels: Don't apply GRO to multiple layers of encapsulation.
When drivers express support for TSO of encapsulated packets, they
only mean that they can do it for one layer of encapsulation.
Supporting additional levels would mean updating, at a minimum,
more IP length fields and they are unaware of this.
No encapsulation device expresses support for handling offloaded
encapsulated packets, so we won't generate these types of frames
in the transmit path. However, GRO doesn't have a check for
multiple levels of encapsulation and will attempt to build them.
UDP tunnel GRO actually does prevent this situation but it only
handles multiple UDP tunnels stacked on top of each other. This
generalizes that solution to prevent any kind of tunnel stacking
that would cause problems.
Fixes: bf5a755f ("net-gre-gro: Add GRE support to the GRO stack")
Signed-off-by: Jesse Gross <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
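The commit message above describes a guard worth seeing in miniature: GRO marks a packet the first time it enters an encapsulated payload and flushes instead of merging if the mark is already set (the upstream fix adds an encap_mark bit to the GRO control block). A standalone sketch of that pattern; the struct and helper here are illustrative stand-ins, not the kernel's actual types:

```c
#include <stdbool.h>

/* Toy stand-in for the per-packet GRO control block. */
struct gro_cb {
    bool encap_mark;   /* set once we enter an encapsulated payload */
    bool flush;        /* force the packet out without aggregation */
};

/* Called from a tunnel protocol's gro_receive path. Returns false when
 * a second encapsulation layer is detected; the packet is then flushed
 * rather than merged into a super-packet no driver can offload. */
static bool gro_enter_encap(struct gro_cb *cb)
{
    if (cb->encap_mark) {
        cb->flush = true;      /* stacked tunnel: refuse to aggregate */
        return false;
    }
    cb->encap_mark = true;     /* first (and only allowed) layer */
    return true;
}
```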
int nfs4_setxattr(struct dentry *dentry, const char *key, const void *buf,
size_t buflen, int flags)
{
struct inode *inode = dentry->d_inode;
if (strcmp(key, XATTR_NAME_NFSV4_ACL) != 0)
return -EOPNOTSUPP;
return nfs4_proc_set_acl(inode, buf, buflen);
} | 0 | [
"CWE-703"
] | linux | dc0b027dfadfcb8a5504f7d8052754bf8d501ab9 | 56,544,606,711,298,850,000,000,000,000,000,000,000 | 10 | NFSv4: Convert the open and close ops to use fmode
Signed-off-by: Trond Myklebust <[email protected]> |
pblock_init(Slapi_PBlock *pb)
{
memset(pb, '\0', sizeof(Slapi_PBlock));
} | 0 | [
"CWE-415"
] | 389-ds-base | a3c298f8140d3e4fa1bd5a670f1bb965a21a9b7b | 139,232,858,159,711,640,000,000,000,000,000,000,000 | 4 | Issue 5218 - double-free of the virtual attribute context in persistent search (#5219)
description:
A search is processed by a worker using a private pblock.
If the search is persistent, the worker spawns a thread
and effectively duplicates its private pblock so that the spawned
thread can continue to process the persistent search.
The worker then ends the initial search, reinitializes (frees) its private pblock,
and returns to monitoring the wait_queue.
When the persistent search completes, it frees the duplicated
pblock.
The problem is that the private pblock and the duplicated pblock
refer to the same structure (pb_vattr_context).
That can lead to a double free.
Fix:
When cloning the pblock (slapi_pblock_clone), make sure
to transfer the references inside the original (private)
pblock to the target (cloned) one.
That includes the pb_vattr_context pointer.
Reviewed by: Mark Reynolds, James Chapman, Pierre Rogier (Thanks !)
Co-authored-by: Mark Reynolds <[email protected]> |
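The fix described above is an ownership transfer during cloning: copy the pointer into the clone, then null it in the source so exactly one pblock ever frees it. A minimal standalone sketch of that move semantics; the field and function names are invented (the real change lives in slapi_pblock_clone):

```c
#include <stddef.h>

struct vattr_ctx;                    /* opaque; must be freed exactly once */

struct pblock {
    struct vattr_ctx *vattr_ctx;
    /* ... many other fields ... */
};

/* Move, don't share: after cloning, only 'dst' owns the context, so
 * freeing both pblocks can no longer double-free it. */
static void pblock_transfer_vattr(struct pblock *dst, struct pblock *src)
{
    dst->vattr_ctx = src->vattr_ctx;
    src->vattr_ctx = NULL;           /* source gives up ownership */
}
```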
int rsi_mgmt_pkt_recv(struct rsi_common *common, u8 *msg)
{
s32 msg_len = (le16_to_cpu(*(__le16 *)&msg[0]) & 0x0fff);
u16 msg_type = (msg[2]);
rsi_dbg(FSM_ZONE, "%s: Msg Len: %d, Msg Type: %4x\n",
__func__, msg_len, msg_type);
switch (msg_type) {
case TA_CONFIRM_TYPE:
return rsi_handle_ta_confirm_type(common, msg);
case CARD_READY_IND:
common->hibernate_resume = false;
rsi_dbg(FSM_ZONE, "%s: Card ready indication received\n",
__func__);
return rsi_handle_card_ready(common, msg);
case TX_STATUS_IND:
switch (msg[RSI_TX_STATUS_TYPE]) {
case PROBEREQ_CONFIRM:
common->mgmt_q_block = false;
rsi_dbg(FSM_ZONE, "%s: Probe confirm received\n",
__func__);
break;
case EAPOL4_CONFIRM:
if (msg[RSI_TX_STATUS]) {
common->eapol4_confirm = true;
if (!rsi_send_block_unblock_frame(common,
false))
common->hw_data_qs_blocked = false;
}
}
break;
case BEACON_EVENT_IND:
rsi_dbg(INFO_ZONE, "Beacon event\n");
if (common->fsm_state != FSM_MAC_INIT_DONE)
return -1;
if (common->iface_down)
return -1;
if (!common->beacon_enabled)
return -1;
rsi_send_beacon(common);
break;
case WOWLAN_WAKEUP_REASON:
rsi_dbg(ERR_ZONE, "\n\nWakeup Type: %x\n", msg[15]);
switch (msg[15]) {
case RSI_UNICAST_MAGIC_PKT:
rsi_dbg(ERR_ZONE,
"*** Wakeup for Unicast magic packet ***\n");
break;
case RSI_BROADCAST_MAGICPKT:
rsi_dbg(ERR_ZONE,
"*** Wakeup for Broadcast magic packet ***\n");
break;
case RSI_EAPOL_PKT:
rsi_dbg(ERR_ZONE,
"*** Wakeup for GTK renewal ***\n");
break;
case RSI_DISCONNECT_PKT:
rsi_dbg(ERR_ZONE,
"*** Wakeup for Disconnect ***\n");
break;
case RSI_HW_BMISS_PKT:
rsi_dbg(ERR_ZONE,
"*** Wakeup for HW Beacon miss ***\n");
break;
default:
rsi_dbg(ERR_ZONE,
"##### Un-intentional Wakeup #####\n");
break;
}
break;
case RX_DOT11_MGMT:
return rsi_mgmt_pkt_to_core(common, msg, msg_len);
default:
rsi_dbg(INFO_ZONE, "Received packet type: 0x%x\n", msg_type);
}
return 0;
} | 0 | [
"CWE-400",
"CWE-401"
] | linux | d563131ef23cbc756026f839a82598c8445bc45f | 102,423,234,797,955,120,000,000,000,000,000,000,000 | 78 | rsi: release skb if rsi_prepare_beacon fails
In rsi_send_beacon, if rsi_prepare_beacon fails the allocated skb should
be released.
Signed-off-by: Navid Emamdoost <[email protected]>
Signed-off-by: Kalle Valo <[email protected]> |
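The leak the commit fixes follows a classic shape: a buffer allocated in a function must be released on every error path before returning. A generic sketch of the corrected control flow, with invented helper names standing in for rsi_prepare_beacon and the send path:

```c
#include <stdlib.h>

struct beacon { char data[256]; };

int prepare_beacon(struct beacon *b);   /* may fail; does not take ownership */
int queue_beacon(struct beacon *b);     /* takes ownership on success */

int send_beacon(void)
{
    struct beacon *b = malloc(sizeof(*b));
    if (!b)
        return -1;
    if (prepare_beacon(b) != 0) {
        free(b);                        /* the release the driver was missing */
        return -1;
    }
    return queue_beacon(b);             /* ownership handed off */
}
```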
static int readOHDRmessages(struct READER *reader,
struct DATAOBJECT *dataobject,
uint64_t end_of_messages) {
FILE *fhd = reader->fhd;
int err;
long end;
while (ftell(fhd) <
end_of_messages - 4) { /* final gap may have a size of up to 3 */
uint8_t header_message_type = (uint8_t)fgetc(fhd);
uint16_t header_message_size = (uint16_t)readValue(reader, 2);
uint8_t header_message_flags = (uint8_t)fgetc(fhd);
if ((header_message_flags & ~5) != 0) {
mylog("OHDR unsupported OHDR message flag %02X\n", header_message_flags);
return MYSOFA_UNSUPPORTED_FORMAT;
}
if ((dataobject->flags & (1 << 2)) != 0)
/* ignore header_creation_order */
if (fseek(reader->fhd, 2, SEEK_CUR) < 0)
return errno;
mylog(" OHDR message type %2d offset %6lX len %4X\n", header_message_type,
ftell(fhd), header_message_size);
end = ftell(fhd) + header_message_size;
switch (header_message_type) {
case 0: /* NIL Message */
if (!!(err = readOHDRHeaderMessageNIL(reader, header_message_size)))
return err;
break;
case 1: /* Dataspace Message */
if (!!(err = readOHDRHeaderMessageDataspace(reader, &dataobject->ds)))
return err;
break;
case 2: /* Link Info Message */
if (!!(err = readOHDRHeaderMessageLinkInfo(reader, &dataobject->li)))
return err;
break;
case 3: /* Datatype Message */
if (!!(err = readOHDRHeaderMessageDatatype(reader, &dataobject->dt)))
return err;
break;
case 4: /* Data Fill Message Old */
if (!!(err = readOHDRHeaderMessageDataFillOld(reader)))
return err;
break;
case 5: /* Data Fill Message */
if (!!(err = readOHDRHeaderMessageDataFill(reader)))
return err;
break;
case 8: /* Data Layout Message */
if (!!(err = readOHDRHeaderMessageDataLayout(reader, dataobject)))
return err;
break;
case 10: /* Group Info Message */
if (!!(err = readOHDRHeaderMessageGroupInfo(reader, &dataobject->gi)))
return err;
break;
case 11: /* Filter Pipeline Message */
if (!!(err = readOHDRHeaderMessageFilterPipeline(reader)))
return err;
break;
case 12: /* Attribute Message */
if (!!(err = readOHDRHeaderMessageAttribute(reader, dataobject)))
return err;
break;
case 16: /* Continue Message */
if (!!(err = readOHDRHeaderMessageContinue(reader, dataobject)))
return err;
break;
case 21: /* Attribute Info Message */
if (!!(err = readOHDRHeaderMessageAttributeInfo(reader, &dataobject->ai)))
return err;
break;
default:
mylog("OHDR unknown header message of type %d\n", header_message_type);
return MYSOFA_UNSUPPORTED_FORMAT;
}
if (ftell(fhd) != end) {
mylog("OHDR message length mismatch by %ld\n", ftell(fhd) - end);
return MYSOFA_INTERNAL_ERROR;
}
}
if (fseek(fhd, end_of_messages + 4, SEEK_SET) < 0) /* skip checksum */
return errno;
return MYSOFA_OK;
} | 0 | [
"CWE-787"
] | libmysofa | 890400ebd092c574707d0c132124f8ff047e20e1 | 322,649,742,615,820,470,000,000,000,000,000,000,000 | 94 | Fix for issue 163 |
static void ccall(JF, js_Ast *fun, js_Ast *args)
{
int n;
switch (fun->type) {
case EXP_INDEX:
cexp(J, F, fun->a);
emit(J, F, OP_DUP);
cexp(J, F, fun->b);
emit(J, F, OP_GETPROP);
emit(J, F, OP_ROT2);
break;
case EXP_MEMBER:
cexp(J, F, fun->a);
emit(J, F, OP_DUP);
emitstring(J, F, OP_GETPROP_S, fun->b->string);
emit(J, F, OP_ROT2);
break;
case EXP_IDENTIFIER:
if (!strcmp(fun->string, "eval")) {
ceval(J, F, fun, args);
return;
}
/* fallthrough */
default:
cexp(J, F, fun);
emit(J, F, OP_UNDEF);
break;
}
n = cargs(J, F, args);
emit(J, F, OP_CALL);
emitarg(J, F, n);
} | 0 | [
"CWE-703",
"CWE-787"
] | mujs | df8559e7bdbc6065276e786217eeee70f28fce66 | 282,138,207,830,108,330,000,000,000,000,000,000,000 | 32 | Bug 704749: Clear jump list after patching jump addresses.
Since we can emit a statement multiple times when compiling try/finally
we have to use a new patch list for each instance. |
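The bug class behind this fix: a jump/patch list reused across multiple emissions of the same AST node keeps stale entries, so the second emission patches the first emission's addresses. Clearing the list after patching (or using a fresh one per instance) prevents that. A rough standalone sketch with invented names:

```c
#include <stddef.h>

struct jumplist {
    size_t addrs[64];
    size_t count;
};

void patch_addr(unsigned char *code, size_t at, size_t target);

/* Patch every recorded jump to 'target', then reset the list so a later
 * re-emission of the same statement (e.g. via try/finally) starts clean. */
static void patch_and_clear(struct jumplist *jl, unsigned char *code,
                            size_t target)
{
    for (size_t i = 0; i < jl->count; i++)
        patch_addr(code, jl->addrs[i], target);
    jl->count = 0;   /* the fix: stale entries must not survive */
}
```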
static ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
size_t size, int flags)
{
struct tcp_sock *tp = tcp_sk(sk);
int mss_now, size_goal;
int err;
ssize_t copied;
long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
/* Wait for a connection to finish. One exception is TCP Fast Open
* (passive side) where data is allowed to be sent before a connection
* is fully established.
*/
if (((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) &&
!tcp_passive_fastopen(sk)) {
if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
goto out_err;
}
clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
mss_now = tcp_send_mss(sk, &size_goal, flags);
copied = 0;
err = -EPIPE;
if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
goto out_err;
while (size > 0) {
struct sk_buff *skb = tcp_write_queue_tail(sk);
int copy, i;
bool can_coalesce;
if (!tcp_send_head(sk) || (copy = size_goal - skb->len) <= 0) {
new_segment:
if (!sk_stream_memory_free(sk))
goto wait_for_sndbuf;
skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation);
if (!skb)
goto wait_for_memory;
skb_entail(sk, skb);
copy = size_goal;
}
if (copy > size)
copy = size;
i = skb_shinfo(skb)->nr_frags;
can_coalesce = skb_can_coalesce(skb, i, page, offset);
if (!can_coalesce && i >= MAX_SKB_FRAGS) {
tcp_mark_push(tp, skb);
goto new_segment;
}
if (!sk_wmem_schedule(sk, copy))
goto wait_for_memory;
if (can_coalesce) {
skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
} else {
get_page(page);
skb_fill_page_desc(skb, i, page, offset, copy);
}
skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
skb->len += copy;
skb->data_len += copy;
skb->truesize += copy;
sk->sk_wmem_queued += copy;
sk_mem_charge(sk, copy);
skb->ip_summed = CHECKSUM_PARTIAL;
tp->write_seq += copy;
TCP_SKB_CB(skb)->end_seq += copy;
skb_shinfo(skb)->gso_segs = 0;
if (!copied)
TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;
copied += copy;
offset += copy;
if (!(size -= copy))
goto out;
if (skb->len < size_goal || (flags & MSG_OOB))
continue;
if (forced_push(tp)) {
tcp_mark_push(tp, skb);
__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
} else if (skb == tcp_send_head(sk))
tcp_push_one(sk, mss_now);
continue;
wait_for_sndbuf:
set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
tcp_push(sk, flags & ~MSG_MORE, mss_now,
TCP_NAGLE_PUSH, size_goal);
if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
goto do_error;
mss_now = tcp_send_mss(sk, &size_goal, flags);
}
out:
if (copied && !(flags & MSG_SENDPAGE_NOTLAST))
tcp_push(sk, flags, mss_now, tp->nonagle, size_goal);
return copied;
do_error:
if (copied)
goto out;
out_err:
return sk_stream_error(sk, flags, err);
} | 0 | [] | linux | 7bced397510ab569d31de4c70b39e13355046387 | 226,143,813,640,486,100,000,000,000,000,000,000,000 | 117 | net_dma: simple removal
Per commit "77873803363c net_dma: mark broken" net_dma is no longer used
and there is no plan to fix it.
This is the mechanical removal of bits in CONFIG_NET_DMA ifdef guards.
Reverting the remainder of the net_dma induced changes is deferred to
subsequent patches.
Marked for stable due to Roman's report of a memory leak in
dma_pin_iovec_pages():
https://lkml.org/lkml/2014/9/3/177
Cc: Dave Jiang <[email protected]>
Cc: Vinod Koul <[email protected]>
Cc: David Whipple <[email protected]>
Cc: Alexander Duyck <[email protected]>
Cc: <[email protected]>
Reported-by: Roman Gushchin <[email protected]>
Acked-by: David S. Miller <[email protected]>
Signed-off-by: Dan Williams <[email protected]> |
psutil_proc_io_counters(PyObject *self, PyObject *args) {
DWORD pid;
HANDLE hProcess;
IO_COUNTERS IoCounters;
if (! PyArg_ParseTuple(args, "l", &pid))
return NULL;
hProcess = psutil_handle_from_pid(pid, PROCESS_QUERY_LIMITED_INFORMATION);
if (NULL == hProcess)
return NULL;
if (! GetProcessIoCounters(hProcess, &IoCounters)) {
PyErr_SetFromWindowsErr(0);
CloseHandle(hProcess);
return NULL;
}
CloseHandle(hProcess);
return Py_BuildValue("(KKKKKK)",
IoCounters.ReadOperationCount,
IoCounters.WriteOperationCount,
IoCounters.ReadTransferCount,
IoCounters.WriteTransferCount,
IoCounters.OtherOperationCount,
IoCounters.OtherTransferCount);
} | 0 | [
"CWE-415"
] | psutil | 7d512c8e4442a896d56505be3e78f1156f443465 | 250,446,330,419,842,380,000,000,000,000,000,000,000 | 26 | Use Py_CLEAR instead of Py_DECREF to also set the variable to NULL (#1616)
These files contain loops that convert system data into python objects
and during the process they create objects and decrement their
refcounts after they have been added to the resulting list.
However, in case of errors during the creation of those python objects,
the refcount to previously allocated objects is dropped again with
Py_XDECREF, which should be a no-op in case the parameter is NULL. Even
so, in most of these loops the variables pointing to the objects are
never set to NULL, even after Py_DECREF is called at the end of the loop
iteration. This means, after the first iteration, if an error occurs
those python objects will get their refcount dropped two times,
resulting in a possible double-free. |
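In CPython C API terms, the pattern the message describes looks like the following: Py_CLEAR decrements the refcount and nulls the variable in one step, so a later error-path Py_XDECREF on the same variable is a harmless no-op instead of a double free. A condensed sketch (not psutil's actual loop):

```c
#include <Python.h>

/* Build a list of n integers; on any failure, each object is released
 * exactly once. */
static PyObject *build_list(long n)
{
    PyObject *list = PyList_New(0);
    PyObject *item = NULL;

    if (!list)
        return NULL;
    for (long i = 0; i < n; i++) {
        item = PyLong_FromLong(i);
        if (!item || PyList_Append(list, item) < 0)
            goto error;
        Py_CLEAR(item);      /* decref AND set item to NULL */
    }
    return list;

error:
    Py_XDECREF(item);        /* no-op when item is NULL, as intended */
    Py_DECREF(list);
    return NULL;
}
```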
static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
{
u32 val, bmcr, mac_mode, ptest = 0;
tg3_phy_toggle_apd(tp, false);
tg3_phy_toggle_automdix(tp, 0);
if (extlpbk && tg3_phy_set_extloopbk(tp))
return -EIO;
bmcr = BMCR_FULLDPLX;
switch (speed) {
case SPEED_10:
break;
case SPEED_100:
bmcr |= BMCR_SPEED100;
break;
case SPEED_1000:
default:
if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
speed = SPEED_100;
bmcr |= BMCR_SPEED100;
} else {
speed = SPEED_1000;
bmcr |= BMCR_SPEED1000;
}
}
if (extlpbk) {
if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
tg3_readphy(tp, MII_CTRL1000, &val);
val |= CTL1000_AS_MASTER |
CTL1000_ENABLE_MASTER;
tg3_writephy(tp, MII_CTRL1000, val);
} else {
ptest = MII_TG3_FET_PTEST_TRIM_SEL |
MII_TG3_FET_PTEST_TRIM_2;
tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
}
} else
bmcr |= BMCR_LOOPBACK;
tg3_writephy(tp, MII_BMCR, bmcr);
/* The write needs to be flushed for the FETs */
if (tp->phy_flags & TG3_PHYFLG_IS_FET)
tg3_readphy(tp, MII_BMCR, &bmcr);
udelay(40);
if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
tg3_asic_rev(tp) == ASIC_REV_5785) {
tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
MII_TG3_FET_PTEST_FRC_TX_LINK |
MII_TG3_FET_PTEST_FRC_TX_LOCK);
/* The write needs to be flushed for the AC131 */
tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
}
/* Reset to prevent losing 1st rx packet intermittently */
if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
tg3_flag(tp, 5780_CLASS)) {
tw32_f(MAC_RX_MODE, RX_MODE_RESET);
udelay(10);
tw32_f(MAC_RX_MODE, tp->rx_mode);
}
mac_mode = tp->mac_mode &
~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
if (speed == SPEED_1000)
mac_mode |= MAC_MODE_PORT_MODE_GMII;
else
mac_mode |= MAC_MODE_PORT_MODE_MII;
if (tg3_asic_rev(tp) == ASIC_REV_5700) {
u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
if (masked_phy_id == TG3_PHY_ID_BCM5401)
mac_mode &= ~MAC_MODE_LINK_POLARITY;
else if (masked_phy_id == TG3_PHY_ID_BCM5411)
mac_mode |= MAC_MODE_LINK_POLARITY;
tg3_writephy(tp, MII_TG3_EXT_CTRL,
MII_TG3_EXT_CTRL_LNK3_LED_MODE);
}
tw32(MAC_MODE, mac_mode);
udelay(40);
return 0;
} | 0 | [
"CWE-476",
"CWE-119"
] | linux | 715230a44310a8cf66fbfb5a46f9a62a9b2de424 | 143,878,602,982,377,480,000,000,000,000,000,000,000 | 92 | tg3: fix length overflow in VPD firmware parsing
Commit 184b89044fb6e2a74611dafa69b1dce0d98612c6 ("tg3: Use VPD fw version
when present") introduced VPD parsing that contained a potential length
overflow.
Limit the hardware's reported firmware string length (max 255 bytes) to
stay inside the driver's firmware string length (32 bytes). On overflow,
truncate the formatted firmware string instead of potentially overwriting
portions of the tg3 struct.
http://cansecwest.com/slides/2013/PrivateCore%20CSW%202013.pdf
Signed-off-by: Kees Cook <[email protected]>
Reported-by: Oded Horovitz <[email protected]>
Reported-by: Brad Spengler <[email protected]>
Cc: [email protected]
Cc: Matt Carlson <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
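The essence of this fix: a length field read from hardware is untrusted and must be clamped to the destination buffer before any copy, truncating rather than overflowing. A standalone sketch of the clamp (buffer size taken from the message; the function name is invented):

```c
#include <string.h>

#define FW_VER_LEN 32   /* driver-side firmware string buffer, per the message */

/* 'vpd_len' comes from the device (up to 255 bytes) and is untrusted. */
static void copy_fw_version(char dst[FW_VER_LEN],
                            const char *vpd, size_t vpd_len)
{
    size_t n = vpd_len;

    if (n > FW_VER_LEN - 1)     /* clamp: truncate instead of overwriting */
        n = FW_VER_LEN - 1;
    memcpy(dst, vpd, n);
    dst[n] = '\0';
}
```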
static void display_update(DisplayChangeListener *dcl,
int x, int y, int w, int h)
{
PCIQXLDevice *qxl = container_of(dcl, PCIQXLDevice, ssd.dcl);
if (qxl->mode == QXL_MODE_VGA) {
qemu_spice_display_update(&qxl->ssd, x, y, w, h);
}
} | 0 | [
"CWE-476"
] | qemu | d52680fc932efb8a2f334cc6993e705ed1e31e99 | 77,101,896,962,691,225,000,000,000,000,000,000,000 | 9 | qxl: check release info object
When releasing spice resources in release_resource() routine,
if release info object 'ext.info' is null, it leads to null
pointer dereference. Add check to avoid it.
Reported-by: Bugs SysSec <[email protected]>
Signed-off-by: Prasad J Pandit <[email protected]>
Message-id: [email protected]
Signed-off-by: Gerd Hoffmann <[email protected]> |
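The shape of the fix: validate the guest-supplied release-info pointer before dereferencing it. A minimal sketch with assumed type and field names modeled on the message, not the actual QEMU structures:

```c
#include <stddef.h>

struct release_info;
void do_release(struct release_info *info);

struct resource_ext {
    struct release_info *info;   /* may be NULL for a malicious guest */
};

static void release_resource_sketch(struct resource_ext *ext)
{
    if (!ext || !ext->info)      /* the added guard */
        return;
    do_release(ext->info);
}
```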
inline bool is_hex(char c, int &v) {
if ('0' <= c && c <= '9') {
v = c - '0';
return true;
} else if ('a' <= c && c <= 'f') {
v = c - 'a' + 10;
return true;
} else if ('A' <= c && c <= 'F') {
v = c - 'A' + 10;
return true;
}
return false;
} | 0 | [
"CWE-125"
] | cpp-peglib | b3b29ce8f3acf3a32733d930105a17d7b0ba347e | 288,296,300,391,613,700,000,000,000,000,000,000,000 | 13 | Fix #122 |
ftp_loop (struct url *u, char **local_file, int *dt, struct url *proxy,
bool recursive, bool glob)
{
ccon con; /* FTP connection */
uerr_t res;
*dt = 0;
xzero (con);
con.csock = -1;
con.st = ON_YOUR_OWN;
con.rs = ST_UNIX;
con.id = NULL;
con.proxy = proxy;
/* If the file name is empty, the user probably wants a directory
index. We'll provide one, properly HTML-ized. Unless
opt.htmlify is 0, of course. :-) */
if (!*u->file && !recursive)
{
struct fileinfo *f;
res = ftp_get_listing (u, &con, &f);
if (res == RETROK)
{
if (opt.htmlify && !opt.spider)
{
char *filename = (opt.output_document
? xstrdup (opt.output_document)
: (con.target ? xstrdup (con.target)
: url_file_name (u)));
res = ftp_index (filename, u, f);
if (res == FTPOK && opt.verbose)
{
if (!opt.output_document)
{
struct_stat st;
wgint sz;
if (stat (filename, &st) == 0)
sz = st.st_size;
else
sz = -1;
logprintf (LOG_NOTQUIET,
_("Wrote HTML-ized index to %s [%s].\n"),
quote (filename), number_to_static_string (sz));
}
else
logprintf (LOG_NOTQUIET,
_("Wrote HTML-ized index to %s.\n"),
quote (filename));
}
xfree (filename);
}
freefileinfo (f);
}
}
else
{
bool ispattern = false;
if (glob)
{
/* Treat the URL as a pattern if the file name part of the
URL path contains wildcards. (Don't check for u->file
because it is unescaped and therefore doesn't leave users
the option to escape literal '*' as %2A.) */
char *file_part = strrchr (u->path, '/');
if (!file_part)
file_part = u->path;
ispattern = has_wildcards_p (file_part);
}
if (ispattern || recursive || opt.timestamping)
{
/* ftp_retrieve_glob is a catch-all function that gets called
if we need globbing, time-stamping or recursion. Its
third argument is just what we really need. */
res = ftp_retrieve_glob (u, &con,
ispattern ? GLOB_GLOBALL : GLOB_GETONE);
}
else
res = ftp_loop_internal (u, NULL, &con, local_file);
}
if (res == FTPOK)
res = RETROK;
if (res == RETROK)
*dt |= RETROKF;
/* If a connection was left, quench it. */
if (con.csock != -1)
fd_close (con.csock);
xfree_null (con.id);
con.id = NULL;
xfree_null (con.target);
con.target = NULL;
return res;
} | 0 | [
"CWE-20"
] | wget | 3e25a9817f47fbb8660cc6a3b2f3eea239526c6c | 254,472,226,209,179,080,000,000,000,000,000,000,000 | 95 | Introduce --trust-server-names. Close CVE-2010-2252. |
isoent_add_child_tail(struct isoent *parent, struct isoent *child)
{
if (!__archive_rb_tree_insert_node(
&(parent->rbtree), (struct archive_rb_node *)child))
return (0);
child->chnext = NULL;
*parent->children.last = child;
parent->children.last = &(child->chnext);
parent->children.cnt++;
child->parent = parent;
/* Add a child to a sub-directory chain */
child->drnext = NULL;
if (child->dir) {
*parent->subdirs.last = child;
parent->subdirs.last = &(child->drnext);
parent->subdirs.cnt++;
child->parent = parent;
}
return (1);
} | 0 | [
"CWE-190"
] | libarchive | 3014e19820ea53c15c90f9d447ca3e668a0b76c6 | 69,498,082,573,068,040,000,000,000,000,000,000,000 | 22 | Issue 711: Be more careful about verifying filename lengths when writing ISO9660 archives
* Don't cast size_t to int, since this can lead to overflow
on machines where sizeof(int) < sizeof(size_t)
* Check a + b > limit by writing it as
a > limit || b > limit || a + b > limit
to avoid problems when a + b wraps around. |
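The last bullet spells out the idiom; here it is as a standalone predicate. The commit's form `a > limit || b > limit || a + b > limit` is wrap-safe for its small limits; the fully general variant below replaces the addition with a guarded subtraction so it is exact for all size_t inputs:

```c
#include <stdbool.h>
#include <stddef.h>

/* True if a, b, or a + b exceeds 'limit', computed without ever forming
 * a sum that can wrap: once a <= limit holds, limit - a cannot underflow. */
static bool exceeds_limit(size_t a, size_t b, size_t limit)
{
    return a > limit || b > limit - a;
}
```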
flatpak_dir_create_child_repo (FlatpakDir *self,
GFile *cache_dir,
GLnxLockFile *file_lock,
const char *optional_commit,
GError **error)
{
g_autoptr(GFile) repo_dir = NULL;
g_autoptr(GFile) repo_dir_config = NULL;
g_autoptr(OstreeRepo) repo = NULL;
g_autofree char *tmpdir_name = NULL;
g_autoptr(OstreeRepo) new_repo = NULL;
g_autoptr(GKeyFile) config = NULL;
g_autofree char *current_mode = NULL;
GKeyFile *orig_config = NULL;
g_autofree char *orig_min_free_space_percent = NULL;
g_autofree char *orig_min_free_space_size = NULL;
/* We use bare-user-only here now, which means we don't need xattrs
* for the child repo. This only works as long as the pulled repo
* is valid in a bare-user-only repo, i.e. doesn't have xattrs or
* weird permissions, because then the pull into the system repo
* would complain that the checksum was wrong. However, by now all
* flatpak builds are likely to be valid, so this is fine.
*/
OstreeRepoMode mode = OSTREE_REPO_MODE_BARE_USER_ONLY;
const char *mode_str = "bare-user-only";
if (!flatpak_dir_ensure_repo (self, NULL, error))
return NULL;
orig_config = ostree_repo_get_config (self->repo);
if (!flatpak_allocate_tmpdir (AT_FDCWD,
flatpak_file_get_path_cached (cache_dir),
"repo-", &tmpdir_name,
NULL,
file_lock,
NULL,
NULL, error))
return NULL;
repo_dir = g_file_get_child (cache_dir, tmpdir_name);
new_repo = ostree_repo_new (repo_dir);
repo_dir_config = g_file_get_child (repo_dir, "config");
if (!g_file_query_exists (repo_dir_config, NULL))
{
if (!ostree_repo_create (new_repo, mode, NULL, error))
return NULL;
}
else
{
/* Try to open, but on failure, re-create */
if (!ostree_repo_open (new_repo, NULL, NULL))
{
flatpak_rm_rf (repo_dir, NULL, NULL);
if (!ostree_repo_create (new_repo, mode, NULL, error))
return NULL;
}
}
config = ostree_repo_copy_config (new_repo);
/* Verify that the mode is the expected one; if it isn't, recreate the repo */
current_mode = g_key_file_get_string (config, "core", "mode", NULL);
if (current_mode == NULL || g_strcmp0 (current_mode, mode_str) != 0)
{
flatpak_rm_rf (repo_dir, NULL, NULL);
/* Re-initialize the object because its dir's contents have been deleted (and it
* holds internal references to them) */
g_object_unref (new_repo);
new_repo = ostree_repo_new (repo_dir);
if (!ostree_repo_create (new_repo, mode, NULL, error))
return NULL;
/* Reload the repo config */
g_key_file_free (config);
config = ostree_repo_copy_config (new_repo);
}
/* Ensure the config is updated */
g_key_file_set_string (config, "core", "parent",
flatpak_file_get_path_cached (ostree_repo_get_path (self->repo)));
/* Copy the min space percent value so it affects the temporary repo too */
orig_min_free_space_percent = g_key_file_get_value (orig_config, "core", "min-free-space-percent", NULL);
if (orig_min_free_space_percent)
g_key_file_set_value (config, "core", "min-free-space-percent", orig_min_free_space_percent);
/* Copy the min space size value so it affects the temporary repo too */
orig_min_free_space_size = g_key_file_get_value (orig_config, "core", "min-free-space-size", NULL);
if (orig_min_free_space_size)
g_key_file_set_value (config, "core", "min-free-space-size", orig_min_free_space_size);
if (!ostree_repo_write_config (new_repo, config, error))
return NULL;
/* We need to reopen to apply the parent config */
repo = ostree_repo_new (repo_dir);
if (!ostree_repo_open (repo, NULL, error))
return NULL;
/* We don't need to sync the child repos, they are never used for stable storage, and we
verify + fsync when importing to stable storage */
ostree_repo_set_disable_fsync (repo, TRUE);
g_autoptr(GFile) user_cache_dir = flatpak_ensure_user_cache_dir_location (error);
if (user_cache_dir == NULL)
return FALSE;
if (!ostree_repo_set_cache_dir (repo, AT_FDCWD,
flatpak_file_get_path_cached (user_cache_dir),
NULL, error))
return FALSE;
/* Create a commitpartial in the child repo if needed to ensure we download everything, because
any commitpartial state in the parent will not otherwise be inherited */
if (optional_commit)
{
g_autofree char *commitpartial_basename = g_strconcat (optional_commit, ".commitpartial", NULL);
g_autoptr(GFile) orig_commitpartial =
flatpak_build_file (ostree_repo_get_path (self->repo),
"state", commitpartial_basename, NULL);
if (g_file_query_exists (orig_commitpartial, NULL))
{
g_autoptr(GFile) commitpartial =
flatpak_build_file (ostree_repo_get_path (repo),
"state", commitpartial_basename, NULL);
g_file_replace_contents (commitpartial, "", 0, NULL, FALSE, G_FILE_CREATE_REPLACE_DESTINATION, NULL, NULL, NULL);
}
}
return g_steal_pointer (&repo);
} | 0 | [
"CWE-74"
] | flatpak | fb473cad801c6b61706353256cab32330557374a | 96,320,629,200,695,870,000,000,000,000,000,000,000 | 137 | dir: Pass environment via bwrap --setenv when running apply_extra
This means we can systematically pass the environment variables
through bwrap(1), even if it is setuid and thus is filtering out
security-sensitive environment variables. bwrap ends up being
run with an empty environment instead.
As with the previous commit, this regressed while fixing CVE-2021-21261.
Fixes: 6d1773d2 "run: Convert all environment variables into bwrap arguments"
Signed-off-by: Simon McVittie <[email protected]> |
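The mechanism described: rather than relying on the inherited environment (which a setuid bwrap scrubs), every variable is forwarded explicitly as a `--setenv NAME VALUE` argument triple, so bwrap starts with an empty environment and rebuilds it from its arguments. A rough argv-building sketch with invented names:

```c
/* Append "--setenv NAME VALUE" for each variable; bwrap then receives
 * the environment as arguments even though it sanitizes its environ. */
static int add_env_args(char *argv[], int argc, int max,
                        char *const names[], char *const values[], int n)
{
    for (int i = 0; i < n; i++) {
        if (argc + 3 > max)
            return -1;          /* argv too small */
        argv[argc++] = "--setenv";
        argv[argc++] = names[i];
        argv[argc++] = values[i];
    }
    return argc;                /* new argument count */
}
```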
void c_shquote_skip_str(const char **strp,
size_t *n_strp,
size_t len) {
c_assert(len <= *n_strp);
*strp += len;
*n_strp -= len;
} | 0 | [
"CWE-787"
] | c-shquote | 7fd15f8e272136955f7ffc37df29fbca9ddceca1 | 280,931,823,476,567,930,000,000,000,000,000,000,000 | 8 | strnspn: fix buffer overflow
Fix the strnspn and strncspn functions to use a properly sized buffer.
It used to be 1 byte too short. Checking for `0xff` in a string will
thus write `0xff` one byte beyond the stack space of the local buffer.
Note that the public API does not allow to pass `0xff` to those
functions. Therefore, this is a read-only buffer overrun, possibly
causing bogus reports from the parser, but still well-defined.
Reported-by: Steffen Robertz
Signed-off-by: David Rheinsberg <[email protected]> |
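The underlying bug: an accept-set lookup table indexed by unsigned char needs UCHAR_MAX + 1 (256) entries, not 255; one slot short, probing for 0xff touches memory past the buffer. A correct standalone strnspn-style sketch:

```c
#include <limits.h>
#include <stdbool.h>
#include <stddef.h>

/* Length of the initial segment of str (up to n) consisting only of
 * bytes in 'accept'. The table covers every possible byte value. */
static size_t strnspn_sketch(const char *str, size_t n,
                             const char *accept, size_t n_accept)
{
    bool table[UCHAR_MAX + 1] = { false };  /* 256 slots: the fix */
    size_t i;

    for (i = 0; i < n_accept; i++)
        table[(unsigned char)accept[i]] = true;
    for (i = 0; i < n && table[(unsigned char)str[i]]; i++)
        ;
    return i;
}
```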
int cil_gen_call(struct cil_db *db, struct cil_tree_node *parse_current, struct cil_tree_node *ast_node)
{
enum cil_syntax syntax[] = {
CIL_SYN_STRING,
CIL_SYN_STRING,
CIL_SYN_LIST | CIL_SYN_EMPTY_LIST | CIL_SYN_END,
CIL_SYN_END
};
int syntax_len = sizeof(syntax)/sizeof(*syntax);
struct cil_call *call = NULL;
int rc = SEPOL_ERR;
if (db == NULL || parse_current == NULL || ast_node == NULL) {
goto exit;
}
rc = __cil_verify_syntax(parse_current, syntax, syntax_len);
if (rc != SEPOL_OK) {
goto exit;
}
cil_call_init(&call);
call->macro_str = parse_current->next->data;
if (parse_current->next->next != NULL) {
cil_tree_init(&call->args_tree);
cil_copy_ast(db, parse_current->next->next, call->args_tree->root);
}
ast_node->data = call;
ast_node->flavor = CIL_CALL;
return SEPOL_OK;
exit:
cil_tree_log(parse_current, CIL_ERR, "Bad macro call");
cil_destroy_call(call);
return rc;
} | 0 | [
"CWE-125"
] | selinux | 340f0eb7f3673e8aacaf0a96cbfcd4d12a405521 | 73,220,301,977,468,410,000,000,000,000,000,000,000 | 40 | libsepol/cil: Check for statements not allowed in optional blocks
While there are some checks for invalid statements in an optional
block when resolving the AST, there are no checks when building the
AST.
OSS-Fuzz found the following policy which caused a null dereference
in cil_tree_get_next_path().
(blockinherit b3)
(sid SID)
(sidorder(SID))
(optional o
(ibpkeycon :(1 0)s)
(block b3
(filecon""block())
(filecon""block())))
The problem is that the blockinherit copies block b3 before
the optional block is disabled. When the optional is disabled,
block b3 is deleted along with everything else in the optional.
Later, when filecon statements with the same path are found an
error message is produced and in trying to find out where the block
was copied from, the reference to the deleted block is used. The
error handling code assumes (rightly) that if something was copied
from a block then that block should still exist.
It is clear that in-statements, blocks, and macros cannot be in an
optional, because that allows nodes to be copied from the optional
block to somewhere outside even though the optional could be disabled
later. When optionals are disabled the AST is reset and the
resolution is restarted at the point of resolving macro calls, so
anything resolved before macro calls will never be re-resolved.
This includes tunableifs, in-statements, blockinherits,
blockabstracts, and macro definitions. Tunable declarations also
cannot be in an optional block because they are needed to resolve
tunableifs. It should be fine to allow blockinherit statements in
an optional, because that is copying nodes from outside the optional
to the optional and if the optional is later disabled, everything
will be deleted anyway.
Check and quit with an error if a tunable declaration, in-statement,
block, blockabstract, or macro definition is found within an
optional when either building or resolving the AST.
Signed-off-by: James Carter <[email protected]> |
ClientSslTransportOptions ecdsaOnlyClientOptions() {
auto options = ClientSslTransportOptions().setClientEcdsaCert(true);
if (tls_version_ == envoy::extensions::transport_sockets::tls::v3::TlsParameters::TLSv1_3) {
return options.setSigningAlgorithmsForTest("ecdsa_secp256r1_sha256");
} else {
return options.setCipherSuites({"ECDHE-ECDSA-AES128-GCM-SHA256"});
}
} | 0 | [
"CWE-400"
] | envoy | 0e49a495826ea9e29134c1bd54fdeb31a034f40c | 8,605,767,029,208,063,000,000,000,000,000,000,000 | 8 | http/2: add stats and stream flush timeout (#139)
This commit adds a new stream flush timeout to guard against a
remote server that does not open window once an entire stream has
been buffered for flushing. Additional stats have also been added
to better understand the codecs view of active streams as well as
amount of data buffered.
Signed-off-by: Matt Klein <[email protected]> |
void DocumentSourceGraphLookUp::checkMemoryUsage() {
// TODO SERVER-23980: Implement spilling to disk if allowDiskUse is specified.
uassert(40099,
"$graphLookup reached maximum memory consumption",
(_visitedUsageBytes + _frontierUsageBytes) < _maxMemoryUsageBytes);
_cache.evictDownTo(_maxMemoryUsageBytes - _frontierUsageBytes - _visitedUsageBytes);
} | 0 | [
"CWE-416"
] | mongo | d6133a3a5464fac202f512b0310dfeb200c126f9 | 52,819,734,939,781,560,000,000,000,000,000,000,000 | 7 | SERVER-43350 $lookup with no local default or user-specified collation should explicitly set the simple collation on the foreign expression context |
static void end_block_io_op(struct bio *bio)
{
__end_block_io_op(bio->bi_private, bio->bi_error);
bio_put(bio);
} | 0 | [
"CWE-200"
] | linux | 089bc0143f489bd3a4578bdff5f4ca68fb26f341 | 274,643,871,043,628,550,000,000,000,000,000,000,000 | 5 | xen-blkback: don't leak stack data via response ring
Rather than constructing a local structure instance on the stack, fill
the fields directly on the shared ring, just like other backends do.
Build on the fact that all response structure flavors are actually
identical (the old code did make this assumption too).
This is XSA-216.
Cc: [email protected]
Signed-off-by: Jan Beulich <[email protected]>
Reviewed-by: Konrad Rzeszutek Wilk <[email protected]>
Signed-off-by: Konrad Rzeszutek Wilk <[email protected]> |
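The leak mechanism: a response struct built on the stack carries compiler padding full of stale stack bytes, and copying the whole struct into the guest-shared ring discloses them. Writing the fields individually into the ring slot never touches the padding. A standalone sketch (the layout is illustrative, not Xen's exact ABI):

```c
#include <stdint.h>

struct blkif_response {
    uint64_t id;
    uint8_t  operation;
    /* implicit padding here: never memcpy a stack copy of this struct
     * into shared memory */
    int16_t  status;
};

/* Fill the shared slot field by field; no uninitialized local bytes
 * ever reach the guest-visible page. */
static void make_response(volatile struct blkif_response *slot,
                          uint64_t id, uint8_t op, int16_t status)
{
    slot->id = id;
    slot->operation = op;
    slot->status = status;
}
```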
static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t offset,
u64 ino, unsigned int d_type)
{
struct linux_dirent64 __user *dirent;
struct compat_getdents_callback64 *buf = __buf;
int jj = NAME_OFFSET(dirent);
int reclen = COMPAT_ROUND_UP64(jj + namlen + 1);
u64 off;
buf->error = -EINVAL; /* only used if we fail.. */
if (reclen > buf->count)
return -EINVAL;
dirent = buf->previous;
if (dirent) {
if (__put_user_unaligned(offset, &dirent->d_off))
goto efault;
}
dirent = buf->current_dir;
if (__put_user_unaligned(ino, &dirent->d_ino))
goto efault;
off = 0;
if (__put_user_unaligned(off, &dirent->d_off))
goto efault;
if (__put_user(reclen, &dirent->d_reclen))
goto efault;
if (__put_user(d_type, &dirent->d_type))
goto efault;
if (copy_to_user(dirent->d_name, name, namlen))
goto efault;
if (__put_user(0, dirent->d_name + namlen))
goto efault;
buf->previous = dirent;
dirent = (void __user *)dirent + reclen;
buf->current_dir = dirent;
buf->count -= reclen;
return 0;
efault:
buf->error = -EFAULT;
return -EFAULT;
} | 0 | [] | linux-2.6 | 822191a2fa1584a29c3224ab328507adcaeac1ab | 221,834,775,877,451,760,000,000,000,000,000,000,000 | 41 | [PATCH] skip data conversion in compat_sys_mount when data_page is NULL
OpenVZ Linux kernel team has found a problem with mounting in compat mode.
Simple command "mount -t smbfs ..." on Fedora Core 5 distro in 32-bit mode
leads to oops:
Unable to handle kernel NULL pointer dereference at 0000000000000000 RIP: compat_sys_mount+0xd6/0x290
Process mount (pid: 14656, veid=300, threadinfo ffff810034d30000, task ffff810034c86bc0)
Call Trace: ia32_sysret+0x0/0xa
The problem is that data_page pointer can be NULL, so we should skip data
conversion in this case.
Signed-off-by: Andrey Mirkin <[email protected]>
Cc: <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
ipf_is_last_v6_frag(ovs_be16 ip6f_offlg)
{
if ((ip6f_offlg & IP6F_OFF_MASK) &&
!(ip6f_offlg & IP6F_MORE_FRAG)) {
return true;
}
return false;
} | 0 | [
"CWE-401"
] | ovs | 803ed12e31b0377c37d7aa8c94b3b92f2081e349 | 318,902,878,772,386,700,000,000,000,000,000,000,000 | 8 | ipf: release unhandled packets from the batch
Since 640d4db788ed ("ipf: Fix a use-after-free error, ...") the ipf
framework unconditionally allocates a new dp_packet to track
individual fragments. This prevents a use-after-free. However, an
additional issue was present - even when the packet buffer is cloned,
if the ip fragment handling code keeps it, the original buffer is
leaked during the refill loop. Even in the original processing code,
the hardcoded dnsteal branches would always leak a packet buffer from
the refill loop.
This can be confirmed with valgrind:
==717566== 16,672 (4,480 direct, 12,192 indirect) bytes in 8 blocks are definitely lost in loss record 390 of 390
==717566== at 0x484086F: malloc (vg_replace_malloc.c:380)
==717566== by 0x537BFD: xmalloc__ (util.c:137)
==717566== by 0x537BFD: xmalloc (util.c:172)
==717566== by 0x46DDD4: dp_packet_new (dp-packet.c:153)
==717566== by 0x46DDD4: dp_packet_new_with_headroom (dp-packet.c:163)
==717566== by 0x550AA6: netdev_linux_batch_rxq_recv_sock.constprop.0 (netdev-linux.c:1262)
==717566== by 0x5512AF: netdev_linux_rxq_recv (netdev-linux.c:1511)
==717566== by 0x4AB7E0: netdev_rxq_recv (netdev.c:727)
==717566== by 0x47F00D: dp_netdev_process_rxq_port (dpif-netdev.c:4699)
==717566== by 0x47FD13: dpif_netdev_run (dpif-netdev.c:5957)
==717566== by 0x4331D2: type_run (ofproto-dpif.c:370)
==717566== by 0x41DFD8: ofproto_type_run (ofproto.c:1768)
==717566== by 0x40A7FB: bridge_run__ (bridge.c:3245)
==717566== by 0x411269: bridge_run (bridge.c:3310)
==717566== by 0x406E6C: main (ovs-vswitchd.c:127)
The fix is to delete the original packet when it isn't able to be
reinserted into the packet batch. Subsequent valgrind runs show that
the packets are not leaked from the batch any longer.
Fixes: 640d4db788ed ("ipf: Fix a use-after-free error, and remove the 'do_not_steal' flag.")
Fixes: 4ea96698f667 ("Userspace datapath: Add fragmentation handling.")
Reported-by: Wan Junjie <[email protected]>
Reported-at: https://github.com/openvswitch/ovs-issues/issues/226
Signed-off-by: Aaron Conole <[email protected]>
Reviewed-by: David Marchand <[email protected]>
Tested-by: Wan Junjie <[email protected]>
Signed-off-by: Alin-Gabriel Serdean <[email protected]> |
TEST(IndexBoundsBuilderTest,
TranslateDottedFieldNotEqualToNullShouldBuildInexactBoundsIfIndexIsMultiKey) {
BSONObj indexPattern = BSON("a.b" << 1);
auto testIndex = buildSimpleIndexEntry(indexPattern);
testIndex.multikey = true;
BSONObj matchObj = BSON("a.b" << BSON("$ne" << BSONNULL));
auto expr = parseMatchExpression(matchObj);
OrderedIntervalList oil;
IndexBoundsBuilder::BoundsTightness tightness;
IndexBoundsBuilder::translate(
expr.get(), indexPattern.firstElement(), testIndex, &oil, &tightness);
ASSERT_EQUALS(oil.name, "a.b");
ASSERT_EQUALS(tightness, IndexBoundsBuilder::INEXACT_FETCH);
assertBoundsRepresentNotEqualsNull(oil);
} | 0 | [
"CWE-754"
] | mongo | f8f55e1825ee5c7bdb3208fc7c5b54321d172732 | 173,903,005,067,256,870,000,000,000,000,000,000,000 | 18 | SERVER-44377 generate correct plan for indexed inequalities to null |
static inline void tlb_flush_pud_range(struct mmu_gather *tlb,
unsigned long address, unsigned long size)
{
__tlb_adjust_range(tlb, address, size);
tlb->cleared_puds = 1;
} | 0 | [
"CWE-362"
] | linux | b67fbebd4cf980aecbcc750e1462128bffe8ae15 | 271,823,002,392,722,360,000,000,000,000,000,000,000 | 6 | mmu_gather: Force tlb-flush VM_PFNMAP vmas
Jann reported a race between munmap() and unmap_mapping_range(), where
unmap_mapping_range() will no-op once unmap_vmas() has unlinked the
VMA; however munmap() will not yet have invalidated the TLBs.
Therefore unmap_mapping_range() will complete while there are still
(stale) TLB entries for the specified range.
Mitigate this by force flushing TLBs for VM_PFNMAP ranges.
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Acked-by: Will Deacon <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
unsigned int flags)
{
struct ext4_extent_header *neh;
struct buffer_head *bh;
ext4_fsblk_t newblock, goal = 0;
struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
int err = 0;
size_t ext_size = 0;
/* Try to prepend new index to old one */
if (ext_depth(inode))
goal = ext4_idx_pblock(EXT_FIRST_INDEX(ext_inode_hdr(inode)));
if (goal > le32_to_cpu(es->s_first_data_block)) {
flags |= EXT4_MB_HINT_TRY_GOAL;
goal--;
} else
goal = ext4_inode_to_goal_block(inode);
newblock = ext4_new_meta_blocks(handle, inode, goal, flags,
NULL, &err);
if (newblock == 0)
return err;
bh = sb_getblk_gfp(inode->i_sb, newblock, __GFP_MOVABLE | GFP_NOFS);
if (unlikely(!bh))
return -ENOMEM;
lock_buffer(bh);
err = ext4_journal_get_create_access(handle, bh);
if (err) {
unlock_buffer(bh);
goto out;
}
ext_size = sizeof(EXT4_I(inode)->i_data);
/* move top-level index/leaf into new block */
memmove(bh->b_data, EXT4_I(inode)->i_data, ext_size);
/* zero out unused area in the extent block */
memset(bh->b_data + ext_size, 0, inode->i_sb->s_blocksize - ext_size);
/* set size of new block */
neh = ext_block_hdr(bh);
/* old root could have indexes or leaves
* so calculate e_max right way */
if (ext_depth(inode))
neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
else
neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
neh->eh_magic = EXT4_EXT_MAGIC;
ext4_extent_block_csum_set(inode, neh);
set_buffer_uptodate(bh);
unlock_buffer(bh);
err = ext4_handle_dirty_metadata(handle, inode, bh);
if (err)
goto out;
/* Update top-level index: num,max,pointer */
neh = ext_inode_hdr(inode);
neh->eh_entries = cpu_to_le16(1);
ext4_idx_store_pblock(EXT_FIRST_INDEX(neh), newblock);
if (neh->eh_depth == 0) {
/* Root extent block becomes index block */
neh->eh_max = cpu_to_le16(ext4_ext_space_root_idx(inode, 0));
EXT_FIRST_INDEX(neh)->ei_block =
EXT_FIRST_EXTENT(neh)->ee_block;
}
ext_debug(inode, "new root: num %d(%d), lblock %d, ptr %llu\n",
le16_to_cpu(neh->eh_entries), le16_to_cpu(neh->eh_max),
le32_to_cpu(EXT_FIRST_INDEX(neh)->ei_block),
ext4_idx_pblock(EXT_FIRST_INDEX(neh)));
le16_add_cpu(&neh->eh_depth, 1);
err = ext4_mark_inode_dirty(handle, inode);
out:
brelse(bh);
return err;
} | 0 | [
"CWE-703"
] | linux | ce9f24cccdc019229b70a5c15e2b09ad9c0ab5d1 | 162,657,345,811,603,800,000,000,000,000,000,000,000 | 79 | ext4: check journal inode extents more carefully
Currently, system zones just track ranges of block, that are "important"
fs metadata (bitmaps, group descriptors, journal blocks, etc.). This
however complicates how extent tree (or indirect blocks) can be checked
for inodes that actually track such metadata - currently the journal
inode but arguably we should be treating quota files or resize inode
similarly. We cannot run __ext4_ext_check() on such metadata inodes when
loading their extents as that would immediately trigger the validity
checks and so we just hack around that and special-case the journal
inode. This however leads to a situation that a journal inode which has
extent tree of depth at least one can have invalid extent tree that gets
unnoticed until ext4_cache_extents() crashes.
To overcome this limitation, track the inode number each system zone belongs
to (0 is used for zones not belonging to any inode). We can then verify that
the inode number matches the expected one when verifying the extent tree and
thus avoid the false errors. With this there's no need to
special-case the journal inode during extent tree checking anymore, so remove
it.
Fixes: 0a944e8a6c66 ("ext4: don't perform block validity checks on the journal inode")
Reported-by: Wolfgang Frisch <[email protected]>
Reviewed-by: Lukas Czerner <[email protected]>
Signed-off-by: Jan Kara <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Theodore Ts'o <[email protected]> |
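The data-structure change described: each reserved block range records the owning inode number (0 for generic metadata), so extent validation can accept a metadata inode's own blocks while still rejecting overlaps from any other inode. A standalone sketch of the check, with invented names:

```c
#include <stdbool.h>
#include <stdint.h>

struct system_zone {
    uint64_t start;     /* first block of the reserved range */
    uint64_t len;
    uint32_t ino;       /* owning inode, or 0 for generic fs metadata */
};

/* A block range overlapping a system zone is valid only when checked on
 * behalf of the very inode that owns that zone. */
static bool range_valid(const struct system_zone *z,
                        uint64_t blk, uint64_t count, uint32_t ino)
{
    bool overlaps = blk < z->start + z->len && z->start < blk + count;
    return !overlaps || (z->ino != 0 && z->ino == ino);
}
```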
TEST(MessageCompressorManager, BadCompressionRequested) {
auto input = BSON("isMaster" << 1 << "compression" << BSON_ARRAY("fakecompressor"));
checkServerNegotiation(input, {});
} | 0 | [] | mongo | 5ad69b851801edadbfde8fdf271f4ba7c21170b5 | 33,715,502,868,985,170,000,000,000,000,000,000,000 | 4 | SERVER-31273 Use Source/Sink version of snappy functions
(cherry picked from commit 59ead734faa8aa51f0c53bf2bd39d0a0247ddf99) |
directory_initiate_command(const char *address, const tor_addr_t *_addr,
uint16_t or_port, uint16_t dir_port,
int supports_conditional_consensus,
int supports_begindir, const char *digest,
uint8_t dir_purpose, uint8_t router_purpose,
int anonymized_connection, const char *resource,
const char *payload, size_t payload_len,
time_t if_modified_since)
{
directory_initiate_command_rend(address, _addr, or_port, dir_port,
supports_conditional_consensus,
supports_begindir, digest, dir_purpose,
router_purpose, anonymized_connection,
resource, payload, payload_len,
if_modified_since, NULL);
} | 0 | [] | tor | 973c18bf0e84d14d8006a9ae97fde7f7fb97e404 | 168,184,738,849,264,550,000,000,000,000,000,000,000 | 16 | Fix assertion failure in tor_timegm.
Fixes bug 6811. |
static void next_reap_node(void)
{
int node = __this_cpu_read(slab_reap_node);
node = next_node_in(node, node_online_map);
__this_cpu_write(slab_reap_node, node);
} | 0 | [
"CWE-703"
] | linux | c4e490cf148e85ead0d1b1c2caaba833f1d5b29f | 93,448,720,431,546,120,000,000,000,000,000,000,000 | 7 | mm/slab.c: fix SLAB freelist randomization duplicate entries
This patch fixes a bug in the freelist randomization code. When a high
random number is used, the freelist will contain duplicate entries. It
will result in different allocations sharing the same chunk.
It will result in odd behaviours and crashes. It should be uncommon but
it depends on the machines. We saw it happening more often on some
machines (every few hours of running tests).
Fixes: c7ce4f60ac19 ("mm: SLAB freelist randomization")
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: John Sperbeck <[email protected]>
Signed-off-by: Thomas Garnier <[email protected]>
Cc: Christoph Lameter <[email protected]>
Cc: Pekka Enberg <[email protected]>
Cc: David Rientjes <[email protected]>
Cc: Joonsoo Kim <[email protected]>
Cc: <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
push_write_next_chunk (SoupMessage *msg, gpointer user_data)
{
PushHandle *handle = user_data;
/* If we've been restarted, seek to the beginning of the file. */
if (handle->read_status == PUSH_READ_STATUS_RESET)
{
GError *error = NULL;
/* We've been restarted but there's still a read in flight, so defer. */
if (handle->buf)
{
handle->read_status = PUSH_READ_STATUS_DEFERRED;
return;
}
handle->n_read = 0;
handle->n_written = 0;
handle->read_status = PUSH_READ_STATUS_NONE;
if (!g_seekable_seek (G_SEEKABLE (handle->in),
0, G_SEEK_SET,
handle->job->cancellable, &error))
{
g_vfs_job_failed_from_error (handle->job, error);
g_error_free (error);
soup_session_cancel_message (G_VFS_BACKEND_HTTP (handle->backend)->session,
handle->msg,
SOUP_STATUS_CANCELLED);
return;
}
}
handle->buf = g_malloc (CHUNK_SIZE);
g_input_stream_read_async (handle->in,
handle->buf, CHUNK_SIZE,
0, handle->job->cancellable,
push_read_cb, handle);
} | 0 | [] | gvfs | f81ff2108ab3b6e370f20dcadd8708d23f499184 | 172,222,778,456,507,760,000,000,000,000,000,000,000 | 39 | dav: don't unescape the uri twice
path_equal tries to unescape the path before comparing. Unfortunately,
this function is also used for already-unescaped paths, so
unescaping can fail. This commit reverts the changes made in
commit 50af53d and unescapes only URIs that aren't already unescaped.
https://bugzilla.gnome.org/show_bug.cgi?id=743298 |
_libssh2_channel_write(LIBSSH2_CHANNEL *channel, int stream_id,
const unsigned char *buf, size_t buflen)
{
int rc = 0;
LIBSSH2_SESSION *session = channel->session;
ssize_t wrote = 0; /* counter for this specific this call */
/* In theory we could split larger buffers into several smaller packets
* but it turns out to be really hard and nasty to do while still offering
* the API/prototype.
*
* Instead we only deal with the first 32K in this call and for the parent
* function to call it again with the remainder! 32K is a conservative
* limit based on the text in RFC4253 section 6.1.
*/
if(buflen > 32700)
buflen = 32700;
if(channel->write_state == libssh2_NB_state_idle) {
unsigned char *s = channel->write_packet;
_libssh2_debug(channel->session, LIBSSH2_TRACE_CONN,
"Writing %d bytes on channel %lu/%lu, stream #%d",
(int) buflen, channel->local.id, channel->remote.id,
stream_id);
if(channel->local.close)
return _libssh2_error(channel->session,
LIBSSH2_ERROR_CHANNEL_CLOSED,
"We've already closed this channel");
else if(channel->local.eof)
return _libssh2_error(channel->session,
LIBSSH2_ERROR_CHANNEL_EOF_SENT,
"EOF has already been received, "
"data might be ignored");
/* drain the incoming flow first, mostly to make sure we get all
* pending window adjust packets */
do
rc = _libssh2_transport_read(session);
while(rc > 0);
if((rc < 0) && (rc != LIBSSH2_ERROR_EAGAIN)) {
return _libssh2_error(channel->session, rc,
"Failure while draining incoming flow");
}
if(channel->local.window_size <= 0) {
/* there's no room for data so we stop */
/* Waiting on the socket to be writable would be wrong because we
* would be back here immediately, but a readable socket might
* herald an incoming window adjustment.
*/
session->socket_block_directions = LIBSSH2_SESSION_BLOCK_INBOUND;
return (rc == LIBSSH2_ERROR_EAGAIN?rc:0);
}
channel->write_bufwrite = buflen;
*(s++) = stream_id ? SSH_MSG_CHANNEL_EXTENDED_DATA :
SSH_MSG_CHANNEL_DATA;
_libssh2_store_u32(&s, channel->remote.id);
if(stream_id)
_libssh2_store_u32(&s, stream_id);
/* Don't exceed the remote end's limits */
/* REMEMBER local means local as the SOURCE of the data */
if(channel->write_bufwrite > channel->local.window_size) {
_libssh2_debug(session, LIBSSH2_TRACE_CONN,
"Splitting write block due to %lu byte "
"window_size on %lu/%lu/%d",
channel->local.window_size, channel->local.id,
channel->remote.id, stream_id);
channel->write_bufwrite = channel->local.window_size;
}
if(channel->write_bufwrite > channel->local.packet_size) {
_libssh2_debug(session, LIBSSH2_TRACE_CONN,
"Splitting write block due to %lu byte "
"packet_size on %lu/%lu/%d",
channel->local.packet_size, channel->local.id,
channel->remote.id, stream_id);
channel->write_bufwrite = channel->local.packet_size;
}
/* store the size here only, the buffer is passed in as-is to
_libssh2_transport_send() */
_libssh2_store_u32(&s, channel->write_bufwrite);
channel->write_packet_len = s - channel->write_packet;
_libssh2_debug(session, LIBSSH2_TRACE_CONN,
"Sending %d bytes on channel %lu/%lu, stream_id=%d",
(int) channel->write_bufwrite, channel->local.id,
channel->remote.id, stream_id);
channel->write_state = libssh2_NB_state_created;
}
if(channel->write_state == libssh2_NB_state_created) {
rc = _libssh2_transport_send(session, channel->write_packet,
channel->write_packet_len,
buf, channel->write_bufwrite);
if(rc == LIBSSH2_ERROR_EAGAIN) {
return _libssh2_error(session, rc,
"Unable to send channel data");
}
else if(rc) {
channel->write_state = libssh2_NB_state_idle;
return _libssh2_error(session, rc,
"Unable to send channel data");
}
/* Shrink local window size */
channel->local.window_size -= channel->write_bufwrite;
wrote += channel->write_bufwrite;
/* Since _libssh2_transport_write() succeeded, we must return
now to allow the caller to provide the next chunk of data.
We cannot move on to send the next piece of data that may
already have been provided in this same function call, as we
risk getting EAGAIN for that and we can't return information
both about sent data as well as EAGAIN. So, by returning short
now, the caller will call this function again with new data to
send */
channel->write_state = libssh2_NB_state_idle;
return wrote;
}
return LIBSSH2_ERROR_INVAL; /* reaching this point is really bad */
} | 0 | [
"CWE-787"
] | libssh2 | dc109a7f518757741590bb993c0c8412928ccec2 | 336,815,831,218,607,920,000,000,000,000,000,000,000 | 133 | Security fixes (#315)
* Bounds checks
Fixes for CVEs
https://www.libssh2.org/CVE-2019-3863.html
https://www.libssh2.org/CVE-2019-3856.html
* Packet length bounds check
CVE
https://www.libssh2.org/CVE-2019-3855.html
* Response length check
CVE
https://www.libssh2.org/CVE-2019-3859.html
* Bounds check
CVE
https://www.libssh2.org/CVE-2019-3857.html
* Bounds checking
CVE
https://www.libssh2.org/CVE-2019-3859.html
and additional data validation
* Check bounds before reading into buffers
* Bounds checking
CVE
https://www.libssh2.org/CVE-2019-3859.html
* declare SIZE_MAX and UINT_MAX if needed |
else if (type == NBD_REPLY_TYPE_OFFSET_DATA) {
/* The spec states that 0-length requests are unspecified, but
* 0-length replies are broken. Still, it's easy enough to support
* them as an extension, so we use < instead of <=.
*/
if (cmd->type != NBD_CMD_READ) {
SET_NEXT_STATE (%.DEAD);
set_error (0, "invalid command for receiving offset-data chunk, "
"cmd->type=%" PRIu16 ", "
"this is likely to be a bug in the server",
cmd->type);
return 0;
}
if (length < sizeof h->sbuf.sr.payload.offset_data) {
SET_NEXT_STATE (%.DEAD);
set_error (0, "too short length in NBD_REPLY_TYPE_OFFSET_DATA");
return 0;
}
h->rbuf = &h->sbuf.sr.payload.offset_data;
h->rlen = sizeof h->sbuf.sr.payload.offset_data;
SET_NEXT_STATE (%RECV_OFFSET_DATA);
return 0;
} | 0 | [] | libnbd | 2c1987fc23d6d0f537edc6d4701e95a2387f7917 | 19,149,486,193,221,827,000,000,000,000,000,000,000 | 23 | lib: Fix stack corruption with structured reply containing negative offset.
Because of improper bounds checking, when receiving a structured reply
some offset/lengths sent by the server could cause libnbd to execute
arbitrary code under control of a malicious server.
A structured reply segment containing (for example):
offset = 18446744073709551615 (== (uint64_t) -1,
or similar negative offsets)
length = 100 (any small positive number < request count)
In both original bounds tests the error case would not be reached:
if (offset < cmd->offset) { // very large < 0
// error case
}
if (offset + length > cmd->count) { // 99 > 512
// error case
}
The result of the negative offset is that data under control of the
server is written to memory before the read buffer supplied by the
client. If the read buffer is located on the stack then this allows
the stack return address from nbd_pread() to be trivially modified,
allowing arbitrary code execution under the control of the server. If
the buffer is located on the heap then other memory objects before the
buffer can be overwritten, which again would usually lead to arbitrary
code execution.
This commit adds a central function to handle bounds checking for all
cases, and the corrected bounds check is written once in this function.
This bug was found by fuzzing libnbd with American Fuzzy Lop as
described here:
https://groups.google.com/forum/#!topic/afl-users/WZzAnfItxM4
(cherry picked from commit f75f602a6361c0c5f42debfeea6980f698ce7f09) |
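The corrected check must survive arithmetic on attacker-chosen 64-bit values, so it never forms offset + length directly. A standalone sketch of a centralized validator in the spirit of the fix (names invented; not libnbd's actual function):

```c
#include <stdbool.h>
#include <stdint.h>

/* Validate a structured-reply chunk (offset, length) against the original
 * request (req_offset, req_count). Every subtraction is guarded by the
 * comparison before it, so nothing can wrap around. */
static bool chunk_in_bounds(uint64_t offset, uint32_t length,
                            uint64_t req_offset, uint32_t req_count)
{
    if (length == 0 || length > req_count)
        return false;
    if (offset < req_offset)                 /* catches "negative" offsets */
        return false;
    if (offset - req_offset > (uint64_t)(req_count - length))
        return false;                        /* chunk runs past the buffer */
    return true;
}
```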
compat_mpt_command(struct file *filp, unsigned int cmd,
unsigned long arg)
{
struct mpt_ioctl_command32 karg32;
struct mpt_ioctl_command32 __user *uarg = (struct mpt_ioctl_command32 __user *) arg;
struct mpt_ioctl_command karg;
MPT_ADAPTER *iocp = NULL;
int iocnum, iocnumX;
int nonblock = (filp->f_flags & O_NONBLOCK);
int ret;
if (copy_from_user(&karg32, (char __user *)arg, sizeof(karg32)))
return -EFAULT;
/* Verify intended MPT adapter */
iocnumX = karg32.hdr.iocnum & 0xFF;
if (((iocnum = mpt_verify_adapter(iocnumX, &iocp)) < 0) ||
(iocp == NULL)) {
printk(KERN_DEBUG MYNAM "::compat_mpt_command @%d - ioc%d not found!\n",
__LINE__, iocnumX);
return -ENODEV;
}
if ((ret = mptctl_syscall_down(iocp, nonblock)) != 0)
return ret;
dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT "compat_mpt_command() called\n",
iocp->name));
/* Copy data to karg */
karg.hdr.iocnum = karg32.hdr.iocnum;
karg.hdr.port = karg32.hdr.port;
karg.timeout = karg32.timeout;
karg.maxReplyBytes = karg32.maxReplyBytes;
karg.dataInSize = karg32.dataInSize;
karg.dataOutSize = karg32.dataOutSize;
karg.maxSenseBytes = karg32.maxSenseBytes;
karg.dataSgeOffset = karg32.dataSgeOffset;
karg.replyFrameBufPtr = (char __user *)(unsigned long)karg32.replyFrameBufPtr;
karg.dataInBufPtr = (char __user *)(unsigned long)karg32.dataInBufPtr;
karg.dataOutBufPtr = (char __user *)(unsigned long)karg32.dataOutBufPtr;
karg.senseDataPtr = (char __user *)(unsigned long)karg32.senseDataPtr;
/* Pass new structure to do_mpt_command
*/
ret = mptctl_do_mpt_command (karg, &uarg->MF);
mutex_unlock(&iocp->ioctl_cmds.mutex);
return ret;
} | 1 | [
"CWE-362",
"CWE-369"
] | linux | 28d76df18f0ad5bcf5fa48510b225f0ed262a99b | 164,195,225,865,569,520,000,000,000,000,000,000,000 | 52 | scsi: mptfusion: Fix double fetch bug in ioctl
Tom Hatskevich reported that we look up "iocp" then, in the called
functions we do a second copy_from_user() and look it up again.
The problem that could cause is:
drivers/message/fusion/mptctl.c
674 /* All of these commands require an interrupt or
675 * are unknown/illegal.
676 */
677 if ((ret = mptctl_syscall_down(iocp, nonblock)) != 0)
^^^^
We take this lock.
678 return ret;
679
680 if (cmd == MPTFWDOWNLOAD)
681 ret = mptctl_fw_download(arg);
^^^
Then the user memory changes and we look up "iocp" again but a different
one so now we are holding the incorrect lock and have a race condition.
682 else if (cmd == MPTCOMMAND)
683 ret = mptctl_mpt_command(arg);
The security impact of this bug is not as bad as it could have been
because these operations are all privileged and root already has
enormous destructive power. But it's still worth fixing.
This patch passes the "iocp" pointer to the functions to avoid the
second lookup. That deletes 100 lines of code from the driver so
it's a nice clean up as well.
Link: https://lore.kernel.org/r/20200114123414.GA7957@kadam
Reported-by: Tom Hatskevich <[email protected]>
Reviewed-by: Greg Kroah-Hartman <[email protected]>
Signed-off-by: Dan Carpenter <[email protected]>
Signed-off-by: Martin K. Petersen <[email protected]> |
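A minimal userspace sketch of the double-fetch shape described above, contrasted with the fix's shape; all names here are illustrative, not the driver's real API.

#include <string.h>

struct header  { int iocnum; };
struct adapter { int id; };

static struct adapter adapters[4] = { {0}, {1}, {2}, {3} };

static struct adapter *lookup_adapter(int iocnum)
{
    return (iocnum >= 0 && iocnum < 4) ? &adapters[iocnum] : 0;
}

/* Racy shape: the same user-controlled header is fetched twice, so a
 * concurrent writer can make the second lookup resolve a different
 * adapter than the one whose lock was taken. */
static struct adapter *ioctl_racy(const volatile struct header *user)
{
    struct header h1, h2;
    memcpy(&h1, (const void *)user, sizeof h1);    /* fetch #1: lock 'a' */
    struct adapter *a = lookup_adapter(h1.iocnum);
    memcpy(&h2, (const void *)user, sizeof h2);    /* fetch #2: may differ */
    struct adapter *b = lookup_adapter(h2.iocnum);
    return b == a ? a : b;                         /* b may not hold a's lock */
}

/* Fixed shape, matching the patch: fetch once and pass the resolved
 * pointer down so callees cannot re-resolve a different adapter. */
static struct adapter *ioctl_fixed(const struct header *user)
{
    struct header h;
    memcpy(&h, user, sizeof h);                    /* single fetch */
    return lookup_adapter(h.iocnum);
}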
cleanup (void)
{
/* Free external resources, close files, etc. */
/* Close WARC file. */
if (opt.warc_filename != 0)
warc_close ();
log_close ();
if (output_stream)
if (fclose (output_stream) == EOF)
inform_exit_status (CLOSEFAILED);
/* No need to check for error because Wget flushes its output (and
checks for errors) after any data arrives. */
/* We're exiting anyway so there's no real need to call free()
hundreds of times. Skipping the frees will make Wget exit
faster.
However, when detecting leaks, it's crucial to free() everything
because then you can find the real leaks, i.e. the allocated
memory which grows with the size of the program. */
#ifdef DEBUG_MALLOC
convert_cleanup ();
res_cleanup ();
http_cleanup ();
cleanup_html_url ();
spider_cleanup ();
host_cleanup ();
log_cleanup ();
netrc_cleanup (netrc_list);
for (i = 0; i < nurl; i++)
xfree (url[i]);
xfree_null (opt.choose_config);
xfree_null (opt.lfilename);
xfree_null (opt.dir_prefix);
xfree_null (opt.input_filename);
xfree_null (opt.output_document);
free_vec (opt.accepts);
free_vec (opt.rejects);
free_vec (opt.excludes);
free_vec (opt.includes);
free_vec (opt.domains);
free_vec (opt.follow_tags);
free_vec (opt.ignore_tags);
xfree_null (opt.progress_type);
xfree_null (opt.ftp_user);
xfree_null (opt.ftp_passwd);
xfree_null (opt.ftp_proxy);
xfree_null (opt.https_proxy);
xfree_null (opt.http_proxy);
free_vec (opt.no_proxy);
xfree_null (opt.useragent);
xfree_null (opt.referer);
xfree_null (opt.http_user);
xfree_null (opt.http_passwd);
free_vec (opt.user_headers);
free_vec (opt.warc_user_headers);
# ifdef HAVE_SSL
xfree_null (opt.cert_file);
xfree_null (opt.private_key);
xfree_null (opt.ca_directory);
xfree_null (opt.ca_cert);
xfree_null (opt.random_file);
xfree_null (opt.egd_file);
# endif
xfree_null (opt.bind_address);
xfree_null (opt.cookies_input);
xfree_null (opt.cookies_output);
xfree_null (opt.user);
xfree_null (opt.passwd);
xfree_null (opt.base_href);
xfree_null (opt.method);
#endif /* DEBUG_MALLOC */
} | 0 | [
"CWE-22"
] | wget | 18b0979357ed7dc4e11d4f2b1d7e0f5932d82aa7 | 143,733,100,627,465,210,000,000,000,000,000,000,000 | 81 | CVE-2014-4877: Arbitrary Symlink Access
Wget was susceptible to a symlink attack which could create arbitrary
files, directories or symbolic links and set their permissions when
retrieving a directory recursively through FTP. This commit changes the
default settings in Wget such that Wget no longer creates local symbolic
links, but rather traverses them and retrieves the pointed-to file in
such a retrieval.
The old behaviour can be attained by passing the --retr-symlinks=no
option to the Wget invocation command. |
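A minimal sketch of the defensive idea behind the change: refuse to write a retrieved file through a pre-existing symbolic link. This illustrates the class of check, not Wget's actual code path.

#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>

static int open_no_symlink(const char *path)
{
    struct stat st;
    if (lstat(path, &st) == 0 && S_ISLNK(st.st_mode)) {
        fprintf(stderr, "%s: refusing to write through a symlink\n", path);
        return -1;
    }
    /* O_NOFOLLOW makes the kernel enforce the same rule atomically,
     * closing the race window between lstat() and open(). */
    return open(path, O_WRONLY | O_CREAT | O_NOFOLLOW, 0644);
}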
TEST_P(DownstreamProtocolIntegrationTest, HeaderNormalizationRejection) {
config_helper_.addConfigModifier(
[](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&
hcm) -> void {
hcm.set_path_with_escaped_slashes_action(
envoy::extensions::filters::network::http_connection_manager::v3::
HttpConnectionManager::REJECT_REQUEST);
});
initialize();
codec_client_ = makeHttpConnection(lookupPort("http"));
default_request_headers_.setPath("/test/long%2Furl");
auto response = codec_client_->makeHeaderOnlyRequest(default_request_headers_);
EXPECT_TRUE(response->waitForEndStream());
EXPECT_TRUE(response->complete());
EXPECT_EQ("400", response->headers().getStatusValue());
} | 0 | [
"CWE-22"
] | envoy | 5333b928d8bcffa26ab19bf018369a835f697585 | 102,521,381,130,323,580,000,000,000,000,000,000,000 | 18 | Implement handling of escaped slash characters in URL path
Fixes: CVE-2021-29492
Signed-off-by: Yan Avlasov <[email protected]> |
void e2fsck_pass1_dupblocks(e2fsck_t ctx, char *block_buf)
{
ext2_filsys fs = ctx->fs;
struct problem_context pctx;
#ifdef RESOURCE_TRACK
struct resource_track rtrack;
#endif
clear_problem_context(&pctx);
pctx.errcode = e2fsck_allocate_inode_bitmap(fs,
_("multiply claimed inode map"),
EXT2FS_BMAP64_RBTREE, "inode_dup_map",
&inode_dup_map);
if (pctx.errcode) {
fix_problem(ctx, PR_1B_ALLOCATE_IBITMAP_ERROR, &pctx);
ctx->flags |= E2F_FLAG_ABORT;
return;
}
dict_init(&ino_dict, DICTCOUNT_T_MAX, dict_int_cmp);
dict_init(&clstr_dict, DICTCOUNT_T_MAX, dict_int_cmp);
dict_set_allocator(&ino_dict, NULL, inode_dnode_free, NULL);
dict_set_allocator(&clstr_dict, NULL, cluster_dnode_free, NULL);
init_resource_track(&rtrack, ctx->fs->io);
pass1b(ctx, block_buf);
print_resource_track(ctx, "Pass 1b", &rtrack, ctx->fs->io);
init_resource_track(&rtrack, ctx->fs->io);
pass1c(ctx, block_buf);
print_resource_track(ctx, "Pass 1c", &rtrack, ctx->fs->io);
init_resource_track(&rtrack, ctx->fs->io);
pass1d(ctx, block_buf);
print_resource_track(ctx, "Pass 1d", &rtrack, ctx->fs->io);
if (ext2fs_has_feature_shared_blocks(ctx->fs->super) &&
(ctx->options & E2F_OPT_UNSHARE_BLOCKS)) {
/*
* If we successfully managed to unshare all blocks, unset the
* shared block feature.
*/
blk64_t next;
int result = ext2fs_find_first_set_block_bitmap2(
ctx->block_dup_map,
ctx->fs->super->s_first_data_block,
ext2fs_blocks_count(ctx->fs->super) - 1,
&next);
if (result == ENOENT && !(ctx->options & E2F_OPT_NO)) {
ext2fs_clear_feature_shared_blocks(ctx->fs->super);
ext2fs_mark_super_dirty(ctx->fs);
}
}
/*
* Time to free all of the accumulated data structures that we
* don't need anymore.
*/
dict_free_nodes(&ino_dict);
dict_free_nodes(&clstr_dict);
ext2fs_free_inode_bitmap(inode_dup_map);
} | 0 | [
"CWE-787"
] | e2fsprogs | 71ba13755337e19c9a826dfc874562a36e1b24d3 | 306,628,593,226,884,950,000,000,000,000,000,000,000 | 63 | e2fsck: don't try to rehash a deleted directory
If a directory has been deleted in pass1[bcd] processing, then we
shouldn't try to rehash it in pass 3a when we
rehash/reoptimize directories.
Signed-off-by: Theodore Ts'o <[email protected]> |
void rtps_util_add_ipv4_address_t(proto_tree *tree, packet_info *pinfo, tvbuff_t *tvb, gint offset,
const guint encoding, int hf_item) {
proto_item *ti;
ti = proto_tree_add_item(tree, hf_item, tvb, offset, 4, encoding);
if (tvb_get_ntohl(tvb, offset) == IPADDRESS_INVALID)
expert_add_info(pinfo, ti, &ei_rtps_ip_invalid);
} | 0 | [
"CWE-401"
] | wireshark | 33e63d19e5496c151bad69f65cdbc7cba2b4c211 | 32,014,297,650,618,610,000,000,000,000,000,000,000 | 9 | RTPS: Fixup our coherent set map.
coherent_set_tracking.coherent_set_registry_map uses a struct as a key,
but the hash and comparison routines treat keys as a sequence of bytes.
Make sure every key byte is initialized. Fixes #16994.
Call wmem_strong_hash on our key in coherent_set_key_hash_by_key instead
of creating and leaking a GBytes struct. |
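A minimal sketch of the bug class fixed here: hashing a struct as raw bytes also hashes its padding, which is garbage unless every byte is initialized first. The field layout below is illustrative.

#include <string.h>

struct set_key {
    unsigned long long guid_prefix;
    unsigned short     entity_id;    /* the compiler may pad after this */
};

static unsigned hash_bytes(const void *p, size_t n)   /* FNV-1a */
{
    const unsigned char *b = p;
    unsigned h = 2166136261u;
    while (n--) h = (h ^ *b++) * 16777619u;
    return h;
}

static unsigned key_hash(unsigned long long prefix, unsigned short entity)
{
    struct set_key k;
    memset(&k, 0, sizeof k);   /* the fix: zero padding before byte-wise hash */
    k.guid_prefix = prefix;
    k.entity_id = entity;
    return hash_bytes(&k, sizeof k);
}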
static int nfnl_cthelper_update_policy(struct nf_conntrack_helper *helper,
const struct nlattr *attr)
{
struct nlattr *tb[NFCTH_POLICY_SET_MAX + 1];
unsigned int class_max;
int err;
err = nla_parse_nested(tb, NFCTH_POLICY_SET_MAX, attr,
nfnl_cthelper_expect_policy_set, NULL);
if (err < 0)
return err;
if (!tb[NFCTH_POLICY_SET_NUM])
return -EINVAL;
class_max = ntohl(nla_get_be32(tb[NFCTH_POLICY_SET_NUM]));
if (helper->expect_class_max + 1 != class_max)
return -EBUSY;
return nfnl_cthelper_update_policy_all(tb, helper);
} | 0 | [
"CWE-862"
] | linux | 4b380c42f7d00a395feede754f0bc2292eebe6e5 | 247,970,125,770,193,300,000,000,000,000,000,000,000 | 21 | netfilter: nfnetlink_cthelper: Add missing permission checks
The capability check in nfnetlink_rcv() verifies that the caller
has CAP_NET_ADMIN in the namespace that "owns" the netlink socket.
However, nfnl_cthelper_list is shared by all net namespaces on the
system. An unprivileged user can create user and net namespaces
in which he holds CAP_NET_ADMIN to bypass the netlink_net_capable()
check:
$ nfct helper list
nfct v1.4.4: netlink error: Operation not permitted
$ vpnns -- nfct helper list
{
.name = ftp,
.queuenum = 0,
.l3protonum = 2,
.l4protonum = 6,
.priv_data_len = 24,
.status = enabled,
};
Add capable() checks in nfnetlink_cthelper, as this is cleaner than
trying to generalize the solution.
Signed-off-by: Kevin Cernekee <[email protected]>
Signed-off-by: Pablo Neira Ayuso <[email protected]> |
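A minimal sketch of the fix's shape: state shared across all network namespaces needs a capability check against the initial user namespace (kernel capable()), not just the namespace owning the socket. The stub stands in for the kernel API.

#define CAP_NET_ADMIN 12
#define EPERM 1

/* stand-in for capable(): checks the initial user namespace */
static int capable_in_init_ns(int cap) { (void)cap; return 0; }

static int cthelper_new(void)
{
    /* A per-namespace check alone is bypassable: an unprivileged user can
     * create a user+net namespace pair where it holds CAP_NET_ADMIN, yet
     * the helper list being modified here is global. */
    if (!capable_in_init_ns(CAP_NET_ADMIN))
        return -EPERM;
    /* ... proceed to modify the shared helper list ... */
    return 0;
}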
WandExport MagickBooleanType MagickCommandGenesis(ImageInfo *image_info,
MagickCommand command,int argc,char **argv,char **metadata,
ExceptionInfo *exception)
{
char
client_name[MaxTextExtent],
*option;
double
duration,
serial;
MagickBooleanType
concurrent,
regard_warnings,
status;
register ssize_t
i;
size_t
iterations,
number_threads;
ssize_t
n;
(void) setlocale(LC_ALL,"");
(void) setlocale(LC_NUMERIC,"C");
GetPathComponent(argv[0],TailPath,client_name);
(void) SetClientName(client_name);
concurrent=MagickFalse;
duration=(-1.0);
iterations=1;
status=MagickTrue;
regard_warnings=MagickFalse;
for (i=1; i < (ssize_t) (argc-1); i++)
{
option=argv[i];
if ((strlen(option) == 1) || ((*option != '-') && (*option != '+')))
continue;
if (LocaleCompare("-bench",option) == 0)
iterations=StringToUnsignedLong(argv[++i]);
if (LocaleCompare("-concurrent",option) == 0)
concurrent=MagickTrue;
if (LocaleCompare("-debug",option) == 0)
(void) SetLogEventMask(argv[++i]);
if (LocaleCompare("-distribute-cache",option) == 0)
{
DistributePixelCacheServer(StringToInteger(argv[++i]),exception);
exit(0);
}
if (LocaleCompare("-duration",option) == 0)
duration=StringToDouble(argv[++i],(char **) NULL);
if (LocaleCompare("-regard-warnings",option) == 0)
regard_warnings=MagickTrue;
}
if (iterations == 1)
{
char
*text;
text=(char *) NULL;
status=command(image_info,argc,argv,&text,exception);
if (exception->severity != UndefinedException)
{
if ((exception->severity > ErrorException) ||
(regard_warnings != MagickFalse))
status=MagickFalse;
CatchException(exception);
}
if (text != (char *) NULL)
{
if (metadata != (char **) NULL)
(void) ConcatenateString(&(*metadata),text);
text=DestroyString(text);
}
return(status);
}
number_threads=GetOpenMPMaximumThreads();
serial=0.0;
for (n=1; n <= (ssize_t) number_threads; n++)
{
double
e,
parallel,
user_time;
TimerInfo
*timer;
(void) SetMagickResourceLimit(ThreadResource,(MagickSizeType) n);
timer=AcquireTimerInfo();
if (concurrent == MagickFalse)
{
for (i=0; i < (ssize_t) iterations; i++)
{
char
*text;
text=(char *) NULL;
if (status == MagickFalse)
continue;
if (duration > 0)
{
if (GetElapsedTime(timer) > duration)
continue;
(void) ContinueTimer(timer);
}
status=command(image_info,argc,argv,&text,exception);
if (exception->severity != UndefinedException)
{
if ((exception->severity > ErrorException) ||
(regard_warnings != MagickFalse))
status=MagickFalse;
CatchException(exception);
}
if (text != (char *) NULL)
{
if (metadata != (char **) NULL)
(void) ConcatenateString(&(*metadata),text);
text=DestroyString(text);
}
}
}
else
{
SetOpenMPNested(1);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
# pragma omp parallel for shared(status)
#endif
for (i=0; i < (ssize_t) iterations; i++)
{
char
*text;
text=(char *) NULL;
if (status == MagickFalse)
continue;
if (duration > 0)
{
if (GetElapsedTime(timer) > duration)
continue;
(void) ContinueTimer(timer);
}
status=command(image_info,argc,argv,&text,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
# pragma omp critical (MagickCore_MagickCommandGenesis)
#endif
{
if (exception->severity != UndefinedException)
{
if ((exception->severity > ErrorException) ||
(regard_warnings != MagickFalse))
status=MagickFalse;
CatchException(exception);
}
if (text != (char *) NULL)
{
if (metadata != (char **) NULL)
(void) ConcatenateString(&(*metadata),text);
text=DestroyString(text);
}
}
}
}
user_time=GetUserTime(timer);
parallel=GetElapsedTime(timer);
e=1.0;
if (n == 1)
serial=parallel;
else
e=((1.0/(1.0/((serial/(serial+parallel))+(1.0-(serial/(serial+parallel)))/
(double) n)))-(1.0/(double) n))/(1.0-1.0/(double) n);
(void) FormatLocaleFile(stderr,
"Performance[%.20g]: %.20gi %0.3fips %0.3fe %0.3fu %lu:%02lu.%03lu\n",
(double) n,(double) iterations,(double) iterations/parallel,e,user_time,
(unsigned long) (parallel/60.0),(unsigned long) floor(fmod(parallel,
60.0)),(unsigned long) (1000.0*(parallel-floor(parallel))+0.5));
timer=DestroyTimerInfo(timer);
}
return(status);
} | 0 | [
"CWE-617"
] | ImageMagick | 12f34b60564de1cbec08e23e2413dab5b64daeb7 | 317,474,701,976,924,700,000,000,000,000,000,000,000 | 183 | https://github.com/ImageMagick/ImageMagick/issues/802 |
libarchive_read_read_cb (struct archive *ar_read,
void *client_data,
const void **buffer)
{
AutoarExtractor *self;
gssize read_size;
g_debug ("libarchive_read_read_cb: called");
self = AUTOAR_EXTRACTOR (client_data);
if (self->error != NULL || self->istream == NULL)
return -1;
*buffer = self->buffer;
read_size = g_input_stream_read (self->istream,
self->buffer,
self->buffer_size,
self->cancellable,
&(self->error));
if (self->error != NULL)
return -1;
g_debug ("libarchive_read_read_cb: %" G_GSSIZE_FORMAT, read_size);
return read_size;
} | 0 | [
"CWE-22"
] | gnome-autoar | adb067e645732fdbe7103516e506d09eb6a54429 | 254,829,848,987,333,400,000,000,000,000,000,000,000 | 26 | AutoarExtractor: Do not extract files outside the destination dir
Currently, a malicious archive can cause that the files are extracted
outside of the destination dir. This can happen if the archive contains
a file whose parent is a symbolic link, which points outside of the
destination dir. This is potentially a security threat similar to
CVE-2020-11736. Let's skip such problematic files when extracting.
Fixes: https://gitlab.gnome.org/GNOME/gnome-autoar/-/issues/7 |
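A minimal sketch of the containment rule the fix enforces: resolve the extraction target's parent and verify it still lies under the destination directory, so a symlinked parent cannot redirect writes outside it. The real fix operates on GFile objects; this is a plain-POSIX illustration.

#include <limits.h>
#include <stdlib.h>
#include <string.h>

static int is_under_destination(const char *parent, const char *dest)
{
    char rp[PATH_MAX], rd[PATH_MAX];
    if (!realpath(parent, rp) || !realpath(dest, rd))
        return 0;                                  /* unresolved: unsafe */
    size_t n = strlen(rd);
    return strncmp(rp, rd, n) == 0 && (rp[n] == '/' || rp[n] == '\0');
}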
static void netstamp_clear(struct work_struct *work)
{
int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
int wanted;
wanted = atomic_add_return(deferred, &netstamp_wanted);
if (wanted > 0)
static_key_enable(&netstamp_needed);
else
static_key_disable(&netstamp_needed);
} | 0 | [
"CWE-476"
] | linux | 0ad646c81b2182f7fa67ec0c8c825e0ee165696d | 52,053,755,404,145,000,000,000,000,000,000,000,000 | 11 | tun: call dev_get_valid_name() before register_netdevice()
register_netdevice() could fail early when we have an invalid
dev name, in which case ->ndo_uninit() is not called. For tun
device, this is a problem because a timer etc. are already
initialized and it expects ->ndo_uninit() to clean them up.
We could move these initializations into a ->ndo_init() so
that register_netdevice() knows better, however this is still
complicated due to the logic in tun_detach().
Therefore, I choose to just call dev_get_valid_name() before
register_netdevice(), which is quicker and much easier to audit.
And for this specific case, it is already enough.
Fixes: 96442e42429e ("tuntap: choose the txq based on rxq")
Reported-by: Dmitry Alexeev <[email protected]>
Cc: Jason Wang <[email protected]>
Cc: "Michael S. Tsirkin" <[email protected]>
Signed-off-by: Cong Wang <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
static CImg<T> vector(const T& a0) {
CImg<T> r(1,1);
r[0] = a0;
return r;
} | 0 | [
"CWE-770"
] | cimg | 619cb58dd90b4e03ac68286c70ed98acbefd1c90 | 109,682,153,913,141,760,000,000,000,000,000,000,000 | 5 | CImg<>::load_bmp() and CImg<>::load_pandore(): Check that dimensions encoded in file does not exceed file size. |
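A minimal sketch of the added sanity check: reject a header whose claimed pixel payload cannot fit in the file, before any allocation happens. Field names and the byte accounting are illustrative.

#include <stdint.h>

static int dims_plausible(uint32_t width, uint32_t height,
                          uint32_t bytes_per_pixel, uint64_t file_size)
{
    /* 64-bit math keeps the product from wrapping around */
    uint64_t need = (uint64_t)width * height * bytes_per_pixel;
    return width != 0 && height != 0 && need <= file_size;
}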
GF_Err DoWrite(MovieWriter *mw, GF_List *writers, GF_BitStream *bs, u8 Emulation, u64 StartOffset)
{
u32 i;
GF_Err e;
TrackWriter *writer;
u64 offset, sampOffset, predOffset;
u32 chunkNumber, descIndex, sampSize;
Bool force;
GF_StscEntry *stsc_ent;
u64 size, mdatSize = 0;
GF_ISOFile *movie = mw->movie;
/*write meta content first - WE DON'T support fragmentation of resources in ISOM atm*/
if (movie->openMode != GF_ISOM_OPEN_WRITE) {
if (movie->meta) {
e = DoWriteMeta(movie, movie->meta, bs, Emulation, StartOffset, &size);
if (e) return e;
mdatSize += size;
StartOffset += size;
}
if (movie->moov && movie->moov->meta) {
e = DoWriteMeta(movie, movie->meta, bs, Emulation, StartOffset, &size);
if (e) return e;
mdatSize += size;
StartOffset += size;
}
i=0;
while ((writer = (TrackWriter*)gf_list_enum(writers, &i))) {
if (writer->mdia->mediaTrack->meta) {
e = DoWriteMeta(movie, movie->meta, bs, Emulation, StartOffset, &size);
if (e) return e;
mdatSize += size;
StartOffset += size;
}
}
}
offset = StartOffset;
predOffset = 0;
i=0;
while ((writer = (TrackWriter*)gf_list_enum(writers, &i))) {
while (!writer->isDone) {
Bool self_contained;
u32 nb_samp=1;
//To Check: are empty sample tables allowed ???
if (writer->sampleNumber > writer->stbl->SampleSize->sampleCount) {
writer->isDone = 1;
continue;
}
e = stbl_GetSampleInfos(writer->stbl, writer->sampleNumber, &sampOffset, &chunkNumber, &descIndex, &stsc_ent);
if (e) return e;
e = stbl_GetSampleSize(writer->stbl->SampleSize, writer->sampleNumber, &sampSize);
if (e) return e;
update_writer_constant_dur(movie, writer, stsc_ent, &nb_samp, &sampSize, GF_TRUE);
//update our chunks.
force = 0;
if (movie->openMode == GF_ISOM_OPEN_WRITE) {
offset = sampOffset;
if (predOffset != offset)
force = 1;
}
if (writer->stbl->MaxChunkSize && (writer->chunkSize + sampSize > writer->stbl->MaxChunkSize)) {
writer->chunkSize = 0;
force = 1;
}
writer->chunkSize += sampSize;
self_contained = ((writer->all_dref_mode==ISOM_DREF_SELF) || Media_IsSelfContained(writer->mdia, descIndex) ) ? GF_TRUE : GF_FALSE;
//update our global offset...
if (self_contained) {
e = stbl_SetChunkAndOffset(writer->stbl, writer->sampleNumber, descIndex, writer->stsc, &writer->stco, offset, force, nb_samp);
if (e) return e;
if (movie->openMode == GF_ISOM_OPEN_WRITE) {
predOffset = sampOffset + sampSize;
} else {
offset += sampSize;
mdatSize += sampSize;
}
} else {
if (predOffset != offset) force = 1;
predOffset = sampOffset + sampSize;
//we have a DataRef, so use the offset indicated in sampleToChunk and ChunkOffset tables...
e = stbl_SetChunkAndOffset(writer->stbl, writer->sampleNumber, descIndex, writer->stsc, &writer->stco, sampOffset, force, nb_samp);
if (e) return e;
}
//we write the sample if not emulation
if (!Emulation) {
if (self_contained) {
e = WriteSample(mw, sampSize, sampOffset, stsc_ent->isEdited, bs, 1);
if (e) return e;
}
}
//ok, the track is done
if (writer->sampleNumber >= writer->stbl->SampleSize->sampleCount) {
writer->isDone = 1;
} else {
writer->sampleNumber += nb_samp;
}
}
}
//set the mdatSize...
movie->mdat->dataSize = mdatSize;
return GF_OK;
} | 0 | [
"CWE-416"
] | gpac | 5aba27604d957e960d8069d85ccaf868f8a7b07a | 192,261,882,913,612,570,000,000,000,000,000,000,000 | 108 | fixed #1661 |
void vsock_remove_connected(struct vsock_sock *vsk)
{
spin_lock_bh(&vsock_table_lock);
__vsock_remove_connected(vsk);
spin_unlock_bh(&vsock_table_lock);
} | 0 | [
"CWE-20",
"CWE-269"
] | linux | f3d3342602f8bcbf37d7c46641cb9bca7618eb1c | 249,029,155,973,045,100,000,000,000,000,000,000,000 | 6 | net: rework recvmsg handler msg_name and msg_namelen logic
This patch now always passes msg->msg_namelen as 0. recvmsg handlers must
set msg_namelen to the proper size <= sizeof(struct sockaddr_storage)
to return msg_name to the user.
This prevents numerous uninitialized memory leaks we had in the
recvmsg handlers and makes it harder for new code to accidentally leak
uninitialized memory.
Optimize for the case recvfrom is called with NULL as address. We don't
need to copy the address at all, so set it to NULL before invoking the
recvmsg handler. We can do so, because all the recvmsg handlers must
cope with the case a plain read() is called on them. read() also sets
msg_name to NULL.
Also document these changes in include/linux/net.h as suggested by David
Miller.
Changes since RFC:
Set msg->msg_name = NULL if user specified a NULL in msg_name but had a
non-null msg_namelen in verify_iovec/verify_compat_iovec. This doesn't
affect sendto as it would bail out earlier while trying to copy-in the
address. It also more naturally reflects the logic by the callers of
verify_iovec.
With this change in place I could remove "
if (!uaddr || msg_sys->msg_namelen == 0)
msg->msg_name = NULL
".
This change does not alter the user visible error logic as we ignore
msg_namelen as long as msg_name is NULL.
Also remove two unnecessary curly brackets in ___sys_recvmsg and change
comments to netdev style.
Cc: David Miller <[email protected]>
Suggested-by: Eric Dumazet <[email protected]>
Signed-off-by: Hannes Frederic Sowa <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
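A minimal sketch of the convention this commit establishes, with stand-in types: msg_namelen starts at 0 and is set only when the handler really fills in a sockaddr, so stack garbage can never be copied back to userspace.

#include <string.h>

struct sockaddr_stub { unsigned short family; unsigned char data[14]; };
struct msghdr_stub   { void *msg_name; int msg_namelen; };

static void recvmsg_fill_name(struct msghdr_stub *msg, int have_peer)
{
    msg->msg_namelen = 0;                     /* default: nothing returned */
    if (msg->msg_name && have_peer) {
        struct sockaddr_stub *sa = msg->msg_name;
        memset(sa, 0, sizeof *sa);            /* no uninitialized bytes */
        sa->family = 2;                       /* AF_INET, for the sketch */
        msg->msg_namelen = sizeof *sa;        /* set only when filled */
    }
}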
gdImagePtr gdImageCreateFromGifCtx(gdIOCtxPtr fd) /* {{{ */
{
int BitPixel;
#if 0
int ColorResolution;
int Background;
int AspectRatio;
#endif
int Transparent = (-1);
unsigned char buf[16];
unsigned char c;
unsigned char ColorMap[3][MAXCOLORMAPSIZE];
unsigned char localColorMap[3][MAXCOLORMAPSIZE];
int imw, imh, screen_width, screen_height;
int gif87a, useGlobalColormap;
int bitPixel;
int i;
/*1.4//int imageCount = 0; */
int ZeroDataBlock = FALSE;
int haveGlobalColormap;
gdImagePtr im = 0;
/*1.4//imageNumber = 1; */
if (! ReadOK(fd,buf,6)) {
return 0;
}
if (strncmp((char *)buf,"GIF",3) != 0) {
return 0;
}
if (memcmp((char *)buf+3, "87a", 3) == 0) {
gif87a = 1;
} else if (memcmp((char *)buf+3, "89a", 3) == 0) {
gif87a = 0;
} else {
return 0;
}
if (! ReadOK(fd,buf,7)) {
return 0;
}
BitPixel = 2<<(buf[4]&0x07);
#if 0
ColorResolution = (int) (((buf[4]&0x70)>>3)+1);
Background = buf[5];
AspectRatio = buf[6];
#endif
screen_width = imw = LM_to_uint(buf[0],buf[1]);
screen_height = imh = LM_to_uint(buf[2],buf[3]);
haveGlobalColormap = BitSet(buf[4], LOCALCOLORMAP); /* Global Colormap */
if (haveGlobalColormap) {
if (ReadColorMap(fd, BitPixel, ColorMap)) {
return 0;
}
}
for (;;) {
int top, left;
int width, height;
if (! ReadOK(fd,&c,1)) {
return 0;
}
if (c == ';') { /* GIF terminator */
goto terminated;
}
if (c == '!') { /* Extension */
if (! ReadOK(fd,&c,1)) {
return 0;
}
DoExtension(fd, c, &Transparent, &ZeroDataBlock);
continue;
}
if (c != ',') { /* Not a valid start character */
continue;
}
/*1.4//++imageCount; */
if (! ReadOK(fd,buf,9)) {
return 0;
}
useGlobalColormap = ! BitSet(buf[8], LOCALCOLORMAP);
bitPixel = 1<<((buf[8]&0x07)+1);
left = LM_to_uint(buf[0], buf[1]);
top = LM_to_uint(buf[2], buf[3]);
width = LM_to_uint(buf[4], buf[5]);
height = LM_to_uint(buf[6], buf[7]);
if (left + width > screen_width || top + height > screen_height) {
if (VERBOSE) {
printf("Frame is not confined to screen dimension.\n");
}
return 0;
}
if (!(im = gdImageCreate(width, height))) {
return 0;
}
im->interlace = BitSet(buf[8], INTERLACE);
if (!useGlobalColormap) {
if (ReadColorMap(fd, bitPixel, localColorMap)) {
gdImageDestroy(im);
return 0;
}
ReadImage(im, fd, width, height, localColorMap,
BitSet(buf[8], INTERLACE), &ZeroDataBlock);
} else {
if (!haveGlobalColormap) {
gdImageDestroy(im);
return 0;
}
ReadImage(im, fd, width, height,
ColorMap,
BitSet(buf[8], INTERLACE), &ZeroDataBlock);
}
if (Transparent != (-1)) {
gdImageColorTransparent(im, Transparent);
}
goto terminated;
}
terminated:
/* Terminator before any image was declared! */
if (!im) {
return 0;
}
if (!im->colorsTotal) {
gdImageDestroy(im);
return 0;
}
/* Check for open colors at the end, so
we can reduce colorsTotal and ultimately
BitsPerPixel */
for (i=((im->colorsTotal-1)); (i>=0); i--) {
if (im->open[i]) {
im->colorsTotal--;
} else {
break;
}
}
return im;
} | 1 | [
"CWE-200"
] | php-src | 8dc4f4dc9e44d1cbfe4654aa6e0dc27c94913938 | 101,555,333,886,468,920,000,000,000,000,000,000,000 | 150 | Fix #74435: Buffer over-read into uninitialized memory
The stack allocated color map buffers were not zeroed before usage, and
so undefined palette indexes could cause information leakage. |
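A minimal sketch of the class of fix: zero the palette buffers before the file's (possibly short) color map is read into them, so an out-of-range palette index can only ever read zero bytes, never leftover stack contents.

#include <string.h>

#define MAXCOLORMAPSIZE 256

static void init_colormaps(unsigned char cm[3][MAXCOLORMAPSIZE],
                           unsigned char local[3][MAXCOLORMAPSIZE])
{
    /* array parameters decay to pointers, so spell the sizes out */
    memset(cm,    0, 3 * MAXCOLORMAPSIZE);
    memset(local, 0, 3 * MAXCOLORMAPSIZE);
}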
strqueue_diff(Strqueue *sq1, Strqueue *sq2, Strqueue *osq)
{
int i = 0, j = 0;
while (i < sq1->nstr && j < sq2->nstr)
{
int r = strqueue_sort_cmp(sq1->str + i, sq2->str + j, 0);
if (!r)
i++, j++;
else if (r < 0)
strqueue_pushjoin(osq, "-", sq1->str[i++], 0);
else
strqueue_pushjoin(osq, "+", sq2->str[j++], 0);
}
while (i < sq1->nstr)
strqueue_pushjoin(osq, "-", sq1->str[i++], 0);
while (j < sq2->nstr)
strqueue_pushjoin(osq, "+", sq2->str[j++], 0);
} | 0 | [
"CWE-120"
] | libsolv | 0077ef29eb46d2e1df2f230fc95a1d9748d49dec | 288,368,117,956,631,160,000,000,000,000,000,000,000 | 18 | testcase_read: error out if repos are added or the system is changed too late
We must not add new solvables after the considered map was created, the solver
was created, or jobs were added. We must not change the system after jobs have
been added.
(Jobs may point inside the whatprovides array, so we must not invalidate this
area.) |
int FileIo::close()
{
int rc = 0;
if (munmap() != 0) rc = 2;
if (p_->fp_ != 0) {
if (std::fclose(p_->fp_) != 0) rc |= 1;
p_->fp_= 0;
}
return rc;
} | 0 | [
"CWE-125"
] | exiv2 | 6e3855aed7ba8bb4731fc4087ca7f9078b2f3d97 | 228,051,271,981,077,620,000,000,000,000,000,000,000 | 10 | Fix https://github.com/Exiv2/exiv2/issues/55 |
strncmpic(uschar *s, uschar *t, int n)
{
while (n--)
{
int c = tolower(*s++) - tolower(*t++);
if (c) return c;
}
return 0;
} | 0 | [] | exim | 24c929a27415c7cfc7126c47e4cad39acf3efa6b | 153,126,484,337,755,500,000,000,000,000,000,000,000 | 9 | Buffer overrun fix. fixes: bug #787 |
static ssize_t bpf_dummy_write(struct file *filp, const char __user *buf,
size_t siz, loff_t *ppos)
{
/* We need this handler such that alloc_file() enables
* f_mode with FMODE_CAN_WRITE.
*/
return -EINVAL;
} | 0 | [
"CWE-307"
] | linux | 350a5c4dd2452ea999cc5e1d4a8dbf12de2f97ef | 155,953,839,675,919,050,000,000,000,000,000,000,000 | 8 | bpf: Dont allow vmlinux BTF to be used in map_create and prog_load.
The syzbot got FD of vmlinux BTF and passed it into map_create which caused
crash in btf_type_id_size() when it tried to access resolved_ids. The vmlinux
BTF doesn't have 'resolved_ids' and 'resolved_sizes' initialized to save
memory. To avoid such issues disallow using vmlinux BTF in prog_load and
map_create commands.
Fixes: 5329722057d4 ("bpf: Assign ID to vmlinux BTF and return extra info for BTF in GET_OBJ_INFO")
Reported-by: [email protected]
Signed-off-by: Alexei Starovoitov <[email protected]>
Signed-off-by: Daniel Borkmann <[email protected]>
Acked-by: Yonghong Song <[email protected]>
Link: https://lore.kernel.org/bpf/[email protected] |
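A minimal sketch of the guard the commit describes; the predicate name and call site below are assumptions, not verified kernel code. vmlinux BTF omits the resolved_ids/resolved_sizes arrays, so it must be refused before anything indexes them.

struct btf_stub { int is_kernel; };

#define EACCES 13

/* stand-in for a "is this the kernel's own BTF?" predicate */
static int btf_is_kernel_stub(const struct btf_stub *b) { return b->is_kernel; }

static int check_btf_for_map_create(const struct btf_stub *btf)
{
    if (btf_is_kernel_stub(btf))
        return -EACCES;   /* vmlinux BTF: no resolved_ids to dereference */
    return 0;
}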
static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
{
struct futex_hash_bucket *hb;
get_futex_key_refs(&q->key);
hb = hash_futex(&q->key);
q->lock_ptr = &hb->lock;
spin_lock(&hb->lock);
return hb;
} | 1 | [
"CWE-119",
"CWE-787"
] | linux | 7ada876a8703f23befbb20a7465a702ee39b1704 | 248,884,291,113,777,570,000,000,000,000,000,000,000 | 11 | futex: Fix errors in nested key ref-counting
futex_wait() is leaking key references due to futex_wait_setup()
acquiring an additional reference via the queue_lock() routine. The
nested key ref-counting has been masking bugs and complicating code
analysis. queue_lock() is only called with a previously ref-counted
key, so remove the additional ref-counting from the queue_(un)lock()
functions.
Also futex_wait_requeue_pi() drops one key reference too many in
unqueue_me_pi(). Remove the key reference handling from
unqueue_me_pi(). This was paired with a queue_lock() in
futex_lock_pi(), so the count remains unchanged.
Document remaining nested key ref-counting sites.
Signed-off-by: Darren Hart <[email protected]>
Reported-and-tested-by: Matthieu Fertré<[email protected]>
Reported-by: Louis Rilling<[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Eric Dumazet <[email protected]>
Cc: John Kacur <[email protected]>
Cc: Rusty Russell <[email protected]>
LKML-Reference: <[email protected]>
Signed-off-by: Thomas Gleixner <[email protected]>
Cc: [email protected] |
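The queue_lock() shown above is the flagged pre-fix version; the fix drops its nested get_futex_key_refs() call, since callers already hold a counted key reference. A sketch of the fixed shape, reusing the kernel declarations visible in the snippet above (a fragment, not a standalone program):

static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
{
	struct futex_hash_bucket *hb;

	/* no get_futex_key_refs(): the caller's reference is sufficient */
	hb = hash_futex(&q->key);
	q->lock_ptr = &hb->lock;
	spin_lock(&hb->lock);
	return hb;
}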
void luaV_finishget (lua_State *L, const TValue *t, TValue *key, StkId val,
const TValue *slot) {
int loop; /* counter to avoid infinite loops */
const TValue *tm; /* metamethod */
for (loop = 0; loop < MAXTAGLOOP; loop++) {
if (slot == NULL) { /* 't' is not a table? */
lua_assert(!ttistable(t));
tm = luaT_gettmbyobj(L, t, TM_INDEX);
if (l_unlikely(notm(tm)))
luaG_typeerror(L, t, "index"); /* no metamethod */
/* else will try the metamethod */
}
else { /* 't' is a table */
lua_assert(isempty(slot));
tm = fasttm(L, hvalue(t)->metatable, TM_INDEX); /* table's metamethod */
if (tm == NULL) { /* no metamethod? */
setnilvalue(s2v(val)); /* result is nil */
return;
}
/* else will try the metamethod */
}
if (ttisfunction(tm)) { /* is metamethod a function? */
luaT_callTMres(L, tm, t, key, val); /* call it */
return;
}
t = tm; /* else try to access 'tm[key]' */
if (luaV_fastget(L, t, key, slot, luaH_get)) { /* fast track? */
setobj2s(L, val, slot); /* done */
return;
}
/* else repeat (tail call 'luaV_finishget') */
}
luaG_runerror(L, "'__index' chain too long; possible loop");
} | 0 | [
"CWE-787"
] | lua | 42d40581dd919fb134c07027ca1ce0844c670daf | 130,773,074,629,351,200,000,000,000,000,000,000,000 | 34 | Save stack space while handling errors
Because error handling (luaG_errormsg) uses slots from EXTRA_STACK,
and some errors can recur (e.g., string overflow while creating an
error message in 'luaG_runerror', or a C-stack overflow before calling
the message handler), the code should use stack slots with parsimony.
This commit fixes the bug "Lua-stack overflow when C stack overflows
while handling an error". |
CPH_METHOD(LoadFromFile)
{
HRESULT res;
char *filename, *fullpath;
int filename_len;
long flags = 0;
OLECHAR *olefilename;
CPH_FETCH();
CPH_NO_OBJ();
res = get_persist_file(helper);
if (helper->ipf) {
if (FAILURE == zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s|l",
&filename, &filename_len, &flags)) {
php_com_throw_exception(E_INVALIDARG, "Invalid arguments" TSRMLS_CC);
return;
}
if (!(fullpath = expand_filepath(filename, NULL TSRMLS_CC))) {
RETURN_FALSE;
}
if ((PG(safe_mode) && (!php_checkuid(fullpath, NULL, CHECKUID_CHECK_FILE_AND_DIR))) ||
php_check_open_basedir(fullpath TSRMLS_CC)) {
efree(fullpath);
RETURN_FALSE;
}
olefilename = php_com_string_to_olestring(fullpath, strlen(fullpath), helper->codepage TSRMLS_CC);
efree(fullpath);
res = IPersistFile_Load(helper->ipf, olefilename, flags);
efree(olefilename);
if (FAILED(res)) {
php_com_throw_exception(res, NULL TSRMLS_CC);
}
} else {
php_com_throw_exception(res, NULL TSRMLS_CC);
}
} | 1 | [] | php-src | ce96fd6b0761d98353761bf78d5bfb55291179fd | 139,114,469,657,038,060,000,000,000,000,000,000,000 | 44 | - fix #39863, do not accept paths with NULL in them. See http://news.php.net/php.internals/50191, trunk will have the patch later (adding a macro and/or changing (some) APIs. Patch by Rasmus |
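A minimal sketch of the class of check this fix introduces (PHP wraps it in a macro whose exact name is not shown here): a length-counted string with an embedded NUL must be rejected before reaching C path APIs, which would silently truncate at the NUL.

#include <stddef.h>
#include <string.h>

static int path_has_embedded_nul(const char *s, size_t len)
{
    return memchr(s, '\0', len) != NULL;
}

/* usage: if (path_has_embedded_nul(filename, filename_len)) fail early */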
handle_is_local (GdmDBusDisplay *skeleton,
GDBusMethodInvocation *invocation,
GdmDisplay *self)
{
gboolean is_local;
gdm_display_is_local (self, &is_local, NULL);
gdm_dbus_display_complete_is_local (skeleton, invocation, is_local);
return TRUE;
} | 0 | [
"CWE-754"
] | gdm | 4e6e5335d29c039bed820c43bfd1c19cb62539ff | 289,250,687,450,091,030,000,000,000,000,000,000,000 | 12 | display: Use autoptr to handle errors in look for existing users
It just makes things cleaner. |
TEST_F(HttpConnectionManagerImplTest, ZeroByteDataFiltering) {
InSequence s;
setup(false, "");
StreamDecoder* decoder = nullptr;
EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void {
decoder = &conn_manager_->newStream(response_encoder_);
HeaderMapPtr headers{
new TestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}};
decoder->decodeHeaders(std::move(headers), false);
}));
setupFilterChain(2, 0);
EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, false))
.WillOnce(Return(FilterHeadersStatus::StopIteration));
// Kick off the incoming data.
Buffer::OwnedImpl fake_input("1234");
conn_manager_->onData(fake_input, false);
// Continue headers only of filter 1.
EXPECT_CALL(*decoder_filters_[1], decodeHeaders(_, false))
.WillOnce(Return(FilterHeadersStatus::StopIteration));
decoder_filters_[0]->callbacks_->continueDecoding();
// Stop zero byte data.
EXPECT_CALL(*decoder_filters_[0], decodeData(_, true))
.WillOnce(Return(FilterDataStatus::StopIterationAndBuffer));
EXPECT_CALL(*decoder_filters_[0], decodeComplete());
Buffer::OwnedImpl zero;
decoder->decodeData(zero, true);
// Continue.
EXPECT_CALL(*decoder_filters_[1], decodeData(_, true))
.WillOnce(Return(FilterDataStatus::StopIterationNoBuffer));
EXPECT_CALL(*decoder_filters_[1], decodeComplete());
decoder_filters_[0]->callbacks_->continueDecoding();
} | 0 | [
"CWE-400",
"CWE-703"
] | envoy | afc39bea36fd436e54262f150c009e8d72db5014 | 3,118,923,184,101,288,300,000,000,000,000,000,000 | 39 | Track byteSize of HeaderMap internally.
Introduces a cached byte size updated internally in HeaderMap. The value
is stored as an optional, and is cleared whenever a non-const pointer or
reference to a HeaderEntry is accessed. The cached value can be set with
refreshByteSize() which performs an iteration over the HeaderMap to sum
the size of each key and value in the HeaderMap.
Signed-off-by: Asra Ali <[email protected]> |
virPCIELinkFormat(virBufferPtr buf,
virPCIELinkPtr lnk,
const char *attrib)
{
if (!lnk)
return;
virBufferAsprintf(buf, "<link validity='%s'", attrib);
if (lnk->port >= 0)
virBufferAsprintf(buf, " port='%d'", lnk->port);
if (lnk->speed)
virBufferAsprintf(buf, " speed='%s'",
virPCIELinkSpeedTypeToString(lnk->speed));
virBufferAsprintf(buf, " width='%d'", lnk->width);
virBufferAddLit(buf, "/>\n");
} | 0 | [
"CWE-119"
] | libvirt | 4c4d0e2da07b5a035b26a0ff13ec27070f7c7b1a | 25,801,960,267,310,396,000,000,000,000,000,000,000 | 16 | conf: Fix segfault when parsing mdev types
Commit f1b0890 introduced a potential crash due to incorrect operator
precedence when accessing an element from a pointer to an array.
Backtrace below:
#0 virNodeDeviceGetMdevTypesCaps (sysfspath=0x7fff801661e0 "/sys/devices/pci0000:00/0000:00:02.0", mdev_types=0x7fff801c9b40, nmdev_types=0x7fff801c9b48) at ../src/conf/node_device_conf.c:2676
#1 0x00007ffff7caf53d in virNodeDeviceGetPCIDynamicCaps (sysfsPath=0x7fff801661e0 "/sys/devices/pci0000:00/0000:00:02.0", pci_dev=0x7fff801c9ac8) at ../src/conf/node_device_conf.c:2705
#2 0x00007ffff7cae38f in virNodeDeviceUpdateCaps (def=0x7fff80168a10) at ../src/conf/node_device_conf.c:2342
#3 0x00007ffff7cb11c0 in virNodeDeviceObjMatch (obj=0x7fff84002e50, flags=0) at ../src/conf/virnodedeviceobj.c:850
#4 0x00007ffff7cb153d in virNodeDeviceObjListExportCallback (payload=0x7fff84002e50, name=0x7fff801cbc20 "pci_0000_00_02_0", opaque=0x7fffe2ffc6a0) at ../src/conf/virnodedeviceobj.c:909
#5 0x00007ffff7b69146 in virHashForEach (table=0x7fff9814b700 = {...}, iter=0x7ffff7cb149e <virNodeDeviceObjListExportCallback>, opaque=0x7fffe2ffc6a0) at ../src/util/virhash.c:394
#6 0x00007ffff7cb1694 in virNodeDeviceObjListExport (conn=0x7fff98013170, devs=0x7fff98154430, devices=0x7fffe2ffc798, filter=0x7ffff7cf47a1 <virConnectListAllNodeDevicesCheckACL>, flags=0)
at ../src/conf/virnodedeviceobj.c:943
#7 0x00007fffe00694b2 in nodeConnectListAllNodeDevices (conn=0x7fff98013170, devices=0x7fffe2ffc798, flags=0) at ../src/node_device/node_device_driver.c:228
#8 0x00007ffff7e703aa in virConnectListAllNodeDevices (conn=0x7fff98013170, devices=0x7fffe2ffc798, flags=0) at ../src/libvirt-nodedev.c:130
#9 0x000055555557f796 in remoteDispatchConnectListAllNodeDevices (server=0x555555627080, client=0x5555556bf050, msg=0x5555556c0000, rerr=0x7fffe2ffc8a0, args=0x7fffd4008470, ret=0x7fffd40084e0)
at src/remote/remote_daemon_dispatch_stubs.h:1613
#10 0x000055555557f6f9 in remoteDispatchConnectListAllNodeDevicesHelper (server=0x555555627080, client=0x5555556bf050, msg=0x5555556c0000, rerr=0x7fffe2ffc8a0, args=0x7fffd4008470, ret=0x7fffd40084e0)
at src/remote/remote_daemon_dispatch_stubs.h:1591
#11 0x00007ffff7ce9542 in virNetServerProgramDispatchCall (prog=0x555555690c10, server=0x555555627080, client=0x5555556bf050, msg=0x5555556c0000) at ../src/rpc/virnetserverprogram.c:428
#12 0x00007ffff7ce90bd in virNetServerProgramDispatch (prog=0x555555690c10, server=0x555555627080, client=0x5555556bf050, msg=0x5555556c0000) at ../src/rpc/virnetserverprogram.c:302
#13 0x00007ffff7cf042b in virNetServerProcessMsg (srv=0x555555627080, client=0x5555556bf050, prog=0x555555690c10, msg=0x5555556c0000) at ../src/rpc/virnetserver.c:137
#14 0x00007ffff7cf04eb in virNetServerHandleJob (jobOpaque=0x5555556b66b0, opaque=0x555555627080) at ../src/rpc/virnetserver.c:154
#15 0x00007ffff7bd912f in virThreadPoolWorker (opaque=0x55555562bc70) at ../src/util/virthreadpool.c:163
#16 0x00007ffff7bd8645 in virThreadHelper (data=0x55555562bc90) at ../src/util/virthread.c:233
#17 0x00007ffff6d90432 in start_thread () at /lib64/libpthread.so.0
#18 0x00007ffff75c5913 in clone () at /lib64/libc.so.6
Signed-off-by: Jonathon Jongsma <[email protected]>
Reviewed-by: Ján Tomko <[email protected]>
Signed-off-by: Ján Tomko <[email protected]> |
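A standalone reproduction of the precedence slip behind the crash: with a pointer to an array of pointers, *p[i] parses as *(p[i]) and indexes the wrong object, while (*p)[i] is the intended element access.

#include <stdio.h>

int main(void)
{
    const char *items[3] = { "a", "b", "c" };
    const char **arr = items;
    const char ***p = &arr;

    printf("%s\n", (*p)[1]);       /* correct: items[1] -> "b" */
    /* printf("%s\n", *p[1]); */   /* wrong: p[1] reads past 'p' itself */
    return 0;
}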
/* {{{ proto bool tidy_parse_string(string input [, mixed config_options [, string encoding]])
Parse a document stored in a string */
static PHP_FUNCTION(tidy_parse_string)
{
char *input, *enc = NULL;
int input_len, enc_len = 0;
zval **options = NULL;
PHPTidyObj *obj;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s|Zs", &input, &input_len, &options, &enc, &enc_len) == FAILURE) {
RETURN_FALSE;
}
tidy_instanciate(tidy_ce_doc, return_value TSRMLS_CC);
obj = (PHPTidyObj *) zend_object_store_get_object(return_value TSRMLS_CC);
TIDY_APPLY_CONFIG_ZVAL(obj->ptdoc->doc, options);
if(php_tidy_parse_string(obj, input, input_len, enc TSRMLS_CC) == FAILURE) {
zval_dtor(return_value);
INIT_ZVAL(*return_value);
RETURN_FALSE; | 0 | [] | php-src | ce96fd6b0761d98353761bf78d5bfb55291179fd | 224,581,278,479,784,600,000,000,000,000,000,000,000 | 24 | - fix #39863, do not accept paths with NULL in them. See http://news.php.net/php.internals/50191, trunk will have the patch later (adding a macro and/or changing (some) APIs. Patch by Rasmus |
const CImg<T>& _save_png(std::FILE *const file, const char *const filename,
const unsigned int bytes_per_pixel=0) const {
if (!file && !filename)
throw CImgArgumentException(_cimg_instance
"save_png(): Specified filename is (null).",
cimg_instance);
if (is_empty()) { cimg::fempty(file,filename); return *this; }
#ifndef cimg_use_png
cimg::unused(bytes_per_pixel);
if (!file) return save_other(filename);
else throw CImgIOException(_cimg_instance
"save_png(): Unable to save data in '(*FILE)' unless libpng is enabled.",
cimg_instance);
#else
#if defined __GNUC__
const char *volatile nfilename = filename; // Use 'volatile' to avoid (wrong) g++ warning
std::FILE *volatile nfile = file?file:cimg::fopen(nfilename,"wb");
volatile double stmin, stmax = (double)max_min(stmin);
#else
const char *nfilename = filename;
std::FILE *nfile = file?file:cimg::fopen(nfilename,"wb");
double stmin, stmax = (double)max_min(stmin);
#endif
if (_depth>1)
cimg::warn(_cimg_instance
"save_png(): Instance is volumetric, only the first slice will be saved in file '%s'.",
cimg_instance,
filename);
if (_spectrum>4)
cimg::warn(_cimg_instance
"save_png(): Instance is multispectral, only the three first channels will be saved in file '%s'.",
cimg_instance,
filename);
if (stmin<0 || (bytes_per_pixel==1 && stmax>=256) || stmax>=65536)
cimg::warn(_cimg_instance
"save_png(): Instance has pixel values in [%g,%g], probable type overflow in file '%s'.",
cimg_instance,
stmin,stmax,filename);
// Setup PNG structures for write
png_voidp user_error_ptr = 0;
png_error_ptr user_error_fn = 0, user_warning_fn = 0;
png_structp png_ptr = png_create_write_struct(PNG_LIBPNG_VER_STRING,user_error_ptr, user_error_fn,
user_warning_fn);
if (!png_ptr){
if (!file) cimg::fclose(nfile);
throw CImgIOException(_cimg_instance
"save_png(): Failed to initialize 'png_ptr' structure when saving file '%s'.",
cimg_instance,
nfilename?nfilename:"(FILE*)");
}
png_infop info_ptr = png_create_info_struct(png_ptr);
if (!info_ptr) {
png_destroy_write_struct(&png_ptr,(png_infopp)0);
if (!file) cimg::fclose(nfile);
throw CImgIOException(_cimg_instance
"save_png(): Failed to initialize 'info_ptr' structure when saving file '%s'.",
cimg_instance,
nfilename?nfilename:"(FILE*)");
}
if (setjmp(png_jmpbuf(png_ptr))) {
png_destroy_write_struct(&png_ptr, &info_ptr);
if (!file) cimg::fclose(nfile);
throw CImgIOException(_cimg_instance
"save_png(): Encountered unknown fatal error in libpng when saving file '%s'.",
cimg_instance,
nfilename?nfilename:"(FILE*)");
}
png_init_io(png_ptr, nfile);
const int bit_depth = bytes_per_pixel?(bytes_per_pixel*8):(stmax>=256?16:8);
int color_type;
switch (spectrum()) {
case 1 : color_type = PNG_COLOR_TYPE_GRAY; break;
case 2 : color_type = PNG_COLOR_TYPE_GRAY_ALPHA; break;
case 3 : color_type = PNG_COLOR_TYPE_RGB; break;
default : color_type = PNG_COLOR_TYPE_RGB_ALPHA;
}
const int interlace_type = PNG_INTERLACE_NONE;
const int compression_type = PNG_COMPRESSION_TYPE_DEFAULT;
const int filter_method = PNG_FILTER_TYPE_DEFAULT;
png_set_IHDR(png_ptr,info_ptr,_width,_height,bit_depth,color_type,interlace_type,compression_type,filter_method);
png_write_info(png_ptr,info_ptr);
const int byte_depth = bit_depth>>3;
const int numChan = spectrum()>4?4:spectrum();
const int pixel_bit_depth_flag = numChan * (bit_depth - 1);
// Allocate Memory for Image Save and Fill pixel data
png_bytep *const imgData = new png_byte*[_height];
for (unsigned int row = 0; row<_height; ++row) imgData[row] = new png_byte[byte_depth*numChan*_width];
const T *pC0 = data(0,0,0,0);
switch (pixel_bit_depth_flag) {
case 7 : { // Gray 8-bit
cimg_forY(*this,y) {
unsigned char *ptrd = imgData[y];
cimg_forX(*this,x) *(ptrd++) = (unsigned char)*(pC0++);
}
} break;
case 14 : { // Gray w/ Alpha 8-bit
const T *pC1 = data(0,0,0,1);
cimg_forY(*this,y) {
unsigned char *ptrd = imgData[y];
cimg_forX(*this,x) {
*(ptrd++) = (unsigned char)*(pC0++);
*(ptrd++) = (unsigned char)*(pC1++);
}
}
} break;
case 21 : { // RGB 8-bit
const T *pC1 = data(0,0,0,1), *pC2 = data(0,0,0,2);
cimg_forY(*this,y) {
unsigned char *ptrd = imgData[y];
cimg_forX(*this,x) {
*(ptrd++) = (unsigned char)*(pC0++);
*(ptrd++) = (unsigned char)*(pC1++);
*(ptrd++) = (unsigned char)*(pC2++);
}
}
} break;
case 28 : { // RGB x/ Alpha 8-bit
const T *pC1 = data(0,0,0,1), *pC2 = data(0,0,0,2), *pC3 = data(0,0,0,3);
cimg_forY(*this,y){
unsigned char *ptrd = imgData[y];
cimg_forX(*this,x){
*(ptrd++) = (unsigned char)*(pC0++);
*(ptrd++) = (unsigned char)*(pC1++);
*(ptrd++) = (unsigned char)*(pC2++);
*(ptrd++) = (unsigned char)*(pC3++);
}
}
} break;
case 15 : { // Gray 16-bit
cimg_forY(*this,y){
unsigned short *ptrd = (unsigned short*)(imgData[y]);
cimg_forX(*this,x) *(ptrd++) = (unsigned short)*(pC0++);
if (!cimg::endianness()) cimg::invert_endianness((unsigned short*)imgData[y],_width);
}
} break;
case 30 : { // Gray w/ Alpha 16-bit
const T *pC1 = data(0,0,0,1);
cimg_forY(*this,y){
unsigned short *ptrd = (unsigned short*)(imgData[y]);
cimg_forX(*this,x) {
*(ptrd++) = (unsigned short)*(pC0++);
*(ptrd++) = (unsigned short)*(pC1++);
}
if (!cimg::endianness()) cimg::invert_endianness((unsigned short*)imgData[y],2*_width);
}
} break;
case 45 : { // RGB 16-bit
const T *pC1 = data(0,0,0,1), *pC2 = data(0,0,0,2);
cimg_forY(*this,y) {
unsigned short *ptrd = (unsigned short*)(imgData[y]);
cimg_forX(*this,x) {
*(ptrd++) = (unsigned short)*(pC0++);
*(ptrd++) = (unsigned short)*(pC1++);
*(ptrd++) = (unsigned short)*(pC2++);
}
if (!cimg::endianness()) cimg::invert_endianness((unsigned short*)imgData[y],3*_width);
}
} break;
case 60 : { // RGB w/ Alpha 16-bit
const T *pC1 = data(0,0,0,1), *pC2 = data(0,0,0,2), *pC3 = data(0,0,0,3);
cimg_forY(*this,y) {
unsigned short *ptrd = (unsigned short*)(imgData[y]);
cimg_forX(*this,x) {
*(ptrd++) = (unsigned short)*(pC0++);
*(ptrd++) = (unsigned short)*(pC1++);
*(ptrd++) = (unsigned short)*(pC2++);
*(ptrd++) = (unsigned short)*(pC3++);
}
if (!cimg::endianness()) cimg::invert_endianness((unsigned short*)imgData[y],4*_width);
}
} break;
default :
if (!file) cimg::fclose(nfile);
throw CImgIOException(_cimg_instance
"save_png(): Encountered unknown fatal error in libpng when saving file '%s'.",
cimg_instance,
nfilename?nfilename:"(FILE*)");
}
png_write_image(png_ptr,imgData);
png_write_end(png_ptr,info_ptr);
png_destroy_write_struct(&png_ptr, &info_ptr);
// Deallocate Image Write Memory
cimg_forY(*this,n) delete[] imgData[n];
delete[] imgData;
if (!file) cimg::fclose(nfile);
return *this;
#endif
} | 0 | [
"CWE-770"
] | cimg | 619cb58dd90b4e03ac68286c70ed98acbefd1c90 | 248,568,551,998,324,700,000,000,000,000,000,000,000 | 199 | CImg<>::load_bmp() and CImg<>::load_pandore(): Check that dimensions encoded in file does not exceed file size. |
WandExport MagickBooleanType PushDrawingWand(DrawingWand *wand)
{
assert(wand != (DrawingWand *) NULL);
assert(wand->signature == MagickWandSignature);
if (wand->debug != MagickFalse)
(void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
wand->index++;
wand->graphic_context=(DrawInfo **) ResizeQuantumMemory(wand->graphic_context,
(size_t) wand->index+1UL,sizeof(*wand->graphic_context));
if (wand->graphic_context == (DrawInfo **) NULL)
{
wand->index--;
ThrowDrawException(ResourceLimitError,"MemoryAllocationFailed",
wand->name);
return(MagickFalse);
}
CurrentContext=CloneDrawInfo((ImageInfo *) NULL,
wand->graphic_context[wand->index-1]);
(void) MVGPrintf(wand,"push graphic-context\n");
wand->indent_depth++;
return(MagickTrue);
} | 0 | [
"CWE-476"
] | ImageMagick | 6ad5fc3c9b652eec27fc0b1a0817159f8547d5d9 | 36,448,727,188,339,294,000,000,000,000,000,000,000 | 22 | https://github.com/ImageMagick/ImageMagick/issues/716 |
static int _undefine_var(pam_handle_t *pamh, int ctrl, VAR *var)
{
/* We have a variable to undefine, this is a simple function */
D(("Called and exit."));
if (ctrl & PAM_DEBUG_ARG) {
pam_syslog(pamh, LOG_DEBUG, "remove variable \"%s\"", var->name);
}
return pam_putenv(pamh, var->name);
} | 0 | [
"CWE-119"
] | linux-pam | caf5e7f61c8d9288daa49b4f61962e6b1239121d | 50,675,609,831,841,880,000,000,000,000,000,000,000 | 10 | pam_env: correctly count leading whitespace when parsing environment file
* modules/pam_env/pam_env.c (_assemble_line): Correctly count leading
whitespace.
Fixes CVE-2011-3148.
Bug-Ubuntu: https://bugs.launchpad.net/ubuntu/+source/pam/+bug/874469 |
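A minimal sketch of the invariant behind the fix: the number of bytes counted as leading whitespace must equal the number of bytes the parser actually skips, or later size math under-allocates. Illustrative only, not pam_env's parser.

#include <ctype.h>
#include <stddef.h>

static size_t count_leading_ws(const char *p)
{
    size_t n = 0;
    while (p[n] != '\0' && isspace((unsigned char)p[n]))
        n++;                 /* count exactly what will be skipped */
    return n;
}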
static void aac_remove_one(struct pci_dev *pdev)
{
struct Scsi_Host *shost = pci_get_drvdata(pdev);
struct aac_dev *aac = (struct aac_dev *)shost->hostdata;
scsi_remove_host(shost);
__aac_shutdown(aac);
aac_fib_map_free(aac);
pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr,
aac->comm_phys);
kfree(aac->queues);
aac_adapter_ioremap(aac, 0);
kfree(aac->fibs);
kfree(aac->fsa_dev);
list_del(&aac->entry);
scsi_host_put(shost);
pci_disable_device(pdev);
if (list_empty(&aac_devices)) {
unregister_chrdev(aac_cfg_major, "aac");
aac_cfg_major = -1;
}
} | 0 | [
"CWE-284",
"CWE-264"
] | linux | f856567b930dfcdbc3323261bf77240ccdde01f5 | 106,120,339,239,469,700,000,000,000,000,000,000,000 | 26 | aacraid: missing capable() check in compat ioctl
In commit d496f94d22d1 ('[SCSI] aacraid: fix security weakness') we
added a check on CAP_SYS_RAWIO to the ioctl. The compat ioctls need the
check as well.
Signed-off-by: Dan Carpenter <[email protected]>
Cc: [email protected]
Signed-off-by: Linus Torvalds <[email protected]> |
ossl_asn1_decode0(unsigned char **pp, long length, long *offset, int depth,
int yield, long *num_read)
{
unsigned char *start, *p;
const unsigned char *p0;
long len = 0, inner_read = 0, off = *offset, hlen;
int tag, tc, j;
VALUE asn1data, tag_class;
p = *pp;
start = p;
p0 = p;
j = ASN1_get_object(&p0, &len, &tag, &tc, length);
p = (unsigned char *)p0;
if(j & 0x80) ossl_raise(eASN1Error, NULL);
if(len > length) ossl_raise(eASN1Error, "value is too short");
if((tc & V_ASN1_PRIVATE) == V_ASN1_PRIVATE)
tag_class = sym_PRIVATE;
else if((tc & V_ASN1_CONTEXT_SPECIFIC) == V_ASN1_CONTEXT_SPECIFIC)
tag_class = sym_CONTEXT_SPECIFIC;
else if((tc & V_ASN1_APPLICATION) == V_ASN1_APPLICATION)
tag_class = sym_APPLICATION;
else
tag_class = sym_UNIVERSAL;
hlen = p - start;
if(yield) {
VALUE arg = rb_ary_new();
rb_ary_push(arg, LONG2NUM(depth));
rb_ary_push(arg, LONG2NUM(*offset));
rb_ary_push(arg, LONG2NUM(hlen));
rb_ary_push(arg, LONG2NUM(len));
rb_ary_push(arg, (j & V_ASN1_CONSTRUCTED) ? Qtrue : Qfalse);
rb_ary_push(arg, ossl_asn1_class2sym(tc));
rb_ary_push(arg, INT2NUM(tag));
rb_yield(arg);
}
if(j & V_ASN1_CONSTRUCTED) {
*pp += hlen;
off += hlen;
asn1data = int_ossl_asn1_decode0_cons(pp, length, len, &off, depth, yield, j, tag, tag_class, &inner_read);
inner_read += hlen;
}
else {
if ((j & 0x01) && (len == 0)) ossl_raise(eASN1Error, "Infinite length for primitive value");
asn1data = int_ossl_asn1_decode0_prim(pp, len, hlen, tag, tag_class, &inner_read);
off += hlen + len;
}
if (num_read)
*num_read = inner_read;
if (len != 0 && inner_read != hlen + len) {
ossl_raise(eASN1Error,
"Type mismatch. Bytes read: %ld Bytes available: %ld",
inner_read, hlen + len);
}
*offset = off;
return asn1data;
} | 1 | [
"CWE-119"
] | openssl | 1648afef33c1d97fb203c82291b8a61269e85d3b | 72,791,181,393,943,390,000,000,000,000,000,000,000 | 61 | asn1: fix out-of-bounds read in decoding constructed objects
OpenSSL::ASN1.{decode,decode_all,traverse} have an out-of-bounds read bug.
int_ossl_asn1_decode0_cons() does not give the correct available
length to ossl_asn1_decode() when decoding the inner components of a
constructed object. This can cause an out-of-bounds read if a crafted
input is given.
Reference: https://hackerone.com/reports/170316 |
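A minimal sketch of the recursion invariant the fix restores, with an illustrative TLV parser: a constructed value's children must be decoded against the value's own length, not the outer buffer's remaining length, or the child parser can read past its parent's end.

#include <stddef.h>

struct tlv { int tag; size_t hdr_len, len; };

static int parse_header(const unsigned char *p, size_t avail, struct tlv *t)
{
    if (avail < 2) return -1;
    t->tag = p[0];
    t->hdr_len = 2;
    t->len = p[1];            /* short-form definite length, for the sketch */
    return 0;
}

static int decode(const unsigned char *p, size_t avail)
{
    struct tlv t;
    if (parse_header(p, avail, &t) != 0 || t.len > avail - t.hdr_len)
        return -1;                 /* value claims more than is available */
    if (t.tag == 0x30 && t.len)    /* SEQUENCE: constructed */
        return decode(p + t.hdr_len, t.len);  /* bound by t.len, not avail */
    return 0;
}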
NTSTATUS smb1cli_req_chain_submit(struct tevent_req **reqs, int num_reqs)
{
struct smbXcli_req_state *first_state =
tevent_req_data(reqs[0],
struct smbXcli_req_state);
struct smbXcli_req_state *state;
size_t wct_offset;
size_t chain_padding = 0;
int i, iovlen;
struct iovec *iov = NULL;
struct iovec *this_iov;
NTSTATUS status;
ssize_t nbt_len;
if (num_reqs == 1) {
return smb1cli_req_writev_submit(reqs[0], first_state,
first_state->smb1.iov,
first_state->smb1.iov_count);
}
iovlen = 0;
for (i=0; i<num_reqs; i++) {
if (!tevent_req_is_in_progress(reqs[i])) {
return NT_STATUS_INTERNAL_ERROR;
}
state = tevent_req_data(reqs[i], struct smbXcli_req_state);
if (state->smb1.iov_count < 4) {
return NT_STATUS_INVALID_PARAMETER_MIX;
}
if (i == 0) {
/*
* The NBT and SMB header
*/
iovlen += 2;
} else {
/*
* Chain padding
*/
iovlen += 1;
}
/*
* words and bytes
*/
iovlen += state->smb1.iov_count - 2;
}
iov = talloc_zero_array(first_state, struct iovec, iovlen);
if (iov == NULL) {
return NT_STATUS_NO_MEMORY;
}
first_state->smb1.chained_requests = (struct tevent_req **)talloc_memdup(
first_state, reqs, sizeof(*reqs) * num_reqs);
if (first_state->smb1.chained_requests == NULL) {
TALLOC_FREE(iov);
return NT_STATUS_NO_MEMORY;
}
wct_offset = HDR_WCT;
this_iov = iov;
for (i=0; i<num_reqs; i++) {
size_t next_padding = 0;
uint16_t *vwv;
state = tevent_req_data(reqs[i], struct smbXcli_req_state);
if (i < num_reqs-1) {
if (!smb1cli_is_andx_req(CVAL(state->smb1.hdr, HDR_COM))
|| CVAL(state->smb1.hdr, HDR_WCT) < 2) {
TALLOC_FREE(iov);
TALLOC_FREE(first_state->smb1.chained_requests);
return NT_STATUS_INVALID_PARAMETER_MIX;
}
}
wct_offset += smbXcli_iov_len(state->smb1.iov+2,
state->smb1.iov_count-2) + 1;
if ((wct_offset % 4) != 0) {
next_padding = 4 - (wct_offset % 4);
}
wct_offset += next_padding;
vwv = state->smb1.vwv;
if (i < num_reqs-1) {
struct smbXcli_req_state *next_state =
tevent_req_data(reqs[i+1],
struct smbXcli_req_state);
SCVAL(vwv+0, 0, CVAL(next_state->smb1.hdr, HDR_COM));
SCVAL(vwv+0, 1, 0);
SSVAL(vwv+1, 0, wct_offset);
} else if (smb1cli_is_andx_req(CVAL(state->smb1.hdr, HDR_COM))) {
/* properly end the chain */
SCVAL(vwv+0, 0, 0xff);
SCVAL(vwv+0, 1, 0xff);
SSVAL(vwv+1, 0, 0);
}
if (i == 0) {
/*
* The NBT and SMB header
*/
this_iov[0] = state->smb1.iov[0];
this_iov[1] = state->smb1.iov[1];
this_iov += 2;
} else {
/*
* This one is a bit subtle. We have to add
* chain_padding bytes between the requests, and we
* have to also include the wct field of the
* subsequent requests. We use the subsequent header
* for the padding, it contains the wct field in its
* last byte.
*/
this_iov[0].iov_len = chain_padding+1;
this_iov[0].iov_base = (void *)&state->smb1.hdr[
sizeof(state->smb1.hdr) - this_iov[0].iov_len];
memset(this_iov[0].iov_base, 0, this_iov[0].iov_len-1);
this_iov += 1;
}
/*
* copy the words and bytes
*/
memcpy(this_iov, state->smb1.iov+2,
sizeof(struct iovec) * (state->smb1.iov_count-2));
this_iov += state->smb1.iov_count - 2;
chain_padding = next_padding;
}
nbt_len = iov_buflen(&iov[1], iovlen-1);
if ((nbt_len == -1) || (nbt_len > first_state->conn->smb1.max_xmit)) {
TALLOC_FREE(iov);
TALLOC_FREE(first_state->smb1.chained_requests);
return NT_STATUS_INVALID_PARAMETER_MIX;
}
status = smb1cli_req_writev_submit(reqs[0], first_state, iov, iovlen);
if (!NT_STATUS_IS_OK(status)) {
TALLOC_FREE(iov);
TALLOC_FREE(first_state->smb1.chained_requests);
return status;
}
return NT_STATUS_OK;
} | 0 | [
"CWE-20"
] | samba | a819d2b440aafa3138d95ff6e8b824da885a70e9 | 40,330,741,897,126,647,000,000,000,000,000,000,000 | 150 | CVE-2015-5296: libcli/smb: make sure we require signing when we demand encryption on a session
BUG: https://bugzilla.samba.org/show_bug.cgi?id=11536
Signed-off-by: Stefan Metzmacher <[email protected]>
Reviewed-by: Jeremy Allison <[email protected]> |
file_tryelf(struct magic_set *ms, int fd, const unsigned char *buf,
size_t nbytes)
{
union {
int32_t l;
char c[sizeof (int32_t)];
} u;
int clazz;
int swap;
struct stat st;
off_t fsize;
int flags = 0;
Elf32_Ehdr elf32hdr;
Elf64_Ehdr elf64hdr;
uint16_t type, phnum, shnum;
if (ms->flags & (MAGIC_MIME|MAGIC_APPLE))
return 0;
/*
* ELF executables have multiple section headers in arbitrary
* file locations and thus file(1) cannot determine it easily.
* Instead we traverse thru all section headers until a symbol table
* one is found or else the binary is stripped.
* Return immediately if it's not ELF (so we avoid pipe2file unless needed).
*/
if (buf[EI_MAG0] != ELFMAG0
|| (buf[EI_MAG1] != ELFMAG1 && buf[EI_MAG1] != OLFMAG1)
|| buf[EI_MAG2] != ELFMAG2 || buf[EI_MAG3] != ELFMAG3)
return 0;
/*
* If we cannot seek, it must be a pipe, socket or fifo.
*/
if((lseek(fd, (off_t)0, SEEK_SET) == (off_t)-1) && (errno == ESPIPE))
fd = file_pipe2file(ms, fd, buf, nbytes);
if (fstat(fd, &st) == -1) {
file_badread(ms);
return -1;
}
fsize = st.st_size;
clazz = buf[EI_CLASS];
switch (clazz) {
case ELFCLASS32:
#undef elf_getu
#define elf_getu(a, b) elf_getu32(a, b)
#undef elfhdr
#define elfhdr elf32hdr
#include "elfclass.h"
case ELFCLASS64:
#undef elf_getu
#define elf_getu(a, b) elf_getu64(a, b)
#undef elfhdr
#define elfhdr elf64hdr
#include "elfclass.h"
default:
if (file_printf(ms, ", unknown class %d", clazz) == -1)
return -1;
break;
}
return 0;
} | 0 | [
"CWE-399",
"CWE-284"
] | file | b4c01141e5367f247b84dcaf6aefbb4e741842b8 | 41,093,658,840,263,970,000,000,000,000,000,000,000 | 64 | - limit the number of program and section header number of sections to be
processed to avoid excessive processing time.
- if a bad note is found, return 0 to stop processing immediately. |
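A minimal sketch of the mitigation: cap the header counts taken from the ELF header before walking them, so a crafted binary cannot demand unbounded work. The limit values are illustrative, not file(1)'s actual constants.

#include <stdint.h>

#define MAX_PHNUM 128     /* assumed cap on program headers */
#define MAX_SHNUM 32768   /* assumed cap on section headers */

static int elf_counts_ok(uint16_t phnum, uint16_t shnum)
{
    return phnum <= MAX_PHNUM && shnum <= MAX_SHNUM;
}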
static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
struct ems_usb *dev = netdev_priv(netdev);
struct ems_tx_urb_context *context = NULL;
struct net_device_stats *stats = &netdev->stats;
struct can_frame *cf = (struct can_frame *)skb->data;
struct ems_cpc_msg *msg;
struct urb *urb;
u8 *buf;
int i, err;
size_t size = CPC_HEADER_SIZE + CPC_MSG_HEADER_LEN
+ sizeof(struct cpc_can_msg);
if (can_dropped_invalid_skb(netdev, skb))
return NETDEV_TX_OK;
/* create a URB, and a buffer for it, and copy the data to the URB */
urb = usb_alloc_urb(0, GFP_ATOMIC);
if (!urb)
goto nomem;
buf = usb_alloc_coherent(dev->udev, size, GFP_ATOMIC, &urb->transfer_dma);
if (!buf) {
netdev_err(netdev, "No memory left for USB buffer\n");
usb_free_urb(urb);
goto nomem;
}
msg = (struct ems_cpc_msg *)&buf[CPC_HEADER_SIZE];
msg->msg.can_msg.id = cpu_to_le32(cf->can_id & CAN_ERR_MASK);
msg->msg.can_msg.length = cf->len;
if (cf->can_id & CAN_RTR_FLAG) {
msg->type = cf->can_id & CAN_EFF_FLAG ?
CPC_CMD_TYPE_EXT_RTR_FRAME : CPC_CMD_TYPE_RTR_FRAME;
msg->length = CPC_CAN_MSG_MIN_SIZE;
} else {
msg->type = cf->can_id & CAN_EFF_FLAG ?
CPC_CMD_TYPE_EXT_CAN_FRAME : CPC_CMD_TYPE_CAN_FRAME;
for (i = 0; i < cf->len; i++)
msg->msg.can_msg.msg[i] = cf->data[i];
msg->length = CPC_CAN_MSG_MIN_SIZE + cf->len;
}
for (i = 0; i < MAX_TX_URBS; i++) {
if (dev->tx_contexts[i].echo_index == MAX_TX_URBS) {
context = &dev->tx_contexts[i];
break;
}
}
/*
* Should never happen: it would mean more URBs were in flight than
* allowed (MAX_TX_URBS).
*/
if (!context) {
usb_free_coherent(dev->udev, size, buf, urb->transfer_dma);
usb_free_urb(urb);
netdev_warn(netdev, "couldn't find free context\n");
return NETDEV_TX_BUSY;
}
context->dev = dev;
context->echo_index = i;
usb_fill_bulk_urb(urb, dev->udev, usb_sndbulkpipe(dev->udev, 2), buf,
size, ems_usb_write_bulk_callback, context);
urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
usb_anchor_urb(urb, &dev->tx_submitted);
can_put_echo_skb(skb, netdev, context->echo_index, 0);
atomic_inc(&dev->active_tx_urbs);
err = usb_submit_urb(urb, GFP_ATOMIC);
if (unlikely(err)) {
can_free_echo_skb(netdev, context->echo_index, NULL);
usb_unanchor_urb(urb);
usb_free_coherent(dev->udev, size, buf, urb->transfer_dma);
atomic_dec(&dev->active_tx_urbs);
if (err == -ENODEV) {
netif_device_detach(netdev);
} else {
netdev_warn(netdev, "failed tx_urb %d\n", err);
stats->tx_dropped++;
}
} else {
netif_trans_update(netdev);
/* Slow down tx path */
if (atomic_read(&dev->active_tx_urbs) >= MAX_TX_URBS ||
dev->free_slots < CPC_TX_QUEUE_TRIGGER_LOW) {
netif_stop_queue(netdev);
}
}
/*
* Release our reference to this URB, the USB core will eventually free
* it entirely.
*/
usb_free_urb(urb);
return NETDEV_TX_OK;
nomem:
dev_kfree_skb(skb);
stats->tx_dropped++;
return NETDEV_TX_OK;
} | 0 | [
"CWE-415"
] | linux | c70222752228a62135cee3409dccefd494a24646 | 307,040,999,029,903,540,000,000,000,000,000,000,000 | 120 | can: ems_usb: ems_usb_start_xmit(): fix double dev_kfree_skb() in error path
There is no need to call dev_kfree_skb() when usb_submit_urb() fails
because can_put_echo_skb() deletes the original skb and
can_free_echo_skb() deletes the cloned skb.
Link: https://lore.kernel.org/all/[email protected]
Fixes: 702171adeed3 ("ems_usb: Added support for EMS CPC-USB/ARM7 CAN/USB interface")
Cc: [email protected]
Cc: Sebastian Haas <[email protected]>
Signed-off-by: Hangyu Hua <[email protected]>
Signed-off-by: Marc Kleine-Budde <[email protected]> |
void qemu_input_event_send_key_number(QemuConsole *src, int num, bool down)
{
KeyValue *key = g_new0(KeyValue, 1);
key->type = KEY_VALUE_KIND_NUMBER;
key->u.number.data = num;
qemu_input_event_send_key(src, key, down);
} | 0 | [
"CWE-772"
] | qemu | fa18f36a461984eae50ab957e47ec78dae3c14fc | 244,471,811,719,442,600,000,000,000,000,000,000,000 | 7 | input: limit kbd queue depth
Apply a limit to the number of items we accept into the keyboard queue.
Impact: Without this limit vnc clients can exhaust host memory by
sending keyboard events faster than qemu feeds them to the guest.
Fixes: CVE-2017-8379
Cc: P J P <[email protected]>
Cc: Huawei PSIRT <[email protected]>
Reported-by: [email protected]
Signed-off-by: Gerd Hoffmann <[email protected]>
Message-id: [email protected] |
void ping_seq_stop(struct seq_file *seq, void *v)
{
read_unlock_bh(&ping_table.lock);
} | 0 | [
"CWE-20"
] | net | bceaa90240b6019ed73b49965eac7d167610be69 | 19,934,675,725,526,576,000,000,000,000,000,000,000 | 4 | inet: prevent leakage of uninitialized memory to user in recv syscalls
Only update *addr_len when we actually fill in sockaddr, otherwise we
can return uninitialized memory from the stack to the caller in the
recvfrom, recvmmsg and recvmsg syscalls. Drop the (addr_len == NULL)
checks because we only get called with a valid addr_len pointer either
from sock_common_recvmsg or inet_recvmsg.
If a blocking read waits on a socket which is concurrently shut down we
now return zero and set msg_msgnamelen to 0.
Reported-by: mpb <[email protected]>
Suggested-by: Eric Dumazet <[email protected]>
Signed-off-by: Hannes Frederic Sowa <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
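A simplified sketch of the receive-path pattern the commit enforces (types reduced to a stub):
#include <string.h>

struct sockaddr_stub { char data[16]; };

/* *addr_len is written only on the path that actually fills in the
 * sockaddr, so uninitialized stack memory is never reported back. */
static int recv_sketch(struct sockaddr_stub *sa, int *addr_len, int have_peer)
{
    if (have_peer && sa != NULL) {
        memset(sa, 0, sizeof(*sa));     /* stand-in for filling the peer address */
        *addr_len = (int)sizeof(*sa);
    }
    return 0;   /* all other paths leave *addr_len untouched */
}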
evbuffer_write_sendfile(struct evbuffer *buffer, evutil_socket_t dest_fd,
ev_ssize_t howmuch)
{
struct evbuffer_chain *chain = buffer->first;
struct evbuffer_chain_file_segment *info =
EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_file_segment,
chain);
const int source_fd = info->segment->fd;
#if defined(SENDFILE_IS_MACOSX) || defined(SENDFILE_IS_FREEBSD)
int res;
ev_off_t len = chain->off;
#elif defined(SENDFILE_IS_LINUX) || defined(SENDFILE_IS_SOLARIS)
ev_ssize_t res;
ev_off_t offset = chain->misalign;
#endif
ASSERT_EVBUFFER_LOCKED(buffer);
#if defined(SENDFILE_IS_MACOSX)
res = sendfile(source_fd, dest_fd, chain->misalign, &len, NULL, 0);
if (res == -1 && !EVUTIL_ERR_RW_RETRIABLE(errno))
return (-1);
return (len);
#elif defined(SENDFILE_IS_FREEBSD)
res = sendfile(source_fd, dest_fd, chain->misalign, chain->off, NULL, &len, 0);
if (res == -1 && !EVUTIL_ERR_RW_RETRIABLE(errno))
return (-1);
return (len);
#elif defined(SENDFILE_IS_LINUX)
/* TODO(niels): implement splice */
res = sendfile(dest_fd, source_fd, &offset, chain->off);
if (res == -1 && EVUTIL_ERR_RW_RETRIABLE(errno)) {
/* if this is EAGAIN or EINTR return 0; otherwise, -1 */
return (0);
}
return (res);
#elif defined(SENDFILE_IS_SOLARIS)
{
const off_t offset_orig = offset;
res = sendfile(dest_fd, source_fd, &offset, chain->off);
if (res == -1 && EVUTIL_ERR_RW_RETRIABLE(errno)) {
if (offset - offset_orig)
return offset - offset_orig;
/* if this is EAGAIN or EINTR and no bytes were
* written, return 0 */
return (0);
}
return (res);
}
#endif
} | 0 | [
"CWE-189"
] | libevent | 841ecbd96105c84ac2e7c9594aeadbcc6fb38bc4 | 203,770,315,605,637,350,000,000,000,000,000,000,000 | 53 | Fix CVE-2014-6272 in Libevent 2.1
For this fix, we need to make sure that passing too-large inputs to
the evbuffer functions can't make us do bad things with the heap.
Also, lower the maximum chunk size to the smaller of the off_t and size_t maximums.
This is necessary since otherwise we could get into an infinite loop
if we make a chunk that 'misalign' cannot index into. |
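A sketch of the chunk-size clamp described above; names are illustrative, not libevent's:
#include <stdint.h>
#include <sys/types.h>

/* The cap is the largest value that both size_t and (signed) off_t
 * can represent. */
#define OFF_T_MAX ((uint64_t)((((uint64_t)1) << (sizeof(off_t) * 8 - 1)) - 1))

static const uint64_t chain_size_max =
    OFF_T_MAX < (uint64_t)SIZE_MAX ? OFF_T_MAX : (uint64_t)SIZE_MAX;

static int chain_size_ok(size_t want)
{
    return (uint64_t)want <= chain_size_max;    /* reject oversized requests */
}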
proxy_create (Proxy **res, CK_FUNCTION_LIST **loaded,
Mapping *mappings, unsigned int n_mappings)
{
CK_RV rv = CKR_OK;
Proxy *py;
py = calloc (1, sizeof (Proxy));
return_val_if_fail (py != NULL, CKR_HOST_MEMORY);
py->forkid = p11_forkid;
py->last_id = 0;
py->inited = modules_dup (loaded);
if (py->inited == NULL) {
proxy_free (py, 0);
return_val_if_reached (CKR_HOST_MEMORY);
}
rv = p11_kit_modules_initialize (py->inited, NULL);
if (rv == CKR_OK) {
rv = proxy_list_slots (py, mappings, n_mappings);
}
if (rv != CKR_OK) {
proxy_free (py, 1);
return rv;
}
py->sessions = p11_dict_new (p11_dict_ulongptr_hash, p11_dict_ulongptr_equal, NULL, free);
if (py->sessions == NULL) {
proxy_free (py, 1);
return_val_if_reached (CKR_HOST_MEMORY);
}
py->refs = 1;
*res = py;
return CKR_OK;
} | 0 | [
"CWE-190"
] | p11-kit | 5307a1d21a50cacd06f471a873a018d23ba4b963 | 244,019,177,154,739,800,000,000,000,000,000,000,000 | 39 | Check for arithmetic overflows before allocating |
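A self-contained sketch of the overflow-before-allocation pattern the summary refers to:
#include <stdint.h>
#include <stdlib.h>

/* Refuse to allocate when nmemb * size would wrap around size_t;
 * an attacker-influenced count must not yield a too-small buffer. */
static void *checked_alloc(size_t nmemb, size_t size)
{
    if (size != 0 && nmemb > SIZE_MAX / size)
        return NULL;
    return calloc(nmemb, size);     /* calloc performs the same check itself */
}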
Field_float(uint32 len_arg, bool maybe_null_arg,
const LEX_CSTRING *field_name_arg, uint8 dec_arg)
:Field_real((uchar*) 0, len_arg, maybe_null_arg ? (uchar*) "": 0, (uint) 0,
NONE, field_name_arg, dec_arg, 0, 0)
{
if (dec_arg >= FLOATING_POINT_DECIMALS)
dec_arg= NOT_FIXED_DEC;
} | 0 | [
"CWE-416",
"CWE-703"
] | server | 08c7ab404f69d9c4ca6ca7a9cf7eec74c804f917 | 276,006,537,257,256,600,000,000,000,000,000,000,000 | 8 | MDEV-24176 Server crashes after insert in the table with virtual
column generated using date_format() and if()
vcol_info->expr is allocated on expr_arena at parsing stage. Since
expr item is allocated on expr_arena all its containee items must be
allocated on expr_arena too. Otherwise fix_session_expr() will
encounter prematurely freed item.
When table is reopened from cache vcol_info contains stale
expression. We refresh expression via TABLE::vcol_fix_exprs() but
first we must prepare a proper context (Vcol_expr_context) which meets
some requirements:
1. As noted above, the expr update must be done on expr_arena, as new
items may be created. This was a bug in fix_session_expr_for_read() that
simply did not reproduce before because there was no second refix. Now
that refix is done in more cases, it does reproduce. Tests affected: vcol.binlog
2. Also name resolution context must be narrowed to the single table.
Tested by: vcol.update main.default vcol.vcol_syntax gcol.gcol_bugfixes
3. sql_mode must be clean and not fail expr update.
sql_mode such as MODE_NO_BACKSLASH_ESCAPES, MODE_NO_ZERO_IN_DATE, etc
must not affect vcol expression update. If the table was created
successfully any further evaluation must not fail. Tests affected:
main.func_like
Reviewed by: Sergei Golubchik <[email protected]> |
static void vrend_update_scissor_state(struct vrend_context *ctx)
{
struct pipe_scissor_state *ss;
GLint y;
GLuint idx;
unsigned mask = ctx->sub->scissor_state_dirty;
while (mask) {
idx = u_bit_scan(&mask);
if (idx >= PIPE_MAX_VIEWPORTS) {
vrend_report_buffer_error(ctx, 0);
break;
}
ss = &ctx->sub->ss[idx];
y = ss->miny;
if (idx > 0 && has_feature(feat_viewport_array))
glScissorIndexed(idx, ss->minx, y, ss->maxx - ss->minx, ss->maxy - ss->miny);
else
glScissor(ss->minx, y, ss->maxx - ss->minx, ss->maxy - ss->miny);
}
ctx->sub->scissor_state_dirty = 0;
} | 0 | [
"CWE-787"
] | virglrenderer | cbc8d8b75be360236cada63784046688aeb6d921 | 155,487,082,220,230,100,000,000,000,000,000,000,000 | 23 | vrend: check transfer bounds for negative values too and report error
Closes #138
Signed-off-by: Gert Wollny <[email protected]>
Reviewed-by: Emil Velikov <[email protected]> |
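A sketch of the kind of check the commit adds, assuming non-negative resource dimensions and an illustrative box layout:
#include <stdint.h>

struct box { int32_t x, y, w, h; };     /* illustrative transfer-box layout */

/* Reject negative coordinates/extents as well as boxes that reach past
 * the resource; all operands stay non-negative, so the subtractions
 * below cannot overflow. */
static int transfer_bounds_ok(const struct box *b, int32_t res_w, int32_t res_h)
{
    if (b->x < 0 || b->y < 0 || b->w < 0 || b->h < 0)
        return 0;
    if (b->x > res_w - b->w || b->y > res_h - b->h)
        return 0;
    return 1;
}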
static int ZEND_FASTCALL ZEND_IS_NOT_IDENTICAL_SPEC_VAR_VAR_HANDLER(ZEND_OPCODE_HANDLER_ARGS)
{
zend_op *opline = EX(opline);
zend_free_op free_op1, free_op2;
zval *result = &EX_T(opline->result.u.var).tmp_var;
is_identical_function(result,
_get_zval_ptr_var(&opline->op1, EX(Ts), &free_op1 TSRMLS_CC),
_get_zval_ptr_var(&opline->op2, EX(Ts), &free_op2 TSRMLS_CC) TSRMLS_CC);
Z_LVAL_P(result) = !Z_LVAL_P(result);
if (free_op1.var) {zval_ptr_dtor(&free_op1.var);};
if (free_op2.var) {zval_ptr_dtor(&free_op2.var);};
ZEND_VM_NEXT_OPCODE();
} | 0 | [] | php-src | ce96fd6b0761d98353761bf78d5bfb55291179fd | 200,304,268,666,244,260,000,000,000,000,000,000,000 | 14 | - fix #39863, do not accept paths with NULL in them. See http://news.php.net/php.internals/50191, trunk will have the patch later (adding a macro and/or changing (some) APIs). Patch by Rasmus |
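A standalone sketch of the embedded-NUL check such a fix needs:
#include <string.h>

/* A buffer that contains '\0' before its reported length would be
 * silently truncated by NUL-terminated filesystem APIs; reject it. */
static int path_has_no_nul(const char *path, size_t len)
{
    return memchr(path, '\0', len) == NULL;
}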
uint64_t crypt_get_iv_offset(struct crypt_device *cd)
{
if (!cd)
return 0;
if (isPLAIN(cd->type))
return cd->u.plain.hdr.skip;
if (isLOOPAES(cd->type))
return cd->u.loopaes.hdr.skip;
if (isTCRYPT(cd->type))
return TCRYPT_get_iv_offset(cd, &cd->u.tcrypt.hdr, &cd->u.tcrypt.params);
return 0;
} | 0 | [
"CWE-345"
] | cryptsetup | 0113ac2d889c5322659ad0596d4cfc6da53e356c | 325,998,553,533,752,120,000,000,000,000,000,000,000 | 16 | Fix CVE-2021-4122 - LUKS2 reencryption crash recovery attack
Fix possible attacks against data confidentiality through LUKS2 online
reencryption extension crash recovery.
An attacker can modify on-disk metadata to simulate decryption in
progress with crashed (unfinished) reencryption step and persistently
decrypt part of the LUKS device.
This attack requires repeated physical access to the LUKS device but
no knowledge of user passphrases.
The decryption step is performed after a valid user activates
the device with a correct passphrase and modified metadata.
There are no visible warnings for the user that such recovery happened
(except using the luksDump command). The attack can also be reversed
afterward (simulating crashed encryption from a plaintext) with
possible modification of revealed plaintext.
The problem was caused by reusing a mechanism designed for actual
reencryption operation without reassessing the security impact for new
encryption and decryption operations. While the reencryption requires
calculating and verifying both key digests, no digest was needed to
initiate decryption recovery if the destination is plaintext (no
encryption key). Also, some metadata (like encryption cipher) is not
protected, and an attacker could change it. Note that LUKS2 protects
visible metadata only when a random change occurs. It does not protect
against intentional modification but such modification must not cause
a violation of data confidentiality.
The fix introduces additional digest protection of reencryption
metadata. The digest is calculated from known keys and critical
reencryption metadata. Now an attacker cannot create correct metadata
digest without knowledge of a passphrase for used keyslots.
For more details, see LUKS2 On-Disk Format Specification version 1.1.0. |
NO_INLINE JsVar *jspeStatementFor() {
JSP_ASSERT_MATCH(LEX_R_FOR);
JSP_MATCH('(');
bool wasInLoop = (execInfo.execute&EXEC_IN_LOOP)!=0;
execInfo.execute |= EXEC_FOR_INIT;
// initialisation
JsVar *forStatement = 0;
// we could have 'for (;;)' - so don't munch up our semicolon if that's all we have
if (lex->tk != ';')
forStatement = jspeStatement();
if (jspIsInterrupted()) {
jsvUnLock(forStatement);
return 0;
}
execInfo.execute &= (JsExecFlags)~EXEC_FOR_INIT;
if (lex->tk == LEX_R_IN) {
// for (i in array)
// where i = jsvUnLock(forStatement);
if (JSP_SHOULD_EXECUTE && !jsvIsName(forStatement)) {
jsvUnLock(forStatement);
jsExceptionHere(JSET_ERROR, "FOR a IN b - 'a' must be a variable name, not %t", forStatement);
return 0;
}
JSP_MATCH_WITH_CLEANUP_AND_RETURN(LEX_R_IN, jsvUnLock(forStatement), 0);
JsVar *array = jsvSkipNameAndUnLock(jspeExpression());
JSP_MATCH_WITH_CLEANUP_AND_RETURN(')', jsvUnLock2(forStatement, array), 0);
JslCharPos forBodyStart = jslCharPosClone(&lex->tokenStart);
JSP_SAVE_EXECUTE();
jspSetNoExecute();
execInfo.execute |= EXEC_IN_LOOP;
jsvUnLock(jspeBlockOrStatement());
JslCharPos forBodyEnd = jslCharPosClone(&lex->tokenStart);
if (!wasInLoop) execInfo.execute &= (JsExecFlags)~EXEC_IN_LOOP;
JSP_RESTORE_EXECUTE();
if (JSP_SHOULD_EXECUTE) {
if (jsvIsIterable(array)) {
JsvIsInternalChecker checkerFunction = jsvGetInternalFunctionCheckerFor(array);
JsVar *foundPrototype = 0;
JsvIterator it;
jsvIteratorNew(&it, array, JSIF_DEFINED_ARRAY_ElEMENTS);
bool hasHadBreak = false;
while (JSP_SHOULD_EXECUTE && jsvIteratorHasElement(&it) && !hasHadBreak) {
JsVar *loopIndexVar = jsvIteratorGetKey(&it);
bool ignore = false;
if (checkerFunction && checkerFunction(loopIndexVar)) {
ignore = true;
if (jsvIsString(loopIndexVar) &&
jsvIsStringEqual(loopIndexVar, JSPARSE_INHERITS_VAR))
foundPrototype = jsvSkipName(loopIndexVar);
}
if (!ignore) {
JsVar *indexValue = jsvIsName(loopIndexVar) ?
jsvCopyNameOnly(loopIndexVar, false/*no copy children*/, false/*not a name*/) :
loopIndexVar;
if (indexValue) { // could be out of memory
assert(!jsvIsName(indexValue) && jsvGetRefs(indexValue)==0);
jspReplaceWithOrAddToRoot(forStatement, indexValue);
if (indexValue!=loopIndexVar) jsvUnLock(indexValue);
jsvIteratorNext(&it);
jslSeekToP(&forBodyStart);
execInfo.execute |= EXEC_IN_LOOP;
jspDebuggerLoopIfCtrlC();
jsvUnLock(jspeBlockOrStatement());
if (!wasInLoop) execInfo.execute &= (JsExecFlags)~EXEC_IN_LOOP;
if (execInfo.execute & EXEC_CONTINUE)
execInfo.execute = EXEC_YES;
else if (execInfo.execute & EXEC_BREAK) {
execInfo.execute = EXEC_YES;
hasHadBreak = true;
}
}
} else
jsvIteratorNext(&it);
jsvUnLock(loopIndexVar);
if (!jsvIteratorHasElement(&it) && foundPrototype) {
jsvIteratorFree(&it);
jsvIteratorNew(&it, foundPrototype, JSIF_DEFINED_ARRAY_ElEMENTS);
jsvUnLock(foundPrototype);
foundPrototype = 0;
}
}
assert(!foundPrototype);
jsvIteratorFree(&it);
} else if (!jsvIsUndefined(array)) {
jsExceptionHere(JSET_ERROR, "FOR loop can only iterate over Arrays, Strings or Objects, not %t", array);
}
}
jslSeekToP(&forBodyEnd);
jslCharPosFree(&forBodyStart);
jslCharPosFree(&forBodyEnd);
jsvUnLock2(forStatement, array);
} else { // ----------------------------------------------- NORMAL FOR LOOP
#ifdef JSPARSE_MAX_LOOP_ITERATIONS
int loopCount = JSPARSE_MAX_LOOP_ITERATIONS;
#endif
bool loopCond = true;
bool hasHadBreak = false;
jsvUnLock(forStatement);
JSP_MATCH(';');
JslCharPos forCondStart = jslCharPosClone(&lex->tokenStart);
if (lex->tk != ';') {
JsVar *cond = jspeAssignmentExpression(); // condition
loopCond = JSP_SHOULD_EXECUTE && jsvGetBoolAndUnLock(jsvSkipName(cond));
jsvUnLock(cond);
}
JSP_MATCH_WITH_CLEANUP_AND_RETURN(';',jslCharPosFree(&forCondStart);,0);
JslCharPos forIterStart = jslCharPosClone(&lex->tokenStart);
if (lex->tk != ')') { // we could have 'for (;;)'
JSP_SAVE_EXECUTE();
jspSetNoExecute();
jsvUnLock(jspeExpression()); // iterator
JSP_RESTORE_EXECUTE();
}
JSP_MATCH_WITH_CLEANUP_AND_RETURN(')',jslCharPosFree(&forCondStart);jslCharPosFree(&forIterStart);,0);
JslCharPos forBodyStart = jslCharPosClone(&lex->tokenStart); // actual for body
JSP_SAVE_EXECUTE();
if (!loopCond) jspSetNoExecute();
execInfo.execute |= EXEC_IN_LOOP;
jsvUnLock(jspeBlockOrStatement());
JslCharPos forBodyEnd = jslCharPosClone(&lex->tokenStart);
if (!wasInLoop) execInfo.execute &= (JsExecFlags)~EXEC_IN_LOOP;
if (loopCond || !JSP_SHOULD_EXECUTE) {
if (execInfo.execute & EXEC_CONTINUE)
execInfo.execute = EXEC_YES;
else if (execInfo.execute & EXEC_BREAK) {
execInfo.execute = EXEC_YES;
hasHadBreak = true;
}
}
if (!loopCond) JSP_RESTORE_EXECUTE();
if (loopCond) {
jslSeekToP(&forIterStart);
if (lex->tk != ')') jsvUnLock(jspeExpression());
}
while (!hasHadBreak && JSP_SHOULD_EXECUTE && loopCond
#ifdef JSPARSE_MAX_LOOP_ITERATIONS
&& loopCount-->0
#endif
) {
jslSeekToP(&forCondStart);
;
if (lex->tk == ';') {
loopCond = true;
} else {
JsVar *cond = jspeAssignmentExpression();
loopCond = jsvGetBoolAndUnLock(jsvSkipName(cond));
jsvUnLock(cond);
}
if (JSP_SHOULD_EXECUTE && loopCond) {
jslSeekToP(&forBodyStart);
execInfo.execute |= EXEC_IN_LOOP;
jspDebuggerLoopIfCtrlC();
jsvUnLock(jspeBlockOrStatement());
if (!wasInLoop) execInfo.execute &= (JsExecFlags)~EXEC_IN_LOOP;
if (execInfo.execute & EXEC_CONTINUE)
execInfo.execute = EXEC_YES;
else if (execInfo.execute & EXEC_BREAK) {
execInfo.execute = EXEC_YES;
hasHadBreak = true;
}
}
if (JSP_SHOULD_EXECUTE && loopCond && !hasHadBreak) {
jslSeekToP(&forIterStart);
if (lex->tk != ')') jsvUnLock(jspeExpression());
}
}
jslSeekToP(&forBodyEnd);
jslCharPosFree(&forCondStart);
jslCharPosFree(&forIterStart);
jslCharPosFree(&forBodyStart);
jslCharPosFree(&forBodyEnd);
#ifdef JSPARSE_MAX_LOOP_ITERATIONS
if (loopCount<=0) {
jsExceptionHere(JSET_ERROR, "FOR Loop exceeded the maximum number of iterations ("STRINGIFY(JSPARSE_MAX_LOOP_ITERATIONS)")");
}
#endif
}
return 0;
} | 0 | [
"CWE-125"
] | Espruino | bf4416ab9129ee3afd56739ea4e3cd0da5484b6b | 21,919,710,047,607,575,000,000,000,000,000,000,000 | 190 | Fix bug when using an undefined member of an object in for..in (fix #1437) |
static unsigned long arch_get_unmapped_area_common(struct file *filp,
unsigned long addr0, unsigned long len, unsigned long pgoff,
unsigned long flags, enum mmap_allocation_direction dir)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
unsigned long addr = addr0;
int do_color_align;
struct vm_unmapped_area_info info;
if (unlikely(len > TASK_SIZE))
return -ENOMEM;
if (flags & MAP_FIXED) {
/* Even MAP_FIXED mappings must reside within TASK_SIZE */
if (TASK_SIZE - len < addr)
return -EINVAL;
/*
* We do not accept a shared mapping if it would violate
* cache aliasing constraints.
*/
if ((flags & MAP_SHARED) &&
((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
return -EINVAL;
return addr;
}
do_color_align = 0;
if (filp || (flags & MAP_SHARED))
do_color_align = 1;
/* requesting a specific address */
if (addr) {
if (do_color_align)
addr = COLOUR_ALIGN(addr, pgoff);
else
addr = PAGE_ALIGN(addr);
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
(!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
info.length = len;
info.align_mask = do_color_align ? (PAGE_MASK & shm_align_mask) : 0;
info.align_offset = pgoff << PAGE_SHIFT;
if (dir == DOWN) {
info.flags = VM_UNMAPPED_AREA_TOPDOWN;
info.low_limit = PAGE_SIZE;
info.high_limit = mm->mmap_base;
addr = vm_unmapped_area(&info);
if (!(addr & ~PAGE_MASK))
return addr;
/*
* A failed mmap() very likely causes application failure,
* so fall back to the bottom-up function here. This scenario
* can happen with large stack limits and large mmap()
* allocations.
*/
}
info.flags = 0;
info.low_limit = mm->mmap_base;
info.high_limit = TASK_SIZE;
return vm_unmapped_area(&info);
} | 0 | [
"CWE-119"
] | linux | 1be7107fbe18eed3e319a6c3e83c78254b693acb | 303,473,861,093,170,100,000,000,000,000,000,000,000 | 71 | mm: larger stack guard gap, between vmas
Stack guard page is a useful feature to reduce the risk of stack smashing
into a different mapping. We have been using a single page gap which
is sufficient to prevent having stack adjacent to a different mapping.
But this seems to be insufficient in the light of the stack usage in
userspace. E.g. glibc uses as large as 64kB alloca() in many commonly
used functions. Others use constructs like gid_t buffer[NGROUPS_MAX]
which is 256kB or stack strings with MAX_ARG_STRLEN.
This will become especially dangerous for suid binaries and the default
no limit for the stack size limit because those applications can be
tricked to consume a large portion of the stack and a single glibc call
could jump over the guard page. These attacks are not theoretical,
unfortunately.
Make those attacks less probable by increasing the stack guard gap
to 1MB (on systems with 4k pages; but make it depend on the page size
because systems with larger base pages might cap stack allocations in
the PAGE_SIZE units) which should cover larger alloca() and VLA stack
allocations. It is obviously not a full fix because the problem is
somehow inherent, but it should reduce attack space a lot.
One could argue that the gap size should be configurable from userspace,
but that can be done later when somebody finds that the new 1MB is wrong
for some special case applications. For now, add a kernel command line
option (stack_guard_gap) to specify the stack gap size (in page units).
Implementation wise, first delete all the old code for stack guard page:
because although we could get away with accounting one extra page in a
stack vma, accounting a larger gap can break userspace - case in point,
a program run with "ulimit -S -v 20000" failed when the 1MB gap was
counted for RLIMIT_AS; similar problems could come with RLIMIT_MLOCK
and strict non-overcommit mode.
Instead of keeping gap inside the stack vma, maintain the stack guard
gap as a gap between vmas: using vm_start_gap() in place of vm_start
(or vm_end_gap() in place of vm_end if VM_GROWSUP) in just those few
places which need to respect the gap - mainly arch_get_unmapped_area(),
and the vma tree's subtree_gap support for that.
Original-patch-by: Oleg Nesterov <[email protected]>
Original-patch-by: Michal Hocko <[email protected]>
Signed-off-by: Hugh Dickins <[email protected]>
Acked-by: Michal Hocko <[email protected]>
Tested-by: Helge Deller <[email protected]> # parisc
Signed-off-by: Linus Torvalds <[email protected]> |
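A simplified standalone reading of vm_start_gap() consistent with the message above; not the exact kernel code:
/* For a downward-growing stack the effective start is pulled back by
 * the guard gap, clamped at zero. */
#define VM_GROWSDOWN 0x1UL

struct vma_stub {
    unsigned long vm_start;
    unsigned long vm_flags;
};

static unsigned long stack_guard_gap = 256UL << 12;     /* 1MB with 4k pages */

static unsigned long vm_start_gap(const struct vma_stub *vma)
{
    unsigned long start = vma->vm_start;

    if (vma->vm_flags & VM_GROWSDOWN) {
        start -= stack_guard_gap;
        if (start > vma->vm_start)      /* wrapped below zero */
            start = 0;
    }
    return start;
}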
static void hid_pointer_sync(DeviceState *dev)
{
HIDState *hs = (HIDState *)dev;
HIDPointerEvent *prev, *curr, *next;
bool event_compression = false;
if (hs->n == QUEUE_LENGTH-1) {
/*
* Queue full. We are losing information, but we at least
* keep track of most recent button state.
*/
return;
}
prev = &hs->ptr.queue[(hs->head + hs->n - 1) & QUEUE_MASK];
curr = &hs->ptr.queue[(hs->head + hs->n) & QUEUE_MASK];
next = &hs->ptr.queue[(hs->head + hs->n + 1) & QUEUE_MASK];
if (hs->n > 0) {
/*
* No button state change between previous and current event
* (and previous wasn't seen by the guest yet), so there is
* motion information only and we can combine the two event
* into one.
*/
if (curr->buttons_state == prev->buttons_state) {
event_compression = true;
}
}
if (event_compression) {
/* add current motion to previous, clear current */
if (hs->kind == HID_MOUSE) {
prev->xdx += curr->xdx;
curr->xdx = 0;
prev->ydy += curr->ydy;
curr->ydy = 0;
} else {
prev->xdx = curr->xdx;
prev->ydy = curr->ydy;
}
prev->dz += curr->dz;
curr->dz = 0;
} else {
/* prepare next (clear rel, copy abs + btns) */
if (hs->kind == HID_MOUSE) {
next->xdx = 0;
next->ydy = 0;
} else {
next->xdx = curr->xdx;
next->ydy = curr->ydy;
}
next->dz = 0;
next->buttons_state = curr->buttons_state;
/* make current guest visible, notify guest */
hs->n++;
hs->event(hs);
}
} | 0 | [
"CWE-772"
] | qemu | 51dbea77a29ea46173373a6dad4ebd95d4661f42 | 30,817,074,624,049,323,000,000,000,000,000,000,000 | 59 | hid: Reset kbd modifiers on reset
When resetting the keyboard, we need to reset not just the pending keystrokes,
but also any pending modifiers. Otherwise there's a race when we're getting
reset while running an escape sequence (modifier 0x100).
Cc: [email protected]
Signed-off-by: Alexander Graf <[email protected]>
Message-id: [email protected]
Signed-off-by: Gerd Hoffmann <[email protected]> |
static int show_tid_map(struct seq_file *m, void *v)
{
return show_map(m, v, 0);
} | 0 | [
"CWE-200"
] | linux | ab676b7d6fbf4b294bf198fb27ade5b0e865c7ce | 102,009,244,706,723,930,000,000,000,000,000,000,000 | 4 | pagemap: do not leak physical addresses to non-privileged userspace
As pointed out by a recent post[1] on exploiting DRAM physical
imperfections, /proc/PID/pagemap exposes sensitive information which
can be used to mount attacks.
This disallows anybody without CAP_SYS_ADMIN from reading the pagemap.
[1] http://googleprojectzero.blogspot.com/2015/03/exploiting-dram-rowhammer-bug-to-gain.html
[ Eventually we might want to do something more finegrained, but for now
this is the simple model. - Linus ]
Signed-off-by: Kirill A. Shutemov <[email protected]>
Acked-by: Konstantin Khlebnikov <[email protected]>
Acked-by: Andy Lutomirski <[email protected]>
Cc: Pavel Emelyanov <[email protected]>
Cc: Andrew Morton <[email protected]>
Cc: Mark Seaborn <[email protected]>
Cc: [email protected]
Signed-off-by: Linus Torvalds <[email protected]> |
sasl_mechlist_do_rebuild(void)
{
(void) sasl_mechlist_string_build(NULL, NULL, NULL);
if (me.connected)
(void) sasl_mechlist_sts(sasl_mechlist_string);
} | 0 | [
"CWE-287",
"CWE-288"
] | atheme | 4e664c75d0b280a052eb8b5e81aa41944e593c52 | 270,786,654,663,426,440,000,000,000,000,000,000,000 | 7 | saslserv/main: Track EID we're pending login to
The existing model does not remember that we've sent a SVSLOGIN for a
given SASL session, and simply assumes that if a client is introduced
with a SASL session open, that session must have succeeded. The security
of this approach requires ircd to implicitly abort SASL sessions on
client registration.
This also means that if a client successfully authenticates and then
does something else its pending login is forgotten about, even though a
SVSLOGIN has been sent for it, and the ircd is going to think it's
logged in.
This change removes the dependency on ircd's state machine by keeping
explicit track of the pending login, i.e. the one we've most recently
sent a SVSLOGIN for. The next commit will ensure that a client abort
(even an implicit one) doesn't blow that information away. |
static int push_insn(int t, int w, int e, struct bpf_verifier_env *env,
bool loop_ok)
{
int *insn_stack = env->cfg.insn_stack;
int *insn_state = env->cfg.insn_state;
if (e == FALLTHROUGH && insn_state[t] >= (DISCOVERED | FALLTHROUGH))
return 0;
if (e == BRANCH && insn_state[t] >= (DISCOVERED | BRANCH))
return 0;
if (w < 0 || w >= env->prog->len) {
verbose_linfo(env, t, "%d: ", t);
verbose(env, "jump out of range from insn %d to %d\n", t, w);
return -EINVAL;
}
if (e == BRANCH)
/* mark branch target for state pruning */
init_explored_state(env, w);
if (insn_state[w] == 0) {
/* tree-edge */
insn_state[t] = DISCOVERED | e;
insn_state[w] = DISCOVERED;
if (env->cfg.cur_stack >= env->prog->len)
return -E2BIG;
insn_stack[env->cfg.cur_stack++] = w;
return 1;
} else if ((insn_state[w] & 0xF0) == DISCOVERED) {
if (loop_ok && env->bpf_capable)
return 0;
verbose_linfo(env, t, "%d: ", t);
verbose_linfo(env, w, "%d: ", w);
verbose(env, "back-edge from insn %d to %d\n", t, w);
return -EINVAL;
} else if (insn_state[w] == EXPLORED) {
/* forward- or cross-edge */
insn_state[t] = DISCOVERED | e;
} else {
verbose(env, "insn state internal bug\n");
return -EFAULT;
}
return 0;
} | 0 | [
"CWE-119",
"CWE-681",
"CWE-787"
] | linux | 5b9fbeb75b6a98955f628e205ac26689bcb1383e | 240,131,385,035,624,800,000,000,000,000,000,000,000 | 46 | bpf: Fix scalar32_min_max_or bounds tracking
Simon reported an issue with the current scalar32_min_max_or() implementation.
That is, compared to the other 32 bit subreg tracking functions, the code in
scalar32_min_max_or() stands out in that it uses the 64 bit registers instead
of 32 bit ones. This leads to bounds tracking issues, for example:
[...]
8: R0=map_value(id=0,off=0,ks=4,vs=48,imm=0) R10=fp0 fp-8=mmmmmmmm
8: (79) r1 = *(u64 *)(r0 +0)
R0=map_value(id=0,off=0,ks=4,vs=48,imm=0) R10=fp0 fp-8=mmmmmmmm
9: R0=map_value(id=0,off=0,ks=4,vs=48,imm=0) R1_w=inv(id=0) R10=fp0 fp-8=mmmmmmmm
9: (b7) r0 = 1
10: R0_w=inv1 R1_w=inv(id=0) R10=fp0 fp-8=mmmmmmmm
10: (18) r2 = 0x600000002
12: R0_w=inv1 R1_w=inv(id=0) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
12: (ad) if r1 < r2 goto pc+1
R0_w=inv1 R1_w=inv(id=0,umin_value=25769803778) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
13: R0_w=inv1 R1_w=inv(id=0,umin_value=25769803778) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
13: (95) exit
14: R0_w=inv1 R1_w=inv(id=0,umax_value=25769803777,var_off=(0x0; 0x7ffffffff)) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
14: (25) if r1 > 0x0 goto pc+1
R0_w=inv1 R1_w=inv(id=0,umax_value=0,var_off=(0x0; 0x7fffffff),u32_max_value=2147483647) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
15: R0_w=inv1 R1_w=inv(id=0,umax_value=0,var_off=(0x0; 0x7fffffff),u32_max_value=2147483647) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
15: (95) exit
16: R0_w=inv1 R1_w=inv(id=0,umin_value=1,umax_value=25769803777,var_off=(0x0; 0x77fffffff),u32_max_value=2147483647) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
16: (47) r1 |= 0
17: R0_w=inv1 R1_w=inv(id=0,umin_value=1,umax_value=32212254719,var_off=(0x1; 0x700000000),s32_max_value=1,u32_max_value=1) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
[...]
The bound tests on the map value force the upper unsigned bound to be 25769803777
in 64 bit (0b11000000000000000000000000000000001) and then lower one to be 1. By
using OR they are truncated and thus result in the range [1,1] for the 32 bit reg
tracker. This is incorrect given the only thing we know is that the value must be
positive and thus 2147483647 (0b1111111111111111111111111111111) at max for the
subregs. Fix it by using the {u,s}32_{min,max}_value vars instead. This also makes
sense, for example, for the case where we update dst_reg->s32_{min,max}_value in
the else branch we need to use the newly computed dst_reg->u32_{min,max}_value as
we know that these are positive. Previously, in the else branch the 64 bit values
of umin_value=1 and umax_value=32212254719 were used and latter got truncated to
be 1 as upper bound there. After the fix the subreg range is now correct:
[...]
8: R0=map_value(id=0,off=0,ks=4,vs=48,imm=0) R10=fp0 fp-8=mmmmmmmm
8: (79) r1 = *(u64 *)(r0 +0)
R0=map_value(id=0,off=0,ks=4,vs=48,imm=0) R10=fp0 fp-8=mmmmmmmm
9: R0=map_value(id=0,off=0,ks=4,vs=48,imm=0) R1_w=inv(id=0) R10=fp0 fp-8=mmmmmmmm
9: (b7) r0 = 1
10: R0_w=inv1 R1_w=inv(id=0) R10=fp0 fp-8=mmmmmmmm
10: (18) r2 = 0x600000002
12: R0_w=inv1 R1_w=inv(id=0) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
12: (ad) if r1 < r2 goto pc+1
R0_w=inv1 R1_w=inv(id=0,umin_value=25769803778) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
13: R0_w=inv1 R1_w=inv(id=0,umin_value=25769803778) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
13: (95) exit
14: R0_w=inv1 R1_w=inv(id=0,umax_value=25769803777,var_off=(0x0; 0x7ffffffff)) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
14: (25) if r1 > 0x0 goto pc+1
R0_w=inv1 R1_w=inv(id=0,umax_value=0,var_off=(0x0; 0x7fffffff),u32_max_value=2147483647) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
15: R0_w=inv1 R1_w=inv(id=0,umax_value=0,var_off=(0x0; 0x7fffffff),u32_max_value=2147483647) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
15: (95) exit
16: R0_w=inv1 R1_w=inv(id=0,umin_value=1,umax_value=25769803777,var_off=(0x0; 0x77fffffff),u32_max_value=2147483647) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
16: (47) r1 |= 0
17: R0_w=inv1 R1_w=inv(id=0,umin_value=1,umax_value=32212254719,var_off=(0x0; 0x77fffffff),u32_max_value=2147483647) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
[...]
Fixes: 3f50f132d840 ("bpf: Verifier, do explicit ALU32 bounds tracking")
Reported-by: Simon Scannell <[email protected]>
Signed-off-by: Daniel Borkmann <[email protected]>
Reviewed-by: John Fastabend <[email protected]>
Acked-by: Alexei Starovoitov <[email protected]> |
void file_test(const char* file, byte* check)
{
FILE* f;
int i = 0;
MD5 md5;
byte buf[1024];
byte md5sum[MD5::DIGEST_SIZE];
if( !( f = fopen( file, "rb" ) )) {
printf("Can't open %s\n", file);
return;
}
while( ( i = (int)fread(buf, 1, sizeof(buf), f )) > 0 )
md5.Update(buf, i);
md5.Final(md5sum);
memcpy(check, md5sum, sizeof(md5sum));
for(int j = 0; j < MD5::DIGEST_SIZE; ++j )
printf( "%02x", md5sum[j] );
printf(" %s\n", file);
fclose(f);
} | 0 | [] | mysql-server | 5c6169fb309981b564a17bee31b367a18866d674 | 280,339,367,054,723,300,000,000,000,000,000,000,000 | 25 | Bug #24740291: YASSL UPDATE TO 2.4.2 |
static OPJ_BOOL opj_j2k_write_first_tile_part(opj_j2k_t *p_j2k,
OPJ_BYTE * p_data,
OPJ_UINT32 * p_data_written,
OPJ_UINT32 p_total_data_size,
opj_stream_private_t *p_stream,
struct opj_event_mgr * p_manager)
{
OPJ_UINT32 l_nb_bytes_written = 0;
OPJ_UINT32 l_current_nb_bytes_written;
OPJ_BYTE * l_begin_data = 00;
opj_tcd_t * l_tcd = 00;
opj_cp_t * l_cp = 00;
l_tcd = p_j2k->m_tcd;
l_cp = &(p_j2k->m_cp);
l_tcd->cur_pino = 0;
/*Get number of tile parts*/
p_j2k->m_specific_param.m_encoder.m_current_poc_tile_part_number = 0;
/* INDEX >> */
/* << INDEX */
l_current_nb_bytes_written = 0;
l_begin_data = p_data;
if (! opj_j2k_write_sot(p_j2k, p_data, &l_current_nb_bytes_written, p_stream,
p_manager)) {
return OPJ_FALSE;
}
l_nb_bytes_written += l_current_nb_bytes_written;
p_data += l_current_nb_bytes_written;
p_total_data_size -= l_current_nb_bytes_written;
if (!OPJ_IS_CINEMA(l_cp->rsiz)) {
#if 0
for (compno = 1; compno < p_j2k->m_private_image->numcomps; compno++) {
l_current_nb_bytes_written = 0;
opj_j2k_write_coc_in_memory(p_j2k, compno, p_data, &l_current_nb_bytes_written,
p_manager);
l_nb_bytes_written += l_current_nb_bytes_written;
p_data += l_current_nb_bytes_written;
p_total_data_size -= l_current_nb_bytes_written;
l_current_nb_bytes_written = 0;
opj_j2k_write_qcc_in_memory(p_j2k, compno, p_data, &l_current_nb_bytes_written,
p_manager);
l_nb_bytes_written += l_current_nb_bytes_written;
p_data += l_current_nb_bytes_written;
p_total_data_size -= l_current_nb_bytes_written;
}
#endif
if (l_cp->tcps[p_j2k->m_current_tile_number].numpocs) {
l_current_nb_bytes_written = 0;
opj_j2k_write_poc_in_memory(p_j2k, p_data, &l_current_nb_bytes_written,
p_manager);
l_nb_bytes_written += l_current_nb_bytes_written;
p_data += l_current_nb_bytes_written;
p_total_data_size -= l_current_nb_bytes_written;
}
}
l_current_nb_bytes_written = 0;
if (! opj_j2k_write_sod(p_j2k, l_tcd, p_data, &l_current_nb_bytes_written,
p_total_data_size, p_stream, p_manager)) {
return OPJ_FALSE;
}
l_nb_bytes_written += l_current_nb_bytes_written;
* p_data_written = l_nb_bytes_written;
/* Writing Psot in SOT marker */
opj_write_bytes(l_begin_data + 6, l_nb_bytes_written,
4); /* PSOT */
if (OPJ_IS_CINEMA(l_cp->rsiz)) {
opj_j2k_update_tlm(p_j2k, l_nb_bytes_written);
}
return OPJ_TRUE;
} | 1 | [
"CWE-119",
"CWE-787"
] | openjpeg | dcac91b8c72f743bda7dbfa9032356bc8110098a | 153,472,520,582,042,650,000,000,000,000,000,000,000 | 83 | opj_j2k_write_sot(): fix potential write heap buffer overflow (#991) |
void ring_buffer_discard_commit(struct trace_buffer *buffer,
struct ring_buffer_event *event)
{
struct ring_buffer_per_cpu *cpu_buffer;
int cpu;
/* The event is discarded regardless */
rb_event_discard(event);
cpu = smp_processor_id();
cpu_buffer = buffer->buffers[cpu];
/*
* This must only be called if the event has not been
* committed yet. Thus we can assume that preemption
* is still disabled.
*/
RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing));
rb_decrement_entry(cpu_buffer, event);
if (rb_try_to_discard(cpu_buffer, event))
goto out;
out:
rb_end_commit(cpu_buffer);
trace_recursive_unlock(cpu_buffer);
preempt_enable_notrace();
} | 0 | [
"CWE-362"
] | linux | bbeb97464eefc65f506084fd9f18f21653e01137 | 77,164,062,559,086,900,000,000,000,000,000,000,000 | 31 | tracing: Fix race in trace_open and buffer resize call
The race below can occur if trace_open and a resize of the
cpu buffer run in parallel on different cpus:
CPUX CPUY
ring_buffer_resize
atomic_read(&buffer->resize_disabled)
tracing_open
tracing_reset_online_cpus
ring_buffer_reset_cpu
rb_reset_cpu
rb_update_pages
remove/insert pages
resetting pointer
This race can cause a data abort or sometimes an infinite loop in
rb_remove_pages and rb_insert_pages while checking pages
for sanity.
Take the buffer lock to fix this.
Link: https://lkml.kernel.org/r/[email protected]
Cc: [email protected]
Fixes: b23d7a5f4a07a ("ring-buffer: speed up buffer resets by avoiding synchronize_rcu for each CPU")
Signed-off-by: Gaurav Kohli <[email protected]>
Signed-off-by: Steven Rostedt (VMware) <[email protected]> |
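A self-contained pthread analogue of the fix: reset and resize serialize on one mutex so neither observes the other mid-update:
#include <pthread.h>

struct ring_stub {
    pthread_mutex_t lock;   /* one lock shared by reset and resize */
    unsigned long head, tail, pages;
};

static void ring_reset(struct ring_stub *rb)
{
    pthread_mutex_lock(&rb->lock);
    rb->head = rb->tail = 0;    /* pointer reset cannot interleave with resize */
    pthread_mutex_unlock(&rb->lock);
}

static void ring_resize(struct ring_stub *rb, unsigned long pages)
{
    pthread_mutex_lock(&rb->lock);
    rb->pages = pages;          /* page add/remove happens under the same lock */
    pthread_mutex_unlock(&rb->lock);
}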
static int intel_pmu_handle_irq(struct pt_regs *regs)
{
struct perf_sample_data data;
struct cpu_hw_events *cpuc;
int bit, loops;
u64 status;
int handled;
cpuc = &__get_cpu_var(cpu_hw_events);
/*
* Some chipsets need to unmask the LVTPC in a particular spot
* inside the nmi handler. As a result, the unmasking was pushed
* into all the nmi handlers.
*
* This handler doesn't seem to have any issues with the unmasking
* so it was left at the top.
*/
apic_write(APIC_LVTPC, APIC_DM_NMI);
intel_pmu_disable_all();
handled = intel_pmu_drain_bts_buffer();
status = intel_pmu_get_status();
if (!status) {
intel_pmu_enable_all(0);
return handled;
}
loops = 0;
again:
intel_pmu_ack_status(status);
if (++loops > 100) {
WARN_ONCE(1, "perfevents: irq loop stuck!\n");
perf_event_print_debug();
intel_pmu_reset();
goto done;
}
inc_irq_stat(apic_perf_irqs);
intel_pmu_lbr_read();
/*
* PEBS overflow sets bit 62 in the global status register
*/
if (__test_and_clear_bit(62, (unsigned long *)&status)) {
handled++;
x86_pmu.drain_pebs(regs);
}
for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
struct perf_event *event = cpuc->events[bit];
handled++;
if (!test_bit(bit, cpuc->active_mask))
continue;
if (!intel_pmu_save_and_restart(event))
continue;
perf_sample_data_init(&data, 0, event->hw.last_period);
if (has_branch_stack(event))
data.br_stack = &cpuc->lbr_stack;
if (perf_event_overflow(event, &data, regs))
x86_pmu_stop(event, 0);
}
/*
* Repeat if there is more work to be done:
*/
status = intel_pmu_get_status();
if (status)
goto again;
done:
intel_pmu_enable_all(0);
return handled;
} | 0 | [
"CWE-20",
"CWE-401"
] | linux | f1923820c447e986a9da0fc6bf60c1dccdf0408e | 288,803,973,593,575,200,000,000,000,000,000,000,000 | 81 | perf/x86: Fix offcore_rsp valid mask for SNB/IVB
The valid mask for both offcore_response_0 and
offcore_response_1 was wrong for SNB/SNB-EP,
IVB/IVB-EP. It was possible to write to
a reserved bit and cause a GP fault crashing
the kernel.
This patch fixes the problem by correctly marking the
reserved bits in the valid mask for all the processors
mentioned above.
A distinction between desktop and server parts is introduced
because bits 24-30 are only available on the server parts.
This version of the patch is just a rebase to perf/urgent tree
and should apply to older kernels as well.
Signed-off-by: Stephane Eranian <[email protected]>
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Signed-off-by: Ingo Molnar <[email protected]> |
dissect_kafka_offset_for_leader_epoch_response(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, int offset,
kafka_api_version_t api_version)
{
proto_item *subti;
proto_tree *subtree;
if (api_version >= 2) {
offset = dissect_kafka_throttle_time(tvb, pinfo, tree, offset);
}
subtree = proto_tree_add_subtree(tree, tvb, offset, -1,
ett_kafka_topics,
&subti, "Topics");
offset = dissect_kafka_array(subtree, tvb, pinfo, offset, 0, api_version,
&dissect_kafka_offset_for_leader_epoch_response_topic, NULL);
proto_item_set_end(subti, tvb, offset);
return offset;
} | 0 | [
"CWE-401"
] | wireshark | f4374967bbf9c12746b8ec3cd54dddada9dd353e | 219,160,937,849,524,500,000,000,000,000,000,000,000 | 20 | Kafka: Limit our decompression size.
Don't assume that the Internet has our best interests at heart when it
gives us the size of our decompression buffer. Assign an arbitrary limit
of 50 MB.
This fixes #16739 in that it takes care of
** (process:17681): WARNING **: 20:03:07.440: Dissector bug, protocol Kafka, in packet 31: ../epan/proto.c:7043: failed assertion "end >= fi->start"
which is different from the original error output. It looks like *that*
might have been taken care of in one of the other recent Kafka bug fixes.
The decompression routines return a success or failure status. Use
gbooleans instead of ints for that. |
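A sketch of the guard described above; the 50 MB figure mirrors the message, the names are illustrative:
#include <stdlib.h>

#define MAX_DECOMPRESSED_SIZE (50 * 1024 * 1024)    /* arbitrary 50 MB cap */

/* Never trust a wire-supplied output size: cap it before allocating. */
static void *alloc_decompress_buffer(unsigned long claimed_size)
{
    if (claimed_size > MAX_DECOMPRESSED_SIZE)
        return NULL;
    return malloc(claimed_size);
}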
f_inputrestore(typval_T *argvars UNUSED, typval_T *rettv)
{
if (ga_userinput.ga_len > 0)
{
--ga_userinput.ga_len;
restore_typeahead((tasave_T *)(ga_userinput.ga_data)
+ ga_userinput.ga_len);
/* default return is zero == OK */
}
else if (p_verbose > 1)
{
verb_msg(_("called inputrestore() more often than inputsave()"));
rettv->vval.v_number = 1; /* Failed */
}
} | 0 | [
"CWE-78"
] | vim | 8c62a08faf89663e5633dc5036cd8695c80f1075 | 114,424,788,376,323,570,000,000,000,000,000,000,000 | 15 | patch 8.1.0881: can execute shell commands in rvim through interfaces
Problem: Can execute shell commands in rvim through interfaces.
Solution: Disable using interfaces in restricted mode. Allow for writing
file with writefile(), histadd() and a few others. |
struct st_mysql_extension* mysql_extension_init(struct st_mysql *mysql __attribute__((unused)))
{
struct st_mysql_extension *ext;
ext= my_malloc(PSI_NOT_INSTRUMENTED,
sizeof(struct st_mysql_extension), MYF(MY_WME | MY_ZEROFILL));
return ext;
} | 0 | [
"CWE-284",
"CWE-295"
] | mysql-server | 3bd5589e1a5a93f9c224badf983cd65c45215390 | 212,092,743,839,601,780,000,000,000,000,000,000,000 | 8 | WL#6791 : Redefine client --ssl option to imply enforced encryption
# Changed the meaning of the --ssl=1 option of all client binaries
to mean force ssl, not try ssl and fail over to unencrypted
# Added a new MYSQL_OPT_SSL_ENFORCE mysql_options()
option to specify that an ssl connection is required.
# Added a new macro SSL_SET_OPTIONS() to the client
SSL handling headers that sets all the relevant SSL options at
once.
# Revamped all of the current native clients to use the new macro
# Removed some Windows line endings.
# Added proper handling of the new option into the ssl helper
headers.
# If SSL is mandatory assume that the media is secure enough
for the sha256 plugin to do unencrypted password exchange even
before establishing a connection.
# Set the default ssl cipher to DHE-RSA-AES256-SHA if none is
specified.
# updated test cases that require a non-default cipher to spawn
a mysql command line tool binary since mysqltest has no support
for specifying ciphers.
# updated the replication slave connection code to always enforce
SSL if any of the SSL config options is present.
# test cases added and updated.
# added a mysql_get_option() API to return mysql_options()
values. Used the new API inside the sha256 plugin.
# Fixed compilation warnings because of unused variables.
# Fixed test failures (mysql_ssl and bug13115401)
# Fixed whitespace issues.
# Fully implemented the mysql_get_option() function.
# Added a test case for mysql_get_option()
# fixed some trailing whitespace issues
# fixed some uint/int warnings in mysql_client_test.c
# removed shared memory option from non-windows get_options
tests
# moved MYSQL_OPT_LOCAL_INFILE to the uint options |
k5_asn1_encode_bitstring(asn1buf *buf, uint8_t *const *val, size_t len)
{
insert_bytes(buf, *val, len);
insert_byte(buf, 0);
return 0;
} | 0 | [
"CWE-674",
"CWE-787"
] | krb5 | 57415dda6cf04e73ffc3723be518eddfae599bfd | 93,957,708,604,525,390,000,000,000,000,000,000,000 | 6 | Add recursion limit for ASN.1 indefinite lengths
The libkrb5 ASN.1 decoder supports BER indefinite lengths. It
computes the tag length using recursion; the lack of a recursion limit
allows an attacker to overrun the stack and cause the process to
crash. Reported by Demi Obenour.
CVE-2020-28196:
In MIT krb5 releases 1.11 and later, an unauthenticated attacker can
cause a denial of service for any client or server to which it can
send an ASN.1-encoded Kerberos message of sufficient length.
ticket: 8959 (new)
tags: pullup
target_version: 1.18-next
target_version: 1.17-next |
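A standalone sketch of the mitigation pattern — an explicit depth counter with a fixed bound (bound and signature are illustrative, not krb5's):
#define MAX_NESTING_DEPTH 32    /* illustrative bound */

/* Fail once the recursion passes the bound instead of overrunning
 * the stack on deeply nested input. */
static long nested_length(const unsigned char *p, long len, int depth)
{
    if (depth > MAX_NESTING_DEPTH)
        return -1;              /* too deeply nested: reject the input */
    if (len <= 0)
        return 0;
    if (p[0] == 0x30)           /* a constructed element: one level deeper */
        return nested_length(p + 1, len - 1, depth + 1);
    return 1;
}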
static int first_nibble_is_2(RAnal* anal, RAnalOp* op, ut16 code){
if (IS_MOVB_REG_TO_REGREF(code)) { // 0010nnnnmmmm0000 mov.b <REG_M>,@<REG_N>
op->type = R_ANAL_OP_TYPE_STORE;
op->src[0] = anal_fill_ai_rg (anal, GET_SOURCE_REG(code));
op->dst = anal_fill_reg_ref (anal, GET_TARGET_REG(code), BYTE_SIZE);
} else if (IS_MOVW_REG_TO_REGREF(code)) {
op->type = R_ANAL_OP_TYPE_STORE;
op->src[0] = anal_fill_ai_rg (anal, GET_SOURCE_REG(code));
op->dst = anal_fill_reg_ref (anal, GET_TARGET_REG(code), WORD_SIZE);
} else if (IS_MOVL_REG_TO_REGREF(code)) {
op->type = R_ANAL_OP_TYPE_STORE;
op->src[0] = anal_fill_ai_rg (anal, GET_SOURCE_REG(code));
op->dst = anal_fill_reg_ref (anal, GET_TARGET_REG(code), LONG_SIZE);
} else if (IS_AND_REGS(code)) {
op->type = R_ANAL_OP_TYPE_AND;
op->src[0] = anal_fill_ai_rg (anal, GET_SOURCE_REG(code));
op->dst = anal_fill_ai_rg (anal, GET_TARGET_REG(code));
} else if (IS_XOR_REGS(code)) {
op->type = R_ANAL_OP_TYPE_XOR;
op->src[0] = anal_fill_ai_rg (anal, GET_SOURCE_REG(code));
op->dst = anal_fill_ai_rg (anal, GET_TARGET_REG(code));
} else if (IS_OR_REGS(code)) {
op->type = R_ANAL_OP_TYPE_OR;
op->src[0] = anal_fill_ai_rg (anal, GET_SOURCE_REG(code));
op->dst = anal_fill_ai_rg (anal, GET_TARGET_REG(code));
} else if (IS_PUSHB(code) || IS_PUSHW(code) || IS_PUSHL(code)) {
op->type = R_ANAL_OP_TYPE_PUSH;
//TODO Handle 'pushes' (mov Rm,@-Rn)
} else if (IS_TSTRR(code)) {
op->type = R_ANAL_OP_TYPE_ACMP;
//TODO: handle tst reg,reg
} else if (IS_CMPSTR(code)) { //0010nnnnmmmm1100 cmp/str <REG_M>,<REG_N>
op->type = R_ANAL_OP_TYPE_ACMP; //maybe not?
op->src[0] = anal_fill_ai_rg (anal, GET_SOURCE_REG(code));
op->src[1] = anal_fill_ai_rg (anal, GET_TARGET_REG(code));
//todo: handle cmp/str byte-per-byte cmp?
} else if (IS_XTRCT(code)) { //0010nnnnmmmm1101 xtrct <REG_M>,<REG_N>
op->type = R_ANAL_OP_TYPE_MOV;
op->src[0] = anal_fill_ai_rg (anal, GET_SOURCE_REG(code));
op->src[1] = anal_fill_ai_rg (anal, GET_TARGET_REG(code));
op->dst = anal_fill_ai_rg (anal, GET_TARGET_REG(code));
//todo: add details ?
} else if (IS_DIV0S(code)) {
op->type = R_ANAL_OP_TYPE_DIV;
//todo: add details?
} else if (IS_MULUW(code) || IS_MULSW(code)) { //0010nnnnmmmm111_ mul{s,u}.w <REG_M>,<REG_N>
op->type = R_ANAL_OP_TYPE_MUL;
op->src[0] = anal_fill_ai_rg(anal,GET_SOURCE_REG(code));
op->src[1] = anal_fill_ai_rg(anal,GET_TARGET_REG(code));
//todo: dest=MACL
}
return op->size;
} | 0 | [
"CWE-125"
] | radare2 | 77c47cf873dd55b396da60baa2ca83bbd39e4add | 43,011,579,300,061,260,000,000,000,000,000,000,000 | 54 | Fix #9903 - oobread in RAnal.sh |
static long dugetn(DviContext *dvi, size_t n)
{
long val;
if(NEEDBYTES(dvi, n) && get_bytes(dvi, n) == -1)
return -1;
val = mugetn(dvi->buffer.data + dvi->buffer.pos, n);
dvi->buffer.pos += n;
return val;
} | 0 | [
"CWE-20"
] | evince | d4139205b010ed06310d14284e63114e88ec6de2 | 178,492,733,365,554,100,000,000,000,000,000,000,000 | 10 | backends: Fix several security issues in the dvi-backend.
See CVE-2010-2640, CVE-2010-2641, CVE-2010-2642 and CVE-2010-2643. |
void sas_suspend_sata(struct asd_sas_port *port)
{
struct domain_device *dev;
mutex_lock(&port->ha->disco_mutex);
list_for_each_entry(dev, &port->dev_list, dev_list_node) {
struct sata_device *sata;
if (!dev_is_sata(dev))
continue;
sata = &dev->sata_dev;
if (sata->ap->pm_mesg.event == PM_EVENT_SUSPEND)
continue;
ata_sas_port_suspend(sata->ap);
}
mutex_unlock(&port->ha->disco_mutex);
sas_ata_flush_pm_eh(port, __func__);
} | 0 | [
"CWE-284"
] | linux | 0558f33c06bb910e2879e355192227a8e8f0219d | 313,026,483,854,215,880,000,000,000,000,000,000,000 | 21 | scsi: libsas: direct call probe and destruct
Commit 87c8331fcf72 ("[SCSI] libsas: prevent domain rediscovery
competing with ata error handling") introduced the disco mutex to prevent
rediscovery competing with ata error handling and put the whole
revalidation in the mutex. But the rphy add/remove needs to wait for the
error handling, which also grabs the disco mutex. This may lead to
deadlock. So the probe and destruct events were introduced to do the rphy
add/remove asynchronously and outside of the lock.
The asynchronously processed workers make the whole discovery process
non-atomic; other events may interrupt the process. For example,
if a loss-of-signal event is inserted before the probe event,
sas_deform_port() is called and the port will be deleted.
And sas_port_delete() may run before the destruct event, but the
port-x:x is the top parent of end device or expander. This leads to
a kernel WARNING such as:
[ 82.042979] sysfs group 'power' not found for kobject 'phy-1:0:22'
[ 82.042983] ------------[ cut here ]------------
[ 82.042986] WARNING: CPU: 54 PID: 1714 at fs/sysfs/group.c:237
sysfs_remove_group+0x94/0xa0
[ 82.043059] Call trace:
[ 82.043082] [<ffff0000082e7624>] sysfs_remove_group+0x94/0xa0
[ 82.043085] [<ffff00000864e320>] dpm_sysfs_remove+0x60/0x70
[ 82.043086] [<ffff00000863ee10>] device_del+0x138/0x308
[ 82.043089] [<ffff00000869a2d0>] sas_phy_delete+0x38/0x60
[ 82.043091] [<ffff00000869a86c>] do_sas_phy_delete+0x6c/0x80
[ 82.043093] [<ffff00000863dc20>] device_for_each_child+0x58/0xa0
[ 82.043095] [<ffff000008696f80>] sas_remove_children+0x40/0x50
[ 82.043100] [<ffff00000869d1bc>] sas_destruct_devices+0x64/0xa0
[ 82.043102] [<ffff0000080e93bc>] process_one_work+0x1fc/0x4b0
[ 82.043104] [<ffff0000080e96c0>] worker_thread+0x50/0x490
[ 82.043105] [<ffff0000080f0364>] kthread+0xfc/0x128
[ 82.043107] [<ffff0000080836c0>] ret_from_fork+0x10/0x50
Make probe and destruct a direct call in the disco and revalidate function,
but put them outside the lock. The whole discovery or revalidate won't
be interrupted by other events. The DISCE_PROBE and DISCE_DESTRUCT
events are deleted as a result of the direct call.
Introduce a new list to destruct the sas_port and put the port delete after
the destruct. This makes sure the right order of destroying the sysfs
kobject and fix the warning above.
sas_ex_revalidate_domain() has a loop to find all broadcast devices,
and sometimes we have a chance to find the same expander twice.
Because the sas_port will be deleted at the end of the whole revalidate
process, sas_port with the same name cannot be added before this.
Otherwise the sysfs will complain of creating duplicate filename. Since
the LLDD will send broadcast for every device change, we can only
process one expander's revalidation.
[mkp: kbuild test robot warning]
Signed-off-by: Jason Yan <[email protected]>
CC: John Garry <[email protected]>
CC: Johannes Thumshirn <[email protected]>
CC: Ewan Milne <[email protected]>
CC: Christoph Hellwig <[email protected]>
CC: Tomas Henzl <[email protected]>
CC: Dan Williams <[email protected]>
Reviewed-by: Hannes Reinecke <[email protected]>
Signed-off-by: Martin K. Petersen <[email protected]> |
static int input_consumed_signal(h2_mplx *m, h2_stream *stream)
{
if (stream->input) {
return h2_beam_report_consumption(stream->input);
}
return 0;
} | 0 | [
"CWE-444"
] | mod_h2 | 825de6a46027b2f4c30d7ff5a0c8b852d639c207 | 244,547,453,138,945,600,000,000,000,000,000,000,000 | 7 | * Fixed keepalives counter on slave connections. |
dir_free(void *ptr)
{
struct dir_data *dir = ptr;
if (dir->dir) closedir(dir->dir);
xfree(dir);
} | 0 | [
"CWE-22"
] | ruby | bd5661a3cbb38a8c3a3ea10cd76c88bbef7871b8 | 263,429,548,874,118,200,000,000,000,000,000,000,000 | 7 | dir.c: check NUL bytes
* dir.c (GlobPathValue): should be used in rb_push_glob only.
other methods should use FilePathValue.
https://hackerone.com/reports/302338
* dir.c (rb_push_glob): expand GlobPathValue
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@62989 b2dd03c8-39d4-4d8f-98ff-823fe69b080e |
push_caller(const char *path, VALUE val, void *enc)
{
struct push_glob_args *arg = (struct push_glob_args *)val;
return ruby_glob0(path, arg->fd, arg->glob.base, arg->flags, &rb_glob_funcs,
(VALUE)&arg->glob, enc);
} | 0 | [
"CWE-22"
] | ruby | bd5661a3cbb38a8c3a3ea10cd76c88bbef7871b8 | 257,143,202,979,860,380,000,000,000,000,000,000,000 | 7 | dir.c: check NUL bytes
* dir.c (GlobPathValue): should be used in rb_push_glob only.
other methods should use FilePathValue.
https://hackerone.com/reports/302338
* dir.c (rb_push_glob): expand GlobPathValue
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@62989 b2dd03c8-39d4-4d8f-98ff-823fe69b080e |
void fput(struct file *file)
{
if (atomic_long_dec_and_test(&file->f_count)) {
struct task_struct *task = current;
file_sb_list_del(file);
if (likely(!in_interrupt() && !(task->flags & PF_KTHREAD))) {
init_task_work(&file->f_u.fu_rcuhead, ____fput);
if (!task_work_add(task, &file->f_u.fu_rcuhead, true))
return;
/*
* After this task has run exit_task_work(),
* task_work_add() will fail. Fall through to delayed
* fput to avoid leaking *file.
*/
}
if (llist_add(&file->f_u.fu_llist, &delayed_fput_list))
schedule_work(&delayed_fput_work);
}
} | 1 | [
"CWE-17"
] | linux | eee5cc2702929fd41cce28058dc6d6717f723f87 | 252,422,285,563,229,260,000,000,000,000,000,000,000 | 21 | get rid of s_files and files_lock
The only thing we need it for is alt-sysrq-r (emergency remount r/o)
and these days we can do just as well without going through the
list of files.
Signed-off-by: Al Viro <[email protected]> |
static BOOL update_gdi_cache_bitmap_v3(rdpContext* context, CACHE_BITMAP_V3_ORDER* cacheBitmapV3)
{
rdpBitmap* bitmap;
rdpBitmap* prevBitmap;
BOOL compressed = TRUE;
rdpCache* cache = context->cache;
rdpSettings* settings = context->settings;
BITMAP_DATA_EX* bitmapData = &cacheBitmapV3->bitmapData;
bitmap = Bitmap_Alloc(context);
if (!bitmap)
return FALSE;
if (!cacheBitmapV3->bpp)
cacheBitmapV3->bpp = settings->ColorDepth;
compressed = (bitmapData->codecID != RDP_CODEC_ID_NONE);
Bitmap_SetDimensions(bitmap, bitmapData->width, bitmapData->height);
if (!bitmap->Decompress(context, bitmap, bitmapData->data, bitmapData->width,
bitmapData->height, bitmapData->bpp, bitmapData->length, compressed,
bitmapData->codecID))
{
Bitmap_Free(context, bitmap);
return FALSE;
}
if (!bitmap->New(context, bitmap))
{
Bitmap_Free(context, bitmap);
return FALSE;
}
prevBitmap = bitmap_cache_get(cache->bitmap, cacheBitmapV3->cacheId, cacheBitmapV3->cacheIndex);
Bitmap_Free(context, prevBitmap);
return bitmap_cache_put(cache->bitmap, cacheBitmapV3->cacheId, cacheBitmapV3->cacheIndex,
bitmap);
} | 0 | [
"CWE-125"
] | FreeRDP | 0b6b92a25a77d533b8a92d6acc840a81e103684e | 310,307,420,897,676,040,000,000,000,000,000,000,000 | 38 | Fixed CVE-2020-11525: Out of bounds read in bitmap_cache_new
Thanks to Sunglin and HuanGMz from Knownsec 404 |
bool MaybeReplaceShapeOrShapeNOp(
const Node* n, const std::vector<PartialTensorShape>& input_shapes,
std::unordered_map<const Node*, std::vector<Tensor>>*
shape_replacement_map) {
std::vector<Tensor> defined_shape;
for (const auto& shape : input_shapes) {
if (!shape.IsFullyDefined()) {
return false;
}
const int rank = shape.dims();
DataType op_type = n->output_type(0);
Tensor t(op_type, TensorShape({rank}));
if (op_type == DT_INT64) {
auto vec = t.vec<int64_t>();
for (int i = 0; i < rank; ++i) {
vec(i) = shape.dim_size(i);
}
} else {
CHECK(op_type == DT_INT32);
auto vec = t.vec<int32>();
for (int i = 0; i < rank; ++i) {
if (shape.dim_size(i) > INT_MAX) {
VLOG(1) << "Node " << n->name() << " has input shape dimension " << i
<< " of " << shape.dim_size(i) << " but type INT32 "
<< " so not replacing as constant: this will trigger a "
"runtime error later.";
return false;
}
vec(i) = static_cast<int32>(shape.dim_size(i));
}
}
defined_shape.push_back(t);
}
// All the inputs had known shapes so we can replace the node by constants
// later in the rewrite.
shape_replacement_map->insert({n, defined_shape});
return true;
} | 0 | [
"CWE-824"
] | tensorflow | 7731e8dfbe4a56773be5dc94d631611211156659 | 63,287,166,045,166,630,000,000,000,000,000,000,000 | 38 | Don't constant-fold DT_RESOURCE constants.
PiperOrigin-RevId: 391803952
Change-Id: I0ea3ec31d3e7dfda0f03b4027a237f08d00a3091 |
http_get_tls_info (http_t hd, const char *what)
{
(void)what;
if (!hd)
return NULL;
return hd->uri->use_tls? "":NULL;
} | 0 | [
"CWE-352"
] | gnupg | 4a4bb874f63741026bd26264c43bb32b1099f060 | 168,302,880,335,377,620,000,000,000,000,000,000,000 | 9 | dirmngr: Avoid possible CSRF attacks via http redirects.
* dirmngr/http.h (parsed_uri_s): Add fields off_host and off_path.
(http_redir_info_t): New.
* dirmngr/http.c (do_parse_uri): Set new fields.
(same_host_p): New.
(http_prepare_redirect): New.
* dirmngr/t-http-basic.c: New test.
* dirmngr/ks-engine-hkp.c (send_request): Use http_prepare_redirect
instead of the open code.
* dirmngr/ks-engine-http.c (ks_http_fetch): Ditto.
--
With this change an http query will not follow a redirect unless the
Location header gives the same host. If the host is different only
the host and port is taken from the Location header and the original
path and query parts are kept.
Signed-off-by: Werner Koch <[email protected]>
(cherry picked from commit fa1b1eaa4241ff3f0634c8bdf8591cbc7c464144) |
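The redirect policy described in this commit message hinges on a same-host comparison: a Location header is honored in full only when its host matches the original request; otherwise only host and port are adopted. A rough standalone sketch of the comparison itself (not dirmngr's actual same_host_p; URI parsing is reduced to pre-extracted host strings):

#include <stdio.h>
#include <strings.h> /* strcasecmp, POSIX */

/* Hostnames are case-insensitive, so compare accordingly. */
static int demo_same_host(const char* orig_host, const char* redir_host)
{
    if (!orig_host || !redir_host)
        return 0;
    return strcasecmp(orig_host, redir_host) == 0;
}

int main(void)
{
    /* Same host: safe to follow the full redirect URL. */
    printf("%d\n", demo_same_host("keys.example.org", "KEYS.example.org"));
    /* Different host: keep the original path and query, per the fix. */
    printf("%d\n", demo_same_host("keys.example.org", "evil.example.net"));
    return 0;
}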
e_cal_backend_ews_init (ECalBackendEws *cbews)
{
cbews->priv = G_TYPE_INSTANCE_GET_PRIVATE (cbews, E_TYPE_CAL_BACKEND_EWS, ECalBackendEwsPrivate);
g_rec_mutex_init (&cbews->priv->cnc_lock);
e_cal_backend_ews_populate_windows_zones ();
} | 0 | [
"CWE-295"
] | evolution-ews | 915226eca9454b8b3e5adb6f2fff9698451778de | 169,535,180,730,192,960,000,000,000,000,000,000,000 | 8 | I#27 - SSL Certificates are not validated
This depends on https://gitlab.gnome.org/GNOME/evolution-data-server/commit/6672b8236139bd6ef41ecb915f4c72e2a052dba5 too.
Closes https://gitlab.gnome.org/GNOME/evolution-ews/issues/27 |
static int rm_probe(AVProbeData *p)
{
/* check file header */
if ((p->buf[0] == '.' && p->buf[1] == 'R' &&
p->buf[2] == 'M' && p->buf[3] == 'F' &&
p->buf[4] == 0 && p->buf[5] == 0) ||
(p->buf[0] == '.' && p->buf[1] == 'r' &&
p->buf[2] == 'a' && p->buf[3] == 0xfd))
return AVPROBE_SCORE_MAX;
else
return 0;
} | 0 | [
"CWE-399",
"CWE-834"
] | FFmpeg | 124eb202e70678539544f6268efc98131f19fa49 | 90,142,357,423,899,490,000,000,000,000,000,000,000 | 12 | avformat/rmdec: Fix DoS due to lack of eof check
Fixes: loop.ivr
Found-by: Xiaohei and Wangchu from Alibaba Security Team
Signed-off-by: Michael Niedermayer <[email protected]> |
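The DoS described here is the classic shape of a parser loop that never tests for end-of-input, so a truncated file spins forever. A minimal sketch of the missing guard, using a toy reader rather than FFmpeg's AVIOContext:

#include <stddef.h>
#include <stdio.h>

typedef struct
{
    const unsigned char* buf;
    size_t size;
    size_t pos;
} toy_reader;

static int toy_eof(const toy_reader* r)
{
    return r->pos >= r->size;
}

static int toy_read_u8(toy_reader* r)
{
    return toy_eof(r) ? -1 : r->buf[r->pos++];
}

int main(void)
{
    const unsigned char data[] = { 1, 2, 3 };
    toy_reader r = { data, sizeof(data), 0 };
    int c;
    /* Without the EOF test a truncated stream would loop forever;
     * with it, the loop ends as soon as input runs out. */
    while (!toy_eof(&r) && (c = toy_read_u8(&r)) >= 0)
        printf("%d\n", c);
    return 0;
}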
cib_remote_new(const char *server, const char *user, const char *passwd, int port,
gboolean encrypted)
{
cib_remote_opaque_t *private = NULL;
cib_t *cib = cib_new_variant();
private = calloc(1, sizeof(cib_remote_opaque_t));
cib->variant = cib_remote;
cib->variant_opaque = private;
if (server) {
private->server = strdup(server);
}
if (user) {
private->user = strdup(user);
}
if (passwd) {
private->passwd = strdup(passwd);
}
private->port = port;
private->command.encrypted = encrypted;
private->callback.encrypted = encrypted;
/* assign variant specific ops */
cib->delegate_fn = cib_remote_perform_op;
cib->cmds->signon = cib_remote_signon;
cib->cmds->signoff = cib_remote_signoff;
cib->cmds->free = cib_remote_free;
cib->cmds->inputfd = cib_remote_inputfd;
cib->cmds->register_notification = cib_remote_register_notification;
cib->cmds->set_connection_dnotify = cib_remote_set_connection_dnotify;
return cib;
} | 0 | [
"CWE-399"
] | pacemaker | 564f7cc2a51dcd2f28ab12a13394f31be5aa3c93 | 238,756,244,243,859,900,000,000,000,000,000,000,000 | 39 | High: core: Internal tls api improvements for reuse with future LRMD tls backend. |
aspath_segment_add (struct aspath *as, int type)
{
struct assegment *seg = as->segments;
struct assegment *new = assegment_new (type, 0);
if (seg)
{
while (seg->next)
seg = seg->next;
seg->next = new;
}
else
as->segments = new;
} | 0 | [
"CWE-20"
] | quagga | 7a42b78be9a4108d98833069a88e6fddb9285008 | 258,140,978,535,001,040,000,000,000,000,000,000,000 | 14 | bgpd: Fix AS_PATH size calculation for long paths
An AS_PATH with more entries than fit into a single AS_SEGMENT_MAX
must be broken up into multiple segments. The code that detected the
need to split was not correctly calculating the size of the resulting
message. This patch fixes that calculation. |
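A sketch of the split-aware size calculation the commit describes (the segment header and size constants follow the BGP AS_PATH wire format as commonly documented; the helper is illustrative, not quagga's actual code):

#include <stddef.h>
#include <stdio.h>

#define AS_SEGMENT_MAX 255 /* max ASNs in one AS_PATH segment */
#define AS_HEADER_SIZE 2   /* one byte type, one byte entry count */

/* Bytes needed to encode `count` ASNs of `asn_size` bytes each,
 * accounting for the extra segment headers once count exceeds
 * AS_SEGMENT_MAX -- the part the buggy code got wrong. */
static size_t aspath_encoded_size(size_t count, size_t asn_size)
{
    size_t segments;
    if (count == 0)
        return 0;
    segments = (count + AS_SEGMENT_MAX - 1) / AS_SEGMENT_MAX;
    return segments * AS_HEADER_SIZE + count * asn_size;
}

int main(void)
{
    /* 300 two-byte ASNs need two segments: 2*2 header bytes + 600. */
    printf("%zu\n", aspath_encoded_size(300, 2)); /* 604 */
    printf("%zu\n", aspath_encoded_size(10, 4));  /* 42 */
    return 0;
}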
static int generic_xdp_install(struct net_device *dev, struct netdev_xdp *xdp)
{
struct bpf_prog *old = rtnl_dereference(dev->xdp_prog);
struct bpf_prog *new = xdp->prog;
int ret = 0;
switch (xdp->command) {
case XDP_SETUP_PROG:
rcu_assign_pointer(dev->xdp_prog, new);
if (old)
bpf_prog_put(old);
if (old && !new) {
static_key_slow_dec(&generic_xdp_needed);
} else if (new && !old) {
static_key_slow_inc(&generic_xdp_needed);
dev_disable_lro(dev);
}
break;
case XDP_QUERY_PROG:
xdp->prog_attached = !!old;
xdp->prog_id = old ? old->aux->id : 0;
break;
default:
ret = -EINVAL;
break;
}
return ret; | 0 | [
"CWE-476"
] | linux | 0ad646c81b2182f7fa67ec0c8c825e0ee165696d | 229,972,991,522,270,180,000,000,000,000,000,000,000 | 32 | tun: call dev_get_valid_name() before register_netdevice()
register_netdevice() could fail early when we have an invalid
dev name, in which case ->ndo_uninit() is not called. For tun
device, this is a problem because a timer etc. are already
initialized and it expects ->ndo_uninit() to clean them up.
We could move these initializations into a ->ndo_init() so
that register_netdevice() knows better, however this is still
complicated due to the logic in tun_detach().
Therefore, I choose to just call dev_get_valid_name() before
register_netdevice(), which is quicker and much easier to audit.
And for this specific case, it is already enough.
Fixes: 96442e42429e ("tuntap: choose the txq based on rxq")
Reported-by: Dmitry Alexeev <[email protected]>
Cc: Jason Wang <[email protected]>
Cc: "Michael S. Tsirkin" <[email protected]>
Signed-off-by: Cong Wang <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
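The tun fix is an instance of validate-before-commit: check everything that can fail cheaply before any state is created, so the failure path has nothing to unwind. A standalone sketch of the pattern (toy_dev and its name rules are hypothetical stand-ins for the netdevice and dev_get_valid_name()):

#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct
{
    char name[16];
    int* resource; /* stands in for the timers etc. needing cleanup */
} toy_dev;

/* Hypothetical name check: non-empty, fits the buffer, and contains
 * no '/' or whitespace. */
static int toy_name_valid(const char* name)
{
    size_t i, len = strlen(name);
    if (len == 0 || len >= sizeof(((toy_dev*)0)->name))
        return 0;
    for (i = 0; i < len; i++)
        if (name[i] == '/' || isspace((unsigned char)name[i]))
            return 0;
    return 1;
}

static toy_dev* toy_dev_create(const char* name)
{
    toy_dev* dev;
    /* Validate first: a failure here leaves nothing to unwind,
     * which is the property the tun fix restores. */
    if (!toy_name_valid(name))
        return NULL;
    dev = calloc(1, sizeof(*dev));
    if (!dev)
        return NULL;
    strcpy(dev->name, name);
    dev->resource = malloc(sizeof(int)); /* resource acquired last */
    return dev;
}

int main(void)
{
    toy_dev* ok = toy_dev_create("tun0");
    toy_dev* bad = toy_dev_create("bad/name");
    printf("ok=%p bad=%p\n", (void*)ok, (void*)bad);
    if (ok) { free(ok->resource); free(ok); }
    return 0;
}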
static void sycc_to_rgb(int offset, int upb, int y, int cb, int cr,
int *out_r, int *out_g, int *out_b)
{
int r, g, b;
cb -= offset;
cr -= offset;
r = y + (int)(1.402 * (float)cr);
if (r < 0) {
r = 0;
} else if (r > upb) {
r = upb;
}
*out_r = r;
g = y - (int)(0.344 * (float)cb + 0.714 * (float)cr);
if (g < 0) {
g = 0;
} else if (g > upb) {
g = upb;
}
*out_g = g;
b = y + (int)(1.772 * (float)cb);
if (b < 0) {
b = 0;
} else if (b > upb) {
b = upb;
}
*out_b = b;
} | 0 | [
"CWE-119",
"CWE-787"
] | openjpeg | 2e5ab1d9987831c981ff05862e8ccf1381ed58ea | 82,949,005,254,515,280,000,000,000,000,000,000,000 | 31 | color_apply_icc_profile: avoid potential heap buffer overflow
Derived from a patch by Thuan Pham |
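sycc_to_rgb() above clamps every channel to [0, upb] before writing it out, which is the line of defense against out-of-range YCbCr input. The repeated clamp factors out into a small helper, sketched here:

#include <stdio.h>

/* Clamp v into [0, upb], as done per channel in sycc_to_rgb(). */
static int clamp_component(int v, int upb)
{
    if (v < 0)
        return 0;
    if (v > upb)
        return upb;
    return v;
}

int main(void)
{
    int upb = 255; /* 8-bit component upper bound */
    printf("%d %d %d\n",
           clamp_component(-12, upb),  /* 0 */
           clamp_component(300, upb),  /* 255 */
           clamp_component(128, upb)); /* 128 */
    return 0;
}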