Column types: func (string, up to 484k chars) | target (int64, 0 or 1) | cwe (list of strings, 0-4 entries) | project (string, 799 distinct values) | commit_id (string, 40 chars) | hash (float64) | size (int64, 1 to 24k) | message (string, up to 13.3k chars)

| func | target | cwe | project | commit_id | hash | size | message |
|---|---|---|---|---|---|---|---|
add_expose_path (GHashTable *hash_table,
FlatpakFilesystemMode mode,
const char *path)
{
struct stat st;
if (lstat (path, &st) != 0)
return;
if (S_ISDIR (st.st_mode) ||
S_ISREG (st.st_mode) ||
S_ISLNK (st.st_mode) ||
S_ISSOCK (st.st_mode))
{
guint old_mode;
old_mode = GPOINTER_TO_INT (g_hash_table_lookup (hash_table, path));
if (S_ISLNK (st.st_mode))
{
g_autofree char *resolved = flatpak_resolve_link (path, NULL);
/* Don't keep symlinks into /app or /usr, as they are not the
same as on the host, and we generally can't create the parents
for them anyway */
if (resolved &&
!g_str_has_prefix (resolved, "/app/") &&
!g_str_has_prefix (resolved, "/usr/"))
{
add_expose_path (hash_table, mode, resolved);
mode = FAKE_MODE_SYMLINK;
}
else
mode = 0;
}
if (mode > 0)
g_hash_table_insert (hash_table, g_strdup (path), GINT_TO_POINTER ( MAX (old_mode, mode)));
}
}
| 0 |
[
"CWE-20"
] |
flatpak
|
902fb713990a8f968ea4350c7c2a27ff46f1a6c4
| 46,939,653,215,835,760,000,000,000,000,000,000,000 | 38 |
Use seccomp to filter out TIOCSTI ioctl
This would otherwise let the sandbox add input to the controlling tty.
|
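The fix described in this commit installs a seccomp filter so code inside the sandbox cannot use the TIOCSTI ioctl to inject keystrokes into the controlling terminal. Below is a minimal userspace sketch of that idea using libseccomp; it is an illustration, not flatpak's actual filter (a production rule should also use a masked argument compare so high bits in the request value cannot bypass it).

```c
/* Minimal sketch: block the TIOCSTI ioctl with libseccomp.
 * Illustration of the idea in the commit above, not flatpak's filter.
 * Build with: cc demo.c -lseccomp */
#include <errno.h>
#include <seccomp.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
    scmp_filter_ctx ctx = seccomp_init(SCMP_ACT_ALLOW); /* allow by default */
    if (ctx == NULL)
        return 1;

    /* Return EPERM whenever ioctl() is called with request == TIOCSTI. */
    if (seccomp_rule_add(ctx, SCMP_ACT_ERRNO(EPERM), SCMP_SYS(ioctl), 1,
                         SCMP_A1(SCMP_CMP_EQ, (scmp_datum_t)TIOCSTI)) < 0)
        goto out;

    if (seccomp_load(ctx) < 0)
        goto out;

    /* This injection attempt now fails with EPERM instead of typing 'x'
     * into the controlling terminal. */
    char c = 'x';
    if (ioctl(STDIN_FILENO, TIOCSTI, &c) < 0)
        perror("ioctl(TIOCSTI)");

out:
    seccomp_release(ctx);
    return 0;
}
```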
bool operator <(const Principal& o) const {
return (t < o.t) || ((t == o.t) && (u < o.u));
}
| 0 |
[
"CWE-617"
] |
ceph
|
b3118cabb8060a8cc6a01c4e8264cb18e7b1745a
| 185,061,584,681,726,430,000,000,000,000,000,000,000 | 3 |
rgw: Remove assertions in IAM Policy
A couple of them could be triggered by user input.
Signed-off-by: Adam C. Emerson <[email protected]>
|
Field *Type_handler_float::make_conversion_table_field(TABLE *table,
uint metadata,
const Field *target)
const
{
return new (table->in_use->mem_root)
Field_float(NULL, 12 /*max_length*/, (uchar *) "", 1, Field::NONE,
TMPNAME, 0/*dec*/, 0/*zerofill*/, 0/*unsigned_flag*/);
}
| 0 |
[
"CWE-120"
] |
server
|
eca207c46293bc72dd8d0d5622153fab4d3fccf1
| 257,665,841,529,803,960,000,000,000,000,000,000,000 | 9 |
MDEV-25317 Assertion `scale <= precision' failed in decimal_bin_size And Assertion `scale >= 0 && precision > 0 && scale <= precision' failed in decimal_bin_size_inline/decimal_bin_size.
Precision should be kept below DECIMAL_MAX_SCALE for computations.
It can be bigger in Item_decimal. I'd fix this too, but it changes the
existing behaviour, so it is problematic to fix.
|
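A minimal sketch of the clamping the message calls for; the constant values and the helper name are assumptions for this illustration, not MariaDB's definitions.

```c
/* Sketch: keep decimal precision/scale within representable limits so
 * internal assertions like `scale <= precision` cannot fire. */
#include <stdio.h>

#define DECIMAL_MAX_PRECISION 38   /* assumed value for this sketch */
#define DECIMAL_MAX_SCALE     38   /* assumed value for this sketch */

static void clamp_decimal(unsigned *precision, unsigned *scale)
{
    if (*precision > DECIMAL_MAX_PRECISION)
        *precision = DECIMAL_MAX_PRECISION;
    if (*scale > DECIMAL_MAX_SCALE)
        *scale = DECIMAL_MAX_SCALE;
    /* Preserve the invariant the failing assertion checks. */
    if (*scale > *precision)
        *scale = *precision;
}

int main(void)
{
    unsigned precision = 100, scale = 77;
    clamp_decimal(&precision, &scale);
    printf("precision=%u scale=%u\n", precision, scale); /* 38 38 */
    return 0;
}
```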
static int update_filter(struct tap_filter *filter, void __user *arg)
{
struct { u8 u[ETH_ALEN]; } *addr;
struct tun_filter uf;
int err, alen, n, nexact;
if (copy_from_user(&uf, arg, sizeof(uf)))
return -EFAULT;
if (!uf.count) {
/* Disabled */
filter->count = 0;
return 0;
}
alen = ETH_ALEN * uf.count;
addr = memdup_user(arg + sizeof(uf), alen);
if (IS_ERR(addr))
return PTR_ERR(addr);
/* The filter is updated without holding any locks. Which is
* perfectly safe. We disable it first and in the worst
* case we'll accept a few undesired packets. */
filter->count = 0;
wmb();
/* Use first set of addresses as an exact filter */
for (n = 0; n < uf.count && n < FLT_EXACT_COUNT; n++)
memcpy(filter->addr[n], addr[n].u, ETH_ALEN);
nexact = n;
/* Remaining multicast addresses are hashed,
* unicast will leave the filter disabled. */
memset(filter->mask, 0, sizeof(filter->mask));
for (; n < uf.count; n++) {
if (!is_multicast_ether_addr(addr[n].u)) {
err = 0; /* no filter */
goto free_addr;
}
addr_hash_set(filter->mask, addr[n].u);
}
/* For ALLMULTI just set the mask to all ones.
* This overrides the mask populated above. */
if ((uf.flags & TUN_FLT_ALLMULTI))
memset(filter->mask, ~0, sizeof(filter->mask));
/* Now enable the filter */
wmb();
filter->count = nexact;
/* Return the number of exact filters */
err = nexact;
free_addr:
kfree(addr);
return err;
}
| 0 |
[
"CWE-476"
] |
linux
|
0ad646c81b2182f7fa67ec0c8c825e0ee165696d
| 71,540,685,978,895,980,000,000,000,000,000,000,000 | 58 |
tun: call dev_get_valid_name() before register_netdevice()
register_netdevice() could fail early when we have an invalid
dev name, in which case ->ndo_uninit() is not called. For tun
device, this is a problem because a timer etc. are already
initialized and it expects ->ndo_uninit() to clean them up.
We could move these initializations into a ->ndo_init() so
that register_netdevice() knows better, however this is still
complicated due to the logic in tun_detach().
Therefore, I choose to just call dev_get_valid_name() before
register_netdevice(), which is quicker and much easier to audit.
And for this specific case, it is already enough.
Fixes: 96442e42429e ("tuntap: choose the txq based on rxq")
Reported-by: Dmitry Alexeev <[email protected]>
Cc: Jason Wang <[email protected]>
Cc: "Michael S. Tsirkin" <[email protected]>
Signed-off-by: Cong Wang <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
strqueue_pushjoin(Strqueue *q, const char *s1, const char *s2, const char *s3)
{
q->str = solv_extend(q->str, q->nstr, 1, sizeof(*q->str), STRQUEUE_BLOCK);
q->str[q->nstr++] = solv_dupjoin(s1, s2, s3);
}
| 0 |
[
"CWE-120"
] |
libsolv
|
0077ef29eb46d2e1df2f230fc95a1d9748d49dec
| 288,016,942,943,173,780,000,000,000,000,000,000,000 | 5 |
testcase_read: error out if repos are added or the system is changed too late
We must not add new solvables after the considered map was created, the solver
was created, or jobs were added. We must not change the system after jobs have
been added.
(Jobs may point inside the whatprovides array, so we must not invalidate this
area.)
|
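strqueue_pushjoin() above grows q->str through solv_extend(), which reallocates in STRQUEUE_BLOCK-sized steps so repeated pushes cost amortized O(1) reallocations. A self-contained sketch of that block-rounded growth pattern (the names here are ours, not libsolv's):

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define BLOCK 255   /* 2^n - 1, in the style of STRQUEUE_BLOCK */

/* Grow `buf` so it can hold `len + 1` elements of `size` bytes.
 * Reallocation happens only when `len` crosses a block boundary. */
static void *block_extend(void *buf, size_t len, size_t size)
{
    if ((len & BLOCK) == 0) {
        void *nbuf = realloc(buf, (len + BLOCK + 1) * size);
        if (nbuf == NULL) {
            free(buf);
            return NULL;
        }
        buf = nbuf;
    }
    return buf;
}

int main(void)
{
    char **strs = NULL;
    size_t n = 0;

    for (int i = 0; i < 1000; i++) {
        strs = block_extend(strs, n, sizeof(*strs));
        if (strs == NULL)
            return 1;
        strs[n++] = strdup("item");
    }
    printf("%zu strings stored\n", n);
    while (n > 0)
        free(strs[--n]);
    free(strs);
    return 0;
}
```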
hybiReadAndDecode(rfbClientPtr cl, char *dst, int len, int *sockRet)
{
int n;
int i;
int toReturn;
int toDecode;
int bufsize;
int nextRead;
unsigned char *data;
uint32_t *data32;
ws_ctx_t *wsctx = (ws_ctx_t *)cl->wsctx;
/* if data was carried over, copy to start of buffer */
memcpy(wsctx->writePos, wsctx->carryBuf, wsctx->carrylen);
wsctx->writePos += wsctx->carrylen;
/* -1 accounts for potential '\0' terminator for base64 decoding */
bufsize = wsctx->codeBufDecode + ARRAYSIZE(wsctx->codeBufDecode) - wsctx->writePos - 1;
if (hybiRemaining(wsctx) > bufsize) {
nextRead = bufsize;
} else {
nextRead = hybiRemaining(wsctx);
}
rfbLog("calling read with buf=%p and len=%d (decodebuf=%p headerLen=%d\n)", wsctx->writePos, nextRead, wsctx->codeBufDecode, wsctx->header.headerLen);
if (wsctx->nReadRaw < wsctx->nToRead) {
/* decode more data */
if (-1 == (n = ws_read(cl, wsctx->writePos, nextRead))) {
int olderrno = errno;
rfbErr("%s: read; %m", __func__);
errno = olderrno;
*sockRet = -1;
return WS_HYBI_STATE_ERR;
} else if (n == 0) {
*sockRet = 0;
return WS_HYBI_STATE_ERR;
}
wsctx->nReadRaw += n;
rfbLog("read %d bytes from socket; nRead=%d\n", n, wsctx->nReadRaw);
} else {
n = 0;
}
wsctx->writePos += n;
if (wsctx->nReadRaw >= wsctx->nToRead) {
if (wsctx->nReadRaw > wsctx->nToRead) {
rfbErr("%s: internal error, read past websocket frame", __func__);
errno=EIO;
*sockRet = -1;
return WS_HYBI_STATE_ERR;
}
}
toDecode = wsctx->writePos - hybiPayloadStart(wsctx);
rfbLog("toDecode=%d from n=%d carrylen=%d headerLen=%d\n", toDecode, n, wsctx->carrylen, wsctx->header.headerLen);
if (toDecode < 0) {
rfbErr("%s: internal error; negative number of bytes to decode: %d", __func__, toDecode);
errno=EIO;
*sockRet = -1;
return WS_HYBI_STATE_ERR;
}
/* for a possible base64 decoding, we decode multiples of 4 bytes until
* the whole frame is received and carry over any remaining bytes in the carry buf*/
data = (unsigned char *)hybiPayloadStart(wsctx);
data32= (uint32_t *)data;
for (i = 0; i < (toDecode >> 2); i++) {
data32[i] ^= wsctx->header.mask.u;
}
rfbLog("mask decoding; i=%d toDecode=%d\n", i, toDecode);
if (wsctx->hybiDecodeState == WS_HYBI_STATE_FRAME_COMPLETE) {
/* process the remaining bytes (if any) */
for (i*=4; i < toDecode; i++) {
data[i] ^= wsctx->header.mask.c[i % 4];
}
/* all data is here, no carrying */
wsctx->carrylen = 0;
} else {
/* carry over remaining, non-multiple-of-four bytes */
wsctx->carrylen = toDecode - (i * 4);
if (wsctx->carrylen < 0 || wsctx->carrylen > ARRAYSIZE(wsctx->carryBuf)) {
rfbErr("%s: internal error, invalid carry over size: carrylen=%d, toDecode=%d, i=%d", __func__, wsctx->carrylen, toDecode, i);
*sockRet = -1;
errno = EIO;
return WS_HYBI_STATE_ERR;
}
rfbLog("carrying over %d bytes from %p to %p\n", wsctx->carrylen, wsctx->writePos + (i * 4), wsctx->carryBuf);
memcpy(wsctx->carryBuf, data + (i * 4), wsctx->carrylen);
}
toReturn = toDecode - wsctx->carrylen;
switch (wsctx->header.opcode) {
case WS_OPCODE_CLOSE:
/* this data is not returned as payload data */
if (hybiWsFrameComplete(wsctx)) {
rfbLog("got closure, reason %d\n", WS_NTOH16(((uint16_t *)data)[0]));
errno = ECONNRESET;
*sockRet = -1;
return WS_HYBI_STATE_FRAME_COMPLETE;
} else {
rfbErr("%s: close reason with long frame not supported", __func__);
errno = EIO;
*sockRet = -1;
return WS_HYBI_STATE_ERR;
}
break;
case WS_OPCODE_TEXT_FRAME:
data[toReturn] = '\0';
rfbLog("Initiate Base64 decoding in %p with max size %d and '\\0' at %p\n", data, bufsize, data + toReturn);
if (-1 == (wsctx->readlen = b64_pton((char *)data, data, bufsize))) {
rfbErr("Base64 decode error in %s; data=%p bufsize=%d", __func__, data, bufsize);
rfbErr("%s: Base64 decode error; %m\n", __func__);
}
wsctx->writePos = hybiPayloadStart(wsctx);
break;
case WS_OPCODE_BINARY_FRAME:
wsctx->readlen = toReturn;
wsctx->writePos = hybiPayloadStart(wsctx);
break;
default:
rfbErr("%s: unhandled opcode %d, b0: %02x, b1: %02x\n", __func__, (int)wsctx->header.opcode, wsctx->header.data->b0, wsctx->header.data->b1);
}
wsctx->readPos = data;
return hybiReturnData(dst, len, wsctx, sockRet);
}
| 0 |
[
"CWE-787"
] |
libvncserver
|
aac95a9dcf4bbba87b76c72706c3221a842ca433
| 226,620,486,976,177,280,000,000,000,000,000,000,000 | 133 |
fix overflow and refactor websockets decode (Hybi)
fix critical heap-based buffer overflow which allowed easy modification
of a return address via an overwritten function pointer
fix bug causing connections to fail due to a "one websocket frame = one
ws_read" assumption, which failed with LibVNCServer-0.9.11
refactor websocket Hybi decode to use a simple state machine for
decoding of websocket frames
|
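The decoder above unmasks the payload by XORing whole 32-bit words against the client mask and finishing the 0-3 byte tail byte-wise. A standalone sketch of that RFC 6455 unmasking step:

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void ws_unmask(unsigned char *data, size_t len,
                      const unsigned char mask[4])
{
    uint32_t m;
    size_t i;

    memcpy(&m, mask, 4);               /* same byte order as the payload */

    /* memcpy through a temporary avoids the unaligned uint32_t* access
     * the in-place cast in the original code relies on. */
    for (i = 0; i + 4 <= len; i += 4) {
        uint32_t w;
        memcpy(&w, data + i, 4);
        w ^= m;
        memcpy(data + i, &w, 4);
    }
    for (; i < len; i++)               /* 0-3 trailing bytes */
        data[i] ^= mask[i % 4];
}

int main(void)
{
    unsigned char mask[4] = { 0x12, 0x34, 0x56, 0x78 };
    unsigned char buf[] = "hello websocket";

    ws_unmask(buf, sizeof(buf) - 1, mask);   /* mask ... */
    ws_unmask(buf, sizeof(buf) - 1, mask);   /* ... and unmask again */
    printf("%s\n", buf);                     /* prints the original text */
    return 0;
}
```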
void Lex_input_stream::add_digest_token(uint token, LEX_YYSTYPE yylval)
{
if (m_digest != NULL)
{
m_digest= digest_add_token(m_digest, token, yylval);
}
}
| 0 |
[
"CWE-476"
] |
server
|
3a52569499e2f0c4d1f25db1e81617a9d9755400
| 206,012,200,161,753,780,000,000,000,000,000,000,000 | 7 |
MDEV-25636: Bug report: abortion in sql/sql_parse.cc:6294
The asserion failure was caused by this query
select /*id=1*/ from t1
where
col= ( select /*id=2*/ from ... where corr_cond1
union
select /*id=4*/ from ... where corr_cond2)
Here,
- select with id=2 was correlated due to corr_cond1.
- select with id=4 was initially correlated due to corr_cond2, but then
the optimizer optimized away the correlation, making the select with id=4
uncorrelated.
However, since select with id=2 remained correlated, the execution had to
re-compute the whole UNION. When it tried to execute select with id=4, it
hit an assertion (join buffer already free'd).
This is because select with id=4 has freed its execution structures after
it has been executed once. The select is uncorrelated, so it did not expect
it would need to be executed for the second time.
Fixed this by adding this logic in
st_select_lex::optimize_unflattened_subqueries():
If a member of a UNION is correlated, mark all its members as
correlated, so that they are prepared to be executed multiple times.
|
parse_nsh(const void **datap, size_t *sizep, struct ovs_key_nsh *key)
{
const struct nsh_hdr *nsh = (const struct nsh_hdr *) *datap;
uint8_t version, length, flags, ttl;
/* Check if it is long enough for NSH header, doesn't support
* MD type 2 yet
*/
if (OVS_UNLIKELY(*sizep < NSH_BASE_HDR_LEN)) {
return false;
}
version = nsh_get_ver(nsh);
flags = nsh_get_flags(nsh);
length = nsh_hdr_len(nsh);
ttl = nsh_get_ttl(nsh);
if (OVS_UNLIKELY(length > *sizep || version != 0)) {
return false;
}
key->flags = flags;
key->ttl = ttl;
key->mdtype = nsh->md_type;
key->np = nsh->next_proto;
key->path_hdr = nsh_get_path_hdr(nsh);
switch (key->mdtype) {
case NSH_M_TYPE1:
if (length != NSH_M_TYPE1_LEN) {
return false;
}
for (size_t i = 0; i < 4; i++) {
key->context[i] = get_16aligned_be32(&nsh->md1.context[i]);
}
break;
case NSH_M_TYPE2:
/* Don't support MD type 2 metadata parsing yet */
if (length < NSH_BASE_HDR_LEN) {
return false;
}
memset(key->context, 0, sizeof(key->context));
break;
default:
/* We don't parse other context headers yet. */
break;
}
data_pull(datap, sizep, length);
return true;
}
| 0 |
[
"CWE-400"
] |
ovs
|
79cec1a736b91548ec882d840986a11affda1068
| 224,368,508,801,364,530,000,000,000,000,000,000,000 | 53 |
flow: Support extra padding length.
Although not required, padding can be optionally added until
the packet length is MTU bytes. A packet with extra padding
currently fails sanity checks.
Vulnerability: CVE-2020-35498
Fixes: fa8d9001a624 ("miniflow_extract: Properly handle small IP packets.")
Reported-by: Joakim Hindersson <[email protected]>
Acked-by: Ilya Maximets <[email protected]>
Signed-off-by: Flavio Leitner <[email protected]>
Signed-off-by: Ilya Maximets <[email protected]>
|
g_keyfile_settings_backend_constructed (GObject *object)
{
GKeyfileSettingsBackend *kfsb = G_KEYFILE_SETTINGS_BACKEND (object);
if (kfsb->file == NULL)
{
char *filename = g_build_filename (g_get_user_config_dir (),
"glib-2.0", "settings", "keyfile",
NULL);
kfsb->file = g_file_new_for_path (filename);
g_free (filename);
}
if (kfsb->prefix == NULL)
{
kfsb->prefix = g_strdup ("/");
kfsb->prefix_len = 1;
}
kfsb->keyfile = g_key_file_new ();
kfsb->permission = g_simple_permission_new (TRUE);
kfsb->dir = g_file_get_parent (kfsb->file);
g_mkdir_with_parents (g_file_peek_path (kfsb->dir), 0700);
kfsb->file_monitor = g_file_monitor (kfsb->file, G_FILE_MONITOR_NONE, NULL, NULL);
kfsb->dir_monitor = g_file_monitor (kfsb->dir, G_FILE_MONITOR_NONE, NULL, NULL);
compute_checksum (kfsb->digest, NULL, 0);
g_signal_connect (kfsb->file_monitor, "changed",
G_CALLBACK (file_changed), kfsb);
g_signal_connect (kfsb->dir_monitor, "changed",
G_CALLBACK (dir_changed), kfsb);
g_keyfile_settings_backend_keyfile_writable (kfsb);
g_keyfile_settings_backend_keyfile_reload (kfsb);
load_system_settings (kfsb);
}
| 0 |
[
"CWE-732"
] |
glib
|
5e4da714f00f6bfb2ccd6d73d61329c6f3a08429
| 327,380,158,640,333,080,000,000,000,000,000,000,000 | 40 |
keyfile settings: Use tighter permissions
When creating directories, create them with 700 permissions,
instead of 777.
Closes: #1658
|
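The hardening is the g_mkdir_with_parents(..., 0700) call above: the settings directory used to be created world-accessible. A minimal GLib sketch of the same pattern (the "demo-app" path is hypothetical):

```c
/* Build with: cc demo.c $(pkg-config --cflags --libs glib-2.0) */
#include <glib.h>

int main(void)
{
    char *dir = g_build_filename(g_get_user_config_dir(),
                                 "demo-app", "settings", NULL);

    /* 0700: owner-only, unlike the 0777 the bug report was about
     * (the process umask can only tighten this further). */
    if (g_mkdir_with_parents(dir, 0700) != 0)
        g_printerr("failed to create %s\n", dir);

    g_free(dir);
    return 0;
}
```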
pt_contained_circle(PG_FUNCTION_ARGS)
{
Point *point = PG_GETARG_POINT_P(0);
CIRCLE *circle = PG_GETARG_CIRCLE_P(1);
double d;
d = point_dt(&circle->center, point);
PG_RETURN_BOOL(d <= circle->radius);
}
| 0 |
[
"CWE-703",
"CWE-189"
] |
postgres
|
31400a673325147e1205326008e32135a78b4d8a
| 161,499,764,066,050,230,000,000,000,000,000,000,000 | 9 |
Predict integer overflow to avoid buffer overruns.
Several functions, mostly type input functions, calculated an allocation
size such that the calculation wrapped to a small positive value when
arguments implied a sufficiently-large requirement. Writes past the end
of the inadvertent small allocation followed shortly thereafter.
Coverity identified the path_in() vulnerability; code inspection led to
the rest. In passing, add check_stack_depth() to prevent stack overflow
in related functions.
Back-patch to 8.4 (all supported versions). The non-comment hstore
changes touch code that did not exist in 8.4, so that part stops at 9.0.
Noah Misch and Heikki Linnakangas, reviewed by Tom Lane.
Security: CVE-2014-0064
|
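The pattern behind the fix is to validate the size multiplication before allocating rather than letting it wrap to a small value. A minimal sketch, not PostgreSQL's actual helper:

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static void *alloc_array(size_t count, size_t elem_size)
{
    /* Reject any count that would make the multiplication wrap. */
    if (elem_size != 0 && count > SIZE_MAX / elem_size)
        return NULL;
    return malloc(count * elem_size);
}

int main(void)
{
    /* A hostile "number of points" that would wrap a naive
     * `count * sizeof(Point)` computation to a tiny allocation. */
    size_t evil = SIZE_MAX / 2 + 2;

    if (alloc_array(evil, 16) == NULL)
        puts("rejected oversized request");
    return 0;
}
```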
static int rsi_find_bulk_in_and_out_endpoints(struct usb_interface *interface,
struct rsi_hw *adapter)
{
struct rsi_91x_usbdev *dev = (struct rsi_91x_usbdev *)adapter->rsi_dev;
struct usb_host_interface *iface_desc;
struct usb_endpoint_descriptor *endpoint;
__le16 buffer_size;
int ii, bin_found = 0, bout_found = 0;
iface_desc = &(interface->altsetting[0]);
for (ii = 0; ii < iface_desc->desc.bNumEndpoints; ++ii) {
endpoint = &(iface_desc->endpoint[ii].desc);
if (!dev->bulkin_endpoint_addr[bin_found] &&
(endpoint->bEndpointAddress & USB_DIR_IN) &&
((endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
USB_ENDPOINT_XFER_BULK)) {
buffer_size = endpoint->wMaxPacketSize;
dev->bulkin_size[bin_found] = buffer_size;
dev->bulkin_endpoint_addr[bin_found] =
endpoint->bEndpointAddress;
bin_found++;
}
if (!dev->bulkout_endpoint_addr[bout_found] &&
!(endpoint->bEndpointAddress & USB_DIR_IN) &&
((endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
USB_ENDPOINT_XFER_BULK)) {
buffer_size = endpoint->wMaxPacketSize;
dev->bulkout_endpoint_addr[bout_found] =
endpoint->bEndpointAddress;
dev->bulkout_size[bout_found] = buffer_size;
bout_found++;
}
if (bin_found >= MAX_BULK_EP || bout_found >= MAX_BULK_EP)
break;
}
if (!(dev->bulkin_endpoint_addr[0]) &&
dev->bulkout_endpoint_addr[0])
return -EINVAL;
return 0;
}
| 0 |
[
"CWE-415"
] |
wireless-drivers
|
8b51dc7291473093c821195c4b6af85fadedbc2f
| 338,458,988,630,530,030,000,000,000,000,000,000,000 | 46 |
rsi: fix a double free bug in rsi_91x_deinit()
`dev` (struct rsi_91x_usbdev *) field of adapter
(struct rsi_hw *) is allocated and initialized in
`rsi_init_usb_interface`. If any error is detected in information
read from the device side, `dev` will be freed in
`rsi_init_usb_interface`. However, in the higher level error handling
code in `rsi_probe`, if an error is detected, `rsi_91x_deinit` is
called again, in which `dev` will be freed again, resulting in a
double free.
This patch fixes the double free by removing the free operation on
`dev` in `rsi_init_usb_interface`, because `rsi_91x_deinit` is also
used in `rsi_disconnect`; in that code path, the `dev` field has not
been freed (and thus needs to be freed there).
This bug was found in v4.19, but is also present in the latest version
of kernel. Fixes CVE-2019-15504.
Reported-by: Hui Peng <[email protected]>
Reported-by: Mathias Payer <[email protected]>
Signed-off-by: Hui Peng <[email protected]>
Reviewed-by: Guenter Roeck <[email protected]>
Signed-off-by: Kalle Valo <[email protected]>
|
unsigned short lebytes2ushort(const u8 *buf)
{
if (buf == NULL)
return 0U;
return (unsigned short)buf[1] << 8 | (unsigned short)buf[0];
}
| 0 |
[
"CWE-415",
"CWE-119"
] |
OpenSC
|
360e95d45ac4123255a4c796db96337f332160ad
| 233,886,731,602,735,800,000,000,000,000,000,000,000 | 6 |
fixed out of bounds writes
Thanks to Eric Sesterhenn from X41 D-SEC GmbH
for reporting the problems.
|
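lebytes2ushort() above can only check for NULL, so a caller holding a 1-byte buffer still reads out of bounds. A sketch of a length-aware variant (our illustration, not OpenSC's fix):

```c
#include <stddef.h>
#include <stdio.h>

typedef unsigned char u8;

static int lebytes2ushort_checked(const u8 *buf, size_t len,
                                  unsigned short *out)
{
    if (buf == NULL || len < 2)
        return -1;                      /* caller must handle the error */
    *out = (unsigned short)((unsigned short)buf[1] << 8 | buf[0]);
    return 0;
}

int main(void)
{
    u8 short_buf[1] = { 0xAA };
    unsigned short v;

    if (lebytes2ushort_checked(short_buf, sizeof(short_buf), &v) != 0)
        puts("rejected truncated buffer");
    return 0;
}
```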
static int ext4_commit_super(struct super_block *sb, int sync)
{
struct ext4_super_block *es = EXT4_SB(sb)->s_es;
struct buffer_head *sbh = EXT4_SB(sb)->s_sbh;
int error = 0;
if (!sbh || block_device_ejected(sb))
return error;
if (buffer_write_io_error(sbh)) {
/*
* Oh, dear. A previous attempt to write the
* superblock failed. This could happen because the
* USB device was yanked out. Or it could happen to
* be a transient write error and maybe the block will
* be remapped. Nothing we can do but to retry the
* write and hope for the best.
*/
ext4_msg(sb, KERN_ERR, "previous I/O error to "
"superblock detected");
clear_buffer_write_io_error(sbh);
set_buffer_uptodate(sbh);
}
/*
* If the file system is mounted read-only, don't update the
* superblock write time. This avoids updating the superblock
* write time when we are mounting the root file system
* read/only but we need to replay the journal; at that point,
* for people who are east of GMT and who make their clock
* tick in localtime for Windows bug-for-bug compatibility,
* the clock is set in the future, and this will cause e2fsck
* to complain and force a full file system check.
*/
if (!(sb->s_flags & MS_RDONLY))
es->s_wtime = cpu_to_le32(get_seconds());
if (sb->s_bdev->bd_part)
es->s_kbytes_written =
cpu_to_le64(EXT4_SB(sb)->s_kbytes_written +
((part_stat_read(sb->s_bdev->bd_part, sectors[1]) -
EXT4_SB(sb)->s_sectors_written_start) >> 1));
else
es->s_kbytes_written =
cpu_to_le64(EXT4_SB(sb)->s_kbytes_written);
if (percpu_counter_initialized(&EXT4_SB(sb)->s_freeclusters_counter))
ext4_free_blocks_count_set(es,
EXT4_C2B(EXT4_SB(sb), percpu_counter_sum_positive(
&EXT4_SB(sb)->s_freeclusters_counter)));
if (percpu_counter_initialized(&EXT4_SB(sb)->s_freeinodes_counter))
es->s_free_inodes_count =
cpu_to_le32(percpu_counter_sum_positive(
&EXT4_SB(sb)->s_freeinodes_counter));
BUFFER_TRACE(sbh, "marking dirty");
ext4_superblock_csum_set(sb);
mark_buffer_dirty(sbh);
if (sync) {
error = __sync_dirty_buffer(sbh,
test_opt(sb, BARRIER) ? WRITE_FUA : WRITE_SYNC);
if (error)
return error;
error = buffer_write_io_error(sbh);
if (error) {
ext4_msg(sb, KERN_ERR, "I/O error while writing "
"superblock");
clear_buffer_write_io_error(sbh);
set_buffer_uptodate(sbh);
}
}
return error;
}
| 0 |
[
"CWE-362"
] |
linux
|
ea3d7209ca01da209cda6f0dea8be9cc4b7a933b
| 163,819,299,675,861,140,000,000,000,000,000,000,000 | 69 |
ext4: fix races between page faults and hole punching
Currently, page faults and hole punching are completely unsynchronized.
This can result in page fault faulting in a page into a range that we
are punching after truncate_pagecache_range() has been called and thus
we can end up with a page mapped to disk blocks that will be shortly
freed. Filesystem corruption will shortly follow. Note that the same
race is avoided for truncate by checking page fault offset against
i_size but there isn't similar mechanism available for punching holes.
Fix the problem by creating new rw semaphore i_mmap_sem in inode and
grab it for writing over truncate, hole punching, and other functions
removing blocks from extent tree and for read over page faults. We
cannot easily use i_data_sem for this since that ranks below transaction
start and we need something ranking above it so that it can be held over
the whole truncate / hole punching operation. Also remove various
workarounds we had in the code to reduce race window when page fault
could have created pages with stale mapping information.
Signed-off-by: Jan Kara <[email protected]>
Signed-off-by: Theodore Ts'o <[email protected]>
|
static oidc_provider_t* oidc_get_provider_for_issuer(request_rec *r,
oidc_cfg *c, const char *issuer, apr_byte_t allow_discovery) {
/* by default we'll assume that we're dealing with a single statically configured OP */
oidc_provider_t *provider = NULL;
if (oidc_provider_static_config(r, c, &provider) == FALSE)
return NULL;
/* unless a metadata directory was configured, so we'll try and get the provider settings from there */
if (c->metadata_dir != NULL) {
/* try and get metadata from the metadata directory for the OP that sent this response */
if ((oidc_metadata_get(r, c, issuer, &provider, allow_discovery)
== FALSE) || (provider == NULL)) {
/* don't know nothing about this OP/issuer */
oidc_error(r, "no provider metadata found for issuer \"%s\"",
issuer);
return NULL;
}
}
return provider;
}
| 0 |
[
"CWE-79"
] |
mod_auth_openidc
|
55ea0a085290cd2c8cdfdd960a230cbc38ba8b56
| 332,160,498,789,468,450,000,000,000,000,000,000,000 | 25 |
Add a function to escape Javascript characters
|
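A sketch of the kind of helper the commit message refers to: escaping characters that would let attacker-controlled data break out of a Javascript string literal. The function below is illustrative, not mod_auth_openidc's actual implementation:

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *escape_js(const char *in)
{
    /* Worst case every byte becomes a 4-char "\xHH" sequence. */
    size_t n = strlen(in);
    char *out = malloc(4 * n + 1);
    char *p = out;

    if (out == NULL)
        return NULL;
    for (const unsigned char *s = (const unsigned char *)in; *s; s++) {
        switch (*s) {
        case '"': case '\'': case '\\': case '/':
            *p++ = '\\'; *p++ = (char)*s; break;
        case '\n': *p++ = '\\'; *p++ = 'n'; break;
        case '\r': *p++ = '\\'; *p++ = 'r'; break;
        case '<':  /* keeps "</script>" from terminating the block */
            p += sprintf(p, "\\x3c"); break;
        default:
            if (*s < 0x20)
                p += sprintf(p, "\\x%02x", (unsigned)*s);
            else
                *p++ = (char)*s;
        }
    }
    *p = '\0';
    return out;
}

int main(void)
{
    char *e = escape_js("\"></script><script>alert(1)</script>");
    if (e != NULL)
        printf("%s\n", e);
    free(e);
    return 0;
}
```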
Testable(bool isAssociative, bool isCommutative)
: ExpressionNary(
boost::intrusive_ptr<ExpressionContextForTest>(new ExpressionContextForTest())),
_isAssociative(isAssociative),
_isCommutative(isCommutative) {}
| 0 |
[
"CWE-835"
] |
mongo
|
0a076417d1d7fba3632b73349a1fd29a83e68816
| 168,045,763,235,657,730,000,000,000,000,000,000,000 | 5 |
SERVER-38070 fix infinite loop in agg expression
|
day_to_sec(VALUE d)
{
if (safe_mul_p(d, DAY_IN_SECONDS))
return LONG2FIX(FIX2LONG(d) * DAY_IN_SECONDS);
return f_mul(d, INT2FIX(DAY_IN_SECONDS));
}
| 0 |
[] |
date
|
3959accef8da5c128f8a8e2fd54e932a4fb253b0
| 173,262,039,552,910,100,000,000,000,000,000,000,000 | 6 |
Add length limit option for methods that parses date strings
`Date.parse` now raises an ArgumentError when a given date string is
longer than 128. You can configure the limit by giving `limit` keyword
arguments like `Date.parse(str, limit: 1000)`. If you pass `limit: nil`,
the limit is disabled.
Not only `Date.parse` but also the following methods are changed.
* Date._parse
* Date.parse
* DateTime.parse
* Date._iso8601
* Date.iso8601
* DateTime.iso8601
* Date._rfc3339
* Date.rfc3339
* DateTime.rfc3339
* Date._xmlschema
* Date.xmlschema
* DateTime.xmlschema
* Date._rfc2822
* Date.rfc2822
* DateTime.rfc2822
* Date._rfc822
* Date.rfc822
* DateTime.rfc822
* Date._jisx0301
* Date.jisx0301
* DateTime.jisx0301
|
TEST_F(OwnedImplTest, Prepend) {
const std::string suffix = "World!", prefix = "Hello, ";
Buffer::OwnedImpl buffer;
buffer.add(suffix);
buffer.prepend(prefix);
EXPECT_EQ(suffix.size() + prefix.size(), buffer.length());
EXPECT_EQ(prefix + suffix, buffer.toString());
// Prepend a large string that will only partially fit in the space remaining
// at the front of the buffer.
std::string big_prefix;
big_prefix.reserve(16385);
for (unsigned i = 0; i < 16; i++) {
big_prefix += std::string(1024, 'A' + i);
}
big_prefix.push_back('-');
buffer.prepend(big_prefix);
EXPECT_EQ(big_prefix.size() + prefix.size() + suffix.size(), buffer.length());
EXPECT_EQ(big_prefix + prefix + suffix, buffer.toString());
}
| 0 |
[
"CWE-401"
] |
envoy
|
5eba69a1f375413fb93fab4173f9c393ac8c2818
| 252,610,032,626,031,200,000,000,000,000,000,000,000 | 21 |
[buffer] Add on-drain hook to buffer API and use it to avoid fragmentation due to tracking of H2 data and control frames in the output buffer (#144)
Signed-off-by: antonio <[email protected]>
|
xsltParseTemplateContent(xsltStylesheetPtr style, xmlNodePtr templ) {
xmlNodePtr cur, delete;
/*
* This content comes from the stylesheet
* For stylesheets, the set of whitespace-preserving
* element names consists of just xsl:text.
*/
cur = templ->children;
delete = NULL;
while (cur != NULL) {
if (delete != NULL) {
#ifdef WITH_XSLT_DEBUG_BLANKS
xsltGenericDebug(xsltGenericDebugContext,
"xsltParseTemplateContent: removing text\n");
#endif
xmlUnlinkNode(delete);
xmlFreeNode(delete);
delete = NULL;
}
if (IS_XSLT_ELEM(cur)) {
if (IS_XSLT_NAME(cur, "text")) {
/*
* TODO: Processing of xsl:text should be moved to
* xsltPrecomputeStylesheet(), since otherwise this
* will be performed for every multiply included
* stylesheet; i.e. this here is not skipped with
* the use of the style->nopreproc flag.
*/
if (cur->children != NULL) {
xmlChar *prop;
xmlNodePtr text = cur->children, next;
int noesc = 0;
prop = xmlGetNsProp(cur,
(const xmlChar *)"disable-output-escaping",
NULL);
if (prop != NULL) {
#ifdef WITH_XSLT_DEBUG_PARSING
xsltGenericDebug(xsltGenericDebugContext,
"Disable escaping: %s\n", text->content);
#endif
if (xmlStrEqual(prop, (const xmlChar *)"yes")) {
noesc = 1;
} else if (!xmlStrEqual(prop,
(const xmlChar *)"no")){
xsltTransformError(NULL, style, cur,
"xsl:text: disable-output-escaping allows only yes or no\n");
style->warnings++;
}
xmlFree(prop);
}
while (text != NULL) {
if (text->type == XML_COMMENT_NODE) {
text = text->next;
continue;
}
if ((text->type != XML_TEXT_NODE) &&
(text->type != XML_CDATA_SECTION_NODE)) {
xsltTransformError(NULL, style, cur,
"xsltParseTemplateContent: xslt:text content problem\n");
style->errors++;
break;
}
if ((noesc) && (text->type != XML_CDATA_SECTION_NODE))
text->name = xmlStringTextNoenc;
text = text->next;
}
/*
* replace xsl:text by the list of childs
*/
if (text == NULL) {
text = cur->children;
while (text != NULL) {
if ((style->internalized) &&
(text->content != NULL) &&
(!xmlDictOwns(style->dict, text->content))) {
/*
* internalize the text string
*/
if (text->doc->dict != NULL) {
const xmlChar *tmp;
tmp = xmlDictLookup(text->doc->dict,
text->content, -1);
if (tmp != text->content) {
xmlNodeSetContent(text, NULL);
text->content = (xmlChar *) tmp;
}
}
}
next = text->next;
xmlUnlinkNode(text);
xmlAddPrevSibling(cur, text);
text = next;
}
}
}
delete = cur;
goto skip_children;
}
}
else if ((cur->ns != NULL) && (style->nsDefs != NULL) &&
(xsltCheckExtPrefix(style, cur->ns->prefix)))
{
/*
* okay this is an extension element compile it too
*/
xsltStylePreCompute(style, cur);
}
else if (cur->type == XML_ELEMENT_NODE)
{
/*
* This is an element which will be output as part of the
* template exectution, precompile AVT if found.
*/
if ((cur->ns == NULL) && (style->defaultAlias != NULL)) {
cur->ns = xmlSearchNsByHref(cur->doc, cur,
style->defaultAlias);
}
if (cur->properties != NULL) {
xmlAttrPtr attr = cur->properties;
while (attr != NULL) {
xsltCompileAttr(style, attr);
attr = attr->next;
}
}
}
/*
* Skip to next node
*/
if (cur->children != NULL) {
if (cur->children->type != XML_ENTITY_DECL) {
cur = cur->children;
continue;
}
}
skip_children:
if (cur->next != NULL) {
cur = cur->next;
continue;
}
do {
cur = cur->parent;
if (cur == NULL)
break;
if (cur == templ) {
cur = NULL;
break;
}
if (cur->next != NULL) {
cur = cur->next;
break;
}
} while (cur != NULL);
}
if (delete != NULL) {
#ifdef WITH_XSLT_DEBUG_PARSING
xsltGenericDebug(xsltGenericDebugContext,
"xsltParseTemplateContent: removing text\n");
#endif
xmlUnlinkNode(delete);
xmlFreeNode(delete);
delete = NULL;
}
/*
* Skip the first params
*/
cur = templ->children;
while (cur != NULL) {
if ((IS_XSLT_ELEM(cur)) && (!(IS_XSLT_NAME(cur, "param"))))
break;
cur = cur->next;
}
/*
* Browse the remainder of the template
*/
while (cur != NULL) {
if ((IS_XSLT_ELEM(cur)) && (IS_XSLT_NAME(cur, "param"))) {
xmlNodePtr param = cur;
xsltTransformError(NULL, style, cur,
"xsltParseTemplateContent: ignoring misplaced param element\n");
if (style != NULL) style->warnings++;
cur = cur->next;
xmlUnlinkNode(param);
xmlFreeNode(param);
} else
break;
}
}
| 0 |
[] |
libxslt
|
7089a62b8f133b42a2981cf1f920a8b3fe9a8caa
| 273,424,451,458,027,360,000,000,000,000,000,000,000 | 199 |
Crash compiling stylesheet with DTD
* libxslt/xslt.c: when a stylesheet embbeds a DTD the compilation
process could get seriously wrong
|
static BROTLI_INLINE void ProcessRepeatedCodeLength(uint32_t code_len,
uint32_t repeat_delta, uint32_t alphabet_size, uint32_t* symbol,
uint32_t* repeat, uint32_t* space, uint32_t* prev_code_len,
uint32_t* repeat_code_len, uint16_t* symbol_lists,
uint16_t* code_length_histo, int* next_symbol) {
uint32_t old_repeat;
uint32_t extra_bits = 3; /* for BROTLI_REPEAT_ZERO_CODE_LENGTH */
uint32_t new_len = 0; /* for BROTLI_REPEAT_ZERO_CODE_LENGTH */
if (code_len == BROTLI_REPEAT_PREVIOUS_CODE_LENGTH) {
new_len = *prev_code_len;
extra_bits = 2;
}
if (*repeat_code_len != new_len) {
*repeat = 0;
*repeat_code_len = new_len;
}
old_repeat = *repeat;
if (*repeat > 0) {
*repeat -= 2;
*repeat <<= extra_bits;
}
*repeat += repeat_delta + 3U;
repeat_delta = *repeat - old_repeat;
if (*symbol + repeat_delta > alphabet_size) {
BROTLI_DUMP();
*symbol = alphabet_size;
*space = 0xFFFFF;
return;
}
BROTLI_LOG(("[ReadHuffmanCode] code_length[%d..%d] = %d\n",
(int)*symbol, (int)(*symbol + repeat_delta - 1), (int)*repeat_code_len));
if (*repeat_code_len != 0) {
unsigned last = *symbol + repeat_delta;
int next = next_symbol[*repeat_code_len];
do {
symbol_lists[next] = (uint16_t)*symbol;
next = (int)*symbol;
} while (++(*symbol) != last);
next_symbol[*repeat_code_len] = next;
*space -= repeat_delta << (15 - *repeat_code_len);
code_length_histo[*repeat_code_len] =
(uint16_t)(code_length_histo[*repeat_code_len] + repeat_delta);
} else {
*symbol += repeat_delta;
}
}
| 0 |
[
"CWE-120"
] |
brotli
|
223d80cfbec8fd346e32906c732c8ede21f0cea6
| 198,976,243,104,938,780,000,000,000,000,000,000,000 | 46 |
Update (#826)
* IMPORTANT: decoder: fix potential overflow when input chunk is >2GiB
* simplify max Huffman table size calculation
* eliminate symbol duplicates (static arrays in .h files)
* minor combing in research/ code
|
void ext4_mb_prefetch_fini(struct super_block *sb, ext4_group_t group,
unsigned int nr)
{
while (nr-- > 0) {
struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group,
NULL);
struct ext4_group_info *grp = ext4_get_group_info(sb, group);
if (!group)
group = ext4_get_groups_count(sb);
group--;
grp = ext4_get_group_info(sb, group);
if (EXT4_MB_GRP_NEED_INIT(grp) &&
ext4_free_group_clusters(sb, gdp) > 0 &&
!(ext4_has_group_desc_csum(sb) &&
(gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)))) {
if (ext4_mb_init_group(sb, group, GFP_NOFS))
break;
}
}
}
| 0 |
[
"CWE-703"
] |
linux
|
ce9f24cccdc019229b70a5c15e2b09ad9c0ab5d1
| 63,553,547,250,638,680,000,000,000,000,000,000,000 | 22 |
ext4: check journal inode extents more carefully
Currently, system zones just track ranges of block, that are "important"
fs metadata (bitmaps, group descriptors, journal blocks, etc.). This
however complicates how extent tree (or indirect blocks) can be checked
for inodes that actually track such metadata - currently the journal
inode but arguably we should be treating quota files or resize inode
similarly. We cannot run __ext4_ext_check() on such metadata inodes when
loading their extents as that would immediately trigger the validity
checks and so we just hack around that and special-case the journal
inode. This however leads to a situation that a journal inode which has
extent tree of depth at least one can have invalid extent tree that gets
unnoticed until ext4_cache_extents() crashes.
To overcome this limitation, track inode number each system zone belongs
to (0 is used for zones not belonging to any inode). We can then verify
inode number matches the expected one when verifying extent tree and
thus avoid the false errors. With this there's no need to
special-case the journal inode during extent tree checking anymore, so remove
it.
Fixes: 0a944e8a6c66 ("ext4: don't perform block validity checks on the journal inode")
Reported-by: Wolfgang Frisch <[email protected]>
Reviewed-by: Lukas Czerner <[email protected]>
Signed-off-by: Jan Kara <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Theodore Ts'o <[email protected]>
|
dummy_symbol_get (location loc)
{
/* Incremented for each generated symbol. */
static int dummy_count = 0;
char buf[32];
int len = snprintf (buf, sizeof buf, "$@%d", ++dummy_count);
assure (len < sizeof buf);
symbol *sym = symbol_get (buf, loc);
sym->content->class = nterm_sym;
return sym;
}
| 0 |
[] |
bison
|
b7aab2dbad43aaf14eebe78d54aafa245a000988
| 236,232,666,304,269,600,000,000,000,000,000,000,000 | 11 |
fix: crash when redefining the EOF token
Reported by Agency for Defense Development.
https://lists.gnu.org/r/bug-bison/2020-08/msg00008.html
On an input such as
%token FOO
BAR
FOO 0
%%
input: %empty
we crash because when we find FOO 0, we decrement ntokens (since FOO
was discovered to be EOF, which is already known to be a token, so we
increment ntokens for it, and need to cancel this). This "works well"
when EOF is properly defined in one go, but here it is first defined
and later only assign token code 0. In the meanwhile BAR was given
the token number that we just decremented.
To fix this, assign symbol numbers after parsing, not during parsing,
so that we also saw all the explicit token codes. To maintain the
current numbers (I'd like to keep no difference in the output, not
just equivalence), we need to make sure the symbols are numbered in
the same order: that of appearance in the source file. So we need the
locations to be correct, which was almost the case, except for nterms
that appeared several times as LHS (i.e., several times as "foo:
..."). Fixing the use of location_of_lhs sufficed (it appears it was
intended for this use, but its implementation was unfinished: it was
always set to "false" only).
* src/symtab.c (symbol_location_as_lhs_set): Update location_of_lhs.
(symbol_code_set): Remove broken hack that decremented ntokens.
(symbol_class_set, dummy_symbol_get): Don't set number, ntokens and
nnterms.
(symbol_check_defined): Do it.
(symbols): Don't count nsyms here.
Actually, don't count nsyms at all: let it be done in...
* src/reader.c (check_and_convert_grammar): here. Define nsyms from
ntokens and nnterms after parsing.
* tests/input.at (EOF redeclared): New.
* examples/c/bistromathic/bistromathic.test: Adjust the traces: in
"%nterm <double> exp %% input: ...", exp used to be numbered before
input.
|
static void p54u_disconnect(struct usb_interface *intf)
{
struct ieee80211_hw *dev = usb_get_intfdata(intf);
struct p54u_priv *priv;
if (!dev)
return;
priv = dev->priv;
wait_for_completion(&priv->fw_wait_load);
p54_unregister_common(dev);
usb_put_dev(interface_to_usbdev(intf));
release_firmware(priv->fw);
p54_free_common(dev);
}
| 1 |
[
"CWE-416"
] |
linux
|
6e41e2257f1094acc37618bf6c856115374c6922
| 111,153,236,960,557,930,000,000,000,000,000,000,000 | 16 |
p54usb: Fix race between disconnect and firmware loading
The syzbot fuzzer found a bug in the p54 USB wireless driver. The
issue involves a race between disconnect and the firmware-loader
callback routine, and it has several aspects.
One big problem is that when the firmware can't be loaded, the
callback routine tries to unbind the driver from the USB _device_ (by
calling device_release_driver) instead of from the USB _interface_ to
which it is actually bound (by calling usb_driver_release_interface).
The race involves access to the private data structure. The driver's
disconnect handler waits for a completion that is signalled by the
firmware-loader callback routine. As soon as the completion is
signalled, you have to assume that the private data structure may have
been deallocated by the disconnect handler -- even if the firmware was
loaded without errors. However, the callback routine does access the
private data several times after that point.
Another problem is that, in order to ensure that the USB device
structure hasn't been freed when the callback routine runs, the driver
takes a reference to it. This isn't good enough any more, because now
that the callback routine calls usb_driver_release_interface, it has
to ensure that the interface structure hasn't been freed.
Finally, the driver takes an unnecessary reference to the USB device
structure in the probe function and drops the reference in the
disconnect handler. This extra reference doesn't accomplish anything,
because the USB core already guarantees that a device structure won't
be deallocated while a driver is still bound to any of its interfaces.
To fix these problems, this patch makes the following changes:
Call usb_driver_release_interface() rather than
device_release_driver().
Don't signal the completion until after the important
information has been copied out of the private data structure,
and don't refer to the private data at all thereafter.
Lock udev (the interface's parent) before unbinding the driver
instead of locking udev->parent.
During the firmware loading process, take a reference to the
USB interface instead of the USB device.
Don't take an unnecessary reference to the device during probe
(and then don't drop it during disconnect).
Signed-off-by: Alan Stern <[email protected]>
Reported-and-tested-by: [email protected]
CC: <[email protected]>
Acked-by: Christian Lamparter <[email protected]>
Signed-off-by: Kalle Valo <[email protected]>
|
uint64_t getSize() {
return m_zipStat.size;
}
| 0 |
[
"CWE-22"
] |
hhvm
|
65c95a01541dd2fbc9c978ac53bed235b5376686
| 274,800,441,058,066,400,000,000,000,000,000,000,000 | 3 |
ZipArchive::extractTo bug 70350
Summary: Don't allow upward directory traversal when extracting zip archive files.
Files in zip files with `..` or starting at main root `/` should be normalized
to something where the file being extracted winds up within the directory or
a subdirectory where the actual extraction is taking place.
http://git.php.net/?p=php-src.git;a=commit;h=f9c2bf73adb2ede0a486b0db466c264f2b27e0bb
Reviewed By: FBNeal
Differential Revision: D2798452
fb-gh-sync-id: 844549c93e011d1e991bb322bf85822246b04e30
shipit-source-id: 844549c93e011d1e991bb322bf85822246b04e30
|
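The core of the fix is refusing (or normalizing) entry names that would escape the extraction directory. A minimal containment check, ours rather than HHVM's:

```c
#include <stdio.h>
#include <string.h>

/* Returns 1 if `entry` is safe to join onto the extraction root. */
static int zip_entry_is_safe(const char *entry)
{
    if (entry[0] == '/')
        return 0;                              /* absolute path */
    for (const char *p = entry; (p = strstr(p, "..")) != NULL; p += 2) {
        int at_start = (p == entry) || (p[-1] == '/');
        int at_end   = (p[2] == '\0') || (p[2] == '/');
        if (at_start && at_end)
            return 0;                          /* ".." path component */
    }
    return 1;
}

int main(void)
{
    const char *names[] = { "docs/readme.txt", "../../etc/passwd",
                            "/etc/passwd", "a..b/ok.txt" };

    for (size_t i = 0; i < sizeof(names) / sizeof(names[0]); i++)
        printf("%-20s %s\n", names[i],
               zip_entry_is_safe(names[i]) ? "extract" : "reject");
    return 0;
}
```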
R_API void r_bin_java_print_rtv_annotations_attr_summary(RBinJavaAttrInfo *attr) {
if (attr && attr->type == R_BIN_JAVA_ATTR_TYPE_RUNTIME_VISIBLE_ANNOTATION_ATTR) {
Eprintf ("Runtime Visible Annotations Attribute Information:\n");
Eprintf (" Attribute Offset: 0x%08"PFMT64x "\n", attr->file_offset);
Eprintf (" Attribute Name Index: %d (%s)\n", attr->name_idx, attr->name);
Eprintf (" Attribute Length: %d\n", attr->length);
r_bin_java_print_annotation_array_summary (&attr->info.annotation_array);
}
}
| 0 |
[
"CWE-787"
] |
radare2
|
9650e3c352f675687bf6c6f65ff2c4a3d0e288fa
| 70,545,750,060,809,140,000,000,000,000,000,000,000 | 9 |
Fix oobread segfault in java arith8.class ##crash
* Reported by Cen Zhang via huntr.dev
|
int streamDecrID(streamID *id) {
int ret = C_OK;
if (id->seq == 0) {
if (id->ms == 0) {
/* Special case where 'id' is the first possible streamID... */
id->ms = id->seq = UINT64_MAX;
ret = C_ERR;
} else {
id->ms--;
id->seq = UINT64_MAX;
}
} else {
id->seq--;
}
return ret;
}
| 0 |
[
"CWE-703",
"CWE-401"
] |
redis
|
4a7a4e42db8ff757cdf3f4a824f66426036034ef
| 143,793,837,366,047,700,000,000,000,000,000,000,000 | 16 |
Fix memory leak in streamGetEdgeID (#10753)
si is initialized by streamIteratorStart(); we should call
streamIteratorStop() on it when done.
regression introduced in #9127 (redis 7.0)
|
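The leak came from an early-return path that skipped the iterator's stop call. In C this start/stop pairing is usually enforced with a single cleanup label; a sketch with illustrative names, not redis's code:

```c
#include <stdio.h>

struct iter { int started; };

static void iter_start(struct iter *it) { it->started = 1; }
static void iter_stop(struct iter *it)  { it->started = 0; } /* releases resources */
static int  iter_next(struct iter *it)  { (void)it; return 0; /* no data */ }

static int get_edge(struct iter *it, long *out)
{
    int ret = -1;

    iter_start(it);
    if (!iter_next(it))
        goto done;          /* early exits still reach iter_stop() */
    *out = 42;
    ret = 0;
done:
    iter_stop(it);          /* the call the buggy path was missing */
    return ret;
}

int main(void)
{
    struct iter it;
    long v;
    printf("%d\n", get_edge(&it, &v));
    return 0;
}
```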
vips_foreign_find_save_buffer_sub( VipsForeignSaveClass *save_class,
const char *suffix )
{
VipsObjectClass *object_class = VIPS_OBJECT_CLASS( save_class );
VipsForeignClass *class = VIPS_FOREIGN_CLASS( save_class );
if( class->suffs &&
vips_ispostfix( object_class->nickname, "_buffer" ) &&
vips_filename_suffix_match( suffix, class->suffs ) )
return( save_class );
return( NULL );
}
| 0 |
[
"CWE-362",
"CWE-476"
] |
libvips
|
20d840e6da15c1574b3ed998bc92f91d1e36c2a5
| 2,411,317,420,413,855,400,000,000,000,000,000,000 | 13 |
fix a crash with delayed load
If a delayed load failed, it could leave the pipeline only half-set up.
Sebsequent threads could then segv.
Set a load-has-failed flag and test before generate.
See https://github.com/jcupitt/libvips/issues/893
|
static inline uint8_t ide_atapi_set_profile(uint8_t *buf, uint8_t *index,
uint16_t profile)
{
uint8_t *buf_profile = buf + 12; /* start of profiles */
buf_profile += ((*index) * 4); /* start of indexed profile */
stw_be_p(buf_profile, profile);
buf_profile[2] = ((buf_profile[0] == buf[6]) && (buf_profile[1] == buf[7]));
/* each profile adds 4 bytes to the response */
(*index)++;
buf[11] += 4; /* Additional Length */
return 4;
}
| 0 |
[
"CWE-125"
] |
qemu
|
813212288970c39b1800f63e83ac6e96588095c6
| 69,581,906,376,638,060,000,000,000,000,000,000,000 | 15 |
ide: atapi: assert that the buffer pointer is in range
A case was reported where s->io_buffer_index can be out of range.
The report skimped on the details but it seems to be triggered
by s->lba == -1 on the READ/READ CD paths (e.g. by sending an
ATAPI command with LBA = 0xFFFFFFFF). For now paper over it
with assertions. The first one ensures that there is no overflow
when incrementing s->io_buffer_index, the second checks for the
buffer overrun.
Note that the buffer overrun is only a read, so I am not sure
if the assertion failure is actually less harmful than the overrun.
Signed-off-by: Paolo Bonzini <[email protected]>
Message-id: [email protected]
Reviewed-by: Kevin Wolf <[email protected]>
Signed-off-by: Peter Maydell <[email protected]>
|
static gboolean prpl_xfer_write(struct file_transfer *ft, char *buffer, unsigned int len)
{
struct prpl_xfer_data *px = ft->data;
if (write(px->fd, buffer, len) != len) {
imcb_file_canceled(px->ic, ft, "Error while writing temporary file");
return FALSE;
}
if (lseek(px->fd, 0, SEEK_CUR) >= ft->file_size) {
close(px->fd);
px->fd = -1;
purple_transfer_forward(ft);
imcb_file_finished(px->ic, ft);
px->ft = NULL;
} else {
px->timeout = b_timeout_add(0, purple_transfer_request_cb, ft);
}
return TRUE;
}
| 0 |
[
"CWE-476"
] |
bitlbee
|
30d598ce7cd3f136ee9d7097f39fa9818a272441
| 2,122,260,834,756,488,600,000,000,000,000,000,000 | 22 |
purple: Fix crash on ft requests from unknown contacts
Followup to 701ab81 (included in 3.5) which was a partial fix which only
improved things for non-libpurple file transfers (that is, just jabber)
|
PHP_FUNCTION(sqlite_array_query)
{
zval *zdb, *ent;
struct php_sqlite_db *db;
struct php_sqlite_result *rres;
char *sql;
int sql_len;
long mode = PHPSQLITE_BOTH;
char *errtext = NULL;
zend_bool decode_binary = 1;
zval *object = getThis();
if (object) {
if (FAILURE == zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s|lb", &sql, &sql_len, &mode, &decode_binary)) {
return;
}
DB_FROM_OBJECT(db, object);
} else {
if (FAILURE == zend_parse_parameters_ex(ZEND_PARSE_PARAMS_QUIET,
ZEND_NUM_ARGS() TSRMLS_CC, "sr|lb", &sql, &sql_len, &zdb, &mode, &decode_binary) &&
FAILURE == zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rs|lb", &zdb, &sql, &sql_len, &mode, &decode_binary)) {
return;
}
DB_FROM_ZVAL(db, &zdb);
}
PHP_SQLITE_EMPTY_QUERY;
/* avoid doing work if we can */
if (!return_value_used) {
db->last_err_code = sqlite_exec(db->db, sql, NULL, NULL, &errtext);
if (db->last_err_code != SQLITE_OK) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "%s", errtext);
sqlite_freemem(errtext);
}
return;
}
rres = (struct php_sqlite_result *)ecalloc(1, sizeof(*rres));
sqlite_query(NULL, db, sql, sql_len, (int)mode, 0, NULL, &rres, NULL TSRMLS_CC);
if (db->last_err_code != SQLITE_OK) {
if (rres) {
efree(rres);
}
RETURN_FALSE;
}
array_init(return_value);
while (rres->curr_row < rres->nrows) {
MAKE_STD_ZVAL(ent);
php_sqlite_fetch_array(rres, mode, decode_binary, 1, ent TSRMLS_CC);
add_next_index_zval(return_value, ent);
}
real_result_dtor(rres TSRMLS_CC);
}
| 0 |
[] |
php-src
|
ce96fd6b0761d98353761bf78d5bfb55291179fd
| 29,153,769,320,000,750,000,000,000,000,000,000,000 | 57 |
- fix #39863, do not accept paths with NULL in them. See http://news.php.net/php.internals/50191, trunk will have the patch later (adding a macro and/or changing (some) APIs). Patch by Rasmus
|
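The referenced fix rejects paths containing embedded NUL bytes: when a length-counted PHP string reaches C APIs that stop at '\0', a name like "safe.txt\0../../etc/passwd" silently truncates. A minimal sketch of the check:

```c
#include <stdio.h>
#include <string.h>

static int path_ok(const char *path, size_t len)
{
    return strlen(path) == len;     /* no embedded '\0' bytes */
}

int main(void)
{
    const char evil[] = "safe.txt\0/etc/passwd";
    size_t evil_len = sizeof(evil) - 1;     /* counted length includes the NUL */

    printf("%s\n", path_ok("safe.txt", 8) ? "ok" : "rejected");
    printf("%s\n", path_ok(evil, evil_len) ? "ok" : "rejected");
    return 0;
}
```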
authentic_pin_is_verified(struct sc_card *card, struct sc_pin_cmd_data *pin_cmd_data,
int *tries_left)
{
struct sc_context *ctx = card->ctx;
struct sc_pin_cmd_data pin_cmd;
int rv;
LOG_FUNC_CALLED(ctx);
if (pin_cmd_data->pin_type != SC_AC_CHV)
LOG_TEST_RET(ctx, SC_ERROR_NOT_SUPPORTED, "PIN type is not supported for the verification");
pin_cmd = *pin_cmd_data;
pin_cmd.pin1.data = (unsigned char *)"";
pin_cmd.pin1.len = 0;
rv = authentic_chv_verify(card, &pin_cmd, tries_left);
LOG_FUNC_RETURN(ctx, rv);
}
| 0 |
[
"CWE-125"
] |
OpenSC
|
8fe377e93b4b56060e5bbfb6f3142ceaeca744fa
| 210,550,774,027,554,000,000,000,000,000,000,000,000 | 20 |
fixed out of bounds reads
Thanks to Eric Sesterhenn from X41 D-SEC GmbH
for reporting and suggesting security fixes.
|
static int opdiv(RAsm *a, ut8 *data, const Opcode *op) {
int l = 0;
if ( op->operands[0].type & OT_QWORD ) {
data[l++] = 0x48;
}
switch (op->operands_count) {
case 1:
if ( op->operands[0].type & OT_WORD ) {
data[l++] = 0x66;
}
if (op->operands[0].type & OT_BYTE) {
data[l++] = 0xf6;
} else {
data[l++] = 0xf7;
}
if (op->operands[0].type & OT_MEMORY) {
data[l++] = 0x30 | op->operands[0].regs[0];
} else {
data[l++] = 0xf0 | op->operands[0].reg;
}
break;
default:
return -1;
}
return l;
}
| 0 |
[
"CWE-119",
"CWE-125",
"CWE-787"
] |
radare2
|
9b46d38dd3c4de6048a488b655c7319f845af185
| 120,735,182,947,055,490,000,000,000,000,000,000,000 | 27 |
Fix #12372 and #12373 - Crash in x86 assembler (#12380)
0 ,0,[bP-bL-bP-bL-bL-r-bL-bP-bL-bL-
mov ,0,[ax+Bx-ax+Bx-ax+ax+Bx-ax+Bx--
leA ,0,[bP-bL-bL-bP-bL-bP-bL-60@bL-
leA ,0,[bP-bL-r-bP-bL-bP-bL-60@bL-
mov ,0,[ax+Bx-ax+Bx-ax+ax+Bx-ax+Bx--
|
static uint64_t power_mem_read(void *opaque, hwaddr addr, unsigned size)
{
return 0;
}
| 0 |
[
"CWE-476"
] |
qemu
|
ad280559c68360c9f1cd7be063857853759e6a73
| 156,720,874,987,304,430,000,000,000,000,000,000,000 | 4 |
sun4u: add power_mem_read routine
Define skeleton 'power_mem_read' routine. Avoid NULL dereference.
Reported-by: Fakhri Zulkifli <[email protected]>
Signed-off-by: Prasad J Pandit <[email protected]>
Signed-off-by: Mark Cave-Ayland <[email protected]>
|
process_cmd_cmdaccheck(CMD_Request *msg, char *line)
{
IPAddr ip;
msg->command = htons(REQ_CMDACCHECK);
if (accheck_getaddr(line, &ip)) {
UTI_IPHostToNetwork(&ip, &msg->data.ac_check.ip);
return 1;
} else {
fprintf(stderr, "Could not read address\n");
return 0;
}
}
| 0 |
[
"CWE-189"
] |
chrony
|
7712455d9aa33d0db0945effaa07e900b85987b1
| 91,884,982,647,068,800,000,000,000,000,000,000,000 | 12 |
Fix buffer overflow when processing crafted command packets
When the length of the REQ_SUBNETS_ACCESSED, REQ_CLIENT_ACCESSES
command requests and the RPY_SUBNETS_ACCESSED, RPY_CLIENT_ACCESSES,
RPY_CLIENT_ACCESSES_BY_INDEX, RPY_MANUAL_LIST command replies is
calculated, the number of items stored in the packet is not validated.
A crafted command request/reply can be used to crash the server/client.
Only clients allowed by cmdallow (by default only localhost) can crash
the server.
With chrony versions 1.25 and 1.26 this bug has a smaller security
impact as the server requires the clients to be authenticated in order
to process the subnet and client accesses commands. In 1.27 and 1.28,
however, the invalid calculated length is included also in the
authentication check which may cause another crash.
|
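The fix validates the item count taken from the wire before using it in any length calculation. A sketch of that validation (ITEM_SIZE and MAX_ITEMS are assumed values, not chrony's):

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ITEM_SIZE  8
#define MAX_ITEMS  16          /* protocol limit, assumed for this sketch */

static int parse_items(const uint8_t *pkt, size_t pkt_len)
{
    uint32_t n_items;

    if (pkt_len < sizeof(n_items))
        return -1;
    memcpy(&n_items, pkt, sizeof(n_items));

    /* Both checks matter: the fixed protocol limit bounds the reply
     * buffer, and the remaining-bytes check bounds the read. */
    if (n_items > MAX_ITEMS ||
        (size_t)n_items * ITEM_SIZE > pkt_len - sizeof(n_items))
        return -1;

    return (int)n_items;
}

int main(void)
{
    uint8_t pkt[4] = { 0xff, 0xff, 0xff, 0xff };   /* claims ~4e9 items */
    printf("%d\n", parse_items(pkt, sizeof(pkt))); /* prints -1 */
    return 0;
}
```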
void diff_setup(struct diff_options *options)
{
memset(options, 0, sizeof(*options));
options->file = stdout;
options->line_termination = '\n';
options->break_opt = -1;
options->rename_limit = -1;
options->dirstat_percent = 3;
options->context = 3;
options->change = diff_change;
options->add_remove = diff_addremove;
if (diff_use_color_default > 0)
DIFF_OPT_SET(options, COLOR_DIFF);
else
DIFF_OPT_CLR(options, COLOR_DIFF);
options->detect_rename = diff_detect_rename_default;
options->a_prefix = "a/";
options->b_prefix = "b/";
}
| 0 |
[
"CWE-119"
] |
git
|
fd55a19eb1d49ae54008d932a65f79cd6fda45c9
| 92,553,199,690,681,660,000,000,000,000,000,000,000 | 23 |
Fix buffer overflow in git diff
If PATH_MAX on your system is smaller than a path stored, it may cause
buffer overflow and stack corruption in diff_addremove() and diff_change()
functions when running git-diff
Signed-off-by: Dmitry Potapov <[email protected]>
Signed-off-by: Junio C Hamano <[email protected]>
|
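The class of fix here is to stop copying paths into fixed PATH_MAX buffers and size the allocation to the actual input instead. A sketch, not git's actual code:

```c
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *prefix_path(const char *prefix, const char *path)
{
    size_t n = strlen(prefix) + strlen(path) + 1;
    char *buf = malloc(n);          /* sized to the real need ... */
    if (buf == NULL)
        return NULL;
    snprintf(buf, n, "%s%s", prefix, path);
    return buf;                     /* ... not a fixed char buf[PATH_MAX] */
}

int main(void)
{
    /* A path longer than PATH_MAX is legal content for a repository. */
    size_t len = PATH_MAX + 64;
    char *long_name = malloc(len + 1);
    if (long_name == NULL)
        return 1;
    memset(long_name, 'a', len);
    long_name[len] = '\0';

    char *p = prefix_path("b/", long_name);
    if (p != NULL)
        printf("built a %zu-byte path safely\n", strlen(p));
    free(p);
    free(long_name);
    return 0;
}
```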
static void ext4_mb_put_buddy_page_lock(struct ext4_buddy *e4b)
{
if (e4b->bd_bitmap_page) {
unlock_page(e4b->bd_bitmap_page);
put_page(e4b->bd_bitmap_page);
}
if (e4b->bd_buddy_page) {
unlock_page(e4b->bd_buddy_page);
put_page(e4b->bd_buddy_page);
}
}
| 0 |
[
"CWE-416"
] |
linux
|
8844618d8aa7a9973e7b527d038a2a589665002c
| 319,040,005,337,234,070,000,000,000,000,000,000,000 | 11 |
ext4: only look at the bg_flags field if it is valid
The bg_flags field in the block group descripts is only valid if the
uninit_bg or metadata_csum feature is enabled. We were not
consistently looking at this field; fix this.
Also block group #0 must never have uninitialized allocation bitmaps,
or need to be zeroed, since that's where the root inode, and other
special inodes are set up. Check for these conditions and mark the
file system as corrupted if they are detected.
This addresses CVE-2018-10876.
https://bugzilla.kernel.org/show_bug.cgi?id=199403
Signed-off-by: Theodore Ts'o <[email protected]>
Cc: [email protected]
|
void faad_rewindbits(bitfile *ld)
{
uint32_t tmp;
ld->bytes_left = ld->buffer_size;
if (ld->bytes_left >= 4)
{
tmp = getdword((uint32_t*)&ld->start[0]);
ld->bytes_left -= 4;
} else {
tmp = getdword_n((uint32_t*)&ld->start[0], ld->bytes_left);
ld->bytes_left = 0;
}
ld->bufa = tmp;
if (ld->bytes_left >= 4)
{
tmp = getdword((uint32_t*)&ld->start[1]);
ld->bytes_left -= 4;
} else {
tmp = getdword_n((uint32_t*)&ld->start[1], ld->bytes_left);
ld->bytes_left = 0;
}
ld->bufb = tmp;
ld->bits_left = 32;
ld->tail = &ld->start[2];
}
| 0 |
[
"CWE-119",
"CWE-787"
] |
faad2
|
942c3e0aee748ea6fe97cb2c1aa5893225316174
| 22,543,299,476,393,004,000,000,000,000,000,000,000 | 29 |
Fix a couple buffer overflows
https://hackerone.com/reports/502816
https://hackerone.com/reports/507858
https://github.com/videolan/vlc/blob/master/contrib/src/faad2/faad2-fix-overflows.patch
|
static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
{
unsigned long long margin;
margin = res_counter_margin(&memcg->res);
if (do_swap_account)
margin = min(margin, res_counter_margin(&memcg->memsw));
return margin >> PAGE_SHIFT;
}
| 0 |
[
"CWE-264"
] |
linux-2.6
|
1a5a9906d4e8d1976b701f889d8f35d54b928f25
| 30,328,061,113,518,905,000,000,000,000,000,000,000 | 9 |
mm: thp: fix pmd_bad() triggering in code paths holding mmap_sem read mode
In some cases it may happen that pmd_none_or_clear_bad() is called with
the mmap_sem hold in read mode. In those cases the huge page faults can
allocate hugepmds under pmd_none_or_clear_bad() and that can trigger a
false positive from pmd_bad() that will not like to see a pmd
materializing as trans huge.
It's not khugepaged causing the problem, khugepaged holds the mmap_sem
in write mode (and all those sites must hold the mmap_sem in read mode
to prevent pagetables to go away from under them, during code review it
seems vm86 mode on 32bit kernels requires that too unless it's
restricted to 1 thread per process or UP builds). The race is only with
the huge pagefaults that can convert a pmd_none() into a
pmd_trans_huge().
Effectively all these pmd_none_or_clear_bad() sites running with
mmap_sem in read mode are somewhat speculative with the page faults, and
the result is always undefined when they run simultaneously. This is
probably why it wasn't common to run into this. For example if the
madvise(MADV_DONTNEED) runs zap_page_range() shortly before the page
fault, the hugepage will not be zapped, if the page fault runs first it
will be zapped.
Altering pmd_bad() not to error out if it finds hugepmds won't be enough
to fix this, because zap_pmd_range would then proceed to call
zap_pte_range (which would be incorrect if the pmd become a
pmd_trans_huge()).
The simplest way to fix this is to read the pmd in the local stack
(regardless of what we read, no need of actual CPU barriers, only
compiler barrier needed), and be sure it is not changing under the code
that computes its value. Even if the real pmd is changing under the
value we hold on the stack, we don't care. If we actually end up in
zap_pte_range it means the pmd was not none already and it was not huge,
and it can't become huge from under us (khugepaged locking explained
above).
All we need is to enforce that there is no way anymore that in a code
path like below, pmd_trans_huge can be false, but pmd_none_or_clear_bad
can run into a hugepmd. The overhead of a barrier() is just a compiler
tweak and should not be measurable (I only added it for THP builds). I
don't exclude different compiler versions may have prevented the race
too by caching the value of *pmd on the stack (that hasn't been
verified, but it wouldn't be impossible considering
pmd_none_or_clear_bad, pmd_bad, pmd_trans_huge, pmd_none are all inlines
and there's no external function called in between pmd_trans_huge and
pmd_none_or_clear_bad).
if (pmd_trans_huge(*pmd)) {
if (next-addr != HPAGE_PMD_SIZE) {
VM_BUG_ON(!rwsem_is_locked(&tlb->mm->mmap_sem));
split_huge_page_pmd(vma->vm_mm, pmd);
} else if (zap_huge_pmd(tlb, vma, pmd, addr))
continue;
/* fall through */
}
if (pmd_none_or_clear_bad(pmd))
Because this race condition could be exercised without special
privileges this was reported in CVE-2012-1179.
The race was identified and fully explained by Ulrich who debugged it.
I'm quoting his accurate explanation below, for reference.
====== start quote =======
mapcount 0 page_mapcount 1
kernel BUG at mm/huge_memory.c:1384!
At some point prior to the panic, a "bad pmd ..." message similar to the
following is logged on the console:
mm/memory.c:145: bad pmd ffff8800376e1f98(80000000314000e7).
The "bad pmd ..." message is logged by pmd_clear_bad() before it clears
the page's PMD table entry.
143 void pmd_clear_bad(pmd_t *pmd)
144 {
-> 145 pmd_ERROR(*pmd);
146 pmd_clear(pmd);
147 }
After the PMD table entry has been cleared, there is an inconsistency
between the actual number of PMD table entries that are mapping the page
and the page's map count (_mapcount field in struct page). When the page
is subsequently reclaimed, __split_huge_page() detects this inconsistency.
1381 if (mapcount != page_mapcount(page))
1382 printk(KERN_ERR "mapcount %d page_mapcount %d\n",
1383 mapcount, page_mapcount(page));
-> 1384 BUG_ON(mapcount != page_mapcount(page));
The root cause of the problem is a race of two threads in a multithreaded
process. Thread B incurs a page fault on a virtual address that has never
been accessed (PMD entry is zero) while Thread A is executing an madvise()
system call on a virtual address within the same 2 MB (huge page) range.
          virtual address space
          .---------------------.
          |                     |
          |                     |
        .-|---------------------|
        | |                     |
        | |                     |<-- B(fault)
        | |                     |
   2 MB | |/////////////////////|-.
   huge < |/////////////////////|  > A(range)
   page | |/////////////////////|-'
        | |                     |
        | |                     |
        '-|---------------------|
          |                     |
          |                     |
          '---------------------'
- Thread A is executing an madvise(..., MADV_DONTNEED) system call
on the virtual address range "A(range)" shown in the picture.
sys_madvise
// Acquire the semaphore in shared mode.
down_read(&current->mm->mmap_sem)
...
madvise_vma
switch (behavior)
case MADV_DONTNEED:
madvise_dontneed
zap_page_range
unmap_vmas
unmap_page_range
zap_pud_range
zap_pmd_range
//
// Assume that this huge page has never been accessed.
// I.e. content of the PMD entry is zero (not mapped).
//
if (pmd_trans_huge(*pmd)) {
// We don't get here due to the above assumption.
}
//
// Assume that Thread B incurred a page fault and
.---------> // sneaks in here as shown below.
| //
| if (pmd_none_or_clear_bad(pmd))
| {
| if (unlikely(pmd_bad(*pmd)))
| pmd_clear_bad
| {
| pmd_ERROR
| // Log "bad pmd ..." message here.
| pmd_clear
| // Clear the page's PMD entry.
| // Thread B incremented the map count
| // in page_add_new_anon_rmap(), but
| // now the page is no longer mapped
| // by a PMD entry (-> inconsistency).
| }
| }
|
v
- Thread B is handling a page fault on virtual address "B(fault)" shown
in the picture.
...
do_page_fault
__do_page_fault
// Acquire the semaphore in shared mode.
down_read_trylock(&mm->mmap_sem)
...
handle_mm_fault
if (pmd_none(*pmd) && transparent_hugepage_enabled(vma))
// We get here due to the above assumption (PMD entry is zero).
do_huge_pmd_anonymous_page
alloc_hugepage_vma
// Allocate a new transparent huge page here.
...
__do_huge_pmd_anonymous_page
...
spin_lock(&mm->page_table_lock)
...
page_add_new_anon_rmap
// Here we increment the page's map count (starts at -1).
atomic_set(&page->_mapcount, 0)
set_pmd_at
// Here we set the page's PMD entry which will be cleared
// when Thread A calls pmd_clear_bad().
...
spin_unlock(&mm->page_table_lock)
The mmap_sem does not prevent the race because both threads are acquiring
it in shared mode (down_read). Thread B holds the page_table_lock while
the page's map count and PMD table entry are updated. However, Thread A
does not synchronize on that lock.
====== end quote =======
[[email protected]: checkpatch fixes]
Reported-by: Ulrich Obergfell <[email protected]>
Signed-off-by: Andrea Arcangeli <[email protected]>
Acked-by: Johannes Weiner <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: Hugh Dickins <[email protected]>
Cc: Dave Jones <[email protected]>
Acked-by: Larry Woodman <[email protected]>
Acked-by: Rik van Riel <[email protected]>
Cc: <[email protected]> [2.6.38+]
Cc: Mark Salter <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
TEST_P(Security, BuiltinAuthenticationAndCryptoPlugin_besteffort_submessage_large_string)
{
PubSubReader<StringType> reader(TEST_TOPIC_NAME);
PubSubWriter<StringType> writer(TEST_TOPIC_NAME);
PropertyPolicy pub_part_property_policy, sub_part_property_policy,
pub_property_policy, sub_property_policy;
sub_part_property_policy.properties().emplace_back(Property("dds.sec.auth.plugin",
"builtin.PKI-DH"));
sub_part_property_policy.properties().emplace_back(Property("dds.sec.auth.builtin.PKI-DH.identity_ca",
"file://" + std::string(certs_path) + "/maincacert.pem"));
sub_part_property_policy.properties().emplace_back(Property("dds.sec.auth.builtin.PKI-DH.identity_certificate",
"file://" + std::string(certs_path) + "/mainsubcert.pem"));
sub_part_property_policy.properties().emplace_back(Property("dds.sec.auth.builtin.PKI-DH.private_key",
"file://" + std::string(certs_path) + "/mainsubkey.pem"));
sub_part_property_policy.properties().emplace_back(Property("dds.sec.crypto.plugin",
"builtin.AES-GCM-GMAC"));
sub_property_policy.properties().emplace_back("rtps.endpoint.submessage_protection_kind", "ENCRYPT");
reader.history_depth(10).
property_policy(sub_part_property_policy).
entity_property_policy(sub_property_policy).init();
ASSERT_TRUE(reader.isInitialized());
pub_part_property_policy.properties().emplace_back(Property("dds.sec.auth.plugin",
"builtin.PKI-DH"));
pub_part_property_policy.properties().emplace_back(Property("dds.sec.auth.builtin.PKI-DH.identity_ca",
"file://" + std::string(certs_path) + "/maincacert.pem"));
pub_part_property_policy.properties().emplace_back(Property("dds.sec.auth.builtin.PKI-DH.identity_certificate",
"file://" + std::string(certs_path) + "/mainpubcert.pem"));
pub_part_property_policy.properties().emplace_back(Property("dds.sec.auth.builtin.PKI-DH.private_key",
"file://" + std::string(certs_path) + "/mainpubkey.pem"));
pub_part_property_policy.properties().emplace_back(Property("dds.sec.crypto.plugin",
"builtin.AES-GCM-GMAC"));
pub_property_policy.properties().emplace_back("rtps.endpoint.submessage_protection_kind", "ENCRYPT");
writer.history_depth(10).
reliability(eprosima::fastrtps::BEST_EFFORT_RELIABILITY_QOS).
property_policy(pub_part_property_policy).
entity_property_policy(pub_property_policy).init();
ASSERT_TRUE(writer.isInitialized());
// Wait for authorization
reader.waitAuthorized();
writer.waitAuthorized();
// Wait for discovery.
writer.wait_discovery();
reader.wait_discovery();
auto data = default_large_string_data_generator();
reader.startReception(data);
// Send data
writer.send(data);
// In this test all data should be sent.
ASSERT_TRUE(data.empty());
// Block reader until reception finished or timeout.
reader.block_for_at_least(2);
}
| 0 |
[
"CWE-284"
] |
Fast-DDS
|
d2aeab37eb4fad4376b68ea4dfbbf285a2926384
| 51,265,795,280,533,050,000,000,000,000,000,000,000 | 64 |
check remote permissions (#1387)
* Refs 5346. Blackbox test
Signed-off-by: Iker Luengo <[email protected]>
* Refs 5346. one-way string compare
Signed-off-by: Iker Luengo <[email protected]>
* Refs 5346. Do not add partition separator on last partition
Signed-off-by: Iker Luengo <[email protected]>
* Refs 5346. Uncrustify
Signed-off-by: Iker Luengo <[email protected]>
* Refs 5346. Uncrustify
Signed-off-by: Iker Luengo <[email protected]>
* Refs 3680. Access control unit testing
It only covers Partition and Topic permissions
Signed-off-by: Iker Luengo <[email protected]>
* Refs #3680. Fix partition check on Permissions plugin.
Signed-off-by: Iker Luengo <[email protected]>
* Refs 3680. Uncrustify
Signed-off-by: Iker Luengo <[email protected]>
* Refs 3680. Fix tests on mac
Signed-off-by: Iker Luengo <[email protected]>
* Refs 3680. Fix windows tests
Signed-off-by: Iker Luengo <[email protected]>
* Refs 3680. Avoid memory leak on test
Signed-off-by: Iker Luengo <[email protected]>
* Refs 3680. Proxy data mocks should not return temporary objects
Signed-off-by: Iker Luengo <[email protected]>
* refs 3680. uncrustify
Signed-off-by: Iker Luengo <[email protected]>
Co-authored-by: Miguel Company <[email protected]>
|
void BinaryProtocolReader::readFieldEnd() {}
| 0 |
[
"CWE-703",
"CWE-770"
] |
fbthrift
|
c9a903e5902834e95bbd4ab0e9fa53ba0189f351
| 2,247,394,522,260,169,000,000,000,000,000,000,000 | 1 |
Better handling of truncated data when reading strings
Summary:
Currently we read the string size and blindly pre-allocate it. This allows a malicious attacker to send a message of a few bytes and cause the server to allocate a huge amount of memory (>1 GB).
This diff changes the logic to check whether we have enough data in the buffer before allocating the string.
This is the second part of a fix for CVE-2019-3553.
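A minimal sketch of that check, in C for illustration (the function and
parameter names are assumptions, not fbthrift's actual API):
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

static char *read_string(const uint8_t *buf, size_t buf_len,
                         size_t *pos, uint32_t declared_len)
{
    /* Refuse to allocate more than the buffer can actually back. */
    if (*pos > buf_len || declared_len > buf_len - *pos)
        return NULL;
    char *out = malloc((size_t)declared_len + 1);
    if (!out)
        return NULL;
    memcpy(out, buf + *pos, declared_len);
    out[declared_len] = '\0';
    *pos += declared_len;
    return out;
}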
Reviewed By: vitaut
Differential Revision: D14393393
fbshipit-source-id: e2046d2f5b087d3abc9a9d2c6c107cf088673057
|
static int ip6_output2(struct sk_buff *skb)
{
struct dst_entry *dst = skb->dst;
struct net_device *dev = dst->dev;
skb->protocol = htons(ETH_P_IPV6);
skb->dev = dev;
if (ipv6_addr_is_multicast(&skb->nh.ipv6h->daddr)) {
struct ipv6_pinfo* np = skb->sk ? inet6_sk(skb->sk) : NULL;
if (!(dev->flags & IFF_LOOPBACK) && (!np || np->mc_loop) &&
ipv6_chk_mcast_addr(dev, &skb->nh.ipv6h->daddr,
&skb->nh.ipv6h->saddr)) {
struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
/* Do not check for IFF_ALLMULTI; multicast routing
is not supported in any case.
*/
if (newskb)
NF_HOOK(PF_INET6, NF_IP6_POST_ROUTING, newskb, NULL,
newskb->dev,
ip6_dev_loopback_xmit);
if (skb->nh.ipv6h->hop_limit == 0) {
IP6_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
kfree_skb(skb);
return 0;
}
}
IP6_INC_STATS(IPSTATS_MIB_OUTMCASTPKTS);
}
return NF_HOOK(PF_INET6, NF_IP6_POST_ROUTING, skb,NULL, skb->dev,ip6_output_finish);
}
| 0 |
[] |
linux
|
e89e9cf539a28df7d0eb1d0a545368e9920b34ac
| 207,725,848,408,760,570,000,000,000,000,000,000,000 | 36 |
[IPv4/IPv6]: UFO Scatter-gather approach
Attached is kernel patch for UDP Fragmentation Offload (UFO) feature.
1. This patch incorporate the review comments by Jeff Garzik.
2. Renamed USO as UFO (UDP Fragmentation Offload)
3. udp sendfile support with UFO
This patch uses the scatter-gather feature of skb to generate large UDP
datagrams. Below is a "how-to" on the changes required in a network
device driver to use the UFO interface.
UDP Fragmentation Offload (UFO) Interface:
-------------------------------------------
UFO is a feature wherein the Linux kernel network stack will offload the
IP fragmentation of large UDP datagrams to hardware. This reduces the
overhead of the stack in fragmenting large UDP datagrams into MTU-sized
packets.
1) Drivers indicate their capability of UFO using
dev->features |= NETIF_F_UFO | NETIF_F_HW_CSUM | NETIF_F_SG
NETIF_F_HW_CSUM is required for UFO over ipv6.
2) A UFO packet will be submitted for transmission using the driver's xmit routine.
A UFO packet will have a non-zero value for
"skb_shinfo(skb)->ufo_size"
skb_shinfo(skb)->ufo_size will indicate the length of data part in each IP
fragment going out of the adapter after IP fragmentation by hardware.
skb->data will contain MAC/IP/UDP header and skb_shinfo(skb)->frags[]
contains the data payload. The skb->ip_summed will be set to CHECKSUM_HW
indicating that hardware has to do checksum calculation. Hardware should
compute the UDP checksum of complete datagram and also ip header checksum of
each fragmented IP packet.
For IPV6 the UFO provides the fragment identification-id in
skb_shinfo(skb)->ip6_frag_id. The adapter should use this ID for generating
IPv6 fragments.
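A minimal sketch of the driver side described above (hw_queue_ufo_frame
and hw_queue_frame are hypothetical hardware helpers, not real kernel
APIs):
static int my_driver_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned int mss = skb_shinfo(skb)->ufo_size;

	if (mss) {
		/* UFO packet: hardware emits IP fragments carrying 'mss'
		 * bytes of payload each, computes the UDP checksum of the
		 * whole datagram (skb->ip_summed == CHECKSUM_HW) and the
		 * IP header checksum of every fragment. */
		return hw_queue_ufo_frame(dev, skb, mss);
	}
	return hw_queue_frame(dev, skb);
}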
Signed-off-by: Ananda Raju <[email protected]>
Signed-off-by: Rusty Russell <[email protected]> (forwarded)
Signed-off-by: Arnaldo Carvalho de Melo <[email protected]>
|
DLLEXPORT int tjDecodeYUVPlanes(tjhandle handle,
const unsigned char **srcPlanes,
const int *strides, int subsamp,
unsigned char *dstBuf, int width, int pitch,
int height, int pixelFormat, int flags)
{
JSAMPROW *row_pointer = NULL;
JSAMPLE *_tmpbuf[MAX_COMPONENTS];
JSAMPROW *tmpbuf[MAX_COMPONENTS], *inbuf[MAX_COMPONENTS];
int i, retval = 0, row, pw0, ph0, pw[MAX_COMPONENTS], ph[MAX_COMPONENTS];
JSAMPLE *ptr;
jpeg_component_info *compptr;
int (*old_read_markers) (j_decompress_ptr);
void (*old_reset_marker_reader) (j_decompress_ptr);
GET_DINSTANCE(handle);
this->jerr.stopOnWarning = (flags & TJFLAG_STOPONWARNING) ? TRUE : FALSE;
for (i = 0; i < MAX_COMPONENTS; i++) {
tmpbuf[i] = NULL; _tmpbuf[i] = NULL; inbuf[i] = NULL;
}
if ((this->init & DECOMPRESS) == 0)
THROW("tjDecodeYUVPlanes(): Instance has not been initialized for decompression");
if (!srcPlanes || !srcPlanes[0] || subsamp < 0 || subsamp >= NUMSUBOPT ||
dstBuf == NULL || width <= 0 || pitch < 0 || height <= 0 ||
pixelFormat < 0 || pixelFormat >= TJ_NUMPF)
THROW("tjDecodeYUVPlanes(): Invalid argument");
if (subsamp != TJSAMP_GRAY && (!srcPlanes[1] || !srcPlanes[2]))
THROW("tjDecodeYUVPlanes(): Invalid argument");
if (setjmp(this->jerr.setjmp_buffer)) {
/* If we get here, the JPEG code has signaled an error. */
retval = -1; goto bailout;
}
if (pixelFormat == TJPF_CMYK)
THROW("tjDecodeYUVPlanes(): Cannot decode YUV images into CMYK pixels.");
if (pitch == 0) pitch = width * tjPixelSize[pixelFormat];
dinfo->image_width = width;
dinfo->image_height = height;
#ifndef NO_PUTENV
if (flags & TJFLAG_FORCEMMX) putenv("JSIMD_FORCEMMX=1");
else if (flags & TJFLAG_FORCESSE) putenv("JSIMD_FORCESSE=1");
else if (flags & TJFLAG_FORCESSE2) putenv("JSIMD_FORCESSE2=1");
#endif
if (setDecodeDefaults(dinfo, pixelFormat, subsamp, flags) == -1) {
retval = -1; goto bailout;
}
old_read_markers = dinfo->marker->read_markers;
dinfo->marker->read_markers = my_read_markers;
old_reset_marker_reader = dinfo->marker->reset_marker_reader;
dinfo->marker->reset_marker_reader = my_reset_marker_reader;
jpeg_read_header(dinfo, TRUE);
dinfo->marker->read_markers = old_read_markers;
dinfo->marker->reset_marker_reader = old_reset_marker_reader;
this->dinfo.out_color_space = pf2cs[pixelFormat];
if (flags & TJFLAG_FASTDCT) this->dinfo.dct_method = JDCT_FASTEST;
dinfo->do_fancy_upsampling = FALSE;
dinfo->Se = DCTSIZE2 - 1;
jinit_master_decompress(dinfo);
(*dinfo->upsample->start_pass) (dinfo);
pw0 = PAD(width, dinfo->max_h_samp_factor);
ph0 = PAD(height, dinfo->max_v_samp_factor);
if (pitch == 0) pitch = dinfo->output_width * tjPixelSize[pixelFormat];
if ((row_pointer = (JSAMPROW *)malloc(sizeof(JSAMPROW) * ph0)) == NULL)
THROW("tjDecodeYUVPlanes(): Memory allocation failure");
for (i = 0; i < height; i++) {
if (flags & TJFLAG_BOTTOMUP)
row_pointer[i] = &dstBuf[(height - i - 1) * (size_t)pitch];
else
row_pointer[i] = &dstBuf[i * (size_t)pitch];
}
if (height < ph0)
for (i = height; i < ph0; i++) row_pointer[i] = row_pointer[height - 1];
for (i = 0; i < dinfo->num_components; i++) {
compptr = &dinfo->comp_info[i];
_tmpbuf[i] =
(JSAMPLE *)malloc(PAD(compptr->width_in_blocks * DCTSIZE, 32) *
compptr->v_samp_factor + 32);
if (!_tmpbuf[i])
THROW("tjDecodeYUVPlanes(): Memory allocation failure");
tmpbuf[i] = (JSAMPROW *)malloc(sizeof(JSAMPROW) * compptr->v_samp_factor);
if (!tmpbuf[i])
THROW("tjDecodeYUVPlanes(): Memory allocation failure");
for (row = 0; row < compptr->v_samp_factor; row++) {
unsigned char *_tmpbuf_aligned =
(unsigned char *)PAD((size_t)_tmpbuf[i], 32);
tmpbuf[i][row] =
&_tmpbuf_aligned[PAD(compptr->width_in_blocks * DCTSIZE, 32) * row];
}
pw[i] = pw0 * compptr->h_samp_factor / dinfo->max_h_samp_factor;
ph[i] = ph0 * compptr->v_samp_factor / dinfo->max_v_samp_factor;
inbuf[i] = (JSAMPROW *)malloc(sizeof(JSAMPROW) * ph[i]);
if (!inbuf[i])
THROW("tjDecodeYUVPlanes(): Memory allocation failure");
ptr = (JSAMPLE *)srcPlanes[i];
for (row = 0; row < ph[i]; row++) {
inbuf[i][row] = ptr;
ptr += (strides && strides[i] != 0) ? strides[i] : pw[i];
}
}
if (setjmp(this->jerr.setjmp_buffer)) {
/* If we get here, the JPEG code has signaled an error. */
retval = -1; goto bailout;
}
for (row = 0; row < ph0; row += dinfo->max_v_samp_factor) {
JDIMENSION inrow = 0, outrow = 0;
for (i = 0, compptr = dinfo->comp_info; i < dinfo->num_components;
i++, compptr++)
jcopy_sample_rows(inbuf[i],
row * compptr->v_samp_factor / dinfo->max_v_samp_factor, tmpbuf[i], 0,
compptr->v_samp_factor, pw[i]);
(dinfo->upsample->upsample) (dinfo, tmpbuf, &inrow,
dinfo->max_v_samp_factor, &row_pointer[row],
&outrow, dinfo->max_v_samp_factor);
}
jpeg_abort_decompress(dinfo);
bailout:
if (dinfo->global_state > DSTATE_START) jpeg_abort_decompress(dinfo);
if (row_pointer) free(row_pointer);
for (i = 0; i < MAX_COMPONENTS; i++) {
if (tmpbuf[i] != NULL) free(tmpbuf[i]);
if (_tmpbuf[i] != NULL) free(_tmpbuf[i]);
if (inbuf[i] != NULL) free(inbuf[i]);
}
if (this->jerr.warning) retval = -1;
this->jerr.stopOnWarning = FALSE;
return retval;
}
| 0 |
[
"CWE-787"
] |
libjpeg-turbo
|
2a9e3bd7430cfda1bc812d139e0609c6aca0b884
| 25,145,811,162,548,330,000,000,000,000,000,000,000 | 144 |
TurboJPEG: Properly handle gigapixel images
Prevent several integer overflow issues and subsequent segfaults that
occurred when attempting to compress or decompress gigapixel images with
the TurboJPEG API:
- Modify tjBufSize(), tjBufSizeYUV2(), and tjPlaneSizeYUV() to avoid
integer overflow when computing the return values and to return an
error if such an overflow is unavoidable.
- Modify tjunittest to validate the above.
- Modify tjCompress2(), tjEncodeYUVPlanes(), tjDecompress2(), and
tjDecodeYUVPlanes() to avoid integer overflow when computing the row
pointers in the 64-bit TurboJPEG C API.
- Modify TJBench (both C and Java versions) to avoid overflowing the
size argument to malloc()/new and to fail gracefully if such an
overflow is unavoidable.
In general, this allows gigapixel images to be accommodated by the
64-bit TurboJPEG C API when using automatic JPEG buffer (re)allocation.
Such images cannot currently be accommodated without automatic JPEG
buffer (re)allocation, due to the fact that tjAlloc() accepts a 32-bit
integer argument (oops.) Such images cannot be accommodated in the
TurboJPEG Java API due to the fact that Java always uses a signed 32-bit
integer as an array index.
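A minimal sketch of the kind of overflow check involved (illustrative
only, not the actual tjBufSize() code):
#include <limits.h>

static unsigned long safe_buf_size(int width, int height, int pixel_size)
{
    unsigned long w = (unsigned long)width;
    unsigned long h = (unsigned long)height;
    unsigned long ps = (unsigned long)pixel_size;

    if (width <= 0 || height <= 0 || pixel_size <= 0)
        return 0;               /* invalid arguments */
    if (w > ULONG_MAX / h)
        return 0;               /* w * h would overflow */
    if (w * h > ULONG_MAX / ps)
        return 0;               /* total size would overflow */
    return w * h * ps;          /* safe to allocate */
}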
Fixes #361
|
CImgDisplay& show_mouse() {
if (is_empty()) return *this;
_is_cursor_visible = true;
return *this;
}
| 0 |
[
"CWE-770"
] |
cimg
|
619cb58dd90b4e03ac68286c70ed98acbefd1c90
| 46,112,345,286,957,460,000,000,000,000,000,000,000 | 5 |
CImg<>::load_bmp() and CImg<>::load_pandore(): Check that dimensions encoded in file do not exceed file size.
|
void TABLE::remember_blob_values(String *blob_storage)
{
Field **vfield_ptr;
for (vfield_ptr= vfield; *vfield_ptr; vfield_ptr++)
{
if ((*vfield_ptr)->type() == MYSQL_TYPE_BLOB &&
!(*vfield_ptr)->vcol_info->stored_in_db)
{
Field_blob *blob= ((Field_blob*) *vfield_ptr);
memcpy((void*) blob_storage, (void*) &blob->value, sizeof(blob->value));
blob_storage++;
blob->value.release();
}
}
}
| 0 |
[
"CWE-416"
] |
server
|
c02ebf3510850ba78a106be9974c94c3b97d8585
| 39,539,634,192,676,380,000,000,000,000,000,000,000 | 15 |
MDEV-24176 Preparations
1. moved fix_vcol_exprs() call to open_table()
mysql_alter_table() doesn't do lock_tables() so it cannot win from
fix_vcol_exprs() from there. Tests affected: main.default_session
2. Vanilla cleanups and comments.
|
int ClientHandler::do_write() { return write_(*this); }
| 0 |
[] |
nghttp2
|
95efb3e19d174354ca50c65d5d7227d92bcd60e1
| 120,537,662,219,893,220,000,000,000,000,000,000,000 | 1 |
Don't read too greedily
|
struct dce_aux *dce100_aux_engine_create(
struct dc_context *ctx,
uint32_t inst)
{
struct aux_engine_dce110 *aux_engine =
kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL);
if (!aux_engine)
return NULL;
dce110_aux_engine_construct(aux_engine, ctx, inst,
SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
&aux_engine_regs[inst]);
return &aux_engine->base;
}
| 0 |
[
"CWE-400",
"CWE-401"
] |
linux
|
104c307147ad379617472dd91a5bcb368d72bd6d
| 319,969,724,387,937,860,000,000,000,000,000,000,000 | 16 |
drm/amd/display: prevent memory leak
In dcn*_create_resource_pool the allocated memory should be released if
constructing the pool fails.
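A minimal sketch of that cleanup pattern (construct_pool and the wrapper
type are placeholders, not the actual driver code):
static struct resource_pool *create_pool_checked(struct dc_context *ctx)
{
	struct pool_wrapper *pool = kzalloc(sizeof(*pool), GFP_KERNEL);

	if (!pool)
		return NULL;
	if (!construct_pool(ctx, pool)) {
		kfree(pool);    /* release on failure instead of leaking */
		return NULL;
	}
	return &pool->base;
}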
Reviewed-by: Harry Wentland <[email protected]>
Signed-off-by: Navid Emamdoost <[email protected]>
Signed-off-by: Alex Deucher <[email protected]>
|
ews_get_absolute_date_transition (ESoapParameter *node)
{
ESoapParameter *param;
EEwsCalendarAbsoluteDateTransition *absolute_date_transition = NULL;
EEwsCalendarTo *to = NULL;
gchar *date_time = NULL;
gboolean success = FALSE;
param = e_soap_parameter_get_first_child_by_name (node, "To");
if (param != NULL)
to = ews_get_to (param);
if (to == NULL)
goto exit;
param = e_soap_parameter_get_first_child_by_name (node, "DateTime");
if (param != NULL)
date_time = e_soap_parameter_get_string_value (param);
if (date_time == NULL)
goto exit;
success = TRUE;
exit:
if (success) {
absolute_date_transition = e_ews_calendar_absolute_date_transition_new ();
absolute_date_transition->to = to;
absolute_date_transition->date_time = date_time;
} else {
e_ews_calendar_to_free (to);
g_free (date_time);
}
return absolute_date_transition;
}
| 0 |
[
"CWE-295"
] |
evolution-ews
|
915226eca9454b8b3e5adb6f2fff9698451778de
| 296,437,277,916,377,600,000,000,000,000,000,000,000 | 36 |
I#27 - SSL Certificates are not validated
This depends on https://gitlab.gnome.org/GNOME/evolution-data-server/commit/6672b8236139bd6ef41ecb915f4c72e2a052dba5 too.
Closes https://gitlab.gnome.org/GNOME/evolution-ews/issues/27
|
static int atif_ioctl(int cmd, void *arg)
{
static char aarp_mcast[6] = { 0x09, 0x00, 0x00, 0xFF, 0xFF, 0xFF };
struct ifreq atreq;
struct atalk_netrange *nr;
struct sockaddr_at *sa;
struct net_device *dev;
struct atalk_iface *atif;
int ct;
int limit;
struct rtentry rtdef;
int add_route;
if (copy_from_user(&atreq, arg, sizeof(atreq)))
return -EFAULT;
dev = __dev_get_by_name(atreq.ifr_name);
if (!dev)
return -ENODEV;
sa = (struct sockaddr_at *)&atreq.ifr_addr;
atif = atalk_find_dev(dev);
switch (cmd) {
case SIOCSIFADDR:
if (!capable(CAP_NET_ADMIN))
return -EPERM;
if (sa->sat_family != AF_APPLETALK)
return -EINVAL;
if (dev->type != ARPHRD_ETHER &&
dev->type != ARPHRD_LOOPBACK &&
dev->type != ARPHRD_LOCALTLK &&
dev->type != ARPHRD_PPP)
return -EPROTONOSUPPORT;
nr = (struct atalk_netrange *)&sa->sat_zero[0];
add_route = 1;
/*
* if this is a point-to-point iface, and we already
* have an iface for this AppleTalk address, then we
* should not add a route
*/
if ((dev->flags & IFF_POINTOPOINT) &&
atalk_find_interface(sa->sat_addr.s_net,
sa->sat_addr.s_node)) {
printk(KERN_DEBUG "AppleTalk: point-to-point "
"interface added with "
"existing address\n");
add_route = 0;
}
/*
* Phase 1 is fine on LocalTalk but we don't do
* EtherTalk phase 1. Anyone wanting to add it go ahead.
*/
if (dev->type == ARPHRD_ETHER && nr->nr_phase != 2)
return -EPROTONOSUPPORT;
if (sa->sat_addr.s_node == ATADDR_BCAST ||
sa->sat_addr.s_node == 254)
return -EINVAL;
if (atif) {
/* Already setting address */
if (atif->status & ATIF_PROBE)
return -EBUSY;
atif->address.s_net = sa->sat_addr.s_net;
atif->address.s_node = sa->sat_addr.s_node;
atrtr_device_down(dev); /* Flush old routes */
} else {
atif = atif_add_device(dev, &sa->sat_addr);
if (!atif)
return -ENOMEM;
}
atif->nets = *nr;
/*
* Check if the chosen address is used. If so we
* error and atalkd will try another.
*/
if (!(dev->flags & IFF_LOOPBACK) &&
!(dev->flags & IFF_POINTOPOINT) &&
atif_probe_device(atif) < 0) {
atif_drop_device(dev);
return -EADDRINUSE;
}
/* Hey it worked - add the direct routes */
sa = (struct sockaddr_at *)&rtdef.rt_gateway;
sa->sat_family = AF_APPLETALK;
sa->sat_addr.s_net = atif->address.s_net;
sa->sat_addr.s_node = atif->address.s_node;
sa = (struct sockaddr_at *)&rtdef.rt_dst;
rtdef.rt_flags = RTF_UP;
sa->sat_family = AF_APPLETALK;
sa->sat_addr.s_node = ATADDR_ANYNODE;
if (dev->flags & IFF_LOOPBACK ||
dev->flags & IFF_POINTOPOINT)
rtdef.rt_flags |= RTF_HOST;
/* Routerless initial state */
if (nr->nr_firstnet == htons(0) &&
nr->nr_lastnet == htons(0xFFFE)) {
sa->sat_addr.s_net = atif->address.s_net;
atrtr_create(&rtdef, dev);
atrtr_set_default(dev);
} else {
limit = ntohs(nr->nr_lastnet);
if (limit - ntohs(nr->nr_firstnet) > 4096) {
printk(KERN_WARNING "Too many routes/"
"iface.\n");
return -EINVAL;
}
if (add_route)
for (ct = ntohs(nr->nr_firstnet);
ct <= limit; ct++) {
sa->sat_addr.s_net = htons(ct);
atrtr_create(&rtdef, dev);
}
}
dev_mc_add(dev, aarp_mcast, 6, 1);
return 0;
case SIOCGIFADDR:
if (!atif)
return -EADDRNOTAVAIL;
sa->sat_family = AF_APPLETALK;
sa->sat_addr = atif->address;
break;
case SIOCGIFBRDADDR:
if (!atif)
return -EADDRNOTAVAIL;
sa->sat_family = AF_APPLETALK;
sa->sat_addr.s_net = atif->address.s_net;
sa->sat_addr.s_node = ATADDR_BCAST;
break;
case SIOCATALKDIFADDR:
case SIOCDIFADDR:
if (!capable(CAP_NET_ADMIN))
return -EPERM;
if (sa->sat_family != AF_APPLETALK)
return -EINVAL;
atalk_dev_down(dev);
break;
case SIOCSARP:
if (!capable(CAP_NET_ADMIN))
return -EPERM;
if (sa->sat_family != AF_APPLETALK)
return -EINVAL;
if (!atif)
return -EADDRNOTAVAIL;
/*
* for now, we only support proxy AARP on ELAP;
* we should be able to do it for LocalTalk, too.
*/
if (dev->type != ARPHRD_ETHER)
return -EPROTONOSUPPORT;
/*
* atif points to the current interface on this network;
* we aren't concerned about its current status (at
* least for now), but it has all the settings about
* the network we're going to probe. Consequently, it
* must exist.
*/
if (!atif)
return -EADDRNOTAVAIL;
nr = (struct atalk_netrange *)&(atif->nets);
/*
* Phase 1 is fine on Localtalk but we don't do
* Ethertalk phase 1. Anyone wanting to add it go ahead.
*/
if (dev->type == ARPHRD_ETHER && nr->nr_phase != 2)
return -EPROTONOSUPPORT;
if (sa->sat_addr.s_node == ATADDR_BCAST ||
sa->sat_addr.s_node == 254)
return -EINVAL;
/*
* Check if the chosen address is used. If so we
* error and ATCP will try another.
*/
if (atif_proxy_probe_device(atif, &(sa->sat_addr)) < 0)
return -EADDRINUSE;
/*
* We now have an address on the local network, and
* the AARP code will defend it for us until we take it
* down. We don't set up any routes right now, because
* ATCP will install them manually via SIOCADDRT.
*/
break;
case SIOCDARP:
if (!capable(CAP_NET_ADMIN))
return -EPERM;
if (sa->sat_family != AF_APPLETALK)
return -EINVAL;
if (!atif)
return -EADDRNOTAVAIL;
/* give to aarp module to remove proxy entry */
aarp_proxy_remove(atif->dev, &(sa->sat_addr));
return 0;
}
return copy_to_user(arg, &atreq, sizeof(atreq)) ? -EFAULT : 0;
}
| 0 |
[] |
history
|
7ab442d7e0a76402c12553ee256f756097cae2d2
| 266,365,374,729,117,400,000,000,000,000,000,000,000 | 217 |
[DDP]: Convert to new protocol interface.
Convert ddp to the new protocol interface which means it has to
handle fragmented skb's. The only big change is in the checksum
routine which has to do more work (like skb_checksum).
A minor speedup is folding the carry to avoid a branch.
Tested against a 2.4 system and by running both versions of the code
over a range of packets.
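A minimal sketch of that branch-free carry fold (illustrative; not the
exact helper from the patch):
#include <stdint.h>

static uint16_t csum_fold16(uint32_t sum)
{
    /* Add the high half into the low half twice: the second add
     * absorbs any carry produced by the first, with no branch. */
    sum = (sum & 0xffff) + (sum >> 16);
    sum = (sum & 0xffff) + (sum >> 16);
    return (uint16_t)~sum;
}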
|
int ldbDelBreakpoint(int line) {
int j;
for (j = 0; j < ldb.bpcount; j++) {
if (ldb.bp[j] == line) {
ldb.bpcount--;
memmove(ldb.bp+j,ldb.bp+j+1,ldb.bpcount-j);
return 1;
}
}
return 0;
}
| 0 |
[
"CWE-703",
"CWE-125"
] |
redis
|
6ac3c0b7abd35f37201ed2d6298ecef4ea1ae1dd
| 102,145,238,655,077,210,000,000,000,000,000,000,000 | 12 |
Fix protocol parsing on 'ldbReplParseCommand' (CVE-2021-32672)
The protocol parsing in 'ldbReplParseCommand' (Lua debugging) assumed
protocol correctness. This means that if the following is given:
*1
$100
test
The parser will try to read an additional 94 unallocated bytes past the
end of the client buffer.
This commit fixes the issue by validating that there are actually enough
bytes to read. It also limits the amount of data that can be sent by the
debugger client to 1 MB, so the client will not be able to exhaust the
server's memory.
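A minimal sketch of the bounds check described above (names are
placeholders, not the actual Redis parser):
#include <stddef.h>

static int parse_bulk(const char *buf, size_t buflen,
                      size_t pos, size_t declared)
{
    /* Reject declared lengths that point past the data received. */
    if (pos > buflen || declared > buflen - pos)
        return -1;
    /* ... it is now safe to read 'declared' bytes at buf + pos ... */
    return 0;
}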
|
Http::Http1::CodecStats& http1CodecStats() {
return Http::Http1::CodecStats::atomicGet(http1_codec_stats_, stats_store_);
}
| 0 |
[
"CWE-400"
] |
envoy
|
dfddb529e914d794ac552e906b13d71233609bf7
| 244,185,834,985,204,920,000,000,000,000,000,000,000 | 3 |
listener: Add configurable accepted connection limits (#153)
Add support for per-listener limits on accepted connections.
Signed-off-by: Tony Allen <[email protected]>
|
CImgException(const char *const format, ...):_message(0) { _cimg_exception_err("CImgException",true); }
CImgException(const CImgException& e):std::exception(e) {
const size_t size = std::strlen(e._message);
_message = new char[size + 1];
std::strncpy(_message,e._message,size);
_message[size] = 0;
| 0 |
[
"CWE-125"
] |
CImg
|
10af1e8c1ad2a58a0a3342a856bae63e8f257abb
| 93,371,126,377,451,100,000,000,000,000,000,000,000 | 6 |
Fix other issues in 'CImg<T>::load_bmp()'.
|
int nla_recv_pdu(rdpNla* nla, wStream* s)
{
WINPR_ASSERT(nla);
WINPR_ASSERT(s);
if (nla_decode_ts_request(nla, s) < 1)
return -1;
if (nla->errorCode)
{
UINT32 code;
switch (nla->errorCode)
{
case STATUS_PASSWORD_MUST_CHANGE:
code = FREERDP_ERROR_CONNECT_PASSWORD_MUST_CHANGE;
break;
case STATUS_PASSWORD_EXPIRED:
code = FREERDP_ERROR_CONNECT_PASSWORD_EXPIRED;
break;
case STATUS_ACCOUNT_DISABLED:
code = FREERDP_ERROR_CONNECT_ACCOUNT_DISABLED;
break;
case STATUS_LOGON_FAILURE:
code = FREERDP_ERROR_CONNECT_LOGON_FAILURE;
break;
case STATUS_WRONG_PASSWORD:
code = FREERDP_ERROR_CONNECT_WRONG_PASSWORD;
break;
case STATUS_ACCESS_DENIED:
code = FREERDP_ERROR_CONNECT_ACCESS_DENIED;
break;
case STATUS_ACCOUNT_RESTRICTION:
code = FREERDP_ERROR_CONNECT_ACCOUNT_RESTRICTION;
break;
case STATUS_ACCOUNT_LOCKED_OUT:
code = FREERDP_ERROR_CONNECT_ACCOUNT_LOCKED_OUT;
break;
case STATUS_ACCOUNT_EXPIRED:
code = FREERDP_ERROR_CONNECT_ACCOUNT_EXPIRED;
break;
case STATUS_LOGON_TYPE_NOT_GRANTED:
code = FREERDP_ERROR_CONNECT_LOGON_TYPE_NOT_GRANTED;
break;
default:
WLog_ERR(TAG, "SPNEGO failed with NTSTATUS: 0x%08" PRIX32 "", nla->errorCode);
code = FREERDP_ERROR_AUTHENTICATION_FAILED;
break;
}
freerdp_set_last_error_log(nla->rdpcontext, code);
return -1;
}
return nla_client_recv(nla);
}
| 0 |
[] |
FreeRDP
|
479e891545473f01c187daffdfa05fc752b54b72
| 252,665,145,599,558,800,000,000,000,000,000,000,000 | 66 |
check return values for SetCredentialsAttributes, throw warnings for unsupported attributes
|
static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
{
struct cma_multicast *mc = multicast->context;
struct rdma_id_private *id_priv = mc->id_priv;
struct rdma_cm_event event = {};
int ret = 0;
mutex_lock(&id_priv->handler_mutex);
if (READ_ONCE(id_priv->state) == RDMA_CM_DEVICE_REMOVAL ||
READ_ONCE(id_priv->state) == RDMA_CM_DESTROYING)
goto out;
cma_make_mc_event(status, id_priv, multicast, &event, mc);
ret = cma_cm_event_handler(id_priv, &event);
rdma_destroy_ah_attr(&event.param.ud.ah_attr);
WARN_ON(ret);
out:
mutex_unlock(&id_priv->handler_mutex);
return 0;
}
| 0 |
[
"CWE-416"
] |
linux
|
bc0bdc5afaa740d782fbf936aaeebd65e5c2921d
| 280,457,905,155,778,740,000,000,000,000,000,000,000 | 21 |
RDMA/cma: Do not change route.addr.src_addr.ss_family
If the state is not idle then rdma_bind_addr() will immediately fail and
no change to global state should happen.
For instance if the state is already RDMA_CM_LISTEN then this will corrupt
the src_addr and would cause the test in cma_cancel_operation():
if (cma_any_addr(cma_src_addr(id_priv)) && !id_priv->cma_dev)
A mangled src_addr, e.g. an IPv6 loopback address but with an IPv4
family, then fails this test.
This would manifest as this trace from syzkaller:
BUG: KASAN: use-after-free in __list_add_valid+0x93/0xa0 lib/list_debug.c:26
Read of size 8 at addr ffff8881546491e0 by task syz-executor.1/32204
CPU: 1 PID: 32204 Comm: syz-executor.1 Not tainted 5.12.0-rc8-syzkaller #0
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
Call Trace:
__dump_stack lib/dump_stack.c:79 [inline]
dump_stack+0x141/0x1d7 lib/dump_stack.c:120
print_address_description.constprop.0.cold+0x5b/0x2f8 mm/kasan/report.c:232
__kasan_report mm/kasan/report.c:399 [inline]
kasan_report.cold+0x7c/0xd8 mm/kasan/report.c:416
__list_add_valid+0x93/0xa0 lib/list_debug.c:26
__list_add include/linux/list.h:67 [inline]
list_add_tail include/linux/list.h:100 [inline]
cma_listen_on_all drivers/infiniband/core/cma.c:2557 [inline]
rdma_listen+0x787/0xe00 drivers/infiniband/core/cma.c:3751
ucma_listen+0x16a/0x210 drivers/infiniband/core/ucma.c:1102
ucma_write+0x259/0x350 drivers/infiniband/core/ucma.c:1732
vfs_write+0x28e/0xa30 fs/read_write.c:603
ksys_write+0x1ee/0x250 fs/read_write.c:658
do_syscall_64+0x2d/0x70 arch/x86/entry/common.c:46
entry_SYSCALL_64_after_hwframe+0x44/0xae
Which is indicating that an rdma_id_private was destroyed without doing
cma_cancel_listens().
Instead of trying to re-use the src_addr memory to indirectly create an
any address build one explicitly on the stack and bind to that as any
other normal flow would do.
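A minimal sketch of building the wildcard address in a stack-local
variable (a simplified, userspace-style illustration, not the kernel
patch itself):
#include <string.h>
#include <sys/socket.h>

static void build_any_addr(struct sockaddr_storage *addr, sa_family_t family)
{
    /* All-zero bytes are the wildcard for both INADDR_ANY and
     * in6addr_any; only the family needs to be set. The caller binds
     * this copy, leaving route.addr.src_addr untouched until the bind
     * actually succeeds. */
    memset(addr, 0, sizeof(*addr));
    addr->ss_family = family;
}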
Link: https://lore.kernel.org/r/[email protected]
Cc: [email protected]
Fixes: 732d41c545bb ("RDMA/cma: Make the locking for automatic state transition more clear")
Reported-by: [email protected]
Tested-by: Hao Sun <[email protected]>
Reviewed-by: Leon Romanovsky <[email protected]>
Signed-off-by: Jason Gunthorpe <[email protected]>
|
NTSTATUS unix_perms_from_wire(connection_struct *conn,
const SMB_STRUCT_STAT *psbuf,
uint32_t perms,
enum perm_type ptype,
mode_t *ret_perms)
{
mode_t ret = 0;
if (perms == SMB_MODE_NO_CHANGE) {
if (!VALID_STAT(*psbuf)) {
return NT_STATUS_INVALID_PARAMETER;
} else {
*ret_perms = psbuf->st_ex_mode;
return NT_STATUS_OK;
}
}
ret = wire_perms_to_unix(perms);
if (ptype == PERM_NEW_FILE) {
/*
* "create mask"/"force create mode" are
* only applied to new files, not existing ones.
*/
ret &= lp_create_mask(SNUM(conn));
/* Add in force bits */
ret |= lp_force_create_mode(SNUM(conn));
} else if (ptype == PERM_NEW_DIR) {
/*
* "directory mask"/"force directory mode" are
* only applied to new directories, not existing ones.
*/
ret &= lp_directory_mask(SNUM(conn));
/* Add in force bits */
ret |= lp_force_directory_mode(SNUM(conn));
}
*ret_perms = ret;
return NT_STATUS_OK;
}
| 0 |
[
"CWE-787"
] |
samba
|
22b4091924977f6437b59627f33a8e6f02b41011
| 254,533,924,169,977,000,000,000,000,000,000,000,000 | 40 |
CVE-2021-44142: smbd: add Netatalk xattr used by vfs_fruit to the list of private Samba xattrs
This is an internal xattr that should not be user visible.
BUG: https://bugzilla.samba.org/show_bug.cgi?id=14914
Signed-off-by: Ralph Boehme <[email protected]>
Reviewed-by: Jeremy Allison <[email protected]>
|
static int push_insn(int t, int w, int e, struct bpf_verifier_env *env)
{
if (e == FALLTHROUGH && insn_state[t] >= (DISCOVERED | FALLTHROUGH))
return 0;
if (e == BRANCH && insn_state[t] >= (DISCOVERED | BRANCH))
return 0;
if (w < 0 || w >= env->prog->len) {
verbose(env, "jump out of range from insn %d to %d\n", t, w);
return -EINVAL;
}
if (e == BRANCH)
/* mark branch target for state pruning */
env->explored_states[w] = STATE_LIST_MARK;
if (insn_state[w] == 0) {
/* tree-edge */
insn_state[t] = DISCOVERED | e;
insn_state[w] = DISCOVERED;
if (cur_stack >= env->prog->len)
return -E2BIG;
insn_stack[cur_stack++] = w;
return 1;
} else if ((insn_state[w] & 0xF0) == DISCOVERED) {
verbose(env, "back-edge from insn %d to %d\n", t, w);
return -EINVAL;
} else if (insn_state[w] == EXPLORED) {
/* forward- or cross-edge */
insn_state[t] = DISCOVERED | e;
} else {
verbose(env, "insn state internal bug\n");
return -EFAULT;
}
return 0;
}
| 0 |
[
"CWE-20"
] |
linux
|
c131187db2d3fa2f8bf32fdf4e9a4ef805168467
| 177,203,742,379,378,650,000,000,000,000,000,000,000 | 37 |
bpf: fix branch pruning logic
When the verifier detects that a register contains a runtime constant
and it is compared with another constant, it will prune exploration of
the branch that is guaranteed not to be taken at runtime.
This is all correct, but malicious program may be constructed
in such a way that it always has a constant comparison and
the other branch is never taken under any conditions.
In this case such path through the program will not be explored
by the verifier. It won't be taken at run-time either, but since
all instructions are JITed the malicious program may cause JITs
to complain about using reserved fields, etc.
To fix the issue we have to track the instructions explored by
the verifier and sanitize instructions that are dead at run time
with NOPs. We cannot reject such dead code, since llvm generates
it for valid C code, since it doesn't do as much data flow
analysis as the verifier does.
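A minimal sketch of that NOP-patching pass (simplified; the real
verifier tracks explored instructions in its per-insn aux data):
static void sanitize_dead_code(struct bpf_insn *insns, const bool *seen,
			       int len)
{
	/* A register-to-itself move is a cheap, JITable no-op. */
	const struct bpf_insn nop = BPF_MOV64_REG(BPF_REG_0, BPF_REG_0);
	int i;

	for (i = 0; i < len; i++)
		if (!seen[i])
			insns[i] = nop;
}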
Fixes: 17a5267067f3 ("bpf: verifier (add verifier core)")
Signed-off-by: Alexei Starovoitov <[email protected]>
Acked-by: Daniel Borkmann <[email protected]>
Signed-off-by: Daniel Borkmann <[email protected]>
|
int ssl3_get_key_exchange(SSL *s)
{
#ifndef OPENSSL_NO_RSA
unsigned char *q,md_buf[EVP_MAX_MD_SIZE*2];
#endif
EVP_MD_CTX md_ctx;
unsigned char *param,*p;
int al,i,j,param_len,ok;
long n,alg;
EVP_PKEY *pkey=NULL;
#ifndef OPENSSL_NO_RSA
RSA *rsa=NULL;
#endif
#ifndef OPENSSL_NO_DH
DH *dh=NULL;
#endif
#ifndef OPENSSL_NO_ECDH
EC_KEY *ecdh = NULL;
BN_CTX *bn_ctx = NULL;
EC_POINT *srvr_ecpoint = NULL;
int curve_nid = 0;
int encoded_pt_len = 0;
#endif
/* use same message size as in ssl3_get_certificate_request()
* as ServerKeyExchange message may be skipped */
n=s->method->ssl_get_message(s,
SSL3_ST_CR_KEY_EXCH_A,
SSL3_ST_CR_KEY_EXCH_B,
-1,
s->max_cert_list,
&ok);
if (!ok) return((int)n);
if (s->s3->tmp.message_type != SSL3_MT_SERVER_KEY_EXCHANGE)
{
s->s3->tmp.reuse_message=1;
return(1);
}
param=p=(unsigned char *)s->init_msg;
if (s->session->sess_cert != NULL)
{
#ifndef OPENSSL_NO_RSA
if (s->session->sess_cert->peer_rsa_tmp != NULL)
{
RSA_free(s->session->sess_cert->peer_rsa_tmp);
s->session->sess_cert->peer_rsa_tmp=NULL;
}
#endif
#ifndef OPENSSL_NO_DH
if (s->session->sess_cert->peer_dh_tmp)
{
DH_free(s->session->sess_cert->peer_dh_tmp);
s->session->sess_cert->peer_dh_tmp=NULL;
}
#endif
#ifndef OPENSSL_NO_ECDH
if (s->session->sess_cert->peer_ecdh_tmp)
{
EC_KEY_free(s->session->sess_cert->peer_ecdh_tmp);
s->session->sess_cert->peer_ecdh_tmp=NULL;
}
#endif
}
else
{
s->session->sess_cert=ssl_sess_cert_new();
}
param_len=0;
alg=s->s3->tmp.new_cipher->algorithms;
EVP_MD_CTX_init(&md_ctx);
#ifndef OPENSSL_NO_RSA
if (alg & SSL_kRSA)
{
if ((rsa=RSA_new()) == NULL)
{
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,ERR_R_MALLOC_FAILURE);
goto err;
}
n2s(p,i);
param_len=i+2;
if (param_len > n)
{
al=SSL_AD_DECODE_ERROR;
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,SSL_R_BAD_RSA_MODULUS_LENGTH);
goto f_err;
}
if (!(rsa->n=BN_bin2bn(p,i,rsa->n)))
{
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,ERR_R_BN_LIB);
goto err;
}
p+=i;
n2s(p,i);
param_len+=i+2;
if (param_len > n)
{
al=SSL_AD_DECODE_ERROR;
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,SSL_R_BAD_RSA_E_LENGTH);
goto f_err;
}
if (!(rsa->e=BN_bin2bn(p,i,rsa->e)))
{
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,ERR_R_BN_LIB);
goto err;
}
p+=i;
n-=param_len;
/* this should be because we are using an export cipher */
if (alg & SSL_aRSA)
pkey=X509_get_pubkey(s->session->sess_cert->peer_pkeys[SSL_PKEY_RSA_ENC].x509);
else
{
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,ERR_R_INTERNAL_ERROR);
goto err;
}
s->session->sess_cert->peer_rsa_tmp=rsa;
rsa=NULL;
}
#else /* OPENSSL_NO_RSA */
if (0)
;
#endif
#ifndef OPENSSL_NO_DH
else if (alg & SSL_kEDH)
{
if ((dh=DH_new()) == NULL)
{
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,ERR_R_DH_LIB);
goto err;
}
n2s(p,i);
param_len=i+2;
if (param_len > n)
{
al=SSL_AD_DECODE_ERROR;
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,SSL_R_BAD_DH_P_LENGTH);
goto f_err;
}
if (!(dh->p=BN_bin2bn(p,i,NULL)))
{
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,ERR_R_BN_LIB);
goto err;
}
p+=i;
n2s(p,i);
param_len+=i+2;
if (param_len > n)
{
al=SSL_AD_DECODE_ERROR;
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,SSL_R_BAD_DH_G_LENGTH);
goto f_err;
}
if (!(dh->g=BN_bin2bn(p,i,NULL)))
{
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,ERR_R_BN_LIB);
goto err;
}
p+=i;
n2s(p,i);
param_len+=i+2;
if (param_len > n)
{
al=SSL_AD_DECODE_ERROR;
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,SSL_R_BAD_DH_PUB_KEY_LENGTH);
goto f_err;
}
if (!(dh->pub_key=BN_bin2bn(p,i,NULL)))
{
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,ERR_R_BN_LIB);
goto err;
}
p+=i;
n-=param_len;
#ifndef OPENSSL_NO_RSA
if (alg & SSL_aRSA)
pkey=X509_get_pubkey(s->session->sess_cert->peer_pkeys[SSL_PKEY_RSA_ENC].x509);
#else
if (0)
;
#endif
#ifndef OPENSSL_NO_DSA
else if (alg & SSL_aDSS)
pkey=X509_get_pubkey(s->session->sess_cert->peer_pkeys[SSL_PKEY_DSA_SIGN].x509);
#endif
/* else anonymous DH, so no certificate or pkey. */
s->session->sess_cert->peer_dh_tmp=dh;
dh=NULL;
}
else if ((alg & SSL_kDHr) || (alg & SSL_kDHd))
{
al=SSL_AD_ILLEGAL_PARAMETER;
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,SSL_R_TRIED_TO_USE_UNSUPPORTED_CIPHER);
goto f_err;
}
#endif /* !OPENSSL_NO_DH */
#ifndef OPENSSL_NO_ECDH
else if (alg & SSL_kECDHE)
{
EC_GROUP *ngroup;
const EC_GROUP *group;
if ((ecdh=EC_KEY_new()) == NULL)
{
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,ERR_R_MALLOC_FAILURE);
goto err;
}
/* Extract elliptic curve parameters and the
* server's ephemeral ECDH public key.
* Keep accumulating lengths of various components in
* param_len and make sure it never exceeds n.
*/
/* XXX: For now we only support named (not generic) curves
* and the ECParameters in this case is just three bytes.
*/
param_len=3;
if ((param_len > n) ||
(*p != NAMED_CURVE_TYPE) ||
((curve_nid = curve_id2nid(*(p + 2))) == 0))
{
al=SSL_AD_INTERNAL_ERROR;
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,SSL_R_UNABLE_TO_FIND_ECDH_PARAMETERS);
goto f_err;
}
ngroup = EC_GROUP_new_by_curve_name(curve_nid);
if (ngroup == NULL)
{
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,ERR_R_EC_LIB);
goto err;
}
if (EC_KEY_set_group(ecdh, ngroup) == 0)
{
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,ERR_R_EC_LIB);
goto err;
}
EC_GROUP_free(ngroup);
group = EC_KEY_get0_group(ecdh);
if (SSL_C_IS_EXPORT(s->s3->tmp.new_cipher) &&
(EC_GROUP_get_degree(group) > 163))
{
al=SSL_AD_EXPORT_RESTRICTION;
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,SSL_R_ECGROUP_TOO_LARGE_FOR_CIPHER);
goto f_err;
}
p+=3;
/* Next, get the encoded ECPoint */
if (((srvr_ecpoint = EC_POINT_new(group)) == NULL) ||
((bn_ctx = BN_CTX_new()) == NULL))
{
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,ERR_R_MALLOC_FAILURE);
goto err;
}
encoded_pt_len = *p; /* length of encoded point */
p+=1;
param_len += (1 + encoded_pt_len);
if ((param_len > n) ||
(EC_POINT_oct2point(group, srvr_ecpoint,
p, encoded_pt_len, bn_ctx) == 0))
{
al=SSL_AD_DECODE_ERROR;
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,SSL_R_BAD_ECPOINT);
goto f_err;
}
n-=param_len;
p+=encoded_pt_len;
/* The ECC/TLS specification does not mention
* the use of DSA to sign ECParameters in the server
* key exchange message. We do support RSA and ECDSA.
*/
if (0) ;
#ifndef OPENSSL_NO_RSA
else if (alg & SSL_aRSA)
pkey=X509_get_pubkey(s->session->sess_cert->peer_pkeys[SSL_PKEY_RSA_ENC].x509);
#endif
#ifndef OPENSSL_NO_ECDSA
else if (alg & SSL_aECDSA)
pkey=X509_get_pubkey(s->session->sess_cert->peer_pkeys[SSL_PKEY_ECC].x509);
#endif
/* else anonymous ECDH, so no certificate or pkey. */
EC_KEY_set_public_key(ecdh, srvr_ecpoint);
s->session->sess_cert->peer_ecdh_tmp=ecdh;
ecdh=NULL;
BN_CTX_free(bn_ctx);
bn_ctx = NULL;
EC_POINT_free(srvr_ecpoint);
srvr_ecpoint = NULL;
}
else if (alg & SSL_kECDH)
{
al=SSL_AD_UNEXPECTED_MESSAGE;
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,SSL_R_UNEXPECTED_MESSAGE);
goto f_err;
}
#endif /* !OPENSSL_NO_ECDH */
if (alg & SSL_aFZA)
{
al=SSL_AD_HANDSHAKE_FAILURE;
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,SSL_R_TRIED_TO_USE_UNSUPPORTED_CIPHER);
goto f_err;
}
/* p points to the next byte, there are 'n' bytes left */
/* if it was signed, check the signature */
if (pkey != NULL)
{
n2s(p,i);
n-=2;
j=EVP_PKEY_size(pkey);
if ((i != n) || (n > j) || (n <= 0))
{
/* wrong packet length */
al=SSL_AD_DECODE_ERROR;
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,SSL_R_WRONG_SIGNATURE_LENGTH);
goto f_err;
}
#ifndef OPENSSL_NO_RSA
if (pkey->type == EVP_PKEY_RSA)
{
int num;
j=0;
q=md_buf;
for (num=2; num > 0; num--)
{
EVP_MD_CTX_set_flags(&md_ctx,
EVP_MD_CTX_FLAG_NON_FIPS_ALLOW);
EVP_DigestInit_ex(&md_ctx,(num == 2)
?s->ctx->md5:s->ctx->sha1, NULL);
EVP_DigestUpdate(&md_ctx,&(s->s3->client_random[0]),SSL3_RANDOM_SIZE);
EVP_DigestUpdate(&md_ctx,&(s->s3->server_random[0]),SSL3_RANDOM_SIZE);
EVP_DigestUpdate(&md_ctx,param,param_len);
EVP_DigestFinal_ex(&md_ctx,q,(unsigned int *)&i);
q+=i;
j+=i;
}
i=RSA_verify(NID_md5_sha1, md_buf, j, p, n,
pkey->pkey.rsa);
if (i < 0)
{
al=SSL_AD_DECRYPT_ERROR;
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,SSL_R_BAD_RSA_DECRYPT);
goto f_err;
}
if (i == 0)
{
/* bad signature */
al=SSL_AD_DECRYPT_ERROR;
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,SSL_R_BAD_SIGNATURE);
goto f_err;
}
}
else
#endif
#ifndef OPENSSL_NO_DSA
if (pkey->type == EVP_PKEY_DSA)
{
/* lets do DSS */
EVP_VerifyInit_ex(&md_ctx,EVP_dss1(), NULL);
EVP_VerifyUpdate(&md_ctx,&(s->s3->client_random[0]),SSL3_RANDOM_SIZE);
EVP_VerifyUpdate(&md_ctx,&(s->s3->server_random[0]),SSL3_RANDOM_SIZE);
EVP_VerifyUpdate(&md_ctx,param,param_len);
if (EVP_VerifyFinal(&md_ctx,p,(int)n,pkey) <= 0)
{
/* bad signature */
al=SSL_AD_DECRYPT_ERROR;
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,SSL_R_BAD_SIGNATURE);
goto f_err;
}
}
else
#endif
#ifndef OPENSSL_NO_ECDSA
if (pkey->type == EVP_PKEY_EC)
{
/* let's do ECDSA */
EVP_VerifyInit_ex(&md_ctx,EVP_ecdsa(), NULL);
EVP_VerifyUpdate(&md_ctx,&(s->s3->client_random[0]),SSL3_RANDOM_SIZE);
EVP_VerifyUpdate(&md_ctx,&(s->s3->server_random[0]),SSL3_RANDOM_SIZE);
EVP_VerifyUpdate(&md_ctx,param,param_len);
if (EVP_VerifyFinal(&md_ctx,p,(int)n,pkey) <= 0)
{
/* bad signature */
al=SSL_AD_DECRYPT_ERROR;
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,SSL_R_BAD_SIGNATURE);
goto f_err;
}
}
else
#endif
{
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,ERR_R_INTERNAL_ERROR);
goto err;
}
}
else
{
/* still data left over */
if (!(alg & SSL_aNULL))
{
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,ERR_R_INTERNAL_ERROR);
goto err;
}
if (n != 0)
{
al=SSL_AD_DECODE_ERROR;
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,SSL_R_EXTRA_DATA_IN_MESSAGE);
goto f_err;
}
}
EVP_PKEY_free(pkey);
EVP_MD_CTX_cleanup(&md_ctx);
return(1);
f_err:
ssl3_send_alert(s,SSL3_AL_FATAL,al);
err:
EVP_PKEY_free(pkey);
#ifndef OPENSSL_NO_RSA
if (rsa != NULL)
RSA_free(rsa);
#endif
#ifndef OPENSSL_NO_DH
if (dh != NULL)
DH_free(dh);
#endif
#ifndef OPENSSL_NO_ECDH
BN_CTX_free(bn_ctx);
EC_POINT_free(srvr_ecpoint);
if (ecdh != NULL)
EC_KEY_free(ecdh);
#endif
EVP_MD_CTX_cleanup(&md_ctx);
return(-1);
}
| 0 |
[
"CWE-326"
] |
openssl
|
410a49a4fa1d2a1a9775ee29f9e40cbbda79c149
| 247,024,451,311,470,000,000,000,000,000,000,000,000 | 459 |
Fix for CVE-2014-0224
Only accept change cipher spec when it is expected instead of at any
time. This prevents premature setting of session keys before the master
secret is determined, which an attacker could exploit as a MITM attack.
Thanks to KIKUCHI Masashi (Lepidum Co. Ltd.) for reporting this issue
and providing the initial fix this patch is based on.
|
xmlNsWarn(xmlParserCtxtPtr ctxt, xmlParserErrors error,
const char *msg,
const xmlChar * info1, const xmlChar * info2,
const xmlChar * info3)
{
if ((ctxt != NULL) && (ctxt->disableSAX != 0) &&
(ctxt->instate == XML_PARSER_EOF))
return;
__xmlRaiseError(NULL, NULL, NULL, ctxt, NULL, XML_FROM_NAMESPACE, error,
XML_ERR_WARNING, NULL, 0, (const char *) info1,
(const char *) info2, (const char *) info3, 0, 0, msg,
info1, info2, info3);
}
| 0 |
[
"CWE-125"
] |
libxml2
|
77404b8b69bc122d12231807abf1a837d121b551
| 45,906,275,980,146,470,000,000,000,000,000,000,000 | 13 |
Make sure the parser returns when getting a Stop order
patch backported from chromium bug fixes, assuming author is Chris
|
int mongo_env_read_socket( mongo *conn, void *buf, int len ) {
char *cbuf = buf;
while ( len ) {
int sent = recv( conn->sock, cbuf, len, 0 );
if ( sent == 0 || sent == -1 ) {
__mongo_set_error( conn, MONGO_IO_ERROR, strerror( errno ), errno );
return MONGO_ERROR;
}
cbuf += sent;
len -= sent;
}
return MONGO_OK;
}
| 1 |
[
"CWE-190"
] |
mongo-c-driver-legacy
|
1a1f5e26a4309480d88598913f9eebf9e9cba8ca
| 216,421,266,933,410,650,000,000,000,000,000,000,000 | 14 |
don't mix up int and size_t (first pass to fix that)
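A minimal sketch of keeping the byte count in size_t throughout
(illustrative; not the driver's actual code):
#include <sys/types.h>
#include <sys/socket.h>

static int read_exact(int sock, void *buf, size_t len)
{
    char *p = buf;

    while (len > 0) {
        ssize_t got = recv(sock, p, len, 0);
        if (got <= 0)
            return -1;          /* error or connection closed */
        p += got;
        len -= (size_t)got;     /* no int/size_t mixing */
    }
    return 0;
}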
|
_zip_cdir_grow(zip_cdir_t *cd, zip_uint64_t additional_entries, zip_error_t *error)
{
zip_uint64_t i, new_alloc;
zip_entry_t *new_entry;
if (additional_entries == 0) {
return true;
}
new_alloc = cd->nentry_alloc + additional_entries;
if (new_alloc < additional_entries || new_alloc > SIZE_MAX/sizeof(*(cd->entry))) {
zip_error_set(error, ZIP_ER_MEMORY, 0);
return false;
}
if ((new_entry = (zip_entry_t *)realloc(cd->entry, sizeof(*(cd->entry))*(size_t)new_alloc)) == NULL) {
zip_error_set(error, ZIP_ER_MEMORY, 0);
return false;
}
cd->entry = new_entry;
for (i = cd->nentry; i < new_alloc; i++) {
_zip_entry_init(cd->entry+i);
}
cd->nentry = cd->nentry_alloc = new_alloc;
return true;
}
| 0 |
[
"CWE-416",
"CWE-415"
] |
libzip
|
2217022b7d1142738656d891e00b3d2d9179b796
| 29,313,649,848,855,248,000,000,000,000,000,000,000 | 31 |
Fix double free().
Found by Brian 'geeknik' Carpenter using AFL.
|
void ecryptfs_put_lower_file(struct inode *inode)
{
struct ecryptfs_inode_info *inode_info;
inode_info = ecryptfs_inode_to_private(inode);
if (atomic_dec_and_mutex_lock(&inode_info->lower_file_count,
&inode_info->lower_file_mutex)) {
fput(inode_info->lower_file);
inode_info->lower_file = NULL;
mutex_unlock(&inode_info->lower_file_mutex);
}
}
| 0 |
[
"CWE-362",
"CWE-284",
"CWE-264"
] |
linux
|
764355487ea220fdc2faf128d577d7f679b91f97
| 139,335,541,104,464,700,000,000,000,000,000,000,000 | 12 |
Ecryptfs: Add mount option to check uid of device being mounted = expect uid
Close a TOCTOU race for mounts done via ecryptfs-mount-private. The mount
source (device) can be raced when the ownership test is done in userspace.
Provide Ecryptfs a means to force the uid check at mount time.
Signed-off-by: John Johansen <[email protected]>
Cc: <[email protected]>
Signed-off-by: Tyler Hicks <[email protected]>
|
int importItems(string_array &target, bool scope_limit)
{
string_array result;
std::stringstream ss;
std::string path, content, strLine;
unsigned int itemCount = 0;
for(std::string &x : target)
{
if(x.find("!!import:") == x.npos)
{
result.emplace_back(x);
continue;
}
path = x.substr(x.find(":") + 1);
writeLog(0, "Trying to import items from " + path);
std::string proxy = parseProxy(global.proxyConfig);
if(fileExist(path))
content = fileGet(path, scope_limit);
else if(isLink(path))
content = webGet(path, proxy, global.cacheConfig);
else
writeLog(0, "File not found or not a valid URL: " + path, LOG_LEVEL_ERROR);
if(!content.size())
return -1;
ss << content;
char delimiter = getLineBreak(content);
std::string::size_type lineSize;
while(getline(ss, strLine, delimiter))
{
lineSize = strLine.size();
if(lineSize && strLine[lineSize - 1] == '\r') //remove line break
strLine.erase(--lineSize);
if(!lineSize || strLine[0] == ';' || strLine[0] == '#' || (lineSize >= 2 && strLine[0] == '/' && strLine[1] == '/')) //empty lines and comments are ignored
continue;
result.emplace_back(std::move(strLine));
itemCount++;
}
ss.clear();
}
target.swap(result);
writeLog(0, "Imported " + std::to_string(itemCount) + " item(s).");
return 0;
}
| 0 |
[
"CWE-434",
"CWE-94"
] |
subconverter
|
ce8d2bd0f13f05fcbd2ed90755d097f402393dd3
| 32,144,759,894,642,790,000,000,000,000,000,000,000 | 46 |
Enhancements
Add authorization check before loading scripts.
Add detailed logs when loading preference settings.
|
static pfn_t __gfn_to_pfn(struct kvm *kvm, gfn_t gfn, bool atomic, bool *async,
bool write_fault, bool *writable)
{
struct kvm_memory_slot *slot;
if (async)
*async = false;
slot = gfn_to_memslot(kvm, gfn);
return __gfn_to_pfn_memslot(slot, gfn, atomic, async, write_fault,
writable);
}
| 0 |
[
"CWE-399"
] |
linux
|
12d6e7538e2d418c08f082b1b44ffa5fb7270ed8
| 303,952,695,877,756,120,000,000,000,000,000,000,000 | 13 |
KVM: perform an invalid memslot step for gpa base change
PPC must flush all translations before the new memory slot
is visible.
Signed-off-by: Marcelo Tosatti <[email protected]>
Signed-off-by: Avi Kivity <[email protected]>
|
find_hints_in_secrets (gpointer key, gpointer data, gpointer user_data)
{
FindHintsInfo *info = (FindHintsInfo *) user_data;
const char **iter;
for (iter = info->hints; !info->found && *iter; iter++) {
if (!strcmp (*iter, (const char *) key) && data && G_IS_VALUE (data))
info->found = TRUE;
}
}
| 0 |
[
"CWE-200"
] |
network-manager-applet
|
8627880e07c8345f69ed639325280c7f62a8f894
| 242,044,045,268,227,850,000,000,000,000,000,000,000 | 10 |
editor: prevent any registration of objects on the system bus
D-Bus access-control is name-based; so requests for a specific name
are allowed/denied based on the rules in /etc/dbus-1/system.d. But
apparently apps still get a non-named service on the bus, and if we
register *any* object even though we don't have a named service,
dbus and dbus-glib will happily proxy signals. Since the connection
editor shouldn't ever expose anything having to do with connections
on any bus, make sure that's the case.
|
void InstanceKlass::oop_print_value_on(oop obj, outputStream* st) {
st->print("a ");
name()->print_value_on(st);
obj->print_address_on(st);
if (this == vmClasses::String_klass()
&& java_lang_String::value(obj) != NULL) {
ResourceMark rm;
int len = java_lang_String::length(obj);
int plen = (len < 24 ? len : 12);
char* str = java_lang_String::as_utf8_string(obj, 0, plen);
st->print(" = \"%s\"", str);
if (len > plen)
st->print("...[%d]", len);
} else if (this == vmClasses::Class_klass()) {
Klass* k = java_lang_Class::as_Klass(obj);
st->print(" = ");
if (k != NULL) {
k->print_value_on(st);
} else {
const char* tname = type2name(java_lang_Class::primitive_type(obj));
st->print("%s", tname ? tname : "type?");
}
} else if (this == vmClasses::MethodType_klass()) {
st->print(" = ");
java_lang_invoke_MethodType::print_signature(obj, st);
} else if (java_lang_boxing_object::is_instance(obj)) {
st->print(" = ");
java_lang_boxing_object::print(obj, st);
} else if (this == vmClasses::LambdaForm_klass()) {
oop vmentry = java_lang_invoke_LambdaForm::vmentry(obj);
if (vmentry != NULL) {
st->print(" => ");
vmentry->print_value_on(st);
}
} else if (this == vmClasses::MemberName_klass()) {
Metadata* vmtarget = java_lang_invoke_MemberName::vmtarget(obj);
if (vmtarget != NULL) {
st->print(" = ");
vmtarget->print_value_on(st);
} else {
oop clazz = java_lang_invoke_MemberName::clazz(obj);
oop name = java_lang_invoke_MemberName::name(obj);
if (clazz != NULL) {
clazz->print_value_on(st);
} else {
st->print("NULL");
}
st->print(".");
if (name != NULL) {
name->print_value_on(st);
} else {
st->print("NULL");
}
}
}
}
| 0 |
[] |
jdk17u
|
f8eb9abe034f7c6bea4da05a9ea42017b3f80730
| 304,525,262,762,893,330,000,000,000,000,000,000,000 | 56 |
8270386: Better verification of scan methods
Reviewed-by: coleenp
Backport-of: ac329cef45979bd0159ecd1347e36f7129bb2ce4
|
bool kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
int shorthand, unsigned int dest, int dest_mode)
{
struct kvm_lapic *target = vcpu->arch.apic;
u32 mda = kvm_apic_mda(vcpu, dest, source, target);
ASSERT(target);
switch (shorthand) {
case APIC_DEST_NOSHORT:
if (dest_mode == APIC_DEST_PHYSICAL)
return kvm_apic_match_physical_addr(target, mda);
else
return kvm_apic_match_logical_addr(target, mda);
case APIC_DEST_SELF:
return target == source;
case APIC_DEST_ALLINC:
return true;
case APIC_DEST_ALLBUT:
return target != source;
default:
return false;
}
}
| 0 |
[
"CWE-703",
"CWE-459"
] |
linux
|
f7d8a19f9a056a05c5c509fa65af472a322abfee
| 282,437,702,717,213,430,000,000,000,000,000,000,000 | 23 |
Revert "KVM: x86: Open code necessary bits of kvm_lapic_set_base() at vCPU RESET"
Revert a change to open code bits of kvm_lapic_set_base() when emulating
APIC RESET to fix an apic_hw_disabled underflow bug due to arch.apic_base
and apic_hw_disabled being unsynchronized when the APIC is created. If
kvm_arch_vcpu_create() fails after creating the APIC, kvm_free_lapic()
will see the initialized-to-zero vcpu->arch.apic_base and decrement
apic_hw_disabled without KVM ever having incremented apic_hw_disabled.
Using kvm_lapic_set_base() in kvm_lapic_reset() is also desirable for a
potential future where KVM supports RESET outside of vCPU creation, in
which case all the side effects of kvm_lapic_set_base() are needed, e.g.
to handle the transition from x2APIC => xAPIC.
Alternatively, KVM could temporarily increment apic_hw_disabled (and call
kvm_lapic_set_base() at RESET), but that's a waste of cycles and would
impact the performance of other vCPUs and VMs. The other subtle side
effect is that updating the xAPIC ID needs to be done at RESET regardless
of whether the APIC was previously enabled, i.e. kvm_lapic_reset() needs
an explicit call to kvm_apic_set_xapic_id() regardless of whether or not
kvm_lapic_set_base() also performs the update. That makes stuffing the
enable bit at vCPU creation slightly more palatable, as doing so affects
only the apic_hw_disabled key.
Opportunistically tweak the comment to explicitly call out the connection
between vcpu->arch.apic_base and apic_hw_disabled, and add a comment to
call out the need to always do kvm_apic_set_xapic_id() at RESET.
Underflow scenario:
kvm_vm_ioctl() {
kvm_vm_ioctl_create_vcpu() {
kvm_arch_vcpu_create() {
if (something_went_wrong)
goto fail_free_lapic;
/* vcpu->arch.apic_base is initialized when something_went_wrong is false. */
kvm_vcpu_reset() {
kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event) {
vcpu->arch.apic_base = APIC_DEFAULT_PHYS_BASE | MSR_IA32_APICBASE_ENABLE;
}
}
return 0;
fail_free_lapic:
kvm_free_lapic() {
/* vcpu->arch.apic_base is not yet initialized when something_went_wrong is true. */
if (!(vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE))
static_branch_slow_dec_deferred(&apic_hw_disabled); // <= underflow bug.
}
return r;
}
}
}
This (mostly) reverts commit 421221234ada41b4a9f0beeb08e30b07388bd4bd.
Fixes: 421221234ada ("KVM: x86: Open code necessary bits of kvm_lapic_set_base() at vCPU RESET")
Reported-by: [email protected]
Debugged-by: Tetsuo Handa <[email protected]>
Signed-off-by: Sean Christopherson <[email protected]>
Message-Id: <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
|
virDomainMigrateUnmanaged(virDomainPtr domain,
const char *xmlin,
unsigned int flags,
const char *dname,
const char *dconnuri,
const char *miguri,
unsigned long long bandwidth)
{
int ret = -1;
virTypedParameterPtr params = NULL;
int nparams = 0;
int maxparams = 0;
if (miguri &&
virTypedParamsAddString(¶ms, &nparams, &maxparams,
VIR_MIGRATE_PARAM_URI, miguri) < 0)
goto cleanup;
if (dname &&
virTypedParamsAddString(¶ms, &nparams, &maxparams,
VIR_MIGRATE_PARAM_DEST_NAME, dname) < 0)
goto cleanup;
if (xmlin &&
virTypedParamsAddString(¶ms, &nparams, &maxparams,
VIR_MIGRATE_PARAM_DEST_XML, xmlin) < 0)
goto cleanup;
if (virTypedParamsAddULLong(¶ms, &nparams, &maxparams,
VIR_MIGRATE_PARAM_BANDWIDTH, bandwidth) < 0)
goto cleanup;
ret = virDomainMigrateUnmanagedParams(domain, dconnuri, params,
nparams, flags);
cleanup:
virTypedParamsFree(params, nparams);
return ret;
}
| 0 |
[
"CWE-254"
] |
libvirt
|
506e9d6c2d4baaf580d489fff0690c0ff2ff588f
| 308,554,877,146,321,220,000,000,000,000,000,000,000 | 37 |
virDomainGetTime: Deny on RO connections
We have a policy that if an API may end up talking to a guest agent
it should require an RW connection. We don't obey the rule in
virDomainGetTime().
Signed-off-by: Michal Privoznik <[email protected]>
|
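As an illustration of the policy stated above, here is a minimal sketch of a read-only gate at an agent-backed entry point. The flag name and helper are assumptions for this sketch, not libvirt's actual code.
/* Minimal sketch, assuming a CONN_READONLY connection flag. */
#include <errno.h>

#define CONN_READONLY 0x1   /* assumed read-only flag */

static int agent_call_allowed(unsigned int conn_flags)
{
    if (conn_flags & CONN_READONLY) {
        errno = EACCES;     /* deny, as virDomainGetTime now must */
        return -1;
    }
    return 0;               /* safe to talk to the guest agent */
}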
v8::Local<v8::Value> FindFrameByName(v8::Isolate* isolate,
const std::string& name) {
content::RenderFrame* render_frame;
if (!MaybeGetRenderFrame(isolate, "findFrameByName", &render_frame))
return v8::Null(isolate);
blink::WebFrame* frame = render_frame->GetWebFrame()->FindFrameByName(
blink::WebString::FromUTF8(name));
return CreateWebFrameRenderer(isolate, frame);
}
| 0 |
[] |
electron
|
e9fa834757f41c0b9fe44a4dffe3d7d437f52d34
| 143,511,557,585,823,620,000,000,000,000,000,000,000 | 10 |
fix: ensure ElectronBrowser mojo service is only bound to appropriate render frames (#33344)
* fix: ensure ElectronBrowser mojo service is only bound to authorized render frames
Notes: no-notes
* refactor: extract electron API IPC to its own mojo interface
* fix: just check main frame not primary main frame
Co-authored-by: Samuel Attard <[email protected]>
Co-authored-by: Samuel Attard <[email protected]>
|
void msPostGISLayerFreeItemInfo(layerObj *layer)
{
#ifdef USE_POSTGIS
if (layer->debug) {
msDebug("msPostGISLayerFreeItemInfo called.\n");
}
if (layer->iteminfo) {
free(layer->iteminfo);
}
layer->iteminfo = NULL;
#endif
}
| 0 |
[
"CWE-89"
] |
mapserver
|
3a10f6b829297dae63492a8c63385044bc6953ed
| 153,574,663,633,236,740,000,000,000,000,000,000,000 | 13 |
Fix potential SQL Injection with postgis TIME filters (#4834)
|
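A hedged sketch of the remediation idea: escape the user-supplied time string with libpq before it reaches SQL text instead of interpolating it verbatim. PQescapeLiteral and PQfreemem are real libpq API; the surrounding function and query are illustrative only.
#include <libpq-fe.h>
#include <stdio.h>
#include <string.h>

/* Static buffer for brevity; a real caller would allocate. */
static const char *build_time_filter(PGconn *conn, const char *user_time)
{
    static char sql[256];
    char *lit = PQescapeLiteral(conn, user_time, strlen(user_time));
    if (!lit)
        return NULL;
    snprintf(sql, sizeof(sql),
             "SELECT * FROM obs WHERE t = %s::timestamp", lit);
    PQfreemem(lit);
    return sql;
}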
static struct playlist *new_playlist(HLSContext *c, const char *url,
const char *base)
{
struct playlist *pls = av_mallocz(sizeof(struct playlist));
if (!pls)
return NULL;
reset_packet(&pls->pkt);
ff_make_absolute_url(pls->url, sizeof(pls->url), base, url);
pls->seek_timestamp = AV_NOPTS_VALUE;
pls->is_id3_timestamped = -1;
pls->id3_mpegts_timestamp = AV_NOPTS_VALUE;
dynarray_add(&c->playlists, &c->n_playlists, pls);
return pls;
}
| 0 |
[
"CWE-703"
] |
FFmpeg
|
7ba100d3e6e8b1e5d5342feb960a7f081d6e15af
| 169,458,276,841,862,640,000,000,000,000,000,000,000 | 16 |
avformat/hls: Fix DoS due to infinite loop
Fixes: loop.m3u
The default max iteration count of 1000 is arbitrary and ideas for a better solution are welcome
Found-by: Xiaohei and Wangchu from Alibaba Security Team
Previous version reviewed-by: Steven Liu <[email protected]>
Signed-off-by: Michael Niedermayer <[email protected]>
(cherry picked from commit 7ec414892ddcad88313848494b6fc5f437c9ca4a)
Signed-off-by: Michael Niedermayer <[email protected]>
|
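The shape of the fix, as a sketch: cap how many playlist loads a single open may perform so a self-referencing m3u cannot recurse forever. The constant and context struct are stand-ins, not FFmpeg's exact names.
struct hls_ctx { int nb_loads; };
#define MAX_PLAYLIST_LOADS 1000   /* arbitrary finite cap */

static int load_playlist_bounded(struct hls_ctx *c)
{
    if (++c->nb_loads > MAX_PLAYLIST_LOADS)
        return -1;   /* give up instead of looping forever */
    /* ... fetch, parse, possibly recurse into variant playlists ... */
    return 0;
}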
With(asdl_seq * items, asdl_seq * body, string type_comment, int lineno, int
col_offset, int end_lineno, int end_col_offset, PyArena *arena)
{
stmt_ty p;
p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p));
if (!p)
return NULL;
p->kind = With_kind;
p->v.With.items = items;
p->v.With.body = body;
p->v.With.type_comment = type_comment;
p->lineno = lineno;
p->col_offset = col_offset;
p->end_lineno = end_lineno;
p->end_col_offset = end_col_offset;
return p;
}
| 0 |
[
"CWE-125"
] |
cpython
|
dcfcd146f8e6fc5c2fc16a4c192a0c5f5ca8c53c
| 80,330,457,058,636,510,000,000,000,000,000,000,000 | 17 |
bpo-35766: Merge typed_ast back into CPython (GH-11645)
|
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
hrtimer_cancel(&vcpu->arch.hlt_timer);
kfree(vcpu->arch.apic);
}
| 0 |
[
"CWE-399"
] |
kvm
|
5b40572ed5f0344b9dbee486a17c589ce1abe1a3
| 136,836,534,718,429,790,000,000,000,000,000,000,000 | 6 |
KVM: Ensure all vcpus are consistent with in-kernel irqchip settings
If some vcpus are created before KVM_CREATE_IRQCHIP, then
irqchip_in_kernel() and vcpu->arch.apic will be inconsistent, leading
to potential NULL pointer dereferences.
Fix by:
- ensuring that no vcpus are installed when KVM_CREATE_IRQCHIP is called
- ensuring that a vcpu has an apic if it is installed after KVM_CREATE_IRQCHIP
This is somewhat long winded because vcpu->arch.apic is created without
kvm->lock held.
Based on earlier patch by Michael Ellerman.
Signed-off-by: Michael Ellerman <[email protected]>
Signed-off-by: Avi Kivity <[email protected]>
|
HttpHeader::insertEntry(HttpHeaderEntry * e)
{
assert(e);
assert(any_valid_header(e->id));
debugs(55, 7, this << " adding entry: " << e->id << " at " << entries.size());
// Http::HdrType::BAD_HDR is filtered out by assert_any_valid_header
if (CBIT_TEST(mask, e->id)) {
++ headerStatsTable[e->id].repCount;
} else {
CBIT_SET(mask, e->id);
}
entries.insert(entries.begin(),e);
/* increment header length, allow for ": " and crlf */
len += e->name.size() + 2 + e->value.size() + 2;
}
| 0 |
[
"CWE-444"
] |
squid
|
9c8e2a71aa1d3c159a319d9365c346c48dc783a5
| 325,270,902,922,504,820,000,000,000,000,000,000,000 | 19 |
Enforce token characters for field-name (#700)
RFC 7230 defines field-name as a token. Request splitting and cache
poisoning attacks have used non-token characters to fool broken HTTP
agents behind or in front of Squid for years. This change should
significantly reduce that abuse.
If we discover exceptional situations that need special treatment, the
relaxed parser can allow them on a case-by-case basis (while being extra
careful about framing-related header fields), just like we already
tolerate some header whitespace (e.g., between the response header
field-name and colon).
|
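A standalone version of the RFC 7230 token check the commit enforces for field-names; illustrative, not Squid's parser.
#include <stdbool.h>
#include <stddef.h>
#include <string.h>

static bool is_tchar(unsigned char c)
{
    if (c == '\0')   /* strchr would otherwise match the terminator */
        return false;
    return (c >= '0' && c <= '9') || (c >= 'A' && c <= 'Z') ||
           (c >= 'a' && c <= 'z') ||
           strchr("!#$%&'*+-.^_`|~", c) != NULL;
}

static bool valid_field_name(const char *name, size_t len)
{
    if (len == 0)
        return false;
    for (size_t i = 0; i < len; i++)
        if (!is_tchar((unsigned char)name[i]))
            return false;   /* rejects CR, LF, SP, ':' ... */
    return true;
}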
static gint select_next_page_no(gint current_page_no, gpointer data)
{
GtkWidget *page;
again:
log_notice("%s: current_page_no:%d", __func__, current_page_no);
current_page_no++;
page = gtk_notebook_get_nth_page(g_assistant, current_page_no);
if (pages[PAGENO_EVENT_SELECTOR].page_widget == page)
{
if (!g_expert_mode && (g_auto_event_list == NULL))
{
return current_page_no; //stay here and let user select the workflow
}
if (!g_expert_mode)
{
/* (note: this frees and sets to NULL g_event_selected) */
char *event = setup_next_processed_event(&g_auto_event_list);
if (!event)
{
current_page_no = pages[PAGENO_EVENT_PROGRESS].page_no - 1;
goto again;
}
if (!get_sensitive_data_permission(event))
{
free(event);
cancel_processing(g_lbl_event_log, /* default message */ NULL, TERMINATE_NOFLAGS);
current_page_no = pages[PAGENO_EVENT_PROGRESS].page_no - 1;
goto again;
}
if (problem_data_get_content_or_NULL(g_cd, FILENAME_NOT_REPORTABLE))
{
free(event);
char *msg = xasprintf(_("This problem should not be reported "
"(it is likely a known problem). %s"),
problem_data_get_content_or_NULL(g_cd, FILENAME_NOT_REPORTABLE)
);
cancel_processing(g_lbl_event_log, msg, TERMINATE_NOFLAGS);
free(msg);
current_page_no = pages[PAGENO_EVENT_PROGRESS].page_no - 1;
goto again;
}
g_event_selected = event;
/* Notify a user that some configuration options miss values, but */
/* don't force him to provide them. */
check_event_config(g_event_selected);
/* >>> and this but this is clearer
* because it does exactly the same thing
* but I'm pretty scared to touch it */
current_page_no = pages[PAGENO_EVENT_SELECTOR].page_no + 1;
goto event_was_selected;
}
}
if (pages[PAGENO_EVENT_SELECTOR + 1].page_widget == page)
{
event_was_selected:
if (!g_event_selected)
{
/* Go back to selectors */
current_page_no = pages[PAGENO_EVENT_SELECTOR].page_no - 1;
goto again;
}
if (!event_need_review(g_event_selected))
{
current_page_no = pages[PAGENO_EVENT_PROGRESS].page_no - 1;
goto again;
}
}
#if 0
if (pages[PAGENO_EDIT_COMMENT].page_widget == page)
{
if (problem_data_get_content_or_NULL(g_cd, FILENAME_COMMENT))
goto again; /* no comment, skip this page */
}
#endif
if (pages[PAGENO_EVENT_DONE].page_widget == page)
{
if (g_auto_event_list)
{
/* Go back to selectors */
current_page_no = pages[PAGENO_SUMMARY].page_no;
}
goto again;
}
if (pages[PAGENO_NOT_SHOWN].page_widget == page)
{
if (!g_expert_mode)
exit(0);
/* No! this would SEGV (infinitely recurse into select_next_page_no) */
/*gtk_assistant_commit(g_assistant);*/
current_page_no = pages[PAGENO_EVENT_SELECTOR].page_no - 1;
goto again;
}
log_notice("%s: selected page #%d", __func__, current_page_no);
return current_page_no;
}
| 0 |
[
"CWE-200"
] |
libreport
|
257578a23d1537a2d235aaa2b1488ee4f818e360
| 25,881,412,046,054,620,000,000,000,000,000,000,000 | 111 |
wizard: fix save users changes after reviewing dump dir files
If the user reviewed the dump dir's files during reporting the crash, the
changes were thrown away and the original data was passed to the bugzilla bug
report.
report-gtk saves the first text view buffer and then reloads data from the
reported problem directory, which causes the changes made to those text
views to be thrown away.
Function save_text_if_changed(), except of saving text, also reload the files
from dump dir and update gui state from the dump dir. The commit moves the
reloading and updating gui functions away from this function.
Related to rhbz#1270235
Signed-off-by: Matej Habrnal <[email protected]>
|
nv_redo(cmdarg_T *cap)
{
if (!checkclearopq(cap->oap))
{
u_redo((int)cap->count1);
curwin->w_set_curswant = TRUE;
}
}
| 0 |
[
"CWE-416"
] |
vim
|
35a9a00afcb20897d462a766793ff45534810dc3
| 170,602,313,819,350,670,000,000,000,000,000,000,000 | 8 |
patch 8.2.3428: using freed memory when replacing
Problem: Using freed memory when replacing. (Dhiraj Mishra)
Solution: Get the line pointer after calling ins_copychar().
|
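The bug shape, sketched with stand-in names: if a callee may realloc (and thus move) the line buffer, any pointer captured before the call goes stale; the fix is to take the pointer from the call's result afterwards. This mimics, but is not, Vim's code.
#include <stdlib.h>
#include <string.h>

/* May realloc and move the buffer, like ins_copychar() growing a line. */
static char *append_char(char *line, size_t *cap, char c)
{
    size_t len = strlen(line);
    if (len + 2 > *cap) {
        char *tmp = realloc(line, *cap * 2);
        if (!tmp)
            return line;   /* sketch: keep the old buffer on OOM */
        line = tmp;
        *cap *= 2;
    }
    line[len] = c;
    line[len + 1] = '\0';
    return line;
}
/* Correct caller: line = append_char(line, &cap, 'x'); then use line.
 * Wrong: save char *p = line before the call and dereference p after. */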
TEST_F(LoaderTest, NoTagMatch) {
SavedModelBundle bundle;
RunOptions run_options;
SessionOptions session_options;
const string export_dir =
io::JoinPath(testing::TensorFlowSrcRoot(), kTestDataSharded);
Status st = LoadSavedModel(session_options, run_options, export_dir,
{"missing-tag"}, &bundle);
EXPECT_FALSE(st.ok());
EXPECT_TRUE(absl::StrContains(
st.error_message(),
"Could not find meta graph def matching supplied tags: { missing-tag }"))
<< st.error_message();
}
| 0 |
[
"CWE-20",
"CWE-703"
] |
tensorflow
|
adf095206f25471e864a8e63a0f1caef53a0e3a6
| 289,129,106,200,590,580,000,000,000,000,000,000,000 | 15 |
Validate `NodeDef`s from `FunctionDefLibrary` of a `GraphDef`.
We already validated `NodeDef`s from a `GraphDef` but missed validating those from the `FunctionDefLibrary`. Thus, some maliciously crafted models could evade detection and cause denial of service due to a `CHECK`-fail.
PiperOrigin-RevId: 332536309
Change-Id: I052efe919ff1fe2f90815e286a1aa4c54c7b94ff
|
static inline void address_space_stl_internal(AddressSpace *as,
hwaddr addr, uint32_t val,
MemTxAttrs attrs,
MemTxResult *result,
enum device_endian endian)
{
uint8_t *ptr;
MemoryRegion *mr;
hwaddr l = 4;
hwaddr addr1;
MemTxResult r;
bool release_lock = false;
rcu_read_lock();
mr = address_space_translate(as, addr, &addr1, &l,
true);
if (l < 4 || !memory_access_is_direct(mr, true)) {
release_lock |= prepare_mmio_access(mr);
#if defined(TARGET_WORDS_BIGENDIAN)
if (endian == DEVICE_LITTLE_ENDIAN) {
val = bswap32(val);
}
#else
if (endian == DEVICE_BIG_ENDIAN) {
val = bswap32(val);
}
#endif
r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
} else {
/* RAM case */
addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
ptr = qemu_get_ram_ptr(addr1);
switch (endian) {
case DEVICE_LITTLE_ENDIAN:
stl_le_p(ptr, val);
break;
case DEVICE_BIG_ENDIAN:
stl_be_p(ptr, val);
break;
default:
stl_p(ptr, val);
break;
}
invalidate_and_set_dirty(mr, addr1, 4);
r = MEMTX_OK;
}
if (result) {
*result = r;
}
if (release_lock) {
qemu_mutex_unlock_iothread();
}
rcu_read_unlock();
}
| 0 |
[] |
qemu
|
b242e0e0e2969c044a318e56f7988bbd84de1f63
| 293,075,194,335,952,270,000,000,000,000,000,000,000 | 55 |
exec: skip MMIO regions correctly in cpu_physical_memory_write_rom_internal
Loading the BIOS in the mac99 machine is interesting, because there is a
PROM in the middle of the BIOS region (from 16K to 32K). Before memory
region accesses were clamped, when QEMU was asked to load a BIOS from
0xfff00000 to 0xffffffff it would put even those 16K from the BIOS file
into the region. This is weird because those 16K were not actually
visible between 0xfff04000 and 0xfff07fff. However, it worked.
After clamping was added, this also worked. In this case, the
cpu_physical_memory_write_rom_internal function split the write in
three parts: the first 16K were copied, the PROM area (second 16K) were
ignored, then the rest was copied.
Problems then started with commit 965eb2f (exec: do not clamp accesses
to MMIO regions, 2015-06-17). Clamping accesses is not done for MMIO
regions because they can overlap wildly, and MMIO registers can be
expected to perform full-width accesses based only on their address
(with no respect for adjacent registers that could decode to completely
different MemoryRegions). However, this lack of clamping also applied
to the PROM area! cpu_physical_memory_write_rom_internal thus failed
to copy the third range above, i.e. only copied the first 16K of the BIOS.
In effect, address_space_translate is expecting _something else_ to do
the clamping for MMIO regions if the incoming length is large. This
"something else" is memory_access_size in the case of address_space_rw,
so use the same logic in cpu_physical_memory_write_rom_internal.
Reported-by: Alexander Graf <[email protected]>
Reviewed-by: Laurent Vivier <[email protected]>
Tested-by: Laurent Vivier <[email protected]>
Fixes: 965eb2f
Signed-off-by: Paolo Bonzini <[email protected]>
|
static int snd_ctl_release(struct inode *inode, struct file *file)
{
unsigned long flags;
struct snd_card *card;
struct snd_ctl_file *ctl;
struct snd_kcontrol *control;
unsigned int idx;
ctl = file->private_data;
file->private_data = NULL;
card = ctl->card;
write_lock_irqsave(&card->ctl_files_rwlock, flags);
list_del(&ctl->list);
write_unlock_irqrestore(&card->ctl_files_rwlock, flags);
down_write(&card->controls_rwsem);
list_for_each_entry(control, &card->controls, list)
for (idx = 0; idx < control->count; idx++)
if (control->vd[idx].owner == ctl)
control->vd[idx].owner = NULL;
up_write(&card->controls_rwsem);
snd_ctl_empty_read_queue(ctl);
put_pid(ctl->pid);
kfree(ctl);
module_put(card->module);
snd_card_file_remove(card, file);
return 0;
}
| 0 |
[
"CWE-190",
"CWE-189"
] |
linux
|
ac902c112d90a89e59916f751c2745f4dbdbb4bd
| 262,997,889,422,879,430,000,000,000,000,000,000,000 | 27 |
ALSA: control: Handle numid overflow
Each control gets automatically assigned its numids when the control is created.
The allocation is done by incrementing the numid by the amount of allocated
numids per allocation. This means that excessive creation and destruction of
controls (e.g. via SNDRV_CTL_IOCTL_ELEM_ADD/REMOVE) can cause the id to
eventually overflow. Currently when this happens for the control that caused the
overflow, kctl->id.numid + kctl->count will also overflow, causing it to be
smaller than kctl->id.numid. Most of the code assumes that this is something
that can not happen, so we need to make sure that it won't happen
Signed-off-by: Lars-Peter Clausen <[email protected]>
Acked-by: Jaroslav Kysela <[email protected]>
Cc: <[email protected]>
Signed-off-by: Takashi Iwai <[email protected]>
|
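An overflow-safe form of the range test the message describes, as a standalone sketch: numid + count must not wrap around.
#include <limits.h>
#include <stdbool.h>

static bool numid_range_ok(unsigned int numid, unsigned int count)
{
    /* true iff numid + count does not exceed UINT_MAX */
    return count <= UINT_MAX - numid;
}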
void add_header(size_t &sum, HeaderRefs &headers, const StringRef &name,
const StringRef &value, bool no_index, int32_t token) {
sum += name.size() + value.size();
headers.emplace_back(name, value, no_index, token);
}
| 0 |
[] |
nghttp2
|
319d5ab1c6d916b6b8a0d85b2ae3f01b3ad04f2c
| 298,810,615,413,378,530,000,000,000,000,000,000,000 | 5 |
nghttpx: Fix request stall
Fix request stall if backend connection is reused and buffer is full.
|
static u32 read_nal_size_hdr(u8 *ptr, u32 nalh_size)
{
u32 nal_size=0;
u32 v = nalh_size;
while (v) {
nal_size |= (u8) *ptr;
ptr++;
v-=1;
if (v) nal_size <<= 8;
}
return nal_size;
}
| 0 |
[
"CWE-476",
"CWE-401"
] |
gpac
|
289ffce3e0d224d314f5f92a744d5fe35999f20b
| 204,915,393,376,485,420,000,000,000,000,000,000,000 | 12 |
fixed #1767 (fuzz)
|
static int dwc3_pci_runtime_resume(struct device *dev)
{
struct dwc3_pci *dwc = dev_get_drvdata(dev);
int ret;
ret = dwc3_pci_dsm(dwc, PCI_INTEL_BXT_STATE_D0);
if (ret)
return ret;
queue_work(pm_wq, &dwc->wakeup_work);
return 0;
}
| 0 |
[
"CWE-401"
] |
linux
|
9bbfceea12a8f145097a27d7c7267af25893c060
| 4,627,939,592,940,714,000,000,000,000,000,000,000 | 13 |
usb: dwc3: pci: prevent memory leak in dwc3_pci_probe
In dwc3_pci_probe a call to platform_device_alloc allocates a device
which is correctly put in case of error except one case: when the call to
platform_device_add_properties fails it directly returns instead of
going to error handling. This commit replaces the return with the goto.
Fixes: 1a7b12f69a94 ("usb: dwc3: pci: Supply device properties via driver data")
Signed-off-by: Navid Emamdoost <[email protected]>
Signed-off-by: Felipe Balbi <[email protected]>
|
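The fix pattern in sketch form: every failure after the allocation funnels through one cleanup label instead of returning directly. Kernel context assumed; this mimics, but is not, the driver code.
static int probe_sketch(void)
{
    struct platform_device *pdev = platform_device_alloc("dwc3", -1);
    int ret;

    if (!pdev)
        return -ENOMEM;
    ret = platform_device_add_properties(pdev, NULL);
    if (ret < 0)
        goto err;               /* was "return ret;", leaking pdev */
    return 0;
err:
    platform_device_put(pdev);  /* drop the reference on all errors */
    return ret;
}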
evdev_convert_to_mm(const struct input_absinfo *absinfo, double v)
{
double value = v - absinfo->minimum;
return value/absinfo->resolution;
}
| 0 |
[
"CWE-134"
] |
libinput
|
a423d7d3269dc32a87384f79e29bb5ac021c83d1
| 122,045,385,401,998,800,000,000,000,000,000,000,000 | 5 |
evdev: strip the device name of format directives
This fixes a format string vulnerability.
evdev_log_message() composes a format string consisting of a fixed
prefix (including the rendered device name) and the passed-in format
buffer. This format string is then passed with the arguments to the
actual log handler, which usually and eventually ends up being printf.
If the device name contains a printf-style format directive, these ended
up in the format string and thus get interpreted correctly, e.g. for a
device "Foo%sBar" the log message vs printf invocation ends up being:
evdev_log_message(device, "some message %s", "some argument");
printf("event9 - Foo%sBar: some message %s", "some argument");
This can enable an attacker to execute malicious code with the
privileges of the process using libinput.
To exploit this, an attacker needs to be able to create a kernel device
with a malicious name, e.g. through /dev/uinput or a Bluetooth device.
To fix this, convert any potential format directives in the device name
by duplicating percentages.
Pre-rendering the device to avoid the issue altogether would be nicer
but the current log level hooks do not easily allow for this. The device
name is the only user-controlled part of the format string.
A second potential issue is the sysname of the device which is also
sanitized.
This issue was found by Albin Eldstål-Ahrens and Benjamin Svensson from
Assured AB, and independently by Lukas Lamster.
Fixes #752
Signed-off-by: Peter Hutterer <[email protected]>
|
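A standalone version of the percent-doubling sanitizer the commit describes, so a device name can never act as a format string; illustrative, not libinput's exact code.
#include <stdlib.h>
#include <string.h>

static char *strdup_sanitize_percents(const char *name)
{
    size_t len = strlen(name), extra = 0;
    const char *p;
    char *out, *q;

    for (p = name; *p; p++)
        if (*p == '%')
            extra++;
    out = malloc(len + extra + 1);
    if (!out)
        return NULL;
    for (p = name, q = out; *p; p++) {
        *q++ = *p;
        if (*p == '%')
            *q++ = '%';   /* "Foo%sBar" -> "Foo%%sBar" */
    }
    *q = '\0';
    return out;
}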
CtPtr ProtocolV1::handle_server_banner_and_identify(char *buffer, int r) {
ldout(cct, 20) << __func__ << " r=" << r << dendl;
if (r < 0) {
ldout(cct, 1) << __func__ << " read banner and identify addresses failed"
<< dendl;
return _fault();
}
unsigned banner_len = strlen(CEPH_BANNER);
if (memcmp(buffer, CEPH_BANNER, banner_len)) {
ldout(cct, 0) << __func__ << " connect protocol error (bad banner) on peer "
<< connection->get_peer_addr() << dendl;
return _fault();
}
bufferlist bl;
entity_addr_t paddr, peer_addr_for_me;
bl.append(buffer + banner_len, sizeof(ceph_entity_addr) * 2);
auto p = bl.cbegin();
try {
decode(paddr, p);
decode(peer_addr_for_me, p);
} catch (const buffer::error &e) {
lderr(cct) << __func__ << " decode peer addr failed " << dendl;
return _fault();
}
ldout(cct, 20) << __func__ << " connect read peer addr " << paddr
<< " on socket " << connection->cs.fd() << dendl;
entity_addr_t peer_addr = connection->peer_addrs->legacy_addr();
if (peer_addr != paddr) {
if (paddr.is_blank_ip() && peer_addr.get_port() == paddr.get_port() &&
peer_addr.get_nonce() == paddr.get_nonce()) {
ldout(cct, 0) << __func__ << " connect claims to be " << paddr << " not "
<< peer_addr << " - presumably this is the same node!"
<< dendl;
} else {
ldout(cct, 10) << __func__ << " connect claims to be " << paddr << " not "
<< peer_addr << dendl;
return _fault();
}
}
ldout(cct, 20) << __func__ << " connect peer addr for me is "
<< peer_addr_for_me << dendl;
if (messenger->get_myaddrs().empty() ||
messenger->get_myaddrs().front().is_blank_ip()) {
sockaddr_storage ss;
socklen_t len = sizeof(ss);
getsockname(connection->cs.fd(), (sockaddr *)&ss, &len);
entity_addr_t a;
if (cct->_conf->ms_learn_addr_from_peer) {
ldout(cct, 1) << __func__ << " peer " << connection->target_addr
<< " says I am " << peer_addr_for_me << " (socket says "
<< (sockaddr*)&ss << ")" << dendl;
a = peer_addr_for_me;
} else {
ldout(cct, 1) << __func__ << " socket to " << connection->target_addr
<< " says I am " << (sockaddr*)&ss
<< " (peer says " << peer_addr_for_me << ")" << dendl;
a.set_sockaddr((sockaddr *)&ss);
}
a.set_type(entity_addr_t::TYPE_LEGACY); // anything but NONE; learned_addr ignores this
a.set_port(0);
connection->lock.unlock();
messenger->learned_addr(a);
if (cct->_conf->ms_inject_internal_delays &&
cct->_conf->ms_inject_socket_failures) {
if (rand() % cct->_conf->ms_inject_socket_failures == 0) {
ldout(cct, 10) << __func__ << " sleep for "
<< cct->_conf->ms_inject_internal_delays << dendl;
utime_t t;
t.set_from_double(cct->_conf->ms_inject_internal_delays);
t.sleep();
}
}
connection->lock.lock();
if (state != CONNECTING_WAIT_BANNER_AND_IDENTIFY) {
ldout(cct, 1) << __func__
<< " state changed while learned_addr, mark_down or "
<< " replacing must be happened just now" << dendl;
return nullptr;
}
}
bufferlist myaddrbl;
encode(messenger->get_myaddr_legacy(), myaddrbl, 0); // legacy
return WRITE(myaddrbl, handle_my_addr_write);
}
| 0 |
[
"CWE-294"
] |
ceph
|
6c14c2fb5650426285428dfe6ca1597e5ea1d07d
| 143,223,685,879,908,940,000,000,000,000,000,000,000 | 91 |
mon/MonClient: bring back CEPHX_V2 authorizer challenges
Commit c58c5754dfd2 ("msg/async/ProtocolV1: use AuthServer and
AuthClient") introduced a backwards compatibility issue into msgr1.
To fix it, commit 321548010578 ("mon/MonClient: skip CEPHX_V2
challenge if client doesn't support it") set out to skip authorizer
challenges for peers that don't support CEPHX_V2. However, it
made it so that authorizer challenges are skipped for all peers in
both msgr1 and msgr2 cases, effectively disabling the protection
against replay attacks that was put in place in commit f80b848d3f83
("auth/cephx: add authorizer challenge", CVE-2018-1128).
This is because con->get_features() always returns 0 at that
point. In msgr1 case, the peer shares its features along with the
authorizer, but while they are available in connect_msg.features they
aren't assigned to con until ProtocolV1::open(). In msgr2 case, the
peer doesn't share its features until much later (in CLIENT_IDENT
frame, i.e. after the authentication phase). The result is that
!CEPHX_V2 branch is taken in all cases and replay attack protection
is lost.
Only clusters with cephx_service_require_version set to 2 on the
service daemons would not be silently downgraded. But, since the
default is 1 and there are no reports of looping on BADAUTHORIZER
faults, I'm pretty sure that no one has ever done that. Note that
cephx_require_version set to 2 would have no effect even though it
is supposed to be stronger than cephx_service_require_version
because MonClient::handle_auth_request() didn't check it.
To fix:
- for msgr1, check connect_msg.features (as was done before commit
c58c5754dfd2) and challenge if CEPHX_V2 is supported. Together
with two preceding patches that resurrect proper cephx_* option
handling in msgr1, this covers both "I want old clients to work"
and "I wish to require better authentication" use cases.
- for msgr2, don't check anything and always challenge. CEPHX_V2
predates msgr2, anyone speaking msgr2 must support it.
Signed-off-by: Ilya Dryomov <[email protected]>
(cherry picked from commit 4a82c72e3bdddcb625933e83af8b50a444b961f1)
|
static void skb_clone_fraglist(struct sk_buff *skb)
{
struct sk_buff *list;
for (list = skb_shinfo(skb)->frag_list; list; list = list->next)
skb_get(list);
}
| 0 |
[] |
linux
|
e89e9cf539a28df7d0eb1d0a545368e9920b34ac
| 214,040,171,446,119,800,000,000,000,000,000,000,000 | 7 |
[IPv4/IPv6]: UFO Scatter-gather approach
Attached is kernel patch for UDP Fragmentation Offload (UFO) feature.
1. This patch incorporate the review comments by Jeff Garzik.
2. Renamed USO as UFO (UDP Fragmentation Offload)
3. udp sendfile support with UFO
This patches uses scatter-gather feature of skb to generate large UDP
datagram. Below is a "how-to" on changes required in network device
driver to use the UFO interface.
UDP Fragmentation Offload (UFO) Interface:
-------------------------------------------
UFO is a feature wherein the Linux kernel network stack will offload the
IP fragmentation functionality of large UDP datagram to hardware. This
will reduce the overhead of stack in fragmenting the large UDP datagram to
MTU sized packets
1) Drivers indicate their capability of UFO using
dev->features |= NETIF_F_UFO | NETIF_F_HW_CSUM | NETIF_F_SG
NETIF_F_HW_CSUM is required for UFO over ipv6.
2) UFO packet will be submitted for transmission using driver xmit routine.
UFO packet will have a non-zero value for
"skb_shinfo(skb)->ufo_size"
skb_shinfo(skb)->ufo_size will indicate the length of data part in each IP
fragment going out of the adapter after IP fragmentation by hardware.
skb->data will contain MAC/IP/UDP header and skb_shinfo(skb)->frags[]
contains the data payload. The skb->ip_summed will be set to CHECKSUM_HW
indicating that hardware has to do checksum calculation. Hardware should
compute the UDP checksum of complete datagram and also ip header checksum of
each fragmented IP packet.
For IPV6 the UFO provides the fragment identification-id in
skb_shinfo(skb)->ip6_frag_id. The adapter should use this ID for generating
IPv6 fragments.
Signed-off-by: Ananda Raju <[email protected]>
Signed-off-by: Rusty Russell <[email protected]> (forwarded)
Signed-off-by: Arnaldo Carvalho de Melo <[email protected]>
|
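A sketch of the driver-side contract from the how-to above: a UFO packet is identified by a non-zero ufo_size and must be fragmented by hardware. Kernel context assumed; not a complete xmit routine.
static int hard_start_xmit_sketch(struct sk_buff *skb, struct net_device *dev)
{
    if (skb_shinfo(skb)->ufo_size) {
        /* MAC/IP/UDP headers sit in skb->data, payload in
         * skb_shinfo(skb)->frags[]; hardware must emit IP fragments
         * of ufo_size data bytes each and compute the UDP checksum
         * plus each fragment's IP header checksum. */
    }
    return 0;
}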
ft_outline_draw( FT_Outline* outline,
double scale,
int pen_x,
int pen_y,
FTDemo_Handle* handle,
FTDemo_Display* display,
grColor color )
{
FT_Outline transformed;
FT_BBox cbox;
FT_Bitmap bitm;
FT_Outline_New( handle->library,
outline->n_points,
outline->n_contours,
&transformed );
FT_Outline_Copy( outline, &transformed );
if ( scale != 1. )
{
int nn;
for ( nn = 0; nn < transformed.n_points; nn++ )
{
FT_Vector* vec = &transformed.points[nn];
vec->x = (FT_F26Dot6)(vec->x*scale);
vec->y = (FT_F26Dot6)(vec->y*scale);
}
}
FT_Outline_Get_CBox( &transformed, &cbox );
cbox.xMin &= ~63;
cbox.yMin &= ~63;
cbox.xMax = (cbox.xMax + 63) & ~63;
cbox.yMax = (cbox.yMax + 63) & ~63;
bitm.width = (cbox.xMax - cbox.xMin) >> 6;
bitm.rows = (cbox.yMax - cbox.yMin) >> 6;
bitm.pitch = bitm.width;
bitm.num_grays = 256;
bitm.pixel_mode = FT_PIXEL_MODE_GRAY;
bitm.buffer = (unsigned char*)calloc( bitm.pitch, bitm.rows );
FT_Outline_Translate( &transformed, -cbox.xMin, -cbox.yMin );
FT_Outline_Get_Bitmap( handle->library, &transformed, &bitm );
ft_bitmap_draw( &bitm,
pen_x + (cbox.xMin >> 6),
pen_y - (cbox.yMax >> 6),
display,
color );
free( bitm.buffer );
FT_Outline_Done( handle->library, &transformed );
}
| 0 |
[
"CWE-120"
] |
freetype2-demos
|
b995299b73ba4cd259f221f500d4e63095508bec
| 182,698,120,086,976,060,000,000,000,000,000,000,000 | 57 |
Fix Savannah bug #30054.
* src/ftdiff.c, src/ftgrid.c, src/ftmulti.c, src/ftstring.c,
src/ftview.c: Use precision for `%s' where appropriate to avoid
buffer overflows.
|
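What "use precision for %s" buys, in a standalone sketch: the formatted length of the string argument is capped, so a fixed buffer cannot overflow on a long face name. The commit applied precision to the existing calls; snprintf is used here for extra safety.
#include <stdio.h>

static void format_face(char *buf, size_t bufsize, const char *face_name)
{
    /* at most 32 bytes of face_name are copied */
    snprintf(buf, bufsize, "face: %.32s", face_name);
}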
static void io_uring_clean_tctx(struct io_uring_task *tctx)
{
struct file *file;
unsigned long index;
xa_for_each(&tctx->xa, index, file)
io_uring_del_task_file(file);
if (tctx->io_wq) {
io_wq_put_and_exit(tctx->io_wq);
tctx->io_wq = NULL;
}
}
| 0 |
[
"CWE-667"
] |
linux
|
3ebba796fa251d042be42b929a2d916ee5c34a49
| 288,078,461,552,528,100,000,000,000,000,000,000,000 | 12 |
io_uring: ensure that SQPOLL thread is started for exit
If we create it in a disabled state because IORING_SETUP_R_DISABLED is
set on ring creation, we need to ensure that we've kicked the thread if
we're exiting before it's been explicitly disabled. Otherwise we can run
into a deadlock where exit is waiting go park the SQPOLL thread, but the
SQPOLL thread itself is waiting to get a signal to start.
That results in the below trace of both tasks hung, waiting on each other:
INFO: task syz-executor458:8401 blocked for more than 143 seconds.
Not tainted 5.11.0-next-20210226-syzkaller #0
"echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
task:syz-executor458 state:D stack:27536 pid: 8401 ppid: 8400 flags:0x00004004
Call Trace:
context_switch kernel/sched/core.c:4324 [inline]
__schedule+0x90c/0x21a0 kernel/sched/core.c:5075
schedule+0xcf/0x270 kernel/sched/core.c:5154
schedule_timeout+0x1db/0x250 kernel/time/timer.c:1868
do_wait_for_common kernel/sched/completion.c:85 [inline]
__wait_for_common kernel/sched/completion.c:106 [inline]
wait_for_common kernel/sched/completion.c:117 [inline]
wait_for_completion+0x168/0x270 kernel/sched/completion.c:138
io_sq_thread_park fs/io_uring.c:7115 [inline]
io_sq_thread_park+0xd5/0x130 fs/io_uring.c:7103
io_uring_cancel_task_requests+0x24c/0xd90 fs/io_uring.c:8745
__io_uring_files_cancel+0x110/0x230 fs/io_uring.c:8840
io_uring_files_cancel include/linux/io_uring.h:47 [inline]
do_exit+0x299/0x2a60 kernel/exit.c:780
do_group_exit+0x125/0x310 kernel/exit.c:922
__do_sys_exit_group kernel/exit.c:933 [inline]
__se_sys_exit_group kernel/exit.c:931 [inline]
__x64_sys_exit_group+0x3a/0x50 kernel/exit.c:931
do_syscall_64+0x2d/0x70 arch/x86/entry/common.c:46
entry_SYSCALL_64_after_hwframe+0x44/0xae
RIP: 0033:0x43e899
RSP: 002b:00007ffe89376d48 EFLAGS: 00000246 ORIG_RAX: 00000000000000e7
RAX: ffffffffffffffda RBX: 00000000004af2f0 RCX: 000000000043e899
RDX: 000000000000003c RSI: 00000000000000e7 RDI: 0000000000000000
RBP: 0000000000000000 R08: ffffffffffffffc0 R09: 0000000010000000
R10: 0000000000008011 R11: 0000000000000246 R12: 00000000004af2f0
R13: 0000000000000001 R14: 0000000000000000 R15: 0000000000000001
INFO: task iou-sqp-8401:8402 can't die for more than 143 seconds.
task:iou-sqp-8401 state:D stack:30272 pid: 8402 ppid: 8400 flags:0x00004004
Call Trace:
context_switch kernel/sched/core.c:4324 [inline]
__schedule+0x90c/0x21a0 kernel/sched/core.c:5075
schedule+0xcf/0x270 kernel/sched/core.c:5154
schedule_timeout+0x1db/0x250 kernel/time/timer.c:1868
do_wait_for_common kernel/sched/completion.c:85 [inline]
__wait_for_common kernel/sched/completion.c:106 [inline]
wait_for_common kernel/sched/completion.c:117 [inline]
wait_for_completion+0x168/0x270 kernel/sched/completion.c:138
io_sq_thread+0x27d/0x1ae0 fs/io_uring.c:6717
ret_from_fork+0x1f/0x30 arch/x86/entry/entry_64.S:294
INFO: task iou-sqp-8401:8402 blocked for more than 143 seconds.
Reported-by: [email protected]
Signed-off-by: Jens Axboe <[email protected]>
|
int csr_read_varid_uint16(int dd, uint16_t seqnum, uint16_t varid, uint16_t *value)
{
unsigned char cmd[] = { 0x00, 0x00, 0x09, 0x00,
seqnum & 0xff, seqnum >> 8, varid & 0xff, varid >> 8, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
unsigned char cp[254], rp[254];
struct hci_request rq;
memset(&cp, 0, sizeof(cp));
cp[0] = 0xc2;
memcpy(cp + 1, cmd, sizeof(cmd));
memset(&rq, 0, sizeof(rq));
rq.ogf = OGF_VENDOR_CMD;
rq.ocf = 0x00;
rq.event = EVT_VENDOR;
rq.cparam = cp;
rq.clen = sizeof(cmd) + 1;
rq.rparam = rp;
rq.rlen = sizeof(rp);
if (hci_send_req(dd, &rq, 2000) < 0)
return -1;
if (rp[0] != 0xc2) {
errno = EIO;
return -1;
}
if ((rp[9] + (rp[10] << 8)) != 0) {
errno = ENXIO;
return -1;
}
*value = rp[11] + (rp[12] << 8);
return 0;
}
| 0 |
[
"CWE-119"
] |
bluez
|
8514068150759c1d6a46d4605d2351babfde1601
| 335,173,853,394,512,670,000,000,000,000,000,000,000 | 39 |
tools/csr: Fix possible buffer overflow
Make sure we don't write past the end of the array.
|
BGD_DECLARE(void *) gdImagePngPtr (gdImagePtr im, int *size)
{
void *rv;
gdIOCtx *out = gdNewDynamicCtx (2048, NULL);
if (out == NULL) return NULL;
if (!_gdImagePngCtxEx (im, out, -1)) {
rv = gdDPExtractData (out, size);
} else {
rv = NULL;
}
out->gd_free (out);
return rv;
}
| 0 |
[
"CWE-415"
] |
libgd
|
56ce6ef068b954ad28379e83cca04feefc51320c
| 128,640,033,431,828,520,000,000,000,000,000,000,000 | 13 |
Fix #381: libgd double-free vulnerability
The issue is that `gdImagePngCtxEx` (which is called by `gdImagePngPtr`
and the other PNG output functions to do the real work) does not return
whether it succeeded or failed, so this is not checked in
`gdImagePngPtr` and the function wrongly assumes everything is okay,
which is not, in this case, because the palette image contains no
palette entries.
We can't change the signature of `gdImagePngCtxEx` for API
compatibility reasons, so we introduce the static helper
`_gdImagePngCtxEx` which returns success respective failure, so
`gdImagePngPtr` and `gdImagePngPtrEx` can check the return value. We
leave it solely to libpng for now to report warnings regarding the
failing write.
CVE-2017-6362
(cherry picked from commit 2207e3c88a06a5c42230907554ab1e9f2ec021ea)
|
static int asn1_template_noexp_d2i(ASN1_VALUE **val,
const unsigned char **in, long len,
const ASN1_TEMPLATE *tt, char opt,
ASN1_TLC *ctx)
{
int flags, aclass;
int ret;
const unsigned char *p, *q;
if (!val)
return 0;
flags = tt->flags;
aclass = flags & ASN1_TFLG_TAG_CLASS;
p = *in;
q = p;
if (flags & ASN1_TFLG_SK_MASK) {
/* SET OF, SEQUENCE OF */
int sktag, skaclass;
char sk_eoc;
/* First work out expected inner tag value */
if (flags & ASN1_TFLG_IMPTAG) {
sktag = tt->tag;
skaclass = aclass;
} else {
skaclass = V_ASN1_UNIVERSAL;
if (flags & ASN1_TFLG_SET_OF)
sktag = V_ASN1_SET;
else
sktag = V_ASN1_SEQUENCE;
}
/* Get the tag */
ret = asn1_check_tlen(&len, NULL, NULL, &sk_eoc, NULL,
&p, len, sktag, skaclass, opt, ctx);
if (!ret) {
ASN1err(ASN1_F_ASN1_TEMPLATE_NOEXP_D2I, ERR_R_NESTED_ASN1_ERROR);
return 0;
} else if (ret == -1)
return -1;
if (!*val)
*val = (ASN1_VALUE *)sk_new_null();
else {
/*
* We've got a valid STACK: free up any items present
*/
STACK_OF(ASN1_VALUE) *sktmp = (STACK_OF(ASN1_VALUE) *)*val;
ASN1_VALUE *vtmp;
while (sk_ASN1_VALUE_num(sktmp) > 0) {
vtmp = sk_ASN1_VALUE_pop(sktmp);
ASN1_item_ex_free(&vtmp, ASN1_ITEM_ptr(tt->item));
}
}
if (!*val) {
ASN1err(ASN1_F_ASN1_TEMPLATE_NOEXP_D2I, ERR_R_MALLOC_FAILURE);
goto err;
}
/* Read as many items as we can */
while (len > 0) {
ASN1_VALUE *skfield;
q = p;
/* See if EOC found */
if (asn1_check_eoc(&p, len)) {
if (!sk_eoc) {
ASN1err(ASN1_F_ASN1_TEMPLATE_NOEXP_D2I,
ASN1_R_UNEXPECTED_EOC);
goto err;
}
len -= p - q;
sk_eoc = 0;
break;
}
skfield = NULL;
if (!ASN1_item_ex_d2i(&skfield, &p, len,
ASN1_ITEM_ptr(tt->item), -1, 0, 0, ctx)) {
ASN1err(ASN1_F_ASN1_TEMPLATE_NOEXP_D2I,
ERR_R_NESTED_ASN1_ERROR);
goto err;
}
len -= p - q;
if (!sk_ASN1_VALUE_push((STACK_OF(ASN1_VALUE) *)*val, skfield)) {
ASN1err(ASN1_F_ASN1_TEMPLATE_NOEXP_D2I, ERR_R_MALLOC_FAILURE);
goto err;
}
}
if (sk_eoc) {
ASN1err(ASN1_F_ASN1_TEMPLATE_NOEXP_D2I, ASN1_R_MISSING_EOC);
goto err;
}
} else if (flags & ASN1_TFLG_IMPTAG) {
/* IMPLICIT tagging */
ret = ASN1_item_ex_d2i(val, &p, len,
ASN1_ITEM_ptr(tt->item), tt->tag, aclass, opt,
ctx);
if (!ret) {
ASN1err(ASN1_F_ASN1_TEMPLATE_NOEXP_D2I, ERR_R_NESTED_ASN1_ERROR);
goto err;
} else if (ret == -1)
return -1;
} else {
/* Nothing special */
ret = ASN1_item_ex_d2i(val, &p, len, ASN1_ITEM_ptr(tt->item),
-1, 0, opt, ctx);
if (!ret) {
ASN1err(ASN1_F_ASN1_TEMPLATE_NOEXP_D2I, ERR_R_NESTED_ASN1_ERROR);
goto err;
} else if (ret == -1)
return -1;
}
*in = p;
return 1;
err:
ASN1_template_free(val, tt);
return 0;
}
| 1 |
[
"CWE-200"
] |
openssl
|
cc598f321fbac9c04da5766243ed55d55948637d
| 30,124,799,780,284,827,000,000,000,000,000,000,000 | 118 |
Fix leak with ASN.1 combine.
When parsing a combined structure pass a flag to the decode routine
so on error a pointer to the parent structure is not zeroed as
this will leak any additional components in the parent.
This can leak memory in any application parsing PKCS#7 or CMS structures.
CVE-2015-3195.
Thanks to Adam Langley (Google/BoringSSL) for discovering this bug using
libFuzzer.
PR#4131
Reviewed-by: Richard Levitte <[email protected]>
|
static int _lldp_send(struct lldpd *global,
struct lldpd_hardware *hardware,
u_int8_t c_id_subtype,
char *c_id,
int c_id_len,
u_int8_t p_id_subtype,
char *p_id,
int p_id_len,
int shutdown)
{
struct lldpd_port *port;
struct lldpd_chassis *chassis;
struct lldpd_frame *frame;
int length;
u_int8_t *packet, *pos, *tlv;
struct lldpd_mgmt *mgmt;
int proto;
u_int8_t mcastaddr_regular[] = LLDP_ADDR_NEAREST_BRIDGE;
u_int8_t mcastaddr_nontpmr[] = LLDP_ADDR_NEAREST_NONTPMR_BRIDGE;
u_int8_t mcastaddr_customer[] = LLDP_ADDR_NEAREST_CUSTOMER_BRIDGE;
u_int8_t *mcastaddr;
#ifdef ENABLE_DOT1
const u_int8_t dot1[] = LLDP_TLV_ORG_DOT1;
struct lldpd_vlan *vlan;
struct lldpd_ppvid *ppvid;
struct lldpd_pi *pi;
#endif
#ifdef ENABLE_DOT3
const u_int8_t dot3[] = LLDP_TLV_ORG_DOT3;
#endif
#ifdef ENABLE_LLDPMED
int i;
const u_int8_t med[] = LLDP_TLV_ORG_MED;
#endif
#ifdef ENABLE_CUSTOM
struct lldpd_custom *custom;
#endif
port = &hardware->h_lport;
chassis = port->p_chassis;
length = hardware->h_mtu;
if ((packet = (u_int8_t*)calloc(1, length)) == NULL)
return ENOMEM;
pos = packet;
/* Ethernet header */
switch (global->g_config.c_lldp_agent_type) {
case LLDP_AGENT_TYPE_NEAREST_NONTPMR_BRIDGE: mcastaddr = mcastaddr_nontpmr; break;
case LLDP_AGENT_TYPE_NEAREST_CUSTOMER_BRIDGE: mcastaddr = mcastaddr_customer; break;
case LLDP_AGENT_TYPE_NEAREST_BRIDGE:
default: mcastaddr = mcastaddr_regular; break;
}
if (!(
/* LLDP multicast address */
POKE_BYTES(mcastaddr, ETHER_ADDR_LEN) &&
/* Source MAC address */
POKE_BYTES(&hardware->h_lladdr, ETHER_ADDR_LEN)))
goto toobig;
/* Insert VLAN tag if needed */
if (port->p_vlan_tx_enabled) {
if (!(
/* VLAN ethertype */
POKE_UINT16(ETHERTYPE_VLAN) &&
/* VLAN Tag Control Information (TCI) */
/* Priority(3bits) | DEI(1bit) | VID(12bit) */
POKE_UINT16(port->p_vlan_tx_tag)))
goto toobig;
}
if (!(
/* LLDP frame */
POKE_UINT16(ETHERTYPE_LLDP)))
goto toobig;
/* Chassis ID */
if (!(
POKE_START_LLDP_TLV(LLDP_TLV_CHASSIS_ID) &&
POKE_UINT8(c_id_subtype) &&
POKE_BYTES(c_id, c_id_len) &&
POKE_END_LLDP_TLV))
goto toobig;
/* Port ID */
if (!(
POKE_START_LLDP_TLV(LLDP_TLV_PORT_ID) &&
POKE_UINT8(p_id_subtype) &&
POKE_BYTES(p_id, p_id_len) &&
POKE_END_LLDP_TLV))
goto toobig;
/* Time to live */
if (!(
POKE_START_LLDP_TLV(LLDP_TLV_TTL) &&
POKE_UINT16(shutdown?0:(global?global->g_config.c_ttl:180)) &&
POKE_END_LLDP_TLV))
goto toobig;
if (shutdown)
goto end;
/* System name */
if (chassis->c_name && *chassis->c_name != '\0') {
if (!(
POKE_START_LLDP_TLV(LLDP_TLV_SYSTEM_NAME) &&
POKE_BYTES(chassis->c_name, strlen(chassis->c_name)) &&
POKE_END_LLDP_TLV))
goto toobig;
}
/* System description (skip it if empty) */
if (chassis->c_descr && *chassis->c_descr != '\0') {
if (!(
POKE_START_LLDP_TLV(LLDP_TLV_SYSTEM_DESCR) &&
POKE_BYTES(chassis->c_descr, strlen(chassis->c_descr)) &&
POKE_END_LLDP_TLV))
goto toobig;
}
/* System capabilities */
if (global->g_config.c_cap_advertise && chassis->c_cap_available) {
if (!(
POKE_START_LLDP_TLV(LLDP_TLV_SYSTEM_CAP) &&
POKE_UINT16(chassis->c_cap_available) &&
POKE_UINT16(chassis->c_cap_enabled) &&
POKE_END_LLDP_TLV))
goto toobig;
}
/* Management addresses */
TAILQ_FOREACH(mgmt, &chassis->c_mgmt, m_entries) {
proto = lldpd_af_to_lldp_proto(mgmt->m_family);
if (proto == LLDP_MGMT_ADDR_NONE) continue;
if (!(
POKE_START_LLDP_TLV(LLDP_TLV_MGMT_ADDR) &&
/* Size of the address, including its type */
POKE_UINT8(mgmt->m_addrsize + 1) &&
POKE_UINT8(proto) &&
POKE_BYTES(&mgmt->m_addr, mgmt->m_addrsize)))
goto toobig;
/* Interface port type, OID */
if (mgmt->m_iface == 0) {
if (!(
/* We don't know the management interface */
POKE_UINT8(LLDP_MGMT_IFACE_UNKNOWN) &&
POKE_UINT32(0)))
goto toobig;
} else {
if (!(
/* We have the index of the management interface */
POKE_UINT8(LLDP_MGMT_IFACE_IFINDEX) &&
POKE_UINT32(mgmt->m_iface)))
goto toobig;
}
if (!(
/* We don't provide an OID for management */
POKE_UINT8(0) &&
POKE_END_LLDP_TLV))
goto toobig;
}
/* Port description */
if (port->p_descr && *port->p_descr != '\0') {
if (!(
POKE_START_LLDP_TLV(LLDP_TLV_PORT_DESCR) &&
POKE_BYTES(port->p_descr, strlen(port->p_descr)) &&
POKE_END_LLDP_TLV))
goto toobig;
}
#ifdef ENABLE_DOT1
/* Port VLAN ID */
if(port->p_pvid != 0) {
if (!(
POKE_START_LLDP_TLV(LLDP_TLV_ORG) &&
POKE_BYTES(dot1, sizeof(dot1)) &&
POKE_UINT8(LLDP_TLV_DOT1_PVID) &&
POKE_UINT16(port->p_pvid) &&
POKE_END_LLDP_TLV)) {
goto toobig;
}
}
/* Port and Protocol VLAN IDs */
TAILQ_FOREACH(ppvid, &port->p_ppvids, p_entries) {
if (!(
POKE_START_LLDP_TLV(LLDP_TLV_ORG) &&
POKE_BYTES(dot1, sizeof(dot1)) &&
POKE_UINT8(LLDP_TLV_DOT1_PPVID) &&
POKE_UINT8(ppvid->p_cap_status) &&
POKE_UINT16(ppvid->p_ppvid) &&
POKE_END_LLDP_TLV)) {
goto toobig;
}
}
/* VLANs */
TAILQ_FOREACH(vlan, &port->p_vlans, v_entries) {
if (!(
POKE_START_LLDP_TLV(LLDP_TLV_ORG) &&
POKE_BYTES(dot1, sizeof(dot1)) &&
POKE_UINT8(LLDP_TLV_DOT1_VLANNAME) &&
POKE_UINT16(vlan->v_vid) &&
POKE_UINT8(strlen(vlan->v_name)) &&
POKE_BYTES(vlan->v_name, strlen(vlan->v_name)) &&
POKE_END_LLDP_TLV))
goto toobig;
}
/* Protocol Identities */
TAILQ_FOREACH(pi, &port->p_pids, p_entries) {
if (!(
POKE_START_LLDP_TLV(LLDP_TLV_ORG) &&
POKE_BYTES(dot1, sizeof(dot1)) &&
POKE_UINT8(LLDP_TLV_DOT1_PI) &&
POKE_UINT8(pi->p_pi_len) &&
POKE_BYTES(pi->p_pi, pi->p_pi_len) &&
POKE_END_LLDP_TLV))
goto toobig;
}
#endif
#ifdef ENABLE_DOT3
/* Aggregation status */
if (!(
POKE_START_LLDP_TLV(LLDP_TLV_ORG) &&
POKE_BYTES(dot3, sizeof(dot3)) &&
POKE_UINT8(LLDP_TLV_DOT3_LA) &&
/* Bit 0 = capability ; Bit 1 = status */
POKE_UINT8((port->p_aggregid) ? 3:1) &&
POKE_UINT32(port->p_aggregid) &&
POKE_END_LLDP_TLV))
goto toobig;
/* MAC/PHY */
if (!(
POKE_START_LLDP_TLV(LLDP_TLV_ORG) &&
POKE_BYTES(dot3, sizeof(dot3)) &&
POKE_UINT8(LLDP_TLV_DOT3_MAC) &&
POKE_UINT8(port->p_macphy.autoneg_support |
(port->p_macphy.autoneg_enabled << 1)) &&
POKE_UINT16(port->p_macphy.autoneg_advertised) &&
POKE_UINT16(port->p_macphy.mau_type) &&
POKE_END_LLDP_TLV))
goto toobig;
/* MFS */
if (port->p_mfs) {
if (!(
POKE_START_LLDP_TLV(LLDP_TLV_ORG) &&
POKE_BYTES(dot3, sizeof(dot3)) &&
POKE_UINT8(LLDP_TLV_DOT3_MFS) &&
POKE_UINT16(port->p_mfs) &&
POKE_END_LLDP_TLV))
goto toobig;
}
/* Power */
if (port->p_power.devicetype) {
if (!(
(port->p_power.type_ext != LLDP_DOT3_POWER_8023BT_OFF ?
(tlv = pos, POKE_UINT16((LLDP_TLV_ORG << 9) | (0x1d))):
POKE_START_LLDP_TLV(LLDP_TLV_ORG)) &&
POKE_BYTES(dot3, sizeof(dot3)) &&
POKE_UINT8(LLDP_TLV_DOT3_POWER) &&
POKE_UINT8((
(((2 - port->p_power.devicetype) %(1<< 1))<<0) |
(( port->p_power.supported %(1<< 1))<<1) |
(( port->p_power.enabled %(1<< 1))<<2) |
(( port->p_power.paircontrol %(1<< 1))<<3))) &&
POKE_UINT8(port->p_power.pairs) &&
POKE_UINT8(port->p_power.class)))
goto toobig;
/* 802.3at */
if (port->p_power.powertype != LLDP_DOT3_POWER_8023AT_OFF) {
if (!(
POKE_UINT8(((((port->p_power.powertype ==
LLDP_DOT3_POWER_8023AT_TYPE1)?1:0) << 7) |
(((port->p_power.devicetype ==
LLDP_DOT3_POWER_PSE)?0:1) << 6) |
((port->p_power.source %(1<< 2))<<4) |
((port->p_power.pd_4pid %(1 << 1))<<2) |
((port->p_power.priority %(1<< 2))<<0))) &&
POKE_UINT16(port->p_power.requested) &&
POKE_UINT16(port->p_power.allocated)))
goto toobig;
}
if (port->p_power.type_ext != LLDP_DOT3_POWER_8023BT_OFF) {
if (!(
POKE_UINT16(port->p_power.requested_a) &&
POKE_UINT16(port->p_power.requested_b) &&
POKE_UINT16(port->p_power.allocated_a) &&
POKE_UINT16(port->p_power.allocated_b) &&
POKE_UINT16((
(port->p_power.pse_status << 14) |
(port->p_power.pd_status << 12) |
(port->p_power.pse_pairs_ext << 10) |
(port->p_power.class_a << 7) |
(port->p_power.class_b << 4) |
(port->p_power.class_ext << 0))) &&
POKE_UINT8(
/* Adjust by -1 to enable 0 to mean no 802.3bt support */
((port->p_power.type_ext -1) << 1) |
(port->p_power.pd_load << 0)) &&
POKE_UINT16(port->p_power.pse_max) &&
/* Send 0 for autoclass and power down requests */
POKE_UINT8(0) &&
POKE_UINT16(0) &&
POKE_UINT8(0)))
goto toobig;
}
if (!(POKE_END_LLDP_TLV))
goto toobig;
}
#endif
#ifdef ENABLE_LLDPMED
if (port->p_med_cap_enabled) {
/* LLDP-MED cap */
if (port->p_med_cap_enabled & LLDP_MED_CAP_CAP) {
if (!(
POKE_START_LLDP_TLV(LLDP_TLV_ORG) &&
POKE_BYTES(med, sizeof(med)) &&
POKE_UINT8(LLDP_TLV_MED_CAP) &&
POKE_UINT16(chassis->c_med_cap_available) &&
POKE_UINT8(chassis->c_med_type) &&
POKE_END_LLDP_TLV))
goto toobig;
}
/* LLDP-MED inventory */
#define LLDP_INVENTORY(value, subtype) \
if (value) { \
if (!( \
POKE_START_LLDP_TLV(LLDP_TLV_ORG) && \
POKE_BYTES(med, sizeof(med)) && \
POKE_UINT8(subtype) && \
POKE_BYTES(value, \
(strlen(value)>32)?32:strlen(value)) && \
POKE_END_LLDP_TLV)) \
goto toobig; \
}
if (port->p_med_cap_enabled & LLDP_MED_CAP_IV) {
LLDP_INVENTORY(chassis->c_med_hw,
LLDP_TLV_MED_IV_HW);
LLDP_INVENTORY(chassis->c_med_fw,
LLDP_TLV_MED_IV_FW);
LLDP_INVENTORY(chassis->c_med_sw,
LLDP_TLV_MED_IV_SW);
LLDP_INVENTORY(chassis->c_med_sn,
LLDP_TLV_MED_IV_SN);
LLDP_INVENTORY(chassis->c_med_manuf,
LLDP_TLV_MED_IV_MANUF);
LLDP_INVENTORY(chassis->c_med_model,
LLDP_TLV_MED_IV_MODEL);
LLDP_INVENTORY(chassis->c_med_asset,
LLDP_TLV_MED_IV_ASSET);
}
/* LLDP-MED location */
for (i = 0; i < LLDP_MED_LOCFORMAT_LAST; i++) {
if (port->p_med_location[i].format == i + 1) {
if (!(
POKE_START_LLDP_TLV(LLDP_TLV_ORG) &&
POKE_BYTES(med, sizeof(med)) &&
POKE_UINT8(LLDP_TLV_MED_LOCATION) &&
POKE_UINT8(port->p_med_location[i].format) &&
POKE_BYTES(port->p_med_location[i].data,
port->p_med_location[i].data_len) &&
POKE_END_LLDP_TLV))
goto toobig;
}
}
/* LLDP-MED network policy */
for (i = 0; i < LLDP_MED_APPTYPE_LAST; i++) {
if (port->p_med_policy[i].type == i + 1) {
if (!(
POKE_START_LLDP_TLV(LLDP_TLV_ORG) &&
POKE_BYTES(med, sizeof(med)) &&
POKE_UINT8(LLDP_TLV_MED_POLICY) &&
POKE_UINT32((
((port->p_med_policy[i].type %(1<< 8))<<24) |
((port->p_med_policy[i].unknown %(1<< 1))<<23) |
((port->p_med_policy[i].tagged %(1<< 1))<<22) |
/*((0 %(1<< 1))<<21) |*/
((port->p_med_policy[i].vid %(1<<12))<< 9) |
((port->p_med_policy[i].priority %(1<< 3))<< 6) |
((port->p_med_policy[i].dscp %(1<< 6))<< 0) )) &&
POKE_END_LLDP_TLV))
goto toobig;
}
}
/* LLDP-MED POE-MDI */
if ((port->p_med_power.devicetype == LLDP_MED_POW_TYPE_PSE) ||
(port->p_med_power.devicetype == LLDP_MED_POW_TYPE_PD)) {
int devicetype = 0, source = 0;
if (!(
POKE_START_LLDP_TLV(LLDP_TLV_ORG) &&
POKE_BYTES(med, sizeof(med)) &&
POKE_UINT8(LLDP_TLV_MED_MDI)))
goto toobig;
switch (port->p_med_power.devicetype) {
case LLDP_MED_POW_TYPE_PSE:
devicetype = 0;
switch (port->p_med_power.source) {
case LLDP_MED_POW_SOURCE_PRIMARY: source = 1; break;
case LLDP_MED_POW_SOURCE_BACKUP: source = 2; break;
case LLDP_MED_POW_SOURCE_RESERVED: source = 3; break;
default: source = 0; break;
}
break;
case LLDP_MED_POW_TYPE_PD:
devicetype = 1;
switch (port->p_med_power.source) {
case LLDP_MED_POW_SOURCE_PSE: source = 1; break;
case LLDP_MED_POW_SOURCE_LOCAL: source = 2; break;
case LLDP_MED_POW_SOURCE_BOTH: source = 3; break;
default: source = 0; break;
}
break;
}
if (!(
POKE_UINT8((
((devicetype %(1<< 2))<<6) |
((source %(1<< 2))<<4) |
((port->p_med_power.priority %(1<< 4))<<0) )) &&
POKE_UINT16(port->p_med_power.val) &&
POKE_END_LLDP_TLV))
goto toobig;
}
}
#endif
#ifdef ENABLE_CUSTOM
TAILQ_FOREACH(custom, &port->p_custom_list, next) {
if (!(
POKE_START_LLDP_TLV(LLDP_TLV_ORG) &&
POKE_BYTES(custom->oui, sizeof(custom->oui)) &&
POKE_UINT8(custom->subtype) &&
POKE_BYTES(custom->oui_info, custom->oui_info_len) &&
POKE_END_LLDP_TLV))
goto toobig;
}
#endif
end:
/* END */
if (!(
POKE_START_LLDP_TLV(LLDP_TLV_END) &&
POKE_END_LLDP_TLV))
goto toobig;
if (interfaces_send_helper(global, hardware,
(char *)packet, pos - packet) == -1) {
log_warn("lldp", "unable to send packet on real device for %s",
hardware->h_ifname);
free(packet);
return ENETDOWN;
}
hardware->h_tx_cnt++;
/* We assume that LLDP frame is the reference */
if (!shutdown && (frame = (struct lldpd_frame*)malloc(
sizeof(int) + pos - packet)) != NULL) {
frame->size = pos - packet;
memcpy(&frame->frame, packet, frame->size);
if ((hardware->h_lport.p_lastframe == NULL) ||
(hardware->h_lport.p_lastframe->size != frame->size) ||
(memcmp(hardware->h_lport.p_lastframe->frame, frame->frame,
frame->size) != 0)) {
free(hardware->h_lport.p_lastframe);
hardware->h_lport.p_lastframe = frame;
hardware->h_lport.p_lastchange = time(NULL);
} else free(frame);
}
free(packet);
return 0;
toobig:
log_info("lldp", "Cannot send LLDP packet for %s, Too big message", p_id);
free(packet);
return E2BIG;
}
| 0 |
[
"CWE-400"
] |
lldpd
|
a8d3c90feca548fc0656d95b5d278713db86ff61
| 179,942,983,578,639,030,000,000,000,000,000,000,000 | 485 |
lldp: avoid memory leak from bad packets
A packet that contains multiple instances of certain TLVs will cause
lldpd to continually allocate memory and leak the old memory. As an
example, multiple instances of system name TLV will cause old values
to be dropped by the decoding routine.
Reported-at: https://github.com/openvswitch/ovs/pull/337
Reported-by: Jonas Rudloff <[email protected]>
Signed-off-by: Aaron Conole <[email protected]>
|
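The leak pattern from the report, sketched: a decoder that duplicates a fresh value for every repeated TLV instance leaks the previous one; freeing before overwrite (or rejecting duplicates) fixes it. Stand-in names; strndup is POSIX.
#include <stdlib.h>
#include <string.h>

static void set_sysname(char **slot, const char *tlv_value, size_t len)
{
    free(*slot);                  /* drop the value a prior TLV set */
    *slot = strndup(tlv_value, len);
}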
GF_Err stts_box_dump(GF_Box *a, FILE * trace)
{
GF_TimeToSampleBox *p;
u32 i, nb_samples;
if (dump_skip_samples)
return GF_OK;
p = (GF_TimeToSampleBox *)a;
gf_isom_box_dump_start(a, "TimeToSampleBox", trace);
gf_fprintf(trace, "EntryCount=\"%d\">\n", p->nb_entries);
nb_samples = 0;
for (i=0; i<p->nb_entries; i++) {
gf_fprintf(trace, "<TimeToSampleEntry SampleDelta=\"%d\" SampleCount=\"%d\"/>\n", p->entries[i].sampleDelta, p->entries[i].sampleCount);
nb_samples += p->entries[i].sampleCount;
}
if (p->size)
gf_fprintf(trace, "<!-- counted %d samples in STTS entries -->\n", nb_samples);
else
gf_fprintf(trace, "<TimeToSampleEntry SampleDelta=\"\" SampleCount=\"\"/>\n");
gf_isom_box_dump_done("TimeToSampleBox", a, trace);
return GF_OK;
}
| 0 |
[
"CWE-787"
] |
gpac
|
ea1eca00fd92fa17f0e25ac25652622924a9a6a0
| 200,423,867,182,488,160,000,000,000,000,000,000,000 | 25 |
fixed #2138
|
alt_merge_opt_map(OnigEncoding enc, OptMap* to, OptMap* add)
{
int i, val;
/* if (! is_equal_mml(&to->mmd, &add->mmd)) return ; */
if (to->value == 0) return ;
if (add->value == 0 || to->mmd.max < add->mmd.min) {
clear_opt_map(to);
return ;
}
alt_merge_mml(&to->mmd, &add->mmd);
val = 0;
for (i = 0; i < CHAR_MAP_SIZE; i++) {
if (add->map[i])
to->map[i] = 1;
if (to->map[i])
val += map_position_value(enc, i);
}
to->value = val;
alt_merge_opt_anc_info(&to->anc, &add->anc);
}
| 0 |
[
"CWE-476",
"CWE-125"
] |
oniguruma
|
c509265c5f6ae7264f7b8a8aae1cfa5fc59d108c
| 170,400,982,434,939,730,000,000,000,000,000,000,000 | 25 |
Fix CVE-2019-13225: problem in converting if-then-else pattern to bytecode.
|
\param file File to write data to.
\param is_compressed Tells if data compression must be enabled.
**/
| 0 |
[
"CWE-770"
] |
cimg
|
619cb58dd90b4e03ac68286c70ed98acbefd1c90
| 19,371,535,091,518,437,000,000,000,000,000,000,000 | 3 |
CImg<>::load_bmp() and CImg<>::load_pandore(): Check that dimensions encoded in the file do not exceed the file size.
|
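The kind of plausibility check the commit adds, sketched with assumed names: the pixel volume implied by a BMP header must fit in the actual file, tested without 64-bit overflow.
#include <stdbool.h>
#include <stdint.h>

static bool bmp_dims_plausible(uint64_t w, uint64_t h,
                               uint64_t bpp, uint64_t file_size)
{
    if (!w || !h || !bpp)
        return false;
    if (h > file_size / w)           /* w * h would exceed the file */
        return false;
    return w * h <= file_size / bpp; /* total pixel bytes must fit */
}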
html_type_find (GstTypeFind * tf, gpointer unused)
{
const gchar *d, *data;
data = (const gchar *) gst_type_find_peek (tf, 0, 16);
if (!data)
return;
if (!g_ascii_strncasecmp (data, "<!DOCTYPE HTML", 14)) {
gst_type_find_suggest (tf, GST_TYPE_FIND_MAXIMUM, HTML_CAPS);
} else if (xml_check_first_element (tf, "html", 4, FALSE)) {
gst_type_find_suggest (tf, GST_TYPE_FIND_MAXIMUM, HTML_CAPS);
} else if ((d = memchr (data, '<', 16))) {
data = (const gchar *) gst_type_find_peek (tf, d - data, 6);
if (data && g_ascii_strncasecmp (data, "<html>", 6) == 0) {
gst_type_find_suggest (tf, GST_TYPE_FIND_MAXIMUM, HTML_CAPS);
}
}
}
| 0 |
[
"CWE-125"
] |
gst-plugins-base
|
2fdccfd64fc609e44e9c4b8eed5bfdc0ab9c9095
| 114,180,810,389,291,670,000,000,000,000,000,000,000 | 19 |
typefind: bounds check windows ico detection
Fixes out of bounds read
https://bugzilla.gnome.org/show_bug.cgi?id=774902
|
static inline void ConvertLuvToXYZ(const double L,const double u,const double v,
double *X,double *Y,double *Z)
{
double
gamma;
assert(X != (double *) NULL);
assert(Y != (double *) NULL);
assert(Z != (double *) NULL);
if (L > (CIEK*CIEEpsilon))
*Y=(double) pow((L+16.0)/116.0,3.0);
else
*Y=L/CIEK;
gamma=PerceptibleReciprocal((((52.0*L*PerceptibleReciprocal(u+13.0*L*
(4.0*D65X/(D65X+15.0*D65Y+3.0*D65Z))))-1.0)/3.0)-(-1.0/3.0));
*X=gamma*((*Y*((39.0*L*PerceptibleReciprocal(v+13.0*L*(9.0*D65Y/
(D65X+15.0*D65Y+3.0*D65Z))))-5.0))+5.0*(*Y));
*Z=(*X*(((52.0*L*PerceptibleReciprocal(u+13.0*L*(4.0*D65X/
(D65X+15.0*D65Y+3.0*D65Z))))-1.0)/3.0))-5.0*(*Y);
}
| 0 |
[] |
ImageMagick
|
a855d3ad660f307fdb071794351822f9ce878c4e
| 187,280,963,848,298,440,000,000,000,000,000,000,000 | 20 |
https://github.com/ImageMagick/ImageMagick/issues/3317
|
int CRYPTO_THREAD_init_local(CRYPTO_THREAD_LOCAL *key, void (*cleanup)(void *))
{
static unsigned int thread_local_key = 0;
if (thread_local_key >= OPENSSL_CRYPTO_THREAD_LOCAL_KEY_MAX)
return 0;
*key = thread_local_key++;
thread_local_storage[*key] = NULL;
return 1;
}
| 0 |
[
"CWE-330"
] |
openssl
|
1b0fe00e2704b5e20334a16d3c9099d1ba2ef1be
| 184,460,457,980,318,040,000,000,000,000,000,000,000 | 13 |
drbg: ensure fork-safety without using a pthread_atfork handler
When the new OpenSSL CSPRNG was introduced in version 1.1.1,
it was announced in the release notes that it would be fork-safe,
which the old CSPRNG hadn't been.
The fork-safety was implemented using a fork count, which was
incremented by a pthread_atfork handler. Initially, this handler
was enabled by default. Unfortunately, the default behaviour
had to be changed for other reasons in commit b5319bdbd095, so
the new OpenSSL CSPRNG failed to keep its promise.
This commit restores the fork-safety using a different approach.
It replaces the fork count by a fork id, which coincides with
the process id on UNIX-like operating systems and is zero on other
operating systems. It is used to detect when an automatic reseed
after a fork is necessary.
To prevent a future regression, it also adds a test to verify that
the child reseeds after fork.
CVE-2019-1549
Reviewed-by: Paul Dale <[email protected]>
Reviewed-by: Matt Caswell <[email protected]>
(Merged from https://github.com/openssl/openssl/pull/9802)
|
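A sketch of the fork-id approach described above: cache the process id in the DRBG state and reseed whenever it changes, instead of counting forks in an atfork handler. Field and function names are assumptions, not OpenSSL's.
#include <unistd.h>

struct drbg_sketch {
    pid_t fork_id;   /* pid this state was last (re)seeded under */
};

static void drbg_generate_sketch(struct drbg_sketch *d)
{
    pid_t now = getpid();
    if (d->fork_id != now) {   /* running in a freshly forked child */
        /* reseed_from_os_entropy(d);  -- assumed helper */
        d->fork_id = now;
    }
    /* ... produce output from the (re)seeded state ... */
}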
next_can_fast_zero (void *nxdata)
{
struct b_conn *b_conn = nxdata;
return backend_can_fast_zero (b_conn->b, b_conn->conn);
}
| 0 |
[
"CWE-406"
] |
nbdkit
|
a6b88b195a959b17524d1c8353fd425d4891dc5f
| 220,377,834,321,447,730,000,000,000,000,000,000,000 | 5 |
server: Fix regression for NBD_OPT_INFO before NBD_OPT_GO
Most known NBD clients do not bother with NBD_OPT_INFO (except for
clients like 'qemu-nbd --list' that don't ever intend to connect), but
go straight to NBD_OPT_GO. However, it's not too hard to hack up qemu
to add in an extra client step (whether info on the same name, or more
interestingly, info on a different name), as a patch against qemu
commit 6f214b30445:
| diff --git i/nbd/client.c w/nbd/client.c
| index f6733962b49b..425292ac5ea9 100644
| --- i/nbd/client.c
| +++ w/nbd/client.c
| @@ -1038,6 +1038,14 @@ int nbd_receive_negotiate(AioContext *aio_context, QIOChannel *ioc,
| * TLS). If it is not available, fall back to
| * NBD_OPT_LIST for nicer error messages about a missing
| * export, then use NBD_OPT_EXPORT_NAME. */
| + if (getenv ("HACK"))
| + info->name[0]++;
| + result = nbd_opt_info_or_go(ioc, NBD_OPT_INFO, info, errp);
| + if (getenv ("HACK"))
| + info->name[0]--;
| + if (result < 0) {
| + return -EINVAL;
| + }
| result = nbd_opt_info_or_go(ioc, NBD_OPT_GO, info, errp);
| if (result < 0) {
| return -EINVAL;
This works just fine in 1.14.0, where we call .open only once (so the
INFO and GO repeat calls into the same plugin handle), but in 1.14.1
it regressed into causing an assertion failure: we are now calling
.open a second time on a connection that is already opened:
$ nbdkit -rfv null &
$ hacked-qemu-io -f raw -r nbd://localhost -c quit
...
nbdkit: null[1]: debug: null: open readonly=1
nbdkit: backend.c:179: backend_open: Assertion `h->handle == NULL' failed.
Worse, on the mainline development, we have recently made it possible
for plugins to actively report different information for different
export names; for example, a plugin may choose to report different
answers for .can_write on export A than for export B; but if we share
cached handles, then an NBD_OPT_INFO on one export prevents correct
answers for NBD_OPT_GO on the second export name. (The HACK envvar in
my qemu modifications can be used to demonstrate cross-name requests,
which are even less likely in a real client).
The solution is to call .close after NBD_OPT_INFO, coupled with enough
glue logic to reset cached connection handles back to the state
expected by .open. This in turn means factoring out another backend_*
function, but also gives us an opportunity to change
backend_set_handle to no longer accept NULL.
The assertion failure is, to some extent, a possible denial of service
attack (one client can force nbdkit to exit by merely sending OPT_INFO
before OPT_GO, preventing the next client from connecting), although
this is mitigated by using TLS to weed out untrusted clients. Still,
the fact that we introduced a potential DoS attack while trying to fix
a traffic amplification security bug is not very nice.
Sadly, as there are no known clients that easily trigger this mode of
operation (OPT_INFO before OPT_GO), there is no easy way to cover this
via a testsuite addition. I may end up hacking something into libnbd.
Fixes: c05686f957
Signed-off-by: Eric Blake <[email protected]>
|
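
The fix the message describes — tearing the backend handle back down after NBD_OPT_INFO so a later NBD_OPT_GO starts from a fresh .open — can be modelled as a small standalone state machine. This is a sketch with simplified types, not nbdkit's actual code:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct conn {
    bool handle_open;   /* has .open been called on the plugin handle? */
};

enum { NBD_OPT_INFO = 6, NBD_OPT_GO = 7 };

static int backend_open (struct conn *c)
{
    if (c->handle_open) {        /* the assertion that fired in 1.14.1 */
        fprintf (stderr, "BUG: handle already open\n");
        return -1;
    }
    c->handle_open = true;
    return 0;
}

static void backend_close (struct conn *c)
{
    c->handle_open = false;      /* back to the state .open expects */
}

/* Handle one NBD_OPT_INFO or NBD_OPT_GO request. */
static int handle_info_or_go (struct conn *c, uint32_t option)
{
    if (backend_open (c) == -1)
        return -1;

    /* ... send NBD_REP_INFO replies describing the export ... */

    if (option == NBD_OPT_INFO)
        backend_close (c);       /* the fix: INFO must not leave the
                                    handle open for the next option */
    return 0;
}

int main (void)
{
    struct conn c = { false };

    /* INFO followed by GO now works instead of tripping the assertion. */
    handle_info_or_go (&c, NBD_OPT_INFO);
    return handle_info_or_go (&c, NBD_OPT_GO);
}
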
PHP_HASH_API void PHP_HAVAL256Final(unsigned char *digest, PHP_HAVAL_CTX * context)
{
unsigned char bits[10];
unsigned int index, padLen;
/* Version, Passes, and Digest Length */
bits[0] = (PHP_HASH_HAVAL_VERSION & 0x07) |
((context->passes & 0x07) << 3) |
((context->output & 0x03) << 6);
bits[1] = (context->output >> 2);
/* Save number of bits */
Encode(bits + 2, context->count, 8);
/* Pad out to 118 mod 128.
*/
index = (unsigned int) ((context->count[0] >> 3) & 0x3f);
padLen = (index < 118) ? (118 - index) : (246 - index);
PHP_HAVALUpdate(context, PADDING, padLen);
/* Append version, passes, digest length, and message length */
PHP_HAVALUpdate(context, bits, 10);
/* Store state in digest */
Encode(digest, context->state, 32);
/* Zeroize sensitive information.
*/
memset((unsigned char*) context, 0, sizeof(*context));
}
| 1 |
[] |
php-src
|
1390a5812b151e0ea8f74e64bfeaa5df4dd5b801
| 147,929,687,920,696,400,000,000,000,000,000,000,000 | 30 |
Fix bug #70312 - HAVAL gives wrong hashes in specific cases
|
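
Note that the function above takes the byte index with & 0x3f (mod 64) even though it pads within a 128-byte HAVAL block, which is consistent with the wrong-hash symptom this bug reports; a 7-bit mask (& 0x7f) makes the arithmetic come out. A standalone illustration of the difference, assuming the 128-byte block size (this is not php-src code):

#include <stdio.h>

int main(void)
{
    unsigned long long count0 = 100 * 8;  /* a 100-byte message, in bits */
    unsigned int bad  = (unsigned int)((count0 >> 3) & 0x3f); /* mod 64  */
    unsigned int good = (unsigned int)((count0 >> 3) & 0x7f); /* mod 128 */

    /* Pad to 118 mod 128 so the 10-byte trailer completes a block. */
    unsigned int padLen_bad  = (bad  < 118) ? 118 - bad  : 246 - bad;
    unsigned int padLen_good = (good < 118) ? 118 - good : 246 - good;

    /* bad:  100 + 82 + 10 = 192, not a multiple of 128;
     * good: 100 + 18 + 10 = 128, exactly one block. */
    printf("index %u vs %u, padLen %u vs %u\n",
           bad, good, padLen_bad, padLen_good);
    return 0;
}
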
static bool reg_type_may_be_null(enum bpf_reg_type type)
{
return type == PTR_TO_MAP_VALUE_OR_NULL ||
type == PTR_TO_SOCKET_OR_NULL ||
type == PTR_TO_SOCK_COMMON_OR_NULL ||
type == PTR_TO_TCP_SOCK_OR_NULL ||
type == PTR_TO_BTF_ID_OR_NULL ||
type == PTR_TO_MEM_OR_NULL ||
type == PTR_TO_RDONLY_BUF_OR_NULL ||
type == PTR_TO_RDWR_BUF_OR_NULL;
}
| 0 |
[
"CWE-119",
"CWE-681",
"CWE-787"
] |
linux
|
5b9fbeb75b6a98955f628e205ac26689bcb1383e
| 101,161,822,771,556,000,000,000,000,000,000,000,000 | 11 |
bpf: Fix scalar32_min_max_or bounds tracking
Simon reported an issue with the current scalar32_min_max_or() implementation.
That is, compared to the other 32 bit subreg tracking functions, the code in
scalar32_min_max_or() stands out that it's using the 64 bit registers instead
of 32 bit ones. This leads to bounds tracking issues, for example:
[...]
8: R0=map_value(id=0,off=0,ks=4,vs=48,imm=0) R10=fp0 fp-8=mmmmmmmm
8: (79) r1 = *(u64 *)(r0 +0)
R0=map_value(id=0,off=0,ks=4,vs=48,imm=0) R10=fp0 fp-8=mmmmmmmm
9: R0=map_value(id=0,off=0,ks=4,vs=48,imm=0) R1_w=inv(id=0) R10=fp0 fp-8=mmmmmmmm
9: (b7) r0 = 1
10: R0_w=inv1 R1_w=inv(id=0) R10=fp0 fp-8=mmmmmmmm
10: (18) r2 = 0x600000002
12: R0_w=inv1 R1_w=inv(id=0) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
12: (ad) if r1 < r2 goto pc+1
R0_w=inv1 R1_w=inv(id=0,umin_value=25769803778) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
13: R0_w=inv1 R1_w=inv(id=0,umin_value=25769803778) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
13: (95) exit
14: R0_w=inv1 R1_w=inv(id=0,umax_value=25769803777,var_off=(0x0; 0x7ffffffff)) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
14: (25) if r1 > 0x0 goto pc+1
R0_w=inv1 R1_w=inv(id=0,umax_value=0,var_off=(0x0; 0x7fffffff),u32_max_value=2147483647) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
15: R0_w=inv1 R1_w=inv(id=0,umax_value=0,var_off=(0x0; 0x7fffffff),u32_max_value=2147483647) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
15: (95) exit
16: R0_w=inv1 R1_w=inv(id=0,umin_value=1,umax_value=25769803777,var_off=(0x0; 0x77fffffff),u32_max_value=2147483647) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
16: (47) r1 |= 0
17: R0_w=inv1 R1_w=inv(id=0,umin_value=1,umax_value=32212254719,var_off=(0x1; 0x700000000),s32_max_value=1,u32_max_value=1) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
[...]
The bound tests on the map value force the upper unsigned bound to be 25769803777
in 64 bit (0b11000000000000000000000000000000001) and the lower one to be 1. By
using OR they are truncated and thus result in the range [1,1] for the 32 bit reg
tracker. This is incorrect given the only thing we know is that the value must be
positive and thus 2147483647 (0b1111111111111111111111111111111) at max for the
subregs. Fix it by using the {u,s}32_{min,max}_value vars instead. This also makes
sense, for example, for the case where we update dst_reg->s32_{min,max}_value in
the else branch we need to use the newly computed dst_reg->u32_{min,max}_value as
we know that these are positive. Previously, in the else branch the 64 bit values
of umin_value=1 and umax_value=32212254719 were used and latter got truncated to
be 1 as upper bound there. After the fix the subreg range is now correct:
[...]
8: R0=map_value(id=0,off=0,ks=4,vs=48,imm=0) R10=fp0 fp-8=mmmmmmmm
8: (79) r1 = *(u64 *)(r0 +0)
R0=map_value(id=0,off=0,ks=4,vs=48,imm=0) R10=fp0 fp-8=mmmmmmmm
9: R0=map_value(id=0,off=0,ks=4,vs=48,imm=0) R1_w=inv(id=0) R10=fp0 fp-8=mmmmmmmm
9: (b7) r0 = 1
10: R0_w=inv1 R1_w=inv(id=0) R10=fp0 fp-8=mmmmmmmm
10: (18) r2 = 0x600000002
12: R0_w=inv1 R1_w=inv(id=0) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
12: (ad) if r1 < r2 goto pc+1
R0_w=inv1 R1_w=inv(id=0,umin_value=25769803778) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
13: R0_w=inv1 R1_w=inv(id=0,umin_value=25769803778) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
13: (95) exit
14: R0_w=inv1 R1_w=inv(id=0,umax_value=25769803777,var_off=(0x0; 0x7ffffffff)) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
14: (25) if r1 > 0x0 goto pc+1
R0_w=inv1 R1_w=inv(id=0,umax_value=0,var_off=(0x0; 0x7fffffff),u32_max_value=2147483647) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
15: R0_w=inv1 R1_w=inv(id=0,umax_value=0,var_off=(0x0; 0x7fffffff),u32_max_value=2147483647) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
15: (95) exit
16: R0_w=inv1 R1_w=inv(id=0,umin_value=1,umax_value=25769803777,var_off=(0x0; 0x77fffffff),u32_max_value=2147483647) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
16: (47) r1 |= 0
17: R0_w=inv1 R1_w=inv(id=0,umin_value=1,umax_value=32212254719,var_off=(0x0; 0x77fffffff),u32_max_value=2147483647) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
[...]
Fixes: 3f50f132d840 ("bpf: Verifier, do explicit ALU32 bounds tracking")
Reported-by: Simon Scannell <[email protected]>
Signed-off-by: Daniel Borkmann <[email protected]>
Reviewed-by: John Fastabend <[email protected]>
Acked-by: Alexei Starovoitov <[email protected]>
|
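
A reduced, standalone model of the corrected 32-bit OR tracking makes the fix easier to see. The real function is scalar32_min_max_or in kernel/bpf/verifier.c and derives its unsigned maximum from tnum known-bits tracking; here that is replaced by a conservative bit-smearing bound, and the register state is cut down to the four subreg bounds (assumed consistent, i.e. s32_min >= 0 implies u32_max <= INT32_MAX):

#include <stdint.h>
#include <stdio.h>

struct reg32 {                       /* reduced register state */
    int32_t  s32_min_value, s32_max_value;
    uint32_t u32_min_value, u32_max_value;
};

static void scalar32_min_max_or(struct reg32 *dst, const struct reg32 *src)
{
    uint32_t m;

    /* OR can only set bits, so the result's unsigned minimum is at
     * least the larger of the operands' minima. */
    dst->u32_min_value = dst->u32_min_value > src->u32_min_value
                       ? dst->u32_min_value : src->u32_min_value;

    /* Conservative unsigned maximum: smear the highest possible bit
     * downward (stand-in for the kernel's tnum-derived bound). */
    m = dst->u32_max_value | src->u32_max_value;
    m |= m >> 1; m |= m >> 2; m |= m >> 4; m |= m >> 8; m |= m >> 16;
    dst->u32_max_value = m;

    if (dst->s32_min_value < 0 || src->s32_min_value < 0) {
        /* The sign bit may be set: lose the signed bounds. */
        dst->s32_min_value = INT32_MIN;
        dst->s32_max_value = INT32_MAX;
    } else {
        /* Both non-negative: derive signed bounds from the 32-bit
         * unsigned bounds. The pre-fix code read the 64-bit
         * umin_value/umax_value here and truncated them, which is
         * what produced the bogus [1,1] range in the log above. */
        dst->s32_min_value = (int32_t)dst->u32_min_value;
        dst->s32_max_value = (int32_t)dst->u32_max_value;
    }
}

int main(void)
{
    /* r1 from the example: subreg in [1, 2147483647]; src is const 0. */
    struct reg32 dst = { 1, INT32_MAX, 1, 0x7fffffffu };
    struct reg32 src = { 0, 0, 0, 0 };

    scalar32_min_max_or(&dst, &src);
    printf("u32 [%u,%u] s32 [%d,%d]\n",
           dst.u32_min_value, dst.u32_max_value,
           dst.s32_min_value, dst.s32_max_value); /* [1,2147483647] both */
    return 0;
}
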
address_space_extend_translation(AddressSpace *as, hwaddr addr, hwaddr target_len,
MemoryRegion *mr, hwaddr base, hwaddr len,
bool is_write)
{
hwaddr done = 0;
hwaddr xlat;
MemoryRegion *this_mr;
for (;;) {
target_len -= len;
addr += len;
done += len;
if (target_len == 0) {
return done;
}
len = target_len;
this_mr = address_space_translate(as, addr, &xlat, &len, is_write);
if (this_mr != mr || xlat != base + done) {
return done;
}
}
}
| 0 |
[
"CWE-125"
] |
qemu
|
04bf2526ce87f21b32c9acba1c5518708c243ad0
| 167,848,834,295,479,240,000,000,000,000,000,000,000 | 23 |
exec: use qemu_ram_ptr_length to access guest ram
When accessing a guest's RAM block during a DMA operation, use
'qemu_ram_ptr_length' to get the RAM block pointer. It ensures
that a DMA operation of the given length is possible and avoids
any OOB memory access situations.
Reported-by: Alex <[email protected]>
Signed-off-by: Prasad J Pandit <[email protected]>
Message-Id: <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
|
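
The idea behind qemu_ram_ptr_length — hand out a host pointer only together with the byte count that is actually valid — can be sketched standalone (simplified types; this is not QEMU's implementation):

#include <stdint.h>
#include <stddef.h>

struct ram_block {
    uint8_t  *host;   /* host mapping of the guest RAM block */
    uint64_t  size;   /* block length in bytes */
};

/* Return a pointer into the block and clamp *len to the bytes that
 * are actually addressable, so a DMA loop can never run past the
 * block. Returns NULL if the offset itself is out of range. */
static void *ram_ptr_length(struct ram_block *rb, uint64_t offset,
                            uint64_t *len)
{
    if (offset >= rb->size) {
        *len = 0;
        return NULL;                    /* offset itself is bogus */
    }
    if (*len > rb->size - offset)
        *len = rb->size - offset;       /* truncate, don't overflow */
    return rb->host + offset;
}
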
int X509_check_issued(X509 *issuer, X509 *subject)
{
if(X509_NAME_cmp(X509_get_subject_name(issuer),
X509_get_issuer_name(subject)))
return X509_V_ERR_SUBJECT_ISSUER_MISMATCH;
x509v3_cache_extensions(issuer);
x509v3_cache_extensions(subject);
if(subject->akid)
{
int ret = X509_check_akid(issuer, subject->akid);
if (ret != X509_V_OK)
return ret;
}
if(subject->ex_flags & EXFLAG_PROXY)
{
if(ku_reject(issuer, KU_DIGITAL_SIGNATURE))
return X509_V_ERR_KEYUSAGE_NO_DIGITAL_SIGNATURE;
}
else if(ku_reject(issuer, KU_KEY_CERT_SIGN))
return X509_V_ERR_KEYUSAGE_NO_CERTSIGN;
return X509_V_OK;
}
| 0 |
[] |
openssl
|
d65b8b2162f33ac0d53dace588a0847ed827626c
| 140,206,119,554,564,300,000,000,000,000,000,000,000 | 24 |
Backport OCSP fixes.
|
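
As a usage sketch (error handling reduced; X509_check_issued and the stack helpers are real OpenSSL APIs), a caller typically walks candidate CAs and keeps the first one the check accepts:

#include <openssl/x509.h>
#include <openssl/x509v3.h>

/* Return the first candidate that X509_check_issued accepts as the
 * issuer of `subject`, or NULL if none qualifies. */
X509 *find_issuer(STACK_OF(X509) *candidates, X509 *subject)
{
    for (int i = 0; i < sk_X509_num(candidates); i++) {
        X509 *ca = sk_X509_value(candidates, i);
        if (X509_check_issued(ca, subject) == X509_V_OK)
            return ca;   /* names chain and key usage permits signing */
    }
    return NULL;
}
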
ldns_rdf2buffer_str_int16_data(ldns_buffer *output, const ldns_rdf *rdf)
{
size_t size;
char *b64;
if (ldns_rdf_size(rdf) < 2) {
return LDNS_STATUS_WIRE_RDATA_ERR;
}
/* Subtract the size (2) of the number that specifies the length */
size = ldns_b64_ntop_calculate_size(ldns_rdf_size(rdf) - 2);
ldns_buffer_printf(output, "%u ", ldns_rdf_size(rdf) - 2);
if (ldns_rdf_size(rdf) > 2) {
b64 = LDNS_XMALLOC(char, size);
if(!b64)
return LDNS_STATUS_MEM_ERR;
if (ldns_rdf_size(rdf) > 2 &&
ldns_b64_ntop(ldns_rdf_data(rdf) + 2,
ldns_rdf_size(rdf) - 2,
b64, size)) {
ldns_buffer_printf(output, "%s", b64);
}
LDNS_FREE(b64);
}
return ldns_buffer_status(output);
}
| 0 |
[
"CWE-415"
] |
ldns
|
070b4595981f48a21cc6b4f5047fdc2d09d3da91
| 122,963,861,343,031,700,000,000,000,000,000,000,000 | 25 |
CAA and URI
|
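
The `< 2` guard at the top of the function matters because `ldns_rdf_size(rdf) - 2` is size_t arithmetic: on a malformed one-byte rdf it would wrap to a huge value instead of -1, and the subsequent buffer sizing would go badly wrong. A standalone illustration:

#include <stdio.h>
#include <stddef.h>

int main(void)
{
    size_t rdf_size = 1;        /* malformed rdata: shorter than the
                                   2-byte length prefix */
    size_t n = rdf_size - 2;    /* unsigned wrap-around, not -1 */

    printf("%zu\n", n);         /* prints SIZE_MAX, e.g.
                                   18446744073709551615 on 64-bit */
    return 0;
}
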