| func (string, length 0-484k) | target (int64, 0-1) | cwe (list, length 0-4) | project (string, 799 classes) | commit_id (string, length 40) | hash (float64, 1,215,700,430,453,689,100,000,000 - 340,281,914,521,452,260,000,000,000,000) | size (int64, 1-24k) | message (string, length 0-13.3k) |
|---|---|---|---|---|---|---|---|
PHP_FUNCTION(imagestringup)
{
php_imagechar(INTERNAL_FUNCTION_PARAM_PASSTHRU, 3);
}
| 0 |
[
"CWE-703",
"CWE-189"
] |
php-src
|
2938329ce19cb8c4197dec146c3ec887c6f61d01
| 137,060,597,273,483,200,000,000,000,000,000,000,000 | 4 |
Fixed bug #66356 (Heap Overflow Vulnerability in imagecrop())
And also fixed the bug: arguments are altered after some calls
|
term_replace_bs_del_keycode(char_u *ta_buf, int ta_len, int len)
{
int i;
int c;
for (i = ta_len; i < ta_len + len; ++i)
{
if (ta_buf[i] == CSI && len - i > 2)
{
c = TERMCAP2KEY(ta_buf[i + 1], ta_buf[i + 2]);
if (c == K_DEL || c == K_KDEL || c == K_BS)
{
mch_memmove(ta_buf + i + 1, ta_buf + i + 3,
(size_t)(len - i - 2));
if (c == K_DEL || c == K_KDEL)
ta_buf[i] = DEL;
else
ta_buf[i] = Ctrl_H;
len -= 2;
}
}
else if (ta_buf[i] == '\r')
ta_buf[i] = '\n';
if (has_mbyte)
i += (*mb_ptr2len_len)(ta_buf + i, ta_len + len - i) - 1;
}
}
| 0 |
[
"CWE-125",
"CWE-787"
] |
vim
|
e178af5a586ea023622d460779fdcabbbfac0908
| 308,034,445,313,221,100,000,000,000,000,000,000,000 | 27 |
patch 8.2.5160: accessing invalid memory after changing terminal size
Problem: Accessing invalid memory after changing terminal size.
Solution: Adjust cmdline_row and msg_row to the value of Rows.
|
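The vim patch message above states its solution in a single line (adjust cmdline_row and msg_row to the value of Rows). As a rough illustration of that idea only, not the actual vim change, the clamping could look like this:

```c
/* Hedged sketch, not the upstream vim patch: after the terminal shrinks,
 * clamp the two row globals so later screen indexing stays below Rows.
 * Parameter names mirror the vim globals purely for illustration. */
static void clamp_rows_after_resize(int *cmdline_row, int *msg_row, int Rows)
{
    if (*cmdline_row >= Rows)
        *cmdline_row = Rows - 1;   /* keep the command line on screen */
    if (*msg_row >= Rows)
        *msg_row = Rows - 1;       /* keep the message row on screen */
}
```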
gs_window_raise (GSWindow *window)
{
GdkWindow *win;
g_return_if_fail (GS_IS_WINDOW (window));
gs_debug ("Raising screensaver window");
win = GTK_WIDGET (window)->window;
gdk_window_raise (win);
}
| 0 |
[] |
gnome-screensaver
|
a5f66339be6719c2b8fc478a1d5fc6545297d950
| 264,102,148,082,580,480,000,000,000,000,000,000,000 | 12 |
Ensure keyboard grab and unlock dialog exist after monitor removal
gnome-screensaver currently doesn't deal with monitors getting
removed properly. If the unlock dialog is on the removed monitor
then the unlock dialog and its associated keyboard grab are not
moved to an existing monitor when the monitor removal is processed.
This means that users can gain access to the locked system by placing
the mouse pointer on an external monitor and then disconnecting the
external monitor.
CVE-2010-0414
https://bugzilla.gnome.org/show_bug.cgi?id=609337
|
static void rd_release_device_space(struct rd_dev *rd_dev)
{
u32 i, j, page_count = 0, sg_per_table;
struct rd_dev_sg_table *sg_table;
struct page *pg;
struct scatterlist *sg;
if (!rd_dev->sg_table_array || !rd_dev->sg_table_count)
return;
sg_table = rd_dev->sg_table_array;
for (i = 0; i < rd_dev->sg_table_count; i++) {
sg = sg_table[i].sg_table;
sg_per_table = sg_table[i].rd_sg_count;
for (j = 0; j < sg_per_table; j++) {
pg = sg_page(&sg[j]);
if (pg) {
__free_page(pg);
page_count++;
}
}
kfree(sg);
}
pr_debug("CORE_RD[%u] - Released device space for Ramdisk"
" Device ID: %u, pages %u in %u tables total bytes %lu\n",
rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,
rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE);
kfree(sg_table);
rd_dev->sg_table_array = NULL;
rd_dev->sg_table_count = 0;
}
| 1 |
[
"CWE-200",
"CWE-264"
] |
linux
|
4442dc8a92b8f9ad8ee9e7f8438f4c04c03a22dc
| 57,054,723,544,141,300,000,000,000,000,000,000,000 | 36 |
target/rd: Refactor rd_build_device_space + rd_release_device_space
This patch refactors rd_build_device_space() + rd_release_device_space()
into rd_allocate_sgl_table() + rd_release_device_space() so that they
may be used separately for setup + release of protection information
scatterlists.
Also add explicit memset of pages within rd_allocate_sgl_table() based
upon passed 'init_payload' value.
v2 changes:
- Drop unused sg_table from rd_release_device_space (Wei)
Cc: Martin K. Petersen <[email protected]>
Cc: Christoph Hellwig <[email protected]>
Cc: Hannes Reinecke <[email protected]>
Cc: Sagi Grimberg <[email protected]>
Cc: Or Gerlitz <[email protected]>
Signed-off-by: Nicholas Bellinger <[email protected]>
|
DocumentSourceGroup::rewriteGroupAsTransformOnFirstDocument() const {
if (_idExpressions.size() != 1) {
// This transformation is only intended for $group stages that group on a single field.
return nullptr;
}
auto fieldPathExpr = dynamic_cast<ExpressionFieldPath*>(_idExpressions.front().get());
if (!fieldPathExpr || !fieldPathExpr->isRootFieldPath()) {
return nullptr;
}
const auto fieldPath = fieldPathExpr->getFieldPath();
if (fieldPath.getPathLength() == 1) {
// The path is $$CURRENT or $$ROOT. This isn't really a sensible value to group by (since
// each document has a unique _id, it will just return the entire collection). We only
// apply the rewrite when grouping by a single field, so we cannot apply it in this case,
// where we are grouping by the entire document.
invariant(fieldPath.getFieldName(0) == "CURRENT" || fieldPath.getFieldName(0) == "ROOT");
return nullptr;
}
const auto groupId = fieldPath.tail().fullPath();
// We can't do this transformation if there are any non-$first accumulators.
for (auto&& accumulator : _accumulatedFields) {
if (AccumulatorDocumentsNeeded::kFirstDocument !=
accumulator.makeAccumulator()->documentsNeeded()) {
return nullptr;
}
}
std::vector<std::pair<std::string, boost::intrusive_ptr<Expression>>> fields;
boost::intrusive_ptr<Expression> idField;
// The _id field can be specified either as a fieldpath (ex. _id: "$a") or as a singleton
// object (ex. _id: {v: "$a"}).
if (_idFieldNames.empty()) {
idField = ExpressionFieldPath::create(pExpCtx.get(), groupId);
} else {
invariant(_idFieldNames.size() == 1);
idField = ExpressionObject::create(pExpCtx.get(),
{{_idFieldNames.front(), _idExpressions.front()}});
}
fields.push_back(std::make_pair("_id", idField));
for (auto&& accumulator : _accumulatedFields) {
fields.push_back(std::make_pair(accumulator.fieldName, accumulator.expr.argument));
// Since we don't attempt this transformation for non-$first accumulators,
// the initializer should always be trivial.
}
return GroupFromFirstDocumentTransformation::create(pExpCtx, groupId, std::move(fields));
}
| 0 |
[] |
mongo
|
07b8851825836911265e909d6842d4586832f9bb
| 303,450,538,027,197,030,000,000,000,000,000,000,000 | 54 |
SERVER-60218-44: SERVER-60218 add initialize helper function for document_source_group (cherry picked from commit 867f52afbb79bc00e35c70f8e0681b7d602f97b2)
|
MagickExport void SetStringInfoLength(StringInfo *string_info,
const size_t length)
{
assert(string_info != (StringInfo *) NULL);
assert(string_info->signature == MagickCoreSignature);
if (string_info->length == length)
return;
if (~length < MagickPathExtent)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
string_info->length=length;
if (string_info->datum == (unsigned char *) NULL)
string_info->datum=(unsigned char *) AcquireQuantumMemory(length+
MagickPathExtent,sizeof(*string_info->datum));
else
string_info->datum=(unsigned char *) ResizeQuantumMemory(string_info->datum,
length+MagickPathExtent,sizeof(*string_info->datum));
if (string_info->datum == (unsigned char *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
}
| 0 |
[
"CWE-190"
] |
ImageMagick
|
be90a5395695f0d19479a5d46b06c678be7f7927
| 296,416,904,896,097,940,000,000,000,000,000,000,000 | 19 |
https://github.com/ImageMagick/ImageMagick/issues/1721
|
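The `~length < MagickPathExtent` test in SetStringInfoLength() above is a compact unsigned-overflow guard for the later `length+MagickPathExtent` allocation size. A standalone restatement of the idiom, for clarity (my paraphrase, not ImageMagick code):

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* For an unsigned size_t, ~a == SIZE_MAX - a, so (~a < b) holds exactly
 * when a + b would wrap past SIZE_MAX. This mirrors the guard above. */
static bool addition_would_overflow(size_t a, size_t b)
{
    return ~a < b;   /* equivalent to: b > SIZE_MAX - a */
}
```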
gs_window_set_logout_enabled (GSWindow *window,
gboolean logout_enabled)
{
g_return_if_fail (GS_IS_WINDOW (window));
window->priv->logout_enabled = logout_enabled;
}
| 0 |
[] |
gnome-screensaver
|
a5f66339be6719c2b8fc478a1d5fc6545297d950
| 339,067,289,330,869,500,000,000,000,000,000,000,000 | 7 |
Ensure keyboard grab and unlock dialog exist after monitor removal
gnome-screensaver currently doesn't deal with monitors getting
removed properly. If the unlock dialog is on the removed monitor
then the unlock dialog and its associated keyboard grab are not
moved to an existing monitor when the monitor removal is processed.
This means that users can gain access to the locked system by placing
the mouse pointer on an external monitor and then disconnecting the
external monitor.
CVE-2010-0414
https://bugzilla.gnome.org/show_bug.cgi?id=609337
|
Item_func_sp::func_name() const
{
THD *thd= current_thd;
/* Calculate length to avoid reallocation of string for sure */
uint len= (((m_name->m_explicit_name ? m_name->m_db.length : 0) +
m_name->m_name.length)*2 + //characters*quoting
2 + // ` and `
(m_name->m_explicit_name ?
3 : 0) + // '`', '`' and '.' for the db
1 + // end of string
ALIGN_SIZE(1)); // to avoid String reallocation
String qname((char *)alloc_root(thd->mem_root, len), len,
system_charset_info);
qname.length(0);
if (m_name->m_explicit_name)
{
append_identifier(thd, &qname, m_name->m_db.str, m_name->m_db.length);
qname.append('.');
}
append_identifier(thd, &qname, m_name->m_name.str, m_name->m_name.length);
return qname.c_ptr_safe();
}
| 0 |
[
"CWE-120"
] |
server
|
eca207c46293bc72dd8d0d5622153fab4d3fccf1
| 162,411,946,468,196,060,000,000,000,000,000,000,000 | 23 |
MDEV-25317 Assertion `scale <= precision' failed in decimal_bin_size And Assertion `scale >= 0 && precision > 0 && scale <= precision' failed in decimal_bin_size_inline/decimal_bin_size.
Precision should be kept below DECIMAL_MAX_SCALE for computations.
It can be bigger in Item_decimal. I'd fix this too, but it changes the
existing behaviour, so it is problematic to fix.
|
void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn)
{
struct kvm_memory_slot *memslot;
memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
mark_page_dirty_in_slot(vcpu->kvm, memslot, gfn);
}
| 0 |
[
"CWE-459"
] |
linux
|
683412ccf61294d727ead4a73d97397396e69a6b
| 303,301,617,425,409,540,000,000,000,000,000,000,000 | 7 |
KVM: SEV: add cache flush to solve SEV cache incoherency issues
Flush the CPU caches when memory is reclaimed from an SEV guest (where
reclaim also includes it being unmapped from KVM's memslots). Due to lack
of coherency for SEV encrypted memory, failure to flush results in silent
data corruption if userspace is malicious/broken and doesn't ensure SEV
guest memory is properly pinned and unpinned.
Cache coherency is not enforced across the VM boundary in SEV (AMD APM
vol.2 Section 15.34.7). Confidential cachelines, generated by confidential
VM guests have to be explicitly flushed on the host side. If a memory page
containing dirty confidential cachelines was released by VM and reallocated
to another user, the cachelines may corrupt the new user at a later time.
KVM takes a shortcut by assuming all confidential memory remain pinned
until the end of VM lifetime. Therefore, KVM does not flush cache at
mmu_notifier invalidation events. Because of this incorrect assumption and
the lack of cache flushing, malicious userspace can crash the host kernel
by creating a malicious VM that continuously allocates and releases unpinned
confidential memory pages while the VM is running.
Add cache flush operations to mmu_notifier operations to ensure that any
physical memory leaving the guest VM get flushed. In particular, hook
mmu_notifier_invalidate_range_start and mmu_notifier_release events and
flush cache accordingly. The hooks run after releasing the mmu lock to avoid
contention with other vCPUs.
Cc: [email protected]
Suggested-by: Sean Christpherson <[email protected]>
Reported-by: Mingwei Zhang <[email protected]>
Signed-off-by: Mingwei Zhang <[email protected]>
Message-Id: <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
|
QPDFObjectHandle::releaseResolved()
{
// Recursively break any resolved references to indirect objects.
// Do not cross over indirect object boundaries to avoid an
// infinite loop. This method may only be called during final
// destruction. See comments in QPDF::~QPDF().
if (isIndirect())
{
if (this->m->obj.getPointer())
{
this->m->obj = 0;
}
}
else
{
QPDFObject::ObjAccessor::releaseResolved(this->m->obj.getPointer());
}
}
| 0 |
[
"CWE-399",
"CWE-674"
] |
qpdf
|
b4d6cf6836ce025ba1811b7bbec52680c7204223
| 211,937,017,940,696,570,000,000,000,000,000,000,000 | 18 |
Limit depth of nesting in direct objects (fixes #202)
This fixes CVE-2018-9918.
|
utf32be_mbc_case_fold(OnigCaseFoldType flag,
const UChar** pp, const UChar* end, UChar* fold)
{
const UChar* p = *pp;
if (ONIGENC_IS_ASCII_CODE(*(p+3)) && *(p+2) == 0 && *(p+1) == 0 && *p == 0) {
*fold++ = 0;
*fold++ = 0;
#ifdef USE_UNICODE_CASE_FOLD_TURKISH_AZERI
if ((flag & ONIGENC_CASE_FOLD_TURKISH_AZERI) != 0) {
if (*(p+3) == 0x49) {
*fold++ = 0x01;
*fold = 0x31;
(*pp) += 4;
return 4;
}
}
#endif
*fold++ = 0;
*fold = ONIGENC_ASCII_CODE_TO_LOWER_CASE(*(p+3));
*pp += 4;
return 4;
}
else
return onigenc_unicode_mbc_case_fold(ONIG_ENCODING_UTF32_BE, flag, pp, end,
fold);
}
| 0 |
[
"CWE-125"
] |
php-src
|
b6fe458ef9ac1372b60c3d3810b0358e2e20840d
| 51,206,985,942,392,840,000,000,000,000,000,000,000 | 29 |
Fix bug #77418 - Heap overflow in utf32be_mbc_to_code
(cherry picked from commit aeec40cb50eca6a97975765e2bacc28a5950cfa9)
|
pop_context ()
{
pop_dollar_vars ();
variable_context--;
pop_var_context ();
sv_ifs ("IFS"); /* XXX here for now */
}
| 0 |
[] |
bash
|
863d31ae775d56b785dc5b0105b6d251515d81d5
| 4,057,386,013,870,455,000,000,000,000,000,000,000 | 8 |
commit bash-20120224 snapshot
|
fst_rx_dma_complete(struct fst_card_info *card, struct fst_port_info *port,
int len, struct sk_buff *skb, int rxp)
{
struct net_device *dev = port_to_dev(port);
int pi;
int rx_status;
dbg(DBG_TX, "fst_rx_dma_complete\n");
pi = port->index;
memcpy(skb_put(skb, len), card->rx_dma_handle_host, len);
/* Reset buffer descriptor */
FST_WRB(card, rxDescrRing[pi][rxp].bits, DMA_OWN);
/* Update stats */
dev->stats.rx_packets++;
dev->stats.rx_bytes += len;
/* Push upstream */
dbg(DBG_RX, "Pushing the frame up the stack\n");
if (port->mode == FST_RAW)
skb->protocol = farsync_type_trans(skb, dev);
else
skb->protocol = hdlc_type_trans(skb, dev);
rx_status = netif_rx(skb);
fst_process_rx_status(rx_status, port_to_dev(port)->name);
if (rx_status == NET_RX_DROP)
dev->stats.rx_dropped++;
}
| 0 |
[
"CWE-399"
] |
linux
|
96b340406724d87e4621284ebac5e059d67b2194
| 274,757,324,162,382,870,000,000,000,000,000,000,000 | 29 |
farsync: fix info leak in ioctl
The fst_get_iface() code fails to initialize the two padding bytes of
struct sync_serial_settings after the ->loopback member. Add an explicit
memset(0) before filling the structure to avoid the info leak.
Signed-off-by: Dan Carpenter <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
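The farsync commit message above describes the fix as an explicit memset(0) of struct sync_serial_settings before its fields are filled, so the padding after ->loopback is never copied to user space uninitialized. A minimal sketch of that pattern (header path and helper name are assumptions, not the driver's actual code):

```c
#include <string.h>
#include <linux/hdlc/ioctl.h>   /* struct sync_serial_settings (assumed path) */

/* Zero the whole struct, padding included, before filling the fields that
 * will later be copied to user space. */
static void fill_iface_settings(struct sync_serial_settings *sync,
                                unsigned int clock_rate,
                                unsigned int clock_type,
                                unsigned short loopback)
{
    memset(sync, 0, sizeof(*sync));   /* clears padding after ->loopback too */
    sync->clock_rate = clock_rate;
    sync->clock_type = clock_type;
    sync->loopback   = loopback;
}
```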
TEST_F(Http1ClientConnectionImplTest, WatermarkTest) {
EXPECT_CALL(connection_, bufferLimit()).WillOnce(Return(10));
initialize();
InSequence s;
NiceMock<MockResponseDecoder> response_decoder;
Http::RequestEncoder& request_encoder = codec_->newStream(response_decoder);
Http::MockStreamCallbacks stream_callbacks;
request_encoder.getStream().addCallbacks(stream_callbacks);
// Fake a call from the underlying Network::Connection and verify the stream is notified.
EXPECT_CALL(stream_callbacks, onAboveWriteBufferHighWatermark());
static_cast<ClientConnection*>(codec_.get())
->onUnderlyingConnectionAboveWriteBufferHighWatermark();
// Do a large write. This will result in the buffer temporarily going over the
// high watermark and then draining.
EXPECT_CALL(stream_callbacks, onAboveWriteBufferHighWatermark());
EXPECT_CALL(stream_callbacks, onBelowWriteBufferLowWatermark());
TestRequestHeaderMapImpl headers{{":method", "GET"}, {":path", "/"}, {":authority", "host"}};
request_encoder.encodeHeaders(headers, true);
// Fake out the underlying Network::Connection buffer being drained.
EXPECT_CALL(stream_callbacks, onBelowWriteBufferLowWatermark());
static_cast<ClientConnection*>(codec_.get())
->onUnderlyingConnectionBelowWriteBufferLowWatermark();
}
| 0 |
[
"CWE-770"
] |
envoy
|
7ca28ff7d46454ae930e193d97b7d08156b1ba59
| 317,739,558,274,643,150,000,000,000,000,000,000,000 | 28 |
[http1] Include request URL in request header size computation, and reject partial headers that exceed configured limits (#145)
Signed-off-by: antonio <[email protected]>
|
static struct qrtr_sock *qrtr_port_lookup(int port)
{
struct qrtr_sock *ipc;
if (port == QRTR_PORT_CTRL)
port = 0;
rcu_read_lock();
ipc = xa_load(&qrtr_ports, port);
if (ipc)
sock_hold(&ipc->sk);
rcu_read_unlock();
return ipc;
}
| 0 |
[] |
net
|
7e78c597c3ebfd0cb329aa09a838734147e4f117
| 308,199,200,705,670,940,000,000,000,000,000,000,000 | 15 |
net: qrtr: fix another OOB Read in qrtr_endpoint_post
This check was incomplete: it did not consider that size can be 0:
if (len != ALIGN(size, 4) + hdrlen)
goto err;
If size from qrtr_hdr is 0, the result of ALIGN(size, 4)
will be 0. In the case of len == hdrlen and size == 0
in the header, this check won't fail, and
if (cb->type == QRTR_TYPE_NEW_SERVER) {
/* Remote node endpoint can bridge other distant nodes */
const struct qrtr_ctrl_pkt *pkt = data + hdrlen;
qrtr_node_assign(node, le32_to_cpu(pkt->server.node));
}
will also read out of bounds from data, which is an hdrlen-sized allocated block.
Fixes: 194ccc88297a ("net: qrtr: Support decoding incoming v2 packets")
Fixes: ad9d24c9429e ("net: qrtr: fix OOB Read in qrtr_endpoint_post")
Signed-off-by: Xiaolong Huang <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
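The qrtr commit message above quotes the original length check and explains that it misses the size == 0 case. A hedged restatement of the strengthened validation (a userspace-style sketch with a stand-in ALIGN macro, not necessarily the exact upstream diff):

```c
#include <stdbool.h>
#include <stddef.h>

#define ALIGN4(x) (((x) + 3u) & ~(size_t)3u)   /* stand-in for ALIGN(x, 4) */

/* A packet is acceptable only if the payload size is non-zero and the
 * total length equals the header plus the 4-byte-aligned payload, so a
 * later "data + hdrlen" access cannot read past the hdrlen-sized block. */
static bool qrtr_len_ok(size_t len, size_t hdrlen, size_t size)
{
    return size != 0 && len == ALIGN4(size) + hdrlen;
}
```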
pixops_process (guchar *dest_buf,
int render_x0,
int render_y0,
int render_x1,
int render_y1,
int dest_rowstride,
int dest_channels,
gboolean dest_has_alpha,
const guchar *src_buf,
int src_width,
int src_height,
int src_rowstride,
int src_channels,
gboolean src_has_alpha,
double scale_x,
double scale_y,
int check_x,
int check_y,
int check_size,
guint32 color1,
guint32 color2,
PixopsFilter *filter,
PixopsLineFunc line_func,
PixopsPixelFunc pixel_func)
{
int i, j;
int x, y; /* X and Y position in source (fixed_point) */
guchar **line_bufs;
int *filter_weights;
int x_step;
int y_step;
int check_shift;
int scaled_x_offset;
int run_end_x;
int run_end_index;
x_step = (1 << SCALE_SHIFT) / scale_x; /* X step in source (fixed point) */
y_step = (1 << SCALE_SHIFT) / scale_y; /* Y step in source (fixed point) */
if (x_step == 0 || y_step == 0)
return; /* overflow, bail out */
filter_weights = make_filter_table (filter);
if (!filter_weights)
return; /* overflow, bail out */
line_bufs = g_new (guchar *, filter->y.n);
check_shift = check_size ? get_check_shift (check_size) : 0;
scaled_x_offset = floor (filter->x.offset * (1 << SCALE_SHIFT));
/* Compute the index where we run off the end of the source buffer. The
* furthest source pixel we access at index i is:
*
* ((render_x0 + i) * x_step + scaled_x_offset) >> SCALE_SHIFT + filter->x.n - 1
*
* So, run_end_index is the smallest i for which this pixel is src_width,
* i.e, for which:
*
* (i + render_x0) * x_step >= ((src_width - filter->x.n + 1) << SCALE_SHIFT) - scaled_x_offset
*
*/
#define MYDIV(a,b) ((a) > 0 ? (a) / (b) : ((a) - (b) + 1) / (b)) /* Division so that -1/5 = -1 */
run_end_x = (((src_width - filter->x.n + 1) << SCALE_SHIFT) - scaled_x_offset);
run_end_index = MYDIV (run_end_x + x_step - 1, x_step) - render_x0;
run_end_index = MIN (run_end_index, render_x1 - render_x0);
y = render_y0 * y_step + floor (filter->y.offset * (1 << SCALE_SHIFT));
for (i = 0; i < (render_y1 - render_y0); i++)
{
int dest_x;
int y_start = y >> SCALE_SHIFT;
int x_start;
int *run_weights = filter_weights +
((y >> (SCALE_SHIFT - SUBSAMPLE_BITS)) & SUBSAMPLE_MASK) *
filter->x.n * filter->y.n * SUBSAMPLE;
guchar *new_outbuf;
guint32 tcolor1, tcolor2;
guchar *outbuf = dest_buf + (gsize)dest_rowstride * i;
guchar *outbuf_end = outbuf + dest_channels * (render_x1 - render_x0);
if (((i + check_y) >> check_shift) & 1)
{
tcolor1 = color2;
tcolor2 = color1;
}
else
{
tcolor1 = color1;
tcolor2 = color2;
}
for (j=0; j<filter->y.n; j++)
{
if (y_start < 0)
line_bufs[j] = (guchar *)src_buf;
else if (y_start < src_height)
line_bufs[j] = (guchar *)src_buf + (gsize)src_rowstride * y_start;
else
line_bufs[j] = (guchar *)src_buf + (gsize)src_rowstride * (src_height - 1);
y_start++;
}
dest_x = check_x;
x = render_x0 * x_step + scaled_x_offset;
x_start = x >> SCALE_SHIFT;
while (x_start < 0 && outbuf < outbuf_end)
{
process_pixel (run_weights + ((x >> (SCALE_SHIFT - SUBSAMPLE_BITS)) & SUBSAMPLE_MASK) * (filter->x.n * filter->y.n), filter->x.n, filter->y.n,
outbuf, dest_x, dest_channels, dest_has_alpha,
line_bufs, src_channels, src_has_alpha,
x >> SCALE_SHIFT, src_width,
check_size, tcolor1, tcolor2, pixel_func);
x += x_step;
x_start = x >> SCALE_SHIFT;
dest_x++;
outbuf += dest_channels;
}
new_outbuf = (*line_func) (run_weights, filter->x.n, filter->y.n,
outbuf, dest_x, dest_buf + (gsize)dest_rowstride *
i + run_end_index * dest_channels,
dest_channels, dest_has_alpha,
line_bufs, src_channels, src_has_alpha,
x, x_step, src_width, check_size, tcolor1,
tcolor2);
dest_x += (new_outbuf - outbuf) / dest_channels;
x = (dest_x - check_x + render_x0) * x_step + scaled_x_offset;
outbuf = new_outbuf;
while (outbuf < outbuf_end)
{
process_pixel (run_weights + ((x >> (SCALE_SHIFT - SUBSAMPLE_BITS)) & SUBSAMPLE_MASK) * (filter->x.n * filter->y.n), filter->x.n, filter->y.n,
outbuf, dest_x, dest_channels, dest_has_alpha,
line_bufs, src_channels, src_has_alpha,
x >> SCALE_SHIFT, src_width,
check_size, tcolor1, tcolor2, pixel_func);
x += x_step;
dest_x++;
outbuf += dest_channels;
}
y += y_step;
}
g_free (line_bufs);
g_free (filter_weights);
}
| 1 |
[] |
gdk-pixbuf
|
dbfe8f70471864818bf458a39c8a99640895bd22
| 152,270,345,132,909,950,000,000,000,000,000,000,000 | 161 |
pixops: use gint64 in more places to avoid overflow when shifting
|
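The one-line gdk-pixbuf message above only says that more shift computations were widened to gint64. As an illustration of the overflow it refers to (illustrative names, not the pixops internals):

```c
#include <glib.h>

#define SCALE_SHIFT 16

/* With a 32-bit int, (x << SCALE_SHIFT) overflows once x reaches 2^15 for
 * a 16-bit shift; widening to gint64 before shifting keeps the fixed-point
 * value exact. */
static gint64 to_fixed_point(int x)
{
    return (gint64) x << SCALE_SHIFT;   /* widen first, then shift */
}
```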
static int _nfs4_proc_readlink(struct inode *inode, struct page *page,
unsigned int pgbase, unsigned int pglen)
{
struct nfs4_readlink args = {
.fh = NFS_FH(inode),
.pgbase = pgbase,
.pglen = pglen,
.pages = &page,
};
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READLINK],
.rpc_argp = &args,
.rpc_resp = NULL,
};
return rpc_call_sync(NFS_CLIENT(inode), &msg, 0);
}
| 0 |
[
"CWE-703"
] |
linux
|
dc0b027dfadfcb8a5504f7d8052754bf8d501ab9
| 282,291,399,675,304,730,000,000,000,000,000,000,000 | 17 |
NFSv4: Convert the open and close ops to use fmode
Signed-off-by: Trond Myklebust <[email protected]>
|
STACK_OF(X509_EXTENSION) *TS_REQ_get_exts(TS_REQ *a)
{
return a->extensions;
}
| 0 |
[] |
openssl
|
c7235be6e36c4bef84594aa3b2f0561db84b63d8
| 80,701,319,383,309,400,000,000,000,000,000,000,000 | 4 |
RFC 3161 compliant time stamp request creation, response generation
and response verification.
Submitted by: Zoltan Glozik <[email protected]>
Reviewed by: Ulf Moeller
|
const Address::InstanceConstSharedPtr& remoteAddress() const override { return remote_address_; }
| 0 |
[
"CWE-400"
] |
envoy
|
542f84c66e9f6479bc31c6f53157c60472b25240
| 92,841,236,611,219,540,000,000,000,000,000,000,000 | 1 |
overload: Runtime configurable global connection limits (#147)
Signed-off-by: Tony Allen <[email protected]>
|
evdev_init_left_handed(struct evdev_device *device,
void (*change_to_left_handed)(struct evdev_device *))
{
device->left_handed.config.has = evdev_left_handed_has;
device->left_handed.config.set = evdev_left_handed_set;
device->left_handed.config.get = evdev_left_handed_get;
device->left_handed.config.get_default = evdev_left_handed_get_default;
device->base.config.left_handed = &device->left_handed.config;
device->left_handed.enabled = false;
device->left_handed.want_enabled = false;
device->left_handed.change_to_enabled = change_to_left_handed;
}
| 0 |
[
"CWE-134"
] |
libinput
|
a423d7d3269dc32a87384f79e29bb5ac021c83d1
| 94,236,454,471,277,010,000,000,000,000,000,000,000 | 12 |
evdev: strip the device name of format directives
This fixes a format string vulnerabilty.
evdev_log_message() composes a format string consisting of a fixed
prefix (including the rendered device name) and the passed-in format
buffer. This format string is then passed with the arguments to the
actual log handler, which usually and eventually ends up being printf.
If the device name contains a printf-style format directive, these ended
up in the format string and thus get interpreted correctly, e.g. for a
device "Foo%sBar" the log message vs printf invocation ends up being:
evdev_log_message(device, "some message %s", "some argument");
printf("event9 - Foo%sBar: some message %s", "some argument");
This can enable an attacker to execute malicious code with the
privileges of the process using libinput.
To exploit this, an attacker needs to be able to create a kernel device
with a malicious name, e.g. through /dev/uinput or a Bluetooth device.
To fix this, convert any potential format directives in the device name
by duplicating percentages.
Pre-rendering the device to avoid the issue altogether would be nicer
but the current log level hooks do not easily allow for this. The device
name is the only user-controlled part of the format string.
A second potential issue is the sysname of the device which is also
sanitized.
This issue was found by Albin Eldstål-Ahrens and Benjamin Svensson from
Assured AB, and independently by Lukas Lamster.
Fixes #752
Signed-off-by: Peter Hutterer <[email protected]>
|
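The libinput commit message above says the device name is sanitized "by duplicating percentages" before it is embedded in a log format string. A hypothetical sketch of such an escaping helper (my naming, not libinput's actual function):

```c
#include <stdlib.h>
#include <string.h>

/* Return a heap copy of the device name with every '%' doubled, so the
 * name can no longer act as a printf format directive inside a format
 * string ("Foo%sBar" becomes "Foo%%sBar"). Caller frees the result. */
static char *escape_percents(const char *name)
{
    size_t len = strlen(name), extra = 0;
    for (const char *p = name; *p; p++)
        if (*p == '%')
            extra++;

    char *out = malloc(len + extra + 1);
    if (out == NULL)
        return NULL;

    char *dst = out;
    for (const char *p = name; *p; p++) {
        *dst++ = *p;
        if (*p == '%')
            *dst++ = '%';
    }
    *dst = '\0';
    return out;
}
```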
static void mld_clear_report(struct inet6_dev *idev)
{
struct sk_buff *skb;
spin_lock_bh(&idev->mc_report_lock);
while ((skb = __skb_dequeue(&idev->mc_report_queue)))
kfree_skb(skb);
spin_unlock_bh(&idev->mc_report_lock);
}
| 0 |
[
"CWE-703"
] |
linux
|
2d3916f3189172d5c69d33065c3c21119fe539fc
| 224,733,517,378,710,960,000,000,000,000,000,000,000 | 9 |
ipv6: fix skb drops in igmp6_event_query() and igmp6_event_report()
While investigating on why a synchronize_net() has been added recently
in ipv6_mc_down(), I found that igmp6_event_query() and igmp6_event_report()
might drop skbs in some cases.
Discussion about removing synchronize_net() from ipv6_mc_down()
will happen in a different thread.
Fixes: f185de28d9ae ("mld: add new workqueues for process mld events")
Signed-off-by: Eric Dumazet <[email protected]>
Cc: Taehee Yoo <[email protected]>
Cc: Cong Wang <[email protected]>
Cc: David Ahern <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Jakub Kicinski <[email protected]>
|
QPDFObjectHandle::isInlineImage()
{
dereference();
return QPDFObjectTypeAccessor<QPDF_InlineImage>::check(obj.getPointer());
}
| 0 |
[
"CWE-835"
] |
qpdf
|
afe0242b263a9e1a8d51dd81e42ab6de2e5127eb
| 103,582,094,707,065,340,000,000,000,000,000,000,000 | 5 |
Handle object ID 0 (fixes #99)
This is CVE-2017-9208.
The QPDF library uses object ID 0 internally as a sentinel to
represent a direct object, but prior to this fix, was not blocking
handling of 0 0 obj or 0 0 R as a special case. Creating an object in
the file with 0 0 obj could cause various infinite loops. The PDF spec
doesn't allow for object 0. Having qpdf handle object 0 might be a
better fix, but changing all the places in the code that assume objid
== 0 means direct would be risky.
|
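The qpdf commit message above explains that object ID 0 is reserved internally as the "direct object" sentinel and is not a legal PDF object, so "0 0 obj" and "0 0 R" must be rejected as a special case. A trivial hedged sketch of that guard (not qpdf's actual C++ code):

```c
#include <stdbool.h>

/* An indirect reference "obj gen R" with obj == 0 is disallowed by the PDF
 * spec and collides with the internal direct-object sentinel, so a parser
 * should reject it (or treat it as null) rather than try to resolve it. */
static bool indirect_ref_is_valid(int objid, int generation)
{
    (void) generation;   /* the generation does not affect this rule */
    return objid != 0;
}
```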
xmlDumpElementContent(xmlBufferPtr buf, xmlElementContentPtr content, int glob) {
if (content == NULL) return;
if (glob) xmlBufferWriteChar(buf, "(");
switch (content->type) {
case XML_ELEMENT_CONTENT_PCDATA:
xmlBufferWriteChar(buf, "#PCDATA");
break;
case XML_ELEMENT_CONTENT_ELEMENT:
if (content->prefix != NULL) {
xmlBufferWriteCHAR(buf, content->prefix);
xmlBufferWriteChar(buf, ":");
}
xmlBufferWriteCHAR(buf, content->name);
break;
case XML_ELEMENT_CONTENT_SEQ:
if ((content->c1 != NULL) &&
((content->c1->type == XML_ELEMENT_CONTENT_OR) ||
(content->c1->type == XML_ELEMENT_CONTENT_SEQ)))
xmlDumpElementContent(buf, content->c1, 1);
else
xmlDumpElementContent(buf, content->c1, 0);
xmlBufferWriteChar(buf, " , ");
if ((content->c2 != NULL) &&
((content->c2->type == XML_ELEMENT_CONTENT_OR) ||
((content->c2->type == XML_ELEMENT_CONTENT_SEQ) &&
(content->c2->ocur != XML_ELEMENT_CONTENT_ONCE))))
xmlDumpElementContent(buf, content->c2, 1);
else
xmlDumpElementContent(buf, content->c2, 0);
break;
case XML_ELEMENT_CONTENT_OR:
if ((content->c1 != NULL) &&
((content->c1->type == XML_ELEMENT_CONTENT_OR) ||
(content->c1->type == XML_ELEMENT_CONTENT_SEQ)))
xmlDumpElementContent(buf, content->c1, 1);
else
xmlDumpElementContent(buf, content->c1, 0);
xmlBufferWriteChar(buf, " | ");
if ((content->c2 != NULL) &&
((content->c2->type == XML_ELEMENT_CONTENT_SEQ) ||
((content->c2->type == XML_ELEMENT_CONTENT_OR) &&
(content->c2->ocur != XML_ELEMENT_CONTENT_ONCE))))
xmlDumpElementContent(buf, content->c2, 1);
else
xmlDumpElementContent(buf, content->c2, 0);
break;
default:
xmlErrValid(NULL, XML_ERR_INTERNAL_ERROR,
"Internal: ELEMENT content corrupted invalid type\n",
NULL);
}
if (glob)
xmlBufferWriteChar(buf, ")");
switch (content->ocur) {
case XML_ELEMENT_CONTENT_ONCE:
break;
case XML_ELEMENT_CONTENT_OPT:
xmlBufferWriteChar(buf, "?");
break;
case XML_ELEMENT_CONTENT_MULT:
xmlBufferWriteChar(buf, "*");
break;
case XML_ELEMENT_CONTENT_PLUS:
xmlBufferWriteChar(buf, "+");
break;
}
}
| 0 |
[] |
libxml2
|
92b9e8c8b3787068565a1820ba575d042f9eec66
| 93,802,981,739,236,060,000,000,000,000,000,000,000 | 68 |
Fix type confusion in xmlValidateOneNamespace
Comment out code that casts xmlNsPtr to xmlAttrPtr. ID types on
namespace declarations make no practical sense anyway.
Fixes bug 780228.
Found with libFuzzer and ASan.
|
static void ldb_kv_request_done(struct ldb_kv_context *ctx, int error)
{
struct ldb_context *ldb;
struct ldb_request *req;
struct ldb_reply *ares;
ldb = ldb_module_get_ctx(ctx->module);
req = ctx->req;
/* if we already returned an error just return */
if (ldb_request_get_status(req) != LDB_SUCCESS) {
return;
}
ares = talloc_zero(req, struct ldb_reply);
if (!ares) {
ldb_oom(ldb);
req->callback(req, NULL);
return;
}
ares->type = LDB_REPLY_DONE;
ares->error = error;
req->callback(req, ares);
}
| 0 |
[
"CWE-20"
] |
samba
|
3c1fbb18321f61df44d7b0f0c7452ae230960293
| 26,913,606,822,740,033,000,000,000,000,000,000,000 | 25 |
CVE-2018-1140 ldb_tdb: Check for DN validity in add, rename and search
This ensures we fail with a good error code before an eventual ldb_dn_get_casefold() which
would otherwise fail.
Signed-off-by: Andrew Bartlett <[email protected]>
Reviewed-by: Douglas Bagnall <[email protected]>
BUG: https://bugzilla.samba.org/show_bug.cgi?id=13374
|
test_verified_unordered_chain (TestConnection *test,
gconstpointer data)
{
GTlsBackend *backend;
GTlsCertificate *server_cert;
GTlsCertificate *intermediate_cert;
GTlsCertificate *root_cert;
char *cert_data = NULL;
char *key_data = NULL;
GError *error = NULL;
backend = g_tls_backend_get_default ();
/* Prepare the intermediate cert (to be sent last, out of order)! */
intermediate_cert = g_tls_certificate_new_from_file (tls_test_file_path ("intermediate-ca.pem"),
&error);
g_assert_no_error (error);
g_assert_nonnull (intermediate_cert);
g_file_get_contents (tls_test_file_path ("ca.pem"), &cert_data, NULL, &error);
g_assert_no_error (error);
g_assert_nonnull (cert_data);
/* Prepare the root cert (to be sent in the middle of the chain). */
root_cert = g_initable_new (g_tls_backend_get_certificate_type (backend),
NULL, &error,
"issuer", intermediate_cert,
"certificate-pem", cert_data,
NULL);
g_assert_no_error (error);
g_assert_nonnull (root_cert);
g_clear_pointer (&cert_data, g_free);
g_file_get_contents (tls_test_file_path ("server-intermediate.pem"),
&cert_data, NULL, &error);
g_assert_no_error (error);
g_assert_nonnull (cert_data);
g_file_get_contents (tls_test_file_path ("server-intermediate-key.pem"),
&key_data, NULL, &error);
g_assert_no_error (error);
g_assert_nonnull (key_data);
/* Prepare the server cert. */
server_cert = g_initable_new (g_tls_backend_get_certificate_type (backend),
NULL, &error,
"issuer", root_cert,
"certificate-pem", cert_data,
"private-key-pem", key_data,
NULL);
g_assert_no_error (error);
g_assert_nonnull (server_cert);
g_object_unref (intermediate_cert);
g_object_unref (root_cert);
g_free (cert_data);
g_free (key_data);
test->server_certificate = server_cert;
test_verified_connection (test, data);
}
| 0 |
[
"CWE-295"
] |
glib-networking
|
29513946809590c4912550f6f8620468f9836d94
| 68,305,546,834,024,480,000,000,000,000,000,000,000 | 61 |
Return bad identity error if identity is unset
When the server-identity property of GTlsClientConnection is unset, the
documentation says we need to fail the certificate verification with
G_TLS_CERTIFICATE_BAD_IDENTITY. This is important because otherwise,
it's easy for applications to fail to specify server identity.
Unfortunately, we did not correctly implement the intended, documented
behavior. When server identity is missing, we check the validity of the
TLS certificate, but do not check if it corresponds to the expected
server (since we have no expected server). Then we assume the identity
is good, instead of returning bad identity, as documented. This means,
for example, that evil.com can present a valid certificate issued to
evil.com, and we would happily accept it for paypal.com.
Fixes #135
|
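The glib-networking message above says that when the server-identity property is unset, verification must fail with G_TLS_CERTIFICATE_BAD_IDENTITY rather than silently skipping the identity check. A hedged sketch of that documented behaviour (not the library's actual verification code):

```c
#include <gio/gio.h>

/* If the client connection has no expected server identity, add the
 * bad-identity flag instead of accepting any otherwise-valid certificate. */
static GTlsCertificateFlags
check_peer_identity (GTlsClientConnection *conn, GTlsCertificateFlags flags)
{
  GSocketConnectable *identity =
      g_tls_client_connection_get_server_identity (conn);

  if (identity == NULL)
    return flags | G_TLS_CERTIFICATE_BAD_IDENTITY;

  /* ...otherwise compare the peer certificate against 'identity'... */
  return flags;
}
```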
PHP_METHOD(Phar, count)
{
PHAR_ARCHIVE_OBJECT();
if (zend_parse_parameters_none() == FAILURE) {
return;
}
RETURN_LONG(zend_hash_num_elements(&phar_obj->arc.archive->manifest));
}
| 0 |
[
"CWE-119"
] |
php-src
|
13ad4d3e971807f9a58ab5933182907dc2958539
| 192,428,582,173,493,600,000,000,000,000,000,000,000 | 10 |
Fix bug #71354 - remove UMR when size is 0
|
fill_rtt_message(struct interface *ifp)
{
babel_interface_nfo *babel_ifp = babel_get_if_nfo(ifp);
if((babel_ifp->flags & BABEL_IF_TIMESTAMPS) &&
(babel_ifp->buffered_hello >= 0)) {
if(babel_ifp->sendbuf[babel_ifp->buffered_hello + 8] == SUBTLV_PADN &&
babel_ifp->sendbuf[babel_ifp->buffered_hello + 9] == 4) {
unsigned int time;
/* Change the type of sub-TLV. */
babel_ifp->sendbuf[babel_ifp->buffered_hello + 8] =
SUBTLV_TIMESTAMP;
gettime(&babel_now);
time = time_us(babel_now);
DO_HTONL(babel_ifp->sendbuf + babel_ifp->buffered_hello + 10, time);
return 1;
} else {
flog_err(EC_BABEL_PACKET, "No space left for timestamp sub-TLV (this shouldn't happen)");
return -1;
}
}
return 0;
}
| 0 |
[
"CWE-787"
] |
frr
|
c3793352a8d76d2eee1edc38a9a16c1c8a6573f4
| 29,522,717,861,958,330,000,000,000,000,000,000,000 | 22 |
babeld: fix #10502 #10503 by repairing the checks on length
This patch repairs the checking conditions on length in four functions:
babel_packet_examin, parse_hello_subtlv, parse_ihu_subtlv, and parse_update_subtlv
Signed-off-by: qingkaishi <[email protected]>
|
int x86_emulate_instruction(struct kvm_vcpu *vcpu,
unsigned long cr2,
int emulation_type,
void *insn,
int insn_len)
{
int r;
struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
bool writeback = true;
kvm_clear_exception_queue(vcpu);
if (!(emulation_type & EMULTYPE_NO_DECODE)) {
init_emulate_ctxt(vcpu);
ctxt->interruptibility = 0;
ctxt->have_exception = false;
ctxt->perm_ok = false;
ctxt->only_vendor_specific_insn
= emulation_type & EMULTYPE_TRAP_UD;
r = x86_decode_insn(ctxt, insn, insn_len);
trace_kvm_emulate_insn_start(vcpu);
++vcpu->stat.insn_emulation;
if (r != EMULATION_OK) {
if (emulation_type & EMULTYPE_TRAP_UD)
return EMULATE_FAIL;
if (reexecute_instruction(vcpu, cr2))
return EMULATE_DONE;
if (emulation_type & EMULTYPE_SKIP)
return EMULATE_FAIL;
return handle_emulation_failure(vcpu);
}
}
if (emulation_type & EMULTYPE_SKIP) {
kvm_rip_write(vcpu, ctxt->_eip);
return EMULATE_DONE;
}
if (retry_instruction(ctxt, cr2, emulation_type))
return EMULATE_DONE;
/* this is needed for vmware backdoor interface to work since it
changes registers values during IO operation */
if (vcpu->arch.emulate_regs_need_sync_from_vcpu) {
vcpu->arch.emulate_regs_need_sync_from_vcpu = false;
memcpy(ctxt->regs, vcpu->arch.regs, sizeof ctxt->regs);
}
restart:
r = x86_emulate_insn(ctxt);
if (r == EMULATION_INTERCEPTED)
return EMULATE_DONE;
if (r == EMULATION_FAILED) {
if (reexecute_instruction(vcpu, cr2))
return EMULATE_DONE;
return handle_emulation_failure(vcpu);
}
if (ctxt->have_exception) {
inject_emulated_exception(vcpu);
r = EMULATE_DONE;
} else if (vcpu->arch.pio.count) {
if (!vcpu->arch.pio.in)
vcpu->arch.pio.count = 0;
else
writeback = false;
r = EMULATE_DO_MMIO;
} else if (vcpu->mmio_needed) {
if (!vcpu->mmio_is_write)
writeback = false;
r = EMULATE_DO_MMIO;
} else if (r == EMULATION_RESTART)
goto restart;
else
r = EMULATE_DONE;
if (writeback) {
toggle_interruptibility(vcpu, ctxt->interruptibility);
kvm_set_rflags(vcpu, ctxt->eflags);
kvm_make_request(KVM_REQ_EVENT, vcpu);
memcpy(vcpu->arch.regs, ctxt->regs, sizeof ctxt->regs);
vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
kvm_rip_write(vcpu, ctxt->eip);
} else
vcpu->arch.emulate_regs_need_sync_to_vcpu = true;
return r;
}
| 0 |
[] |
kvm
|
0769c5de24621141c953fbe1f943582d37cb4244
| 55,790,626,577,104,320,000,000,000,000,000,000,000 | 94 |
KVM: x86: extend "struct x86_emulate_ops" with "get_cpuid"
In order to be able to perform checks on CPU-specific properties
within the emulator, function "get_cpuid" is introduced.
With "get_cpuid" it is possible to virtually call the guests
"cpuid"-opcode without changing the VM's context.
[mtosatti: cleanup/beautify code]
Signed-off-by: Stephan Baerwolf <[email protected]>
Signed-off-by: Marcelo Tosatti <[email protected]>
|
string t_cpp_generator::argument_list(t_struct* tstruct, bool name_params, bool start_comma) {
string result = "";
const vector<t_field*>& fields = tstruct->get_members();
vector<t_field*>::const_iterator f_iter;
bool first = !start_comma;
for (f_iter = fields.begin(); f_iter != fields.end(); ++f_iter) {
if (first) {
first = false;
} else {
result += ", ";
}
result += type_name((*f_iter)->get_type(), false, true) + " "
+ (name_params ? (*f_iter)->get_name() : "/* " + (*f_iter)->get_name() + " */");
}
return result;
}
| 0 |
[
"CWE-20"
] |
thrift
|
cfaadcc4adcfde2a8232c62ec89870b73ef40df1
| 209,301,740,492,246,940,000,000,000,000,000,000,000 | 17 |
THRIFT-3231 CPP: Limit recursion depth to 64
Client: cpp
Patch: Ben Craig <[email protected]>
|
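The Thrift message above only states that recursion depth is limited to 64. A hypothetical sketch of such a depth guard on a recursive structure walk (generic names, not Thrift's generated code):

```c
#include <stdbool.h>
#include <stddef.h>

#define MAX_RECURSION_DEPTH 64   /* the limit named in the commit message */

struct node { struct node *child; };

/* Thread a depth counter through the recursive walk and bail out once the
 * limit is reached instead of recursing without bound on hostile input. */
static bool walk(const struct node *n, int depth)
{
    if (depth > MAX_RECURSION_DEPTH)
        return false;
    if (n == NULL)
        return true;
    return walk(n->child, depth + 1);
}
```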
static inline void unregister_rt_sched_group(struct task_group *tg, int cpu)
{
}
| 0 |
[] |
linux-2.6
|
8f1bc385cfbab474db6c27b5af1e439614f3025c
| 297,819,468,381,532,100,000,000,000,000,000,000,000 | 3 |
sched: fair: weight calculations
In order to level the hierarchy, we need to calculate load based on the
root view. That is, each task's load is in the same unit.
      A
     / \
    B   1
   / \
  2   3

To compute 1's load we do:

     weight(1)
   --------------
    rq_weight(A)

To compute 2's load we do:

    weight(2)      weight(B)
  ------------ * ------------
  rq_weight(B)   rw_weight(A)
This yields load fractions in comparable units.
The consequence is that it changes virtual time. We used to have:
                 time_{i}
   vtime_{i} = ------------
                weight_{i}

   vtime = \Sum vtime_{i} = time / rq_weight.
But with the new way of load calculation we get that vtime equals time.
Signed-off-by: Peter Zijlstra <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
|
static int rtw_wx_get_name(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
struct adapter *padapter = rtw_netdev_priv(dev);
u32 ht_ielen = 0;
char *p;
u8 ht_cap = false;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct wlan_bssid_ex *pcur_bss = &pmlmepriv->cur_network.network;
NDIS_802_11_RATES_EX *prates = NULL;
RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("cmd_code =%x\n", info->cmd));
if (check_fwstate(pmlmepriv, _FW_LINKED | WIFI_ADHOC_MASTER_STATE)) {
/* parsing HT_CAP_IE */
p = rtw_get_ie(&pcur_bss->ies[12], WLAN_EID_HT_CAPABILITY, &ht_ielen, pcur_bss->ie_length - 12);
if (p && ht_ielen > 0)
ht_cap = true;
prates = &pcur_bss->SupportedRates;
if (rtw_is_cckratesonly_included((u8 *)prates)) {
if (ht_cap)
snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11bn");
else
snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11b");
} else if (rtw_is_cckrates_included((u8 *)prates)) {
if (ht_cap)
snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11bgn");
else
snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11bg");
} else {
if (ht_cap)
snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11gn");
else
snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11g");
}
} else {
snprintf(wrqu->name, IFNAMSIZ, "unassociated");
}
return 0;
}
| 0 |
[
"CWE-787"
] |
linux
|
74b6b20df8cfe90ada777d621b54c32e69e27cd7
| 8,859,778,260,170,401,000,000,000,000,000,000,000 | 43 |
staging: rtl8188eu: prevent ->ssid overflow in rtw_wx_set_scan()
This code has a check to prevent read overflow but it needs another
check to prevent writing beyond the end of the ->ssid[] array.
Fixes: a2c60d42d97c ("staging: r8188eu: Add files for new driver - part 16")
Signed-off-by: Dan Carpenter <[email protected]>
Cc: stable <[email protected]>
Link: https://lore.kernel.org/r/YEHymwsnHewzoam7@mwanda
Signed-off-by: Greg Kroah-Hartman <[email protected]>
|
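The rtl8188eu message above notes that the scan handler checked the read side but still needed a bound on writes into the ->ssid[] array. A hedged sketch of the missing write-side clamp (IW_ESSID_MAX_SIZE is the usual wireless-extensions SSID capacity; the helper name is mine):

```c
#include <stddef.h>
#include <string.h>

#define IW_ESSID_MAX_SIZE 32   /* wireless-extensions SSID capacity */

/* Clamp the copied length to the destination capacity as well as the
 * source length, so an oversized SSID element cannot write past the end
 * of the ssid[] array. */
static void copy_ssid(char *dst_ssid, const char *ie, size_t ie_len)
{
    size_t n = ie_len < IW_ESSID_MAX_SIZE ? ie_len : IW_ESSID_MAX_SIZE;
    memcpy(dst_ssid, ie, n);
}
```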
parser_tagged_template_literal_freeze_array (ecma_object_t *obj_p)
{
JERRY_ASSERT (ecma_get_object_type (obj_p) == ECMA_OBJECT_TYPE_ARRAY);
ecma_op_ordinary_object_prevent_extensions (obj_p);
ecma_extended_object_t *ext_obj_p = (ecma_extended_object_t *) obj_p;
ext_obj_p->u.array.length_prop_and_hole_count &= (uint32_t) ~ECMA_PROPERTY_FLAG_WRITABLE;
} /* parser_tagged_template_literal_freeze_array */
| 0 |
[
"CWE-416"
] |
jerryscript
|
3bcd48f72d4af01d1304b754ef19fe1a02c96049
| 135,614,721,707,308,520,000,000,000,000,000,000,000 | 7 |
Improve parse_identifier (#4691)
Ascii string length is no longer computed during string allocation.
JerryScript-DCO-1.0-Signed-off-by: Daniel Batiz [email protected]
|
static int decode_update_thread_context(AVCodecContext *dst,
const AVCodecContext *src)
{
H264Context *h = dst->priv_data, *h1 = src->priv_data;
int inited = h->context_initialized, err = 0;
int context_reinitialized = 0;
int i, ret;
if (dst == src || !h1->context_initialized)
return 0;
if (inited &&
(h->width != h1->width ||
h->height != h1->height ||
h->mb_width != h1->mb_width ||
h->mb_height != h1->mb_height ||
h->sps.bit_depth_luma != h1->sps.bit_depth_luma ||
h->sps.chroma_format_idc != h1->sps.chroma_format_idc ||
h->sps.colorspace != h1->sps.colorspace)) {
/* set bits_per_raw_sample to the previous value. the check for changed
* bit depth in h264_set_parameter_from_sps() uses it and sets it to
* the current value */
h->avctx->bits_per_raw_sample = h->sps.bit_depth_luma;
av_freep(&h->bipred_scratchpad);
h->width = h1->width;
h->height = h1->height;
h->mb_height = h1->mb_height;
h->mb_width = h1->mb_width;
h->mb_num = h1->mb_num;
h->mb_stride = h1->mb_stride;
h->b_stride = h1->b_stride;
if ((err = h264_slice_header_init(h, 1)) < 0) {
av_log(h->avctx, AV_LOG_ERROR, "h264_slice_header_init() failed");
return err;
}
context_reinitialized = 1;
/* update linesize on resize. The decoder doesn't
* necessarily call h264_frame_start in the new thread */
h->linesize = h1->linesize;
h->uvlinesize = h1->uvlinesize;
/* copy block_offset since frame_start may not be called */
memcpy(h->block_offset, h1->block_offset, sizeof(h->block_offset));
}
if (!inited) {
for (i = 0; i < MAX_SPS_COUNT; i++)
av_freep(h->sps_buffers + i);
for (i = 0; i < MAX_PPS_COUNT; i++)
av_freep(h->pps_buffers + i);
memcpy(h, h1, sizeof(*h1));
memset(h->sps_buffers, 0, sizeof(h->sps_buffers));
memset(h->pps_buffers, 0, sizeof(h->pps_buffers));
memset(&h->er, 0, sizeof(h->er));
memset(&h->me, 0, sizeof(h->me));
memset(&h->mb, 0, sizeof(h->mb));
memset(&h->mb_luma_dc, 0, sizeof(h->mb_luma_dc));
memset(&h->mb_padding, 0, sizeof(h->mb_padding));
h->context_initialized = 0;
memset(&h->cur_pic, 0, sizeof(h->cur_pic));
av_frame_unref(&h->cur_pic.f);
h->cur_pic.tf.f = &h->cur_pic.f;
h->avctx = dst;
h->DPB = NULL;
h->qscale_table_pool = NULL;
h->mb_type_pool = NULL;
h->ref_index_pool = NULL;
h->motion_val_pool = NULL;
ret = ff_h264_alloc_tables(h);
if (ret < 0) {
av_log(dst, AV_LOG_ERROR, "Could not allocate memory for h264\n");
return ret;
}
ret = context_init(h);
if (ret < 0) {
av_log(dst, AV_LOG_ERROR, "context_init() failed.\n");
return ret;
}
for (i = 0; i < 2; i++) {
h->rbsp_buffer[i] = NULL;
h->rbsp_buffer_size[i] = 0;
}
h->bipred_scratchpad = NULL;
h->edge_emu_buffer = NULL;
h->thread_context[0] = h;
h->context_initialized = 1;
}
h->avctx->coded_height = h1->avctx->coded_height;
h->avctx->coded_width = h1->avctx->coded_width;
h->avctx->width = h1->avctx->width;
h->avctx->height = h1->avctx->height;
h->coded_picture_number = h1->coded_picture_number;
h->first_field = h1->first_field;
h->picture_structure = h1->picture_structure;
h->qscale = h1->qscale;
h->droppable = h1->droppable;
h->low_delay = h1->low_delay;
for (i = 0; i < MAX_PICTURE_COUNT; i++) {
unref_picture(h, &h->DPB[i]);
if (h1->DPB[i].f.buf[0] &&
(ret = ref_picture(h, &h->DPB[i], &h1->DPB[i])) < 0)
return ret;
}
h->cur_pic_ptr = REBASE_PICTURE(h1->cur_pic_ptr, h, h1);
unref_picture(h, &h->cur_pic);
if ((ret = ref_picture(h, &h->cur_pic, &h1->cur_pic)) < 0)
return ret;
h->workaround_bugs = h1->workaround_bugs;
h->low_delay = h1->low_delay;
h->droppable = h1->droppable;
/* frame_start may not be called for the next thread (if it's decoding
* a bottom field) so this has to be allocated here */
err = alloc_scratch_buffers(h, h1->linesize);
if (err < 0)
return err;
// extradata/NAL handling
h->is_avc = h1->is_avc;
// SPS/PPS
if ((ret = copy_parameter_set((void **)h->sps_buffers,
(void **)h1->sps_buffers,
MAX_SPS_COUNT, sizeof(SPS))) < 0)
return ret;
h->sps = h1->sps;
if ((ret = copy_parameter_set((void **)h->pps_buffers,
(void **)h1->pps_buffers,
MAX_PPS_COUNT, sizeof(PPS))) < 0)
return ret;
h->pps = h1->pps;
// Dequantization matrices
// FIXME these are big - can they be only copied when PPS changes?
copy_fields(h, h1, dequant4_buffer, dequant4_coeff);
for (i = 0; i < 6; i++)
h->dequant4_coeff[i] = h->dequant4_buffer[0] +
(h1->dequant4_coeff[i] - h1->dequant4_buffer[0]);
for (i = 0; i < 6; i++)
h->dequant8_coeff[i] = h->dequant8_buffer[0] +
(h1->dequant8_coeff[i] - h1->dequant8_buffer[0]);
h->dequant_coeff_pps = h1->dequant_coeff_pps;
// POC timing
copy_fields(h, h1, poc_lsb, redundant_pic_count);
// reference lists
copy_fields(h, h1, short_ref, cabac_init_idc);
copy_picture_range(h->short_ref, h1->short_ref, 32, h, h1);
copy_picture_range(h->long_ref, h1->long_ref, 32, h, h1);
copy_picture_range(h->delayed_pic, h1->delayed_pic,
MAX_DELAYED_PIC_COUNT + 2, h, h1);
h->last_slice_type = h1->last_slice_type;
if (context_reinitialized)
h264_set_parameter_from_sps(h);
if (!h->cur_pic_ptr)
return 0;
if (!h->droppable) {
err = ff_h264_execute_ref_pic_marking(h, h->mmco, h->mmco_index);
h->prev_poc_msb = h->poc_msb;
h->prev_poc_lsb = h->poc_lsb;
}
h->prev_frame_num_offset = h->frame_num_offset;
h->prev_frame_num = h->frame_num;
h->outputed_poc = h->next_outputed_poc;
h->recovery_frame = h1->recovery_frame;
h->frame_recovered = h1->frame_recovered;
return err;
}
| 0 |
[
"CWE-787"
] |
FFmpeg
|
1f097d168d9cad473dd44010a337c1413a9cd198
| 118,280,190,716,967,550,000,000,000,000,000,000,000 | 196 |
h264: reset data partitioning at the beginning of each decode call
Prevents using GetBitContexts with data from previous calls.
Fixes access to freed memory.
Found-by: Mateusz "j00ru" Jurczyk and Gynvael Coldwind
CC:[email protected]
|
bool sql_slave_killed(THD* thd, Relay_log_info* rli)
{
bool ret= FALSE;
bool is_parallel_warn= FALSE;
DBUG_ENTER("sql_slave_killed");
DBUG_ASSERT(rli->info_thd == thd);
DBUG_ASSERT(rli->slave_running == 1);
if (abort_loop || thd->killed || rli->abort_slave)
{
is_parallel_warn= (rli->is_parallel_exec() &&
(rli->is_mts_in_group() || thd->killed));
/*
Slave can execute stop being in one of two MTS or Single-Threaded mode.
The modes define different criteria to accept the stop.
In particular that relates to the concept of groupping.
Killed Coordinator thread expects the worst so it warns on
possible consistency issue.
*/
if (is_parallel_warn ||
(!rli->is_parallel_exec() &&
thd->transaction.all.cannot_safely_rollback() && rli->is_in_group()))
{
char msg_stopped[]=
"... Slave SQL Thread stopped with incomplete event group "
"having non-transactional changes. "
"If the group consists solely of row-based events, you can try "
"to restart the slave with --slave-exec-mode=IDEMPOTENT, which "
"ignores duplicate key, key not found, and similar errors (see "
"documentation for details).";
char msg_stopped_mts[]=
"... The slave coordinator and worker threads are stopped, possibly "
"leaving data in inconsistent state. A restart should "
"restore consistency automatically, although using non-transactional "
"storage for data or info tables or DDL queries could lead to problems. "
"In such cases you have to examine your data (see documentation for "
"details).";
ret= TRUE;
if (rli->abort_slave)
{
DBUG_PRINT("info", ("Request to stop slave SQL Thread received while "
"applying an MTS group or a group that "
"has non-transactional "
"changes; waiting for completion of the group ... "));
/*
Slave sql thread shutdown in face of unfinished group modified
Non-trans table is handled via a timer. The slave may eventually
give out to complete the current group and in that case there
might be issues at consequent slave restart, see the error message.
WL#2975 offers a robust solution requiring to store the last exectuted
event's coordinates along with the group's coordianates
instead of waiting with @c last_event_start_time the timer.
*/
if (rli->last_event_start_time == 0)
rli->last_event_start_time= my_time(0);
ret= difftime(my_time(0), rli->last_event_start_time) <=
SLAVE_WAIT_GROUP_DONE ? FALSE : TRUE;
DBUG_EXECUTE_IF("stop_slave_middle_group",
DBUG_EXECUTE_IF("incomplete_group_in_relay_log",
ret= TRUE;);); // time is over
if (!ret && !rli->reported_unsafe_warning)
{
rli->report(WARNING_LEVEL, 0,
!is_parallel_warn ?
"Request to stop slave SQL Thread received while "
"applying a group that has non-transactional "
"changes; waiting for completion of the group ... "
:
"Coordinator thread of multi-threaded slave is being "
"stopped in the middle of assigning a group of events; "
"deferring to exit until the group completion ... ");
rli->reported_unsafe_warning= true;
}
}
if (ret)
{
if (is_parallel_warn)
rli->report(!rli->is_error() ? ERROR_LEVEL :
WARNING_LEVEL, // an error was reported by Worker
ER_MTS_INCONSISTENT_DATA,
ER(ER_MTS_INCONSISTENT_DATA),
msg_stopped_mts);
else
rli->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR,
ER(ER_SLAVE_FATAL_ERROR), msg_stopped);
}
}
else
{
ret= TRUE;
}
}
if (ret)
{
rli->last_event_start_time= 0;
if (rli->mts_group_status == Relay_log_info::MTS_IN_GROUP)
{
rli->mts_group_status= Relay_log_info::MTS_KILLED_GROUP;
}
}
DBUG_RETURN(ret);
}
| 0 |
[
"CWE-284",
"CWE-295"
] |
mysql-server
|
3bd5589e1a5a93f9c224badf983cd65c45215390
| 165,340,283,801,046,740,000,000,000,000,000,000,000 | 109 |
WL#6791 : Redefine client --ssl option to imply enforced encryption
# Changed the meaning of the --ssl=1 option of all client binaries
to mean force ssl, not try ssl and fail over to unencrypted
# Added a new MYSQL_OPT_SSL_ENFORCE mysql_options()
option to specify that an ssl connection is required.
# Added a new macro SSL_SET_OPTIONS() to the client
SSL handling headers that sets all the relevant SSL options at
once.
# Revamped all of the current native clients to use the new macro
# Removed some Windows line endings.
# Added proper handling of the new option into the ssl helper
headers.
# If SSL is mandatory assume that the media is secure enough
for the sha256 plugin to do unencrypted password exchange even
before establishing a connection.
# Set the default ssl cipher to DHE-RSA-AES256-SHA if none is
specified.
# updated test cases that require a non-default cipher to spawn
a mysql command line tool binary since mysqltest has no support
for specifying ciphers.
# updated the replication slave connection code to always enforce
SSL if any of the SSL config options is present.
# test cases added and updated.
# added a mysql_get_option() API to return mysql_options()
values. Used the new API inside the sha256 plugin.
# Fixed compilation warnings because of unused variables.
# Fixed test failures (mysql_ssl and bug13115401)
# Fixed whitespace issues.
# Fully implemented the mysql_get_option() function.
# Added a test case for mysql_get_option()
# fixed some trailing whitespace issues
# fixed some uint/int warnings in mysql_client_test.c
# removed shared memory option from non-windows get_options
tests
# moved MYSQL_OPT_LOCAL_INFILE to the uint options
|
widget_set_best_colormap (GtkWidget *widget)
{
GdkColormap *colormap;
g_return_if_fail (widget != NULL);
colormap = get_best_colormap_for_screen (gtk_widget_get_screen (widget));
if (colormap != NULL) {
gtk_widget_set_colormap (widget, colormap);
g_object_unref (colormap);
}
}
| 0 |
[] |
gnome-screensaver
|
a5f66339be6719c2b8fc478a1d5fc6545297d950
| 6,683,192,037,283,422,000,000,000,000,000,000,000 | 12 |
Ensure keyboard grab and unlock dialog exist after monitor removal
gnome-screensaver currently doesn't deal with monitors getting
removed properly. If the unlock dialog is on the removed monitor
then the unlock dialog and its associated keyboard grab are not
moved to an existing monitor when the monitor removal is processed.
This means that users can gain access to the locked system by placing
the mouse pointer on an external monitor and then disconnecting the
external monitor.
CVE-2010-0414
https://bugzilla.gnome.org/show_bug.cgi?id=609337
|
SAPI_API SAPI_POST_HANDLER_FUNC(php_std_post_handler)
{
zval *arr = (zval *) arg;
php_stream *s = SG(request_info).request_body;
post_var_data_t post_data;
if (s && SUCCESS == php_stream_rewind(s)) {
memset(&post_data, 0, sizeof(post_data));
while (!php_stream_eof(s)) {
char buf[BUFSIZ] = {0};
size_t len = php_stream_read(s, buf, BUFSIZ);
if (len && len != (size_t) -1) {
smart_str_appendl(&post_data.str, buf, len);
if (SUCCESS != add_post_vars(arr, &post_data, 0 TSRMLS_CC)) {
if (post_data.str.c) {
efree(post_data.str.c);
}
return;
}
}
if (len != BUFSIZ){
break;
}
}
add_post_vars(arr, &post_data, 1 TSRMLS_CC);
if (post_data.str.c) {
efree(post_data.str.c);
}
}
}
| 0 |
[] |
php-src
|
8d1099ac0574f3a42036085641c2df03a1d5f731
| 139,899,877,261,047,600,000,000,000,000,000,000,000 | 35 |
duplicate value's string for the SAPI filter
reported by sesser; tyrael, do you take care of the bug/NEWS?
|
static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
netdev_features_t features)
{
struct sk_buff *segs = ERR_PTR(-EINVAL);
unsigned int mss;
__wsum csum;
struct udphdr *uh;
struct iphdr *iph;
if (skb->encapsulation &&
(skb_shinfo(skb)->gso_type &
(SKB_GSO_UDP_TUNNEL|SKB_GSO_UDP_TUNNEL_CSUM))) {
segs = skb_udp_tunnel_segment(skb, features, false);
goto out;
}
if (!(skb_shinfo(skb)->gso_type & (SKB_GSO_UDP | SKB_GSO_UDP_L4)))
goto out;
if (!pskb_may_pull(skb, sizeof(struct udphdr)))
goto out;
if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
return __udp_gso_segment(skb, features);
mss = skb_shinfo(skb)->gso_size;
if (unlikely(skb->len <= mss))
goto out;
/* Do software UFO. Complete and fill in the UDP checksum as
* HW cannot do checksum of UDP packets sent as multiple
* IP fragments.
*/
uh = udp_hdr(skb);
iph = ip_hdr(skb);
uh->check = 0;
csum = skb_checksum(skb, 0, skb->len, 0);
uh->check = udp_v4_check(skb->len, iph->saddr, iph->daddr, csum);
if (uh->check == 0)
uh->check = CSUM_MANGLED_0;
skb->ip_summed = CHECKSUM_UNNECESSARY;
/* If there is no outer header we can fake a checksum offload
* due to the fact that we have already done the checksum in
* software prior to segmenting the frame.
*/
if (!skb->encap_hdr_csum)
features |= NETIF_F_HW_CSUM;
/* Fragment the skb. IP headers of the fragments are updated in
* inet_gso_segment()
*/
segs = skb_segment(skb, features);
out:
return segs;
}
| 0 |
[
"CWE-787"
] |
net
|
4dd2b82d5adfbe0b1587ccad7a8f76d826120f37
| 5,134,567,565,706,781,400,000,000,000,000,000,000 | 59 |
udp: fix GRO packet of death
syzbot was able to crash host by sending UDP packets with a 0 payload.
TCP does not have this issue since we do not aggregate packets without
payload.
Since dev_gro_receive() sets gso_size based on skb_gro_len(skb)
it seems not worth trying to cope with padded packets.
BUG: KASAN: slab-out-of-bounds in skb_gro_receive+0xf5f/0x10e0 net/core/skbuff.c:3826
Read of size 16 at addr ffff88808893fff0 by task syz-executor612/7889
CPU: 0 PID: 7889 Comm: syz-executor612 Not tainted 5.1.0-rc7+ #96
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
Call Trace:
__dump_stack lib/dump_stack.c:77 [inline]
dump_stack+0x172/0x1f0 lib/dump_stack.c:113
print_address_description.cold+0x7c/0x20d mm/kasan/report.c:187
kasan_report.cold+0x1b/0x40 mm/kasan/report.c:317
__asan_report_load16_noabort+0x14/0x20 mm/kasan/generic_report.c:133
skb_gro_receive+0xf5f/0x10e0 net/core/skbuff.c:3826
udp_gro_receive_segment net/ipv4/udp_offload.c:382 [inline]
call_gro_receive include/linux/netdevice.h:2349 [inline]
udp_gro_receive+0xb61/0xfd0 net/ipv4/udp_offload.c:414
udp4_gro_receive+0x763/0xeb0 net/ipv4/udp_offload.c:478
inet_gro_receive+0xe72/0x1110 net/ipv4/af_inet.c:1510
dev_gro_receive+0x1cd0/0x23c0 net/core/dev.c:5581
napi_gro_frags+0x36b/0xd10 net/core/dev.c:5843
tun_get_user+0x2f24/0x3fb0 drivers/net/tun.c:1981
tun_chr_write_iter+0xbd/0x156 drivers/net/tun.c:2027
call_write_iter include/linux/fs.h:1866 [inline]
do_iter_readv_writev+0x5e1/0x8e0 fs/read_write.c:681
do_iter_write fs/read_write.c:957 [inline]
do_iter_write+0x184/0x610 fs/read_write.c:938
vfs_writev+0x1b3/0x2f0 fs/read_write.c:1002
do_writev+0x15e/0x370 fs/read_write.c:1037
__do_sys_writev fs/read_write.c:1110 [inline]
__se_sys_writev fs/read_write.c:1107 [inline]
__x64_sys_writev+0x75/0xb0 fs/read_write.c:1107
do_syscall_64+0x103/0x610 arch/x86/entry/common.c:290
entry_SYSCALL_64_after_hwframe+0x49/0xbe
RIP: 0033:0x441cc0
Code: 05 48 3d 01 f0 ff ff 0f 83 9d 09 fc ff c3 66 2e 0f 1f 84 00 00 00 00 00 66 90 83 3d 51 93 29 00 00 75 14 b8 14 00 00 00 0f 05 <48> 3d 01 f0 ff ff 0f 83 74 09 fc ff c3 48 83 ec 08 e8 ba 2b 00 00
RSP: 002b:00007ffe8c716118 EFLAGS: 00000246 ORIG_RAX: 0000000000000014
RAX: ffffffffffffffda RBX: 00007ffe8c716150 RCX: 0000000000441cc0
RDX: 0000000000000001 RSI: 00007ffe8c716170 RDI: 00000000000000f0
RBP: 0000000000000000 R08: 000000000000ffff R09: 0000000000a64668
R10: 0000000020000040 R11: 0000000000000246 R12: 000000000000c2d9
R13: 0000000000402b50 R14: 0000000000000000 R15: 0000000000000000
Allocated by task 5143:
save_stack+0x45/0xd0 mm/kasan/common.c:75
set_track mm/kasan/common.c:87 [inline]
__kasan_kmalloc mm/kasan/common.c:497 [inline]
__kasan_kmalloc.constprop.0+0xcf/0xe0 mm/kasan/common.c:470
kasan_slab_alloc+0xf/0x20 mm/kasan/common.c:505
slab_post_alloc_hook mm/slab.h:437 [inline]
slab_alloc mm/slab.c:3393 [inline]
kmem_cache_alloc+0x11a/0x6f0 mm/slab.c:3555
mm_alloc+0x1d/0xd0 kernel/fork.c:1030
bprm_mm_init fs/exec.c:363 [inline]
__do_execve_file.isra.0+0xaa3/0x23f0 fs/exec.c:1791
do_execveat_common fs/exec.c:1865 [inline]
do_execve fs/exec.c:1882 [inline]
__do_sys_execve fs/exec.c:1958 [inline]
__se_sys_execve fs/exec.c:1953 [inline]
__x64_sys_execve+0x8f/0xc0 fs/exec.c:1953
do_syscall_64+0x103/0x610 arch/x86/entry/common.c:290
entry_SYSCALL_64_after_hwframe+0x49/0xbe
Freed by task 5351:
save_stack+0x45/0xd0 mm/kasan/common.c:75
set_track mm/kasan/common.c:87 [inline]
__kasan_slab_free+0x102/0x150 mm/kasan/common.c:459
kasan_slab_free+0xe/0x10 mm/kasan/common.c:467
__cache_free mm/slab.c:3499 [inline]
kmem_cache_free+0x86/0x260 mm/slab.c:3765
__mmdrop+0x238/0x320 kernel/fork.c:677
mmdrop include/linux/sched/mm.h:49 [inline]
finish_task_switch+0x47b/0x780 kernel/sched/core.c:2746
context_switch kernel/sched/core.c:2880 [inline]
__schedule+0x81b/0x1cc0 kernel/sched/core.c:3518
preempt_schedule_irq+0xb5/0x140 kernel/sched/core.c:3745
retint_kernel+0x1b/0x2d
arch_local_irq_restore arch/x86/include/asm/paravirt.h:767 [inline]
kmem_cache_free+0xab/0x260 mm/slab.c:3766
anon_vma_chain_free mm/rmap.c:134 [inline]
unlink_anon_vmas+0x2ba/0x870 mm/rmap.c:401
free_pgtables+0x1af/0x2f0 mm/memory.c:394
exit_mmap+0x2d1/0x530 mm/mmap.c:3144
__mmput kernel/fork.c:1046 [inline]
mmput+0x15f/0x4c0 kernel/fork.c:1067
exec_mmap fs/exec.c:1046 [inline]
flush_old_exec+0x8d9/0x1c20 fs/exec.c:1279
load_elf_binary+0x9bc/0x53f0 fs/binfmt_elf.c:864
search_binary_handler fs/exec.c:1656 [inline]
search_binary_handler+0x17f/0x570 fs/exec.c:1634
exec_binprm fs/exec.c:1698 [inline]
__do_execve_file.isra.0+0x1394/0x23f0 fs/exec.c:1818
do_execveat_common fs/exec.c:1865 [inline]
do_execve fs/exec.c:1882 [inline]
__do_sys_execve fs/exec.c:1958 [inline]
__se_sys_execve fs/exec.c:1953 [inline]
__x64_sys_execve+0x8f/0xc0 fs/exec.c:1953
do_syscall_64+0x103/0x610 arch/x86/entry/common.c:290
entry_SYSCALL_64_after_hwframe+0x49/0xbe
The buggy address belongs to the object at ffff88808893f7c0
which belongs to the cache mm_struct of size 1496
The buggy address is located 600 bytes to the right of
1496-byte region [ffff88808893f7c0, ffff88808893fd98)
The buggy address belongs to the page:
page:ffffea0002224f80 count:1 mapcount:0 mapping:ffff88821bc40ac0 index:0xffff88808893f7c0 compound_mapcount: 0
flags: 0x1fffc0000010200(slab|head)
raw: 01fffc0000010200 ffffea00025b4f08 ffffea00027b9d08 ffff88821bc40ac0
raw: ffff88808893f7c0 ffff88808893e440 0000000100000001 0000000000000000
page dumped because: kasan: bad access detected
Memory state around the buggy address:
ffff88808893fe80: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
ffff88808893ff00: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
>ffff88808893ff80: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
^
ffff888088940000: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
ffff888088940080: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
Fixes: e20cf8d3f1f7 ("udp: implement GRO for plain UDP sockets.")
Signed-off-by: Eric Dumazet <[email protected]>
Cc: Paolo Abeni <[email protected]>
Reported-by: syzbot <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
__acquires(fc->lock)
{
while (!list_empty(&fc->io)) {
struct fuse_req *req =
list_entry(fc->io.next, struct fuse_req, list);
void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
req->aborted = 1;
req->out.h.error = -ECONNABORTED;
req->state = FUSE_REQ_FINISHED;
list_del_init(&req->list);
wake_up(&req->waitq);
if (end) {
req->end = NULL;
__fuse_get_request(req);
spin_unlock(&fc->lock);
wait_event(req->waitq, !req->locked);
end(fc, req);
fuse_put_request(fc, req);
spin_lock(&fc->lock);
}
}
}
| 0 |
[
"CWE-120",
"CWE-119",
"CWE-787"
] |
linux
|
c2183d1e9b3f313dd8ba2b1b0197c8d9fb86a7ae
| 246,703,613,125,362,700,000,000,000,000,000,000,000 | 23 |
fuse: check size of FUSE_NOTIFY_INVAL_ENTRY message
FUSE_NOTIFY_INVAL_ENTRY didn't check the length of the write so the
message processing could overrun and result in a "kernel BUG at
fs/fuse/dev.c:629!"
Reported-by: Han-Wen Nienhuys <[email protected]>
Signed-off-by: Miklos Szeredi <[email protected]>
CC: [email protected]
|
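The length check the commit describes amounts to validating a size field carried inside the notification against the bytes actually available before copying the name. Below is a minimal user-space sketch of that pattern; the struct and function names (inval_entry_hdr, parse_inval_entry) are invented for illustration and are not the real FUSE interfaces.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical wire header: parent nodeid followed by a name length. */
struct inval_entry_hdr {
    uint64_t parent;
    uint32_t namelen;
};

/* Returns 0 on success, -1 if the message lies about its size. */
static int parse_inval_entry(const unsigned char *buf, size_t remaining,
                             char *name_out, size_t name_cap)
{
    struct inval_entry_hdr hdr;

    if (remaining < sizeof(hdr))              /* header must fit */
        return -1;
    memcpy(&hdr, buf, sizeof(hdr));

    /* The name must fit in what the writer actually supplied... */
    if (hdr.namelen > remaining - sizeof(hdr))
        return -1;
    /* ...and in the destination buffer (leave room for the NUL). */
    if (hdr.namelen >= name_cap)
        return -1;

    memcpy(name_out, buf + sizeof(hdr), hdr.namelen);
    name_out[hdr.namelen] = '\0';
    return 0;
}

int main(void)
{
    unsigned char msg[sizeof(struct inval_entry_hdr) + 3];
    struct inval_entry_hdr hdr = { .parent = 1, .namelen = 3 };
    char name[16];

    memcpy(msg, &hdr, sizeof(hdr));
    memcpy(msg + sizeof(hdr), "foo", 3);
    printf("ok=%d name=%s\n",
           parse_inval_entry(msg, sizeof(msg), name, sizeof(name)) == 0, name);

    hdr.namelen = 1000;                       /* claims more than was written */
    memcpy(msg, &hdr, sizeof(hdr));
    printf("oversized rejected=%d\n",
           parse_inval_entry(msg, sizeof(msg), name, sizeof(name)) == -1);
    return 0;
}

The key point is that both the remaining input and the destination capacity bound the copy, so a header that overstates its name length is rejected instead of overrunning.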
static int nft_fwd_netdev_dump(struct sk_buff *skb, const struct nft_expr *expr)
{
struct nft_fwd_netdev *priv = nft_expr_priv(expr);
if (nft_dump_register(skb, NFTA_FWD_SREG_DEV, priv->sreg_dev))
goto nla_put_failure;
return 0;
nla_put_failure:
return -1;
}
| 0 |
[
"CWE-269"
] |
nf
|
b1a5983f56e371046dcf164f90bfaf704d2b89f6
| 81,656,590,109,946,050,000,000,000,000,000,000,000 | 12 |
netfilter: nf_tables_offload: incorrect flow offload action array size
The immediate verdict expression needs to allocate one slot in the flow
offload action array; the immediate data expression, however, does not.
The fwd and dup expressions also need to allocate one slot, but this is missing.
Add a new offload_action interface to report if this expression needs to
allocate one slot in the flow offload action array.
Fixes: be2861dc36d7 ("netfilter: nft_{fwd,dup}_netdev: add offload support")
Reported-and-tested-by: Nick Gregory <[email protected]>
Signed-off-by: Pablo Neira Ayuso <[email protected]>
|
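The offload_action interface mentioned above lets the rule walker size the action array by asking each expression whether it consumes a slot. The stand-alone sketch below illustrates that count-before-allocate idea with invented types (expr, expr_ops); it is not the nftables API.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct expr;

struct expr_ops {
    const char *name;
    /* Reports whether this expression needs one slot in the
     * flow offload action array. */
    bool (*offload_action)(const struct expr *e);
};

struct expr {
    const struct expr_ops *ops;
};

static bool always_one_slot(const struct expr *e) { (void)e; return true; }
static bool never_a_slot(const struct expr *e)    { (void)e; return false; }

static const struct expr_ops fwd_ops  = { "fwd",      always_one_slot };
static const struct expr_ops dup_ops  = { "dup",      always_one_slot };
static const struct expr_ops imm_data = { "imm-data", never_a_slot };

/* Size the action array by asking every expression in the rule. */
static size_t count_offload_actions(const struct expr *const *exprs, size_t n)
{
    size_t slots = 0;
    for (size_t i = 0; i < n; i++)
        if (exprs[i]->ops->offload_action &&
            exprs[i]->ops->offload_action(exprs[i]))
            slots++;
    return slots;
}

int main(void)
{
    struct expr fwd = { &fwd_ops }, dup = { &dup_ops }, imm = { &imm_data };
    const struct expr *rule[] = { &imm, &fwd, &dup };

    size_t slots = count_offload_actions(rule, 3);
    printf("allocating %zu action slots\n", slots);  /* 2, not 3 */

    void *actions = calloc(slots ? slots : 1, 64);   /* placeholder entries */
    free(actions);
    return 0;
}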
has_nonascii(const char *ptr, size_t len)
{
while (len > 0) {
if (!ISASCII(*ptr)) return 1;
ptr++;
--len;
}
return 0;
}
| 0 |
[
"CWE-22"
] |
ruby
|
bd5661a3cbb38a8c3a3ea10cd76c88bbef7871b8
| 307,476,830,830,447,940,000,000,000,000,000,000,000 | 9 |
dir.c: check NUL bytes
* dir.c (GlobPathValue): should be used in rb_push_glob only.
other methods should use FilePathValue.
https://hackerone.com/reports/302338
* dir.c (rb_push_glob): expand GlobPathValue
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@62989 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
|
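The fix referenced above is about rejecting path strings that hide embedded NUL bytes before they reach NUL-terminated filesystem APIs (the job FilePathValue does in Ruby). A tiny generic C sketch of that validation, not Ruby's actual implementation:

#include <stdio.h>
#include <string.h>

/* Returns 1 if the length-delimited string is safe to hand to
 * NUL-terminated filesystem APIs, 0 if it hides an embedded NUL. */
static int path_has_no_nul(const char *ptr, size_t len)
{
    return memchr(ptr, '\0', len) == NULL;
}

int main(void)
{
    const char good[] = "tmp/file.txt";
    const char bad[]  = "tmp/file.txt\0/etc/passwd";

    printf("good: %d\n", path_has_no_nul(good, sizeof(good) - 1)); /* 1 */
    printf("bad:  %d\n", path_has_no_nul(bad,  sizeof(bad)  - 1)); /* 0 */
    return 0;
}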
int qcow2_snapshot_delete(BlockDriverState *bs,
const char *snapshot_id,
const char *name,
Error **errp)
{
BDRVQcowState *s = bs->opaque;
QCowSnapshot sn;
int snapshot_index, ret;
/* Search the snapshot */
snapshot_index = find_snapshot_by_id_and_name(bs, snapshot_id, name);
if (snapshot_index < 0) {
error_setg(errp, "Can't find the snapshot");
return -ENOENT;
}
sn = s->snapshots[snapshot_index];
/* Remove it from the snapshot list */
memmove(s->snapshots + snapshot_index,
s->snapshots + snapshot_index + 1,
(s->nb_snapshots - snapshot_index - 1) * sizeof(sn));
s->nb_snapshots--;
ret = qcow2_write_snapshots(bs);
if (ret < 0) {
error_setg_errno(errp, -ret,
"Failed to remove snapshot from snapshot list");
return ret;
}
/*
* The snapshot is now unused, clean up. If we fail after this point, we
* won't recover but just leak clusters.
*/
g_free(sn.id_str);
g_free(sn.name);
/*
* Now decrease the refcounts of clusters referenced by the snapshot and
* free the L1 table.
*/
ret = qcow2_update_snapshot_refcount(bs, sn.l1_table_offset,
sn.l1_size, -1);
if (ret < 0) {
error_setg_errno(errp, -ret, "Failed to free the cluster and L1 table");
return ret;
}
qcow2_free_clusters(bs, sn.l1_table_offset, sn.l1_size * sizeof(uint64_t),
QCOW2_DISCARD_SNAPSHOT);
/* must update the copied flag on the current cluster offsets */
ret = qcow2_update_snapshot_refcount(bs, s->l1_table_offset, s->l1_size, 0);
if (ret < 0) {
error_setg_errno(errp, -ret,
"Failed to update snapshot status in disk");
return ret;
}
#ifdef DEBUG_ALLOC
{
BdrvCheckResult result = {0};
qcow2_check_refcounts(bs, &result, 0);
}
#endif
return 0;
}
| 0 |
[
"CWE-119"
] |
qemu
|
c05e4667be91b46ab42b5a11babf8e84d476cc6b
| 56,010,499,829,024,450,000,000,000,000,000,000,000 | 65 |
qcow2: Fix L1 allocation size in qcow2_snapshot_load_tmp() (CVE-2014-0145)
For the L1 table to be loaded for an internal snapshot, the code allocated
only enough memory to hold the currently active L1 table. If the
snapshot's L1 table is actually larger than the current one, this leads
to a buffer overflow.
Signed-off-by: Kevin Wolf <[email protected]>
Reviewed-by: Max Reitz <[email protected]>
Signed-off-by: Stefan Hajnoczi <[email protected]>
|
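The overflow described in the message comes from sizing the buffer after the currently active L1 table instead of the snapshot's own l1_size. The sketch below shows the safer shape, allocating from the snapshot's own header field with a sanity cap; the structures and the cap value are simplified stand-ins, not QEMU's.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define L1_ENTRY_SIZE  sizeof(uint64_t)
#define L1_SIZE_MAX    (2 * 1024 * 1024)   /* arbitrary sanity cap for the sketch */

struct snapshot_hdr {
    uint32_t l1_size;       /* number of L1 entries of *this* snapshot */
    uint64_t l1_offset;
};

/* Allocate an L1 table big enough for the snapshot being loaded,
 * not merely for the currently active table. */
static uint64_t *alloc_snapshot_l1(const struct snapshot_hdr *sn)
{
    if (sn->l1_size > L1_SIZE_MAX)          /* reject absurd header values */
        return NULL;
    /* calloc() checks the count * size multiplication for overflow. */
    return calloc(sn->l1_size, L1_ENTRY_SIZE);
}

int main(void)
{
    struct snapshot_hdr active = { .l1_size = 16 };
    struct snapshot_hdr snap   = { .l1_size = 4096 };  /* larger than active */

    (void)active;            /* the active table's size must not drive the allocation */
    uint64_t *l1 = alloc_snapshot_l1(&snap);
    printf("%s L1 table with %u entries\n",
           l1 ? "allocated" : "failed to allocate", (unsigned)snap.l1_size);
    free(l1);
    return 0;
}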
CAMLexport void caml_deserialize_block_float_8(void * data, intnat len)
{
#if ARCH_FLOAT_ENDIANNESS == 0x01234567
memmove(data, intern_src, len * 8);
intern_src += len * 8;
#elif ARCH_FLOAT_ENDIANNESS == 0x76543210
unsigned char * p, * q;
for (p = intern_src, q = data; len > 0; len--, p += 8, q += 8)
Reverse_64(q, p);
intern_src = p;
#else
unsigned char * p, * q;
for (p = intern_src, q = data; len > 0; len--, p += 8, q += 8)
Permute_64(q, ARCH_FLOAT_ENDIANNESS, p, 0x01234567);
intern_src = p;
#endif
}
| 0 |
[
"CWE-200"
] |
ocaml
|
659615c7b100a89eafe6253e7a5b9d84d0e8df74
| 23,832,962,213,282,850,000,000,000,000,000,000,000 | 17 |
fix PR#7003 and a few other bugs caused by misuse of Int_val
git-svn-id: http://caml.inria.fr/svn/ocaml/trunk@16525 f963ae5c-01c2-4b8c-9fe0-0dff7051ff02
|
void allocHeaders() override {
ASSERT(nullptr == absl::get<ResponseHeaderMapPtr>(headers_or_trailers_));
headers_or_trailers_.emplace<ResponseHeaderMapPtr>(ResponseHeaderMapImpl::create());
}
| 1 |
[
"CWE-770"
] |
envoy
|
7ca28ff7d46454ae930e193d97b7d08156b1ba59
| 3,864,457,986,010,174,700,000,000,000,000,000,000 | 4 |
[http1] Include request URL in request header size computation, and reject partial headers that exceed configured limits (#145)
Signed-off-by: antonio <[email protected]>
|
LoRaMacStatus_t LoRaMacQueryNextTxDelay( int8_t datarate, TimerTime_t* time )
{
NextChanParams_t nextChan;
uint8_t channel = 0;
CalcNextAdrParams_t adrNext;
uint32_t adrAckCounter = MacCtx.NvmCtx->AdrAckCounter;
int8_t txPower = MacCtx.NvmCtx->MacParamsDefaults.ChannelsTxPower;
if( time == NULL )
{
return LORAMAC_STATUS_PARAMETER_INVALID;
}
if( MacCtx.NvmCtx->LastTxDoneTime == 0 )
{
*time = 0;
return LORAMAC_STATUS_OK;
}
// Update back-off
CalculateBackOff( MacCtx.NvmCtx->LastTxChannel );
nextChan.AggrTimeOff = MacCtx.NvmCtx->AggregatedTimeOff;
nextChan.Datarate = datarate;
nextChan.DutyCycleEnabled = MacCtx.NvmCtx->DutyCycleOn;
nextChan.QueryNextTxDelayOnly = true;
nextChan.Joined = true;
nextChan.LastAggrTx = MacCtx.NvmCtx->LastTxDoneTime;
if( MacCtx.NvmCtx->NetworkActivation == ACTIVATION_TYPE_NONE )
{
nextChan.Joined = false;
}
if( MacCtx.NvmCtx->AdrCtrlOn == true )
{
// Setup ADR request
adrNext.UpdateChanMask = false;
adrNext.AdrEnabled = MacCtx.NvmCtx->AdrCtrlOn;
adrNext.AdrAckCounter = MacCtx.NvmCtx->AdrAckCounter;
adrNext.AdrAckLimit = MacCtx.AdrAckLimit;
adrNext.AdrAckDelay = MacCtx.AdrAckDelay;
adrNext.Datarate = MacCtx.NvmCtx->MacParams.ChannelsDatarate;
adrNext.TxPower = MacCtx.NvmCtx->MacParams.ChannelsTxPower;
adrNext.UplinkDwellTime = MacCtx.NvmCtx->MacParams.UplinkDwellTime;
adrNext.Region = MacCtx.NvmCtx->Region;
// We call the function for information purposes only. We don't want to
// apply the datarate, the tx power and the ADR ack counter.
LoRaMacAdrCalcNext( &adrNext, &nextChan.Datarate, &txPower, &adrAckCounter );
}
// Select channel
return RegionNextChannel( MacCtx.NvmCtx->Region, &nextChan, &channel, time, &MacCtx.NvmCtx->AggregatedTimeOff );
}
| 0 |
[
"CWE-120",
"CWE-787"
] |
LoRaMac-node
|
e3063a91daa7ad8a687223efa63079f0c24568e4
| 301,553,052,552,285,960,000,000,000,000,000,000,000 | 55 |
Added received buffer size checks.
|
static int alloc_pebs_buffer(int cpu)
{
struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu);
struct debug_store *ds = hwev->ds;
size_t bsiz = x86_pmu.pebs_buffer_size;
int max, node = cpu_to_node(cpu);
void *buffer, *insn_buff, *cea;
if (!x86_pmu.pebs)
return 0;
buffer = dsalloc_pages(bsiz, GFP_KERNEL, cpu);
if (unlikely(!buffer))
return -ENOMEM;
/*
* HSW+ already provides us the eventing ip; no need to allocate this
* buffer then.
*/
if (x86_pmu.intel_cap.pebs_format < 2) {
insn_buff = kzalloc_node(PEBS_FIXUP_SIZE, GFP_KERNEL, node);
if (!insn_buff) {
dsfree_pages(buffer, bsiz);
return -ENOMEM;
}
per_cpu(insn_buffer, cpu) = insn_buff;
}
hwev->ds_pebs_vaddr = buffer;
/* Update the cpu entry area mapping */
cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.pebs_buffer;
ds->pebs_buffer_base = (unsigned long) cea;
ds_update_cea(cea, buffer, bsiz, PAGE_KERNEL);
ds->pebs_index = ds->pebs_buffer_base;
max = x86_pmu.pebs_record_size * (bsiz / x86_pmu.pebs_record_size);
ds->pebs_absolute_maximum = ds->pebs_buffer_base + max;
return 0;
}
| 0 |
[
"CWE-755"
] |
linux
|
d88d05a9e0b6d9356e97129d4ff9942d765f46ea
| 273,524,607,851,932,130,000,000,000,000,000,000,000 | 37 |
perf/x86/intel: Fix a crash caused by zero PEBS status
A repeatable crash can be triggered by the perf_fuzzer on some Haswell
system.
https://lore.kernel.org/lkml/[email protected]/
For some old CPUs (HSW and earlier), the PEBS status in a PEBS record
may be mistakenly set to 0. To minimize the impact of the defect,
commit 01330d7288e0 ("perf/x86: Allow zero PEBS status with only single
active event") was introduced to try to avoid dropping the PEBS record in
some cases. It adds a check in intel_pmu_drain_pebs_nhm() and updates
the local pebs_status accordingly. However, it doesn't correct the PEBS
status in the PEBS record itself, which may trigger the crash, especially
for a large PEBS.
It's possible that all the PEBS records in a large PEBS have the PEBS
status 0. If so, the first get_next_pebs_record_by_bit() in
__intel_pmu_pebs_event() returns NULL, so at == NULL. Since it's a large
PEBS, the 'count' parameter must be > 1. The second
get_next_pebs_record_by_bit() will crash.
Besides the local pebs_status, correct the PEBS status in the PEBS
record as well.
Fixes: 01330d7288e0 ("perf/x86: Allow zero PEBS status with only single active event")
Reported-by: Vince Weaver <[email protected]>
Suggested-by: Peter Zijlstra (Intel) <[email protected]>
Signed-off-by: Kan Liang <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Cc: [email protected]
Link: https://lkml.kernel.org/r/[email protected]
|
void Compute(OpKernelContext* const context) override {
// node_id_range
const Tensor* node_id_range_t;
OP_REQUIRES_OK(context, context->input("node_id_range", &node_id_range_t));
OP_REQUIRES(
context, node_id_range_t->NumElements() == 2,
errors::InvalidArgument("node_id_range argument must have shape [2]"));
const auto node_id_range = node_id_range_t->vec<int32>();
const int32_t node_id_first = node_id_range(0); // inclusive
const int32_t node_id_last = node_id_range(1); // exclusive
const Tensor* stats_summary_t;
OP_REQUIRES_OK(context, context->input("stats_summary", &stats_summary_t));
OP_REQUIRES(
context, stats_summary_t->shape().dims() == 4,
errors::InvalidArgument("stats_summary argument must have rank 4"));
TTypes<float, 4>::ConstTensor stats_summary =
stats_summary_t->tensor<float, 4>();
const int32_t feature_dims = stats_summary_t->dim_size(1);
// The last bucket is for default/missing value.
const int32_t num_buckets = stats_summary_t->dim_size(2) - 1;
const int32_t logits_dim = logits_dim_;
const int32_t hessian_dim = stats_summary_t->dim_size(3) - logits_dim;
OP_REQUIRES(context, hessian_dim > 0,
errors::InvalidArgument("hessian dim should be < 0, got ",
hessian_dim));
OP_REQUIRES(context, hessian_dim <= logits_dim * logits_dim,
errors::InvalidArgument(
"hessian dim should be <= ", logits_dim * logits_dim,
" but got: ", hessian_dim));
const Tensor* l1_t;
OP_REQUIRES_OK(context, context->input("l1", &l1_t));
OP_REQUIRES(context, l1_t->NumElements() == 1,
errors::InvalidArgument("l1 argument must be a scalar"));
const auto l1 = l1_t->scalar<float>()();
DCHECK_GE(l1, 0);
if (logits_dim_ > 1) {
// Multi-class L1 regularization not supported yet.
DCHECK_EQ(l1, 0);
}
const Tensor* l2_t;
OP_REQUIRES_OK(context, context->input("l2", &l2_t));
OP_REQUIRES(context, l2_t->NumElements() == 1,
errors::InvalidArgument("l2 argument must be a scalar"));
const auto l2 = l2_t->scalar<float>()();
DCHECK_GE(l2, 0);
const Tensor* tree_complexity_t;
OP_REQUIRES_OK(context,
context->input("tree_complexity", &tree_complexity_t));
OP_REQUIRES(
context, tree_complexity_t->NumElements() == 1,
errors::InvalidArgument("tree_complexity argument must be a scalar"));
const auto tree_complexity = tree_complexity_t->scalar<float>()();
const Tensor* min_node_weight_t;
OP_REQUIRES_OK(context,
context->input("min_node_weight", &min_node_weight_t));
OP_REQUIRES(
context, min_node_weight_t->NumElements() == 1,
errors::InvalidArgument("min_node_weight argument must be a scalar"));
const auto min_node_weight = min_node_weight_t->scalar<float>()();
std::vector<int32> output_node_ids;
std::vector<float> output_gains;
std::vector<int32> output_feature_dimensions;
std::vector<int32> output_thresholds;
std::vector<Eigen::VectorXf> output_left_node_contribs;
std::vector<Eigen::VectorXf> output_right_node_contribs;
std::vector<std::string> output_split_types;
// TODO(tanzheny) parallelize the computation.
// Iterate each node and find the best gain per node.
for (int32_t node_id = node_id_first; node_id < node_id_last; ++node_id) {
float best_gain = std::numeric_limits<float>::lowest();
int32_t best_bucket = 0;
int32_t best_f_dim = 0;
string best_split_type;
Eigen::VectorXf best_contrib_for_left(logits_dim);
Eigen::VectorXf best_contrib_for_right(logits_dim);
float parent_gain;
// Including default bucket.
ConstMatrixMap stats_mat(&stats_summary(node_id, 0, 0, 0),
num_buckets + 1, logits_dim + hessian_dim);
const Eigen::VectorXf total_grad =
stats_mat.leftCols(logits_dim).colwise().sum();
const Eigen::VectorXf total_hess =
stats_mat.rightCols(hessian_dim).colwise().sum();
if (total_hess.norm() < min_node_weight) {
continue;
}
Eigen::VectorXf parent_weight(logits_dim);
CalculateWeightsAndGains(total_grad, total_hess, l1, l2, &parent_weight,
&parent_gain);
if (split_type_ == "inequality") {
CalculateBestInequalitySplit(
stats_summary, node_id, feature_dims, logits_dim, hessian_dim,
num_buckets, min_node_weight, l1, l2, &best_gain, &best_bucket,
&best_f_dim, &best_split_type, &best_contrib_for_left,
&best_contrib_for_right);
} else {
CalculateBestEqualitySplit(
stats_summary, total_grad, total_hess, node_id, feature_dims,
logits_dim, hessian_dim, num_buckets, l1, l2, &best_gain,
&best_bucket, &best_f_dim, &best_split_type, &best_contrib_for_left,
&best_contrib_for_right);
}
if (best_gain == std::numeric_limits<float>::lowest()) {
      // Do not add the node if no split is found.
continue;
}
output_node_ids.push_back(node_id);
// Remove the parent gain for the parent node.
output_gains.push_back(best_gain - parent_gain);
output_feature_dimensions.push_back(best_f_dim);
// default direction is fixed for dense splits.
// TODO(tanzheny) account for default values.
output_split_types.push_back(best_split_type);
output_thresholds.push_back(best_bucket);
output_left_node_contribs.push_back(best_contrib_for_left);
output_right_node_contribs.push_back(best_contrib_for_right);
} // for node id
const int num_nodes = output_node_ids.size();
// output_node_ids
Tensor* output_node_ids_t = nullptr;
OP_REQUIRES_OK(context, context->allocate_output("node_ids", {num_nodes},
&output_node_ids_t));
auto output_node_ids_vec = output_node_ids_t->vec<int32>();
// output_gains
Tensor* output_gains_t;
OP_REQUIRES_OK(context, context->allocate_output("gains", {num_nodes},
&output_gains_t));
auto output_gains_vec = output_gains_t->vec<float>();
// output_feature_dimensions
Tensor* output_feature_dimension_t;
OP_REQUIRES_OK(context,
context->allocate_output("feature_dimensions", {num_nodes},
&output_feature_dimension_t));
auto output_feature_dimensions_vec =
output_feature_dimension_t->vec<int32>();
// output_thresholds
Tensor* output_thresholds_t;
OP_REQUIRES_OK(context, context->allocate_output("thresholds", {num_nodes},
&output_thresholds_t));
auto output_thresholds_vec = output_thresholds_t->vec<int32>();
// output_left_node_contribs
Tensor* output_left_node_contribs_t;
OP_REQUIRES_OK(context, context->allocate_output(
"left_node_contribs", {num_nodes, logits_dim},
&output_left_node_contribs_t));
auto output_left_node_contribs_matrix =
output_left_node_contribs_t->matrix<float>();
// output_right_node_contribs
Tensor* output_right_node_contribs_t;
OP_REQUIRES_OK(context, context->allocate_output(
"right_node_contribs", {num_nodes, logits_dim},
&output_right_node_contribs_t));
auto output_right_node_contribs_matrix =
output_right_node_contribs_t->matrix<float>();
// split type
Tensor* output_split_types_t;
OP_REQUIRES_OK(
context, context->allocate_output("split_with_default_directions",
{num_nodes}, &output_split_types_t));
auto output_split_types_vec = output_split_types_t->vec<tstring>();
// Sets output tensors from vectors.
for (int i = 0; i < num_nodes; ++i) {
output_node_ids_vec(i) = output_node_ids[i];
// Adjust the gains to penalize by tree complexity.
output_gains_vec(i) = output_gains[i] - tree_complexity;
output_feature_dimensions_vec(i) = output_feature_dimensions[i];
output_thresholds_vec(i) = output_thresholds[i];
for (int j = 0; j < logits_dim; ++j) {
output_left_node_contribs_matrix(i, j) =
output_left_node_contribs[i][j];
output_right_node_contribs_matrix(i, j) =
output_right_node_contribs[i][j];
}
output_split_types_vec(i) = output_split_types[i];
}
}
| 0 |
[
"CWE-476",
"CWE-369"
] |
tensorflow
|
5c8c9a8bfe750f9743d0c859bae112060b216f5c
| 106,804,609,066,543,900,000,000,000,000,000,000,000 | 193 |
Fixing security fixes in boosted trees ops
PiperOrigin-RevId: 405669548
Change-Id: Iae224d240d1779bcc02405c2fff99785644fbd0d
|
/**
\param filename Filename to write data to.
\param is_compressed Tells if data compression must be enabled.
| 0 |
[
"CWE-125"
] |
CImg
|
10af1e8c1ad2a58a0a3342a856bae63e8f257abb
| 308,500,014,155,112,450,000,000,000,000,000,000,000 | 3 |
Fix other issues in 'CImg<T>::load_bmp()'.
|
url_uses_proxy (struct url * u)
{
bool ret;
if (!u)
return false;
ret = getproxy (u) != NULL;
return ret;
}
| 0 |
[
"CWE-20"
] |
wget
|
3e25a9817f47fbb8660cc6a3b2f3eea239526c6c
| 3,236,229,823,494,197,000,000,000,000,000,000,000 | 8 |
Introduce --trust-server-names. Close CVE-2010-2252.
|
static void log_callback(void *ptr, int level, const char *fmt, va_list vl)
{
AVClass* avc = ptr ? *(AVClass **) ptr : NULL;
va_list vl2;
char line[1024];
static int print_prefix = 1;
void *new_log_buffer;
va_copy(vl2, vl);
av_log_default_callback(ptr, level, fmt, vl);
av_log_format_line(ptr, level, fmt, vl2, line, sizeof(line), &print_prefix);
va_end(vl2);
#if HAVE_THREADS
pthread_mutex_lock(&log_mutex);
new_log_buffer = av_realloc_array(log_buffer, log_buffer_size + 1, sizeof(*log_buffer));
if (new_log_buffer) {
char *msg;
int i;
log_buffer = new_log_buffer;
memset(&log_buffer[log_buffer_size], 0, sizeof(log_buffer[log_buffer_size]));
log_buffer[log_buffer_size].context_name= avc ? av_strdup(avc->item_name(ptr)) : NULL;
if (avc) {
if (avc->get_category) log_buffer[log_buffer_size].category = avc->get_category(ptr);
else log_buffer[log_buffer_size].category = avc->category;
}
log_buffer[log_buffer_size].log_level = level;
msg = log_buffer[log_buffer_size].log_message = av_strdup(line);
for (i=strlen(msg) - 1; i>=0 && msg[i] == '\n'; i--) {
msg[i] = 0;
}
if (avc && avc->parent_log_context_offset) {
AVClass** parent = *(AVClass ***) (((uint8_t *) ptr) +
avc->parent_log_context_offset);
if (parent && *parent) {
log_buffer[log_buffer_size].parent_name = av_strdup((*parent)->item_name(parent));
log_buffer[log_buffer_size].parent_category =
(*parent)->get_category ? (*parent)->get_category(parent) :(*parent)->category;
}
}
log_buffer_size ++;
}
pthread_mutex_unlock(&log_mutex);
#endif
}
| 0 |
[
"CWE-476"
] |
FFmpeg
|
837cb4325b712ff1aab531bf41668933f61d75d2
| 280,253,689,307,777,450,000,000,000,000,000,000,000 | 48 |
ffprobe: Fix null pointer dereference with color primaries
Found-by: AD-lab of venustech
Signed-off-by: Michael Niedermayer <[email protected]>
|
static AioContext *nvme_get_aio_context(BlockAIOCB *acb)
{
return qemu_get_aio_context();
}
| 0 |
[] |
qemu
|
736b01642d85be832385063f278fe7cd4ffb5221
| 175,130,208,125,334,530,000,000,000,000,000,000,000 | 4 |
hw/nvme: fix CVE-2021-3929
This fixes CVE-2021-3929 "locally" by denying DMA to the iomem of the
device itself. This still allows DMA to MMIO regions of other devices
(e.g. doing P2P DMA to the controller memory buffer of another NVMe
device).
Fixes: CVE-2021-3929
Reported-by: Qiuhao Li <[email protected]>
Reviewed-by: Keith Busch <[email protected]>
Reviewed-by: Philippe Mathieu-Daudé <[email protected]>
Signed-off-by: Klaus Jensen <[email protected]>
|
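Denying DMA to the device's own iomem while still allowing other targets is essentially an interval-overlap check performed before the transfer. A stand-alone sketch of that check follows; the region layout and function names are made up for illustration and do not reflect QEMU's dma/memory API.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct mmio_region {
    uint64_t base;
    uint64_t size;
};

/* Reject a DMA request if [addr, addr+len) overlaps the device's own
 * MMIO region; anything else (RAM, another device's BAR) is allowed. */
static bool dma_addr_allowed(const struct mmio_region *own,
                             uint64_t addr, uint64_t len)
{
    uint64_t own_end;

    if (len == 0)
        return true;
    if (addr + len < addr)               /* wrap-around is always bogus */
        return false;

    own_end = own->base + own->size;
    return addr + len <= own->base || addr >= own_end;
}

int main(void)
{
    struct mmio_region bar0 = { .base = 0xfe000000, .size = 0x4000 };

    printf("RAM:        %d\n", dma_addr_allowed(&bar0, 0x1000, 0x200));     /* 1 */
    printf("own iomem:  %d\n", dma_addr_allowed(&bar0, 0xfe000100, 0x40));  /* 0 */
    printf("other BAR:  %d\n", dma_addr_allowed(&bar0, 0xfd000000, 0x40));  /* 1 */
    return 0;
}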
int LuaSettings::l_get_np_group(lua_State *L)
{
NO_MAP_LOCK_REQUIRED;
LuaSettings *o = checkobject(L, 1);
std::string key = std::string(luaL_checkstring(L, 2));
if (o->m_settings->exists(key)) {
NoiseParams np;
o->m_settings->getNoiseParams(key, np);
push_noiseparams(L, &np);
} else {
lua_pushnil(L);
}
return 1;
}
| 0 |
[] |
minetest
|
da71e86633d0b27cd02d7aac9fdac625d141ca13
| 194,426,673,328,880,770,000,000,000,000,000,000,000 | 16 |
Protect a few more settings from being set from mods
Of those settings, main_menu_script has a concrete security impact; the rest are added out of an abundance of caution.
|
static apr_byte_t oidc_refresh_access_token_before_expiry(request_rec *r,
oidc_cfg *cfg, oidc_session_t *session, int ttl_minimum, int logout_on_error) {
const char *s_access_token_expires = NULL;
apr_time_t t_expires = -1;
oidc_provider_t *provider = NULL;
oidc_debug(r, "ttl_minimum=%d", ttl_minimum);
if (ttl_minimum < 0)
return FALSE;
s_access_token_expires = oidc_session_get_access_token_expires(r, session);
if (s_access_token_expires == NULL) {
oidc_debug(r,
"no access token expires_in stored in the session (i.e. returned from in the authorization response), so cannot refresh the access token based on TTL requirement");
return FALSE;
}
if (oidc_session_get_refresh_token(r, session) == NULL) {
oidc_debug(r,
"no refresh token stored in the session, so cannot refresh the access token based on TTL requirement");
return FALSE;
}
if (sscanf(s_access_token_expires, "%" APR_TIME_T_FMT, &t_expires) != 1) {
oidc_error(r, "could not parse s_access_token_expires %s",
s_access_token_expires);
return FALSE;
}
t_expires = apr_time_from_sec(t_expires - ttl_minimum);
oidc_debug(r, "refresh needed in: %" APR_TIME_T_FMT " seconds",
apr_time_sec(t_expires - apr_time_now()));
if (t_expires > apr_time_now())
return FALSE;
if (oidc_get_provider_from_session(r, cfg, session, &provider) == FALSE)
return FALSE;
if (oidc_refresh_access_token(r, cfg, session, provider,
NULL) == FALSE) {
oidc_warn(r, "access_token could not be refreshed, logout=%d", logout_on_error & OIDC_LOGOUT_ON_ERROR_REFRESH);
if (logout_on_error & OIDC_LOGOUT_ON_ERROR_REFRESH)
return ERROR;
else
return FALSE;
}
return TRUE;
}
| 0 |
[
"CWE-601"
] |
mod_auth_openidc
|
5c15dfb08106c2451c2c44ce7ace6813c216ba75
| 327,631,012,462,218,670,000,000,000,000,000,000,000 | 53 |
improve validation of the post-logout URL; closes #449
- to avoid an open redirect; thanks AIMOTO Norihito
- release 2.4.0.1
Signed-off-by: Hans Zandbelt <[email protected]>
|
std::string Magick::Image::comment(void) const
{
const char
*value;
GetPPException;
value=GetImageProperty(constImage(),"Comment",exceptionInfo);
ThrowImageException;
if (value)
return(std::string(value));
return(std::string()); // Intentionally no exception
}
| 0 |
[
"CWE-416"
] |
ImageMagick
|
8c35502217c1879cb8257c617007282eee3fe1cc
| 205,083,173,622,617,440,000,000,000,000,000,000,000 | 14 |
Added missing return to avoid use after free.
|
static void fib6_net_exit(struct net *net)
{
unsigned int i;
del_timer_sync(&net->ipv6.ip6_fib_timer);
for (i = 0; i < FIB6_TABLE_HASHSZ; i++) {
struct hlist_head *head = &net->ipv6.fib_table_hash[i];
struct hlist_node *tmp;
struct fib6_table *tb;
hlist_for_each_entry_safe(tb, tmp, head, tb6_hlist) {
hlist_del(&tb->tb6_hlist);
fib6_free_table(tb);
}
}
kfree(net->ipv6.fib_table_hash);
kfree(net->ipv6.rt6_stats);
fib6_notifier_exit(net);
}
| 0 |
[
"CWE-755"
] |
linux
|
7b09c2d052db4b4ad0b27b97918b46a7746966fa
| 55,406,126,953,454,450,000,000,000,000,000,000,000 | 21 |
ipv6: fix a typo in fib6_rule_lookup()
Yi Ren reported an issue discovered by syzkaller, and bisected
to the cited commit.
Many thanks to Yi; this trivial patch does not reflect the patient
work that has been done.
Fixes: d64a1f574a29 ("ipv6: honor RT6_LOOKUP_F_DST_NOREF in rule lookup logic")
Signed-off-by: Eric Dumazet <[email protected]>
Acked-by: Wei Wang <[email protected]>
Bisected-and-reported-by: Yi Ren <[email protected]>
Signed-off-by: Jakub Kicinski <[email protected]>
|
static int chown_common(struct path *path, uid_t user, gid_t group)
{
struct inode *inode = path->dentry->d_inode;
int error;
struct iattr newattrs;
newattrs.ia_valid = ATTR_CTIME;
if (user != (uid_t) -1) {
newattrs.ia_valid |= ATTR_UID;
newattrs.ia_uid = user;
}
if (group != (gid_t) -1) {
newattrs.ia_valid |= ATTR_GID;
newattrs.ia_gid = group;
}
if (!S_ISDIR(inode->i_mode))
newattrs.ia_valid |=
ATTR_KILL_SUID | ATTR_KILL_SGID | ATTR_KILL_PRIV;
mutex_lock(&inode->i_mutex);
error = security_path_chown(path, user, group);
if (!error)
error = notify_change(path->dentry, &newattrs);
mutex_unlock(&inode->i_mutex);
return error;
}
| 0 |
[
"CWE-732"
] |
linux-stable
|
e57712ebebbb9db7d8dcef216437b3171ddcf115
| 164,660,677,076,924,970,000,000,000,000,000,000,000 | 26 |
merge fchmod() and fchmodat() guts, kill ancient broken kludge
The kludge in question is undocumented and doesn't work for 32bit
binaries on amd64, sparc64 and s390. Passing (mode_t)-1 as
mode was (since 0.99.14v, and contrary to the behaviour of any
other Unix, the prescriptions of POSIX and SuS, and our own manpages)
a kinda-sorta no-op. Note that any software relying on
that (and looking for examples shows none) would be visibly
broken on sparc64, where practically all userland is built
32bit. No such complaints noticed...
Signed-off-by: Al Viro <[email protected]>
|
evdev_to_left_handed(struct evdev_device *device,
uint32_t button)
{
if (device->left_handed.enabled) {
if (button == BTN_LEFT)
return BTN_RIGHT;
else if (button == BTN_RIGHT)
return BTN_LEFT;
}
return button;
}
| 0 |
[
"CWE-134"
] |
libinput
|
a423d7d3269dc32a87384f79e29bb5ac021c83d1
| 54,811,745,313,549,670,000,000,000,000,000,000,000 | 11 |
evdev: strip the device name of format directives
This fixes a format string vulnerabilty.
evdev_log_message() composes a format string consisting of a fixed
prefix (including the rendered device name) and the passed-in format
buffer. This format string is then passed with the arguments to the
actual log handler, which usually and eventually ends up being printf.
If the device name contains a printf-style format directive, it ends up
in the format string and is interpreted by printf, e.g. for a
device "Foo%sBar" the log message vs printf invocation ends up being:
evdev_log_message(device, "some message %s", "some argument");
printf("event9 - Foo%sBar: some message %s", "some argument");
This can enable an attacker to execute malicious code with the
privileges of the process using libinput.
To exploit this, an attacker needs to be able to create a kernel device
with a malicious name, e.g. through /dev/uinput or a Bluetooth device.
To fix this, convert any potential format directives in the device name
by duplicating percentages.
Pre-rendering the device to avoid the issue altogether would be nicer
but the current log level hooks do not easily allow for this. The device
name is the only user-controlled part of the format string.
A second potential issue is the sysname of the device which is also
sanitized.
This issue was found by Albin Eldstål-Ahrens and Benjamin Svensson from
Assured AB, and independently by Lukas Lamster.
Fixes #752
Signed-off-by: Peter Hutterer <[email protected]>
|
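The mitigation described above, duplicating '%' characters in the device name before it is embedded in a log format string, can be sketched in a few lines. The helper below (sanitize_for_format) illustrates the idea under that assumption; it is not libinput's actual code.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Return a newly allocated copy of 'name' with every '%' doubled, so it
 * can safely be embedded in a printf-style format string. */
static char *sanitize_for_format(const char *name)
{
    size_t extra = 0;
    for (const char *p = name; *p; p++)
        if (*p == '%')
            extra++;

    char *out = malloc(strlen(name) + extra + 1);
    if (!out)
        return NULL;

    char *dst = out;
    for (const char *p = name; *p; p++) {
        *dst++ = *p;
        if (*p == '%')
            *dst++ = '%';          /* "%s" becomes "%%s" */
    }
    *dst = '\0';
    return out;
}

int main(void)
{
    char *safe = sanitize_for_format("Foo%sBar");
    if (!safe)
        return 1;

    char prefix[64];
    snprintf(prefix, sizeof(prefix), "event9 - %s: ", safe);
    /* Deliberately pass the composed prefix as a format string to show
     * the name is now inert: "%%" prints a literal '%', nothing consumes
     * the extra argument. */
    printf(prefix, "ignored");
    printf("some message\n");

    free(safe);
    return 0;
}

After sanitizing, a name like "Foo%sBar" renders literally and can no longer pull arguments off the stack when the composed prefix later reaches printf.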
std::string index(Halffacet_iterator f) const
{ return FI(f,verbose); }
| 0 |
[
"CWE-125"
] |
cgal
|
5a1ab45058112f8647c14c02f58905ecc597ec76
| 42,893,625,032,284,800,000,000,000,000,000,000,000 | 2 |
Fix Nef_3
|
pdf14_text_begin(gx_device * dev, gs_gstate * pgs,
const gs_text_params_t * text, gs_font * font,
gx_path * path, const gx_device_color * pdcolor,
const gx_clip_path * pcpath, gs_memory_t * memory,
gs_text_enum_t ** ppenum)
{
int code;
gs_text_enum_t *penum;
gs_blend_mode_t blend_mode = gs_currentblendmode(pgs);
float opacity = gs_currentopacityalpha(pgs);
bool blend_issue = !(blend_mode == BLEND_MODE_Normal || blend_mode == BLEND_MODE_Compatible);
pdf14_device *pdev = (pdf14_device*)dev;
bool draw = !(text->operation & TEXT_DO_NONE);
if_debug0m('v', memory, "[v]pdf14_text_begin\n");
pdf14_set_marking_params(dev, pgs);
code = gx_default_text_begin(dev, pgs, text, font, path, pdcolor, pcpath,
memory, &penum);
if (code < 0)
return code;
/* We may need to push a non-isolated transparency group if the following
is true.
1) We are not currently in one that we pushed for text and we are in
a BT/ET pair. This is determined by looking at the pdf14 text_group.
2) The blend mode is not Normal or the opacity is not 1.0
3) Text knockout is set to true
4) We are actually doing a text drawing
Special note: If text-knockout is set to false while we are within a
BT ET pair, we should pop the group. I need to create a test file for
this case. */
if (gs_currenttextknockout(pgs) && (blend_issue || opacity != 1.0) &&
gs_currenttextrenderingmode(pgs) != 3 && /* don't bother with invisible text */
pdev->text_group == PDF14_TEXTGROUP_BT_NOT_PUSHED)
if (draw) {
code = pdf14_push_text_group(dev, pgs, path, pcpath, blend_mode, opacity,
false);
}
*ppenum = (gs_text_enum_t *)penum;
return code;
}
| 0 |
[] |
ghostpdl
|
c432131c3fdb2143e148e8ba88555f7f7a63b25e
| 111,135,455,933,207,820,000,000,000,000,000,000,000 | 42 |
Bug 699661: Avoid sharing pointers between pdf14 compositors
If a copdevice is triggered when the pdf14 compositor is the device, we make
a copy of the device, then throw an error because, by default we're only allowed
to copy the device prototype - then freeing it calls the finalize, which frees
several pointers shared with the parent.
Make a pdf14 specific finish_copydevice() which NULLs the relevant pointers,
before, possibly, throwing the same error as the default method.
This also highlighted a problem with reopening the X11 devices, where a custom
error handler could be replaced with itself, meaning it also called itself,
and infifite recursion resulted.
Keep a note of if the handler replacement has been done, and don't do it a
second time.
|
static int shmem_encode_fh(struct dentry *dentry, __u32 *fh, int *len,
int connectable)
{
struct inode *inode = dentry->d_inode;
if (*len < 3)
return 255;
if (hlist_unhashed(&inode->i_hash)) {
/* Unfortunately insert_inode_hash is not idempotent,
* so as we hash inodes here rather than at creation
* time, we need a lock to ensure we only try
* to do it once
*/
static DEFINE_SPINLOCK(lock);
spin_lock(&lock);
if (hlist_unhashed(&inode->i_hash))
__insert_inode_hash(inode,
inode->i_ino + inode->i_generation);
spin_unlock(&lock);
}
fh[0] = inode->i_generation;
fh[1] = inode->i_ino;
fh[2] = ((__u64)inode->i_ino) >> 32;
*len = 3;
return 1;
}
| 0 |
[
"CWE-200"
] |
linux-2.6
|
e84e2e132c9c66d8498e7710d4ea532d1feaaac5
| 100,837,547,704,393,590,000,000,000,000,000,000,000 | 29 |
tmpfs: restore missing clear_highpage
tmpfs was misconverted to __GFP_ZERO in 2.6.11. There's an unusual case in
which shmem_getpage receives the page from its caller instead of allocating.
We must cover this case by clear_highpage before SetPageUptodate, as before.
Signed-off-by: Hugh Dickins <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
static int macsec_changelink(struct net_device *dev, struct nlattr *tb[],
struct nlattr *data[])
{
if (!data)
return 0;
if (data[IFLA_MACSEC_CIPHER_SUITE] ||
data[IFLA_MACSEC_ICV_LEN] ||
data[IFLA_MACSEC_SCI] ||
data[IFLA_MACSEC_PORT])
return -EINVAL;
macsec_changelink_common(dev, data);
return 0;
}
| 0 |
[
"CWE-119"
] |
net
|
5294b83086cc1c35b4efeca03644cf9d12282e5b
| 314,932,202,772,941,230,000,000,000,000,000,000,000 | 16 |
macsec: dynamically allocate space for sglist
We call skb_cow_data, which is good anyway to ensure we can actually
modify the skb as such (another error from prior). Now that we have the
number of fragments required, we can safely allocate exactly that amount
of memory.
Fixes: c09440f7dcb3 ("macsec: introduce IEEE 802.1AE driver")
Signed-off-by: Jason A. Donenfeld <[email protected]>
Acked-by: Sabrina Dubroca <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
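The pattern in the message, first learn how many fragments the buffer really has and then allocate a scatterlist of exactly that many entries instead of a fixed-size array, is sketched generically below. The frag and sg_entry types are placeholders, not the kernel's scatterlist API.

#include <stdio.h>
#include <stdlib.h>

struct frag     { const void *data; size_t len; };
struct sg_entry { const void *addr; size_t len; };

/* Build a scatter list sized from the real fragment count rather than
 * a compile-time maximum that an attacker-shaped buffer could exceed. */
static struct sg_entry *build_sg(const struct frag *frags, size_t nfrags)
{
    struct sg_entry *sg = calloc(nfrags, sizeof(*sg));
    if (!sg)
        return NULL;
    for (size_t i = 0; i < nfrags; i++) {
        sg[i].addr = frags[i].data;
        sg[i].len  = frags[i].len;
    }
    return sg;
}

int main(void)
{
    /* Imagine the equivalent of skb_cow_data() just reported 5 fragments. */
    struct frag frags[5] = {
        { "a", 1 }, { "bb", 2 }, { "ccc", 3 }, { "dddd", 4 }, { "eeeee", 5 }
    };
    size_t nfrags = sizeof(frags) / sizeof(frags[0]);

    struct sg_entry *sg = build_sg(frags, nfrags);
    if (!sg)
        return 1;
    printf("scatterlist with %zu entries built\n", nfrags);
    free(sg);
    return 0;
}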
static int l2tp_ip_recv(struct sk_buff *skb)
{
struct sock *sk;
u32 session_id;
u32 tunnel_id;
unsigned char *ptr, *optr;
struct l2tp_session *session;
struct l2tp_tunnel *tunnel = NULL;
int length;
int offset;
/* Point to L2TP header */
optr = ptr = skb->data;
if (!pskb_may_pull(skb, 4))
goto discard;
session_id = ntohl(*((__be32 *) ptr));
ptr += 4;
/* RFC3931: L2TP/IP packets have the first 4 bytes containing
* the session_id. If it is 0, the packet is a L2TP control
* frame and the session_id value can be discarded.
*/
if (session_id == 0) {
__skb_pull(skb, 4);
goto pass_up;
}
/* Ok, this is a data packet. Lookup the session. */
session = l2tp_session_find(&init_net, NULL, session_id);
if (session == NULL)
goto discard;
tunnel = session->tunnel;
if (tunnel == NULL)
goto discard;
/* Trace packet contents, if enabled */
if (tunnel->debug & L2TP_MSG_DATA) {
length = min(32u, skb->len);
if (!pskb_may_pull(skb, length))
goto discard;
printk(KERN_DEBUG "%s: ip recv: ", tunnel->name);
offset = 0;
do {
printk(" %02X", ptr[offset]);
} while (++offset < length);
printk("\n");
}
l2tp_recv_common(session, skb, ptr, optr, 0, skb->len, tunnel->recv_payload_hook);
return 0;
pass_up:
/* Get the tunnel_id from the L2TP header */
if (!pskb_may_pull(skb, 12))
goto discard;
if ((skb->data[0] & 0xc0) != 0xc0)
goto discard;
tunnel_id = ntohl(*(__be32 *) &skb->data[4]);
tunnel = l2tp_tunnel_find(&init_net, tunnel_id);
if (tunnel != NULL)
sk = tunnel->sock;
else {
struct iphdr *iph = (struct iphdr *) skb_network_header(skb);
read_lock_bh(&l2tp_ip_lock);
sk = __l2tp_ip_bind_lookup(&init_net, iph->daddr, 0, tunnel_id);
read_unlock_bh(&l2tp_ip_lock);
}
if (sk == NULL)
goto discard;
sock_hold(sk);
if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
goto discard_put;
nf_reset(skb);
return sk_receive_skb(sk, skb, 1);
discard_put:
sock_put(sk);
discard:
kfree_skb(skb);
return 0;
}
| 0 |
[
"CWE-362"
] |
linux-2.6
|
f6d8bd051c391c1c0458a30b2a7abcd939329259
| 152,353,417,572,424,280,000,000,000,000,000,000,000 | 97 |
inet: add RCU protection to inet->opt
We lack proper synchronization to manipulate inet->opt ip_options
The problem is that ip_make_skb() calls ip_setup_cork() and
ip_setup_cork() possibly makes a copy of ipc->opt (struct ip_options),
without any protection against another thread manipulating inet->opt.
Another thread can change inet->opt pointer and free old one under us.
Use RCU to protect inet->opt (changed to inet->inet_opt).
Instead of handling atomic refcounts, just copy ip_options when
necessary, to avoid cache line dirtying.
We can't insert an rcu_head in struct ip_options since it's included in
skb->cb[], so this patch is large because I had to introduce a new
ip_options_rcu structure.
Signed-off-by: Eric Dumazet <[email protected]>
Cc: Herbert Xu <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
char *to = addr;
if (unlikely(i->type & ITER_PIPE)) {
WARN_ON(1);
return 0;
}
iterate_and_advance(i, bytes, v,
__copy_from_user_nocache((to += v.iov_len) - v.iov_len,
v.iov_base, v.iov_len),
memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
v.bv_offset, v.bv_len),
memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
)
return bytes;
}
| 0 |
[
"CWE-200"
] |
linux
|
b9dc6f65bc5e232d1c05fe34b5daadc7e8bbf1fb
| 267,331,339,239,807,500,000,000,000,000,000,000,000 | 17 |
fix a fencepost error in pipe_advance()
The logics in pipe_advance() used to release all buffers past the new
position failed in cases when the number of buffers to release was equal
to pipe->buffers. If that happened, none of them had been released,
leaving pipe full. Worse, it was trivial to trigger and we end up with
pipe full of uninitialized pages. IOW, it's an infoleak.
Cc: [email protected] # v4.9
Reported-by: "Alan J. Wylie" <[email protected]>
Tested-by: "Alan J. Wylie" <[email protected]>
Signed-off-by: Al Viro <[email protected]>
|
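The fencepost case described above is the classic ring-buffer situation where "release N buffers" silently releases none when N equals the ring capacity. The sketch below counts down explicitly instead of comparing wrapped indices; it models the shape of the bug rather than the real pipe_buffer code.

#include <stdio.h>
#include <stdlib.h>

#define RING_SIZE 4             /* like pipe->buffers */

struct ring {
    char *bufs[RING_SIZE];
    unsigned int curbuf;        /* index of the oldest buffer */
    unsigned int nrbufs;        /* number of occupied slots */
};

/* Release exactly 'count' buffers starting from the oldest one.
 * Counting down avoids the "start index == end index" ambiguity that
 * made an index-comparison loop a no-op when count == RING_SIZE. */
static void ring_release(struct ring *r, unsigned int count)
{
    while (count-- && r->nrbufs) {
        unsigned int idx = r->curbuf;
        free(r->bufs[idx]);
        r->bufs[idx] = NULL;
        r->curbuf = (idx + 1) % RING_SIZE;
        r->nrbufs--;
    }
}

int main(void)
{
    struct ring r = { .curbuf = 0, .nrbufs = RING_SIZE };
    for (unsigned int i = 0; i < RING_SIZE; i++)
        r.bufs[i] = malloc(16);

    ring_release(&r, RING_SIZE);             /* the full-ring boundary case */
    printf("buffers left: %u\n", r.nrbufs);  /* 0, nothing leaked or retained */
    return 0;
}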
static inline int mailimf_comment_parse(const char * message, size_t length,
size_t * indx)
{
size_t cur_token;
int r;
cur_token = * indx;
r = mailimf_oparenth_parse(message, length, &cur_token);
if (r != MAILIMF_NO_ERROR)
return r;
while (1) {
r = mailimf_comment_fws_ccontent_parse(message, length, &cur_token);
if (r != MAILIMF_NO_ERROR) {
if (r == MAILIMF_ERROR_PARSE)
break;
else
return r;
}
}
r = mailimf_fws_parse(message, length, &cur_token);
if ((r != MAILIMF_NO_ERROR) && (r != MAILIMF_ERROR_PARSE))
return r;
r = mailimf_cparenth_parse(message, length, &cur_token);
if (r != MAILIMF_NO_ERROR)
return r;
* indx = cur_token;
return MAILIMF_NO_ERROR;
}
| 0 |
[
"CWE-476"
] |
libetpan
|
1fe8fbc032ccda1db9af66d93016b49c16c1f22d
| 171,394,647,398,385,060,000,000,000,000,000,000,000 | 34 |
Fixed crash #274
|
flow_wildcards_clear_non_packet_fields(struct flow_wildcards *wc)
{
/* Update this function whenever struct flow changes. */
BUILD_ASSERT_DECL(FLOW_WC_SEQ == 42);
memset(&wc->masks.metadata, 0, sizeof wc->masks.metadata);
memset(&wc->masks.regs, 0, sizeof wc->masks.regs);
wc->masks.actset_output = 0;
wc->masks.conj_id = 0;
}
| 0 |
[
"CWE-400"
] |
ovs
|
79349cbab0b2a755140eedb91833ad2760520a83
| 187,712,718,223,452,530,000,000,000,000,000,000,000 | 10 |
flow: Support extra padding length.
Although not required, padding can be optionally added until
the packet length is MTU bytes. A packet with extra padding
currently fails sanity checks.
Vulnerability: CVE-2020-35498
Fixes: fa8d9001a624 ("miniflow_extract: Properly handle small IP packets.")
Reported-by: Joakim Hindersson <[email protected]>
Acked-by: Ilya Maximets <[email protected]>
Signed-off-by: Flavio Leitner <[email protected]>
Signed-off-by: Ilya Maximets <[email protected]>
|
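The fix above relaxes the sanity check so frames padded beyond the header-declared lengths are still parsed: the captured length must be at least, not exactly, what the headers claim, and the parser then trusts the declared length and ignores the trailing padding. A hedged sketch of that check with simplified fields (not OVS's miniflow code):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ETH_HLEN 14

/* 'frame_len' is what arrived on the wire (possibly padded to the MTU),
 * 'ip_tot_len' is the total length field from the IPv4 header. */
static bool ipv4_len_ok(size_t frame_len, uint16_t ip_tot_len)
{
    if (frame_len < ETH_HLEN + (size_t)ip_tot_len)
        return false;                 /* truncated packet: reject */
    /* Extra trailing bytes are just padding; parse ip_tot_len bytes
     * and ignore the rest instead of rejecting the frame. */
    return true;
}

int main(void)
{
    printf("exact:     %d\n", ipv4_len_ok(14 + 40, 40));  /* 1 */
    printf("padded:    %d\n", ipv4_len_ok(60, 28));       /* 1: padded small packet */
    printf("truncated: %d\n", ipv4_len_ok(14 + 20, 40));  /* 0 */
    return 0;
}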
void Downstream::inspect_http2_request() {
if (req_.method == HTTP_CONNECT) {
req_.upgrade_request = true;
}
}
| 0 |
[] |
nghttp2
|
319d5ab1c6d916b6b8a0d85b2ae3f01b3ad04f2c
| 285,085,725,122,495,960,000,000,000,000,000,000,000 | 5 |
nghttpx: Fix request stall
Fix request stall if backend connection is reused and buffer is full.
|
redirecting(void)
{
return redir_fd != NULL || *p_vfile != NUL
#ifdef FEAT_EVAL
|| redir_reg || redir_vname || redir_execute
#endif
;
}
| 0 |
[
"CWE-416"
] |
vim
|
9f1a39a5d1cd7989ada2d1cb32f97d84360e050f
| 261,700,512,011,238,600,000,000,000,000,000,000,000 | 8 |
patch 8.2.4040: keeping track of allocated lines is too complicated
Problem: Keeping track of allocated lines in user functions is too
complicated.
Solution: Instead of freeing individual lines keep them all until the end.
|
mono_image_module_basic_init (MonoReflectionModuleBuilder *moduleb)
{
MonoDynamicImage *image = moduleb->dynamic_image;
MonoReflectionAssemblyBuilder *ab = moduleb->assemblyb;
if (!image) {
MonoError error;
int module_count;
MonoImage **new_modules;
MonoImage *ass;
char *name, *fqname;
/*
* FIXME: we already created an image in mono_image_basic_init (), but
* we don't know which module it belongs to, since that is only
* determined at assembly save time.
*/
/*image = (MonoDynamicImage*)ab->dynamic_assembly->assembly.image; */
name = mono_string_to_utf8 (ab->name);
fqname = mono_string_to_utf8_checked (moduleb->module.fqname, &error);
if (!mono_error_ok (&error)) {
g_free (name);
mono_error_raise_exception (&error);
}
image = create_dynamic_mono_image (ab->dynamic_assembly, name, fqname);
moduleb->module.image = &image->image;
moduleb->dynamic_image = image;
register_module (mono_object_domain (moduleb), moduleb, image);
/* register the module with the assembly */
ass = ab->dynamic_assembly->assembly.image;
module_count = ass->module_count;
new_modules = g_new0 (MonoImage *, module_count + 1);
if (ass->modules)
memcpy (new_modules, ass->modules, module_count * sizeof (MonoImage *));
new_modules [module_count] = &image->image;
mono_image_addref (&image->image);
g_free (ass->modules);
ass->modules = new_modules;
ass->module_count ++;
}
}
| 0 |
[
"CWE-20"
] |
mono
|
4905ef1130feb26c3150b28b97e4a96752e0d399
| 88,505,800,025,411,300,000,000,000,000,000,000,000 | 43 |
Handle invalid instantiation of generic methods.
* verify.c: Add new function to internal verifier API to check
method instantiations.
* reflection.c (mono_reflection_bind_generic_method_parameters):
Check the instantiation before returning it.
Fixes #655847
|
static int phar_zip_changed_apply_int(phar_entry_info *entry, void *arg) /* {{{ */
{
phar_zip_file_header local;
phar_zip_unix3 perms;
phar_zip_central_dir_file central;
struct _phar_zip_pass *p;
php_uint32 newcrc32;
zend_off_t offset;
int not_really_modified = 0;
p = (struct _phar_zip_pass*) arg;
if (entry->is_mounted) {
return ZEND_HASH_APPLY_KEEP;
}
if (entry->is_deleted) {
if (entry->fp_refcount <= 0) {
return ZEND_HASH_APPLY_REMOVE;
} else {
/* we can't delete this in-memory until it is closed */
return ZEND_HASH_APPLY_KEEP;
}
}
phar_add_virtual_dirs(entry->phar, entry->filename, entry->filename_len);
memset(&local, 0, sizeof(local));
memset(¢ral, 0, sizeof(central));
memset(&perms, 0, sizeof(perms));
strncpy(local.signature, "PK\3\4", 4);
strncpy(central.signature, "PK\1\2", 4);
PHAR_SET_16(central.extra_len, sizeof(perms));
PHAR_SET_16(local.extra_len, sizeof(perms));
perms.tag[0] = 'n';
perms.tag[1] = 'u';
PHAR_SET_16(perms.size, sizeof(perms) - 4);
PHAR_SET_16(perms.perms, entry->flags & PHAR_ENT_PERM_MASK);
{
php_uint32 crc = (php_uint32) ~0;
CRC32(crc, perms.perms[0]);
CRC32(crc, perms.perms[1]);
PHAR_SET_32(perms.crc32, ~crc);
}
if (entry->flags & PHAR_ENT_COMPRESSED_GZ) {
PHAR_SET_16(central.compressed, PHAR_ZIP_COMP_DEFLATE);
PHAR_SET_16(local.compressed, PHAR_ZIP_COMP_DEFLATE);
}
if (entry->flags & PHAR_ENT_COMPRESSED_BZ2) {
PHAR_SET_16(central.compressed, PHAR_ZIP_COMP_BZIP2);
PHAR_SET_16(local.compressed, PHAR_ZIP_COMP_BZIP2);
}
/* do not use PHAR_GET_16 on either field of the next line */
phar_zip_u2d_time(entry->timestamp, local.timestamp, local.datestamp);
memcpy(central.timestamp, local.timestamp, sizeof(local.timestamp));
memcpy(central.datestamp, local.datestamp, sizeof(local.datestamp));
PHAR_SET_16(central.filename_len, entry->filename_len + (entry->is_dir ? 1 : 0));
PHAR_SET_16(local.filename_len, entry->filename_len + (entry->is_dir ? 1 : 0));
PHAR_SET_32(central.offset, php_stream_tell(p->filefp));
/* do extra field for perms later */
if (entry->is_modified) {
php_uint32 loc;
php_stream_filter *filter;
php_stream *efp;
if (entry->is_dir) {
entry->is_modified = 0;
if (entry->fp_type == PHAR_MOD && entry->fp != entry->phar->fp && entry->fp != entry->phar->ufp) {
php_stream_close(entry->fp);
entry->fp = NULL;
entry->fp_type = PHAR_FP;
}
goto continue_dir;
}
if (FAILURE == phar_open_entry_fp(entry, p->error, 0)) {
spprintf(p->error, 0, "unable to open file contents of file \"%s\" in zip-based phar \"%s\"", entry->filename, entry->phar->fname);
return ZEND_HASH_APPLY_STOP;
}
/* we can be modified and already be compressed, such as when chmod() is executed */
if (entry->flags & PHAR_ENT_COMPRESSION_MASK && (entry->old_flags == entry->flags || !entry->old_flags)) {
not_really_modified = 1;
goto is_compressed;
}
if (-1 == phar_seek_efp(entry, 0, SEEK_SET, 0, 0)) {
spprintf(p->error, 0, "unable to seek to start of file \"%s\" to zip-based phar \"%s\"", entry->filename, entry->phar->fname);
return ZEND_HASH_APPLY_STOP;
}
efp = phar_get_efp(entry, 0);
newcrc32 = ~0;
for (loc = 0;loc < entry->uncompressed_filesize; ++loc) {
CRC32(newcrc32, php_stream_getc(efp));
}
entry->crc32 = ~newcrc32;
PHAR_SET_32(central.uncompsize, entry->uncompressed_filesize);
PHAR_SET_32(local.uncompsize, entry->uncompressed_filesize);
if (!(entry->flags & PHAR_ENT_COMPRESSION_MASK)) {
/* not compressed */
entry->compressed_filesize = entry->uncompressed_filesize;
PHAR_SET_32(central.compsize, entry->uncompressed_filesize);
PHAR_SET_32(local.compsize, entry->uncompressed_filesize);
goto not_compressed;
}
filter = php_stream_filter_create(phar_compress_filter(entry, 0), NULL, 0);
if (!filter) {
if (entry->flags & PHAR_ENT_COMPRESSED_GZ) {
spprintf(p->error, 0, "unable to gzip compress file \"%s\" to zip-based phar \"%s\"", entry->filename, entry->phar->fname);
} else {
spprintf(p->error, 0, "unable to bzip2 compress file \"%s\" to zip-based phar \"%s\"", entry->filename, entry->phar->fname);
}
return ZEND_HASH_APPLY_STOP;
}
/* create new file that holds the compressed version */
/* work around inability to specify freedom in write and strictness
in read count */
entry->cfp = php_stream_fopen_tmpfile();
if (!entry->cfp) {
spprintf(p->error, 0, "unable to create temporary file for file \"%s\" while creating zip-based phar \"%s\"", entry->filename, entry->phar->fname);
return ZEND_HASH_APPLY_STOP;
}
php_stream_flush(efp);
if (-1 == phar_seek_efp(entry, 0, SEEK_SET, 0, 0)) {
spprintf(p->error, 0, "unable to seek to start of file \"%s\" to zip-based phar \"%s\"", entry->filename, entry->phar->fname);
return ZEND_HASH_APPLY_STOP;
}
php_stream_filter_append((&entry->cfp->writefilters), filter);
if (SUCCESS != php_stream_copy_to_stream_ex(efp, entry->cfp, entry->uncompressed_filesize, NULL)) {
spprintf(p->error, 0, "unable to copy compressed file contents of file \"%s\" while creating new phar \"%s\"", entry->filename, entry->phar->fname);
return ZEND_HASH_APPLY_STOP;
}
php_stream_filter_flush(filter, 1);
php_stream_flush(entry->cfp);
php_stream_filter_remove(filter, 1);
php_stream_seek(entry->cfp, 0, SEEK_END);
entry->compressed_filesize = (php_uint32) php_stream_tell(entry->cfp);
PHAR_SET_32(central.compsize, entry->compressed_filesize);
PHAR_SET_32(local.compsize, entry->compressed_filesize);
/* generate crc on compressed file */
php_stream_rewind(entry->cfp);
entry->old_flags = entry->flags;
entry->is_modified = 1;
} else {
is_compressed:
PHAR_SET_32(central.uncompsize, entry->uncompressed_filesize);
PHAR_SET_32(local.uncompsize, entry->uncompressed_filesize);
PHAR_SET_32(central.compsize, entry->compressed_filesize);
PHAR_SET_32(local.compsize, entry->compressed_filesize);
if (p->old) {
if (-1 == php_stream_seek(p->old, entry->offset_abs, SEEK_SET)) {
spprintf(p->error, 0, "unable to seek to start of file \"%s\" while creating zip-based phar \"%s\"", entry->filename, entry->phar->fname);
return ZEND_HASH_APPLY_STOP;
}
}
}
not_compressed:
PHAR_SET_32(central.crc32, entry->crc32);
PHAR_SET_32(local.crc32, entry->crc32);
continue_dir:
/* set file metadata */
if (Z_TYPE(entry->metadata) != IS_UNDEF) {
php_serialize_data_t metadata_hash;
if (entry->metadata_str.s) {
smart_str_free(&entry->metadata_str);
}
entry->metadata_str.s = NULL;
PHP_VAR_SERIALIZE_INIT(metadata_hash);
php_var_serialize(&entry->metadata_str, &entry->metadata, &metadata_hash);
PHP_VAR_SERIALIZE_DESTROY(metadata_hash);
PHAR_SET_16(central.comment_len, ZSTR_LEN(entry->metadata_str.s));
}
entry->header_offset = php_stream_tell(p->filefp);
offset = entry->header_offset + sizeof(local) + entry->filename_len + (entry->is_dir ? 1 : 0) + sizeof(perms);
if (sizeof(local) != php_stream_write(p->filefp, (char *)&local, sizeof(local))) {
spprintf(p->error, 0, "unable to write local file header of file \"%s\" to zip-based phar \"%s\"", entry->filename, entry->phar->fname);
return ZEND_HASH_APPLY_STOP;
}
if (sizeof(central) != php_stream_write(p->centralfp, (char *)¢ral, sizeof(central))) {
spprintf(p->error, 0, "unable to write central directory entry for file \"%s\" while creating zip-based phar \"%s\"", entry->filename, entry->phar->fname);
return ZEND_HASH_APPLY_STOP;
}
if (entry->is_dir) {
if (entry->filename_len != php_stream_write(p->filefp, entry->filename, entry->filename_len)) {
spprintf(p->error, 0, "unable to write filename to local directory entry for directory \"%s\" while creating zip-based phar \"%s\"", entry->filename, entry->phar->fname);
return ZEND_HASH_APPLY_STOP;
}
if (1 != php_stream_write(p->filefp, "/", 1)) {
spprintf(p->error, 0, "unable to write filename to local directory entry for directory \"%s\" while creating zip-based phar \"%s\"", entry->filename, entry->phar->fname);
return ZEND_HASH_APPLY_STOP;
}
if (entry->filename_len != php_stream_write(p->centralfp, entry->filename, entry->filename_len)) {
spprintf(p->error, 0, "unable to write filename to central directory entry for directory \"%s\" while creating zip-based phar \"%s\"", entry->filename, entry->phar->fname);
return ZEND_HASH_APPLY_STOP;
}
if (1 != php_stream_write(p->centralfp, "/", 1)) {
spprintf(p->error, 0, "unable to write filename to central directory entry for directory \"%s\" while creating zip-based phar \"%s\"", entry->filename, entry->phar->fname);
return ZEND_HASH_APPLY_STOP;
}
} else {
if (entry->filename_len != php_stream_write(p->filefp, entry->filename, entry->filename_len)) {
spprintf(p->error, 0, "unable to write filename to local directory entry for file \"%s\" while creating zip-based phar \"%s\"", entry->filename, entry->phar->fname);
return ZEND_HASH_APPLY_STOP;
}
if (entry->filename_len != php_stream_write(p->centralfp, entry->filename, entry->filename_len)) {
spprintf(p->error, 0, "unable to write filename to central directory entry for file \"%s\" while creating zip-based phar \"%s\"", entry->filename, entry->phar->fname);
return ZEND_HASH_APPLY_STOP;
}
}
if (sizeof(perms) != php_stream_write(p->filefp, (char *)&perms, sizeof(perms))) {
spprintf(p->error, 0, "unable to write local extra permissions file header of file \"%s\" to zip-based phar \"%s\"", entry->filename, entry->phar->fname);
return ZEND_HASH_APPLY_STOP;
}
if (sizeof(perms) != php_stream_write(p->centralfp, (char *)&perms, sizeof(perms))) {
spprintf(p->error, 0, "unable to write central extra permissions file header of file \"%s\" to zip-based phar \"%s\"", entry->filename, entry->phar->fname);
return ZEND_HASH_APPLY_STOP;
}
if (!not_really_modified && entry->is_modified) {
if (entry->cfp) {
if (SUCCESS != php_stream_copy_to_stream_ex(entry->cfp, p->filefp, entry->compressed_filesize, NULL)) {
spprintf(p->error, 0, "unable to write compressed contents of file \"%s\" in zip-based phar \"%s\"", entry->filename, entry->phar->fname);
return ZEND_HASH_APPLY_STOP;
}
php_stream_close(entry->cfp);
entry->cfp = NULL;
} else {
if (FAILURE == phar_open_entry_fp(entry, p->error, 0)) {
return ZEND_HASH_APPLY_STOP;
}
phar_seek_efp(entry, 0, SEEK_SET, 0, 0);
if (SUCCESS != php_stream_copy_to_stream_ex(phar_get_efp(entry, 0), p->filefp, entry->uncompressed_filesize, NULL)) {
spprintf(p->error, 0, "unable to write contents of file \"%s\" in zip-based phar \"%s\"", entry->filename, entry->phar->fname);
return ZEND_HASH_APPLY_STOP;
}
}
if (entry->fp_type == PHAR_MOD && entry->fp != entry->phar->fp && entry->fp != entry->phar->ufp && entry->fp_refcount == 0) {
php_stream_close(entry->fp);
}
entry->is_modified = 0;
} else {
entry->is_modified = 0;
if (entry->fp_refcount) {
/* open file pointers refer to this fp, do not free the stream */
switch (entry->fp_type) {
case PHAR_FP:
p->free_fp = 0;
break;
case PHAR_UFP:
p->free_ufp = 0;
default:
break;
}
}
if (!entry->is_dir && entry->compressed_filesize && SUCCESS != php_stream_copy_to_stream_ex(p->old, p->filefp, entry->compressed_filesize, NULL)) {
spprintf(p->error, 0, "unable to copy contents of file \"%s\" while creating zip-based phar \"%s\"", entry->filename, entry->phar->fname);
return ZEND_HASH_APPLY_STOP;
}
}
entry->fp = NULL;
entry->offset = entry->offset_abs = offset;
entry->fp_type = PHAR_FP;
if (entry->metadata_str.s) {
if (ZSTR_LEN(entry->metadata_str.s) != php_stream_write(p->centralfp, ZSTR_VAL(entry->metadata_str.s), ZSTR_LEN(entry->metadata_str.s))) {
spprintf(p->error, 0, "unable to write metadata as file comment for file \"%s\" while creating zip-based phar \"%s\"", entry->filename, entry->phar->fname);
smart_str_free(&entry->metadata_str);
return ZEND_HASH_APPLY_STOP;
}
smart_str_free(&entry->metadata_str);
}
return ZEND_HASH_APPLY_KEEP;
}
| 0 |
[
"CWE-119",
"CWE-787"
] |
php-src
|
0bfb970f43acd1e81d11be1154805f86655f15d5
| 128,490,555,986,428,400,000,000,000,000,000,000,000 | 308 |
Fix bug #72928 - Out of bound when verify signature of zip phar in phar_parse_zipfile
(cherry picked from commit 19484ab77466f99c78fc0e677f7e03da0584d6a2)
|
static void worker_leave_idle(struct worker *worker)
{
struct worker_pool *pool = worker->pool;
if (WARN_ON_ONCE(!(worker->flags & WORKER_IDLE)))
return;
worker_clr_flags(worker, WORKER_IDLE);
pool->nr_idle--;
list_del_init(&worker->entry);
}
| 0 |
[
"CWE-200"
] |
tip
|
dfb4357da6ddbdf57d583ba64361c9d792b0e0b1
| 258,860,403,838,402,100,000,000,000,000,000,000,000 | 10 |
time: Remove CONFIG_TIMER_STATS
Currently CONFIG_TIMER_STATS exposes process information across namespaces:
kernel/time/timer_list.c print_timer():
SEQ_printf(m, ", %s/%d", tmp, timer->start_pid);
/proc/timer_list:
#11: <0000000000000000>, hrtimer_wakeup, S:01, do_nanosleep, cron/2570
Given that the tracer can give the same information, this patch entirely
removes CONFIG_TIMER_STATS.
Suggested-by: Thomas Gleixner <[email protected]>
Signed-off-by: Kees Cook <[email protected]>
Acked-by: John Stultz <[email protected]>
Cc: Nicolas Pitre <[email protected]>
Cc: [email protected]
Cc: Lai Jiangshan <[email protected]>
Cc: Shuah Khan <[email protected]>
Cc: Xing Gao <[email protected]>
Cc: Jonathan Corbet <[email protected]>
Cc: Jessica Frazelle <[email protected]>
Cc: [email protected]
Cc: Nicolas Iooss <[email protected]>
Cc: "Paul E. McKenney" <[email protected]>
Cc: Petr Mladek <[email protected]>
Cc: Richard Cochran <[email protected]>
Cc: Tejun Heo <[email protected]>
Cc: Michal Marek <[email protected]>
Cc: Josh Poimboeuf <[email protected]>
Cc: Dmitry Vyukov <[email protected]>
Cc: Oleg Nesterov <[email protected]>
Cc: "Eric W. Biederman" <[email protected]>
Cc: Olof Johansson <[email protected]>
Cc: Andrew Morton <[email protected]>
Cc: [email protected]
Cc: Arjan van de Ven <[email protected]>
Link: http://lkml.kernel.org/r/20170208192659.GA32582@beast
Signed-off-by: Thomas Gleixner <[email protected]>
|
unsigned int avpriv_toupper4(unsigned int x)
{
return av_toupper(x & 0xFF) +
(av_toupper((x >> 8) & 0xFF) << 8) +
(av_toupper((x >> 16) & 0xFF) << 16) +
(av_toupper((x >> 24) & 0xFF) << 24);
}
| 0 |
[
"CWE-703"
] |
FFmpeg
|
e5c7229999182ad1cef13b9eca050dba7a5a08da
| 74,235,188,386,619,930,000,000,000,000,000,000,000 | 7 |
avcodec/utils: set AVFrame format unconditional
Fixes inconsistency and out of array accesses
Fixes: 10cdd7e63e7f66e3e66273939e0863dd-asan_heap-oob_1a4ff32_7078_cov_4056274555_mov_h264_aac__mp4box_frag.mp4
Found-by: Mateusz "j00ru" Jurczyk and Gynvael Coldwind
Signed-off-by: Michael Niedermayer <[email protected]>
|
SpoolssDeletePrinter_q(tvbuff_t *tvb, int offset,
packet_info *pinfo, proto_tree *tree,
dcerpc_info *di, guint8 *drep _U_)
{
/* Parse packet */
offset = dissect_nt_policy_hnd(
tvb, offset, pinfo, tree, di, drep, hf_hnd, NULL, NULL,
FALSE, FALSE);
return offset;
}
| 0 |
[
"CWE-399"
] |
wireshark
|
b4d16b4495b732888e12baf5b8a7e9bf2665e22b
| 294,256,275,614,520,400,000,000,000,000,000,000,000 | 12 |
SPOOLSS: Try to avoid an infinite loop.
Use tvb_reported_length_remaining in dissect_spoolss_uint16uni. Make
sure our offset always increments in dissect_spoolss_keybuffer.
Change-Id: I7017c9685bb2fa27161d80a03b8fca4ef630e793
Reviewed-on: https://code.wireshark.org/review/14687
Reviewed-by: Gerald Combs <[email protected]>
Petri-Dish: Gerald Combs <[email protected]>
Tested-by: Petri Dish Buildbot <[email protected]>
Reviewed-by: Michael Mann <[email protected]>
|
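The infinite-loop fix described above boils down to guaranteeing forward progress in the dissection loop. A minimal C sketch of that pattern (hypothetical record layout and function names, not the actual Wireshark patch):

/* Length-prefixed record: one length byte followed by that many payload bytes.
 * Returns bytes consumed, or 0 when the record cannot be parsed. */
static int parse_one_record(const unsigned char *p, int remaining)
{
    if (remaining < 1)
        return 0;
    int need = 1 + p[0];
    return (need <= remaining) ? need : 0;
}

/* The loop terminates only because every iteration either advances the offset
 * or bails out; a parser that can return 0 must never be allowed to spin. */
static int parse_records(const unsigned char *buf, int buf_len)
{
    int offset = 0;
    while (offset < buf_len) {
        int consumed = parse_one_record(buf + offset, buf_len - offset);
        if (consumed <= 0)
            return -1;
        offset += consumed;
    }
    return offset;
}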
static int nbd_negotiate_send_rep_len(QIOChannel *ioc, uint32_t type,
uint32_t opt, uint32_t len, Error **errp)
{
uint64_t magic;
trace_nbd_negotiate_send_rep_len(opt, nbd_opt_lookup(opt),
type, nbd_rep_lookup(type), len);
assert(len < NBD_MAX_BUFFER_SIZE);
magic = cpu_to_be64(NBD_REP_MAGIC);
if (nbd_write(ioc, &magic, sizeof(magic), errp) < 0) {
error_prepend(errp, "write failed (rep magic): ");
return -EINVAL;
}
opt = cpu_to_be32(opt);
if (nbd_write(ioc, &opt, sizeof(opt), errp) < 0) {
error_prepend(errp, "write failed (rep opt): ");
return -EINVAL;
}
type = cpu_to_be32(type);
if (nbd_write(ioc, &type, sizeof(type), errp) < 0) {
error_prepend(errp, "write failed (rep type): ");
return -EINVAL;
}
len = cpu_to_be32(len);
if (nbd_write(ioc, &len, sizeof(len), errp) < 0) {
error_prepend(errp, "write failed (rep data length): ");
return -EINVAL;
}
return 0;
}
| 0 |
[] |
qemu
|
f37708f6b8e0bef0dd85c6aad7fc2062071f8227
| 119,488,033,203,799,380,000,000,000,000,000,000,000 | 34 |
nbd: Implement NBD_OPT_GO on server
NBD_OPT_EXPORT_NAME is lousy: per the NBD protocol, any failure
requires us to close the connection rather than report an error.
Therefore, upstream NBD recently added NBD_OPT_GO as the improved
version of the option that does what we want [1], along with
NBD_OPT_INFO that returns the same information but does not
transition to transmission phase.
[1] https://github.com/NetworkBlockDevice/nbd/blob/extension-info/doc/proto.md
This is a first cut at the information types, and only passes the
same information already available through NBD_OPT_LIST and
NBD_OPT_EXPORT_NAME; items like NBD_INFO_BLOCK_SIZE (and thus any
use of NBD_REP_ERR_BLOCK_SIZE_REQD) are intentionally left for
later patches.
Signed-off-by: Eric Blake <[email protected]>
Message-Id: <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
|
BOOL glyph_cache_fragment_put(rdpGlyphCache* glyphCache, UINT32 index, UINT32 size,
const void* fragment)
{
void* prevFragment;
void* copy;
if (index > 255)
{
WLog_ERR(TAG, "invalid glyph cache fragment index: %" PRIu32 "", index);
return FALSE;
}
copy = malloc(size);
if (!copy)
return FALSE;
WLog_Print(glyphCache->log, WLOG_DEBUG,
"GlyphCacheFragmentPut: index: %" PRIu32 " size: %" PRIu32 "", index, size);
CopyMemory(copy, fragment, size);
prevFragment = glyphCache->fragCache.entries[index].fragment;
glyphCache->fragCache.entries[index].fragment = copy;
glyphCache->fragCache.entries[index].size = size;
free(prevFragment);
return TRUE;
}
| 0 |
[
"CWE-703",
"CWE-125"
] |
FreeRDP
|
c0fd449ec0870b050d350d6d844b1ea6dad4bc7d
| 184,650,086,189,964,760,000,000,000,000,000,000,000 | 26 |
Fixed Out-of-bound read in glyph_cache_put
CVE-2020-11098 thanks to @antonio-morales for finding this.
|
template<typename t>
CImg<Tfloat> get_blur_guided(const CImg<t>& guide, const float radius, const float regularization) const {
if (!is_sameXYZ(guide))
throw CImgArgumentException(_cimg_instance
"blur_guided(): Invalid size for specified guide image (%u,%u,%u,%u,%p).",
cimg_instance,
guide._width,guide._height,guide._depth,guide._spectrum,guide._data);
if (is_empty() || !radius) return *this;
const int _radius = radius>=0?(int)radius:(int)(-radius*cimg::max(_width,_height,_depth)/100);
float _regularization = regularization;
if (regularization<0) {
T edge_min, edge_max = guide.max_min(edge_min);
if (edge_min==edge_max) return *this;
_regularization = -regularization*(edge_max - edge_min)/100;
}
_regularization = std::max(_regularization,0.01f);
const unsigned int psize = (unsigned int)(1 + 2*_radius);
const CImg<uintT> N = CImg<uintT>(_width,_height,_depth,1,1)._blur_guided(psize);
CImg<Tfloat>
mean_I = CImg<Tfloat>(guide,false)._blur_guided(psize).div(N),
mean_p = CImg<Tfloat>(*this,false)._blur_guided(psize).div(N),
cov_Ip = CImg<Tfloat>(*this,false).mul(guide)._blur_guided(psize).div(N)-=mean_p.get_mul(mean_I),
var_I = CImg<Tfloat>(guide,false).sqr()._blur_guided(psize).div(N)-=mean_I.get_sqr(),
&a = cov_Ip.div(var_I+=_regularization),
&b = mean_p-=a.get_mul(mean_I);
a._blur_guided(psize).div(N);
b._blur_guided(psize).div(N);
return a.mul(guide)+=b;
| 0 |
[
"CWE-125"
] |
CImg
|
10af1e8c1ad2a58a0a3342a856bae63e8f257abb
| 41,252,114,661,356,247,000,000,000,000,000,000,000 | 28 |
Fix other issues in 'CImg<T>::load_bmp()'.
|
static int decl_die(int status, const char *phase, request_rec *r)
{
if (status == DECLINED) {
ap_log_rerror(APLOG_MARK, APLOG_CRIT, 0, r, APLOGNO(00025)
"configuration error: couldn't %s: %s", phase, r->uri);
return HTTP_INTERNAL_SERVER_ERROR;
}
else {
ap_log_rerror(APLOG_MARK, APLOG_TRACE3, 0, r,
"auth phase '%s' gave status %d: %s", phase,
status, r->uri);
return status;
}
}
| 0 |
[] |
httpd
|
eb986059aa5aa0b6c1d52714ea83e3dd758afdd1
| 54,335,311,935,781,180,000,000,000,000,000,000,000 | 14 |
Merge r1889036 from trunk:
legacy default slash-matching behavior w/ 'MergeSlashes OFF'
Submitted By: Ruediger Pluem
Reviewed By: covener, rpluem, ylavic
git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1889038 13f79535-47bb-0310-9956-ffa450edef68
|
static void* swoole_unserialize_arr(void *buffer, zval *zvalue, uint32_t nNumOfElements, long flag)
{
//Initialize zend array
zend_ulong h, nIndex, max_index = 0;
uint32_t size = cp_zend_hash_check_size(nNumOfElements);
CHECK_STEP;
if (!size)
{
return NULL;
}
if (!buffer)
{
php_error_docref(NULL TSRMLS_CC, E_NOTICE, "illegal unserialize data");
return NULL;
}
ZVAL_NEW_ARR(zvalue);
//Initialize buckets
zend_array *ht = Z_ARR_P(zvalue);
ht->nTableSize = size;
ht->nNumUsed = nNumOfElements;
ht->nNumOfElements = nNumOfElements;
ht->nNextFreeElement = 0;
#ifdef HASH_FLAG_APPLY_PROTECTION
ht->u.flags = HASH_FLAG_APPLY_PROTECTION;
#endif
ht->nTableMask = -(ht->nTableSize);
ht->pDestructor = ZVAL_PTR_DTOR;
GC_SET_REFCOUNT(ht, 1);
GC_TYPE_INFO(ht) = IS_ARRAY;
// if (ht->nNumUsed)
//{
// void *arData = ecalloc(1, len);
HT_SET_DATA_ADDR(ht, emalloc(HT_SIZE(ht)));
ht->u.flags |= HASH_FLAG_INITIALIZED;
int ht_hash_size = HT_HASH_SIZE((ht)->nTableMask);
if (ht_hash_size <= 0)
{
php_error_docref(NULL TSRMLS_CC, E_NOTICE, "illegal unserialize data");
return NULL;
}
HT_HASH_RESET(ht);
//}
int idx;
Bucket *p;
for(idx = 0; idx < nNumOfElements; idx++)
{
if (!buffer)
{
php_error_docref(NULL TSRMLS_CC, E_NOTICE, "illegal array unserialize data");
return NULL;
}
SBucketType type = *((SBucketType*) buffer);
buffer += sizeof (SBucketType);
p = ht->arData + idx;
/* Initialize key */
if (type.key_type == KEY_TYPE_STRING)
{
size_t key_len;
if (type.key_len == 3)
{//read the same mem
void *str_pool_addr = get_pack_string_len_addr(&buffer, &key_len);
p->key = zend_string_init((char*) str_pool_addr, key_len, 0);
h = zend_inline_hash_func((char*) str_pool_addr, key_len);
p->key->h = p->h = h;
}
else
{//move step
if (type.key_len == 1)
{
key_len = *((zend_uchar*) buffer);
buffer += sizeof (zend_uchar);
}
else if (type.key_len == 2)
{
key_len = *((unsigned short*) buffer);
buffer += sizeof (unsigned short);
}
else
{
key_len = *((size_t*) buffer);
buffer += sizeof (size_t);
}
CHECK_STEP;
p->key = zend_string_init((char*) buffer, key_len, 0);
// h = zend_inline_hash_func((char*) buffer, key_len);
h = zend_inline_hash_func((char*) buffer, key_len);
buffer += key_len;
p->key->h = p->h = h;
}
}
else
{
if (type.key_len == 0)
{
//means pack
h = p->h = idx;
p->key = NULL;
max_index = p->h + 1;
// ht->u.flags |= HASH_FLAG_PACKED;
}
else
{
if (type.key_len == 1)
{
h = *((zend_uchar*) buffer);
buffer += sizeof (zend_uchar);
}
else if (type.key_len == 2)
{
h = *((unsigned short*) buffer);
buffer += sizeof (unsigned short);
}
else
{
h = *((zend_ulong*) buffer);
buffer += sizeof (zend_ulong);
}
p->h = h;
p->key = NULL;
if (h >= max_index)
{
max_index = h + 1;
}
}
}
/* Initialize hash */
nIndex = h | ht->nTableMask;
Z_NEXT(p->val) = HT_HASH(ht, nIndex);
HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(idx);
/* Initialize data type */
p->val.u1.v.type = type.data_type;
Z_TYPE_FLAGS(p->val) = 0;
/* Initialize data */
if (type.data_type == IS_STRING)
{
size_t data_len;
if (type.data_len == 3)
{//read the same mem
void *str_pool_addr = get_pack_string_len_addr(&buffer, &data_len);
p->val.value.str = zend_string_init((char*) str_pool_addr, data_len, 0);
}
else
{
if (type.data_len == 1)
{
data_len = *((zend_uchar*) buffer);
buffer += sizeof (zend_uchar);
}
else if (type.data_len == 2)
{
data_len = *((unsigned short*) buffer);
buffer += sizeof (unsigned short);
}
else
{
data_len = *((size_t*) buffer);
buffer += sizeof (size_t);
}
CHECK_STEP;
p->val.value.str = zend_string_init((char*) buffer, data_len, 0);
buffer += data_len;
}
Z_TYPE_INFO(p->val) = IS_STRING_EX;
}
else if (type.data_type == IS_ARRAY)
{
uint32_t num = 0;
buffer = get_array_real_len(buffer, type.data_len, &num);
buffer = swoole_unserialize_arr(buffer, &p->val, num, flag);
}
else if (type.data_type == IS_LONG)
{
buffer = swoole_unserialize_long(buffer, &p->val, type);
}
else if (type.data_type == IS_DOUBLE)
{
p->val.value = *((zend_value*) buffer);
buffer += sizeof (zend_value);
}
else if (type.data_type == IS_UNDEF)
{
buffer = swoole_unserialize_object(buffer, &p->val, type.data_len, NULL, flag);
Z_TYPE_INFO(p->val) = IS_OBJECT_EX;
}
}
ht->nNextFreeElement = max_index;
CHECK_STEP;
return buffer;
}
| 0 |
[
"CWE-200",
"CWE-502"
] |
swoole-src
|
4cdbce5d9bf2fe596bb6acd7d6611f9e8c253a76
| 22,562,421,770,826,120,000,000,000,000,000,000,000 | 197 |
add buffer end check
|
setup_decode_folder(struct archive_read *a, struct _7z_folder *folder,
int header)
{
struct _7zip *zip = (struct _7zip *)a->format->data;
const struct _7z_coder *coder1, *coder2;
const char *cname = (header)?"archive header":"file content";
unsigned i;
int r, found_bcj2 = 0;
/*
* Release the memory which the previous folder used for BCJ2.
*/
for (i = 0; i < 3; i++) {
if (zip->sub_stream_buff[i] != NULL)
free(zip->sub_stream_buff[i]);
zip->sub_stream_buff[i] = NULL;
}
/*
* Initialize a stream reader.
*/
zip->pack_stream_remaining = (unsigned)folder->numPackedStreams;
zip->pack_stream_index = (unsigned)folder->packIndex;
zip->folder_outbytes_remaining = folder_uncompressed_size(folder);
zip->uncompressed_buffer_bytes_remaining = 0;
/*
* Check coder types.
*/
for (i = 0; i < folder->numCoders; i++) {
switch(folder->coders[i].codec) {
case _7Z_CRYPTO_MAIN_ZIP:
case _7Z_CRYPTO_RAR_29:
case _7Z_CRYPTO_AES_256_SHA_256: {
/* For entry that is associated with this folder, mark
it as encrypted (data+metadata). */
zip->has_encrypted_entries = 1;
if (a->entry) {
archive_entry_set_is_data_encrypted(a->entry, 1);
archive_entry_set_is_metadata_encrypted(a->entry, 1);
}
archive_set_error(&(a->archive),
ARCHIVE_ERRNO_MISC,
"The %s is encrypted, "
"but currently not supported", cname);
return (ARCHIVE_FATAL);
}
case _7Z_X86_BCJ2: {
found_bcj2++;
break;
}
}
}
/* Now that we've checked for encryption, if there were still no
* encrypted entries found we can say for sure that there are none.
*/
if (zip->has_encrypted_entries == ARCHIVE_READ_FORMAT_ENCRYPTION_DONT_KNOW) {
zip->has_encrypted_entries = 0;
}
if ((folder->numCoders > 2 && !found_bcj2) || found_bcj2 > 1) {
archive_set_error(&(a->archive),
ARCHIVE_ERRNO_MISC,
"The %s is encoded with many filters, "
"but currently not supported", cname);
return (ARCHIVE_FATAL);
}
coder1 = &(folder->coders[0]);
if (folder->numCoders == 2)
coder2 = &(folder->coders[1]);
else
coder2 = NULL;
if (found_bcj2) {
/*
* Preparation to decode BCJ2.
* Decoding BCJ2 requires four sources. Those are at least,
* as far as I know, two types of the storage form.
*/
const struct _7z_coder *fc = folder->coders;
static const struct _7z_coder coder_copy = {0, 1, 1, 0, NULL};
const struct _7z_coder *scoder[3] =
{&coder_copy, &coder_copy, &coder_copy};
const void *buff;
ssize_t bytes;
unsigned char *b[3] = {NULL, NULL, NULL};
uint64_t sunpack[3] ={-1, -1, -1};
size_t s[3] = {0, 0, 0};
int idx[3] = {0, 1, 2};
if (folder->numCoders == 4 && fc[3].codec == _7Z_X86_BCJ2 &&
folder->numInStreams == 7 && folder->numOutStreams == 4 &&
zip->pack_stream_remaining == 4) {
/* Source type 1 made by 7zr or 7z with -m options. */
if (folder->bindPairs[0].inIndex == 5) {
/* The form made by 7zr */
idx[0] = 1; idx[1] = 2; idx[2] = 0;
scoder[1] = &(fc[1]);
scoder[2] = &(fc[0]);
sunpack[1] = folder->unPackSize[1];
sunpack[2] = folder->unPackSize[0];
coder1 = &(fc[2]);
} else {
/*
* NOTE: Some patterns do not work.
* work:
* 7z a -m0=BCJ2 -m1=COPY -m2=COPY
* -m3=(any)
* 7z a -m0=BCJ2 -m1=COPY -m2=(any)
* -m3=COPY
* 7z a -m0=BCJ2 -m1=(any) -m2=COPY
* -m3=COPY
* not work:
* other patterns.
*
* We have to handle this like `pipe' or
* our libarchive7s filter frame work,
* decoding the BCJ2 main stream sequentially,
* m3 -> m2 -> m1 -> BCJ2.
*
*/
if (fc[0].codec == _7Z_COPY &&
fc[1].codec == _7Z_COPY)
coder1 = &(folder->coders[2]);
else if (fc[0].codec == _7Z_COPY &&
fc[2].codec == _7Z_COPY)
coder1 = &(folder->coders[1]);
else if (fc[1].codec == _7Z_COPY &&
fc[2].codec == _7Z_COPY)
coder1 = &(folder->coders[0]);
else {
archive_set_error(&(a->archive),
ARCHIVE_ERRNO_MISC,
"Unsupported form of "
"BCJ2 streams");
return (ARCHIVE_FATAL);
}
}
coder2 = &(fc[3]);
zip->main_stream_bytes_remaining =
(size_t)folder->unPackSize[2];
} else if (coder2 != NULL && coder2->codec == _7Z_X86_BCJ2 &&
zip->pack_stream_remaining == 4 &&
folder->numInStreams == 5 && folder->numOutStreams == 2) {
/* Source type 0 made by 7z */
zip->main_stream_bytes_remaining =
(size_t)folder->unPackSize[0];
} else {
/* We got an unexpected form. */
archive_set_error(&(a->archive),
ARCHIVE_ERRNO_MISC,
"Unsupported form of BCJ2 streams");
return (ARCHIVE_FATAL);
}
/* Skip the main stream at this time. */
if ((r = seek_pack(a)) < 0)
return (r);
zip->pack_stream_bytes_unconsumed =
(size_t)zip->pack_stream_inbytes_remaining;
read_consume(a);
/* Read following three sub streams. */
for (i = 0; i < 3; i++) {
const struct _7z_coder *coder = scoder[i];
if ((r = seek_pack(a)) < 0) {
free(b[0]); free(b[1]); free(b[2]);
return (r);
}
if (sunpack[i] == (uint64_t)-1)
zip->folder_outbytes_remaining =
zip->pack_stream_inbytes_remaining;
else
zip->folder_outbytes_remaining = sunpack[i];
r = init_decompression(a, zip, coder, NULL);
if (r != ARCHIVE_OK) {
free(b[0]); free(b[1]); free(b[2]);
return (ARCHIVE_FATAL);
}
/* Allocate memory for the decoded data of a sub
* stream. */
b[i] = malloc((size_t)zip->folder_outbytes_remaining);
if (b[i] == NULL) {
free(b[0]); free(b[1]); free(b[2]);
archive_set_error(&a->archive, ENOMEM,
"No memory for 7-Zip decompression");
return (ARCHIVE_FATAL);
}
/* Extract a sub stream. */
while (zip->pack_stream_inbytes_remaining > 0) {
r = (int)extract_pack_stream(a, 0);
if (r < 0) {
free(b[0]); free(b[1]); free(b[2]);
return (r);
}
bytes = get_uncompressed_data(a, &buff,
zip->uncompressed_buffer_bytes_remaining,
0);
if (bytes < 0) {
free(b[0]); free(b[1]); free(b[2]);
return ((int)bytes);
}
memcpy(b[i]+s[i], buff, bytes);
s[i] += bytes;
if (zip->pack_stream_bytes_unconsumed)
read_consume(a);
}
}
/* Set the sub streams to the right place. */
for (i = 0; i < 3; i++) {
zip->sub_stream_buff[i] = b[idx[i]];
zip->sub_stream_size[i] = s[idx[i]];
zip->sub_stream_bytes_remaining[i] = s[idx[i]];
}
/* Allocate memory used for decoded main stream bytes. */
if (zip->tmp_stream_buff == NULL) {
zip->tmp_stream_buff_size = 32 * 1024;
zip->tmp_stream_buff =
malloc(zip->tmp_stream_buff_size);
if (zip->tmp_stream_buff == NULL) {
archive_set_error(&a->archive, ENOMEM,
"No memory for 7-Zip decompression");
return (ARCHIVE_FATAL);
}
}
zip->tmp_stream_bytes_avail = 0;
zip->tmp_stream_bytes_remaining = 0;
zip->odd_bcj_size = 0;
zip->bcj2_outPos = 0;
/*
* Reset a stream reader in order to read the main stream
* of BCJ2.
*/
zip->pack_stream_remaining = 1;
zip->pack_stream_index = (unsigned)folder->packIndex;
zip->folder_outbytes_remaining =
folder_uncompressed_size(folder);
zip->uncompressed_buffer_bytes_remaining = 0;
}
/*
* Initialize the decompressor for the new folder's pack streams.
*/
r = init_decompression(a, zip, coder1, coder2);
if (r != ARCHIVE_OK)
return (ARCHIVE_FATAL);
return (ARCHIVE_OK);
}
| 0 |
[
"CWE-125"
] |
libarchive
|
65a23f5dbee4497064e9bb467f81138a62b0dae1
| 114,349,700,104,049,400,000,000,000,000,000,000,000 | 256 |
7zip: fix crash when parsing certain archives
Fuzzing with CRCs disabled revealed that a call to get_uncompressed_data()
would sometimes fail to return at least 'minimum' bytes. This can cause
the crc32() invocation in header_bytes to read off into invalid memory.
A specially crafted archive can use this to cause a crash.
An ASAN trace is below, but ASAN is not required - an uninstrumented
binary will also crash.
==7719==ERROR: AddressSanitizer: SEGV on unknown address 0x631000040000 (pc 0x7fbdb3b3ec1d bp 0x7ffe77a51310 sp 0x7ffe77a51150 T0)
==7719==The signal is caused by a READ memory access.
#0 0x7fbdb3b3ec1c in crc32_z (/lib/x86_64-linux-gnu/libz.so.1+0x2c1c)
#1 0x84f5eb in header_bytes (/tmp/libarchive/bsdtar+0x84f5eb)
#2 0x856156 in read_Header (/tmp/libarchive/bsdtar+0x856156)
#3 0x84e134 in slurp_central_directory (/tmp/libarchive/bsdtar+0x84e134)
#4 0x849690 in archive_read_format_7zip_read_header (/tmp/libarchive/bsdtar+0x849690)
#5 0x5713b7 in _archive_read_next_header2 (/tmp/libarchive/bsdtar+0x5713b7)
#6 0x570e63 in _archive_read_next_header (/tmp/libarchive/bsdtar+0x570e63)
#7 0x6f08bd in archive_read_next_header (/tmp/libarchive/bsdtar+0x6f08bd)
#8 0x52373f in read_archive (/tmp/libarchive/bsdtar+0x52373f)
#9 0x5257be in tar_mode_x (/tmp/libarchive/bsdtar+0x5257be)
#10 0x51daeb in main (/tmp/libarchive/bsdtar+0x51daeb)
#11 0x7fbdb27cab96 in __libc_start_main /build/glibc-OTsEL5/glibc-2.27/csu/../csu/libc-start.c:310
#12 0x41dd09 in _start (/tmp/libarchive/bsdtar+0x41dd09)
This was primarily done with afl and FairFuzz. Some early corpus entries
may have been generated by qsym.
|
respip_set_apply_cfg(struct respip_set* set, char* const* tagname, int num_tags,
struct config_strbytelist* respip_tags,
struct config_str2list* respip_actions,
struct config_str2list* respip_data)
{
struct config_strbytelist* p;
struct config_str2list* pa;
struct config_str2list* pd;
set->tagname = tagname;
set->num_tags = num_tags;
p = respip_tags;
while(p) {
struct config_strbytelist* np = p->next;
log_assert(p->str && p->str2);
if(!respip_tag_cfg(set, p->str, p->str2, p->str2len)) {
config_del_strbytelist(p);
return 0;
}
free(p->str);
free(p->str2);
free(p);
p = np;
}
pa = respip_actions;
while(pa) {
struct config_str2list* np = pa->next;
log_assert(pa->str && pa->str2);
if(!respip_action_cfg(set, pa->str, pa->str2)) {
config_deldblstrlist(pa);
return 0;
}
free(pa->str);
free(pa->str2);
free(pa);
pa = np;
}
pd = respip_data;
while(pd) {
struct config_str2list* np = pd->next;
log_assert(pd->str && pd->str2);
if(!respip_data_cfg(set, pd->str, pd->str2)) {
config_deldblstrlist(pd);
return 0;
}
free(pd->str);
free(pd->str2);
free(pd);
pd = np;
}
addr_tree_init_parents(&set->ip_tree);
return 1;
}
| 0 |
[
"CWE-190"
] |
unbound
|
02080f6b180232f43b77f403d0c038e9360a460f
| 115,266,168,500,356,930,000,000,000,000,000,000,000 | 58 |
- Fix Integer Overflows in Size Calculations,
reported by X41 D-Sec.
|
static void spl_ptr_heap_zval_dtor(spl_ptr_heap_element elem TSRMLS_DC) { /* {{{ */
if (elem) {
zval_ptr_dtor((zval **)&elem);
}
}
| 0 |
[] |
php-src
|
1cbd25ca15383394ffa9ee8601c5de4c0f2f90e1
| 305,699,366,144,439,380,000,000,000,000,000,000,000 | 5 |
Fix bug #69737 - Segfault when SplMinHeap::compare produces fatal error
|
static int peer_recv_callback(rdpTransport* transport, wStream* s, void* extra)
{
freerdp_peer* client = (freerdp_peer*) extra;
rdpRdp* rdp = client->context->rdp;
switch (rdp->state)
{
case CONNECTION_STATE_INITIAL:
if (!rdp_server_accept_nego(rdp, s))
return -1;
if (rdp->nego->selected_protocol & PROTOCOL_NLA)
{
sspi_CopyAuthIdentity(&client->identity, &(rdp->nego->transport->credssp->identity));
IFCALLRET(client->Logon, client->authenticated, client, &client->identity, TRUE);
credssp_free(rdp->nego->transport->credssp);
rdp->nego->transport->credssp = NULL;
}
else
{
IFCALLRET(client->Logon, client->authenticated, client, &client->identity, FALSE);
}
break;
case CONNECTION_STATE_NEGO:
if (!rdp_server_accept_mcs_connect_initial(rdp, s))
return -1;
break;
case CONNECTION_STATE_MCS_CONNECT:
if (!rdp_server_accept_mcs_erect_domain_request(rdp, s))
return -1;
break;
case CONNECTION_STATE_MCS_ERECT_DOMAIN:
if (!rdp_server_accept_mcs_attach_user_request(rdp, s))
return -1;
break;
case CONNECTION_STATE_MCS_ATTACH_USER:
if (!rdp_server_accept_mcs_channel_join_request(rdp, s))
return -1;
break;
case CONNECTION_STATE_MCS_CHANNEL_JOIN:
if (rdp->settings->DisableEncryption)
{
if (!rdp_server_accept_client_keys(rdp, s))
return -1;
break;
}
rdp->state = CONNECTION_STATE_ESTABLISH_KEYS;
/* FALLTHROUGH */
case CONNECTION_STATE_ESTABLISH_KEYS:
if (!rdp_server_accept_client_info(rdp, s))
return -1;
IFCALL(client->Capabilities, client);
if (!rdp_send_demand_active(rdp))
return -1;
break;
case CONNECTION_STATE_LICENSE:
if (!rdp_server_accept_confirm_active(rdp, s))
{
/**
* During reactivation sequence the client might sent some input or channel data
* before receiving the Deactivate All PDU. We need to process them as usual.
*/
Stream_SetPosition(s, 0);
return peer_recv_pdu(client, s);
}
break;
case CONNECTION_STATE_ACTIVE:
if (peer_recv_pdu(client, s) < 0)
return -1;
break;
default:
fprintf(stderr, "Invalid state %d\n", rdp->state);
return -1;
}
return 0;
}
| 0 |
[
"CWE-476",
"CWE-125"
] |
FreeRDP
|
0773bb9303d24473fe1185d85a424dfe159aff53
| 183,404,690,899,410,860,000,000,000,000,000,000,000 | 89 |
nla: invalidate sec handle after creation
If sec pointer isn't invalidated after creation it is not possible
to check if the upper and lower pointers are valid.
This fixes a segfault in the server part if the client disconnects before
the authentication was finished.
|
option_nosocket_cb (const gchar *option_name,
const gchar *value,
gpointer data,
GError **error)
{
FlatpakContext *context = data;
FlatpakContextSockets socket;
socket = flatpak_context_socket_from_string (value, error);
if (socket == 0)
return FALSE;
if (socket == FLATPAK_CONTEXT_SOCKET_FALLBACK_X11)
socket |= FLATPAK_CONTEXT_SOCKET_X11;
flatpak_context_remove_sockets (context, socket);
return TRUE;
}
| 0 |
[
"CWE-94",
"CWE-74"
] |
flatpak
|
6e5ae7a109cdfa9735ea7ccbd8cb79f9e8d3ae8b
| 278,538,823,811,116,800,000,000,000,000,000,000,000 | 19 |
context: Add --env-fd option
This allows environment variables to be added to the context without
making their values visible to processes running under a different uid,
which might be significant if the variable's value is a token or some
other secret value.
Signed-off-by: Simon McVittie <[email protected]>
Part-of: https://github.com/flatpak/flatpak/security/advisories/GHSA-4ppf-fxf6-vxg2
|
gdk_pixbuf_loader_class_init (GdkPixbufLoaderClass *class)
{
GObjectClass *object_class;
object_class = (GObjectClass *) class;
parent_class = g_type_class_peek_parent (class);
object_class->finalize = gdk_pixbuf_loader_finalize;
pixbuf_loader_signals[AREA_PREPARED] =
g_signal_newc ("area_prepared",
G_TYPE_FROM_CLASS (object_class),
G_SIGNAL_RUN_LAST,
G_STRUCT_OFFSET (GdkPixbufLoaderClass, area_prepared),
NULL, NULL,
gdk_pixbuf_marshal_VOID__VOID,
G_TYPE_NONE, 0);
pixbuf_loader_signals[AREA_UPDATED] =
g_signal_newc ("area_updated",
G_TYPE_FROM_CLASS (object_class),
G_SIGNAL_RUN_LAST,
G_STRUCT_OFFSET (GdkPixbufLoaderClass, area_updated),
NULL, NULL,
gdk_pixbuf_marshal_VOID__INT_INT_INT_INT,
G_TYPE_NONE, 4,
G_TYPE_INT,
G_TYPE_INT,
G_TYPE_INT,
G_TYPE_INT);
pixbuf_loader_signals[CLOSED] =
g_signal_newc ("closed",
G_TYPE_FROM_CLASS (object_class),
G_SIGNAL_RUN_LAST,
G_STRUCT_OFFSET (GdkPixbufLoaderClass, closed),
NULL, NULL,
gdk_pixbuf_marshal_VOID__VOID,
G_TYPE_NONE, 0);
}
| 0 |
[
"CWE-20"
] |
gdk-pixbuf
|
3bac204e0d0241a0d68586ece7099e6acf0e9bea
| 294,571,316,790,631,400,000,000,000,000,000,000,000 | 41 |
Initial stab at getting the focus code to work.
Fri Jun 1 18:54:47 2001 Jonathan Blandford <[email protected]>
* gtk/gtktreeview.c: (gtk_tree_view_focus): Initial stab at
getting the focus code to work.
(gtk_tree_view_class_init): Add a bunch of keybindings.
* gtk/gtktreeviewcolumn.c
(gtk_tree_view_column_set_cell_data_func):
s/GtkCellDataFunc/GtkTreeCellDataFunc.
(_gtk_tree_view_column_set_tree_view): Use "notify::model" instead
of "properties_changed" to help justify the death of the latter
signal. (-:
* tests/testtreefocus.c (main): Let some columns be focussable to
test focus better.
|
static inline struct os_desc *to_os_desc(struct config_item *item)
{
return container_of(to_config_group(item), struct os_desc, group);
}
| 0 |
[
"CWE-125"
] |
linux
|
15753588bcd4bbffae1cca33c8ced5722477fe1f
| 126,876,186,784,076,040,000,000,000,000,000,000,000 | 4 |
USB: gadget: fix illegal array access in binding with UDC
FuzzUSB (a variant of syzkaller) found an illegal array access
using an incorrect index while binding a gadget with UDC.
Reference: https://www.spinics.net/lists/linux-usb/msg194331.html
This bug occurs when a size variable used for a buffer
is misused to access its strcpy-ed buffer.
Given a buffer along with its size variable (taken from user input),
from which, a new buffer is created using kstrdup().
Due to the original buffer containing 0 value in the middle,
the size of the kstrdup-ed buffer becomes smaller than that of the original.
So accessing the kstrdup-ed buffer with the same size variable
triggers memory access violation.
The fix makes sure no zero value in the buffer,
by comparing the strlen() of the orignal buffer with the size variable,
so that the access to the kstrdup-ed buffer is safe.
BUG: KASAN: slab-out-of-bounds in gadget_dev_desc_UDC_store+0x1ba/0x200
drivers/usb/gadget/configfs.c:266
Read of size 1 at addr ffff88806a55dd7e by task syz-executor.0/17208
CPU: 2 PID: 17208 Comm: syz-executor.0 Not tainted 5.6.8 #1
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Bochs 01/01/2011
Call Trace:
__dump_stack lib/dump_stack.c:77 [inline]
dump_stack+0xce/0x128 lib/dump_stack.c:118
print_address_description.constprop.4+0x21/0x3c0 mm/kasan/report.c:374
__kasan_report+0x131/0x1b0 mm/kasan/report.c:506
kasan_report+0x12/0x20 mm/kasan/common.c:641
__asan_report_load1_noabort+0x14/0x20 mm/kasan/generic_report.c:132
gadget_dev_desc_UDC_store+0x1ba/0x200 drivers/usb/gadget/configfs.c:266
flush_write_buffer fs/configfs/file.c:251 [inline]
configfs_write_file+0x2f1/0x4c0 fs/configfs/file.c:283
__vfs_write+0x85/0x110 fs/read_write.c:494
vfs_write+0x1cd/0x510 fs/read_write.c:558
ksys_write+0x18a/0x220 fs/read_write.c:611
__do_sys_write fs/read_write.c:623 [inline]
__se_sys_write fs/read_write.c:620 [inline]
__x64_sys_write+0x73/0xb0 fs/read_write.c:620
do_syscall_64+0x9e/0x510 arch/x86/entry/common.c:294
entry_SYSCALL_64_after_hwframe+0x49/0xbe
Signed-off-by: Kyungtae Kim <[email protected]>
Reported-and-tested-by: Kyungtae Kim <[email protected]>
Cc: Felipe Balbi <[email protected]>
Cc: stable <[email protected]>
Link: https://lore.kernel.org/r/20200510054326.GA19198@pizza01
Signed-off-by: Greg Kroah-Hartman <[email protected]>
|
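The length check described in this commit message can be sketched in a few lines of portable C (a simplified, hypothetical helper, not the actual configfs patch):

#define _POSIX_C_SOURCE 200809L
#include <errno.h>
#include <stddef.h>
#include <string.h>

/* Duplicate a user-supplied buffer only if it contains no embedded NUL:
 * otherwise the copy is shorter than 'len', and later accesses that trust
 * 'len' would run past the end of the copy. */
static char *dup_without_embedded_nul(const char *buf, size_t len)
{
    if (strnlen(buf, len) != len) {
        errno = EINVAL;
        return NULL;
    }
    return strndup(buf, len);
}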
static void test_bug14845()
{
MYSQL_STMT *stmt;
int rc;
const ulong type= CURSOR_TYPE_READ_ONLY;
const char *query= "select count(*) from t1 where 1 = 0";
myheader("test_bug14845");
rc= mysql_query(mysql, "drop table if exists t1");
myquery(rc);
rc= mysql_query(mysql, "create table t1 (id int(11) default null, "
"name varchar(20) default null)"
"engine=MyISAM DEFAULT CHARSET=utf8");
myquery(rc);
rc= mysql_query(mysql, "insert into t1 values (1,'abc'),(2,'def')");
myquery(rc);
stmt= mysql_stmt_init(mysql);
rc= mysql_stmt_attr_set(stmt, STMT_ATTR_CURSOR_TYPE, (const void*) &type);
check_execute(stmt, rc);
rc= mysql_stmt_prepare(stmt, query, strlen(query));
check_execute(stmt, rc);
rc= mysql_stmt_execute(stmt);
check_execute(stmt, rc);
rc= mysql_stmt_fetch(stmt);
DIE_UNLESS(rc == 0);
rc= mysql_stmt_fetch(stmt);
DIE_UNLESS(rc == MYSQL_NO_DATA);
/* Cleanup */
mysql_stmt_close(stmt);
rc= mysql_query(mysql, "drop table t1");
myquery(rc);
}
| 0 |
[
"CWE-284",
"CWE-295"
] |
mysql-server
|
3bd5589e1a5a93f9c224badf983cd65c45215390
| 154,684,224,911,419,200,000,000,000,000,000,000,000 | 39 |
WL#6791 : Redefine client --ssl option to imply enforced encryption
# Changed the meaning of the --ssl=1 option of all client binaries
to mean force ssl, not try ssl and fail over to unencrypted
# Added a new MYSQL_OPT_SSL_ENFORCE mysql_options()
option to specify that an ssl connection is required.
# Added a new macro SSL_SET_OPTIONS() to the client
SSL handling headers that sets all the relevant SSL options at
once.
# Revamped all of the current native clients to use the new macro
# Removed some Windows line endings.
# Added proper handling of the new option into the ssl helper
headers.
# If SSL is mandatory assume that the media is secure enough
for the sha256 plugin to do unencrypted password exchange even
before establishing a connection.
# Set the default ssl cipher to DHE-RSA-AES256-SHA if none is
specified.
# updated test cases that require a non-default cipher to spawn
a mysql command line tool binary since mysqltest has no support
for specifying ciphers.
# updated the replication slave connection code to always enforce
SSL if any of the SSL config options is present.
# test cases added and updated.
# added a mysql_get_option() API to return mysql_options()
values. Used the new API inside the sha256 plugin.
# Fixed compilation warnings because of unused variables.
# Fixed test failures (mysql_ssl and bug13115401)
# Fixed whitespace issues.
# Fully implemented the mysql_get_option() function.
# Added a test case for mysql_get_option()
# fixed some trailing whitespace issues
# fixed some uint/int warnings in mysql_client_test.c
# removed shared memory option from non-windows get_options
tests
# moved MYSQL_OPT_LOCAL_INFILE to the uint options
|
void hns_nic_poll_controller(struct net_device *ndev)
{
struct hns_nic_priv *priv = netdev_priv(ndev);
unsigned long flags;
int i;
local_irq_save(flags);
for (i = 0; i < priv->ae_handle->q_num * 2; i++)
napi_schedule(&priv->ring_data[i].napi);
local_irq_restore(flags);
}
| 0 |
[
"CWE-416"
] |
linux
|
27463ad99f738ed93c7c8b3e2e5bc8c4853a2ff2
| 237,782,391,784,589,040,000,000,000,000,000,000,000 | 11 |
net: hns: Fix a skb used after free bug
skb maybe freed in hns_nic_net_xmit_hw() and return NETDEV_TX_OK,
which cause hns_nic_net_xmit to use a freed skb.
BUG: KASAN: use-after-free in hns_nic_net_xmit_hw+0x62c/0x940...
[17659.112635] alloc_debug_processing+0x18c/0x1a0
[17659.117208] __slab_alloc+0x52c/0x560
[17659.120909] kmem_cache_alloc_node+0xac/0x2c0
[17659.125309] __alloc_skb+0x6c/0x260
[17659.128837] tcp_send_ack+0x8c/0x280
[17659.132449] __tcp_ack_snd_check+0x9c/0xf0
[17659.136587] tcp_rcv_established+0x5a4/0xa70
[17659.140899] tcp_v4_do_rcv+0x27c/0x620
[17659.144687] tcp_prequeue_process+0x108/0x170
[17659.149085] tcp_recvmsg+0x940/0x1020
[17659.152787] inet_recvmsg+0x124/0x180
[17659.156488] sock_recvmsg+0x64/0x80
[17659.160012] SyS_recvfrom+0xd8/0x180
[17659.163626] __sys_trace_return+0x0/0x4
[17659.167506] INFO: Freed in kfree_skbmem+0xa0/0xb0 age=23 cpu=1 pid=13
[17659.174000] free_debug_processing+0x1d4/0x2c0
[17659.178486] __slab_free+0x240/0x390
[17659.182100] kmem_cache_free+0x24c/0x270
[17659.186062] kfree_skbmem+0xa0/0xb0
[17659.189587] __kfree_skb+0x28/0x40
[17659.193025] napi_gro_receive+0x168/0x1c0
[17659.197074] hns_nic_rx_up_pro+0x58/0x90
[17659.201038] hns_nic_rx_poll_one+0x518/0xbc0
[17659.205352] hns_nic_common_poll+0x94/0x140
[17659.209576] net_rx_action+0x458/0x5e0
[17659.213363] __do_softirq+0x1b8/0x480
[17659.217062] run_ksoftirqd+0x64/0x80
[17659.220679] smpboot_thread_fn+0x224/0x310
[17659.224821] kthread+0x150/0x170
[17659.228084] ret_from_fork+0x10/0x40
BUG: KASAN: use-after-free in hns_nic_net_xmit+0x8c/0xc0...
[17751.080490] __slab_alloc+0x52c/0x560
[17751.084188] kmem_cache_alloc+0x244/0x280
[17751.088238] __build_skb+0x40/0x150
[17751.091764] build_skb+0x28/0x100
[17751.095115] __alloc_rx_skb+0x94/0x150
[17751.098900] __napi_alloc_skb+0x34/0x90
[17751.102776] hns_nic_rx_poll_one+0x180/0xbc0
[17751.107097] hns_nic_common_poll+0x94/0x140
[17751.111333] net_rx_action+0x458/0x5e0
[17751.115123] __do_softirq+0x1b8/0x480
[17751.118823] run_ksoftirqd+0x64/0x80
[17751.122437] smpboot_thread_fn+0x224/0x310
[17751.126575] kthread+0x150/0x170
[17751.129838] ret_from_fork+0x10/0x40
[17751.133454] INFO: Freed in kfree_skbmem+0xa0/0xb0 age=19 cpu=7 pid=43
[17751.139951] free_debug_processing+0x1d4/0x2c0
[17751.144436] __slab_free+0x240/0x390
[17751.148051] kmem_cache_free+0x24c/0x270
[17751.152014] kfree_skbmem+0xa0/0xb0
[17751.155543] __kfree_skb+0x28/0x40
[17751.159022] napi_gro_receive+0x168/0x1c0
[17751.163074] hns_nic_rx_up_pro+0x58/0x90
[17751.167041] hns_nic_rx_poll_one+0x518/0xbc0
[17751.171358] hns_nic_common_poll+0x94/0x140
[17751.175585] net_rx_action+0x458/0x5e0
[17751.179373] __do_softirq+0x1b8/0x480
[17751.183076] run_ksoftirqd+0x64/0x80
[17751.186691] smpboot_thread_fn+0x224/0x310
[17751.190826] kthread+0x150/0x170
[17751.194093] ret_from_fork+0x10/0x40
Fixes: 13ac695e7ea1 ("net:hns: Add support of Hip06 SoC to the Hislicon Network Subsystem")
Signed-off-by: Yunsheng Lin <[email protected]>
Signed-off-by: lipeng <[email protected]>
Reported-by: Jun He <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
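The general rule behind this fix — read whatever you still need from a buffer before handing it to a routine that may free it — can be sketched as follows (hypothetical types and names, not the actual driver code):

#include <stdbool.h>
#include <stdlib.h>

struct packet {
    size_t len;
    unsigned char *data;
};

/* Transmit helper that consumes (frees) the packet when it accepts it. */
static bool xmit(struct packet *pkt)
{
    free(pkt->data);
    free(pkt);
    return true;
}

/* Copy everything that is still needed out of the packet *before* calling the
 * helper; after a successful xmit() the packet must not be dereferenced again. */
static int send_packet(struct packet *pkt, size_t *tx_bytes)
{
    size_t len = pkt->len;

    if (!xmit(pkt))
        return -1;

    *tx_bytes += len;
    return 0;
}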
AP_CORE_DECLARE_NONSTD(apr_status_t) ap_sub_req_output_filter(ap_filter_t *f,
apr_bucket_brigade *bb)
{
apr_bucket *e = APR_BRIGADE_LAST(bb);
if (APR_BUCKET_IS_EOS(e)) {
apr_bucket_delete(e);
}
if (!APR_BRIGADE_EMPTY(bb)) {
return ap_pass_brigade(f->next, bb);
}
return APR_SUCCESS;
}
| 0 |
[] |
httpd
|
eb986059aa5aa0b6c1d52714ea83e3dd758afdd1
| 256,484,658,391,758,350,000,000,000,000,000,000,000 | 15 |
Merge r1889036 from trunk:
legacy default slash-matching behavior w/ 'MergeSlashes OFF'
Submitted By: Ruediger Pluem
Reviewed By: covener, rpluem, ylavic
git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1889038 13f79535-47bb-0310-9956-ffa450edef68
|
static inline bool d_is_file(const struct dentry *dentry)
{
return d_is_reg(dentry) || d_is_special(dentry);
}
| 0 |
[
"CWE-284"
] |
linux
|
54d5ca871e72f2bb172ec9323497f01cd5091ec7
| 29,696,132,764,114,635,000,000,000,000,000,000,000 | 4 |
vfs: add vfs_select_inode() helper
Signed-off-by: Miklos Szeredi <[email protected]>
Cc: <[email protected]> # v4.2+
|
static int rds_loop_conn_connect(struct rds_connection *conn)
{
rds_connect_complete(conn);
return 0;
}
| 0 |
[] |
linux-2.6
|
6094628bfd94323fc1cea05ec2c6affd98c18f7f
| 199,486,694,022,085,170,000,000,000,000,000,000,000 | 5 |
rds: prevent BUG_ON triggering on congestion map updates
Recently had this bug halt reported to me:
kernel BUG at net/rds/send.c:329!
Oops: Exception in kernel mode, sig: 5 [#1]
SMP NR_CPUS=1024 NUMA pSeries
Modules linked in: rds sunrpc ipv6 dm_mirror dm_region_hash dm_log ibmveth sg
ext4 jbd2 mbcache sd_mod crc_t10dif ibmvscsic scsi_transport_srp scsi_tgt
dm_mod [last unloaded: scsi_wait_scan]
NIP: d000000003ca68f4 LR: d000000003ca67fc CTR: d000000003ca8770
REGS: c000000175cab980 TRAP: 0700 Not tainted (2.6.32-118.el6.ppc64)
MSR: 8000000000029032 <EE,ME,CE,IR,DR> CR: 44000022 XER: 00000000
TASK = c00000017586ec90[1896] 'krdsd' THREAD: c000000175ca8000 CPU: 0
GPR00: 0000000000000150 c000000175cabc00 d000000003cb7340 0000000000002030
GPR04: ffffffffffffffff 0000000000000030 0000000000000000 0000000000000030
GPR08: 0000000000000001 0000000000000001 c0000001756b1e30 0000000000010000
GPR12: d000000003caac90 c000000000fa2500 c0000001742b2858 c0000001742b2a00
GPR16: c0000001742b2a08 c0000001742b2820 0000000000000001 0000000000000001
GPR20: 0000000000000040 c0000001742b2814 c000000175cabc70 0800000000000000
GPR24: 0000000000000004 0200000000000000 0000000000000000 c0000001742b2860
GPR28: 0000000000000000 c0000001756b1c80 d000000003cb68e8 c0000001742b27b8
NIP [d000000003ca68f4] .rds_send_xmit+0x4c4/0x8a0 [rds]
LR [d000000003ca67fc] .rds_send_xmit+0x3cc/0x8a0 [rds]
Call Trace:
[c000000175cabc00] [d000000003ca67fc] .rds_send_xmit+0x3cc/0x8a0 [rds]
(unreliable)
[c000000175cabd30] [d000000003ca7e64] .rds_send_worker+0x54/0x100 [rds]
[c000000175cabdb0] [c0000000000b475c] .worker_thread+0x1dc/0x3c0
[c000000175cabed0] [c0000000000baa9c] .kthread+0xbc/0xd0
[c000000175cabf90] [c000000000032114] .kernel_thread+0x54/0x70
Instruction dump:
4bfffd50 60000000 60000000 39080001 935f004c f91f0040 41820024 813d017c
7d094a78 7d290074 7929d182 394a0020 <0b090000> 40e2ff68 4bffffa4 39200000
Kernel panic - not syncing: Fatal exception
Call Trace:
[c000000175cab560] [c000000000012e04] .show_stack+0x74/0x1c0 (unreliable)
[c000000175cab610] [c0000000005a365c] .panic+0x80/0x1b4
[c000000175cab6a0] [c00000000002fbcc] .die+0x21c/0x2a0
[c000000175cab750] [c000000000030000] ._exception+0x110/0x220
[c000000175cab910] [c000000000004b9c] program_check_common+0x11c/0x180
Signed-off-by: David S. Miller <[email protected]>
|
static int kvm_s390_get_machine_subfunc(struct kvm *kvm,
struct kvm_device_attr *attr)
{
if (copy_to_user((void __user *)attr->addr, &kvm_s390_available_subfunc,
sizeof(struct kvm_s390_vm_cpu_subfunc)))
return -EFAULT;
VM_EVENT(kvm, 3, "GET: host PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
((unsigned long *) &kvm_s390_available_subfunc.plo)[0],
((unsigned long *) &kvm_s390_available_subfunc.plo)[1],
((unsigned long *) &kvm_s390_available_subfunc.plo)[2],
((unsigned long *) &kvm_s390_available_subfunc.plo)[3]);
VM_EVENT(kvm, 3, "GET: host PTFF subfunc 0x%16.16lx.%16.16lx",
((unsigned long *) &kvm_s390_available_subfunc.ptff)[0],
((unsigned long *) &kvm_s390_available_subfunc.ptff)[1]);
VM_EVENT(kvm, 3, "GET: host KMAC subfunc 0x%16.16lx.%16.16lx",
((unsigned long *) &kvm_s390_available_subfunc.kmac)[0],
((unsigned long *) &kvm_s390_available_subfunc.kmac)[1]);
VM_EVENT(kvm, 3, "GET: host KMC subfunc 0x%16.16lx.%16.16lx",
((unsigned long *) &kvm_s390_available_subfunc.kmc)[0],
((unsigned long *) &kvm_s390_available_subfunc.kmc)[1]);
VM_EVENT(kvm, 3, "GET: host KM subfunc 0x%16.16lx.%16.16lx",
((unsigned long *) &kvm_s390_available_subfunc.km)[0],
((unsigned long *) &kvm_s390_available_subfunc.km)[1]);
VM_EVENT(kvm, 3, "GET: host KIMD subfunc 0x%16.16lx.%16.16lx",
((unsigned long *) &kvm_s390_available_subfunc.kimd)[0],
((unsigned long *) &kvm_s390_available_subfunc.kimd)[1]);
VM_EVENT(kvm, 3, "GET: host KLMD subfunc 0x%16.16lx.%16.16lx",
((unsigned long *) &kvm_s390_available_subfunc.klmd)[0],
((unsigned long *) &kvm_s390_available_subfunc.klmd)[1]);
VM_EVENT(kvm, 3, "GET: host PCKMO subfunc 0x%16.16lx.%16.16lx",
((unsigned long *) &kvm_s390_available_subfunc.pckmo)[0],
((unsigned long *) &kvm_s390_available_subfunc.pckmo)[1]);
VM_EVENT(kvm, 3, "GET: host KMCTR subfunc 0x%16.16lx.%16.16lx",
((unsigned long *) &kvm_s390_available_subfunc.kmctr)[0],
((unsigned long *) &kvm_s390_available_subfunc.kmctr)[1]);
VM_EVENT(kvm, 3, "GET: host KMF subfunc 0x%16.16lx.%16.16lx",
((unsigned long *) &kvm_s390_available_subfunc.kmf)[0],
((unsigned long *) &kvm_s390_available_subfunc.kmf)[1]);
VM_EVENT(kvm, 3, "GET: host KMO subfunc 0x%16.16lx.%16.16lx",
((unsigned long *) &kvm_s390_available_subfunc.kmo)[0],
((unsigned long *) &kvm_s390_available_subfunc.kmo)[1]);
VM_EVENT(kvm, 3, "GET: host PCC subfunc 0x%16.16lx.%16.16lx",
((unsigned long *) &kvm_s390_available_subfunc.pcc)[0],
((unsigned long *) &kvm_s390_available_subfunc.pcc)[1]);
VM_EVENT(kvm, 3, "GET: host PPNO subfunc 0x%16.16lx.%16.16lx",
((unsigned long *) &kvm_s390_available_subfunc.ppno)[0],
((unsigned long *) &kvm_s390_available_subfunc.ppno)[1]);
VM_EVENT(kvm, 3, "GET: host KMA subfunc 0x%16.16lx.%16.16lx",
((unsigned long *) &kvm_s390_available_subfunc.kma)[0],
((unsigned long *) &kvm_s390_available_subfunc.kma)[1]);
VM_EVENT(kvm, 3, "GET: host KDSA subfunc 0x%16.16lx.%16.16lx",
((unsigned long *) &kvm_s390_available_subfunc.kdsa)[0],
((unsigned long *) &kvm_s390_available_subfunc.kdsa)[1]);
VM_EVENT(kvm, 3, "GET: host SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
((unsigned long *) &kvm_s390_available_subfunc.sortl)[0],
((unsigned long *) &kvm_s390_available_subfunc.sortl)[1],
((unsigned long *) &kvm_s390_available_subfunc.sortl)[2],
((unsigned long *) &kvm_s390_available_subfunc.sortl)[3]);
VM_EVENT(kvm, 3, "GET: host DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[0],
((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[1],
((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[2],
((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[3]);
return 0;
}
| 0 |
[
"CWE-416"
] |
linux
|
0774a964ef561b7170d8d1b1bfe6f88002b6d219
| 222,607,716,446,035,100,000,000,000,000,000,000,000 | 67 |
KVM: Fix out of range accesses to memslots
Reset the LRU slot if it becomes invalid when deleting a memslot to fix
an out-of-bounds/use-after-free access when searching through memslots.
Explicitly check for there being no used slots in search_memslots(), and
in the caller of s390's approximation variant.
Fixes: 36947254e5f9 ("KVM: Dynamically size memslot array based on number of used slots")
Reported-by: Qian Cai <[email protected]>
Cc: Peter Xu <[email protected]>
Signed-off-by: Sean Christopherson <[email protected]>
Message-Id: <[email protected]>
Acked-by: Christian Borntraeger <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
|
ebews_populate_full_name (EBookBackendEws *bbews,
EContact *contact,
EEwsItem *item,
GCancellable *cancellable,
GError **error)
{
const EwsCompleteName *cn;
cn = e_ews_item_get_complete_name (item);
if (cn)
e_contact_set (contact, E_CONTACT_FULL_NAME, cn->full_name);
}
| 0 |
[
"CWE-295"
] |
evolution-ews
|
915226eca9454b8b3e5adb6f2fff9698451778de
| 193,916,919,595,314,730,000,000,000,000,000,000,000 | 12 |
I#27 - SSL Certificates are not validated
This depends on https://gitlab.gnome.org/GNOME/evolution-data-server/commit/6672b8236139bd6ef41ecb915f4c72e2a052dba5 too.
Closes https://gitlab.gnome.org/GNOME/evolution-ews/issues/27
|
void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi,
struct inode *inode, struct page *page,
nid_t ino, enum page_type type)
{
__submit_merged_write_cond(sbi, inode, page, ino, type, false);
}
| 0 |
[
"CWE-476"
] |
linux
|
4969c06a0d83c9c3dc50b8efcdc8eeedfce896f6
| 223,735,651,187,971,000,000,000,000,000,000,000,000 | 6 |
f2fs: support swap file w/ DIO
Signed-off-by: Jaegeuk Kim <[email protected]>
|
PHP_FUNCTION(radius_cvt_addr)
{
const void *data;
char *addr_dot;
int len;
struct in_addr addr;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s", &data, &len) == FAILURE) {
return;
}
addr = rad_cvt_addr(data);
addr_dot = inet_ntoa(addr);
RETURN_STRINGL(addr_dot, strlen(addr_dot), 1);
}
| 0 |
[
"CWE-119",
"CWE-787"
] |
php-radius
|
13c149b051f82b709e8d7cc32111e84b49d57234
| 260,164,971,639,891,470,000,000,000,000,000,000,000 | 15 |
Fix a security issue in radius_get_vendor_attr().
The underlying rad_get_vendor_attr() function assumed that it would always be
given valid VSA data. Indeed, the buffer length wasn't even passed in; the
assumption was that the length field within the VSA structure would be valid.
This could result in denial of service by providing a length that would be
beyond the memory limit, or potential arbitrary memory access by providing a
length greater than the actual data given.
rad_get_vendor_attr() has been changed to require the raw data length be
provided, and this is then used to check that the VSA is valid.
Conflicts:
radlib_vs.h
|
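A rough illustration of the validation this message describes, i.e. checking an attribute's claimed length against the raw buffer length before trusting it (the VSA layout and names here are assumptions, not the actual libradius code):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Hypothetical vendor-specific attribute: 4-byte vendor id, then a type byte
 * and a length byte that covers type + length + payload. */
struct vsa {
    uint32_t vendor;
    uint8_t  type;
    uint8_t  len;           /* as claimed by the packet */
    const void *payload;
    size_t   payload_len;
};

/* Validate the claimed length against the raw buffer length before using it. */
static int parse_vsa(const unsigned char *data, size_t raw_len, struct vsa *out)
{
    if (raw_len < 6)                        /* vendor id + type + length */
        return -1;
    memcpy(&out->vendor, data, 4);
    out->type = data[4];
    out->len  = data[5];
    if (out->len < 2 || (size_t)out->len > raw_len - 4)  /* must fit in buffer */
        return -1;
    out->payload     = data + 6;
    out->payload_len = out->len - 2;
    return 0;
}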
search_nonascii(const char *p, const char *e)
{
#if SIZEOF_VALUE == 8
# define NONASCII_MASK 0x8080808080808080LL
#elif SIZEOF_VALUE == 4
# define NONASCII_MASK 0x80808080UL
#endif
#ifdef NONASCII_MASK
if ((int)sizeof(VALUE) * 2 < e - p) {
const VALUE *s, *t;
const VALUE lowbits = sizeof(VALUE) - 1;
s = (const VALUE*)(~lowbits & ((VALUE)p + lowbits));
while (p < (const char *)s) {
if (!ISASCII(*p))
return p;
p++;
}
t = (const VALUE*)(~lowbits & (VALUE)e);
while (s < t) {
if (*s & NONASCII_MASK) {
t = s;
break;
}
s++;
}
p = (const char *)t;
}
#endif
while (p < e) {
if (!ISASCII(*p))
return p;
p++;
}
return NULL;
}
| 0 |
[
"CWE-119"
] |
ruby
|
1c2ef610358af33f9ded3086aa2d70aac03dcac5
| 18,460,502,217,723,588,000,000,000,000,000,000,000 | 35 |
* string.c (rb_str_justify): CVE-2009-4124.
Fixes a bug reported by
Emmanouel Kellinis <Emmanouel.Kellinis AT kpmg.co.uk>, KPMG London;
Patch by nobu.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@26038 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
|
cifs_parse_smb_version(char *value, struct smb_vol *vol)
{
substring_t args[MAX_OPT_ARGS];
switch (match_token(value, cifs_smb_version_tokens, args)) {
case Smb_1:
vol->ops = &smb1_operations;
vol->vals = &smb1_values;
break;
#ifdef CONFIG_CIFS_SMB2
case Smb_20:
vol->ops = &smb21_operations; /* currently identical with 2.1 */
vol->vals = &smb20_values;
break;
case Smb_21:
vol->ops = &smb21_operations;
vol->vals = &smb21_values;
break;
case Smb_30:
vol->ops = &smb30_operations;
vol->vals = &smb30_values;
break;
#endif
default:
cifs_dbg(VFS, "Unknown vers= option specified: %s\n", value);
return 1;
}
return 0;
}
| 0 |
[
"CWE-703",
"CWE-189"
] |
linux
|
1fc29bacedeabb278080e31bb9c1ecb49f143c3b
| 241,040,933,987,693,160,000,000,000,000,000,000,000 | 29 |
cifs: fix off-by-one bug in build_unc_path_to_root
commit 839db3d10a (cifs: fix up handling of prefixpath= option) changed
the code such that the vol->prepath no longer contained a leading
delimiter and then fixed up the places that accessed that field to
account for that change.
One spot in build_unc_path_to_root was missed however. When doing the
pointer addition on pos, that patch failed to account for the fact that
we had already incremented "pos" by one when adding the length of the
prepath. This caused a buffer overrun by one byte.
This patch fixes the problem by correcting the handling of "pos".
Cc: <[email protected]> # v3.8+
Reported-by: Marcus Moeller <[email protected]>
Reported-by: Ken Fallon <[email protected]>
Signed-off-by: Jeff Layton <[email protected]>
Signed-off-by: Steve French <[email protected]>
|
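The off-by-one class of bug described here usually disappears when the allocation size and the write cursor are derived from the same arithmetic. A small, hypothetical sketch (not the actual CIFS code):

#include <stdlib.h>
#include <string.h>

/* Join three path components into one freshly allocated string, keeping the
 * write cursor ('pos') and the allocation size in lockstep so they cannot
 * drift apart by a byte. */
static char *join_path(const char *unc, const char *prepath, const char *rel)
{
    size_t unc_len = strlen(unc);
    size_t pre_len = strlen(prepath);
    size_t rel_len = strlen(rel);
    /* unc + '/' + prepath + '/' + rel + NUL */
    size_t total = unc_len + 1 + pre_len + 1 + rel_len + 1;

    char *full = malloc(total);
    if (!full)
        return NULL;

    char *pos = full;
    memcpy(pos, unc, unc_len);      pos += unc_len;
    *pos++ = '/';
    memcpy(pos, prepath, pre_len);  pos += pre_len;
    *pos++ = '/';
    memcpy(pos, rel, rel_len);      pos += rel_len;
    *pos = '\0';
    return full;
}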
static void v4l_print_freq_band(const void *arg, bool write_only)
{
const struct v4l2_frequency_band *p = arg;
pr_cont("tuner=%u, type=%u, index=%u, capability=0x%x, rangelow=%u, rangehigh=%u, modulation=0x%x\n",
p->tuner, p->type, p->index,
p->capability, p->rangelow,
p->rangehigh, p->modulation);
}
| 0 |
[
"CWE-401"
] |
linux
|
fb18802a338b36f675a388fc03d2aa504a0d0899
| 232,047,819,040,746,430,000,000,000,000,000,000,000 | 9 |
media: v4l: ioctl: Fix memory leak in video_usercopy
When an IOCTL with argument size larger than 128 that also used array
arguments were handled, two memory allocations were made but alas, only
the latter one of them was released. This happened because there was only
a single local variable to hold such a temporary allocation.
Fix this by adding separate variables to hold the pointers to the
temporary allocations.
Reported-by: Arnd Bergmann <[email protected]>
Reported-by: [email protected]
Fixes: d14e6d76ebf7 ("[media] v4l: Add multi-planar ioctl handling code")
Cc: [email protected]
Signed-off-by: Sakari Ailus <[email protected]>
Acked-by: Arnd Bergmann <[email protected]>
Acked-by: Hans Verkuil <[email protected]>
Reviewed-by: Laurent Pinchart <[email protected]>
Signed-off-by: Mauro Carvalho Chehab <[email protected]>
|
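The leak pattern described in this message — one local pointer reused for two temporary allocations — is avoided by giving each allocation its own variable and freeing both on every exit path. A minimal, hypothetical sketch (not the actual V4L code):

#include <stdlib.h>

static int do_ioctl_copy(size_t hdr_size, size_t array_size)
{
    void *hdr_buf = malloc(hdr_size);
    void *array_buf = NULL;
    int err = -1;

    if (!hdr_buf)
        return -1;

    if (array_size) {
        array_buf = malloc(array_size);
        if (!array_buf)
            goto out;
    }

    /* ... use hdr_buf and array_buf ... */
    err = 0;
out:
    free(array_buf);    /* each temporary has its own pointer, so both are freed */
    free(hdr_buf);
    return err;
}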
e1000e_calc_per_desc_buf_size(E1000ECore *core)
{
int i;
core->rx_desc_buf_size = 0;
for (i = 0; i < ARRAY_SIZE(core->rxbuf_sizes); i++) {
core->rx_desc_buf_size += core->rxbuf_sizes[i];
}
}
| 0 |
[
"CWE-835"
] |
qemu
|
4154c7e03fa55b4cf52509a83d50d6c09d743b77
| 104,315,895,376,132,330,000,000,000,000,000,000,000 | 9 |
net: e1000e: fix an infinite loop issue
This issue is like the issue in e1000 network card addressed in
this commit:
e1000: eliminate infinite loops on out-of-bounds transfer start.
Signed-off-by: Li Qiang <[email protected]>
Reviewed-by: Dmitry Fleytman <[email protected]>
Signed-off-by: Jason Wang <[email protected]>
|
UI_METHOD *UI_stunnel() {
static UI_METHOD *ui_method=NULL;
if(ui_method) /* already initialized */
return ui_method;
ui_method=UI_create_method("stunnel WIN32 UI");
if(!ui_method) {
sslerror("UI_create_method");
return NULL;
}
UI_method_set_reader(ui_method, pin_cb);
return ui_method;
}
| 0 |
[
"CWE-295"
] |
stunnel
|
ebad9ddc4efb2635f37174c9d800d06206f1edf9
| 286,284,922,265,932,900,000,000,000,000,000,000,000 | 13 |
stunnel-5.57
|
TEST(ExistsMatchExpression, MatchesElement) {
BSONObj existsInt = BSON("a" << 5);
BSONObj existsNull = BSON("a" << BSONNULL);
BSONObj doesntExist = BSONObj();
ExistsMatchExpression exists("");
ASSERT(exists.matchesSingleElement(existsInt.firstElement()));
ASSERT(exists.matchesSingleElement(existsNull.firstElement()));
ASSERT(!exists.matchesSingleElement(doesntExist.firstElement()));
}
| 0 |
[] |
mongo
|
64095239f41e9f3841d8be9088347db56d35c891
| 121,233,325,893,532,180,000,000,000,000,000,000,000 | 9 |
SERVER-51083 Reject invalid UTF-8 from $regex match expressions
|
p11_mmap_open (const char *path,
struct stat *sb,
void **data,
size_t *size)
{
HANDLE mapping;
LARGE_INTEGER large;
DWORD errn;
p11_mmap *map;
map = calloc (1, sizeof (p11_mmap));
if (map == NULL) {
errno = ENOMEM;
return NULL;
}
map->file = CreateFile (path, GENERIC_READ, 0, NULL, OPEN_EXISTING, FILE_FLAG_RANDOM_ACCESS, NULL);
if (map->file == INVALID_HANDLE_VALUE) {
errn = GetLastError ();
free (map);
SetLastError (errn);
if (errn == ERROR_PATH_NOT_FOUND || errn == ERROR_FILE_NOT_FOUND)
errno = ENOENT;
else if (errn == ERROR_ACCESS_DENIED)
errno = EPERM;
return NULL;
}
if (sb == NULL) {
if (!GetFileSizeEx (map->file, &large)) {
errn = GetLastError ();
CloseHandle (map->file);
free (map);
SetLastError (errn);
if (errn == ERROR_ACCESS_DENIED)
errno = EPERM;
return NULL;
}
} else {
large.QuadPart = sb->st_size;
}
mapping = CreateFileMapping (map->file, NULL, PAGE_READONLY, 0, 0, NULL);
if (!mapping) {
errn = GetLastError ();
CloseHandle (map->file);
free (map);
SetLastError (errn);
if (errn == ERROR_ACCESS_DENIED)
errno = EPERM;
return NULL;
}
map->data = MapViewOfFile (mapping, FILE_MAP_READ, 0, 0, large.QuadPart);
CloseHandle (mapping);
if (map->data == NULL) {
errn = GetLastError ();
CloseHandle (map->file);
free (map);
SetLastError (errn);
if (errn == ERROR_ACCESS_DENIED)
errno = EPERM;
return NULL;
}
*data = map->data;
*size = large.QuadPart;
return map;
}
| 0 |
[
"CWE-190"
] |
p11-kit
|
bd670b1d4984b27d6a397b9ddafaf89ab26e4e7f
| 90,079,364,855,065,440,000,000,000,000,000,000,000 | 70 |
Follow-up to arithmetic overflow fix
Check if nmemb is zero in p11_rpc_message_alloc_extra_array to avoid a
division by zero trap. Additionally, change the reallocarray
compatibility shim so that it won't assert when resizing an array to
zero, and add the same nmemb != 0 check there.
|
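A compact sketch of the overflow- and zero-safe array sizing this message describes (a generic helper under the stated assumptions, not the actual p11-kit shim):

#include <stdint.h>
#include <stdlib.h>
#include <errno.h>

/* Reject nmemb == 0 before dividing, and make sure nmemb * size cannot wrap. */
static void *xreallocarray(void *ptr, size_t nmemb, size_t size)
{
    if (nmemb != 0 && size > SIZE_MAX / nmemb) {
        errno = ENOMEM;
        return NULL;
    }
    return realloc(ptr, nmemb * size);
}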
static void compat_standard_from_user(void *dst, const void *src)
{
int v = *(compat_int_t *)src;
if (v > 0)
v += xt_compat_calc_jump(AF_INET, v);
memcpy(dst, &v, sizeof(v));
}
| 0 |
[
"CWE-200"
] |
linux-2.6
|
78b79876761b86653df89c48a7010b5cbd41a84a
| 204,142,318,869,620,280,000,000,000,000,000,000,000 | 8 |
netfilter: ip_tables: fix infoleak to userspace
Structures ipt_replace, compat_ipt_replace, and xt_get_revision are
copied from userspace. Fields of these structs that are
zero-terminated strings are not checked. When they are used as argument
to a format string containing "%s" in request_module(), some sensitive
information is leaked to userspace via argument of spawned modprobe
process.
The first and the third bugs were introduced before the git epoch; the
second was introduced in 2722971c (v2.6.17-rc1). To trigger the bug
one should have CAP_NET_ADMIN.
Signed-off-by: Vasiliy Kulikov <[email protected]>
Signed-off-by: Patrick McHardy <[email protected]>
|