| func | target | cwe | project | commit_id | hash | size | message |
|---|---|---|---|---|---|---|---|
| string, 0–484k chars | int64, 0 or 1 | list, 0–4 CWE IDs | string, 799 classes | string, 40 chars | float64 | int64, 1–24k | string, 0–13.3k chars |
static inline int xt_osf_ttl(const struct sk_buff *skb, const struct xt_osf_info *info,
unsigned char f_ttl)
{
const struct iphdr *ip = ip_hdr(skb);
if (info->flags & XT_OSF_TTL) {
if (info->ttl == XT_OSF_TTL_TRUE)
return ip->ttl == f_ttl;
if (info->ttl == XT_OSF_TTL_NOCHECK)
return 1;
else if (ip->ttl <= f_ttl)
return 1;
else {
struct in_device *in_dev = __in_dev_get_rcu(skb->dev);
int ret = 0;
for_ifa(in_dev) {
if (inet_ifa_match(ip->saddr, ifa)) {
ret = (ip->ttl == f_ttl);
break;
}
}
endfor_ifa(in_dev);
return ret;
}
}
return ip->ttl == f_ttl;
}
| 0 |
[
"CWE-862"
] |
linux
|
916a27901de01446bcf57ecca4783f6cff493309
| 318,923,194,090,119,350,000,000,000,000,000,000,000 | 30 |
netfilter: xt_osf: Add missing permission checks
The capability check in nfnetlink_rcv() verifies that the caller
has CAP_NET_ADMIN in the namespace that "owns" the netlink socket.
However, xt_osf_fingers is shared by all net namespaces on the
system. An unprivileged user can create user and net namespaces
in which he holds CAP_NET_ADMIN to bypass the netlink_net_capable()
check:
vpnns -- nfnl_osf -f /tmp/pf.os
vpnns -- nfnl_osf -f /tmp/pf.os -d
These non-root operations successfully modify the systemwide OS
fingerprint list. Add new capable() checks so that they can't.
Signed-off-by: Kevin Cernekee <[email protected]>
Signed-off-by: Pablo Neira Ayuso <[email protected]>
|
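The commit message above describes adding a capable() check so that CAP_NET_ADMIN must be held in the initial namespace, not merely in a freshly created user+net namespace, before the system-wide fingerprint list can be modified. Below is a minimal userspace sketch of that pattern; the two predicate functions are stand-ins for the kernel's ns_capable()/capable() helpers and are assumptions for illustration, not the actual patch.

```c
#include <stdbool.h>
#include <stdio.h>

/* Stubs standing in for kernel helpers (assumed for illustration only). */
static bool ns_capable_net_admin(void) { return true;  } /* CAP_NET_ADMIN in the caller's netns */
static bool capable_net_admin(void)    { return false; } /* CAP_NET_ADMIN in the initial namespace */

/* Mutating state shared by every namespace should require the global
 * capability; the per-namespace check alone can be satisfied by creating
 * an unprivileged user+net namespace. */
static int osf_modify_global_list(const char *fingerprint)
{
    if (!ns_capable_net_admin())
        return -1; /* -EPERM */
    if (!capable_net_admin())   /* the kind of check the commit adds */
        return -1; /* -EPERM */

    printf("fingerprint accepted: %s\n", fingerprint);
    return 0;
}

int main(void)
{
    if (osf_modify_global_list("example") != 0)
        fprintf(stderr, "rejected: caller lacks CAP_NET_ADMIN in the initial namespace\n");
    return 0;
}
```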
void PrepareOutputBuffer(complex<float>* output_data, int fft_height,
int fft_width, double** fft_input_output) {
int cnt = 0;
for (int i = 0; i < fft_height; ++i) {
for (int j = 0; j < fft_width / 2 + 1; ++j) {
output_data[cnt++] = complex<float>(fft_input_output[i][j * 2],
fft_input_output[i][j * 2 + 1]);
}
}
}
| 0 |
[
"CWE-125",
"CWE-787"
] |
tensorflow
|
1970c2158b1ffa416d159d03c3370b9a462aee35
| 101,578,740,509,019,500,000,000,000,000,000,000,000 | 10 |
[tflite]: Insert `nullptr` checks when obtaining tensors.
As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages.
We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`).
PiperOrigin-RevId: 332521299
Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
|
int nfc_genl_fw_download_done(struct nfc_dev *dev, const char *firmware_name,
u32 result)
{
struct sk_buff *msg;
void *hdr;
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
if (!msg)
return -ENOMEM;
hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0,
NFC_CMD_FW_DOWNLOAD);
if (!hdr)
goto free_msg;
if (nla_put_string(msg, NFC_ATTR_FIRMWARE_NAME, firmware_name) ||
nla_put_u32(msg, NFC_ATTR_FIRMWARE_DOWNLOAD_STATUS, result) ||
nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx))
goto nla_put_failure;
genlmsg_end(msg, hdr);
genlmsg_multicast(&nfc_genl_family, msg, 0, 0, GFP_ATOMIC);
return 0;
nla_put_failure:
free_msg:
nlmsg_free(msg);
return -EMSGSIZE;
}
| 0 |
[] |
linux
|
4071bf121d59944d5cd2238de0642f3d7995a997
| 16,940,869,776,914,674,000,000,000,000,000,000,000 | 31 |
NFC: netlink: fix sleep in atomic bug when firmware download timeout
There is a sleep-in-atomic bug that could cause a kernel panic during the
firmware download process. The root cause is that nlmsg_new with
GFP_KERNEL parameter is called in fw_dnld_timeout which is a timer
handler. The call trace is shown below:
BUG: sleeping function called from invalid context at include/linux/sched/mm.h:265
Call Trace:
kmem_cache_alloc_node
__alloc_skb
nfc_genl_fw_download_done
call_timer_fn
__run_timers.part.0
run_timer_softirq
__do_softirq
...
The nlmsg_new with GFP_KERNEL parameter may sleep during memory
allocation process, and the timer handler is run as the result of
a "software interrupt" that should not call any other function
that could sleep.
This patch changes allocation mode of netlink message from GFP_KERNEL
to GFP_ATOMIC in order to prevent the sleep-in-atomic bug. The GFP_ATOMIC
flag allows the allocation to be performed from atomic context.
Fixes: 9674da8759df ("NFC: Add firmware upload netlink command")
Fixes: 9ea7187c53f6 ("NFC: netlink: Rename CMD_FW_UPLOAD to CMD_FW_DOWNLOAD")
Signed-off-by: Duoming Zhou <[email protected]>
Reviewed-by: Krzysztof Kozlowski <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Paolo Abeni <[email protected]>
|
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
struct dev_ifalias *new_alias = NULL;
if (len >= IFALIASZ)
return -EINVAL;
if (len) {
new_alias = kmalloc(sizeof(*new_alias) + len + 1, GFP_KERNEL);
if (!new_alias)
return -ENOMEM;
memcpy(new_alias->ifalias, alias, len);
new_alias->ifalias[len] = 0;
}
mutex_lock(&ifalias_mutex);
rcu_swap_protected(dev->ifalias, new_alias,
mutex_is_locked(&ifalias_mutex));
mutex_unlock(&ifalias_mutex);
if (new_alias)
kfree_rcu(new_alias, rcuhead);
return len;
}
| 0 |
[
"CWE-416"
] |
linux
|
a4270d6795b0580287453ea55974d948393e66ef
| 30,432,089,970,985,610,000,000,000,000,000,000,000 | 26 |
net-gro: fix use-after-free read in napi_gro_frags()
If a network driver provides to napi_gro_frags() an
skb with a page fragment of exactly 14 bytes, the call
to gro_pull_from_frag0() will 'consume' the fragment
by calling skb_frag_unref(skb, 0), and the page might
be freed and reused.
Reading eth->h_proto at the end of napi_frags_skb() might
read mangled data, or crash under specific debugging features.
BUG: KASAN: use-after-free in napi_frags_skb net/core/dev.c:5833 [inline]
BUG: KASAN: use-after-free in napi_gro_frags+0xc6f/0xd10 net/core/dev.c:5841
Read of size 2 at addr ffff88809366840c by task syz-executor599/8957
CPU: 1 PID: 8957 Comm: syz-executor599 Not tainted 5.2.0-rc1+ #32
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
Call Trace:
__dump_stack lib/dump_stack.c:77 [inline]
dump_stack+0x172/0x1f0 lib/dump_stack.c:113
print_address_description.cold+0x7c/0x20d mm/kasan/report.c:188
__kasan_report.cold+0x1b/0x40 mm/kasan/report.c:317
kasan_report+0x12/0x20 mm/kasan/common.c:614
__asan_report_load_n_noabort+0xf/0x20 mm/kasan/generic_report.c:142
napi_frags_skb net/core/dev.c:5833 [inline]
napi_gro_frags+0xc6f/0xd10 net/core/dev.c:5841
tun_get_user+0x2f3c/0x3ff0 drivers/net/tun.c:1991
tun_chr_write_iter+0xbd/0x156 drivers/net/tun.c:2037
call_write_iter include/linux/fs.h:1872 [inline]
do_iter_readv_writev+0x5f8/0x8f0 fs/read_write.c:693
do_iter_write fs/read_write.c:970 [inline]
do_iter_write+0x184/0x610 fs/read_write.c:951
vfs_writev+0x1b3/0x2f0 fs/read_write.c:1015
do_writev+0x15b/0x330 fs/read_write.c:1058
Fixes: a50e233c50db ("net-gro: restore frag0 optimization")
Signed-off-by: Eric Dumazet <[email protected]>
Reported-by: syzbot <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
PyObject *PyString_AsDecodedObject(PyObject *str,
const char *encoding,
const char *errors)
{
PyObject *v;
if (!PyString_Check(str)) {
PyErr_BadArgument();
goto onError;
}
if (encoding == NULL) {
#ifdef Py_USING_UNICODE
encoding = PyUnicode_GetDefaultEncoding();
#else
PyErr_SetString(PyExc_ValueError, "no encoding specified");
goto onError;
#endif
}
/* Decode via the codec registry */
v = _PyCodec_DecodeText(str, encoding, errors);
if (v == NULL)
goto onError;
return v;
onError:
return NULL;
}
| 0 |
[
"CWE-190"
] |
cpython
|
c3c9db89273fabc62ea1b48389d9a3000c1c03ae
| 163,305,740,020,034,350,000,000,000,000,000,000,000 | 30 |
[2.7] bpo-30657: Check & prevent integer overflow in PyString_DecodeEscape (#2174)
|
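The commit above is about refusing to compute a decoded-buffer size that would overflow. A generic version of such a guard in C (a sketch, not the CPython patch itself) checks against SIZE_MAX before multiplying rather than letting the product wrap:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Returns false instead of silently wrapping when a * b would overflow. */
static bool checked_mul_size(size_t a, size_t b, size_t *out)
{
    if (b != 0 && a > SIZE_MAX / b)
        return false;
    *out = a * b;
    return true;
}

int main(void)
{
    size_t n;

    if (!checked_mul_size(SIZE_MAX / 2, 8, &n)) {
        fprintf(stderr, "length computation would overflow, refusing to allocate\n");
        return 1;
    }
    printf("allocating %zu bytes\n", n);
    return 0;
}
```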
void CLASS process_Sony_0x940c(uchar *buf)
{
ushort lid2;
if ((imgdata.lens.makernotes.LensMount != LIBRAW_MOUNT_Canon_EF) &&
(imgdata.lens.makernotes.LensMount != LIBRAW_MOUNT_Sigma_X3F))
{
switch (SonySubstitution[buf[0x0008]])
{
case 1:
case 5:
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Minolta_A;
break;
case 4:
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Sony_E;
break;
}
}
lid2 = (((ushort)SonySubstitution[buf[0x000a]]) << 8) | ((ushort)SonySubstitution[buf[0x0009]]);
if ((lid2 > 0) && (lid2 < 32784))
parseSonyLensType2(SonySubstitution[buf[0x000a]], // LensType2 - Sony lens ids
SonySubstitution[buf[0x0009]]);
return;
}
| 0 |
[
"CWE-476",
"CWE-119"
] |
LibRaw
|
d7c3d2cb460be10a3ea7b32e9443a83c243b2251
| 319,101,862,704,562,800,000,000,000,000,000,000,000 | 23 |
Secunia SA75000 advisory: several buffer overruns
|
static int ldb_kv_init_rootdse(struct ldb_module *module)
{
/* ignore errors on this - we expect it for non-sam databases */
ldb_mod_register_control(module, LDB_CONTROL_PERMISSIVE_MODIFY_OID);
/* there can be no module beyond the backend, just return */
return LDB_SUCCESS;
}
| 0 |
[
"CWE-20"
] |
samba
|
3c1fbb18321f61df44d7b0f0c7452ae230960293
| 284,105,161,860,486,200,000,000,000,000,000,000,000 | 8 |
CVE-2018-1140 ldb_tdb: Check for DN validity in add, rename and search
This ensures we fail with a good error code before an eventual ldb_dn_get_casefold() which
would otherwise fail.
Signed-off-by: Andrew Bartlett <[email protected]>
Reviewed-by: Douglas Bagnall <[email protected]>
BUG: https://bugzilla.samba.org/show_bug.cgi?id=13374
|
xmlIsXHTML(const xmlChar *systemID, const xmlChar *publicID) {
if ((systemID == NULL) && (publicID == NULL))
return(-1);
if (publicID != NULL) {
if (xmlStrEqual(publicID, XHTML_STRICT_PUBLIC_ID)) return(1);
if (xmlStrEqual(publicID, XHTML_FRAME_PUBLIC_ID)) return(1);
if (xmlStrEqual(publicID, XHTML_TRANS_PUBLIC_ID)) return(1);
}
if (systemID != NULL) {
if (xmlStrEqual(systemID, XHTML_STRICT_SYSTEM_ID)) return(1);
if (xmlStrEqual(systemID, XHTML_FRAME_SYSTEM_ID)) return(1);
if (xmlStrEqual(systemID, XHTML_TRANS_SYSTEM_ID)) return(1);
}
return(0);
}
| 0 |
[
"CWE-502"
] |
libxml2
|
c97750d11bb8b6f3303e7131fe526a61ac65bcfd
| 123,044,908,918,203,440,000,000,000,000,000,000,000 | 15 |
Avoid an out of bound access when serializing malformed strings
For https://bugzilla.gnome.org/show_bug.cgi?id=766414
* xmlsave.c: xmlBufAttrSerializeTxtContent() if an attribute value
is not UTF-8 be more careful when serializing it as we may do an
out of bound access as a result.
|
dtls1_buffer_record(SSL *s, record_pqueue *queue, PQ_64BIT *priority)
{
DTLS1_RECORD_DATA *rdata;
pitem *item;
/* Limit the size of the queue to prevent DOS attacks */
if (pqueue_size(queue->q) >= 100)
return 0;
rdata = OPENSSL_malloc(sizeof(DTLS1_RECORD_DATA));
item = pitem_new(*priority, rdata);
if (rdata == NULL || item == NULL)
{
if (rdata != NULL) OPENSSL_free(rdata);
if (item != NULL) pitem_free(item);
SSLerr(SSL_F_DTLS1_BUFFER_RECORD, ERR_R_INTERNAL_ERROR);
return(0);
}
rdata->packet = s->packet;
rdata->packet_length = s->packet_length;
memcpy(&(rdata->rbuf), &(s->s3->rbuf), sizeof(SSL3_BUFFER));
memcpy(&(rdata->rrec), &(s->s3->rrec), sizeof(SSL3_RECORD));
item->data = rdata;
/* insert should not fail, since duplicates are dropped */
if (pqueue_insert(queue->q, item) == NULL)
{
OPENSSL_free(rdata);
pitem_free(item);
return(0);
}
s->packet = NULL;
s->packet_length = 0;
memset(&(s->s3->rbuf), 0, sizeof(SSL3_BUFFER));
memset(&(s->s3->rrec), 0, sizeof(SSL3_RECORD));
if (!ssl3_setup_buffers(s))
{
SSLerr(SSL_F_DTLS1_BUFFER_RECORD, ERR_R_INTERNAL_ERROR);
OPENSSL_free(rdata);
pitem_free(item);
return(0);
}
return(1);
}
| 0 |
[
"CWE-310"
] |
openssl
|
270881316664396326c461ec7a124aec2c6cc081
| 4,548,088,663,877,482,000,000,000,000,000,000,000 | 50 |
Add and use a constant-time memcmp.
This change adds CRYPTO_memcmp, which compares two vectors of bytes in
an amount of time that's independent of their contents. It also changes
several MAC compares in the code to use this over the standard memcmp,
which may leak information about the size of a matching prefix.
(cherry picked from commit 2ee798880a246d648ecddadc5b91367bee4a5d98)
Conflicts:
crypto/crypto.h
ssl/t1_lib.c
(cherry picked from commit dc406b59f3169fe191e58906df08dce97edb727c)
Conflicts:
crypto/crypto.h
ssl/d1_pkt.c
ssl/s3_pkt.c
|
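CRYPTO_memcmp, as described in the commit message above, compares two buffers in time that depends only on their length. A minimal standalone equivalent accumulates the XOR of every byte pair instead of returning at the first mismatch; this is a sketch of the technique, not OpenSSL's implementation:

```c
#include <stddef.h>
#include <stdio.h>

/* Accumulates differences with OR so the loop never exits early; the run time
 * depends only on len, not on where (or whether) the inputs differ. */
static int ct_memcmp(const void *a, const void *b, size_t len)
{
    const unsigned char *pa = a, *pb = b;
    unsigned char diff = 0;
    size_t i;

    for (i = 0; i < len; i++)
        diff |= pa[i] ^ pb[i];

    return diff; /* 0 if equal, non-zero otherwise (no ordering) */
}

int main(void)
{
    unsigned char mac1[16] = {0}, mac2[16] = {0};
    mac2[15] = 1;
    printf("equal buffers:   %d\n", ct_memcmp(mac1, mac1, sizeof(mac1)));
    printf("unequal buffers: %d\n", ct_memcmp(mac1, mac2, sizeof(mac1)));
    return 0;
}
```

Note that the result only distinguishes equal from unequal; unlike memcmp it provides no ordering, which is all a MAC comparison needs.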
dse_apply_nolock(struct dse *pdse, IFP fp, caddr_t arg)
{
avl_apply(pdse->dse_tree, fp, arg, STOP_TRAVERSAL, AVL_INORDER);
return 1;
}
| 0 |
[
"CWE-200",
"CWE-203"
] |
389-ds-base
|
b6aae4d8e7c8a6ddd21646f94fef1bf7f22c3f32
| 152,920,435,130,837,640,000,000,000,000,000,000,000 | 5 |
Issue 4609 - CVE - info disclosure when authenticating
Description: If you bind as a user that does not exist, error 49 is returned
instead of error 32, as error 32 would disclose that the entry does
not exist. When you bind as an entry that does not have userpassword
set, error 48 (inappropriate auth) is returned, but this
discloses that the entry does indeed exist. Instead we should
always return error 49, even if the password is not set in the
entry. This way we do not disclose to an attacker if the Bind
DN exists or not.
Relates: https://github.com/389ds/389-ds-base/issues/4609
Reviewed by: tbordaz(Thanks!)
|
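The remediation described above is to return the same LDAP error (49, invalid credentials) whether the bind DN is missing, has no password, or has the wrong password, so a failed bind cannot be used as an existence oracle. A small illustrative sketch with stub logic (not the 389-ds code):

```c
#include <stdbool.h>
#include <stdio.h>

enum { LDAP_INVALID_CREDENTIALS = 49 };

/* Whether the DN is missing, the entry has no userPassword, or the password
 * is wrong, the caller sees the same error code, so a failed bind reveals
 * nothing about which of those cases occurred. */
static int check_bind(bool entry_exists, bool has_password, bool password_ok)
{
    if (!entry_exists || !has_password || !password_ok)
        return LDAP_INVALID_CREDENTIALS;
    return 0;
}

int main(void)
{
    printf("%d %d %d\n",
           check_bind(false, false, false),   /* no such entry */
           check_bind(true,  false, false),   /* entry without userPassword */
           check_bind(true,  true,  false));  /* wrong password */
    return 0;
}
```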
void PeerListWidget::loadSettings()
{
header()->restoreState(Preferences::instance()->getPeerListState());
}
| 0 |
[
"CWE-20",
"CWE-79"
] |
qBittorrent
|
6ca3e4f094da0a0017cb2d483ec1db6176bb0b16
| 305,119,533,350,192,160,000,000,000,000,000,000,000 | 4 |
Add Utils::String::toHtmlEscaped
|
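The commit title above adds an HTML-escaping helper so that peer-supplied strings cannot inject markup (the CWE-79 context of this record). Utils::String::toHtmlEscaped wraps Qt's escaping; the sketch below is a hypothetical plain-C equivalent showing what such an escape does to the handful of significant characters:

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Replaces the characters that matter for HTML injection; caller frees. */
static char *html_escape(const char *in)
{
    size_t len = strlen(in), i, j = 0;
    char *out = malloc(len * 6 + 1);   /* worst case: every char becomes "&quot;" */
    if (!out)
        return NULL;
    for (i = 0; i < len; i++) {
        switch (in[i]) {
        case '&':  j += sprintf(out + j, "&amp;");  break;
        case '<':  j += sprintf(out + j, "&lt;");   break;
        case '>':  j += sprintf(out + j, "&gt;");   break;
        case '"':  j += sprintf(out + j, "&quot;"); break;
        default:   out[j++] = in[i];                break;
        }
    }
    out[j] = '\0';
    return out;
}

int main(void)
{
    char *s = html_escape("<script>alert('peer')</script>");
    puts(s ? s : "oom");
    free(s);
    return 0;
}
```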
daemon_seraddr(struct sockaddr_storage *sockaddrin, struct rpcap_sockaddr *sockaddrout)
{
memset(sockaddrout, 0, sizeof(struct sockaddr_storage));
// There can be the case in which the sockaddrin is not available
if (sockaddrin == NULL) return;
// Warning: we support only AF_INET and AF_INET6
switch (sockaddrin->ss_family)
{
case AF_INET:
{
struct sockaddr_in *sockaddrin_ipv4;
struct rpcap_sockaddr_in *sockaddrout_ipv4;
sockaddrin_ipv4 = (struct sockaddr_in *) sockaddrin;
sockaddrout_ipv4 = (struct rpcap_sockaddr_in *) sockaddrout;
sockaddrout_ipv4->family = htons(RPCAP_AF_INET);
sockaddrout_ipv4->port = htons(sockaddrin_ipv4->sin_port);
memcpy(&sockaddrout_ipv4->addr, &sockaddrin_ipv4->sin_addr, sizeof(sockaddrout_ipv4->addr));
memset(sockaddrout_ipv4->zero, 0, sizeof(sockaddrout_ipv4->zero));
break;
}
#ifdef AF_INET6
case AF_INET6:
{
struct sockaddr_in6 *sockaddrin_ipv6;
struct rpcap_sockaddr_in6 *sockaddrout_ipv6;
sockaddrin_ipv6 = (struct sockaddr_in6 *) sockaddrin;
sockaddrout_ipv6 = (struct rpcap_sockaddr_in6 *) sockaddrout;
sockaddrout_ipv6->family = htons(RPCAP_AF_INET6);
sockaddrout_ipv6->port = htons(sockaddrin_ipv6->sin6_port);
sockaddrout_ipv6->flowinfo = htonl(sockaddrin_ipv6->sin6_flowinfo);
memcpy(&sockaddrout_ipv6->addr, &sockaddrin_ipv6->sin6_addr, sizeof(sockaddrout_ipv6->addr));
sockaddrout_ipv6->scope_id = htonl(sockaddrin_ipv6->sin6_scope_id);
break;
}
#endif
}
}
| 0 |
[
"CWE-703",
"CWE-918"
] |
libpcap
|
33834cb2a4d035b52aa2a26742f832a112e90a0a
| 50,304,744,810,091,590,000,000,000,000,000,000,000 | 42 |
In the open request, reject capture sources that are URLs.
You shouldn't be able to ask a server to open a remote device on some
*other* server; just open it yourself.
This addresses Include Security issue F13: [libpcap] Remote Packet
Capture Daemon Allows Opening Capture URLs.
|
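The fix described above rejects capture sources that are themselves URLs, so one rpcapd instance cannot be instructed to open a capture on another server. A hypothetical check of that shape simply refuses any source string containing a scheme separator:

```c
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* A capture source handed to the daemon should be a local device name; anything
 * carrying a URL scheme ("rpcap://", "file://", ...) is refused so one server
 * cannot be told to open a capture on another. */
static bool is_url(const char *source)
{
    return strstr(source, "://") != NULL;
}

int main(void)
{
    printf("%d\n", is_url("rpcap://other-host/eth0")); /* rejected: 1 */
    printf("%d\n", is_url("eth0"));                    /* accepted: 0 */
    return 0;
}
```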
GF_Err gf_isom_hint_sample_data(GF_ISOFile *the_file, u32 trackNumber, GF_ISOTrackID SourceTrackID, u32 SampleNumber, u16 DataLength, u32 offsetInSample, u8 *extra_data, u8 AtBegin)
{
GF_TrackBox *trak;
GF_HintSampleEntryBox *entry;
u32 count;
u16 refIndex;
GF_HintPacket *pck;
GF_SampleDTE *dte;
GF_Err e;
GF_TrackReferenceTypeBox *hint;
trak = gf_isom_get_track_from_file(the_file, trackNumber);
if (!trak || !IsHintTrack(trak)) return GF_BAD_PARAM;
e = Media_GetSampleDesc(trak->Media, trak->Media->information->sampleTable->currentEntryIndex, (GF_SampleEntryBox **) &entry, &count);
if (e) return e;
if (!entry->hint_sample) return GF_BAD_PARAM;
count = gf_list_count(entry->hint_sample->packetTable);
if (!count) return GF_BAD_PARAM;
pck = (GF_HintPacket *)gf_list_get(entry->hint_sample->packetTable, count - 1);
dte = (GF_SampleDTE *) NewDTE(2);
dte->dataLength = DataLength;
dte->sampleNumber = SampleNumber;
dte->byteOffset = offsetInSample;
//we're getting data from another track
if (SourceTrackID != trak->Header->trackID) {
//get (or set) the track reference index
e = Track_FindRef(trak, GF_ISOM_REF_HINT, &hint);
if (e) return e;
e = reftype_AddRefTrack(hint, SourceTrackID, &refIndex);
if (e) return e;
//WARNING: IN QT, MUST BE 0-based !!!
dte->trackRefIndex = (u8) (refIndex - 1);
} else {
//we're in the hint track
dte->trackRefIndex = (s8) -1;
//basic check...
if (SampleNumber > trak->Media->information->sampleTable->SampleSize->sampleCount + 1) {
DelDTE((GF_GenericDTE *)dte);
return GF_BAD_PARAM;
}
//are we in the current sample ??
if (!SampleNumber || (SampleNumber == trak->Media->information->sampleTable->SampleSize->sampleCount + 1)) {
//we adding some stuff in the current sample ...
dte->byteOffset += entry->hint_sample->dataLength;
entry->hint_sample->AdditionalData = (char*)gf_realloc(entry->hint_sample->AdditionalData, sizeof(char) * (entry->hint_sample->dataLength + DataLength));
if (AtBegin) {
if (entry->hint_sample->dataLength)
memmove(entry->hint_sample->AdditionalData + DataLength, entry->hint_sample->AdditionalData, entry->hint_sample->dataLength);
memcpy(entry->hint_sample->AdditionalData, extra_data, DataLength);
/*offset existing DTE*/
gf_isom_hint_pck_offset(pck, DataLength, SampleNumber);
} else {
memcpy(entry->hint_sample->AdditionalData + entry->hint_sample->dataLength, extra_data, DataLength);
}
entry->hint_sample->dataLength += DataLength;
//and set the sample number ...
dte->sampleNumber = trak->Media->information->sampleTable->SampleSize->sampleCount + 1;
}
}
//OK, add the entry
return gf_isom_hint_pck_add_dte(pck, (GF_GenericDTE *)dte, AtBegin);
}
| 0 |
[
"CWE-787"
] |
gpac
|
86c1566f040b2b84c72afcb6cbd444c5aff56cfe
| 91,117,415,326,680,570,000,000,000,000,000,000,000 | 68 |
fixed #1894
|
_PUBLIC_ bool islower_m(codepoint_t val)
{
return (toupper_m(val) != val);
}
| 0 |
[
"CWE-200"
] |
samba
|
ba5dbda6d0174a59d221c45cca52ecd232820d48
| 98,918,278,260,785,370,000,000,000,000,000,000,000 | 4 |
CVE-2015-5330: Fix handling of unicode near string endings
Until now next_codepoint_ext() and next_codepoint_handle_ext() were
using strnlen(str, 5) to determine how much string they should try to
decode. This ended up looking past the end of the string when it was not
null terminated and the final character looked like a multi-byte encoding.
The fix is to let the caller say how long the string can be.
Bug: https://bugzilla.samba.org/show_bug.cgi?id=11599
Signed-off-by: Douglas Bagnall <[email protected]>
Pair-programmed-with: Andrew Bartlett <[email protected]>
Reviewed-by: Ralph Boehme <[email protected]>
|
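The commit message above explains that the decoder probed up to five bytes with strnlen() and could read past the end of a buffer that was not NUL-terminated when the last byte looked like the start of a multi-byte sequence; the fix is to let the caller pass the remaining length. The sketch below is a bounds-aware single-codepoint decoder in that spirit (illustrative only, overlong-encoding checks omitted; it is not Samba's next_codepoint implementation):

```c
#include <stddef.h>
#include <stdio.h>

/* Decodes one UTF-8 codepoint from at most `avail` bytes; it never reads past
 * the caller-supplied bound and rejects sequences that would need more bytes
 * than are available.  Returns the number of bytes consumed, or 0 on error. */
static size_t decode_codepoint(const unsigned char *s, size_t avail, unsigned int *cp)
{
    size_t need, i;
    unsigned int v;

    if (avail == 0)
        return 0;
    if (s[0] < 0x80)                { *cp = s[0]; return 1; }
    else if ((s[0] & 0xE0) == 0xC0) { need = 2; v = s[0] & 0x1F; }
    else if ((s[0] & 0xF0) == 0xE0) { need = 3; v = s[0] & 0x0F; }
    else if ((s[0] & 0xF8) == 0xF0) { need = 4; v = s[0] & 0x07; }
    else
        return 0;

    if (need > avail)               /* truncated sequence at end of buffer */
        return 0;
    for (i = 1; i < need; i++) {
        if ((s[i] & 0xC0) != 0x80)
            return 0;
        v = (v << 6) | (s[i] & 0x3F);
    }
    *cp = v;
    return need;
}

int main(void)
{
    /* "é" is 0xC3 0xA9, but only one byte is available: the decoder must not read on. */
    const unsigned char truncated[] = { 0xC3 };
    unsigned int cp;
    printf("consumed=%zu\n", decode_codepoint(truncated, sizeof(truncated), &cp));
    return 0;
}
```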
static int devicendomain(i_ctx_t * i_ctx_p, ref *space, float *ptr)
{
int i, limit, code;
ref namesarray;
code = array_get(imemory, space, 1, &namesarray);
if (code < 0)
return code;
limit = r_size(&namesarray) * 2;
for (i = 0;i < limit;i+=2) {
ptr[i] = 0;
ptr[i+1] = 1;
}
return 0;
}
| 0 |
[] |
ghostpdl
|
b326a71659b7837d3acde954b18bda1a6f5e9498
| 68,941,089,012,410,620,000,000,000,000,000,000,000 | 16 |
Bug 699655: Properly check the return value....
...when getting a value from a dictionary
|
int clientHasPendingReplies(client *c) {
return c->bufpos || listLength(c->reply);
}
| 0 |
[
"CWE-254"
] |
redis
|
874804da0c014a7d704b3d285aa500098a931f50
| 77,623,120,764,550,210,000,000,000,000,000,000,000 | 3 |
Security: Cross Protocol Scripting protection.
This is an attempt at mitigating problems due to cross protocol
scripting, an attack targeting services using line oriented protocols
like Redis that can accept HTTP requests as valid protocol, by
discarding the invalid parts and accepting the payloads sent, for
example, via a POST request.
For this to be effective, when we detect POST and Host: and terminate
the connection asynchronously, the networking code was modified in order
to never process further input. It was later verified that in a
pipelined request containing a POST command, the successive commands are
not executed.
|
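The mitigation above relies on the fact that an HTTP request smuggled into a line-oriented protocol begins with recognizable tokens such as "POST" or "Host:"; once those are seen the connection is terminated and no further input is processed. A trivial detector of that shape (a sketch using POSIX strncasecmp, not Redis's command table) might look like this:

```c
#include <stdbool.h>
#include <stdio.h>
#include <strings.h>   /* strncasecmp (POSIX) */

/* Flags command lines that look like HTTP rather than the native protocol,
 * so the connection can be dropped before any payload is interpreted. */
static bool looks_like_http(const char *line)
{
    return strncasecmp(line, "POST ", 5) == 0 ||
           strncasecmp(line, "Host:", 5) == 0;
}

int main(void)
{
    printf("%d\n", looks_like_http("POST /index.html HTTP/1.1")); /* 1: drop */
    printf("%d\n", looks_like_http("SET key value"));             /* 0: keep */
    return 0;
}
```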
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
hwaddr *plen, bool resolve_subpage)
{
MemoryRegionSection *section;
MemoryRegion *mr;
Int128 diff;
section = address_space_lookup_region(d, addr, resolve_subpage);
/* Compute offset within MemoryRegionSection */
addr -= section->offset_within_address_space;
/* Compute offset within MemoryRegion */
*xlat = addr + section->offset_within_region;
mr = section->mr;
/* MMIO registers can be expected to perform full-width accesses based only
* on their address, without considering adjacent registers that could
* decode to completely different MemoryRegions. When such registers
* exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
* regions overlap wildly. For this reason we cannot clamp the accesses
* here.
*
* If the length is small (as is the case for address_space_ldl/stl),
* everything works fine. If the incoming length is large, however,
* the caller really has to do the clamping through memory_access_size.
*/
if (memory_region_is_ram(mr)) {
diff = int128_sub(section->size, int128_make64(addr));
*plen = int128_get64(int128_min(diff, int128_make64(*plen)));
}
return section;
}
| 0 |
[] |
qemu
|
b242e0e0e2969c044a318e56f7988bbd84de1f63
| 99,516,664,147,289,150,000,000,000,000,000,000,000 | 33 |
exec: skip MMIO regions correctly in cpu_physical_memory_write_rom_internal
Loading the BIOS in the mac99 machine is interesting, because there is a
PROM in the middle of the BIOS region (from 16K to 32K). Before memory
region accesses were clamped, when QEMU was asked to load a BIOS from
0xfff00000 to 0xffffffff it would put even those 16K from the BIOS file
into the region. This is weird because those 16K were not actually
visible between 0xfff04000 and 0xfff07fff. However, it worked.
After clamping was added, this also worked. In this case, the
cpu_physical_memory_write_rom_internal function split the write in
three parts: the first 16K were copied, the PROM area (second 16K) were
ignored, then the rest was copied.
Problems then started with commit 965eb2f (exec: do not clamp accesses
to MMIO regions, 2015-06-17). Clamping accesses is not done for MMIO
regions because they can overlap wildly, and MMIO registers can be
expected to perform full-width accesses based only on their address
(with no respect for adjacent registers that could decode to completely
different MemoryRegions). However, this lack of clamping also applied
to the PROM area! cpu_physical_memory_write_rom_internal thus failed
to copy the third range above, i.e. only copied the first 16K of the BIOS.
In effect, address_space_translate is expecting _something else_ to do
the clamping for MMIO regions if the incoming length is large. This
"something else" is memory_access_size in the case of address_space_rw,
so use the same logic in cpu_physical_memory_write_rom_internal.
Reported-by: Alexander Graf <[email protected]>
Reviewed-by: Laurent Vivier <[email protected]>
Tested-by: Laurent Vivier <[email protected]>
Fixes: 965eb2f
Signed-off-by: Paolo Bonzini <[email protected]>
|
static unsigned short atalk_checksum(const struct sk_buff *skb, int len)
{
unsigned long sum;
/* skip header 4 bytes */
sum = atalk_sum_skb(skb, 4, len-4, 0);
/* Use 0xFFFF for 0. 0 itself means none */
return sum ? htons((unsigned short)sum) : 0xFFFF;
}
| 0 |
[] |
history
|
7ab442d7e0a76402c12553ee256f756097cae2d2
| 306,209,186,879,013,500,000,000,000,000,000,000,000 | 10 |
[DDP]: Convert to new protocol interface.
Convert ddp to the new protocol interface which means it has to
handle fragmented skb's. The only big change is in the checksum
routine which has to do more work (like skb_checksum).
Minor speedup is folding the carry to avoid a branch.
Tested against a 2.4 system and by running both code over
a range of packets.
|
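"Folding the carry to avoid a branch" refers to the usual ones'-complement checksum trick: accumulate 16-bit words into a wider register, then add the high half back into the low half instead of testing for overflow after every addition. A standalone illustration of just the folding step (a sketch, not the DDP checksum itself):

```c
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Sums 16-bit words into a 32-bit accumulator, then folds the carries back in.
 * The fold `sum = (sum & 0xffff) + (sum >> 16)` replaces a per-addition
 * "if (sum > 0xffff) sum -= 0xffff" branch. */
static uint16_t fold_checksum(const uint16_t *words, size_t n)
{
    uint32_t sum = 0;
    size_t i;

    for (i = 0; i < n; i++)
        sum += words[i];

    sum = (sum & 0xffff) + (sum >> 16);  /* fold once ... */
    sum = (sum & 0xffff) + (sum >> 16);  /* ... and again for the final carry */
    return (uint16_t)sum;
}

int main(void)
{
    uint16_t data[] = { 0xffff, 0x0001, 0x1234 };
    printf("0x%04x\n", fold_checksum(data, 3));
    return 0;
}
```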
static inline void SplineWeights(const double x,double (*weights)[4])
{
double
alpha,
beta;
/*
Nicolas Robidoux' 12 flops (6* + 5- + 1+) refactoring of the computation
of the standard four 1D cubic B-spline smoothing weights. The sampling
location is assumed between the second and third input pixel locations,
and x is the position relative to the second input pixel location.
*/
alpha=(double) 1.0-x;
(*weights)[3]=(double) (1.0/6.0)*x*x*x;
(*weights)[0]=(double) (1.0/6.0)*alpha*alpha*alpha;
beta=(*weights)[3]-(*weights)[0];
(*weights)[1]=alpha-(*weights)[0]+beta;
(*weights)[2]=x-(*weights)[3]-beta;
}
| 0 |
[
"CWE-190"
] |
ImageMagick
|
406da3af9e09649cda152663c179902edf5ab3ac
| 168,663,936,929,631,130,000,000,000,000,000,000,000 | 19 |
https://github.com/ImageMagick/ImageMagick/issues/1732
|
static MagickBooleanType WriteTIFFImage(const ImageInfo *image_info,
Image *image,ExceptionInfo *exception)
{
#if !defined(TIFFDefaultStripSize)
#define TIFFDefaultStripSize(tiff,request) (8192UL/TIFFScanlineSize(tiff))
#endif
const char
*mode,
*option;
CompressionType
compression;
EndianType
endian_type;
MagickBooleanType
debug,
status;
MagickOffsetType
scene;
QuantumInfo
*quantum_info;
QuantumType
quantum_type;
register ssize_t
i;
size_t
length;
ssize_t
y;
TIFF
*tiff;
TIFFInfo
tiff_info;
uint16
bits_per_sample,
compress_tag,
endian,
photometric;
uint32
rows_per_strip;
unsigned char
*pixels;
/*
Open TIFF file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception);
if (status == MagickFalse)
return(status);
(void) SetMagickThreadValue(tiff_exception,exception);
endian_type=UndefinedEndian;
option=GetImageOption(image_info,"tiff:endian");
if (option != (const char *) NULL)
{
if (LocaleNCompare(option,"msb",3) == 0)
endian_type=MSBEndian;
if (LocaleNCompare(option,"lsb",3) == 0)
endian_type=LSBEndian;;
}
switch (endian_type)
{
case LSBEndian: mode="wl"; break;
case MSBEndian: mode="wb"; break;
default: mode="w"; break;
}
#if defined(TIFF_VERSION_BIG)
if (LocaleCompare(image_info->magick,"TIFF64") == 0)
switch (endian_type)
{
case LSBEndian: mode="wl8"; break;
case MSBEndian: mode="wb8"; break;
default: mode="w8"; break;
}
#endif
tiff=TIFFClientOpen(image->filename,mode,(thandle_t) image,TIFFReadBlob,
TIFFWriteBlob,TIFFSeekBlob,TIFFCloseBlob,TIFFGetBlobSize,TIFFMapBlob,
TIFFUnmapBlob);
if (tiff == (TIFF *) NULL)
return(MagickFalse);
scene=0;
debug=IsEventLogging();
(void) debug;
do
{
/*
Initialize TIFF fields.
*/
if ((image_info->type != UndefinedType) &&
(image_info->type != OptimizeType))
(void) SetImageType(image,image_info->type,exception);
compression=UndefinedCompression;
if (image->compression != JPEGCompression)
compression=image->compression;
if (image_info->compression != UndefinedCompression)
compression=image_info->compression;
switch (compression)
{
case FaxCompression:
case Group4Compression:
{
(void) SetImageType(image,BilevelType,exception);
(void) SetImageDepth(image,1,exception);
break;
}
case JPEGCompression:
{
(void) SetImageStorageClass(image,DirectClass,exception);
(void) SetImageDepth(image,8,exception);
break;
}
default:
break;
}
quantum_info=AcquireQuantumInfo(image_info,image);
if (quantum_info == (QuantumInfo *) NULL)
ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
if ((image->storage_class != PseudoClass) && (image->depth >= 32) &&
(quantum_info->format == UndefinedQuantumFormat) &&
(IsHighDynamicRangeImage(image,exception) != MagickFalse))
{
status=SetQuantumFormat(image,quantum_info,FloatingPointQuantumFormat);
if (status == MagickFalse)
ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
}
if ((LocaleCompare(image_info->magick,"PTIF") == 0) &&
(GetPreviousImageInList(image) != (Image *) NULL))
(void) TIFFSetField(tiff,TIFFTAG_SUBFILETYPE,FILETYPE_REDUCEDIMAGE);
if ((image->columns != (uint32) image->columns) ||
(image->rows != (uint32) image->rows))
ThrowWriterException(ImageError,"WidthOrHeightExceedsLimit");
(void) TIFFSetField(tiff,TIFFTAG_IMAGELENGTH,(uint32) image->rows);
(void) TIFFSetField(tiff,TIFFTAG_IMAGEWIDTH,(uint32) image->columns);
switch (compression)
{
case FaxCompression:
{
compress_tag=COMPRESSION_CCITTFAX3;
SetQuantumMinIsWhite(quantum_info,MagickTrue);
break;
}
case Group4Compression:
{
compress_tag=COMPRESSION_CCITTFAX4;
SetQuantumMinIsWhite(quantum_info,MagickTrue);
break;
}
#if defined(COMPRESSION_JBIG)
case JBIG1Compression:
{
compress_tag=COMPRESSION_JBIG;
break;
}
#endif
case JPEGCompression:
{
compress_tag=COMPRESSION_JPEG;
break;
}
#if defined(COMPRESSION_LZMA)
case LZMACompression:
{
compress_tag=COMPRESSION_LZMA;
break;
}
#endif
case LZWCompression:
{
compress_tag=COMPRESSION_LZW;
break;
}
case RLECompression:
{
compress_tag=COMPRESSION_PACKBITS;
break;
}
case ZipCompression:
{
compress_tag=COMPRESSION_ADOBE_DEFLATE;
break;
}
case NoCompression:
default:
{
compress_tag=COMPRESSION_NONE;
break;
}
}
#if defined(MAGICKCORE_HAVE_TIFFISCODECCONFIGURED) || (TIFFLIB_VERSION > 20040919)
if ((compress_tag != COMPRESSION_NONE) &&
(TIFFIsCODECConfigured(compress_tag) == 0))
{
(void) ThrowMagickException(exception,GetMagickModule(),CoderError,
"CompressionNotSupported","`%s'",CommandOptionToMnemonic(
MagickCompressOptions,(ssize_t) compression));
compress_tag=COMPRESSION_NONE;
compression=NoCompression;
}
#else
switch (compress_tag)
{
#if defined(CCITT_SUPPORT)
case COMPRESSION_CCITTFAX3:
case COMPRESSION_CCITTFAX4:
#endif
#if defined(YCBCR_SUPPORT) && defined(JPEG_SUPPORT)
case COMPRESSION_JPEG:
#endif
#if defined(LZMA_SUPPORT) && defined(COMPRESSION_LZMA)
case COMPRESSION_LZMA:
#endif
#if defined(LZW_SUPPORT)
case COMPRESSION_LZW:
#endif
#if defined(PACKBITS_SUPPORT)
case COMPRESSION_PACKBITS:
#endif
#if defined(ZIP_SUPPORT)
case COMPRESSION_ADOBE_DEFLATE:
#endif
case COMPRESSION_NONE:
break;
default:
{
(void) ThrowMagickException(exception,GetMagickModule(),CoderError,
"CompressionNotSupported","`%s'",CommandOptionToMnemonic(
MagickCompressOptions,(ssize_t) compression));
compress_tag=COMPRESSION_NONE;
compression=NoCompression;
break;
}
}
#endif
if (image->colorspace == CMYKColorspace)
{
photometric=PHOTOMETRIC_SEPARATED;
(void) TIFFSetField(tiff,TIFFTAG_SAMPLESPERPIXEL,4);
(void) TIFFSetField(tiff,TIFFTAG_INKSET,INKSET_CMYK);
}
else
{
/*
Full color TIFF raster.
*/
if (image->colorspace == LabColorspace)
{
photometric=PHOTOMETRIC_CIELAB;
EncodeLabImage(image,exception);
}
else
if (image->colorspace == YCbCrColorspace)
{
photometric=PHOTOMETRIC_YCBCR;
(void) TIFFSetField(tiff,TIFFTAG_YCBCRSUBSAMPLING,1,1);
(void) SetImageStorageClass(image,DirectClass,exception);
(void) SetImageDepth(image,8,exception);
}
else
photometric=PHOTOMETRIC_RGB;
(void) TIFFSetField(tiff,TIFFTAG_SAMPLESPERPIXEL,3);
if ((image_info->type != TrueColorType) &&
(image_info->type != TrueColorAlphaType))
{
if ((image_info->type != PaletteType) &&
(SetImageGray(image,exception) != MagickFalse))
{
photometric=(uint16) (quantum_info->min_is_white !=
MagickFalse ? PHOTOMETRIC_MINISWHITE :
PHOTOMETRIC_MINISBLACK);
(void) TIFFSetField(tiff,TIFFTAG_SAMPLESPERPIXEL,1);
if ((image->depth == 1) &&
(image->alpha_trait == UndefinedPixelTrait))
SetImageMonochrome(image,exception);
}
else
if (image->storage_class == PseudoClass)
{
size_t
depth;
/*
Colormapped TIFF raster.
*/
(void) TIFFSetField(tiff,TIFFTAG_SAMPLESPERPIXEL,1);
photometric=PHOTOMETRIC_PALETTE;
depth=1;
while ((GetQuantumRange(depth)+1) < image->colors)
depth<<=1;
status=SetQuantumDepth(image,quantum_info,depth);
if (status == MagickFalse)
ThrowWriterException(ResourceLimitError,
"MemoryAllocationFailed");
}
}
}
(void) TIFFGetFieldDefaulted(tiff,TIFFTAG_FILLORDER,&endian);
if ((compress_tag == COMPRESSION_CCITTFAX3) &&
(photometric != PHOTOMETRIC_MINISWHITE))
{
compress_tag=COMPRESSION_NONE;
endian=FILLORDER_MSB2LSB;
}
else
if ((compress_tag == COMPRESSION_CCITTFAX4) &&
(photometric != PHOTOMETRIC_MINISWHITE))
{
compress_tag=COMPRESSION_NONE;
endian=FILLORDER_MSB2LSB;
}
option=GetImageOption(image_info,"tiff:fill-order");
if (option != (const char *) NULL)
{
if (LocaleNCompare(option,"msb",3) == 0)
endian=FILLORDER_MSB2LSB;
if (LocaleNCompare(option,"lsb",3) == 0)
endian=FILLORDER_LSB2MSB;
}
(void) TIFFSetField(tiff,TIFFTAG_COMPRESSION,compress_tag);
(void) TIFFSetField(tiff,TIFFTAG_FILLORDER,endian);
(void) TIFFSetField(tiff,TIFFTAG_BITSPERSAMPLE,quantum_info->depth);
if (image->alpha_trait != UndefinedPixelTrait)
{
uint16
extra_samples,
sample_info[1],
samples_per_pixel;
/*
TIFF has a matte channel.
*/
extra_samples=1;
sample_info[0]=EXTRASAMPLE_UNASSALPHA;
option=GetImageOption(image_info,"tiff:alpha");
if (option != (const char *) NULL)
{
if (LocaleCompare(option,"associated") == 0)
sample_info[0]=EXTRASAMPLE_ASSOCALPHA;
else
if (LocaleCompare(option,"unspecified") == 0)
sample_info[0]=EXTRASAMPLE_UNSPECIFIED;
}
(void) TIFFGetFieldDefaulted(tiff,TIFFTAG_SAMPLESPERPIXEL,
&samples_per_pixel);
(void) TIFFSetField(tiff,TIFFTAG_SAMPLESPERPIXEL,samples_per_pixel+1);
(void) TIFFSetField(tiff,TIFFTAG_EXTRASAMPLES,extra_samples,
&sample_info);
if (sample_info[0] == EXTRASAMPLE_ASSOCALPHA)
SetQuantumAlphaType(quantum_info,AssociatedQuantumAlpha);
}
(void) TIFFSetField(tiff,TIFFTAG_PHOTOMETRIC,photometric);
switch (quantum_info->format)
{
case FloatingPointQuantumFormat:
{
(void) TIFFSetField(tiff,TIFFTAG_SAMPLEFORMAT,SAMPLEFORMAT_IEEEFP);
(void) TIFFSetField(tiff,TIFFTAG_SMINSAMPLEVALUE,quantum_info->minimum);
(void) TIFFSetField(tiff,TIFFTAG_SMAXSAMPLEVALUE,quantum_info->maximum);
break;
}
case SignedQuantumFormat:
{
(void) TIFFSetField(tiff,TIFFTAG_SAMPLEFORMAT,SAMPLEFORMAT_INT);
break;
}
case UnsignedQuantumFormat:
{
(void) TIFFSetField(tiff,TIFFTAG_SAMPLEFORMAT,SAMPLEFORMAT_UINT);
break;
}
default:
break;
}
(void) TIFFSetField(tiff,TIFFTAG_ORIENTATION,ORIENTATION_TOPLEFT);
(void) TIFFSetField(tiff,TIFFTAG_PLANARCONFIG,PLANARCONFIG_CONTIG);
if (photometric == PHOTOMETRIC_RGB)
if ((image_info->interlace == PlaneInterlace) ||
(image_info->interlace == PartitionInterlace))
(void) TIFFSetField(tiff,TIFFTAG_PLANARCONFIG,PLANARCONFIG_SEPARATE);
rows_per_strip=TIFFDefaultStripSize(tiff,0);
option=GetImageOption(image_info,"tiff:rows-per-strip");
if (option != (const char *) NULL)
rows_per_strip=(size_t) strtol(option,(char **) NULL,10);
switch (compress_tag)
{
case COMPRESSION_JPEG:
{
#if defined(JPEG_SUPPORT)
const char
*sampling_factor;
GeometryInfo
geometry_info;
MagickStatusType
flags;
rows_per_strip+=(16-(rows_per_strip % 16));
if (image_info->quality != UndefinedCompressionQuality)
(void) TIFFSetField(tiff,TIFFTAG_JPEGQUALITY,image_info->quality);
(void) TIFFSetField(tiff,TIFFTAG_JPEGCOLORMODE,JPEGCOLORMODE_RAW);
if (IssRGBCompatibleColorspace(image->colorspace) != MagickFalse)
{
const char
*value;
(void) TIFFSetField(tiff,TIFFTAG_JPEGCOLORMODE,JPEGCOLORMODE_RGB);
sampling_factor=(const char *) NULL;
value=GetImageProperty(image,"jpeg:sampling-factor",exception);
if (value != (char *) NULL)
{
sampling_factor=value;
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Input sampling-factors=%s",sampling_factor);
}
if (image_info->sampling_factor != (char *) NULL)
sampling_factor=image_info->sampling_factor;
if (sampling_factor != (const char *) NULL)
{
flags=ParseGeometry(sampling_factor,&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=geometry_info.rho;
if (image->colorspace == YCbCrColorspace)
(void) TIFFSetField(tiff,TIFFTAG_YCBCRSUBSAMPLING,(uint16)
geometry_info.rho,(uint16) geometry_info.sigma);
}
}
(void) TIFFGetFieldDefaulted(tiff,TIFFTAG_BITSPERSAMPLE,
&bits_per_sample);
if (bits_per_sample == 12)
(void) TIFFSetField(tiff,TIFFTAG_JPEGTABLESMODE,JPEGTABLESMODE_QUANT);
#endif
break;
}
case COMPRESSION_ADOBE_DEFLATE:
{
rows_per_strip=(uint32) image->rows;
(void) TIFFGetFieldDefaulted(tiff,TIFFTAG_BITSPERSAMPLE,
&bits_per_sample);
if (((photometric == PHOTOMETRIC_RGB) ||
(photometric == PHOTOMETRIC_MINISBLACK)) &&
((bits_per_sample == 8) || (bits_per_sample == 16)))
(void) TIFFSetField(tiff,TIFFTAG_PREDICTOR,PREDICTOR_HORIZONTAL);
(void) TIFFSetField(tiff,TIFFTAG_ZIPQUALITY,(long) (
image_info->quality == UndefinedCompressionQuality ? 7 :
MagickMin((ssize_t) image_info->quality/10,9)));
break;
}
case COMPRESSION_CCITTFAX3:
{
/*
Byte-aligned EOL.
*/
rows_per_strip=(uint32) image->rows;
(void) TIFFSetField(tiff,TIFFTAG_GROUP3OPTIONS,4);
break;
}
case COMPRESSION_CCITTFAX4:
{
rows_per_strip=(uint32) image->rows;
break;
}
#if defined(LZMA_SUPPORT) && defined(COMPRESSION_LZMA)
case COMPRESSION_LZMA:
{
if (((photometric == PHOTOMETRIC_RGB) ||
(photometric == PHOTOMETRIC_MINISBLACK)) &&
((bits_per_sample == 8) || (bits_per_sample == 16)))
(void) TIFFSetField(tiff,TIFFTAG_PREDICTOR,PREDICTOR_HORIZONTAL);
(void) TIFFSetField(tiff,TIFFTAG_LZMAPRESET,(long) (
image_info->quality == UndefinedCompressionQuality ? 7 :
MagickMin((ssize_t) image_info->quality/10,9)));
break;
}
#endif
case COMPRESSION_LZW:
{
(void) TIFFGetFieldDefaulted(tiff,TIFFTAG_BITSPERSAMPLE,
&bits_per_sample);
if (((photometric == PHOTOMETRIC_RGB) ||
(photometric == PHOTOMETRIC_MINISBLACK)) &&
((bits_per_sample == 8) || (bits_per_sample == 16)))
(void) TIFFSetField(tiff,TIFFTAG_PREDICTOR,PREDICTOR_HORIZONTAL);
break;
}
default:
break;
}
if (rows_per_strip < 1)
rows_per_strip=1;
if ((image->rows/rows_per_strip) >= (1UL << 15))
rows_per_strip=(uint32) (image->rows >> 15);
(void) TIFFSetField(tiff,TIFFTAG_ROWSPERSTRIP,rows_per_strip);
if ((image->resolution.x != 0.0) && (image->resolution.y != 0.0))
{
unsigned short
units;
/*
Set image resolution.
*/
units=RESUNIT_NONE;
if (image->units == PixelsPerInchResolution)
units=RESUNIT_INCH;
if (image->units == PixelsPerCentimeterResolution)
units=RESUNIT_CENTIMETER;
(void) TIFFSetField(tiff,TIFFTAG_RESOLUTIONUNIT,(uint16) units);
(void) TIFFSetField(tiff,TIFFTAG_XRESOLUTION,image->resolution.x);
(void) TIFFSetField(tiff,TIFFTAG_YRESOLUTION,image->resolution.y);
if ((image->page.x < 0) || (image->page.y < 0))
(void) ThrowMagickException(exception,GetMagickModule(),CoderError,
"TIFF: negative image positions unsupported","%s",image->filename);
if ((image->page.x > 0) && (image->resolution.x > 0.0))
{
/*
Set horizontal image position.
*/
(void) TIFFSetField(tiff,TIFFTAG_XPOSITION,(float) image->page.x/
image->resolution.x);
}
if ((image->page.y > 0) && (image->resolution.y > 0.0))
{
/*
Set vertical image position.
*/
(void) TIFFSetField(tiff,TIFFTAG_YPOSITION,(float) image->page.y/
image->resolution.y);
}
}
if (image->chromaticity.white_point.x != 0.0)
{
float
chromaticity[6];
/*
Set image chromaticity.
*/
chromaticity[0]=(float) image->chromaticity.red_primary.x;
chromaticity[1]=(float) image->chromaticity.red_primary.y;
chromaticity[2]=(float) image->chromaticity.green_primary.x;
chromaticity[3]=(float) image->chromaticity.green_primary.y;
chromaticity[4]=(float) image->chromaticity.blue_primary.x;
chromaticity[5]=(float) image->chromaticity.blue_primary.y;
(void) TIFFSetField(tiff,TIFFTAG_PRIMARYCHROMATICITIES,chromaticity);
chromaticity[0]=(float) image->chromaticity.white_point.x;
chromaticity[1]=(float) image->chromaticity.white_point.y;
(void) TIFFSetField(tiff,TIFFTAG_WHITEPOINT,chromaticity);
}
if ((LocaleCompare(image_info->magick,"PTIF") != 0) &&
(image_info->adjoin != MagickFalse) && (GetImageListLength(image) > 1))
{
(void) TIFFSetField(tiff,TIFFTAG_SUBFILETYPE,FILETYPE_PAGE);
if (image->scene != 0)
(void) TIFFSetField(tiff,TIFFTAG_PAGENUMBER,(uint16) image->scene,
GetImageListLength(image));
}
if (image->orientation != UndefinedOrientation)
(void) TIFFSetField(tiff,TIFFTAG_ORIENTATION,(uint16) image->orientation);
(void) TIFFSetProfiles(tiff,image);
{
uint16
page,
pages;
page=(uint16) scene;
pages=(uint16) GetImageListLength(image);
if ((LocaleCompare(image_info->magick,"PTIF") != 0) &&
(image_info->adjoin != MagickFalse) && (pages > 1))
(void) TIFFSetField(tiff,TIFFTAG_SUBFILETYPE,FILETYPE_PAGE);
(void) TIFFSetField(tiff,TIFFTAG_PAGENUMBER,page,pages);
}
(void) TIFFSetProperties(tiff,image_info,image,exception);
DisableMSCWarning(4127)
if (0)
RestoreMSCWarning
(void) TIFFSetEXIFProperties(tiff,image,exception);
/*
Write image scanlines.
*/
if (GetTIFFInfo(image_info,tiff,&tiff_info) == MagickFalse)
ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
quantum_info->endian=LSBEndian;
pixels=(unsigned char *) GetQuantumPixels(quantum_info);
tiff_info.scanline=(unsigned char *) GetQuantumPixels(quantum_info);
switch (photometric)
{
case PHOTOMETRIC_CIELAB:
case PHOTOMETRIC_YCBCR:
case PHOTOMETRIC_RGB:
{
/*
RGB TIFF image.
*/
switch (image_info->interlace)
{
case NoInterlace:
default:
{
quantum_type=RGBQuantum;
if (image->alpha_trait != UndefinedPixelTrait)
quantum_type=RGBAQuantum;
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*magick_restrict p;
p=GetVirtualPixels(image,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
length=ExportQuantumPixels(image,(CacheView *) NULL,quantum_info,
quantum_type,pixels,exception);
(void) length;
if (TIFFWritePixels(tiff,&tiff_info,y,0,image) == -1)
break;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,SaveImageTag,(MagickOffsetType)
y,image->rows);
if (status == MagickFalse)
break;
}
}
break;
}
case PlaneInterlace:
case PartitionInterlace:
{
/*
Plane interlacing: RRRRRR...GGGGGG...BBBBBB...
*/
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*magick_restrict p;
p=GetVirtualPixels(image,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
length=ExportQuantumPixels(image,(CacheView *) NULL,quantum_info,
RedQuantum,pixels,exception);
if (TIFFWritePixels(tiff,&tiff_info,y,0,image) == -1)
break;
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,SaveImageTag,100,400);
if (status == MagickFalse)
break;
}
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*magick_restrict p;
p=GetVirtualPixels(image,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
length=ExportQuantumPixels(image,(CacheView *) NULL,quantum_info,
GreenQuantum,pixels,exception);
if (TIFFWritePixels(tiff,&tiff_info,y,1,image) == -1)
break;
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,SaveImageTag,200,400);
if (status == MagickFalse)
break;
}
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*magick_restrict p;
p=GetVirtualPixels(image,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
length=ExportQuantumPixels(image,(CacheView *) NULL,quantum_info,
BlueQuantum,pixels,exception);
if (TIFFWritePixels(tiff,&tiff_info,y,2,image) == -1)
break;
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,SaveImageTag,300,400);
if (status == MagickFalse)
break;
}
if (image->alpha_trait != UndefinedPixelTrait)
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*magick_restrict p;
p=GetVirtualPixels(image,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
length=ExportQuantumPixels(image,(CacheView *) NULL,
quantum_info,AlphaQuantum,pixels,exception);
if (TIFFWritePixels(tiff,&tiff_info,y,3,image) == -1)
break;
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,SaveImageTag,400,400);
if (status == MagickFalse)
break;
}
break;
}
}
break;
}
case PHOTOMETRIC_SEPARATED:
{
/*
CMYK TIFF image.
*/
quantum_type=CMYKQuantum;
if (image->alpha_trait != UndefinedPixelTrait)
quantum_type=CMYKAQuantum;
if (image->colorspace != CMYKColorspace)
(void) TransformImageColorspace(image,CMYKColorspace,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*magick_restrict p;
p=GetVirtualPixels(image,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
length=ExportQuantumPixels(image,(CacheView *) NULL,quantum_info,
quantum_type,pixels,exception);
if (TIFFWritePixels(tiff,&tiff_info,y,0,image) == -1)
break;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
}
break;
}
case PHOTOMETRIC_PALETTE:
{
uint16
*blue,
*green,
*red;
/*
Colormapped TIFF image.
*/
red=(uint16 *) AcquireQuantumMemory(65536,sizeof(*red));
green=(uint16 *) AcquireQuantumMemory(65536,sizeof(*green));
blue=(uint16 *) AcquireQuantumMemory(65536,sizeof(*blue));
if ((red == (uint16 *) NULL) || (green == (uint16 *) NULL) ||
(blue == (uint16 *) NULL))
ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
/*
Initialize TIFF colormap.
*/
(void) ResetMagickMemory(red,0,65536*sizeof(*red));
(void) ResetMagickMemory(green,0,65536*sizeof(*green));
(void) ResetMagickMemory(blue,0,65536*sizeof(*blue));
for (i=0; i < (ssize_t) image->colors; i++)
{
red[i]=ScaleQuantumToShort(image->colormap[i].red);
green[i]=ScaleQuantumToShort(image->colormap[i].green);
blue[i]=ScaleQuantumToShort(image->colormap[i].blue);
}
(void) TIFFSetField(tiff,TIFFTAG_COLORMAP,red,green,blue);
red=(uint16 *) RelinquishMagickMemory(red);
green=(uint16 *) RelinquishMagickMemory(green);
blue=(uint16 *) RelinquishMagickMemory(blue);
}
default:
{
/*
Convert PseudoClass packets to contiguous grayscale scanlines.
*/
quantum_type=IndexQuantum;
if (image->alpha_trait != UndefinedPixelTrait)
{
if (photometric != PHOTOMETRIC_PALETTE)
quantum_type=GrayAlphaQuantum;
else
quantum_type=IndexAlphaQuantum;
}
else
if (photometric != PHOTOMETRIC_PALETTE)
quantum_type=GrayQuantum;
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*magick_restrict p;
p=GetVirtualPixels(image,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
length=ExportQuantumPixels(image,(CacheView *) NULL,quantum_info,
quantum_type,pixels,exception);
if (TIFFWritePixels(tiff,&tiff_info,y,0,image) == -1)
break;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
}
break;
}
}
quantum_info=DestroyQuantumInfo(quantum_info);
if (image->colorspace == LabColorspace)
DecodeLabImage(image,exception);
DestroyTIFFInfo(&tiff_info);
DisableMSCWarning(4127)
if (0 && (image_info->verbose != MagickFalse))
RestoreMSCWarning
TIFFPrintDirectory(tiff,stdout,MagickFalse);
(void) TIFFWriteDirectory(tiff);
image=SyncNextImageInList(image);
if (image == (Image *) NULL)
break;
status=SetImageProgress(image,SaveImagesTag,scene++,
GetImageListLength(image));
if (status == MagickFalse)
break;
} while (image_info->adjoin != MagickFalse);
TIFFClose(tiff);
return(MagickTrue);
}
| 1 |
[
"CWE-369"
] |
ImageMagick
|
728dc6a600cf4cbdac846964c85cc04339db8ac1
| 211,818,984,846,135,230,000,000,000,000,000,000,000 | 857 |
Fix TIFF divide by zero (bug report from Donghai Zhu)
|
cmsTagTypeSignature DecideCurveType(cmsFloat64Number ICCVersion, const void *Data)
{
cmsToneCurve* Curve = (cmsToneCurve*) Data;
if (ICCVersion < 4.0) return cmsSigCurveType;
if (Curve ->nSegments != 1) return cmsSigCurveType; // Only 1-segment curves can be saved as parametric
if (Curve ->Segments[0].Type < 0) return cmsSigCurveType; // Only non-inverted curves
if (Curve ->Segments[0].Type > 5) return cmsSigCurveType; // Only ICC parametric curves
return cmsSigParametricCurveType;
}
| 0 |
[] |
Little-CMS
|
41d222df1bc6188131a8f46c32eab0a4d4cdf1b6
| 249,679,252,521,110,000,000,000,000,000,000,000,000 | 11 |
Memory squeezing fix: lcms2 cmsPipeline construction
When creating a new pipeline, lcms would often try to allocate a stage
and pass it to cmsPipelineInsertStage without checking whether the
allocation succeeded. cmsPipelineInsertStage would then assert (or crash)
if it had not.
The fix here is to change cmsPipelineInsertStage to check and return
an error value. All calling code is then checked to test this return
value and cope.
|
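The change described above turns an assert-on-NULL inside cmsPipelineInsertStage into an error return that every caller must check. The sketch below mirrors that calling convention with stand-in types; it is not the Little-CMS API:

```c
#include <stdbool.h>
#include <stdio.h>

/* Stub pipeline/stage types (assumptions for illustration). */
typedef struct stage { struct stage *next; } stage_t;
typedef struct { stage_t *head; } pipeline_t;

/* Instead of asserting when handed a NULL stage (e.g. after a failed
 * allocation), report failure and let the caller clean up. */
static bool pipeline_insert_stage(pipeline_t *p, stage_t *s)
{
    if (p == NULL || s == NULL)
        return false;           /* previously an assert/crash */
    s->next = p->head;
    p->head = s;
    return true;
}

int main(void)
{
    pipeline_t p = { 0 };
    stage_t *s = NULL;          /* pretend the stage allocation failed */
    if (!pipeline_insert_stage(&p, s))
        fprintf(stderr, "stage allocation failed, pipeline left unchanged\n");
    return 0;
}
```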
static bool check_client_passwd(PgSocket *client, const char *passwd)
{
char md5[MD5_PASSWD_LEN + 1];
const char *correct;
PgUser *user = client->auth_user;
/* auth_user may be missing */
if (!user) {
slog_error(client, "Password packet before auth packet?");
return false;
}
/* disallow empty passwords */
if (!*passwd || !*user->passwd)
return false;
switch (cf_auth_type) {
case AUTH_PLAIN:
return strcmp(user->passwd, passwd) == 0;
case AUTH_CRYPT:
correct = crypt(user->passwd, (char *)client->tmp_login_salt);
return correct && strcmp(correct, passwd) == 0;
case AUTH_MD5:
if (strlen(passwd) != MD5_PASSWD_LEN)
return false;
if (!isMD5(user->passwd))
pg_md5_encrypt(user->passwd, user->name, strlen(user->name), user->passwd);
pg_md5_encrypt(user->passwd + 3, (char *)client->tmp_login_salt, 4, md5);
return strcmp(md5, passwd) == 0;
}
return false;
}
| 0 |
[
"CWE-476"
] |
pgbouncer
|
edab5be6665b9e8de66c25ba527509b229468573
| 148,178,490,662,925,000,000,000,000,000,000,000,000 | 32 |
Check if auth_user is set.
Fixes a crash if password packet appears before startup packet (#42).
|
bool is_equal(const LEX_STRING *a, const LEX_STRING *b)
{
return a->length == b->length && !strncmp(a->str, b->str, a->length);
}
| 0 |
[] |
server
|
0168d1eda30dad4b517659422e347175eb89e923
| 26,115,020,386,942,730,000,000,000,000,000,000,000 | 4 |
MDEV-25766 Unused CTE lead to a crash in find_field_in_tables/find_order_in_list
Do not assume that subquery Item always present.
|
static gps_mask_t rtcm104v3_analyze(struct gps_device_t *session)
{
uint16_t type = getbeu16(session->packet.inbuffer, 3) >> 4;
gpsd_report(LOG_RAW, "RTCM 3.x packet %d\n", type);
rtcm3_unpack(&session->gpsdata.rtcm3, (char *)session->packet.outbuffer);
session->cycle_end_reliable = true;
return RTCM3_SET;
}
| 0 |
[] |
gpsd
|
08edc49d8f63c75bfdfb480b083b0d960310f94f
| 126,439,299,393,504,800,000,000,000,000,000,000,000 | 9 |
Address Savannah bug #38511:
ais_context->bitlen <= sizeof(ais_context->bits)/8 is not checked
|
void VvcUnit::decodeBuffer(const uint8_t* buffer, const uint8_t* end)
{
delete[] m_nalBuffer;
m_nalBuffer = new uint8_t[end - buffer];
m_nalBufferLen = NALUnit::decodeNAL(buffer, end, m_nalBuffer, end - buffer);
}
| 0 |
[
"CWE-22"
] |
tsMuxer
|
3763dd34755a8944d903aa19578fa22cd3734165
| 146,094,414,338,167,780,000,000,000,000,000,000,000 | 6 |
Fix Buffer Overflow
Fixes issue #509.
|
static int vsr_active(struct task_struct *target,
const struct user_regset *regset)
{
flush_vsx_to_thread(target);
return target->thread.used_vsr ? regset->n : 0;
}
| 0 |
[
"CWE-119",
"CWE-787"
] |
linux
|
c1fa0768a8713b135848f78fd43ffc208d8ded70
| 322,669,073,710,396,460,000,000,000,000,000,000,000 | 6 |
powerpc/tm: Flush TM only if CPU has TM feature
Commit cd63f3c ("powerpc/tm: Fix saving of TM SPRs in core dump")
added code to access TM SPRs in flush_tmregs_to_thread(). However
flush_tmregs_to_thread() does not check if TM feature is available on
CPU before trying to access TM SPRs in order to copy live state to
thread structures. flush_tmregs_to_thread() is indeed guarded by
CONFIG_PPC_TRANSACTIONAL_MEM but it might be the case that kernel
was compiled with CONFIG_PPC_TRANSACTIONAL_MEM enabled and ran on
a CPU without TM feature available, thus rendering the execution
of TM instructions that are treated by the CPU as illegal instructions.
The fix is just to add proper checking in flush_tmregs_to_thread()
if CPU has the TM feature before accessing any TM-specific resource,
returning immediately if TM is not available on the CPU. Adding
that checking in flush_tmregs_to_thread() instead of in places
where it is called, like in vsr_get() and vsr_set(), is better because
avoids the same problem cropping up elsewhere.
Cc: [email protected] # v4.13+
Fixes: cd63f3c ("powerpc/tm: Fix saving of TM SPRs in core dump")
Signed-off-by: Gustavo Romero <[email protected]>
Reviewed-by: Cyril Bur <[email protected]>
Signed-off-by: Michael Ellerman <[email protected]>
|
cycle_job (GSWindow *window,
GSJob *job,
GSManager *manager)
{
gs_job_stop (job);
manager_select_theme_for_job (manager, job);
manager_maybe_start_job_for_window (manager, window);
}
| 0 |
[] |
gnome-screensaver
|
2f597ea9f1f363277fd4dfc109fa41bbc6225aca
| 261,628,735,235,125,230,000,000,000,000,000,000,000 | 8 |
Fix adding monitors
Make sure to show windows that are added. And fix an off by one bug.
|
CListener* CZNC::FindListener(u_short uPort, const CString& sBindHost,
EAddrType eAddr) {
for (CListener* pListener : m_vpListeners) {
if (pListener->GetPort() != uPort) continue;
if (pListener->GetBindHost() != sBindHost) continue;
if (pListener->GetAddrType() != eAddr) continue;
return pListener;
}
return nullptr;
}
| 0 |
[
"CWE-20"
] |
znc
|
64613bc8b6b4adf1e32231f9844d99cd512b8973
| 229,692,350,894,469,270,000,000,000,000,000,000,000 | 10 |
Don't crash if user specified invalid encoding.
This is CVE-2019-9917
|
void jas_iccattrtab_dump(jas_iccattrtab_t *attrtab, FILE *out)
{
int i;
jas_iccattr_t *attr;
jas_iccattrval_t *attrval;
jas_iccattrvalinfo_t *info;
char buf[16];
fprintf(out, "numattrs=%d\n", attrtab->numattrs);
fprintf(out, "---\n");
for (i = 0; i < attrtab->numattrs; ++i) {
attr = &attrtab->attrs[i];
attrval = attr->val;
info = jas_iccattrvalinfo_lookup(attrval->type);
if (!info) abort();
fprintf(out, "attrno=%d; attrname=\"%s\"(0x%08"PRIxFAST32"); attrtype=\"%s\"(0x%08"PRIxFAST32")\n",
i,
jas_iccsigtostr(attr->name, &buf[0]),
attr->name,
jas_iccsigtostr(attrval->type, &buf[8]),
attrval->type
);
jas_iccattrval_dump(attrval, out);
fprintf(out, "---\n");
}
}
| 0 |
[
"CWE-20",
"CWE-190"
] |
jasper
|
d42b2388f7f8e0332c846675133acea151fc557a
| 252,963,892,827,873,880,000,000,000,000,000,000,000 | 25 |
The generation of the configuration file jas_config.h has been completely
reworked in order to avoid pollution of the global namespace.
Some problematic types like uchar, ulong, and friends have been replaced
with names with a jas_ prefix.
An option max_samples has been added to the BMP and JPEG decoders to
restrict the maximum size of image that they can decode. This change
was made as a (possibly temporary) fix to address security concerns.
A max_samples command-line option has also been added to imginfo.
Whether an image component (for jas_image_t) is stored in memory or on
disk is now based on the component size (rather than the image size).
Some debug log message were added.
Some new integer overflow checks were added.
Some new safe integer add/multiply functions were added.
More pre-C99 cruft was removed. JasPer has numerous "hacks" to
handle pre-C99 compilers. JasPer now assumes C99 support. So, this
pre-C99 cruft is unnecessary and can be removed.
The regression jasper-doublefree-mem_close.jpg has been re-enabled.
Theoretically, it should work more predictably now.
|
int kvm_hv_activate_synic(struct kvm_vcpu *vcpu, bool dont_zero_synic_pages)
{
struct kvm_vcpu_hv_synic *synic;
int r;
if (!to_hv_vcpu(vcpu)) {
r = kvm_hv_vcpu_init(vcpu);
if (r)
return r;
}
synic = to_hv_synic(vcpu);
/*
* Hyper-V SynIC auto EOI SINT's are
* not compatible with APICV, so request
* to deactivate APICV permanently.
*/
kvm_request_apicv_update(vcpu->kvm, false, APICV_INHIBIT_REASON_HYPERV);
synic->active = true;
synic->dont_zero_synic_pages = dont_zero_synic_pages;
synic->control = HV_SYNIC_CONTROL_ENABLE;
return 0;
}
| 0 |
[
"CWE-476"
] |
linux
|
919f4ebc598701670e80e31573a58f1f2d2bf918
| 28,143,831,885,148,210,000,000,000,000,000,000,000 | 24 |
KVM: x86: hyper-v: Fix Hyper-V context null-ptr-deref
Reported by syzkaller:
KASAN: null-ptr-deref in range [0x0000000000000140-0x0000000000000147]
CPU: 1 PID: 8370 Comm: syz-executor859 Not tainted 5.11.0-syzkaller #0
RIP: 0010:synic_get arch/x86/kvm/hyperv.c:165 [inline]
RIP: 0010:kvm_hv_set_sint_gsi arch/x86/kvm/hyperv.c:475 [inline]
RIP: 0010:kvm_hv_irq_routing_update+0x230/0x460 arch/x86/kvm/hyperv.c:498
Call Trace:
kvm_set_irq_routing+0x69b/0x940 arch/x86/kvm/../../../virt/kvm/irqchip.c:223
kvm_vm_ioctl+0x12d0/0x2800 arch/x86/kvm/../../../virt/kvm/kvm_main.c:3959
vfs_ioctl fs/ioctl.c:48 [inline]
__do_sys_ioctl fs/ioctl.c:753 [inline]
__se_sys_ioctl fs/ioctl.c:739 [inline]
__x64_sys_ioctl+0x193/0x200 fs/ioctl.c:739
do_syscall_64+0x2d/0x70 arch/x86/entry/common.c:46
entry_SYSCALL_64_after_hwframe+0x44/0xae
Hyper-V context is lazily allocated until Hyper-V specific MSRs are accessed
or SynIC is enabled. However, the syzkaller testcase sets irq routing table
directly w/o enabling SynIC. This results in null-ptr-deref when accessing
SynIC Hyper-V context. This patch fixes it.
syzkaller source: https://syzkaller.appspot.com/x/repro.c?x=163342ccd00000
Reported-by: [email protected]
Fixes: 8f014550dfb1 ("KVM: x86: hyper-v: Make Hyper-V emulation enablement conditional")
Signed-off-by: Wanpeng Li <[email protected]>
Message-Id: <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
|
TEST_P(Http2MetadataIntegrationTest, ProxyLargeMetadataInRequest) {
initialize();
codec_client_ = makeHttpConnection(lookupPort("http"));
auto encoder_decoder = codec_client_->startRequest(default_request_headers_);
request_encoder_ = &encoder_decoder.first;
auto response = std::move(encoder_decoder.second);
std::string value = std::string(80 * 1024, '1');
Http::MetadataMap metadata_map = {{"key", value}};
codec_client_->sendMetadata(*request_encoder_, metadata_map);
codec_client_->sendData(*request_encoder_, 1, false);
codec_client_->sendMetadata(*request_encoder_, metadata_map);
codec_client_->sendData(*request_encoder_, 1, false);
codec_client_->sendMetadata(*request_encoder_, metadata_map);
Http::TestRequestTrailerMapImpl request_trailers{{"request", "trailer"}};
codec_client_->sendTrailers(*request_encoder_, request_trailers);
waitForNextUpstreamRequest();
// Verifies metadata is received upstream.
upstream_request_->encodeHeaders(default_response_headers_, true);
EXPECT_EQ(upstream_request_->metadata_map().find("key")->second, value);
EXPECT_EQ(upstream_request_->metadata_map().size(), 1);
EXPECT_EQ(upstream_request_->duplicated_metadata_key_count().find("key")->second, 3);
response->waitForEndStream();
ASSERT_TRUE(response->complete());
}
| 0 |
[
"CWE-400"
] |
envoy
|
0e49a495826ea9e29134c1bd54fdeb31a034f40c
| 124,594,388,393,897,660,000,000,000,000,000,000,000 | 28 |
http/2: add stats and stream flush timeout (#139)
This commit adds a new stream flush timeout to guard against a
remote server that does not open window once an entire stream has
been buffered for flushing. Additional stats have also been added
to better understand the codecs view of active streams as well as
amount of data buffered.
Signed-off-by: Matt Klein <[email protected]>
|
R_API int r_sys_sigaction(int *sig, void (*handler) (int)) {
struct sigaction sigact = { };
int ret, i;
if (!sig) {
return -EINVAL;
}
sigact.sa_handler = handler;
sigemptyset (&sigact.sa_mask);
for (i = 0; sig[i] != 0; i++) {
sigaddset (&sigact.sa_mask, sig[i]);
}
for (i = 0; sig[i] != 0; i++) {
ret = sigaction (sig[i], &sigact, NULL);
if (ret) {
eprintf ("Failed to set signal handler for signal '%d': %s\n", sig[i], strerror(errno));
return ret;
}
}
return 0;
}
| 0 |
[
"CWE-78"
] |
radare2
|
04edfa82c1f3fa2bc3621ccdad2f93bdbf00e4f9
| 77,406,544,419,369,280,000,000,000,000,000,000,000 | 25 |
Fix command injection on PDB download (#16966)
* Fix r_sys_mkdirp with absolute path on Windows
* Fix build with --with-openssl
* Use RBuffer in r_socket_http_answer()
* r_socket_http_answer: Fix read for big responses
* Implement r_str_escape_sh()
* Cleanup r_socket_connect() on Windows
* Fix socket being created without a protocol
* Fix socket connect with SSL ##socket
* Use select() in r_socket_ready()
* Fix read failing if received only protocol answer
* Fix double-free
* r_socket_http_get: Fail if req. SSL with no support
* Follow redirects in r_socket_http_answer()
* Fix r_socket_http_get result length with R2_CURL=1
* Also follow redirects
* Avoid using curl for downloading PDBs
* Use r_socket_http_get() on UNIXs
* Use WinINet API on Windows for r_socket_http_get()
* Fix command injection
* Fix r_sys_cmd_str_full output for binary data
* Validate GUID on PDB download
* Pass depth to socket_http_get_recursive()
* Remove 'r_' and '__' from static function names
* Fix is_valid_guid
* Fix for comments
|
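
One of the bullet points above is the command-injection fix and the new shell-escaping helper. A general, hedged sketch of the underlying mitigation (not radare2 code; curl is only a stand-in for whatever downloader is invoked) is to skip the shell entirely and pass arguments as an argv array:

#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

/* Run "curl -o <out> <url>" without building a shell command line, so
 * metacharacters in url/out cannot be interpreted by a shell.
 * Illustrative only. */
static int fetch_no_shell(const char *url, const char *out)
{
    pid_t pid = fork();

    if (pid < 0)
        return -1;
    if (pid == 0) {
        char *const argv[] = { "curl", "-o", (char *)out, (char *)url, NULL };
        execvp("curl", argv);
        _exit(127);                 /* exec failed */
    }
    int status;
    if (waitpid(pid, &status, 0) < 0)
        return -1;
    return WIFEXITED(status) ? WEXITSTATUS(status) : -1;
}

int main(void)
{
    return fetch_no_shell("https://example.com/example.pdb", "./example.pdb");
}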
AP_DECLARE(apr_status_t) ap_reopen_scoreboard(apr_pool_t *p, apr_shm_t **shm,
int detached)
{
#if APR_HAS_SHARED_MEMORY
if (!detached) {
return APR_SUCCESS;
}
if (apr_shm_size_get(ap_scoreboard_shm) < scoreboard_size) {
ap_log_error(APLOG_MARK, APLOG_CRIT, 0, ap_server_conf, APLOGNO(00005)
"Fatal error: shared scoreboard too small for child!");
apr_shm_detach(ap_scoreboard_shm);
ap_scoreboard_shm = NULL;
return APR_EINVAL;
}
/* everything will be cleared shortly */
if (*shm) {
*shm = ap_scoreboard_shm;
}
#endif
return APR_SUCCESS;
}
| 0 |
[
"CWE-476"
] |
httpd
|
fa7b2a5250e54363b3a6c8ac3aaa7de4e8da9b2e
| 52,467,633,336,157,080,000,000,000,000,000,000,000 | 21 |
Merge r1878092 from trunk:
Fix a NULL pointer dereference
* server/scoreboard.c (ap_increment_counts): In certain cases, such as some
invalid requests, r->method might be NULL here. r->method_number defaults
to M_GET and hence is M_GET in these cases.
Submitted by: rpluem
Reviewed by: covener, ylavic, jfclere
git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1893051 13f79535-47bb-0310-9956-ffa450edef68
|
lys_getnext_union_type(const struct lys_type *last, const struct lys_type *type)
{
FUN_IN;
int found = 0;
if (!type || (type->base != LY_TYPE_UNION)) {
return NULL;
}
return lyp_get_next_union_type((struct lys_type *)type, (struct lys_type *)last, &found);
}
| 0 |
[
"CWE-617"
] |
libyang
|
5ce30801f9ccc372bbe9b7c98bb5324b15fb010a
| 119,821,619,336,385,760,000,000,000,000,000,000,000 | 12 |
schema tree BUGFIX freeing nodes with no module set
Context must be passed explicitly for these cases.
Fixes #1452
|
const char *MirrorJob::AddPatternsFrom(Ref<PatternSet>& exclude,char opt,const char *file)
{
FILE *f=fopen(file,"r");
if(!f)
return xstring::format("%s: %s",file,strerror(errno));
xstring line;
const char *err=0;
int c;
while(!feof(f)) {
line.truncate();
while((c=getc(f))!=EOF && c!='\n')
line.append(c);
if(line.length()>0) {
err=AddPattern(exclude,opt,line);
if(err)
break;
}
}
fclose(f);
return err;
}
| 0 |
[
"CWE-20",
"CWE-401"
] |
lftp
|
a27e07d90a4608ceaf928b1babb27d4d803e1992
| 31,850,489,180,606,187,000,000,000,000,000,000,000 | 23 |
mirror: prepend ./ to rm and chmod arguments to avoid URL recognition (fix #452)
|
rectify_client_ciphers(STACK_OF(SSL_CIPHER) **ciphers)
{
#ifdef V2_HANDSHAKE_CLIENT
if (PREDICT_UNLIKELY(!CLIENT_CIPHER_STACK)) {
/* We need to set CLIENT_CIPHER_STACK to an array of the ciphers
* we want.*/
int i = 0, j = 0;
/* First, create a dummy SSL_CIPHER for every cipher. */
CLIENT_CIPHER_DUMMIES =
tor_malloc_zero(sizeof(SSL_CIPHER)*N_CLIENT_CIPHERS);
for (i=0; i < N_CLIENT_CIPHERS; ++i) {
CLIENT_CIPHER_DUMMIES[i].valid = 1;
CLIENT_CIPHER_DUMMIES[i].id = CLIENT_CIPHER_INFO_LIST[i].id | (3<<24);
CLIENT_CIPHER_DUMMIES[i].name = CLIENT_CIPHER_INFO_LIST[i].name;
}
CLIENT_CIPHER_STACK = sk_SSL_CIPHER_new_null();
tor_assert(CLIENT_CIPHER_STACK);
log_debug(LD_NET, "List was: %s", CLIENT_CIPHER_LIST);
for (j = 0; j < sk_SSL_CIPHER_num(*ciphers); ++j) {
SSL_CIPHER *cipher = sk_SSL_CIPHER_value(*ciphers, j);
log_debug(LD_NET, "Cipher %d: %lx %s", j, cipher->id, cipher->name);
}
/* Then copy as many ciphers as we can from the good list, inserting
* dummies as needed. */
j=0;
for (i = 0; i < N_CLIENT_CIPHERS; ) {
SSL_CIPHER *cipher = NULL;
if (j < sk_SSL_CIPHER_num(*ciphers))
cipher = sk_SSL_CIPHER_value(*ciphers, j);
if (cipher && ((cipher->id >> 24) & 0xff) != 3) {
log_debug(LD_NET, "Skipping v2 cipher %s", cipher->name);
++j;
} else if (cipher &&
(cipher->id & 0xffff) == CLIENT_CIPHER_INFO_LIST[i].id) {
log_debug(LD_NET, "Found cipher %s", cipher->name);
sk_SSL_CIPHER_push(CLIENT_CIPHER_STACK, cipher);
++j;
++i;
} else {
log_debug(LD_NET, "Inserting fake %s", CLIENT_CIPHER_DUMMIES[i].name);
sk_SSL_CIPHER_push(CLIENT_CIPHER_STACK, &CLIENT_CIPHER_DUMMIES[i]);
++i;
}
}
}
sk_SSL_CIPHER_free(*ciphers);
*ciphers = sk_SSL_CIPHER_dup(CLIENT_CIPHER_STACK);
tor_assert(*ciphers);
#else
(void)ciphers;
#endif
}
| 0 |
[
"CWE-264"
] |
tor
|
638fdedcf16cf7d6f7c586d36f7ef335c1c9714f
| 55,333,133,737,451,060,000,000,000,000,000,000,000 | 58 |
Don't send a certificate chain on outgoing TLS connections from non-relays
|
static struct sock *__l2tp_ip6_bind_lookup(struct net *net,
struct in6_addr *laddr,
int dif, u32 tunnel_id)
{
struct sock *sk;
sk_for_each_bound(sk, &l2tp_ip6_bind_table) {
struct in6_addr *addr = inet6_rcv_saddr(sk);
struct l2tp_ip6_sock *l2tp = l2tp_ip6_sk(sk);
if (l2tp == NULL)
continue;
if ((l2tp->conn_id == tunnel_id) &&
net_eq(sock_net(sk), net) &&
!(addr && ipv6_addr_equal(addr, laddr)) &&
!(sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif))
goto found;
}
sk = NULL;
found:
return sk;
}
| 0 |
[
"CWE-200"
] |
linux
|
b860d3cc62877fad02863e2a08efff69a19382d2
| 290,199,007,486,072,850,000,000,000,000,000,000,000 | 24 |
l2tp: fix info leak in l2tp_ip6_recvmsg()
The L2TP code for IPv6 fails to initialize the l2tp_conn_id member of
struct sockaddr_l2tpip6 and therefore leaks four bytes of kernel stack
in l2tp_ip6_recvmsg() when msg_name is set.
Initialize l2tp_conn_id with 0 to avoid the info leak.
Signed-off-by: Mathias Krause <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
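
The fix described above initializes the single field that was left unset. The broader pattern for address structures copied back to user space, sketched below in hedged user-space C (the struct is invented for illustration, not the kernel fix itself), is to clear the whole structure before filling it so padding and forgotten members cannot carry stale stack bytes:

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>

struct sample_addr {
    unsigned short family;
    struct in6_addr addr;
    unsigned int conn_id;   /* the kind of member that was left unset */
};

/* Fill an address structure destined for copy-out: memset() the whole
 * thing first so every byte, including padding, has a defined value. */
static void fill_addr(struct sample_addr *out, const struct in6_addr *src,
                      unsigned int conn_id)
{
    memset(out, 0, sizeof(*out));
    out->family = AF_INET6;
    out->addr = *src;
    out->conn_id = conn_id;
}

int main(void)
{
    struct in6_addr loop = IN6ADDR_LOOPBACK_INIT;
    struct sample_addr sa;

    fill_addr(&sa, &loop, 42);
    printf("family=%u conn_id=%u\n", sa.family, sa.conn_id);
    return 0;
}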
vte_sequence_handler_ll (VteTerminal *terminal, GValueArray *params)
{
VteScreen *screen;
screen = terminal->pvt->screen;
screen->cursor_current.row = MAX(screen->insert_delta,
screen->insert_delta +
terminal->row_count - 1);
screen->cursor_current.col = 0;
}
| 0 |
[] |
vte
|
58bc3a942f198a1a8788553ca72c19d7c1702b74
| 13,313,815,881,442,128,000,000,000,000,000,000,000 | 9 |
fix bug #548272
svn path=/trunk/; revision=2365
|
GF_Box *abst_box_new()
{
ISOM_DECL_BOX_ALLOC(GF_AdobeBootstrapInfoBox, GF_ISOM_BOX_TYPE_ABST);
tmp->server_entry_table = gf_list_new();
tmp->quality_entry_table = gf_list_new();
tmp->segment_run_table_entries = gf_list_new();
tmp->fragment_run_table_entries = gf_list_new();
return (GF_Box *)tmp;
}
| 0 |
[
"CWE-125"
] |
gpac
|
093283e727f396130651280609e687cd4778e0d1
| 190,790,768,089,279,020,000,000,000,000,000,000,000 | 9 |
fixed #1564
|
static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *qp,
struct ib_wc *wc,
struct bnxt_qplib_cqe *cqe)
{
struct bnxt_re_dev *rdev = qp->rdev;
struct bnxt_re_qp *qp1_qp = NULL;
struct bnxt_qplib_cqe *orig_cqe = NULL;
struct bnxt_re_sqp_entries *sqp_entry = NULL;
int nw_type;
u32 tbl_idx;
u16 vlan_id;
u8 sl;
tbl_idx = cqe->wr_id;
sqp_entry = &rdev->sqp_tbl[tbl_idx];
qp1_qp = sqp_entry->qp1_qp;
orig_cqe = &sqp_entry->cqe;
wc->wr_id = sqp_entry->wrid;
wc->byte_len = orig_cqe->length;
wc->qp = &qp1_qp->ib_qp;
wc->ex.imm_data = orig_cqe->immdata;
wc->src_qp = orig_cqe->src_qp;
memcpy(wc->smac, orig_cqe->smac, ETH_ALEN);
if (bnxt_re_is_vlan_pkt(orig_cqe, &vlan_id, &sl)) {
wc->vlan_id = vlan_id;
wc->sl = sl;
wc->wc_flags |= IB_WC_WITH_VLAN;
}
wc->port_num = 1;
wc->vendor_err = orig_cqe->status;
wc->opcode = IB_WC_RECV;
wc->status = __rawqp1_to_ib_wc_status(orig_cqe->status);
wc->wc_flags |= IB_WC_GRH;
nw_type = bnxt_re_check_packet_type(orig_cqe->raweth_qp1_flags,
orig_cqe->raweth_qp1_flags2);
if (nw_type >= 0) {
wc->network_hdr_type = bnxt_re_to_ib_nw_type(nw_type);
wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
}
}
| 0 |
[
"CWE-400",
"CWE-401"
] |
linux
|
4a9d46a9fe14401f21df69cea97c62396d5fb053
| 63,225,768,103,926,570,000,000,000,000,000,000,000 | 45 |
RDMA: Fix goto target to release the allocated memory
In bnxt_re_create_srq(), when ib_copy_to_udata() fails allocated memory
should be released by goto fail.
Fixes: 37cb11acf1f7 ("RDMA/bnxt_re: Add SRQ support for Broadcom adapters")
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Navid Emamdoost <[email protected]>
Reviewed-by: Jason Gunthorpe <[email protected]>
Signed-off-by: Jason Gunthorpe <[email protected]>
|
**/
T cubic_cut_atXY(const float fx, const float fy, const int z, const int c, const T& out_value) const {
return cimg::type<T>::cut(cubic_atXY(fx,fy,z,c,out_value));
| 0 |
[
"CWE-125"
] |
CImg
|
10af1e8c1ad2a58a0a3342a856bae63e8f257abb
| 238,822,819,092,359,500,000,000,000,000,000,000,000 | 3 |
Fix other issues in 'CImg<T>::load_bmp()'.
|
static void snd_pcm_substream_proc_status_read(struct snd_info_entry *entry,
struct snd_info_buffer *buffer)
{
struct snd_pcm_substream *substream = entry->private_data;
struct snd_pcm_runtime *runtime;
struct snd_pcm_status status;
int err;
mutex_lock(&substream->pcm->open_mutex);
runtime = substream->runtime;
if (!runtime) {
snd_iprintf(buffer, "closed\n");
goto unlock;
}
memset(&status, 0, sizeof(status));
err = snd_pcm_status(substream, &status);
if (err < 0) {
snd_iprintf(buffer, "error %d\n", err);
goto unlock;
}
snd_iprintf(buffer, "state: %s\n", snd_pcm_state_name(status.state));
snd_iprintf(buffer, "owner_pid : %d\n", pid_vnr(substream->pid));
snd_iprintf(buffer, "trigger_time: %ld.%09ld\n",
status.trigger_tstamp.tv_sec, status.trigger_tstamp.tv_nsec);
snd_iprintf(buffer, "tstamp : %ld.%09ld\n",
status.tstamp.tv_sec, status.tstamp.tv_nsec);
snd_iprintf(buffer, "delay : %ld\n", status.delay);
snd_iprintf(buffer, "avail : %ld\n", status.avail);
snd_iprintf(buffer, "avail_max : %ld\n", status.avail_max);
snd_iprintf(buffer, "-----\n");
snd_iprintf(buffer, "hw_ptr : %ld\n", runtime->status->hw_ptr);
snd_iprintf(buffer, "appl_ptr : %ld\n", runtime->control->appl_ptr);
unlock:
mutex_unlock(&substream->pcm->open_mutex);
}
| 0 |
[
"CWE-416"
] |
linux
|
362bca57f5d78220f8b5907b875961af9436e229
| 259,908,671,066,299,730,000,000,000,000,000,000,000 | 35 |
ALSA: pcm: prevent UAF in snd_pcm_info
When the device descriptor is closed, the `substream->runtime` pointer
is freed. But another thread may be in the ioctl handler, case
SNDRV_CTL_IOCTL_PCM_INFO. This case calls snd_pcm_info_user() which
calls snd_pcm_info() which accesses the now freed `substream->runtime`.
Note: this fixes CVE-2017-0861
Signed-off-by: Robb Glasser <[email protected]>
Signed-off-by: Nick Desaulniers <[email protected]>
Cc: <[email protected]>
Signed-off-by: Takashi Iwai <[email protected]>
|
TfLiteRegistration* Register_BATCH_TO_SPACE_ND() {
return Register_BATCH_TO_SPACE_ND_GENERIC_OPT();
}
| 0 |
[
"CWE-369"
] |
tensorflow
|
2c74674348a4708ced58ad6eb1b23354df8ee044
| 31,153,406,769,341,370,000,000,000,000,000,000,000 | 3 |
Prevent division by 0
PiperOrigin-RevId: 370979352
Change-Id: Ic79191c316d986fc6072ecaebfec9d5f2b924d00
|
void unregister_binfmt(struct linux_binfmt * fmt)
{
write_lock(&binfmt_lock);
list_del(&fmt->lh);
write_unlock(&binfmt_lock);
}
| 0 |
[
"CWE-200"
] |
linux-2.6
|
b66c5984017533316fd1951770302649baf1aa33
| 167,226,098,159,871,200,000,000,000,000,000,000,000 | 6 |
exec: do not leave bprm->interp on stack
If a series of scripts are executed, each triggering module loading via
unprintable bytes in the script header, kernel stack contents can leak
into the command line.
Normally execution of binfmt_script and binfmt_misc happens recursively.
However, when modules are enabled, and unprintable bytes exist in the
bprm->buf, execution will restart after attempting to load matching
binfmt modules. Unfortunately, the logic in binfmt_script and
binfmt_misc does not expect to get restarted. They leave bprm->interp
pointing to their local stack. This means on restart bprm->interp is
left pointing into unused stack memory which can then be copied into the
userspace argv areas.
After additional study, it seems that both recursion and restart remains
the desirable way to handle exec with scripts, misc, and modules. As
such, we need to protect the changes to interp.
This changes the logic to require allocation for any changes to the
bprm->interp. To avoid adding a new kmalloc to every exec, the default
value is left as-is. Only when passing through binfmt_script or
binfmt_misc does an allocation take place.
For a proof of concept, see DoTest.sh from:
http://www.halfdog.net/Security/2012/LinuxKernelBinfmtScriptStackDataDisclosure/
Signed-off-by: Kees Cook <[email protected]>
Cc: halfdog <[email protected]>
Cc: P J P <[email protected]>
Cc: Alexander Viro <[email protected]>
Cc: <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
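
The root cause described above is a pointer to a caller's stack buffer stored in a structure that outlives the stack frame. A hedged, kernel-independent C sketch of the broken pattern and the usual fix (heap-allocate anything whose lifetime exceeds the frame) follows; the struct and helpers are invented for illustration:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct binprm_like {
    char *interp;               /* must stay valid after the setter returns */
};

/* Broken pattern: keep a pointer to a local buffer that dies on return. */
static void set_interp_broken(struct binprm_like *b)
{
    char local[64];
    snprintf(local, sizeof(local), "/bin/sh");
    b->interp = local;          /* dangling as soon as we return */
}

/* Fixed pattern: give the string the same lifetime as the struct. */
static int set_interp_fixed(struct binprm_like *b, const char *interp)
{
    char *copy = strdup(interp);
    if (!copy)
        return -1;
    free(b->interp);            /* assumes interp is heap-owned or NULL */
    b->interp = copy;
    return 0;
}

int main(void)
{
    struct binprm_like b = { 0 };

    (void)set_interp_broken;    /* shown only for contrast, never called */
    if (set_interp_fixed(&b, "/bin/sh") == 0)
        printf("interp=%s\n", b.interp);
    free(b.interp);
    return 0;
}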
uint32 char_length() const
{
return (field_length - 1) / field_charset->mbmaxlen;
}
| 0 |
[
"CWE-416",
"CWE-703"
] |
server
|
08c7ab404f69d9c4ca6ca7a9cf7eec74c804f917
| 75,175,123,733,682,410,000,000,000,000,000,000,000 | 4 |
MDEV-24176 Server crashes after insert in the table with virtual
column generated using date_format() and if()
vcol_info->expr is allocated on expr_arena at parsing stage. Since
expr item is allocated on expr_arena all its containee items must be
allocated on expr_arena too. Otherwise fix_session_expr() will
encounter prematurely freed item.
When table is reopened from cache vcol_info contains stale
expression. We refresh expression via TABLE::vcol_fix_exprs() but
first we must prepare a proper context (Vcol_expr_context) which meets
some requirements:
1. As noted above expr update must be done on expr_arena as there may
be new items created. It was a bug in fix_session_expr_for_read() and
was just not reproduced because of no second refix. Now refix is done
for more cases so it does reproduce. Tests affected: vcol.binlog
2. Also name resolution context must be narrowed to the single table.
Tested by: vcol.update main.default vcol.vcol_syntax gcol.gcol_bugfixes
3. sql_mode must be clean and not fail expr update.
sql_mode such as MODE_NO_BACKSLASH_ESCAPES, MODE_NO_ZERO_IN_DATE, etc
must not affect vcol expression update. If the table was created
successfully any further evaluation must not fail. Tests affected:
main.func_like
Reviewed by: Sergei Golubchik <[email protected]>
|
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
struct kvm_guest_debug *dbg)
{
unsigned long rflags;
int i, r;
if (vcpu->arch.guest_state_protected)
return -EINVAL;
vcpu_load(vcpu);
if (dbg->control & (KVM_GUESTDBG_INJECT_DB | KVM_GUESTDBG_INJECT_BP)) {
r = -EBUSY;
if (vcpu->arch.exception.pending)
goto out;
if (dbg->control & KVM_GUESTDBG_INJECT_DB)
kvm_queue_exception(vcpu, DB_VECTOR);
else
kvm_queue_exception(vcpu, BP_VECTOR);
}
/*
* Read rflags as long as potentially injected trace flags are still
* filtered out.
*/
rflags = kvm_get_rflags(vcpu);
vcpu->guest_debug = dbg->control;
if (!(vcpu->guest_debug & KVM_GUESTDBG_ENABLE))
vcpu->guest_debug = 0;
if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) {
for (i = 0; i < KVM_NR_DB_REGS; ++i)
vcpu->arch.eff_db[i] = dbg->arch.debugreg[i];
vcpu->arch.guest_debug_dr7 = dbg->arch.debugreg[7];
} else {
for (i = 0; i < KVM_NR_DB_REGS; i++)
vcpu->arch.eff_db[i] = vcpu->arch.db[i];
}
kvm_update_dr7(vcpu);
if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
vcpu->arch.singlestep_rip = kvm_get_linear_rip(vcpu);
/*
* Trigger an rflags update that will inject or remove the trace
* flags.
*/
kvm_set_rflags(vcpu, rflags);
static_call(kvm_x86_update_exception_bitmap)(vcpu);
kvm_arch_vcpu_guestdbg_update_apicv_inhibit(vcpu->kvm);
r = 0;
out:
vcpu_put(vcpu);
return r;
}
| 0 |
[
"CWE-476"
] |
linux
|
55749769fe608fa3f4a075e42e89d237c8e37637
| 217,690,561,408,338,300,000,000,000,000,000,000,000 | 60 |
KVM: x86: Fix wall clock writes in Xen shared_info not to mark page dirty
When dirty ring logging is enabled, any dirty logging without an active
vCPU context will cause a kernel oops. But we've already declared that
the shared_info page doesn't get dirty tracking anyway, since it would
be kind of insane to mark it dirty every time we deliver an event channel
interrupt. Userspace is supposed to just assume it's always dirty any
time a vCPU can run or event channels are routed.
So stop using the generic kvm_write_wall_clock() and just write directly
through the gfn_to_pfn_cache that we already have set up.
We can make kvm_write_wall_clock() static in x86.c again now, but let's
not remove the 'sec_hi_ofs' argument even though it's not used yet. At
some point we *will* want to use that for KVM guests too.
Fixes: 629b5348841a ("KVM: x86/xen: update wallclock region")
Reported-by: butt3rflyh4ck <[email protected]>
Signed-off-by: David Woodhouse <[email protected]>
Message-Id: <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
|
static inline void fat_dir_readahead(struct inode *dir, sector_t iblock,
sector_t phys)
{
struct super_block *sb = dir->i_sb;
struct msdos_sb_info *sbi = MSDOS_SB(sb);
struct buffer_head *bh;
int sec;
/* This is not a first sector of cluster, or sec_per_clus == 1 */
if ((iblock & (sbi->sec_per_clus - 1)) || sbi->sec_per_clus == 1)
return;
/* root dir of FAT12/FAT16 */
if ((sbi->fat_bits != 32) && (dir->i_ino == MSDOS_ROOT_INO))
return;
bh = sb_find_get_block(sb, phys);
if (bh == NULL || !buffer_uptodate(bh)) {
for (sec = 0; sec < sbi->sec_per_clus; sec++)
sb_breadahead(sb, phys + sec);
}
brelse(bh);
}
| 0 |
[] |
linux-2.6
|
c483bab099cb89e92b7cad94a52fcdaf37e56657
| 177,607,143,046,777,580,000,000,000,000,000,000,000 | 22 |
fat: fix VFAT compat ioctls on 64-bit systems
If you compile and run the below test case in an msdos or vfat directory on
an x86-64 system with -m32 you'll get garbage in the kernel_dirent struct
followed by a SIGSEGV.
The patch fixes this.
Reported and initial fix by Bart Oldeman
#include <sys/types.h>
#include <sys/ioctl.h>
#include <dirent.h>
#include <stdio.h>
#include <unistd.h>
#include <fcntl.h>
struct kernel_dirent {
long d_ino;
long d_off;
unsigned short d_reclen;
char d_name[256]; /* We must not include limits.h! */
};
#define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct kernel_dirent [2])
#define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct kernel_dirent [2])
int main(void)
{
int fd = open(".", O_RDONLY);
struct kernel_dirent de[2];
while (1) {
int i = ioctl(fd, VFAT_IOCTL_READDIR_BOTH, (long)de);
if (i == -1) break;
if (de[0].d_reclen == 0) break;
printf("SFN: reclen=%2d off=%d ino=%d, %-12s",
de[0].d_reclen, de[0].d_off, de[0].d_ino, de[0].d_name);
if (de[1].d_reclen)
printf("\tLFN: reclen=%2d off=%d ino=%d, %s",
de[1].d_reclen, de[1].d_off, de[1].d_ino, de[1].d_name);
printf("\n");
}
return 0;
}
Signed-off-by: Bart Oldeman <[email protected]>
Signed-off-by: OGAWA Hirofumi <[email protected]>
Cc: <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
static RList* strings(RBinFile* bf) {
// hardcode minstrlen = 20
return r_bin_file_get_strings (bf, 20, 0, 2);
}
| 0 |
[
"CWE-400",
"CWE-703"
] |
radare2
|
634b886e84a5c568d243e744becc6b3223e089cf
| 1,635,310,394,998,095,000,000,000,000,000,000,000 | 4 |
Fix DoS in PE/QNX/DYLDCACHE/PSX parsers ##crash
* Reported by lazymio
* Reproducer: AAA4AAAAAB4=
|
static inline u32 task_sid_obj(const struct task_struct *task)
{
u32 sid;
rcu_read_lock();
sid = cred_sid(__task_cred(task));
rcu_read_unlock();
return sid;
}
| 0 |
[
"CWE-416"
] |
linux
|
a3727a8bac0a9e77c70820655fd8715523ba3db7
| 325,684,094,404,699,700,000,000,000,000,000,000,000 | 9 |
selinux,smack: fix subjective/objective credential use mixups
Jann Horn reported a problem with commit eb1231f73c4d ("selinux:
clarify task subjective and objective credentials") where some LSM
hooks were attempting to access the subjective credentials of a task
other than the current task. Generally speaking, it is not safe to
access another task's subjective credentials and doing so can cause
a number of problems.
Further, while looking into the problem, I realized that Smack was
suffering from a similar problem brought about by a similar commit
1fb057dcde11 ("smack: differentiate between subjective and objective
task credentials").
This patch addresses this problem by restoring the use of the task's
objective credentials in those cases where the task is other than the
current executing task. Not only does this resolve the problem
reported by Jann, it is arguably the correct thing to do in these
cases.
Cc: [email protected]
Fixes: eb1231f73c4d ("selinux: clarify task subjective and objective credentials")
Fixes: 1fb057dcde11 ("smack: differentiate between subjective and objective task credentials")
Reported-by: Jann Horn <[email protected]>
Acked-by: Eric W. Biederman <[email protected]>
Acked-by: Casey Schaufler <[email protected]>
Signed-off-by: Paul Moore <[email protected]>
|
avic_update_iommu_vcpu_affinity(struct kvm_vcpu *vcpu, int cpu, bool r)
{
int ret = 0;
unsigned long flags;
struct amd_svm_iommu_ir *ir;
struct vcpu_svm *svm = to_svm(vcpu);
if (!kvm_arch_has_assigned_device(vcpu->kvm))
return 0;
/*
* Here, we go through the per-vcpu ir_list to update all existing
* interrupt remapping table entry targeting this vcpu.
*/
spin_lock_irqsave(&svm->ir_list_lock, flags);
if (list_empty(&svm->ir_list))
goto out;
list_for_each_entry(ir, &svm->ir_list, node) {
ret = amd_iommu_update_ga(cpu, r, ir->data);
if (ret)
break;
}
out:
spin_unlock_irqrestore(&svm->ir_list_lock, flags);
return ret;
}
| 0 |
[
"CWE-401"
] |
linux
|
d80b64ff297e40c2b6f7d7abc1b3eba70d22a068
| 298,875,588,876,812,460,000,000,000,000,000,000,000 | 28 |
KVM: SVM: Fix potential memory leak in svm_cpu_init()
When kmalloc memory for sd->sev_vmcbs failed, we forget to free the page
held by sd->save_area. Also get rid of the var r as '-ENOMEM' is actually
the only possible outcome here.
Reviewed-by: Liran Alon <[email protected]>
Reviewed-by: Vitaly Kuznetsov <[email protected]>
Signed-off-by: Miaohe Lin <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
|
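
The leak above is the usual "second allocation fails, first allocation is never released" error path. A hedged sketch of the goto-unwind idiom, with invented names rather than the SVM code:

#include <stdlib.h>

struct cpu_data_like {
    void *save_area;
    void **sev_vmcbs;
};

/* Allocate two resources; if the second fails, release the first before
 * returning. Generic illustration of the goto-cleanup idiom. */
static int cpu_init_like(struct cpu_data_like *sd, size_t nvmcbs)
{
    sd->save_area = malloc(4096);
    if (!sd->save_area)
        return -1;

    sd->sev_vmcbs = calloc(nvmcbs, sizeof(*sd->sev_vmcbs));
    if (!sd->sev_vmcbs)
        goto free_save_area;

    return 0;

free_save_area:
    free(sd->save_area);
    sd->save_area = NULL;
    return -1;
}

int main(void)
{
    struct cpu_data_like sd = { 0 };

    if (cpu_init_like(&sd, 16) != 0)
        return 1;
    free(sd.sev_vmcbs);
    free(sd.save_area);
    return 0;
}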
const QString Smb4KGlobal::findUmountExecutable()
{
// Find the umount program.
QString umount;
QStringList paths;
paths << "/bin";
paths << "/sbin";
paths << "/usr/bin";
paths << "/usr/sbin";
paths << "/usr/local/bin";
paths << "/usr/local/sbin";
for ( int i = 0; i < paths.size(); ++i )
{
umount = KGlobal::dirs()->findExe("umount", paths.at(i));
if (!umount.isEmpty())
{
break;
}
else
{
continue;
}
}
return umount;
}
| 0 |
[
"CWE-20"
] |
smb4k
|
71554140bdaede27b95dbe4c9b5a028a83c83cce
| 105,761,645,422,685,200,000,000,000,000,000,000,000 | 28 |
Find the mount/umount commands in the helper
Instead of trusting what we get passed in
CVE-2017-8849
|
static int ovs_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
struct datapath *dp;
int bucket = cb->args[0], skip = cb->args[1];
int i, j = 0;
rcu_read_lock();
dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
if (!dp) {
rcu_read_unlock();
return -ENODEV;
}
for (i = bucket; i < DP_VPORT_HASH_BUCKETS; i++) {
struct vport *vport;
j = 0;
hlist_for_each_entry_rcu(vport, &dp->ports[i], dp_hash_node) {
if (j >= skip &&
ovs_vport_cmd_fill_info(vport, skb,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
NLM_F_MULTI,
OVS_VPORT_CMD_NEW) < 0)
goto out;
j++;
}
skip = 0;
}
out:
rcu_read_unlock();
cb->args[0] = i;
cb->args[1] = j;
return skb->len;
}
| 0 |
[
"CWE-416"
] |
net
|
36d5fe6a000790f56039afe26834265db0a3ad4c
| 65,744,413,906,703,360,000,000,000,000,000,000,000 | 38 |
core, nfqueue, openvswitch: Orphan frags in skb_zerocopy and handle errors
skb_zerocopy can copy elements of the frags array between skbs, but it doesn't
orphan them. Also, it doesn't handle errors, so this patch takes care of that
as well, and modifies the callers accordingly. skb_tx_error() is also added to
the callers so they will signal the failed delivery towards the creator of the
skb.
Signed-off-by: Zoltan Kiss <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
int llhttp__internal__c_update_header_state(
llhttp__internal_t* state,
const unsigned char* p,
const unsigned char* endp) {
state->header_state = 1;
return 0;
}
| 0 |
[
"CWE-444"
] |
node
|
641f786bb1a1f6eb1ff8750782ed939780f2b31a
| 148,732,658,051,233,500,000,000,000,000,000,000,000 | 7 |
http: unset `F_CHUNKED` on new `Transfer-Encoding`
Duplicate `Transfer-Encoding` headers should be treated as a single header
whose value is the original values concatenated with a comma separator. In
light of this, even if the previous `Transfer-Encoding` value ended with
`chunked`, we should not let the `F_CHUNKED` flag leak into the next
header, because the mere presence of another header indicates that `chunked`
is not the last transfer-encoding token.
CVE-ID: CVE-2020-8287
Refs: https://github.com/nodejs-private/llhttp-private/pull/3
Refs: https://hackerone.com/bugs?report_id=1002188&subject=nodejs
PR-URL: https://github.com/nodejs-private/node-private/pull/228
Reviewed-By: Fedor Indutny <[email protected]>
Reviewed-By: Rich Trott <[email protected]>
|
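
The rule stated above is that a message is chunked only when `chunked` is the final transfer-coding after all Transfer-Encoding values are joined. A hedged C sketch of that check in isolation (a toy helper, not llhttp's generated state machine):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <strings.h>

/* Return true only if the last comma-separated token of the combined
 * Transfer-Encoding value is "chunked". Illustrative parser only. */
static bool is_chunked_last(const char *te)
{
    const char *last = te, *p = strrchr(te, ',');

    if (p)
        last = p + 1;
    while (*last == ' ' || *last == '\t')
        last++;
    size_t n = strlen(last);
    while (n && (last[n - 1] == ' ' || last[n - 1] == '\t'))
        n--;
    return n == 7 && strncasecmp(last, "chunked", 7) == 0;
}

int main(void)
{
    printf("%d\n", is_chunked_last("gzip, chunked"));   /* 1 */
    printf("%d\n", is_chunked_last("chunked, gzip"));   /* 0 */
    return 0;
}

In the real parser the equivalent decision is made incrementally, per header, which is exactly why a stale F_CHUNKED flag from an earlier header value has to be cleared.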
ttj_copy_file (ThunarTransferJob *job,
GFile *source_file,
GFile *target_file,
GFileCopyFlags copy_flags,
gboolean merge_directories,
GError **error)
{
GFileType source_type;
GFileType target_type;
gboolean target_exists;
GError *err = NULL;
_thunar_return_val_if_fail (THUNAR_IS_TRANSFER_JOB (job), FALSE);
_thunar_return_val_if_fail (G_IS_FILE (source_file), FALSE);
_thunar_return_val_if_fail (G_IS_FILE (target_file), FALSE);
_thunar_return_val_if_fail (error == NULL || *error == NULL, FALSE);
/* reset the file progress */
job->file_progress = 0;
if (exo_job_set_error_if_cancelled (EXO_JOB (job), error))
return FALSE;
source_type = g_file_query_file_type (source_file, G_FILE_QUERY_INFO_NOFOLLOW_SYMLINKS,
exo_job_get_cancellable (EXO_JOB (job)));
if (exo_job_set_error_if_cancelled (EXO_JOB (job), error))
return FALSE;
target_type = g_file_query_file_type (target_file, G_FILE_QUERY_INFO_NOFOLLOW_SYMLINKS,
exo_job_get_cancellable (EXO_JOB (job)));
if (exo_job_set_error_if_cancelled (EXO_JOB (job), error))
return FALSE;
/* check if the target is a symlink and we are in overwrite mode */
if (target_type == G_FILE_TYPE_SYMBOLIC_LINK && (copy_flags & G_FILE_COPY_OVERWRITE) != 0)
{
/* try to delete the symlink */
if (!g_file_delete (target_file, exo_job_get_cancellable (EXO_JOB (job)), &err))
{
g_propagate_error (error, err);
return FALSE;
}
}
/* try to copy the file */
g_file_copy (source_file, target_file, copy_flags,
exo_job_get_cancellable (EXO_JOB (job)),
thunar_transfer_job_progress, job, &err);
/* check if there were errors */
if (G_UNLIKELY (err != NULL && err->domain == G_IO_ERROR))
{
if (err->code == G_IO_ERROR_WOULD_MERGE
|| (err->code == G_IO_ERROR_EXISTS
&& source_type == G_FILE_TYPE_DIRECTORY
&& target_type == G_FILE_TYPE_DIRECTORY))
{
/* we tried to overwrite a directory with a directory. this normally results
* in a merge. ignore the error if we actually *want* to merge */
if (merge_directories)
g_clear_error (&err);
}
else if (err->code == G_IO_ERROR_WOULD_RECURSE)
{
g_clear_error (&err);
/* we tried to copy a directory and either
*
* - the target did not exist which means we simple have to
* create the target directory
*
* or
*
* - the target is not a directory and we tried to overwrite it in
* which case we have to delete it first and then create the target
* directory
*/
/* check if the target file exists */
target_exists = g_file_query_exists (target_file,
exo_job_get_cancellable (EXO_JOB (job)));
/* abort on cancellation, continue otherwise */
if (!exo_job_set_error_if_cancelled (EXO_JOB (job), &err))
{
if (target_exists)
{
/* the target still exists and thus is not a directory. try to remove it */
g_file_delete (target_file,
exo_job_get_cancellable (EXO_JOB (job)),
&err);
}
/* abort on error or cancellation, continue otherwise */
if (err == NULL)
{
/* now try to create the directory */
g_file_make_directory (target_file,
exo_job_get_cancellable (EXO_JOB (job)),
&err);
}
}
}
}
if (G_UNLIKELY (err != NULL))
{
g_propagate_error (error, err);
return FALSE;
}
else
{
return TRUE;
}
}
| 0 |
[
"CWE-399",
"CWE-134"
] |
thunar
|
03dd312e157d4fa8a11d5fa402706ae5b05806fa
| 237,189,758,571,454,900,000,000,000,000,000,000,000 | 117 |
Don't interpret file display names as format strings
This avoids a segfault when copying/moving files containing "%" formatters
in their name.
Signed-off-by: Jannis Pohlmann <[email protected]>
|
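
The underlying rule in the fix above is that untrusted text must never be used as a format string, only as an argument to a fixed format. A minimal hedged C sketch (plain printf standing in for the GLib formatting calls):

#include <stdio.h>

/* A file name that happens to contain printf formatters. */
static const char *display_name = "report%s%n.pdf";

int main(void)
{
    /* Dangerous: the name would be interpreted as a format string. */
    /* printf(display_name); */

    /* Safe: the name is only ever data for a fixed "%s" format. */
    printf("Copying \"%s\"...\n", display_name);
    return 0;
}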
evutil_make_socket_closeonexec(evutil_socket_t fd)
{
#if !defined(_WIN32) && defined(EVENT__HAVE_SETFD)
int flags;
if ((flags = fcntl(fd, F_GETFD, NULL)) < 0) {
event_warn("fcntl(%d, F_GETFD)", fd);
return -1;
}
if (!(flags & FD_CLOEXEC)) {
if (fcntl(fd, F_SETFD, flags | FD_CLOEXEC) == -1) {
event_warn("fcntl(%d, F_SETFD)", fd);
return -1;
}
}
#endif
return 0;
}
| 0 |
[
"CWE-119",
"CWE-787"
] |
libevent
|
329acc18a0768c21ba22522f01a5c7f46cacc4d5
| 154,824,367,952,620,140,000,000,000,000,000,000,000 | 17 |
evutil_parse_sockaddr_port(): fix buffer overflow
@asn-the-goblin-slayer:
"Length between '[' and ']' is cast to signed 32 bit integer on line 1815. Is
the length is more than 2<<31 (INT_MAX), len will hold a negative value.
Consequently, it will pass the check at line 1816. Segfault happens at line
1819.
Generate a resolv.conf with generate-resolv.conf, then compile and run
poc.c. See entry-functions.txt for functions in tor that might be
vulnerable.
Please credit 'Guido Vranken' for this discovery through the Tor bug bounty
program."
Reproducer for gdb (https://gist.github.com/azat/be2b0d5e9417ba0dfe2c):
start
p (1ULL<<31)+1ULL
# $1 = 2147483649
p malloc(sizeof(struct sockaddr))
# $2 = (void *) 0x646010
p malloc(sizeof(int))
# $3 = (void *) 0x646030
p malloc($1)
# $4 = (void *) 0x7fff76a2a010
p memset($4, 1, $1)
# $5 = 1990369296
p (char *)$4
# $6 = 0x7fff76a2a010 '\001' <repeats 200 times>...
set $6[0]='['
set $6[$1]=']'
p evutil_parse_sockaddr_port($4, $2, $3)
# $7 = -1
Before:
$ gdb bin/http-connect < gdb
(gdb) $1 = 2147483649
(gdb) (gdb) $2 = (void *) 0x646010
(gdb) (gdb) $3 = (void *) 0x646030
(gdb) (gdb) $4 = (void *) 0x7fff76a2a010
(gdb) (gdb) $5 = 1990369296
(gdb) (gdb) $6 = 0x7fff76a2a010 '\001' <repeats 200 times>...
(gdb) (gdb) (gdb) (gdb)
Program received signal SIGSEGV, Segmentation fault.
__memcpy_sse2_unaligned () at memcpy-sse2-unaligned.S:36
After:
$ gdb bin/http-connect < gdb
(gdb) $1 = 2147483649
(gdb) (gdb) $2 = (void *) 0x646010
(gdb) (gdb) $3 = (void *) 0x646030
(gdb) (gdb) $4 = (void *) 0x7fff76a2a010
(gdb) (gdb) $5 = 1990369296
(gdb) (gdb) $6 = 0x7fff76a2a010 '\001' <repeats 200 times>...
(gdb) (gdb) (gdb) (gdb) $7 = -1
(gdb) (gdb) quit
Fixes: #318
|
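
The report quoted above describes a pointer-difference length being narrowed to a signed 32-bit int, going negative for huge inputs and slipping past the bounds check. A hedged sketch of doing the comparison in size_t before any copy (an invented helper, not evutil_parse_sockaddr_port itself):

#include <stdio.h>
#include <string.h>

#define ADDR_BUF_LEN 64

/* Copy the text between '[' and ']' into a fixed buffer, doing the length
 * check in size_t so an enormous input cannot wrap to a "small" signed
 * value. Illustrative only. */
static int copy_bracketed(const char *s, char *out, size_t outlen)
{
    const char *open = strchr(s, '[');
    const char *close = open ? strchr(open, ']') : NULL;

    if (!open || !close)
        return -1;

    size_t len = (size_t)(close - (open + 1));
    if (len >= outlen)
        return -1;             /* too long: reject instead of truncating */

    memcpy(out, open + 1, len);
    out[len] = '\0';
    return 0;
}

int main(void)
{
    char buf[ADDR_BUF_LEN];

    if (copy_bracketed("[::1]:80", buf, sizeof(buf)) == 0)
        printf("host=%s\n", buf);
    return 0;
}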
static int ip_dev_loopback_xmit(struct sk_buff *newskb)
{
skb_reset_mac_header(newskb);
__skb_pull(newskb, skb_network_offset(newskb));
newskb->pkt_type = PACKET_LOOPBACK;
newskb->ip_summed = CHECKSUM_UNNECESSARY;
WARN_ON(!skb_dst(newskb));
netif_rx_ni(newskb);
return 0;
}
| 0 |
[
"CWE-362"
] |
linux-2.6
|
f6d8bd051c391c1c0458a30b2a7abcd939329259
| 31,596,596,323,405,317,000,000,000,000,000,000,000 | 10 |
inet: add RCU protection to inet->opt
We lack proper synchronization to manipulate inet->opt ip_options
Problem is ip_make_skb() calls ip_setup_cork() and
ip_setup_cork() possibly makes a copy of ipc->opt (struct ip_options),
without any protection against another thread manipulating inet->opt.
Another thread can change inet->opt pointer and free old one under us.
Use RCU to protect inet->opt (changed to inet->inet_opt).
Instead of handling atomic refcounts, just copy ip_options when
necessary, to avoid cache line dirtying.
We cant insert an rcu_head in struct ip_options since its included in
skb->cb[], so this patch is large because I had to introduce a new
ip_options_rcu structure.
Signed-off-by: Eric Dumazet <[email protected]>
Cc: Herbert Xu <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
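
The commit message above describes protecting a pointer that readers dereference locklessly while a writer may replace and free it. A hedged user-space sketch of the same publish/read/reclaim pattern using liburcu (this assumes liburcu is installed; link with -lurcu or -lurcu-memb depending on the version); the kernel's rcu_assign_pointer/rcu_dereference/synchronize_rcu are the in-kernel analogues:

#include <stdio.h>
#include <stdlib.h>
#include <urcu.h>                /* userspace RCU (liburcu) */

struct opts {
    int ttl;
};

static struct opts *current_opts;   /* RCU-protected pointer */

/* Reader: dereference under rcu_read_lock(); never frees. */
static int read_ttl(void)
{
    int ttl = -1;

    rcu_read_lock();
    struct opts *o = rcu_dereference(current_opts);
    if (o)
        ttl = o->ttl;
    rcu_read_unlock();
    return ttl;
}

/* Writer: publish a fresh copy, wait for readers, then free the old one. */
static void update_ttl(int ttl)
{
    struct opts *n = malloc(sizeof(*n));
    struct opts *old;

    if (!n)
        return;
    n->ttl = ttl;
    old = current_opts;
    rcu_assign_pointer(current_opts, n);
    synchronize_rcu();           /* all pre-existing readers are done */
    free(old);
}

int main(void)
{
    rcu_register_thread();
    update_ttl(64);
    printf("ttl=%d\n", read_ttl());
    update_ttl(128);
    printf("ttl=%d\n", read_ttl());
    rcu_unregister_thread();
    free(current_opts);
    return 0;
}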
execstack_continue(i_ctx_t *i_ctx_p)
{
os_ptr op = osp;
return do_execstack(i_ctx_p, false, op);
}
| 1 |
[
"CWE-200"
] |
ghostpdl
|
34cc326eb2c5695833361887fe0b32e8d987741c
| 74,176,884,199,582,140,000,000,000,000,000,000,000 | 6 |
Bug 699927: don't include operator arrays in execstack output
When we transfer the contents of the execution stack into the array, take the
extra step of replacing any operator arrays on the stack with the operator
that reference them.
This prevents the contents of Postscript defined, internal only operators (those
created with .makeoperator) being exposed via execstack (and thus, via error
handling).
This necessitates a change in the resource remapping 'resource', which contains
a procedure which relies on the contents of the operators arrays being present.
As we already had internal-only variants of countexecstack and execstack
(.countexecstack and .execstack) - using those, and leaving their operation
including the operator arrays means the procedure continues to work correctly.
Both .countexecstack and .execstack are undefined after initialization.
Also, when we store the execstack (or part thereof) for an execstackoverflow
error, make the same oparray/operator substitution as above for execstack.
|
static void sm712_setpalette(int regno, unsigned int red, unsigned int green,
unsigned int blue, struct fb_info *info)
{
/* set bit 5:4 = 01 (write LCD RAM only) */
smtc_seqw(0x66, (smtc_seqr(0x66) & 0xC3) | 0x10);
smtc_mmiowb(regno, dac_reg);
smtc_mmiowb(red >> 10, dac_val);
smtc_mmiowb(green >> 10, dac_val);
smtc_mmiowb(blue >> 10, dac_val);
}
| 0 |
[
"CWE-787"
] |
linux-fbdev
|
bd771cf5c4254511cc4abb88f3dab3bd58bdf8e8
| 142,957,994,837,222,980,000,000,000,000,000,000,000 | 11 |
video: fbdev: sm712fb: Fix crash in smtcfb_read()
Zheyu Ma reported this crash in the sm712fb driver when reading
three bytes from the framebuffer:
BUG: unable to handle page fault for address: ffffc90001ffffff
RIP: 0010:smtcfb_read+0x230/0x3e0
Call Trace:
vfs_read+0x198/0xa00
? do_sys_openat2+0x27d/0x350
? __fget_light+0x54/0x340
ksys_read+0xce/0x190
do_syscall_64+0x43/0x90
Fix it by removing the open-coded endianess fixup-code and
by moving the pointer post decrement out the fb_readl() function.
Reported-by: Zheyu Ma <[email protected]>
Signed-off-by: Helge Deller <[email protected]>
Tested-by: Zheyu Ma <[email protected]>
Cc: [email protected]
|
CString CClient::GetFullName() const {
if (!m_pUser) return GetRemoteIP();
CString sFullName = m_pUser->GetUsername();
if (!m_sIdentifier.empty()) sFullName += "@" + m_sIdentifier;
if (m_pNetwork) sFullName += "/" + m_pNetwork->GetName();
return sFullName;
}
| 0 |
[
"CWE-476"
] |
znc
|
2390ad111bde16a78c98ac44572090b33c3bd2d8
| 245,018,869,446,521,760,000,000,000,000,000,000,000 | 7 |
Fix null pointer dereference in echo-message
The bug was introduced while fixing #1705. If a client did not enable
echo-message, and doesn't have a network, it crashes.
Thanks to LunarBNC for reporting this
|
static int cp2112_gpio_direction_output(struct gpio_chip *chip,
unsigned offset, int value)
{
struct cp2112_device *dev = gpiochip_get_data(chip);
struct hid_device *hdev = dev->hdev;
u8 *buf = dev->in_out_buffer;
unsigned long flags;
int ret;
spin_lock_irqsave(&dev->lock, flags);
ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf,
CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT,
HID_REQ_GET_REPORT);
if (ret != CP2112_GPIO_CONFIG_LENGTH) {
hid_err(hdev, "error requesting GPIO config: %d\n", ret);
goto fail;
}
buf[1] |= 1 << offset;
buf[2] = gpio_push_pull;
ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf,
CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT,
HID_REQ_SET_REPORT);
if (ret < 0) {
hid_err(hdev, "error setting GPIO config: %d\n", ret);
goto fail;
}
spin_unlock_irqrestore(&dev->lock, flags);
/*
* Set gpio value when output direction is already set,
* as specified in AN495, Rev. 0.2, cpt. 4.4
*/
cp2112_gpio_set(chip, offset, value);
return 0;
fail:
spin_unlock_irqrestore(&dev->lock, flags);
return ret < 0 ? ret : -EIO;
}
| 1 |
[
"CWE-404",
"CWE-703"
] |
linux
|
7a7b5df84b6b4e5d599c7289526eed96541a0654
| 126,342,246,483,542,280,000,000,000,000,000,000,000 | 44 |
HID: cp2112: fix sleep-while-atomic
A recent commit fixing DMA-buffers on stack added a shared transfer
buffer protected by a spinlock. This is broken as the USB HID request
callbacks can sleep. Fix this up by replacing the spinlock with a mutex.
Fixes: 1ffb3c40ffb5 ("HID: cp2112: make transfer buffers DMA capable")
Cc: stable <[email protected]> # 4.9
Signed-off-by: Johan Hovold <[email protected]>
Reviewed-by: Benjamin Tissoires <[email protected]>
Signed-off-by: Jiri Kosina <[email protected]>
|
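
The bug class above is holding a non-sleepable lock across an operation that can sleep. A hedged user-space analogue uses a pthread mutex around the blocking work (the transfer function is a stand-in for the HID request, not the cp2112 driver):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t buf_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned char in_out_buffer[64];

/* The transfer may block (it does I/O), so it must be serialized with a
 * sleepable lock rather than a spinlock held with interrupts off. */
static int do_transfer(unsigned char *buf, size_t len)
{
    usleep(1000);            /* pretend this is a blocking USB round trip */
    buf[0] = (unsigned char)len;
    return 0;
}

int gpio_config_update(unsigned offset)
{
    int ret;

    pthread_mutex_lock(&buf_lock);
    ret = do_transfer(in_out_buffer, sizeof(in_out_buffer));
    if (ret == 0)
        in_out_buffer[1] |= 1u << offset;
    pthread_mutex_unlock(&buf_lock);
    return ret;
}

int main(void)
{
    printf("ret=%d\n", gpio_config_update(3));
    return 0;
}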
static int init_nss(struct crypto_instance *instance,
const char *crypto_cipher_type,
const char *crypto_hash_type,
const char *crypto_compat_type)
{
log_printf(instance->log_level_notice,
"Initializing transmit/receive security (NSS) crypto: %s hash: %s compat: %s",
crypto_cipher_type, crypto_hash_type, crypto_compat_type);
if (init_nss_db(instance) < 0) {
return -1;
}
if (init_nss_crypto(instance) < 0) {
return -1;
}
if (init_nss_hash(instance) < 0) {
return -1;
}
return 0;
}
| 0 |
[
"CWE-703"
] |
corosync
|
b3f456a8ceefac6e9f2e9acc2ea0c159d412b595
| 194,188,015,349,994,700,000,000,000,000,000,000,000 | 23 |
totemcrypto: fix hmac key initialization
Signed-off-by: Fabio M. Di Nitto <[email protected]>
Reviewed-by: Jan Friesse <[email protected]>
|
static int packet_notifier(struct notifier_block *this, unsigned long msg, void *data)
{
struct sock *sk;
struct hlist_node *node;
struct net_device *dev = data;
struct net *net = dev_net(dev);
rcu_read_lock();
sk_for_each_rcu(sk, node, &net->packet.sklist) {
struct packet_sock *po = pkt_sk(sk);
switch (msg) {
case NETDEV_UNREGISTER:
if (po->mclist)
packet_dev_mclist(dev, po->mclist, -1);
/* fallthrough */
case NETDEV_DOWN:
if (dev->ifindex == po->ifindex) {
spin_lock(&po->bind_lock);
if (po->running) {
__dev_remove_pack(&po->prot_hook);
__sock_put(sk);
po->running = 0;
sk->sk_err = ENETDOWN;
if (!sock_flag(sk, SOCK_DEAD))
sk->sk_error_report(sk);
}
if (msg == NETDEV_UNREGISTER) {
po->ifindex = -1;
po->prot_hook.dev = NULL;
}
spin_unlock(&po->bind_lock);
}
break;
case NETDEV_UP:
if (dev->ifindex == po->ifindex) {
spin_lock(&po->bind_lock);
if (po->num && !po->running) {
dev_add_pack(&po->prot_hook);
sock_hold(sk);
po->running = 1;
}
spin_unlock(&po->bind_lock);
}
break;
}
}
rcu_read_unlock();
return NOTIFY_DONE;
}
| 0 |
[
"CWE-909"
] |
linux-2.6
|
67286640f638f5ad41a946b9a3dc75327950248f
| 20,533,205,582,047,354,000,000,000,000,000,000,000 | 51 |
net: packet: fix information leak to userland
packet_getname_spkt() doesn't initialize all members of sa_data field of
sockaddr struct if strlen(dev->name) < 13. This structure is then copied
to userland. It leads to leaking of contents of kernel stack memory.
We have to fully fill sa_data with strncpy() instead of strlcpy().
The same with packet_getname(): it doesn't initialize sll_pkttype field of
sockaddr_ll. Set it to zero.
Signed-off-by: Vasiliy Kulikov <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
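
The fix above relies on strncpy() zero-filling the destination after the copied name, unlike strlcpy(), plus explicitly zeroing the one remaining field. A hedged stand-alone C sketch of that padding behaviour (the struct is invented, not the real sockaddr):

#include <stdio.h>
#include <string.h>

struct sockaddr_like {
    unsigned short family;
    char data[14];
};

int main(void)
{
    struct sockaddr_like sa;

    /* A recognizable pattern standing in for stale stack bytes. */
    memset(&sa, 0xAA, sizeof(sa));
    sa.family = 17;

    /* strncpy() zero-fills data[] after the name, so nothing stale is left
     * behind when the struct is copied out. A bounded copy that does not
     * pad (strlcpy-style) would leave the 0xAA bytes in place. */
    strncpy(sa.data, "eth0", sizeof(sa.data));

    for (size_t i = 0; i < sizeof(sa.data); i++)
        printf("%02x ", (unsigned char)sa.data[i]);
    printf("\n");
    return 0;
}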
static int fts3SyncMethod(sqlite3_vtab *pVtab){
/* Following an incremental-merge operation, assuming that the input
** segments are not completely consumed (the usual case), they are updated
** in place to remove the entries that have already been merged. This
** involves updating the leaf block that contains the smallest unmerged
** entry and each block (if any) between the leaf and the root node. So
** if the height of the input segment b-trees is N, and input segments
** are merged eight at a time, updating the input segments at the end
** of an incremental-merge requires writing (8*(1+N)) blocks. N is usually
** small - often between 0 and 2. So the overhead of the incremental
** merge is somewhere between 8 and 24 blocks. To avoid this overhead
** dwarfing the actual productive work accomplished, the incremental merge
** is only attempted if it will write at least 64 leaf blocks. Hence
** nMinMerge.
**
** Of course, updating the input segments also involves deleting a bunch
** of blocks from the segments table. But this is not considered overhead
** as it would also be required by a crisis-merge that used the same input
** segments.
*/
const u32 nMinMerge = 64; /* Minimum amount of incr-merge work to do */
Fts3Table *p = (Fts3Table*)pVtab;
int rc;
i64 iLastRowid = sqlite3_last_insert_rowid(p->db);
rc = sqlite3Fts3PendingTermsFlush(p);
if( rc==SQLITE_OK
&& p->nLeafAdd>(nMinMerge/16)
&& p->nAutoincrmerge && p->nAutoincrmerge!=0xff
){
int mxLevel = 0; /* Maximum relative level value in db */
int A; /* Incr-merge parameter A */
rc = sqlite3Fts3MaxLevel(p, &mxLevel);
assert( rc==SQLITE_OK || mxLevel==0 );
A = p->nLeafAdd * mxLevel;
A += (A/2);
if( A>(int)nMinMerge ) rc = sqlite3Fts3Incrmerge(p, A, p->nAutoincrmerge);
}
sqlite3Fts3SegmentsClose(p);
sqlite3_set_last_insert_rowid(p->db, iLastRowid);
return rc;
}
| 0 |
[
"CWE-787"
] |
sqlite
|
c72f2fb7feff582444b8ffdc6c900c69847ce8a9
| 235,297,766,097,477,430,000,000,000,000,000,000,000 | 45 |
More improvements to shadow table corruption detection in FTS3.
FossilOrigin-Name: 51525f9c3235967bc00a090e84c70a6400698c897aa4742e817121c725b8c99d
|
uint16_t mobi_ordt_lookup(const MOBIOrdt *ordt, const uint16_t offset) {
uint16_t utf16;
if (offset < ordt->offsets_count) {
utf16 = ordt->ordt2[offset];
} else {
utf16 = offset;
}
return utf16;
}
| 0 |
[
"CWE-125",
"CWE-787"
] |
libmobi
|
eafc415bc6067e72577f70d6dd5acbf057ce6e6f
| 319,413,529,125,350,660,000,000,000,000,000,000,000 | 9 |
Fix wrong boundary checks in inflections parser resulting in stack buffer over-read with corrupt input
|
static BOOL resize_vbar_entry(CLEAR_CONTEXT* clear, CLEAR_VBAR_ENTRY* vBarEntry)
{
if (vBarEntry->count > vBarEntry->size)
{
const UINT32 bpp = GetBytesPerPixel(clear->format);
const UINT32 oldPos = vBarEntry->size * bpp;
const UINT32 diffSize = (vBarEntry->count - vBarEntry->size) * bpp;
BYTE* tmp;
vBarEntry->size = vBarEntry->count;
tmp = (BYTE*)realloc(vBarEntry->pixels, vBarEntry->count * bpp);
if (!tmp)
{
WLog_ERR(TAG, "vBarEntry->pixels realloc %" PRIu32 " failed", vBarEntry->count * bpp);
return FALSE;
}
memset(&tmp[oldPos], 0, diffSize);
vBarEntry->pixels = tmp;
}
if (!vBarEntry->pixels && vBarEntry->size)
{
WLog_ERR(TAG, "vBarEntry->pixels is NULL but vBarEntry->size is %" PRIu32 "",
vBarEntry->size);
return FALSE;
}
return TRUE;
}
| 0 |
[
"CWE-125"
] |
FreeRDP
|
363d7046dfec4003b91aecf7867e3b05905f3843
| 301,666,549,911,025,760,000,000,000,000,000,000,000 | 30 |
Fixed oob read in clear_decompress_subcode_rlex
Fixed length checks before stream read.
Thanks to hac425 CVE-2020-11040
|
static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
struct mm_struct *mm,
unsigned long address,
pte_t pte)
{
struct kvm *kvm = mmu_notifier_to_kvm(mn);
int idx;
idx = srcu_read_lock(&kvm->srcu);
spin_lock(&kvm->mmu_lock);
kvm->mmu_notifier_seq++;
kvm_set_spte_hva(kvm, address, pte);
spin_unlock(&kvm->mmu_lock);
srcu_read_unlock(&kvm->srcu, idx);
}
| 0 |
[
"CWE-399"
] |
kvm
|
5b40572ed5f0344b9dbee486a17c589ce1abe1a3
| 242,440,711,741,287,560,000,000,000,000,000,000,000 | 15 |
KVM: Ensure all vcpus are consistent with in-kernel irqchip settings
If some vcpus are created before KVM_CREATE_IRQCHIP, then
irqchip_in_kernel() and vcpu->arch.apic will be inconsistent, leading
to potential NULL pointer dereferences.
Fix by:
- ensuring that no vcpus are installed when KVM_CREATE_IRQCHIP is called
- ensuring that a vcpu has an apic if it is installed after KVM_CREATE_IRQCHIP
This is somewhat long winded because vcpu->arch.apic is created without
kvm->lock held.
Based on earlier patch by Michael Ellerman.
Signed-off-by: Michael Ellerman <[email protected]>
Signed-off-by: Avi Kivity <[email protected]>
|
void conn_close_idle(conn *c) {
if (settings.idle_timeout > 0 &&
(current_time - c->last_cmd_time) > settings.idle_timeout) {
if (c->state != conn_new_cmd && c->state != conn_read) {
if (settings.verbose > 1)
fprintf(stderr,
"fd %d wants to timeout, but isn't in read state", c->sfd);
return;
}
if (settings.verbose > 1)
fprintf(stderr, "Closing idle fd %d\n", c->sfd);
pthread_mutex_lock(&c->thread->stats.mutex);
c->thread->stats.idle_kicks++;
pthread_mutex_unlock(&c->thread->stats.mutex);
conn_set_state(c, conn_closing);
drive_machine(c);
}
}
| 0 |
[] |
memcached
|
f249724cedcab6605ca8a0769ac4b356a8124f63
| 258,495,683,727,604,040,000,000,000,000,000,000,000 | 21 |
crash fix: errstr wasn't initialized in metaget
if meta_flag_preparse bailed out early it would try to read
uninitialized memory.
|
keepalived_alloc_log(bool final)
{
unsigned int overrun = 0, badptr = 0, zero_size = 0;
size_t sum = 0;
MEMCHECK *entry;
if (final) {
/* If this is a forked child, we don't want the dump */
if (skip_mem_check_final)
return;
fprintf(log_op, "\n---[ Keepalived memory dump for (%s) ]---\n\n", terminate_banner);
}
else
fprintf(log_op, "\n---[ Keepalived memory dump for (%s) at %s ]---\n\n", terminate_banner, format_time());
/* List the blocks currently allocated */
if (!RB_EMPTY_ROOT(&alloc_list)) {
fprintf(log_op, "Entries %s\n\n", final ? "not released" : "currently allocated");
rb_for_each_entry(entry, &alloc_list, t) {
sum += entry->size;
fprintf(log_op, "%9p [%3d:%3d], %4zu at %s, %3d, %s",
entry->ptr, entry->seq_num, number_alloc_list,
entry->size, entry->file, entry->line, entry->func);
if (entry->type != ALLOCATED)
fprintf(log_op, " type = %d", entry->type);
fprintf(log_op, "\n");
}
}
if (!list_empty(&bad_list)) {
if (!RB_EMPTY_ROOT(&alloc_list))
fprintf(log_op, "\n");
fprintf(log_op, "Bad entry list\n\n");
list_for_each_entry(entry, &bad_list, l) {
switch (entry->type) {
case FREE_NULL:
badptr++;
fprintf(log_op, "%9s %9s, %4s at %s, %3d, %s - null pointer to free\n",
"NULL", "", "", entry->file, entry->line, entry->func);
break;
case REALLOC_NULL:
badptr++;
fprintf(log_op, "%9s %9s, %4zu at %s, %3d, %s - null pointer to realloc (converted to malloc)\n",
"NULL", "", entry->size, entry->file, entry->line, entry->func);
break;
case FREE_NOT_ALLOC:
badptr++;
fprintf(log_op, "%9p %9s, %4s at %s, %3d, %s - pointer not found for free\n",
entry->ptr, "", "", entry->file, entry->line, entry->func);
break;
case REALLOC_NOT_ALLOC:
badptr++;
fprintf(log_op, "%9p %9s, %4zu at %s, %3d, %s - pointer not found for realloc\n",
entry->ptr, "", entry->size, entry->file, entry->line, entry->func);
break;
case DOUBLE_FREE:
badptr++;
fprintf(log_op, "%9p %9s, %4s at %s, %3d, %s - double free of pointer\n",
entry->ptr, "", "", entry->file, entry->line, entry->func);
break;
case REALLOC_DOUBLE_FREE:
badptr++;
fprintf(log_op, "%9p %9s, %4zu at %s, %3d, %s - realloc 0 size already freed\n",
entry->ptr, "", entry->size, entry->file, entry->line, entry->func);
break;
case OVERRUN:
overrun++;
fprintf(log_op, "%9p [%3d:%3d], %4zu at %s, %3d, %s - buffer overrun\n",
entry->ptr, entry->seq_num, number_alloc_list,
entry->size, entry->file, entry->line, entry->func);
break;
case MALLOC_ZERO_SIZE:
zero_size++;
fprintf(log_op, "%9p [%3d:%3d], %4zu at %s, %3d, %s - malloc zero size\n",
entry->ptr, entry->seq_num, number_alloc_list,
entry->size, entry->file, entry->line, entry->func);
break;
case REALLOC_ZERO_SIZE:
zero_size++;
fprintf(log_op, "%9p [%3d:%3d], %4zu at %s, %3d, %s - realloc zero size (handled as free)\n",
entry->ptr, entry->seq_num, number_alloc_list,
entry->size, entry->file, entry->line, entry->func);
break;
case ALLOCATED: /* not used - avoid compiler warning */
case FREE_SLOT:
case LAST_FREE:
break;
}
}
}
fprintf(log_op, "\n\n---[ Keepalived memory dump summary for (%s) ]---\n", terminate_banner);
fprintf(log_op, "Total number of bytes %s...: %zu\n", final ? "not freed" : "allocated", sum);
fprintf(log_op, "Number of entries %s.......: %d\n", final ? "not freed" : "allocated", number_alloc_list);
fprintf(log_op, "Maximum allocated entries.........: %d\n", max_alloc_list);
fprintf(log_op, "Maximum memory allocated..........: %zu\n", max_mem_allocated);
fprintf(log_op, "Number of mallocs.................: %d\n", num_mallocs);
fprintf(log_op, "Number of reallocs................: %d\n", num_reallocs);
fprintf(log_op, "Number of bad entries.............: %d\n", badptr);
fprintf(log_op, "Number of buffer overrun..........: %d\n", overrun);
fprintf(log_op, "Number of 0 size allocations......: %d\n\n", zero_size);
if (sum != mem_allocated)
fprintf(log_op, "ERROR - sum of allocated %zu != mem_allocated %zu\n", sum, mem_allocated);
if (final) {
if (sum || number_alloc_list || badptr || overrun)
fprintf(log_op, "=> Program seems to have some memory problem !!!\n\n");
else
fprintf(log_op, "=> Program seems to be memory allocation safe...\n\n");
}
}
| 0 |
[
"CWE-59",
"CWE-61"
] |
keepalived
|
04f2d32871bb3b11d7dc024039952f2fe2750306
| 89,696,053,156,726,440,000,000,000,000,000,000,000 | 113 |
When opening files for write, ensure they aren't symbolic links
Issue #1048 identified that if, for example, a non privileged user
created a symbolic link from /etc/keepalvied.data to /etc/passwd,
writing to /etc/keepalived.data (which could be invoked via DBus)
would cause /etc/passwd to be overwritten.
This commit stops keepalived writing to pathnames where the ultimate
component is a symbolic link, by setting O_NOFOLLOW whenever opening
a file for writing.
This might break some setups, where, for example, /etc/keepalived.data
was a symbolic link to /home/fred/keepalived.data. If this was the case,
instead create a symbolic link from /home/fred/keepalived.data to
/tmp/keepalived.data, so that the file is still accessible via
/home/fred/keepalived.data.
There doesn't appear to be a way around this backward incompatibility,
since even checking if the pathname is a symbolic link prior to opening
for writing would create a race condition.
Signed-off-by: Quentin Armitage <[email protected]>
|
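
The defence described above is to refuse symlinks at open() time with O_NOFOLLOW rather than checking beforehand, which would reintroduce a check-then-open race. A hedged sketch (the path is an example, not keepalived's configuration):

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Open a data file for writing, refusing to follow a symbolic link in the
 * final path component. Doing the refusal inside open() itself avoids the
 * check-then-open race. Illustrative only. */
static int open_data_file(const char *path)
{
    int fd = open(path, O_WRONLY | O_CREAT | O_TRUNC | O_NOFOLLOW, 0644);

    if (fd < 0 && errno == ELOOP)
        fprintf(stderr, "%s is a symlink, refusing to write: %s\n",
                path, strerror(errno));
    return fd;
}

int main(void)
{
    int fd = open_data_file("/tmp/keepalived.data.example");

    if (fd >= 0) {
        dprintf(fd, "example\n");
        close(fd);
    }
    return 0;
}

On Linux, open() with O_NOFOLLOW fails with ELOOP when the last component is a symlink, which is exactly the case the backward-compatibility note above is about.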
//! Load image from a PNG file \newinstance.
static CImg<T> get_load_png(std::FILE *const file, unsigned int *const bits_per_pixel=0) {
return CImg<T>().load_png(file,bits_per_pixel);
| 0 |
[
"CWE-125"
] |
CImg
|
10af1e8c1ad2a58a0a3342a856bae63e8f257abb
| 104,833,063,842,807,410,000,000,000,000,000,000,000 | 3 |
Fix other issues in 'CImg<T>::load_bmp()'.
|
static int aax_filter(uint8_t *input, int size, MOVContext *c)
{
int blocks = 0;
unsigned char iv[16];
memcpy(iv, c->file_iv, 16); // iv is overwritten
blocks = size >> 4; // trailing bytes are not encrypted!
av_aes_init(c->aes_decrypt, c->file_key, 128, 1);
av_aes_crypt(c->aes_decrypt, input, input, blocks, iv, 1);
return 0;
}
| 0 |
[
"CWE-399",
"CWE-834"
] |
FFmpeg
|
9cb4eb772839c5e1de2855d126bf74ff16d13382
| 28,335,141,557,321,137,000,000,000,000,000,000,000 | 12 |
avformat/mov: Fix DoS in read_tfra()
Fixes: Missing EOF check in loop
No testcase
Found-by: Xiaohei and Wangchu from Alibaba Security Team
Signed-off-by: Michael Niedermayer <[email protected]>
|
static void spl_filesystem_dir_it_current_data(zend_object_iterator *iter, zval ***data TSRMLS_DC)
{
spl_filesystem_iterator *iterator = (spl_filesystem_iterator *)iter;
*data = &iterator->current;
}
| 0 |
[
"CWE-190"
] |
php-src
|
7245bff300d3fa8bacbef7897ff080a6f1c23eba
| 195,884,898,991,719,000,000,000,000,000,000,000,000 | 6 |
Fix bug #72262 - do not overflow int
|
ex_normal(exarg_T *eap)
{
save_state_T save_state;
char_u *arg = NULL;
int l;
char_u *p;
if (ex_normal_lock > 0)
{
emsg(_(e_secure));
return;
}
if (ex_normal_busy >= p_mmd)
{
emsg(_("E192: Recursive use of :normal too deep"));
return;
}
/*
* vgetc() expects a CSI and K_SPECIAL to have been escaped. Don't do
* this for the K_SPECIAL leading byte, otherwise special keys will not
* work.
*/
if (has_mbyte)
{
int len = 0;
// Count the number of characters to be escaped.
for (p = eap->arg; *p != NUL; ++p)
{
#ifdef FEAT_GUI
if (*p == CSI) // leadbyte CSI
len += 2;
#endif
for (l = (*mb_ptr2len)(p) - 1; l > 0; --l)
if (*++p == K_SPECIAL // trailbyte K_SPECIAL or CSI
#ifdef FEAT_GUI
|| *p == CSI
#endif
)
len += 2;
}
if (len > 0)
{
arg = alloc(STRLEN(eap->arg) + len + 1);
if (arg != NULL)
{
len = 0;
for (p = eap->arg; *p != NUL; ++p)
{
arg[len++] = *p;
#ifdef FEAT_GUI
if (*p == CSI)
{
arg[len++] = KS_EXTRA;
arg[len++] = (int)KE_CSI;
}
#endif
for (l = (*mb_ptr2len)(p) - 1; l > 0; --l)
{
arg[len++] = *++p;
if (*p == K_SPECIAL)
{
arg[len++] = KS_SPECIAL;
arg[len++] = KE_FILLER;
}
#ifdef FEAT_GUI
else if (*p == CSI)
{
arg[len++] = KS_EXTRA;
arg[len++] = (int)KE_CSI;
}
#endif
}
arg[len] = NUL;
}
}
}
}
++ex_normal_busy;
if (save_current_state(&save_state))
{
/*
* Repeat the :normal command for each line in the range. When no
* range given, execute it just once, without positioning the cursor
* first.
*/
do
{
if (eap->addr_count != 0)
{
curwin->w_cursor.lnum = eap->line1++;
curwin->w_cursor.col = 0;
check_cursor_moved(curwin);
}
exec_normal_cmd(arg != NULL
? arg
: eap->arg, eap->forceit ? REMAP_NONE : REMAP_YES, FALSE);
}
while (eap->addr_count > 0 && eap->line1 <= eap->line2 && !got_int);
}
// Might not return to the main loop when in an event handler.
update_topline_cursor();
restore_current_state(&save_state);
--ex_normal_busy;
setmouse();
#ifdef CURSOR_SHAPE
ui_cursor_shape(); // may show different cursor shape
#endif
vim_free(arg);
}
| 0 |
[
"CWE-122"
] |
vim
|
35a319b77f897744eec1155b736e9372c9c5575f
| 196,244,500,453,326,600,000,000,000,000,000,000,000 | 116 |
patch 8.2.3489: ml_get error after search with range
Problem: ml_get error after search with range.
Solution: Limit the line number to the buffer line count.
|
static void vt_disallocate_all(void)
{
struct vc_data *vc[MAX_NR_CONSOLES];
int i;
console_lock();
for (i = 1; i < MAX_NR_CONSOLES; i++)
if (!VT_BUSY(i))
vc[i] = vc_deallocate(i);
else
vc[i] = NULL;
console_unlock();
for (i = 1; i < MAX_NR_CONSOLES; i++) {
if (vc[i] && i >= MIN_NR_CONSOLES) {
tty_port_destroy(&vc[i]->port);
kfree(vc[i]);
}
}
}
| 0 |
[
"CWE-362",
"CWE-703"
] |
linux
|
6cd1ed50efd88261298577cd92a14f2768eddeeb
| 285,711,189,364,176,150,000,000,000,000,000,000,000 | 20 |
vt: vt_ioctl: fix race in VT_RESIZEX
We need to make sure vc_cons[i].d is not NULL after grabbing
console_lock(), or risk a crash.
general protection fault, probably for non-canonical address 0xdffffc0000000068: 0000 [#1] PREEMPT SMP KASAN
KASAN: null-ptr-deref in range [0x0000000000000340-0x0000000000000347]
CPU: 1 PID: 19462 Comm: syz-executor.5 Not tainted 5.5.0-syzkaller #0
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
RIP: 0010:vt_ioctl+0x1f96/0x26d0 drivers/tty/vt/vt_ioctl.c:883
Code: 74 41 e8 bd a6 84 fd 48 89 d8 48 c1 e8 03 42 80 3c 28 00 0f 85 e4 04 00 00 48 8b 03 48 8d b8 40 03 00 00 48 89 fa 48 c1 ea 03 <42> 0f b6 14 2a 84 d2 74 09 80 fa 03 0f 8e b1 05 00 00 44 89 b8 40
RSP: 0018:ffffc900086d7bb0 EFLAGS: 00010202
RAX: 0000000000000000 RBX: ffffffff8c34ee88 RCX: ffffc9001415c000
RDX: 0000000000000068 RSI: ffffffff83f0e6e3 RDI: 0000000000000340
RBP: ffffc900086d7cd0 R08: ffff888054ce0100 R09: fffffbfff16a2f6d
R10: ffff888054ce0998 R11: ffff888054ce0100 R12: 000000000000001d
R13: dffffc0000000000 R14: 1ffff920010daf79 R15: 000000000000ff7f
FS: 00007f7d13c12700(0000) GS:ffff8880ae900000(0000) knlGS:0000000000000000
CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
CR2: 00007ffd477e3c38 CR3: 0000000095d0a000 CR4: 00000000001406e0
DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
Call Trace:
tty_ioctl+0xa37/0x14f0 drivers/tty/tty_io.c:2660
vfs_ioctl fs/ioctl.c:47 [inline]
ksys_ioctl+0x123/0x180 fs/ioctl.c:763
__do_sys_ioctl fs/ioctl.c:772 [inline]
__se_sys_ioctl fs/ioctl.c:770 [inline]
__x64_sys_ioctl+0x73/0xb0 fs/ioctl.c:770
do_syscall_64+0xfa/0x790 arch/x86/entry/common.c:294
entry_SYSCALL_64_after_hwframe+0x49/0xbe
RIP: 0033:0x45b399
Code: ad b6 fb ff c3 66 2e 0f 1f 84 00 00 00 00 00 66 90 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 0f 83 7b b6 fb ff c3 66 2e 0f 1f 84 00 00 00 00
RSP: 002b:00007f7d13c11c78 EFLAGS: 00000246 ORIG_RAX: 0000000000000010
RAX: ffffffffffffffda RBX: 00007f7d13c126d4 RCX: 000000000045b399
RDX: 0000000020000080 RSI: 000000000000560a RDI: 0000000000000003
RBP: 000000000075bf20 R08: 0000000000000000 R09: 0000000000000000
R10: 0000000000000000 R11: 0000000000000246 R12: 00000000ffffffff
R13: 0000000000000666 R14: 00000000004c7f04 R15: 000000000075bf2c
Modules linked in:
---[ end trace 80970faf7a67eb77 ]---
RIP: 0010:vt_ioctl+0x1f96/0x26d0 drivers/tty/vt/vt_ioctl.c:883
Code: 74 41 e8 bd a6 84 fd 48 89 d8 48 c1 e8 03 42 80 3c 28 00 0f 85 e4 04 00 00 48 8b 03 48 8d b8 40 03 00 00 48 89 fa 48 c1 ea 03 <42> 0f b6 14 2a 84 d2 74 09 80 fa 03 0f 8e b1 05 00 00 44 89 b8 40
RSP: 0018:ffffc900086d7bb0 EFLAGS: 00010202
RAX: 0000000000000000 RBX: ffffffff8c34ee88 RCX: ffffc9001415c000
RDX: 0000000000000068 RSI: ffffffff83f0e6e3 RDI: 0000000000000340
RBP: ffffc900086d7cd0 R08: ffff888054ce0100 R09: fffffbfff16a2f6d
R10: ffff888054ce0998 R11: ffff888054ce0100 R12: 000000000000001d
R13: dffffc0000000000 R14: 1ffff920010daf79 R15: 000000000000ff7f
FS: 00007f7d13c12700(0000) GS:ffff8880ae900000(0000) knlGS:0000000000000000
CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
CR2: 00007ffd477e3c38 CR3: 0000000095d0a000 CR4: 00000000001406e0
DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
Signed-off-by: Eric Dumazet <[email protected]>
Cc: stable <[email protected]>
Reported-by: syzbot <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Greg Kroah-Hartman <[email protected]>
|
PHP_FUNCTION(linkinfo)
{
char *link;
char *dirname;
int link_len, dir_len;
struct stat sb;
int ret;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "p", &link, &link_len) == FAILURE) {
return;
}
dirname = estrndup(link, link_len);
dir_len = php_dirname(dirname, link_len);
if (php_check_open_basedir(dirname TSRMLS_CC)) {
efree(dirname);
RETURN_FALSE;
}
ret = VCWD_LSTAT(link, &sb);
if (ret == -1) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "%s", strerror(errno));
efree(dirname);
RETURN_LONG(-1L);
}
efree(dirname);
RETURN_LONG((long) sb.st_dev);
}
| 0 |
[
"CWE-20"
] |
php-src
|
52b93f0cfd3cba7ff98cc5198df6ca4f23865f80
| 97,036,498,889,608,950,000,000,000,000,000,000,000 | 30 |
Fixed bug #69353 (Missing null byte checks for paths in various PHP extensions)
|
static int32 TIFFReadPixels(TIFF *tiff,const tsample_t sample,const ssize_t row,
tdata_t scanline)
{
int32
status;
status=TIFFReadScanline(tiff,scanline,(uint32) row,sample);
return(status);
}
| 0 |
[
"CWE-399",
"CWE-772"
] |
ImageMagick
|
256825d4eb33dc301496710d15cf5a7ae924088b
| 271,938,989,315,326,100,000,000,000,000,000,000,000 | 9 |
Fixed possible memory leak reported in #1206
|
bool bdrv_iostatus_is_enabled(const BlockDriverState *bs)
{
return (bs->iostatus_enabled &&
(bs->on_write_error == BLOCKDEV_ON_ERROR_ENOSPC ||
bs->on_write_error == BLOCKDEV_ON_ERROR_STOP ||
bs->on_read_error == BLOCKDEV_ON_ERROR_STOP));
}
| 0 |
[
"CWE-190"
] |
qemu
|
8f4754ede56e3f9ea3fd7207f4a7c4453e59285b
| 81,793,972,465,035,250,000,000,000,000,000,000,000 | 7 |
block: Limit request size (CVE-2014-0143)
Limiting the size of a single request to INT_MAX not only fixes a
direct integer overflow in bdrv_check_request() (which would only
trigger bad behaviour with ridiculously huge images, as in close to
2^64 bytes), but can also prevent overflows in all block drivers.
Signed-off-by: Kevin Wolf <[email protected]>
Reviewed-by: Max Reitz <[email protected]>
Signed-off-by: Stefan Hajnoczi <[email protected]>
|
static int parse_diff_color_slot(const char *var, int ofs)
{
if (!strcasecmp(var+ofs, "plain"))
return DIFF_PLAIN;
if (!strcasecmp(var+ofs, "meta"))
return DIFF_METAINFO;
if (!strcasecmp(var+ofs, "frag"))
return DIFF_FRAGINFO;
if (!strcasecmp(var+ofs, "old"))
return DIFF_FILE_OLD;
if (!strcasecmp(var+ofs, "new"))
return DIFF_FILE_NEW;
if (!strcasecmp(var+ofs, "commit"))
return DIFF_COMMIT;
if (!strcasecmp(var+ofs, "whitespace"))
return DIFF_WHITESPACE;
die("bad config variable '%s'", var);
}
| 0 |
[
"CWE-119"
] |
git
|
fd55a19eb1d49ae54008d932a65f79cd6fda45c9
| 169,514,921,998,482,680,000,000,000,000,000,000,000 | 18 |
Fix buffer overflow in git diff
If PATH_MAX on your system is smaller than a path stored, it may cause
buffer overflow and stack corruption in diff_addremove() and diff_change()
functions when running git-diff
Signed-off-by: Dmitry Potapov <[email protected]>
Signed-off-by: Junio C Hamano <[email protected]>
|
name_find(regex_t* reg, UChar* name, UChar* name_end)
{
int i, len;
NameEntry* e;
NameTable* t = (NameTable* )reg->name_table;
if (IS_NOT_NULL(t)) {
len = name_end - name;
for (i = 0; i < t->num; i++) {
e = &(t->e[i]);
if (len == e->name_len && onig_strncmp(name, e->name, len) == 0)
return e;
}
}
return (NameEntry* )NULL;
}
| 0 |
[
"CWE-125"
] |
oniguruma
|
65a9b1aa03c9bc2dc01b074295b9603232cb3b78
| 118,117,248,168,466,300,000,000,000,000,000,000,000 | 16 |
onig-5.9.2
|
unsigned long task_statm(struct mm_struct *mm,
unsigned long *shared, unsigned long *text,
unsigned long *data, unsigned long *resident)
{
*shared = get_mm_counter(mm, MM_FILEPAGES);
*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
>> PAGE_SHIFT;
*data = mm->total_vm - mm->shared_vm;
*resident = *shared + get_mm_counter(mm, MM_ANONPAGES);
return mm->total_vm;
}
| 0 |
[
"CWE-264"
] |
linux-2.6
|
1a5a9906d4e8d1976b701f889d8f35d54b928f25
| 236,715,911,275,035,930,000,000,000,000,000,000,000 | 11 |
mm: thp: fix pmd_bad() triggering in code paths holding mmap_sem read mode
In some cases it may happen that pmd_none_or_clear_bad() is called with
the mmap_sem hold in read mode. In those cases the huge page faults can
allocate hugepmds under pmd_none_or_clear_bad() and that can trigger a
false positive from pmd_bad() that will not like to see a pmd
materializing as trans huge.
It's not khugepaged causing the problem, khugepaged holds the mmap_sem
in write mode (and all those sites must hold the mmap_sem in read mode
to prevent pagetables to go away from under them, during code review it
seems vm86 mode on 32bit kernels requires that too unless it's
restricted to 1 thread per process or UP builds). The race is only with
the huge pagefaults that can convert a pmd_none() into a
pmd_trans_huge().
Effectively all these pmd_none_or_clear_bad() sites running with
mmap_sem in read mode are somewhat speculative with the page faults, and
the result is always undefined when they run simultaneously. This is
probably why it wasn't common to run into this. For example if the
madvise(MADV_DONTNEED) runs zap_page_range() shortly before the page
fault, the hugepage will not be zapped, if the page fault runs first it
will be zapped.
Altering pmd_bad() not to error out if it finds hugepmds won't be enough
to fix this, because zap_pmd_range would then proceed to call
zap_pte_range (which would be incorrect if the pmd become a
pmd_trans_huge()).
The simplest way to fix this is to read the pmd in the local stack
(regardless of what we read, no need of actual CPU barriers, only
compiler barrier needed), and be sure it is not changing under the code
that computes its value. Even if the real pmd is changing under the
value we hold on the stack, we don't care. If we actually end up in
zap_pte_range it means the pmd was not none already and it was not huge,
and it can't become huge from under us (khugepaged locking explained
above).
All we need is to enforce that there is no way anymore that in a code
path like below, pmd_trans_huge can be false, but pmd_none_or_clear_bad
can run into a hugepmd. The overhead of a barrier() is just a compiler
tweak and should not be measurable (I only added it for THP builds). I
don't exclude different compiler versions may have prevented the race
too by caching the value of *pmd on the stack (that hasn't been
verified, but it wouldn't be impossible considering
pmd_none_or_clear_bad, pmd_bad, pmd_trans_huge, pmd_none are all inlines
and there's no external function called in between pmd_trans_huge and
pmd_none_or_clear_bad).
if (pmd_trans_huge(*pmd)) {
if (next-addr != HPAGE_PMD_SIZE) {
VM_BUG_ON(!rwsem_is_locked(&tlb->mm->mmap_sem));
split_huge_page_pmd(vma->vm_mm, pmd);
} else if (zap_huge_pmd(tlb, vma, pmd, addr))
continue;
/* fall through */
}
if (pmd_none_or_clear_bad(pmd))
Because this race condition could be exercised without special
privileges this was reported in CVE-2012-1179.
The race was identified and fully explained by Ulrich who debugged it.
I'm quoting his accurate explanation below, for reference.
====== start quote =======
mapcount 0 page_mapcount 1
kernel BUG at mm/huge_memory.c:1384!
At some point prior to the panic, a "bad pmd ..." message similar to the
following is logged on the console:
mm/memory.c:145: bad pmd ffff8800376e1f98(80000000314000e7).
The "bad pmd ..." message is logged by pmd_clear_bad() before it clears
the page's PMD table entry.
143 void pmd_clear_bad(pmd_t *pmd)
144 {
-> 145 pmd_ERROR(*pmd);
146 pmd_clear(pmd);
147 }
After the PMD table entry has been cleared, there is an inconsistency
between the actual number of PMD table entries that are mapping the page
and the page's map count (_mapcount field in struct page). When the page
is subsequently reclaimed, __split_huge_page() detects this inconsistency.
1381 if (mapcount != page_mapcount(page))
1382 printk(KERN_ERR "mapcount %d page_mapcount %d\n",
1383 mapcount, page_mapcount(page));
-> 1384 BUG_ON(mapcount != page_mapcount(page));
The root cause of the problem is a race of two threads in a multithreaded
process. Thread B incurs a page fault on a virtual address that has never
been accessed (PMD entry is zero) while Thread A is executing an madvise()
system call on a virtual address within the same 2 MB (huge page) range.
virtual address space
.---------------------.
| |
| |
.-|---------------------|
| | |
| | |<-- B(fault)
| | |
2 MB | |/////////////////////|-.
huge < |/////////////////////| > A(range)
page | |/////////////////////|-'
| | |
| | |
'-|---------------------|
| |
| |
'---------------------'
- Thread A is executing an madvise(..., MADV_DONTNEED) system call
on the virtual address range "A(range)" shown in the picture.
sys_madvise
// Acquire the semaphore in shared mode.
down_read(¤t->mm->mmap_sem)
...
madvise_vma
switch (behavior)
case MADV_DONTNEED:
madvise_dontneed
zap_page_range
unmap_vmas
unmap_page_range
zap_pud_range
zap_pmd_range
//
// Assume that this huge page has never been accessed.
// I.e. content of the PMD entry is zero (not mapped).
//
if (pmd_trans_huge(*pmd)) {
// We don't get here due to the above assumption.
}
//
// Assume that Thread B incurred a page fault and
.---------> // sneaks in here as shown below.
| //
| if (pmd_none_or_clear_bad(pmd))
| {
| if (unlikely(pmd_bad(*pmd)))
| pmd_clear_bad
| {
| pmd_ERROR
| // Log "bad pmd ..." message here.
| pmd_clear
| // Clear the page's PMD entry.
| // Thread B incremented the map count
| // in page_add_new_anon_rmap(), but
| // now the page is no longer mapped
| // by a PMD entry (-> inconsistency).
| }
| }
|
v
- Thread B is handling a page fault on virtual address "B(fault)" shown
in the picture.
...
do_page_fault
__do_page_fault
// Acquire the semaphore in shared mode.
down_read_trylock(&mm->mmap_sem)
...
handle_mm_fault
if (pmd_none(*pmd) && transparent_hugepage_enabled(vma))
// We get here due to the above assumption (PMD entry is zero).
do_huge_pmd_anonymous_page
alloc_hugepage_vma
// Allocate a new transparent huge page here.
...
__do_huge_pmd_anonymous_page
...
spin_lock(&mm->page_table_lock)
...
page_add_new_anon_rmap
// Here we increment the page's map count (starts at -1).
atomic_set(&page->_mapcount, 0)
set_pmd_at
// Here we set the page's PMD entry which will be cleared
// when Thread A calls pmd_clear_bad().
...
spin_unlock(&mm->page_table_lock)
The mmap_sem does not prevent the race because both threads are acquiring
it in shared mode (down_read). Thread B holds the page_table_lock while
the page's map count and PMD table entry are updated. However, Thread A
does not synchronize on that lock.
====== end quote =======
[[email protected]: checkpatch fixes]
Reported-by: Ulrich Obergfell <[email protected]>
Signed-off-by: Andrea Arcangeli <[email protected]>
Acked-by: Johannes Weiner <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: Hugh Dickins <[email protected]>
Cc: Dave Jones <[email protected]>
Acked-by: Larry Woodman <[email protected]>
Acked-by: Rik van Riel <[email protected]>
Cc: <[email protected]> [2.6.38+]
Cc: Mark Salter <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
static void virgl_cmd_get_capset_info(VirtIOGPU *g,
struct virtio_gpu_ctrl_command *cmd)
{
struct virtio_gpu_get_capset_info info;
struct virtio_gpu_resp_capset_info resp;
VIRTIO_GPU_FILL_CMD(info);
if (info.capset_index == 0) {
resp.capset_id = VIRTIO_GPU_CAPSET_VIRGL;
virgl_renderer_get_cap_set(resp.capset_id,
&resp.capset_max_version,
&resp.capset_max_size);
} else {
resp.capset_max_version = 0;
resp.capset_max_size = 0;
}
resp.hdr.type = VIRTIO_GPU_RESP_OK_CAPSET_INFO;
virtio_gpu_ctrl_response(g, cmd, &resp.hdr, sizeof(resp));
}
| 0 |
[] |
qemu
|
2fe760554eb3769d70f608a158474f728ba45ba6
| 150,090,085,634,097,380,000,000,000,000,000,000,000 | 20 |
virtio-gpu: check max_outputs only
The scanout id should not be above the configured num_scanouts.
Signed-off-by: Marc-André Lureau <[email protected]>
Message-id: [email protected]
Signed-off-by: Gerd Hoffmann <[email protected]>
|
PHP_FUNCTION(imagecolorexact)
{
zval *IM;
zend_long red, green, blue;
gdImagePtr im;
if (zend_parse_parameters(ZEND_NUM_ARGS(), "rlll", &IM, &red, &green, &blue) == FAILURE) {
return;
}
if ((im = (gdImagePtr)zend_fetch_resource(Z_RES_P(IM), "Image", le_gd)) == NULL) {
RETURN_FALSE;
}
RETURN_LONG(gdImageColorExact(im, red, green, blue));
}
| 0 |
[
"CWE-787"
] |
php-src
|
28022c9b1fd937436ab67bb3d61f652c108baf96
| 123,989,868,668,438,850,000,000,000,000,000,000,000 | 16 |
Fix bug#72697 - select_colors write out-of-bounds
(cherry picked from commit b6f13a5ef9d6280cf984826a5de012a32c396cd4)
Conflicts:
ext/gd/gd.c
|
f_getchangelist(typval_T *argvars, typval_T *rettv)
{
#ifdef FEAT_JUMPLIST
buf_T *buf;
int i;
list_T *l;
dict_T *d;
#endif
if (rettv_list_alloc(rettv) != OK)
return;
#ifdef FEAT_JUMPLIST
(void)tv_get_number(&argvars[0]); /* issue errmsg if type error */
++emsg_off;
buf = tv_get_buf(&argvars[0], FALSE);
--emsg_off;
if (buf == NULL)
return;
l = list_alloc();
if (l == NULL)
return;
if (list_append_list(rettv->vval.v_list, l) == FAIL)
return;
/*
* The current window change list index tracks only the position in the
* current buffer change list. For other buffers, use the change list
* length as the current index.
*/
list_append_number(rettv->vval.v_list,
(varnumber_T)((buf == curwin->w_buffer)
? curwin->w_changelistidx : buf->b_changelistlen));
for (i = 0; i < buf->b_changelistlen; ++i)
{
if (buf->b_changelist[i].lnum == 0)
continue;
if ((d = dict_alloc()) == NULL)
return;
if (list_append_dict(l, d) == FAIL)
return;
dict_add_number(d, "lnum", (long)buf->b_changelist[i].lnum);
dict_add_number(d, "col", (long)buf->b_changelist[i].col);
dict_add_number(d, "coladd", (long)buf->b_changelist[i].coladd);
}
#endif
}
| 0 |
[
"CWE-78"
] |
vim
|
8c62a08faf89663e5633dc5036cd8695c80f1075
| 321,286,969,142,842,160,000,000,000,000,000,000,000 | 49 |
patch 8.1.0881: can execute shell commands in rvim through interfaces
Problem: Can execute shell commands in rvim through interfaces.
Solution: Disable using interfaces in restricted mode. Allow for writing
file with writefile(), histadd() and a few others.
|
static NTSTATUS pdb_samba_dsdb_getsampwsid(struct pdb_methods *m,
struct samu *sam_acct,
const struct dom_sid *sid)
{
NTSTATUS status;
struct pdb_samba_dsdb_state *state = talloc_get_type_abort(
m->private_data, struct pdb_samba_dsdb_state);
struct dom_sid_buf buf;
status = pdb_samba_dsdb_getsampwfilter(m, state, sam_acct,
"(&(objectsid=%s)(objectclass=user))",
dom_sid_str_buf(sid, &buf));
return status;
}
| 0 |
[
"CWE-200"
] |
samba
|
0a3aa5f908e351201dc9c4d4807b09ed9eedff77
| 295,241,383,454,230,160,000,000,000,000,000,000,000 | 14 |
CVE-2022-32746 ldb: Make use of functions for appending to an ldb_message
This aims to minimise usage of the error-prone pattern of searching for
a just-added message element in order to make modifications to it (and
potentially finding the wrong element).
BUG: https://bugzilla.samba.org/show_bug.cgi?id=15009
Signed-off-by: Joseph Sutton <[email protected]>
|
static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog,
const struct cred *f_cred)
{
const struct bpf_map *map;
struct bpf_insn *insns;
u32 off, type;
u64 imm;
u8 code;
int i;
insns = kmemdup(prog->insnsi, bpf_prog_insn_size(prog),
GFP_USER);
if (!insns)
return insns;
for (i = 0; i < prog->len; i++) {
code = insns[i].code;
if (code == (BPF_JMP | BPF_TAIL_CALL)) {
insns[i].code = BPF_JMP | BPF_CALL;
insns[i].imm = BPF_FUNC_tail_call;
/* fall-through */
}
if (code == (BPF_JMP | BPF_CALL) ||
code == (BPF_JMP | BPF_CALL_ARGS)) {
if (code == (BPF_JMP | BPF_CALL_ARGS))
insns[i].code = BPF_JMP | BPF_CALL;
if (!bpf_dump_raw_ok(f_cred))
insns[i].imm = 0;
continue;
}
if (BPF_CLASS(code) == BPF_LDX && BPF_MODE(code) == BPF_PROBE_MEM) {
insns[i].code = BPF_LDX | BPF_SIZE(code) | BPF_MEM;
continue;
}
if (code != (BPF_LD | BPF_IMM | BPF_DW))
continue;
imm = ((u64)insns[i + 1].imm << 32) | (u32)insns[i].imm;
map = bpf_map_from_imm(prog, imm, &off, &type);
if (map) {
insns[i].src_reg = type;
insns[i].imm = map->id;
insns[i + 1].imm = off;
continue;
}
}
return insns;
}
| 0 |
[
"CWE-307"
] |
linux
|
350a5c4dd2452ea999cc5e1d4a8dbf12de2f97ef
| 319,350,965,081,709,700,000,000,000,000,000,000,000 | 51 |
bpf: Dont allow vmlinux BTF to be used in map_create and prog_load.
The syzbot got FD of vmlinux BTF and passed it into map_create which caused
crash in btf_type_id_size() when it tried to access resolved_ids. The vmlinux
BTF doesn't have 'resolved_ids' and 'resolved_sizes' initialized to save
memory. To avoid such issues disallow using vmlinux BTF in prog_load and
map_create commands.
Fixes: 5329722057d4 ("bpf: Assign ID to vmlinux BTF and return extra info for BTF in GET_OBJ_INFO")
Reported-by: [email protected]
Signed-off-by: Alexei Starovoitov <[email protected]>
Signed-off-by: Daniel Borkmann <[email protected]>
Acked-by: Yonghong Song <[email protected]>
Link: https://lore.kernel.org/bpf/[email protected]
|
void ValidateInputTensors(OpKernelContext* ctx, const Tensor& in0,
const Tensor& in1) override {
OP_REQUIRES(
ctx, in0.dims() >= 2,
errors::InvalidArgument("In[0] ndims must be >= 2: ", in0.dims()));
OP_REQUIRES(
ctx, in1.dims() >= 2,
errors::InvalidArgument("In[0] ndims must be >= 2: ", in1.dims()));
}
| 1 |
[
"CWE-125"
] |
tensorflow
|
480641e3599775a8895254ffbc0fc45621334f68
| 21,538,206,924,826,293,000,000,000,000,000,000,000 | 10 |
Validate (and ensure validation sticks) inputs for `MatrixTriangularSolve`.
PiperOrigin-RevId: 370282444
Change-Id: Iaed61a0b0727cc42c830658b72eb69f785f48dc5
|
static int proc_ioctl(struct usb_dev_state *ps, struct usbdevfs_ioctl *ctl)
{
int size;
void *buf = NULL;
int retval = 0;
struct usb_interface *intf = NULL;
struct usb_driver *driver = NULL;
if (ps->privileges_dropped)
return -EACCES;
/* alloc buffer */
size = _IOC_SIZE(ctl->ioctl_code);
if (size > 0) {
buf = kmalloc(size, GFP_KERNEL);
if (buf == NULL)
return -ENOMEM;
if ((_IOC_DIR(ctl->ioctl_code) & _IOC_WRITE)) {
if (copy_from_user(buf, ctl->data, size)) {
kfree(buf);
return -EFAULT;
}
} else {
memset(buf, 0, size);
}
}
if (!connected(ps)) {
kfree(buf);
return -ENODEV;
}
if (ps->dev->state != USB_STATE_CONFIGURED)
retval = -EHOSTUNREACH;
else if (!(intf = usb_ifnum_to_if(ps->dev, ctl->ifno)))
retval = -EINVAL;
else switch (ctl->ioctl_code) {
/* disconnect kernel driver from interface */
case USBDEVFS_DISCONNECT:
if (intf->dev.driver) {
driver = to_usb_driver(intf->dev.driver);
dev_dbg(&intf->dev, "disconnect by usbfs\n");
usb_driver_release_interface(driver, intf);
} else
retval = -ENODATA;
break;
/* let kernel drivers try to (re)bind to the interface */
case USBDEVFS_CONNECT:
if (!intf->dev.driver)
retval = device_attach(&intf->dev);
else
retval = -EBUSY;
break;
/* talk directly to the interface's driver */
default:
if (intf->dev.driver)
driver = to_usb_driver(intf->dev.driver);
if (driver == NULL || driver->unlocked_ioctl == NULL) {
retval = -ENOTTY;
} else {
retval = driver->unlocked_ioctl(intf, ctl->ioctl_code, buf);
if (retval == -ENOIOCTLCMD)
retval = -ENOTTY;
}
}
/* cleanup and return */
if (retval >= 0
&& (_IOC_DIR(ctl->ioctl_code) & _IOC_READ) != 0
&& size > 0
&& copy_to_user(ctl->data, buf, size) != 0)
retval = -EFAULT;
kfree(buf);
return retval;
}
| 0 |
[
"CWE-200"
] |
linux
|
681fef8380eb818c0b845fca5d2ab1dcbab114ee
| 303,171,681,239,121,170,000,000,000,000,000,000,000 | 79 |
USB: usbfs: fix potential infoleak in devio
The stack object “ci” has a total size of 8 bytes. Its last 3 bytes
are padding bytes which are not initialized and leaked to userland
via “copy_to_user”.
Signed-off-by: Kangjie Lu <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
|
LIBOPENMPT_MODPLUG_API void ModPlug_GetSettings(ModPlug_Settings* settings)
{
if(!settings) return;
memcpy(settings,&globalsettings,sizeof(ModPlug_Settings));
}
| 0 |
[
"CWE-120",
"CWE-295"
] |
openmpt
|
927688ddab43c2b203569de79407a899e734fabe
| 184,998,330,408,457,270,000,000,000,000,000,000,000 | 5 |
[Fix] libmodplug: C API: Limit the length of strings copied to the output buffer of ModPlug_InstrumentName() and ModPlug_SampleName() to 32 bytes (including terminating null) as is done by original libmodplug. This avoids potential buffer overflows in software relying on this limit instead of querying the required buffer size beforehand. libopenmpt can return strings longer than 32 bytes here beacuse the internal limit of 32 bytes applies to strings encoded in arbitrary character encodings but the API returns them converted to UTF-8, which can be longer. (reported by Antonio Morales Maldonado of Semmle Security Research Team)
git-svn-id: https://source.openmpt.org/svn/openmpt/trunk/OpenMPT@12127 56274372-70c3-4bfc-bfc3-4c3a0b034d27
|
void LEX::link_first_table_back(TABLE_LIST *first,
bool link_to_local)
{
if (first)
{
if ((first->next_global= query_tables))
query_tables->prev_global= &first->next_global;
else
query_tables_last= &first->next_global;
query_tables= first;
if (link_to_local)
{
first->next_local= first_select_lex()->table_list.first;
first_select_lex()->context.table_list= first;
first_select_lex()->table_list.first= first;
first_select_lex()->table_list.elements++; //safety
}
}
}
| 0 |
[
"CWE-703"
] |
server
|
39feab3cd31b5414aa9b428eaba915c251ac34a2
| 30,020,659,998,015,430,000,000,000,000,000,000,000 | 20 |
MDEV-26412 Server crash in Item_field::fix_outer_field for INSERT SELECT
IF an INSERT/REPLACE SELECT statement contained an ON expression in the top
level select and this expression used a subquery with a column reference
that could not be resolved then an attempt to resolve this reference as
an outer reference caused a crash of the server. This happened because the
outer context field in the Name_resolution_context structure was not set
to NULL for such references. Rather it pointed to the first element in
the select_stack.
Note that starting from 10.4 we cannot use the SELECT_LEX::outer_select()
method when parsing a SELECT construct.
Approved by Oleksandr Byelkin <[email protected]>
|
static int vlv_search_by_dn_guid(struct ldb_module *module,
struct vlv_context *ac,
struct ldb_result **result,
const struct GUID *guid,
const char * const *attrs)
{
struct ldb_dn *dn;
struct ldb_request *req;
struct ldb_result *res;
int ret;
struct GUID_txt_buf guid_str;
struct ldb_control **controls = ac->store->down_controls;
struct ldb_context *ldb = ldb_module_get_ctx(module);
dn = ldb_dn_new_fmt(ac, ldb, "<GUID=%s>",
GUID_buf_string(guid, &guid_str));
if (dn == NULL) {
return ldb_oom(ldb);
}
res = talloc_zero(ac, struct ldb_result);
if (res == NULL) {
return ldb_oom(ldb);
}
ret = ldb_build_search_req(&req, ldb, ac,
dn,
LDB_SCOPE_BASE,
NULL,
attrs,
controls,
res,
ldb_search_default_callback,
ac->req);
if (ret != LDB_SUCCESS) {
talloc_free(res);
return ret;
}
ret = ldb_request(ldb, req);
if (ret == LDB_SUCCESS) {
ret = ldb_wait(req->handle, LDB_WAIT_ALL);
}
talloc_free(req);
if (ret != LDB_SUCCESS) {
talloc_free(res);
return ret;
}
*result = res;
return ret;
}
| 0 |
[
"CWE-416"
] |
samba
|
32c333def9ad5a1c67abee320cf5f3c4f2cb1e5c
| 108,151,614,948,650,860,000,000,000,000,000,000,000 | 53 |
CVE-2020-10760 dsdb: Ensure a proper talloc tree for saved controls
Otherwise a paged search on the GC port will fail as the ->data was
not kept around for the second page of searches.
An example command to produce this is
bin/ldbsearch --paged -H ldap://$SERVER:3268 -U$USERNAME%$PASSWORD
This shows up later in the partition module as:
ERROR: AddressSanitizer: heap-use-after-free on address 0x60b00151ef20 at pc 0x7fec3f801aac bp 0x7ffe8472c270 sp 0x7ffe8472c260
READ of size 4 at 0x60b00151ef20 thread T0 (ldap(0))
#0 0x7fec3f801aab in talloc_chunk_from_ptr ../../lib/talloc/talloc.c:526
#1 0x7fec3f801aab in __talloc_get_name ../../lib/talloc/talloc.c:1559
#2 0x7fec3f801aab in talloc_check_name ../../lib/talloc/talloc.c:1582
#3 0x7fec1b86b2e1 in partition_search ../../source4/dsdb/samdb/ldb_modules/partition.c:780
or
smb_panic_default: PANIC (pid 13287): Bad talloc magic value - unknown value
(from source4/dsdb/samdb/ldb_modules/partition.c:780)
BUG: https://bugzilla.samba.org/show_bug.cgi?id=14402
Signed-off-by: Andrew Bartlett <[email protected]>
|
Item *Field_new_decimal::get_equal_const_item(THD *thd, const Context &ctx,
Item *const_item)
{
if (flags & ZEROFILL_FLAG)
return Field_num::get_equal_zerofill_const_item(thd, ctx, const_item);
switch (ctx.subst_constraint()) {
case IDENTITY_SUBST:
if (const_item->field_type() != MYSQL_TYPE_NEWDECIMAL ||
const_item->decimal_scale() != decimals())
{
my_decimal *val, val_buffer, val_buffer2;
if (!(val= const_item->val_decimal(&val_buffer)))
{
DBUG_ASSERT(0);
return const_item;
}
/*
Truncate or extend the decimal value to the scale of the field.
See comments about truncation in the same place in
Field_time::get_equal_const_item().
*/
my_decimal_round(E_DEC_FATAL_ERROR, val, decimals(), true, &val_buffer2);
return new (thd->mem_root) Item_decimal(thd, field_name, &val_buffer2,
decimals(), field_length);
}
break;
case ANY_SUBST:
break;
}
return const_item;
}
| 0 |
[
"CWE-120"
] |
server
|
eca207c46293bc72dd8d0d5622153fab4d3fccf1
| 10,750,115,780,538,685,000,000,000,000,000,000,000 | 31 |
MDEV-25317 Assertion `scale <= precision' failed in decimal_bin_size And Assertion `scale >= 0 && precision > 0 && scale <= precision' failed in decimal_bin_size_inline/decimal_bin_size.
Precision should be kept below DECIMAL_MAX_SCALE for computations.
It can be bigger in Item_decimal. I'd fix this too but it changes the
existing behaviour so problemmatic to ix.
|
int mnt_table_parse_mtab(struct libmnt_table *tb, const char *filename)
{
return __mnt_table_parse_mtab(tb, filename, NULL);
}
| 0 |
[
"CWE-552",
"CWE-703"
] |
util-linux
|
166e87368ae88bf31112a30e078cceae637f4cdb
| 172,145,074,405,508,000,000,000,000,000,000,000,000 | 4 |
libmount: remove support for deleted mount table entries
The "(deleted)" suffix has been originally used by kernel for deleted
mountpoints. Since kernel commit 9d4d65748a5ca26ea8650e50ba521295549bf4e3
(Dec 2014) kernel does not use this suffix for mount stuff in /proc at
all. Let's remove this support from libmount too.
Signed-off-by: Karel Zak <[email protected]>
|
void gf_net_get_ntp(u32 *sec, u32 *frac)
{
u64 frac_part;
struct timeval now;
gettimeofday(&now, NULL);
if (sec) {
*sec = (u32) (now.tv_sec) + ntp_shift;
}
if (frac) {
frac_part = now.tv_usec * 0xFFFFFFFFULL;
frac_part /= 1000000;
*frac = (u32) ( frac_part );
}
}
| 0 |
[
"CWE-787"
] |
gpac
|
f3698bb1bce62402805c3fda96551a23101a32f9
| 295,350,296,361,187,470,000,000,000,000,000,000,000 | 15 |
fix buffer overrun in gf_bin128_parse
closes #1204
closes #1205
|
static void xfrm_byidx_resize(struct net *net, int total)
{
unsigned int hmask = net->xfrm.policy_idx_hmask;
unsigned int nhashmask = xfrm_new_hash_mask(hmask);
unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
struct hlist_head *oidx = net->xfrm.policy_byidx;
struct hlist_head *nidx = xfrm_hash_alloc(nsize);
int i;
if (!nidx)
return;
spin_lock_bh(&net->xfrm.xfrm_policy_lock);
for (i = hmask; i >= 0; i--)
xfrm_idx_hash_transfer(oidx + i, nidx, nhashmask);
net->xfrm.policy_byidx = nidx;
net->xfrm.policy_idx_hmask = nhashmask;
spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
xfrm_hash_free(oidx, (hmask + 1) * sizeof(struct hlist_head));
}
| 0 |
[
"CWE-125"
] |
ipsec
|
7bab09631c2a303f87a7eb7e3d69e888673b9b7e
| 86,820,428,306,673,080,000,000,000,000,000,000,000 | 24 |
xfrm: policy: check policy direction value
The 'dir' parameter in xfrm_migrate() is a user-controlled byte which is used
as an array index. This can lead to an out-of-bound access, kernel lockup and
DoS. Add a check for the 'dir' value.
This fixes CVE-2017-11600.
References: https://bugzilla.redhat.com/show_bug.cgi?id=1474928
Fixes: 80c9abaabf42 ("[XFRM]: Extension for dynamic update of endpoint address(es)")
Cc: <[email protected]> # v2.6.21-rc1
Reported-by: "bo Zhang" <[email protected]>
Signed-off-by: Vladis Dronov <[email protected]>
Signed-off-by: Steffen Klassert <[email protected]>
|
int commonio_sort_wrt (struct commonio_db *shadow,
const struct commonio_db *passwd)
{
struct commonio_entry *head = NULL, *pw_ptr, *spw_ptr;
const char *name;
if ((NULL == shadow) || (NULL == shadow->head)) {
return 0;
}
for (pw_ptr = passwd->head; NULL != pw_ptr; pw_ptr = pw_ptr->next) {
if (NULL == pw_ptr->eptr) {
continue;
}
name = passwd->ops->getname (pw_ptr->eptr);
for (spw_ptr = shadow->head;
NULL != spw_ptr;
spw_ptr = spw_ptr->next) {
if (NULL == spw_ptr->eptr) {
continue;
}
if (strcmp (name, shadow->ops->getname (spw_ptr->eptr))
== 0) {
break;
}
}
if (NULL == spw_ptr) {
continue;
}
commonio_del_entry (shadow, spw_ptr);
spw_ptr->next = head;
head = spw_ptr;
}
for (spw_ptr = head; NULL != spw_ptr; spw_ptr = head) {
head = head->next;
if (NULL != shadow->head) {
shadow->head->prev = spw_ptr;
}
spw_ptr->next = shadow->head;
shadow->head = spw_ptr;
}
shadow->head->prev = NULL;
shadow->changed = true;
return 0;
}
| 0 |
[
"CWE-119",
"CWE-787"
] |
shadow
|
954e3d2e7113e9ac06632aee3c69b8d818cc8952
| 284,539,289,513,904,000,000,000,000,000,000,000,000 | 49 |
Fix buffer overflow if NULL line is present in db.
If ptr->line == NULL for an entry, the first cycle will exit,
but the second one will happily write past entries buffer.
We actually do not want to exit the first cycle prematurely
on ptr->line == NULL.
Signed-off-by: Tomas Mraz <[email protected]>
|
inline void MaximumElementwise(int size, const ArithmeticParams& params,
const int8* input1_data, const int8* input2_data,
int8* output_data) {
ruy::profiler::ScopeLabel label("MaximumElementwiseInt8/8bit");
int i = 0;
#ifdef USE_NEON
for (; i <= size - 16; i += 16) {
const int8x16_t input1_val_original = vld1q_s8(input1_data + i);
const int8x16_t input2_val_original = vld1q_s8(input2_data + i);
const int8x16_t max_data =
vmaxq_s8(input1_val_original, input2_val_original);
vst1q_s8(output_data + i, max_data);
}
#endif // USE_NEON
for (; i < size; ++i) {
const int8 input1_val = input1_data[i];
const int8 input2_val = input2_data[i];
output_data[i] = std::max(input1_val, input2_val);
}
}
| 0 |
[
"CWE-476",
"CWE-369"
] |
tensorflow
|
15691e456c7dc9bd6be203b09765b063bf4a380c
| 260,251,343,923,055,340,000,000,000,000,000,000,000 | 20 |
Prevent dereferencing of null pointers in TFLite's `add.cc`.
PiperOrigin-RevId: 387244946
Change-Id: I56094233327fbd8439b92e1dbb1262176e00eeb9
|
static void vgacon_scrolldelta(struct vc_data *c, int lines)
{
int start, end, count, soff;
if (!lines) {
vgacon_restore_screen(c);
return;
}
if (!vgacon_scrollback_cur->data)
return;
if (!vgacon_scrollback_cur->save) {
vgacon_cursor(c, CM_ERASE);
vgacon_save_screen(c);
c->vc_origin = (unsigned long)c->vc_screenbuf;
vgacon_scrollback_cur->save = 1;
}
vgacon_scrollback_cur->restore = 0;
start = vgacon_scrollback_cur->cur + lines;
end = start + abs(lines);
if (start < 0)
start = 0;
if (start > vgacon_scrollback_cur->cnt)
start = vgacon_scrollback_cur->cnt;
if (end < 0)
end = 0;
if (end > vgacon_scrollback_cur->cnt)
end = vgacon_scrollback_cur->cnt;
vgacon_scrollback_cur->cur = start;
count = end - start;
soff = vgacon_scrollback_cur->tail -
((vgacon_scrollback_cur->cnt - end) * c->vc_size_row);
soff -= count * c->vc_size_row;
if (soff < 0)
soff += vgacon_scrollback_cur->size;
count = vgacon_scrollback_cur->cnt - start;
if (count > c->vc_rows)
count = c->vc_rows;
if (count) {
int copysize;
int diff = c->vc_rows - count;
void *d = (void *) c->vc_visible_origin;
void *s = (void *) c->vc_screenbuf;
count *= c->vc_size_row;
/* how much memory to end of buffer left? */
copysize = min(count, vgacon_scrollback_cur->size - soff);
scr_memcpyw(d, vgacon_scrollback_cur->data + soff, copysize);
d += copysize;
count -= copysize;
if (count) {
scr_memcpyw(d, vgacon_scrollback_cur->data, count);
d += count;
}
if (diff)
scr_memcpyw(d, s, diff * c->vc_size_row);
} else
vgacon_cursor(c, CM_MOVE);
}
| 1 |
[
"CWE-125"
] |
linux
|
973c096f6a85e5b5f2a295126ba6928d9a6afd45
| 64,966,259,810,882,480,000,000,000,000,000,000,000 | 73 |
vgacon: remove software scrollback support
Yunhai Zhang recently fixed a VGA software scrollback bug in commit
ebfdfeeae8c0 ("vgacon: Fix for missing check in scrollback handling"),
but that then made people look more closely at some of this code, and
there were more problems on the vgacon side, but also the fbcon software
scrollback.
We don't really have anybody who maintains this code - probably because
nobody actually _uses_ it any more. Sure, people still use both VGA and
the framebuffer consoles, but they are no longer the main user
interfaces to the kernel, and haven't been for decades, so these kinds
of extra features end up bitrotting and not really being used.
So rather than try to maintain a likely unused set of code, I'll just
aggressively remove it, and see if anybody even notices. Maybe there
are people who haven't jumped on the whole GUI badnwagon yet, and think
it's just a fad. And maybe those people use the scrollback code.
If that turns out to be the case, we can resurrect this again, once
we've found the sucker^Wmaintainer for it who actually uses it.
Reported-by: NopNop Nop <[email protected]>
Tested-by: Willy Tarreau <[email protected]>
Cc: 张云海 <[email protected]>
Acked-by: Andy Lutomirski <[email protected]>
Acked-by: Willy Tarreau <[email protected]>
Reviewed-by: Greg Kroah-Hartman <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
virtual ~IMkvReader() {}
| 0 |
[
"CWE-20"
] |
libvpx
|
f00890eecdf8365ea125ac16769a83aa6b68792d
| 227,032,187,345,793,250,000,000,000,000,000,000,000 | 1 |
update libwebm to libwebm-1.0.0.27-352-g6ab9fcf
https://chromium.googlesource.com/webm/libwebm/+log/af81f26..6ab9fcf
Change-Id: I9d56e1fbaba9b96404b4fbabefddc1a85b79c25d
|
static inline void ctx_commit_and_unlock(struct io_ring_ctx *ctx)
{
io_commit_cqring(ctx);
spin_unlock(&ctx->completion_lock);
io_cqring_ev_posted(ctx);
}
| 0 |
[
"CWE-416"
] |
linux
|
e677edbcabee849bfdd43f1602bccbecf736a646
| 240,393,171,966,274,200,000,000,000,000,000,000,000 | 6 |
io_uring: fix race between timeout flush and removal
io_flush_timeouts() assumes the timeout isn't in progress of triggering
or being removed/canceled, so it unconditionally removes it from the
timeout list and attempts to cancel it.
Leave it on the list and let the normal timeout cancelation take care
of it.
Cc: [email protected] # 5.5+
Signed-off-by: Jens Axboe <[email protected]>
|
void Gfx::opEndIgnoreUndef(Object args[], int numArgs) {
if (ignoreUndef > 0)
--ignoreUndef;
}
| 0 |
[] |
poppler
|
abf167af8b15e5f3b510275ce619e6fdb42edd40
| 284,452,858,201,077,400,000,000,000,000,000,000,000 | 4 |
Implement tiling/patterns in SplashOutputDev
Fixes bug 13518
|
static int com_objects_compare(zval *object1, zval *object2)
{
php_com_dotnet_object *obja, *objb;
int ret;
/* strange header bug problem here... the headers define the proto without the
* flags parameter. However, the MSDN docs state that there is a flags parameter,
* and my VC6 won't link unless the code uses the version with 4 parameters.
* So, we have this declaration here to fix it */
STDAPI VarCmp(LPVARIANT pvarLeft, LPVARIANT pvarRight, LCID lcid, DWORD flags);
obja = CDNO_FETCH(object1);
objb = CDNO_FETCH(object2);
switch (VarCmp(&obja->v, &objb->v, LOCALE_SYSTEM_DEFAULT, 0)) {
case VARCMP_LT:
ret = -1;
break;
case VARCMP_GT:
ret = 1;
break;
case VARCMP_EQ:
ret = 0;
break;
default:
/* either or both operands are NULL...
* not 100% sure how to handle this */
ret = -2;
}
return ret;
}
| 0 |
[
"CWE-502"
] |
php-src
|
115ee49b0be12e3df7d2c7027609fbe1a1297e42
| 214,795,145,063,089,240,000,000,000,000,000,000,000 | 31 |
Fix #77177: Serializing or unserializing COM objects crashes
Firstly, we avoid returning NULL from the get_property handler, but
instead return an empty HashTable, which already prevents the crashes.
Secondly, since (de-)serialization obviously makes no sense for COM,
DOTNET and VARIANT objects (at least with the current implementation),
we prohibit it right away.
|