| func (string, 0-484k chars) | target (int64, 0-1) | cwe (list, 0-4 entries) | project (string, 799 classes) | commit_id (string, 40 chars) | hash (float64) | size (int64, 1-24k) | message (string, 0-13.3k chars) |
---|---|---|---|---|---|---|---|
TEST_F(QueryPlannerTest, ElemMatchNestedOrNotIndexed) {
addIndex(BSON("a.b" << 1));
runQuery(fromjson("{c: 1, a: {$elemMatch: {b: 3, $or: [{c: 4}, {c: 5}]}}}"));
assertNumSolutions(2U);
assertSolutionExists("{cscan: {dir: 1}}");
assertSolutionExists(
"{fetch: {node: {ixscan: {pattern: {'a.b': 1}, bounds: "
"{'a.b': [[3,3,true,true]]}}}}}");
} | 0 | [
"CWE-834"
] | mongo | 94d0e046baa64d1aa1a6af97e2d19bb466cc1ff5 | 133,139,018,347,922,450,000,000,000,000,000,000,000 | 10 | SERVER-38164 $or pushdown optimization does not correctly handle $not within an $elemMatch |
pk_transaction_emit_property_changed (PkTransaction *transaction,
const gchar *property_name,
GVariant *property_value)
{
GVariantBuilder builder;
GVariantBuilder invalidated_builder;
/* build the dict */
g_variant_builder_init (&invalidated_builder, G_VARIANT_TYPE ("as"));
g_variant_builder_init (&builder, G_VARIANT_TYPE_ARRAY);
g_variant_builder_add (&builder,
"{sv}",
property_name,
property_value);
g_dbus_connection_emit_signal (transaction->priv->connection,
NULL,
transaction->priv->tid,
"org.freedesktop.DBus.Properties",
"PropertiesChanged",
g_variant_new ("(sa{sv}as)",
PK_DBUS_INTERFACE_TRANSACTION,
&builder,
&invalidated_builder),
NULL);
} | 0 | [
"CWE-287"
] | PackageKit | 7e8a7905ea9abbd1f384f05f36a4458682cd4697 | 44,845,058,922,642,710,000,000,000,000,000,000,000 | 25 | Do not set JUST_REINSTALL on any kind of auth failure
If we try to continue the auth queue when it has been cancelled (or failed)
then we fall upon the obscure JUST_REINSTALL transaction flag which only the
DNF backend actually verifies.
Many thanks to Matthias Gerstner <[email protected]> for spotting the problem. |
void* Elf_(r_bin_elf_free)(ELFOBJ* bin) {
int i;
if (!bin) {
return NULL;
}
free (bin->phdr);
free (bin->shdr);
free (bin->strtab);
free (bin->dyn_buf);
free (bin->shstrtab);
free (bin->dynstr);
//free (bin->strtab_section);
if (bin->imports_by_ord) {
for (i = 0; i<bin->imports_by_ord_size; i++) {
free (bin->imports_by_ord[i]);
}
free (bin->imports_by_ord);
}
if (bin->symbols_by_ord) {
for (i = 0; i<bin->symbols_by_ord_size; i++) {
free (bin->symbols_by_ord[i]);
}
free (bin->symbols_by_ord);
}
r_buf_free (bin->b);
if (bin->g_symbols != bin->phdr_symbols) {
R_FREE (bin->phdr_symbols);
}
if (bin->g_imports != bin->phdr_imports) {
R_FREE (bin->phdr_imports);
}
R_FREE (bin->g_sections);
R_FREE (bin->g_symbols);
R_FREE (bin->g_imports);
free (bin);
return NULL;
} | 0 | [
"CWE-125"
] | radare2 | c6d0076c924891ad9948a62d89d0bcdaf965f0cd | 184,094,687,066,999,800,000,000,000,000,000,000,000 | 37 | Fix #8731 - Crash in ELF parser with negative 32bit number |
static bool __stratum_send(struct pool *pool, char *s, ssize_t len)
{
SOCKETTYPE sock = pool->sock;
ssize_t ssent = 0;
if (opt_protocol)
applog(LOG_DEBUG, "SEND: %s", s);
strcat(s, "\n");
len++;
while (len > 0 ) {
struct timeval timeout = {0, 0};
ssize_t sent;
fd_set wd;
FD_ZERO(&wd);
FD_SET(sock, &wd);
if (select(sock + 1, NULL, &wd, NULL, &timeout) < 1) {
applog(LOG_DEBUG, "Write select failed on pool %d sock", pool->pool_no);
return false;
}
sent = send(pool->sock, s + ssent, len, 0);
if (sent < 0) {
if (errno != EAGAIN && errno != EWOULDBLOCK) {
applog(LOG_DEBUG, "Failed to curl_easy_send in stratum_send");
return false;
}
sent = 0;
}
ssent += sent;
len -= sent;
}
pool->cgminer_pool_stats.times_sent++;
pool->cgminer_pool_stats.bytes_sent += ssent;
total_bytes_xfer += ssent;
pool->cgminer_pool_stats.net_bytes_sent += ssent;
return true;
} | 0 | [
"CWE-119",
"CWE-787"
] | bfgminer | c80ad8548251eb0e15329fc240c89070640c9d79 | 147,807,223,058,916,220,000,000,000,000,000,000,000 | 40 | Stratum: extract_sockaddr: Truncate overlong addresses rather than stack overflow
Thanks to Mick Ayzenberg <[email protected]> for finding this! |
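The fixed function (extract_sockaddr) is not part of this sample; a minimal sketch of the mitigation the message describes, with a hypothetical buffer name and limit: copy into a bounded buffer and truncate overlong input instead of writing past the end.

```c
#include <stdio.h>
#include <string.h>

#define ADDR_MAX 255   /* hypothetical fixed buffer limit */

/* Copy an address string into a fixed-size buffer, truncating
 * overlong input instead of overflowing the buffer. */
static void copy_address(char dst[ADDR_MAX + 1], const char *src)
{
	size_t len = strlen(src);

	if (len > ADDR_MAX)
		len = ADDR_MAX;      /* truncate rather than overflow */
	memcpy(dst, src, len);
	dst[len] = '\0';         /* always NUL-terminate */
}

int main(void)
{
	char buf[ADDR_MAX + 1];

	copy_address(buf, "stratum+tcp://pool.example.com:3333");
	printf("%s\n", buf);
	return 0;
}
```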
static int test_remove(struct libmnt_test *ts, int argc, char *argv[])
{
if (argc < 2)
return -1;
return update(argv[1], NULL, 0);
} | 0 | [
"CWE-399"
] | util-linux | 84ed14022e7d3d121bbcab60ebad11ed38d691b0 | 336,839,725,009,145,300,000,000,000,000,000,000,000 | 6 | libmount: more robust mtab and utab update (CVE-2011-1676, CVE-2011-1677)
http://thread.gmane.org/gmane.comp.security.oss.general/4374
Changes:
- always use temporary file
- use fflush() for the temporary file
- check fprintf() return value
Signed-off-by: Karel Zak <[email protected]> |
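The three change bullets above describe a standard atomic-update pattern. A minimal userspace sketch of that pattern (not the actual libmount code): write to a temporary file, check the fprintf() and fflush() results, then rename() over the target so readers never see a partially written file.

```c
#include <stdio.h>

/* Sketch: update a file via a temporary so a crash or write error
 * never leaves the target truncated or half-written. */
static int update_file(const char *target, const char *contents)
{
	char tmp[4096];
	FILE *f;

	snprintf(tmp, sizeof(tmp), "%s.tmp", target);
	f = fopen(tmp, "w");
	if (!f)
		return -1;

	/* check fprintf() return value, then flush */
	if (fprintf(f, "%s", contents) < 0 || fflush(f) != 0) {
		fclose(f);
		remove(tmp);
		return -1;
	}
	if (fclose(f) != 0) {
		remove(tmp);
		return -1;
	}
	/* atomic replace: readers see the old or the new file, never a mix */
	return rename(tmp, target);
}
```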
WChangeSize(p, w, h)
struct win *p;
int w, h;
{
int wok = 0;
struct canvas *cv;
if (p->w_layer.l_cvlist == 0)
{
/* window not displayed -> works always */
ChangeWindowSize(p, w, h, p->w_histheight);
return;
}
for (cv = p->w_layer.l_cvlist; cv; cv = cv->c_lnext)
{
display = cv->c_display;
if (p != D_fore)
continue; /* change only fore */
if (D_CWS)
break;
if (D_CZ0 && (w == Z0width || w == Z1width))
wok = 1;
}
if (cv == 0 && wok == 0) /* can't change any display */
return;
if (!D_CWS)
h = p->w_height;
ChangeWindowSize(p, w, h, p->w_histheight);
for (display = displays; display; display = display->d_next)
{
if (p == D_fore)
{
if (D_cvlist && D_cvlist->c_next == 0)
ResizeDisplay(w, h);
else
ResizeDisplay(w, D_height);
ResizeLayersToCanvases(); /* XXX Hmm ? */
continue;
}
for (cv = D_cvlist; cv; cv = cv->c_next)
if (cv->c_layer->l_bottom == &p->w_layer)
break;
if (cv)
Redisplay(0);
}
} | 0 | [] | screen | c5db181b6e017cfccb8d7842ce140e59294d9f62 | 110,245,474,088,977,900,000,000,000,000,000,000,000 | 46 | ansi: add support for xterm OSC 11
It allows for getting and setting the background color. Notably, Vim uses
OSC 11 to learn whether it's running on a light or dark colored terminal
and choose a color scheme accordingly.
Tested with gnome-terminal and xterm. When called with "?" argument the
current background color is returned:
$ echo -ne "\e]11;?\e\\"
$ 11;rgb:2323/2727/2929
Signed-off-by: Lubomir Rintel <[email protected]>
(cherry picked from commit 7059bff20a28778f9d3acf81cad07b1388d02309)
Signed-off-by: Amadeusz Sławiński <[email protected]> |
static struct sk_buff *sfb_peek(struct Qdisc *sch)
{
struct sfb_sched_data *q = qdisc_priv(sch);
struct Qdisc *child = q->qdisc;
return child->ops->peek(child);
} | 0 | [
"CWE-330"
] | linux | 55667441c84fa5e0911a0aac44fb059c15ba6da2 | 163,953,531,914,844,070,000,000,000,000,000,000,000 | 7 | net/flow_dissector: switch to siphash
UDP IPv6 packets auto flowlabels are using a 32bit secret
(static u32 hashrnd in net/core/flow_dissector.c) and
apply jhash() over fields known by the receivers.
Attackers can easily infer the 32bit secret and use this information
to identify a device and/or user, since this 32bit secret is only
set at boot time.
Really, using jhash() to generate cookies sent on the wire
is a serious security concern.
Trying to change the rol32(hash, 16) in ip6_make_flowlabel() would be
a dead end. Trying to periodically change the secret (like in sch_sfq.c)
could change paths taken in the network for long lived flows.
Let's switch to siphash, as we did in commit df453700e8d8
("inet: switch IP ID generator to siphash")
Using a cryptographically strong pseudo random function will solve this
privacy issue and more generally remove other weak points in the stack.
Packet schedulers using skb_get_hash_perturb() benefit from this change.
Fixes: b56774163f99 ("ipv6: Enable auto flow labels by default")
Fixes: 42240901f7c4 ("ipv6: Implement different admin modes for automatic flow labels")
Fixes: 67800f9b1f4e ("ipv6: Call skb_get_hash_flowi6 to get skb->hash in ip6_make_flowlabel")
Fixes: cb1ce2ef387b ("ipv6: Implement automatic flow label generation on transmit")
Signed-off-by: Eric Dumazet <[email protected]>
Reported-by: Jonathan Berger <[email protected]>
Reported-by: Amit Klein <[email protected]>
Reported-by: Benny Pinkas <[email protected]>
Cc: Tom Herbert <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
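The core of the change is replacing a hash seeded by a guessable 32-bit value with a keyed PRF. A kernel-style sketch of the idea, assuming the in-kernel siphash() and net_get_random_once() interfaces; the function name and exact headers are illustrative, not the actual flow-dissector code.

```c
#include <linux/siphash.h>
#include <net/flow_dissector.h>

/* A 128-bit key, initialized once at first use. Unlike a 32-bit jhash
 * seed, it cannot be inferred from observed hash outputs. */
static siphash_key_t flow_hash_key __read_mostly;

static u32 flow_hash_sketch(const struct flow_keys *keys)
{
	net_get_random_once(&flow_hash_key, sizeof(flow_hash_key));
	return (u32)siphash(keys, sizeof(*keys), &flow_hash_key);
}
```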
smtp_server_connection_alloc(struct smtp_server *server,
const struct smtp_server_settings *set,
int fd_in, int fd_out,
const struct smtp_server_callbacks *callbacks,
void *context)
{
struct smtp_server_connection *conn;
pool_t pool;
pool = pool_alloconly_create("smtp server", 1024);
conn = p_new(pool, struct smtp_server_connection, 1);
conn->pool = pool;
conn->refcount = 1;
conn->server = server;
conn->callbacks = callbacks;
conn->context = context;
/* Merge settings with global server settings */
conn->set = server->set;
if (set != NULL) {
conn->set.protocol = server->set.protocol;
if (set->rawlog_dir != NULL && *set->rawlog_dir != '\0')
conn->set.rawlog_dir = p_strdup(pool, set->rawlog_dir);
if (set->ssl != NULL)
conn->set.ssl = ssl_iostream_settings_dup(pool, set->ssl);
if (set->hostname != NULL && *set->hostname != '\0')
conn->set.hostname = p_strdup(pool, set->hostname);
if (set->login_greeting != NULL &&
*set->login_greeting != '\0') {
conn->set.login_greeting =
p_strdup(pool, set->login_greeting);
}
if (set->capabilities != 0)
conn->set.capabilities = set->capabilities;
conn->set.workarounds |= set->workarounds;
if (set->max_client_idle_time_msecs > 0) {
conn->set.max_client_idle_time_msecs =
set->max_client_idle_time_msecs;
}
if (set->max_pipelined_commands > 0) {
conn->set.max_pipelined_commands =
set->max_pipelined_commands;
}
if (set->max_bad_commands > 0) {
conn->set.max_bad_commands = set->max_bad_commands;
}
if (set->max_recipients > 0)
conn->set.max_recipients = set->max_recipients;
smtp_command_limits_merge(&conn->set.command_limits,
&set->command_limits);
conn->set.max_message_size = set->max_message_size;
if (set->max_message_size == 0 ||
set->max_message_size == UOFF_T_MAX) {
conn->set.command_limits.max_data_size = UOFF_T_MAX;
} else if (conn->set.command_limits.max_data_size != 0) {
/* Explicit limit given */
} else if (set->max_message_size >
(UOFF_T_MAX - SMTP_SERVER_DEFAULT_MAX_SIZE_EXCESS_LIMIT)) {
/* Very high limit */
conn->set.command_limits.max_data_size = UOFF_T_MAX;
} else {
/* Absolute maximum before connection is closed in DATA
command */
conn->set.command_limits.max_data_size =
set->max_message_size +
SMTP_SERVER_DEFAULT_MAX_SIZE_EXCESS_LIMIT;
}
if (set->mail_param_extensions != NULL) {
conn->set.mail_param_extensions =
p_strarray_dup(pool, set->mail_param_extensions);
}
if (set->rcpt_param_extensions != NULL) {
conn->set.rcpt_param_extensions =
p_strarray_dup(pool, set->rcpt_param_extensions);
}
if (set->xclient_extensions != NULL) {
conn->set.xclient_extensions =
p_strarray_dup(pool, set->xclient_extensions);
}
if (set->socket_send_buffer_size > 0) {
conn->set.socket_send_buffer_size =
set->socket_send_buffer_size;
}
if (set->socket_recv_buffer_size > 0) {
conn->set.socket_recv_buffer_size =
set->socket_recv_buffer_size;
}
conn->set.tls_required =
conn->set.tls_required || set->tls_required;
conn->set.auth_optional =
conn->set.auth_optional || set->auth_optional;
conn->set.mail_path_allow_broken =
conn->set.mail_path_allow_broken ||
set->mail_path_allow_broken;
conn->set.rcpt_domain_optional =
conn->set.rcpt_domain_optional ||
set->rcpt_domain_optional;
conn->set.debug = conn->set.debug || set->debug;
}
if (set != NULL && set->mail_param_extensions != NULL) {
const char *const *extp;
p_array_init(&conn->mail_param_extensions, pool,
str_array_length(set->mail_param_extensions) + 8);
for (extp = set->mail_param_extensions; *extp != NULL; extp++) {
const char *ext = p_strdup(pool, *extp);
array_push_back(&conn->mail_param_extensions, &ext);
}
array_append_zero(&conn->mail_param_extensions);
}
if (set != NULL && set->rcpt_param_extensions != NULL) {
const char *const *extp;
p_array_init(&conn->rcpt_param_extensions, pool,
str_array_length(set->rcpt_param_extensions) + 8);
for (extp = set->rcpt_param_extensions; *extp != NULL; extp++) {
const char *ext = p_strdup(pool, *extp);
array_push_back(&conn->rcpt_param_extensions, &ext);
}
array_append_zero(&conn->rcpt_param_extensions);
}
net_set_nonblock(fd_in, TRUE);
if (fd_in != fd_out)
net_set_nonblock(fd_out, TRUE);
(void)net_set_tcp_nodelay(fd_out, TRUE);
set = &conn->set;
if (set->socket_send_buffer_size > 0 &&
net_set_send_buffer_size(fd_out,
set->socket_send_buffer_size) < 0) {
e_error(conn->event,
"net_set_send_buffer_size(%zu) failed: %m",
set->socket_send_buffer_size);
}
if (set->socket_recv_buffer_size > 0 &&
net_set_recv_buffer_size(fd_in,
set->socket_recv_buffer_size) < 0) {
e_error(conn->event,
"net_set_recv_buffer_size(%zu) failed: %m",
set->socket_recv_buffer_size);
}
return conn;
} | 0 | [
"CWE-77"
] | core | 321c339756f9b2b98fb7326359d1333adebb5295 | 141,894,813,687,326,560,000,000,000,000,000,000,000 | 153 | lib-smtp: smtp-server-connection - Fix STARTTLS command injection vulnerability.
The input handler kept reading more commands even though the input was locked by
the STARTTLS command, thereby causing it to read commands pipelined beyond
STARTTLS. This causes a STARTTLS command injection vulnerability. |
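The general mitigation is to discard any bytes still buffered from the plaintext stream once STARTTLS is accepted, since they may have been injected ahead of the handshake. A minimal sketch with a hypothetical connection struct (not Dovecot's actual stream API):

```c
#include <stddef.h>
#include <string.h>

/* hypothetical connection state, for illustration only */
struct conn {
	char   inbuf[8192];  /* plaintext read buffer */
	size_t inbuf_len;    /* bytes read but not yet parsed */
};

/* Bytes still buffered at STARTTLS time were read before the TLS
 * handshake and may be attacker-injected; drop them instead of
 * parsing them as post-TLS commands. */
static void starttls_accepted(struct conn *c)
{
	memset(c->inbuf, 0, c->inbuf_len);
	c->inbuf_len = 0;    /* subsequent reads come from the TLS layer */
}
```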
static int nfc_genl_rcv_nl_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
struct netlink_notify *n = ptr;
struct urelease_work *w;
if (event != NETLINK_URELEASE || n->protocol != NETLINK_GENERIC)
goto out;
pr_debug("NETLINK_URELEASE event from id %d\n", n->portid);
w = kmalloc(sizeof(*w), GFP_ATOMIC);
if (w) {
INIT_WORK(&w->w, nfc_urelease_event_work);
w->portid = n->portid;
schedule_work(&w->w);
}
out:
return NOTIFY_DONE;
} | 0 | [] | linux | 4071bf121d59944d5cd2238de0642f3d7995a997 | 267,739,586,087,407,530,000,000,000,000,000,000,000 | 21 | NFC: netlink: fix sleep in atomic bug when firmware download timeout
There are sleep in atomic bug that could cause kernel panic during
firmware download process. The root cause is that nlmsg_new with
GFP_KERNEL parameter is called in fw_dnld_timeout which is a timer
handler. The call trace is shown below:
BUG: sleeping function called from invalid context at include/linux/sched/mm.h:265
Call Trace:
kmem_cache_alloc_node
__alloc_skb
nfc_genl_fw_download_done
call_timer_fn
__run_timers.part.0
run_timer_softirq
__do_softirq
...
The nlmsg_new with GFP_KERNEL parameter may sleep during memory
allocation process, and the timer handler is run as the result of
a "software interrupt" that should not call any other function
that could sleep.
This patch changes allocation mode of netlink message from GFP_KERNEL
to GFP_ATOMIC in order to prevent sleep in atomic bug. The GFP_ATOMIC
flag makes memory allocation operation could be used in atomic context.
Fixes: 9674da8759df ("NFC: Add firmware upload netlink command")
Fixes: 9ea7187c53f6 ("NFC: netlink: Rename CMD_FW_UPLOAD to CMD_FW_DOWNLOAD")
Signed-off-by: Duoming Zhou <[email protected]>
Reviewed-by: Krzysztof Kozlowski <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Paolo Abeni <[email protected]> |
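The fix is the allocation-flag change itself. A kernel-style sketch with an illustrative handler name; the point is that a timer handler runs in softirq context, where a GFP_KERNEL allocation may sleep.

```c
#include <net/netlink.h>
#include <linux/timer.h>

static void fw_dnld_timeout_sketch(struct timer_list *t)
{
	/* was GFP_KERNEL: a sleeping allocation in atomic context */
	struct sk_buff *msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);

	if (!msg)
		return;      /* atomic allocations can fail; bail out cleanly */
	nlmsg_free(msg);     /* placeholder: real code fills and sends it */
}
```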
static bool svm_get_nested_state_pages(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
if (WARN_ON(!is_guest_mode(vcpu)))
return true;
if (!vcpu->arch.pdptrs_from_userspace &&
!nested_npt_enabled(svm) && is_pae_paging(vcpu))
/*
* Reload the guest's PDPTRs since after a migration
* the guest CR3 might be restored prior to setting the nested
* state which can lead to a load of wrong PDPTRs.
*/
if (CC(!load_pdptrs(vcpu, vcpu->arch.walk_mmu, vcpu->arch.cr3)))
return false;
if (!nested_svm_vmrun_msrpm(svm)) {
vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
vcpu->run->internal.suberror =
KVM_INTERNAL_ERROR_EMULATION;
vcpu->run->internal.ndata = 0;
return false;
}
return true;
} | 0 | [
"CWE-862"
] | kvm | 0f923e07124df069ba68d8bb12324398f4b6b709 | 333,903,908,369,885,000,000,000,000,000,000,000,000 | 27 | KVM: nSVM: avoid picking up unsupported bits from L2 in int_ctl (CVE-2021-3653)
* Invert the mask of bits that we pick from L2 in
nested_vmcb02_prepare_control
* Invert and explicitly use VIRQ related bits bitmask in svm_clear_vintr
This fixes a security issue that allowed a malicious L1 to run L2 with
AVIC enabled, which allowed the L2 to exploit the uninitialized and enabled
AVIC to read/write the host physical memory at some offsets.
Fixes: 3d6368ef580a ("KVM: SVM: Add VMRUN handler")
Signed-off-by: Maxim Levitsky <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]> |
static void insert_hash(struct audit_chunk *chunk)
{
struct list_head *list = chunk_hash(chunk->watch.inode);
list_add_rcu(&chunk->hash, list);
} | 0 | [
"CWE-362"
] | linux-2.6 | 8f7b0ba1c853919b85b54774775f567f30006107 | 57,937,008,914,569,650,000,000,000,000,000,000,000 | 5 | Fix inotify watch removal/umount races
Inotify watch removals suck violently.
To kick the watch out we need (in this order) inode->inotify_mutex and
ih->mutex. That's fine if we have a hold on inode; however, for all
other cases we need to make damn sure we don't race with umount. We can
*NOT* just grab a reference to a watch - inotify_unmount_inodes() will
happily sail past it and we'll end with reference to inode potentially
outliving its superblock.
Ideally we just want to grab an active reference to superblock if we
can; that will make sure we won't go into inotify_umount_inodes() until
we are done. Cleanup is just deactivate_super().
However, that leaves a messy case - what if we *are* racing with
umount() and active references to superblock can't be acquired anymore?
We can bump ->s_count, grab ->s_umount, which will almost certainly wait
until the superblock is shut down and the watch in question is pining
for fjords. That's fine, but there is a problem - we might have hit the
window between ->s_active getting to 0 / ->s_count - below S_BIAS (i.e.
the moment when superblock is past the point of no return and is heading
for shutdown) and the moment when deactivate_super() acquires
->s_umount.
We could just do drop_super() yield() and retry, but that's rather
antisocial and this stuff is luser-triggerable. OTOH, having grabbed
->s_umount and having found that we'd got there first (i.e. that
->s_root is non-NULL) we know that we won't race with
inotify_umount_inodes().
So we could grab a reference to watch and do the rest as above, just
with drop_super() instead of deactivate_super(), right? Wrong. We had
to drop ih->mutex before we could grab ->s_umount. So the watch
could've been gone already.
That still can be dealt with - we need to save watch->wd, do idr_find()
and compare its result with our pointer. If they match, we either have
the damn thing still alive or we'd lost not one but two races at once,
the watch had been killed and a new one got created with the same ->wd
at the same address. That couldn't have happened in inotify_destroy(),
but inotify_rm_wd() could run into that. Still, "new one got created"
is not a problem - we have every right to kill it or leave it alone,
whatever's more convenient.
So we can use idr_find(...) == watch && watch->inode->i_sb == sb as
"grab it and kill it" check. If it's been our original watch, we are
fine, if it's a newcomer - nevermind, just pretend that we'd won the
race and kill the fscker anyway; we are safe since we know that its
superblock won't be going away.
And yes, this is far beyond mere "not very pretty"; so's the entire
concept of inotify to start with.
Signed-off-by: Al Viro <[email protected]>
Acked-by: Greg KH <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
static int __io_async_cancel(struct io_cancel_data *cd,
struct io_uring_task *tctx,
unsigned int issue_flags)
{
bool all = cd->flags & (IORING_ASYNC_CANCEL_ALL|IORING_ASYNC_CANCEL_ANY);
struct io_ring_ctx *ctx = cd->ctx;
struct io_tctx_node *node;
int ret, nr = 0;
do {
ret = io_try_cancel(tctx, cd, issue_flags);
if (ret == -ENOENT)
break;
if (!all)
return ret;
nr++;
} while (1);
/* slow path, try all io-wq's */
io_ring_submit_lock(ctx, issue_flags);
ret = -ENOENT;
list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
struct io_uring_task *tctx = node->task->io_uring;
ret = io_async_cancel_one(tctx, cd);
if (ret != -ENOENT) {
if (!all)
break;
nr++;
}
}
io_ring_submit_unlock(ctx, issue_flags);
return all ? nr : ret;
} | 0 | [
"CWE-193"
] | linux | 47abea041f897d64dbd5777f0cf7745148f85d75 | 55,946,925,674,620,330,000,000,000,000,000,000,000 | 34 | io_uring: fix off-by-one in sync cancelation file check
The passed in index should be validated against the number of registered
files we have, it needs to be smaller than the index value to avoid going
one beyond the end.
Fixes: 78a861b94959 ("io_uring: add sync cancelation API through io_uring_register()")
Reported-by: Luo Likang <[email protected]>
Signed-off-by: Jens Axboe <[email protected]> |
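A minimal sketch of the bound described in the message, with illustrative names: an index into a table of nr_files registered files is valid only when strictly smaller than the count.

```c
#include <errno.h>

static int check_fixed_file_index(unsigned int idx, unsigned int nr_files)
{
	/* the off-by-one: a '>' here accepts idx == nr_files, which
	 * reads one element past the end of the table */
	if (idx >= nr_files)
		return -ENOENT;
	return 0;            /* files[idx] is now safe to access */
}
```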
void ieee80211_sta_pspoll(struct ieee80211_sta *pubsta)
{
struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
if (test_sta_flag(sta, WLAN_STA_SP))
return;
if (!test_sta_flag(sta, WLAN_STA_PS_DRIVER))
ieee80211_sta_ps_deliver_poll_response(sta);
else
set_sta_flag(sta, WLAN_STA_PSPOLL);
} | 0 | [] | linux | 588f7d39b3592a36fb7702ae3b8bdd9be4621e2f | 296,452,859,701,543,100,000,000,000,000,000,000,000 | 12 | mac80211: drop robust management frames from unknown TA
When receiving a robust management frame, drop it if we don't have
rx->sta since then we don't have a security association and thus
couldn't possibly validate the frame.
Cc: [email protected]
Signed-off-by: Johannes Berg <[email protected]> |
void proto_unregister(struct proto *prot)
{
mutex_lock(&proto_list_mutex);
release_proto_idx(prot);
list_del(&prot->node);
mutex_unlock(&proto_list_mutex);
kmem_cache_destroy(prot->slab);
prot->slab = NULL;
req_prot_cleanup(prot->rsk_prot);
if (prot->twsk_prot != NULL && prot->twsk_prot->twsk_slab != NULL) {
kmem_cache_destroy(prot->twsk_prot->twsk_slab);
kfree(prot->twsk_prot->twsk_slab_name);
prot->twsk_prot->twsk_slab = NULL;
}
} | 0 | [
"CWE-119",
"CWE-787"
] | linux | b98b0bc8c431e3ceb4b26b0dfc8db509518fb290 | 90,420,165,398,856,400,000,000,000,000,000,000,000 | 18 | net: avoid signed overflows for SO_{SND|RCV}BUFFORCE
CAP_NET_ADMIN users should not be allowed to set negative
sk_sndbuf or sk_rcvbuf values, as it can lead to various memory
corruptions, crashes, OOM...
Note that before commit 82981930125a ("net: cleanups in
sock_setsockopt()"), the bug was even more serious, since SO_SNDBUF
and SO_RCVBUF were vulnerable.
This needs to be backported to all known linux kernels.
Again, many thanks to syzkaller team for discovering this gem.
Signed-off-by: Eric Dumazet <[email protected]>
Reported-by: Andrey Konovalov <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
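A sketch of the sanitization the message calls for, with an illustrative minimum; the key step is clamping negative values before the size is doubled and stored in a signed field.

```c
#include <limits.h>

#define MIN_SNDBUF_SKETCH 4608   /* illustrative floor, not the kernel constant */

static int sanitize_sndbuf(int val)
{
	if (val < 0)
		val = 0;                 /* reject negative sizes outright */
	if (val > INT_MAX / 2)
		val = INT_MAX / 2;       /* doubling must not overflow */
	val *= 2;
	if (val < MIN_SNDBUF_SKETCH)
		val = MIN_SNDBUF_SKETCH;
	return val;
}
```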
static WC_INLINE int IsSigAlgoECDSA(int algoOID)
{
/* ECDSA sigAlgo must not have ASN1 NULL parameters */
if (algoOID == CTC_SHAwECDSA || algoOID == CTC_SHA256wECDSA ||
algoOID == CTC_SHA384wECDSA || algoOID == CTC_SHA512wECDSA) {
return 1;
}
return 0;
} | 0 | [
"CWE-125",
"CWE-345"
] | wolfssl | f93083be72a3b3d956b52a7ec13f307a27b6e093 | 131,238,601,321,496,670,000,000,000,000,000,000,000 | 10 | OCSP: improve handling of OCSP no check extension |
static void snippet_add_content(struct snippet_context *ctx,
struct snippet_data *target,
const unsigned char *data, size_t size,
size_t *count_r)
{
i_assert(target != NULL);
if (size >= 3 &&
((data[0] == 0xEF && data[1] == 0xBB && data[2] == 0xBF) ||
(data[0] == 0xBF && data[1] == 0xBB && data[2] == 0xEF))) {
*count_r = 3;
return;
}
if (data[0] == '\0') {
/* skip NULs without increasing snippet size */
return;
}
if (i_isspace(*data)) {
/* skip any leading whitespace */
if (str_len(target->snippet) > 1)
ctx->add_whitespace = TRUE;
if (data[0] == '\n')
ctx->state = SNIPPET_STATE_NEWLINE;
return;
}
if (ctx->add_whitespace) {
str_append_c(target->snippet, ' ');
ctx->add_whitespace = FALSE;
if (target->chars_left-- == 0)
return;
}
if (target->chars_left == 0)
return;
target->chars_left--;
*count_r = uni_utf8_char_bytes(data[0]);
i_assert(*count_r <= size);
str_append_data(target->snippet, data, *count_r);
} | 1 | [
"CWE-20"
] | core | 3a55f35c208b5fd3d52c0a6272bd5b8717a2ae54 | 137,396,597,788,333,420,000,000,000,000,000,000,000 | 37 | lib-mail: message_snippet_generate() - Fix potential crash when input ends with '>'
This happens only when the mail was large enough and full enough with
whitespace that message-parser returned multiple blocks before the snippet
was finished.
Broken by 74063ed8219d055489d5233b0c02a59886d2078c |
threshold_16_SSE_unaligned(byte *contone_ptr, byte *thresh_ptr, byte *ht_data)
{
__m128i input1;
__m128i input2;
int result_int;
byte *sse_data;
const unsigned int mask1 = 0x80808080;
__m128i sign_fix = _mm_set_epi32(mask1, mask1, mask1, mask1);
sse_data = (byte*) &(result_int);
/* Load */
input1 = _mm_loadu_si128((const __m128i *)contone_ptr);
input2 = _mm_loadu_si128((const __m128i *) thresh_ptr);
/* Unsigned subtraction does unsigned saturation, so we
have to use the signed operation */
input1 = _mm_xor_si128(input1, sign_fix);
input2 = _mm_xor_si128(input2, sign_fix);
/* Subtract the two */
input2 = _mm_subs_epi8(input1, input2);
/* Grab the sign mask */
result_int = _mm_movemask_epi8(input2);
/* bit wise reversal on 16 bit word */
ht_data[0] = bitreverse[sse_data[0]];
ht_data[1] = bitreverse[sse_data[1]];
} | 0 | [
"CWE-119"
] | ghostpdl | 362ec9daadb9992b0def3520cd1dc6fa52edd1c4 | 128,511,820,694,986,880,000,000,000,000,000,000,000 | 25 | Fix bug 697459 Buffer overflow in fill_threshold_buffer
There was an overflow check for ht_buffer size, but none for the larger
threshold_buffer. Note that this file didn't fail on Windows because the
combination of the ht_buffer and the size of the (miscalculated due to
overflow) threshold_buffer would have exceeded the 2Gb limit. |
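The missing check was on a buffer whose size is a product of dimensions. A minimal sketch of the overflow-safe sizing pattern (illustrative, not the actual Ghostscript computation):

```c
#include <stddef.h>
#include <stdint.h>

/* Compute width * height only after proving the product fits in
 * size_t; otherwise the allocation is too small and later writes
 * overflow it. */
static int checked_buffer_size(size_t width, size_t height, size_t *out)
{
	if (height != 0 && width > SIZE_MAX / height)
		return -1;               /* product would wrap */
	*out = width * height;
	return 0;
}
```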
read_timeout(void *arg)
{
FILE *f;
double temp, comp;
f = fopen(filename, "r");
if (f && fscanf(f, "%lf", &temp) == 1) {
comp = get_tempcomp(temp);
if (fabs(comp) <= MAX_COMP) {
comp = LCL_SetTempComp(comp);
DEBUG_LOG("tempcomp updated to %f for %f", comp, temp);
if (logfileid != -1) {
struct timespec now;
LCL_ReadCookedTime(&now, NULL);
LOG_FileWrite(logfileid, "%s %11.4e %11.4e",
UTI_TimeToLogForm(now.tv_sec), temp, comp);
}
} else {
LOG(LOGS_WARN, "Temperature compensation of %.3f ppm exceeds sanity limit of %.1f",
comp, MAX_COMP);
}
} else {
LOG(LOGS_WARN, "Could not read temperature from %s", filename);
}
if (f)
fclose(f);
timeout_id = SCH_AddTimeoutByDelay(update_interval, read_timeout, NULL);
} | 1 | [
"CWE-59"
] | chrony | e18903a6b56341481a2e08469c0602010bf7bfe3 | 56,903,675,889,499,250,000,000,000,000,000,000,000 | 35 | switch to new util file functions
Replace all fopen(), rename(), and unlink() calls with the new util
functions. |
concat_authorization_data(krb5_context context,
krb5_authdata **first, krb5_authdata **second,
krb5_authdata ***output)
{
register int i, j;
register krb5_authdata **ptr, **retdata;
/* count up the entries */
i = 0;
if (first)
for (ptr = first; *ptr; ptr++)
i++;
if (second)
for (ptr = second; *ptr; ptr++)
i++;
retdata = (krb5_authdata **)malloc((i+1)*sizeof(*retdata));
if (!retdata)
return ENOMEM;
retdata[i] = 0; /* null-terminated array */
for (i = 0, j = 0, ptr = first; j < 2 ; ptr = second, j++)
while (ptr && *ptr) {
/* now walk & copy */
retdata[i] = (krb5_authdata *)malloc(sizeof(*retdata[i]));
if (!retdata[i]) {
krb5_free_authdata(context, retdata);
return ENOMEM;
}
*retdata[i] = **ptr;
if (!(retdata[i]->contents =
(krb5_octet *)malloc(retdata[i]->length))) {
free(retdata[i]);
retdata[i] = 0;
krb5_free_authdata(context, retdata);
return ENOMEM;
}
memcpy(retdata[i]->contents, (*ptr)->contents, retdata[i]->length);
ptr++;
i++;
}
*output = retdata;
return 0;
} | 0 | [
"CWE-476"
] | krb5 | 93b4a6306a0026cf1cc31ac4bd8a49ba5d034ba7 | 60,380,108,898,503,580,000,000,000,000,000,000,000 | 44 | Fix S4U2Self KDC crash when anon is restricted
In validate_as_request(), when enforcing restrict_anonymous_to_tgt,
use client.princ instead of request->client; the latter is NULL when
validating S4U2Self requests.
CVE-2016-3120:
In MIT krb5 1.9 and later, an authenticated attacker can cause krb5kdc
to dereference a null pointer if the restrict_anonymous_to_tgt option
is set to true, by making an S4U2Self request.
CVSSv2 Vector: AV:N/AC:H/Au:S/C:N/I:N/A:C/E:H/RL:OF/RC:C
ticket: 8458 (new)
target_version: 1.14-next
target_version: 1.13-next |
struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
{
struct sk_buff_fclones *fclones = container_of(skb,
struct sk_buff_fclones,
skb1);
struct sk_buff *n;
if (skb_orphan_frags(skb, gfp_mask))
return NULL;
if (skb->fclone == SKB_FCLONE_ORIG &&
atomic_read(&fclones->fclone_ref) == 1) {
n = &fclones->skb2;
atomic_set(&fclones->fclone_ref, 2);
} else {
if (skb_pfmemalloc(skb))
gfp_mask |= __GFP_MEMALLOC;
n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
if (!n)
return NULL;
kmemcheck_annotate_bitfield(n, flags1);
n->fclone = SKB_FCLONE_UNAVAILABLE;
}
	return __skb_clone(n, skb);
} | 0 | [
"CWE-703",
"CWE-125"
] | linux | 8605330aac5a5785630aec8f64378a54891937cc | 66,247,929,440,166,895,000,000,000,000,000,000,000 | 28 | tcp: fix SCM_TIMESTAMPING_OPT_STATS for normal skbs
__sock_recv_timestamp can be called for both normal skbs (for
receive timestamps) and for skbs on the error queue (for transmit
timestamps).
Commit 1c885808e456
(tcp: SOF_TIMESTAMPING_OPT_STATS option for SO_TIMESTAMPING)
assumes any skb passed to __sock_recv_timestamp are from
the error queue, containing OPT_STATS in the content of the skb.
This results in accessing invalid memory or generating junk
data.
To fix this, set skb->pkt_type to PACKET_OUTGOING for packets
on the error queue. This is safe because on the receive path
on local sockets skb->pkt_type is never set to PACKET_OUTGOING.
With that, copy OPT_STATS from a packet, only if its pkt_type
is PACKET_OUTGOING.
Fixes: 1c885808e456 ("tcp: SOF_TIMESTAMPING_OPT_STATS option for SO_TIMESTAMPING")
Reported-by: JongHwan Kim <[email protected]>
Signed-off-by: Soheil Hassas Yeganeh <[email protected]>
Signed-off-by: Eric Dumazet <[email protected]>
Signed-off-by: Willem de Bruijn <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
static struct fib6_node *fib6_node_lookup_1(struct fib6_node *root,
struct lookup_args *args)
{
struct fib6_node *fn;
__be32 dir;
if (unlikely(args->offset == 0))
return NULL;
/*
* Descend on a tree
*/
fn = root;
for (;;) {
struct fib6_node *next;
dir = addr_bit_set(args->addr, fn->fn_bit);
next = dir ? rcu_dereference(fn->right) :
rcu_dereference(fn->left);
if (next) {
fn = next;
continue;
}
break;
}
while (fn) {
struct fib6_node *subtree = FIB6_SUBTREE(fn);
if (subtree || fn->fn_flags & RTN_RTINFO) {
struct fib6_info *leaf = rcu_dereference(fn->leaf);
struct rt6key *key;
if (!leaf)
goto backtrack;
key = (struct rt6key *) ((u8 *)leaf + args->offset);
if (ipv6_prefix_equal(&key->addr, args->addr, key->plen)) {
#ifdef CONFIG_IPV6_SUBTREES
if (subtree) {
struct fib6_node *sfn;
sfn = fib6_node_lookup_1(subtree,
args + 1);
if (!sfn)
goto backtrack;
fn = sfn;
}
#endif
if (fn->fn_flags & RTN_RTINFO)
return fn;
}
}
backtrack:
if (fn->fn_flags & RTN_ROOT)
break;
fn = rcu_dereference(fn->parent);
}
return NULL;
} | 0 | [
"CWE-755"
] | linux | 7b09c2d052db4b4ad0b27b97918b46a7746966fa | 94,320,823,331,848,120,000,000,000,000,000,000,000 | 66 | ipv6: fix a typo in fib6_rule_lookup()
Yi Ren reported an issue discovered by syzkaller, and bisected
to the cited commit.
Many thanks to Yi, this trivial patch does not reflect the patient
work that has been done.
Fixes: d64a1f574a29 ("ipv6: honor RT6_LOOKUP_F_DST_NOREF in rule lookup logic")
Signed-off-by: Eric Dumazet <[email protected]>
Acked-by: Wei Wang <[email protected]>
Bisected-and-reported-by: Yi Ren <[email protected]>
Signed-off-by: Jakub Kicinski <[email protected]> |
static int snd_usb_fasttrackpro_boot_quirk(struct usb_device *dev)
{
int err;
if (dev->actconfig->desc.bConfigurationValue == 1) {
dev_info(&dev->dev,
"Fast Track Pro switching to config #2\n");
/* This function has to be available by the usb core module.
* if it is not available the boot quirk has to be left out
* and the configuration has to be set by udev or hotplug
* rules
*/
err = usb_driver_set_configuration(dev, 2);
if (err < 0)
dev_dbg(&dev->dev,
"error usb_driver_set_configuration: %d\n",
err);
/* Always return an error, so that we stop creating a device
that will just be destroyed and recreated with a new
configuration */
return -ENODEV;
} else
dev_info(&dev->dev, "Fast Track Pro config OK\n");
return 0;
} | 0 | [] | sound | 0f886ca12765d20124bd06291c82951fd49a33be | 330,285,657,966,872,760,000,000,000,000,000,000,000 | 26 | ALSA: usb-audio: Fix NULL dereference in create_fixed_stream_quirk()
create_fixed_stream_quirk() may cause a NULL-pointer dereference by
accessing the non-existing endpoint when a USB device with a malformed
USB descriptor is used.
This patch avoids it simply by adding a sanity check of bNumEndpoints
before the accesses.
Bugzilla: https://bugzilla.suse.com/show_bug.cgi?id=971125
Cc: <[email protected]>
Signed-off-by: Takashi Iwai <[email protected]> |
ExprResolveGroup(struct xkb_context *ctx, const ExprDef *expr,
xkb_layout_index_t *group_rtrn)
{
bool ok;
int result;
ok = ExprResolveIntegerLookup(ctx, expr, &result, SimpleLookup,
groupNames);
if (!ok)
return false;
if (result <= 0 || result > XKB_MAX_GROUPS) {
log_err(ctx, "Group index %u is out of range (1..%d)\n",
result, XKB_MAX_GROUPS);
return false;
}
*group_rtrn = (xkb_layout_index_t) result;
return true;
} | 0 | [
"CWE-400",
"CWE-703"
] | libxkbcommon | 1f9d1248c07cda8aaff762429c0dce146de8632a | 267,082,919,564,806,700,000,000,000,000,000,000,000 | 20 | xkbcomp: fix stack overflow when evaluating boolean negation
The expression evaluator would go into an infinite recursion when
evaluating something like this as a boolean: `!True`. Instead of
recursing to just `True` and negating, it recursed to `!True` itself
again.
Bug inherited from xkbcomp.
Caught with the afl fuzzer.
Signed-off-by: Ran Benita <[email protected]> |
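The fix pattern is to recurse on the operand of the negation rather than the negation node itself. A self-contained sketch with an illustrative expression type (not libxkbcommon's ExprDef):

```c
#include <stdbool.h>

struct expr {
	enum { EXPR_BOOL, EXPR_NOT } op;
	bool value;               /* used when op == EXPR_BOOL */
	struct expr *child;       /* used when op == EXPR_NOT */
};

static bool eval_bool(const struct expr *e)
{
	switch (e->op) {
	case EXPR_NOT:
		/* buggy version recursed on `e` itself and never terminated */
		return !eval_bool(e->child);
	case EXPR_BOOL:
	default:
		return e->value;
	}
}
```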
static void gic_set_irq_11mpcore(GICState *s, int irq, int level,
int cm, int target)
{
if (level) {
GIC_DIST_SET_LEVEL(irq, cm);
if (GIC_DIST_TEST_EDGE_TRIGGER(irq) || GIC_DIST_TEST_ENABLED(irq, cm)) {
DPRINTF("Set %d pending mask %x\n", irq, target);
GIC_DIST_SET_PENDING(irq, target);
}
} else {
GIC_DIST_CLEAR_LEVEL(irq, cm);
}
} | 0 | [
"CWE-787"
] | qemu | edfe2eb4360cde4ed5d95bda7777edcb3510f76a | 7,995,802,234,284,553,000,000,000,000,000,000,000 | 13 | hw/intc/arm_gic: Fix interrupt ID in GICD_SGIR register
Per the ARM Generic Interrupt Controller Architecture specification
(document "ARM IHI 0048B.b (ID072613)"), the SGIINTID field is 4 bit,
not 10:
- 4.3 Distributor register descriptions
- 4.3.15 Software Generated Interrupt Register, GICD_SG
- Table 4-21 GICD_SGIR bit assignments
The Interrupt ID of the SGI to forward to the specified CPU
interfaces. The value of this field is the Interrupt ID, in
the range 0-15, for example a value of 0b0011 specifies
Interrupt ID 3.
Correct the irq mask to fix an undefined behavior (which eventually
lead to a heap-buffer-overflow, see [Buglink]):
$ echo 'writel 0x8000f00 0xff4affb0' | qemu-system-aarch64 -M virt,accel=qtest -qtest stdio
[I 1612088147.116987] OPENED
[R +0.278293] writel 0x8000f00 0xff4affb0
../hw/intc/arm_gic.c:1498:13: runtime error: index 944 out of bounds for type 'uint8_t [16][8]'
SUMMARY: UndefinedBehaviorSanitizer: undefined-behavior ../hw/intc/arm_gic.c:1498:13
This fixes a security issue when running with KVM on Arm with
kernel-irqchip=off. (The default is kernel-irqchip=on, which is
unaffected, and which is also the correct choice for performance.)
Cc: [email protected]
Fixes: CVE-2021-20221
Fixes: 9ee6e8bb853 ("ARMv7 support.")
Buglink: https://bugs.launchpad.net/qemu/+bug/1913916
Buglink: https://bugs.launchpad.net/qemu/+bug/1913917
Reported-by: Alexander Bulekov <[email protected]>
Signed-off-by: Philippe Mathieu-Daudé <[email protected]>
Message-id: [email protected]
Reviewed-by: Peter Maydell <[email protected]>
Signed-off-by: Peter Maydell <[email protected]> |
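The fix reduces to masking the guest-written SGIINTID field to its architected 4 bits. A one-line sketch of the corrected extraction:

```c
#include <stdint.h>

/* GICD_SGIR's SGIINTID field is 4 bits (SGIs are IDs 0-15); the buggy
 * code masked with 0x3ff and could index past the SGI state arrays. */
static unsigned int sgir_irq(uint32_t value)
{
	return value & 0xf;      /* was: value & 0x3ff */
}
```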
static int ath6kl_usb_alloc_pipe_resources(struct ath6kl_usb_pipe *pipe,
int urb_cnt)
{
struct ath6kl_urb_context *urb_context;
int status = 0, i;
INIT_LIST_HEAD(&pipe->urb_list_head);
init_usb_anchor(&pipe->urb_submitted);
for (i = 0; i < urb_cnt; i++) {
urb_context = kzalloc(sizeof(struct ath6kl_urb_context),
GFP_KERNEL);
if (urb_context == NULL) {
status = -ENOMEM;
goto fail_alloc_pipe_resources;
}
urb_context->pipe = pipe;
/*
* we only allocate the urb contexts here; the actual URB
* is allocated from the kernel as needed to do a transaction
*/
pipe->urb_alloc++;
ath6kl_usb_free_urb_to_pipe(pipe, urb_context);
}
ath6kl_dbg(ATH6KL_DBG_USB,
"ath6kl usb: alloc resources lpipe:%d hpipe:0x%X urbs:%d\n",
pipe->logical_pipe_num, pipe->usb_pipe_handle,
pipe->urb_alloc);
fail_alloc_pipe_resources:
return status;
} | 0 | [
"CWE-476"
] | linux | 39d170b3cb62ba98567f5c4f40c27b5864b304e5 | 262,161,230,818,220,880,000,000,000,000,000,000,000 | 35 | ath6kl: fix a NULL-ptr-deref bug in ath6kl_usb_alloc_urb_from_pipe()
The `ar_usb` field of `ath6kl_usb_pipe_usb_pipe` objects
are initialized to point to the containing `ath6kl_usb` object
according to endpoint descriptors read from the device side, as shown
below in `ath6kl_usb_setup_pipe_resources`:
for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
endpoint = &iface_desc->endpoint[i].desc;
// get the address from endpoint descriptor
pipe_num = ath6kl_usb_get_logical_pipe_num(ar_usb,
endpoint->bEndpointAddress,
&urbcount);
......
// select the pipe object
pipe = &ar_usb->pipes[pipe_num];
// initialize the ar_usb field
pipe->ar_usb = ar_usb;
}
The driver assumes that the addresses reported in endpoint
descriptors from device side to be complete. If a device is
malicious and does not report complete addresses, it may trigger
NULL-ptr-deref `ath6kl_usb_alloc_urb_from_pipe` and
`ath6kl_usb_free_urb_to_pipe`.
This patch fixes the bug by preventing potential NULL-ptr-deref
(CVE-2019-15098).
Signed-off-by: Hui Peng <[email protected]>
Reported-by: Hui Peng <[email protected]>
Reported-by: Mathias Payer <[email protected]>
Reviewed-by: Greg Kroah-Hartman <[email protected]>
Signed-off-by: Kalle Valo <[email protected]> |
static ssize_t __nr_hugepages_store_common(bool obey_mempolicy,
struct hstate *h, int nid,
unsigned long count, size_t len)
{
int err;
nodemask_t nodes_allowed, *n_mask;
if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
return -EINVAL;
if (nid == NUMA_NO_NODE) {
/*
* global hstate attribute
*/
if (!(obey_mempolicy &&
init_nodemask_of_mempolicy(&nodes_allowed)))
n_mask = &node_states[N_MEMORY];
else
n_mask = &nodes_allowed;
} else {
/*
* Node specific request. count adjustment happens in
* set_max_huge_pages() after acquiring hugetlb_lock.
*/
init_nodemask_of_node(&nodes_allowed, nid);
n_mask = &nodes_allowed;
}
err = set_max_huge_pages(h, count, nid, n_mask);
return err ? err : len;
} | 0 | [
"CWE-362"
] | linux | 17743798d81238ab13050e8e2833699b54e15467 | 110,107,334,009,436,100,000,000,000,000,000,000,000 | 32 | mm/hugetlb: fix a race between hugetlb sysctl handlers
There is a race between the assignment of `table->data` and write value
to the pointer of `table->data` in the __do_proc_doulongvec_minmax() on
the other thread.
CPU0: CPU1:
proc_sys_write
hugetlb_sysctl_handler proc_sys_call_handler
hugetlb_sysctl_handler_common hugetlb_sysctl_handler
table->data = &tmp; hugetlb_sysctl_handler_common
table->data = &tmp;
proc_doulongvec_minmax
do_proc_doulongvec_minmax sysctl_head_finish
__do_proc_doulongvec_minmax unuse_table
i = table->data;
*i = val; // corrupt CPU1's stack
Fix this by duplicating the `table`, and only update the duplicate of
it. And introduce a helper of proc_hugetlb_doulongvec_minmax() to
simplify the code.
The following oops was seen:
BUG: kernel NULL pointer dereference, address: 0000000000000000
#PF: supervisor instruction fetch in kernel mode
#PF: error_code(0x0010) - not-present page
Code: Bad RIP value.
...
Call Trace:
? set_max_huge_pages+0x3da/0x4f0
? alloc_pool_huge_page+0x150/0x150
? proc_doulongvec_minmax+0x46/0x60
? hugetlb_sysctl_handler_common+0x1c7/0x200
? nr_hugepages_store+0x20/0x20
? copy_fd_bitmaps+0x170/0x170
? hugetlb_sysctl_handler+0x1e/0x20
? proc_sys_call_handler+0x2f1/0x300
? unregister_sysctl_table+0xb0/0xb0
? __fd_install+0x78/0x100
? proc_sys_write+0x14/0x20
? __vfs_write+0x4d/0x90
? vfs_write+0xef/0x240
? ksys_write+0xc0/0x160
? __ia32_sys_read+0x50/0x50
? __close_fd+0x129/0x150
? __x64_sys_write+0x43/0x50
? do_syscall_64+0x6c/0x200
? entry_SYSCALL_64_after_hwframe+0x44/0xa9
Fixes: e5ff215941d5 ("hugetlb: multiple hstates for multiple page sizes")
Signed-off-by: Muchun Song <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Reviewed-by: Mike Kravetz <[email protected]>
Cc: Andi Kleen <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Linus Torvalds <[email protected]> |
file_s_fnmatch(int argc, VALUE *argv, VALUE obj)
{
VALUE pattern, path;
VALUE rflags;
int flags;
if (rb_scan_args(argc, argv, "21", &pattern, &path, &rflags) == 3)
flags = NUM2INT(rflags);
else
flags = 0;
StringValue(pattern);
FilePathStringValue(path);
if (flags & FNM_EXTGLOB) {
struct brace_args args;
args.value = path;
args.flags = flags;
if (ruby_brace_expand(RSTRING_PTR(pattern), flags, fnmatch_brace,
(VALUE)&args, rb_enc_get(pattern), pattern) > 0)
return Qtrue;
}
else {
rb_encoding *enc = rb_enc_compatible(pattern, path);
if (!enc) return Qfalse;
if (fnmatch(RSTRING_PTR(pattern), enc, RSTRING_PTR(path), flags) == 0)
return Qtrue;
}
RB_GC_GUARD(pattern);
return Qfalse;
] | ruby | a0a2640b398cffd351f87d3f6243103add66575b | 232,136,107,297,225,570,000,000,000,000,000,000,000 | 33 | Fix for wrong fnmatch pattern
* dir.c (file_s_fnmatch): ensure that pattern does not contain a
NUL character. https://hackerone.com/reports/449617 |
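The check the message describes, sketched with plain C types: reject any pattern whose byte range contains an embedded NUL before handing it to C-string matching code.

```c
#include <string.h>

/* An embedded NUL would silently terminate the pattern early inside
 * fnmatch-style C-string processing; reject it up front. */
static int pattern_has_no_nul(const char *ptr, size_t len)
{
	return memchr(ptr, '\0', len) == NULL;
}
```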
static inline void paravirt_arch_dup_mmap(struct mm_struct *oldmm,
struct mm_struct *mm)
{
PVOP_VCALL2(mmu.dup_mmap, oldmm, mm);
} | 0 | [
"CWE-276"
] | linux | cadfad870154e14f745ec845708bc17d166065f2 | 118,262,721,889,420,900,000,000,000,000,000,000,000 | 5 | x86/ioperm: Fix io bitmap invalidation on Xen PV
tss_invalidate_io_bitmap() wasn't wired up properly through the pvop
machinery, so the TSS and Xen's io bitmap would get out of sync
whenever disabling a valid io bitmap.
Add a new pvop for tss_invalidate_io_bitmap() to fix it.
This is XSA-329.
Fixes: 22fe5b0439dd ("x86/ioperm: Move TSS bitmap update to exit to user work")
Signed-off-by: Andy Lutomirski <[email protected]>
Signed-off-by: Thomas Gleixner <[email protected]>
Reviewed-by: Juergen Gross <[email protected]>
Reviewed-by: Thomas Gleixner <[email protected]>
Cc: [email protected]
Link: https://lkml.kernel.org/r/d53075590e1f91c19f8af705059d3ff99424c020.1595030016.git.luto@kernel.org |
static inline struct mem_cgroup *page_memcg(struct page *page)
{
return NULL;
} | 0 | [
"CWE-119"
] | linux | 1be7107fbe18eed3e319a6c3e83c78254b693acb | 303,324,914,710,647,740,000,000,000,000,000,000,000 | 4 | mm: larger stack guard gap, between vmas
Stack guard page is a useful feature to reduce a risk of stack smashing
into a different mapping. We have been using a single page gap which
is sufficient to prevent having stack adjacent to a different mapping.
But this seems to be insufficient in the light of the stack usage in
userspace. E.g. glibc uses as large as 64kB alloca() in many commonly
used functions. Others use constructs like gid_t buffer[NGROUPS_MAX]
which is 256kB or stack strings with MAX_ARG_STRLEN.
This will become especially dangerous for suid binaries and the default
no limit for the stack size limit because those applications can be
tricked to consume a large portion of the stack and a single glibc call
could jump over the guard page. These attacks are not theoretical,
unfortunatelly.
Make those attacks less probable by increasing the stack guard gap
to 1MB (on systems with 4k pages; but make it depend on the page size
because systems with larger base pages might cap stack allocations in
the PAGE_SIZE units) which should cover larger alloca() and VLA stack
allocations. It is obviously not a full fix because the problem is
somehow inherent, but it should reduce attack space a lot.
One could argue that the gap size should be configurable from userspace,
but that can be done later when somebody finds that the new 1MB is wrong
for some special case applications. For now, add a kernel command line
option (stack_guard_gap) to specify the stack gap size (in page units).
Implementation wise, first delete all the old code for stack guard page:
because although we could get away with accounting one extra page in a
stack vma, accounting a larger gap can break userspace - case in point,
a program run with "ulimit -S -v 20000" failed when the 1MB gap was
counted for RLIMIT_AS; similar problems could come with RLIMIT_MLOCK
and strict non-overcommit mode.
Instead of keeping gap inside the stack vma, maintain the stack guard
gap as a gap between vmas: using vm_start_gap() in place of vm_start
(or vm_end_gap() in place of vm_end if VM_GROWSUP) in just those few
places which need to respect the gap - mainly arch_get_unmapped_area(),
and and the vma tree's subtree_gap support for that.
Original-patch-by: Oleg Nesterov <[email protected]>
Original-patch-by: Michal Hocko <[email protected]>
Signed-off-by: Hugh Dickins <[email protected]>
Acked-by: Michal Hocko <[email protected]>
Tested-by: Helge Deller <[email protected]> # parisc
Signed-off-by: Linus Torvalds <[email protected]> |
static void tg3_mdio_start(struct tg3 *tp)
{
tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
tw32_f(MAC_MI_MODE, tp->mi_mode);
udelay(80);
if (tg3_flag(tp, MDIOBUS_INITED) &&
tg3_asic_rev(tp) == ASIC_REV_5785)
tg3_mdio_config_5785(tp);
} | 0 | [
"CWE-476",
"CWE-119"
] | linux | 715230a44310a8cf66fbfb5a46f9a62a9b2de424 | 100,503,020,020,148,900,000,000,000,000,000,000,000 | 10 | tg3: fix length overflow in VPD firmware parsing
Commit 184b89044fb6e2a74611dafa69b1dce0d98612c6 ("tg3: Use VPD fw version
when present") introduced VPD parsing that contained a potential length
overflow.
Limit the hardware's reported firmware string length (max 255 bytes) to
stay inside the driver's firmware string length (32 bytes). On overflow,
truncate the formatted firmware string instead of potentially overwriting
portions of the tg3 struct.
http://cansecwest.com/slides/2013/PrivateCore%20CSW%202013.pdf
Signed-off-by: Kees Cook <[email protected]>
Reported-by: Oded Horovitz <[email protected]>
Reported-by: Brad Spengler <[email protected]>
Cc: [email protected]
Cc: Matt Carlson <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
static int __bprm_mm_init(struct linux_binprm *bprm)
{
int err;
struct vm_area_struct *vma = NULL;
struct mm_struct *mm = bprm->mm;
bprm->vma = vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
if (!vma)
return -ENOMEM;
down_write(&mm->mmap_sem);
vma->vm_mm = mm;
/*
* Place the stack at the largest stack address the architecture
* supports. Later, we'll move this to an appropriate place. We don't
* use STACK_TOP because that can depend on attributes which aren't
* configured yet.
*/
BUILD_BUG_ON(VM_STACK_FLAGS & VM_STACK_INCOMPLETE_SETUP);
vma->vm_end = STACK_TOP_MAX;
vma->vm_start = vma->vm_end - PAGE_SIZE;
vma->vm_flags = VM_SOFTDIRTY | VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
INIT_LIST_HEAD(&vma->anon_vma_chain);
err = insert_vm_struct(mm, vma);
if (err)
goto err;
mm->stack_vm = mm->total_vm = 1;
arch_bprm_mm_init(mm, vma);
up_write(&mm->mmap_sem);
bprm->p = vma->vm_end - sizeof(void *);
return 0;
err:
up_write(&mm->mmap_sem);
bprm->vma = NULL;
kmem_cache_free(vm_area_cachep, vma);
return err;
} | 0 | [
"CWE-362"
] | linux | 8b01fc86b9f425899f8a3a8fc1c47d73c2c20543 | 234,055,343,652,164,400,000,000,000,000,000,000,000 | 41 | fs: take i_mutex during prepare_binprm for set[ug]id executables
This prevents a race between chown() and execve(), where chowning a
setuid-user binary to root would momentarily make the binary setuid
root.
This patch was mostly written by Linus Torvalds.
Signed-off-by: Jann Horn <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
add_FID(i_ctx_t *i_ctx_p, ref * fp /* t_dictionary */ , gs_font * pfont,
gs_ref_memory_t *imem)
{
ref fid;
make_tav(&fid, t_fontID,
a_readonly | imemory_space(imem) | imemory_new_mask(imem),
pstruct, (void *)pfont);
return (i_ctx_p ? idict_put_string(fp, "FID", &fid) :
dict_put_string(fp, "FID", &fid, NULL));
} | 0 | [
"CWE-704"
] | ghostpdl | 548bb434e81dadcc9f71adf891a3ef5bea8e2b4e | 259,270,077,005,979,470,000,000,000,000,000,000,000 | 11 | PS interpreter - add some type checking
These were 'probably' safe anyway, since they mostly treat the objects
as integers without checking, which at least can't result in a crash.
Nevertheless, we ought to check.
The return from comparedictkeys could be wrong if one of the keys had
a value which was not an array, it could incorrectly decide the two
were in fact the same. |
static void nfs4_xdr_enc_rename(struct rpc_rqst *req, struct xdr_stream *xdr,
const void *data)
{
const struct nfs_renameargs *args = data;
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
encode_compound_hdr(xdr, req, &hdr);
encode_sequence(xdr, &args->seq_args, &hdr);
encode_putfh(xdr, args->old_dir, &hdr);
encode_savefh(xdr, &hdr);
encode_putfh(xdr, args->new_dir, &hdr);
encode_rename(xdr, args->old_name, args->new_name, &hdr);
encode_nops(&hdr);
} | 0 | [
"CWE-787"
] | linux | b4487b93545214a9db8cbf32e86411677b0cca21 | 313,073,862,523,986,300,000,000,000,000,000,000,000 | 16 | nfs: Fix getxattr kernel panic and memory overflow
Move the buffer size check to decode_attr_security_label() before memcpy()
Only call memcpy() if the buffer is large enough
Fixes: aa9c2669626c ("NFS: Client implementation of Labeled-NFS")
Signed-off-by: Jeffrey Mitchell <[email protected]>
[Trond: clean up duplicate test of label->len != 0]
Signed-off-by: Trond Myklebust <[email protected]> |
mod_attr_define(mrb_state *mrb, mrb_value mod, mrb_value (*accessor)(mrb_state *, mrb_value), mrb_sym (*access_name)(mrb_state *, mrb_sym))
{
struct RClass *c = mrb_class_ptr(mod);
const mrb_value *argv;
mrb_int argc, i;
int ai;
mrb_get_args(mrb, "*", &argv, &argc);
ai = mrb_gc_arena_save(mrb);
for (i=0; i<argc; i++) {
mrb_value name;
mrb_sym method;
struct RProc *p;
mrb_method_t m;
method = to_sym(mrb, argv[i]);
name = prepare_ivar_name(mrb, method);
if (access_name) {
method = access_name(mrb, method);
}
p = mrb_proc_new_cfunc_with_env(mrb, accessor, 1, &name);
MRB_METHOD_FROM_PROC(m, p);
mrb_define_method_raw(mrb, c, method, m);
mrb_gc_arena_restore(mrb, ai);
}
return mrb_nil_value();
} | 0 | [
"CWE-787"
] | mruby | b1d0296a937fe278239bdfac840a3fd0e93b3ee9 | 95,130,237,505,716,950,000,000,000,000,000,000,000 | 28 | class.c: clear method cache after `remove_method`. |
FilterFactoryMap::const_iterator findUpgradeCaseInsensitive(const FilterFactoryMap& upgrade_map,
absl::string_view upgrade_type) {
for (auto it = upgrade_map.begin(); it != upgrade_map.end(); ++it) {
if (StringUtil::CaseInsensitiveCompare()(it->first, upgrade_type)) {
return it;
}
}
return upgrade_map.end();
} | 0 | [
"CWE-22"
] | envoy | 5333b928d8bcffa26ab19bf018369a835f697585 | 224,621,065,538,894,130,000,000,000,000,000,000,000 | 9 | Implement handling of escaped slash characters in URL path
Fixes: CVE-2021-29492
Signed-off-by: Yan Avlasov <[email protected]> |
static int copy_string(struct archive_read* a, int len, int dist) {
struct rar5* rar = get_context(a);
const uint64_t cmask = rar->cstate.window_mask;
const uint64_t write_ptr = rar->cstate.write_ptr +
rar->cstate.solid_offset;
int i;
if (rar->cstate.window_buf == NULL)
return ARCHIVE_FATAL;
/* The unpacker spends most of the time in this function. It would be
* a good idea to introduce some optimizations here.
*
* Just remember that this loop treats buffers that overlap differently
* than buffers that do not overlap. This is why a simple memcpy(3)
* call will not be enough. */
for(i = 0; i < len; i++) {
const ssize_t write_idx = (write_ptr + i) & cmask;
const ssize_t read_idx = (write_ptr + i - dist) & cmask;
rar->cstate.window_buf[write_idx] =
rar->cstate.window_buf[read_idx];
}
rar->cstate.write_ptr += len;
return ARCHIVE_OK;
} | 0 | [
"CWE-20",
"CWE-125"
] | libarchive | 94821008d6eea81e315c5881cdf739202961040a | 63,208,947,997,904,700,000,000,000,000,000,000,000 | 27 | RAR5 reader: reject files that declare invalid header flags
One of the fields in RAR5's base block structure is the size of the
header. Some invalid files declare a 0 header size setting, which can
confuse the unpacker. Minimum header size for RAR5 base blocks is 7
bytes (4 bytes for CRC, and 3 bytes for the rest), so block size of 0
bytes should be rejected at header parsing stage.
The fix adds an error condition if header size of 0 bytes is detected.
In this case, the unpacker will not attempt to unpack the file, as the
header is corrupted.
The commit also adds OSSFuzz #20459 sample to test further regressions
in this area. |
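A sketch of the rejection described above; the 7-byte floor (4 CRC bytes plus 3 bytes for the remainder) comes straight from the message, while the error constant is a stand-in for libarchive's.

```c
#include <stdint.h>

#define FATAL_SKETCH (-30)   /* stand-in for ARCHIVE_FATAL */

/* A RAR5 base block needs at least 4 bytes of CRC plus 3 further
 * bytes, so a declared header size below 7 (notably 0) means the
 * header is corrupt and unpacking must not be attempted. */
static int check_base_block_size(uint64_t hdr_size)
{
	if (hdr_size < 7)
		return FATAL_SKETCH;
	return 0;
}
```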
sg_common_write(Sg_fd * sfp, Sg_request * srp,
unsigned char *cmnd, int timeout, int blocking)
{
int k, at_head;
Sg_device *sdp = sfp->parentdp;
sg_io_hdr_t *hp = &srp->header;
srp->data.cmd_opcode = cmnd[0]; /* hold opcode of command */
hp->status = 0;
hp->masked_status = 0;
hp->msg_status = 0;
hp->info = 0;
hp->host_status = 0;
hp->driver_status = 0;
hp->resid = 0;
SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp,
"sg_common_write: scsi opcode=0x%02x, cmd_size=%d\n",
(int) cmnd[0], (int) hp->cmd_len));
k = sg_start_req(srp, cmnd);
if (k) {
SCSI_LOG_TIMEOUT(1, sg_printk(KERN_INFO, sfp->parentdp,
"sg_common_write: start_req err=%d\n", k));
sg_finish_rem_req(srp);
return k; /* probably out of space --> ENOMEM */
}
if (atomic_read(&sdp->detaching)) {
if (srp->bio) {
scsi_req_free_cmd(scsi_req(srp->rq));
blk_end_request_all(srp->rq, -EIO);
srp->rq = NULL;
}
sg_finish_rem_req(srp);
return -ENODEV;
}
hp->duration = jiffies_to_msecs(jiffies);
if (hp->interface_id != '\0' && /* v3 (or later) interface */
(SG_FLAG_Q_AT_TAIL & hp->flags))
at_head = 0;
else
at_head = 1;
srp->rq->timeout = timeout;
kref_get(&sfp->f_ref); /* sg_rq_end_io() does kref_put(). */
blk_execute_rq_nowait(sdp->device->request_queue, sdp->disk,
srp->rq, at_head, sg_rq_end_io);
return 0;
} | 0 | [
"CWE-119"
] | linux | bf33f87dd04c371ea33feb821b60d63d754e3124 | 208,362,822,898,756,140,000,000,000,000,000,000,000 | 50 | scsi: sg: check length passed to SG_NEXT_CMD_LEN
The user can control the size of the next command passed along, but the
value passed to the ioctl isn't checked against the usable max command
size.
Cc: <[email protected]>
Signed-off-by: Peter Chang <[email protected]>
Acked-by: Douglas Gilbert <[email protected]>
Signed-off-by: Martin K. Petersen <[email protected]> |
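A sketch of the missing validation, with an illustrative maximum: the length passed to the SG_NEXT_CMD_LEN ioctl must be bounded by the largest command the driver can store before it is remembered for the next write.

```c
#include <errno.h>

#define MAX_CDB_SIZE_SKETCH 252   /* illustrative bound, not the driver's macro */

static int set_next_cmd_len(int val)
{
	if (val < 0 || val > MAX_CDB_SIZE_SKETCH)
		return -ENOMEM;           /* reject lengths the driver cannot hold */
	return 0;
}
```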
GF_Box *trak_New()
{
ISOM_DECL_BOX_ALLOC(GF_TrackBox, GF_ISOM_BOX_TYPE_TRAK);
	return (GF_Box *)tmp;
} | 0 | [
"CWE-400",
"CWE-401"
] | gpac | d2371b4b204f0a3c0af51ad4e9b491144dd1225c | 132,375,517,637,139,900,000,000,000,000,000,000,000 | 5 | prevent dref memleak on invalid input (#1183) |
static int __init parse_rodata(char *arg)
{
return strtobool(arg, &rodata_enabled);
} | 0 | [] | linux | 15122ee2c515a253b0c66a3e618bc7ebe35105eb | 292,566,749,155,750,570,000,000,000,000,000,000,000 | 4 | arm64: Enforce BBM for huge IO/VMAP mappings
ioremap_page_range doesn't honour break-before-make and attempts to put
down huge mappings (using p*d_set_huge) over the top of pre-existing
table entries. This leads to us leaking page table memory and also gives
rise to TLB conflicts and spurious aborts, which have been seen in
practice on Cortex-A75.
Until this has been resolved, refuse to put block mappings when the
existing entry is found to be present.
Fixes: 324420bf91f60 ("arm64: add support for ioremap() block mappings")
Reported-by: Hanjun Guo <[email protected]>
Reported-by: Lei Li <[email protected]>
Acked-by: Ard Biesheuvel <[email protected]>
Signed-off-by: Will Deacon <[email protected]>
Signed-off-by: Catalin Marinas <[email protected]> |
static int mov_write_tfrf_tag(AVIOContext *pb, MOVMuxContext *mov,
MOVTrack *track, int entry)
{
int n = track->nb_frag_info - 1 - entry, i;
int size = 8 + 16 + 4 + 1 + 16*n;
static const uint8_t uuid[] = {
0xd4, 0x80, 0x7e, 0xf2, 0xca, 0x39, 0x46, 0x95,
0x8e, 0x54, 0x26, 0xcb, 0x9e, 0x46, 0xa7, 0x9f
};
if (entry < 0)
return 0;
avio_seek(pb, track->frag_info[entry].tfrf_offset, SEEK_SET);
avio_wb32(pb, size);
ffio_wfourcc(pb, "uuid");
avio_write(pb, uuid, sizeof(uuid));
avio_w8(pb, 1);
avio_wb24(pb, 0);
avio_w8(pb, n);
for (i = 0; i < n; i++) {
int index = entry + 1 + i;
avio_wb64(pb, track->frag_info[index].time);
avio_wb64(pb, track->frag_info[index].duration);
}
if (n < mov->ism_lookahead) {
int free_size = 16 * (mov->ism_lookahead - n);
avio_wb32(pb, free_size);
ffio_wfourcc(pb, "free");
ffio_fill(pb, 0, free_size - 8);
}
return 0;
} | 0 | [
"CWE-369"
] | FFmpeg | 2c0e98a0b478284bdff6d7a4062522605a8beae5 | 337,904,717,244,195,400,000,000,000,000,000,000,000 | 34 | avformat/movenc: Write version 2 of audio atom if channels is not known
Version 1 of the atom needs the channel count and would divide by 0
Fixes: division by 0
Fixes: fpe_movenc.c_1108_1.ogg
Fixes: fpe_movenc.c_1108_2.ogg
Fixes: fpe_movenc.c_1108_3.wav
Found-by: #CHEN HONGXU# <[email protected]>
Signed-off-by: Michael Niedermayer <[email protected]>
(cherry picked from commit fa19fbcf712a6a6cc5a5cfdc3254a97b9bce6582)
Signed-off-by: Michael Niedermayer <[email protected]> |
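A hedged sketch of the selection logic the commit describes: fall back to the version 2 sound sample description when the channel count is unknown, since the version 1 layout divides by it. The helper is hypothetical, not FFmpeg API.

/* Pick the sound sample description version; 0 channels would make the
 * version 1 layout divide by zero, so force version 2 in that case. */
static int audio_atom_version(int channels, int wanted_version)
{
    if (channels <= 0)
        return 2;            /* version 2 carries channels as a plain field */
    return wanted_version;   /* otherwise keep the caller's choice */
}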
static bool tcp_nagle_check(bool partial, const struct tcp_sock *tp,
int nonagle)
{
return partial &&
((nonagle & TCP_NAGLE_CORK) ||
(!nonagle && tp->packets_out && tcp_minshall_check(tp)));
} | 0 | [
"CWE-190"
] | net | 3b4929f65b0d8249f19a50245cd88ed1a2f78cff | 286,901,450,575,194,440,000,000,000,000,000,000,000 | 7 | tcp: limit payload size of sacked skbs
Jonathan Looney reported that TCP can trigger the following crash
in tcp_shifted_skb() :
BUG_ON(tcp_skb_pcount(skb) < pcount);
This can happen if the remote peer has advertized the smallest
MSS that linux TCP accepts : 48
An skb can hold 17 fragments, and each fragment can hold 32KB
on x86, or 64KB on PowerPC.
This means that the 16-bit width of TCP_SKB_CB(skb)->tcp_gso_segs
can overflow.
Note that tcp_sendmsg() builds skbs with less than 64KB
of payload, so this problem needs SACK to be enabled.
SACK blocks allow TCP to coalesce multiple skbs in the retransmit
queue, thus filling the 17 fragments to maximal capacity.
CVE-2019-11477 -- u16 overflow of TCP_SKB_CB(skb)->tcp_gso_segs
Fixes: 832d11c5cd07 ("tcp: Try to restore large SKBs while SACK processing")
Signed-off-by: Eric Dumazet <[email protected]>
Reported-by: Jonathan Looney <[email protected]>
Acked-by: Neal Cardwell <[email protected]>
Reviewed-by: Tyler Hicks <[email protected]>
Cc: Yuchung Cheng <[email protected]>
Cc: Bruce Curtis <[email protected]>
Cc: Jonathan Lemon <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
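A hedged sketch of the invariant behind the fix: with a floor on the MSS (TCP_MIN_SND_MSS is the name introduced by this patch series), the derived segment count provably fits the 16-bit tcp_gso_segs field. The clamp-and-saturate shape below is illustrative, not the exact kernel code.

#include <stdint.h>

#define TCP_MIN_SND_MSS 48u   /* floor introduced by the fix */

/* Derive a GSO segment count that always fits a u16 field. */
static uint16_t tso_segs_for(uint32_t payload_len, uint32_t mss)
{
    if (mss < TCP_MIN_SND_MSS)
        mss = TCP_MIN_SND_MSS;                  /* reject/clamp hostile MSS */
    uint32_t segs = (payload_len + mss - 1) / mss;
    return (uint16_t)(segs > 0xFFFF ? 0xFFFF : segs);
}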
ismode_w(const wchar_t *start, const wchar_t *end, int *permset)
{
const wchar_t *p;
if (start >= end)
return (0);
p = start;
*permset = 0;
while (p < end) {
switch (*p++) {
case L'r': case L'R':
*permset |= ARCHIVE_ENTRY_ACL_READ;
break;
case L'w': case L'W':
*permset |= ARCHIVE_ENTRY_ACL_WRITE;
break;
case L'x': case L'X':
*permset |= ARCHIVE_ENTRY_ACL_EXECUTE;
break;
case L'-':
break;
default:
return (0);
}
}
return (1);
} | 0 | [
"CWE-476"
] | libarchive | 15bf44fd2c1ad0e3fd87048b3fcc90c4dcff1175 | 287,783,309,549,417,960,000,000,000,000,000,000,000 | 27 | Skip 0-length ACL fields
Currently, it is possible to create an archive that crashes bsdtar
with a malformed ACL:
Program received signal SIGSEGV, Segmentation fault.
archive_acl_from_text_l (acl=<optimised out>, text=0x7e2e92 "", want_type=<optimised out>, sc=<optimised out>) at libarchive/archive_acl.c:1726
1726 switch (*s) {
(gdb) p n
$1 = 1
(gdb) p field[n]
$2 = {start = 0x0, end = 0x0}
Stop this by checking that the length is not zero before beginning
the switch statement.
I am pretty sure this is the bug mentioned in the qsym paper [1],
and I was able to replicate it with a qsym + AFL + afl-rb setup.
[1] https://www.usenix.org/conference/usenixsecurity18/presentation/yun |
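A minimal sketch of the guard the commit adds: treat a field whose start and end pointers coincide as empty and skip it before dereferencing its first character. The struct name is an assumption mirroring libarchive's internal field pairs.

#include <stddef.h>

struct acl_field { const wchar_t *start; const wchar_t *end; };

/* A 0-length field has nothing to parse; skip it before *f->start. */
static int field_is_nonempty(const struct acl_field *f)
{
    return f->start != NULL && f->start < f->end;
}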
static Reinst *emit(Reprog *prog, int opcode)
{
Reinst *inst = prog->end++;
inst->opcode = opcode;
inst->n = 0;
inst->c = 0;
inst->cc = NULL;
inst->x = inst->y = NULL;
return inst;
} | 0 | [
"CWE-703",
"CWE-674"
] | mujs | 160ae29578054dc09fd91e5401ef040d52797e61 | 321,605,466,382,150,360,000,000,000,000,000,000,000 | 10 | Issue #162: Check stack overflow during regexp compilation.
Only bother checking during the first compilation pass that counts
the size of the program. |
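A hedged sketch of bounding recursion during the counting pass as well as the emitting pass; REG_MAXREC and the helper names are assumptions, though mujs's compiler state does carry a jmp_buf for error escape.

#include <setjmp.h>

#define REG_MAXREC 1024            /* assumed depth limit */

struct cstate { int depth; jmp_buf kaboom; };

/* Track nesting depth while merely counting the program size, so a
 * deeply nested pattern fails cleanly instead of smashing the stack. */
static void count_enter(struct cstate *g)
{
    if (++g->depth > REG_MAXREC)
        longjmp(g->kaboom, 1);     /* report "regexp too complex" */
}

static void count_leave(struct cstate *g)
{
    --g->depth;
}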
header_cache_t *nntp_hcache_open(struct NntpData *nntp_data)
{
struct Url url;
char file[PATH_MAX];
if (!nntp_data->nserv || !nntp_data->nserv->cacheable ||
!nntp_data->nserv->conn || !nntp_data->group ||
!(nntp_data->newsrc_ent || nntp_data->subscribed || SaveUnsubscribed))
{
return NULL;
}
mutt_account_tourl(&nntp_data->nserv->conn->account, &url);
url.path = nntp_data->group;
url_tostring(&url, file, sizeof(file), U_PATH);
return mutt_hcache_open(NewsCacheDir, file, nntp_hcache_namer);
} | 0 | [
"CWE-119",
"CWE-787"
] | neomutt | 6296f7153f0c9d5e5cd3aaf08f9731e56621bdd3 | 1,695,834,195,095,676,800,000,000,000,000,000,000 | 17 | Set length modifiers for group and desc
nntp_add_group parses a line controlled by the connected nntp server.
Restrict the maximum lengths read into the stack buffers group, and
desc. |
struct nfc_se *nfc_find_se(struct nfc_dev *dev, u32 se_idx)
{
struct nfc_se *se;
list_for_each_entry(se, &dev->secure_elements, list)
if (se->idx == se_idx)
return se;
return NULL;
} | 0 | [] | linux | 3e3b5dfcd16a3e254aab61bd1e8c417dd4503102 | 28,765,896,531,555,140,000,000,000,000,000,000,000 | 10 | NFC: reorder the logic in nfc_{un,}register_device
There is a potential UAF between the unregistration routine and the NFC
netlink operations.
The race that causes this UAF is shown below:
(FREE) | (USE)
nfcmrvl_nci_unregister_dev | nfc_genl_dev_up
nci_close_device |
nci_unregister_device | nfc_get_device
nfc_unregister_device | nfc_dev_up
rfkill_destory |
device_del | rfkill_blocked
... | ...
The root cause of this race is summarized below:
1. The rfkill_blocked (USE) in nfc_dev_up is supposed to be placed after
the device_is_registered check.
2. Since the netlink operations are possible just after the device_add
in nfc_register_device, the nfc_dev_up() can happen anywhere during the
rfkill creation process, which leads to data race.
This patch reorders these actions so that:
1. Once device_del is finished, nfc_dev_up cannot dereference the
rfkill object.
2. rfkill_register is placed after the device_add of nfc_dev
because the parent device needs to be created first; the patch keeps
that order but injects device_lock to prevent the data race.
Signed-off-by: Lin Ma <[email protected]>
Fixes: be055b2f89b5 ("NFC: RFKILL support")
Reviewed-by: Jakub Kicinski <[email protected]>
Reviewed-by: Krzysztof Kozlowski <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Jakub Kicinski <[email protected]> |
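A userspace analogue of the reordering plus locking described above: the registered state and the rfkill check are observed under one lock, so an op racing with unregistration sees -ENODEV instead of touching a torn-down rfkill. Names and error codes are illustrative, not the kernel's.

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>

struct nfc_dev {
    pthread_mutex_t lock;   /* stand-in for device_lock() */
    bool registered;        /* flipped under the lock by (un)register */
    bool rfkill_blocked;
};

static int nfc_dev_up(struct nfc_dev *dev)
{
    int rc = 0;

    pthread_mutex_lock(&dev->lock);
    if (!dev->registered)
        rc = -ENODEV;               /* device already torn down */
    else if (dev->rfkill_blocked)
        rc = -ERFKILL;              /* rfkill_blocked() in the kernel */
    pthread_mutex_unlock(&dev->lock);
    return rc;
}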
ByteVector &ByteVector::replace(const ByteVector &pattern, const ByteVector &with)
{
if(pattern.size() == 0 || pattern.size() > size())
return *this;
const uint withSize = with.size();
const uint patternSize = pattern.size();
int offset = 0;
if(withSize == patternSize) {
// I think this case might be common enough to optimize it
detach();
offset = find(pattern);
while(offset >= 0) {
::memcpy(data() + offset, with.data(), withSize);
offset = find(pattern, offset + withSize);
}
return *this;
}
// calculate new size:
uint newSize = 0;
for(;;) {
int next = find(pattern, offset);
if(next < 0) {
if(offset == 0)
// pattern not found, do nothing:
return *this;
newSize += size() - offset;
break;
}
newSize += (next - offset) + withSize;
offset = next + patternSize;
}
// new private data of appropriate size:
ByteVectorPrivate *newData = new ByteVectorPrivate(newSize, 0);
char *target = DATA(newData);
const char *source = data();
// copy modified data into new private data:
offset = 0;
for(;;) {
int next = find(pattern, offset);
if(next < 0) {
::memcpy(target, source + offset, size() - offset);
break;
}
int chunkSize = next - offset;
::memcpy(target, source + offset, chunkSize);
target += chunkSize;
::memcpy(target, with.data(), withSize);
target += withSize;
offset += chunkSize + patternSize;
}
// replace private data:
if(d->deref())
delete d;
d = newData;
return *this;
} | 0 | [
"CWE-189"
] | taglib | dcdf4fd954e3213c355746fa15b7480461972308 | 26,261,867,392,398,430,000,000,000,000,000,000,000 | 64 | Avoid uint overflow in case the length + index is over UINT_MAX |
static int su3000_identify_state(struct usb_device *udev,
struct dvb_usb_device_properties *props,
struct dvb_usb_device_description **desc,
int *cold)
{
info("%s", __func__);
*cold = 0;
return 0;
} | 0 | [
"CWE-476",
"CWE-119"
] | linux | 606142af57dad981b78707234cfbd15f9f7b7125 | 154,290,980,471,884,400,000,000,000,000,000,000,000 | 10 | [media] dw2102: don't do DMA on stack
On Kernel 4.9, WARNINGs about doing DMA on stack are hit in
the dw2102 driver: one in su3000_power_ctrl() and the other in tt_s2_4600_frontend_attach().
Both were due to the use of buffers on the stack as parameters to
dvb_usb_generic_rw() and the resulting attempt to do DMA with them.
The device was non-functional as a result.
So, switch this driver over to use a buffer within the device state
structure, as has been done with other DVB-USB drivers.
Tested with TechnoTrend TT-connect S2-4600.
[[email protected]: fixed a warning at su3000_i2c_transfer() where the
state var was dereferenced before checking 'd']
Signed-off-by: Jonathan McDowell <[email protected]>
Cc: <[email protected]>
Signed-off-by: Mauro Carvalho Chehab <[email protected]> |
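A hedged sketch of the pattern the fix applies: stage USB transfer bytes in a buffer embedded in the heap-allocated device state, never on the caller's stack, since stack memory may not be DMA-able. The struct and helper names are assumptions.

#include <string.h>

struct su3000_state {
    unsigned char data[64];   /* lives in the kmalloc'ed device state */
};

/* Copy the command into state memory before handing it to the USB core. */
static void stage_cmd(struct su3000_state *st,
                      const unsigned char *cmd, size_t n)
{
    if (n > sizeof(st->data))
        n = sizeof(st->data);
    memcpy(st->data, cmd, n); /* dvb_usb_generic_rw() then uses st->data */
}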
static MagickBooleanType WriteOneJNGImage(MngInfo *mng_info,
const ImageInfo *image_info,Image *image)
{
Image
*jpeg_image;
ImageInfo
*jpeg_image_info;
int
unique_filenames;
MagickBooleanType
logging,
status;
size_t
length;
unsigned char
*blob,
chunk[80],
*p;
unsigned int
jng_alpha_compression_method,
jng_alpha_sample_depth,
jng_color_type,
transparent;
size_t
jng_alpha_quality,
jng_quality;
logging=LogMagickEvent(CoderEvent,GetMagickModule(),
" Enter WriteOneJNGImage()");
blob=(unsigned char *) NULL;
jpeg_image=(Image *) NULL;
jpeg_image_info=(ImageInfo *) NULL;
length=0;
unique_filenames=0;
status=MagickTrue;
transparent=image_info->type==GrayscaleMatteType ||
image_info->type==TrueColorMatteType || image->matte != MagickFalse;
jng_alpha_sample_depth = 0;
jng_quality=image_info->quality == 0UL ? 75UL : image_info->quality%1000;
jng_alpha_compression_method=image->compression==JPEGCompression? 8 : 0;
jng_alpha_quality=image_info->quality == 0UL ? 75UL :
image_info->quality;
if (jng_alpha_quality >= 1000)
jng_alpha_quality /= 1000;
if (transparent != 0)
{
jng_color_type=14;
/* Create JPEG blob, image, and image_info */
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Creating jpeg_image_info for opacity.");
jpeg_image_info=(ImageInfo *) CloneImageInfo(image_info);
if (jpeg_image_info == (ImageInfo *) NULL)
{
jpeg_image_info=DestroyImageInfo(jpeg_image_info);
ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
}
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Creating jpeg_image.");
jpeg_image=CloneImage(image,0,0,MagickTrue,&image->exception);
if (jpeg_image == (Image *) NULL)
{
jpeg_image_info=DestroyImageInfo(jpeg_image_info);
ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
}
(void) CopyMagickString(jpeg_image->magick,"JPEG",MaxTextExtent);
status=SeparateImageChannel(jpeg_image,OpacityChannel);
if (status == MagickFalse)
ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
status=NegateImage(jpeg_image,MagickFalse);
if (status == MagickFalse)
ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
jpeg_image->matte=MagickFalse;
jpeg_image_info->type=GrayscaleType;
jpeg_image->quality=jng_alpha_quality;
jpeg_image_info->type=GrayscaleType;
(void) SetImageType(jpeg_image,GrayscaleType);
(void) AcquireUniqueFilename(jpeg_image->filename);
unique_filenames++;
(void) FormatLocaleString(jpeg_image_info->filename,MaxTextExtent,
"%s",jpeg_image->filename);
}
else
{
jng_alpha_compression_method=0;
jng_color_type=10;
jng_alpha_sample_depth=0;
}
/* To do: check bit depth of PNG alpha channel */
/* Check if image is grayscale. */
if (image_info->type != TrueColorMatteType && image_info->type !=
TrueColorType && SetImageGray(image,&image->exception))
jng_color_type-=2;
if (logging != MagickFalse)
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" JNG Quality = %d",(int) jng_quality);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" JNG Color Type = %d",jng_color_type);
if (transparent != 0)
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" JNG Alpha Compression = %d",jng_alpha_compression_method);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" JNG Alpha Depth = %d",jng_alpha_sample_depth);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" JNG Alpha Quality = %d",(int) jng_alpha_quality);
}
}
if (transparent != 0)
{
if (jng_alpha_compression_method==0)
{
const char
*value;
/* Encode opacity as a grayscale PNG blob */
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Creating PNG blob for alpha.");
status=OpenBlob(jpeg_image_info,jpeg_image,WriteBinaryBlobMode,
&image->exception);
if (status == MagickFalse)
ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
length=0;
(void) CopyMagickString(jpeg_image_info->magick,"PNG",MaxTextExtent);
(void) CopyMagickString(jpeg_image->magick,"PNG",MaxTextExtent);
jpeg_image_info->interlace=NoInterlace;
/* Exclude all ancillary chunks */
(void) SetImageArtifact(jpeg_image,"png:exclude-chunks","all");
blob=ImageToBlob(jpeg_image_info,jpeg_image,&length,
&image->exception);
/* Retrieve sample depth used */
value=GetImageProperty(jpeg_image,"png:bit-depth-written");
if (value != (char *) NULL)
jng_alpha_sample_depth= (unsigned int) value[0];
}
else
{
/* Encode opacity as a grayscale JPEG blob */
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Creating JPEG blob for alpha.");
status=OpenBlob(jpeg_image_info,jpeg_image,WriteBinaryBlobMode,
&image->exception);
if (status == MagickFalse)
ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
(void) CopyMagickString(jpeg_image_info->magick,"JPEG",MaxTextExtent);
(void) CopyMagickString(jpeg_image->magick,"JPEG",MaxTextExtent);
jpeg_image_info->interlace=NoInterlace;
blob=ImageToBlob(jpeg_image_info,jpeg_image,&length,
&image->exception);
jng_alpha_sample_depth=8;
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Successfully read jpeg_image into a blob, length=%.20g.",
(double) length);
}
/* Destroy JPEG image and image_info */
jpeg_image=DestroyImage(jpeg_image);
(void) RelinquishUniqueFileResource(jpeg_image_info->filename);
unique_filenames--;
jpeg_image_info=DestroyImageInfo(jpeg_image_info);
}
/* Write JHDR chunk */
(void) WriteBlobMSBULong(image,16L); /* chunk data length=16 */
PNGType(chunk,mng_JHDR);
LogPNGChunk(logging,mng_JHDR,16L);
PNGLong(chunk+4,(png_uint_32) image->columns);
PNGLong(chunk+8,(png_uint_32) image->rows);
chunk[12]=jng_color_type;
chunk[13]=8; /* sample depth */
chunk[14]=8; /*jng_image_compression_method */
chunk[15]=(unsigned char) (image_info->interlace == NoInterlace ? 0 : 8);
chunk[16]=jng_alpha_sample_depth;
chunk[17]=jng_alpha_compression_method;
chunk[18]=0; /*jng_alpha_filter_method */
chunk[19]=0; /*jng_alpha_interlace_method */
(void) WriteBlob(image,20,chunk);
(void) WriteBlobMSBULong(image,crc32(0,chunk,20));
if (logging != MagickFalse)
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" JNG width:%15lu",(unsigned long) image->columns);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" JNG height:%14lu",(unsigned long) image->rows);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" JNG color type:%10d",jng_color_type);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" JNG sample depth:%8d",8);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" JNG compression:%9d",8);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" JNG interlace:%11d",0);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" JNG alpha depth:%9d",jng_alpha_sample_depth);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" JNG alpha compression:%3d",jng_alpha_compression_method);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" JNG alpha filter:%8d",0);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" JNG alpha interlace:%5d",0);
}
/*
Write leading ancillary chunks
*/
if (transparent != 0)
{
/*
Write JNG bKGD chunk
*/
unsigned char
blue,
green,
red;
ssize_t
num_bytes;
if (jng_color_type == 8 || jng_color_type == 12)
num_bytes=6L;
else
num_bytes=10L;
(void) WriteBlobMSBULong(image,(size_t) (num_bytes-4L));
PNGType(chunk,mng_bKGD);
LogPNGChunk(logging,mng_bKGD,(size_t) (num_bytes-4L));
red=ScaleQuantumToChar(image->background_color.red);
green=ScaleQuantumToChar(image->background_color.green);
blue=ScaleQuantumToChar(image->background_color.blue);
*(chunk+4)=0;
*(chunk+5)=red;
*(chunk+6)=0;
*(chunk+7)=green;
*(chunk+8)=0;
*(chunk+9)=blue;
(void) WriteBlob(image,(size_t) num_bytes,chunk);
(void) WriteBlobMSBULong(image,crc32(0,chunk,(uInt) num_bytes));
}
if ((image->colorspace == sRGBColorspace || image->rendering_intent))
{
/*
Write JNG sRGB chunk
*/
(void) WriteBlobMSBULong(image,1L);
PNGType(chunk,mng_sRGB);
LogPNGChunk(logging,mng_sRGB,1L);
if (image->rendering_intent != UndefinedIntent)
chunk[4]=(unsigned char)
Magick_RenderingIntent_to_PNG_RenderingIntent(
(image->rendering_intent));
else
chunk[4]=(unsigned char)
Magick_RenderingIntent_to_PNG_RenderingIntent(
(PerceptualIntent));
(void) WriteBlob(image,5,chunk);
(void) WriteBlobMSBULong(image,crc32(0,chunk,5));
}
else
{
if (image->gamma != 0.0)
{
/*
Write JNG gAMA chunk
*/
(void) WriteBlobMSBULong(image,4L);
PNGType(chunk,mng_gAMA);
LogPNGChunk(logging,mng_gAMA,4L);
PNGLong(chunk+4,(png_uint_32) (100000*image->gamma+0.5));
(void) WriteBlob(image,8,chunk);
(void) WriteBlobMSBULong(image,crc32(0,chunk,8));
}
if ((mng_info->equal_chrms == MagickFalse) &&
(image->chromaticity.red_primary.x != 0.0))
{
PrimaryInfo
primary;
/*
Write JNG cHRM chunk
*/
(void) WriteBlobMSBULong(image,32L);
PNGType(chunk,mng_cHRM);
LogPNGChunk(logging,mng_cHRM,32L);
primary=image->chromaticity.white_point;
PNGLong(chunk+4,(png_uint_32) (100000*primary.x+0.5));
PNGLong(chunk+8,(png_uint_32) (100000*primary.y+0.5));
primary=image->chromaticity.red_primary;
PNGLong(chunk+12,(png_uint_32) (100000*primary.x+0.5));
PNGLong(chunk+16,(png_uint_32) (100000*primary.y+0.5));
primary=image->chromaticity.green_primary;
PNGLong(chunk+20,(png_uint_32) (100000*primary.x+0.5));
PNGLong(chunk+24,(png_uint_32) (100000*primary.y+0.5));
primary=image->chromaticity.blue_primary;
PNGLong(chunk+28,(png_uint_32) (100000*primary.x+0.5));
PNGLong(chunk+32,(png_uint_32) (100000*primary.y+0.5));
(void) WriteBlob(image,36,chunk);
(void) WriteBlobMSBULong(image,crc32(0,chunk,36));
}
}
if (image->x_resolution && image->y_resolution && !mng_info->equal_physs)
{
/*
Write JNG pHYs chunk
*/
(void) WriteBlobMSBULong(image,9L);
PNGType(chunk,mng_pHYs);
LogPNGChunk(logging,mng_pHYs,9L);
if (image->units == PixelsPerInchResolution)
{
PNGLong(chunk+4,(png_uint_32)
(image->x_resolution*100.0/2.54+0.5));
PNGLong(chunk+8,(png_uint_32)
(image->y_resolution*100.0/2.54+0.5));
chunk[12]=1;
}
else
{
if (image->units == PixelsPerCentimeterResolution)
{
PNGLong(chunk+4,(png_uint_32)
(image->x_resolution*100.0+0.5));
PNGLong(chunk+8,(png_uint_32)
(image->y_resolution*100.0+0.5));
chunk[12]=1;
}
else
{
PNGLong(chunk+4,(png_uint_32) (image->x_resolution+0.5));
PNGLong(chunk+8,(png_uint_32) (image->y_resolution+0.5));
chunk[12]=0;
}
}
(void) WriteBlob(image,13,chunk);
(void) WriteBlobMSBULong(image,crc32(0,chunk,13));
}
if (mng_info->write_mng == 0 && (image->page.x || image->page.y))
{
/*
Write JNG oFFs chunk
*/
(void) WriteBlobMSBULong(image,9L);
PNGType(chunk,mng_oFFs);
LogPNGChunk(logging,mng_oFFs,9L);
PNGsLong(chunk+4,(ssize_t) (image->page.x));
PNGsLong(chunk+8,(ssize_t) (image->page.y));
chunk[12]=0;
(void) WriteBlob(image,13,chunk);
(void) WriteBlobMSBULong(image,crc32(0,chunk,13));
}
if (transparent != 0)
{
if (jng_alpha_compression_method==0)
{
register ssize_t
i;
size_t
len;
/* Write IDAT chunk header */
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Write IDAT chunks from blob, length=%.20g.",(double)
length);
/* Copy IDAT chunks */
len=0;
p=blob+8;
for (i=8; i<(ssize_t) length; i+=len+12)
{
len=(((unsigned int) *(p ) & 0xff) << 24) +
(((unsigned int) *(p + 1) & 0xff) << 16) +
(((unsigned int) *(p + 2) & 0xff) << 8) +
(((unsigned int) *(p + 3) & 0xff) ) ;
p+=4;
if (*(p)==73 && *(p+1)==68 && *(p+2)==65 && *(p+3)==84) /* IDAT */
{
/* Found an IDAT chunk. */
(void) WriteBlobMSBULong(image,len);
LogPNGChunk(logging,mng_IDAT,len);
(void) WriteBlob(image,len+4,p);
(void) WriteBlobMSBULong(image,crc32(0,p,(uInt) len+4));
}
else
{
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Skipping %c%c%c%c chunk, length=%.20g.",
*(p),*(p+1),*(p+2),*(p+3),(double) len);
}
p+=(8+len);
}
}
else if (length != 0)
{
/* Write JDAA chunk header */
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Write JDAA chunk, length=%.20g.",(double) length);
(void) WriteBlobMSBULong(image,(size_t) length);
PNGType(chunk,mng_JDAA);
LogPNGChunk(logging,mng_JDAA,length);
/* Write JDAT chunk(s) data */
(void) WriteBlob(image,4,chunk);
(void) WriteBlob(image,length,blob);
(void) WriteBlobMSBULong(image,crc32(crc32(0,chunk,4),blob,
(uInt) length));
}
blob=(unsigned char *) RelinquishMagickMemory(blob);
}
/* Encode image as a JPEG blob */
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Creating jpeg_image_info.");
jpeg_image_info=(ImageInfo *) CloneImageInfo(image_info);
if (jpeg_image_info == (ImageInfo *) NULL)
ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Creating jpeg_image.");
jpeg_image=CloneImage(image,0,0,MagickTrue,&image->exception);
if (jpeg_image == (Image *) NULL)
ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
(void) CopyMagickString(jpeg_image->magick,"JPEG",MaxTextExtent);
(void) AcquireUniqueFilename(jpeg_image->filename);
unique_filenames++;
(void) FormatLocaleString(jpeg_image_info->filename,MaxTextExtent,"%s",
jpeg_image->filename);
status=OpenBlob(jpeg_image_info,jpeg_image,WriteBinaryBlobMode,
&image->exception);
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Created jpeg_image, %.20g x %.20g.",(double) jpeg_image->columns,
(double) jpeg_image->rows);
if (status == MagickFalse)
ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
if (jng_color_type == 8 || jng_color_type == 12)
jpeg_image_info->type=GrayscaleType;
jpeg_image_info->quality=jng_quality;
jpeg_image->quality=jng_quality;
(void) CopyMagickString(jpeg_image_info->magick,"JPEG",MaxTextExtent);
(void) CopyMagickString(jpeg_image->magick,"JPEG",MaxTextExtent);
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Creating blob.");
blob=ImageToBlob(jpeg_image_info,jpeg_image,&length,&image->exception);
if (logging != MagickFalse)
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Successfully read jpeg_image into a blob, length=%.20g.",
(double) length);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Write JDAT chunk, length=%.20g.",(double) length);
}
/* Write JDAT chunk(s) */
(void) WriteBlobMSBULong(image,(size_t) length);
PNGType(chunk,mng_JDAT);
LogPNGChunk(logging,mng_JDAT,length);
(void) WriteBlob(image,4,chunk);
(void) WriteBlob(image,length,blob);
(void) WriteBlobMSBULong(image,crc32(crc32(0,chunk,4),blob,(uInt) length));
jpeg_image=DestroyImage(jpeg_image);
(void) RelinquishUniqueFileResource(jpeg_image_info->filename);
unique_filenames--;
jpeg_image_info=DestroyImageInfo(jpeg_image_info);
blob=(unsigned char *) RelinquishMagickMemory(blob);
/* Write IEND chunk */
(void) WriteBlobMSBULong(image,0L);
PNGType(chunk,mng_IEND);
LogPNGChunk(logging,mng_IEND,0);
(void) WriteBlob(image,4,chunk);
(void) WriteBlobMSBULong(image,crc32(0,chunk,4));
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" exit WriteOneJNGImage(); unique_filenames=%d",unique_filenames);
return(status);
} | 0 | [
"CWE-835"
] | ImageMagick6 | 9eda4b36a8695e4a0cd27bea28b9c173c68a01ec | 313,211,455,340,796,100,000,000,000,000,000,000,000 | 569 | Fixed infinite loop (#1095). |
uint32_t smb1cli_req_seqnum(struct tevent_req *req)
{
struct smbXcli_req_state *state =
tevent_req_data(req,
struct smbXcli_req_state);
return state->smb1.seqnum;
} | 0 | [
"CWE-20"
] | samba | a819d2b440aafa3138d95ff6e8b824da885a70e9 | 60,697,085,277,469,100,000,000,000,000,000,000,000 | 8 | CVE-2015-5296: libcli/smb: make sure we require signing when we demand encryption on a session
BUG: https://bugzilla.samba.org/show_bug.cgi?id=11536
Signed-off-by: Stefan Metzmacher <[email protected]>
Reviewed-by: Jeremy Allison <[email protected]> |
TfLiteStatus Subgraph::AddNodeWithParameters(
const std::vector<int>& inputs, const std::vector<int>& outputs,
const std::vector<int>& intermediates, const char* init_data,
size_t init_data_size, void* builtin_data,
const TfLiteRegistration* registration, int* node_index) {
std::unique_ptr<void, decltype(free)*> builtin_data_deleter(builtin_data,
free);
if (state_ == kStateInvokableAndImmutable) {
ReportError("AddNodeWithParameters is disallowed when graph is immutable.");
return kTfLiteError;
}
state_ = kStateUninvokable;
TF_LITE_ENSURE_OK(&context_, CheckTensorIndices("node inputs", inputs.data(),
inputs.size()));
TF_LITE_ENSURE_OK(
&context_,
CheckTensorIndices("node outputs", outputs.data(), outputs.size()));
int new_node_index = nodes_and_registration_.size();
if (node_index) *node_index = new_node_index;
nodes_and_registration_.resize(nodes_and_registration_.size() + 1);
auto& node_and_reg = nodes_and_registration_.back();
TfLiteNode& node = node_and_reg.first;
if (node.inputs) TfLiteIntArrayFree(node.inputs);
if (node.outputs) TfLiteIntArrayFree(node.outputs);
if (node.intermediates) TfLiteIntArrayFree(node.intermediates);
if (node.temporaries) TfLiteIntArrayFree(node.temporaries);
// NOTE, here we are not using move semantics yet, since our internal
// representation isn't std::vector, but in the future we would like to avoid
// copies, so we want the interface to take r-value references now.
node.inputs = ConvertVectorToTfLiteIntArray(inputs);
node.outputs = ConvertVectorToTfLiteIntArray(outputs);
node.intermediates = ConvertVectorToTfLiteIntArray(intermediates);
node.temporaries = TfLiteIntArrayCreate(0);
if (init_data) {
node.user_data = OpInit(*registration, init_data, init_data_size);
} else {
node.user_data = OpInit(
*registration, static_cast<const char*>(builtin_data_deleter.get()), 0);
}
node.builtin_data = builtin_data_deleter.release();
// TODO(ycling): Filling `custom_initial_data` and `custom_initial_data_size`
// properly for nodes generated by ReplaceNodeSubsetsWithDelegateKernels.
if (registration->builtin_code == BuiltinOperator_CUSTOM) {
// When it's a CUSTOM op, the `custom_options` field in the Flatbuffer
// `Operator` table is passed in.
node.custom_initial_data = init_data;
node.custom_initial_data_size = init_data_size;
} else {
node.custom_initial_data = nullptr;
node.custom_initial_data_size = 0;
}
node.delegate = nullptr;
// Copying of registration is required to support unresolved custom ops.
node_and_reg.second = *registration;
execution_plan_.push_back(new_node_index);
return kTfLiteOk;
} | 1 | [
"CWE-20",
"CWE-787"
] | tensorflow | d58c96946b2880991d63d1dacacb32f0a4dfa453 | 235,526,692,593,501,700,000,000,000,000,000,000,000 | 63 | [tflite] Ensure inputs and outputs don't overlap.
If a model uses the same tensor for both an input and an output then this can result in data loss and memory corruption. This should not happen.
PiperOrigin-RevId: 332522916
Change-Id: If0905b142415a9dfceaf2d181872f2a8fb88f48a |
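The function above is recorded as vulnerable (target = 1). A hedged C sketch of the invariant the fix enforces — no tensor index may appear in both a node's inputs and outputs — follows; the real check lives in C++ inside Subgraph.

#include <stdbool.h>

/* Reject a node whose input and output tensor index sets overlap. */
static bool tensors_disjoint(const int *inputs, int n_in,
                             const int *outputs, int n_out)
{
    for (int i = 0; i < n_in; i++)
        for (int j = 0; j < n_out; j++)
            if (inputs[i] == outputs[j])
                return false;   /* same tensor used as input and output */
    return true;
}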
const char *lj_debug_uvname(GCproto *pt, uint32_t idx)
{
const uint8_t *p = proto_uvinfo(pt);
lua_assert(idx < pt->sizeuv);
if (!p) return "";
if (idx) while (*p++ || --idx) ;
return (const char *)p;
} | 0 | [
"CWE-125"
] | LuaJIT | e296f56b825c688c3530a981dc6b495d972f3d01 | 330,623,531,118,087,100,000,000,000,000,000,000,000 | 8 | Call error function on rethrow after trace exit. |
eap_input(unit, inp, inlen)
int unit;
u_char *inp;
int inlen;
{
eap_state *esp = &eap_states[unit];
u_char code, id;
int len;
/*
* Parse header (code, id and length). If packet too short,
* drop it.
*/
if (inlen < EAP_HEADERLEN) {
error("EAP: packet too short: %d < %d", inlen, EAP_HEADERLEN);
return;
}
GETCHAR(code, inp);
GETCHAR(id, inp);
GETSHORT(len, inp);
if (len < EAP_HEADERLEN || len > inlen) {
error("EAP: packet has illegal length field %d (%d..%d)", len,
EAP_HEADERLEN, inlen);
return;
}
len -= EAP_HEADERLEN;
/* Dispatch based on message code */
switch (code) {
case EAP_REQUEST:
eap_request(esp, inp, id, len);
break;
case EAP_RESPONSE:
eap_response(esp, inp, id, len);
break;
case EAP_SUCCESS:
eap_success(esp, inp, id, len);
break;
case EAP_FAILURE:
eap_failure(esp, inp, id, len);
break;
default: /* XXX Need code reject */
/* Note: it's not legal to send EAP Nak here. */
warn("EAP: unknown code %d received", code);
break;
}
} | 0 | [
"CWE-120",
"CWE-787"
] | ppp | 8d7970b8f3db727fe798b65f3377fe6787575426 | 281,698,668,037,436,600,000,000,000,000,000,000,000 | 51 | pppd: Fix bounds check in EAP code
Given that we have just checked vallen < len, it can never be the case
that vallen >= len + sizeof(rhostname). This fixes the check so we
actually avoid overflowing the rhostname array.
Reported-by: Ilja Van Sprundel <[email protected]>
Signed-off-by: Paul Mackerras <[email protected]> |
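A hedged sketch of the corrected bounds check: the peer name occupies (len - vallen) bytes, so that quantity, not vallen, must be compared against the destination size. RHOSTNAME_SZ mirrors sizeof(rhostname) in eap.c and is an assumption; the caller is assumed to have already ensured vallen < len.

#include <string.h>

#define RHOSTNAME_SZ 256   /* assumed sizeof(rhostname) in eap.c */

static void copy_peer_name(char rhostname[RHOSTNAME_SZ],
                           const unsigned char *inp, int vallen, int len)
{
    int name_len = len - vallen;          /* bytes following the value */

    if (name_len >= RHOSTNAME_SZ)         /* was: vallen >= len + sizeof() */
        name_len = RHOSTNAME_SZ - 1;      /* trim really long peer names */
    memcpy(rhostname, inp + vallen, name_len);
    rhostname[name_len] = '\0';
}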
static void clusterip_tg_destroy(const struct xt_tgdtor_param *par)
{
const struct ipt_clusterip_tgt_info *cipinfo = par->targinfo;
/* if no more entries are referencing the config, remove it
* from the list and destroy the proc entry */
clusterip_config_entry_put(cipinfo->config);
clusterip_config_put(cipinfo->config);
nf_ct_l3proto_module_put(par->family);
} | 0 | [
"CWE-120"
] | linux-2.6 | 961ed183a9fd080cf306c659b8736007e44065a5 | 46,091,060,368,745,280,000,000,000,000,000,000,000 | 12 | netfilter: ipt_CLUSTERIP: fix buffer overflow
'buffer' string is copied from userspace. It is not checked whether it is
zero terminated. This may lead to overflow inside of simple_strtoul().
Changli Gao suggested to copy not more than user supplied 'size' bytes.
It was introduced before the git epoch. Files "ipt_CLUSTERIP/*" are
writable only by root by default; however, on some setups permissions might be
relaxed to e.g. a network admin user.
Signed-off-by: Vasiliy Kulikov <[email protected]>
Acked-by: Changli Gao <[email protected]>
Signed-off-by: Patrick McHardy <[email protected]> |
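A hedged sketch of the bounded copy suggested above; PROC_WRITELEN is the name used by the upstream fix, and memcpy stands in for copy_from_user.

#include <errno.h>
#include <string.h>

#define PROC_WRITELEN 10   /* name taken from the upstream fix */

/* Copy at most `size` user bytes and NUL-terminate before any
 * simple_strtoul()-style parsing touches the buffer. */
static int copy_proc_input(char buffer[PROC_WRITELEN + 1],
                           const char *input, size_t size)
{
    if (size > PROC_WRITELEN)
        return -EIO;               /* never read more than the buffer holds */
    memcpy(buffer, input, size);   /* copy_from_user() in the kernel */
    buffer[size] = '\0';
    return 0;
}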
empathy_adium_info_get_available_variants (GHashTable *info)
{
GPtrArray *variants;
const gchar *path;
gchar *dirpath;
GDir *dir;
variants = tp_asv_get_boxed (info, "AvailableVariants", G_TYPE_PTR_ARRAY);
if (variants != NULL) {
return variants;
}
variants = g_ptr_array_new_with_free_func (g_free);
tp_asv_take_boxed (info, g_strdup ("AvailableVariants"),
G_TYPE_PTR_ARRAY, variants);
path = tp_asv_get_string (info, "path");
dirpath = g_build_filename (path, "Contents", "Resources", "Variants", NULL);
dir = g_dir_open (dirpath, 0, NULL);
if (dir != NULL) {
const gchar *name;
for (name = g_dir_read_name (dir);
name != NULL;
name = g_dir_read_name (dir)) {
gchar *display_name;
if (!g_str_has_suffix (name, ".css")) {
continue;
}
display_name = g_strdup (name);
strstr (display_name, ".css")[0] = '\0';
g_ptr_array_add (variants, display_name);
}
g_dir_close (dir);
}
g_free (dirpath);
if (adium_info_get_version (info) <= 2) {
g_ptr_array_add (variants,
g_strdup (adium_info_get_no_variant_name (info)));
}
return variants;
} | 0 | [
"CWE-79"
] | empathy | 739aca418457de752be13721218aaebc74bd9d36 | 280,137,232,346,788,870,000,000,000,000,000,000,000 | 46 | theme_adium_append_message: escape alias before displaying it
Not doing so can lead to nasty HTML injection from hostile users.
https://bugzilla.gnome.org/show_bug.cgi?id=662035 |
validate_nonempty_seq(asdl_seq *seq, const char *what, const char *owner)
{
if (asdl_seq_LEN(seq))
return 1;
PyErr_Format(PyExc_ValueError, "empty %s on %s", what, owner);
return 0;
} | 0 | [
"CWE-125"
] | cpython | a4d78362397fc3bced6ea80fbc7b5f4827aec55e | 264,136,294,366,083,300,000,000,000,000,000,000,000 | 7 | bpo-36495: Fix two out-of-bounds array reads (GH-12641)
Research and fix by @bradlarsen. |
strcat_capa_from_static(UChar* dest, UChar* dest_end,
const UChar* src, const UChar* src_end, size_t capa)
{
UChar* r;
r = (UChar* )xmalloc(capa + 1);
CHECK_NULL_RETURN(r);
onig_strcpy(r, dest, dest_end);
onig_strcpy(r + (dest_end - dest), src, src_end);
return r;
} | 0 | [
"CWE-476"
] | Onigmo | 00cc7e28a3ed54b3b512ef3b58ea737a57acf1f9 | 333,649,525,275,776,570,000,000,000,000,000,000,000 | 11 | Fix SEGV in onig_error_code_to_str() (Fix #132)
When onig_new(ONIG_SYNTAX_PERL) fails with ONIGERR_INVALID_GROUP_NAME,
onig_error_code_to_str() crashes.
onig_scan_env_set_error_string() should have been used when returning
ONIGERR_INVALID_GROUP_NAME. |
C_MgrProxyCommand(Monitor *mon, MonOpRequestRef op, uint64_t s)
: mon(mon), op(op), size(s) { } | 0 | [
"CWE-287",
"CWE-284"
] | ceph | 5ead97120e07054d80623dada90a5cc764c28468 | 227,277,607,290,344,870,000,000,000,000,000,000,000 | 2 | auth/cephx: add authorizer challenge
Allow the accepting side of a connection to reject an initial authorizer
with a random challenge. The connecting side then has to respond with an
updated authorizer proving they are able to decrypt the service's challenge
and that the new authorizer was produced for this specific connection
instance.
The accepting side requires this challenge and response unconditionally
if the client side advertises they have the feature bit. Servers wishing
to require this improved level of authentication simply have to require
the appropriate feature.
Signed-off-by: Sage Weil <[email protected]>
(cherry picked from commit f80b848d3f830eb6dba50123e04385173fa4540b)
# Conflicts:
# src/auth/Auth.h
# src/auth/cephx/CephxProtocol.cc
# src/auth/cephx/CephxProtocol.h
# src/auth/none/AuthNoneProtocol.h
# src/msg/Dispatcher.h
# src/msg/async/AsyncConnection.cc
- const_iterator
- ::decode vs decode
- AsyncConnection ctor arg noise
- get_random_bytes(), not cct->random() |
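A heavily simplified, hedged sketch of the challenge flow this message describes: the first authorizer is rejected with a fresh per-connection challenge, and only a reply proving knowledge of a keyed transform of that challenge is accepted. The constant stands in for get_random_bytes(), and the transform is abstracted as a callback.

#include <stdbool.h>
#include <stdint.h>

struct cephx_conn {
    uint64_t server_challenge;   /* random, per connection instance */
    bool challenged;
};

static bool check_authorizer(struct cephx_conn *c, uint64_t client_proof,
                             uint64_t (*keyed_transform)(uint64_t))
{
    if (!c->challenged) {
        c->server_challenge = 0x9e3779b97f4a7c15ULL; /* stand-in for
                                                        get_random_bytes() */
        c->challenged = true;
        return false;            /* reject once; client must retry with an
                                    authorizer covering the challenge */
    }
    return client_proof == keyed_transform(c->server_challenge);
}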
Tracing::Span& ConnectionManagerImpl::ActiveStreamFilterBase::activeSpan() {
if (parent_.active_span_) {
return *parent_.active_span_;
} else {
return Tracing::NullSpan::instance();
}
} | 0 | [
"CWE-400",
"CWE-703"
] | envoy | afc39bea36fd436e54262f150c009e8d72db5014 | 334,823,268,270,681,080,000,000,000,000,000,000,000 | 7 | Track byteSize of HeaderMap internally.
Introduces a cached byte size updated internally in HeaderMap. The value
is stored as an optional, and is cleared whenever a non-const pointer or
reference to a HeaderEntry is accessed. The cached value can be set with
refreshByteSize() which performs an iteration over the HeaderMap to sum
the size of each key and value in the HeaderMap.
Signed-off-by: Asra Ali <[email protected]> |
static bool uid_in_group(uid_t uid, gid_t gid)
{
char **tmp;
struct passwd *pwd = getpwuid(uid);
if (!pwd)
return FALSE;
if (pwd->pw_gid == gid)
return TRUE;
struct group *grp = getgrgid(gid);
if (!(grp && grp->gr_mem))
return FALSE;
for (tmp = grp->gr_mem; *tmp != NULL; tmp++)
{
if (g_strcmp0(*tmp, pwd->pw_name) == 0)
{
log_debug("user %s belongs to group: %s", pwd->pw_name, grp->gr_name);
return TRUE;
}
}
log_info("user %s DOESN'T belong to group: %s", pwd->pw_name, grp->gr_name);
return FALSE;
} | 0 | [
"CWE-22"
] | libreport | 239c4f7d1f47265526b39ad70106767d00805277 | 233,431,559,139,962,100,000,000,000,000,000,000,000 | 27 | dd: harden functions against directory traversal issues
Test correctness of all accessed dump dir files in all dd* functions.
Before this commit, the callers were allowed to pass strings like
"../../etc/shadow" in the filename argument of all dd* functions.
Related: #1214457
Signed-off-by: Jakub Filak <[email protected]> |
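A hedged sketch of the filename validation the hardening implies: a dump-dir entry must be a single path component, so traversal strings like "../../etc/shadow" are rejected before any file is opened. The length cap is an assumption.

#include <stdbool.h>
#include <string.h>

static bool dd_filename_ok(const char *name)
{
    if (name == NULL || name[0] == '\0')
        return false;
    if (strcmp(name, ".") == 0 || strcmp(name, "..") == 0)
        return false;
    if (strchr(name, '/') != NULL)   /* rejects "../../etc/shadow" */
        return false;
    return strlen(name) < 64;        /* length cap is an assumption */
}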
void lodepng_info_init(LodePNGInfo* info)
{
lodepng_color_mode_init(&info->color);
info->interlace_method = 0;
info->compression_method = 0;
info->filter_method = 0;
#ifdef LODEPNG_COMPILE_ANCILLARY_CHUNKS
info->background_defined = 0;
info->background_r = info->background_g = info->background_b = 0;
LodePNGText_init(info);
LodePNGIText_init(info);
info->time_defined = 0;
info->phys_defined = 0;
LodePNGUnknownChunks_init(info);
#endif /*LODEPNG_COMPILE_ANCILLARY_CHUNKS*/
} | 0 | [
"CWE-401"
] | FreeRDP | 9fee4ae076b1ec97b97efb79ece08d1dab4df29a | 191,298,624,702,096,300,000,000,000,000,000,000,000 | 19 | Fixed #5645: realloc return handling |
static void virgl_cmd_context_destroy(VirtIOGPU *g,
struct virtio_gpu_ctrl_command *cmd)
{
struct virtio_gpu_ctx_destroy cd;
VIRTIO_GPU_FILL_CMD(cd);
trace_virtio_gpu_cmd_ctx_destroy(cd.hdr.ctx_id);
virgl_renderer_context_destroy(cd.hdr.ctx_id);
} | 0 | [] | qemu | 2fe760554eb3769d70f608a158474f728ba45ba6 | 269,921,892,699,206,960,000,000,000,000,000,000,000 | 10 | virtio-gpu: check max_outputs only
The scanout id should not be above the configured num_scanouts.
Signed-off-by: Marc-André Lureau <[email protected]>
Message-id: [email protected]
Signed-off-by: Gerd Hoffmann <[email protected]> |
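A minimal sketch of the check the commit adds: a guest-supplied scanout id is validated against the configured output count before it indexes the scanout array. The commit title suggests the bound is the device's max_outputs configuration rather than a larger compile-time maximum.

#include <stdbool.h>
#include <stdint.h>

/* Guest-controlled id must stay below the configured output count. */
static bool scanout_id_valid(uint32_t scanout_id, uint32_t max_outputs)
{
    return scanout_id < max_outputs;
}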
void MDSDaemon::tick()
{
// reschedule
reset_tick();
// Call through to subsystems' tick functions
if (mds_rank) {
mds_rank->tick();
}
} | 0 | [
"CWE-287",
"CWE-284"
] | ceph | 5ead97120e07054d80623dada90a5cc764c28468 | 320,758,393,738,300,740,000,000,000,000,000,000,000 | 10 | auth/cephx: add authorizer challenge
Allow the accepting side of a connection to reject an initial authorizer
with a random challenge. The connecting side then has to respond with an
updated authorizer proving they are able to decrypt the service's challenge
and that the new authorizer was produced for this specific connection
instance.
The accepting side requires this challenge and response unconditionally
if the client side advertises they have the feature bit. Servers wishing
to require this improved level of authentication simply have to require
the appropriate feature.
Signed-off-by: Sage Weil <[email protected]>
(cherry picked from commit f80b848d3f830eb6dba50123e04385173fa4540b)
# Conflicts:
# src/auth/Auth.h
# src/auth/cephx/CephxProtocol.cc
# src/auth/cephx/CephxProtocol.h
# src/auth/none/AuthNoneProtocol.h
# src/msg/Dispatcher.h
# src/msg/async/AsyncConnection.cc
- const_iterator
- ::decode vs decode
- AsyncConnection ctor arg noise
- get_random_bytes(), not cct->random() |
static int am_handle_paos_reply(request_rec *r)
{
int rc;
char *post_data;
LassoServer *server;
LassoLogin *login;
char *relay_state = NULL;
int i, err;
/* Make sure that this is a POST request. */
if(r->method_number != M_POST) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
"Expected POST request for paosResponse endpoint."
" Got a %s request instead.", r->method);
/* According to the documentation for request_rec, a handler which
* doesn't handle a request method, should set r->allowed to the
* methods it handles, and return DECLINED.
* However, the default handler handles GET-requests, so for GET
* requests the handler should return HTTP_METHOD_NOT_ALLOWED.
*/
r->allowed = M_POST;
if(r->method_number == M_GET) {
return HTTP_METHOD_NOT_ALLOWED;
} else {
return DECLINED;
}
}
/* Read POST-data. */
rc = am_read_post_data(r, &post_data, NULL);
if (rc != OK) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, rc, r,
"Error reading POST data.");
return rc;
}
server = am_get_lasso_server(r);
if(server == NULL) {
return HTTP_INTERNAL_SERVER_ERROR;
}
login = lasso_login_new(server);
if (login == NULL) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
"Failed to initialize LassoLogin object.");
return HTTP_INTERNAL_SERVER_ERROR;
}
/* Process login response. */
rc = lasso_login_process_paos_response_msg(login, post_data);
if (rc != 0) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
"Error processing ECP authn response."
" Lasso error: [%i] %s", rc, lasso_strerror(rc));
lasso_login_destroy(login);
err = HTTP_BAD_REQUEST;
for (i = 0; auth_mellon_errormap[i].lasso_error != 0; i++) {
if (auth_mellon_errormap[i].lasso_error == rc) {
err = auth_mellon_errormap[i].http_error;
break;
}
}
return err;
}
/* Extract RelayState parameter. */
if (LASSO_PROFILE(login)->msg_relayState) {
relay_state = apr_pstrdup(r->pool, LASSO_PROFILE(login)->msg_relayState);
}
/* Finish handling the reply with the common handler. */
return am_handle_reply_common(r, login, relay_state, post_data, true);
} | 0 | [] | mod_auth_mellon | 6bdda9170a8f1757dabc5b109958657417728018 | 112,559,665,553,079,050,000,000,000,000,000,000,000 | 76 | Fix segmentation fault when receiving badly formed logout message.
If the logout message is badly formed, we won't get the entityID in
`logout->parent.remote_providerID`. If we call `apr_hash_get()` with a
null pointer, it will cause a segmentation fault.
Add a check to validate that the entityID is correctly set. |
GF_Err pdin_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
u32 i;
GF_ProgressiveDownloadBox *ptr = (GF_ProgressiveDownloadBox *)s;
e = gf_isom_full_box_write(s, bs);
if (e) return e;
for (i=0; i<ptr->count; i++) {
gf_bs_write_u32(bs, ptr->rates[i]);
gf_bs_write_u32(bs, ptr->times[i]);
}
return GF_OK;
} | 0 | [
"CWE-400",
"CWE-401"
] | gpac | d2371b4b204f0a3c0af51ad4e9b491144dd1225c | 276,852,930,935,993,500,000,000,000,000,000,000,000 | 13 | prevent dref memleak on invalid input (#1183) |
static void check_file(char *basename)
{
gdImagePtr im;
char *buffer;
size_t size;
size = read_test_file(&buffer, basename);
im = gdImageCreateFromTiffPtr(size, (void *) buffer);
gdTestAssert(im == NULL);
free(buffer);
} | 0 | [
"CWE-125"
] | libgd | 4859d69e07504d4b0a4bdf9bcb4d9e3769ca35ae | 95,986,976,186,911,380,000,000,000,000,000,000,000 | 11 | Fix invalid read in gdImageCreateFromTiffPtr()
tiff_invalid_read.tiff is corrupt, and causes an invalid read in
gdImageCreateFromTiffPtr(), but not in gdImageCreateFromTiff(). The culprit
is dynamicGetbuf(), which doesn't check for out-of-bounds reads. In this case,
dynamicGetbuf() is called with a negative dp->pos, but positive buffer
overflows also have to be handled, in which case 0 has to be returned (cf. commit
75e29a9).
Fixing dynamicGetbuf() exhibits that the corrupt TIFF would still create
the image, because the return value of TIFFReadRGBAImage() is not checked.
We do that, and let createFromTiffRgba() fail if TIFFReadRGBAImage() fails.
This issue had been reported by Ibrahim El-Sayed to [email protected].
CVE-2016-6911 |
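A hedged sketch of the hardened dynamicGetbuf(): clamp negative positions and reads running past the end, returning 0 bytes instead of faulting. The struct is a simplified stand-in for gd's dynamicPtr.

#include <string.h>

struct dynbuf { const char *data; int realSize; int pos; };

static int dynamic_getbuf(struct dynbuf *dp, void *buf, int rlen)
{
    if (dp->pos < 0 || dp->pos >= dp->realSize)
        return 0;                          /* out of bounds: read nothing */
    if (rlen > dp->realSize - dp->pos)
        rlen = dp->realSize - dp->pos;     /* partial read at the tail */
    memcpy(buf, dp->data + dp->pos, rlen);
    dp->pos += rlen;
    return rlen;
}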
void CModule::CancelJobs(const std::set<CModuleJob*>& sJobs) {
set<CJob*> sPlainJobs(sJobs.begin(), sJobs.end());
// Destructor calls UnlinkJob and removes the jobs from m_sJobs
CThreadPool::Get().cancelJobs(sPlainJobs);
} | 0 | [
"CWE-20",
"CWE-264"
] | znc | 8de9e376ce531fe7f3c8b0aa4876d15b479b7311 | 92,148,593,264,721,320,000,000,000,000,000,000,000 | 6 | Fix remote code execution and privilege escalation vulnerability.
To trigger this, one needs to have a user account already.
Thanks for Jeriko One <[email protected]> for finding and reporting this.
CVE-2019-12816 |
encode_constant (MonoDynamicImage *assembly, MonoObject *val, guint32 *ret_type) {
char blob_size [64];
char *b = blob_size;
char *p, *box_val;
char* buf;
guint32 idx = 0, len = 0, dummy = 0;
#ifdef ARM_FPU_FPA
#if G_BYTE_ORDER == G_LITTLE_ENDIAN
guint32 fpa_double [2];
guint32 *fpa_p;
#endif
#endif
p = buf = g_malloc (64);
if (!val) {
*ret_type = MONO_TYPE_CLASS;
len = 4;
box_val = (char*)&dummy;
} else {
box_val = ((char*)val) + sizeof (MonoObject);
*ret_type = val->vtable->klass->byval_arg.type;
}
handle_enum:
switch (*ret_type) {
case MONO_TYPE_BOOLEAN:
case MONO_TYPE_U1:
case MONO_TYPE_I1:
len = 1;
break;
case MONO_TYPE_CHAR:
case MONO_TYPE_U2:
case MONO_TYPE_I2:
len = 2;
break;
case MONO_TYPE_U4:
case MONO_TYPE_I4:
case MONO_TYPE_R4:
len = 4;
break;
case MONO_TYPE_U8:
case MONO_TYPE_I8:
len = 8;
break;
case MONO_TYPE_R8:
len = 8;
#ifdef ARM_FPU_FPA
#if G_BYTE_ORDER == G_LITTLE_ENDIAN
fpa_p = (guint32*)box_val;
fpa_double [0] = fpa_p [1];
fpa_double [1] = fpa_p [0];
box_val = (char*)fpa_double;
#endif
#endif
break;
case MONO_TYPE_VALUETYPE:
if (val->vtable->klass->enumtype) {
*ret_type = mono_class_enum_basetype (val->vtable->klass)->type;
goto handle_enum;
} else
g_error ("we can't encode valuetypes");
case MONO_TYPE_CLASS:
break;
case MONO_TYPE_STRING: {
MonoString *str = (MonoString*)val;
/* there is no signature */
len = str->length * 2;
mono_metadata_encode_value (len, b, &b);
#if G_BYTE_ORDER != G_LITTLE_ENDIAN
{
char *swapped = g_malloc (2 * mono_string_length (str));
const char *p = (const char*)mono_string_chars (str);
swap_with_size (swapped, p, 2, mono_string_length (str));
idx = add_to_blob_cached (assembly, blob_size, b-blob_size, swapped, len);
g_free (swapped);
}
#else
idx = add_to_blob_cached (assembly, blob_size, b-blob_size, (char*)mono_string_chars (str), len);
#endif
g_free (buf);
return idx;
}
case MONO_TYPE_GENERICINST:
*ret_type = val->vtable->klass->generic_class->container_class->byval_arg.type;
goto handle_enum;
default:
g_error ("we don't encode constant type 0x%02x yet", *ret_type);
}
/* there is no signature */
mono_metadata_encode_value (len, b, &b);
#if G_BYTE_ORDER != G_LITTLE_ENDIAN
idx = mono_image_add_stream_data (&assembly->blob, blob_size, b-blob_size);
swap_with_size (blob_size, box_val, len, 1);
mono_image_add_stream_data (&assembly->blob, blob_size, len);
#else
idx = add_to_blob_cached (assembly, blob_size, b-blob_size, box_val, len);
#endif
g_free (buf);
return idx;
} | 0 | [
"CWE-20"
] | mono | 4905ef1130feb26c3150b28b97e4a96752e0d399 | 181,529,580,785,672,000,000,000,000,000,000,000,000 | 103 | Handle invalid instantiation of generic methods.
* verify.c: Add new function to internal verifier API to check
method instantiations.
* reflection.c (mono_reflection_bind_generic_method_parameters):
Check the instantiation before returning it.
Fixes #655847 |
flatpak_dir_install (FlatpakDir *self,
gboolean no_pull,
gboolean no_deploy,
gboolean no_static_deltas,
gboolean reinstall,
gboolean app_hint,
FlatpakRemoteState *state,
const char *ref,
const char *opt_commit,
const char **opt_subpaths,
OstreeAsyncProgress *progress,
GCancellable *cancellable,
GError **error)
{
FlatpakPullFlags flatpak_flags;
flatpak_flags = FLATPAK_PULL_FLAGS_DOWNLOAD_EXTRA_DATA;
if (no_static_deltas)
flatpak_flags |= FLATPAK_PULL_FLAGS_NO_STATIC_DELTAS;
if (flatpak_dir_use_system_helper (self, NULL))
{
g_autoptr(OstreeRepo) child_repo = NULL;
g_auto(GLnxLockFile) child_repo_lock = { 0, };
const char *installation = flatpak_dir_get_id (self);
const char *empty_subpaths[] = {NULL};
const char **subpaths;
g_autofree char *child_repo_path = NULL;
FlatpakHelperDeployFlags helper_flags = 0;
g_autofree char *url = NULL;
gboolean gpg_verify_summary;
gboolean gpg_verify;
gboolean is_oci;
if (opt_subpaths)
subpaths = opt_subpaths;
else
subpaths = empty_subpaths;
if (!ostree_repo_remote_get_url (self->repo,
state->remote_name,
&url,
error))
return FALSE;
if (!ostree_repo_remote_get_gpg_verify_summary (self->repo, state->remote_name,
&gpg_verify_summary, error))
return FALSE;
if (!ostree_repo_remote_get_gpg_verify (self->repo, state->remote_name,
&gpg_verify, error))
return FALSE;
is_oci = flatpak_dir_get_remote_oci (self, state->remote_name);
if (no_pull)
{
/* Do nothing */
}
else if (is_oci)
{
g_autoptr(FlatpakOciRegistry) registry = NULL;
g_autoptr(GFile) registry_file = NULL;
registry = flatpak_dir_create_system_child_oci_registry (self, &child_repo_lock, error);
if (registry == NULL)
return FALSE;
registry_file = g_file_new_for_uri (flatpak_oci_registry_get_uri (registry));
child_repo_path = g_file_get_path (registry_file);
if (!flatpak_dir_mirror_oci (self, registry, state, ref, NULL, progress, cancellable, error))
return FALSE;
}
else if ((!gpg_verify_summary && state->collection_id == NULL) || !gpg_verify)
{
/* The remote is not gpg verified, so we don't want to allow installation via
a download in the home directory, as there is no way to verify you're not
injecting anything into the remote. However, in the case of a remote
configured to a local filesystem we can just let the system helper do
the installation, as it can then avoid network i/o and be certain the
data comes from the right place.
If a collection ID is available, we can verify the refs in commit
metadata. */
if (g_str_has_prefix (url, "file:"))
helper_flags |= FLATPAK_HELPER_DEPLOY_FLAGS_LOCAL_PULL;
else
return flatpak_fail_error (error, FLATPAK_ERROR_UNTRUSTED, _("Can't pull from untrusted non-gpg verified remote"));
}
else
{
/* We're pulling from a remote source, we do the network mirroring pull as a
user and hand back the resulting data to the system-helper, that trusts us
due to the GPG signatures in the repo */
child_repo = flatpak_dir_create_system_child_repo (self, &child_repo_lock, NULL, error);
if (child_repo == NULL)
return FALSE;
flatpak_flags |= FLATPAK_PULL_FLAGS_SIDELOAD_EXTRA_DATA;
if (!flatpak_dir_pull (self, state, ref, opt_commit, NULL, subpaths,
child_repo,
flatpak_flags,
OSTREE_REPO_PULL_FLAGS_MIRROR,
progress, cancellable, error))
return FALSE;
if (!child_repo_ensure_summary (child_repo, state, cancellable, error))
return FALSE;
child_repo_path = g_file_get_path (ostree_repo_get_path (child_repo));
}
if (no_deploy)
helper_flags |= FLATPAK_HELPER_DEPLOY_FLAGS_NO_DEPLOY;
if (reinstall)
helper_flags |= FLATPAK_HELPER_DEPLOY_FLAGS_REINSTALL;
if (app_hint)
helper_flags |= FLATPAK_HELPER_DEPLOY_FLAGS_APP_HINT;
helper_flags |= FLATPAK_HELPER_DEPLOY_FLAGS_INSTALL_HINT;
if (!flatpak_dir_system_helper_call_deploy (self,
child_repo_path ? child_repo_path : "",
helper_flags, ref, state->remote_name,
(const char * const *) subpaths,
installation ? installation : "",
cancellable,
error))
return FALSE;
if (child_repo_path)
(void) glnx_shutil_rm_rf_at (AT_FDCWD, child_repo_path, NULL, NULL);
return TRUE;
}
if (!no_pull)
{
if (!flatpak_dir_pull (self, state, ref, opt_commit, NULL, opt_subpaths, NULL,
flatpak_flags, OSTREE_REPO_PULL_FLAGS_NONE,
progress, cancellable, error))
return FALSE;
}
if (!no_deploy)
{
if (!flatpak_dir_deploy_install (self, ref, state->remote_name, opt_subpaths,
reinstall, cancellable, error))
return FALSE;
}
return TRUE;
} | 0 | [
"CWE-668"
] | flatpak | cd2142888fc4c199723a0dfca1f15ea8788a5483 | 1,423,650,616,973,415,600,000,000,000,000,000,000 | 158 | Don't expose /proc when running apply_extra
As shown by CVE-2019-5736, it is sometimes possible for the sandbox
app to access outside files using /proc/self/exe. This is not
typically an issue for flatpak as the sandbox runs as the user which
has no permissions to e.g. modify the host files.
However, when installing apps using extra-data into the system repo
we *do* actually run a sandbox as root. So, in this case we disable mounting
/proc in the sandbox, which will neuter attacks like this. |
Item_datetime_literal_for_invalid_dates(THD *thd,
const Datetime *ltime, uint dec_arg)
:Item_datetime_literal(thd, ltime, dec_arg)
{
maybe_null= false;
} | 0 | [
"CWE-617"
] | server | 807945f2eb5fa22e6f233cc17b85a2e141efe2c8 | 75,753,499,467,449,810,000,000,000,000,000,000,000 | 6 | MDEV-26402: A SEGV in Item_field::used_tables/update_depend_map_for_order...
When doing condition pushdown from HAVING into WHERE,
Item_equal::create_pushable_equalities() calls
item->set_extraction_flag(IMMUTABLE_FL) for constant items.
Then, Item::cleanup_excluding_immutables_processor() checks for this flag
to see if it should call item->cleanup() or leave the item as-is.
The failure happens when a constant item has a non-constant one inside it,
like:
(tbl.col=0 AND impossible_cond)
item->walk(cleanup_excluding_immutables_processor) works in a bottom-up
way so it
1. will call Item_func_eq(tbl.col=0)->cleanup()
2. will not call Item_cond_and->cleanup (as the AND is constant)
This creates an item tree where a fixed Item has an un-fixed Item inside
it which eventually causes an assertion failure.
Fixed by introducing this rule: instead of just calling
item->set_extraction_flag(IMMUTABLE_FL);
we call Item::walk() to set the flag for all sub-items of the item. |
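A hedged C sketch of the shape of the fix: mark the whole item subtree immutable, not just the root, so cleanup skips every contained item. MariaDB's real code is C++ and uses Item::walk(); this stand-in only illustrates the recursion.

struct item {
    struct item **args;    /* children, argc of them */
    int argc;
    unsigned flags;
};

#define IMMUTABLE_FL 0x1u

static void set_immutable_recursive(struct item *it)
{
    it->flags |= IMMUTABLE_FL;                 /* the node itself... */
    for (int i = 0; i < it->argc; i++)
        set_immutable_recursive(it->args[i]);  /* ...and every sub-item */
}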
nv_ctrlg(cmdarg_T *cap)
{
if (VIsual_active) // toggle Selection/Visual mode
{
VIsual_select = !VIsual_select;
may_trigger_modechanged();
showmode();
}
else if (!checkclearop(cap->oap))
// print full name if count given or :cd used
fileinfo((int)cap->count0, FALSE, TRUE);
} | 0 | [
"CWE-416"
] | vim | e2fa213cf571041dbd04ab0329303ffdc980678a | 189,149,062,782,715,980,000,000,000,000,000,000,000 | 12 | patch 8.2.5024: using freed memory with "]d"
Problem: Using freed memory with "]d".
Solution: Copy the pattern before searching. |
int dsdb_modify(struct ldb_context *ldb, const struct ldb_message *message,
uint32_t dsdb_flags)
{
struct ldb_request *req;
int ret;
ret = ldb_build_mod_req(&req, ldb, ldb,
message,
NULL,
NULL,
ldb_op_default_callback,
NULL);
if (ret != LDB_SUCCESS) return ret;
ret = dsdb_request_add_controls(req, dsdb_flags);
if (ret != LDB_SUCCESS) {
talloc_free(req);
return ret;
}
ret = dsdb_autotransaction_request(ldb, req);
talloc_free(req);
return ret;
} | 0 | [
"CWE-200"
] | samba | 0a3aa5f908e351201dc9c4d4807b09ed9eedff77 | 206,362,142,901,172,700,000,000,000,000,000,000,000 | 26 | CVE-2022-32746 ldb: Make use of functions for appending to an ldb_message
This aims to minimise usage of the error-prone pattern of searching for
a just-added message element in order to make modifications to it (and
potentially finding the wrong element).
BUG: https://bugzilla.samba.org/show_bug.cgi?id=15009
Signed-off-by: Joseph Sutton <[email protected]> |
static int iee80211_tdls_have_ht_peers(struct ieee80211_sub_if_data *sdata)
{
struct sta_info *sta;
bool result = false;
rcu_read_lock();
list_for_each_entry_rcu(sta, &sdata->local->sta_list, list) {
if (!sta->sta.tdls || sta->sdata != sdata || !sta->uploaded ||
!test_sta_flag(sta, WLAN_STA_AUTHORIZED) ||
!test_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH) ||
!sta->sta.ht_cap.ht_supported)
continue;
result = true;
break;
}
rcu_read_unlock();
return result;
} | 0 | [] | linux | 79c92ca42b5a3e0ea172ea2ce8df8e125af237da | 335,501,357,372,589,550,000,000,000,000,000,000 | 19 | mac80211: handle deauthentication/disassociation from TDLS peer
When receiving a deauthentication/disassociation frame from a TDLS
peer, a station should not disconnect the current AP, but only
disable the current TDLS link if it's enabled.
Without this change, a TDLS issue can be reproduced by following the
steps as below:
1. STA-1 and STA-2 are connected to AP, bidirection traffic is running
between STA-1 and STA-2.
2. Set up TDLS link between STA-1 and STA-2, stay for a while, then
teardown TDLS link.
3. Repeat step #2 and monitor the connection between STA and AP.
During the test, one STA may send a deauthentication/disassociation
frame to another, after TDLS teardown, with reason code 6/7, which
means: Class 2/3 frame received from nonassociated STA.
On receiving this frame, the receiver STA will disconnect from the current
AP and then reconnect. This is not the expected behavior; the purpose of this
frame should be disabling the TDLS link, not the link with the AP.
Cc: [email protected]
Signed-off-by: Yu Wang <[email protected]>
Signed-off-by: Johannes Berg <[email protected]> |
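A hedged sketch of the dispatch the fix introduces: a deauthentication from a TDLS peer tears down only that direct link, while the AP association is left intact. Types and callbacks are illustrative stand-ins, not mac80211 API.

#include <stdbool.h>
#include <stddef.h>

struct peer { bool is_tdls_peer; };

static void handle_deauth(struct peer *sender,
                          void (*teardown_tdls)(struct peer *),
                          void (*disconnect_ap)(void))
{
    if (sender != NULL && sender->is_tdls_peer)
        teardown_tdls(sender);   /* disable only the direct TDLS link */
    else
        disconnect_ap();         /* genuine deauth from the AP */
}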
bool LEX::sp_for_loop_condition(THD *thd, const Lex_for_loop_st &loop)
{
Item_splocal *args[2];
for (uint i= 0 ; i < 2; i++)
{
sp_variable *src= i == 0 ? loop.m_index : loop.m_target_bound;
args[i]= new (thd->mem_root)
Item_splocal(thd, &sp_rcontext_handler_local,
&src->name, src->offset, src->type_handler());
if (unlikely(args[i] == NULL))
return true;
#ifdef DBUG_ASSERT_EXISTS
args[i]->m_sp= sphead;
#endif
}
Item *expr= loop.m_direction > 0 ?
(Item *) new (thd->mem_root) Item_func_le(thd, args[0], args[1]) :
(Item *) new (thd->mem_root) Item_func_ge(thd, args[0], args[1]);
return unlikely(!expr) || unlikely(sp_while_loop_expression(thd, expr));
} | 0 | [
"CWE-703"
] | server | 39feab3cd31b5414aa9b428eaba915c251ac34a2 | 190,476,410,586,168,900,000,000,000,000,000,000,000 | 21 | MDEV-26412 Server crash in Item_field::fix_outer_field for INSERT SELECT
If an INSERT/REPLACE SELECT statement contained an ON expression in the top
level select and this expression used a subquery with a column reference
that could not be resolved, then an attempt to resolve this reference as
an outer reference caused a crash of the server. This happened because the
outer context field in the Name_resolution_context structure was not set
to NULL for such references. Rather it pointed to the first element in
the select_stack.
Note that starting from 10.4 we cannot use the SELECT_LEX::outer_select()
method when parsing a SELECT construct.
Approved by Oleksandr Byelkin <[email protected]> |
ex_update(exarg_T *eap)
{
if (curbufIsChanged())
(void)do_write(eap);
} | 0 | [
"CWE-78"
] | vim | 8c62a08faf89663e5633dc5036cd8695c80f1075 | 96,377,542,313,731,030,000,000,000,000,000,000,000 | 5 | patch 8.1.0881: can execute shell commands in rvim through interfaces
Problem: Can execute shell commands in rvim through interfaces.
Solution: Disable using interfaces in restricted mode. Allow for writing
file with writefile(), histadd() and a few others. |
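The guard pattern this patch describes can be sketched as a check at the top of each interface entry point; Vim's sources expose check_restricted() and check_secure() for exactly this. The function below is illustrative, not the actual patch.

/* Sketch: refuse scripting-interface commands in restricted mode. */
    static void
ex_interface_cmd(exarg_T *eap)
{
    if (check_restricted() || check_secure())
        return;         /* started as rvim or inside a sandbox */
    /* ... dispatch to the Python/Lua/etc. interface ... */
}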
get_flp_value(buf_T *buf)
{
if (buf->b_p_flp == NULL || *buf->b_p_flp == NUL)
return p_flp;
return buf->b_p_flp;
} | 0 | [
"CWE-122",
"CWE-787"
] | vim | 652dee448618589de5528a9e9a36995803f5557a | 195,447,143,108,773,430,000,000,000,000,000,000,000 | 6 | patch 8.2.4245: ":retab 0" may cause illegal memory access
Problem: ":retab 0" may cause illegal memory access.
Solution: Limit the value of 'tabstop' to 10000. |
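The stated solution amounts to clamping the option value; a minimal sketch with the 10000 limit from the message (the helper name is hypothetical):

#define TABSTOP_MAX 10000

static long clamp_tabstop(long ts)      /* hypothetical helper */
{
    if (ts <= 0)
        return 8;                       /* fall back to the default */
    return ts > TABSTOP_MAX ? TABSTOP_MAX : ts;
}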
double Field_newdate::val_real(void)
{
ASSERT_COLUMN_MARKED_FOR_READ;
return (double) Field_newdate::val_int();
} | 0 | [
"CWE-416",
"CWE-703"
] | server | 08c7ab404f69d9c4ca6ca7a9cf7eec74c804f917 | 257,703,914,881,327,270,000,000,000,000,000,000,000 | 5 | MDEV-24176 Server crashes after insert in the table with virtual
column generated using date_format() and if()
vcol_info->expr is allocated on expr_arena at parsing stage. Since
expr item is allocated on expr_arena all its containee items must be
allocated on expr_arena too. Otherwise fix_session_expr() will
encounter a prematurely freed item.
When table is reopened from cache vcol_info contains stale
expression. We refresh expression via TABLE::vcol_fix_exprs() but
first we must prepare a proper context (Vcol_expr_context) which meets
some requirements:
1. As noted above expr update must be done on expr_arena as there may
be new items created. It was a bug in fix_session_expr_for_read() and
was just not reproduced because of no second refix. Now refix is done
for more cases so it does reproduce. Tests affected: vcol.binlog
2. Also name resolution context must be narrowed to the single table.
Tested by: vcol.update main.default vcol.vcol_syntax gcol.gcol_bugfixes
3. sql_mode must be clean and not fail expr update.
sql_mode such as MODE_NO_BACKSLASH_ESCAPES, MODE_NO_ZERO_IN_DATE, etc
must not affect vcol expression update. If the table was created
successfully any further evaluation must not fail. Tests affected:
main.func_like
Reviewed by: Sergei Golubchik <[email protected]> |
gdLayerOverlay (int dst, int src)
{
int a1, a2;
a1 = gdAlphaMax - gdTrueColorGetAlpha(dst);
a2 = gdAlphaMax - gdTrueColorGetAlpha(src);
return ( ((gdAlphaMax - a1*a2/gdAlphaMax) << 24) +
(gdAlphaOverlayColor( gdTrueColorGetRed(src), gdTrueColorGetRed(dst), gdRedMax ) << 16) +
(gdAlphaOverlayColor( gdTrueColorGetGreen(src), gdTrueColorGetGreen(dst), gdGreenMax ) << 8) +
(gdAlphaOverlayColor( gdTrueColorGetBlue(src), gdTrueColorGetBlue(dst), gdBlueMax ))
);
} | 0 | [
"CWE-119"
] | php-src | feba44546c27b0158f9ac20e72040a224b918c75 | 96,171,849,966,077,220,000,000,000,000,000,000,000 | 11 | Fixed bug #22965 (Crash in gd lib's ImageFillToBorder()). |
static ssize_t bfq_low_latency_store(struct elevator_queue *e,
const char *page, size_t count)
{
struct bfq_data *bfqd = e->elevator_data;
unsigned long __data;
int ret;
ret = bfq_var_store(&__data, (page));
if (ret)
return ret;
if (__data > 1)
__data = 1;
if (__data == 0 && bfqd->low_latency != 0)
bfq_end_wr(bfqd);
bfqd->low_latency = __data;
return count;
} | 0 | [
"CWE-416"
] | linux | 2f95fa5c955d0a9987ffdc3a095e2f4e62c5f2a9 | 104,084,270,119,240,540,000,000,000,000,000,000,000 | 19 | block, bfq: fix use-after-free in bfq_idle_slice_timer_body
In bfq_idle_slice_timer func, bfqq = bfqd->in_service_queue is
not in bfqd-lock critical section. The bfqq, which is not
equal to NULL in bfq_idle_slice_timer, may be freed after passing
to bfq_idle_slice_timer_body. So we will access the freed memory.
In addition, considering the bfqq may be in race, we should
firstly check whether bfqq is in service before doing something
on it in bfq_idle_slice_timer_body func. If the bfqq in race is
not in service, it means the bfqq has been expired through
__bfq_bfqq_expire func, and the wait_request flag has been cleared in
__bfq_bfqd_reset_in_service func. So we do not need to re-clear the
wait_request of bfqq which is not in service.
KASAN log is given as follows:
[13058.354613] ==================================================================
[13058.354640] BUG: KASAN: use-after-free in bfq_idle_slice_timer+0xac/0x290
[13058.354644] Read of size 8 at addr ffffa02cf3e63f78 by task fork13/19767
[13058.354646]
[13058.354655] CPU: 96 PID: 19767 Comm: fork13
[13058.354661] Call trace:
[13058.354667] dump_backtrace+0x0/0x310
[13058.354672] show_stack+0x28/0x38
[13058.354681] dump_stack+0xd8/0x108
[13058.354687] print_address_description+0x68/0x2d0
[13058.354690] kasan_report+0x124/0x2e0
[13058.354697] __asan_load8+0x88/0xb0
[13058.354702] bfq_idle_slice_timer+0xac/0x290
[13058.354707] __hrtimer_run_queues+0x298/0x8b8
[13058.354710] hrtimer_interrupt+0x1b8/0x678
[13058.354716] arch_timer_handler_phys+0x4c/0x78
[13058.354722] handle_percpu_devid_irq+0xf0/0x558
[13058.354731] generic_handle_irq+0x50/0x70
[13058.354735] __handle_domain_irq+0x94/0x110
[13058.354739] gic_handle_irq+0x8c/0x1b0
[13058.354742] el1_irq+0xb8/0x140
[13058.354748] do_wp_page+0x260/0xe28
[13058.354752] __handle_mm_fault+0x8ec/0x9b0
[13058.354756] handle_mm_fault+0x280/0x460
[13058.354762] do_page_fault+0x3ec/0x890
[13058.354765] do_mem_abort+0xc0/0x1b0
[13058.354768] el0_da+0x24/0x28
[13058.354770]
[13058.354773] Allocated by task 19731:
[13058.354780] kasan_kmalloc+0xe0/0x190
[13058.354784] kasan_slab_alloc+0x14/0x20
[13058.354788] kmem_cache_alloc_node+0x130/0x440
[13058.354793] bfq_get_queue+0x138/0x858
[13058.354797] bfq_get_bfqq_handle_split+0xd4/0x328
[13058.354801] bfq_init_rq+0x1f4/0x1180
[13058.354806] bfq_insert_requests+0x264/0x1c98
[13058.354811] blk_mq_sched_insert_requests+0x1c4/0x488
[13058.354818] blk_mq_flush_plug_list+0x2d4/0x6e0
[13058.354826] blk_flush_plug_list+0x230/0x548
[13058.354830] blk_finish_plug+0x60/0x80
[13058.354838] read_pages+0xec/0x2c0
[13058.354842] __do_page_cache_readahead+0x374/0x438
[13058.354846] ondemand_readahead+0x24c/0x6b0
[13058.354851] page_cache_sync_readahead+0x17c/0x2f8
[13058.354858] generic_file_buffered_read+0x588/0xc58
[13058.354862] generic_file_read_iter+0x1b4/0x278
[13058.354965] ext4_file_read_iter+0xa8/0x1d8 [ext4]
[13058.354972] __vfs_read+0x238/0x320
[13058.354976] vfs_read+0xbc/0x1c0
[13058.354980] ksys_read+0xdc/0x1b8
[13058.354984] __arm64_sys_read+0x50/0x60
[13058.354990] el0_svc_common+0xb4/0x1d8
[13058.354994] el0_svc_handler+0x50/0xa8
[13058.354998] el0_svc+0x8/0xc
[13058.354999]
[13058.355001] Freed by task 19731:
[13058.355007] __kasan_slab_free+0x120/0x228
[13058.355010] kasan_slab_free+0x10/0x18
[13058.355014] kmem_cache_free+0x288/0x3f0
[13058.355018] bfq_put_queue+0x134/0x208
[13058.355022] bfq_exit_icq_bfqq+0x164/0x348
[13058.355026] bfq_exit_icq+0x28/0x40
[13058.355030] ioc_exit_icq+0xa0/0x150
[13058.355035] put_io_context_active+0x250/0x438
[13058.355038] exit_io_context+0xd0/0x138
[13058.355045] do_exit+0x734/0xc58
[13058.355050] do_group_exit+0x78/0x220
[13058.355054] __wake_up_parent+0x0/0x50
[13058.355058] el0_svc_common+0xb4/0x1d8
[13058.355062] el0_svc_handler+0x50/0xa8
[13058.355066] el0_svc+0x8/0xc
[13058.355067]
[13058.355071] The buggy address belongs to the object at ffffa02cf3e63e70 which belongs to the cache bfq_queue of size 464
[13058.355075] The buggy address is located 264 bytes inside of the 464-byte region [ffffa02cf3e63e70, ffffa02cf3e64040)
[13058.355077] The buggy address belongs to the page:
[13058.355083] page:ffff7e80b3cf9800 count:1 mapcount:0 mapping:ffff802db5c90780 index:0xffffa02cf3e606f0 compound_mapcount: 0
[13058.366175] flags: 0x2ffffe0000008100(slab|head)
[13058.370781] raw: 2ffffe0000008100 ffff7e80b53b1408 ffffa02d730c1c90 ffff802db5c90780
[13058.370787] raw: ffffa02cf3e606f0 0000000000370023 00000001ffffffff 0000000000000000
[13058.370789] page dumped because: kasan: bad access detected
[13058.370791]
[13058.370792] Memory state around the buggy address:
[13058.370797] ffffa02cf3e63e00: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fb fb
[13058.370801] ffffa02cf3e63e80: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
[13058.370805] >ffffa02cf3e63f00: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
[13058.370808] ^
[13058.370811] ffffa02cf3e63f80: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
[13058.370815] ffffa02cf3e64000: fb fb fb fb fb fb fb fb fc fc fc fc fc fc fc fc
[13058.370817] ==================================================================
[13058.370820] Disabling lock debugging due to kernel taint
Here, we directly pass the bfqd to bfq_idle_slice_timer_body func.
--
V2->V3: rewrite the comment as suggested by Paolo Valente
V1->V2: add one comment, and add Fixes and Reported-by tag.
Fixes: aee69d78d ("block, bfq: introduce the BFQ-v0 I/O scheduler as an extra scheduler")
Acked-by: Paolo Valente <[email protected]>
Reported-by: Wang Wang <[email protected]>
Signed-off-by: Zhiqiang Liu <[email protected]>
Signed-off-by: Feilong Lin <[email protected]>
Signed-off-by: Jens Axboe <[email protected]> |
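Following the last paragraph of the message, a sketch of the reworked timer body: it receives the bfqd instead of a pre-fetched bfqq and re-reads in_service_queue only inside the bfqd->lock critical section (simplified; the real body also handles queue expiry):

static void bfq_idle_slice_timer_body(struct bfq_data *bfqd)
{
        struct bfq_queue *bfqq;
        unsigned long flags;

        spin_lock_irqsave(&bfqd->lock, flags);
        bfqq = bfqd->in_service_queue;  /* cannot be freed while locked */
        if (bfqq)                       /* only touch it if still in service */
                bfq_clear_bfqq_wait_request(bfqq);
        spin_unlock_irqrestore(&bfqd->lock, flags);
}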
static bool login_via_cert(PgSocket *client)
{
struct tls *tls = client->sbuf.tls;
struct tls_cert *cert;
struct tls_cert_dname *subj;
if (!tls) {
disconnect_client(client, true, "TLS connection required");
return false;
}
if (tls_get_peer_cert(client->sbuf.tls, &cert, NULL) < 0 || !cert) {
disconnect_client(client, true, "TLS client certificate required");
return false;
}
subj = &cert->subject;
log_debug("TLS cert login: CN=%s/C=%s/L=%s/ST=%s/O=%s/OU=%s",
subj->common_name ? subj->common_name : "(null)",
subj->country_name ? subj->country_name : "(null)",
subj->locality_name ? subj->locality_name : "(null)",
subj->state_or_province_name ? subj->state_or_province_name : "(null)",
subj->organization_name ? subj->organization_name : "(null)",
subj->organizational_unit_name ? subj->organizational_unit_name : "(null)");
if (!subj->common_name) {
disconnect_client(client, true, "Invalid TLS certificate");
goto fail;
}
if (strcmp(subj->common_name, client->auth_user->name) != 0) {
disconnect_client(client, true, "TLS certificate name mismatch");
goto fail;
}
tls_cert_free(cert);
/* login successful */
return finish_client_login(client);
fail:
tls_cert_free(cert);
return false;
} | 0 | [
"CWE-287",
"CWE-284"
] | pgbouncer | 7ca3e5279d05fceb1e8a043c6f5b6f58dea3ed38 | 26,901,960,394,923,580,000,000,000,000,000,000,000 | 39 | Remove too early set of auth_user
When query returns 0 rows (user not found),
this user stays as login user...
Should fix #69. |
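A sketch of the intended control flow: the looked-up user becomes the client's auth_user only after the auth query actually returns a row. Identifier names are illustrative, not the exact pgbouncer code.

/* Sketch: assign auth_user only on a successful lookup. */
if (rows == 0) {
        disconnect_client(client, true, "no such user");
        return false;
}
client->auth_user = looked_up_user;     /* set only after the query succeeds */
return finish_client_login(client);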
encode_tlv_table_mappings(struct ofpbuf *b, struct ovs_list *mappings)
{
struct ofputil_tlv_map *map;
LIST_FOR_EACH (map, list_node, mappings) {
struct nx_tlv_map *nx_map;
nx_map = ofpbuf_put_zeros(b, sizeof *nx_map);
nx_map->option_class = htons(map->option_class);
nx_map->option_type = map->option_type;
nx_map->option_len = map->option_len;
nx_map->index = htons(map->index);
}
} | 0 | [
"CWE-772"
] | ovs | 77ad4225d125030420d897c873e4734ac708c66b | 93,129,851,636,753,420,000,000,000,000,000,000,000 | 14 | ofp-util: Fix memory leaks on error cases in ofputil_decode_group_mod().
Found by libFuzzer.
Reported-by: Bhargava Shastry <[email protected]>
Signed-off-by: Ben Pfaff <[email protected]>
Acked-by: Justin Pettit <[email protected]> |
void sctp_v6_del_protocol(void)
{
inet6_del_protocol(&sctpv6_protocol, IPPROTO_SCTP);
unregister_inet6addr_notifier(&sctp_inet6addr_notifier);
} | 0 | [
"CWE-310"
] | net | 95ee62083cb6453e056562d91f597552021e6ae7 | 153,425,144,632,427,340,000,000,000,000,000,000,000 | 5 | net: sctp: fix ipv6 ipsec encryption bug in sctp_v6_xmit
Alan Chester reported an issue with IPv6 on SCTP that IPsec traffic is not
being encrypted, whereas on IPv4 it is. Setting up an AH + ESP transport
does not seem to have the desired effect:
SCTP + IPv4:
22:14:20.809645 IP (tos 0x2,ECT(0), ttl 64, id 0, offset 0, flags [DF], proto AH (51), length 116)
192.168.0.2 > 192.168.0.5: AH(spi=0x00000042,sumlen=16,seq=0x1): ESP(spi=0x00000044,seq=0x1), length 72
22:14:20.813270 IP (tos 0x2,ECT(0), ttl 64, id 0, offset 0, flags [DF], proto AH (51), length 340)
192.168.0.5 > 192.168.0.2: AH(spi=0x00000043,sumlen=16,seq=0x1):
SCTP + IPv6:
22:31:19.215029 IP6 (class 0x02, hlim 64, next-header SCTP (132) payload length: 364)
fe80::222:15ff:fe87:7fc.3333 > fe80::92e6:baff:fe0d:5a54.36767: sctp
1) [INIT ACK] [init tag: 747759530] [rwnd: 62464] [OS: 10] [MIS: 10]
Moreover, Alan says:
This problem was seen with both Racoon and Racoon2. Other people have seen
this with OpenSwan. When IPsec is configured to encrypt all upper layer
protocols the SCTP connection does not initialize. After using Wireshark to
follow packets, this is because the SCTP packet leaves Box A unencrypted and
Box B believes all upper layer protocols are to be encrypted so it drops
this packet, causing the SCTP connection to fail to initialize. When IPsec
is configured to encrypt just SCTP, the SCTP packets are observed unencrypted.
In fact, using `socat sctp6-listen:3333 -` on one end and transferring "plaintext"
string on the other end, results in cleartext on the wire where SCTP eventually
does not report any errors, thus in the latter case that Alan reports, the
non-paranoid user might think he's communicating over an encrypted transport on
SCTP although he's not (tcpdump ... -X):
...
0x0030: 5d70 8e1a 0003 001a 177d eb6c 0000 0000 ]p.......}.l....
0x0040: 0000 0000 706c 6169 6e74 6578 740a 0000 ....plaintext...
Only in /proc/net/xfrm_stat we can see XfrmInTmplMismatch increasing on the
receiver side. Initial follow-up analysis from Alan's bug report was done by
Alexey Dobriyan. Also thanks to Vlad Yasevich for feedback on this.
SCTP has its own implementation of sctp_v6_xmit() not calling inet6_csk_xmit().
This has the implication that it probably never really got updated along with
changes in inet6_csk_xmit() and therefore does not seem to invoke xfrm handlers.
SCTP's IPv4 xmit however, properly calls ip_queue_xmit() to do the work. Since
a call to inet6_csk_xmit() would solve this problem, but result in unecessary
route lookups, let us just use the cached flowi6 instead that we got through
sctp_v6_get_dst(). Since all SCTP packets are being sent through sctp_packet_transmit(),
we do the route lookup / flow caching in sctp_transport_route(), hold it in
tp->dst and skb_dst_set() right after that. If we would alter fl6->daddr in
sctp_v6_xmit() to np->opt->srcrt, we possibly could run into the same effect
of not having xfrm layer pick it up, hence, use fl6_update_dst() in sctp_v6_get_dst()
instead to get the correct source routed dst entry, which we assign to the skb.
Also source address routing example from 625034113 ("sctp: fix sctp to work with
ipv6 source address routing") still works with this patch! Nevertheless, in RFC5095
it is actually 'recommended' to not use that anyway due to traffic amplification [1].
So it seems we're not supposed to do that anyway in sctp_v6_xmit(). Moreover, if
we overwrite the flow destination here, the lower IPv6 layer will be unable to
put the correct destination address into IP header, as routing header is added in
ipv6_push_nfrag_opts() but then probably with wrong final destination. Things aside,
result of this patch is that we do not have any XfrmInTmplMismatch increase plus on
the wire with this patch it now looks like:
SCTP + IPv6:
08:17:47.074080 IP6 2620:52:0:102f:7a2b:cbff:fe27:1b0a > 2620:52:0:102f:213:72ff:fe32:7eba:
AH(spi=0x00005fb4,seq=0x1): ESP(spi=0x00005fb5,seq=0x1), length 72
08:17:47.074264 IP6 2620:52:0:102f:213:72ff:fe32:7eba > 2620:52:0:102f:7a2b:cbff:fe27:1b0a:
AH(spi=0x00003d54,seq=0x1): ESP(spi=0x00003d55,seq=0x1), length 296
This fixes Kernel Bugzilla 24412. This security issue seems to be present since
2.6.18 kernels. Lets just hope some big passive adversary in the wild didn't have
its fun with that. lksctp-tools IPv6 regression test suite passes as well with
this patch.
[1] http://www.secdev.org/conf/IPv6_RH_security-csw07.pdf
Reported-by: Alan Chester <[email protected]>
Reported-by: Alexey Dobriyan <[email protected]>
Signed-off-by: Daniel Borkmann <[email protected]>
Cc: Steffen Klassert <[email protected]>
Cc: Hannes Frederic Sowa <[email protected]>
Acked-by: Vlad Yasevich <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
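The core of the change can be sketched as sctp_v6_xmit() transmitting with the flow cached by sctp_v6_get_dst(), so the xfrm (IPsec) layer sees the same flow that was used for the route lookup (simplified sketch of the shape the message describes):

static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *t)
{
        struct sock *sk = skb->sk;
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct flowi6 *fl6 = &t->fl.u.ip6;  /* cached by sctp_v6_get_dst() */

        /* dst was attached via skb_dst_set() in sctp_transport_route(),
         * so IPsec transformations apply to the same flow. */
        return ip6_xmit(sk, skb, fl6, np->opt, np->tclass);
}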
BOOL windows_ctrl_handler(DWORD fdwCtrlType)
{
switch (fdwCtrlType)
{
case CTRL_C_EVENT:
case CTRL_BREAK_EVENT:
handle_ctrlc_signal(SIGINT);
/* Indicate that signal has beed handled. */
return TRUE;
case CTRL_CLOSE_EVENT:
case CTRL_LOGOFF_EVENT:
case CTRL_SHUTDOWN_EVENT:
handle_quit_signal(SIGINT + 1);
}
/* Pass signal to the next control handler function. */
return FALSE;
} | 0 | [
"CWE-284",
"CWE-295"
] | mysql-server | 3bd5589e1a5a93f9c224badf983cd65c45215390 | 116,565,101,787,367,600,000,000,000,000,000,000,000 | 17 | WL#6791 : Redefine client --ssl option to imply enforced encryption
# Changed the meaning of the --ssl=1 option of all client binaries
to mean force ssl, not try ssl and fail over to unencrypted
# Added a new MYSQL_OPT_SSL_ENFORCE mysql_options()
option to specify that an ssl connection is required.
# Added a new macro SSL_SET_OPTIONS() to the client
SSL handling headers that sets all the relevant SSL options at
once.
# Revamped all of the current native clients to use the new macro
# Removed some Windows line endings.
# Added proper handling of the new option into the ssl helper
headers.
# If SSL is mandatory assume that the media is secure enough
for the sha256 plugin to do unencrypted password exchange even
before establishing a connection.
# Set the default ssl cipher to DHE-RSA-AES256-SHA if none is
specified.
# updated test cases that require a non-default cipher to spawn
a mysql command line tool binary since mysqltest has no support
for specifying ciphers.
# updated the replication slave connection code to always enforce
SSL if any of the SSL config options is present.
# test cases added and updated.
# added a mysql_get_option() API to return mysql_options()
values. Used the new API inside the sha256 plugin.
# Fixed compilation warnings because of unused variables.
# Fixed test failures (mysql_ssl and bug13115401)
# Fixed whitespace issues.
# Fully implemented the mysql_get_option() function.
# Added a test case for mysql_get_option()
# fixed some trailing whitespace issues
# fixed some uint/int warnings in mysql_client_test.c
# removed shared memory option from non-windows get_options
tests
# moved MYSQL_OPT_LOCAL_INFILE to the uint options |
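From the client side, the MYSQL_OPT_SSL_ENFORCE option introduced by this worklog can be used roughly as below (sketch; host and credentials are placeholders and error handling is minimal):

#include <stdio.h>
#include <mysql.h>

int main(void)
{
        MYSQL mysql;
        my_bool enforce_ssl = 1;

        mysql_init(&mysql);
        /* Fail the connection instead of silently falling back to plaintext. */
        mysql_options(&mysql, MYSQL_OPT_SSL_ENFORCE, &enforce_ssl);
        if (!mysql_real_connect(&mysql, "host", "user", "pass", NULL, 0, NULL, 0))
                fprintf(stderr, "%s\n", mysql_error(&mysql));
        mysql_close(&mysql);
        return 0;
}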
static void rfcomm_dev_modem_status(struct rfcomm_dlc *dlc, u8 v24_sig)
{
struct rfcomm_dev *dev = dlc->owner;
if (!dev)
return;
BT_DBG("dlc %p dev %p v24_sig 0x%02x", dlc, dev, v24_sig);
if ((dev->modem_status & TIOCM_CD) && !(v24_sig & RFCOMM_V24_DV)) {
if (dev->port.tty && !C_CLOCAL(dev->port.tty))
tty_hangup(dev->port.tty);
}
dev->modem_status =
((v24_sig & RFCOMM_V24_RTC) ? (TIOCM_DSR | TIOCM_DTR) : 0) |
((v24_sig & RFCOMM_V24_RTR) ? (TIOCM_RTS | TIOCM_CTS) : 0) |
((v24_sig & RFCOMM_V24_IC) ? TIOCM_RI : 0) |
((v24_sig & RFCOMM_V24_DV) ? TIOCM_CD : 0);
} | 0 | [
"CWE-200"
] | linux | f9432c5ec8b1e9a09b9b0e5569e3c73db8de432a | 276,509,280,496,824,780,000,000,000,000,000,000,000 | 19 | Bluetooth: RFCOMM - Fix info leak in ioctl(RFCOMMGETDEVLIST)
The RFCOMM code fails to initialize the two padding bytes of struct
rfcomm_dev_list_req inserted for alignment before copying it to
userland. Additionally there are two padding bytes in each instance of
struct rfcomm_dev_info. The ioctl() therefore discloses two bytes plus
dev_num times two bytes of uninitialized kernel heap memory.
Allocate the memory using kzalloc() to fix this issue.
Signed-off-by: Mathias Krause <[email protected]>
Cc: Marcel Holtmann <[email protected]>
Cc: Gustavo Padovan <[email protected]>
Cc: Johan Hedberg <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
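The fix boils down to a zeroing allocation, so struct padding never carries stale heap contents. A sketch of the pattern (variable names follow the ioctl handler described above):

/* Sketch: zero the whole buffer, padding included, before filling it. */
size = sizeof(*dl) + dev_num * sizeof(*di);
dl = kzalloc(size, GFP_KERNEL);         /* was kmalloc() */
if (!dl)
        return -ENOMEM;
/* ... fill dl/di fields, then copy_to_user(arg, dl, size) ... */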
_rsvg_css_hand_normalize_length (const RsvgLength * in, gdouble pixels_per_inch,
gdouble width_or_height, gdouble font_size)
{
if (in->factor == '\0')
return in->length;
else if (in->factor == 'p')
return in->length * width_or_height;
else if (in->factor == 'm')
return in->length * font_size;
else if (in->factor == 'x')
return in->length * font_size / 2.;
else if (in->factor == 'i')
return in->length * pixels_per_inch;
return 0;
} | 0 | [
"CWE-20"
] | librsvg | d83e426fff3f6d0fa6042d0930fb70357db24125 | 327,543,577,694,601,400,000,000,000,000,000,000,000 | 16 | io: Use XML_PARSE_NONET
We don't want to load resources off the net.
Bug #691708. |
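With libxml2 this corresponds to passing XML_PARSE_NONET in the parser options so external resources are never fetched over the network. Minimal sketch, assuming buf/len hold the document bytes:

#include <libxml/parser.h>

xmlDocPtr doc = xmlReadMemory(buf, len, "noname.xml", NULL, XML_PARSE_NONET);
if (doc == NULL) {
        /* parse failed */
}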
httpRead2(http_t *http, /* I - HTTP connection */
char *buffer, /* I - Buffer for data */
size_t length) /* I - Maximum number of bytes */
{
ssize_t bytes; /* Bytes read */
#ifdef HAVE_LIBZ
DEBUG_printf(("httpRead2(http=%p, buffer=%p, length=" CUPS_LLFMT ") coding=%d data_encoding=%d data_remaining=" CUPS_LLFMT, (void *)http, (void *)buffer, CUPS_LLCAST length, http->coding, http->data_encoding, CUPS_LLCAST http->data_remaining));
#else
DEBUG_printf(("httpRead2(http=%p, buffer=%p, length=" CUPS_LLFMT ") data_encoding=%d data_remaining=" CUPS_LLFMT, (void *)http, (void *)buffer, CUPS_LLCAST length, http->data_encoding, CUPS_LLCAST http->data_remaining));
#endif /* HAVE_LIBZ */
if (http == NULL || buffer == NULL)
return (-1);
http->activity = time(NULL);
http->error = 0;
if (length <= 0)
return (0);
#ifdef HAVE_LIBZ
if (http->coding >= _HTTP_CODING_GUNZIP)
{
do
{
if (((z_stream *)http->stream)->avail_in > 0)
{
int zerr; /* Decompressor error */
DEBUG_printf(("2httpRead2: avail_in=%d, avail_out=%d",
(int)((z_stream *)http->stream)->avail_in, (int)length));
((z_stream *)http->stream)->next_out = (Bytef *)buffer;
((z_stream *)http->stream)->avail_out = (uInt)length;
if ((zerr = inflate((z_stream *)http->stream, Z_SYNC_FLUSH)) < Z_OK)
{
DEBUG_printf(("2httpRead2: zerr=%d", zerr));
#ifdef DEBUG
http_debug_hex("2httpRead2", (char *)http->sbuffer, (int)((z_stream *)http->stream)->avail_in);
#endif /* DEBUG */
http->error = EIO;
return (-1);
}
bytes = (ssize_t)(length - ((z_stream *)http->stream)->avail_out);
DEBUG_printf(("2httpRead2: avail_in=%d, avail_out=%d, bytes=%d",
((z_stream *)http->stream)->avail_in, ((z_stream *)http->stream)->avail_out,
(int)bytes));
}
else
bytes = 0;
if (bytes == 0)
{
ssize_t buflen = HTTP_MAX_BUFFER - (ssize_t)((z_stream *)http->stream)->avail_in;
/* Additional bytes for buffer */
if (buflen > 0)
{
if (((z_stream *)http->stream)->avail_in > 0 &&
((z_stream *)http->stream)->next_in > http->sbuffer)
memmove(http->sbuffer, ((z_stream *)http->stream)->next_in, ((z_stream *)http->stream)->avail_in);
((z_stream *)http->stream)->next_in = http->sbuffer;
DEBUG_printf(("1httpRead2: Reading up to %d more bytes of data into "
"decompression buffer.", (int)buflen));
if (http->data_remaining > 0)
{
if (buflen > http->data_remaining)
buflen = (ssize_t)http->data_remaining;
bytes = http_read_buffered(http, (char *)http->sbuffer + ((z_stream *)http->stream)->avail_in, (size_t)buflen);
}
else if (http->data_encoding == HTTP_ENCODING_CHUNKED)
bytes = http_read_chunk(http, (char *)http->sbuffer + ((z_stream *)http->stream)->avail_in, (size_t)buflen);
else
bytes = 0;
if (bytes < 0)
return (bytes);
else if (bytes == 0)
break;
DEBUG_printf(("1httpRead2: Adding " CUPS_LLFMT " bytes to "
"decompression buffer.", CUPS_LLCAST bytes));
http->data_remaining -= bytes;
((z_stream *)http->stream)->avail_in += (uInt)bytes;
if (http->data_remaining <= 0 &&
http->data_encoding == HTTP_ENCODING_CHUNKED)
{
/*
* Read the trailing blank line now...
*/
char len[32]; /* Length string */
httpGets(len, sizeof(len), http);
}
bytes = 0;
}
else
return (0);
}
}
while (bytes == 0);
}
else
#endif /* HAVE_LIBZ */
if (http->data_remaining == 0 && http->data_encoding == HTTP_ENCODING_CHUNKED)
{
if ((bytes = http_read_chunk(http, buffer, length)) > 0)
{
http->data_remaining -= bytes;
if (http->data_remaining <= 0)
{
/*
* Read the trailing blank line now...
*/
char len[32]; /* Length string */
httpGets(len, sizeof(len), http);
}
}
}
else if (http->data_remaining <= 0)
{
/*
* No more data to read...
*/
return (0);
}
else
{
DEBUG_printf(("1httpRead2: Reading up to %d bytes into buffer.",
(int)length));
if (length > (size_t)http->data_remaining)
length = (size_t)http->data_remaining;
if ((bytes = http_read_buffered(http, buffer, length)) > 0)
{
http->data_remaining -= bytes;
if (http->data_remaining <= 0 &&
http->data_encoding == HTTP_ENCODING_CHUNKED)
{
/*
* Read the trailing blank line now...
*/
char len[32]; /* Length string */
httpGets(len, sizeof(len), http);
}
}
}
if (
#ifdef HAVE_LIBZ
(http->coding == _HTTP_CODING_IDENTITY ||
(http->coding >= _HTTP_CODING_GUNZIP && ((z_stream *)http->stream)->avail_in == 0)) &&
#endif /* HAVE_LIBZ */
((http->data_remaining <= 0 &&
http->data_encoding == HTTP_ENCODING_LENGTH) ||
(http->data_encoding == HTTP_ENCODING_CHUNKED && bytes == 0)))
{
#ifdef HAVE_LIBZ
if (http->coding >= _HTTP_CODING_GUNZIP)
http_content_coding_finish(http);
#endif /* HAVE_LIBZ */
if (http->state == HTTP_STATE_POST_RECV)
http->state ++;
else if (http->state == HTTP_STATE_GET_SEND ||
http->state == HTTP_STATE_POST_SEND)
http->state = HTTP_STATE_WAITING;
else
http->state = HTTP_STATE_STATUS;
DEBUG_printf(("1httpRead2: End of content, set state to %s.",
httpStateString(http->state)));
}
return (bytes);
} | 0 | [
"CWE-120"
] | cups | f24e6cf6a39300ad0c3726a41a4aab51ad54c109 | 266,266,149,795,889,800,000,000,000,000,000,000,000 | 198 | Fix multiple security/disclosure issues:
- CVE-2019-8696 and CVE-2019-8675: Fixed SNMP buffer overflows (rdar://51685251)
- Fixed IPP buffer overflow (rdar://50035411)
- Fixed memory disclosure issue in the scheduler (rdar://51373853)
- Fixed DoS issues in the scheduler (rdar://51373929) |
void bn_sqr_comba8(BN_ULONG *r, const BN_ULONG *a)
{
BN_ULONG c1,c2,c3;
c1=0;
c2=0;
c3=0;
sqr_add_c(a,0,c1,c2,c3);
r[0]=c1;
c1=0;
sqr_add_c2(a,1,0,c2,c3,c1);
r[1]=c2;
c2=0;
sqr_add_c(a,1,c3,c1,c2);
sqr_add_c2(a,2,0,c3,c1,c2);
r[2]=c3;
c3=0;
sqr_add_c2(a,3,0,c1,c2,c3);
sqr_add_c2(a,2,1,c1,c2,c3);
r[3]=c1;
c1=0;
sqr_add_c(a,2,c2,c3,c1);
sqr_add_c2(a,3,1,c2,c3,c1);
sqr_add_c2(a,4,0,c2,c3,c1);
r[4]=c2;
c2=0;
sqr_add_c2(a,5,0,c3,c1,c2);
sqr_add_c2(a,4,1,c3,c1,c2);
sqr_add_c2(a,3,2,c3,c1,c2);
r[5]=c3;
c3=0;
sqr_add_c(a,3,c1,c2,c3);
sqr_add_c2(a,4,2,c1,c2,c3);
sqr_add_c2(a,5,1,c1,c2,c3);
sqr_add_c2(a,6,0,c1,c2,c3);
r[6]=c1;
c1=0;
sqr_add_c2(a,7,0,c2,c3,c1);
sqr_add_c2(a,6,1,c2,c3,c1);
sqr_add_c2(a,5,2,c2,c3,c1);
sqr_add_c2(a,4,3,c2,c3,c1);
r[7]=c2;
c2=0;
sqr_add_c(a,4,c3,c1,c2);
sqr_add_c2(a,5,3,c3,c1,c2);
sqr_add_c2(a,6,2,c3,c1,c2);
sqr_add_c2(a,7,1,c3,c1,c2);
r[8]=c3;
c3=0;
sqr_add_c2(a,7,2,c1,c2,c3);
sqr_add_c2(a,6,3,c1,c2,c3);
sqr_add_c2(a,5,4,c1,c2,c3);
r[9]=c1;
c1=0;
sqr_add_c(a,5,c2,c3,c1);
sqr_add_c2(a,6,4,c2,c3,c1);
sqr_add_c2(a,7,3,c2,c3,c1);
r[10]=c2;
c2=0;
sqr_add_c2(a,7,4,c3,c1,c2);
sqr_add_c2(a,6,5,c3,c1,c2);
r[11]=c3;
c3=0;
sqr_add_c(a,6,c1,c2,c3);
sqr_add_c2(a,7,5,c1,c2,c3);
r[12]=c1;
c1=0;
sqr_add_c2(a,7,6,c2,c3,c1);
r[13]=c2;
c2=0;
sqr_add_c(a,7,c3,c1,c2);
r[14]=c3;
r[15]=c1;
} | 0 | [
"CWE-310"
] | openssl | a7a44ba55cb4f884c6bc9ceac90072dea38e66d0 | 136,244,921,358,634,130,000,000,000,000,000,000,000 | 74 | Fix for CVE-2014-3570 (with minor bn_asm.c revamp).
Reviewed-by: Emilia Kasper <[email protected]> |
Item *Item_sum_udf_str::copy_or_same(THD* thd)
{
return new (thd->mem_root) Item_sum_udf_str(thd, this);
} | 0 | [
"CWE-120"
] | server | eca207c46293bc72dd8d0d5622153fab4d3fccf1 | 170,308,688,662,488,550,000,000,000,000,000,000,000 | 4 | MDEV-25317 Assertion `scale <= precision' failed in decimal_bin_size And Assertion `scale >= 0 && precision > 0 && scale <= precision' failed in decimal_bin_size_inline/decimal_bin_size.
Precision should be kept below DECIMAL_MAX_SCALE for computations.
It can be bigger in Item_decimal. I'd fix this too but it changes the
existing behaviour, so it is problematic to fix. |
mark_source_chains(const struct xt_table_info *newinfo,
unsigned int valid_hooks, void *entry0,
unsigned int *offsets)
{
unsigned int hook;
/* No recursion; use packet counter to save back ptrs (reset
to 0 as we leave), and comefrom to save source hook bitmask */
for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
unsigned int pos = newinfo->hook_entry[hook];
struct ip6t_entry *e = entry0 + pos;
if (!(valid_hooks & (1 << hook)))
continue;
/* Set initial back pointer. */
e->counters.pcnt = pos;
for (;;) {
const struct xt_standard_target *t
= (void *)ip6t_get_target_c(e);
int visited = e->comefrom & (1 << hook);
if (e->comefrom & (1 << NF_INET_NUMHOOKS))
return 0;
e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));
/* Unconditional return/END. */
if ((unconditional(e) &&
(strcmp(t->target.u.user.name,
XT_STANDARD_TARGET) == 0) &&
t->verdict < 0) || visited) {
unsigned int oldpos, size;
/* Return: backtrack through the last
big jump. */
do {
e->comefrom ^= (1<<NF_INET_NUMHOOKS);
oldpos = pos;
pos = e->counters.pcnt;
e->counters.pcnt = 0;
/* We're at the start. */
if (pos == oldpos)
goto next;
e = entry0 + pos;
} while (oldpos == pos + e->next_offset);
/* Move along one */
size = e->next_offset;
e = entry0 + pos + size;
if (pos + size >= newinfo->size)
return 0;
e->counters.pcnt = pos;
pos += size;
} else {
int newpos = t->verdict;
if (strcmp(t->target.u.user.name,
XT_STANDARD_TARGET) == 0 &&
newpos >= 0) {
/* This a jump; chase it. */
if (!xt_find_jump_offset(offsets, newpos,
newinfo->number))
return 0;
} else {
/* ... this is a fallthru */
newpos = pos + e->next_offset;
if (newpos >= newinfo->size)
return 0;
}
e = entry0 + newpos;
e->counters.pcnt = pos;
pos = newpos;
}
}
next: ;
}
return 1;
} | 0 | [
"CWE-787"
] | linux | b29c457a6511435960115c0f548c4360d5f4801d | 98,217,830,454,979,840,000,000,000,000,000,000,000 | 82 | netfilter: x_tables: fix compat match/target pad out-of-bound write
xt_compat_match/target_from_user doesn't check that zeroing the area
to start of next rule won't write past end of allocated ruleset blob.
Remove this code and zero the entire blob beforehand.
Reported-by: [email protected]
Reported-by: Andy Nguyen <[email protected]>
Fixes: 9fa492cdc160c ("[NETFILTER]: x_tables: simplify compat API")
Signed-off-by: Florian Westphal <[email protected]>
Signed-off-by: Pablo Neira Ayuso <[email protected]> |
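Per the first paragraph of the message, the remedy is to zero the whole translated ruleset blob up front instead of trusting each match/target translation to pad itself. A sketch of that step:

/* Sketch: pre-zero the destination blob, so any per-entry padding that
 * translation leaves unwritten is already 0 and nothing needs to be
 * written past the end of an individual rule. */
entry1 = newinfo->entries;
memset(entry1, 0, newinfo->size);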
static char *tls_socket_get_peer_name(struct socket_context *sock, TALLOC_CTX *mem_ctx)
{
struct tls_context *tls = talloc_get_type(sock->private_data, struct tls_context);
return socket_get_peer_name(tls->socket, mem_ctx);
} | 0 | [] | samba | 22af043d2f20760f27150d7d469c7c7b944c6b55 | 167,345,559,247,091,780,000,000,000,000,000,000,000 | 5 | CVE-2013-4476: s4:libtls: check for safe permissions of tls private key file (key.pem)
If the tls key is not owned by root or does not have mode 0600, samba
will not start up.
Bug: https://bugzilla.samba.org/show_bug.cgi?id=10234
Pair-Programmed-With: Stefan Metzmacher <[email protected]>
Signed-off-by: Björn Baumbach <[email protected]>
Signed-off-by: Stefan Metzmacher <[email protected]>
Reviewed-by: Stefan Metzmacher <[email protected]>
Autobuild-User(master): Karolin Seeger <[email protected]>
Autobuild-Date(master): Mon Nov 11 13:07:16 CET 2013 on sn-devel-104 |
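The described check is the classic stat()-based permission test; a self-contained user-space sketch of the rule stated above (root-owned, mode 0600):

#include <sys/stat.h>

/* Sketch: refuse to use a TLS private key with unsafe permissions. */
static int key_perms_ok(const char *path)
{
        struct stat st;

        if (stat(path, &st) != 0)
                return 0;
        if (st.st_uid != 0)                     /* must be owned by root */
                return 0;
        if ((st.st_mode & 0777) != 0600)        /* must be mode 0600 */
                return 0;
        return 1;
}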
static int tg3_mem_rx_acquire(struct tg3 *tp)
{
unsigned int i, limit;
limit = tp->rxq_cnt;
/* If RSS is enabled, we need a (dummy) producer ring
* set on vector zero. This is the true hw prodring.
*/
if (tg3_flag(tp, ENABLE_RSS))
limit++;
for (i = 0; i < limit; i++) {
struct tg3_napi *tnapi = &tp->napi[i];
if (tg3_rx_prodring_init(tp, &tnapi->prodring))
goto err_out;
/* If multivector RSS is enabled, vector 0
* does not handle rx or tx interrupts.
* Don't allocate any resources for it.
*/
if (!i && tg3_flag(tp, ENABLE_RSS))
continue;
tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
TG3_RX_RCB_RING_BYTES(tp),
&tnapi->rx_rcb_mapping,
GFP_KERNEL);
if (!tnapi->rx_rcb)
goto err_out;
memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
}
return 0;
err_out:
tg3_mem_rx_release(tp);
return -ENOMEM;
} | 0 | [
"CWE-476",
"CWE-119"
] | linux | 715230a44310a8cf66fbfb5a46f9a62a9b2de424 | 60,951,230,090,791,830,000,000,000,000,000,000,000 | 41 | tg3: fix length overflow in VPD firmware parsing
Commit 184b89044fb6e2a74611dafa69b1dce0d98612c6 ("tg3: Use VPD fw version
when present") introduced VPD parsing that contained a potential length
overflow.
Limit the hardware's reported firmware string length (max 255 bytes) to
stay inside the driver's firmware string length (32 bytes). On overflow,
truncate the formatted firmware string instead of potentially overwriting
portions of the tg3 struct.
http://cansecwest.com/slides/2013/PrivateCore%20CSW%202013.pdf
Signed-off-by: Kees Cook <[email protected]>
Reported-by: Oded Horovitz <[email protected]>
Reported-by: Brad Spengler <[email protected]>
Cc: [email protected]
Cc: Matt Carlson <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
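The overflow and its fix can be sketched as clamping the device-reported VPD string length to the destination buffer before copying; tp->fw_ver stands for the 32-byte driver field mentioned above, and the indexing is simplified:

/* Sketch: never trust the length byte read from hardware VPD. */
vpdlen = vpd_data[i + 2];                /* 0..255, device controlled */
if (vpdlen >= sizeof(tp->fw_ver))
        vpdlen = sizeof(tp->fw_ver) - 1; /* truncate instead of overflowing */
memcpy(tp->fw_ver, &vpd_data[i + 3], vpdlen);
tp->fw_ver[vpdlen] = '\0';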
int netlink_set_err(struct sock *ssk, u32 pid, u32 group, int code)
{
struct netlink_set_err_data info;
struct hlist_node *node;
struct sock *sk;
int ret = 0;
info.exclude_sk = ssk;
info.pid = pid;
info.group = group;
/* sk->sk_err wants a positive error value */
info.code = -code;
read_lock(&nl_table_lock);
sk_for_each_bound(sk, node, &nl_table[ssk->sk_protocol].mc_list)
ret += do_one_set_err(sk, &info);
read_unlock(&nl_table_lock);
return ret;
} | 0 | [] | linux-2.6 | 16e5726269611b71c930054ffe9b858c1cea88eb | 209,653,306,038,774,500,000,000,000,000,000,000,000 | 21 | af_unix: dont send SCM_CREDENTIALS by default
Since commit 7361c36c5224 (af_unix: Allow credentials to work across
user and pid namespaces) af_unix performance dropped a lot.
This is because we now take a reference on pid and cred in each write(),
and release them in read(), usually done from another process,
eventually from another cpu. This triggers false sharing.
# Events: 154K cycles
#
# Overhead Command Shared Object Symbol
# ........ ....... .................. .........................
#
10.40% hackbench [kernel.kallsyms] [k] put_pid
8.60% hackbench [kernel.kallsyms] [k] unix_stream_recvmsg
7.87% hackbench [kernel.kallsyms] [k] unix_stream_sendmsg
6.11% hackbench [kernel.kallsyms] [k] do_raw_spin_lock
4.95% hackbench [kernel.kallsyms] [k] unix_scm_to_skb
4.87% hackbench [kernel.kallsyms] [k] pid_nr_ns
4.34% hackbench [kernel.kallsyms] [k] cred_to_ucred
2.39% hackbench [kernel.kallsyms] [k] unix_destruct_scm
2.24% hackbench [kernel.kallsyms] [k] sub_preempt_count
1.75% hackbench [kernel.kallsyms] [k] fget_light
1.51% hackbench [kernel.kallsyms] [k]
__mutex_lock_interruptible_slowpath
1.42% hackbench [kernel.kallsyms] [k] sock_alloc_send_pskb
This patch includes SCM_CREDENTIALS information in a af_unix message/skb
only if requested by the sender, [man 7 unix for details how to include
ancillary data using sendmsg() system call]
Note: This might break buggy applications that expected SCM_CREDENTIAL
from an unaware write() system call, and receiver not using SO_PASSCRED
socket option.
If SOCK_PASSCRED is set on source or destination socket, we still
include credentials for mere write() syscalls.
Performance boost in hackbench : more than 50% gain on a 16 thread
machine (2 quad-core cpus, 2 threads per core)
hackbench 20 thread 2000
4.228 sec instead of 9.102 sec
Signed-off-by: Eric Dumazet <[email protected]>
Acked-by: Tim Chen <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
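The gist of the patch is a helper that attaches credentials only when the sender supplied them or a SOCK_PASSCRED socket is involved; a sketch close to the shape the message describes (UNIXCB and the flag tests follow af_unix conventions, details simplified):

/* Sketch: attach pid/cred to the skb only when someone actually wants
 * SCM_CREDENTIALS, avoiding refcount ping-pong on every write(). */
static void maybe_add_creds(struct sk_buff *skb, const struct socket *sock,
                            const struct sock *other)
{
        if (UNIXCB(skb).cred)
                return;         /* sender already supplied creds */
        if (test_bit(SOCK_PASSCRED, &sock->flags) ||
            !other->sk_socket ||
            test_bit(SOCK_PASSCRED, &other->sk_socket->flags)) {
                UNIXCB(skb).pid  = get_pid(task_tgid(current));
                UNIXCB(skb).cred = get_current_cred();
        }
}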
spnego_gss_wrap_size_limit(
OM_uint32 *minor_status,
const gss_ctx_id_t context_handle,
int conf_req_flag,
gss_qop_t qop_req,
OM_uint32 req_output_size,
OM_uint32 *max_input_size)
{
OM_uint32 ret;
spnego_gss_ctx_id_t sc = (spnego_gss_ctx_id_t)context_handle;
if (sc->ctx_handle == GSS_C_NO_CONTEXT)
return (GSS_S_NO_CONTEXT);
ret = gss_wrap_size_limit(minor_status,
sc->ctx_handle,
conf_req_flag,
qop_req,
req_output_size,
max_input_size);
return (ret);
} | 0 | [
"CWE-18",
"CWE-763"
] | krb5 | b51b33f2bc5d1497ddf5bd107f791c101695000d | 96,874,969,248,965,370,000,000,000,000,000,000,000 | 22 | Fix SPNEGO context aliasing bugs [CVE-2015-2695]
The SPNEGO mechanism currently replaces its context handle with the
mechanism context handle upon establishment, under the assumption that
most GSS functions are only called after context establishment. This
assumption is incorrect, and can lead to aliasing violations for some
programs. Maintain the SPNEGO context structure after context
establishment and refer to it in all GSS methods. Add initiate and
opened flags to the SPNEGO context structure for use in
gss_inquire_context() prior to context establishment.
CVE-2015-2695:
In MIT krb5 1.5 and later, applications which call
gss_inquire_context() on a partially-established SPNEGO context can
cause the GSS-API library to read from a pointer using the wrong type,
generally causing a process crash. This bug may go unnoticed, because
the most common SPNEGO authentication scenario establishes the context
after just one call to gss_accept_sec_context(). Java server
applications using the native JGSS provider are vulnerable to this
bug. A carefully crafted SPNEGO packet might allow the
gss_inquire_context() call to succeed with attacker-determined
results, but applications should not make access control decisions
based on gss_inquire_context() results prior to context establishment.
CVSSv2 Vector: AV:N/AC:M/Au:N/C:N/I:N/A:C/E:POC/RL:OF/RC:C
[[email protected]: several bugfixes, style changes, and edge-case
behavior changes; commit message and CVE description]
ticket: 8244
target_version: 1.14
tags: pullup |
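Structurally, the fix keeps the SPNEGO wrapper context alive after establishment and tracks state in it. A sketch of the added flags and the guard a per-message call can apply (layout illustrative; the initiate/opened flags are named in the message):

typedef struct {
        gss_ctx_id_t    ctx_handle;     /* underlying mechanism context */
        int             initiate;       /* set when we are the initiator */
        int             opened;         /* set once establishment completes */
        /* ... negotiation state ... */
} spnego_gss_ctx_id_rec, *spnego_gss_ctx_id_t;

/* Per-message entry points dereference the wrapper, never the raw
 * mechanism context, so the two types can no longer be aliased: */
spnego_gss_ctx_id_t sc = (spnego_gss_ctx_id_t)context_handle;
if (!sc->opened)
        return GSS_S_NO_CONTEXT;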
static char **fill_envp(struct dhcp_packet *packet)
{
int envc;
int i;
char **envp, **curr;
const char *opt_name;
uint8_t *temp;
uint8_t overload = 0;
#define BITMAP unsigned
#define BBITS (sizeof(BITMAP) * 8)
#define BMASK(i) (1 << (i & (sizeof(BITMAP) * 8 - 1)))
#define FOUND_OPTS(i) (found_opts[(unsigned)i / BBITS])
BITMAP found_opts[256 / BBITS];
memset(found_opts, 0, sizeof(found_opts));
/* We need 6 elements for:
* "interface=IFACE"
* "ip=N.N.N.N" from packet->yiaddr
* "siaddr=IP" from packet->siaddr_nip (unless 0)
* "boot_file=FILE" from packet->file (unless overloaded)
* "sname=SERVER_HOSTNAME" from packet->sname (unless overloaded)
* terminating NULL
*/
envc = 6;
/* +1 element for each option, +2 for subnet option: */
if (packet) {
/* note: do not search for "pad" (0) and "end" (255) options */
//TODO: change logic to scan packet _once_
for (i = 1; i < 255; i++) {
temp = udhcp_get_option(packet, i);
if (temp) {
if (i == DHCP_OPTION_OVERLOAD)
overload |= *temp;
else if (i == DHCP_SUBNET)
envc++; /* for $mask */
envc++;
/*if (i != DHCP_MESSAGE_TYPE)*/
FOUND_OPTS(i) |= BMASK(i);
}
}
}
curr = envp = xzalloc(sizeof(envp[0]) * envc);
*curr = xasprintf("interface=%s", client_config.interface);
putenv(*curr++);
if (!packet)
return envp;
/* Export BOOTP fields. Fields we don't (yet?) export:
* uint8_t op; // always BOOTREPLY
* uint8_t htype; // hardware address type. 1 = 10mb ethernet
* uint8_t hlen; // hardware address length
* uint8_t hops; // used by relay agents only
* uint32_t xid;
* uint16_t secs; // elapsed since client began acquisition/renewal
* uint16_t flags; // only one flag so far: bcast. Never set by server
* uint32_t ciaddr; // client IP (usually == yiaddr. can it be different
* // if during renew server wants to give us different IP?)
* uint32_t gateway_nip; // relay agent IP address
* uint8_t chaddr[16]; // link-layer client hardware address (MAC)
* TODO: export gateway_nip as $giaddr?
*/
/* Most important one: yiaddr as $ip */
*curr = xmalloc(sizeof("ip=255.255.255.255"));
sprint_nip(*curr, "ip=", (uint8_t *) &packet->yiaddr);
putenv(*curr++);
if (packet->siaddr_nip) {
/* IP address of next server to use in bootstrap */
*curr = xmalloc(sizeof("siaddr=255.255.255.255"));
sprint_nip(*curr, "siaddr=", (uint8_t *) &packet->siaddr_nip);
putenv(*curr++);
}
if (!(overload & FILE_FIELD) && packet->file[0]) {
/* watch out for invalid packets */
*curr = xasprintf("boot_file=%."DHCP_PKT_FILE_LEN_STR"s", packet->file);
putenv(*curr++);
}
if (!(overload & SNAME_FIELD) && packet->sname[0]) {
/* watch out for invalid packets */
*curr = xasprintf("sname=%."DHCP_PKT_SNAME_LEN_STR"s", packet->sname);
putenv(*curr++);
}
/* Export known DHCP options */
opt_name = dhcp_option_strings;
i = 0;
while (*opt_name) {
uint8_t code = dhcp_optflags[i].code;
BITMAP *found_ptr = &FOUND_OPTS(code);
BITMAP found_mask = BMASK(code);
if (!(*found_ptr & found_mask))
goto next;
*found_ptr &= ~found_mask; /* leave only unknown options */
temp = udhcp_get_option(packet, code);
*curr = xmalloc_optname_optval(temp, &dhcp_optflags[i], opt_name);
putenv(*curr++);
if (code == DHCP_SUBNET) {
/* Subnet option: make things like "$ip/$mask" possible */
uint32_t subnet;
move_from_unaligned32(subnet, temp);
*curr = xasprintf("mask=%u", mton(subnet));
putenv(*curr++);
}
next:
opt_name += strlen(opt_name) + 1;
i++;
}
/* Export unknown options */
for (i = 0; i < 256;) {
BITMAP bitmap = FOUND_OPTS(i);
if (!bitmap) {
i += BBITS;
continue;
}
if (bitmap & BMASK(i)) {
unsigned len, ofs;
temp = udhcp_get_option(packet, i);
/* udhcp_get_option returns ptr to data portion,
* need to go back to get len
*/
len = temp[-OPT_DATA + OPT_LEN];
*curr = xmalloc(sizeof("optNNN=") + 1 + len*2);
ofs = sprintf(*curr, "opt%u=", i);
*bin2hex(*curr + ofs, (void*) temp, len) = '\0';
putenv(*curr++);
}
i++;
}
return envp;
} | 1 | [
"CWE-125"
] | busybox | 74d9f1ba37010face4bd1449df4d60dd84450b06 | 16,379,697,809,689,600,000,000,000,000,000,000,000 | 135 | udhcpc: when decoding DHCP_SUBNET, ensure it is 4 bytes long
function old new delta
udhcp_run_script 795 801 +6
Signed-off-by: Denys Vlasenko <[email protected]> |
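This is the section's one target=1 (still vulnerable) sample: in fill_envp() above, move_from_unaligned32() reads 4 bytes of DHCP_SUBNET data without checking the option's advertised length, so a shorter option triggers an out-of-bounds read (CWE-125). A sketch of the guard the fix title calls for, reusing the OPT_LEN back-offset already present in the function:

if (code == DHCP_SUBNET && temp[-OPT_DATA + OPT_LEN] == 4) {
        /* Only trust a subnet option that is exactly 4 bytes long. */
        uint32_t subnet;
        move_from_unaligned32(subnet, temp);
        *curr = xasprintf("mask=%u", mton(subnet));
        putenv(*curr++);
}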
f_cosh(typval_T *argvars, typval_T *rettv)
{
float_T f = 0.0;
rettv->v_type = VAR_FLOAT;
if (get_float_arg(argvars, &f) == OK)
rettv->vval.v_float = cosh(f);
else
rettv->vval.v_float = 0.0;
} | 0 | [
"CWE-78"
] | vim | 8c62a08faf89663e5633dc5036cd8695c80f1075 | 114,616,925,685,029,170,000,000,000,000,000,000,000 | 10 | patch 8.1.0881: can execute shell commands in rvim through interfaces
Problem: Can execute shell commands in rvim through interfaces.
Solution: Disable using interfaces in restricted mode. Allow for writing
file with writefile(), histadd() and a few others. |
static int tftp_read_data(struct tftp_session *spt, uint32_t block_nr,
uint8_t *buf, int len)
{
int bytes_read = 0;
if (spt->fd < 0) {
spt->fd = open(spt->filename, O_RDONLY | O_BINARY);
}
if (spt->fd < 0) {
return -1;
}
if (len) {
if (lseek(spt->fd, block_nr * spt->block_size, SEEK_SET) == (off_t)-1) {
return -1;
}
bytes_read = read(spt->fd, buf, len);
}
return bytes_read;
} | 0 | [] | libslirp | 3f17948137155f025f7809fdc38576d5d2451c3d | 22,834,260,929,243,480,000,000,000,000,000,000,000 | 23 | tftp: check tftp_input buffer size
Fixes: CVE-2021-3595
Fixes: https://gitlab.freedesktop.org/slirp/libslirp/-/issues/46
Signed-off-by: Marc-André Lureau <[email protected]> |
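The title implies a length check before parsing; a heavily simplified sketch, where the struct and variable names are assumptions rather than libslirp's actual ones:

/* Sketch: validate the datagram before touching any header fields. */
if (pktlen < (int)sizeof(struct tftp_hdr))      /* hypothetical struct */
        return;         /* drop packets too short for opcode + block no. */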
static inline int should_follow_link(struct nameidata *nd, struct path *link,
int follow,
struct inode *inode, unsigned seq)
{
if (likely(!d_is_symlink(link->dentry)))
return 0;
if (!follow)
return 0;
/* make sure that d_is_symlink above matches inode */
if (nd->flags & LOOKUP_RCU) {
if (read_seqcount_retry(&link->dentry->d_seq, seq))
return -ECHILD;
}
return pick_link(nd, link, inode, seq);
} | 0 | [
"CWE-284"
] | linux | 9409e22acdfc9153f88d9b1ed2bd2a5b34d2d3ca | 6,733,374,793,926,959,000,000,000,000,000,000,000 | 15 | vfs: rename: check backing inode being equal
If a file is renamed to a hardlink of itself, POSIX specifies that rename(2)
should do nothing and return success.
This condition is checked in vfs_rename(). However it won't detect hard
links on overlayfs where these are given separate inodes on the overlayfs
layer.
Overlayfs itself detects this condition and returns success without doing
anything, but then vfs_rename() will proceed as if this was a successful
rename (detach_mounts(), d_move()).
The correct thing to do is to detect this condition before even calling
into overlayfs. This patch does this by calling vfs_select_inode() to get
the underlying inodes.
Signed-off-by: Miklos Szeredi <[email protected]>
Cc: <[email protected]> # v4.2+ |
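A sketch of the early check the message describes, performed before detach_mounts()/d_move(); vfs_select_inode() resolves through overlayfs to the backing inode:

/* Sketch: treat rename onto a hardlink of itself as a successful no-op. */
struct inode *source = vfs_select_inode(old_dentry, 0);
struct inode *target = vfs_select_inode(new_dentry, 0);

if (source == target)
        return 0;       /* same backing inode: POSIX says do nothing */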
read_indent(const char *input, int indent, int size, int in_index, int *out_index, char *output)
{
int k = 0, j;
while (in_index < size) {
if (input[in_index] == ' ') {
k++;
} else if (input[in_index] == '\t') {
/* RFC 6020 6.1.3 tab character is treated as 8 space characters */
k += 8;
} else if (input[in_index] == '\\' && input[in_index + 1] == 't') {
/* RFC 6020 6.1.3 tab character is treated as 8 space characters */
k += 8;
++in_index;
} else {
break;
}
++in_index;
if (k >= indent) {
for (j = k - indent; j > 0; --j) {
output[*out_index] = ' ';
if (j > 1) {
++(*out_index);
}
}
break;
}
}
return in_index - 1;
} | 0 | [
"CWE-415"
] | libyang | d9feacc4a590d35dbc1af21caf9080008b4450ed | 205,042,427,006,769,280,000,000,000,000,000,000,000 | 30 | yang parser BUGFIX double free
Fixes #742 |
snmp_out_toggle_options_usage(const char *lead, FILE * outf)
{
fprintf(outf, "%s0: print leading 0 for single-digit hex characters\n", lead);
fprintf(outf, "%sa: print all strings in ascii format\n", lead);
fprintf(outf, "%sb: do not break OID indexes down\n", lead);
fprintf(outf, "%se: print enums numerically\n", lead);
fprintf(outf, "%sE: escape quotes in string indices\n", lead);
fprintf(outf, "%sf: print full OIDs on output\n", lead);
fprintf(outf, "%sn: print OIDs numerically\n", lead);
fprintf(outf, "%sp PRECISION: display floating point values with specified PRECISION (printf format string)\n", lead);
fprintf(outf, "%sq: quick print for easier parsing\n", lead);
fprintf(outf, "%sQ: quick print with equal-signs\n", lead); /* @@JDW */
fprintf(outf, "%ss: print only last symbolic element of OID\n", lead);
fprintf(outf, "%sS: print MIB module-id plus last element\n", lead);
fprintf(outf, "%st: print timeticks unparsed as numeric integers\n",
lead);
fprintf(outf,
"%sT: print human-readable text along with hex strings\n",
lead);
fprintf(outf, "%su: print OIDs using UCD-style prefix suppression\n",
lead);
fprintf(outf, "%sU: don't print units\n", lead);
fprintf(outf, "%sv: print values only (not OID = value)\n", lead);
fprintf(outf, "%sx: print all strings in hex format\n", lead);
fprintf(outf, "%sX: extended index format\n", lead);
} | 0 | [
"CWE-59",
"CWE-61"
] | net-snmp | 4fd9a450444a434a993bc72f7c3486ccce41f602 | 194,776,689,532,331,480,000,000,000,000,000,000,000 | 26 | CHANGES: snmpd: Stop reading and writing the mib_indexes/* files
Caching directory contents is something the operating system should do
and is not something Net-SNMP should do. Instead of storing a copy of
the directory contents in ${tmp_dir}/mib_indexes/${n}, always scan a
MIB directory. |
int dev_close_many(struct list_head *head, bool unlink)
{
struct net_device *dev, *tmp;
/* Remove the devices that don't need to be closed */
list_for_each_entry_safe(dev, tmp, head, close_list)
if (!(dev->flags & IFF_UP))
list_del_init(&dev->close_list);
__dev_close_many(head);
list_for_each_entry_safe(dev, tmp, head, close_list) {
rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
call_netdevice_notifiers(NETDEV_DOWN, dev);
if (unlink)
list_del_init(&dev->close_list);
}
return 0;
} | 0 | [
"CWE-400",
"CWE-703"
] | linux | fac8e0f579695a3ecbc4d3cac369139d7f819971 | 301,822,555,546,934,500,000,000,000,000,000,000,000 | 20 | tunnels: Don't apply GRO to multiple layers of encapsulation.
When drivers express support for TSO of encapsulated packets, they
only mean that they can do it for one layer of encapsulation.
Supporting additional levels would mean updating, at a minimum,
more IP length fields and they are unaware of this.
No encapsulation device expresses support for handling offloaded
encapsulated packets, so we won't generate these types of frames
in the transmit path. However, GRO doesn't have a check for
multiple levels of encapsulation and will attempt to build them.
UDP tunnel GRO actually does prevent this situation but it only
handles multiple UDP tunnels stacked on top of each other. This
generalizes that solution to prevent any kind of tunnel stacking
that would cause problems.
Fixes: bf5a755f ("net-gre-gro: Add GRE support to the GRO stack")
Signed-off-by: Jesse Gross <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
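The generalized guard is a single flag in the GRO control block, set on entering the first tunnel layer; the kernel fix uses an encap_mark bit in NAPI_GRO_CB for this (sketch of the check each tunnel GRO receive path performs):

/* Sketch: refuse to aggregate a second level of encapsulation. */
if (NAPI_GRO_CB(skb)->encap_mark)
        goto out;                       /* already inside a tunnel */
NAPI_GRO_CB(skb)->encap_mark = 1;       /* claim this encapsulation level */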
void gdImageWBMPCtx (gdImagePtr image, int fg, gdIOCtx * out)
{
_gdImageWBMPCtx(image, fg, out);
} | 0 | [
"CWE-415"
] | php-src | 089f7c0bc28d399b0420aa6ef058e4c1c120b2ae | 234,098,661,658,716,050,000,000,000,000,000,000,000 | 4 | Sync with upstream
Even though libgd/libgd#492 is not a relevant bug fix for PHP, since
the binding doesn't use the `gdImage*Ptr()` functions at all, we're
porting the fix to stay in sync here. |