func (string, lengths 0 – 484k) | target (int64, 0 – 1) | cwe (list, lengths 0 – 4) | project (string, 799 classes) | commit_id (string, length 40) | hash (float64, ≈1.2e24 – 3.4e29) | size (int64, 1 – 24k) | message (string, lengths 0 – 13.3k)
---|---|---|---|---|---|---|---
TfLiteRegistration* Register_MEAN() {
#ifdef USE_NEON
return Register_MEAN_OPT();
#else
return Register_MEAN_REF();
#endif
}
| 0 |
[
"CWE-125",
"CWE-787"
] |
tensorflow
|
1970c2158b1ffa416d159d03c3370b9a462aee35
| 317,608,484,640,466,170,000,000,000,000,000,000,000 | 7 |
[tflite]: Insert `nullptr` checks when obtaining tensors.
As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages.
We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`).
PiperOrigin-RevId: 332521299
Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
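A rough sketch of the pattern this message describes, using simplified stand-in types and a stand-in macro (the real change guards `tflite::GetInput` and friends with `TF_LITE_ENSURE`-style checks):

```c
#include <stddef.h>

/* Simplified stand-ins; the real code uses TfLiteContext/TfLiteTensor
 * and a TF_LITE_ENSURE-style macro. */
typedef struct { int data; } Tensor;
typedef struct { Tensor *inputs; int num_inputs; } Context;

#define ENSURE_NOT_NULL(p) do { if ((p) == NULL) return -1; } while (0)

/* After the refactoring this may return NULL, e.g. for a bad index. */
static Tensor *get_input(Context *ctx, int idx) {
  return (idx >= 0 && idx < ctx->num_inputs) ? &ctx->inputs[idx] : NULL;
}

static int prepare(Context *ctx) {
  Tensor *input = get_input(ctx, 0);
  ENSURE_NOT_NULL(input);  /* the newly inserted nullptr check */
  /* ... input is safe to dereference from here on ... */
  return 0;
}
```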
|
ofpacts_parse_actions(const char *s, struct ofpbuf *ofpacts,
enum ofputil_protocol *usable_protocols)
{
return ofpacts_parse_copy(s, ofpacts, usable_protocols, false, 0);
}
| 0 |
[
"CWE-125"
] |
ovs
|
9237a63c47bd314b807cda0bd2216264e82edbe8
| 25,396,815,448,136,630,000,000,000,000,000,000,000 | 5 |
ofp-actions: Avoid buffer overread in BUNDLE action decoding.
Reported-at: https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=9052
Signed-off-by: Ben Pfaff <[email protected]>
Acked-by: Justin Pettit <[email protected]>
|
static void gp_handle_query(struct gp_workers *w, struct gp_query *q)
{
struct gp_call_ctx gpcall = { 0 };
uint8_t *buffer;
size_t buflen;
int ret;
/* find service */
gpcall.gpctx = w->gpctx;
gpcall.service = gp_creds_match_conn(w->gpctx, q->conn);
if (!gpcall.service) {
q->status = GP_QUERY_ERR;
return;
}
gpcall.connection = q->conn;
ret = gp_rpc_process_call(&gpcall,
q->buffer, q->buflen,
&buffer, &buflen);
if (ret) {
q->status = GP_QUERY_ERR;
} else {
q->status = GP_QUERY_OUT;
free(q->buffer);
q->buffer = buffer;
q->buflen = buflen;
}
if (gpcall.destroy_callback) {
gpcall.destroy_callback(gpcall.destroy_callback_data);
}
}
| 0 |
[
"CWE-667"
] |
gssproxy
|
cb761412e299ef907f22cd7c4146d50c8a792003
| 12,963,937,488,357,503,000,000,000,000,000,000,000 | 32 |
Unlock cond_mutex before pthread exit in gp_worker_main()
Signed-off-by: GuiYao <[email protected]>
[[email protected]: whitespace, tweak commit message]
Reviewed-by: Robbie Harwood <[email protected]>
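The CWE-667 tag marks improper locking; the fix named in the message follows the standard rule of releasing a held mutex before terminating the thread. A generic sketch (not the gssproxy source):

```c
#include <pthread.h>

static pthread_mutex_t cond_mutex = PTHREAD_MUTEX_INITIALIZER;

static void *worker_main(void *arg) {
    (void)arg;
    pthread_mutex_lock(&cond_mutex);
    /* ... wait on the condition variable, pick up work ... */
    pthread_mutex_unlock(&cond_mutex); /* unlock before exiting, or the */
    pthread_exit(NULL);                /* mutex stays held forever      */
}
```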
|
struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
{
if (x86_pmu.guest_get_msrs)
return x86_pmu.guest_get_msrs(nr);
*nr = 0;
return NULL;
}
| 0 |
[
"CWE-20",
"CWE-401"
] |
linux
|
f1923820c447e986a9da0fc6bf60c1dccdf0408e
| 272,026,713,722,280,970,000,000,000,000,000,000,000 | 7 |
perf/x86: Fix offcore_rsp valid mask for SNB/IVB
The valid mask for both offcore_response_0 and
offcore_response_1 was wrong for SNB/SNB-EP,
IVB/IVB-EP. It was possible to write to
reserved bit and cause a GP fault crashing
the kernel.
This patch fixes the problem by correctly marking the
reserved bits in the valid mask for all the processors
mentioned above.
A distinction between desktop and server parts is introduced
because bits 24-30 are only available on the server parts.
This version of the patch is just a rebase to perf/urgent tree
and should apply to older kernels as well.
Signed-off-by: Stephane Eranian <[email protected]>
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Signed-off-by: Ingo Molnar <[email protected]>
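The shape of the fix, sketched with illustrative bit values (the real SNB/IVB masks differ): reserved bits are excluded from the valid mask, so a write touching them is rejected before it can reach the MSR and trigger a GP fault.

```c
#include <stdbool.h>
#include <stdint.h>

/* Illustrative masks only; not the real offcore_response layout. */
#define OFFCORE_COMMON_BITS 0x00ffffffULL /* bits 0-23: all parts    */
#define OFFCORE_SERVER_BITS 0x7f000000ULL /* bits 24-30: server only */

static bool offcore_write_ok(uint64_t val, bool is_server_part) {
    uint64_t valid = OFFCORE_COMMON_BITS;
    if (is_server_part)
        valid |= OFFCORE_SERVER_BITS; /* desktop parts lack bits 24-30 */
    return (val & ~valid) == 0;       /* reject reserved-bit writes    */
}
```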
|
main(int argc, char** argv)
{
int ret = 0;
int chansrv_pid = 0;
int wm_pid = 0;
int x_pid = 0;
int lerror = 0;
char exe_path[262];
g_init("xrdp-sessvc");
g_memset(exe_path,0,sizeof(exe_path));
if (argc < 3)
{
g_writeln("xrdp-sessvc: exiting, not enough parameters");
return 1;
}
g_signal_kill(term_signal_handler); /* SIGKILL */
g_signal_terminate(term_signal_handler); /* SIGTERM */
g_signal_user_interrupt(term_signal_handler); /* SIGINT */
g_signal_pipe(nil_signal_handler); /* SIGPIPE */
x_pid = g_atoi(argv[1]);
wm_pid = g_atoi(argv[2]);
g_writeln("xrdp-sessvc: waiting for X (pid %d) and WM (pid %d)",
x_pid, wm_pid);
/* run xrdp-chansrv as a separate process */
chansrv_pid = g_fork();
if (chansrv_pid == -1)
{
g_writeln("xrdp-sessvc: fork error");
return 1;
}
else if (chansrv_pid == 0) /* child */
{
g_set_current_dir(XRDP_SBIN_PATH);
g_snprintf(exe_path, 261, "%s/xrdp-chansrv", XRDP_SBIN_PATH);
g_execlp3(exe_path, "xrdp-chansrv", 0);
/* should not get here */
g_writeln("xrdp-sessvc: g_execlp3() failed");
return 1;
}
lerror = 0;
/* wait for window manager to get done */
ret = g_waitpid(wm_pid);
while ((ret == 0) && !g_term)
{
ret = g_waitpid(wm_pid);
g_sleep(1);
}
if (ret < 0)
{
lerror = g_get_errno();
}
g_writeln("xrdp-sessvc: WM is dead (waitpid said %d, errno is %d) "
"exiting...", ret, lerror);
/* kill channel server */
g_writeln("xrdp-sessvc: stopping channel server");
g_sigterm(chansrv_pid);
ret = g_waitpid(chansrv_pid);
while ((ret == 0) && !g_term)
{
ret = g_waitpid(chansrv_pid);
g_sleep(1);
}
chansrv_cleanup(chansrv_pid);
/* kill X server */
g_writeln("xrdp-sessvc: stopping X server");
g_sigterm(x_pid);
ret = g_waitpid(x_pid);
while ((ret == 0) && !g_term)
{
ret = g_waitpid(x_pid);
g_sleep(1);
}
g_writeln("xrdp-sessvc: clean exit");
g_deinit();
return 0;
}
| 1 |
[] |
xrdp
|
cadad6e181d2a67698e5eb7cacd6b233ae29eb97
| 244,836,906,433,361,570,000,000,000,000,000,000,000 | 78 |
/tmp cleanup
|
multi_instance_string(const struct multi_instance *mi, bool null, struct gc_arena *gc)
{
if (mi)
{
struct buffer out = alloc_buf_gc(MULTI_PREFIX_MAX_LENGTH, gc);
const char *cn = tls_common_name(mi->context.c2.tls_multi, true);
if (cn)
{
buf_printf(&out, "%s/", cn);
}
buf_printf(&out, "%s", mroute_addr_print(&mi->real, gc));
return BSTR(&out);
}
else if (null)
{
return NULL;
}
else
{
return "UNDEF";
}
}
| 0 |
[
"CWE-362",
"CWE-476"
] |
openvpn
|
37bc691e7d26ea4eb61a8a434ebd7a9ae76225ab
| 236,896,557,425,034,950,000,000,000,000,000,000,000 | 23 |
Fix illegal client float (CVE-2020-11810)
There is a time frame between allocating peer-id and initializing data
channel key (which is performed on receiving push request or on async
push-reply) in which the existing peer-id float checks do not work right.
If a "rogue" data channel packet arrives during that time frame from
another address and with same peer-id, this would cause client to float
to that new address. This is because:
- tls_pre_decrypt() sets packet length to zero if
data channel key has not been initialized, which leads to
- openvpn_decrypt() returns true if packet length is zero,
which leads to
- process_incoming_link_part1() returns true, which
calls multi_process_float(), which commits float
Note that problem doesn't happen when data channel key is initialized,
since in this case openvpn_decrypt() returns false.
The net effect of this behaviour is that the VPN session for the
"victim client" is broken. Since the "attacker client" does not have
suitable keys, it can not inject or steal VPN traffic from the other
session. The time window is small and it can not be used to attack
a specific client's session, unless some other way is found to make it
disconnect and reconnect first.
CVE-2020-11810 has been assigned to acknowledge this risk.
Fix illegal float by adding buffer length check ("is this packet still
considered valid") before calling multi_process_float().
Trac: #1272
CVE: 2020-11810
Signed-off-by: Lev Stipakov <[email protected]>
Acked-by: Arne Schwabe <[email protected]>
Acked-by: Antonio Quartulli <[email protected]>
Acked-by: Gert Doering <[email protected]>
Message-Id: <[email protected]>
URL: https://www.mail-archive.com/[email protected]/msg19720.html
Signed-off-by: Gert Doering <[email protected]>
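The fix described above boils down to refusing to commit a float for a packet whose payload was zeroed earlier in the pipeline. In sketch form, with simplified names (not the actual OpenVPN call chain):

```c
#include <stdbool.h>

struct buffer { unsigned char *data; int len; };

/* tls_pre_decrypt() zeroes buf->len when the data channel key is not yet
 * initialized; such packets must not be allowed to float the client. */
static void process_incoming(struct buffer *buf, bool addr_changed,
                             void (*commit_float)(void)) {
    /* "is this packet still considered valid" -- the added length check */
    if (addr_changed && buf->len > 0)
        commit_float();
}
```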
|
static size_t ip6gre_get_size(const struct net_device *dev)
{
return
/* IFLA_GRE_LINK */
nla_total_size(4) +
/* IFLA_GRE_IFLAGS */
nla_total_size(2) +
/* IFLA_GRE_OFLAGS */
nla_total_size(2) +
/* IFLA_GRE_IKEY */
nla_total_size(4) +
/* IFLA_GRE_OKEY */
nla_total_size(4) +
/* IFLA_GRE_LOCAL */
nla_total_size(sizeof(struct in6_addr)) +
/* IFLA_GRE_REMOTE */
nla_total_size(sizeof(struct in6_addr)) +
/* IFLA_GRE_TTL */
nla_total_size(1) +
/* IFLA_GRE_ENCAP_LIMIT */
nla_total_size(1) +
/* IFLA_GRE_FLOWINFO */
nla_total_size(4) +
/* IFLA_GRE_FLAGS */
nla_total_size(4) +
/* IFLA_GRE_ENCAP_TYPE */
nla_total_size(2) +
/* IFLA_GRE_ENCAP_FLAGS */
nla_total_size(2) +
/* IFLA_GRE_ENCAP_SPORT */
nla_total_size(2) +
/* IFLA_GRE_ENCAP_DPORT */
nla_total_size(2) +
0;
}
| 0 |
[
"CWE-125"
] |
net
|
7892032cfe67f4bde6fc2ee967e45a8fbaf33756
| 197,124,358,852,559,750,000,000,000,000,000,000,000 | 35 |
ip6_gre: fix ip6gre_err() invalid reads
Andrey Konovalov reported out of bound accesses in ip6gre_err()
If GRE flags contains GRE_KEY, the following expression
*(((__be32 *)p) + (grehlen / 4) - 1)
accesses data ~40 bytes after the expected point, since
grehlen includes the size of IPv6 headers.
Let's use a "struct gre_base_hdr *greh" pointer to make this
code more readable.
p[1] becomes greh->protocol.
grehlen is the GRE header length.
Fixes: c12b395a4664 ("gre: Support GRE over IPv6")
Signed-off-by: Eric Dumazet <[email protected]>
Reported-by: Andrey Konovalov <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
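The overread can be avoided by addressing fields through a header struct and computing offsets from the GRE header alone, not from a length that accidentally includes the IPv6 headers. A simplified sketch:

```c
#include <stdint.h>
#include <string.h>

/* Simplified GRE base header; the kernel's struct gre_base_hdr is similar. */
struct gre_base_hdr {
    uint16_t flags;
    uint16_t protocol; /* replaces p[1]-style raw indexing */
};

/* The key follows the 4-byte base header (plus a 4-byte checksum word
 * when present) -- offsets relative to the GRE header only. */
static uint32_t gre_read_key(const uint8_t *p, int has_csum) {
    const struct gre_base_hdr *greh = (const struct gre_base_hdr *)p;
    const uint8_t *key_ptr = (const uint8_t *)(greh + 1) + (has_csum ? 4 : 0);
    uint32_t key;
    memcpy(&key, key_ptr, sizeof(key));
    return key;
}
```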
|
void blk_mq_freeze_queue_start(struct request_queue *q)
{
int freeze_depth;
freeze_depth = atomic_inc_return(&q->mq_freeze_depth);
if (freeze_depth == 1) {
percpu_ref_kill(&q->mq_usage_counter);
blk_mq_run_hw_queues(q, false);
}
}
| 0 |
[
"CWE-362",
"CWE-264"
] |
linux
|
0048b4837affd153897ed1222283492070027aa9
| 32,716,953,025,427,890,000,000,000,000,000,000,000 | 10 |
blk-mq: fix race between timeout and freeing request
Inside timeout handler, blk_mq_tag_to_rq() is called
to retrieve the request from one tag. This way is obviously
wrong because the request can be freed any time and some
fields of the request can't be trusted, then kernel oops
might be triggered[1].
Currently wrt. blk_mq_tag_to_rq(), the only special case is
that the flush request can share same tag with the request
cloned from, and the two requests can't be active at the same
time, so this patch fixes the above issue by updating tags->rqs[tag]
with the active request(either flush rq or the request cloned
from) of the tag.
Also blk_mq_tag_to_rq() gets much simplified with this patch.
Given blk_mq_tag_to_rq() is mainly for drivers and the caller must
make sure the request can't be freed, so in bt_for_each() this
helper is replaced with tags->rqs[tag].
[1] kernel oops log
[ 439.696220] BUG: unable to handle kernel NULL pointer dereference at 0000000000000158^M
[ 439.697162] IP: [<ffffffff812d89ba>] blk_mq_tag_to_rq+0x21/0x6e^M
[ 439.700653] PGD 7ef765067 PUD 7ef764067 PMD 0 ^M
[ 439.700653] Oops: 0000 [#1] PREEMPT SMP DEBUG_PAGEALLOC ^M
[ 439.700653] Dumping ftrace buffer:^M
[ 439.700653] (ftrace buffer empty)^M
[ 439.700653] Modules linked in: nbd ipv6 kvm_intel kvm serio_raw^M
[ 439.700653] CPU: 6 PID: 2779 Comm: stress-ng-sigfd Not tainted 4.2.0-rc5-next-20150805+ #265^M
[ 439.730500] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Bochs 01/01/2011^M
[ 439.730500] task: ffff880605308000 ti: ffff88060530c000 task.ti: ffff88060530c000^M
[ 439.730500] RIP: 0010:[<ffffffff812d89ba>] [<ffffffff812d89ba>] blk_mq_tag_to_rq+0x21/0x6e^M
[ 439.730500] RSP: 0018:ffff880819203da0 EFLAGS: 00010283^M
[ 439.730500] RAX: ffff880811b0e000 RBX: ffff8800bb465f00 RCX: 0000000000000002^M
[ 439.730500] RDX: 0000000000000000 RSI: 0000000000000202 RDI: 0000000000000000^M
[ 439.730500] RBP: ffff880819203db0 R08: 0000000000000002 R09: 0000000000000000^M
[ 439.730500] R10: 0000000000000000 R11: 0000000000000000 R12: 0000000000000202^M
[ 439.730500] R13: ffff880814104800 R14: 0000000000000002 R15: ffff880811a2ea00^M
[ 439.730500] FS: 00007f165b3f5740(0000) GS:ffff880819200000(0000) knlGS:0000000000000000^M
[ 439.730500] CS: 0010 DS: 0000 ES: 0000 CR0: 000000008005003b^M
[ 439.730500] CR2: 0000000000000158 CR3: 00000007ef766000 CR4: 00000000000006e0^M
[ 439.730500] Stack:^M
[ 439.730500] 0000000000000008 ffff8808114eed90 ffff880819203e00 ffffffff812dc104^M
[ 439.755663] ffff880819203e40 ffffffff812d9f5e 0000020000000000 ffff8808114eed80^M
[ 439.755663] Call Trace:^M
[ 439.755663] <IRQ> ^M
[ 439.755663] [<ffffffff812dc104>] bt_for_each+0x6e/0xc8^M
[ 439.755663] [<ffffffff812d9f5e>] ? blk_mq_rq_timed_out+0x6a/0x6a^M
[ 439.755663] [<ffffffff812d9f5e>] ? blk_mq_rq_timed_out+0x6a/0x6a^M
[ 439.755663] [<ffffffff812dc1b3>] blk_mq_tag_busy_iter+0x55/0x5e^M
[ 439.755663] [<ffffffff812d88b4>] ? blk_mq_bio_to_request+0x38/0x38^M
[ 439.755663] [<ffffffff812d8911>] blk_mq_rq_timer+0x5d/0xd4^M
[ 439.755663] [<ffffffff810a3e10>] call_timer_fn+0xf7/0x284^M
[ 439.755663] [<ffffffff810a3d1e>] ? call_timer_fn+0x5/0x284^M
[ 439.755663] [<ffffffff812d88b4>] ? blk_mq_bio_to_request+0x38/0x38^M
[ 439.755663] [<ffffffff810a46d6>] run_timer_softirq+0x1ce/0x1f8^M
[ 439.755663] [<ffffffff8104c367>] __do_softirq+0x181/0x3a4^M
[ 439.755663] [<ffffffff8104c76e>] irq_exit+0x40/0x94^M
[ 439.755663] [<ffffffff81031482>] smp_apic_timer_interrupt+0x33/0x3e^M
[ 439.755663] [<ffffffff815559a4>] apic_timer_interrupt+0x84/0x90^M
[ 439.755663] <EOI> ^M
[ 439.755663] [<ffffffff81554350>] ? _raw_spin_unlock_irq+0x32/0x4a^M
[ 439.755663] [<ffffffff8106a98b>] finish_task_switch+0xe0/0x163^M
[ 439.755663] [<ffffffff8106a94d>] ? finish_task_switch+0xa2/0x163^M
[ 439.755663] [<ffffffff81550066>] __schedule+0x469/0x6cd^M
[ 439.755663] [<ffffffff8155039b>] schedule+0x82/0x9a^M
[ 439.789267] [<ffffffff8119b28b>] signalfd_read+0x186/0x49a^M
[ 439.790911] [<ffffffff8106d86a>] ? wake_up_q+0x47/0x47^M
[ 439.790911] [<ffffffff811618c2>] __vfs_read+0x28/0x9f^M
[ 439.790911] [<ffffffff8117a289>] ? __fget_light+0x4d/0x74^M
[ 439.790911] [<ffffffff811620a7>] vfs_read+0x7a/0xc6^M
[ 439.790911] [<ffffffff8116292b>] SyS_read+0x49/0x7f^M
[ 439.790911] [<ffffffff81554c17>] entry_SYSCALL_64_fastpath+0x12/0x6f^M
[ 439.790911] Code: 48 89 e5 e8 a9 b8 e7 ff 5d c3 0f 1f 44 00 00 55 89
f2 48 89 e5 41 54 41 89 f4 53 48 8b 47 60 48 8b 1c d0 48 8b 7b 30 48 8b
53 38 <48> 8b 87 58 01 00 00 48 85 c0 75 09 48 8b 97 88 0c 00 00 eb 10
^M
[ 439.790911] RIP [<ffffffff812d89ba>] blk_mq_tag_to_rq+0x21/0x6e^M
[ 439.790911] RSP <ffff880819203da0>^M
[ 439.790911] CR2: 0000000000000158^M
[ 439.790911] ---[ end trace d40af58949325661 ]---^M
Cc: <[email protected]>
Signed-off-by: Ming Lei <[email protected]>
Signed-off-by: Jens Axboe <[email protected]>
|
CURLcode Curl_close(struct SessionHandle *data)
{
struct Curl_multi *m;
if(!data)
return CURLE_OK;
Curl_expire(data, 0); /* shut off timers */
m = data->multi;
if(m)
/* This handle is still part of a multi handle, take care of this first
and detach this handle from there. */
curl_multi_remove_handle(data->multi, data);
if(data->multi_easy)
/* when curl_easy_perform() is used, it creates its own multi handle to
use and this is the one */
curl_multi_cleanup(data->multi_easy);
/* Destroy the timeout list that is held in the easy handle. It is
/normally/ done by curl_multi_remove_handle() but this is "just in
case" */
if(data->state.timeoutlist) {
Curl_llist_destroy(data->state.timeoutlist, NULL);
data->state.timeoutlist = NULL;
}
data->magic = 0; /* force a clear AFTER the possibly enforced removal from
the multi handle, since that function uses the magic
field! */
if(data->state.rangestringalloc)
free(data->state.range);
/* Free the pathbuffer */
Curl_safefree(data->state.pathbuffer);
data->state.path = NULL;
/* freed here just in case DONE wasn't called */
Curl_free_request_state(data);
/* Close down all open SSL info and sessions */
Curl_ssl_close_all(data);
Curl_safefree(data->state.first_host);
Curl_safefree(data->state.scratch);
Curl_ssl_free_certinfo(data);
/* Cleanup possible redirect junk */
free(data->req.newurl);
data->req.newurl = NULL;
if(data->change.referer_alloc) {
Curl_safefree(data->change.referer);
data->change.referer_alloc = FALSE;
}
data->change.referer = NULL;
if(data->change.url_alloc) {
Curl_safefree(data->change.url);
data->change.url_alloc = FALSE;
}
data->change.url = NULL;
Curl_safefree(data->state.headerbuff);
Curl_flush_cookies(data, 1);
Curl_digest_cleanup(data);
Curl_safefree(data->info.contenttype);
Curl_safefree(data->info.wouldredirect);
/* this destroys the channel and we cannot use it anymore after this */
Curl_resolver_cleanup(data->state.resolver);
Curl_convert_close(data);
/* No longer a dirty share, if it exists */
if(data->share) {
Curl_share_lock(data, CURL_LOCK_DATA_SHARE, CURL_LOCK_ACCESS_SINGLE);
data->share->dirty--;
Curl_share_unlock(data, CURL_LOCK_DATA_SHARE);
}
Curl_freeset(data);
free(data);
return CURLE_OK;
}
| 0 |
[
"CWE-264"
] |
curl
|
31be461c6b659312100c47be6ddd5f0f569290f6
| 276,821,503,621,952,660,000,000,000,000,000,000,000 | 90 |
ConnectionExists: for NTLM re-use, require credentials to match
CVE-2015-3143
Bug: http://curl.haxx.se/docs/adv_20150422A.html
Reported-by: Paras Sethia
|
HttpTransact::OSDNSLookup(State* s)
{
static int max_dns_lookups = 3 + s->http_config_param->num_url_expansions;
++s->dns_info.attempts;
DebugTxn("http_trans", "[HttpTransact::OSDNSLookup] This was attempt %d", s->dns_info.attempts);
ink_assert(s->dns_info.looking_up == ORIGIN_SERVER);
// detect whether we are about to self loop. the client may have
// specified the proxy as the origin server (badness).
// Check if this procedure is already done - YTS Team, yamsat
if (!s->request_will_not_selfloop) {
if (will_this_request_self_loop(s)) {
DebugTxn("http_trans", "[OSDNSLookup] request will selfloop - bailing out");
SET_VIA_STRING(VIA_DETAIL_TUNNEL, VIA_DETAIL_TUNNEL_NO_FORWARD);
TRANSACT_RETURN(SM_ACTION_SEND_ERROR_CACHE_NOOP, NULL);
}
}
if (!s->dns_info.lookup_success) {
// maybe the name can be expanded (e.g cnn -> www.cnn.com)
HostNameExpansionError_t host_name_expansion = try_to_expand_host_name(s);
switch (host_name_expansion) {
case RETRY_EXPANDED_NAME:
// expansion successful, do a dns lookup on expanded name
HTTP_RELEASE_ASSERT(s->dns_info.attempts < max_dns_lookups);
HTTP_RELEASE_ASSERT(s->http_config_param->enable_url_expandomatic);
TRANSACT_RETURN(SM_ACTION_DNS_LOOKUP, OSDNSLookup);
break;
case EXPANSION_NOT_ALLOWED:
case EXPANSION_FAILED:
case DNS_ATTEMPTS_EXHAUSTED:
if (DNSLookupInfo::OS_ADDR_TRY_HOSTDB == s->dns_info.os_addr_style) {
// No HostDB data, just keep on with the CTA.
s->dns_info.lookup_success = true;
s->dns_info.os_addr_style = DNSLookupInfo::OS_ADDR_USE_CLIENT;
DebugTxn("http_seq", "[HttpTransact::OSDNSLookup] DNS lookup unsuccessful reverting to force client target address use");
} else {
if (host_name_expansion == EXPANSION_NOT_ALLOWED) {
// config file doesn't allow automatic expansion of host names
HTTP_RELEASE_ASSERT(!(s->http_config_param->enable_url_expandomatic));
DebugTxn("http_seq", "[HttpTransact::OSDNSLookup] DNS Lookup unsuccessful");
} else if (host_name_expansion == EXPANSION_FAILED) {
// not able to expand the hostname. dns lookup failed
DebugTxn("http_seq", "[HttpTransact::OSDNSLookup] DNS Lookup unsuccessful");
} else if (host_name_expansion == DNS_ATTEMPTS_EXHAUSTED) {
// retry attempts exhausted --- can't find dns entry for this host name
HTTP_RELEASE_ASSERT(s->dns_info.attempts >= max_dns_lookups);
DebugTxn("http_seq", "[HttpTransact::OSDNSLookup] DNS Lookup unsuccessful");
}
// output the DNS failure error message
SET_VIA_STRING(VIA_DETAIL_TUNNEL, VIA_DETAIL_TUNNEL_NO_FORWARD);
build_error_response(s, HTTP_STATUS_BAD_GATEWAY, "Cannot find server.", "connect#dns_failed", NULL);
// s->cache_info.action = CACHE_DO_NO_ACTION;
TRANSACT_RETURN(SM_ACTION_SEND_ERROR_CACHE_NOOP, NULL);
}
break;
default:
ink_assert(!("try_to_expand_hostname returned an unsupported code"));
break;
}
return;
}
// ok, so the dns lookup succeeded
ink_assert(s->dns_info.lookup_success);
DebugTxn("http_seq", "[HttpTransact::OSDNSLookup] DNS Lookup successful");
if (DNSLookupInfo::OS_ADDR_TRY_HOSTDB == s->dns_info.os_addr_style) {
// We've backed off from a client supplied address and found some
// HostDB addresses. We use those if they're different from the CTA.
// In all cases we now commit to client or HostDB for our source.
if (s->host_db_info.round_robin) {
HostDBInfo* cta = s->host_db_info.rr()->select_next(&s->current.server->addr.sa);
if (cta) {
// found another addr, lock in host DB.
s->host_db_info = *cta;
s->dns_info.os_addr_style = DNSLookupInfo::OS_ADDR_USE_HOSTDB;
} else {
// nothing else there, continue with CTA.
s->dns_info.os_addr_style = DNSLookupInfo::OS_ADDR_USE_CLIENT;
}
} else if (ats_ip_addr_eq(s->host_db_info.ip(), &s->server_info.addr.sa)) {
s->dns_info.os_addr_style = DNSLookupInfo::OS_ADDR_USE_CLIENT;
} else {
s->dns_info.os_addr_style = DNSLookupInfo::OS_ADDR_USE_HOSTDB;
}
}
// Check to see if we can fulfill expect requests based on the cached
// update some state variables with hostdb information that has
// been provided.
ats_ip_copy(&s->server_info.addr, s->host_db_info.ip());
ats_ip_copy(&s->request_data.dest_ip, &s->server_info.addr);
get_ka_info_from_host_db(s, &s->server_info, &s->client_info, &s->host_db_info);
s->server_info.dns_round_robin = s->host_db_info.round_robin;
char addrbuf[INET6_ADDRSTRLEN];
DebugTxn("http_trans", "[OSDNSLookup] DNS lookup for O.S. successful "
"IP: %s", ats_ip_ntop(&s->server_info.addr.sa, addrbuf, sizeof(addrbuf)));
// so the dns lookup was a success, but the lookup succeeded on
// a hostname which was expanded by the traffic server. we should
// not automatically forward the request to this expanded hostname.
// return a response to the client with the expanded host name
// and a tasty little blurb explaining what happened.
// if a DNS lookup succeeded on a user-defined
// hostname expansion, forward the request to the expanded hostname.
// On the other hand, if the lookup succeeded on a www.<hostname>.com
// expansion, return a 302 response.
// [amc] Also don't redirect if we backed off using HostDB instead of CTA.
if (s->dns_info.attempts == max_dns_lookups && s->dns_info.looking_up == ORIGIN_SERVER && DNSLookupInfo::OS_ADDR_USE_CLIENT != s->dns_info.os_addr_style) {
DebugTxn("http_trans", "[OSDNSLookup] DNS name resolution on expansion");
DebugTxn("http_seq", "[OSDNSLookup] DNS name resolution on expansion - returning");
build_redirect_response(s);
// s->cache_info.action = CACHE_DO_NO_ACTION;
TRANSACT_RETURN(SM_ACTION_INTERNAL_CACHE_NOOP, NULL);
}
// everything succeeded with the DNS lookup so do an API callout
// that allows for filtering. We'll do traffic_server internal
// filtering after API filtering
// After SM_ACTION_DNS_LOOKUP, goto the saved action/state ORIGIN_SERVER_(RAW_)OPEN.
// Should we skip the StartAccessControl()? why?
if (s->cdn_remap_complete) {
DebugTxn("cdn", "This is a late DNS lookup. We are going to the OS, " "not to HandleFiltering.");
ink_assert(s->cdn_saved_next_action == SM_ACTION_ORIGIN_SERVER_OPEN || s->cdn_saved_next_action == SM_ACTION_ORIGIN_SERVER_RAW_OPEN);
DebugTxn("cdn", "outgoing version -- (pre conversion) %d", s->hdr_info.server_request.m_http->m_version);
(&s->hdr_info.server_request)->version_set(HTTPVersion(1, 1));
HttpTransactHeaders::convert_request(s->current.server->http_version, &s->hdr_info.server_request);
DebugTxn("cdn", "outgoing version -- (post conversion) %d", s->hdr_info.server_request.m_http->m_version);
TRANSACT_RETURN(s->cdn_saved_next_action, NULL);
} else if (DNSLookupInfo::OS_ADDR_USE_CLIENT == s->dns_info.os_addr_style ||
DNSLookupInfo::OS_ADDR_USE_HOSTDB == s->dns_info.os_addr_style) {
// we've come back after already trying the server to get a better address
// and finished with all backtracking - return to trying the server.
TRANSACT_RETURN(how_to_open_connection(s), HttpTransact::HandleResponse);
} else if (s->dns_info.lookup_name[0] <= '9' &&
s->dns_info.lookup_name[0] >= '0' &&
s->parent_params->ParentTable->hostMatch &&
!s->http_config_param->no_dns_forward_to_parent) {
// note, broken logic: ACC fudges the OR stmt to always be true,
// 'AuthHttpAdapter' should do the rev-dns if needed, not here .
TRANSACT_RETURN(SM_ACTION_DNS_REVERSE_LOOKUP, HttpTransact::StartAccessControl);
} else {
//(s->state_machine->authAdapter).StartLookup (s);
// TRANSACT_RETURN(SM_ACTION_AUTH_LOOKUP, NULL);
if (s->force_dns) {
StartAccessControl(s); // If skip_dns is enabled and no ip based rules in cache.config and parent.config
// Access Control is called after DNS response
} else {
if ((s->cache_info.action == CACHE_DO_NO_ACTION) &&
(((s->hdr_info.client_request.presence(MIME_PRESENCE_RANGE) && !s->txn_conf->cache_range_write) ||
s->range_setup == RANGE_NOT_SATISFIABLE || s->range_setup == RANGE_NOT_HANDLED))) {
TRANSACT_RETURN(SM_ACTION_API_OS_DNS, HandleCacheOpenReadMiss);
} else if (s->cache_lookup_result == HttpTransact::CACHE_LOOKUP_SKIPPED) {
TRANSACT_RETURN(SM_ACTION_API_OS_DNS, LookupSkipOpenServer);
// DNS Lookup is done after LOOKUP Skipped and after we get response
// from the DNS we need to call LookupSkipOpenServer
} else if (s->cache_lookup_result == CACHE_LOOKUP_HIT_FRESH ||
s->cache_lookup_result == CACHE_LOOKUP_HIT_WARNING ||
s->cache_lookup_result == CACHE_LOOKUP_HIT_STALE) {
// DNS lookup is done; if the content is stale we need to call handle cache open read hit
TRANSACT_RETURN(SM_ACTION_API_OS_DNS, HandleCacheOpenReadHit);
} else if (s->cache_lookup_result == CACHE_LOOKUP_MISS || s->cache_info.action == CACHE_DO_NO_ACTION) {
TRANSACT_RETURN(SM_ACTION_API_OS_DNS, HandleCacheOpenReadMiss);
//DNS lookup is done if the lookup failed and need to call Handle Cache Open Read Miss
} else {
build_error_response(s, HTTP_STATUS_INTERNAL_SERVER_ERROR, "Invalid Cache Lookup result", "default", NULL);
Log::error("HTTP: Invalid CACHE LOOKUP RESULT : %d", s->cache_lookup_result);
TRANSACT_RETURN(SM_ACTION_SEND_ERROR_CACHE_NOOP, NULL);
}
}
}
}
| 0 |
[
"CWE-119"
] |
trafficserver
|
8b5f0345dade6b2822d9b52c8ad12e63011a5c12
| 57,477,196,797,216,370,000,000,000,000,000,000,000 | 181 |
Fix the internal buffer sizing. Thanks to Sudheer for helping isolating this bug
|
void* Type_Text_Dup(struct _cms_typehandler_struct* self, const void *Ptr, cmsUInt32Number n)
{
return (void*) cmsMLUdup((cmsMLU*) Ptr);
cmsUNUSED_PARAMETER(n);
cmsUNUSED_PARAMETER(self);
}
| 0 |
[] |
Little-CMS
|
41d222df1bc6188131a8f46c32eab0a4d4cdf1b6
| 134,197,016,010,030,660,000,000,000,000,000,000,000 | 7 |
Memory squeezing fix: lcms2 cmsPipeline construction
When creating a new pipeline, lcms would often try to allocate a stage
and pass it to cmsPipelineInsertStage without checking whether the
allocation succeeded. cmsPipelineInsertStage would then assert (or crash)
if it had not.
The fix here is to change cmsPipelineInsertStage to check and return
an error value. All calling code is then checked to test this return
value and cope.
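With the fix, the checked calling pattern looks roughly like this (a minimal sketch against the public lcms2 API; the boolean return of `cmsPipelineInsertStage` is what the message says the fix introduced):

```c
#include <lcms2.h>

static cmsPipeline *build_identity_pipeline(cmsContext ctx)
{
    cmsPipeline *lut = cmsPipelineAlloc(ctx, 3, 3);
    if (lut == NULL)
        return NULL;

    cmsStage *stage = cmsStageAllocIdentity(ctx, 3);
    if (stage == NULL) {                 /* allocation can fail under squeeze */
        cmsPipelineFree(lut);
        return NULL;
    }
    if (!cmsPipelineInsertStage(lut, cmsAT_END, stage)) {
        cmsStageFree(stage);             /* insertion failure is recoverable */
        cmsPipelineFree(lut);
        return NULL;
    }
    return lut;
}
```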
|
make_file_name_valid_for_dest_fs (char *filename,
const char *dest_fs_type)
{
if (dest_fs_type != NULL && filename != NULL)
{
if (!strcmp (dest_fs_type, "fat") ||
!strcmp (dest_fs_type, "vfat") ||
!strcmp (dest_fs_type, "msdos") ||
!strcmp (dest_fs_type, "msdosfs"))
{
gboolean ret;
int i, old_len;
ret = fat_str_replace (filename, '_');
old_len = strlen (filename);
for (i = 0; i < old_len; i++)
{
if (filename[i] != ' ')
{
g_strchomp (filename);
ret |= (old_len != strlen (filename));
break;
}
}
return ret;
}
}
return FALSE;
}
| 0 |
[
"CWE-20"
] |
nautilus
|
1630f53481f445ada0a455e9979236d31a8d3bb0
| 232,952,398,326,877,300,000,000,000,000,000,000,000 | 32 |
mime-actions: use file metadata for trusting desktop files
Currently we only trust desktop files that have the executable bit
set, and don't replace the displayed icon or the displayed name until
it's trusted, which prevents running random programs from a malicious
desktop file.
However, the executable permission is preserved if the desktop file
comes from a compressed file.
To prevent this, add a metadata::trusted metadata to the file once the
user acknowledges the file as trusted. This adds metadata to the file,
which cannot be added unless it has access to the computer.
Also remove the SHEBANG "trusted" content we were putting inside the
desktop file, since that doesn't add more security since it can come
with the file itself.
https://bugzilla.gnome.org/show_bug.cgi?id=777991
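Setting such a metadata key through GIO looks roughly like this (a sketch; the exact attribute value Nautilus stores may differ):

```c
#include <gio/gio.h>

/* Record trust in file metadata, which only code with access to the
 * user's session can write, instead of in the file's contents. */
static gboolean mark_desktop_file_trusted(GFile *file, GError **error)
{
    return g_file_set_attribute_string(file,
                                       "metadata::trusted", "true",
                                       G_FILE_QUERY_INFO_NONE,
                                       NULL /* cancellable */,
                                       error);
}
```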
|
static int64_t http_seek_internal(URLContext *h, int64_t off, int whence, int force_reconnect)
{
HTTPContext *s = h->priv_data;
URLContext *old_hd = s->hd;
uint64_t old_off = s->off;
uint8_t old_buf[BUFFER_SIZE];
int old_buf_size, ret;
AVDictionary *options = NULL;
if (whence == AVSEEK_SIZE)
return s->filesize;
else if (!force_reconnect &&
((whence == SEEK_CUR && off == 0) ||
(whence == SEEK_SET && off == s->off)))
return s->off;
else if ((s->filesize == UINT64_MAX && whence == SEEK_END))
return AVERROR(ENOSYS);
if (whence == SEEK_CUR)
off += s->off;
else if (whence == SEEK_END)
off += s->filesize;
else if (whence != SEEK_SET)
return AVERROR(EINVAL);
if (off < 0)
return AVERROR(EINVAL);
s->off = off;
if (s->off && h->is_streamed)
return AVERROR(ENOSYS);
/* we save the old context in case the seek fails */
old_buf_size = s->buf_end - s->buf_ptr;
memcpy(old_buf, s->buf_ptr, old_buf_size);
s->hd = NULL;
/* if it fails, continue on old connection */
if ((ret = http_open_cnx(h, &options)) < 0) {
av_dict_free(&options);
memcpy(s->buffer, old_buf, old_buf_size);
s->buf_ptr = s->buffer;
s->buf_end = s->buffer + old_buf_size;
s->hd = old_hd;
s->off = old_off;
return ret;
}
av_dict_free(&options);
ffurl_close(old_hd);
return off;
}
| 0 |
[
"CWE-119",
"CWE-787"
] |
FFmpeg
|
2a05c8f813de6f2278827734bf8102291e7484aa
| 91,450,774,405,209,480,000,000,000,000,000,000,000 | 50 |
http: make length/offset-related variables unsigned.
Fixes #5992, reported and found by Paul Cher <[email protected]>.
|
posix_cpu_clock_getres(const clockid_t which_clock, struct timespec64 *tp)
{
int error = check_clock(which_clock);
if (!error) {
tp->tv_sec = 0;
tp->tv_nsec = ((NSEC_PER_SEC + HZ - 1) / HZ);
if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
/*
* If sched_clock is using a cycle counter, we
* don't have any idea of its true resolution
* exported, but it is much more than 1s/HZ.
*/
tp->tv_nsec = 1;
}
}
return error;
}
| 0 |
[
"CWE-190"
] |
linux
|
78c9c4dfbf8c04883941445a195276bb4bb92c76
| 121,614,300,268,011,410,000,000,000,000,000,000,000 | 17 |
posix-timers: Sanitize overrun handling
The posix timer overrun handling is broken because the forwarding functions
can return a huge number of overruns which does not fit in an int. As a
consequence timer_getoverrun(2) and siginfo::si_overrun can turn into
random number generators.
The k_clock::timer_forward() callbacks return a 64 bit value now. Make
k_itimer::ti_overrun[_last] 64bit as well, so the kernel internal
accounting is correct. Remove the temporary (int) casts.
Add a helper function which clamps the overrun value returned to user space
via timer_getoverrun(2) or siginfo::si_overrun limited to a positive value
between 0 and INT_MAX. INT_MAX is an indicator for user space that the
overrun value has been clamped.
Reported-by: Team OWL337 <[email protected]>
Signed-off-by: Thomas Gleixner <[email protected]>
Acked-by: John Stultz <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Michael Kerrisk <[email protected]>
Link: https://lkml.kernel.org/r/[email protected]
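The clamping helper the message describes is simple; a standalone sketch (the kernel helper's exact signature may differ):

```c
#include <limits.h>
#include <stdint.h>

/* Clamp a 64-bit overrun count to [0, INT_MAX] for user space;
 * INT_MAX doubles as the "value was clamped" indicator. */
static int timer_overrun_to_int(int64_t overrun)
{
    if (overrun < 0)
        return 0;
    if (overrun > INT_MAX)
        return INT_MAX;
    return (int)overrun;
}
```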
|
static inline struct xfrm_user_sec_ctx *pfkey_sadb2xfrm_user_sec_ctx(const struct sadb_x_sec_ctx *sec_ctx,
gfp_t gfp)
{
struct xfrm_user_sec_ctx *uctx = NULL;
int ctx_size = sec_ctx->sadb_x_ctx_len;
uctx = kmalloc((sizeof(*uctx)+ctx_size), gfp);
if (!uctx)
return NULL;
uctx->len = pfkey_sec_ctx_len(sec_ctx);
uctx->exttype = sec_ctx->sadb_x_sec_exttype;
uctx->ctx_doi = sec_ctx->sadb_x_ctx_doi;
uctx->ctx_alg = sec_ctx->sadb_x_ctx_alg;
uctx->ctx_len = sec_ctx->sadb_x_ctx_len;
memcpy(uctx + 1, sec_ctx + 1,
uctx->ctx_len);
return uctx;
}
| 0 |
[] |
linux
|
096f41d3a8fcbb8dde7f71379b1ca85fe213eded
| 331,671,246,317,774,400,000,000,000,000,000,000,000 | 21 |
af_key: Fix sadb_x_ipsecrequest parsing
The parsing of sadb_x_ipsecrequest is broken in a number of ways.
First of all we're not verifying sadb_x_ipsecrequest_len. This
is needed when the structure carries addresses at the end. Worse
we don't even look at the length when we parse those optional
addresses.
The migration code had similar parsing code that's better but
it also has some deficiencies. The length is overcounted first
of all as it includes the header itself. It also fails to check
the length before dereferencing the sa_family field.
This patch fixes those problems in parse_sockaddr_pair and then
uses it in parse_ipsecrequest.
Reported-by: Andrey Konovalov <[email protected]>
Signed-off-by: Herbert Xu <[email protected]>
Signed-off-by: Steffen Klassert <[email protected]>
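The generic shape of this kind of fix is to validate an embedded length field against the bytes actually available before sizing or copying anything, e.g. (illustrative, not the kernel code):

```c
#include <stdbool.h>
#include <stddef.h>
#include <string.h>

struct ext_hdr { unsigned short len; /* in 8-byte units, incl. header */ };

static bool parse_ext(const unsigned char *p, size_t avail)
{
    struct ext_hdr hdr;
    if (avail < sizeof(hdr))
        return false;                  /* can't even read the header */
    memcpy(&hdr, p, sizeof(hdr));
    size_t total = (size_t)hdr.len * 8;
    if (total < sizeof(hdr) || total > avail)
        return false;                  /* declared length must fit   */
    /* ... only now parse the payload and any trailing addresses ... */
    return true;
}
```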
|
xrdp_mm_send_login(struct xrdp_mm* self)
{
struct stream* s;
int rv;
int index;
int count;
int xserverbpp;
char* username;
char* password;
char* name;
char* value;
xrdp_wm_log_msg(self->wm, "sending login info to session manager, "
"please wait...");
username = 0;
password = 0;
self->code = 0;
xserverbpp = 0;
count = self->login_names->count;
for (index = 0; index < count; index++)
{
name = (char*)list_get_item(self->login_names, index);
value = (char*)list_get_item(self->login_values, index);
if (g_strcasecmp(name, "username") == 0)
{
username = value;
}
else if (g_strcasecmp(name, "password") == 0)
{
password = value;
}
else if (g_strcasecmp(name, "lib") == 0)
{
if ((g_strcasecmp(value, "libxup.so") == 0) ||
(g_strcasecmp(value, "xup.dll") == 0))
{
self->code = 10;
}
}
else if (g_strcasecmp(name, "xserverbpp") == 0)
{
xserverbpp = g_atoi(value);
}
}
if ((username == 0) || (password == 0))
{
xrdp_wm_log_msg(self->wm, "Error finding username and password");
return 1;
}
s = trans_get_out_s(self->sesman_trans, 8192);
s_push_layer(s, channel_hdr, 8);
/* this code is either 0 for Xvnc or 10 for X11rdp */
out_uint16_be(s, self->code);
index = g_strlen(username);
out_uint16_be(s, index);
out_uint8a(s, username, index);
index = g_strlen(password);
out_uint16_be(s, index);
out_uint8a(s, password, index);
out_uint16_be(s, self->wm->screen->width);
out_uint16_be(s, self->wm->screen->height);
if (xserverbpp > 0)
{
out_uint16_be(s, xserverbpp);
}
else
{
out_uint16_be(s, self->wm->screen->bpp);
}
/* send domain */
index = g_strlen(self->wm->client_info->domain);
out_uint16_be(s, index);
out_uint8a(s, self->wm->client_info->domain, index);
/* send program / shell */
index = g_strlen(self->wm->client_info->program);
out_uint16_be(s, index);
out_uint8a(s, self->wm->client_info->program, index);
/* send directory */
index = g_strlen(self->wm->client_info->directory);
out_uint16_be(s, index);
out_uint8a(s, self->wm->client_info->directory, index);
/* send client ip */
index = g_strlen(self->wm->client_info->client_ip);
out_uint16_be(s, index);
out_uint8a(s, self->wm->client_info->client_ip, index);
s_mark_end(s);
s_pop_layer(s, channel_hdr);
out_uint32_be(s, 0); /* version */
index = (int)(s->end - s->data);
out_uint32_be(s, index); /* size */
rv = trans_force_write(self->sesman_trans);
if (rv != 0) {
xrdp_wm_log_msg(self->wm, "xrdp_mm_send_login: xrdp_mm_send_login failed");
}
return rv;
}
| 0 |
[] |
xrdp
|
d8f9e8310dac362bb9578763d1024178f94f4ecc
| 87,993,565,929,748,840,000,000,000,000,000,000,000 | 108 |
move temp files from /tmp to /tmp/.xrdp
|
evdev_post_scroll(struct evdev_device *device,
uint64_t time,
enum libinput_pointer_axis_source source,
const struct normalized_coords *delta)
{
const struct normalized_coords *trigger;
struct normalized_coords event;
if (!evdev_is_scrolling(device,
LIBINPUT_POINTER_AXIS_SCROLL_VERTICAL))
device->scroll.buildup.y += delta->y;
if (!evdev_is_scrolling(device,
LIBINPUT_POINTER_AXIS_SCROLL_HORIZONTAL))
device->scroll.buildup.x += delta->x;
trigger = &device->scroll.buildup;
/* If we're not scrolling yet, use a distance trigger: moving
past a certain distance starts scrolling */
if (!evdev_is_scrolling(device,
LIBINPUT_POINTER_AXIS_SCROLL_HORIZONTAL) &&
!evdev_is_scrolling(device,
LIBINPUT_POINTER_AXIS_SCROLL_VERTICAL)) {
if (fabs(trigger->y) >= device->scroll.threshold)
evdev_start_scrolling(device,
LIBINPUT_POINTER_AXIS_SCROLL_VERTICAL);
if (fabs(trigger->x) >= device->scroll.threshold)
evdev_start_scrolling(device,
LIBINPUT_POINTER_AXIS_SCROLL_HORIZONTAL);
/* We're already scrolling in one direction. Require some
trigger speed to start scrolling in the other direction */
} else if (!evdev_is_scrolling(device,
LIBINPUT_POINTER_AXIS_SCROLL_VERTICAL)) {
if (fabs(delta->y) >= device->scroll.direction_lock_threshold)
evdev_start_scrolling(device,
LIBINPUT_POINTER_AXIS_SCROLL_VERTICAL);
} else if (!evdev_is_scrolling(device,
LIBINPUT_POINTER_AXIS_SCROLL_HORIZONTAL)) {
if (fabs(delta->x) >= device->scroll.direction_lock_threshold)
evdev_start_scrolling(device,
LIBINPUT_POINTER_AXIS_SCROLL_HORIZONTAL);
}
event = *delta;
/* We use the trigger to enable, but the delta from this event for
* the actual scroll movement. Otherwise we get a jump once
* scrolling engages */
if (!evdev_is_scrolling(device,
LIBINPUT_POINTER_AXIS_SCROLL_VERTICAL))
event.y = 0.0;
if (!evdev_is_scrolling(device,
LIBINPUT_POINTER_AXIS_SCROLL_HORIZONTAL))
event.x = 0.0;
if (!normalized_is_zero(event)) {
const struct discrete_coords zero_discrete = { 0.0, 0.0 };
uint32_t axes = device->scroll.direction;
if (event.y == 0.0)
axes &= ~bit(LIBINPUT_POINTER_AXIS_SCROLL_VERTICAL);
if (event.x == 0.0)
axes &= ~bit(LIBINPUT_POINTER_AXIS_SCROLL_HORIZONTAL);
evdev_notify_axis(device,
time,
axes,
source,
&event,
&zero_discrete);
}
}
| 0 |
[
"CWE-134"
] |
libinput
|
562157f2a56537f353ca49b194efeb770004ba63
| 271,321,681,436,069,300,000,000,000,000,000,000,000 | 73 |
evdev: strip the device name of format directives
This fixes a format string vulnerability.
evdev_log_message() composes a format string consisting of a fixed
prefix (including the rendered device name) and the passed-in format
buffer. This format string is then passed with the arguments to the
actual log handler, which usually and eventually ends up being printf.
If the device name contains a printf-style format directive, these ended
up in the format string and thus get interpreted correctly, e.g. for a
device "Foo%sBar" the log message vs printf invocation ends up being:
evdev_log_message(device, "some message %s", "some argument");
printf("event9 - Foo%sBar: some message %s", "some argument");
This can enable an attacker to execute malicious code with the
privileges of the process using libinput.
To exploit this, an attacker needs to be able to create a kernel device
with a malicious name, e.g. through /dev/uinput or a Bluetooth device.
To fix this, convert any potential format directives in the device name
by duplicating percentages.
Pre-rendering the device to avoid the issue altogether would be nicer
but the current log level hooks do not easily allow for this. The device
name is the only user-controlled part of the format string.
A second potential issue is the sysname of the device which is also
sanitized.
This issue was found by Albin Eldstål-Ahrens and Benjamin Svensson from
Assured AB, and independently by Lukas Lamster.
Fixes #752
Signed-off-by: Peter Hutterer <[email protected]>
(cherry picked from commit a423d7d3269dc32a87384f79e29bb5ac021c83d1)
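Duplicating percentages so a device name can be spliced into a format string safely can be done like this (a sketch, not libinput's internal helper):

```c
#include <stdlib.h>
#include <string.h>

/* Return a copy of name with every '%' doubled, so it is inert when
 * embedded in a printf-style format string; caller frees. */
static char *strdup_escape_percent(const char *name)
{
    size_t len = strlen(name), extra = 0;
    for (size_t i = 0; i < len; i++)
        if (name[i] == '%')
            extra++;
    char *out = malloc(len + extra + 1);
    if (out == NULL)
        return NULL;
    char *dst = out;
    for (size_t i = 0; i < len; i++) {
        *dst++ = name[i];
        if (name[i] == '%')
            *dst++ = '%';  /* "Foo%sBar" becomes "Foo%%sBar" */
    }
    *dst = '\0';
    return out;
}
```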
|
void my_coll_agg_error(DTCollation &c1, DTCollation &c2, const char *fname)
{
my_error(ER_CANT_AGGREGATE_2COLLATIONS,MYF(0),
c1.collation->name,c1.derivation_name(),
c2.collation->name,c2.derivation_name(),
fname);
}
| 0 |
[] |
server
|
b000e169562697aa072600695d4f0c0412f94f4f
| 335,107,760,973,289,700,000,000,000,000,000,000,000 | 7 |
Bug#26361149 MYSQL SERVER CRASHES AT: COL IN(IFNULL(CONST, COL), NAME_CONST('NAME', NULL))
based on:
commit f7316aa0c9a
Author: Ajo Robert <[email protected]>
Date: Thu Aug 24 17:03:21 2017 +0530
Bug#26361149 MYSQL SERVER CRASHES AT: COL IN(IFNULL(CONST,
COL), NAME_CONST('NAME', NULL))
Backport of Bug#19143243 fix.
NAME_CONST item can return NULL_ITEM type in case of incorrect arguments.
NULL_ITEM has special processing in Item_func_in function.
In Item_func_in::fix_length_and_dec an array of possible comparators is
created. Since NAME_CONST function has NULL_ITEM type, corresponding
array element is empty. Then NAME_CONST is wrapped to ITEM_CACHE.
ITEM_CACHE can not return proper type(NULL_ITEM) in Item_func_in::val_int(),
so the NULL_ITEM is attempted compared with an empty comparator.
The fix is to disable the caching of Item_name_const item.
|
entry_guards_upgrade_waiting_circuits(guard_selection_t *gs,
const smartlist_t *all_circuits_in,
smartlist_t *newly_complete_out)
{
tor_assert(gs);
tor_assert(all_circuits_in);
tor_assert(newly_complete_out);
if (! entry_guards_all_primary_guards_are_down(gs)) {
/* We only upgrade a waiting circuit if the primary guards are all
* down. */
log_debug(LD_GUARD, "Considered upgrading guard-stalled circuits, "
"but not all primary guards were definitely down.");
return 0;
}
int n_waiting = 0;
int n_complete = 0;
int n_complete_blocking = 0;
origin_circuit_t *best_waiting_circuit = NULL;
smartlist_t *all_circuits = smartlist_new();
SMARTLIST_FOREACH_BEGIN(all_circuits_in, origin_circuit_t *, circ) {
// We filter out circuits that aren't ours, or which we can't
// reason about.
circuit_guard_state_t *state = origin_circuit_get_guard_state(circ);
if (state == NULL)
continue;
entry_guard_t *guard = entry_guard_handle_get(state->guard);
if (!guard || guard->in_selection != gs)
continue;
smartlist_add(all_circuits, circ);
} SMARTLIST_FOREACH_END(circ);
SMARTLIST_FOREACH_BEGIN(all_circuits, origin_circuit_t *, circ) {
circuit_guard_state_t *state = origin_circuit_get_guard_state(circ);
if (BUG(state == NULL))
continue;
if (state->state == GUARD_CIRC_STATE_WAITING_FOR_BETTER_GUARD) {
++n_waiting;
if (! best_waiting_circuit ||
circ_state_has_higher_priority(circ, NULL, best_waiting_circuit)) {
best_waiting_circuit = circ;
}
}
} SMARTLIST_FOREACH_END(circ);
if (! best_waiting_circuit) {
log_debug(LD_GUARD, "Considered upgrading guard-stalled circuits, "
"but didn't find any.");
goto no_change;
}
/* We'll need to keep track of what restrictions were used when picking this
* circuit, so that we don't allow any circuit without those restrictions to
* block it. */
const entry_guard_restriction_t *rst_on_best_waiting =
origin_circuit_get_guard_state(best_waiting_circuit)->restrictions;
/* First look at the complete circuits: Do any block this circuit? */
SMARTLIST_FOREACH_BEGIN(all_circuits, origin_circuit_t *, circ) {
/* "C2 "blocks" C1 if:
* C2 obeys all the restrictions that C1 had to obey, AND
* C2 has higher priority than C1, AND
* Either C2 is <complete>, or C2 is <waiting_for_better_guard>,
or C2 has been <usable_if_no_better_guard> for no more than
{NONPRIMARY_GUARD_CONNECT_TIMEOUT} seconds."
*/
circuit_guard_state_t *state = origin_circuit_get_guard_state(circ);
if (BUG(state == NULL))
continue;
if (state->state != GUARD_CIRC_STATE_COMPLETE)
continue;
++n_complete;
if (circ_state_has_higher_priority(circ, rst_on_best_waiting,
best_waiting_circuit))
++n_complete_blocking;
} SMARTLIST_FOREACH_END(circ);
if (n_complete_blocking) {
log_debug(LD_GUARD, "Considered upgrading guard-stalled circuits: found "
"%d complete and %d guard-stalled. At least one complete "
"circuit had higher priority, so not upgrading.",
n_complete, n_waiting);
goto no_change;
}
/* " * If any circuit C1 is <waiting_for_better_guard>, AND:
* All primary guards have reachable status of <no>.
* There is no circuit C2 that "blocks" C1.
Then, upgrade C1 to <complete>.""
*/
int n_blockers_found = 0;
const time_t state_set_at_cutoff =
approx_time() - get_nonprimary_guard_connect_timeout();
SMARTLIST_FOREACH_BEGIN(all_circuits, origin_circuit_t *, circ) {
circuit_guard_state_t *state = origin_circuit_get_guard_state(circ);
if (BUG(state == NULL))
continue;
if (state->state != GUARD_CIRC_STATE_USABLE_IF_NO_BETTER_GUARD)
continue;
if (state->state_set_at <= state_set_at_cutoff)
continue;
if (circ_state_has_higher_priority(circ, rst_on_best_waiting,
best_waiting_circuit))
++n_blockers_found;
} SMARTLIST_FOREACH_END(circ);
if (n_blockers_found) {
log_debug(LD_GUARD, "Considered upgrading guard-stalled circuits: found "
"%d guard-stalled, but %d pending circuit(s) had higher "
"guard priority, so not upgrading.",
n_waiting, n_blockers_found);
goto no_change;
}
/* Okay. We have a best waiting circuit, and we aren't waiting for
anything better. Add all circuits with that priority to the
list, and call them COMPLETE. */
int n_succeeded = 0;
SMARTLIST_FOREACH_BEGIN(all_circuits, origin_circuit_t *, circ) {
circuit_guard_state_t *state = origin_circuit_get_guard_state(circ);
if (BUG(state == NULL))
continue;
if (circ != best_waiting_circuit && rst_on_best_waiting) {
/* Can't upgrade other circ with same priority as best; might
be blocked. */
continue;
}
if (state->state != GUARD_CIRC_STATE_WAITING_FOR_BETTER_GUARD)
continue;
if (circ_state_has_higher_priority(best_waiting_circuit, NULL, circ))
continue;
state->state = GUARD_CIRC_STATE_COMPLETE;
state->state_set_at = approx_time();
smartlist_add(newly_complete_out, circ);
++n_succeeded;
} SMARTLIST_FOREACH_END(circ);
log_info(LD_GUARD, "Considered upgrading guard-stalled circuits: found "
"%d guard-stalled, %d complete. %d of the guard-stalled "
"circuit(s) had high enough priority to upgrade.",
n_waiting, n_complete, n_succeeded);
tor_assert_nonfatal(n_succeeded >= 1);
smartlist_free(all_circuits);
return 1;
no_change:
smartlist_free(all_circuits);
return 0;
}
| 0 |
[
"CWE-200"
] |
tor
|
665baf5ed5c6186d973c46cdea165c0548027350
| 100,421,681,643,554,160,000,000,000,000,000,000,000 | 154 |
Consider the exit family when applying guard restrictions.
When the new path selection logic went into place, I accidentally
dropped the code that considered the _family_ of the exit node when
deciding if the guard was usable, and we didn't catch that during
code review.
This patch makes the guard_restriction_t code consider the exit
family as well, and adds some (hopefully redundant) checks for the
case where we lack a node_t for a guard but we have a bridge_info_t
for it.
Fixes bug 22753; bugfix on 0.3.0.1-alpha. Tracked as TROVE-2016-006
and CVE-2017-0377.
|
struct CImgArgumentException : public CImgException {
| 0 |
[
"CWE-125"
] |
CImg
|
10af1e8c1ad2a58a0a3342a856bae63e8f257abb
| 248,247,556,019,081,070,000,000,000,000,000,000,000 | 1 |
Fix other issues in 'CImg<T>::load_bmp()'.
|
void DL_Dxf::addPoint(DL_CreationInterface* creationInterface) {
DL_PointData d(getRealValue(10, 0.0),
getRealValue(20, 0.0),
getRealValue(30, 0.0));
creationInterface->addPoint(d);
}
| 0 |
[
"CWE-191"
] |
qcad
|
1eeffc5daf5a06cf6213ffc19e95923cdebb2eb8
| 99,644,244,699,740,330,000,000,000,000,000,000,000 | 6 |
check vertexIndex which might be -1 for broken DXF
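The guard that fix implies is a simple range check before indexing (illustrative):

```c
#include <stddef.h>

/* Broken DXF input can leave a vertex index at -1; validate before use. */
static const double *get_vertex(const double *vertices, size_t count,
                                int vertex_index)
{
    if (vertex_index < 0 || (size_t)vertex_index >= count)
        return NULL; /* malformed record: index out of range */
    return &vertices[vertex_index];
}
```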
|
xfs_alloc_ag_vextent_locality(
struct xfs_alloc_arg *args,
struct xfs_alloc_cur *acur,
int *stat)
{
struct xfs_btree_cur *fbcur = NULL;
int error;
int i;
bool fbinc;
ASSERT(acur->len == 0);
ASSERT(args->type == XFS_ALLOCTYPE_NEAR_BNO);
*stat = 0;
error = xfs_alloc_lookup_ge(acur->cnt, args->agbno, acur->cur_len, &i);
if (error)
return error;
error = xfs_alloc_lookup_le(acur->bnolt, args->agbno, 0, &i);
if (error)
return error;
error = xfs_alloc_lookup_ge(acur->bnogt, args->agbno, 0, &i);
if (error)
return error;
/*
* Search the bnobt and cntbt in parallel. Search the bnobt left and
* right and lookup the closest extent to the locality hint for each
* extent size key in the cntbt. The entire search terminates
* immediately on a bnobt hit because that means we've found best case
* locality. Otherwise the search continues until the cntbt cursor runs
* off the end of the tree. If no allocation candidate is found at this
* point, give up on locality, walk backwards from the end of the cntbt
* and take the first available extent.
*
* The parallel tree searches balance each other out to provide fairly
* consistent performance for various situations. The bnobt search can
* have pathological behavior in the worst case scenario of larger
* allocation requests and fragmented free space. On the other hand, the
* bnobt is able to satisfy most smaller allocation requests much more
* quickly than the cntbt. The cntbt search can sift through fragmented
* free space and sets of free extents for larger allocation requests
* more quickly than the bnobt. Since the locality hint is just a hint
* and we don't want to scan the entire bnobt for perfect locality, the
* cntbt search essentially bounds the bnobt search such that we can
* find good enough locality at reasonable performance in most cases.
*/
while (xfs_alloc_cur_active(acur->bnolt) ||
xfs_alloc_cur_active(acur->bnogt) ||
xfs_alloc_cur_active(acur->cnt)) {
trace_xfs_alloc_cur_lookup(args);
/*
* Search the bnobt left and right. In the case of a hit, finish
* the search in the opposite direction and we're done.
*/
error = xfs_alloc_walk_iter(args, acur, acur->bnolt, false,
true, 1, &i);
if (error)
return error;
if (i == 1) {
trace_xfs_alloc_cur_left(args);
fbcur = acur->bnogt;
fbinc = true;
break;
}
error = xfs_alloc_walk_iter(args, acur, acur->bnogt, true, true,
1, &i);
if (error)
return error;
if (i == 1) {
trace_xfs_alloc_cur_right(args);
fbcur = acur->bnolt;
fbinc = false;
break;
}
/*
* Check the extent with best locality based on the current
* extent size search key and keep track of the best candidate.
*/
error = xfs_alloc_cntbt_iter(args, acur);
if (error)
return error;
if (!xfs_alloc_cur_active(acur->cnt)) {
trace_xfs_alloc_cur_lookup_done(args);
break;
}
}
/*
* If we failed to find anything due to busy extents, return empty
* handed so the caller can flush and retry. If no busy extents were
* found, walk backwards from the end of the cntbt as a last resort.
*/
if (!xfs_alloc_cur_active(acur->cnt) && !acur->len && !acur->busy) {
error = xfs_btree_decrement(acur->cnt, 0, &i);
if (error)
return error;
if (i) {
acur->cnt->bc_private.a.priv.abt.active = true;
fbcur = acur->cnt;
fbinc = false;
}
}
/*
* Search in the opposite direction for a better entry in the case of
* a bnobt hit or walk backwards from the end of the cntbt.
*/
if (fbcur) {
error = xfs_alloc_walk_iter(args, acur, fbcur, fbinc, true, -1,
&i);
if (error)
return error;
}
if (acur->len)
*stat = 1;
return 0;
}
| 0 |
[
"CWE-400",
"CWE-703",
"CWE-835"
] |
linux
|
d0c7feaf87678371c2c09b3709400be416b2dc62
| 335,254,118,816,812,060,000,000,000,000,000,000,000 | 123 |
xfs: add agf freeblocks verify in xfs_agf_verify
We recently used fuzz(hydra) to test XFS and automatically generate
tmp.img(XFS v5 format, but some metadata is wrong)
xfs_repair information(just one AG):
agf_freeblks 0, counted 3224 in ag 0
agf_longest 536874136, counted 3224 in ag 0
sb_fdblocks 613, counted 3228
Test as follows:
mount tmp.img tmpdir
cp file1M tmpdir
sync
In 4.19-stable, sync will get stuck; the reason is:
xfs_mountfs
xfs_check_summary_counts
if ((!xfs_sb_version_haslazysbcount(&mp->m_sb) ||
XFS_LAST_UNMOUNT_WAS_CLEAN(mp)) &&
!xfs_fs_has_sickness(mp, XFS_SICK_FS_COUNTERS))
return 0; -->just return, incore sb_fdblocks still be 613
xfs_initialize_perag_data
cp file1M tmpdir -->ok(write file to pagecache)
sync -->stuck(write pagecache to disk)
xfs_map_blocks
xfs_iomap_write_allocate
while (count_fsb != 0) {
nimaps = 0;
while (nimaps == 0) { --> endless loop
nimaps = 1;
xfs_bmapi_write(..., &nimaps) --> nimaps becomes 0 again
xfs_bmapi_write
xfs_bmap_alloc
xfs_bmap_btalloc
xfs_alloc_vextent
xfs_alloc_fix_freelist
xfs_alloc_space_available -->fail(agf_freeblks is 0)
In linux-next, sync does not get stuck, because commit c2b3164320b5 ("xfs:
use the latest extent at writeback delalloc conversion time") removed
the above while loop; dmesg is as follows:
[ 55.250114] XFS (loop0): page discard on page ffffea0008bc7380, inode 0x1b0c, offset 0.
Users do not know why this page is discarded; the better solution is:
1. Like xfs_repair, make sure sb_fdblocks is equal to counted
(xfs_initialize_perag_data did this, who is not called at this mount)
2. Add agf verify, if fail, will tell users to repair
This patch uses the second solution.
Signed-off-by: Zheng Bin <[email protected]>
Signed-off-by: Ren Xudong <[email protected]>
Reviewed-by: Darrick J. Wong <[email protected]>
Signed-off-by: Darrick J. Wong <[email protected]>
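Schematically, the added verifier amounts to cross-checking the AGF counters against the AG size at read time (field handling simplified; the real check lives in xfs_agf_verify and uses endian-aware accessors):

```c
#include <stdbool.h>
#include <stdint.h>

struct agf { uint32_t length; uint32_t freeblks; uint32_t longest; };

static bool agf_counters_ok(const struct agf *agf)
{
    /* A failed check fails the buffer verifier, and the user is told
     * to run xfs_repair instead of hitting an endless writeback loop. */
    if (agf->freeblks > agf->length)
        return false; /* more free blocks than blocks in the AG */
    if (agf->longest > agf->length)
        return false; /* e.g. agf_longest 536874136 in a small AG */
    return true;
}
```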
|
run_sigchld_trap (nchild)
int nchild;
{
char *trap_command;
int i;
/* Turn off the trap list during the call to parse_and_execute ()
to avoid potentially infinite recursive calls. Preserve the
values of last_command_exit_value, last_made_pid, and the_pipeline
around the execution of the trap commands. */
trap_command = savestring (trap_list[SIGCHLD]);
begin_unwind_frame ("SIGCHLD trap");
unwind_protect_int (last_command_exit_value);
unwind_protect_int (last_command_exit_signal);
unwind_protect_var (last_made_pid);
unwind_protect_int (interrupt_immediately);
unwind_protect_int (jobs_list_frozen);
unwind_protect_pointer (the_pipeline);
unwind_protect_pointer (subst_assign_varlist);
unwind_protect_pointer (this_shell_builtin);
unwind_protect_pointer (temporary_env);
/* We have to add the commands this way because they will be run
in reverse order of adding. We don't want maybe_set_sigchld_trap ()
to reference freed memory. */
add_unwind_protect (xfree, trap_command);
add_unwind_protect (maybe_set_sigchld_trap, trap_command);
subst_assign_varlist = (WORD_LIST *)NULL;
the_pipeline = (PROCESS *)NULL;
temporary_env = 0; /* traps should not run with temporary env */
running_trap = SIGCHLD + 1;
set_impossible_sigchld_trap ();
jobs_list_frozen = 1;
for (i = 0; i < nchild; i++)
{
#if 0
interrupt_immediately = 1;
#endif
parse_and_execute (savestring (trap_command), "trap", SEVAL_NOHIST|SEVAL_RESETLINE);
}
run_unwind_frame ("SIGCHLD trap");
running_trap = 0;
}
| 0 |
[] |
bash
|
955543877583837c85470f7fb8a97b7aa8d45e6c
| 234,986,247,464,779,400,000,000,000,000,000,000,000 | 48 |
bash-4.4-rc2 release
|
free_ice_connection_watch (GsmIceConnectionWatch *data)
{
if (data->watch_id) {
g_source_remove (data->watch_id);
data->watch_id = 0;
}
if (data->protocol_timeout) {
g_source_remove (data->protocol_timeout);
data->protocol_timeout = 0;
}
g_free (data);
}
| 0 |
[
"CWE-125",
"CWE-835"
] |
gnome-session
|
b0dc999e0b45355314616321dbb6cb71e729fc9d
| 239,191,956,818,581,430,000,000,000,000,000,000,000 | 14 |
[gsm] Delay the creation of the GsmXSMPClient until it really exists
We used to create the GsmXSMPClient before the XSMP connection is really
accepted. This can lead to some issues, though. An example is:
https://bugzilla.gnome.org/show_bug.cgi?id=598211#c19. Quoting:
"What is happening is that a new client (probably metacity in your
case) is opening an ICE connection in the GSM_MANAGER_PHASE_END_SESSION
phase, which causes a new GsmXSMPClient to be added to the client
store. The GSM_MANAGER_PHASE_EXIT phase then begins before the client
has had a chance to establish a xsmp connection, which means that
client->priv->conn will not be initialized at the point that xsmp_stop
is called on the new unregistered client."
The fix is to create the GsmXSMPClient object when there's a real XSMP
connection. This implies moving the timeout that makes sure we don't
have an empty client to the XSMP server.
https://bugzilla.gnome.org/show_bug.cgi?id=598211
|
void CWebSession::FillMessageLoops(CTemplate& Tmpl) {
for (const CString& sMessage : m_vsErrorMsgs) {
CTemplate& Row = Tmpl.AddRow("ErrorLoop");
Row["Message"] = sMessage;
}
for (const CString& sMessage : m_vsSuccessMsgs) {
CTemplate& Row = Tmpl.AddRow("SuccessLoop");
Row["Message"] = sMessage;
}
}
| 0 |
[
"CWE-22"
] |
znc
|
a4a5aeeb17d32937d8c7d743dae9a4cc755ce773
| 307,927,834,937,208,460,000,000,000,000,000,000,000 | 11 |
Don't let web skin name ../../../../ access files outside of usual skins directories.
Thanks to Jeriko One <[email protected]> for finding and reporting this.
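A minimal version of that validation rejects any skin name containing a path separator or a parent-directory component (a sketch; ZNC's actual check may differ):

```c
#include <stdbool.h>
#include <string.h>

/* Reject skin names that could escape the skins directories. */
static bool skin_name_is_safe(const char *name)
{
    if (name == NULL || name[0] == '\0')
        return false;
    if (strchr(name, '/') != NULL || strchr(name, '\\') != NULL)
        return false;              /* no path separators            */
    if (strstr(name, "..") != NULL)
        return false;              /* no parent-directory traversal */
    return true;
}
```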
|
void php_startup_auto_globals(void)
{
zend_register_auto_global(zend_string_init("_GET", sizeof("_GET")-1, 1), 0, php_auto_globals_create_get);
zend_register_auto_global(zend_string_init("_POST", sizeof("_POST")-1, 1), 0, php_auto_globals_create_post);
zend_register_auto_global(zend_string_init("_COOKIE", sizeof("_COOKIE")-1, 1), 0, php_auto_globals_create_cookie);
zend_register_auto_global(zend_string_init("_SERVER", sizeof("_SERVER")-1, 1), PG(auto_globals_jit), php_auto_globals_create_server);
zend_register_auto_global(zend_string_init("_ENV", sizeof("_ENV")-1, 1), PG(auto_globals_jit), php_auto_globals_create_env);
zend_register_auto_global(zend_string_init("_REQUEST", sizeof("_REQUEST")-1, 1), PG(auto_globals_jit), php_auto_globals_create_request);
zend_register_auto_global(zend_string_init("_FILES", sizeof("_FILES")-1, 1), 0, php_auto_globals_create_files);
}
| 0 |
[
"CWE-400",
"CWE-703"
] |
php-src
|
a15bffd105ac28fd0dd9b596632dbf035238fda3
| 6,290,684,100,767,236,000,000,000,000,000,000,000 | 10 |
Fix bug #73807
|
maybe_log_offset(double offset, time_t now)
{
double abs_offset;
FILE *p;
char buffer[BUFLEN], host[BUFLEN];
struct tm *tm;
abs_offset = fabs(offset);
if (abs_offset > log_change_threshold) {
LOG(LOGS_WARN, "System clock wrong by %.6f seconds, adjustment started",
-offset);
}
if (do_mail_change &&
(abs_offset > mail_change_threshold)) {
snprintf(buffer, sizeof (buffer), "%s -t", MAIL_PROGRAM);
p = popen(buffer, "w");
if (p) {
if (gethostname(host, sizeof(host)) < 0) {
strcpy(host, "<UNKNOWN>");
}
host[sizeof (host) - 1] = '\0';
fprintf(p, "To: %s\n", mail_change_user);
fprintf(p, "Subject: chronyd reports change to system clock on node [%s]\n", host);
fputs("\n", p);
tm = localtime(&now);
if (tm) {
strftime(buffer, sizeof (buffer),
"On %A, %d %B %Y\n with the system clock reading %H:%M:%S (%Z)", tm);
fputs(buffer, p);
}
/* If offset < 0 the local clock is slow, so we are applying a
positive change to it to bring it into line, hence the
negation of 'offset' in the next statement (and earlier) */
fprintf(p,
"\n\nchronyd started to apply an adjustment of %.3f seconds to it,\n"
" which exceeded the reporting threshold of %.3f seconds\n\n",
-offset, mail_change_threshold);
pclose(p);
} else {
LOG(LOGS_ERR, "Could not send mail notification to user %s\n",
mail_change_user);
}
}
}
| 0 |
[
"CWE-59"
] |
chrony
|
e18903a6b56341481a2e08469c0602010bf7bfe3
| 332,983,169,412,540,540,000,000,000,000,000,000,000 | 50 |
switch to new util file functions
Replace all fopen(), rename(), and unlink() calls with the new util
functions.
|
int MonClient::get_auth_request(
Connection *con,
AuthConnectionMeta *auth_meta,
uint32_t *auth_method,
std::vector<uint32_t> *preferred_modes,
bufferlist *bl)
{
std::lock_guard l(monc_lock);
ldout(cct,10) << __func__ << " con " << con << " auth_method " << *auth_method
<< dendl;
// connection to mon?
if (con->get_peer_type() == CEPH_ENTITY_TYPE_MON) {
ceph_assert(!auth_meta->authorizer);
for (auto& i : pending_cons) {
if (i.second.is_con(con)) {
return i.second.get_auth_request(
auth_method, preferred_modes, bl,
entity_name, want_keys, rotating_secrets.get());
}
}
return -ENOENT;
}
// generate authorizer
if (!auth) {
lderr(cct) << __func__ << " but no auth handler is set up" << dendl;
return -EACCES;
}
auth_meta->authorizer.reset(auth->build_authorizer(con->get_peer_type()));
if (!auth_meta->authorizer) {
lderr(cct) << __func__ << " failed to build_authorizer for type "
<< ceph_entity_type_name(con->get_peer_type()) << dendl;
return -EACCES;
}
auth_meta->auth_method = auth_meta->authorizer->protocol;
auth_registry.get_supported_modes(con->get_peer_type(),
auth_meta->auth_method,
preferred_modes);
*bl = auth_meta->authorizer->bl;
return 0;
}
| 0 |
[
"CWE-294"
] |
ceph
|
2927fd91d41e505237cc73f9700e5c6a63e5cb4f
| 174,661,561,143,875,100,000,000,000,000,000,000,000 | 42 |
mon/MonClient: bring back CEPHX_V2 authorizer challenges
Commit c58c5754dfd2 ("msg/async/ProtocolV1: use AuthServer and
AuthClient") introduced a backwards compatibility issue into msgr1.
To fix it, commit 321548010578 ("mon/MonClient: skip CEPHX_V2
challenge if client doesn't support it") set out to skip authorizer
challenges for peers that don't support CEPHX_V2. However, it
made it so that authorizer challenges are skipped for all peers in
both msgr1 and msgr2 cases, effectively disabling the protection
against replay attacks that was put in place in commit f80b848d3f83
("auth/cephx: add authorizer challenge", CVE-2018-1128).
This is because con->get_features() always returns 0 at that
point. In msgr1 case, the peer shares its features along with the
authorizer, but while they are available in connect_msg.features they
aren't assigned to con until ProtocolV1::open(). In msgr2 case, the
peer doesn't share its features until much later (in CLIENT_IDENT
frame, i.e. after the authentication phase). The result is that
!CEPHX_V2 branch is taken in all cases and replay attack protection
is lost.
Only clusters with cephx_service_require_version set to 2 on the
service daemons would not be silently downgraded. But, since the
default is 1 and there are no reports of looping on BADAUTHORIZER
faults, I'm pretty sure that no one has ever done that. Note that
cephx_require_version set to 2 would have no effect even though it
is supposed to be stronger than cephx_service_require_version
because MonClient::handle_auth_request() didn't check it.
To fix:
- for msgr1, check connect_msg.features (as was done before commit
c58c5754dfd2) and challenge if CEPHX_V2 is supported. Together
with two preceding patches that resurrect proper cephx_* option
handling in msgr1, this covers both "I want old clients to work"
and "I wish to require better authentication" use cases.
- for msgr2, don't check anything and always challenge. CEPHX_V2
predates msgr2, anyone speaking msgr2 must support it.
Signed-off-by: Ilya Dryomov <[email protected]>
(cherry picked from commit 4a82c72e3bdddcb625933e83af8b50a444b961f1)
Conflicts:
src/msg/async/ProtocolV1.cc [ commit c58c5754dfd2
("msg/async/ProtocolV1: use AuthServer and AuthClient") not
in nautilus. This means that only msgr2 is affected, so drop
ProtocolV1.cc hunk. As a result, skip_authorizer_challenge is
never set, but this is fine because msgr1 still uses old ms_*
auth methods and tests CEPHX_V2 appropriately. ]
|
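The decision rule spelled out in the message above — msgr2 peers are always challenged, msgr1 peers only when they advertise CEPHX_V2 — condenses to a small predicate. A sketch under stated assumptions (the feature-bit value is made up; Ceph's real constants differ):

#include <stdbool.h>
#include <stdint.h>

#define FEATURE_CEPHX_V2 (1ULL << 61)  /* assumed bit, for illustration */

static bool should_challenge(int msgr_version, uint64_t peer_features)
{
    if (msgr_version >= 2)
        return true;  /* CEPHX_V2 predates msgr2: always challenge */
    /* msgr1: the peer shares its features along with the authorizer */
    return (peer_features & FEATURE_CEPHX_V2) != 0;
}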
void compute_angular_endpoints_2planes(
unsigned int tune_low_weight_limit,
const block_size_descriptor& bsd,
const float* dec_weight_quant_uvalue,
const float* dec_weight_quant_sig,
float low_value1[WEIGHTS_MAX_BLOCK_MODES],
float high_value1[WEIGHTS_MAX_BLOCK_MODES],
float low_value2[WEIGHTS_MAX_BLOCK_MODES],
float high_value2[WEIGHTS_MAX_BLOCK_MODES]
) {
float low_values1[WEIGHTS_MAX_DECIMATION_MODES][12];
float high_values1[WEIGHTS_MAX_DECIMATION_MODES][12];
float low_values2[WEIGHTS_MAX_DECIMATION_MODES][12];
float high_values2[WEIGHTS_MAX_DECIMATION_MODES][12];
promise(bsd.decimation_mode_count > 0);
for (unsigned int i = 0; i < bsd.decimation_mode_count; i++)
{
const decimation_mode& dm = bsd.decimation_modes[i];
if (dm.maxprec_2planes < 0 || !dm.percentile_hit)
{
continue;
}
unsigned int weight_count = bsd.decimation_tables[i]->weight_count;
if (weight_count < tune_low_weight_limit)
{
compute_angular_endpoints_for_quant_levels_lwc(
weight_count,
dec_weight_quant_uvalue + i * BLOCK_MAX_WEIGHTS,
dec_weight_quant_sig + i * BLOCK_MAX_WEIGHTS,
dm.maxprec_2planes, low_values1[i], high_values1[i]);
compute_angular_endpoints_for_quant_levels_lwc(
weight_count,
dec_weight_quant_uvalue + i * BLOCK_MAX_WEIGHTS + WEIGHTS_PLANE2_OFFSET,
dec_weight_quant_sig + i * BLOCK_MAX_WEIGHTS + WEIGHTS_PLANE2_OFFSET,
dm.maxprec_2planes, low_values2[i], high_values2[i]);
}
else
{
compute_angular_endpoints_for_quant_levels(
weight_count,
dec_weight_quant_uvalue + i * BLOCK_MAX_WEIGHTS,
dec_weight_quant_sig + i * BLOCK_MAX_WEIGHTS,
dm.maxprec_2planes, low_values1[i], high_values1[i]);
compute_angular_endpoints_for_quant_levels(
weight_count,
dec_weight_quant_uvalue + i * BLOCK_MAX_WEIGHTS + WEIGHTS_PLANE2_OFFSET,
dec_weight_quant_sig + i * BLOCK_MAX_WEIGHTS + WEIGHTS_PLANE2_OFFSET,
dm.maxprec_2planes, low_values2[i], high_values2[i]);
}
}
promise(bsd.block_mode_count > 0);
for (unsigned int i = 0; i < bsd.block_mode_count; ++i)
{
const block_mode& bm = bsd.block_modes[i];
if (!bm.is_dual_plane || !bm.percentile_hit)
{
continue;
}
unsigned int quant_mode = bm.quant_mode;
unsigned int decim_mode = bm.decimation_mode;
low_value1[i] = low_values1[decim_mode][quant_mode];
high_value1[i] = high_values1[decim_mode][quant_mode];
low_value2[i] = low_values2[decim_mode][quant_mode];
high_value2[i] = high_values2[decim_mode][quant_mode];
}
}
| 0 |
[
"CWE-787"
] |
astc-encoder
|
6ffb3058bfbcc836108c25274e955e399481e2b4
| 201,825,731,859,519,820,000,000,000,000,000,000,000 | 74 |
Provide a fallback for blocks which find no valid encoding
|
int TTF_SetFontSizeDPI(TTF_Font *font, int ptsize, unsigned int hdpi, unsigned int vdpi)
{
FT_Face face = font->face;
FT_Error error;
/* Make sure that our font face is scalable (global metrics) */
if (FT_IS_SCALABLE(face)) {
/* Set the character size using the provided DPI. If a zero DPI
* is provided, then the other DPI setting will be used. If both
* are zero, then Freetype's default 72 DPI will be used. */
error = FT_Set_Char_Size(face, 0, ptsize * 64, hdpi, vdpi);
if (error) {
TTF_SetFTError("Couldn't set font size", error);
return -1;
}
} else {
/* Non-scalable font case. ptsize determines which family
* or series of fonts to grab from the non-scalable format.
* It is not the point size of the font. */
if (face->num_fixed_sizes <= 0) {
TTF_SetError("Couldn't select size : no num_fixed_sizes");
return -1;
}
/* within [0; num_fixed_sizes - 1] */
ptsize = SDL_max(ptsize, 0);
ptsize = SDL_min(ptsize, face->num_fixed_sizes - 1);
error = FT_Select_Size(face, ptsize);
if (error) {
TTF_SetFTError("Couldn't select size", error);
return -1;
}
}
if (TTF_initFontMetrics(font) < 0) {
TTF_SetError("Cannot initialize metrics");
return -1;
}
Flush_Cache(font);
#if TTF_USE_HARFBUZZ
/* Call when size or variations settings on underlying FT_Face change. */
hb_ft_font_changed(font->hb_font);
#endif
    return 0;
}
| 0 |
[
"CWE-190",
"CWE-787"
] |
SDL_ttf
|
db1b41ab8bde6723c24b866e466cad78c2fa0448
| 245,774,241,705,715,950,000,000,000,000,000,000,000 | 49 |
More integer overflow (see bug #187)
Make sure that 'width + alignment' doesn't overflow, otherwise
it could create a SDL_Surface of 'width' but with wrong 'pitch'
|
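A minimal sketch of the 'width + alignment' overflow guard the message describes, assuming alignment is a (2^k - 1) mask as in typical pitch computations; this is not the exact patch:

#include <limits.h>

/* Return -1 if width + alignment would overflow int; otherwise the
 * aligned pitch. */
static int aligned_pitch(int width, int alignment)
{
    if (width < 0 || width > INT_MAX - alignment)
        return -1;                       /* would overflow */
    return (width + alignment) & ~alignment;
}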
callbacks_change_tool (GtkButton *button, gpointer user_data) {
gint toolNumber = GPOINTER_TO_INT (user_data);
	/* make sure we don't get caught in endless recursion here */
if (screen.win.updatingTools)
return;
screen.win.updatingTools = TRUE;
gtk_toggle_tool_button_set_active (GTK_TOGGLE_TOOL_BUTTON (screen.win.toolButtonPointer), FALSE);
gtk_toggle_tool_button_set_active (GTK_TOGGLE_TOOL_BUTTON (screen.win.toolButtonPan), FALSE);
gtk_toggle_tool_button_set_active (GTK_TOGGLE_TOOL_BUTTON (screen.win.toolButtonZoom), FALSE);
gtk_toggle_tool_button_set_active (GTK_TOGGLE_TOOL_BUTTON (screen.win.toolButtonMeasure), FALSE);
switch (toolNumber) {
case POINTER:
gtk_toggle_tool_button_set_active (GTK_TOGGLE_TOOL_BUTTON (screen.win.toolButtonPointer), TRUE);
screen.tool = POINTER;
screen.state = NORMAL;
utf8_strncpy(screen.statusbar.diststr,
_("Click to select objects in the active layer. "
"Middle click and drag to pan."),
MAX_DISTLEN);
break;
case PAN:
gtk_toggle_tool_button_set_active (GTK_TOGGLE_TOOL_BUTTON (screen.win.toolButtonPan), TRUE);
screen.tool = PAN;
screen.state = NORMAL;
utf8_strncpy(screen.statusbar.diststr,
_("Click and drag to pan. Right click and drag to zoom."),
MAX_DISTLEN);
break;
case ZOOM:
gtk_toggle_tool_button_set_active (GTK_TOGGLE_TOOL_BUTTON (screen.win.toolButtonZoom), TRUE);
screen.tool = ZOOM;
screen.state = NORMAL;
utf8_strncpy(screen.statusbar.diststr,
_("Click and drag to zoom in. Shift+click to zoom out."),
MAX_DISTLEN);
break;
case MEASURE:
gtk_toggle_tool_button_set_active (GTK_TOGGLE_TOOL_BUTTON (screen.win.toolButtonMeasure), TRUE);
screen.tool = MEASURE;
screen.state = NORMAL;
utf8_strncpy(screen.statusbar.diststr,
_("Click and drag to measure a distance or select two apertures."),
MAX_DISTLEN);
			/* Don't show the previous measure drag-line */
screen.measure_start_x = 0;
screen.measure_start_y = 0;
screen.measure_stop_x = 0;
screen.measure_stop_y = 0;
			/* If two items are selected, measure their distance */
if (selection_length (&screen.selectionInfo) == 2) {
gerbv_selection_item_t item[2];
gerbv_net_t *net[2];
item[0] = selection_get_item_by_index(
&screen.selectionInfo, 0);
item[1] = selection_get_item_by_index(
&screen.selectionInfo, 1);
net[0] = item[0].net;
net[1] = item[1].net;
if ((net[0]->aperture_state ==
net[1]->aperture_state)
&& (net[0]->aperture_state ==
GERBV_APERTURE_STATE_FLASH)) {
screen.measure_start_x = net[0]->stop_x;
screen.measure_start_y = net[0]->stop_y;
gerbv_transform_coord_for_image(
&screen.measure_start_x,
&screen.measure_start_y,
item[0].image,
mainProject);
screen.measure_stop_x = net[1]->stop_x;
screen.measure_stop_y = net[1]->stop_y;
gerbv_transform_coord_for_image(
&screen.measure_stop_x,
&screen.measure_stop_y,
item[1].image,
mainProject);
render_draw_measure_distance();
}
}
break;
default:
break;
}
callbacks_switch_to_normal_tool_cursor (toolNumber);
callbacks_update_statusbar();
screen.win.updatingTools = FALSE;
callbacks_force_expose_event_for_screen();
}
| 0 |
[
"CWE-200"
] |
gerbv
|
319a8af890e4d0a5c38e6d08f510da8eefc42537
| 115,295,329,532,110,440,000,000,000,000,000,000,000 | 97 |
Remove local alias to parameter array
Normalizing access to `gerbv_simplified_amacro_t::parameter` as a step to fix CVE-2021-40402
|
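The normalization step named above — dropping a local alias to the parameter array — looks roughly like this sketch (struct and bound simplified; gerbv's real array size differs):

#define PARAM_COUNT 10                     /* simplified bound */
typedef struct { double parameter[PARAM_COUNT]; } simplified_amacro_t;

static double read_param(const simplified_amacro_t *sam, int i)
{
    /* before: double *p = sam->parameter; ... p[i] ...
     * after: index the member directly so every access is visible
     * to a later bounds check */
    if (i < 0 || i >= PARAM_COUNT)
        return 0.0;                        /* assumed error value */
    return sam->parameter[i];
}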
xmlParseAttribute2(xmlParserCtxtPtr ctxt,
const xmlChar * pref, const xmlChar * elem,
const xmlChar ** prefix, xmlChar ** value,
int *len, int *alloc)
{
const xmlChar *name;
xmlChar *val, *internal_val = NULL;
int normalize = 0;
*value = NULL;
GROW;
name = xmlParseQName(ctxt, prefix);
if (name == NULL) {
xmlFatalErrMsg(ctxt, XML_ERR_NAME_REQUIRED,
"error parsing attribute name\n");
return (NULL);
}
/*
* get the type if needed
*/
if (ctxt->attsSpecial != NULL) {
int type;
type = (int) (ptrdiff_t) xmlHashQLookup2(ctxt->attsSpecial,
pref, elem, *prefix, name);
if (type != 0)
normalize = 1;
}
/*
* read the value
*/
SKIP_BLANKS;
if (RAW == '=') {
NEXT;
SKIP_BLANKS;
val = xmlParseAttValueInternal(ctxt, len, alloc, normalize);
if (normalize) {
/*
* Sometimes a second normalisation pass for spaces is needed
	     * but that only happens if charrefs or entity references
	     * have been used in the attribute value, i.e. the attribute
	     * value has been extracted into an allocated string already.
*/
if (*alloc) {
const xmlChar *val2;
val2 = xmlAttrNormalizeSpace2(ctxt, val, len);
if ((val2 != NULL) && (val2 != val)) {
xmlFree(val);
val = (xmlChar *) val2;
}
}
}
ctxt->instate = XML_PARSER_CONTENT;
} else {
xmlFatalErrMsgStr(ctxt, XML_ERR_ATTRIBUTE_WITHOUT_VALUE,
"Specification mandates value for attribute %s\n",
name);
return (NULL);
}
if (*prefix == ctxt->str_xml) {
/*
* Check that xml:lang conforms to the specification
* No more registered as an error, just generate a warning now
* since this was deprecated in XML second edition
*/
if ((ctxt->pedantic) && (xmlStrEqual(name, BAD_CAST "lang"))) {
internal_val = xmlStrndup(val, *len);
if (!xmlCheckLanguageID(internal_val)) {
xmlWarningMsg(ctxt, XML_WAR_LANG_VALUE,
"Malformed value for xml:lang : %s\n",
internal_val, NULL);
}
}
/*
* Check that xml:space conforms to the specification
*/
if (xmlStrEqual(name, BAD_CAST "space")) {
internal_val = xmlStrndup(val, *len);
if (xmlStrEqual(internal_val, BAD_CAST "default"))
*(ctxt->space) = 0;
else if (xmlStrEqual(internal_val, BAD_CAST "preserve"))
*(ctxt->space) = 1;
else {
xmlWarningMsg(ctxt, XML_WAR_SPACE_VALUE,
"Invalid value \"%s\" for xml:space : \"default\" or \"preserve\" expected\n",
internal_val, NULL);
}
}
if (internal_val) {
xmlFree(internal_val);
}
}
*value = val;
return (name);
}
| 0 |
[
"CWE-401"
] |
libxml2
|
5a02583c7e683896d84878bd90641d8d9b0d0549
| 244,170,051,053,511,430,000,000,000,000,000,000,000 | 101 |
Fix memory leak in xmlParseBalancedChunkMemoryRecover
When doc is NULL, the namespace created in xmlTreeEnsureXMLDecl
is bound to newDoc->oldNs; in this case, setting newDoc->oldNs to
NULL and freeing newDoc would leak that namespace list.
Found with libFuzzer.
Closes #82.
|
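A sketch of the ownership rule the message describes, using libxml2's public types (the real patch lives inside xmlParseBalancedChunkMemoryRecover; this helper is an assumption): detach newDoc->oldNs only when the chain was borrowed from a caller-supplied doc, so xmlFreeDoc() can release a chain that newDoc actually owns.

#include <libxml/tree.h>

static void release_temp_doc(xmlDocPtr newDoc, xmlDocPtr doc)
{
    if (newDoc == NULL)
        return;
    if (doc != NULL)
        newDoc->oldNs = NULL;  /* borrowed from doc: must not free */
    xmlFreeDoc(newDoc);        /* frees oldNs only if still attached */
}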
static int reg_event_syscall_exit(struct ftrace_event_file *file,
struct ftrace_event_call *call)
{
struct trace_array *tr = file->tr;
int ret = 0;
int num;
num = ((struct syscall_metadata *)call->data)->syscall_nr;
if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
return -ENOSYS;
mutex_lock(&syscall_trace_lock);
if (!tr->sys_refcount_exit)
ret = register_trace_sys_exit(ftrace_syscall_exit, tr);
if (!ret) {
rcu_assign_pointer(tr->exit_syscall_files[num], file);
tr->sys_refcount_exit++;
}
mutex_unlock(&syscall_trace_lock);
return ret;
}
| 0 |
[
"CWE-125",
"CWE-476",
"CWE-119",
"CWE-264"
] |
linux
|
086ba77a6db00ed858ff07451bedee197df868c9
| 90,126,166,431,253,540,000,000,000,000,000,000,000 | 20 |
tracing/syscalls: Ignore numbers outside NR_syscalls' range
ARM has some private syscalls (for example, set_tls(2)) which lie
outside the range of NR_syscalls. If any of these are called while
syscall tracing is being performed, out-of-bounds array access will
occur in the ftrace and perf sys_{enter,exit} handlers.
# trace-cmd record -e raw_syscalls:* true && trace-cmd report
...
true-653 [000] 384.675777: sys_enter: NR 192 (0, 1000, 3, 4000022, ffffffff, 0)
true-653 [000] 384.675812: sys_exit: NR 192 = 1995915264
true-653 [000] 384.675971: sys_enter: NR 983045 (76f74480, 76f74000, 76f74b28, 76f74480, 76f76f74, 1)
true-653 [000] 384.675988: sys_exit: NR 983045 = 0
...
# trace-cmd record -e syscalls:* true
[ 17.289329] Unable to handle kernel paging request at virtual address aaaaaace
[ 17.289590] pgd = 9e71c000
[ 17.289696] [aaaaaace] *pgd=00000000
[ 17.289985] Internal error: Oops: 5 [#1] PREEMPT SMP ARM
[ 17.290169] Modules linked in:
[ 17.290391] CPU: 0 PID: 704 Comm: true Not tainted 3.18.0-rc2+ #21
[ 17.290585] task: 9f4dab00 ti: 9e710000 task.ti: 9e710000
[ 17.290747] PC is at ftrace_syscall_enter+0x48/0x1f8
[ 17.290866] LR is at syscall_trace_enter+0x124/0x184
Fix this by ignoring out-of-NR_syscalls-bounds syscall numbers.
Commit cd0980fc8add "tracing: Check invalid syscall nr while tracing syscalls"
added the check for less than zero, but it should have also checked
for greater than NR_syscalls.
Link: http://lkml.kernel.org/p/[email protected]
Fixes: cd0980fc8add "tracing: Check invalid syscall nr while tracing syscalls"
Cc: [email protected] # 2.6.33+
Signed-off-by: Rabin Vincent <[email protected]>
Signed-off-by: Steven Rostedt <[email protected]>
|
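In isolation, the guard the message describes is a closed range check on the syscall number before it indexes any per-syscall table; a standalone sketch (table size assumed):

#include <stdbool.h>

#define NR_SYSCALLS 400   /* assumed table size, for illustration */

/* ARM private syscalls such as 983045 fall outside 0..NR_SYSCALLS-1
 * and must be ignored rather than used as array indices. */
static bool syscall_nr_in_range(long nr)
{
    return nr >= 0 && nr < NR_SYSCALLS;
}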
bool Image::isLongType(uint16_t type) {
return type == Exiv2::unsignedLong
|| type == Exiv2::signedLong
;
}
| 0 |
[
"CWE-125"
] |
exiv2
|
6e3855aed7ba8bb4731fc4087ca7f9078b2f3d97
| 250,683,765,704,996,600,000,000,000,000,000,000,000 | 5 |
Fix https://github.com/Exiv2/exiv2/issues/55
|
static bool test_state(unsigned int *tasks, enum psi_states state)
{
switch (state) {
case PSI_IO_SOME:
return tasks[NR_IOWAIT];
case PSI_IO_FULL:
return tasks[NR_IOWAIT] && !tasks[NR_RUNNING];
case PSI_MEM_SOME:
return tasks[NR_MEMSTALL];
case PSI_MEM_FULL:
return tasks[NR_MEMSTALL] && !tasks[NR_RUNNING];
case PSI_CPU_SOME:
return tasks[NR_RUNNING] > 1;
case PSI_NONIDLE:
return tasks[NR_IOWAIT] || tasks[NR_MEMSTALL] ||
tasks[NR_RUNNING];
default:
return false;
}
}
| 0 |
[
"CWE-787"
] |
linux
|
6fcca0fa48118e6d63733eb4644c6cd880c15b8f
| 322,418,777,216,334,900,000,000,000,000,000,000,000 | 20 |
sched/psi: Fix OOB write when writing 0 bytes to PSI files
Issuing write() with count parameter set to 0 on any file under
/proc/pressure/ will cause an OOB write because of the access to
buf[buf_size-1] when NUL-termination is performed. Fix this by checking
for buf_size to be non-zero.
Signed-off-by: Suren Baghdasaryan <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
Acked-by: Johannes Weiner <[email protected]>
Link: https://lkml.kernel.org/r/[email protected]
|
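The OOB above comes from buf[buf_size-1] when a zero-byte write makes buf_size zero. A hedged userspace-style sketch of the guarded copy (the kernel handler uses copy_from_user; names here are illustrative):

#include <stddef.h>
#include <string.h>

/* Returns 0 on success, -1 for a zero-length or oversized write. */
static int copy_bounded_string(char *buf, size_t bufsz,
                               const char *data, size_t nbytes)
{
    if (nbytes == 0 || nbytes >= bufsz)   /* the missing zero check */
        return -1;
    memcpy(buf, data, nbytes);
    buf[nbytes] = '\0';                   /* safe: nbytes < bufsz */
    return 0;
}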
static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
size_t ignored, int flags)
{
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
struct sock *psk = ask->parent;
struct alg_sock *pask = alg_sk(psk);
struct af_alg_ctx *ctx = ask->private;
struct aead_tfm *aeadc = pask->private;
struct crypto_aead *tfm = aeadc->aead;
struct crypto_skcipher *null_tfm = aeadc->null_tfm;
unsigned int as = crypto_aead_authsize(tfm);
struct af_alg_async_req *areq;
struct af_alg_tsgl *tsgl;
struct scatterlist *src;
int err = 0;
size_t used = 0; /* [in] TX bufs to be en/decrypted */
size_t outlen = 0; /* [out] RX bufs produced by kernel */
size_t usedpages = 0; /* [in] RX bufs to be used from user */
size_t processed = 0; /* [in] TX bufs to be consumed */
/*
* Data length provided by caller via sendmsg/sendpage that has not
* yet been processed.
*/
used = ctx->used;
/*
	 * Make sure sufficient data is present -- note, the same check
	 * is also present in sendmsg/sendpage. The checks in sendpage/sendmsg
* shall provide an information to the data sender that something is
* wrong, but they are irrelevant to maintain the kernel integrity.
* We need this check here too in case user space decides to not honor
* the error message in sendmsg/sendpage and still call recvmsg. This
* check here protects the kernel integrity.
*/
if (!aead_sufficient_data(sk))
return -EINVAL;
/*
* Calculate the minimum output buffer size holding the result of the
* cipher operation. When encrypting data, the receiving buffer is
* larger by the tag length compared to the input buffer as the
* encryption operation generates the tag. For decryption, the input
* buffer provides the tag which is consumed resulting in only the
* plaintext without a buffer for the tag returned to the caller.
*/
if (ctx->enc)
outlen = used + as;
else
outlen = used - as;
/*
* The cipher operation input data is reduced by the associated data
* length as this data is processed separately later on.
*/
used -= ctx->aead_assoclen;
/* Allocate cipher request for current operation. */
areq = af_alg_alloc_areq(sk, sizeof(struct af_alg_async_req) +
crypto_aead_reqsize(tfm));
if (IS_ERR(areq))
return PTR_ERR(areq);
/* convert iovecs of output buffers into RX SGL */
err = af_alg_get_rsgl(sk, msg, flags, areq, outlen, &usedpages);
if (err)
goto free;
/*
* Ensure output buffer is sufficiently large. If the caller provides
* less buffer space, only use the relative required input size. This
* allows AIO operation where the caller sent all data to be processed
* and the AIO operation performs the operation on the different chunks
* of the input data.
*/
if (usedpages < outlen) {
size_t less = outlen - usedpages;
if (used < less) {
err = -EINVAL;
goto free;
}
used -= less;
outlen -= less;
}
processed = used + ctx->aead_assoclen;
tsgl = list_first_entry(&ctx->tsgl_list, struct af_alg_tsgl, list);
/*
* Copy of AAD from source to destination
*
* The AAD is copied to the destination buffer without change. Even
* when user space uses an in-place cipher operation, the kernel
	 * will copy the data as it does not see whether such an in-place
	 * operation is initiated.
	 *
	 * To ensure efficiency, the following implementation ensures that the
* ciphers are invoked to perform a crypto operation in-place. This
* is achieved by memory management specified as follows.
*/
/* Use the RX SGL as source (and destination) for crypto op. */
src = areq->first_rsgl.sgl.sg;
if (ctx->enc) {
/*
* Encryption operation - The in-place cipher operation is
* achieved by the following operation:
*
* TX SGL: AAD || PT
* | |
* | copy |
* v v
* RX SGL: AAD || PT || Tag
*/
err = crypto_aead_copy_sgl(null_tfm, tsgl->sg,
areq->first_rsgl.sgl.sg, processed);
if (err)
goto free;
af_alg_pull_tsgl(sk, processed, NULL, 0);
} else {
/*
* Decryption operation - To achieve an in-place cipher
* operation, the following SGL structure is used:
*
* TX SGL: AAD || CT || Tag
* | | ^
* | copy | | Create SGL link.
* v v |
* RX SGL: AAD || CT ----+
*/
/* Copy AAD || CT to RX SGL buffer for in-place operation. */
err = crypto_aead_copy_sgl(null_tfm, tsgl->sg,
areq->first_rsgl.sgl.sg, outlen);
if (err)
goto free;
/* Create TX SGL for tag and chain it to RX SGL. */
areq->tsgl_entries = af_alg_count_tsgl(sk, processed,
processed - as);
if (!areq->tsgl_entries)
areq->tsgl_entries = 1;
areq->tsgl = sock_kmalloc(sk, sizeof(*areq->tsgl) *
areq->tsgl_entries,
GFP_KERNEL);
if (!areq->tsgl) {
err = -ENOMEM;
goto free;
}
sg_init_table(areq->tsgl, areq->tsgl_entries);
/* Release TX SGL, except for tag data and reassign tag data. */
af_alg_pull_tsgl(sk, processed, areq->tsgl, processed - as);
/* chain the areq TX SGL holding the tag with RX SGL */
if (usedpages) {
/* RX SGL present */
struct af_alg_sgl *sgl_prev = &areq->last_rsgl->sgl;
sg_unmark_end(sgl_prev->sg + sgl_prev->npages - 1);
sg_chain(sgl_prev->sg, sgl_prev->npages + 1,
areq->tsgl);
} else
/* no RX SGL present (e.g. authentication only) */
src = areq->tsgl;
}
/* Initialize the crypto operation */
aead_request_set_crypt(&areq->cra_u.aead_req, src,
areq->first_rsgl.sgl.sg, used, ctx->iv);
aead_request_set_ad(&areq->cra_u.aead_req, ctx->aead_assoclen);
aead_request_set_tfm(&areq->cra_u.aead_req, tfm);
if (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) {
/* AIO operation */
areq->iocb = msg->msg_iocb;
aead_request_set_callback(&areq->cra_u.aead_req,
CRYPTO_TFM_REQ_MAY_BACKLOG,
af_alg_async_cb, areq);
err = ctx->enc ? crypto_aead_encrypt(&areq->cra_u.aead_req) :
crypto_aead_decrypt(&areq->cra_u.aead_req);
} else {
/* Synchronous operation */
aead_request_set_callback(&areq->cra_u.aead_req,
CRYPTO_TFM_REQ_MAY_BACKLOG,
crypto_req_done, &ctx->wait);
err = crypto_wait_req(ctx->enc ?
crypto_aead_encrypt(&areq->cra_u.aead_req) :
crypto_aead_decrypt(&areq->cra_u.aead_req),
&ctx->wait);
}
/* AIO operation in progress */
if (err == -EINPROGRESS) {
sock_hold(sk);
/* Remember output size that will be generated. */
areq->outlen = outlen;
return -EIOCBQUEUED;
}
free:
af_alg_free_areq_sgls(areq);
sock_kfree_s(sk, areq, areq->areqlen);
return err ? err : outlen;
}
| 0 |
[
"CWE-20"
] |
linux
|
b32a7dc8aef1882fbf983eb354837488cc9d54dc
| 134,032,405,162,228,300,000,000,000,000,000,000,000 | 211 |
crypto: algif_aead - fix reference counting of null skcipher
In the AEAD interface for AF_ALG, the reference to the "null skcipher"
held by each tfm was being dropped in the wrong place -- when each
af_alg_ctx was freed instead of when the aead_tfm was freed. As
discovered by syzkaller, a specially crafted program could use this to
cause the null skcipher to be freed while it is still in use.
Fix it by dropping the reference in the right place.
Fixes: 72548b093ee3 ("crypto: algif_aead - copy AAD from src to dst")
Reported-by: syzbot <[email protected]>
Cc: <[email protected]> # v4.14+
Signed-off-by: Eric Biggers <[email protected]>
Reviewed-by: Stephan Mueller <[email protected]>
Signed-off-by: Herbert Xu <[email protected]>
|
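The fix is about where a reference is dropped: the null skcipher belongs to the aead_tfm, so its refcount must fall in the tfm's teardown, not in each per-socket ctx teardown. A simplified, non-kernel refcounting sketch (types are assumptions):

#include <stdlib.h>

struct null_cipher { int refcnt; };

static void null_cipher_put(struct null_cipher *c)
{
    if (c != NULL && --c->refcnt == 0)
        free(c);
}

struct aead_tfm_sketch { struct null_cipher *null_tfm; };

/* Correct place to drop the reference: the owner's teardown. */
static void aead_tfm_free(struct aead_tfm_sketch *tfm)
{
    null_cipher_put(tfm->null_tfm);   /* was wrongly done per-ctx */
    free(tfm);
}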
int MAIN(int argc, char **argv)
{
int off=0, clr = 0;
SSL *con=NULL,*con2=NULL;
X509_STORE *store = NULL;
int s,k,width,state=0;
char *cbuf=NULL,*sbuf=NULL,*mbuf=NULL;
int cbuf_len,cbuf_off;
int sbuf_len,sbuf_off;
fd_set readfds,writefds;
short port=PORT;
int full_log=1;
char *host=SSL_HOST_NAME;
char *cert_file=NULL,*key_file=NULL;
int cert_format = FORMAT_PEM, key_format = FORMAT_PEM;
char *passarg = NULL, *pass = NULL;
X509 *cert = NULL;
EVP_PKEY *key = NULL;
char *CApath=NULL,*CAfile=NULL,*cipher=NULL;
int reconnect=0,badop=0,verify=SSL_VERIFY_NONE,bugs=0;
int crlf=0;
int write_tty,read_tty,write_ssl,read_ssl,tty_on,ssl_pending;
SSL_CTX *ctx=NULL;
int ret=1,in_init=1,i,nbio_test=0;
int starttls_proto = PROTO_OFF;
int prexit = 0, vflags = 0;
SSL_METHOD *meth=NULL;
#ifdef sock_type
#undef sock_type
#endif
int sock_type=SOCK_STREAM;
BIO *sbio;
char *inrand=NULL;
int mbuf_len=0;
struct timeval timeout, *timeoutp;
#ifndef OPENSSL_NO_ENGINE
char *engine_id=NULL;
char *ssl_client_engine_id=NULL;
ENGINE *ssl_client_engine=NULL;
#endif
ENGINE *e=NULL;
#if defined(OPENSSL_SYS_WINDOWS) || defined(OPENSSL_SYS_MSDOS) || defined(OPENSSL_SYS_NETWARE)
struct timeval tv;
#endif
#ifndef OPENSSL_NO_TLSEXT
char *servername = NULL;
tlsextctx tlsextcbp =
{NULL,0};
#endif
char *sess_in = NULL;
char *sess_out = NULL;
struct sockaddr peer;
int peerlen = sizeof(peer);
int enable_timeouts = 0 ;
long socket_mtu = 0;
#ifndef OPENSSL_NO_JPAKE
char *jpake_secret = NULL;
#endif
meth=SSLv23_client_method();
apps_startup();
c_Pause=0;
c_quiet=0;
c_ign_eof=0;
c_debug=0;
c_msg=0;
c_showcerts=0;
if (bio_err == NULL)
bio_err=BIO_new_fp(stderr,BIO_NOCLOSE);
if (!load_config(bio_err, NULL))
goto end;
if ( ((cbuf=OPENSSL_malloc(BUFSIZZ)) == NULL) ||
((sbuf=OPENSSL_malloc(BUFSIZZ)) == NULL) ||
((mbuf=OPENSSL_malloc(BUFSIZZ)) == NULL))
{
BIO_printf(bio_err,"out of memory\n");
goto end;
}
verify_depth=0;
verify_error=X509_V_OK;
#ifdef FIONBIO
c_nbio=0;
#endif
argc--;
argv++;
while (argc >= 1)
{
if (strcmp(*argv,"-host") == 0)
{
if (--argc < 1) goto bad;
host= *(++argv);
}
else if (strcmp(*argv,"-port") == 0)
{
if (--argc < 1) goto bad;
port=atoi(*(++argv));
if (port == 0) goto bad;
}
else if (strcmp(*argv,"-connect") == 0)
{
if (--argc < 1) goto bad;
if (!extract_host_port(*(++argv),&host,NULL,&port))
goto bad;
}
else if (strcmp(*argv,"-verify") == 0)
{
verify=SSL_VERIFY_PEER;
if (--argc < 1) goto bad;
verify_depth=atoi(*(++argv));
BIO_printf(bio_err,"verify depth is %d\n",verify_depth);
}
else if (strcmp(*argv,"-cert") == 0)
{
if (--argc < 1) goto bad;
cert_file= *(++argv);
}
else if (strcmp(*argv,"-sess_out") == 0)
{
if (--argc < 1) goto bad;
sess_out = *(++argv);
}
else if (strcmp(*argv,"-sess_in") == 0)
{
if (--argc < 1) goto bad;
sess_in = *(++argv);
}
else if (strcmp(*argv,"-certform") == 0)
{
if (--argc < 1) goto bad;
cert_format = str2fmt(*(++argv));
}
else if (strcmp(*argv,"-crl_check") == 0)
vflags |= X509_V_FLAG_CRL_CHECK;
else if (strcmp(*argv,"-crl_check_all") == 0)
vflags |= X509_V_FLAG_CRL_CHECK|X509_V_FLAG_CRL_CHECK_ALL;
else if (strcmp(*argv,"-prexit") == 0)
prexit=1;
else if (strcmp(*argv,"-crlf") == 0)
crlf=1;
else if (strcmp(*argv,"-quiet") == 0)
{
c_quiet=1;
c_ign_eof=1;
}
else if (strcmp(*argv,"-ign_eof") == 0)
c_ign_eof=1;
else if (strcmp(*argv,"-no_ign_eof") == 0)
c_ign_eof=0;
else if (strcmp(*argv,"-pause") == 0)
c_Pause=1;
else if (strcmp(*argv,"-debug") == 0)
c_debug=1;
#ifndef OPENSSL_NO_TLSEXT
else if (strcmp(*argv,"-tlsextdebug") == 0)
c_tlsextdebug=1;
else if (strcmp(*argv,"-status") == 0)
c_status_req=1;
#endif
#ifdef WATT32
else if (strcmp(*argv,"-wdebug") == 0)
dbug_init();
#endif
else if (strcmp(*argv,"-msg") == 0)
c_msg=1;
else if (strcmp(*argv,"-showcerts") == 0)
c_showcerts=1;
else if (strcmp(*argv,"-nbio_test") == 0)
nbio_test=1;
else if (strcmp(*argv,"-state") == 0)
state=1;
#ifndef OPENSSL_NO_SSL2
else if (strcmp(*argv,"-ssl2") == 0)
meth=SSLv2_client_method();
#endif
#ifndef OPENSSL_NO_SSL3
else if (strcmp(*argv,"-ssl3") == 0)
meth=SSLv3_client_method();
#endif
#ifndef OPENSSL_NO_TLS1
else if (strcmp(*argv,"-tls1") == 0)
meth=TLSv1_client_method();
#endif
#ifndef OPENSSL_NO_DTLS1
else if (strcmp(*argv,"-dtls1") == 0)
{
meth=DTLSv1_client_method();
sock_type=SOCK_DGRAM;
}
else if (strcmp(*argv,"-timeout") == 0)
enable_timeouts=1;
else if (strcmp(*argv,"-mtu") == 0)
{
if (--argc < 1) goto bad;
socket_mtu = atol(*(++argv));
}
#endif
else if (strcmp(*argv,"-bugs") == 0)
bugs=1;
else if (strcmp(*argv,"-keyform") == 0)
{
if (--argc < 1) goto bad;
key_format = str2fmt(*(++argv));
}
else if (strcmp(*argv,"-pass") == 0)
{
if (--argc < 1) goto bad;
passarg = *(++argv);
}
else if (strcmp(*argv,"-key") == 0)
{
if (--argc < 1) goto bad;
key_file= *(++argv);
}
else if (strcmp(*argv,"-reconnect") == 0)
{
reconnect=5;
}
else if (strcmp(*argv,"-CApath") == 0)
{
if (--argc < 1) goto bad;
CApath= *(++argv);
}
else if (strcmp(*argv,"-CAfile") == 0)
{
if (--argc < 1) goto bad;
CAfile= *(++argv);
}
else if (strcmp(*argv,"-no_tls1") == 0)
off|=SSL_OP_NO_TLSv1;
else if (strcmp(*argv,"-no_ssl3") == 0)
off|=SSL_OP_NO_SSLv3;
else if (strcmp(*argv,"-no_ssl2") == 0)
off|=SSL_OP_NO_SSLv2;
#ifndef OPENSSL_NO_TLSEXT
else if (strcmp(*argv,"-no_ticket") == 0)
{ off|=SSL_OP_NO_TICKET; }
#endif
else if (strcmp(*argv,"-serverpref") == 0)
off|=SSL_OP_CIPHER_SERVER_PREFERENCE;
else if (strcmp(*argv,"-legacy_renegotiation") == 0)
off|=SSL_OP_ALLOW_UNSAFE_LEGACY_RENEGOTIATION;
else if (strcmp(*argv,"-legacy_server_connect") == 0)
{ off|=SSL_OP_LEGACY_SERVER_CONNECT; }
else if (strcmp(*argv,"-no_legacy_server_connect") == 0)
{ clr|=SSL_OP_LEGACY_SERVER_CONNECT; }
else if (strcmp(*argv,"-cipher") == 0)
{
if (--argc < 1) goto bad;
cipher= *(++argv);
}
#ifdef FIONBIO
else if (strcmp(*argv,"-nbio") == 0)
{ c_nbio=1; }
#endif
else if (strcmp(*argv,"-starttls") == 0)
{
if (--argc < 1) goto bad;
++argv;
if (strcmp(*argv,"smtp") == 0)
starttls_proto = PROTO_SMTP;
else if (strcmp(*argv,"pop3") == 0)
starttls_proto = PROTO_POP3;
else if (strcmp(*argv,"imap") == 0)
starttls_proto = PROTO_IMAP;
else if (strcmp(*argv,"ftp") == 0)
starttls_proto = PROTO_FTP;
else if (strcmp(*argv, "xmpp") == 0)
starttls_proto = PROTO_XMPP;
else
goto bad;
}
#ifndef OPENSSL_NO_ENGINE
else if (strcmp(*argv,"-engine") == 0)
{
if (--argc < 1) goto bad;
engine_id = *(++argv);
}
else if (strcmp(*argv,"-ssl_client_engine") == 0)
{
if (--argc < 1) goto bad;
ssl_client_engine_id = *(++argv);
}
#endif
else if (strcmp(*argv,"-rand") == 0)
{
if (--argc < 1) goto bad;
inrand= *(++argv);
}
#ifndef OPENSSL_NO_TLSEXT
else if (strcmp(*argv,"-servername") == 0)
{
if (--argc < 1) goto bad;
servername= *(++argv);
/* meth=TLSv1_client_method(); */
}
#endif
#ifndef OPENSSL_NO_JPAKE
else if (strcmp(*argv,"-jpake") == 0)
{
if (--argc < 1) goto bad;
jpake_secret = *++argv;
}
#endif
else
{
BIO_printf(bio_err,"unknown option %s\n",*argv);
badop=1;
break;
}
argc--;
argv++;
}
if (badop)
{
bad:
sc_usage();
goto end;
}
OpenSSL_add_ssl_algorithms();
SSL_load_error_strings();
#ifndef OPENSSL_NO_ENGINE
e = setup_engine(bio_err, engine_id, 1);
if (ssl_client_engine_id)
{
ssl_client_engine = ENGINE_by_id(ssl_client_engine_id);
if (!ssl_client_engine)
{
BIO_printf(bio_err,
"Error getting client auth engine\n");
goto end;
}
}
#endif
if (!app_passwd(bio_err, passarg, NULL, &pass, NULL))
{
BIO_printf(bio_err, "Error getting password\n");
goto end;
}
if (key_file == NULL)
key_file = cert_file;
if (key_file)
{
key = load_key(bio_err, key_file, key_format, 0, pass, e,
"client certificate private key file");
if (!key)
{
ERR_print_errors(bio_err);
goto end;
}
}
if (cert_file)
{
cert = load_cert(bio_err,cert_file,cert_format,
NULL, e, "client certificate file");
if (!cert)
{
ERR_print_errors(bio_err);
goto end;
}
}
if (!app_RAND_load_file(NULL, bio_err, 1) && inrand == NULL
&& !RAND_status())
{
BIO_printf(bio_err,"warning, not much extra random data, consider using the -rand option\n");
}
if (inrand != NULL)
BIO_printf(bio_err,"%ld semi-random bytes loaded\n",
app_RAND_load_files(inrand));
if (bio_c_out == NULL)
{
if (c_quiet && !c_debug && !c_msg)
{
bio_c_out=BIO_new(BIO_s_null());
}
else
{
if (bio_c_out == NULL)
bio_c_out=BIO_new_fp(stdout,BIO_NOCLOSE);
}
}
ctx=SSL_CTX_new(meth);
if (ctx == NULL)
{
ERR_print_errors(bio_err);
goto end;
}
#ifndef OPENSSL_NO_ENGINE
if (ssl_client_engine)
{
if (!SSL_CTX_set_client_cert_engine(ctx, ssl_client_engine))
{
BIO_puts(bio_err, "Error setting client auth engine\n");
ERR_print_errors(bio_err);
ENGINE_free(ssl_client_engine);
goto end;
}
ENGINE_free(ssl_client_engine);
}
#endif
if (bugs)
SSL_CTX_set_options(ctx,SSL_OP_ALL|off);
else
SSL_CTX_set_options(ctx,off);
if (clr)
SSL_CTX_clear_options(ctx, clr);
/* DTLS: partial reads end up discarding unread UDP bytes :-(
* Setting read ahead solves this problem.
*/
if (sock_type == SOCK_DGRAM) SSL_CTX_set_read_ahead(ctx, 1);
if (state) SSL_CTX_set_info_callback(ctx,apps_ssl_info_callback);
if (cipher != NULL)
if(!SSL_CTX_set_cipher_list(ctx,cipher)) {
BIO_printf(bio_err,"error setting cipher list\n");
ERR_print_errors(bio_err);
goto end;
}
#if 0
else
SSL_CTX_set_cipher_list(ctx,getenv("SSL_CIPHER"));
#endif
SSL_CTX_set_verify(ctx,verify,verify_callback);
if (!set_cert_key_stuff(ctx,cert,key))
goto end;
if ((!SSL_CTX_load_verify_locations(ctx,CAfile,CApath)) ||
(!SSL_CTX_set_default_verify_paths(ctx)))
{
/* BIO_printf(bio_err,"error setting default verify locations\n"); */
ERR_print_errors(bio_err);
/* goto end; */
}
store = SSL_CTX_get_cert_store(ctx);
X509_STORE_set_flags(store, vflags);
#ifndef OPENSSL_NO_TLSEXT
if (servername != NULL)
{
tlsextcbp.biodebug = bio_err;
SSL_CTX_set_tlsext_servername_callback(ctx, ssl_servername_cb);
SSL_CTX_set_tlsext_servername_arg(ctx, &tlsextcbp);
}
#endif
con=SSL_new(ctx);
if (sess_in)
{
SSL_SESSION *sess;
BIO *stmp = BIO_new_file(sess_in, "r");
if (!stmp)
{
BIO_printf(bio_err, "Can't open session file %s\n",
sess_in);
ERR_print_errors(bio_err);
goto end;
}
sess = PEM_read_bio_SSL_SESSION(stmp, NULL, 0, NULL);
BIO_free(stmp);
if (!sess)
{
BIO_printf(bio_err, "Can't open session file %s\n",
sess_in);
ERR_print_errors(bio_err);
goto end;
}
SSL_set_session(con, sess);
SSL_SESSION_free(sess);
}
#ifndef OPENSSL_NO_TLSEXT
if (servername != NULL)
{
if (!SSL_set_tlsext_host_name(con,servername))
{
BIO_printf(bio_err,"Unable to set TLS servername extension.\n");
ERR_print_errors(bio_err);
goto end;
}
}
#endif
#ifndef OPENSSL_NO_KRB5
if (con && (con->kssl_ctx = kssl_ctx_new()) != NULL)
{
kssl_ctx_setstring(con->kssl_ctx, KSSL_SERVER, host);
}
#endif /* OPENSSL_NO_KRB5 */
/* SSL_set_cipher_list(con,"RC4-MD5"); */
re_start:
if (init_client(&s,host,port,sock_type) == 0)
{
BIO_printf(bio_err,"connect:errno=%d\n",get_last_socket_error());
SHUTDOWN(s);
goto end;
}
BIO_printf(bio_c_out,"CONNECTED(%08X)\n",s);
#ifdef FIONBIO
if (c_nbio)
{
unsigned long l=1;
BIO_printf(bio_c_out,"turning on non blocking io\n");
if (BIO_socket_ioctl(s,FIONBIO,&l) < 0)
{
ERR_print_errors(bio_err);
goto end;
}
}
#endif
if (c_Pause & 0x01) con->debug=1;
if ( SSL_version(con) == DTLS1_VERSION)
{
sbio=BIO_new_dgram(s,BIO_NOCLOSE);
if (getsockname(s, &peer, (void *)&peerlen) < 0)
{
BIO_printf(bio_err, "getsockname:errno=%d\n",
get_last_socket_error());
SHUTDOWN(s);
goto end;
}
(void)BIO_ctrl_set_connected(sbio, 1, &peer);
if ( enable_timeouts)
{
timeout.tv_sec = 0;
timeout.tv_usec = DGRAM_RCV_TIMEOUT;
BIO_ctrl(sbio, BIO_CTRL_DGRAM_SET_RECV_TIMEOUT, 0, &timeout);
timeout.tv_sec = 0;
timeout.tv_usec = DGRAM_SND_TIMEOUT;
BIO_ctrl(sbio, BIO_CTRL_DGRAM_SET_SEND_TIMEOUT, 0, &timeout);
}
if (socket_mtu > 28)
{
SSL_set_options(con, SSL_OP_NO_QUERY_MTU);
SSL_set_mtu(con, socket_mtu - 28);
}
else
/* want to do MTU discovery */
BIO_ctrl(sbio, BIO_CTRL_DGRAM_MTU_DISCOVER, 0, NULL);
}
else
sbio=BIO_new_socket(s,BIO_NOCLOSE);
if (nbio_test)
{
BIO *test;
test=BIO_new(BIO_f_nbio_test());
sbio=BIO_push(test,sbio);
}
if (c_debug)
{
con->debug=1;
BIO_set_callback(sbio,bio_dump_callback);
BIO_set_callback_arg(sbio,(char *)bio_c_out);
}
if (c_msg)
{
SSL_set_msg_callback(con, msg_cb);
SSL_set_msg_callback_arg(con, bio_c_out);
}
#ifndef OPENSSL_NO_TLSEXT
if (c_tlsextdebug)
{
SSL_set_tlsext_debug_callback(con, tlsext_cb);
SSL_set_tlsext_debug_arg(con, bio_c_out);
}
if (c_status_req)
{
SSL_set_tlsext_status_type(con, TLSEXT_STATUSTYPE_ocsp);
SSL_CTX_set_tlsext_status_cb(ctx, ocsp_resp_cb);
SSL_CTX_set_tlsext_status_arg(ctx, bio_c_out);
#if 0
{
STACK_OF(OCSP_RESPID) *ids = sk_OCSP_RESPID_new_null();
OCSP_RESPID *id = OCSP_RESPID_new();
id->value.byKey = ASN1_OCTET_STRING_new();
id->type = V_OCSP_RESPID_KEY;
ASN1_STRING_set(id->value.byKey, "Hello World", -1);
sk_OCSP_RESPID_push(ids, id);
SSL_set_tlsext_status_ids(con, ids);
}
#endif
}
#endif
#ifndef OPENSSL_NO_JPAKE
if (jpake_secret)
jpake_client_auth(bio_c_out, sbio, jpake_secret);
#endif
SSL_set_bio(con,sbio,sbio);
SSL_set_connect_state(con);
/* ok, lets connect */
width=SSL_get_fd(con)+1;
read_tty=1;
write_tty=0;
tty_on=0;
read_ssl=1;
write_ssl=1;
cbuf_len=0;
cbuf_off=0;
sbuf_len=0;
sbuf_off=0;
	/* This is an ugly hack that makes a lot of assumptions */
/* We do have to handle multi-line responses which may come
in a single packet or not. We therefore have to use
BIO_gets() which does need a buffering BIO. So during
the initial chitchat we do push a buffering BIO into the
chain that is removed again later on to not disturb the
rest of the s_client operation. */
if (starttls_proto == PROTO_SMTP)
{
int foundit=0;
BIO *fbio = BIO_new(BIO_f_buffer());
BIO_push(fbio, sbio);
/* wait for multi-line response to end from SMTP */
do
{
mbuf_len = BIO_gets(fbio,mbuf,BUFSIZZ);
}
while (mbuf_len>3 && mbuf[3]=='-');
/* STARTTLS command requires EHLO... */
BIO_printf(fbio,"EHLO openssl.client.net\r\n");
(void)BIO_flush(fbio);
/* wait for multi-line response to end EHLO SMTP response */
do
{
mbuf_len = BIO_gets(fbio,mbuf,BUFSIZZ);
if (strstr(mbuf,"STARTTLS"))
foundit=1;
}
while (mbuf_len>3 && mbuf[3]=='-');
(void)BIO_flush(fbio);
BIO_pop(fbio);
BIO_free(fbio);
if (!foundit)
BIO_printf(bio_err,
"didn't found starttls in server response,"
" try anyway...\n");
BIO_printf(sbio,"STARTTLS\r\n");
BIO_read(sbio,sbuf,BUFSIZZ);
}
else if (starttls_proto == PROTO_POP3)
{
BIO_read(sbio,mbuf,BUFSIZZ);
BIO_printf(sbio,"STLS\r\n");
BIO_read(sbio,sbuf,BUFSIZZ);
}
else if (starttls_proto == PROTO_IMAP)
{
int foundit=0;
BIO *fbio = BIO_new(BIO_f_buffer());
BIO_push(fbio, sbio);
BIO_gets(fbio,mbuf,BUFSIZZ);
/* STARTTLS command requires CAPABILITY... */
BIO_printf(fbio,". CAPABILITY\r\n");
(void)BIO_flush(fbio);
/* wait for multi-line CAPABILITY response */
do
{
mbuf_len = BIO_gets(fbio,mbuf,BUFSIZZ);
if (strstr(mbuf,"STARTTLS"))
foundit=1;
}
while (mbuf_len>3 && mbuf[0]!='.');
(void)BIO_flush(fbio);
BIO_pop(fbio);
BIO_free(fbio);
if (!foundit)
BIO_printf(bio_err,
"didn't found STARTTLS in server response,"
" try anyway...\n");
BIO_printf(sbio,". STARTTLS\r\n");
BIO_read(sbio,sbuf,BUFSIZZ);
}
else if (starttls_proto == PROTO_FTP)
{
BIO *fbio = BIO_new(BIO_f_buffer());
BIO_push(fbio, sbio);
/* wait for multi-line response to end from FTP */
do
{
mbuf_len = BIO_gets(fbio,mbuf,BUFSIZZ);
}
while (mbuf_len>3 && mbuf[3]=='-');
(void)BIO_flush(fbio);
BIO_pop(fbio);
BIO_free(fbio);
BIO_printf(sbio,"AUTH TLS\r\n");
BIO_read(sbio,sbuf,BUFSIZZ);
}
if (starttls_proto == PROTO_XMPP)
{
int seen = 0;
BIO_printf(sbio,"<stream:stream "
"xmlns:stream='http://etherx.jabber.org/streams' "
"xmlns='jabber:client' to='%s' version='1.0'>", host);
seen = BIO_read(sbio,mbuf,BUFSIZZ);
mbuf[seen] = 0;
while (!strstr(mbuf, "<starttls xmlns='urn:ietf:params:xml:ns:xmpp-tls'"))
{
if (strstr(mbuf, "/stream:features>"))
goto shut;
seen = BIO_read(sbio,mbuf,BUFSIZZ);
mbuf[seen] = 0;
}
BIO_printf(sbio, "<starttls xmlns='urn:ietf:params:xml:ns:xmpp-tls'/>");
seen = BIO_read(sbio,sbuf,BUFSIZZ);
sbuf[seen] = 0;
if (!strstr(sbuf, "<proceed"))
goto shut;
mbuf[0] = 0;
}
for (;;)
{
FD_ZERO(&readfds);
FD_ZERO(&writefds);
if ((SSL_version(con) == DTLS1_VERSION) &&
DTLSv1_get_timeout(con, &timeout))
timeoutp = &timeout;
else
timeoutp = NULL;
if (SSL_in_init(con) && !SSL_total_renegotiations(con))
{
in_init=1;
tty_on=0;
}
else
{
tty_on=1;
if (in_init)
{
in_init=0;
if (sess_out)
{
BIO *stmp = BIO_new_file(sess_out, "w");
if (stmp)
{
PEM_write_bio_SSL_SESSION(stmp, SSL_get_session(con));
BIO_free(stmp);
}
else
BIO_printf(bio_err, "Error writing session file %s\n", sess_out);
}
print_stuff(bio_c_out,con,full_log);
if (full_log > 0) full_log--;
if (starttls_proto)
{
BIO_printf(bio_err,"%s",mbuf);
/* We don't need to know any more */
starttls_proto = PROTO_OFF;
}
if (reconnect)
{
reconnect--;
BIO_printf(bio_c_out,"drop connection and then reconnect\n");
SSL_shutdown(con);
SSL_set_connect_state(con);
SHUTDOWN(SSL_get_fd(con));
goto re_start;
}
}
}
ssl_pending = read_ssl && SSL_pending(con);
if (!ssl_pending)
{
#if !defined(OPENSSL_SYS_WINDOWS) && !defined(OPENSSL_SYS_MSDOS) && !defined(OPENSSL_SYS_NETWARE)
if (tty_on)
{
if (read_tty) FD_SET(fileno(stdin),&readfds);
if (write_tty) FD_SET(fileno(stdout),&writefds);
}
if (read_ssl)
FD_SET(SSL_get_fd(con),&readfds);
if (write_ssl)
FD_SET(SSL_get_fd(con),&writefds);
#else
if(!tty_on || !write_tty) {
if (read_ssl)
FD_SET(SSL_get_fd(con),&readfds);
if (write_ssl)
FD_SET(SSL_get_fd(con),&writefds);
}
#endif
/* printf("mode tty(%d %d%d) ssl(%d%d)\n",
tty_on,read_tty,write_tty,read_ssl,write_ssl);*/
/* Note: under VMS with SOCKETSHR the second parameter
* is currently of type (int *) whereas under other
* systems it is (void *) if you don't have a cast it
* will choke the compiler: if you do have a cast then
* you can either go for (int *) or (void *).
*/
#if defined(OPENSSL_SYS_WINDOWS) || defined(OPENSSL_SYS_MSDOS)
/* Under Windows/DOS we make the assumption that we can
* always write to the tty: therefore if we need to
* write to the tty we just fall through. Otherwise
* we timeout the select every second and see if there
* are any keypresses. Note: this is a hack, in a proper
* Windows application we wouldn't do this.
*/
i=0;
if(!write_tty) {
if(read_tty) {
tv.tv_sec = 1;
tv.tv_usec = 0;
i=select(width,(void *)&readfds,(void *)&writefds,
NULL,&tv);
#if defined(OPENSSL_SYS_WINCE) || defined(OPENSSL_SYS_MSDOS)
if(!i && (!_kbhit() || !read_tty) ) continue;
#else
if(!i && (!((_kbhit()) || (WAIT_OBJECT_0 == WaitForSingleObject(GetStdHandle(STD_INPUT_HANDLE), 0))) || !read_tty) ) continue;
#endif
} else i=select(width,(void *)&readfds,(void *)&writefds,
NULL,timeoutp);
}
#elif defined(OPENSSL_SYS_NETWARE)
if(!write_tty) {
if(read_tty) {
tv.tv_sec = 1;
tv.tv_usec = 0;
i=select(width,(void *)&readfds,(void *)&writefds,
NULL,&tv);
} else i=select(width,(void *)&readfds,(void *)&writefds,
NULL,timeoutp);
}
#else
i=select(width,(void *)&readfds,(void *)&writefds,
NULL,timeoutp);
#endif
if ( i < 0)
{
BIO_printf(bio_err,"bad select %d\n",
get_last_socket_error());
goto shut;
/* goto end; */
}
}
if ((SSL_version(con) == DTLS1_VERSION) && DTLSv1_handle_timeout(con) > 0)
{
BIO_printf(bio_err,"TIMEOUT occured\n");
}
if (!ssl_pending && FD_ISSET(SSL_get_fd(con),&writefds))
{
k=SSL_write(con,&(cbuf[cbuf_off]),
(unsigned int)cbuf_len);
switch (SSL_get_error(con,k))
{
case SSL_ERROR_NONE:
cbuf_off+=k;
cbuf_len-=k;
if (k <= 0) goto end;
/* we have done a write(con,NULL,0); */
if (cbuf_len <= 0)
{
read_tty=1;
write_ssl=0;
}
else /* if (cbuf_len > 0) */
{
read_tty=0;
write_ssl=1;
}
break;
case SSL_ERROR_WANT_WRITE:
BIO_printf(bio_c_out,"write W BLOCK\n");
write_ssl=1;
read_tty=0;
break;
case SSL_ERROR_WANT_READ:
BIO_printf(bio_c_out,"write R BLOCK\n");
write_tty=0;
read_ssl=1;
write_ssl=0;
break;
case SSL_ERROR_WANT_X509_LOOKUP:
BIO_printf(bio_c_out,"write X BLOCK\n");
break;
case SSL_ERROR_ZERO_RETURN:
if (cbuf_len != 0)
{
BIO_printf(bio_c_out,"shutdown\n");
goto shut;
}
else
{
read_tty=1;
write_ssl=0;
break;
}
case SSL_ERROR_SYSCALL:
if ((k != 0) || (cbuf_len != 0))
{
BIO_printf(bio_err,"write:errno=%d\n",
get_last_socket_error());
goto shut;
}
else
{
read_tty=1;
write_ssl=0;
}
break;
case SSL_ERROR_SSL:
ERR_print_errors(bio_err);
goto shut;
}
}
#if defined(OPENSSL_SYS_WINDOWS) || defined(OPENSSL_SYS_MSDOS) || defined(OPENSSL_SYS_NETWARE)
/* Assume Windows/DOS can always write */
else if (!ssl_pending && write_tty)
#else
else if (!ssl_pending && FD_ISSET(fileno(stdout),&writefds))
#endif
{
#ifdef CHARSET_EBCDIC
ascii2ebcdic(&(sbuf[sbuf_off]),&(sbuf[sbuf_off]),sbuf_len);
#endif
i=write(fileno(stdout),&(sbuf[sbuf_off]),sbuf_len);
if (i <= 0)
{
BIO_printf(bio_c_out,"DONE\n");
goto shut;
/* goto end; */
}
			sbuf_len-=i;
sbuf_off+=i;
if (sbuf_len <= 0)
{
read_ssl=1;
write_tty=0;
}
}
else if (ssl_pending || FD_ISSET(SSL_get_fd(con),&readfds))
{
#ifdef RENEG
{ static int iiii; if (++iiii == 52) { SSL_renegotiate(con); iiii=0; } }
#endif
#if 1
k=SSL_read(con,sbuf,1024 /* BUFSIZZ */ );
#else
/* Demo for pending and peek :-) */
k=SSL_read(con,sbuf,16);
{ char zbuf[10240];
printf("read=%d pending=%d peek=%d\n",k,SSL_pending(con),SSL_peek(con,zbuf,10240));
}
#endif
switch (SSL_get_error(con,k))
{
case SSL_ERROR_NONE:
if (k <= 0)
goto end;
sbuf_off=0;
sbuf_len=k;
read_ssl=0;
write_tty=1;
break;
case SSL_ERROR_WANT_WRITE:
BIO_printf(bio_c_out,"read W BLOCK\n");
write_ssl=1;
read_tty=0;
break;
case SSL_ERROR_WANT_READ:
BIO_printf(bio_c_out,"read R BLOCK\n");
write_tty=0;
read_ssl=1;
if ((read_tty == 0) && (write_ssl == 0))
write_ssl=1;
break;
case SSL_ERROR_WANT_X509_LOOKUP:
BIO_printf(bio_c_out,"read X BLOCK\n");
break;
case SSL_ERROR_SYSCALL:
BIO_printf(bio_err,"read:errno=%d\n",get_last_socket_error());
goto shut;
case SSL_ERROR_ZERO_RETURN:
BIO_printf(bio_c_out,"closed\n");
goto shut;
case SSL_ERROR_SSL:
ERR_print_errors(bio_err);
goto shut;
/* break; */
}
}
#if defined(OPENSSL_SYS_WINDOWS) || defined(OPENSSL_SYS_MSDOS)
#if defined(OPENSSL_SYS_WINCE) || defined(OPENSSL_SYS_MSDOS)
else if (_kbhit())
#else
else if ((_kbhit()) || (WAIT_OBJECT_0 == WaitForSingleObject(GetStdHandle(STD_INPUT_HANDLE), 0)))
#endif
#elif defined (OPENSSL_SYS_NETWARE)
else if (_kbhit())
#else
else if (FD_ISSET(fileno(stdin),&readfds))
#endif
{
if (crlf)
{
int j, lf_num;
i=read(fileno(stdin),cbuf,BUFSIZZ/2);
lf_num = 0;
/* both loops are skipped when i <= 0 */
for (j = 0; j < i; j++)
if (cbuf[j] == '\n')
lf_num++;
for (j = i-1; j >= 0; j--)
{
cbuf[j+lf_num] = cbuf[j];
if (cbuf[j] == '\n')
{
lf_num--;
i++;
cbuf[j+lf_num] = '\r';
}
}
assert(lf_num == 0);
}
else
i=read(fileno(stdin),cbuf,BUFSIZZ);
if ((!c_ign_eof) && ((i <= 0) || (cbuf[0] == 'Q')))
{
BIO_printf(bio_err,"DONE\n");
goto shut;
}
if ((!c_ign_eof) && (cbuf[0] == 'R'))
{
BIO_printf(bio_err,"RENEGOTIATING\n");
SSL_renegotiate(con);
cbuf_len=0;
}
else
{
cbuf_len=i;
cbuf_off=0;
#ifdef CHARSET_EBCDIC
ebcdic2ascii(cbuf, cbuf, i);
#endif
}
write_ssl=1;
read_tty=0;
}
}
shut:
SSL_shutdown(con);
SHUTDOWN(SSL_get_fd(con));
ret=0;
end:
if(prexit) print_stuff(bio_c_out,con,1);
if (con != NULL) SSL_free(con);
if (con2 != NULL) SSL_free(con2);
if (ctx != NULL) SSL_CTX_free(ctx);
if (cert)
X509_free(cert);
if (key)
EVP_PKEY_free(key);
if (pass)
OPENSSL_free(pass);
if (cbuf != NULL) { OPENSSL_cleanse(cbuf,BUFSIZZ); OPENSSL_free(cbuf); }
if (sbuf != NULL) { OPENSSL_cleanse(sbuf,BUFSIZZ); OPENSSL_free(sbuf); }
if (mbuf != NULL) { OPENSSL_cleanse(mbuf,BUFSIZZ); OPENSSL_free(mbuf); }
if (bio_c_out != NULL)
{
BIO_free(bio_c_out);
bio_c_out=NULL;
}
apps_shutdown();
OPENSSL_EXIT(ret);
}
| 1 |
[
"CWE-310"
] |
openssl
|
c6a876473cbff0fd323c8abcaace98ee2d21863d
| 125,453,681,194,267,970,000,000,000,000,000,000,000 | 1,123 |
Support TLS_FALLBACK_SCSV.
Reviewed-by: Stephen Henson <[email protected]>
|
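For context, the client-side half of this feature is one mode flag set before the handshake; a minimal sketch, assuming the SSL_MODE_SEND_FALLBACK_SCSV flag this commit series introduced:

#include <openssl/ssl.h>

/* A client retrying with a downgraded protocol version advertises
 * TLS_FALLBACK_SCSV so the server can refuse the downgrade. */
static void mark_fallback_retry(SSL *ssl)
{
    SSL_set_mode(ssl, SSL_MODE_SEND_FALLBACK_SCSV);
}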
static int atl2_set_features(struct net_device *netdev,
netdev_features_t features)
{
netdev_features_t changed = netdev->features ^ features;
if (changed & NETIF_F_HW_VLAN_CTAG_RX)
atl2_vlan_mode(netdev, features);
return 0;
}
| 0 |
[
"CWE-200"
] |
linux
|
f43bfaeddc79effbf3d0fcb53ca477cca66f3db8
| 62,287,564,946,275,220,000,000,000,000,000,000,000 | 10 |
atl2: Disable unimplemented scatter/gather feature
atl2 includes NETIF_F_SG in hw_features even though it has no support
for non-linear skbs. This bug was originally harmless since the
driver does not claim to implement checksum offload and that used to
be a requirement for SG.
Now that SG and checksum offload are independent features, if you
explicitly enable SG *and* use one of the rare protocols that can use
SG without checksum offload, this potentially leaks sensitive
information (before you notice that it just isn't working). Therefore
this obscure bug has been designated CVE-2016-2117.
Reported-by: Justin Yackoski <[email protected]>
Signed-off-by: Ben Hutchings <[email protected]>
Fixes: ec5f06156423 ("net: Kill link between CSUM and SG features.")
Signed-off-by: David S. Miller <[email protected]>
|
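The fix amounts to not advertising NETIF_F_SG when the driver cannot walk non-linear skbs; a sketch of the probe-time feature setup (illustrative, not the exact atl2 patch):

#include <linux/netdevice.h>

/* Advertise only what the transmit path implements; omitting
 * NETIF_F_SG keeps the stack from handing us non-linear skbs. */
static void atl2_setup_features(struct net_device *netdev)
{
	netdev->hw_features = NETIF_F_HW_VLAN_CTAG_RX;  /* no NETIF_F_SG */
}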
static void des3_encrypt(struct ssh_cipher_struct *cipher, void *in,
void *out, unsigned long len) {
gcry_cipher_encrypt(cipher->key[0], out, len, in, len);
}
| 0 |
[
"CWE-310"
] |
libssh
|
e99246246b4061f7e71463f8806b9dcad65affa0
| 124,827,231,781,448,530,000,000,000,000,000,000,000 | 4 |
security: fix for vulnerability CVE-2014-0017
When accepting a new connection, a forking server based on libssh forks
and the child process handles the request. The RAND_bytes() function of
openssl doesn't reset its state after the fork, but simply adds the
current process id (getpid) to the PRNG state, which is not guaranteed
to be unique.
This can cause several children to end up with the same PRNG state, which
is a security issue.
|
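This is the classic fork-without-reseed problem: children inherit the PRNG state, and getpid() alone does not guarantee divergence. A hedged sketch of a post-fork reseed in the child, using OpenSSL's RAND_seed (the entropy source is an assumption):

#include <openssl/rand.h>
#include <stdio.h>

/* Call in the child right after fork(): mix in fresh kernel entropy
 * so sibling processes do not share PRNG state. */
static int reseed_after_fork(void)
{
    unsigned char buf[32];
    FILE *f = fopen("/dev/urandom", "rb");

    if (f == NULL)
        return -1;
    if (fread(buf, 1, sizeof(buf), f) != sizeof(buf)) {
        fclose(f);
        return -1;
    }
    fclose(f);
    RAND_seed(buf, sizeof(buf));
    return 0;
}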
TEST_F(HttpConnectionManagerImplTest, FilterAddBodyInTrailersCallback_NoDataFrames) {
InSequence s;
setup(false, "");
EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status {
RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_);
RequestHeaderMapPtr headers{
new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}};
decoder->decodeHeaders(std::move(headers), false);
RequestTrailerMapPtr trailers{new TestRequestTrailerMapImpl{{"foo", "bar"}}};
decoder->decodeTrailers(std::move(trailers));
return Http::okStatus();
}));
setupFilterChain(2, 1);
EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, false))
.WillOnce(Return(FilterHeadersStatus::StopIteration));
Buffer::OwnedImpl trailers_data("hello");
EXPECT_CALL(*decoder_filters_[0], decodeTrailers(_))
.WillOnce(InvokeWithoutArgs([&]() -> FilterTrailersStatus {
decoder_filters_[0]->callbacks_->addDecodedData(trailers_data, false);
return FilterTrailersStatus::Continue;
}));
EXPECT_CALL(*decoder_filters_[0], decodeComplete());
EXPECT_CALL(*decoder_filters_[1], decodeHeaders(_, false))
.WillOnce(Return(FilterHeadersStatus::StopIteration));
EXPECT_CALL(*decoder_filters_[1], decodeData(_, false))
.WillOnce(Return(FilterDataStatus::StopIterationAndBuffer));
EXPECT_CALL(*decoder_filters_[1], decodeTrailers(_))
.WillOnce(Return(FilterTrailersStatus::StopIteration));
EXPECT_CALL(*decoder_filters_[1], decodeComplete());
// Kick off the incoming data.
Buffer::OwnedImpl fake_input("1234");
conn_manager_->onData(fake_input, false);
EXPECT_CALL(*encoder_filters_[0], encodeHeaders(_, false))
.WillOnce(Return(FilterHeadersStatus::StopIteration));
decoder_filters_[0]->callbacks_->encodeHeaders(
ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}, false);
EXPECT_CALL(*encoder_filters_[0], encodeTrailers(_))
.WillOnce(InvokeWithoutArgs([&]() -> FilterTrailersStatus {
encoder_filters_[0]->callbacks_->addEncodedData(trailers_data, false);
return FilterTrailersStatus::Continue;
}));
EXPECT_CALL(*encoder_filters_[0], encodeComplete());
EXPECT_CALL(response_encoder_, encodeHeaders(_, false));
EXPECT_CALL(response_encoder_, encodeData(_, false));
EXPECT_CALL(response_encoder_, encodeTrailers(_));
expectOnDestroy();
decoder_filters_[0]->callbacks_->encodeTrailers(
ResponseTrailerMapPtr{new TestResponseTrailerMapImpl{{"some", "trailer"}}});
}
| 0 |
[
"CWE-400"
] |
envoy
|
0e49a495826ea9e29134c1bd54fdeb31a034f40c
| 9,494,117,133,272,920,000,000,000,000,000,000,000 | 58 |
http/2: add stats and stream flush timeout (#139)
This commit adds a new stream flush timeout to guard against a
remote server that does not open window once an entire stream has
been buffered for flushing. Additional stats have also been added
to better understand the codecs view of active streams as well as
amount of data buffered.
Signed-off-by: Matt Klein <[email protected]>
|
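The guard being added is a deadline on how long a fully buffered stream may wait for the peer to grant flow-control window. Envoy's implementation is a C++ event-loop timer; this C sketch only shows the deadline idea (all names illustrative):

#include <stdbool.h>
#include <time.h>

struct stream_sketch {
    time_t flush_started;   /* when the stream became fully buffered */
    bool   flushing;
};

/* True when the peer granted no window within the timeout, meaning
 * the stream (or connection) should be reset. */
static bool flush_timed_out(const struct stream_sketch *s, time_t now,
                            time_t timeout_sec)
{
    return s->flushing && (now - s->flush_started) >= timeout_sec;
}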
static inline void sock_rps_reset_flow(const struct sock *sk)
{
#ifdef CONFIG_RPS
struct rps_sock_flow_table *sock_flow_table;
rcu_read_lock();
sock_flow_table = rcu_dereference(rps_sock_flow_table);
rps_reset_sock_flow(sock_flow_table, sk->sk_rxhash);
rcu_read_unlock();
#endif
}
| 0 |
[
"CWE-400"
] |
linux-2.6
|
c377411f2494a931ff7facdbb3a6839b1266bcf6
| 53,767,273,004,708,000,000,000,000,000,000,000,000 | 11 |
net: sk_add_backlog() take rmem_alloc into account
The current socket backlog limit is not enough to really stop DDoS attacks,
because the user thread spends a long time processing a full backlog each
round, and might spin wildly on the socket lock.
We should add backlog size and receive_queue size (aka rmem_alloc) to
pace writers, and let the user thread run without being slowed down too much.
Introduce a sk_rcvqueues_full() helper, to avoid taking socket lock in
stress situations.
Under huge stress from a multiqueue/RPS enabled NIC, a single flow udp
receiver can now process ~200.000 pps (instead of ~100 pps before the
patch) on a 8 core machine.
Signed-off-by: Eric Dumazet <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
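The helper the message introduces adds backlog length and rmem_alloc and compares the sum against the receive buffer limit, so the check can run before taking the socket lock. A close sketch with the kernel types simplified:

#include <stdbool.h>

struct sock_sketch {
    unsigned int backlog_len;  /* bytes queued in the backlog */
    unsigned int rmem_alloc;   /* bytes in the receive queue */
    unsigned int rcvbuf;       /* configured receive buffer limit */
};

/* True when accepting one more skb would exceed the paced limit. */
static bool rcvqueues_full(const struct sock_sketch *sk)
{
    return sk->backlog_len + sk->rmem_alloc > sk->rcvbuf;
}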
TIFFWriteDirectorySec(TIFF* tif, int isimage, int imagedone, uint64* pdiroff)
{
static const char module[] = "TIFFWriteDirectorySec";
uint32 ndir;
TIFFDirEntry* dir;
uint32 dirsize;
void* dirmem;
uint32 m;
if (tif->tif_mode == O_RDONLY)
return (1);
_TIFFFillStriles( tif );
/*
* Clear write state so that subsequent images with
* different characteristics get the right buffers
* setup for them.
*/
if (imagedone)
{
if (tif->tif_flags & TIFF_POSTENCODE)
{
tif->tif_flags &= ~TIFF_POSTENCODE;
if (!(*tif->tif_postencode)(tif))
{
TIFFErrorExt(tif->tif_clientdata,module,
"Error post-encoding before directory write");
return (0);
}
}
(*tif->tif_close)(tif); /* shutdown encoder */
/*
* Flush any data that might have been written
* by the compression close+cleanup routines. But
* be careful not to write stuff if we didn't add data
* in the previous steps as the "rawcc" data may well be
* a previously read tile/strip in mixed read/write mode.
*/
if (tif->tif_rawcc > 0
&& (tif->tif_flags & TIFF_BEENWRITING) != 0 )
{
if( !TIFFFlushData1(tif) )
{
TIFFErrorExt(tif->tif_clientdata, module,
"Error flushing data before directory write");
return (0);
}
}
if ((tif->tif_flags & TIFF_MYBUFFER) && tif->tif_rawdata)
{
_TIFFfree(tif->tif_rawdata);
tif->tif_rawdata = NULL;
tif->tif_rawcc = 0;
tif->tif_rawdatasize = 0;
tif->tif_rawdataoff = 0;
tif->tif_rawdataloaded = 0;
}
tif->tif_flags &= ~(TIFF_BEENWRITING|TIFF_BUFFERSETUP);
}
dir=NULL;
dirmem=NULL;
dirsize=0;
while (1)
{
ndir=0;
if (isimage)
{
if (TIFFFieldSet(tif,FIELD_IMAGEDIMENSIONS))
{
if (!TIFFWriteDirectoryTagShortLong(tif,&ndir,dir,TIFFTAG_IMAGEWIDTH,tif->tif_dir.td_imagewidth))
goto bad;
if (!TIFFWriteDirectoryTagShortLong(tif,&ndir,dir,TIFFTAG_IMAGELENGTH,tif->tif_dir.td_imagelength))
goto bad;
}
if (TIFFFieldSet(tif,FIELD_TILEDIMENSIONS))
{
if (!TIFFWriteDirectoryTagShortLong(tif,&ndir,dir,TIFFTAG_TILEWIDTH,tif->tif_dir.td_tilewidth))
goto bad;
if (!TIFFWriteDirectoryTagShortLong(tif,&ndir,dir,TIFFTAG_TILELENGTH,tif->tif_dir.td_tilelength))
goto bad;
}
if (TIFFFieldSet(tif,FIELD_RESOLUTION))
{
if (!TIFFWriteDirectoryTagRational(tif,&ndir,dir,TIFFTAG_XRESOLUTION,tif->tif_dir.td_xresolution))
goto bad;
if (!TIFFWriteDirectoryTagRational(tif,&ndir,dir,TIFFTAG_YRESOLUTION,tif->tif_dir.td_yresolution))
goto bad;
}
if (TIFFFieldSet(tif,FIELD_POSITION))
{
if (!TIFFWriteDirectoryTagRational(tif,&ndir,dir,TIFFTAG_XPOSITION,tif->tif_dir.td_xposition))
goto bad;
if (!TIFFWriteDirectoryTagRational(tif,&ndir,dir,TIFFTAG_YPOSITION,tif->tif_dir.td_yposition))
goto bad;
}
if (TIFFFieldSet(tif,FIELD_SUBFILETYPE))
{
if (!TIFFWriteDirectoryTagLong(tif,&ndir,dir,TIFFTAG_SUBFILETYPE,tif->tif_dir.td_subfiletype))
goto bad;
}
if (TIFFFieldSet(tif,FIELD_BITSPERSAMPLE))
{
if (!TIFFWriteDirectoryTagShortPerSample(tif,&ndir,dir,TIFFTAG_BITSPERSAMPLE,tif->tif_dir.td_bitspersample))
goto bad;
}
if (TIFFFieldSet(tif,FIELD_COMPRESSION))
{
if (!TIFFWriteDirectoryTagShort(tif,&ndir,dir,TIFFTAG_COMPRESSION,tif->tif_dir.td_compression))
goto bad;
}
if (TIFFFieldSet(tif,FIELD_PHOTOMETRIC))
{
if (!TIFFWriteDirectoryTagShort(tif,&ndir,dir,TIFFTAG_PHOTOMETRIC,tif->tif_dir.td_photometric))
goto bad;
}
if (TIFFFieldSet(tif,FIELD_THRESHHOLDING))
{
if (!TIFFWriteDirectoryTagShort(tif,&ndir,dir,TIFFTAG_THRESHHOLDING,tif->tif_dir.td_threshholding))
goto bad;
}
if (TIFFFieldSet(tif,FIELD_FILLORDER))
{
if (!TIFFWriteDirectoryTagShort(tif,&ndir,dir,TIFFTAG_FILLORDER,tif->tif_dir.td_fillorder))
goto bad;
}
if (TIFFFieldSet(tif,FIELD_ORIENTATION))
{
if (!TIFFWriteDirectoryTagShort(tif,&ndir,dir,TIFFTAG_ORIENTATION,tif->tif_dir.td_orientation))
goto bad;
}
if (TIFFFieldSet(tif,FIELD_SAMPLESPERPIXEL))
{
if (!TIFFWriteDirectoryTagShort(tif,&ndir,dir,TIFFTAG_SAMPLESPERPIXEL,tif->tif_dir.td_samplesperpixel))
goto bad;
}
if (TIFFFieldSet(tif,FIELD_ROWSPERSTRIP))
{
if (!TIFFWriteDirectoryTagShortLong(tif,&ndir,dir,TIFFTAG_ROWSPERSTRIP,tif->tif_dir.td_rowsperstrip))
goto bad;
}
if (TIFFFieldSet(tif,FIELD_MINSAMPLEVALUE))
{
if (!TIFFWriteDirectoryTagShortPerSample(tif,&ndir,dir,TIFFTAG_MINSAMPLEVALUE,tif->tif_dir.td_minsamplevalue))
goto bad;
}
if (TIFFFieldSet(tif,FIELD_MAXSAMPLEVALUE))
{
if (!TIFFWriteDirectoryTagShortPerSample(tif,&ndir,dir,TIFFTAG_MAXSAMPLEVALUE,tif->tif_dir.td_maxsamplevalue))
goto bad;
}
if (TIFFFieldSet(tif,FIELD_PLANARCONFIG))
{
if (!TIFFWriteDirectoryTagShort(tif,&ndir,dir,TIFFTAG_PLANARCONFIG,tif->tif_dir.td_planarconfig))
goto bad;
}
if (TIFFFieldSet(tif,FIELD_RESOLUTIONUNIT))
{
if (!TIFFWriteDirectoryTagShort(tif,&ndir,dir,TIFFTAG_RESOLUTIONUNIT,tif->tif_dir.td_resolutionunit))
goto bad;
}
if (TIFFFieldSet(tif,FIELD_PAGENUMBER))
{
if (!TIFFWriteDirectoryTagShortArray(tif,&ndir,dir,TIFFTAG_PAGENUMBER,2,&tif->tif_dir.td_pagenumber[0]))
goto bad;
}
if (TIFFFieldSet(tif,FIELD_STRIPBYTECOUNTS))
{
if (!isTiled(tif))
{
if (!TIFFWriteDirectoryTagLongLong8Array(tif,&ndir,dir,TIFFTAG_STRIPBYTECOUNTS,tif->tif_dir.td_nstrips,tif->tif_dir.td_stripbytecount))
goto bad;
}
else
{
if (!TIFFWriteDirectoryTagLongLong8Array(tif,&ndir,dir,TIFFTAG_TILEBYTECOUNTS,tif->tif_dir.td_nstrips,tif->tif_dir.td_stripbytecount))
goto bad;
}
}
if (TIFFFieldSet(tif,FIELD_STRIPOFFSETS))
{
if (!isTiled(tif))
{
/* td_stripoffset might be NULL in an odd OJPEG case. See
* tif_dirread.c around line 3634.
* XXX: OJPEG hack.
* If a) compression is OJPEG, b) it's not a tiled TIFF,
* and c) the number of strips is 1,
* then we tolerate the absence of stripoffsets tag,
* because, presumably, all required data is in the
* JpegInterchangeFormat stream.
* We can get here when using tiffset on such a file.
* See http://bugzilla.maptools.org/show_bug.cgi?id=2500
*/
if (tif->tif_dir.td_stripoffset != NULL &&
!TIFFWriteDirectoryTagLongLong8Array(tif,&ndir,dir,TIFFTAG_STRIPOFFSETS,tif->tif_dir.td_nstrips,tif->tif_dir.td_stripoffset))
goto bad;
}
else
{
if (!TIFFWriteDirectoryTagLongLong8Array(tif,&ndir,dir,TIFFTAG_TILEOFFSETS,tif->tif_dir.td_nstrips,tif->tif_dir.td_stripoffset))
goto bad;
}
}
if (TIFFFieldSet(tif,FIELD_COLORMAP))
{
if (!TIFFWriteDirectoryTagColormap(tif,&ndir,dir))
goto bad;
}
if (TIFFFieldSet(tif,FIELD_EXTRASAMPLES))
{
if (tif->tif_dir.td_extrasamples)
{
uint16 na;
uint16* nb;
TIFFGetFieldDefaulted(tif,TIFFTAG_EXTRASAMPLES,&na,&nb);
if (!TIFFWriteDirectoryTagShortArray(tif,&ndir,dir,TIFFTAG_EXTRASAMPLES,na,nb))
goto bad;
}
}
if (TIFFFieldSet(tif,FIELD_SAMPLEFORMAT))
{
if (!TIFFWriteDirectoryTagShortPerSample(tif,&ndir,dir,TIFFTAG_SAMPLEFORMAT,tif->tif_dir.td_sampleformat))
goto bad;
}
if (TIFFFieldSet(tif,FIELD_SMINSAMPLEVALUE))
{
if (!TIFFWriteDirectoryTagSampleformatArray(tif,&ndir,dir,TIFFTAG_SMINSAMPLEVALUE,tif->tif_dir.td_samplesperpixel,tif->tif_dir.td_sminsamplevalue))
goto bad;
}
if (TIFFFieldSet(tif,FIELD_SMAXSAMPLEVALUE))
{
if (!TIFFWriteDirectoryTagSampleformatArray(tif,&ndir,dir,TIFFTAG_SMAXSAMPLEVALUE,tif->tif_dir.td_samplesperpixel,tif->tif_dir.td_smaxsamplevalue))
goto bad;
}
if (TIFFFieldSet(tif,FIELD_IMAGEDEPTH))
{
if (!TIFFWriteDirectoryTagLong(tif,&ndir,dir,TIFFTAG_IMAGEDEPTH,tif->tif_dir.td_imagedepth))
goto bad;
}
if (TIFFFieldSet(tif,FIELD_TILEDEPTH))
{
if (!TIFFWriteDirectoryTagLong(tif,&ndir,dir,TIFFTAG_TILEDEPTH,tif->tif_dir.td_tiledepth))
goto bad;
}
if (TIFFFieldSet(tif,FIELD_HALFTONEHINTS))
{
if (!TIFFWriteDirectoryTagShortArray(tif,&ndir,dir,TIFFTAG_HALFTONEHINTS,2,&tif->tif_dir.td_halftonehints[0]))
goto bad;
}
if (TIFFFieldSet(tif,FIELD_YCBCRSUBSAMPLING))
{
if (!TIFFWriteDirectoryTagShortArray(tif,&ndir,dir,TIFFTAG_YCBCRSUBSAMPLING,2,&tif->tif_dir.td_ycbcrsubsampling[0]))
goto bad;
}
if (TIFFFieldSet(tif,FIELD_YCBCRPOSITIONING))
{
if (!TIFFWriteDirectoryTagShort(tif,&ndir,dir,TIFFTAG_YCBCRPOSITIONING,tif->tif_dir.td_ycbcrpositioning))
goto bad;
}
if (TIFFFieldSet(tif,FIELD_REFBLACKWHITE))
{
if (!TIFFWriteDirectoryTagRationalArray(tif,&ndir,dir,TIFFTAG_REFERENCEBLACKWHITE,6,tif->tif_dir.td_refblackwhite))
goto bad;
}
if (TIFFFieldSet(tif,FIELD_TRANSFERFUNCTION))
{
if (!TIFFWriteDirectoryTagTransferfunction(tif,&ndir,dir))
goto bad;
}
if (TIFFFieldSet(tif,FIELD_INKNAMES))
{
if (!TIFFWriteDirectoryTagAscii(tif,&ndir,dir,TIFFTAG_INKNAMES,tif->tif_dir.td_inknameslen,tif->tif_dir.td_inknames))
goto bad;
}
if (TIFFFieldSet(tif,FIELD_SUBIFD))
{
if (!TIFFWriteDirectoryTagSubifd(tif,&ndir,dir))
goto bad;
}
{
uint32 n;
for (n=0; n<tif->tif_nfields; n++) {
const TIFFField* o;
o = tif->tif_fields[n];
if ((o->field_bit>=FIELD_CODEC)&&(TIFFFieldSet(tif,o->field_bit)))
{
switch (o->get_field_type)
{
case TIFF_SETGET_ASCII:
{
uint32 pa;
char* pb;
assert(o->field_type==TIFF_ASCII);
assert(o->field_readcount==TIFF_VARIABLE);
assert(o->field_passcount==0);
TIFFGetField(tif,o->field_tag,&pb);
pa=(uint32)(strlen(pb));
if (!TIFFWriteDirectoryTagAscii(tif,&ndir,dir,(uint16)o->field_tag,pa,pb))
goto bad;
}
break;
case TIFF_SETGET_UINT16:
{
uint16 p;
assert(o->field_type==TIFF_SHORT);
assert(o->field_readcount==1);
assert(o->field_passcount==0);
TIFFGetField(tif,o->field_tag,&p);
if (!TIFFWriteDirectoryTagShort(tif,&ndir,dir,(uint16)o->field_tag,p))
goto bad;
}
break;
case TIFF_SETGET_UINT32:
{
uint32 p;
assert(o->field_type==TIFF_LONG);
assert(o->field_readcount==1);
assert(o->field_passcount==0);
TIFFGetField(tif,o->field_tag,&p);
if (!TIFFWriteDirectoryTagLong(tif,&ndir,dir,(uint16)o->field_tag,p))
goto bad;
}
break;
case TIFF_SETGET_C32_UINT8:
{
uint32 pa;
void* pb;
assert(o->field_type==TIFF_UNDEFINED);
assert(o->field_readcount==TIFF_VARIABLE2);
assert(o->field_passcount==1);
TIFFGetField(tif,o->field_tag,&pa,&pb);
if (!TIFFWriteDirectoryTagUndefinedArray(tif,&ndir,dir,(uint16)o->field_tag,pa,pb))
goto bad;
}
break;
default:
assert(0); /* we should never get here */
break;
}
}
}
}
}
for (m=0; m<(uint32)(tif->tif_dir.td_customValueCount); m++)
{
uint16 tag = (uint16)tif->tif_dir.td_customValues[m].info->field_tag;
uint32 count = tif->tif_dir.td_customValues[m].count;
switch (tif->tif_dir.td_customValues[m].info->field_type)
{
case TIFF_ASCII:
if (!TIFFWriteDirectoryTagAscii(tif,&ndir,dir,tag,count,tif->tif_dir.td_customValues[m].value))
goto bad;
break;
case TIFF_UNDEFINED:
if (!TIFFWriteDirectoryTagUndefinedArray(tif,&ndir,dir,tag,count,tif->tif_dir.td_customValues[m].value))
goto bad;
break;
case TIFF_BYTE:
if (!TIFFWriteDirectoryTagByteArray(tif,&ndir,dir,tag,count,tif->tif_dir.td_customValues[m].value))
goto bad;
break;
case TIFF_SBYTE:
if (!TIFFWriteDirectoryTagSbyteArray(tif,&ndir,dir,tag,count,tif->tif_dir.td_customValues[m].value))
goto bad;
break;
case TIFF_SHORT:
if (!TIFFWriteDirectoryTagShortArray(tif,&ndir,dir,tag,count,tif->tif_dir.td_customValues[m].value))
goto bad;
break;
case TIFF_SSHORT:
if (!TIFFWriteDirectoryTagSshortArray(tif,&ndir,dir,tag,count,tif->tif_dir.td_customValues[m].value))
goto bad;
break;
case TIFF_LONG:
if (!TIFFWriteDirectoryTagLongArray(tif,&ndir,dir,tag,count,tif->tif_dir.td_customValues[m].value))
goto bad;
break;
case TIFF_SLONG:
if (!TIFFWriteDirectoryTagSlongArray(tif,&ndir,dir,tag,count,tif->tif_dir.td_customValues[m].value))
goto bad;
break;
case TIFF_LONG8:
if (!TIFFWriteDirectoryTagLong8Array(tif,&ndir,dir,tag,count,tif->tif_dir.td_customValues[m].value))
goto bad;
break;
case TIFF_SLONG8:
if (!TIFFWriteDirectoryTagSlong8Array(tif,&ndir,dir,tag,count,tif->tif_dir.td_customValues[m].value))
goto bad;
break;
case TIFF_RATIONAL:
if (!TIFFWriteDirectoryTagRationalArray(tif,&ndir,dir,tag,count,tif->tif_dir.td_customValues[m].value))
goto bad;
break;
case TIFF_SRATIONAL:
if (!TIFFWriteDirectoryTagSrationalArray(tif,&ndir,dir,tag,count,tif->tif_dir.td_customValues[m].value))
goto bad;
break;
case TIFF_FLOAT:
if (!TIFFWriteDirectoryTagFloatArray(tif,&ndir,dir,tag,count,tif->tif_dir.td_customValues[m].value))
goto bad;
break;
case TIFF_DOUBLE:
if (!TIFFWriteDirectoryTagDoubleArray(tif,&ndir,dir,tag,count,tif->tif_dir.td_customValues[m].value))
goto bad;
break;
case TIFF_IFD:
if (!TIFFWriteDirectoryTagIfdArray(tif,&ndir,dir,tag,count,tif->tif_dir.td_customValues[m].value))
goto bad;
break;
case TIFF_IFD8:
if (!TIFFWriteDirectoryTagIfdIfd8Array(tif,&ndir,dir,tag,count,tif->tif_dir.td_customValues[m].value))
goto bad;
break;
default:
assert(0); /* we should never get here */
break;
}
}
if (dir!=NULL)
break;
dir=_TIFFmalloc(ndir*sizeof(TIFFDirEntry));
if (dir==NULL)
{
TIFFErrorExt(tif->tif_clientdata,module,"Out of memory");
goto bad;
}
if (isimage)
{
if ((tif->tif_diroff==0)&&(!TIFFLinkDirectory(tif)))
goto bad;
}
else
tif->tif_diroff=(TIFFSeekFile(tif,0,SEEK_END)+1)&(~((toff_t)1));
if (pdiroff!=NULL)
*pdiroff=tif->tif_diroff;
if (!(tif->tif_flags&TIFF_BIGTIFF))
dirsize=2+ndir*12+4;
else
dirsize=8+ndir*20+8;
tif->tif_dataoff=tif->tif_diroff+dirsize;
if (!(tif->tif_flags&TIFF_BIGTIFF))
tif->tif_dataoff=(uint32)tif->tif_dataoff;
if ((tif->tif_dataoff<tif->tif_diroff)||(tif->tif_dataoff<(uint64)dirsize))
{
TIFFErrorExt(tif->tif_clientdata,module,"Maximum TIFF file size exceeded");
goto bad;
}
if (tif->tif_dataoff&1)
tif->tif_dataoff++;
if (isimage)
tif->tif_curdir++;
}
if (isimage)
{
if (TIFFFieldSet(tif,FIELD_SUBIFD)&&(tif->tif_subifdoff==0))
{
uint32 na;
TIFFDirEntry* nb;
for (na=0, nb=dir; ; na++, nb++)
{
assert(na<ndir);
if (nb->tdir_tag==TIFFTAG_SUBIFD)
break;
}
if (!(tif->tif_flags&TIFF_BIGTIFF))
tif->tif_subifdoff=tif->tif_diroff+2+na*12+8;
else
tif->tif_subifdoff=tif->tif_diroff+8+na*20+12;
}
}
dirmem=_TIFFmalloc(dirsize);
if (dirmem==NULL)
{
TIFFErrorExt(tif->tif_clientdata,module,"Out of memory");
goto bad;
}
if (!(tif->tif_flags&TIFF_BIGTIFF))
{
uint8* n;
uint32 nTmp;
TIFFDirEntry* o;
n=dirmem;
*(uint16*)n=(uint16)ndir;
if (tif->tif_flags&TIFF_SWAB)
TIFFSwabShort((uint16*)n);
n+=2;
o=dir;
for (m=0; m<ndir; m++)
{
*(uint16*)n=o->tdir_tag;
if (tif->tif_flags&TIFF_SWAB)
TIFFSwabShort((uint16*)n);
n+=2;
*(uint16*)n=o->tdir_type;
if (tif->tif_flags&TIFF_SWAB)
TIFFSwabShort((uint16*)n);
n+=2;
nTmp = (uint32)o->tdir_count;
_TIFFmemcpy(n,&nTmp,4);
if (tif->tif_flags&TIFF_SWAB)
TIFFSwabLong((uint32*)n);
n+=4;
/* This is correct. The data has been */
/* swabbed previously in TIFFWriteDirectoryTagData */
_TIFFmemcpy(n,&o->tdir_offset,4);
n+=4;
o++;
}
nTmp = (uint32)tif->tif_nextdiroff;
if (tif->tif_flags&TIFF_SWAB)
TIFFSwabLong(&nTmp);
_TIFFmemcpy(n,&nTmp,4);
}
else
{
uint8* n;
TIFFDirEntry* o;
n=dirmem;
*(uint64*)n=ndir;
if (tif->tif_flags&TIFF_SWAB)
TIFFSwabLong8((uint64*)n);
n+=8;
o=dir;
for (m=0; m<ndir; m++)
{
*(uint16*)n=o->tdir_tag;
if (tif->tif_flags&TIFF_SWAB)
TIFFSwabShort((uint16*)n);
n+=2;
*(uint16*)n=o->tdir_type;
if (tif->tif_flags&TIFF_SWAB)
TIFFSwabShort((uint16*)n);
n+=2;
_TIFFmemcpy(n,&o->tdir_count,8);
if (tif->tif_flags&TIFF_SWAB)
TIFFSwabLong8((uint64*)n);
n+=8;
_TIFFmemcpy(n,&o->tdir_offset,8);
n+=8;
o++;
}
_TIFFmemcpy(n,&tif->tif_nextdiroff,8);
if (tif->tif_flags&TIFF_SWAB)
TIFFSwabLong8((uint64*)n);
}
_TIFFfree(dir);
dir=NULL;
if (!SeekOK(tif,tif->tif_diroff))
{
TIFFErrorExt(tif->tif_clientdata,module,"IO error writing directory");
goto bad;
}
if (!WriteOK(tif,dirmem,(tmsize_t)dirsize))
{
TIFFErrorExt(tif->tif_clientdata,module,"IO error writing directory");
goto bad;
}
_TIFFfree(dirmem);
if (imagedone)
{
TIFFFreeDirectory(tif);
tif->tif_flags &= ~TIFF_DIRTYDIRECT;
tif->tif_flags &= ~TIFF_DIRTYSTRIP;
(*tif->tif_cleanup)(tif);
/*
* Reset directory-related state for subsequent
* directories.
*/
TIFFCreateDirectory(tif);
}
return(1);
bad:
if (dir!=NULL)
_TIFFfree(dir);
if (dirmem!=NULL)
_TIFFfree(dirmem);
return(0);
}
| 0 |
[
"CWE-20"
] |
libtiff
|
3144e57770c1e4d26520d8abee750f8ac8b75490
| 218,621,199,088,199,240,000,000,000,000,000,000,000 | 578 |
* libtiff/tif_dir.c, tif_dirread.c, tif_dirwrite.c: implement various clampings
of double to other data types to avoid undefined behaviour if the output range
isn't big enough to hold the input value.
Fixes http://bugzilla.maptools.org/show_bug.cgi?id=2643
http://bugzilla.maptools.org/show_bug.cgi?id=2642
http://bugzilla.maptools.org/show_bug.cgi?id=2646
http://bugzilla.maptools.org/show_bug.cgi?id=2647
|
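The clamping this message describes can be illustrated with a hypothetical helper; the function name is invented and this is not the actual libtiff patch:

#include <stdint.h>

/* Casting a double that is out of range (or NaN) to a narrower integer
 * type is undefined behaviour in C, so clamp before converting. */
static uint16_t clamp_double_to_u16(double v)
{
    if (!(v >= 0.0))   /* catches negative values and NaN */
        return 0;
    if (v > 65535.0)
        return 65535;
    return (uint16_t)v;
}

The same pattern generalizes to each target type: test against the destination's minimum and maximum first, and only then perform the cast.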
static void __cgroup_kill(struct cgroup *cgrp)
{
struct css_task_iter it;
struct task_struct *task;
lockdep_assert_held(&cgroup_mutex);
spin_lock_irq(&css_set_lock);
set_bit(CGRP_KILL, &cgrp->flags);
spin_unlock_irq(&css_set_lock);
css_task_iter_start(&cgrp->self, CSS_TASK_ITER_PROCS | CSS_TASK_ITER_THREADED, &it);
while ((task = css_task_iter_next(&it))) {
/* Ignore kernel threads here. */
if (task->flags & PF_KTHREAD)
continue;
/* Skip tasks that are already dying. */
if (__fatal_signal_pending(task))
continue;
send_sig(SIGKILL, task, 0);
}
css_task_iter_end(&it);
spin_lock_irq(&css_set_lock);
clear_bit(CGRP_KILL, &cgrp->flags);
spin_unlock_irq(&css_set_lock);
}
| 0 |
[
"CWE-416"
] |
linux
|
a06247c6804f1a7c86a2e5398a4c1f1db1471848
| 169,633,863,805,382,190,000,000,000,000,000,000,000 | 29 |
psi: Fix uaf issue when psi trigger is destroyed while being polled
With a write operation on psi files replacing an old trigger with a new
one, the lifetime of its waitqueue is totally arbitrary. Overwriting an
existing trigger causes its waitqueue to be freed, and a pending poll()
will stumble on trigger->event_wait, which was destroyed.
Fix this by disallowing to redefine an existing psi trigger. If a write
operation is used on a file descriptor with an already existing psi
trigger, the operation will fail with EBUSY error.
Also bypass the check for psi_disabled in psi_trigger_destroy(), as the
flag can be flipped after the trigger is created, leading to a memory
leak.
Fixes: 0e94682b73bf ("psi: introduce psi monitor")
Reported-by: [email protected]
Suggested-by: Linus Torvalds <[email protected]>
Analyzed-by: Eric Biggers <[email protected]>
Signed-off-by: Suren Baghdasaryan <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Reviewed-by: Eric Biggers <[email protected]>
Acked-by: Johannes Weiner <[email protected]>
Cc: [email protected]
Link: https://lore.kernel.org/r/[email protected]
|
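A sketch of the refusal-to-redefine fix the message describes, with assumed names and shape rather than the actual kernel diff:

#include <linux/err.h>
#include <linux/errno.h>

struct psi_trigger;

/* Instead of freeing the old trigger (whose waitqueue a pending poll()
 * may still reference), fail the write with EBUSY if one exists. */
static struct psi_trigger *attach_trigger(struct psi_trigger **slot,
					  struct psi_trigger *new)
{
	if (*slot)
		return ERR_PTR(-EBUSY); /* redefinition now fails */
	*slot = new;
	return new;
}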
const char* Http2Session::TypeName() const {
switch (session_type_) {
case NGHTTP2_SESSION_SERVER: return "server";
case NGHTTP2_SESSION_CLIENT: return "client";
default:
// This should never happen
ABORT();
}
}
| 0 |
[
"CWE-416"
] |
node
|
7f178663ebffc82c9f8a5a1b6bf2da0c263a30ed
| 171,703,018,391,782,630,000,000,000,000,000,000,000 | 9 |
src: use unique_ptr for WriteWrap
This commit attempts to avoid a use-after-free error by using unique_ptr
and passing a reference to it.
CVE-ID: CVE-2020-8265
Fixes: https://github.com/nodejs-private/node-private/issues/227
PR-URL: https://github.com/nodejs-private/node-private/pull/238
Reviewed-By: Michael Dawson <[email protected]>
Reviewed-By: Tobias Nießen <[email protected]>
Reviewed-By: Richard Lau <[email protected]>
|
static gboolean handle_store(MonoThread *thread)
{
mono_threads_lock ();
THREAD_DEBUG (g_message ("%s: thread %p ID %"G_GSIZE_FORMAT, __func__, thread, (gsize)thread->internal_thread->tid));
if (threads_starting_up)
mono_g_hash_table_remove (threads_starting_up, thread);
if (shutting_down) {
mono_threads_unlock ();
return FALSE;
}
if(threads==NULL) {
MONO_GC_REGISTER_ROOT_FIXED (threads);
threads=mono_g_hash_table_new_type (NULL, NULL, MONO_HASH_VALUE_GC);
}
/* We don't need to duplicate thread->handle, because it is
* only closed when the thread object is finalized by the GC.
*/
g_assert (thread->internal_thread);
mono_g_hash_table_insert(threads, (gpointer)(gsize)(thread->internal_thread->tid),
thread->internal_thread);
mono_threads_unlock ();
return TRUE;
}
| 0 |
[
"CWE-399",
"CWE-264"
] |
mono
|
722f9890f09aadfc37ae479e7d946d5fc5ef7b91
| 261,136,583,641,029,400,000,000,000,000,000,000,000 | 30 |
Fix access to freed members of a dead thread
* threads.c: Fix access to freed members of a dead thread. Found
and fixed by Rodrigo Kumpera <[email protected]>
Ref: CVE-2011-0992
|
void silk_NLSF_stabilize(
opus_int16 *NLSF_Q15, /* I/O Unstable/stabilized normalized LSF vector in Q15 [L] */
const opus_int16 *NDeltaMin_Q15, /* I Min distance vector, NDeltaMin_Q15[L] must be >= 1 [L+1] */
const opus_int L /* I Number of NLSF parameters in the input vector */
)
{
opus_int i, I=0, k, loops;
opus_int16 center_freq_Q15;
opus_int32 diff_Q15, min_diff_Q15, min_center_Q15, max_center_Q15;
/* This is necessary to ensure an output within range of a opus_int16 */
silk_assert( NDeltaMin_Q15[L] >= 1 );
for( loops = 0; loops < MAX_LOOPS; loops++ ) {
/**************************/
/* Find smallest distance */
/**************************/
/* First element */
min_diff_Q15 = NLSF_Q15[0] - NDeltaMin_Q15[0];
I = 0;
/* Middle elements */
for( i = 1; i <= L-1; i++ ) {
diff_Q15 = NLSF_Q15[i] - ( NLSF_Q15[i-1] + NDeltaMin_Q15[i] );
if( diff_Q15 < min_diff_Q15 ) {
min_diff_Q15 = diff_Q15;
I = i;
}
}
/* Last element */
diff_Q15 = ( 1 << 15 ) - ( NLSF_Q15[L-1] + NDeltaMin_Q15[L] );
if( diff_Q15 < min_diff_Q15 ) {
min_diff_Q15 = diff_Q15;
I = L;
}
/***************************************************/
/* Now check if the smallest distance non-negative */
/***************************************************/
if( min_diff_Q15 >= 0 ) {
return;
}
if( I == 0 ) {
/* Move away from lower limit */
NLSF_Q15[0] = NDeltaMin_Q15[0];
} else if( I == L) {
/* Move away from higher limit */
NLSF_Q15[L-1] = ( 1 << 15 ) - NDeltaMin_Q15[L];
} else {
/* Find the lower extreme for the location of the current center frequency */
min_center_Q15 = 0;
for( k = 0; k < I; k++ ) {
min_center_Q15 += NDeltaMin_Q15[k];
}
min_center_Q15 += silk_RSHIFT( NDeltaMin_Q15[I], 1 );
/* Find the upper extreme for the location of the current center frequency */
max_center_Q15 = 1 << 15;
for( k = L; k > I; k-- ) {
max_center_Q15 -= NDeltaMin_Q15[k];
}
max_center_Q15 -= silk_RSHIFT( NDeltaMin_Q15[I], 1 );
/* Move apart, sorted by value, keeping the same center frequency */
center_freq_Q15 = (opus_int16)silk_LIMIT_32( silk_RSHIFT_ROUND( (opus_int32)NLSF_Q15[I-1] + (opus_int32)NLSF_Q15[I], 1 ),
min_center_Q15, max_center_Q15 );
NLSF_Q15[I-1] = center_freq_Q15 - silk_RSHIFT( NDeltaMin_Q15[I], 1 );
NLSF_Q15[I] = NLSF_Q15[I-1] + NDeltaMin_Q15[I];
}
}
/* Safe and simple fall back method, which is less ideal than the above */
if( loops == MAX_LOOPS )
{
/* Insertion sort (fast for already almost sorted arrays): */
/* Best case: O(n) for an already sorted array */
/* Worst case: O(n^2) for an inversely sorted array */
silk_insertion_sort_increasing_all_values_int16( &NLSF_Q15[0], L );
/* First NLSF should be no less than NDeltaMin[0] */
NLSF_Q15[0] = silk_max_int( NLSF_Q15[0], NDeltaMin_Q15[0] );
/* Keep delta_min distance between the NLSFs */
for( i = 1; i < L; i++ )
NLSF_Q15[i] = silk_max_int( NLSF_Q15[i], silk_ADD_SAT16( NLSF_Q15[i-1], NDeltaMin_Q15[i] ) );
/* Last NLSF should be no higher than 1 - NDeltaMin[L] */
NLSF_Q15[L-1] = silk_min_int( NLSF_Q15[L-1], (1<<15) - NDeltaMin_Q15[L] );
/* Keep NDeltaMin distance between the NLSFs */
for( i = L-2; i >= 0; i-- )
NLSF_Q15[i] = silk_min_int( NLSF_Q15[i], NLSF_Q15[i+1] - NDeltaMin_Q15[i+1] );
}
}
| 0 |
[
"CWE-190"
] |
opus
|
79e8f527b0344b0897a65be35e77f7885bd99409
| 112,278,650,635,579,980,000,000,000,000,000,000,000 | 96 |
Ensure that NLSF cannot be negative when computing a min distance between them
Signed-off-by: Jean-Marc Valin <[email protected]>
|
enumerate_children_callback (GObject *source_object,
GAsyncResult *res,
gpointer user_data)
{
DirectoryLoadState *state;
GFileEnumerator *enumerator;
GError *error;
state = user_data;
if (state->directory == NULL) {
/* Operation was cancelled. Bail out */
directory_load_state_free (state);
return;
}
error = NULL;
enumerator = g_file_enumerate_children_finish (G_FILE (source_object),
res, &error);
if (enumerator == NULL) {
directory_load_done (state->directory, error);
g_error_free (error);
directory_load_state_free (state);
return;
} else {
state->enumerator = enumerator;
g_file_enumerator_next_files_async (state->enumerator,
DIRECTORY_LOAD_ITEMS_PER_CALLBACK,
G_PRIORITY_DEFAULT,
state->cancellable,
more_files_callback,
state);
}
}
| 0 |
[] |
nautilus
|
7632a3e13874a2c5e8988428ca913620a25df983
| 72,871,285,434,184,770,000,000,000,000,000,000,000 | 35 |
Check for trusted desktop file launchers.
2009-02-24 Alexander Larsson <[email protected]>
* libnautilus-private/nautilus-directory-async.c:
Check for trusted desktop file launchers.
* libnautilus-private/nautilus-file-private.h:
* libnautilus-private/nautilus-file.c:
* libnautilus-private/nautilus-file.h:
Add nautilus_file_is_trusted_link.
Allow unsetting of custom display name.
* libnautilus-private/nautilus-mime-actions.c:
Display dialog when trying to launch a non-trusted desktop file.
svn path=/trunk/; revision=15003
|
run (const gchar *name,
gint nparams,
const GimpParam *param,
gint *nreturn_vals,
GimpParam **return_vals)
{
static GimpParam values[2];
GimpRunMode run_mode;
GimpPDBStatusType status = GIMP_PDB_SUCCESS;
gint32 image_ID;
gint32 drawable_ID;
GimpExportReturn export = GIMP_EXPORT_CANCEL;
GError *error = NULL;
l_run_mode = run_mode = param[0].data.d_int32;
INIT_I18N ();
*nreturn_vals = 1;
*return_vals = values;
values[0].type = GIMP_PDB_STATUS;
values[0].data.d_status = GIMP_PDB_EXECUTION_ERROR;
if (strcmp (name, LOAD_PROC) == 0)
{
image_ID = load_image (param[1].data.d_string, &error);
if (image_ID != -1)
{
*nreturn_vals = 2;
values[1].type = GIMP_PDB_IMAGE;
values[1].data.d_image = image_ID;
}
else
{
status = GIMP_PDB_EXECUTION_ERROR;
}
}
else if (strcmp (name, SAVE_PROC) == 0)
{
image_ID = param[1].data.d_int32;
drawable_ID = param[2].data.d_int32;
/* eventually export the image */
switch (run_mode)
{
case GIMP_RUN_INTERACTIVE:
case GIMP_RUN_WITH_LAST_VALS:
gimp_ui_init (PLUG_IN_BINARY, FALSE);
export = gimp_export_image (&image_ID, &drawable_ID, NULL,
(GIMP_EXPORT_CAN_HANDLE_RGB |
GIMP_EXPORT_CAN_HANDLE_GRAY |
GIMP_EXPORT_CAN_HANDLE_INDEXED));
if (export == GIMP_EXPORT_CANCEL)
{
values[0].data.d_status = GIMP_PDB_CANCEL;
return;
}
break;
default:
break;
}
switch (run_mode)
{
case GIMP_RUN_INTERACTIVE:
case GIMP_RUN_WITH_LAST_VALS:
/* No additional data to retrieve */
break;
case GIMP_RUN_NONINTERACTIVE:
/* Make sure all the arguments are there! */
if (nparams != 5)
status = GIMP_PDB_CALLING_ERROR;
break;
default:
break;
}
if (status == GIMP_PDB_SUCCESS)
{
if (! save_image (param[3].data.d_string, image_ID, drawable_ID,
&error))
{
status = GIMP_PDB_EXECUTION_ERROR;
}
}
if (export == GIMP_EXPORT_EXPORT)
gimp_image_delete (image_ID);
}
else
{
status = GIMP_PDB_CANCEL;
}
if (status != GIMP_PDB_SUCCESS && error)
{
*nreturn_vals = 2;
values[1].type = GIMP_PDB_STRING;
values[1].data.d_string = error->message;
}
values[0].data.d_status = status;
}
| 0 |
[
"CWE-787"
] |
gimp
|
0b35f6a082a0b3c372c568ea6bde39a4796acde2
| 230,265,338,595,704,370,000,000,000,000,000,000,000 | 108 |
Bug 687392 - Memory corruption vulnerability when reading XWD files
Applied and enhanced patch from andres which makes file-xwd detect
this kind of file corruption and abort loading with an error message.
|
int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
{
pgtable_t new = pte_alloc_one(mm, address);
if (!new)
return -ENOMEM;
/*
* Ensure all pte setup (eg. pte page lock and page clearing) are
* visible before the pte is made visible to other CPUs by being
* put into page tables.
*
* The other side of the story is the pointer chasing in the page
* table walking code (when walking the page table without locking;
* ie. most of the time). Fortunately, these data accesses consist
* of a chain of data-dependent loads, meaning most CPUs (alpha
* being the notable exception) will already guarantee loads are
* seen in-order. See the alpha page table accessors for the
* smp_read_barrier_depends() barriers in page table walking code.
*/
smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */
spin_lock(&mm->page_table_lock);
if (!pmd_present(*pmd)) { /* Has another populated it ? */
mm->nr_ptes++;
pmd_populate(mm, pmd, new);
new = NULL;
}
spin_unlock(&mm->page_table_lock);
if (new)
pte_free(mm, new);
return 0;
}
| 0 |
[
"CWE-20"
] |
linux-2.6
|
89f5b7da2a6bad2e84670422ab8192382a5aeb9f
| 2,332,324,207,665,356,500,000,000,000,000,000,000 | 32 |
Reinstate ZERO_PAGE optimization in 'get_user_pages()' and fix XIP
KAMEZAWA Hiroyuki and Oleg Nesterov point out that since the commit
557ed1fa2620dc119adb86b34c614e152a629a80 ("remove ZERO_PAGE") removed
the ZERO_PAGE from the VM mappings, any users of get_user_pages() will
generally now populate the VM with real empty pages needlessly.
We used to get the ZERO_PAGE when we did the "handle_mm_fault()", but
since fault handling no longer uses ZERO_PAGE for new anonymous pages,
we now need to handle that special case in follow_page() instead.
In particular, the removal of ZERO_PAGE effectively removed the core
file writing optimization where we would skip writing pages that had not
been populated at all, and increased memory pressure a lot by allocating
all those useless newly zeroed pages.
This reinstates the optimization by making the unmapped PTE case the
same as for a non-existent page table, which already did this correctly.
While at it, this also fixes the XIP case for follow_page(), where the
caller could not differentiate between the case of a page that simply
could not be used (because it had no "struct page" associated with it)
and a page that just wasn't mapped.
We do that by simply returning an error pointer for pages that could not
be turned into a "struct page *". The error is arbitrarily picked to be
EFAULT, since that was what get_user_pages() already used for the
equivalent IO-mapped page case.
[ Also removed an impossible test for pte_offset_map_lock() failing:
that's not how that function works ]
Acked-by: Oleg Nesterov <[email protected]>
Acked-by: Nick Piggin <[email protected]>
Cc: KAMEZAWA Hiroyuki <[email protected]>
Cc: Hugh Dickins <[email protected]>
Cc: Andrew Morton <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Roland McGrath <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
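The error-pointer convention this message describes can be sketched as follows; the function and its arguments are illustrative, not the actual follow_page() code:

#include <linux/err.h>
#include <linux/types.h>

struct page;

/* NULL means "no mapping: let handle_mm_fault() populate"; an ERR_PTR
 * (arbitrarily -EFAULT, as the message says) means "mapped but no
 * usable struct page" (e.g. XIP), so callers can tell the two apart. */
static struct page *classify_follow_page(struct page *page, bool pte_present)
{
	if (!pte_present)
		return NULL;
	if (!page)
		return ERR_PTR(-EFAULT);
	return page;
}

Callers would then use IS_ERR()/PTR_ERR() on the result rather than treating every non-page outcome as "retry the fault".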
explicit ReplaceMulWithBroadcastByTile(
const GraphOptimizerContext& ctx,
const ArithmeticOptimizerContext& ctx_ext)
: ArithmeticOptimizerStage("ReplaceMulWithBroadcastByTile", ctx,
ctx_ext) {}
| 0 |
[
"CWE-476"
] |
tensorflow
|
e6340f0665d53716ef3197ada88936c2a5f7a2d3
| 21,153,868,868,167,099,000,000,000,000,000,000,000 | 5 |
Handle a special grappler case resulting in crash.
It might happen that a malformed input could be used to trick Grappler into trying to optimize a node with no inputs. This, in turn, would produce a null pointer dereference and a segfault.
PiperOrigin-RevId: 369242852
Change-Id: I2e5cbe7aec243d34a6d60220ac8ac9b16f136f6b
|
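A plain-C sketch of the defensive check the message describes; the struct and names are assumptions, not TensorFlow code:

#include <stddef.h>

struct node {
    size_t num_inputs;
    struct node **inputs;
};

/* A malformed graph can hand the optimizer a node with no inputs;
 * bail out instead of dereferencing input 0 through a null pointer. */
static int safe_to_rewrite(const struct node *n)
{
    if (n == NULL || n->inputs == NULL || n->num_inputs == 0)
        return 0;
    return n->inputs[0] != NULL;
}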
static void process_size_override(insn *result, operand *op)
{
if (tasm_compatible_mode) {
switch (tokval.t_integer) {
/* For TASM compatibility a size override inside the
* brackets changes the size of the operand, not the
* address type of the operand as it does in standard
* NASM syntax. Hence:
*
* mov eax,[DWORD val]
*
* is valid syntax in TASM compatibility mode. Note that
* you lose the ability to override the default address
* type for the instruction, but we never use anything
* but 32-bit flat model addressing in our code.
*/
case S_BYTE:
op->type |= BITS8;
break;
case S_WORD:
op->type |= BITS16;
break;
case S_DWORD:
case S_LONG:
op->type |= BITS32;
break;
case S_QWORD:
op->type |= BITS64;
break;
case S_TWORD:
op->type |= BITS80;
break;
case S_OWORD:
op->type |= BITS128;
break;
default:
nasm_nonfatal("invalid operand size specification");
break;
}
} else {
/* Standard NASM compatible syntax */
switch (tokval.t_integer) {
case S_NOSPLIT:
op->eaflags |= EAF_TIMESTWO;
break;
case S_REL:
op->eaflags |= EAF_REL;
break;
case S_ABS:
op->eaflags |= EAF_ABS;
break;
case S_BYTE:
op->disp_size = 8;
op->eaflags |= EAF_BYTEOFFS;
break;
case P_A16:
case P_A32:
case P_A64:
if (result->prefixes[PPS_ASIZE] &&
result->prefixes[PPS_ASIZE] != tokval.t_integer)
nasm_nonfatal("conflicting address size specifications");
else
result->prefixes[PPS_ASIZE] = tokval.t_integer;
break;
case S_WORD:
op->disp_size = 16;
op->eaflags |= EAF_WORDOFFS;
break;
case S_DWORD:
case S_LONG:
op->disp_size = 32;
op->eaflags |= EAF_WORDOFFS;
break;
case S_QWORD:
op->disp_size = 64;
op->eaflags |= EAF_WORDOFFS;
break;
default:
nasm_nonfatal("invalid size specification in"
" effective address");
break;
}
}
}
| 0 |
[
"CWE-416"
] |
nasm
|
6ac6ac57e3d01ea8ed4ea47706eb724b59176461
| 31,009,751,592,634,100,000,000,000,000,000,000,000 | 84 |
parser: when flattening an eop, must preserve any data buffer
An eop may have a data buffer associated with it as part of the same
memory allocation. Therefore, we need to move "subexpr" up instead of
merging it into "eop".
This *partially* resolves BR 3392707, but that test case still
triggers a violation when using -gcv8.
Reported-by: Suhwan <[email protected]>
Signed-off-by: H. Peter Anvin (Intel) <[email protected]>
|
static size_t rtnl_port_size(const struct net_device *dev,
u32 ext_filter_mask)
{
size_t port_size = nla_total_size(4) /* PORT_VF */
+ nla_total_size(PORT_PROFILE_MAX) /* PORT_PROFILE */
+ nla_total_size(PORT_UUID_MAX) /* PORT_INSTANCE_UUID */
+ nla_total_size(PORT_UUID_MAX) /* PORT_HOST_UUID */
+ nla_total_size(1) /* PROT_VDP_REQUEST */
+ nla_total_size(2); /* PORT_VDP_RESPONSE */
size_t vf_ports_size = nla_total_size(sizeof(struct nlattr));
size_t vf_port_size = nla_total_size(sizeof(struct nlattr))
+ port_size;
size_t port_self_size = nla_total_size(sizeof(struct nlattr))
+ port_size;
if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent ||
!(ext_filter_mask & RTEXT_FILTER_VF))
return 0;
if (dev_num_vf(dev->dev.parent))
return port_self_size + vf_ports_size +
vf_port_size * dev_num_vf(dev->dev.parent);
else
return port_self_size;
}
| 0 |
[
"CWE-476"
] |
linux
|
f428fe4a04cc339166c8bbd489789760de3a0cee
| 298,446,635,561,417,560,000,000,000,000,000,000,000 | 24 |
rtnetlink: give a user socket to get_target_net()
This function is used from two places: rtnl_dump_ifinfo and
rtnl_getlink. In rtnl_getlink(), we pass a request skb to
get_target_net(), but in rtnl_dump_ifinfo, we pass a response skb
to get_target_net().
The problem here is that NETLINK_CB() isn't initialized for the response
skb. In both cases we can obtain a user socket and pass it, instead of
the skb, to get_target_net().
This bug was found by syzkaller with this call-trace:
kasan: GPF could be caused by NULL-ptr deref or user memory access
general protection fault: 0000 [#1] SMP KASAN
Modules linked in:
CPU: 1 PID: 3149 Comm: syzkaller140561 Not tainted 4.15.0-rc4-mm1+ #47
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS
Google 01/01/2011
RIP: 0010:__netlink_ns_capable+0x8b/0x120 net/netlink/af_netlink.c:868
RSP: 0018:ffff8801c880f348 EFLAGS: 00010206
RAX: dffffc0000000000 RBX: 0000000000000000 RCX: ffffffff8443f900
RDX: 000000000000007b RSI: ffffffff86510f40 RDI: 00000000000003d8
RBP: ffff8801c880f360 R08: 0000000000000000 R09: 1ffff10039101e4f
R10: 0000000000000000 R11: 0000000000000001 R12: ffffffff86510f40
R13: 000000000000000c R14: 0000000000000004 R15: 0000000000000011
FS: 0000000001a1a880(0000) GS:ffff8801db300000(0000) knlGS:0000000000000000
CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
CR2: 0000000020151000 CR3: 00000001c9511005 CR4: 00000000001606e0
DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
Call Trace:
netlink_ns_capable+0x26/0x30 net/netlink/af_netlink.c:886
get_target_net+0x9d/0x120 net/core/rtnetlink.c:1765
rtnl_dump_ifinfo+0x2e5/0xee0 net/core/rtnetlink.c:1806
netlink_dump+0x48c/0xce0 net/netlink/af_netlink.c:2222
__netlink_dump_start+0x4f0/0x6d0 net/netlink/af_netlink.c:2319
netlink_dump_start include/linux/netlink.h:214 [inline]
rtnetlink_rcv_msg+0x7f0/0xb10 net/core/rtnetlink.c:4485
netlink_rcv_skb+0x21e/0x460 net/netlink/af_netlink.c:2441
rtnetlink_rcv+0x1c/0x20 net/core/rtnetlink.c:4540
netlink_unicast_kernel net/netlink/af_netlink.c:1308 [inline]
netlink_unicast+0x4be/0x6a0 net/netlink/af_netlink.c:1334
netlink_sendmsg+0xa4a/0xe60 net/netlink/af_netlink.c:1897
Cc: Jiri Benc <[email protected]>
Fixes: 79e1ad148c84 ("rtnetlink: use netnsid to query interface")
Signed-off-by: Andrei Vagin <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
static VALUE read_memory(VALUE klass, VALUE content)
{
xmlRelaxNGParserCtxtPtr ctx = xmlRelaxNGNewMemParserCtxt(
(const char *)StringValuePtr(content),
(int)RSTRING_LEN(content)
);
xmlRelaxNGPtr schema;
VALUE errors = rb_ary_new();
VALUE rb_schema;
xmlSetStructuredErrorFunc((void *)errors, Nokogiri_error_array_pusher);
#ifdef HAVE_XMLRELAXNGSETPARSERSTRUCTUREDERRORS
xmlRelaxNGSetParserStructuredErrors(
ctx,
Nokogiri_error_array_pusher,
(void *)errors
);
#endif
schema = xmlRelaxNGParse(ctx);
xmlSetStructuredErrorFunc(NULL, NULL);
xmlRelaxNGFreeParserCtxt(ctx);
if(NULL == schema) {
xmlErrorPtr error = xmlGetLastError();
if(error)
Nokogiri_error_raise(NULL, error);
else
rb_raise(rb_eRuntimeError, "Could not parse document");
return Qnil;
}
rb_schema = Data_Wrap_Struct(klass, 0, dealloc, schema);
rb_iv_set(rb_schema, "@errors", errors);
return rb_schema;
}
| 1 |
[
"CWE-611",
"CWE-703"
] |
nokogiri
|
9c87439d9afa14a365ff13e73adc809cb2c3d97b
| 284,210,493,410,329,840,000,000,000,000,000,000,000 | 40 |
feat: XML::Schema and RelaxNG creation accept optional ParseOptions
I'm trying out a new pattern, which is that the parsed object carries
around the ParseOptions it was created with, which should make some
testing a bit easier.
I'm also not implementing the "config block" pattern in use for
Documents, because I think the UX is weird and I'm hoping to change
everything to use kwargs in a 2.0 release, anyway.
|
lka_report_smtp_tx_data(const char *direction, struct timeval *tv, uint64_t reqid, uint32_t msgid, int ok)
{
const char *result;
switch (ok) {
case 1:
result = "ok";
break;
case 0:
result = "permfail";
break;
default:
result = "tempfail";
break;
}
report_smtp_broadcast(reqid, direction, tv, "tx-data", "%08x|%s\n",
msgid, result);
}
| 0 |
[
"CWE-476"
] |
src
|
6c3220444ed06b5796dedfd53a0f4becd903c0d1
| 251,916,666,645,334,150,000,000,000,000,000,000,000 | 18 |
smtpd's filter state machine can prematurely release resources,
leading to a crash. From gilles@
|
CImg<_cimg_Tt> get_append(const CImg<T>& img, const char axis='x', const float align=0) const {
if (is_empty()) return +img;
if (!img) return +*this;
return CImgList<_cimg_Tt>(*this,true).insert(img).get_append(axis,align);
}
| 0 |
[
"CWE-770"
] |
cimg
|
619cb58dd90b4e03ac68286c70ed98acbefd1c90
| 91,006,267,236,880,910,000,000,000,000,000,000,000 | 5 |
CImg<>::load_bmp() and CImg<>::load_pandore(): Check that the dimensions encoded in the file do not exceed the file size.
|
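The dimension check this message describes amounts to verifying that the declared payload fits the file before allocating; a sketch with assumed names, not the CImg patch:

#include <stdint.h>

/* Reject header-declared dimensions whose pixel payload cannot fit in
 * the file, checking each multiply for overflow before performing it. */
static int dims_fit_file(uint64_t w, uint64_t h, uint64_t bpp,
                         uint64_t file_size)
{
    if (w == 0 || h == 0 || bpp == 0)
        return 0;
    if (h > UINT64_MAX / w || w * h > UINT64_MAX / bpp)
        return 0;
    return w * h * bpp <= file_size;
}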
PHP_FUNCTION(exif_imagetype)
{
char *imagefile;
size_t imagefile_len;
php_stream * stream;
int itype = 0;
if (zend_parse_parameters(ZEND_NUM_ARGS(), "p", &imagefile, &imagefile_len) == FAILURE) {
return;
}
stream = php_stream_open_wrapper(imagefile, "rb", IGNORE_PATH|REPORT_ERRORS, NULL);
if (stream == NULL) {
RETURN_FALSE;
}
itype = php_getimagetype(stream, NULL);
php_stream_close(stream);
if (itype == IMAGE_FILETYPE_UNKNOWN) {
RETURN_FALSE;
} else {
ZVAL_LONG(return_value, itype);
}
}
| 0 |
[
"CWE-416"
] |
php-src
|
3fdde65617e9f954e2c964768aac8831005497e5
| 77,235,441,407,672,320,000,000,000,000,000,000,000 | 27 |
Fix #76409: heap use after free in _php_stream_free
We must not close the stream in exif_read_from_impl(), since it is the
responsibility of the (caller's) caller to do so, if it actually opened
the stream.
We simplify the reproduce script, which is actually about supplying a
path to a directory (opposed to a regular file), and use `.` instead of
`/` to also make it work on Windows.
|
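The ownership rule restored by this fix generalizes to any stream-passing API; a sketch in plain C with illustrative names, not the PHP source:

#include <stdio.h>

/* The callee borrows the stream and must not close it... */
static void parse_from_stream(FILE *stream)
{
    (void)stream; /* ... read and parse; never fclose(stream) here */
}

/* ...because the opener owns it and closes it exactly once. */
static int parse_from_path(const char *path)
{
    FILE *stream = fopen(path, "rb");
    if (stream == NULL)
        return -1;
    parse_from_stream(stream);
    fclose(stream);
    return 0;
}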
GF_Err tfdt_Read(GF_Box *s,GF_BitStream *bs)
{
GF_TFBaseMediaDecodeTimeBox *ptr = (GF_TFBaseMediaDecodeTimeBox *)s;
if (ptr->version==1) {
ptr->baseMediaDecodeTime = gf_bs_read_u64(bs);
ISOM_DECREASE_SIZE(ptr, 8);
} else {
ptr->baseMediaDecodeTime = (u32) gf_bs_read_u32(bs);
ISOM_DECREASE_SIZE(ptr, 4);
}
return GF_OK;
}
| 0 |
[
"CWE-125"
] |
gpac
|
bceb03fd2be95097a7b409ea59914f332fb6bc86
| 265,489,627,833,906,500,000,000,000,000,000,000,000 | 13 |
fixed 2 possible heap overflows (inc. #1088)
|
static int wq_clamp_max_active(int max_active, unsigned int flags,
const char *name)
{
int lim = flags & WQ_UNBOUND ? WQ_UNBOUND_MAX_ACTIVE : WQ_MAX_ACTIVE;
if (max_active < 1 || max_active > lim)
pr_warn("workqueue: max_active %d requested for %s is out of range, clamping between %d and %d\n",
max_active, name, 1, lim);
return clamp_val(max_active, 1, lim);
}
| 0 |
[
"CWE-200"
] |
tip
|
dfb4357da6ddbdf57d583ba64361c9d792b0e0b1
| 44,640,555,890,348,935,000,000,000,000,000,000,000 | 11 |
time: Remove CONFIG_TIMER_STATS
Currently CONFIG_TIMER_STATS exposes process information across namespaces:
kernel/time/timer_list.c print_timer():
SEQ_printf(m, ", %s/%d", tmp, timer->start_pid);
/proc/timer_list:
#11: <0000000000000000>, hrtimer_wakeup, S:01, do_nanosleep, cron/2570
Given that the tracer can give the same information, this patch entirely
removes CONFIG_TIMER_STATS.
Suggested-by: Thomas Gleixner <[email protected]>
Signed-off-by: Kees Cook <[email protected]>
Acked-by: John Stultz <[email protected]>
Cc: Nicolas Pitre <[email protected]>
Cc: [email protected]
Cc: Lai Jiangshan <[email protected]>
Cc: Shuah Khan <[email protected]>
Cc: Xing Gao <[email protected]>
Cc: Jonathan Corbet <[email protected]>
Cc: Jessica Frazelle <[email protected]>
Cc: [email protected]
Cc: Nicolas Iooss <[email protected]>
Cc: "Paul E. McKenney" <[email protected]>
Cc: Petr Mladek <[email protected]>
Cc: Richard Cochran <[email protected]>
Cc: Tejun Heo <[email protected]>
Cc: Michal Marek <[email protected]>
Cc: Josh Poimboeuf <[email protected]>
Cc: Dmitry Vyukov <[email protected]>
Cc: Oleg Nesterov <[email protected]>
Cc: "Eric W. Biederman" <[email protected]>
Cc: Olof Johansson <[email protected]>
Cc: Andrew Morton <[email protected]>
Cc: [email protected]
Cc: Arjan van de Ven <[email protected]>
Link: http://lkml.kernel.org/r/20170208192659.GA32582@beast
Signed-off-by: Thomas Gleixner <[email protected]>
|
TEST_F(SQLiteUtilTests, test_get_test_db_result_stream) {
auto dbc = getTestDBC();
auto results = getTestDBResultStream();
for (auto r : results) {
char* err_char = nullptr;
sqlite3_exec(dbc->db(), (r.first).c_str(), nullptr, nullptr, &err_char);
EXPECT_TRUE(err_char == nullptr);
if (err_char != nullptr) {
sqlite3_free(err_char);
ASSERT_TRUE(false);
}
QueryDataTyped expected;
auto status = queryInternal(kTestQuery, expected, dbc);
EXPECT_EQ(expected, r.second);
}
}
| 0 |
[
"CWE-77",
"CWE-295"
] |
osquery
|
c3f9a3dae22d43ed3b4f6a403cbf89da4cba7c3c
| 172,471,829,484,310,370,000,000,000,000,000,000,000 | 17 |
Merge pull request from GHSA-4g56-2482-x7q8
* Proposed fix for attach tables vulnerability
* Add authorizer to ATC tables and cleanups
- Add unit test for authorizer function
|
static double mp_complex_cosh(_cimg_math_parser& mp) {
const double real = _mp_arg(2), imag = _mp_arg(3);
double *ptrd = &_mp_arg(1) + 1;
ptrd[0] = std::cosh(real)*std::cos(imag);
ptrd[1] = std::sinh(real)*std::sin(imag);
return cimg::type<double>::nan();
}
| 0 |
[
"CWE-770"
] |
cimg
|
619cb58dd90b4e03ac68286c70ed98acbefd1c90
| 29,905,468,380,510,277,000,000,000,000,000,000,000 | 7 |
CImg<>::load_bmp() and CImg<>::load_pandore(): Check that the dimensions encoded in the file do not exceed the file size.
|
HttpConnectionManagerFactory::createHttpConnectionManagerFactoryFromProto(
const envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&
proto_config,
Server::Configuration::FactoryContext& context, Network::ReadFilterCallbacks& read_callbacks) {
Utility::Singletons singletons = Utility::createSingletons(context);
auto filter_config = Utility::createConfig(
proto_config, context, *singletons.date_provider_, *singletons.route_config_provider_manager_,
*singletons.scoped_routes_config_provider_manager_, *singletons.http_tracer_manager_,
*singletons.filter_config_provider_manager_);
// This lambda captures the shared_ptrs created above, thus preserving the
// reference count.
// Keep in mind the lambda capture list **doesn't** determine the destruction order, but it's fine
// as these captured objects are also global singletons.
return [singletons, filter_config, &context, &read_callbacks]() -> Http::ApiListenerPtr {
auto conn_manager = std::make_unique<Http::ConnectionManagerImpl>(
*filter_config, context.drainDecision(), context.api().randomGenerator(),
context.httpContext(), context.runtime(), context.localInfo(), context.clusterManager(),
context.overloadManager(), context.dispatcher().timeSource());
// This factory creates a new ConnectionManagerImpl in the absence of its usual environment as
// an L4 filter, so this factory needs to take a few actions.
// When a new connection is creating its filter chain it hydrates the factory with a filter
// manager which provides the ConnectionManager with its "read_callbacks".
conn_manager->initializeReadFilterCallbacks(read_callbacks);
// When the connection first calls onData on the ConnectionManager, the ConnectionManager
// creates a codec. Here we force create a codec as onData will not be called.
Buffer::OwnedImpl dummy;
conn_manager->createCodec(dummy);
return conn_manager;
};
}
| 0 |
[
"CWE-22"
] |
envoy
|
5333b928d8bcffa26ab19bf018369a835f697585
| 159,061,972,471,064,200,000,000,000,000,000,000,000 | 37 |
Implement handling of escaped slash characters in URL path
Fixes: CVE-2021-29492
Signed-off-by: Yan Avlasov <[email protected]>
|
static bool test_writeunlock(struct torture_context *tctx,
struct smbcli_state *cli)
{
union smb_write io;
NTSTATUS status;
bool ret = true;
int fnum;
uint8_t *buf;
const int maxsize = 90000;
const char *fname = BASEDIR "\\test.txt";
unsigned int seed = time(NULL);
union smb_fileinfo finfo;
buf = talloc_zero_array(tctx, uint8_t, maxsize);
if (!cli->transport->negotiate.lockread_supported) {
torture_skip(tctx, "Server does not support writeunlock - skipping\n");
}
if (!torture_setup_dir(cli, BASEDIR)) {
torture_fail(tctx, "failed to setup basedir");
}
torture_comment(tctx, "Testing RAW_WRITE_WRITEUNLOCK\n");
io.generic.level = RAW_WRITE_WRITEUNLOCK;
fnum = smbcli_open(cli->tree, fname, O_RDWR|O_CREAT, DENY_NONE);
if (fnum == -1) {
ret = false;
torture_fail_goto(tctx, done, talloc_asprintf(tctx, "Failed to create %s - %s\n", fname, smbcli_errstr(cli->tree)));
}
torture_comment(tctx, "Trying zero write\n");
io.writeunlock.in.file.fnum = fnum;
io.writeunlock.in.count = 0;
io.writeunlock.in.offset = 0;
io.writeunlock.in.remaining = 0;
io.writeunlock.in.data = buf;
status = smb_raw_write(cli->tree, &io);
CHECK_STATUS(status, NT_STATUS_OK);
CHECK_VALUE(io.writeunlock.out.nwritten, io.writeunlock.in.count);
setup_buffer(buf, seed, maxsize);
torture_comment(tctx, "Trying small write\n");
io.writeunlock.in.count = 9;
io.writeunlock.in.offset = 4;
io.writeunlock.in.data = buf;
status = smb_raw_write(cli->tree, &io);
CHECK_STATUS(status, NT_STATUS_RANGE_NOT_LOCKED);
if (smbcli_read(cli->tree, fnum, buf, 0, 13) != 13) {
ret = false;
torture_fail_goto(tctx, done, talloc_asprintf(tctx, "read failed at %s\n", __location__));
}
CHECK_BUFFER(buf+4, seed, 9);
CHECK_VALUE(IVAL(buf,0), 0);
setup_buffer(buf, seed, maxsize);
smbcli_lock(cli->tree, fnum, io.writeunlock.in.offset, io.writeunlock.in.count,
0, WRITE_LOCK);
status = smb_raw_write(cli->tree, &io);
CHECK_STATUS(status, NT_STATUS_OK);
CHECK_VALUE(io.writeunlock.out.nwritten, io.writeunlock.in.count);
memset(buf, 0, maxsize);
if (smbcli_read(cli->tree, fnum, buf, 0, 13) != 13) {
ret = false;
torture_fail_goto(tctx, done, talloc_asprintf(tctx, "read failed at %s\n", __location__));
}
CHECK_BUFFER(buf+4, seed, 9);
CHECK_VALUE(IVAL(buf,0), 0);
setup_buffer(buf, seed, maxsize);
torture_comment(tctx, "Trying large write\n");
io.writeunlock.in.count = 4000;
io.writeunlock.in.offset = 0;
io.writeunlock.in.data = buf;
smbcli_lock(cli->tree, fnum, io.writeunlock.in.offset, io.writeunlock.in.count,
0, WRITE_LOCK);
status = smb_raw_write(cli->tree, &io);
CHECK_STATUS(status, NT_STATUS_OK);
CHECK_VALUE(io.writeunlock.out.nwritten, 4000);
status = smb_raw_write(cli->tree, &io);
CHECK_STATUS(status, NT_STATUS_RANGE_NOT_LOCKED);
memset(buf, 0, maxsize);
if (smbcli_read(cli->tree, fnum, buf, 0, 4000) != 4000) {
ret = false;
torture_fail_goto(tctx, done, talloc_asprintf(tctx, "read failed at %s\n", __location__));
}
CHECK_BUFFER(buf, seed, 4000);
torture_comment(tctx, "Trying bad fnum\n");
io.writeunlock.in.file.fnum = fnum+1;
io.writeunlock.in.count = 4000;
io.writeunlock.in.offset = 0;
io.writeunlock.in.data = buf;
status = smb_raw_write(cli->tree, &io);
CHECK_STATUS(status, NT_STATUS_INVALID_HANDLE);
torture_comment(tctx, "Setting file as sparse\n");
status = torture_set_sparse(cli->tree, fnum);
CHECK_STATUS(status, NT_STATUS_OK);
if (!(cli->transport->negotiate.capabilities & CAP_LARGE_FILES)) {
torture_skip(tctx, "skipping large file tests - CAP_LARGE_FILES not set\n");
}
torture_comment(tctx, "Trying 2^32 offset\n");
setup_buffer(buf, seed, maxsize);
io.writeunlock.in.file.fnum = fnum;
io.writeunlock.in.count = 4000;
io.writeunlock.in.offset = 0xFFFFFFFF - 2000;
io.writeunlock.in.data = buf;
smbcli_lock(cli->tree, fnum, io.writeunlock.in.offset, io.writeunlock.in.count,
0, WRITE_LOCK);
status = smb_raw_write(cli->tree, &io);
CHECK_STATUS(status, NT_STATUS_OK);
CHECK_VALUE(io.writeunlock.out.nwritten, 4000);
CHECK_ALL_INFO(io.writeunlock.in.count + (uint64_t)io.writeunlock.in.offset, size);
memset(buf, 0, maxsize);
if (smbcli_read(cli->tree, fnum, buf, io.writeunlock.in.offset, 4000) != 4000) {
ret = false;
torture_fail_goto(tctx, done, talloc_asprintf(tctx, "read failed at %s\n", __location__));
}
CHECK_BUFFER(buf, seed, 4000);
done:
smbcli_close(cli->tree, fnum);
smb_raw_exit(cli->session);
smbcli_deltree(cli->tree, BASEDIR);
return ret;
}
| 0 |
[
"CWE-200"
] |
samba
|
a60863458dc6b60a09aa8d31fada6c36f5043c76
| 51,356,335,459,648,690,000,000,000,000,000,000,000 | 136 |
CVE-2022-32742: s4: torture: Add raw.write.bad-write test.
Reproduces the test code in:
BUG: https://bugzilla.samba.org/show_bug.cgi?id=15085
Add knownfail.
Signed-off-by: Jeremy Allison <[email protected]>
Reviewed-by: David Disseldorp <[email protected]>
|
void assoc_array_cancel_edit(struct assoc_array_edit *edit)
{
struct assoc_array_ptr *ptr;
int i;
pr_devel("-->%s()\n", __func__);
/* Clean up after an out of memory error */
for (i = 0; i < ARRAY_SIZE(edit->new_meta); i++) {
ptr = edit->new_meta[i];
if (ptr) {
if (assoc_array_ptr_is_node(ptr))
kfree(assoc_array_ptr_to_node(ptr));
else
kfree(assoc_array_ptr_to_shortcut(ptr));
}
}
kfree(edit);
}
| 0 |
[
"CWE-399"
] |
linux
|
95389b08d93d5c06ec63ab49bd732b0069b7c35e
| 75,923,324,636,241,900,000,000,000,000,000,000,000 | 19 |
KEYS: Fix termination condition in assoc array garbage collection
This fixes CVE-2014-3631.
It is possible for an associative array to end up with a shortcut node at the
root of the tree if there are more than fan-out leaves in the tree, but they
all crowd into the same slot in the lowest level (ie. they all have the same
first nibble of their index keys).
When assoc_array_gc() returns back up the tree after scanning some leaves, it
can fall off of the root and crash because it assumes that the back pointer
from a shortcut (after label ascend_old_tree) must point to a normal node -
which isn't true of a shortcut node at the root.
Should we find we're ascending rootwards over a shortcut, we should check to
see if the backpointer is zero - and if it is, we have completed the scan.
This particular bug cannot occur if the root node is not a shortcut - ie. if
you have fewer than 17 keys in a keyring or if you have at least two keys that
sit into separate slots (eg. a keyring and a non keyring).
This can be reproduced by:
ring=`keyctl newring bar @s`
for ((i=1; i<=18; i++)); do last_key=`keyctl newring foo$i $ring`; done
keyctl timeout $last_key 2
Doing this:
echo 3 >/proc/sys/kernel/keys/gc_delay
first will speed things up.
If we do fall off of the top of the tree, we get the following oops:
BUG: unable to handle kernel NULL pointer dereference at 0000000000000018
IP: [<ffffffff8136cea7>] assoc_array_gc+0x2f7/0x540
PGD dae15067 PUD cfc24067 PMD 0
Oops: 0000 [#1] SMP
Modules linked in: xt_nat xt_mark nf_conntrack_netbios_ns nf_conntrack_broadcast ip6t_rpfilter ip6t_REJECT xt_conntrack ebtable_nat ebtable_broute bridge stp llc ebtable_filter ebtables ip6table_ni
CPU: 0 PID: 26011 Comm: kworker/0:1 Not tainted 3.14.9-200.fc20.x86_64 #1
Hardware name: Bochs Bochs, BIOS Bochs 01/01/2011
Workqueue: events key_garbage_collector
task: ffff8800918bd580 ti: ffff8800aac14000 task.ti: ffff8800aac14000
RIP: 0010:[<ffffffff8136cea7>] [<ffffffff8136cea7>] assoc_array_gc+0x2f7/0x540
RSP: 0018:ffff8800aac15d40 EFLAGS: 00010206
RAX: 0000000000000000 RBX: 0000000000000000 RCX: ffff8800aaecacc0
RDX: ffff8800daecf440 RSI: 0000000000000001 RDI: ffff8800aadc2bc0
RBP: ffff8800aac15da8 R08: 0000000000000001 R09: 0000000000000003
R10: ffffffff8136ccc7 R11: 0000000000000000 R12: 0000000000000000
R13: 0000000000000000 R14: 0000000000000070 R15: 0000000000000001
FS: 0000000000000000(0000) GS:ffff88011fc00000(0000) knlGS:0000000000000000
CS: 0010 DS: 0000 ES: 0000 CR0: 000000008005003b
CR2: 0000000000000018 CR3: 00000000db10d000 CR4: 00000000000006f0
Stack:
ffff8800aac15d50 0000000000000011 ffff8800aac15db8 ffffffff812e2a70
ffff880091a00600 0000000000000000 ffff8800aadc2bc3 00000000cd42c987
ffff88003702df20 ffff88003702dfa0 0000000053b65c09 ffff8800aac15fd8
Call Trace:
[<ffffffff812e2a70>] ? keyring_detect_cycle_iterator+0x30/0x30
[<ffffffff812e3e75>] keyring_gc+0x75/0x80
[<ffffffff812e1424>] key_garbage_collector+0x154/0x3c0
[<ffffffff810a67b6>] process_one_work+0x176/0x430
[<ffffffff810a744b>] worker_thread+0x11b/0x3a0
[<ffffffff810a7330>] ? rescuer_thread+0x3b0/0x3b0
[<ffffffff810ae1a8>] kthread+0xd8/0xf0
[<ffffffff810ae0d0>] ? insert_kthread_work+0x40/0x40
[<ffffffff816ffb7c>] ret_from_fork+0x7c/0xb0
[<ffffffff810ae0d0>] ? insert_kthread_work+0x40/0x40
Code: 08 4c 8b 22 0f 84 bf 00 00 00 41 83 c7 01 49 83 e4 fc 41 83 ff 0f 4c 89 65 c0 0f 8f 5a fe ff ff 48 8b 45 c0 4d 63 cf 49 83 c1 02 <4e> 8b 34 c8 4d 85 f6 0f 84 be 00 00 00 41 f6 c6 01 0f 84 92
RIP [<ffffffff8136cea7>] assoc_array_gc+0x2f7/0x540
RSP <ffff8800aac15d40>
CR2: 0000000000000018
---[ end trace 1129028a088c0cbd ]---
Signed-off-by: David Howells <[email protected]>
Acked-by: Don Zickus <[email protected]>
Signed-off-by: James Morris <[email protected]>
|
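A tiny model of the fixed termination condition, with invented names; the real code walks assoc_array shortcut and node pointers, but the shape of the check is the point:

#include <stddef.h>

struct cursor {
    struct cursor *back_pointer;
};

/* While ascending after a scan, a NULL back pointer means we were at a
 * shortcut sitting at the root: the scan is complete, not a parent
 * node to dereference. */
static int ascend_one(struct cursor **pos)
{
    struct cursor *parent = (*pos)->back_pointer;
    if (parent == NULL)
        return 1;  /* root reached: GC scan finished */
    *pos = parent;
    return 0;      /* keep ascending */
}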
HeaderUtility::requestHeadersValid(const RequestHeaderMap& headers) {
// Make sure the host is valid.
if (headers.Host() && !HeaderUtility::authorityIsValid(headers.Host()->value().getStringView())) {
return SharedResponseCodeDetails::get().InvalidAuthority;
}
return absl::nullopt;
}
| 0 |
[] |
envoy
|
2c60632d41555ec8b3d9ef5246242be637a2db0f
| 86,713,534,945,079,820,000,000,000,000,000,000,000 | 7 |
http: header map security fixes for duplicate headers (#197)
Previously header matching did not match on all headers for
non-inline headers. This patch changes the default behavior to
always logically match on all headers. Multiple individual
headers will be logically concatenated with ',' similar to what
is done with inline headers. This makes the behavior effectively
consistent. This behavior can be temporarily reverted by setting
the runtime value "envoy.reloadable_features.header_match_on_all_headers"
to "false".
Targeted fixes have been additionally performed on the following
extensions which make them consider all duplicate headers by default as
a comma concatenated list:
1) Any extension using CEL matching on headers.
2) The header to metadata filter.
3) The JWT filter.
4) The Lua filter.
Like primary header matching used in routing, RBAC, etc. this behavior
can be disabled by setting the runtime value
"envoy.reloadable_features.header_match_on_all_headers" to false.
Finally, the setCopy() header map API previously only set the first
header in the case of duplicate non-inline headers. setCopy() now
behaves similarly to the other set*() APIs and replaces all found
headers with a single value. This may have had security implications
in the extauth filter which uses this API. This behavior can be disabled
by setting the runtime value
"envoy.reloadable_features.http_set_copy_replace_all_headers" to false.
Fixes https://github.com/envoyproxy/envoy-setec/issues/188
Signed-off-by: Matt Klein <[email protected]>
|
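The comma-concatenation semantics described above can be sketched as a small joiner; illustrative C with invented names, not Envoy's header map:

#include <stdio.h>

/* Join the values of a repeated header with ',' so matching sees one
 * logical value, mirroring how inline headers already behave. */
static void join_header_values(char *out, size_t out_sz,
                               const char *const *vals, size_t n)
{
    size_t used = 0;
    if (out_sz == 0)
        return;
    out[0] = '\0';
    for (size_t i = 0; i < n; i++) {
        int w = snprintf(out + used, out_sz - used,
                         "%s%s", i ? "," : "", vals[i]);
        if (w < 0 || (size_t)w >= out_sz - used)
            break; /* truncated: stop appending */
        used += (size_t)w;
    }
}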
static int handle_wbinvd(struct kvm_vcpu *vcpu)
{
skip_emulated_instruction(vcpu);
kvm_emulate_wbinvd(vcpu);
return 1;
}
| 0 |
[
"CWE-400"
] |
linux-2.6
|
9581d442b9058d3699b4be568b6e5eae38a41493
| 143,610,620,612,462,660,000,000,000,000,000,000,000 | 6 |
KVM: Fix fs/gs reload oops with invalid ldt
kvm reloads the host's fs and gs blindly; however, the underlying segment
descriptors may be invalid due to the user modifying the ldt after loading
them.
Fix by using the safe accessors (loadsegment() and load_gs_index()) instead
of home grown unsafe versions.
This is CVE-2010-3698.
KVM-Stable-Tag.
Signed-off-by: Avi Kivity <[email protected]>
Signed-off-by: Marcelo Tosatti <[email protected]>
|
TEST_F(QueryPlannerTest, CannotTrimIxisectParam) {
params.options = QueryPlannerParams::INDEX_INTERSECTION;
params.options |= QueryPlannerParams::NO_TABLE_SCAN;
addIndex(BSON("a" << 1));
addIndex(BSON("b" << 1));
runQuery(fromjson("{a: 1, b: 1, c: 1}"));
assertNumSolutions(3U);
assertSolutionExists(
"{fetch: {filter: {b: 1, c: 1}, node: "
"{ixscan: {filter: null, pattern: {a: 1}}}}}");
assertSolutionExists(
"{fetch: {filter: {a: 1, c: 1}, node: "
"{ixscan: {filter: null, pattern: {b: 1}}}}}");
assertSolutionExists(
"{fetch: {filter: {a:1,b:1,c:1}, node: {andSorted: {nodes: ["
"{ixscan: {filter: null, pattern: {a:1}}},"
"{ixscan: {filter: null, pattern: {b:1}}}]}}}}");
}
| 0 |
[] |
mongo
|
ee97c0699fd55b498310996ee002328e533681a3
| 154,900,453,401,042,900,000,000,000,000,000,000,000 | 21 |
SERVER-36993 Fix crash due to incorrect $or pushdown for indexed $expr.
|
static int qib_do_user_init(struct file *fp,
const struct qib_user_info *uinfo)
{
int ret;
struct qib_ctxtdata *rcd = ctxt_fp(fp);
struct qib_devdata *dd;
unsigned uctxt;
/* Subctxts don't need to initialize anything since master did it. */
if (subctxt_fp(fp)) {
ret = wait_event_interruptible(rcd->wait,
!test_bit(QIB_CTXT_MASTER_UNINIT, &rcd->flag));
goto bail;
}
dd = rcd->dd;
/* some ctxts may get extra buffers, calculate that here */
uctxt = rcd->ctxt - dd->first_user_ctxt;
if (uctxt < dd->ctxts_extrabuf) {
rcd->piocnt = dd->pbufsctxt + 1;
rcd->pio_base = rcd->piocnt * uctxt;
} else {
rcd->piocnt = dd->pbufsctxt;
rcd->pio_base = rcd->piocnt * uctxt +
dd->ctxts_extrabuf;
}
/*
* All user buffers are 2KB buffers. If we ever support
* giving 4KB buffers to user processes, this will need some
* work. Can't use piobufbase directly, because it has
* both 2K and 4K buffer base values. So check and handle.
*/
if ((rcd->pio_base + rcd->piocnt) > dd->piobcnt2k) {
if (rcd->pio_base >= dd->piobcnt2k) {
qib_dev_err(dd,
"%u:ctxt%u: no 2KB buffers available\n",
dd->unit, rcd->ctxt);
ret = -ENOBUFS;
goto bail;
}
rcd->piocnt = dd->piobcnt2k - rcd->pio_base;
qib_dev_err(dd, "Ctxt%u: would use 4KB bufs, using %u\n",
rcd->ctxt, rcd->piocnt);
}
rcd->piobufs = dd->pio2k_bufbase + rcd->pio_base * dd->palign;
qib_chg_pioavailkernel(dd, rcd->pio_base, rcd->piocnt,
TXCHK_CHG_TYPE_USER, rcd);
/*
* try to ensure that processes start up with consistent avail update
* for their own range, at least. If system very quiet, it might
* have the in-memory copy out of date at startup for this range of
* buffers, when a context gets re-used. Do after the chg_pioavail
* and before the rest of setup, so it's "almost certain" the dma
* will have occurred (can't 100% guarantee, but should be many
* decimals of 9s, with this ordering), given how much else happens
* after this.
*/
dd->f_sendctrl(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
/*
* Now allocate the rcvhdr Q and eager TIDs; skip the TID
* array for the time being. If rcd->ctxt > chip-supported,
* we need to do extra work here, handling overflow
* through ctxt 0, someday
*/
ret = qib_create_rcvhdrq(dd, rcd);
if (!ret)
ret = qib_setup_eagerbufs(rcd);
if (ret)
goto bail_pio;
rcd->tidcursor = 0; /* start at beginning after open */
/* initialize poll variables... */
rcd->urgent = 0;
rcd->urgent_poll = 0;
/*
* Now enable the ctxt for receive.
* For chips that are set to DMA the tail register to memory
* when they change (and when the update bit transitions from
* 0 to 1. So for those chips, we turn it off and then back on.
* This will (very briefly) affect any other open ctxts, but the
* duration is very short, and therefore isn't an issue. We
* explicitly set the in-memory tail copy to 0 beforehand, so we
* don't have to wait to be sure the DMA update has happened
* (chip resets head/tail to 0 on transition to enable).
*/
if (rcd->rcvhdrtail_kvaddr)
qib_clear_rcvhdrtail(rcd);
dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_CTXT_ENB | QIB_RCVCTRL_TIDFLOW_ENB,
rcd->ctxt);
/* Notify any waiting slaves */
if (rcd->subctxt_cnt) {
clear_bit(QIB_CTXT_MASTER_UNINIT, &rcd->flag);
wake_up(&rcd->wait);
}
return 0;
bail_pio:
qib_chg_pioavailkernel(dd, rcd->pio_base, rcd->piocnt,
TXCHK_CHG_TYPE_KERN, rcd);
bail:
return ret;
}
| 0 |
[
"CWE-284",
"CWE-264"
] |
linux
|
e6bd18f57aad1a2d1ef40e646d03ed0f2515c9e3
| 116,217,248,513,297,280,000,000,000,000,000,000,000 | 110 |
IB/security: Restrict use of the write() interface
The drivers/infiniband stack uses write() as a replacement for
bi-directional ioctl(). This is not safe. There are ways to
trigger write calls that result in the return structure that
is normally written to user space being shunted off to user
specified kernel memory instead.
For the immediate repair, detect and deny suspicious accesses to
the write API.
For long term, update the user space libraries and the kernel API
to something that doesn't present the same security vulnerabilities
(likely a structured ioctl() interface).
The impacted uAPI interfaces are generally only available if
hardware from drivers/infiniband is installed in the system.
Reported-by: Jann Horn <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
Signed-off-by: Jason Gunthorpe <[email protected]>
[ Expanded check to all known write() entry points ]
Cc: [email protected]
Signed-off-by: Doug Ledford <[email protected]>
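A sketch of the mitigation pattern described above (kernel-style fragment; the helper name and checks follow the upstream commit as I understand it): deny write() calls whose credentials or address limit differ from the caller's.
static inline bool ib_safe_file_access(struct file *filp)
{
	return filp->f_cred == current_cred() &&
	       segment_eq(get_fs(), USER_DS);
}
/* at the top of each write() entry point: */
	if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
		return -EACCES;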
|
void perf_prepare_sample(struct perf_event_header *header,
struct perf_sample_data *data,
struct perf_event *event,
struct pt_regs *regs)
{
u64 sample_type = event->attr.sample_type;
header->type = PERF_RECORD_SAMPLE;
header->size = sizeof(*header) + event->header_size;
header->misc = 0;
header->misc |= perf_misc_flags(regs);
__perf_event_header__init_id(header, data, event);
if (sample_type & PERF_SAMPLE_IP)
data->ip = perf_instruction_pointer(regs);
if (sample_type & PERF_SAMPLE_CALLCHAIN) {
int size = 1;
data->callchain = perf_callchain(event, regs);
if (data->callchain)
size += data->callchain->nr;
header->size += size * sizeof(u64);
}
if (sample_type & PERF_SAMPLE_RAW) {
int size = sizeof(u32);
if (data->raw)
size += data->raw->size;
else
size += sizeof(u32);
WARN_ON_ONCE(size & (sizeof(u64)-1));
header->size += size;
}
if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
int size = sizeof(u64); /* nr */
if (data->br_stack) {
size += data->br_stack->nr
* sizeof(struct perf_branch_entry);
}
header->size += size;
}
if (sample_type & PERF_SAMPLE_REGS_USER) {
/* regs dump ABI info */
int size = sizeof(u64);
perf_sample_regs_user(&data->regs_user, regs);
if (data->regs_user.regs) {
u64 mask = event->attr.sample_regs_user;
size += hweight64(mask) * sizeof(u64);
}
header->size += size;
}
if (sample_type & PERF_SAMPLE_STACK_USER) {
/*
* Either we need PERF_SAMPLE_STACK_USER bit to be always
* processed as the last one or have additional check added
* in case new sample type is added, because we could eat
* up the rest of the sample size.
*/
struct perf_regs_user *uregs = &data->regs_user;
u16 stack_size = event->attr.sample_stack_user;
u16 size = sizeof(u64);
if (!uregs->abi)
perf_sample_regs_user(uregs, regs);
stack_size = perf_sample_ustack_size(stack_size, header->size,
uregs->regs);
/*
* If there is something to dump, add space for the dump
* itself and for the field that tells the dynamic size,
* which is how many have been actually dumped.
*/
if (stack_size)
size += sizeof(u64) + stack_size;
data->stack_user_size = stack_size;
header->size += size;
}
}
| 0 |
[
"CWE-703",
"CWE-189"
] |
linux
|
8176cced706b5e5d15887584150764894e94e02f
| 48,414,461,634,263,955,000,000,000,000,000,000,000 | 93 |
perf: Treat attr.config as u64 in perf_swevent_init()
Trinity discovered that we fail to check all 64 bits of
attr.config passed by user space, resulting to out-of-bounds
access of the perf_swevent_enabled array in
sw_perf_event_destroy().
Introduced in commit b0a873ebb ("perf: Register PMU
implementations").
Signed-off-by: Tommi Rantala <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: [email protected]
Cc: Paul Mackerras <[email protected]>
Cc: Arnaldo Carvalho de Melo <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
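The essence of the fix, as a hedged kernel-style fragment: keep the user-supplied config in a u64 and bounds-check the full value before it is ever used as an array index.
	u64 event_id = event->attr.config;  /* previously truncated to int */
	if (event_id >= PERF_COUNT_SW_MAX)  /* full 64-bit comparison */
		return -ENOENT;
	/* only now is perf_swevent_enabled[event_id] a safe access */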
|
explicit DenseCount(OpKernelConstruction* context) : OpKernel(context) {
OP_REQUIRES_OK(context, context->GetAttr("minlength", &minlength_));
OP_REQUIRES_OK(context, context->GetAttr("maxlength", &maxlength_));
OP_REQUIRES_OK(context, context->GetAttr("binary_output", &binary_output_));
}
| 0 |
[
"CWE-703",
"CWE-122",
"CWE-787"
] |
tensorflow
|
3cbb917b4714766030b28eba9fb41bb97ce9ee02
| 288,622,890,649,243,200,000,000,000,000,000,000,000 | 5 |
Fix multiple vulnerabilities in `tf.raw_ops.*CountSparseOutput`.
Also add tests for these API points, both for the happy paths and for the vulnerable ones.
PiperOrigin-RevId: 332563222
Change-Id: Ib3b52116a83a134c2e742a7c66e5e956db8fba05
|
void nbt_name_socket_handle_response_packet(struct nbt_name_request *req,
struct nbt_name_packet *packet,
struct socket_address *src)
{
/* if this is a WACK response, we need to go back to waiting,
but perhaps increase the timeout */
if ((packet->operation & NBT_OPCODE) == NBT_OPCODE_WACK) {
uint32_t ttl;
if (req->received_wack || packet->ancount < 1) {
nbt_name_request_destructor(req);
req->status = NT_STATUS_INVALID_NETWORK_RESPONSE;
req->state = NBT_REQUEST_ERROR;
goto done;
}
talloc_free(req->te);
/* we know we won't need any more retries - the server
has received our request */
req->num_retries = 0;
req->received_wack = true;
/*
* there is a timeout in the packet,
* it is 5 + 4 * num_old_addresses
*
* although w2k3 screws it up
* and uses num_old_addresses = 0
*
* so we better fallback to the maximum
* of num_old_addresses = 25 if we got
* a timeout of less than 9s (5 + 4*1)
* or more than 105s (5 + 4*25).
*/
ttl = packet->answers[0].ttl;
if ((ttl < (5 + 4*1)) || (ttl > (5 + 4*25))) {
ttl = 5 + 4*25;
}
req->timeout = ttl;
req->te = tevent_add_timer(req->nbtsock->event_ctx, req,
timeval_current_ofs(req->timeout, 0),
nbt_name_socket_timeout, req);
return;
}
req->replies = talloc_realloc(req, req->replies, struct nbt_name_reply, req->num_replies+1);
if (req->replies == NULL) {
nbt_name_request_destructor(req);
req->state = NBT_REQUEST_ERROR;
req->status = NT_STATUS_NO_MEMORY;
goto done;
}
talloc_steal(req, src);
req->replies[req->num_replies].dest = src;
talloc_steal(req, packet);
req->replies[req->num_replies].packet = packet;
req->num_replies++;
/* if we don't want multiple replies then we are done */
if (req->allow_multiple_replies &&
req->num_replies < NBT_MAX_REPLIES) {
return;
}
nbt_name_request_destructor(req);
req->state = NBT_REQUEST_DONE;
req->status = NT_STATUS_OK;
done:
if (req->async.fn) {
req->async.fn(req);
}
}
| 0 |
[
"CWE-834"
] |
samba
|
3cc0f1eeda5f133532dda31eef9fc1b394127e50
| 322,080,291,333,469,900,000,000,000,000,000,000,000 | 72 |
CVE-2020-14303: s4 nbt: fix busy loop on empty UDP packet
An empty UDP packet put the nbt server into a busy loop that consumes
100% of a cpu.
BUG: https://bugzilla.samba.org/show_bug.cgi?id=14417
Signed-off-by: Gary Lockyer <[email protected]>
Autobuild-User(master): Karolin Seeger <[email protected]>
Autobuild-Date(master): Thu Jul 2 10:26:24 UTC 2020 on sn-devel-184
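A generic socket-level illustration of the fix's idea (not the samba tdgram code): always consume the datagram, and drop zero-length packets instead of returning before the read, so the event loop cannot spin on them.
	ret = recvfrom(fd, buf, sizeof(buf), 0,
	               (struct sockaddr *)&src, &srclen);
	if (ret <= 0)
		return;   /* empty or failed datagram: consumed and dropped */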
|
read_from_cmd_socket(void *anything)
{
int status;
int read_length; /* Length of packet read */
int expected_length; /* Expected length of packet without auth data */
unsigned long flags;
CMD_Request rx_message;
CMD_Reply tx_message, *prev_tx_message;
int rx_message_length, tx_message_length;
int sock_fd;
union sockaddr_in46 where_from;
socklen_t from_length;
IPAddr remote_ip;
unsigned short remote_port;
int auth_length;
int auth_ok;
int utoken_ok, token_ok;
int issue_token;
int valid_ts;
int authenticated;
int localhost;
int allowed;
unsigned short rx_command;
unsigned long rx_message_token;
unsigned long tx_message_token;
unsigned long rx_message_seq;
unsigned long rx_attempt;
struct timeval now;
struct timeval cooked_now;
flags = 0;
rx_message_length = sizeof(rx_message);
from_length = sizeof(where_from);
sock_fd = (long)anything;
status = recvfrom(sock_fd, (char *)&rx_message, rx_message_length, flags,
&where_from.u, &from_length);
if (status < 0) {
LOG(LOGS_WARN, LOGF_CmdMon, "Error [%s] reading from control socket %d",
strerror(errno), sock_fd);
return;
}
read_length = status;
expected_length = PKL_CommandLength(&rx_message);
rx_command = ntohs(rx_message.command);
LCL_ReadRawTime(&now);
LCL_CookTime(&now, &cooked_now, NULL);
tx_message.version = PROTO_VERSION_NUMBER;
tx_message.pkt_type = PKT_TYPE_CMD_REPLY;
tx_message.res1 = 0;
tx_message.res2 = 0;
tx_message.command = rx_message.command;
tx_message.sequence = rx_message.sequence;
tx_message.reply = htons(RPY_NULL);
tx_message.number = htons(1);
tx_message.total = htons(1);
tx_message.pad1 = 0;
tx_message.utoken = htonl(utoken);
/* Set this to a default (invalid) value. This protects against the
token field being set to an arbitrary value if we reject the
message, e.g. due to the host failing the access check. */
tx_message.token = htonl(0xffffffffUL);
memset(&tx_message.auth, 0, sizeof(tx_message.auth));
switch (where_from.u.sa_family) {
case AF_INET:
remote_ip.family = IPADDR_INET4;
remote_ip.addr.in4 = ntohl(where_from.in4.sin_addr.s_addr);
remote_port = ntohs(where_from.in4.sin_port);
localhost = (remote_ip.addr.in4 == 0x7f000001UL);
break;
#ifdef HAVE_IPV6
case AF_INET6:
remote_ip.family = IPADDR_INET6;
memcpy(&remote_ip.addr.in6, where_from.in6.sin6_addr.s6_addr,
sizeof (remote_ip.addr.in6));
remote_port = ntohs(where_from.in6.sin6_port);
/* Check for ::1 */
for (localhost = 0; localhost < 16; localhost++)
if (remote_ip.addr.in6[localhost] != 0)
break;
localhost = (localhost == 15 && remote_ip.addr.in6[localhost] == 1);
break;
#endif
default:
assert(0);
}
allowed = ADF_IsAllowed(access_auth_table, &remote_ip) || localhost;
if (read_length < offsetof(CMD_Request, data) ||
rx_message.pkt_type != PKT_TYPE_CMD_REQUEST ||
rx_message.res1 != 0 ||
rx_message.res2 != 0) {
/* We don't know how to process anything like this */
if (allowed)
CLG_LogCommandAccess(&remote_ip, CLG_CMD_BAD_PKT, cooked_now.tv_sec);
return;
}
if (rx_message.version != PROTO_VERSION_NUMBER) {
tx_message.status = htons(STT_NOHOSTACCESS);
if (!LOG_RateLimited()) {
LOG(LOGS_WARN, LOGF_CmdMon, "Read command packet with protocol version %d (expected %d) from %s:%hu", rx_message.version, PROTO_VERSION_NUMBER, UTI_IPToString(&remote_ip), remote_port);
}
if (allowed)
CLG_LogCommandAccess(&remote_ip, CLG_CMD_BAD_PKT, cooked_now.tv_sec);
if (rx_message.version >= PROTO_VERSION_MISMATCH_COMPAT) {
tx_message.status = htons(STT_BADPKTVERSION);
/* add empty MD5 auth so older clients will not drop
the reply due to bad length */
memset(((char *)&tx_message) + PKL_ReplyLength(&tx_message), 0, 16);
transmit_reply(&tx_message, &where_from, 16);
}
return;
}
if (rx_command >= N_REQUEST_TYPES) {
if (!LOG_RateLimited()) {
LOG(LOGS_WARN, LOGF_CmdMon, "Read command packet with invalid command %d from %s:%hu", rx_command, UTI_IPToString(&remote_ip), remote_port);
}
if (allowed)
CLG_LogCommandAccess(&remote_ip, CLG_CMD_BAD_PKT, cooked_now.tv_sec);
tx_message.status = htons(STT_INVALID);
transmit_reply(&tx_message, &where_from, 0);
return;
}
if (read_length < expected_length) {
if (!LOG_RateLimited()) {
LOG(LOGS_WARN, LOGF_CmdMon, "Read incorrectly sized command packet from %s:%hu", UTI_IPToString(&remote_ip), remote_port);
}
if (allowed)
CLG_LogCommandAccess(&remote_ip, CLG_CMD_BAD_PKT, cooked_now.tv_sec);
tx_message.status = htons(STT_BADPKTLENGTH);
transmit_reply(&tx_message, &where_from, 0);
return;
}
if (!allowed) {
/* The client is not allowed access, so don't waste any more time
on him. Note that localhost is always allowed access
regardless of the defined access rules - otherwise, we could
shut ourselves out completely! */
if (!LOG_RateLimited()) {
LOG(LOGS_WARN, LOGF_CmdMon, "Command packet received from unauthorised host %s port %d",
UTI_IPToString(&remote_ip),
remote_port);
}
tx_message.status = htons(STT_NOHOSTACCESS);
transmit_reply(&tx_message, &where_from, 0);
return;
}
/* OK, we have a valid message. Now dispatch on message type and process it. */
/* Do authentication stuff and command tokens here. Well-behaved
clients will set their utokens to 0 to save us wasting our time
if the packet is unauthenticatable. */
if (rx_message.utoken != 0) {
auth_ok = check_rx_packet_auth(&rx_message, read_length);
} else {
auth_ok = 0;
}
/* All this malarky is to protect the system against various forms
of attack.
Simple packet forgeries are blocked by requiring the packet to
authenticate properly with MD5 or other crypto hash. (The
assumption is that the command key is in a read-only keys file
read by the daemon, and is known only to administrators.)
Replay attacks are prevented by 2 fields in the packet. The
'token' field is where the client plays back to us a token that
he was issued in an earlier reply. Each time we reply to a
suitable packet, we issue a new token. The 'utoken' field is set
to a new (hopefully increasing) value each time the daemon is
run. This prevents packets from a previous incarnation being
played back at us when the same point in the 'token' sequence
comes up. (The token mechanism also prevents a non-idempotent
command from being executed twice from the same client, if the
client fails to receive our reply the first time and tries a
resend.)
The problem is how a client should get its first token. Our
token handling only remembers a finite number of issued tokens
(actually 32) - if a client replies with a (legitimate) token
older than that, it will be treated as though a duplicate token
has been supplied. If a simple token-request protocol were used,
the whole thing would be vulnerable to a denial of service
attack, where an attacker just replays valid token-request
packets at us, causing us to keep issuing new tokens,
invalidating all the ones we have given out to true clients
already.
To protect against this, the token-request (REQ_LOGON) packet
includes a timestamp field. To issue a token, we require that
this field is different from any we have processed before. To
bound our storage, we require that the timestamp is within a
certain period of our current time. For clients running on the
same host this will be easily satisfied.
*/
utoken_ok = (ntohl(rx_message.utoken) == utoken);
/* Avoid binning a valid user's token if we merely get a forged
packet */
rx_message_token = ntohl(rx_message.token);
rx_message_seq = ntohl(rx_message.sequence);
rx_attempt = ntohs(rx_message.attempt);
if (auth_ok && utoken_ok) {
token_ok = check_token(rx_message_token);
} else {
token_ok = 0;
}
if (auth_ok && utoken_ok && !token_ok) {
/* This might be a resent message, due to the client not getting
our reply to the first attempt. See if we can find the message. */
prev_tx_message = lookup_reply(rx_message_token, rx_message_seq, rx_attempt);
if (prev_tx_message) {
/* Just send this message again */
tx_message_length = PKL_ReplyLength(prev_tx_message);
status = sendto(sock_fd, (void *) prev_tx_message, tx_message_length, 0,
&where_from.u, from_length);
if (status < 0 && !LOG_RateLimited()) {
LOG(LOGS_WARN, LOGF_CmdMon, "Could not send response to %s:%hu", UTI_IPToString(&remote_ip), remote_port);
}
return;
}
/* Otherwise, just fall through into normal processing */
}
if (auth_ok && utoken_ok && token_ok) {
/* See whether we can discard the previous reply from storage */
token_acknowledged(rx_message_token, &now);
}
valid_ts = 0;
if (auth_ok) {
struct timeval ts;
UTI_TimevalNetworkToHost(&rx_message.data.logon.ts, &ts);
if ((utoken_ok && token_ok) ||
((ntohl(rx_message.utoken) == SPECIAL_UTOKEN) &&
(rx_command == REQ_LOGON) &&
(valid_ts = ts_is_unique_and_not_stale(&ts, &now))))
issue_token = 1;
else
issue_token = 0;
} else {
issue_token = 0;
}
authenticated = auth_ok & utoken_ok & token_ok;
if (authenticated) {
CLG_LogCommandAccess(&remote_ip, CLG_CMD_AUTH, cooked_now.tv_sec);
} else {
CLG_LogCommandAccess(&remote_ip, CLG_CMD_NORMAL, cooked_now.tv_sec);
}
if (issue_token) {
/* Only command clients where the user has apparently 'logged on'
get a token to allow them to emit an authenticated command next
time */
tx_message_token = get_token();
} else {
tx_message_token = 0xffffffffUL;
}
tx_message.token = htonl(tx_message_token);
if (rx_command >= N_REQUEST_TYPES) {
/* This should already be handled */
assert(0);
} else {
allowed = 0;
/* Check level of authority required to issue the command */
switch(permissions[rx_command]) {
case PERMIT_AUTH:
if (authenticated) {
allowed = 1;
} else {
allowed = 0;
}
break;
case PERMIT_LOCAL:
if (authenticated || localhost) {
allowed = 1;
} else {
allowed = 0;
}
break;
case PERMIT_OPEN:
allowed = 1;
break;
default:
assert(0);
}
if (allowed) {
switch(rx_command) {
case REQ_NULL:
handle_null(&rx_message, &tx_message);
break;
case REQ_ONLINE:
handle_online(&rx_message, &tx_message);
break;
case REQ_OFFLINE:
handle_offline(&rx_message, &tx_message);
break;
case REQ_BURST:
handle_burst(&rx_message, &tx_message);
break;
case REQ_MODIFY_MINPOLL:
handle_modify_minpoll(&rx_message, &tx_message);
break;
case REQ_MODIFY_MAXPOLL:
handle_modify_maxpoll(&rx_message, &tx_message);
break;
case REQ_DUMP:
SRC_DumpSources();
tx_message.status = htons(STT_SUCCESS);
break;
case REQ_MODIFY_MAXDELAY:
handle_modify_maxdelay(&rx_message, &tx_message);
break;
case REQ_MODIFY_MAXDELAYRATIO:
handle_modify_maxdelayratio(&rx_message, &tx_message);
break;
case REQ_MODIFY_MAXDELAYDEVRATIO:
handle_modify_maxdelaydevratio(&rx_message, &tx_message);
break;
case REQ_MODIFY_MAXUPDATESKEW:
handle_modify_maxupdateskew(&rx_message, &tx_message);
break;
case REQ_LOGON:
/* If the log-on fails, record the reason why */
if (!issue_token && !LOG_RateLimited()) {
LOG(LOGS_WARN, LOGF_CmdMon,
"Bad command logon from %s port %d (auth_ok=%d valid_ts=%d)",
UTI_IPToString(&remote_ip),
remote_port,
auth_ok, valid_ts);
}
if (issue_token == 1) {
tx_message.status = htons(STT_SUCCESS);
} else if (!auth_ok) {
tx_message.status = htons(STT_UNAUTH);
} else if (!valid_ts) {
tx_message.status = htons(STT_INVALIDTS);
} else {
tx_message.status = htons(STT_FAILED);
}
break;
case REQ_SETTIME:
handle_settime(&rx_message, &tx_message);
break;
case REQ_LOCAL:
handle_local(&rx_message, &tx_message);
break;
case REQ_MANUAL:
handle_manual(&rx_message, &tx_message);
break;
case REQ_N_SOURCES:
handle_n_sources(&rx_message, &tx_message);
break;
case REQ_SOURCE_DATA:
handle_source_data(&rx_message, &tx_message);
break;
case REQ_REKEY:
handle_rekey(&rx_message, &tx_message);
break;
case REQ_ALLOW:
handle_allow(&rx_message, &tx_message);
break;
case REQ_ALLOWALL:
handle_allowall(&rx_message, &tx_message);
break;
case REQ_DENY:
handle_deny(&rx_message, &tx_message);
break;
case REQ_DENYALL:
handle_denyall(&rx_message, &tx_message);
break;
case REQ_CMDALLOW:
handle_cmdallow(&rx_message, &tx_message);
break;
case REQ_CMDALLOWALL:
handle_cmdallowall(&rx_message, &tx_message);
break;
case REQ_CMDDENY:
handle_cmddeny(&rx_message, &tx_message);
break;
case REQ_CMDDENYALL:
handle_cmddenyall(&rx_message, &tx_message);
break;
case REQ_ACCHECK:
handle_accheck(&rx_message, &tx_message);
break;
case REQ_CMDACCHECK:
handle_cmdaccheck(&rx_message, &tx_message);
break;
case REQ_ADD_SERVER:
handle_add_source(NTP_SERVER, &rx_message, &tx_message);
break;
case REQ_ADD_PEER:
handle_add_source(NTP_PEER, &rx_message, &tx_message);
break;
case REQ_DEL_SOURCE:
handle_del_source(&rx_message, &tx_message);
break;
case REQ_WRITERTC:
handle_writertc(&rx_message, &tx_message);
break;
case REQ_DFREQ:
handle_dfreq(&rx_message, &tx_message);
break;
case REQ_DOFFSET:
handle_doffset(&rx_message, &tx_message);
break;
case REQ_TRACKING:
handle_tracking(&rx_message, &tx_message);
break;
case REQ_SOURCESTATS:
handle_sourcestats(&rx_message, &tx_message);
break;
case REQ_RTCREPORT:
handle_rtcreport(&rx_message, &tx_message);
break;
case REQ_TRIMRTC:
handle_trimrtc(&rx_message, &tx_message);
break;
case REQ_CYCLELOGS:
handle_cyclelogs(&rx_message, &tx_message);
break;
case REQ_SUBNETS_ACCESSED:
handle_subnets_accessed(&rx_message, &tx_message);
break;
case REQ_CLIENT_ACCESSES:
handle_client_accesses(&rx_message, &tx_message);
break;
case REQ_CLIENT_ACCESSES_BY_INDEX:
handle_client_accesses_by_index(&rx_message, &tx_message);
break;
case REQ_MANUAL_LIST:
handle_manual_list(&rx_message, &tx_message);
break;
case REQ_MANUAL_DELETE:
handle_manual_delete(&rx_message, &tx_message);
break;
case REQ_MAKESTEP:
handle_make_step(&rx_message, &tx_message);
break;
case REQ_ACTIVITY:
handle_activity(&rx_message, &tx_message);
break;
case REQ_RESELECTDISTANCE:
handle_reselect_distance(&rx_message, &tx_message);
break;
case REQ_RESELECT:
handle_reselect(&rx_message, &tx_message);
break;
case REQ_MODIFY_MINSTRATUM:
handle_modify_minstratum(&rx_message, &tx_message);
break;
case REQ_MODIFY_POLLTARGET:
handle_modify_polltarget(&rx_message, &tx_message);
break;
default:
assert(0);
break;
}
} else {
tx_message.status = htons(STT_UNAUTH);
}
}
if (auth_ok) {
auth_length = generate_tx_packet_auth(&tx_message);
} else {
auth_length = 0;
}
if (token_ok) {
save_reply(&tx_message,
rx_message_token,
tx_message_token,
rx_message_seq,
rx_attempt,
&now);
}
/* Transmit the response */
{
/* Include a simple way to lose one message in three to test resend */
static int do_it=1;
if (do_it) {
transmit_reply(&tx_message, &where_from, auth_length);
}
#if 0
do_it = ((do_it + 1) % 3);
#endif
}
}
| 1 |
[
"CWE-189"
] |
chrony
|
7712455d9aa33d0db0945effaa07e900b85987b1
| 227,761,901,160,840,420,000,000,000,000,000,000,000 | 580 |
Fix buffer overflow when processing crafted command packets
When the length of the REQ_SUBNETS_ACCESSED, REQ_CLIENT_ACCESSES
command requests and the RPY_SUBNETS_ACCESSED, RPY_CLIENT_ACCESSES,
RPY_CLIENT_ACCESSES_BY_INDEX, RPY_MANUAL_LIST command replies is
calculated, the number of items stored in the packet is not validated.
A crafted command request/reply can be used to crash the server/client.
Only clients allowed by cmdallow (by default only localhost) can crash
the server.
With chrony versions 1.25 and 1.26 this bug has a smaller security
impact as the server requires the clients to be authenticated in order
to process the subnet and client accesses commands. In 1.27 and 1.28,
however, the invalid calculated length is included also in the
authentication check which may cause another crash.
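A minimal, self-contained C sketch of the missing validation (names and the payload capacity are assumptions, not chrony's actual constants): reject an item count before it can push a computed reply length past the buffer.
#include <stdint.h>
#include <stddef.h>
#define MAX_PKT_DATA 1024u              /* assumed payload capacity */
static int checked_reply_length(uint32_t n_items, size_t item_size,
                                size_t header_size, size_t *out_len)
{
    if (item_size == 0 || header_size > MAX_PKT_DATA)
        return -1;
    if (n_items > (MAX_PKT_DATA - header_size) / item_size)
        return -1;                      /* crafted count: refuse it */
    *out_len = header_size + (size_t)n_items * item_size;
    return 0;
}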
|
njs_generate_2addr_operation(njs_vm_t *vm, njs_generator_t *generator,
njs_parser_node_t *node)
{
njs_generator_next(generator, njs_generate, node->left);
return njs_generator_after(vm, generator,
njs_queue_first(&generator->stack), node,
njs_generate_2addr_operation_end, NULL, 0);
}
| 0 |
[
"CWE-703",
"CWE-754"
] |
njs
|
404553896792b8f5f429dc8852d15784a59d8d3e
| 309,529,168,466,369,380,000,000,000,000,000,000,000 | 9 |
Fixed break instruction in a try-catch block.
Previously, JUMP offset for a break instruction inside a try-catch
block was not set to a correct offset during code generation
when a return instruction was present in inner try-catch block.
The fix is to update the JUMP offset appropriately.
This closes #553 issue on Github.
|
static int coolkey_get_init_and_get_count(list_t *list, int *countp)
{
*countp = list_size(list);
list_iterator_start(list);
return SC_SUCCESS;
}
| 0 |
[
"CWE-415"
] |
OpenSC
|
c246f6f69a749d4f68626b40795a4f69168008f4
| 102,012,555,199,028,380,000,000,000,000,000,000,000 | 6 |
coolkey: Make sure the object ID is unique when filling list
Thanks to oss-fuzz
https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=19208
|
static void hash_id(struct fuse *f, struct node *node)
{
size_t hash = node->nodeid % f->id_table_size;
node->id_next = f->id_table[hash];
f->id_table[hash] = node;
}
| 0 |
[] |
ntfs-3g
|
fb28eef6f1c26170566187c1ab7dc913a13ea43c
| 293,462,044,117,014,600,000,000,000,000,000,000,000 | 6 |
Hardened the checking of directory offset requested by a readdir
When asked for the next directory entries, make sure the chunk offset
is within valid bounds; otherwise return no more entries for the chunk.
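A hedged fragment of the described hardening (variable names assumed): clamp the caller-supplied chunk offset before it is used as an index.
	if (off < 0 || (size_t)off >= nr_cached_entries)
		return 0;   /* out-of-range offset: no more entries */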
|
remove_wpattern (wparam, wstrlen, wpattern, op)
wchar_t *wparam;
size_t wstrlen;
wchar_t *wpattern;
int op;
{
wchar_t wc, *ret;
int n;
switch (op)
{
case RP_LONG_LEFT: /* remove longest match at start */
for (n = wstrlen; n >= 0; n--)
{
wc = wparam[n]; wparam[n] = L'\0';
if (wcsmatch (wpattern, wparam, FNMATCH_EXTFLAG) != FNM_NOMATCH)
{
wparam[n] = wc;
return (wcsdup (wparam + n));
}
wparam[n] = wc;
}
break;
case RP_SHORT_LEFT: /* remove shortest match at start */
for (n = 0; n <= wstrlen; n++)
{
wc = wparam[n]; wparam[n] = L'\0';
if (wcsmatch (wpattern, wparam, FNMATCH_EXTFLAG) != FNM_NOMATCH)
{
wparam[n] = wc;
return (wcsdup (wparam + n));
}
wparam[n] = wc;
}
break;
case RP_LONG_RIGHT: /* remove longest match at end */
for (n = 0; n <= wstrlen; n++)
{
if (wcsmatch (wpattern, wparam + n, FNMATCH_EXTFLAG) != FNM_NOMATCH)
{
wc = wparam[n]; wparam[n] = L'\0';
ret = wcsdup (wparam);
wparam[n] = wc;
return (ret);
}
}
break;
case RP_SHORT_RIGHT: /* remove shortest match at end */
for (n = wstrlen; n >= 0; n--)
{
if (wcsmatch (wpattern, wparam + n, FNMATCH_EXTFLAG) != FNM_NOMATCH)
{
wc = wparam[n]; wparam[n] = L'\0';
ret = wcsdup (wparam);
wparam[n] = wc;
return (ret);
}
}
break;
}
return (wparam); /* no match, return original string */
}
| 0 |
[] |
bash
|
955543877583837c85470f7fb8a97b7aa8d45e6c
| 230,911,198,595,645,240,000,000,000,000,000,000,000 | 66 |
bash-4.4-rc2 release
|
static void __io_req_complete_post32(struct io_kiocb *req, s32 res,
u32 cflags, u64 extra1, u64 extra2)
{
if (!(req->flags & REQ_F_CQE_SKIP))
__io_fill_cqe32_req(req, res, cflags, extra1, extra2);
__io_req_complete_put(req);
}
| 0 |
[
"CWE-416"
] |
linux
|
9cae36a094e7e9d6e5fe8b6dcd4642138b3eb0c7
| 90,744,159,594,259,320,000,000,000,000,000,000,000 | 7 |
io_uring: reinstate the inflight tracking
After some debugging, it was realized that we really do still need the
old inflight tracking for any file type that has io_uring_fops assigned.
If we don't, then trivial circular references will mean that we never get
the ctx cleaned up and hence it'll leak.
Just bring back the inflight tracking, which then also means we can
eliminate the conditional dropping of the file when task_work is queued.
Fixes: d5361233e9ab ("io_uring: drop the old style inflight file tracking")
Signed-off-by: Jens Axboe <[email protected]>
|
njs_generate_assignment_name(njs_vm_t *vm, njs_generator_t *generator,
njs_parser_node_t *node)
{
njs_parser_node_t *lvalue, *expr;
njs_vmcode_move_t *move;
lvalue = node->left;
expr = node->right;
/*
* lvalue and expression indexes are equal if the expression is an
* empty object or expression result is stored directly in variable.
*/
if (lvalue->index != expr->index) {
njs_generate_code_move(generator, move, lvalue->index, expr->index,
expr);
}
node->index = expr->index;
node->temporary = expr->temporary;
return njs_generator_stack_pop(vm, generator, NULL);
}
| 0 |
[
"CWE-703",
"CWE-754"
] |
njs
|
404553896792b8f5f429dc8852d15784a59d8d3e
| 291,581,908,806,728,300,000,000,000,000,000,000,000 | 23 |
Fixed break instruction in a try-catch block.
Previously, JUMP offset for a break instruction inside a try-catch
block was not set to a correct offset during code generation
when a return instruction was present in inner try-catch block.
The fix is to update the JUMP offset appropriately.
This closes #553 issue on Github.
|
dwg_obj_is_table (const Dwg_Object *obj)
{
const unsigned int type = obj->type;
return (obj->supertype == DWG_SUPERTYPE_OBJECT)
&& (type == DWG_TYPE_BLOCK_HEADER || type == DWG_TYPE_LAYER
|| type == DWG_TYPE_STYLE || type == DWG_TYPE_LTYPE
|| type == DWG_TYPE_VIEW || type == DWG_TYPE_UCS
|| type == DWG_TYPE_VPORT || type == DWG_TYPE_APPID
|| type == DWG_TYPE_DIMSTYLE
|| type == DWG_TYPE_VX_TABLE_RECORD);
}
| 0 |
[
"CWE-787"
] |
libredwg
|
ecf5183d8b3b286afe2a30021353b7116e0208dd
| 35,329,267,485,660,535,000,000,000,000,000,000,000 | 11 |
dwg_section_wtype: fix fuzzing overflow
with illegal and overlong section names. Fixes GH #349, #352
section names cannot be longer than 24
|
perf_cgroup_set_timestamp(struct task_struct *task,
struct perf_event_context *ctx)
{
struct perf_cgroup *cgrp;
struct perf_cgroup_info *info;
struct cgroup_subsys_state *css;
/*
* ctx->lock held by caller
* ensure we do not access cgroup data
* unless we have the cgroup pinned (css_get)
*/
if (!task || !ctx->nr_cgroups)
return;
cgrp = perf_cgroup_from_task(task, ctx);
for (css = &cgrp->css; css; css = css->parent) {
cgrp = container_of(css, struct perf_cgroup, css);
info = this_cpu_ptr(cgrp->info);
info->timestamp = ctx->timestamp;
}
}
| 0 |
[
"CWE-401"
] |
tip
|
7bdb157cdebbf95a1cd94ed2e01b338714075d00
| 107,306,627,543,227,160,000,000,000,000,000,000,000 | 23 |
perf/core: Fix a memory leak in perf_event_parse_addr_filter()
As shown through runtime testing, the "filename" allocation is not
always freed in perf_event_parse_addr_filter().
There are three possible ways that this could happen:
- It could be allocated twice on subsequent iterations through the loop,
- or leaked on the success path,
- or on the failure path.
Clean up the code flow to make it obvious that 'filename' is always
freed in the reallocation path and in the two return paths as well.
We rely on the fact that kfree(NULL) is NOP and filename is initialized
with NULL.
This fixes the leak. No other side effects expected.
[ Dan Carpenter: cleaned up the code flow & added a changelog. ]
[ Ingo Molnar: updated the changelog some more. ]
Fixes: 375637bc5249 ("perf/core: Introduce address range filtering")
Signed-off-by: "kiyin(尹亮)" <[email protected]>
Signed-off-by: Dan Carpenter <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
Cc: "Srivatsa S. Bhat" <[email protected]>
Cc: Anthony Liguori <[email protected]>
--
kernel/events/core.c | 12 +++++-------
1 file changed, 5 insertions(+), 7 deletions(-)
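The leak-free ownership pattern the fix converges on, as a kernel-style schematic (relying on kfree(NULL) being a no-op, as the message notes):
	char *filename = NULL;
	int ret = -EINVAL;
	/* inside the token-parsing loop: */
	kfree(filename);                 /* no-op on the first iteration */
	filename = match_strdup(&args[2]);
	if (!filename) {
		ret = -ENOMEM;
		goto fail;
	}
	/* ... use filename ... */
fail:
	kfree(filename);                 /* freed exactly once on every path */
	return ret;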
|
GF_Err trgt_box_read(GF_Box *s, GF_BitStream *bs)
{
GF_TrackGroupTypeBox *ptr = (GF_TrackGroupTypeBox *)s;
ISOM_DECREASE_SIZE(ptr, 4);
ptr->track_group_id = gf_bs_read_u32(bs);
return GF_OK;
}
| 0 |
[
"CWE-787"
] |
gpac
|
77510778516803b7f7402d7423c6d6bef50254c3
| 104,949,623,050,416,220,000,000,000,000,000,000,000 | 7 |
fixed #2255
|
TEST(AsyncSSLSocketTest, ConnectWriteReadLargeClose) {
// Start listening on a local port
WriteCallbackBase writeCallback;
ReadCallback readCallback(&writeCallback);
HandshakeCallback handshakeCallback(&readCallback);
SSLServerAcceptCallback acceptCallback(&handshakeCallback);
TestSSLServer server(&acceptCallback);
// Set up SSL context.
std::shared_ptr<SSLContext> sslContext(new SSLContext());
sslContext->ciphers("ALL:!ADH:!LOW:!EXP:!MD5:@STRENGTH");
// sslContext->loadTrustedCertificates("./trusted-ca-certificate.pem");
// sslContext->authenticate(true, false);
// connect
auto socket =
std::make_shared<BlockingSocket>(server.getAddress(), sslContext);
socket->open(std::chrono::milliseconds(10000));
// write()
uint8_t buf[128];
memset(buf, 'a', sizeof(buf));
socket->write(buf, sizeof(buf));
// read()
uint8_t readbuf[128];
// we will fake the read len but that should be fine
size_t readLen = 1L << 33;
uint32_t bytesRead = socket->read(readbuf, readLen);
EXPECT_EQ(bytesRead, 128);
EXPECT_EQ(memcmp(buf, readbuf, bytesRead), 0);
// close()
socket->close();
cerr << "ConnectWriteReadClose test completed" << endl;
EXPECT_EQ(socket->getSSLSocket()->getTotalConnectTimeout().count(), 10000);
}
| 0 |
[
"CWE-125"
] |
folly
|
c321eb588909646c15aefde035fd3133ba32cdee
| 338,551,558,055,376,280,000,000,000,000,000,000,000 | 38 |
Handle close_notify as standard writeErr in AsyncSSLSocket.
Summary: Fixes CVE-2019-11934
Reviewed By: mingtaoy
Differential Revision: D18020613
fbshipit-source-id: db82bb250e53f0d225f1280bd67bc74abd417836
|
void *ASN1_item_d2i_bio(const ASN1_ITEM *it, BIO *in, void *x)
{
BUF_MEM *b = NULL;
const unsigned char *p;
void *ret = NULL;
int len;
len = asn1_d2i_read_bio(in, &b);
if (len < 0)
goto err;
p = (const unsigned char *)b->data;
ret = ASN1_item_d2i(x, &p, len, it);
err:
BUF_MEM_free(b);
return (ret);
}
| 0 |
[
"CWE-399"
] |
openssl
|
c62981390d6cf9e3d612c489b8b77c2913b25807
| 89,482,448,165,330,170,000,000,000,000,000,000,000 | 17 |
Harden ASN.1 BIO handling of large amounts of data.
If the ASN.1 BIO is presented with a large length field read it in
chunks of increasing size checking for EOF on each read. This prevents
small files allocating excessive amounts of data.
CVE-2016-2109
Thanks to Brian Carpenter for reporting this issue.
Reviewed-by: Viktor Dukhovni <[email protected]>
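A self-contained C sketch of chunked reading with geometric growth and an EOF check per read (a stdio-based illustration, not OpenSSL's BIO code):
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
static unsigned char *read_all(FILE *in, size_t *out_len)
{
    size_t cap = 4096, len = 0;
    unsigned char *buf = malloc(cap), *tmp;
    if (!buf)
        return NULL;
    for (;;) {
        size_t want = cap - len;
        size_t n = fread(buf + len, 1, want, in);
        len += n;
        if (n < want)           /* short read: EOF or error, stop */
            break;
        if (cap > SIZE_MAX / 2) /* refuse absurd growth */
            break;
        tmp = realloc(buf, cap * 2);
        if (!tmp)
            break;
        buf = tmp;
        cap *= 2;               /* grow in increasing chunks */
    }
    *out_len = len;
    return buf;
}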
|
static void restore_part_field_pointers(Field **ptr, uchar **restore_ptr)
{
Field *field;
while ((field= *(ptr++)))
{
field->ptr= *restore_ptr;
restore_ptr++;
}
return;
}
| 0 |
[] |
mysql-server
|
be901b60ae59c93848c829d1b0b2cb523ab8692e
| 143,419,619,634,229,120,000,000,000,000,000,000,000 | 10 |
Bug#26390632: CREATE TABLE CAN CAUSE MYSQL TO EXIT.
Analysis
========
CREATE TABLE of InnoDB table with a partition name
which exceeds the path limit can cause the server
to exit.
During the preparation of the partition name,
there was no check to identify whether the complete
path name for partition exceeds the max supported
path length, causing the server to exit during
subsequent processing.
Fix
===
During the preparation of partition name, check and report
an error if the partition path name exceeds the maximum path
name limit.
This is a 5.5 patch.
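A hedged fragment of the added check (MySQL helper names assumed from the server codebase): fail early when the composed partition file name would exceed FN_REFLEN, instead of overflowing later.
	if (strlen(path) + strlen(part_name) + 1 >= FN_REFLEN) {
		my_error(ER_PATH_LENGTH, MYF(0), part_name);
		return true;   /* partition path too long: report, don't crash */
	}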
|
List<Item>* st_select_lex::get_item_list()
{
return &item_list;
}
| 0 |
[
"CWE-476"
] |
server
|
3a52569499e2f0c4d1f25db1e81617a9d9755400
| 274,357,017,846,320,550,000,000,000,000,000,000,000 | 4 |
MDEV-25636: Bug report: abortion in sql/sql_parse.cc:6294
The assertion failure was caused by this query
select /*id=1*/ from t1
where
col= ( select /*id=2*/ from ... where corr_cond1
union
select /*id=4*/ from ... where corr_cond2)
Here,
- select with id=2 was correlated due to corr_cond1.
- select with id=4 was initially correlated due to corr_cond2, but then
the optimizer optimized away the correlation, making the select with id=4
uncorrelated.
However, since select with id=2 remained correlated, the execution had to
re-compute the whole UNION. When it tried to execute select with id=4, it
hit an assertion (join buffer already free'd).
This is because select with id=4 has freed its execution structures after
it has been executed once. The select is uncorrelated, so it did not expect
it would need to be executed for the second time.
Fixed this by adding this logic in
st_select_lex::optimize_unflattened_subqueries():
If a member of a UNION is correlated, mark all its members as
correlated, so that they are prepared to be executed multiple times.
|
int regulator_list_voltage(struct regulator *regulator, unsigned selector)
{
struct regulator_dev *rdev = regulator->rdev;
const struct regulator_ops *ops = rdev->desc->ops;
int ret;
if (rdev->desc->fixed_uV && rdev->desc->n_voltages == 1 && !selector)
return rdev->desc->fixed_uV;
if (ops->list_voltage) {
if (selector >= rdev->desc->n_voltages)
return -EINVAL;
mutex_lock(&rdev->mutex);
ret = ops->list_voltage(rdev, selector);
mutex_unlock(&rdev->mutex);
} else if (rdev->supply) {
ret = regulator_list_voltage(rdev->supply, selector);
} else {
return -EINVAL;
}
if (ret > 0) {
if (ret < rdev->constraints->min_uV)
ret = 0;
else if (ret > rdev->constraints->max_uV)
ret = 0;
}
return ret;
}
| 0 |
[
"CWE-416"
] |
linux
|
60a2362f769cf549dc466134efe71c8bf9fbaaba
| 236,285,677,224,126,350,000,000,000,000,000,000,000 | 30 |
regulator: core: Fix regulator_ena_gpio_free not to access pin after freeing
After freeing pin from regulator_ena_gpio_free, loop can access
the pin. So this patch fixes not to access pin after freeing.
Signed-off-by: Seung-Woo Kim <[email protected]>
Signed-off-by: Mark Brown <[email protected]>
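A runnable illustration of the safe-iteration idiom behind the fix (a plain C singly linked list standing in for the kernel's list_for_each_entry_safe): cache the next pointer before the element may be freed.
#include <stdlib.h>
struct node { struct node *next; };
static void free_all(struct node *head)
{
    struct node *n = head, *next;
    while (n) {
        next = n->next;  /* read before free; never touch n afterwards */
        free(n);
        n = next;
    }
}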
|
Item *Item_field::safe_charset_converter(CHARSET_INFO *tocs)
{
no_const_subst= 1;
return Item::safe_charset_converter(tocs);
}
| 0 |
[] |
server
|
b000e169562697aa072600695d4f0c0412f94f4f
| 38,422,768,562,688,030,000,000,000,000,000,000,000 | 5 |
Bug#26361149 MYSQL SERVER CRASHES AT: COL IN(IFNULL(CONST, COL), NAME_CONST('NAME', NULL))
based on:
commit f7316aa0c9a
Author: Ajo Robert <[email protected]>
Date: Thu Aug 24 17:03:21 2017 +0530
Bug#26361149 MYSQL SERVER CRASHES AT: COL IN(IFNULL(CONST,
COL), NAME_CONST('NAME', NULL))
Backport of Bug#19143243 fix.
NAME_CONST item can return NULL_ITEM type in case of incorrect arguments.
NULL_ITEM has special processing in Item_func_in function.
In Item_func_in::fix_length_and_dec an array of possible comparators is
created. Since NAME_CONST function has NULL_ITEM type, corresponding
array element is empty. Then NAME_CONST is wrapped to ITEM_CACHE.
ITEM_CACHE can not return proper type(NULL_ITEM) in Item_func_in::val_int(),
so the NULL_ITEM is attempted compared with an empty comparator.
The fix is to disable the caching of Item_name_const item.
|
static void build_completion_hash(bool rehash, bool write_info)
{
COMMANDS *cmd=commands;
MYSQL_RES *databases=0,*tables=0;
MYSQL_RES *fields;
static char ***field_names= 0;
MYSQL_ROW database_row,table_row;
MYSQL_FIELD *sql_field;
char buf[NAME_LEN*2+2]; // table name plus field name plus 2
int i,j,num_fields;
DBUG_ENTER("build_completion_hash");
if (status.batch || quick || !current_db)
DBUG_VOID_RETURN; // We don't need completion in batches
if (!rehash)
DBUG_VOID_RETURN;
/* Free old used memory */
if (field_names)
field_names=0;
completion_hash_clean(&ht);
free_root(&hash_mem_root,MYF(0));
/* hash this file's known subset of SQL commands */
while (cmd->name) {
add_word(&ht,(char*) cmd->name);
cmd++;
}
/* hash MySQL functions (to be implemented) */
/* hash all database names */
if (mysql_query(&mysql,"show databases") == 0)
{
if (!(databases = mysql_store_result(&mysql)))
put_info(mysql_error(&mysql),INFO_INFO);
else
{
while ((database_row=mysql_fetch_row(databases)))
{
char *str=strdup_root(&hash_mem_root, (char*) database_row[0]);
if (str)
add_word(&ht,(char*) str);
}
mysql_free_result(databases);
}
}
/* hash all table names */
if (mysql_query(&mysql,"show tables")==0)
{
if (!(tables = mysql_store_result(&mysql)))
put_info(mysql_error(&mysql),INFO_INFO);
else
{
if (mysql_num_rows(tables) > 0 && !opt_silent && write_info)
{
tee_fprintf(stdout, "\
Reading table information for completion of table and column names\n\
You can turn off this feature to get a quicker startup with -A\n\n");
}
while ((table_row=mysql_fetch_row(tables)))
{
char *str=strdup_root(&hash_mem_root, (char*) table_row[0]);
if (str &&
!completion_hash_exists(&ht,(char*) str, (uint) strlen(str)))
add_word(&ht,str);
}
}
}
/* hash all field names, both with the table prefix and without it */
if (!tables) /* no tables */
{
DBUG_VOID_RETURN;
}
mysql_data_seek(tables,0);
if (!(field_names= (char ***) alloc_root(&hash_mem_root,sizeof(char **) *
(uint) (mysql_num_rows(tables)+1))))
{
mysql_free_result(tables);
DBUG_VOID_RETURN;
}
i=0;
while ((table_row=mysql_fetch_row(tables)))
{
if ((fields=mysql_list_fields(&mysql,(const char*) table_row[0],NullS)))
{
num_fields=mysql_num_fields(fields);
if (!(field_names[i] = (char **) alloc_root(&hash_mem_root,
sizeof(char *) *
(num_fields*2+1))))
{
mysql_free_result(fields);
break;
}
field_names[i][num_fields*2]= NULL;
j=0;
while ((sql_field=mysql_fetch_field(fields)))
{
sprintf(buf,"%.64s.%.64s",table_row[0],sql_field->name);
field_names[i][j] = strdup_root(&hash_mem_root,buf);
add_word(&ht,field_names[i][j]);
field_names[i][num_fields+j] = strdup_root(&hash_mem_root,
sql_field->name);
if (!completion_hash_exists(&ht,field_names[i][num_fields+j],
(uint) strlen(field_names[i][num_fields+j])))
add_word(&ht,field_names[i][num_fields+j]);
j++;
}
mysql_free_result(fields);
}
else
field_names[i]= 0;
i++;
}
mysql_free_result(tables);
field_names[i]=0; // End pointer
DBUG_VOID_RETURN;
}
| 0 |
[
"CWE-284",
"CWE-295"
] |
mysql-server
|
3bd5589e1a5a93f9c224badf983cd65c45215390
| 116,745,221,579,479,910,000,000,000,000,000,000,000 | 120 |
WL#6791 : Redefine client --ssl option to imply enforced encryption
# Changed the meaning of the --ssl=1 option of all client binaries
to mean force ssl, not try ssl and fail over to unencrypted
# Added a new MYSQL_OPT_SSL_ENFORCE mysql_options()
option to specify that an ssl connection is required.
# Added a new macro SSL_SET_OPTIONS() to the client
SSL handling headers that sets all the relevant SSL options at
once.
# Revamped all of the current native clients to use the new macro
# Removed some Windows line endings.
# Added proper handling of the new option into the ssl helper
headers.
# If SSL is mandatory assume that the media is secure enough
for the sha256 plugin to do unencrypted password exchange even
before establishing a connection.
# Set the default ssl cipher to DHE-RSA-AES256-SHA if none is
specified.
# updated test cases that require a non-default cipher to spawn
a mysql command line tool binary since mysqltest has no support
for specifying ciphers.
# updated the replication slave connection code to always enforce
SSL if any of the SSL config options is present.
# test cases added and updated.
# added a mysql_get_option() API to return mysql_options()
values. Used the new API inside the sha256 plugin.
# Fixed compilation warnings because of unused variables.
# Fixed test failures (mysql_ssl and bug13115401)
# Fixed whitespace issues.
# Fully implemented the mysql_get_option() function.
# Added a test case for mysql_get_option()
# fixed some trailing whitespace issues
# fixed some uint/int warnings in mysql_client_test.c
# removed shared memory option from non-windows get_options tests
# moved MYSQL_OPT_LOCAL_INFILE to the uint options
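A hedged client-side fragment showing the new option in use (option and type names are taken from the text and the libmysqlclient API of that era):
	my_bool enforce = 1;
	mysql_options(&mysql, MYSQL_OPT_SSL_ENFORCE, &enforce);
	/* mysql_real_connect() now fails rather than silently
	   falling back to an unencrypted connection */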
|
static int ext4_check_descriptors(struct super_block *sb,
ext4_fsblk_t sb_block,
ext4_group_t *first_not_zeroed)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
ext4_fsblk_t first_block = le32_to_cpu(sbi->s_es->s_first_data_block);
ext4_fsblk_t last_block;
ext4_fsblk_t block_bitmap;
ext4_fsblk_t inode_bitmap;
ext4_fsblk_t inode_table;
int flexbg_flag = 0;
ext4_group_t i, grp = sbi->s_groups_count;
if (ext4_has_feature_flex_bg(sb))
flexbg_flag = 1;
ext4_debug("Checking group descriptors");
for (i = 0; i < sbi->s_groups_count; i++) {
struct ext4_group_desc *gdp = ext4_get_group_desc(sb, i, NULL);
if (i == sbi->s_groups_count - 1 || flexbg_flag)
last_block = ext4_blocks_count(sbi->s_es) - 1;
else
last_block = first_block +
(EXT4_BLOCKS_PER_GROUP(sb) - 1);
if ((grp == sbi->s_groups_count) &&
!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
grp = i;
block_bitmap = ext4_block_bitmap(sb, gdp);
if (block_bitmap == sb_block) {
ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
"Block bitmap for group %u overlaps "
"superblock", i);
}
if (block_bitmap < first_block || block_bitmap > last_block) {
ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
"Block bitmap for group %u not in group "
"(block %llu)!", i, block_bitmap);
return 0;
}
inode_bitmap = ext4_inode_bitmap(sb, gdp);
if (inode_bitmap == sb_block) {
ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
"Inode bitmap for group %u overlaps "
"superblock", i);
}
if (inode_bitmap < first_block || inode_bitmap > last_block) {
ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
"Inode bitmap for group %u not in group "
"(block %llu)!", i, inode_bitmap);
return 0;
}
inode_table = ext4_inode_table(sb, gdp);
if (inode_table == sb_block) {
ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
"Inode table for group %u overlaps "
"superblock", i);
}
if (inode_table < first_block ||
inode_table + sbi->s_itb_per_group - 1 > last_block) {
ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
"Inode table for group %u not in group "
"(block %llu)!", i, inode_table);
return 0;
}
ext4_lock_group(sb, i);
if (!ext4_group_desc_csum_verify(sb, i, gdp)) {
ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
"Checksum for group %u failed (%u!=%u)",
i, le16_to_cpu(ext4_group_desc_csum(sb, i,
gdp)), le16_to_cpu(gdp->bg_checksum));
if (!sb_rdonly(sb)) {
ext4_unlock_group(sb, i);
return 0;
}
}
ext4_unlock_group(sb, i);
if (!flexbg_flag)
first_block += EXT4_BLOCKS_PER_GROUP(sb);
}
if (NULL != first_not_zeroed)
*first_not_zeroed = grp;
return 1;
}
| 1 |
[] |
linux
|
18db4b4e6fc31eda838dd1c1296d67dbcb3dc957
| 229,672,042,384,761,180,000,000,000,000,000,000,000 | 87 |
ext4: don't allow r/w mounts if metadata blocks overlap the superblock
If some metadata block, such as an allocation bitmap, overlaps the
superblock, it's very likely that if the file system is mounted
read/write, the results will not be pretty. So disallow r/w mounts
for file systems corrupted in this particular way.
Signed-off-by: Theodore Ts'o <[email protected]>
Cc: [email protected]
|
TEST_F(RouterTest, PropagatesUpstreamFilterState) {
NiceMock<Http::MockRequestEncoder> encoder;
Http::ResponseDecoder* response_decoder = nullptr;
// This pattern helps ensure that we're actually invoking the callback.
bool filter_state_verified = false;
router_.config().upstream_logs_.push_back(
std::make_shared<TestAccessLog>([&](const auto& stream_info) {
filter_state_verified =
stream_info.upstreamInfo()->upstreamFilterState()->hasDataWithName("upstream data");
}));
upstream_stream_info_.filterState()->setData(
"upstream data", std::make_unique<StreamInfo::UInt32AccessorImpl>(123),
StreamInfo::FilterState::StateType::ReadOnly, StreamInfo::FilterState::LifeSpan::Connection);
expectResponseTimerCreate();
EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_, newStream(_, _))
.WillOnce(Invoke(
[&](Http::ResponseDecoder& decoder,
Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {
response_decoder = &decoder;
callbacks.onPoolReady(encoder, cm_.thread_local_cluster_.conn_pool_.host_,
upstream_stream_info_, Http::Protocol::Http10);
return nullptr;
}));
Http::TestRequestHeaderMapImpl headers{};
HttpTestUtility::addDefaultHeaders(headers);
router_.decodeHeaders(headers, true);
Http::ResponseHeaderMapPtr response_headers(
new Http::TestResponseHeaderMapImpl{{":status", "200"}});
// NOLINTNEXTLINE: Silence null pointer access warning
response_decoder->decodeHeaders(std::move(response_headers), true);
EXPECT_TRUE(verifyHostUpstreamStats(1, 0));
EXPECT_TRUE(filter_state_verified);
EXPECT_TRUE(callbacks_.streamInfo().upstreamInfo()->upstreamFilterState()->hasDataWithName(
"upstream data"));
}
| 0 |
[
"CWE-703"
] |
envoy
|
5bf9b0f1e7f247a4eee7180849cb0823926f7fff
| 86,384,963,250,863,040,000,000,000,000,000,000,000 | 40 |
[1.21] CVE-2022-21655
Signed-off-by: Otto van der Schaaf <[email protected]>
|
static int do_check(struct verifier_env *env)
{
struct verifier_state *state = &env->cur_state;
struct bpf_insn *insns = env->prog->insnsi;
struct reg_state *regs = state->regs;
int insn_cnt = env->prog->len;
int insn_idx, prev_insn_idx = 0;
int insn_processed = 0;
bool do_print_state = false;
init_reg_state(regs);
insn_idx = 0;
for (;;) {
struct bpf_insn *insn;
u8 class;
int err;
if (insn_idx >= insn_cnt) {
verbose("invalid insn idx %d insn_cnt %d\n",
insn_idx, insn_cnt);
return -EFAULT;
}
insn = &insns[insn_idx];
class = BPF_CLASS(insn->code);
if (++insn_processed > 32768) {
verbose("BPF program is too large. Proccessed %d insn\n",
insn_processed);
return -E2BIG;
}
err = is_state_visited(env, insn_idx);
if (err < 0)
return err;
if (err == 1) {
/* found equivalent state, can prune the search */
if (log_level) {
if (do_print_state)
verbose("\nfrom %d to %d: safe\n",
prev_insn_idx, insn_idx);
else
verbose("%d: safe\n", insn_idx);
}
goto process_bpf_exit;
}
if (log_level && do_print_state) {
verbose("\nfrom %d to %d:", prev_insn_idx, insn_idx);
print_verifier_state(env);
do_print_state = false;
}
if (log_level) {
verbose("%d: ", insn_idx);
print_bpf_insn(insn);
}
if (class == BPF_ALU || class == BPF_ALU64) {
err = check_alu_op(env, insn);
if (err)
return err;
} else if (class == BPF_LDX) {
enum bpf_reg_type src_reg_type;
/* check for reserved fields is already done */
/* check src operand */
err = check_reg_arg(regs, insn->src_reg, SRC_OP);
if (err)
return err;
err = check_reg_arg(regs, insn->dst_reg, DST_OP_NO_MARK);
if (err)
return err;
src_reg_type = regs[insn->src_reg].type;
/* check that memory (src_reg + off) is readable,
* the state of dst_reg will be updated by this func
*/
err = check_mem_access(env, insn->src_reg, insn->off,
BPF_SIZE(insn->code), BPF_READ,
insn->dst_reg);
if (err)
return err;
if (BPF_SIZE(insn->code) != BPF_W) {
insn_idx++;
continue;
}
if (insn->imm == 0) {
/* saw a valid insn
* dst_reg = *(u32 *)(src_reg + off)
* use reserved 'imm' field to mark this insn
*/
insn->imm = src_reg_type;
} else if (src_reg_type != insn->imm &&
(src_reg_type == PTR_TO_CTX ||
insn->imm == PTR_TO_CTX)) {
/* Abuser program is trying to use the same insn
* dst_reg = *(u32*) (src_reg + off)
* with different pointer types:
* src_reg == ctx in one branch and
* src_reg == stack|map in some other branch.
* Reject it.
*/
verbose("same insn cannot be used with different pointers\n");
return -EINVAL;
}
} else if (class == BPF_STX) {
enum bpf_reg_type dst_reg_type;
if (BPF_MODE(insn->code) == BPF_XADD) {
err = check_xadd(env, insn);
if (err)
return err;
insn_idx++;
continue;
}
/* check src1 operand */
err = check_reg_arg(regs, insn->src_reg, SRC_OP);
if (err)
return err;
/* check src2 operand */
err = check_reg_arg(regs, insn->dst_reg, SRC_OP);
if (err)
return err;
dst_reg_type = regs[insn->dst_reg].type;
/* check that memory (dst_reg + off) is writeable */
err = check_mem_access(env, insn->dst_reg, insn->off,
BPF_SIZE(insn->code), BPF_WRITE,
insn->src_reg);
if (err)
return err;
if (insn->imm == 0) {
insn->imm = dst_reg_type;
} else if (dst_reg_type != insn->imm &&
(dst_reg_type == PTR_TO_CTX ||
insn->imm == PTR_TO_CTX)) {
verbose("same insn cannot be used with different pointers\n");
return -EINVAL;
}
} else if (class == BPF_ST) {
if (BPF_MODE(insn->code) != BPF_MEM ||
insn->src_reg != BPF_REG_0) {
verbose("BPF_ST uses reserved fields\n");
return -EINVAL;
}
/* check src operand */
err = check_reg_arg(regs, insn->dst_reg, SRC_OP);
if (err)
return err;
/* check that memory (dst_reg + off) is writeable */
err = check_mem_access(env, insn->dst_reg, insn->off,
BPF_SIZE(insn->code), BPF_WRITE,
-1);
if (err)
return err;
} else if (class == BPF_JMP) {
u8 opcode = BPF_OP(insn->code);
if (opcode == BPF_CALL) {
if (BPF_SRC(insn->code) != BPF_K ||
insn->off != 0 ||
insn->src_reg != BPF_REG_0 ||
insn->dst_reg != BPF_REG_0) {
verbose("BPF_CALL uses reserved fields\n");
return -EINVAL;
}
err = check_call(env, insn->imm);
if (err)
return err;
} else if (opcode == BPF_JA) {
if (BPF_SRC(insn->code) != BPF_K ||
insn->imm != 0 ||
insn->src_reg != BPF_REG_0 ||
insn->dst_reg != BPF_REG_0) {
verbose("BPF_JA uses reserved fields\n");
return -EINVAL;
}
insn_idx += insn->off + 1;
continue;
} else if (opcode == BPF_EXIT) {
if (BPF_SRC(insn->code) != BPF_K ||
insn->imm != 0 ||
insn->src_reg != BPF_REG_0 ||
insn->dst_reg != BPF_REG_0) {
verbose("BPF_EXIT uses reserved fields\n");
return -EINVAL;
}
* eBPF calling convention is such that R0 is used
* to return the value from eBPF program.
* Make sure that it's readable at this time
* of bpf_exit, which means that program wrote
* something into it earlier
*/
err = check_reg_arg(regs, BPF_REG_0, SRC_OP);
if (err)
return err;
if (is_pointer_value(env, BPF_REG_0)) {
verbose("R0 leaks addr as return value\n");
return -EACCES;
}
process_bpf_exit:
insn_idx = pop_stack(env, &prev_insn_idx);
if (insn_idx < 0) {
break;
} else {
do_print_state = true;
continue;
}
} else {
err = check_cond_jmp_op(env, insn, &insn_idx);
if (err)
return err;
}
} else if (class == BPF_LD) {
u8 mode = BPF_MODE(insn->code);
if (mode == BPF_ABS || mode == BPF_IND) {
err = check_ld_abs(env, insn);
if (err)
return err;
} else if (mode == BPF_IMM) {
err = check_ld_imm(env, insn);
if (err)
return err;
insn_idx++;
} else {
verbose("invalid BPF_LD mode\n");
return -EINVAL;
}
} else {
verbose("unknown insn class %d\n", class);
return -EINVAL;
}
insn_idx++;
}
return 0;
}
| 0 |
[
"CWE-200"
] |
linux
|
a1b14d27ed0965838350f1377ff97c93ee383492
| 17,513,358,867,952,040,000,000,000,000,000,000,000 | 263 |
bpf: fix branch offset adjustment on backjumps after patching ctx expansion
When ctx access is used, the kernel often needs to expand/rewrite
instructions, so after that patching, branch offsets have to be
adjusted for both forward and backward jumps in the new eBPF program,
but for backward jumps it fails to account the delta. Meaning, for
example, if the expansion happens exactly on the insn that sits at
the jump target, it doesn't fix up the back jump offset.
Analysis on what the check in adjust_branches() is currently doing:
/* adjust offset of jmps if necessary */
if (i < pos && i + insn->off + 1 > pos)
insn->off += delta;
else if (i > pos && i + insn->off + 1 < pos)
insn->off -= delta;
First condition (forward jumps):
Before: After:
insns[0] insns[0]
insns[1] <--- i/insn insns[1] <--- i/insn
insns[2] <--- pos insns[P] <--- pos
insns[3] insns[P] `------| delta
insns[4] <--- target_X insns[P] `-----|
insns[5] insns[3]
insns[4] <--- target_X
insns[5]
First case is if we cross pos-boundary and the jump instruction was
before pos. This is handled correctly. I.e. if i == pos, then this
would mean our jump that we currently check was the patchlet itself
that we just injected. Since such patchlets are self-contained and
have no awareness of any insns before or after the patched one, the
delta is correctly not adjusted. Also, for the second condition in
case of i + insn->off + 1 == pos, means we jump to that newly patched
instruction, so no offset adjustment are needed. That part is correct.
Second condition (backward jumps):
Before:                          After:
insns[0]                         insns[0]
insns[1] <--- target_X           insns[1] <--- target_X
insns[2] <--- pos <-- target_Y   insns[P] <--- pos <-- target_Y
insns[3]                         insns[P] `------| delta
insns[4] <--- i/insn             insns[P]  `-----|
insns[5]                         insns[3]
                                 insns[4] <--- i/insn
                                 insns[5]
The second interesting case is where we cross the pos boundary and the
jump instruction was after pos. A backward jump with i == pos would be
impossible and would indicate a bug somewhere in the patchlet, so the
first part of the condition, checking i > pos, is okay by itself.
However, i + insn->off + 1 < pos does not always work as intended to
trigger the adjustment. It works when jump targets are far enough away
that the delta doesn't matter. But, for example, where the fixed
insn->off previously pointed to pos (target_Y), it now points to
pos + delta, so that additional room needs to be taken into account by
the check. This means that i) both tests here need to be adjusted to
pos + delta, and ii) for the second condition, the test needs to be <=,
as pos itself can be a target of the backjump, too.
Fixes: 9bac3d6d548e ("bpf: allow extended BPF programs access skb fields")
Signed-off-by: Daniel Borkmann <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
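A minimal standalone sketch of the corrected fixup this message prescribes: both backward-jump boundary tests move to pos + delta and the comparison becomes <= so that pos itself counts as a crossed target. The struct and the driver values are illustrative, not the kernel's.

#include <stdio.h>

/* Illustrative stand-in for the kernel's struct bpf_insn. */
struct insn { int off; };

/* Corrected branch fixup after inserting `delta` extra insns at `pos`:
 * forward jumps from before the patch must skip the extra insns, and
 * backward jumps from after it must land the same amount earlier. */
static void adjust_branch(struct insn *insn, int i, int pos, int delta)
{
    if (i < pos && i + insn->off + 1 > pos)
        insn->off += delta;     /* forward jump across the patch */
    else if (i > pos + delta && i + insn->off + 1 <= pos + delta)
        insn->off -= delta;     /* backward jump across the patch */
}

int main(void)
{
    /* Originally: jump at index 4, off = -3, target = 4 - 3 + 1 = 2 == pos. */
    struct insn jmp = { -3 };

    /* After expanding pos = 2 by delta = 2, the jump sits at i = 6. */
    adjust_branch(&jmp, 6, 2, 2);
    printf("off = %d\n", jmp.off);  /* -5: target is the patch start again */
    return 0;
}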
nextproto6_cksum(netdissect_options *ndo,
const struct ip6_hdr *ip6, const uint8_t *data,
u_int len, u_int covlen, u_int next_proto)
{
struct {
struct in6_addr ph_src;
struct in6_addr ph_dst;
uint32_t ph_len;
uint8_t ph_zero[3];
uint8_t ph_nxt;
} ph;
struct cksum_vec vec[2];
/* pseudo-header */
memset(&ph, 0, sizeof(ph));
UNALIGNED_MEMCPY(&ph.ph_src, &ip6->ip6_src, sizeof (struct in6_addr));
switch (ip6->ip6_nxt) {
case IPPROTO_HOPOPTS:
case IPPROTO_DSTOPTS:
case IPPROTO_MOBILITY_OLD:
case IPPROTO_MOBILITY:
case IPPROTO_FRAGMENT:
case IPPROTO_ROUTING:
/*
* The next header is either a routing header or a header
* after which there might be a routing header, so scan
* for a routing header.
*/
ip6_finddst(ndo, &ph.ph_dst, ip6);
break;
default:
UNALIGNED_MEMCPY(&ph.ph_dst, &ip6->ip6_dst, sizeof (struct in6_addr));
break;
}
ph.ph_len = htonl(len);
ph.ph_nxt = next_proto;
vec[0].ptr = (const uint8_t *)(void *)&ph;
vec[0].len = sizeof(ph);
vec[1].ptr = data;
vec[1].len = covlen;
return in_cksum(vec, 2);
}
| 0 |
[
"CWE-125",
"CWE-787"
] |
tcpdump
|
66df248b49095c261138b5a5e34d341a6bf9ac7f
| 4,623,674,490,121,455,700,000,000,000,000,000,000 | 46 |
CVE-2017-12985/IPv6: Check for print routines returning -1 when running past the end.
rt6_print(), ah_print(), and esp_print() return -1 if they run up
against the end of the packet while dissecting; if that happens, stop
dissecting, don't try to fetch the next header value, because 1) *it*
might be past the end of the packet and 2) we won't be using it in any
case, as we'll be exiting the loop.
Also, change mobility_print() to return -1 if it runs up against the
end of the packet, and stop dissecting if it does so.
This fixes a buffer over-read discovered by Brian 'geeknik' Carpenter.
Add tests using the capture files supplied by the reporter(s).
|
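Separate from the overread fix itself, the checksum plumbing above is worth a sketch: in_cksum() folds a ones'-complement sum over the scattered vectors. Here is a minimal self-contained version under the assumption that each vector may be padded independently (which holds here, since the pseudo-header is an even 40 bytes); the struct name is invented.

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

struct vec { const uint8_t *ptr; size_t len; };

/* Ones'-complement Internet checksum over n scattered buffers. */
static uint16_t cksum_vecs(const struct vec *v, int n)
{
    uint32_t sum = 0;

    for (int i = 0; i < n; i++) {
        const uint8_t *p = v[i].ptr;
        size_t len = v[i].len;

        while (len > 1) {                 /* 16-bit big-endian words */
            sum += (uint32_t)((p[0] << 8) | p[1]);
            p += 2;
            len -= 2;
        }
        if (len)                          /* odd trailing byte, zero-padded */
            sum += (uint32_t)(p[0] << 8);
    }
    while (sum >> 16)                     /* fold carries back in */
        sum = (sum & 0xffff) + (sum >> 16);
    return (uint16_t)~sum;
}

int main(void)
{
    const uint8_t hdr[] = { 0x45, 0x00 }, pay[] = { 0x00, 0x1c };
    struct vec v[2] = { { hdr, sizeof(hdr) }, { pay, sizeof(pay) } };

    printf("0x%04x\n", cksum_vecs(v, 2));  /* 0xbae3 */
    return 0;
}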
FstringParser_check_invariants(FstringParser *state)
{
if (state->last_str)
assert(PyUnicode_CheckExact(state->last_str));
ExprList_check_invariants(&state->expr_list);
}
| 0 |
[
"CWE-125"
] |
cpython
|
a4d78362397fc3bced6ea80fbc7b5f4827aec55e
| 261,284,673,317,336,700,000,000,000,000,000,000,000 | 6 |
bpo-36495: Fix two out-of-bounds array reads (GH-12641)
Research and fix by @bradlarsen.
|
ecma_op_container_free_weakmap_entries (ecma_object_t *object_p, /**< object pointer */
ecma_collection_t *container_p) /**< internal buffer pointer */
{
JERRY_ASSERT (object_p != NULL);
JERRY_ASSERT (container_p != NULL);
uint32_t entry_count = ECMA_CONTAINER_ENTRY_COUNT (container_p);
ecma_value_t *start_p = ECMA_CONTAINER_START (container_p);
for (uint32_t i = 0; i < entry_count; i += ECMA_CONTAINER_PAIR_SIZE)
{
ecma_container_pair_t *entry_p = (ecma_container_pair_t *) (start_p + i);
if (ecma_is_value_empty (entry_p->key))
{
continue;
}
ecma_op_container_unref_weak (ecma_get_object_from_value (entry_p->key), ecma_make_object_value (object_p));
ecma_op_container_remove_weak_entry (object_p, entry_p->key);
ecma_free_value_if_not_object (entry_p->value);
entry_p->key = ECMA_VALUE_EMPTY;
entry_p->value = ECMA_VALUE_EMPTY;
}
} /* ecma_op_container_free_weakmap_entries */
| 0 |
[
"CWE-119",
"CWE-125",
"CWE-703"
] |
jerryscript
|
c2b662170245a16f46ce02eae68815c325d99821
| 113,436,158,557,458,160,000,000,000,000,000,000,000 | 27 |
Fix adding entries to the internal buffer of a Map object (#3805)
When appending the key/value pair separately, garbage collection could be
triggered before the value is added, which could cause problems during
marking. This patch changes insertion to add both values at the same
time, which prevents partial entries from being present in the internal
buffer.
Fixes #3804.
JerryScript-DCO-1.0-Signed-off-by: Dániel Bátyai [email protected]
|
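A minimal sketch of the single-step append the message describes, with hypothetical names (this is not JerryScript's real internal API): the one allocation that could trigger GC happens before either half of the pair is written, so the buffer never holds a key without its value.

#include <stdlib.h>

typedef unsigned int value_t;                 /* stand-in for ecma_value_t */
typedef struct { value_t *items; size_t count; } collection_t;

/* Append a whole key/value pair with a single grow-and-write step. */
static void container_append_pair(collection_t *c, value_t key, value_t value)
{
    value_t *grown = realloc(c->items, (c->count + 2) * sizeof(value_t));

    if (!grown)
        abort();
    grown[c->count] = key;
    grown[c->count + 1] = value;
    c->items = grown;                         /* entry becomes visible whole */
    c->count += 2;
}

int main(void)
{
    collection_t c = { NULL, 0 };

    container_append_pair(&c, 1, 2);
    free(c.items);
    return 0;
}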
vte_sequence_handler_sc (VteTerminal *terminal, GValueArray *params)
{
VteScreen *screen;
screen = terminal->pvt->screen;
screen->cursor_saved.col = screen->cursor_current.col;
screen->cursor_saved.row = CLAMP(screen->cursor_current.row -
screen->insert_delta,
0, terminal->row_count - 1);
}
| 0 |
[] |
vte
|
58bc3a942f198a1a8788553ca72c19d7c1702b74
| 10,717,473,464,638,068,000,000,000,000,000,000,000 | 9 |
fix bug #548272
svn path=/trunk/; revision=2365
|
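The fix hinges on the CLAMP: the saved row is stored relative to insert_delta and pinned to the visible range, so a later cursor restore cannot land off-screen. A standalone illustration with invented values (CLAMP spelled out, since it comes from GLib):

#include <stdio.h>

#define CLAMP(x, lo, hi) ((x) < (lo) ? (lo) : ((x) > (hi) ? (hi) : (x)))

int main(void)
{
    long cursor_row = 130, insert_delta = 100, row_count = 24;

    /* Without the clamp this would save row 30 of a 24-row screen. */
    long saved_row = CLAMP(cursor_row - insert_delta, 0, row_count - 1);
    printf("saved row = %ld\n", saved_row);    /* 23: pinned to last row */
    return 0;
}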
c_weeknum_to_jd(int y, int w, int d, int f, double sg, int *rjd, int *ns)
{
int rjd2, ns2;
c_find_fdoy(y, sg, &rjd2, &ns2);
rjd2 += 6;
*rjd = (rjd2 - MOD(((rjd2 - f) + 1), 7) - 7) + 7 * w + d;
*ns = (*rjd < sg) ? 0 : 1;
}
| 0 |
[] |
date
|
3959accef8da5c128f8a8e2fd54e932a4fb253b0
| 112,960,364,535,284,050,000,000,000,000,000,000,000 | 9 |
Add length limit option for methods that parses date strings
`Date.parse` now raises an ArgumentError when a given date string is
longer than 128 characters. You can configure the limit by passing the
`limit` keyword argument, like `Date.parse(str, limit: 1000)`. If you
pass `limit: nil`, the limit is disabled.
Not only `Date.parse` but also the following methods are changed.
* Date._parse
* Date.parse
* DateTime.parse
* Date._iso8601
* Date.iso8601
* DateTime.iso8601
* Date._rfc3339
* Date.rfc3339
* DateTime.rfc3339
* Date._xmlschema
* Date.xmlschema
* DateTime.xmlschema
* Date._rfc2822
* Date.rfc2822
* DateTime.rfc2822
* Date._rfc822
* Date.rfc822
* DateTime.rfc822
* Date._jisx0301
* Date.jisx0301
* DateTime.jisx0301
|
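One detail of c_weeknum_to_jd() above that is easy to misread: the MOD must be a floored modulo (result always in [0, m)) for the week arithmetic to hold, which C's built-in % does not guarantee for negative operands. A standalone comparison, assuming MOD is defined that way:

#include <stdio.h>

/* Floored modulo: result always in [0, m) for m > 0. */
static int mod_floor(int a, int m)
{
    int r = a % m;
    return r < 0 ? r + m : r;
}

int main(void)
{
    printf("mod_floor(-1, 7) = %d, -1 %% 7 = %d\n",
           mod_floor(-1, 7), -1 % 7);          /* 6 vs -1 */
    return 0;
}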
select_send(THD *thd_arg):
select_result(thd_arg), is_result_set_started(FALSE) {}
| 0 |
[
"CWE-416"
] |
server
|
4681b6f2d8c82b4ec5cf115e83698251963d80d5
| 133,610,610,724,069,560,000,000,000,000,000,000,000 | 2 |
MDEV-26281 ASAN use-after-poison when complex conversion is involved in blob
The bug was that the in_vector array in Item_func_in was allocated in
the statement arena, not in table->expr_arena.
Revert part of 5acd391e8b2d. Instead, change the arena correctly
in fix_all_session_vcol_exprs().
Remove TABLE_ARENA, that was introduced in 5acd391e8b2d to force
item tree changes to be rolled back (because they were allocated in the
wrong arena and didn't persist. now they do)
|
static void put_nodelist_members_to_config(struct totem_config *totem_config)
{
icmap_iter_t iter, iter2;
const char *iter_key, *iter_key2;
int res = 0;
int node_pos;
char tmp_key[ICMAP_KEYNAME_MAXLEN];
char tmp_key2[ICMAP_KEYNAME_MAXLEN];
char *node_addr_str;
int member_count;
unsigned int ringnumber = 0;
iter = icmap_iter_init("nodelist.node.");
while ((iter_key = icmap_iter_next(iter, NULL, NULL)) != NULL) {
res = sscanf(iter_key, "nodelist.node.%u.%s", &node_pos, tmp_key);
if (res != 2) {
continue;
}
if (strcmp(tmp_key, "ring0_addr") != 0) {
continue;
}
snprintf(tmp_key, ICMAP_KEYNAME_MAXLEN, "nodelist.node.%u.", node_pos);
iter2 = icmap_iter_init(tmp_key);
while ((iter_key2 = icmap_iter_next(iter2, NULL, NULL)) != NULL) {
res = sscanf(iter_key2, "nodelist.node.%u.ring%u%s", &node_pos, &ringnumber, tmp_key2);
if (res != 3 || strcmp(tmp_key2, "_addr") != 0) {
continue;
}
if (icmap_get_string(iter_key2, &node_addr_str) != CS_OK) {
continue;
}
member_count = totem_config->interfaces[ringnumber].member_count;
res = totemip_parse(&totem_config->interfaces[ringnumber].member_list[member_count],
node_addr_str, totem_config->ip_version);
if (res != -1) {
totem_config->interfaces[ringnumber].member_count++;
}
free(node_addr_str);
}
icmap_iter_finalize(iter2);
}
icmap_iter_finalize(iter);
}
| 0 |
[] |
corosync
|
55dc09ea237482f827333759fd45608bc9518d64
| 302,915,393,577,945,900,000,000,000,000,000,000,000 | 50 |
totemconfig: enforce hmac config when crypto is enabled
Signed-off-by: Fabio M. Di Nitto <[email protected]>
Reviewed-by: Jan Friesse <[email protected]>
|
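A standalone demo of the key parsing put_nodelist_members_to_config() relies on; the key string is invented, and %63s bounds the write where the original trusts ICMAP_KEYNAME_MAXLEN:

#include <stdio.h>
#include <string.h>

int main(void)
{
    const char *key = "nodelist.node.3.ring1_addr";
    unsigned int node_pos, ringnumber;
    char suffix[64];

    /* Pull out the node index and ring number, then verify the
     * trailing literal separately, mirroring the strcmp guard above. */
    if (sscanf(key, "nodelist.node.%u.ring%u%63s",
               &node_pos, &ringnumber, suffix) == 3 &&
        strcmp(suffix, "_addr") == 0)
        printf("node %u, ring %u\n", node_pos, ringnumber);
    return 0;
}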
check_leafref_features(struct lys_type *type)
{
struct lys_node *iter;
struct ly_set *src_parents, *trg_parents, *features;
struct lys_node_augment *aug;
struct ly_ctx *ctx = ((struct lys_tpdf *)type->parent)->module->ctx;
unsigned int i, j, size, x;
int ret = EXIT_SUCCESS;
assert(type->parent);
src_parents = ly_set_new();
trg_parents = ly_set_new();
features = ly_set_new();
/* get parents chain of source (leafref) */
for (iter = (struct lys_node *)type->parent; iter; iter = lys_parent(iter)) {
if (iter->nodetype & (LYS_INPUT | LYS_OUTPUT)) {
continue;
}
if (iter->parent && (iter->parent->nodetype == LYS_AUGMENT)) {
aug = (struct lys_node_augment *)iter->parent;
if ((aug->module->implemented && (aug->flags & LYS_NOTAPPLIED)) || !aug->target) {
/* unresolved augment, wait until it's resolved */
LOGVAL(ctx, LYE_SPEC, LY_VLOG_LYS, aug,
"Cannot check leafref \"%s\" if-feature consistency because of an unresolved augment.", type->info.lref.path);
ret = EXIT_FAILURE;
goto cleanup;
}
/* also add this augment */
ly_set_add(src_parents, aug, LY_SET_OPT_USEASLIST);
}
ly_set_add(src_parents, iter, LY_SET_OPT_USEASLIST);
}
/* get parents chain of target */
for (iter = (struct lys_node *)type->info.lref.target; iter; iter = lys_parent(iter)) {
if (iter->nodetype & (LYS_INPUT | LYS_OUTPUT)) {
continue;
}
if (iter->parent && (iter->parent->nodetype == LYS_AUGMENT)) {
aug = (struct lys_node_augment *)iter->parent;
if ((aug->module->implemented && (aug->flags & LYS_NOTAPPLIED)) || !aug->target) {
/* unresolved augment, wait until it's resolved */
LOGVAL(ctx, LYE_SPEC, LY_VLOG_LYS, aug,
"Cannot check leafref \"%s\" if-feature consistency because of an unresolved augment.", type->info.lref.path);
ret = EXIT_FAILURE;
goto cleanup;
}
}
ly_set_add(trg_parents, iter, LY_SET_OPT_USEASLIST);
}
/* compare the features used in if-feature statements in the rest of both
* chains of parents. The set of features used for the target must be a
* subset of the features used for the leafref. This is not perfect; we
* should compare the truth tables, but that could require too many
* resources, so we simplify */
for (i = 0; i < src_parents->number; i++) {
iter = src_parents->set.s[i]; /* shortcut */
if (!iter->iffeature_size) {
continue;
}
for (j = 0; j < iter->iffeature_size; j++) {
resolve_iffeature_getsizes(&iter->iffeature[j], NULL, &size);
for (; size; size--) {
if (!iter->iffeature[j].features[size - 1]) {
/* not yet resolved feature, postpone this check */
ret = EXIT_FAILURE;
goto cleanup;
}
ly_set_add(features, iter->iffeature[j].features[size - 1], 0);
}
}
}
x = features->number;
for (i = 0; i < trg_parents->number; i++) {
iter = trg_parents->set.s[i]; /* shortcut */
if (!iter->iffeature_size) {
continue;
}
for (j = 0; j < iter->iffeature_size; j++) {
resolve_iffeature_getsizes(&iter->iffeature[j], NULL, &size);
for (; size; size--) {
if (!iter->iffeature[j].features[size - 1]) {
/* not yet resolved feature, postpone this check */
ret = EXIT_FAILURE;
goto cleanup;
}
if ((unsigned)ly_set_add(features, iter->iffeature[j].features[size - 1], 0) >= x) {
/* the feature is not present in features set of target's parents chain */
LOGVAL(ctx, LYE_SPEC, LY_VLOG_LYS, type->parent,
"Leafref is not conditional based on \"%s\" feature as its target.",
iter->iffeature[j].features[size - 1]->name);
for (iter = type->info.lref.target->parent; iter && (iter->nodetype != LYS_USES); iter = lys_parent(iter));
if (iter) {
/* we are in a uses so there can still be a refine that will add an if-feature */
ret = EXIT_FAILURE;
} else {
ret = -1;
}
goto cleanup;
}
}
}
}
cleanup:
ly_set_free(features);
ly_set_free(src_parents);
ly_set_free(trg_parents);
return ret;
}
| 0 |
[
"CWE-617"
] |
libyang
|
5ce30801f9ccc372bbe9b7c98bb5324b15fb010a
| 22,038,409,788,329,840,000,000,000,000,000,000,000 | 112 |
schema tree BUGFIX freeing nodes with no module set
Context must be passed explicitly for these cases.
Fixes #1452
|
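The subset test in check_leafref_features() leans on ly_set_add() returning the index of an already-present element: source features are inserted first, x records the set size, and any target-feature insertion that lands at index >= x exposes a feature missing on the leafref side. A toy standalone version of the trick:

#include <stdio.h>
#include <string.h>

/* Toy set: return the index of an existing item, or append it and
 * return the new index (which is >= the size before the insert). */
static int set_add(const char *set[], int *n, const char *item)
{
    for (int i = 0; i < *n; i++)
        if (strcmp(set[i], item) == 0)
            return i;
    set[*n] = item;
    return (*n)++;
}

int main(void)
{
    const char *set[16];
    int n = 0;
    const char *src[] = { "f1", "f2" };   /* features guarding the leafref */
    const char *trg[] = { "f1", "f3" };   /* features guarding its target */

    for (int i = 0; i < 2; i++)
        set_add(set, &n, src[i]);
    int x = n;                            /* everything below x came from src */
    for (int i = 0; i < 2; i++)
        if (set_add(set, &n, trg[i]) >= x)
            printf("\"%s\" is not a condition on the leafref side\n", trg[i]);
    return 0;
}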
static void phar_do_403(char *entry, int entry_len) /* {{{ */
{
sapi_header_line ctr = {0};
ctr.response_code = 403;
ctr.line_len = sizeof("HTTP/1.0 403 Access Denied")-1;
ctr.line = "HTTP/1.0 403 Access Denied";
sapi_header_op(SAPI_HEADER_REPLACE, &ctr);
sapi_send_headers();
PHPWRITE("<html>\n <head>\n <title>Access Denied</title>\n </head>\n <body>\n <h1>403 - File ", sizeof("<html>\n <head>\n <title>Access Denied</title>\n </head>\n <body>\n <h1>403 - File ") - 1);
PHPWRITE(entry, entry_len);
PHPWRITE(" Access Denied</h1>\n </body>\n</html>", sizeof(" Access Denied</h1>\n </body>\n</html>") - 1);
}
| 0 |
[
"CWE-20"
] |
php-src
|
1e9b175204e3286d64dfd6c9f09151c31b5e099a
| 307,382,991,566,002,240,000,000,000,000,000,000,000 | 13 |
Fix bug #71860: Require valid paths for phar filenames
|
g_file_eject_mountable_finish (GFile *file,
GAsyncResult *result,
GError **error)
{
GFileIface *iface;
g_return_val_if_fail (G_IS_FILE (file), FALSE);
g_return_val_if_fail (G_IS_ASYNC_RESULT (result), FALSE);
if (g_async_result_legacy_propagate_error (result, error))
return FALSE;
else if (g_async_result_is_tagged (result, g_file_eject_mountable_with_operation))
return g_task_propagate_boolean (G_TASK (result), error);
iface = G_FILE_GET_IFACE (file);
return (* iface->eject_mountable_finish) (file, result, error);
}
| 0 |
[
"CWE-362"
] |
glib
|
d8f8f4d637ce43f8699ba94c9b7648beda0ca174
| 35,701,614,245,101,603,000,000,000,000,000,000,000 | 17 |
gfile: Limit access to files when copying
file_copy_fallback creates new files with default permissions and
sets the correct permissions after the operation is finished. This
might make the files accessible to more users during the operation
than expected. Use G_FILE_CREATE_PRIVATE for the new files to limit
access to those files.
|