func (string) | target (int64) | cwe (list) | project (string) | commit_id (string) | hash (float64) | size (int64) | message (string)
---|---|---|---|---|---|---|---|
check_subactions(struct ofpact *ofpacts, size_t ofpacts_len,
struct ofpact_check_params *cp)
{
struct ofpact_check_params sub = *cp;
enum ofperr error = ofpacts_check(ofpacts, ofpacts_len, &sub);
cp->usable_protocols &= sub.usable_protocols;
return error;
} | 0 | [
"CWE-416"
]
| ovs | 77cccc74deede443e8b9102299efc869a52b65b2 | 47,495,873,983,618,280,000,000,000,000,000,000,000 | 8 | ofp-actions: Fix use-after-free while decoding RAW_ENCAP.
While decoding RAW_ENCAP action, decode_ed_prop() might re-allocate
ofpbuf if there is not enough space left. However, function
'decode_NXAST_RAW_ENCAP' continues to use the old pointer to the 'encap'
structure, leading to a write-after-free and incorrect decoding.
==3549105==ERROR: AddressSanitizer: heap-use-after-free on address
0x60600000011a at pc 0x0000005f6cc6 bp 0x7ffc3a2d4410 sp 0x7ffc3a2d4408
WRITE of size 2 at 0x60600000011a thread T0
#0 0x5f6cc5 in decode_NXAST_RAW_ENCAP lib/ofp-actions.c:4461:20
#1 0x5f0551 in ofpact_decode ./lib/ofp-actions.inc2:4777:16
#2 0x5ed17c in ofpacts_decode lib/ofp-actions.c:7752:21
#3 0x5eba9a in ofpacts_pull_openflow_actions__ lib/ofp-actions.c:7791:13
#4 0x5eb9fc in ofpacts_pull_openflow_actions lib/ofp-actions.c:7835:12
#5 0x64bb8b in ofputil_decode_packet_out lib/ofp-packet.c:1113:17
#6 0x65b6f4 in ofp_print_packet_out lib/ofp-print.c:148:13
#7 0x659e3f in ofp_to_string__ lib/ofp-print.c:1029:16
#8 0x659b24 in ofp_to_string lib/ofp-print.c:1244:21
#9 0x65a28c in ofp_print lib/ofp-print.c:1288:28
#10 0x540d11 in ofctl_ofp_parse utilities/ovs-ofctl.c:2814:9
#11 0x564228 in ovs_cmdl_run_command__ lib/command-line.c:247:17
#12 0x56408a in ovs_cmdl_run_command lib/command-line.c:278:5
#13 0x5391ae in main utilities/ovs-ofctl.c:179:9
#14 0x7f6911ce9081 in __libc_start_main (/lib64/libc.so.6+0x27081)
#15 0x461fed in _start (utilities/ovs-ofctl+0x461fed)
Fix that by getting a new pointer before using it.
Credit to OSS-Fuzz.
Fuzzer regression test will fail only with AddressSanitizer enabled.
Reported-at: https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=27851
Fixes: f839892a206a ("OF support and translation of generic encap and decap")
Acked-by: William Tu <[email protected]>
Signed-off-by: Ilya Maximets <[email protected]> |
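The failure mode here is a generic C hazard: any pointer into a growable buffer dangles once the buffer reallocates. Below is a minimal self-contained sketch of the pattern and of the commit's fix idea (re-deriving the pointer after the growth), using a toy buffer rather than the real ofpbuf API:

```c
#include <stdlib.h>

/* Toy growable buffer standing in for OVS's ofpbuf (illustrative only;
 * error handling elided). */
struct buf {
    char  *data;
    size_t used, cap;
};

/* Reserve n bytes. Growing may move the allocation, so every pointer
 * obtained before this call may dangle afterwards. */
static void *buf_put(struct buf *b, size_t n)
{
    if (b->used + n > b->cap) {
        b->cap = (b->used + n) * 2;
        b->data = realloc(b->data, b->cap);
    }
    void *p = b->data + b->used;
    b->used += n;
    return p;
}

static void decode_encap(struct buf *b)
{
    struct hdr { unsigned short n_props; };
    size_t ofs = b->used;                      /* remember the offset */
    struct hdr *encap = buf_put(b, sizeof(struct hdr));

    buf_put(b, 128);                           /* may realloc 'b->data' */

    /* Writing through the stale 'encap' here is the use-after-free.
     * The fix's idea: recompute the pointer from the buffer first. */
    encap = (struct hdr *)(b->data + ofs);
    encap->n_props = 1;
    (void)encap;
}
```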
static inline void show_saved_mc(void)
{
} | 0 | [
"CWE-119",
"CWE-787"
]
| linux | f84598bd7c851f8b0bf8cd0d7c3be0d73c432ff4 | 143,668,854,549,246,870,000,000,000,000,000,000,000 | 3 | x86/microcode/intel: Guard against stack overflow in the loader
mc_saved_tmp is a static array allocated on the stack, we need to make
sure mc_saved_count stays within its bounds, otherwise we're overflowing
the stack in _save_mc(). A specially crafted microcode header could lead
to a kernel crash or potentially kernel execution.
Signed-off-by: Quentin Casasnovas <[email protected]>
Cc: "H. Peter Anvin" <[email protected]>
Cc: Fenghua Yu <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Borislav Petkov <[email protected]> |
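A hedged sketch of the guard this message calls for: bound the save index by the fixed array's capacity before writing, rather than trusting a count derived from an untrusted microcode header (the names and the limit are illustrative, not the real loader's):

```c
#include <stddef.h>

#define MAX_UCODE_PATCHES 128   /* assumed stand-in for the real limit */

struct microcode { int header; };

/* Refuse to store past the end of the fixed-size on-stack array. */
static int save_mc_guarded(struct microcode *saved[MAX_UCODE_PATCHES],
                           unsigned int *mc_saved_count,
                           struct microcode *mc)
{
    if (*mc_saved_count >= MAX_UCODE_PATCHES)
        return -1;              /* would overflow the stack array */
    saved[(*mc_saved_count)++] = mc;
    return 0;
}
```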
int sqlite3VdbeCurrentAddr(Vdbe *p){
assert( p->magic==VDBE_MAGIC_INIT );
return p->nOp;
} | 0 | [
"CWE-755"
]
| sqlite | 8654186b0236d556aa85528c2573ee0b6ab71be3 | 31,950,536,446,388,317,000,000,000,000,000,000,000 | 4 | When an error occurs while rewriting the parser tree for window functions
in the sqlite3WindowRewrite() routine, make sure that pParse->nErr is set,
and make sure that this shuts down any subsequent code generation that might
depend on the transformations that were implemented. This fixes a problem
discovered by the Yongheng and Rui fuzzer.
FossilOrigin-Name: e2bddcd4c55ba3cbe0130332679ff4b048630d0ced9a8899982edb5a3569ba7f |
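A minimal sketch of the propagation scheme described here, assuming only the pParse->nErr field the message names (the surrounding structure and helpers are illustrative, not SQLite's actual code):

```c
/* Error state lives on the parse context; later code-generation stages
 * consult it and bail out instead of acting on a half-rewritten tree. */
struct Parse { int nErr; };

static void window_rewrite_failed(struct Parse *pParse)
{
    pParse->nErr++;             /* record the failure */
}

static int codegen_should_run(const struct Parse *pParse)
{
    return pParse->nErr == 0;   /* shut down generation after any error */
}
```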
static int irda_recvmsg_stream(struct socket *sock, struct msghdr *msg,
size_t size, int flags)
{
struct sock *sk = sock->sk;
struct irda_sock *self = irda_sk(sk);
int noblock = flags & MSG_DONTWAIT;
size_t copied = 0;
int target, err;
long timeo;
if ((err = sock_error(sk)) < 0)
return err;
if (sock->flags & __SO_ACCEPTCON)
return -EINVAL;
err = -EOPNOTSUPP;
if (flags & MSG_OOB)
return -EOPNOTSUPP;
err = 0;
target = sock_rcvlowat(sk, flags & MSG_WAITALL, size);
timeo = sock_rcvtimeo(sk, noblock);
do {
int chunk;
struct sk_buff *skb = skb_dequeue(&sk->sk_receive_queue);
if (skb == NULL) {
DEFINE_WAIT(wait);
err = 0;
if (copied >= target)
break;
prepare_to_wait_exclusive(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
/*
* POSIX 1003.1g mandates this order.
*/
err = sock_error(sk);
if (err)
;
else if (sk->sk_shutdown & RCV_SHUTDOWN)
;
else if (noblock)
err = -EAGAIN;
else if (signal_pending(current))
err = sock_intr_errno(timeo);
else if (sk->sk_state != TCP_ESTABLISHED)
err = -ENOTCONN;
else if (skb_peek(&sk->sk_receive_queue) == NULL)
/* Wait process until data arrives */
schedule();
finish_wait(sk_sleep(sk), &wait);
if (err)
return err;
if (sk->sk_shutdown & RCV_SHUTDOWN)
break;
continue;
}
chunk = min_t(unsigned int, skb->len, size);
if (memcpy_to_msg(msg, skb->data, chunk)) {
skb_queue_head(&sk->sk_receive_queue, skb);
if (copied == 0)
copied = -EFAULT;
break;
}
copied += chunk;
size -= chunk;
/* Mark read part of skb as used */
if (!(flags & MSG_PEEK)) {
skb_pull(skb, chunk);
/* put the skb back if we didn't use it up.. */
if (skb->len) {
pr_debug("%s(), back on q!\n",
__func__);
skb_queue_head(&sk->sk_receive_queue, skb);
break;
}
kfree_skb(skb);
} else {
pr_debug("%s() questionable!?\n", __func__);
/* put message back and return */
skb_queue_head(&sk->sk_receive_queue, skb);
break;
}
} while (size);
/*
* Check if we have previously stopped IrTTP and we now
* have more free space in our rx_queue. If so tell IrTTP
* to start delivering frames again before our rx_queue gets
* empty
*/
if (self->rx_flow == FLOW_STOP) {
if ((atomic_read(&sk->sk_rmem_alloc) << 2) <= sk->sk_rcvbuf) {
pr_debug("%s(), Starting IrTTP\n", __func__);
self->rx_flow = FLOW_START;
irttp_flow_request(self->tsap, FLOW_START);
}
}
return copied;
} | 0 | []
| net | 79462ad02e861803b3840cc782248c7359451cd9 | 292,123,127,260,560,320,000,000,000,000,000,000,000 | 113 | net: add validation for the socket syscall protocol argument
郭永刚 reported that one could simply crash the kernel as root by
using a simple program:
int socket_fd;
struct sockaddr_in addr;
addr.sin_port = 0;
addr.sin_addr.s_addr = INADDR_ANY;
addr.sin_family = 10;
socket_fd = socket(10,3,0x40000000);
connect(socket_fd , &addr,16);
AF_INET, AF_INET6 sockets actually only support 8-bit protocol
identifiers. inet_sock's skc_protocol field thus is sized accordingly,
thus larger protocol identifiers simply cut off the higher bits and
store a zero in the protocol fields.
This could lead to e.g. NULL function pointer because as a result of
the cut off inet_num is zero and we call down to inet_autobind, which
is NULL for raw sockets.
kernel: Call Trace:
kernel: [<ffffffff816db90e>] ? inet_autobind+0x2e/0x70
kernel: [<ffffffff816db9a4>] inet_dgram_connect+0x54/0x80
kernel: [<ffffffff81645069>] SYSC_connect+0xd9/0x110
kernel: [<ffffffff810ac51b>] ? ptrace_notify+0x5b/0x80
kernel: [<ffffffff810236d8>] ? syscall_trace_enter_phase2+0x108/0x200
kernel: [<ffffffff81645e0e>] SyS_connect+0xe/0x10
kernel: [<ffffffff81779515>] tracesys_phase2+0x84/0x89
I found no particular commit which introduced this problem.
CVE: CVE-2015-8543
Cc: Cong Wang <[email protected]>
Reported-by: 郭永刚 <[email protected]>
Signed-off-by: Hannes Frederic Sowa <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
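The shape of the fix, hedged as a sketch: the actual patch adds an equivalent range check to the per-family create functions (e.g. inet_create()), rejecting values that cannot fit the 8-bit field:

```c
#include <linux/errno.h>

#define SK_PROTOCOL_MAX 255     /* sk_protocol is an 8-bit field */

/* Reject protocol identifiers that would be silently truncated when
 * stored (e.g. 0x40000000 becomes 0, later tripping inet_autobind). */
static int validate_socket_protocol(int protocol)
{
    if (protocol < 0 || protocol > SK_PROTOCOL_MAX)
        return -EINVAL;
    return 0;
}
```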
static int __init ib_cm_init(void)
{
int ret;
memset(&cm, 0, sizeof cm);
INIT_LIST_HEAD(&cm.device_list);
rwlock_init(&cm.device_lock);
spin_lock_init(&cm.lock);
cm.listen_service_table = RB_ROOT;
cm.listen_service_id = be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID);
cm.remote_id_table = RB_ROOT;
cm.remote_qp_table = RB_ROOT;
cm.remote_sidr_table = RB_ROOT;
idr_init(&cm.local_id_table);
get_random_bytes(&cm.random_id_operand, sizeof cm.random_id_operand);
INIT_LIST_HEAD(&cm.timewait_list);
ret = class_register(&cm_class);
if (ret) {
ret = -ENOMEM;
goto error1;
}
cm.wq = create_workqueue("ib_cm");
if (!cm.wq) {
ret = -ENOMEM;
goto error2;
}
ret = ib_register_client(&cm_client);
if (ret)
goto error3;
return 0;
error3:
destroy_workqueue(cm.wq);
error2:
class_unregister(&cm_class);
error1:
idr_destroy(&cm.local_id_table);
return ret;
} | 0 | [
"CWE-20"
]
| linux | b2853fd6c2d0f383dbdf7427e263eb576a633867 | 210,056,949,206,637,460,000,000,000,000,000,000,000 | 42 | IB/core: Don't resolve passive side RoCE L2 address in CMA REQ handler
The code that resolves the passive side source MAC within the rdma_cm
connection request handler was both redundant and buggy, so remove it.
It was redundant since later, when an RC QP is modified to RTR state,
the resolution will take place in the ib_core module. It was buggy
because this callback also deals with UD SIDR exchange, for which we
incorrectly looked at the REQ member of the CM event and dereferenced
a random value.
Fixes: dd5f03beb4f7 ("IB/core: Ethernet L2 attributes in verbs/cm structures")
Signed-off-by: Moni Shoua <[email protected]>
Signed-off-by: Or Gerlitz <[email protected]>
Signed-off-by: Roland Dreier <[email protected]> |
static struct mem_input *dce80_mem_input_create(
struct dc_context *ctx,
uint32_t inst)
{
struct dce_mem_input *dce_mi = kzalloc(sizeof(struct dce_mem_input),
GFP_KERNEL);
if (!dce_mi) {
BREAK_TO_DEBUGGER();
return NULL;
}
dce_mem_input_construct(dce_mi, ctx, inst, &mi_regs[inst], &mi_shifts, &mi_masks);
dce_mi->wa.single_head_rdreq_dmif_limit = 2;
return &dce_mi->base;
} | 0 | [
"CWE-400",
"CWE-703",
"CWE-401"
]
| linux | 055e547478a11a6360c7ce05e2afc3e366968a12 | 340,118,314,374,401,560,000,000,000,000,000,000,000 | 16 | drm/amd/display: memory leak
In dcn*_clock_source_create when dcn20_clk_src_construct fails allocated
clk_src needs release.
Signed-off-by: Navid Emamdoost <[email protected]>
Signed-off-by: Alex Deucher <[email protected]> |
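An illustrative kernel-C sketch of the leak pattern and the one-line fix: free the allocation on the constructor's failure path ('clk_src_construct' stands in for dcn20_clk_src_construct; the struct is made up):

```c
#include <linux/slab.h>
#include <linux/types.h>

struct clk_src_sketch { int state; };

bool clk_src_construct(struct clk_src_sketch *src);   /* assumed helper */

static struct clk_src_sketch *clock_source_create(void)
{
    struct clk_src_sketch *src = kzalloc(sizeof(*src), GFP_KERNEL);

    if (!src)
        return NULL;

    if (!clk_src_construct(src)) {
        kfree(src);       /* the added release: without it, 'src' leaks */
        return NULL;
    }
    return src;
}
```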
port::Status CudnnSupport::DoConvolve(
dnn::ConvolutionKind kind, dnn::DataType element_type,
dnn::DataType output_type, Stream* stream,
const dnn::BatchDescriptor& input_descriptor, DeviceMemoryBase input_data,
const dnn::FilterDescriptor& filter_descriptor,
DeviceMemoryBase filter_data, const dnn::BatchDescriptor& output_descriptor,
DeviceMemoryBase output_data,
const dnn::ConvolutionDescriptor& convolution_descriptor,
dnn::AlgorithmDesc algorithm_desc, DeviceMemory<uint8> scratch_memory,
dnn::ProfileResult* output_profile_result) {
cudnnDataType_t cudnn_type = ToCudnnDataType(element_type);
CudnnTensorDescriptor input_nd(input_descriptor, cudnn_type);
CudnnTensorDescriptor output_nd(output_descriptor,
ToCudnnDataType(output_type));
CudnnFilterDescriptor filter_nd(filter_descriptor, cudnn_type);
auto accumulator_type = GetConvAccumulatorType(element_type);
CudnnConvolutionDescriptor conv(convolution_descriptor,
ToCudnnDataType(accumulator_type));
SE_ASSIGN_OR_RETURN(bool use_tensor_ops,
UseTensorOps(stream, element_type, algorithm_desc));
conv.set_use_tensor_op_math(use_tensor_ops);
auto cudnn = cudnn_->GetHandle(parent_, stream);
// Alpha is the scaling factor for input.
float falpha = 1.0;
double dalpha = 1.0;
void* alpha = cudnn_type == CUDNN_DATA_DOUBLE ? static_cast<void*>(&dalpha)
: static_cast<void*>(&falpha);
// Beta is the scaling factor for output.
float fbeta = 0.0;
double dbeta = 0.0;
void* beta = cudnn_type == CUDNN_DATA_DOUBLE ? static_cast<void*>(&dbeta)
: static_cast<void*>(&fbeta);
const bool is_profiling = output_profile_result != nullptr;
std::unique_ptr<GpuTimer, GpuTimerDeleter> timer;
if (is_profiling) {
timer.reset(new GpuTimer(parent_)); // NOLINT
// The start and stop of the timer should be as close to the Cudnn call as
// possible. It is still possible for other threads to issue workload on
// to this stream. So it could take multiple profiling measurements.
if (!timer->Init() || !timer->Start(AsGpuStream(stream))) {
return port::Status(port::error::INTERNAL, "Failed to start timer");
}
}
const auto get_fwd_bugs = [&]() -> port::Status {
if (CUDNN_VERSION < 8000) {
if (algorithm_desc.algo_id() ==
CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM &&
ToCudnnDataType(element_type) == CUDNN_DATA_INT8 &&
ToCudnnDataType(output_type) == CUDNN_DATA_FLOAT) {
return port::Status(
port::error::FAILED_PRECONDITION,
"This configuration potentially produces incorrect results.");
}
}
return port::Status::OK();
};
auto get_bwd_data_bugs = [&]() -> port::Status {
return port::Status::OK();
};
const auto get_bwd_filter_bugs = [&]() -> port::Status {
return port::Status::OK();
};
switch (kind) {
case dnn::ConvolutionKind::FORWARD: {
SE_RETURN_IF_ERROR(get_fwd_bugs());
RETURN_IF_CUDNN_ERROR(cudnnConvolutionForward(
cudnn.handle(),
/*alpha=*/alpha, /*srcDesc=*/input_nd.handle(),
/*srcData=*/input_data.opaque(), /*filterDesc=*/filter_nd.handle(),
/*filterData=*/filter_data.opaque(), /*convDesc=*/conv.handle(),
/*algo=*/ToConvForwardAlgo(algorithm_desc),
/*workSpace=*/scratch_memory.opaque(),
/*workSpaceSizeInBytes=*/scratch_memory.size(), /*beta=*/beta,
/*yDesc=*/output_nd.handle(), /*y=*/output_data.opaque()));
break;
}
case dnn::ConvolutionKind::BACKWARD_DATA: {
SE_RETURN_IF_ERROR(get_bwd_data_bugs());
RETURN_IF_CUDNN_ERROR(cudnnConvolutionBackwardData(
cudnn.handle(),
/*alpha=*/alpha,
/*wDesc=*/filter_nd.handle(),
/*w=*/filter_data.opaque(),
/*dyDesc=*/output_nd.handle(),
/*dy=*/output_data.opaque(),
/*convDesc=*/conv.handle(),
/*algo=*/ToConvBackwardDataAlgo(algorithm_desc),
/*workSpace=*/scratch_memory.opaque(),
/*workSpaceSizeInBytes=*/scratch_memory.size(),
/*beta=*/beta,
/*dxDesc=*/input_nd.handle(),
/*dx=*/input_data.opaque()));
break;
}
case dnn::ConvolutionKind::BACKWARD_FILTER: {
SE_RETURN_IF_ERROR(get_bwd_filter_bugs());
RETURN_IF_CUDNN_ERROR(cudnnConvolutionBackwardFilter(
cudnn.handle(),
/*alpha=*/alpha,
/*srcDesc=*/input_nd.handle(),
/*srcData=*/input_data.opaque(),
/*diffDesc=*/output_nd.handle(),
/*diffData=*/output_data.opaque(),
/*convDesc=*/conv.handle(),
/*algo=*/ToConvBackwardFilterAlgo(algorithm_desc),
/*workSpace=*/scratch_memory.opaque(),
/*workSpaceSizeInBytes=*/scratch_memory.size(),
/*beta=*/beta,
/*gradDesc=*/filter_nd.handle(),
/*dw=*/filter_data.opaque()));
break;
}
default:
return port::InternalError(
absl::StrCat("Unexpected convolution kind ", static_cast<int>(kind)));
}
if (is_profiling) {
if (!timer->Stop(AsGpuStream(stream))) {
return port::Status(port::error::INTERNAL, "Failed to stop timer");
}
output_profile_result->set_algorithm(algorithm_desc);
output_profile_result->set_elapsed_time_in_ms(
timer->GetElapsedMilliseconds());
output_profile_result->set_scratch_size(scratch_memory.size());
}
return port::Status::OK();
} | 0 | [
"CWE-20"
]
| tensorflow | 14755416e364f17fb1870882fa778c7fec7f16e3 | 252,228,671,724,674,460,000,000,000,000,000,000,000 | 136 | Prevent CHECK-fail in LSTM/GRU with zero-length input.
PiperOrigin-RevId: 346239181
Change-Id: I5f233dbc076aab7bb4e31ba24f5abd4eaf99ea4f |
bool ldb_dn_remove_base_components(struct ldb_dn *dn, unsigned int num)
{
unsigned int i;
if ( ! ldb_dn_validate(dn)) {
return false;
}
if (dn->comp_num < num) {
return false;
}
/* free components */
for (i = dn->comp_num - num; i < dn->comp_num; i++) {
LDB_FREE(dn->components[i].name);
LDB_FREE(dn->components[i].value.data);
LDB_FREE(dn->components[i].cf_name);
LDB_FREE(dn->components[i].cf_value.data);
}
dn->comp_num -= num;
if (dn->valid_case) {
for (i = 0; i < dn->comp_num; i++) {
LDB_FREE(dn->components[i].cf_name);
LDB_FREE(dn->components[i].cf_value.data);
}
dn->valid_case = false;
}
LDB_FREE(dn->casefold);
LDB_FREE(dn->linearized);
/* Wipe the ext_linearized DN,
* the GUID and SID are almost certainly no longer valid */
LDB_FREE(dn->ext_linearized);
LDB_FREE(dn->ext_components);
dn->ext_comp_num = 0;
return true;
} | 0 | [
"CWE-200"
]
| samba | 7f51ec8c4ed9ba1f53d722e44fb6fb3cde933b72 | 264,951,261,180,068,530,000,000,000,000,000,000,000 | 41 | CVE-2015-5330: ldb_dn: simplify and fix ldb_dn_escape_internal()
Previously we relied on NUL terminated strings and jumped back and
forth between copying escaped bytes and memcpy()ing un-escaped chunks.
This simple version is easier to reason about and works with
unterminated strings. It may also be faster as it avoids reading the
string twice (first with strcspn, then with memcpy).
Bug: https://bugzilla.samba.org/show_bug.cgi?id=11599
Signed-off-by: Douglas Bagnall <[email protected]>
Pair-programmed-with: Andrew Bartlett <[email protected]>
Reviewed-by: Ralph Boehme <[email protected]> |
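The commit's "simple version" can be pictured as one pass over a length-delimited buffer; here is a hedged sketch (the escape set and output policy are illustrative, not samba's exact rules, and the caller must size dst to at least 2*len):

```c
#include <string.h>
#include <stddef.h>

/* One loop handles every byte: no strcspn()/memcpy() phase switching,
 * and no reliance on the input being NUL-terminated. */
static size_t dn_escape(char *dst, const char *src, size_t len)
{
    static const char special[] = ",=\n+<>#;\\\"";
    size_t out = 0;
    size_t i;

    for (i = 0; i < len; i++) {
        unsigned char c = (unsigned char) src[i];
        if (memchr(special, c, sizeof(special) - 1))
            dst[out++] = '\\';      /* escape, then emit the byte */
        dst[out++] = (char) c;
    }
    return out;
}
```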
static void compose_notify(char *type, char *host, char *buf, size_t len)
{
char usn[256];
if (type) {
if (!strcmp(type, SSDP_ST_ALL))
type = NULL;
else
snprintf(usn, sizeof(usn), "%s::%s", uuid, type);
}
if (!type) {
type = usn;
strncpy(usn, uuid, sizeof(usn));
}
snprintf(buf, len, "NOTIFY * HTTP/1.1\r\n"
"Host: %s:%d\r\n"
"Server: %s\r\n"
"Location: http://%s:%d%s\r\n"
"NT: %s\r\n"
"NTS: ssdp:alive\r\n"
"USN: %s\r\n"
"Cache-Control: max-age=%d\r\n"
"\r\n",
MC_SSDP_GROUP, MC_SSDP_PORT,
server_string,
host, LOCATION_PORT, LOCATION_DESC,
type,
usn,
CACHE_TIMEOUT);
} | 0 | [
"CWE-119",
"CWE-787"
]
| ssdp-responder | ce04b1f29a137198182f60bbb628d5ceb8171765 | 24,118,331,216,645,180,000,000,000,000,000,000,000 | 32 | Fix #1: Ensure recv buf is always NUL terminated
Signed-off-by: Joachim Nilsson <[email protected]> |
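A hedged sketch of the fix's idea: reserve one byte of the buffer and terminate whatever recv() returned before the data is handed to str*-style parsing (not the project's exact code; assumes len > 0):

```c
#include <sys/types.h>
#include <sys/socket.h>

static ssize_t recv_cstring(int sd, char *buf, size_t len)
{
    ssize_t num = recv(sd, buf, len - 1, 0);   /* keep room for NUL */

    if (num < 0)
        return num;

    buf[num] = '\0';   /* guarantee termination even for a full read */
    return num;
}
```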
char *mg_ntoa(const struct mg_addr *addr, char *buf, size_t len) {
if (addr->is_ip6) {
uint16_t *p = (uint16_t *) addr->ip6;
snprintf(buf, len, "%hx:%hx:%hx:%hx:%hx:%hx:%hx:%hx", mg_htons(p[0]),
mg_htons(p[1]), mg_htons(p[2]), mg_htons(p[3]), mg_htons(p[4]),
mg_htons(p[5]), mg_htons(p[6]), mg_htons(p[7]));
} else {
uint8_t p[4];
memcpy(p, &addr->ip, sizeof(p));
snprintf(buf, len, "%d.%d.%d.%d", (int) p[0], (int) p[1], (int) p[2],
(int) p[3]);
}
return buf;
} | 0 | [
"CWE-552"
]
| mongoose | c65c8fdaaa257e0487ab0aaae9e8f6b439335945 | 112,657,428,941,913,240,000,000,000,000,000,000,000 | 14 | Protect against the directory traversal in mg_upload() |
static int debug_shrink_set(void *data, u64 val)
{
struct ion_heap *heap = data;
struct shrink_control sc;
int objs;
sc.gfp_mask = -1;
sc.nr_to_scan = val;
if (!val) {
objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
sc.nr_to_scan = objs;
}
heap->shrinker.scan_objects(&heap->shrinker, &sc);
return 0;
} | 0 | [
"CWE-416",
"CWE-284"
]
| linux | 9590232bb4f4cc824f3425a6e1349afbe6d6d2b7 | 138,838,430,474,939,530,000,000,000,000,000,000,000 | 17 | staging/android/ion : fix a race condition in the ion driver
There is a use-after-free problem in the ion driver.
This is caused by a race condition in the ion_ioctl()
function.
A handle has a ref count of 1 and two tasks on different
cpus call ION_IOC_FREE simultaneously.
cpu 0: ion_handle_get_by_id()   (ref == 2)
cpu 1: ion_handle_get_by_id()   (ref == 3)
cpu 0: ion_free()               (ref == 2)
cpu 0: ion_handle_put()         (ref == 1)
cpu 1: ion_free()               (ref == 0, so ion_handle_destroy() is
       called and the handle is freed.)
cpu 1: ion_handle_put() is called and it
       decreases the slub's next free pointer
The problem is detected as an unaligned access in the
spin lock functions since it uses load exclusive
instruction. In some cases it corrupts the slub's
free pointer which causes a mis-aligned access to the
next free pointer.(kmalloc returns a pointer like
ffffc0745b4580aa). And it causes lots of other
hard-to-debug problems.
This symptom is caused since the first member in the
ion_handle structure is the reference count and the
ion driver decrements the reference after it has been
freed.
To fix this problem client->lock mutex is extended
to protect all the codes that uses the handle.
Signed-off-by: Eun Taik Lee <[email protected]>
Reviewed-by: Laura Abbott <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]> |
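An illustrative kernel-C sketch of the fix's shape: widen the client lock so lookup, use, and put of a handle form one critical section, leaving no window where the ref count can hit zero in between (the type and helper are assumptions, not the real driver API):

```c
#include <linux/mutex.h>

struct ion_client_sketch { struct mutex lock; };

int lookup_use_and_put(struct ion_client_sketch *c, int id); /* assumed */

static int ioc_free_locked(struct ion_client_sketch *c, int id)
{
    int ret;

    mutex_lock(&c->lock);           /* covers the whole sequence */
    ret = lookup_use_and_put(c, id);
    mutex_unlock(&c->lock);
    return ret;
}
```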
static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct inode *inode)
{
struct btrfs_inode_item *inode_item;
struct btrfs_path *path;
struct extent_buffer *leaf;
int ret;
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
path->leave_spinning = 1;
ret = btrfs_lookup_inode(trans, root, path, &BTRFS_I(inode)->location,
1);
if (ret) {
if (ret > 0)
ret = -ENOENT;
goto failed;
}
leaf = path->nodes[0];
inode_item = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_inode_item);
fill_inode_item(trans, leaf, inode_item, inode);
btrfs_mark_buffer_dirty(leaf);
btrfs_set_inode_last_trans(trans, inode);
ret = 0;
failed:
btrfs_free_path(path);
return ret;
} | 0 | [
"CWE-200"
]
| linux | 0305cd5f7fca85dae392b9ba85b116896eb7c1c7 | 7,680,089,506,886,270,000,000,000,000,000,000,000 | 33 | Btrfs: fix truncation of compressed and inlined extents
When truncating a file to a smaller size which consists of an inline
extent that is compressed, we did not discard (or make unusable) the
data between the new file size and the old file size, wasting metadata
space and allowing for the truncated data to be leaked and the data
corruption/loss mentioned below.
We were also not correctly decrementing the number of bytes used by the
inode, we were setting it to zero, giving a wrong report for callers of
the stat(2) syscall. The fsck tool also reported an error about a mismatch
between the nbytes of the file versus the real space used by the file.
Now because we weren't discarding the truncated region of the file, it
was possible for a caller of the clone ioctl to actually read the data
that was truncated, allowing for a security breach without requiring root
access to the system, using only standard filesystem operations. The
scenario is the following:
1) User A creates a file which consists of an inline and compressed
extent with a size of 2000 bytes - the file is not accessible to
any other users (no read, write or execution permission for anyone
else);
2) The user truncates the file to a size of 1000 bytes;
3) User A makes the file world readable;
4) User B creates a file consisting of an inline extent of 2000 bytes;
5) User B issues a clone operation from user A's file into its own
file (using a length argument of 0, clone the whole range);
6) User B now gets to see the 1000 bytes that user A truncated from
its file before it made its file world readable. User B also lost
the bytes in the range [1000, 2000[ bytes from its own file, but
that might be ok if his/her intention was reading stale data from
user A that was never supposed to be public.
Note that this contrasts with the case where we truncate a file from 2000
bytes to 1000 bytes and then truncate it back from 1000 to 2000 bytes. In
this case reading any byte from the range [1000, 2000[ will return a value
of 0x00, instead of the original data.
This problem exists since the clone ioctl was added and happens both with
and without my recent data loss and file corruption fixes for the clone
ioctl (patch "Btrfs: fix file corruption and data loss after cloning
inline extents").
So fix this by truncating the compressed inline extents as we do for the
non-compressed case, which involves decompressing, if the data isn't already
in the page cache, compressing the truncated version of the extent, writing
the compressed content into the inline extent and then truncate it.
The following test case for fstests reproduces the problem. In order for
the test to pass both this fix and my previous fix for the clone ioctl
that forbids cloning a smaller inline extent into a larger one,
which is titled "Btrfs: fix file corruption and data loss after cloning
inline extents", are needed. Without that other fix the test fails in a
different way that does not leak the truncated data, instead part of
destination file gets replaced with zeroes (because the destination file
has a larger inline extent than the source).
seq=`basename $0`
seqres=$RESULT_DIR/$seq
echo "QA output created by $seq"
tmp=/tmp/$$
status=1 # failure is the default!
trap "_cleanup; exit \$status" 0 1 2 3 15
_cleanup()
{
rm -f $tmp.*
}
# get standard environment, filters and checks
. ./common/rc
. ./common/filter
# real QA test starts here
_need_to_be_root
_supported_fs btrfs
_supported_os Linux
_require_scratch
_require_cloner
rm -f $seqres.full
_scratch_mkfs >>$seqres.full 2>&1
_scratch_mount "-o compress"
# Create our test files. File foo is going to be the source of a clone operation
# and consists of a single inline extent with an uncompressed size of 512 bytes,
# while file bar consists of a single inline extent with an uncompressed size of
# 256 bytes. For our test's purpose, it's important that file bar has an inline
# extent with a size smaller than foo's inline extent.
$XFS_IO_PROG -f -c "pwrite -S 0xa1 0 128" \
-c "pwrite -S 0x2a 128 384" \
$SCRATCH_MNT/foo | _filter_xfs_io
$XFS_IO_PROG -f -c "pwrite -S 0xbb 0 256" $SCRATCH_MNT/bar | _filter_xfs_io
# Now durably persist all metadata and data. We do this to make sure that we get
# on disk an inline extent with a size of 512 bytes for file foo.
sync
# Now truncate our file foo to a smaller size. Because it consists of a
# compressed and inline extent, btrfs did not shrink the inline extent to the
# new size (if the extent was not compressed, btrfs would shrink it to 128
# bytes), it only updates the inode's i_size to 128 bytes.
$XFS_IO_PROG -c "truncate 128" $SCRATCH_MNT/foo
# Now clone foo's inline extent into bar.
# This clone operation should fail with errno EOPNOTSUPP because the source
# file consists only of an inline extent and the file's size is smaller than
# the inline extent of the destination (128 bytes < 256 bytes). However the
# clone ioctl was not prepared to deal with a file that has a size smaller
# than the size of its inline extent (something that happens only for compressed
# inline extents), resulting in copying the full inline extent from the source
# file into the destination file.
#
# Note that btrfs' clone operation for inline extents consists of removing the
# inline extent from the destination inode and copy the inline extent from the
# source inode into the destination inode, meaning that if the destination
# inode's inline extent is larger (N bytes) than the source inode's inline
# extent (M bytes), some bytes (N - M bytes) will be lost from the destination
# file. Btrfs could copy the source inline extent's data into the destination's
# inline extent so that we would not lose any data, but that's currently not
# done due to the complexity that would be needed to deal with such cases
# (specially when one or both extents are compressed), returning EOPNOTSUPP, as
# it's normally not a very common case to clone very small files (only case
# where we get inline extents) and copying inline extents does not save any
# space (unlike for normal, non-inlined extents).
$CLONER_PROG -s 0 -d 0 -l 0 $SCRATCH_MNT/foo $SCRATCH_MNT/bar
# Now because the above clone operation used to succeed, and due to foo's inline
# extent not being shinked by the truncate operation, our file bar got the whole
# inline extent copied from foo, making us lose the last 128 bytes from bar
# which got replaced by the bytes in range [128, 256[ from foo before foo was
# truncated - in other words, data loss from bar and being able to read old and
# stale data from foo that should not be possible to read anymore through normal
# filesystem operations. Contrast with the case where we truncate a file from a
# size N to a smaller size M, truncate it back to size N and then read the range
# [M, N[, we should always get the value 0x00 for all the bytes in that range.
# We expected the clone operation to fail with errno EOPNOTSUPP and therefore
# not modify our file's bar data/metadata. So its content should be 256 bytes
# long with all bytes having the value 0xbb.
#
# Without the btrfs bug fix, the clone operation succeeded and resulted in
# leaking truncated data from foo, the bytes that belonged to its range
# [128, 256[, and losing data from bar in that same range. So reading the
# file gave us the following content:
#
# 0000000 a1 a1 a1 a1 a1 a1 a1 a1 a1 a1 a1 a1 a1 a1 a1 a1
# *
# 0000200 2a 2a 2a 2a 2a 2a 2a 2a 2a 2a 2a 2a 2a 2a 2a 2a
# *
# 0000400
echo "File bar's content after the clone operation:"
od -t x1 $SCRATCH_MNT/bar
# Also because foo's inline extent was not shrunk by the truncate
# operation, btrfs' fsck, which is run by the fstests framework every time a
# test completes, failed reporting the following error:
#
# root 5 inode 257 errors 400, nbytes wrong
status=0
exit
Cc: [email protected]
Signed-off-by: Filipe Manana <[email protected]> |
static int ax25_getsockopt(struct socket *sock, int level, int optname,
char __user *optval, int __user *optlen)
{
struct sock *sk = sock->sk;
ax25_cb *ax25;
struct ax25_dev *ax25_dev;
char devname[IFNAMSIZ];
void *valptr;
int val = 0;
int maxlen, length;
if (level != SOL_AX25)
return -ENOPROTOOPT;
if (get_user(maxlen, optlen))
return -EFAULT;
if (maxlen < 1)
return -EFAULT;
valptr = (void *) &val;
length = min_t(unsigned int, maxlen, sizeof(int));
lock_sock(sk);
ax25 = sk_to_ax25(sk);
switch (optname) {
case AX25_WINDOW:
val = ax25->window;
break;
case AX25_T1:
val = ax25->t1 / HZ;
break;
case AX25_T2:
val = ax25->t2 / HZ;
break;
case AX25_N2:
val = ax25->n2;
break;
case AX25_T3:
val = ax25->t3 / HZ;
break;
case AX25_IDLE:
val = ax25->idle / (60 * HZ);
break;
case AX25_BACKOFF:
val = ax25->backoff;
break;
case AX25_EXTSEQ:
val = (ax25->modulus == AX25_EMODULUS);
break;
case AX25_PIDINCL:
val = ax25->pidincl;
break;
case AX25_IAMDIGI:
val = ax25->iamdigi;
break;
case AX25_PACLEN:
val = ax25->paclen;
break;
case SO_BINDTODEVICE:
ax25_dev = ax25->ax25_dev;
if (ax25_dev != NULL && ax25_dev->dev != NULL) {
strlcpy(devname, ax25_dev->dev->name, sizeof(devname));
length = strlen(devname) + 1;
} else {
*devname = '\0';
length = 1;
}
valptr = (void *) devname;
break;
default:
release_sock(sk);
return -ENOPROTOOPT;
}
release_sock(sk);
if (put_user(length, optlen))
return -EFAULT;
return copy_to_user(optval, valptr, length) ? -EFAULT : 0;
} | 0 | []
| net | 79462ad02e861803b3840cc782248c7359451cd9 | 210,742,698,922,846,560,000,000,000,000,000,000,000 | 96 | net: add validation for the socket syscall protocol argument
郭永刚 reported that one could simply crash the kernel as root by
using a simple program:
int socket_fd;
struct sockaddr_in addr;
addr.sin_port = 0;
addr.sin_addr.s_addr = INADDR_ANY;
addr.sin_family = 10;
socket_fd = socket(10,3,0x40000000);
connect(socket_fd , &addr,16);
AF_INET, AF_INET6 sockets actually only support 8-bit protocol
identifiers. inet_sock's skc_protocol field thus is sized accordingly,
thus larger protocol identifiers simply cut off the higher bits and
store a zero in the protocol fields.
This could lead to e.g. NULL function pointer because as a result of
the cut off inet_num is zero and we call down to inet_autobind, which
is NULL for raw sockets.
kernel: Call Trace:
kernel: [<ffffffff816db90e>] ? inet_autobind+0x2e/0x70
kernel: [<ffffffff816db9a4>] inet_dgram_connect+0x54/0x80
kernel: [<ffffffff81645069>] SYSC_connect+0xd9/0x110
kernel: [<ffffffff810ac51b>] ? ptrace_notify+0x5b/0x80
kernel: [<ffffffff810236d8>] ? syscall_trace_enter_phase2+0x108/0x200
kernel: [<ffffffff81645e0e>] SyS_connect+0xe/0x10
kernel: [<ffffffff81779515>] tracesys_phase2+0x84/0x89
I found no particular commit which introduced this problem.
CVE: CVE-2015-8543
Cc: Cong Wang <[email protected]>
Reported-by: 郭永刚 <[email protected]>
Signed-off-by: Hannes Frederic Sowa <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
static void cpuid_mask(u32 *word, int wordnum)
{
*word &= boot_cpu_data.x86_capability[wordnum];
} | 0 | [
"CWE-362",
"CWE-401"
]
| linux | fc3a9157d3148ab91039c75423da8ef97be3e105 | 120,694,239,029,510,080,000,000,000,000,000,000,000 | 4 | KVM: X86: Don't report L2 emulation failures to user-space
This patch prevents emulation failures which result
from emulating an instruction for an L2-Guest from
being reported to userspace.
Without this patch a malicious L2-Guest would be able to
kill the L1 by triggering a race condition between a vmexit
and the instruction emulator.
With this patch the L2 will most likely only kill itself in
this situation.
Signed-off-by: Joerg Roedel <[email protected]>
Signed-off-by: Marcelo Tosatti <[email protected]> |
void sock_set_reuseaddr(struct sock *sk)
{
lock_sock(sk);
sk->sk_reuse = SK_CAN_REUSE;
release_sock(sk);
} | 0 | []
| net | 35306eb23814444bd4021f8a1c3047d3cb0c8b2b | 57,062,956,193,318,020,000,000,000,000,000,000,000 | 6 | af_unix: fix races in sk_peer_pid and sk_peer_cred accesses
Jann Horn reported that SO_PEERCRED and SO_PEERGROUPS implementations
are racy, as af_unix can concurrently change sk_peer_pid and sk_peer_cred.
In order to fix this issue, this patch adds a new spinlock that needs
to be used whenever these fields are read or written.
Jann also pointed out that l2cap_sock_get_peer_pid_cb() is currently
reading sk->sk_peer_pid which makes no sense, as this field
is only possibly set by AF_UNIX sockets.
We will have to clean this in a separate patch.
This could be done by reverting b48596d1dc25 "Bluetooth: L2CAP: Add get_peer_pid callback"
or implementing what was truly expected.
Fixes: 109f6e39fa07 ("af_unix: Allow SO_PEERCRED to work across namespaces.")
Signed-off-by: Eric Dumazet <[email protected]>
Reported-by: Jann Horn <[email protected]>
Cc: Eric W. Biederman <[email protected]>
Cc: Luiz Augusto von Dentz <[email protected]>
Cc: Marcel Holtmann <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
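A sketch of the accessor pattern the message describes, using an illustrative structure rather than the real struct sock layout: one spinlock guards every read and write of the peer identity fields:

```c
#include <linux/spinlock.h>
#include <linux/pid.h>

struct peer_state_sketch {
    spinlock_t  peer_lock;
    struct pid *peer_pid;
};

static struct pid *peer_pid_get(struct peer_state_sketch *s)
{
    struct pid *pid;

    spin_lock(&s->peer_lock);
    pid = get_pid(s->peer_pid);   /* take a reference under the lock */
    spin_unlock(&s->peer_lock);
    return pid;
}

static void peer_pid_set(struct peer_state_sketch *s, struct pid *pid)
{
    spin_lock(&s->peer_lock);
    put_pid(s->peer_pid);         /* drop the old reference */
    s->peer_pid = get_pid(pid);
    spin_unlock(&s->peer_lock);
}
```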
vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
unsigned i;
if (exec->bo) {
for (i = 0; i < exec->bo_count; i++)
drm_gem_object_unreference_unlocked(&exec->bo[i]->base);
drm_free_large(exec->bo);
}
while (!list_empty(&exec->unref_list)) {
struct vc4_bo *bo = list_first_entry(&exec->unref_list,
struct vc4_bo, unref_head);
list_del(&bo->unref_head);
drm_gem_object_unreference_unlocked(&bo->base.base);
}
mutex_lock(&vc4->power_lock);
if (--vc4->power_refcount == 0) {
pm_runtime_mark_last_busy(&vc4->v3d->pdev->dev);
pm_runtime_put_autosuspend(&vc4->v3d->pdev->dev);
}
mutex_unlock(&vc4->power_lock);
kfree(exec);
} | 0 | [
"CWE-190",
"CWE-703"
]
| linux | 0f2ff82e11c86c05d051cae32b58226392d33bbf | 10,625,089,171,012,489,000,000,000,000,000,000,000 | 27 | drm/vc4: Fix an integer overflow in temporary allocation layout.
We copy the unvalidated ioctl arguments from the user into kernel
temporary memory to run the validation from, to avoid a race where the
user updates the unvalidated contents in between validating them and
copying them into the validated BO.
However, in setting up the layout of the kernel side, we failed to
check one of the additions (the roundup() for shader_rec_offset)
against integer overflow, allowing a nearly MAX_UINT value of
bin_cl_size to cause us to under-allocate the temporary space that we
then copy_from_user into.
Reported-by: Murray McAllister <[email protected]>
Signed-off-by: Eric Anholt <[email protected]>
Fixes: d5b1a78a772f ("drm/vc4: Add support for drawing 3D frames.") |
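A hedged sketch of the missing check: every addition feeding a size computation (including the roundup for shader_rec_offset) should be tested for wrap-around before the sum is trusted:

```c
#include <stdint.h>
#include <stdbool.h>

/* Returns false if a + b would wrap, which is exactly the case that
 * caused the temporary buffer to be under-allocated. */
static bool size_add_ok(uint32_t a, uint32_t b, uint32_t *sum)
{
    if (a > UINT32_MAX - b)
        return false;
    *sum = a + b;
    return true;
}
```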
mbuf_append(MBuf *dst, const uint8 *buf, int len)
{
if (dst->no_write)
{
px_debug("mbuf_append: no_write");
return PXE_BUG;
}
prepare_room(dst, len);
memcpy(dst->data_end, buf, len);
dst->data_end += len;
return 0;
} | 0 | [
"CWE-120"
]
| postgres | 1dc75515868454c645ded22d38054ec693e23ec6 | 93,513,463,650,167,640,000,000,000,000,000,000,000 | 15 | Fix buffer overrun after incomplete read in pullf_read_max().
Most callers pass a stack buffer. The ensuing stack smash can crash the
server, and we have not ruled out the viability of attacks that lead to
privilege escalation. Back-patch to 9.0 (all supported versions).
Marko Tiikkaja
Security: CVE-2015-0243 |
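An illustrative sketch only (not the pgcrypto API): cap every copy at the destination's capacity so a short or incomplete read can never overrun a caller's stack buffer:

```c
#include <string.h>
#include <stddef.h>

static size_t copy_bounded(char *dst, size_t dstlen,
                           const char *src, size_t srclen)
{
    size_t n = srclen < dstlen ? srclen : dstlen;

    memcpy(dst, src, n);   /* never writes past dst[dstlen - 1] */
    return n;
}
```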
static inline struct audit_entry *audit_to_entry_common(struct audit_rule *rule)
{
unsigned listnr;
struct audit_entry *entry;
int i, err;
err = -EINVAL;
listnr = rule->flags & ~AUDIT_FILTER_PREPEND;
switch(listnr) {
default:
goto exit_err;
case AUDIT_FILTER_USER:
case AUDIT_FILTER_TYPE:
#ifdef CONFIG_AUDITSYSCALL
case AUDIT_FILTER_ENTRY:
case AUDIT_FILTER_EXIT:
case AUDIT_FILTER_TASK:
#endif
;
}
if (unlikely(rule->action == AUDIT_POSSIBLE)) {
printk(KERN_ERR "AUDIT_POSSIBLE is deprecated\n");
goto exit_err;
}
if (rule->action != AUDIT_NEVER && rule->action != AUDIT_ALWAYS)
goto exit_err;
if (rule->field_count > AUDIT_MAX_FIELDS)
goto exit_err;
err = -ENOMEM;
entry = audit_init_entry(rule->field_count);
if (!entry)
goto exit_err;
entry->rule.flags = rule->flags & AUDIT_FILTER_PREPEND;
entry->rule.listnr = listnr;
entry->rule.action = rule->action;
entry->rule.field_count = rule->field_count;
for (i = 0; i < AUDIT_BITMASK_SIZE; i++)
entry->rule.mask[i] = rule->mask[i];
for (i = 0; i < AUDIT_SYSCALL_CLASSES; i++) {
int bit = AUDIT_BITMASK_SIZE * 32 - i - 1;
__u32 *p = &entry->rule.mask[AUDIT_WORD(bit)];
__u32 *class;
if (!(*p & AUDIT_BIT(bit)))
continue;
*p &= ~AUDIT_BIT(bit);
class = classes[i];
if (class) {
int j;
for (j = 0; j < AUDIT_BITMASK_SIZE; j++)
entry->rule.mask[j] |= class[j];
}
}
return entry;
exit_err:
return ERR_PTR(err);
} | 0 | [
"CWE-362"
]
| linux-2.6 | 8f7b0ba1c853919b85b54774775f567f30006107 | 48,746,732,130,712,960,000,000,000,000,000,000,000 | 63 | Fix inotify watch removal/umount races
Inotify watch removals suck violently.
To kick the watch out we need (in this order) inode->inotify_mutex and
ih->mutex. That's fine if we have a hold on inode; however, for all
other cases we need to make damn sure we don't race with umount. We can
*NOT* just grab a reference to a watch - inotify_unmount_inodes() will
happily sail past it and we'll end with reference to inode potentially
outliving its superblock.
Ideally we just want to grab an active reference to superblock if we
can; that will make sure we won't go into inotify_umount_inodes() until
we are done. Cleanup is just deactivate_super().
However, that leaves a messy case - what if we *are* racing with
umount() and active references to superblock can't be acquired anymore?
We can bump ->s_count, grab ->s_umount, which will almost certainly wait
until the superblock is shut down and the watch in question is pining
for fjords. That's fine, but there is a problem - we might have hit the
window between ->s_active getting to 0 / ->s_count - below S_BIAS (i.e.
the moment when superblock is past the point of no return and is heading
for shutdown) and the moment when deactivate_super() acquires
->s_umount.
We could just do drop_super() yield() and retry, but that's rather
antisocial and this stuff is luser-triggerable. OTOH, having grabbed
->s_umount and having found that we'd got there first (i.e. that
->s_root is non-NULL) we know that we won't race with
inotify_umount_inodes().
So we could grab a reference to watch and do the rest as above, just
with drop_super() instead of deactivate_super(), right? Wrong. We had
to drop ih->mutex before we could grab ->s_umount. So the watch
could've been gone already.
That still can be dealt with - we need to save watch->wd, do idr_find()
and compare its result with our pointer. If they match, we either have
the damn thing still alive or we'd lost not one but two races at once,
the watch had been killed and a new one got created with the same ->wd
at the same address. That couldn't have happened in inotify_destroy(),
but inotify_rm_wd() could run into that. Still, "new one got created"
is not a problem - we have every right to kill it or leave it alone,
whatever's more convenient.
So we can use idr_find(...) == watch && watch->inode->i_sb == sb as
"grab it and kill it" check. If it's been our original watch, we are
fine, if it's a newcomer - nevermind, just pretend that we'd won the
race and kill the fscker anyway; we are safe since we know that its
superblock won't be going away.
And yes, this is far beyond mere "not very pretty"; so's the entire
concept of inotify to start with.
Signed-off-by: Al Viro <[email protected]>
Acked-by: Greg KH <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
static Address handler(ThreadLocalTop* thread) { return thread->handler_; } | 0 | [
"CWE-20",
"CWE-119"
]
| node | 530af9cb8e700e7596b3ec812bad123c9fa06356 | 6,062,639,364,548,478,000,000,000,000,000,000,000 | 1 | v8: Interrupts must not mask stack overflow.
Backport of https://codereview.chromium.org/339883002 |
static MOBI_RET mobi_parse_index_entry(MOBIIndx *indx, const MOBIIdxt idxt, const MOBITagx *tagx, const MOBIOrdt *ordt, MOBIBuffer *buf, const size_t curr_number) {
if (indx == NULL) {
debug_print("%s", "INDX structure not initialized\n");
return MOBI_INIT_FAILED;
}
const size_t entry_offset = indx->entries_count;
const size_t entry_length = idxt.offsets[curr_number + 1] - idxt.offsets[curr_number];
mobi_buffer_setpos(buf, idxt.offsets[curr_number]);
size_t entry_number = curr_number + entry_offset;
if (entry_number >= indx->total_entries_count) {
debug_print("Entry number beyond array: %zu\n", entry_number);
return MOBI_DATA_CORRUPT;
}
/* save original record maxlen */
const size_t buf_maxlen = buf->maxlen;
if (buf->offset + entry_length >= buf_maxlen) {
debug_print("Entry length too long: %zu\n", entry_length);
return MOBI_DATA_CORRUPT;
}
buf->maxlen = buf->offset + entry_length;
size_t label_length = mobi_buffer_get8(buf);
if (label_length > entry_length) {
debug_print("Label length too long: %zu\n", label_length);
return MOBI_DATA_CORRUPT;
}
char text[INDX_LABEL_SIZEMAX];
/* FIXME: what is ORDT1 for? */
if (ordt->ordt2) {
label_length = mobi_getstring_ordt(ordt, buf, (unsigned char*) text, label_length);
} else {
label_length = mobi_indx_get_label((unsigned char*) text, buf, label_length, indx->ligt_entries_count);
}
indx->entries[entry_number].label = malloc(label_length + 1);
if (indx->entries[entry_number].label == NULL) {
debug_print("Memory allocation failed (%zu bytes)\n", label_length);
return MOBI_MALLOC_FAILED;
}
strncpy(indx->entries[entry_number].label, text, label_length + 1);
//debug_print("tag label[%zu]: %s\n", entry_number, indx->entries[entry_number].label);
unsigned char *control_bytes;
control_bytes = buf->data + buf->offset;
mobi_buffer_seek(buf, (int) tagx->control_byte_count);
indx->entries[entry_number].tags_count = 0;
indx->entries[entry_number].tags = NULL;
if (tagx->tags_count > 0) {
typedef struct {
uint8_t tag;
uint8_t tag_value_count;
uint32_t value_count;
uint32_t value_bytes;
} MOBIPtagx;
MOBIPtagx *ptagx = malloc(tagx->tags_count * sizeof(MOBIPtagx));
if (ptagx == NULL) {
debug_print("Memory allocation failed (%zu bytes)\n", tagx->tags_count * sizeof(MOBIPtagx));
return MOBI_MALLOC_FAILED;
}
uint32_t ptagx_count = 0;
size_t len;
size_t i = 0;
while (i < tagx->tags_count) {
if (tagx->tags[i].control_byte == 1) {
control_bytes++;
i++;
continue;
}
uint32_t value = control_bytes[0] & tagx->tags[i].bitmask;
if (value != 0) {
/* FIXME: is it safe to use MOBI_NOTSET? */
uint32_t value_count = MOBI_NOTSET;
uint32_t value_bytes = MOBI_NOTSET;
/* all bits of masked value are set */
if (value == tagx->tags[i].bitmask) {
/* more than 1 bit set */
if (mobi_bitcount(tagx->tags[i].bitmask) > 1) {
/* read value bytes from entry */
len = 0;
value_bytes = mobi_buffer_get_varlen(buf, &len);
} else {
value_count = 1;
}
} else {
uint8_t mask = tagx->tags[i].bitmask;
while ((mask & 1) == 0) {
mask >>= 1;
value >>= 1;
}
value_count = value;
}
ptagx[ptagx_count].tag = tagx->tags[i].tag;
ptagx[ptagx_count].tag_value_count = tagx->tags[i].values_count;
ptagx[ptagx_count].value_count = value_count;
ptagx[ptagx_count].value_bytes = value_bytes;
ptagx_count++;
}
i++;
}
indx->entries[entry_number].tags = malloc(tagx->tags_count * sizeof(MOBIIndexTag));
if (indx->entries[entry_number].tags == NULL) {
debug_print("Memory allocation failed (%zu bytes)\n", tagx->tags_count * sizeof(MOBIIndexTag));
free(ptagx);
return MOBI_MALLOC_FAILED;
}
i = 0;
while (i < ptagx_count) {
uint32_t tagvalues_count = 0;
/* FIXME: is it safe to use MOBI_NOTSET? */
/* value count is set */
uint32_t tagvalues[INDX_TAGVALUES_MAX];
if (ptagx[i].value_count != MOBI_NOTSET) {
size_t count = ptagx[i].value_count * ptagx[i].tag_value_count;
while (count-- && tagvalues_count < INDX_TAGVALUES_MAX) {
len = 0;
const uint32_t value_bytes = mobi_buffer_get_varlen(buf, &len);
tagvalues[tagvalues_count++] = value_bytes;
}
/* value count is not set */
} else {
/* read value_bytes bytes */
len = 0;
while (len < ptagx[i].value_bytes && tagvalues_count < INDX_TAGVALUES_MAX) {
const uint32_t value_bytes = mobi_buffer_get_varlen(buf, &len);
tagvalues[tagvalues_count++] = value_bytes;
}
}
if (tagvalues_count) {
const size_t arr_size = tagvalues_count * sizeof(*indx->entries[entry_number].tags[i].tagvalues);
indx->entries[entry_number].tags[i].tagvalues = malloc(arr_size);
if (indx->entries[entry_number].tags[i].tagvalues == NULL) {
debug_print("Memory allocation failed (%zu bytes)\n", arr_size);
free(ptagx);
return MOBI_MALLOC_FAILED;
}
memcpy(indx->entries[entry_number].tags[i].tagvalues, tagvalues, arr_size);
} else {
indx->entries[entry_number].tags[i].tagvalues = NULL;
}
indx->entries[entry_number].tags[i].tagid = ptagx[i].tag;
indx->entries[entry_number].tags[i].tagvalues_count = tagvalues_count;
indx->entries[entry_number].tags_count++;
i++;
}
free(ptagx);
}
/* restore buffer maxlen */
buf->maxlen = buf_maxlen;
return MOBI_SUCCESS;
} | 1 | [
"CWE-125"
]
| libmobi | 612562bc1ea38f1708b044e7a079c47a05b1291d | 182,245,659,604,830,670,000,000,000,000,000,000,000 | 147 | Fix: index entry label not being zero-terminated with corrupt input |
GF_Err mfhd_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
GF_MovieFragmentHeaderBox *ptr = (GF_MovieFragmentHeaderBox *) s;
if (!s) return GF_BAD_PARAM;
e = gf_isom_full_box_write(s, bs);
if (e) return e;
gf_bs_write_u32(bs, ptr->sequence_number);
return GF_OK;
} | 0 | [
"CWE-400",
"CWE-401"
]
| gpac | d2371b4b204f0a3c0af51ad4e9b491144dd1225c | 323,752,300,626,955,140,000,000,000,000,000,000,000 | 11 | prevent dref memleak on invalid input (#1183) |
static int descriptor_sd_propagation_object(struct ldb_module *module,
struct ldb_message *msg,
bool *stop)
{
struct descriptor_data *descriptor_private =
talloc_get_type_abort(ldb_module_get_private(module),
struct descriptor_data);
struct descriptor_transaction *t = &descriptor_private->transaction;
struct ldb_context *ldb = ldb_module_get_ctx(module);
struct ldb_request *sub_req;
struct ldb_result *mod_res;
struct ldb_control *sd_propagation_control;
struct GUID guid;
int ret;
TDB_DATA key;
TDB_DATA empty_val = { .dsize = 0, };
NTSTATUS status;
struct descriptor_changes *c = NULL;
*stop = false;
/*
* We get the GUID of the object
* in order to have the cache key
* for the object.
*/
status = dsdb_get_extended_dn_guid(msg->dn, &guid, "GUID");
if (!NT_STATUS_IS_OK(status)) {
return ldb_operr(ldb);
}
key = make_tdb_data((const void*)&guid, sizeof(guid));
/*
* Check if we already processed this object.
*/
status = dbwrap_parse_record(t->objects.map, key,
descriptor_object_parser, NULL);
if (NT_STATUS_IS_OK(status)) {
/*
* All work is already done
*/
t->objects.num_skipped += 1;
*stop = true;
return LDB_SUCCESS;
}
if (!NT_STATUS_EQUAL(status, NT_STATUS_NOT_FOUND)) {
ldb_debug(ldb, LDB_DEBUG_FATAL,
"dbwrap_parse_record() - %s\n",
nt_errstr(status));
return ldb_module_operr(module);
}
t->objects.num_processed += 1;
/*
* Remember that we're processing this object.
*/
status = dbwrap_store(t->objects.map, key, empty_val, TDB_INSERT);
if (!NT_STATUS_IS_OK(status)) {
ldb_debug(ldb, LDB_DEBUG_FATAL,
"dbwrap_parse_record() - %s\n",
nt_errstr(status));
return ldb_module_operr(module);
}
/*
* Check that if there's a descriptor_change in our list,
* which we may be able to remove from the pending list
* when we processed the object.
*/
status = dbwrap_parse_record(t->changes.map, key, descriptor_changes_parser, &c);
if (NT_STATUS_EQUAL(status, NT_STATUS_NOT_FOUND)) {
c = NULL;
status = NT_STATUS_OK;
}
if (!NT_STATUS_IS_OK(status)) {
ldb_debug(ldb, LDB_DEBUG_FATAL,
"dbwrap_parse_record() - %s\n",
nt_errstr(status));
return ldb_module_operr(module);
}
mod_res = talloc_zero(msg, struct ldb_result);
if (mod_res == NULL) {
return ldb_module_oom(module);
}
ret = ldb_build_mod_req(&sub_req, ldb, mod_res,
msg,
NULL,
mod_res,
ldb_modify_default_callback,
NULL);
LDB_REQ_SET_LOCATION(sub_req);
if (ret != LDB_SUCCESS) {
return ldb_module_operr(module);
}
ldb_req_mark_trusted(sub_req);
ret = ldb_request_add_control(sub_req,
DSDB_CONTROL_SEC_DESC_PROPAGATION_OID,
true, module);
if (ret != LDB_SUCCESS) {
return ldb_module_operr(module);
}
sd_propagation_control = ldb_request_get_control(sub_req,
DSDB_CONTROL_SEC_DESC_PROPAGATION_OID);
if (sd_propagation_control == NULL) {
return ldb_module_operr(module);
}
ret = dsdb_request_add_controls(sub_req,
DSDB_FLAG_AS_SYSTEM |
DSDB_SEARCH_SHOW_RECYCLED);
if (ret != LDB_SUCCESS) {
return ldb_module_operr(module);
}
ret = descriptor_modify(module, sub_req);
if (ret == LDB_SUCCESS) {
ret = ldb_wait(sub_req->handle, LDB_WAIT_ALL);
}
if (ret != LDB_SUCCESS) {
ldb_asprintf_errstring(ldb_module_get_ctx(module),
"descriptor_modify on %s failed: %s",
ldb_dn_get_linearized(msg->dn),
ldb_errstring(ldb_module_get_ctx(module)));
return LDB_ERR_OPERATIONS_ERROR;
}
if (sd_propagation_control->critical != 0) {
if (c == NULL) {
/*
* If we don't have a
* descriptor_changes structure
* we're done.
*/
*stop = true;
} else if (!c->force_children) {
/*
* If we don't need to
* propagate to children,
* we're done.
*/
*stop = true;
}
}
if (c != NULL && !c->force_children) {
/*
* Remove the pending change,
* we already done all required work,
* there's no need to do it again.
*
* Note DLIST_REMOVE() is a noop
* if the element is not part of
* the list.
*/
DLIST_REMOVE(t->changes.list, c);
}
talloc_free(mod_res);
return LDB_SUCCESS;
} | 0 | [
"CWE-200"
]
| samba | 0a3aa5f908e351201dc9c4d4807b09ed9eedff77 | 276,450,578,380,996,400,000,000,000,000,000,000,000 | 169 | CVE-2022-32746 ldb: Make use of functions for appending to an ldb_message
This aims to minimise usage of the error-prone pattern of searching for
a just-added message element in order to make modifications to it (and
potentially finding the wrong element).
BUG: https://bugzilla.samba.org/show_bug.cgi?id=15009
Signed-off-by: Joseph Sutton <[email protected]> |
static int threaded_has_dirs_only_path(struct cache_def *cache, const char *name, int len, int prefix_len)
{
return lstat_cache(cache, name, len,
FL_DIR|FL_FULLPATH, prefix_len) &
FL_DIR;
} | 1 | [
"CWE-59",
"CWE-61"
]
| git | 684dd4c2b414bcf648505e74498a608f28de4592 | 270,170,827,826,519,150,000,000,000,000,000,000,000 | 6 | checkout: fix bug that makes checkout follow symlinks in leading path
Before checking out a file, we have to confirm that all of its leading
components are real existing directories. And to reduce the number of
lstat() calls in this process, we cache the last leading path known to
contain only directories. However, when a path collision occurs (e.g.
when checking out case-sensitive files in case-insensitive file
systems), a cached path might have its file type changed on disk,
leaving the cache in an invalid state. Normally, this doesn't bring
any bad consequences as we usually check out files in index order, and
therefore, by the time the cached path becomes outdated, we no longer
need it anyway (because all files in that directory would have already
been written).
But, there are some users of the checkout machinery that do not always
follow the index order. In particular: checkout-index writes the paths
in the same order that they appear on the CLI (or stdin); and the
delayed checkout feature -- used when a long-running filter process
replies with "status=delayed" -- postpones the checkout of some entries,
thus modifying the checkout order.
When we have to check out an out-of-order entry and the lstat() cache is
invalid (due to a previous path collision), checkout_entry() may end up
using the invalid data and thrusting that the leading components are
real directories when, in reality, they are not. In the best case
scenario, where the directory was replaced by a regular file, the user
will get an error: "fatal: unable to create file 'foo/bar': Not a
directory". But if the directory was replaced by a symlink, checkout
could actually end up following the symlink and writing the file at a
wrong place, even outside the repository. Since delayed checkout is
affected by this bug, it could be used by an attacker to write
arbitrary files during the clone of a maliciously crafted repository.
Some candidate solutions considered were to disable the lstat() cache
during unordered checkouts or sort the entries before passing them to
the checkout machinery. But both ideas include some performance penalty
and they don't future-proof the code against new unordered use cases.
Instead, we now manually reset the lstat cache whenever we successfully
remove a directory. Note: We are not even checking whether the directory
was the same as the lstat cache points to because we might face a
scenario where the paths refer to the same location but differ due to
case folding, precomposed UTF-8 issues, or the presence of `..`
components in the path. Two regression tests, with case-collisions and
utf8-collisions, are also added for both checkout-index and delayed
checkout.
Note: to make the previously mentioned clone attack unfeasible, it would
be sufficient to reset the lstat cache only after the remove_subtree()
call inside checkout_entry(). This is the place where we would remove a
directory whose path collides with the path of another entry that we are
currently trying to check out (possibly a symlink). However, in the
interest of a thorough fix that does not leave Git open to
similar-but-not-identical attack vectors, we decided to intercept
all `rmdir()` calls in one fell swoop.
This addresses CVE-2021-21300.
Co-authored-by: Johannes Schindelin <[email protected]>
Signed-off-by: Matheus Tavares <[email protected]> |
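A hedged sketch of the adopted fix: every successful rmdir() resets the lstat() cache, so stale "this prefix is only directories" state cannot survive a collision-driven removal (the cache layout here is illustrative, not git's struct cache_def):

```c
#include <unistd.h>

struct lstat_cache_sketch { int len; char path[4096]; };

static void reset_lstat_cache(struct lstat_cache_sketch *cache)
{
    cache->len = 0;
    cache->path[0] = '\0';
}

static int rmdir_and_reset(struct lstat_cache_sketch *cache,
                           const char *name)
{
    int ret = rmdir(name);

    if (ret == 0)
        reset_lstat_cache(cache);   /* intercept every removal */
    return ret;
}
```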
**/
T& atXYZC(const int x, const int y, const int z, const int c, const T& out_value) {
return (x<0 || y<0 || z<0 || c<0 || x>=width() || y>=height() || z>=depth() || c>=spectrum())?
(cimg::temporary(out_value)=out_value):(*this)(x,y,z,c); | 0 | [
"CWE-125"
]
| CImg | 10af1e8c1ad2a58a0a3342a856bae63e8f257abb | 155,404,308,464,963,730,000,000,000,000,000,000,000 | 4 | Fix other issues in 'CImg<T>::load_bmp()'. |
static inline void clear_soft_dirty(struct vm_area_struct *vma,
unsigned long addr, pte_t *pte)
{
} | 0 | [
"CWE-200"
]
| linux | ab676b7d6fbf4b294bf198fb27ade5b0e865c7ce | 71,874,061,065,526,200,000,000,000,000,000,000,000 | 4 | pagemap: do not leak physical addresses to non-privileged userspace
As pointed out by a recent post[1] on exploiting DRAM physical imperfection,
/proc/PID/pagemap exposes sensitive information which can be used to do
attacks.
This disallows anybody without CAP_SYS_ADMIN from reading the pagemap.
[1] http://googleprojectzero.blogspot.com/2015/03/exploiting-dram-rowhammer-bug-to-gain.html
[ Eventually we might want to do anything more finegrained, but for now
this is the simple model. - Linus ]
Signed-off-by: Kirill A. Shutemov <[email protected]>
Acked-by: Konstantin Khlebnikov <[email protected]>
Acked-by: Andy Lutomirski <[email protected]>
Cc: Pavel Emelyanov <[email protected]>
Cc: Andrew Morton <[email protected]>
Cc: Mark Seaborn <[email protected]>
Cc: [email protected]
Signed-off-by: Linus Torvalds <[email protected]> |
void DoRealForwardFFT(OpKernelContext* ctx, uint64* fft_shape,
const Tensor& in, Tensor* out) {
// Create the axes (which are always trailing).
const auto axes = Eigen::ArrayXi::LinSpaced(FFTRank, 1, FFTRank);
auto device = ctx->eigen_device<CPUDevice>();
auto input = Tensor(in).flat_inner_dims<RealT, FFTRank + 1>();
const auto input_dims = input.dimensions();
// Slice input to fft_shape on its inner-most dimensions.
Eigen::DSizes<Eigen::DenseIndex, FFTRank + 1> input_slice_sizes;
input_slice_sizes[0] = input_dims[0];
TensorShape temp_shape{input_dims[0]};
for (int i = 1; i <= FFTRank; ++i) {
input_slice_sizes[i] = fft_shape[i - 1];
temp_shape.AddDim(fft_shape[i - 1]);
}
auto output = out->flat_inner_dims<ComplexT, FFTRank + 1>();
const Eigen::DSizes<Eigen::DenseIndex, FFTRank + 1> zero_start_indices;
// Compute the full FFT using a temporary tensor.
Tensor temp;
OP_REQUIRES_OK(ctx, ctx->allocate_temp(DataTypeToEnum<ComplexT>::v(),
temp_shape, &temp));
auto full_fft = temp.flat_inner_dims<ComplexT, FFTRank + 1>();
full_fft.device(device) =
input.slice(zero_start_indices, input_slice_sizes)
.template fft<Eigen::BothParts, Eigen::FFT_FORWARD>(axes);
// Slice away the negative frequency components.
output.device(device) =
full_fft.slice(zero_start_indices, output.dimensions());
} | 1 | [
"CWE-617",
"CWE-703"
]
| tensorflow | 31bd5026304677faa8a0b77602c6154171b9aec1 | 280,992,516,761,239,440,000,000,000,000,000,000,000 | 33 | Prevent check fail in FFT
PiperOrigin-RevId: 372031044
Change-Id: I50994e3e8a5d1342d01bde80256f6bf2730ca299 |
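A generic C sketch of the kind of argument validation this fix implies, not the actual TensorFlow code: reject a requested FFT length that exceeds the data actually present instead of letting a later CHECK abort the process.

#include <stdint.h>

/* Sketch: dims[0] is the batch dimension, dims[1..rank] the
 * transformed axes, mirroring input_slice_sizes above. */
int fft_shape_is_valid(const uint64_t *fft_shape,
                       const int64_t *dims, int rank)
{
    for (int i = 0; i < rank; ++i)
        if (dims[i + 1] < 0 || fft_shape[i] > (uint64_t)dims[i + 1])
            return 0;   /* invalid argument: reject the op */
    return 1;
}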
listener_set_session_idle_internal (GSListener *listener,
gboolean idle)
{
listener->priv->session_idle = idle;
if (idle) {
listener->priv->session_idle_start = time (NULL);
} else {
listener->priv->session_idle_start = 0;
}
return TRUE;
} | 0 | []
| gnome-screensaver | 284c9924969a49dbf2d5fae1d680d3310c4df4a3 | 46,261,496,979,556,340,000,000,000,000,000,000,000 | 13 | Remove session inhibitors if the originator falls of the bus
This fixes a problem where totem leaves inhibitors behind, see
bug 600488. |
static __poll_t ep_read_events_proc(struct eventpoll *ep, struct list_head *head,
void *priv)
{
struct epitem *epi, *tmp;
poll_table pt;
int depth = *(int *)priv;
init_poll_funcptr(&pt, NULL);
depth++;
list_for_each_entry_safe(epi, tmp, head, rdllink) {
if (ep_item_poll(epi, &pt, depth)) {
return EPOLLIN | EPOLLRDNORM;
} else {
/*
* Item has been dropped into the ready list by the poll
* callback, but it's not actually ready, as far as
* caller requested events goes. We can remove it here.
*/
__pm_relax(ep_wakeup_source(epi));
list_del_init(&epi->rdllink);
}
}
return 0;
} | 0 | [
"CWE-416"
]
| linux | a9ed4a6560b8562b7e2e2bed9527e88001f7b682 | 13,226,608,264,698,595,000,000,000,000,000,000,000 | 26 | epoll: Keep a reference on files added to the check list
When adding a new fd to an epoll, and that this new fd is an
epoll fd itself, we recursively scan the fds attached to it
to detect cycles, and add non-epool files to a "check list"
that gets subsequently parsed.
However, this check list isn't completely safe when deletions
can happen concurrently. To sidestep the issue, make sure that
a struct file placed on the check list sees its f_count increased,
ensuring that a concurrent deletion won't result in the file
disapearing from under our feet.
Cc: [email protected]
Signed-off-by: Marc Zyngier <[email protected]>
Signed-off-by: Al Viro <[email protected]> |
jpc_ppxstab_t *jpc_ppxstab_create()
{
jpc_ppxstab_t *tab;
if (!(tab = jas_malloc(sizeof(jpc_ppxstab_t)))) {
return 0;
}
tab->numents = 0;
tab->maxents = 0;
tab->ents = 0;
return tab;
} | 0 | [
"CWE-189"
]
| jasper | 3c55b399c36ef46befcb21e4ebc4799367f89684 | 41,935,683,578,717,220,000,000,000,000,000,000,000 | 12 | At many places in the code, jas_malloc or jas_recalloc was being
invoked with the size argument being computed in a manner that would not
allow integer overflow to be detected. Now, these places in the code
have been modified to use special-purpose memory allocation functions
(e.g., jas_alloc2, jas_alloc3, jas_realloc2) that check for overflow.
This should fix many security problems. |
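A sketch of an overflow-checked allocator in the spirit of jas_alloc2(num, size); the exact jasper signatures may differ:

#include <stdint.h>
#include <stdlib.h>

/* Sketch: multiply-and-allocate that fails cleanly instead of
 * letting num * size wrap and under-allocate. */
void *alloc2(size_t num, size_t size)
{
    if (size != 0 && num > SIZE_MAX / size)
        return NULL;            /* num * size would overflow */
    return malloc(num * size);
}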
int handle(int s, unsigned char* data, int len, struct sockaddr_in *s_in)
{
char buf[2048];
unsigned short *cmd = (unsigned short *)buf;
int plen;
struct in_addr *addr = &s_in->sin_addr;
unsigned short *pid = (unsigned short*) data;
/* inet check */
if (len == S_HELLO_LEN && memcmp(data, "sorbo", 5) == 0) {
unsigned short *id = (unsigned short*) (data+5);
int x = 2+4+2;
*cmd = htons(S_CMD_INET_CHECK);
memcpy(cmd+1, addr, 4);
memcpy(cmd+1+2, id, 2);
printf("Inet check by %s %d\n",
inet_ntoa(*addr), ntohs(*id));
if (send(s, buf, x, 0) != x)
return 1;
return 0;
}
*cmd++ = htons(S_CMD_PACKET);
*cmd++ = *pid;
plen = len - 2;
last_id = ntohs(*pid);
if (last_id > 20000)
wrap = 1;
if (wrap && last_id < 100) {
wrap = 0;
memset(ids, 0, sizeof(ids));
}
printf("Got packet %d %d", last_id, plen);
if (is_dup(last_id)) {
printf(" (DUP)\n");
return 0;
}
printf("\n");
*cmd++ = htons(plen);
memcpy(cmd, data+2, plen);
plen += 2 + 2 + 2;
assert(plen <= (int) sizeof(buf));
if (send(s, buf, plen, 0) != plen)
return 1;
return 0;
} | 1 | [
"CWE-20",
"CWE-787"
]
| aircrack-ng | da087238963c1239fdabd47dc1b65279605aca70 | 173,226,201,243,026,800,000,000,000,000,000,000,000 | 54 | Buddy-ng: Fixed segmentation fault (Closes #15 on GitHub).
git-svn-id: http://svn.aircrack-ng.org/trunk@2418 28c6078b-6c39-48e3-add9-af49d547ecab |
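A sketch of the defensive length check this kind of fix calls for; the concrete bound used in buddy-ng may differ:

#include <string.h>

/* Sketch: validate the attacker-controlled length against the
 * destination buffer before copying, rather than asserting after
 * the arithmetic has already been done. */
int copy_payload(unsigned char *dst, size_t dstlen,
                 const unsigned char *src, int plen)
{
    if (plen < 0 || (size_t)plen > dstlen)
        return -1;      /* drop the malformed packet */
    memcpy(dst, src, (size_t)plen);
    return 0;
}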
static inline void invlpga(unsigned long addr, u32 asid)
{
asm volatile (__ex(SVM_INVLPGA) : : "a"(addr), "c"(asid));
} | 0 | [
"CWE-400"
]
| linux-2.6 | 9581d442b9058d3699b4be568b6e5eae38a41493 | 125,583,594,612,238,040,000,000,000,000,000,000,000 | 4 | KVM: Fix fs/gs reload oops with invalid ldt
kvm reloads the host's fs and gs blindly, however the underlying segment
descriptors may be invalid due to the user modifying the ldt after loading
them.
Fix by using the safe accessors (loadsegment() and load_gs_index()) instead
of home grown unsafe versions.
This is CVE-2010-3698.
KVM-Stable-Tag.
Signed-off-by: Avi Kivity <[email protected]>
Signed-off-by: Marcelo Tosatti <[email protected]> |
ecb_ews_filter_out_unchanged_instances (const GSList *to_save_instances,
const GSList *existing_instances,
GSList **out_changed_instances, /* ChangeData * */
GSList **out_removed_instances) /* ECalComponent * */
{
GSList *link = NULL;
GHashTable *existing_hash;
GHashTableIter iter;
gpointer value;
g_return_if_fail (to_save_instances != NULL);
g_return_if_fail (existing_instances != NULL);
g_return_if_fail (out_changed_instances != NULL);
g_return_if_fail (out_removed_instances != NULL);
*out_changed_instances = NULL;
*out_removed_instances = NULL;
existing_hash = g_hash_table_new_full ((GHashFunc)e_cal_component_id_hash, (GEqualFunc) e_cal_component_id_equal,
(GDestroyNotify) e_cal_component_free_id, NULL);
for (link = (GSList *) existing_instances; link; link = g_slist_next (link)) {
ECalComponent *comp = link->data;
ECalComponentId *id;
id = e_cal_component_get_id (comp);
if (id)
g_hash_table_insert (existing_hash, id, comp);
}
for (link = (GSList *) to_save_instances; link; link = g_slist_next (link)) {
ECalComponent *comp = link->data;
ECalComponentId *id = NULL;
id = e_cal_component_get_id (comp);
if (id) {
ECalComponent *old_comp;
old_comp = g_hash_table_lookup (existing_hash, id);
if (!ecb_ews_components_equal (comp, old_comp)) {
ChangeData *cd;
cd = g_new0 (ChangeData, 1);
cd->old_component = old_comp ? g_object_ref (old_comp) : NULL;
cd->new_component = g_object_ref (comp);
*out_changed_instances = g_slist_prepend (*out_changed_instances, cd);
}
g_hash_table_remove (existing_hash, id);
e_cal_component_free_id (id);
}
}
g_hash_table_iter_init (&iter, existing_hash);
while (g_hash_table_iter_next (&iter, NULL, &value)) {
*out_removed_instances = g_slist_prepend (*out_removed_instances, g_object_ref (value));
}
g_hash_table_destroy (existing_hash);
} | 0 | [
"CWE-295"
]
| evolution-ews | 915226eca9454b8b3e5adb6f2fff9698451778de | 110,635,797,888,109,190,000,000,000,000,000,000,000 | 62 | I#27 - SSL Certificates are not validated
This depends on https://gitlab.gnome.org/GNOME/evolution-data-server/commit/6672b8236139bd6ef41ecb915f4c72e2a052dba5 too.
Closes https://gitlab.gnome.org/GNOME/evolution-ews/issues/27 |
static void stats_reset(void) {
STATS_LOCK();
memset(&stats, 0, sizeof(struct stats));
stats_prefix_clear();
STATS_UNLOCK();
threadlocal_stats_reset();
item_stats_reset();
} | 0 | [
"CWE-190"
]
| memcached | bd578fc34b96abe0f8d99c1409814a09f51ee71c | 294,116,743,603,639,800,000,000,000,000,000,000,000 | 8 | CVE reported by cisco talos |
pkinit_get_certs_pkcs12(krb5_context context,
pkinit_plg_crypto_context plg_cryptoctx,
pkinit_req_crypto_context req_cryptoctx,
pkinit_identity_opts *idopts,
pkinit_identity_crypto_context id_cryptoctx,
krb5_principal princ)
{
krb5_error_code retval = KRB5KDC_ERR_PREAUTH_FAILED;
X509 *x = NULL;
PKCS12 *p12 = NULL;
int ret;
FILE *fp;
EVP_PKEY *y = NULL;
if (idopts->cert_filename == NULL) {
pkiDebug("%s: failed to get user's cert location\n", __FUNCTION__);
goto cleanup;
}
if (idopts->key_filename == NULL) {
pkiDebug("%s: failed to get user's private key location\n", __FUNCTION__);
goto cleanup;
}
fp = fopen(idopts->cert_filename, "rb");
if (fp == NULL) {
pkiDebug("Failed to open PKCS12 file '%s', error %d\n",
idopts->cert_filename, errno);
goto cleanup;
}
set_cloexec_file(fp);
p12 = d2i_PKCS12_fp(fp, NULL);
fclose(fp);
if (p12 == NULL) {
pkiDebug("Failed to decode PKCS12 file '%s' contents\n",
idopts->cert_filename);
goto cleanup;
}
/*
* Try parsing with no pass phrase first. If that fails,
* prompt for the pass phrase and try again.
*/
ret = PKCS12_parse(p12, NULL, &y, &x, NULL);
if (ret == 0) {
krb5_data rdat;
krb5_prompt kprompt;
krb5_prompt_type prompt_type;
int r = 0;
char prompt_string[128];
char prompt_reply[128];
char *prompt_prefix = _("Pass phrase for");
pkiDebug("Initial PKCS12_parse with no password failed\n");
memset(prompt_reply, '\0', sizeof(prompt_reply));
rdat.data = prompt_reply;
rdat.length = sizeof(prompt_reply);
r = snprintf(prompt_string, sizeof(prompt_string), "%s %s",
prompt_prefix, idopts->cert_filename);
if (r >= (int) sizeof(prompt_string)) {
pkiDebug("Prompt string, '%s %s', is too long!\n",
prompt_prefix, idopts->cert_filename);
goto cleanup;
}
kprompt.prompt = prompt_string;
kprompt.hidden = 1;
kprompt.reply = &rdat;
prompt_type = KRB5_PROMPT_TYPE_PREAUTH;
/* PROMPTER_INVOCATION */
k5int_set_prompt_types(context, &prompt_type);
r = (*id_cryptoctx->prompter)(context, id_cryptoctx->prompter_data,
NULL, NULL, 1, &kprompt);
k5int_set_prompt_types(context, 0);
ret = PKCS12_parse(p12, rdat.data, &y, &x, NULL);
if (ret == 0) {
pkiDebug("Seconde PKCS12_parse with password failed\n");
goto cleanup;
}
}
id_cryptoctx->creds[0] = malloc(sizeof(struct _pkinit_cred_info));
if (id_cryptoctx->creds[0] == NULL)
goto cleanup;
id_cryptoctx->creds[0]->name =
reassemble_pkcs12_name(idopts->cert_filename);
id_cryptoctx->creds[0]->cert = x;
#ifndef WITHOUT_PKCS11
id_cryptoctx->creds[0]->cert_id = NULL;
id_cryptoctx->creds[0]->cert_id_len = 0;
#endif
id_cryptoctx->creds[0]->key = y;
id_cryptoctx->creds[1] = NULL;
retval = 0;
cleanup:
if (p12)
PKCS12_free(p12);
if (retval) {
if (x != NULL)
X509_free(x);
if (y != NULL)
EVP_PKEY_free(y);
}
return retval;
} | 0 | [
"CWE-476"
]
| krb5 | f249555301940c6df3a2cdda13b56b5674eebc2e | 290,579,163,395,962,750,000,000,000,000,000,000,000 | 109 | PKINIT null pointer deref [CVE-2013-1415]
Don't dereference a null pointer when cleaning up.
The KDC plugin for PKINIT can dereference a null pointer when a
malformed packet causes processing to terminate early, leading to
a crash of the KDC process. An attacker would need to have a valid
PKINIT certificate or have observed a successful PKINIT authentication,
or an unauthenticated attacker could execute the attack if anonymous
PKINIT is enabled.
CVSSv2 vector: AV:N/AC:M/Au:N/C:N/I:N/A:C/E:P/RL:O/RC:C
This is a minimal commit for pullup; style fixes in a followup.
[[email protected]: reformat and edit commit message]
(cherry picked from commit c773d3c775e9b2d88bcdff5f8a8ba88d7ec4e8ed)
ticket: 7570
version_fixed: 1.11.1
status: resolved |
char *run_unstrip_n(const char *dump_dir_name, unsigned timeout_sec)
{
int flags = EXECFLG_INPUT_NUL | EXECFLG_OUTPUT | EXECFLG_SETSID | EXECFLG_QUIET;
VERB1 flags &= ~EXECFLG_QUIET;
int pipeout[2];
char* args[4];
args[0] = (char*)"eu-unstrip";
args[1] = xasprintf("--core=%s/"FILENAME_COREDUMP, dump_dir_name);
args[2] = (char*)"-n";
args[3] = NULL;
pid_t child = fork_execv_on_steroids(flags, args, pipeout, /*env_vec:*/ NULL, /*dir:*/ NULL, /*uid(unused):*/ 0);
free(args[1]);
/* Bugs in unstrip or corrupted coredumps can cause it to enter an infinite loop.
* Therefore we have a (largish) timeout, after which we kill the child.
*/
ndelay_on(pipeout[0]);
int t = time(NULL); /* int is enough, no need to use time_t */
int endtime = t + timeout_sec;
struct strbuf *buf_out = strbuf_new();
while (1)
{
int timeout = endtime - t;
if (timeout < 0)
{
kill(child, SIGKILL);
strbuf_free(buf_out);
buf_out = NULL;
break;
}
/* We don't check poll result - checking read result is enough */
struct pollfd pfd;
pfd.fd = pipeout[0];
pfd.events = POLLIN;
poll(&pfd, 1, timeout * 1000);
char buff[1024];
int r = read(pipeout[0], buff, sizeof(buff) - 1);
if (r <= 0)
{
/* I did see EAGAIN happening here */
if (r < 0 && errno == EAGAIN)
goto next;
break;
}
buff[r] = '\0';
strbuf_append_str(buf_out, buff);
next:
t = time(NULL);
}
close(pipeout[0]);
/* Prevent having zombie child process */
int status;
safe_waitpid(child, &status, 0);
if (status != 0 || buf_out == NULL)
{
/* unstrip didn't exit with exit code 0, or we timed out */
strbuf_free(buf_out);
return NULL;
}
return strbuf_free_nobuf(buf_out);
} | 0 | [
"CWE-59"
]
| abrt | 7417505e1d93cc95ec648b74e3c801bc67aacb9f | 336,911,611,079,044,480,000,000,000,000,000,000,000 | 66 | daemon, dbus: allow only root to create CCpp, Koops, vmcore and xorg
Florian Weimer <[email protected]>:
This prevents users from feeding things that are not actually
coredumps and excerpts from /proc to these analyzers.
For example, it should not be possible to trigger a rule with
“EVENT=post-create analyzer=CCpp” using NewProblem
Related: #1212861
Signed-off-by: Jakub Filak <[email protected]> |
void do_wait_for_slave_to_stop(struct st_command *c __attribute__((unused)))
{
static int SLAVE_POLL_INTERVAL= 300000;
MYSQL* mysql = &cur_con->mysql;
for (;;)
{
MYSQL_RES *UNINIT_VAR(res);
MYSQL_ROW row;
int done;
if (mysql_query(mysql,"show status like 'Slave_running'") ||
!(res=mysql_store_result(mysql)))
die("Query failed while probing slave for stop: %s",
mysql_error(mysql));
if (!(row=mysql_fetch_row(res)) || !row[1])
{
mysql_free_result(res);
die("Strange result from query while probing slave for stop");
}
done = !strcmp(row[1],"OFF");
mysql_free_result(res);
if (done)
break;
my_sleep(SLAVE_POLL_INTERVAL);
}
return;
} | 0 | [
"CWE-284",
"CWE-295"
]
| mysql-server | 3bd5589e1a5a93f9c224badf983cd65c45215390 | 57,051,189,964,591,350,000,000,000,000,000,000,000 | 27 | WL#6791 : Redefine client --ssl option to imply enforced encryption
# Changed the meaning of the --ssl=1 option of all client binaries
to mean force ssl, not try ssl and fail over to unencrypted
# Added a new MYSQL_OPT_SSL_ENFORCE mysql_options()
option to specify that an ssl connection is required.
# Added a new macro SSL_SET_OPTIONS() to the client
SSL handling headers that sets all the relevant SSL options at
once.
# Revamped all of the current native clients to use the new macro
# Removed some Windows line endings.
# Added proper handling of the new option into the ssl helper
headers.
# If SSL is mandatory assume that the media is secure enough
for the sha256 plugin to do unencrypted password exchange even
before establishing a connection.
# Set the default ssl cipher to DHE-RSA-AES256-SHA if none is
specified.
# updated test cases that require a non-default cipher to spawn
a mysql command line tool binary since mysqltest has no support
for specifying ciphers.
# updated the replication slave connection code to always enforce
SSL if any of the SSL config options is present.
# test cases added and updated.
# added a mysql_get_option() API to return mysql_options()
values. Used the new API inside the sha256 plugin.
# Fixed compilation warnings because of unused variables.
# Fixed test failures (mysql_ssl and bug13115401)
# Fixed whitespace issues.
# Fully implemented the mysql_get_option() function.
# Added a test case for mysql_get_option()
# fixed some trailing whitespace issues
# fixed some uint/int warnings in mysql_client_test.c
# removed shared memory option from non-windows get_options
tests
# moved MYSQL_OPT_LOCAL_INFILE to the uint options |
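A sketch of a client opting in to the enforced-TLS behavior this change introduces; MYSQL_OPT_SSL_ENFORCE is the option named above, but treat its exact availability as version-dependent (later releases replaced it with MYSQL_OPT_SSL_MODE):

#include <mysql.h>

/* Sketch: fail the connection instead of silently falling back
 * to an unencrypted channel. */
int connect_enforced(MYSQL *conn, const char *host,
                     const char *user, const char *pass)
{
    my_bool enforce = 1;
    mysql_options(conn, MYSQL_OPT_SSL_ENFORCE, &enforce);
    if (!mysql_real_connect(conn, host, user, pass, NULL, 0, NULL, 0))
        return -1;      /* refused rather than downgraded */
    return 0;
}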
xmlHashUpdateEntry2(xmlHashTablePtr table, const xmlChar *name,
const xmlChar *name2, void *userdata,
xmlHashDeallocator f) {
return(xmlHashUpdateEntry3(table, name, name2, NULL, userdata, f));
} | 0 | [
"CWE-399"
]
| libxml2 | 8973d58b7498fa5100a876815476b81fd1a2412a | 154,908,868,874,854,770,000,000,000,000,000,000,000 | 5 | Add hash randomization to hash and dict structures
Following http://www.ocert.org/advisories/ocert-2011-003.html
it seems that having hash randomization might be a good idea
when using XML with untrusted data
* configure.in: lookup for rand, srand and time
* dict.c: add randomization to dictionaries hash tables
* hash.c: add randomization to normal hash tables |
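A sketch of per-table seed mixing as described; the real dict.c/hash.c changes differ in detail:

/* Sketch: give each hash table its own random seed so an attacker
 * cannot precompute colliding keys ahead of time. */
typedef struct {
    unsigned seed;
    /* ... buckets ... */
} hash_table;

void hash_init(hash_table *t, unsigned seed)
{
    t->seed = seed;     /* caller supplies entropy, e.g. from rand() */
}

unsigned hash_string(const hash_table *t, const char *s)
{
    unsigned h = t->seed;
    while (*s)
        h = h * 31 + (unsigned char)*s++;
    return h;
}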
rsRetVal MsgReplaceMSG(msg_t *pThis, uchar* pszMSG, int lenMSG)
{
int lenNew;
uchar *bufNew;
DEFiRet;
ISOBJ_TYPE_assert(pThis, msg);
assert(pszMSG != NULL);
lenNew = pThis->iLenRawMsg + lenMSG - pThis->iLenMSG;
if(lenMSG > pThis->iLenMSG && lenNew >= CONF_RAWMSG_BUFSIZE) {
/* we have lost our "bet" and need to alloc a new buffer ;) */
CHKmalloc(bufNew = MALLOC(lenNew + 1));
memcpy(bufNew, pThis->pszRawMsg, pThis->offMSG);
if(pThis->pszRawMsg != pThis->szRawMsg)
free(pThis->pszRawMsg);
pThis->pszRawMsg = bufNew;
}
if(lenMSG > 0)
memcpy(pThis->pszRawMsg + pThis->offMSG, pszMSG, lenMSG);
pThis->pszRawMsg[lenNew] = '\0'; /* this also works with truncation! */
pThis->iLenRawMsg = lenNew;
pThis->iLenMSG = lenMSG;
finalize_it:
RETiRet;
} | 0 | [
"CWE-772"
]
| rsyslog | 8083bd1433449fd2b1b79bf759f782e0f64c0cd2 | 233,920,483,074,668,000,000,000,000,000,000,000,000 | 27 | backporting abort condition fix from 5.7.7 |
void Scanner::lex_c_comment()
{
loop:
#line 3708 "src/parse/lex.cc"
{
unsigned char yych;
if ((lim - cur) < 2) { if (!fill(2)) { error("unexpected end of input"); exit(1); } }
yych = (unsigned char)*cur;
if (yych <= '\f') {
if (yych <= 0x00) goto yy554;
if (yych == '\n') goto yy558;
goto yy556;
} else {
if (yych <= '\r') goto yy560;
if (yych == '*') goto yy561;
goto yy556;
}
yy554:
++cur;
#line 722 "../src/parse/lex.re"
{ fail_if_eof(); goto loop; }
#line 3726 "src/parse/lex.cc"
yy556:
++cur;
yy557:
#line 723 "../src/parse/lex.re"
{ goto loop; }
#line 3732 "src/parse/lex.cc"
yy558:
++cur;
#line 721 "../src/parse/lex.re"
{ next_line(); goto loop; }
#line 3737 "src/parse/lex.cc"
yy560:
yych = (unsigned char)*++cur;
if (yych == '\n') goto yy558;
goto yy557;
yy561:
yych = (unsigned char)*++cur;
if (yych != '/') goto yy557;
++cur;
#line 720 "../src/parse/lex.re"
{ return; }
#line 3748 "src/parse/lex.cc"
}
#line 724 "../src/parse/lex.re"
} | 1 | [
"CWE-787"
]
| re2c | 039c18949190c5de5397eba504d2c75dad2ea9ca | 339,197,659,838,763,120,000,000,000,000,000,000,000 | 49 | Emit an error when repetition lower bound exceeds upper bound.
Historically this was allowed and re2c swapped the bounds. However, it
most likely indicates an error in user code and there is only a single
occurrence in the tests (and the test in an artificial one), so although
the change is backwards incompatible there is low chance of breaking
real-world code.
This fixes second test case in the bug #394 "Stack overflow due to
recursion in src/dfa/dead_rules.cc" (the actual fix is to limit DFA size
but the test also has counted repetition with swapped bounds). |
introduce (struct dwarf_section * section, bool raw)
{
if (raw)
{
if (do_follow_links && section->filename)
printf (_("Raw dump of debug contents of section %s (loaded from %s):\n\n"),
section->name, section->filename);
else
printf (_("Raw dump of debug contents of section %s:\n\n"), section->name);
}
else
{
if (do_follow_links && section->filename)
printf (_("Contents of the %s section (loaded from %s):\n\n"),
section->name, section->filename);
else
printf (_("Contents of the %s section:\n\n"), section->name);
}
} | 0 | [
"CWE-703"
]
| binutils-gdb | 695c6dfe7e85006b98c8b746f3fd5f913c94ebff | 64,771,128,925,618,590,000,000,000,000,000,000,000 | 19 | PR29370, infinite loop in display_debug_abbrev
The PR29370 testcase is a fuzzed object file with multiple
.trace_abbrev sections. Multiple .trace_abbrev or .debug_abbrev
sections are not a violation of the DWARF standard. The DWARF5
standard even gives an example of multiple .debug_abbrev sections
contained in groups. Caching and lookup of processed abbrevs thus
needs to be done by section and offset rather than base and offset.
(Why base anyway?) Or, since section contents are kept, by a pointer
into the contents.
PR 29370
* dwarf.c (struct abbrev_list): Replace abbrev_base and
abbrev_offset with raw field.
(find_abbrev_list_by_abbrev_offset): Delete.
(find_abbrev_list_by_raw_abbrev): New function.
(process_abbrev_set): Set list->raw and list->next.
(find_and_process_abbrev_set): Replace abbrev list lookup with
new function. Don't set list abbrev_base, abbrev_offset or next. |
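A sketch of the lookup the ChangeLog entry describes: abbrev lists found by the raw pointer into the section contents they were parsed from, so two sections with equal offsets can no longer alias each other. The struct layout here is illustrative only.

struct abbrev_list {
    const unsigned char *raw;    /* start of this list in its section */
    struct abbrev_list  *next;
};

struct abbrev_list *
find_abbrev_list_by_raw_abbrev(struct abbrev_list *head,
                               const unsigned char *raw)
{
    for (; head != NULL; head = head->next)
        if (head->raw == raw)
            return head;
    return NULL;
}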
static int hid_uevent(struct device *dev, struct kobj_uevent_env *env)
{
struct hid_device *hdev = to_hid_device(dev);
if (add_uevent_var(env, "HID_ID=%04X:%08X:%08X",
hdev->bus, hdev->vendor, hdev->product))
return -ENOMEM;
if (add_uevent_var(env, "HID_NAME=%s", hdev->name))
return -ENOMEM;
if (add_uevent_var(env, "HID_PHYS=%s", hdev->phys))
return -ENOMEM;
if (add_uevent_var(env, "HID_UNIQ=%s", hdev->uniq))
return -ENOMEM;
if (add_uevent_var(env, "MODALIAS=hid:b%04Xg%04Xv%08Xp%08X",
hdev->bus, hdev->group, hdev->vendor, hdev->product))
return -ENOMEM;
return 0;
} | 0 | [
"CWE-125"
]
| linux | 50220dead1650609206efe91f0cc116132d59b3f | 243,900,863,067,356,100,000,000,000,000,000,000,000 | 23 | HID: core: prevent out-of-bound readings
Plugging a Logitech DJ receiver with KASAN activated raises a bunch of
out-of-bound readings.
The fields are allocated up to MAX_USAGE, meaning that potentially, we do
not have enough fields to fit the incoming values.
Add checks and silence KASAN.
Signed-off-by: Benjamin Tissoires <[email protected]>
Signed-off-by: Jiri Kosina <[email protected]> |
node_equal(wordnode_T *n1, wordnode_T *n2)
{
wordnode_T *p1;
wordnode_T *p2;
for (p1 = n1, p2 = n2; p1 != NULL && p2 != NULL;
p1 = p1->wn_sibling, p2 = p2->wn_sibling)
if (p1->wn_byte != p2->wn_byte
|| (p1->wn_byte == NUL
? (p1->wn_flags != p2->wn_flags
|| p1->wn_region != p2->wn_region
|| p1->wn_affixID != p2->wn_affixID)
: (p1->wn_child != p2->wn_child)))
break;
return p1 == NULL && p2 == NULL;
} | 0 | [
"CWE-190"
]
| vim | 399c297aa93afe2c0a39e2a1b3f972aebba44c9d | 195,999,134,097,782,060,000,000,000,000,000,000,000 | 17 | patch 8.0.0322: possible overflow with corrupted spell file
Problem: Possible overflow with spell file where the tree length is
corrupted.
Solution: Check for an invalid length (suggested by shqking) |
static void message_read_cb(GObject *source_object,
GAsyncResult *res,
gpointer user_data)
{
VDAgentConnection *self = user_data;
VDAgentConnectionPrivate *priv = vdagent_connection_get_instance_private(self);
GInputStream *in = G_INPUT_STREAM(source_object);
GError *err = NULL;
gsize bytes_read, data_size;
g_input_stream_read_all_finish(in, res, &bytes_read, &err);
if (err) {
if (g_error_matches(err, G_IO_ERROR, G_IO_ERROR_CANCELLED)) {
g_error_free(err);
} else {
priv->error_cb(self, err);
}
goto unref;
}
if (bytes_read == 0) {
/* see virtio-port.c for the rationale behind this */
if (priv->opening) {
g_usleep(10000);
read_next_message(self);
} else {
priv->error_cb(self, NULL);
}
goto unref;
}
priv->opening = FALSE;
if (!priv->data_buf) {
/* we've read the message header, now let's read its body */
data_size = VDAGENT_CONNECTION_GET_CLASS(self)->handle_header(
self, priv->header_buf);
if (g_cancellable_is_cancelled(priv->cancellable)) {
goto unref;
}
if (data_size > 0) {
priv->data_buf = g_malloc(data_size);
g_input_stream_read_all_async(in,
priv->data_buf, data_size,
G_PRIORITY_DEFAULT, priv->cancellable,
message_read_cb, g_object_ref(self));
goto unref;
}
}
VDAGENT_CONNECTION_GET_CLASS(self)->handle_message(
self, priv->header_buf, priv->data_buf);
g_clear_pointer(&priv->data_buf, g_free);
read_next_message(self);
unref:
g_object_unref(self);
} | 0 | [
"CWE-362"
]
| spice-vd_agent | 51c415df82a52e9ec033225783c77df95f387891 | 258,611,859,188,355,800,000,000,000,000,000,000,000 | 60 | Avoids user session hijacking
Avoids a user hijacking sessions by reusing a PID.
In theory an attacker could:
- open a connection to the daemon;
- fork and exit the process but keep the file descriptor open
(inheriting or duplicating it in the forked process);
- force the OS to recycle the initial PID, by creating many short-lived
processes.
The daemon would detect the old PID as having the new session.
Check the user to avoid such replacements.
This issue was reported by SUSE security team.
Signed-off-by: Frediano Ziglio <[email protected]>
Acked-by: Uri Lublin <[email protected]> |
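A sketch of checking the peer's uid as well as its pid, assuming a Linux AF_UNIX socket and SO_PEERCRED; the actual daemon code is structured differently:

#define _GNU_SOURCE
#include <sys/socket.h>
#include <sys/types.h>

/* Sketch: a recycled PID alone must not identify a session owner;
 * require that the connecting uid matches the recorded one too. */
int peer_matches(int fd, pid_t want_pid, uid_t want_uid)
{
    struct ucred cred;
    socklen_t len = sizeof(cred);

    if (getsockopt(fd, SOL_SOCKET, SO_PEERCRED, &cred, &len) < 0)
        return 0;
    return cred.pid == want_pid && cred.uid == want_uid;
}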
int kvm_hv_vapic_msr_write(struct kvm_vcpu *vcpu, u32 reg, u64 data)
{
if (!lapic_in_kernel(vcpu))
return 1;
return kvm_lapic_msr_write(vcpu->arch.apic, reg, data);
} | 0 | [
"CWE-476"
]
| linux | 00b5f37189d24ac3ed46cb7f11742094778c46ce | 291,495,546,533,501,000,000,000,000,000,000,000,000 | 7 | KVM: x86: Avoid theoretical NULL pointer dereference in kvm_irq_delivery_to_apic_fast()
When kvm_irq_delivery_to_apic_fast() is called with APIC_DEST_SELF
shorthand, 'src' must not be NULL. Crash the VM with KVM_BUG_ON()
instead of crashing the host.
Signed-off-by: Vitaly Kuznetsov <[email protected]>
Message-Id: <[email protected]>
Cc: [email protected]
Signed-off-by: Paolo Bonzini <[email protected]> |
ff_layout_encode_layoutstats(struct xdr_stream *xdr, const void *args,
const struct nfs4_xdr_opaque_data *opaque)
{
struct nfs42_layoutstat_devinfo *devinfo = container_of(opaque,
struct nfs42_layoutstat_devinfo, ld_private);
__be32 *start;
/* layoutupdate length */
start = xdr_reserve_space(xdr, 4);
ff_layout_encode_ff_layoutupdate(xdr, devinfo, opaque->data);
*start = cpu_to_be32((xdr->p - start - 1) * 4);
} | 0 | [
"CWE-787"
]
| linux | ed34695e15aba74f45247f1ee2cf7e09d449f925 | 287,118,479,485,874,560,000,000,000,000,000,000,000 | 13 | pNFS/flexfiles: fix incorrect size check in decode_nfs_fh()
We (adam zabrocki, alexander matrosov, alexander tereshkin, maksym
bazalii) observed the check:
if (fh->size > sizeof(struct nfs_fh))
should not use the size of the nfs_fh struct which includes an extra two
bytes from the size field.
struct nfs_fh {
unsigned short size;
unsigned char data[NFS_MAXFHSIZE];
}
but should determine the size from data[NFS_MAXFHSIZE] so the memcpy
will not write 2 bytes beyond destination. The proposed fix is to
compare against the NFS_MAXFHSIZE directly, as is done elsewhere in fs
code base.
Fixes: d67ae825a59d ("pnfs/flexfiles: Add the FlexFile Layout Driver")
Signed-off-by: Nikola Livic <[email protected]>
Signed-off-by: Dan Carpenter <[email protected]>
Signed-off-by: Trond Myklebust <[email protected]> |
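A sketch contrasting the broken and fixed bounds, using the struct quoted in the message (NFS_MAXFHSIZE is 128 in current kernels):

#include <string.h>

#define NFS_MAXFHSIZE 128

struct nfs_fh {
    unsigned short size;
    unsigned char  data[NFS_MAXFHSIZE];
};

/* Sketch: bound the copy by the data[] capacity, not by
 * sizeof(struct nfs_fh), which is two bytes larger because it
 * also counts the size field. */
int copy_fh(struct nfs_fh *dst, const unsigned char *src,
            unsigned short n)
{
    if (n > NFS_MAXFHSIZE)  /* was: n > sizeof(struct nfs_fh) */
        return -1;
    dst->size = n;
    memcpy(dst->data, src, n);
    return 0;
}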
static int sc_pkcs15emu_sc_hsm_add_prkd(sc_pkcs15_card_t * p15card, u8 keyid) {
sc_card_t *card = p15card->card;
sc_pkcs15_cert_info_t cert_info;
sc_pkcs15_object_t cert_obj;
struct sc_pkcs15_object prkd;
sc_pkcs15_prkey_info_t *key_info;
u8 fid[2];
/* enough to hold a complete certificate */
u8 efbin[4096];
u8 *ptr;
size_t len;
int r;
fid[0] = PRKD_PREFIX;
fid[1] = keyid;
/* Try to select a related EF containing the PKCS#15 description of the key */
len = sizeof efbin;
r = read_file(p15card, fid, efbin, &len, 1);
LOG_TEST_RET(card->ctx, r, "Skipping optional EF.PRKD");
ptr = efbin;
memset(&prkd, 0, sizeof(prkd));
r = sc_pkcs15_decode_prkdf_entry(p15card, &prkd, (const u8 **)&ptr, &len);
LOG_TEST_RET(card->ctx, r, "Skipping optional EF.PRKD");
/* All keys require user PIN authentication */
prkd.auth_id.len = 1;
prkd.auth_id.value[0] = 1;
/*
* Set private key flag as all keys are private anyway
*/
prkd.flags |= SC_PKCS15_CO_FLAG_PRIVATE;
key_info = (sc_pkcs15_prkey_info_t *)prkd.data;
key_info->key_reference = keyid;
key_info->path.aid.len = 0;
if (prkd.type == SC_PKCS15_TYPE_PRKEY_RSA) {
r = sc_pkcs15emu_add_rsa_prkey(p15card, &prkd, key_info);
} else {
r = sc_pkcs15emu_add_ec_prkey(p15card, &prkd, key_info);
}
LOG_TEST_RET(card->ctx, r, "Could not add private key to framework");
/* Check if we also have a certificate for the private key */
fid[0] = EE_CERTIFICATE_PREFIX;
len = sizeof efbin;
r = read_file(p15card, fid, efbin, &len, 0);
LOG_TEST_RET(card->ctx, r, "Could not read EF");
if (efbin[0] == 0x67) { /* Decode CSR and create public key object */
sc_pkcs15emu_sc_hsm_add_pubkey(p15card, efbin, len, key_info, prkd.label);
free(key_info);
return SC_SUCCESS; /* Ignore any errors */
}
if (efbin[0] != 0x30) {
free(key_info);
return SC_SUCCESS;
}
memset(&cert_info, 0, sizeof(cert_info));
memset(&cert_obj, 0, sizeof(cert_obj));
cert_info.id = key_info->id;
sc_path_set(&cert_info.path, SC_PATH_TYPE_FILE_ID, fid, 2, 0, 0);
cert_info.path.count = -1;
if (p15card->opts.use_file_cache) {
/* look this up with our AID, which should already be cached from the
* call to `read_file`. This may have the side effect that OpenSC's
* caching layer re-selects our applet *if the cached file cannot be
* found/used* and we may lose the authentication status. We assume
* that caching works perfectly without this side effect. */
cert_info.path.aid = sc_hsm_aid;
}
strlcpy(cert_obj.label, prkd.label, sizeof(cert_obj.label));
r = sc_pkcs15emu_add_x509_cert(p15card, &cert_obj, &cert_info);
free(key_info);
LOG_TEST_RET(card->ctx, r, "Could not add certificate");
return SC_SUCCESS;
} | 0 | [
"CWE-415",
"CWE-119"
]
| OpenSC | 360e95d45ac4123255a4c796db96337f332160ad | 109,187,732,029,260,470,000,000,000,000,000,000,000 | 91 | fixed out of bounds writes
Thanks to Eric Sesterhenn from X41 D-SEC GmbH
for reporting the problems. |
static int wcd9335_setup_irqs(struct wcd9335_codec *wcd)
{
int irq, ret, i;
for (i = 0; i < ARRAY_SIZE(wcd9335_irqs); i++) {
irq = regmap_irq_get_virq(wcd->irq_data, wcd9335_irqs[i].irq);
if (irq < 0) {
dev_err(wcd->dev, "Failed to get %s\n",
wcd9335_irqs[i].name);
return irq;
}
ret = devm_request_threaded_irq(wcd->dev, irq, NULL,
wcd9335_irqs[i].handler,
IRQF_TRIGGER_RISING,
wcd9335_irqs[i].name, wcd);
if (ret) {
dev_err(wcd->dev, "Failed to request %s\n",
wcd9335_irqs[i].name);
return ret;
}
}
/* enable interrupts on all slave ports */
for (i = 0; i < WCD9335_SLIM_NUM_PORT_REG; i++)
regmap_write(wcd->if_regmap, WCD9335_SLIM_PGD_PORT_INT_EN0 + i,
0xFF);
return ret;
} | 0 | []
| sound | a54988113985ca22e414e132054f234fc8a92604 | 196,215,611,931,005,970,000,000,000,000,000,000,000 | 30 | wcd9335: fix an incorrect use of kstrndup()
In wcd9335_codec_enable_dec(), 'widget_name' is allocated by kstrndup().
However, according to doc: "Note: Use kmemdup_nul() instead if the size
is known exactly." So we should use kmemdup_nul() here instead of
kstrndup().
Signed-off-by: Gen Zhang <[email protected]>
Signed-off-by: Mark Brown <[email protected]> |
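A userspace sketch of the distinction: kstrndup() scans for a terminator within n bytes, while kmemdup_nul() copies exactly len known bytes and appends the NUL itself.

#include <stdlib.h>
#include <string.h>

/* Sketch of kmemdup_nul() semantics: duplicate exactly len bytes
 * and NUL-terminate, for when the length is already known. */
char *memdup_nul(const char *src, size_t len)
{
    char *p = malloc(len + 1);
    if (p == NULL)
        return NULL;
    memcpy(p, src, len);
    p[len] = '\0';
    return p;
}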
gimp_channel_get_show_masked (GimpChannel *channel)
{
g_return_val_if_fail (GIMP_IS_CHANNEL (channel), FALSE);
return channel->show_masked;
} | 0 | [
"CWE-703"
]
| gimp | 6ab90ecbbd7cc95901933f62227fd140c0576d55 | 91,088,756,883,825,350,000,000,000,000,000,000,000 | 6 | app: fix #8230 crash in gimp_layer_invalidate_boundary when channel is NULL
gimp_channel_is_empty returns FALSE if channel is NULL. This causes
gimp_layer_invalidate_boundary to crash if the mask channel is NULL.
With a NULL channel gimp_channel_is_empty should return TRUE, just like
the similar gimp_image_is_empty does, because returning FALSE here
suggests we have a non empty channel.
(cherry picked from commit 22af0bcfe67c1c86381f33975ca7fdbde6b36b39) |
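A sketch of the corrected predicate the message describes; the real gimp_channel_is_empty() carries more state checks:

#include <stdbool.h>

/* Sketch: a missing channel must read as empty, mirroring how
 * gimp_image_is_empty() treats a NULL image, so callers such as
 * gimp_layer_invalidate_boundary() never act on a phantom mask. */
bool channel_is_empty(const void *channel)
{
    if (channel == NULL)
        return true;
    /* ... real emptiness test on the channel's contents ... */
    return false;
}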
DEATH_TEST(EqOp, InvalidEooOperand, "Invariant failure _rhs") {
BSONObj operand;
EqualityMatchExpression eq("", operand.firstElement());
} | 0 | []
| mongo | 64095239f41e9f3841d8be9088347db56d35c891 | 197,073,764,651,601,420,000,000,000,000,000,000,000 | 4 | SERVER-51083 Reject invalid UTF-8 from $regex match expressions |
rdpdr_remove_iorequest(struct async_iorequest *prev, struct async_iorequest *iorq)
{
if (!iorq)
return NULL;
if (iorq->buffer)
xfree(iorq->buffer);
if (prev)
{
prev->next = iorq->next;
xfree(iorq);
iorq = prev->next;
}
else
{
/* Even if NULL */
g_iorequest = iorq->next;
xfree(iorq);
iorq = NULL;
}
return iorq;
} | 0 | [
"CWE-787"
]
| rdesktop | 766ebcf6f23ccfe8323ac10242ae6e127d4505d2 | 37,318,536,282,001,923,000,000,000,000,000,000,000 | 22 | Malicious RDP server security fixes
This commit includes fixes for a set of 21 vulnerabilities in
rdesktop when a malicious RDP server is used.
All vulnerabilities was identified and reported by Eyal Itkin.
* Add rdp_protocol_error function that is used in several fixes
* Refactor of process_bitmap_updates
* Fix possible integer overflow in s_check_rem() on 32bit arch
* Fix memory corruption in process_bitmap_data - CVE-2018-8794
* Fix remote code execution in process_bitmap_data - CVE-2018-8795
* Fix remote code execution in process_plane - CVE-2018-8797
* Fix Denial of Service in mcs_recv_connect_response - CVE-2018-20175
* Fix Denial of Service in mcs_parse_domain_params - CVE-2018-20175
* Fix Denial of Service in sec_parse_crypt_info - CVE-2018-20176
* Fix Denial of Service in sec_recv - CVE-2018-20176
* Fix minor information leak in rdpdr_process - CVE-2018-8791
* Fix Denial of Service in cssp_read_tsrequest - CVE-2018-8792
* Fix remote code execution in cssp_read_tsrequest - CVE-2018-8793
* Fix Denial of Service in process_bitmap_data - CVE-2018-8796
* Fix minor information leak in rdpsnd_process_ping - CVE-2018-8798
* Fix Denial of Service in process_secondary_order - CVE-2018-8799
* Fix remote code execution in in ui_clip_handle_data - CVE-2018-8800
* Fix major information leak in ui_clip_handle_data - CVE-2018-20174
* Fix memory corruption in rdp_in_unistr - CVE-2018-20177
* Fix Denial of Service in process_demand_active - CVE-2018-20178
* Fix remote code execution in lspci_process - CVE-2018-20179
* Fix remote code execution in rdpsnddbg_process - CVE-2018-20180
* Fix remote code execution in seamless_process - CVE-2018-20181
* Fix remote code execution in seamless_process_line - CVE-2018-20182 |
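A sketch of the 32-bit-safe remaining-bytes check mentioned in the list above: writing the comparison as a subtraction avoids the pointer addition that can wrap.

#include <stddef.h>

struct stream {
    unsigned char *p;       /* current read position */
    unsigned char *end;     /* one past the last valid byte */
};

/* Sketch: "s->p + n <= s->end" can wrap on a 32-bit arch when n is
 * attacker-controlled; "n <= end - p" cannot, given end >= p. */
int s_check_rem_safe(const struct stream *s, size_t n)
{
    return n <= (size_t)(s->end - s->p);
}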
BOOLEAN AnalyzeL3Hdr(
PNET_PACKET_INFO packetInfo)
{
if(packetInfo->isIP4)
{
IPv4Header *ip4Hdr = (IPv4Header *) RtlOffsetToPointer(packetInfo->headersBuffer, packetInfo->L2HdrLen);
if(packetInfo->dataLength < packetInfo->L2HdrLen + sizeof(*ip4Hdr))
return FALSE;
packetInfo->L3HdrLen = IP_HEADER_LENGTH(ip4Hdr);
if ((packetInfo->L3HdrLen < sizeof(*ip4Hdr)) ||
(packetInfo->dataLength < packetInfo->L2HdrLen + packetInfo->L3HdrLen))
return FALSE;
if(IP_HEADER_VERSION(ip4Hdr) != 4)
return FALSE;
packetInfo->isFragment = IP_HEADER_IS_FRAGMENT(ip4Hdr);
if(!packetInfo->isFragment)
{
AnalyzeL4Proto(ip4Hdr->ip_protocol, packetInfo);
}
}
else if(packetInfo->isIP6)
{
ULONG homeAddrOffset, destAddrOffset;
UCHAR l4Proto;
IPv6Header *ip6Hdr = (IPv6Header *) RtlOffsetToPointer(packetInfo->headersBuffer, packetInfo->L2HdrLen);
if(IP6_HEADER_VERSION(ip6Hdr) != 6)
return FALSE;
if(!AnalyzeIP6Hdr(ip6Hdr, packetInfo->L2PayloadLen,
&packetInfo->L3HdrLen, &l4Proto, &homeAddrOffset, &destAddrOffset))
return FALSE;
if (packetInfo->L3HdrLen > MAX_SUPPORTED_IPV6_HEADERS)
return FALSE;
packetInfo->ip6HomeAddrOffset = (homeAddrOffset) ? packetInfo->L2HdrLen + homeAddrOffset : 0;
packetInfo->ip6DestAddrOffset = (destAddrOffset) ? packetInfo->L2HdrLen + destAddrOffset : 0;
packetInfo->isFragment = (l4Proto == IP6_HDR_FRAGMENT);
if(!packetInfo->isFragment)
{
AnalyzeL4Proto(l4Proto, packetInfo);
}
}
return TRUE;
} | 0 | [
"CWE-20"
]
| kvm-guest-drivers-windows | 723416fa4210b7464b28eab89cc76252e6193ac1 | 144,991,478,655,329,810,000,000,000,000,000,000,000 | 55 | NetKVM: BZ#1169718: Checking the length only on read
Signed-off-by: Joseph Hindin <[email protected]> |
dns_zone_setjournal(dns_zone_t *zone, const char *myjournal) {
isc_result_t result = ISC_R_SUCCESS;
REQUIRE(DNS_ZONE_VALID(zone));
LOCK_ZONE(zone);
result = dns_zone_setstring(zone, &zone->journal, myjournal);
UNLOCK_ZONE(zone);
return (result);
} | 0 | [
"CWE-327"
]
| bind9 | f09352d20a9d360e50683cd1d2fc52ccedcd77a0 | 188,134,471,832,567,670,000,000,000,000,000,000,000 | 11 | Update keyfetch_done compute_tag check
If in keyfetch_done the compute_tag fails (because for example the
algorithm is not supported), don't crash, but instead ignore the
key. |
struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
{
kvm_pfn_t pfn;
pfn = gfn_to_pfn(kvm, gfn);
	return kvm_pfn_to_page(pfn);
} | 0 | [
"CWE-459"
]
| linux | 683412ccf61294d727ead4a73d97397396e69a6b | 180,560,790,015,802,020,000,000,000,000,000,000,000 | 8 | KVM: SEV: add cache flush to solve SEV cache incoherency issues
Flush the CPU caches when memory is reclaimed from an SEV guest (where
reclaim also includes it being unmapped from KVM's memslots). Due to lack
of coherency for SEV encrypted memory, failure to flush results in silent
data corruption if userspace is malicious/broken and doesn't ensure SEV
guest memory is properly pinned and unpinned.
Cache coherency is not enforced across the VM boundary in SEV (AMD APM
vol.2 Section 15.34.7). Confidential cachelines, generated by confidential
VM guests have to be explicitly flushed on the host side. If a memory page
containing dirty confidential cachelines was released by VM and reallocated
to another user, the cachelines may corrupt the new user at a later time.
KVM takes a shortcut by assuming all confidential memory remain pinned
until the end of VM lifetime. Therefore, KVM does not flush cache at
mmu_notifier invalidation events. Because of this incorrect assumption and
the lack of cache flushing, malicous userspace can crash the host kernel:
creating a malicious VM and continuously allocates/releases unpinned
confidential memory pages when the VM is running.
Add cache flush operations to mmu_notifier operations to ensure that any
physical memory leaving the guest VM get flushed. In particular, hook
mmu_notifier_invalidate_range_start and mmu_notifier_release events and
flush cache accordingly. The hook after releasing the mmu lock to avoid
contention with other vCPUs.
Cc: [email protected]
Suggested-by: Sean Christpherson <[email protected]>
Reported-by: Mingwei Zhang <[email protected]>
Signed-off-by: Mingwei Zhang <[email protected]>
Message-Id: <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]> |
void redisFreeCommand(char *cmd) {
hi_free(cmd);
} | 0 | [
"CWE-190",
"CWE-680"
]
| redis | 0215324a66af949be39b34be2d55143232c1cb71 | 58,134,331,295,171,530,000,000,000,000,000,000,000 | 3 | Fix redis-cli / redis-sential overflow on some platforms (CVE-2021-32762) (#9587)
The redis-cli command line tool and redis-sentinel service may be vulnerable
to integer overflow when parsing specially crafted large multi-bulk network
replies. This is a result of a vulnerability in the underlying hiredis
library which does not perform an overflow check before calling the calloc()
heap allocation function.
This issue only impacts systems with heap allocators that do not perform their
own overflow checks. Most modern systems do and are therefore not likely to
be affected. Furthermore, by default redis-sentinel uses the jemalloc allocator
which is also not vulnerable.
Co-authored-by: Yossi Gottlieb <[email protected]> |
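A sketch of the overflow guard the advisory calls for, placed in front of the multi-bulk allocation; the hiredis internals differ:

#include <stdint.h>
#include <stdlib.h>

/* Sketch: cap an attacker-supplied element count before multiplying
 * it into a byte count, so calloc() is never handed a wrapped size
 * on platforms whose allocator does not check. */
void *alloc_reply_elements(long long elements, size_t elem_size)
{
    if (elements < 0 || elem_size == 0 ||
        (unsigned long long)elements > SIZE_MAX / elem_size)
        return NULL;    /* protocol error: reply too large */
    return calloc((size_t)elements, elem_size);
}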
xmlSchemaParseIncludeOrRedefine(xmlSchemaParserCtxtPtr pctxt,
xmlSchemaPtr schema,
xmlNodePtr node,
int type)
{
xmlNodePtr child = NULL;
const xmlChar *schemaLocation = NULL;
int res = 0; /* hasRedefinitions = 0 */
int isChameleon = 0, wasChameleon = 0;
xmlSchemaBucketPtr bucket = NULL;
if ((pctxt == NULL) || (schema == NULL) || (node == NULL))
return (-1);
/*
* Parse attributes. Note that the returned schemaLocation will
* be already converted to an absolute URI.
*/
res = xmlSchemaParseIncludeOrRedefineAttrs(pctxt, schema,
node, (xmlChar **) (&schemaLocation), type);
if (res != 0)
return(res);
/*
* Load and add the schema document.
*/
res = xmlSchemaAddSchemaDoc(pctxt, type, schemaLocation, NULL,
NULL, 0, node, pctxt->targetNamespace, NULL, &bucket);
if (res != 0)
return(res);
/*
* If we get no schema bucket back, then this means that the schema
* document could not be located or was broken XML or was not
* a schema document.
*/
if ((bucket == NULL) || (bucket->doc == NULL)) {
if (type == XML_SCHEMA_SCHEMA_INCLUDE) {
/*
* WARNING for <include>:
* We will raise an error if the schema cannot be located
* for inclusions, since the that was the feedback from the
* schema people. I.e. the following spec piece will *not* be
* satisfied:
* SPEC src-include: "It is not an error for the `actual value` of the
* schemaLocation [attribute] to fail to resolve it all, in which
* case no corresponding inclusion is performed.
* So do we need a warning report here?"
*/
res = XML_SCHEMAP_SRC_INCLUDE;
xmlSchemaCustomErr(ACTXT_CAST pctxt, res,
node, NULL,
"Failed to load the document '%s' for inclusion",
schemaLocation, NULL);
} else {
/*
* NOTE: This was changed to raise an error even if no redefinitions
* are specified.
*
* SPEC src-redefine (1)
* "If there are any element information items among the [children]
* other than <annotation> then the `actual value` of the
* schemaLocation [attribute] must successfully resolve."
	     * TODO: Ask the WG if the location always has to resolve
* here as well!
*/
res = XML_SCHEMAP_SRC_REDEFINE;
xmlSchemaCustomErr(ACTXT_CAST pctxt, res,
node, NULL,
"Failed to load the document '%s' for redefinition",
schemaLocation, NULL);
}
} else {
/*
* Check targetNamespace sanity before parsing the new schema.
* TODO: Note that we won't check further content if the
* targetNamespace was bad.
*/
if (bucket->origTargetNamespace != NULL) {
/*
* SPEC src-include (2.1)
* "SII has a targetNamespace [attribute], and its `actual
* value` is identical to the `actual value` of the targetNamespace
* [attribute] of SII' (which must have such an [attribute])."
*/
if (pctxt->targetNamespace == NULL) {
xmlSchemaCustomErr(ACTXT_CAST pctxt,
XML_SCHEMAP_SRC_INCLUDE,
node, NULL,
"The target namespace of the included/redefined schema "
"'%s' has to be absent, since the including/redefining "
"schema has no target namespace",
schemaLocation, NULL);
goto exit_error;
} else if (!xmlStrEqual(bucket->origTargetNamespace,
pctxt->targetNamespace)) {
/* TODO: Change error function. */
xmlSchemaPCustomErrExt(pctxt,
XML_SCHEMAP_SRC_INCLUDE,
NULL, node,
"The target namespace '%s' of the included/redefined "
"schema '%s' differs from '%s' of the "
"including/redefining schema",
bucket->origTargetNamespace, schemaLocation,
pctxt->targetNamespace);
goto exit_error;
}
} else if (pctxt->targetNamespace != NULL) {
/*
* Chameleons: the original target namespace will
* differ from the resulting namespace.
*/
isChameleon = 1;
if (bucket->parsed &&
bucket->origTargetNamespace != NULL) {
xmlSchemaCustomErr(ACTXT_CAST pctxt,
XML_SCHEMAP_SRC_INCLUDE,
node, NULL,
"The target namespace of the included/redefined schema "
"'%s' has to be absent or the same as the "
"including/redefining schema's target namespace",
schemaLocation, NULL);
goto exit_error;
}
bucket->targetNamespace = pctxt->targetNamespace;
}
}
/*
* Parse the schema.
*/
if (bucket && (!bucket->parsed) && (bucket->doc != NULL)) {
if (isChameleon) {
/* TODO: Get rid of this flag on the schema itself. */
if ((schema->flags & XML_SCHEMAS_INCLUDING_CONVERT_NS) == 0) {
schema->flags |= XML_SCHEMAS_INCLUDING_CONVERT_NS;
} else
wasChameleon = 1;
}
xmlSchemaParseNewDoc(pctxt, schema, bucket);
/* Restore chameleon flag. */
if (isChameleon && (!wasChameleon))
schema->flags ^= XML_SCHEMAS_INCLUDING_CONVERT_NS;
}
/*
* And now for the children...
*/
child = node->children;
if (type == XML_SCHEMA_SCHEMA_REDEFINE) {
/*
* Parse (simpleType | complexType | group | attributeGroup))*
*/
pctxt->redefined = bucket;
/*
* How to proceed if the redefined schema was not located?
*/
pctxt->isRedefine = 1;
while (IS_SCHEMA(child, "annotation") ||
IS_SCHEMA(child, "simpleType") ||
IS_SCHEMA(child, "complexType") ||
IS_SCHEMA(child, "group") ||
IS_SCHEMA(child, "attributeGroup")) {
if (IS_SCHEMA(child, "annotation")) {
/*
* TODO: discard or not?
*/
} else if (IS_SCHEMA(child, "simpleType")) {
xmlSchemaParseSimpleType(pctxt, schema, child, 1);
} else if (IS_SCHEMA(child, "complexType")) {
xmlSchemaParseComplexType(pctxt, schema, child, 1);
/* hasRedefinitions = 1; */
} else if (IS_SCHEMA(child, "group")) {
/* hasRedefinitions = 1; */
xmlSchemaParseModelGroupDefinition(pctxt,
schema, child);
} else if (IS_SCHEMA(child, "attributeGroup")) {
/* hasRedefinitions = 1; */
xmlSchemaParseAttributeGroupDefinition(pctxt, schema,
child);
}
child = child->next;
}
pctxt->redefined = NULL;
pctxt->isRedefine = 0;
} else {
if (IS_SCHEMA(child, "annotation")) {
/*
* TODO: discard or not?
*/
child = child->next;
}
}
if (child != NULL) {
res = XML_SCHEMAP_S4S_ELEM_NOT_ALLOWED;
if (type == XML_SCHEMA_SCHEMA_REDEFINE) {
xmlSchemaPContentErr(pctxt, res,
NULL, node, child, NULL,
"(annotation | (simpleType | complexType | group | attributeGroup))*");
} else {
xmlSchemaPContentErr(pctxt, res,
NULL, node, child, NULL,
"(annotation?)");
}
}
return(res);
exit_error:
return(pctxt->err);
} | 0 | [
"CWE-134"
]
| libxml2 | 4472c3a5a5b516aaf59b89be602fbce52756c3e9 | 289,900,780,928,906,240,000,000,000,000,000,000,000 | 206 | Fix some format string warnings with possible format string vulnerability
For https://bugzilla.gnome.org/show_bug.cgi?id=761029
Decorate every method in libxml2 with the appropriate
LIBXML_ATTR_FORMAT(fmt,args) macro and add some cleanups
following the reports. |
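A sketch of the decoration macro the log describes, assuming GCC/Clang; the compiler then verifies varargs against the format string at every call site:

#include <stdarg.h>
#include <stdio.h>

#if defined(__GNUC__)
# define ATTR_FORMAT(fmt, args) __attribute__((format(printf, fmt, args)))
#else
# define ATTR_FORMAT(fmt, args)
#endif

void report_error(const char *fmt, ...) ATTR_FORMAT(1, 2);

void report_error(const char *fmt, ...)
{
    va_list ap;
    va_start(ap, fmt);
    vfprintf(stderr, fmt, ap);      /* checked at call sites now */
    va_end(ap);
}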
void Smb4KMountDialog::slotIPEntered()
{
KCompletion *completion = m_ip_input->completionObject();
if ( !m_ip_input->userText().isEmpty() )
{
completion->addItem( m_ip_input->userText() );
}
else
{
// Do nothing
}
} | 0 | [
"CWE-20"
]
| smb4k | 71554140bdaede27b95dbe4c9b5a028a83c83cce | 308,204,890,630,810,600,000,000,000,000,000,000,000 | 13 | Find the mount/umount commands in the helper
Instead of trusting what we get passed in
CVE-2017-8849 |
std::string Box_iloc::dump(Indent& indent) const
{
std::ostringstream sstr;
sstr << Box::dump(indent);
for (const Item& item : m_items) {
sstr << indent << "item ID: " << item.item_ID << "\n"
<< indent << " construction method: " << ((int)item.construction_method) << "\n"
<< indent << " data_reference_index: " << std::hex
<< item.data_reference_index << std::dec << "\n"
<< indent << " base_offset: " << item.base_offset << "\n";
sstr << indent << " extents: ";
for (const Extent& extent : item.extents) {
sstr << extent.offset << "," << extent.length;
if (extent.index != 0) {
sstr << ";index=" << extent.index;
}
sstr << " ";
}
sstr << "\n";
}
return sstr.str();
} | 0 | [
"CWE-703"
]
| libheif | 2710c930918609caaf0a664e9c7bc3dce05d5b58 | 316,623,330,549,611,900,000,000,000,000,000,000,000 | 25 | force fraction to a limited resolution to finally solve those pesky numerical edge cases |
MONGO_EXPORT int gridfile_get_numchunks( gridfile *gfile ) {
bson_iterator it;
gridfs_offset length;
gridfs_offset chunkSize;
double numchunks;
bson_find( &it, gfile->meta, "length" );
if( bson_iterator_type( &it ) == BSON_INT )
length = ( gridfs_offset )bson_iterator_int( &it );
else
length = ( gridfs_offset )bson_iterator_long( &it );
bson_find( &it, gfile->meta, "chunkSize" );
chunkSize = bson_iterator_int( &it );
numchunks = ( ( double )length/( double )chunkSize );
return ( numchunks - ( int )numchunks > 0 )
? ( int )( numchunks+1 )
: ( int )( numchunks );
} | 0 | [
"CWE-190"
]
| mongo-c-driver-legacy | 1a1f5e26a4309480d88598913f9eebf9e9cba8ca | 325,887,958,193,466,840,000,000,000,000,000,000,000 | 20 | don't mix up int and size_t (first pass to fix that) |
bgp_attr_local_pref (struct peer *peer, bgp_size_t length,
struct attr *attr, u_char flag, u_char *startp)
{
bgp_size_t total;
total = length + (CHECK_FLAG (flag, BGP_ATTR_FLAG_EXTLEN) ? 4 : 3);
/* Flag checks. */
if (bgp_attr_flag_invalid (peer, BGP_ATTR_LOCAL_PREF, flag))
return bgp_attr_malformed (peer, BGP_ATTR_LOCAL_PREF, flag,
BGP_NOTIFY_UPDATE_ATTR_FLAG_ERR,
startp, total);
/* Length check. */
if (length != 4)
{
zlog (peer->log, LOG_ERR, "LOCAL_PREF attribute length isn't 4 [%u]", length);
return bgp_attr_malformed (peer, BGP_ATTR_LOCAL_PREF, flag,
BGP_NOTIFY_UPDATE_ATTR_LENG_ERR,
startp, total);
}
/* If it is contained in an UPDATE message that is received from an
external peer, then this attribute MUST be ignored by the
receiving speaker. */
if (peer_sort (peer) == BGP_PEER_EBGP)
{
stream_forward_getp (peer->ibuf, length);
return BGP_ATTR_PARSE_PROCEED;
}
attr->local_pref = stream_getl (peer->ibuf);
/* Set atomic aggregate flag. */
attr->flag |= ATTR_FLAG_BIT (BGP_ATTR_LOCAL_PREF);
return BGP_ATTR_PARSE_PROCEED;
} | 1 | []
| quagga | 835315bfb49bff2b2fb354f2075c6d6693c2a151 | 131,422,032,227,743,300,000,000,000,000,000,000,000 | 37 | bgpd: Move up flag-check calls, parcel up attr-parser args, and other cleanups
* bgp_attr.h: (struct bgp_attr_parser_args) Attribute parsing context,
containing common arguments.
* bgp_attr.c: (general) Move the bgp_attr_flag_invalid flag-check calls up,
out of each individual attr parser function, to be done once in attr_parse.
Similarly move the calculation of the 'total' attribute length field up
to attr_parse.
Bundle together common arguments to attr-parsing functions and helpers
into (struct bgp_attr_parser_args), so it can be passed by reference down
the stack & also de-clutter the argument lists & make it easier to
add/modify the context for attr-parsing - add local const aliases to avoid
modifying body of code too much. This also should help avoid cut & paste
errors, where calls to helpers with hard-coded attribute types are pasted
to other functions but the code isn't changed.
(bgp_attr_flags_diagnose) as above.
(bgp_attr_flag_invalid) as above.
(bgp_attr_{origin,aspath,as4_path,nexthop,med,local_pref,atomic}) as above.
(bgp_attr_{aggregator,as4_aggregator,community,originator_id}) as above
(bgp_attr_{cluster_list,ext_communities},bgp_mp_{un,}reach_parse) as above
(bgp_attr_unknown) as above.
(bgp_attr_malformed) as above. Also, startp and length have to be
special-cased, because whether or not to send attribute data depends
on the particular error - a separate length argument, distinct from
args->length, indicates whether or not the attribute data should be sent
in the NOTIFY.
(bgp_attr_aspath_check) Call to bgp_attr_malformed is wrong here, there is
no attribute parsing context - e.g. the 'flag' argument is unlikely to be
right, remove it. Explicitly handle the error instead.
(bgp_attr_munge_as4_attrs) Flag argument is pointless.
As the comment notes, the check here is pointless as AS_PATH presence
already checked elsewhere.
(bgp_attr_parse) Do bgp_attr_flag_invalid call here.
Use (struct bgp_attr_parser_args) for args to attr parser functions.
Remove out-of-context 'flag' argument to as4 checking functions. |
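A sketch of the argument bundle the log describes; field names follow the text above, but the authoritative layout is in bgp_attr.h:

struct peer;
struct attr;

/* Sketch: parcel the per-attribute parsing context into one struct
 * so helpers cannot be handed a stale or hard-coded type/flag pair
 * pasted from another parser. */
struct bgp_attr_parser_args {
    struct peer    *peer;
    struct attr    *attr;
    unsigned char   type;       /* attribute type code */
    unsigned char   flags;      /* attribute flags from the wire */
    unsigned short  length;     /* attribute payload length */
    unsigned char  *startp;     /* start of attribute, for NOTIFY data */
    unsigned long   total;      /* header + payload length */
};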
static int do_grow(struct inode *inode, u64 size)
{
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_sbd *sdp = GFS2_SB(inode);
struct buffer_head *dibh;
struct gfs2_alloc *al = NULL;
int error;
if (gfs2_is_stuffed(ip) &&
(size > (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode)))) {
al = gfs2_alloc_get(ip);
if (al == NULL)
return -ENOMEM;
error = gfs2_quota_lock_check(ip);
if (error)
goto do_grow_alloc_put;
al->al_requested = 1;
error = gfs2_inplace_reserve(ip);
if (error)
goto do_grow_qunlock;
}
error = gfs2_trans_begin(sdp, RES_DINODE + RES_STATFS + RES_RG_BIT, 0);
if (error)
goto do_grow_release;
if (al) {
error = gfs2_unstuff_dinode(ip, NULL);
if (error)
goto do_end_trans;
}
error = gfs2_meta_inode_buffer(ip, &dibh);
if (error)
goto do_end_trans;
i_size_write(inode, size);
ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;
gfs2_trans_add_bh(ip->i_gl, dibh, 1);
gfs2_dinode_out(ip, dibh->b_data);
brelse(dibh);
do_end_trans:
gfs2_trans_end(sdp);
do_grow_release:
if (al) {
gfs2_inplace_release(ip);
do_grow_qunlock:
gfs2_quota_unlock(ip);
do_grow_alloc_put:
gfs2_alloc_put(ip);
}
return error;
} | 0 | [
"CWE-119",
"CWE-787"
]
| linux | 64dd153c83743af81f20924c6343652d731eeecb | 330,671,237,134,916,200,000,000,000,000,000,000,000 | 56 | GFS2: rewrite fallocate code to write blocks directly
GFS2's fallocate code currently goes through the page cache. Since it's only
writing to the end of the file or to holes in it, it doesn't need to, and it
was causing issues on low memory environments. This patch pulls in some of
Steve's block allocation work, and uses it to simply allocate the blocks for
the file, and zero them out at allocation time. It provides a slight
performance increase, and it dramatically simplifies the code.
Signed-off-by: Benjamin Marzinski <[email protected]>
Signed-off-by: Steven Whitehouse <[email protected]> |
static void perf_sample_regs_user(struct perf_regs_user *regs_user,
struct pt_regs *regs)
{
if (!user_mode(regs)) {
if (current->mm)
regs = task_pt_regs(current);
else
regs = NULL;
}
if (regs) {
regs_user->regs = regs;
regs_user->abi = perf_reg_abi(current);
}
} | 0 | [
"CWE-703",
"CWE-189"
]
| linux | 8176cced706b5e5d15887584150764894e94e02f | 190,083,709,924,656,940,000,000,000,000,000,000,000 | 15 | perf: Treat attr.config as u64 in perf_swevent_init()
Trinity discovered that we fail to check all 64 bits of
attr.config passed by user space, resulting to out-of-bounds
access of the perf_swevent_enabled array in
sw_perf_event_destroy().
Introduced in commit b0a873ebb ("perf: Register PMU
implementations").
Signed-off-by: Tommi Rantala <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: [email protected]
Cc: Paul Mackerras <[email protected]>
Cc: Arnaldo Carvalho de Melo <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]> |
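A sketch of the widened check: validate all 64 bits of the user-supplied config before it is ever narrowed to an array index.

#include <stdint.h>

#define SW_EVENT_MAX 11u        /* illustrative bound */

/* Broken pattern: int id = (int)config; if (id >= SW_EVENT_MAX)...
 * truncates the top 32 bits first, so 0x100000000 passes. */
int sw_event_valid(uint64_t config)
{
    return config < SW_EVENT_MAX;
}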
static int SFTP_ServerSendInit(WOLFSSH* ssh) {
int ret;
byte buf[LENGTH_SZ + MSG_ID_SZ + UINT32_SZ];
c32toa(MSG_ID_SZ + UINT32_SZ, buf);
buf[LENGTH_SZ] = WOLFSSH_FTP_VERSION;
/* version */
c32toa((word32)WOLFSSH_SFTP_VERSION, buf + LENGTH_SZ + MSG_ID_SZ);
if ((ret = wolfSSH_stream_send(ssh, buf, sizeof(buf))) != sizeof(buf)) {
return ret;
}
return WS_SUCCESS;
} | 0 | [
"CWE-190"
]
| wolfssh | edb272e35ee57e7b89f3e127222c6981b6a1e730 | 13,919,544,247,039,025,000,000,000,000,000,000,000 | 15 | ASAN SFTP Fixes
When decoding SFTP messages, fix the size checks so they don't wrap. (ZD12766) |
static NTSTATUS winsdb_addr_decode(struct winsdb_handle *h, struct winsdb_record *rec, struct ldb_val *val,
TALLOC_CTX *mem_ctx, struct winsdb_addr **_addr)
{
NTSTATUS status;
struct winsdb_addr *addr;
const char *address;
const char *wins_owner;
const char *expire_time;
char *p;
addr = talloc(mem_ctx, struct winsdb_addr);
if (!addr) {
status = NT_STATUS_NO_MEMORY;
goto failed;
}
address = (char *)val->data;
p = strchr(address, ';');
if (!p) {
/* support old entries, with only the address */
addr->address = (const char *)talloc_steal(addr, val->data);
addr->wins_owner = talloc_strdup(addr, rec->wins_owner);
if (!addr->wins_owner) {
status = NT_STATUS_NO_MEMORY;
goto failed;
}
addr->expire_time = rec->expire_time;
*_addr = addr;
return NT_STATUS_OK;
}
*p = '\0'; p++;
addr->address = talloc_strdup(addr, address);
if (!addr->address) {
status = NT_STATUS_NO_MEMORY;
goto failed;
}
if (strncmp("winsOwner:", p, 10) != 0) {
status = NT_STATUS_INTERNAL_DB_CORRUPTION;
goto failed;
}
wins_owner = p + 10;
p = strchr(wins_owner, ';');
if (!p) {
status = NT_STATUS_INTERNAL_DB_CORRUPTION;
goto failed;
}
*p = '\0';p++;
if (strcmp(wins_owner, "0.0.0.0") == 0) {
wins_owner = h->local_owner;
}
addr->wins_owner = talloc_strdup(addr, wins_owner);
if (!addr->wins_owner) {
status = NT_STATUS_NO_MEMORY;
goto failed;
}
if (strncmp("expireTime:", p, 11) != 0) {
status = NT_STATUS_INTERNAL_DB_CORRUPTION;
goto failed;
}
expire_time = p + 11;
p = strchr(expire_time, ';');
if (!p) {
status = NT_STATUS_INTERNAL_DB_CORRUPTION;
goto failed;
}
*p = '\0';p++;
addr->expire_time = ldb_string_to_time(expire_time);
*_addr = addr;
return NT_STATUS_OK;
failed:
talloc_free(addr);
return status;
} | 0 | [
"CWE-200"
]
| samba | 0a3aa5f908e351201dc9c4d4807b09ed9eedff77 | 13,174,190,830,729,144,000,000,000,000,000,000,000 | 81 | CVE-2022-32746 ldb: Make use of functions for appending to an ldb_message
This aims to minimise usage of the error-prone pattern of searching for
a just-added message element in order to make modifications to it (and
potentially finding the wrong element).
BUG: https://bugzilla.samba.org/show_bug.cgi?id=15009
Signed-off-by: Joseph Sutton <[email protected]> |
explicit SummaryTensorOp(OpKernelConstruction* context) : OpKernel(context) {} | 0 | [
"CWE-20",
"CWE-703"
]
| tensorflow | 290bb05c80c327ed74fae1d089f1001b1e2a4ef7 | 278,417,553,380,189,280,000,000,000,000,000,000,000 | 1 | Fix tf.raw_ops.TensorSummaryV2 vulnerability with invalid serialized_summary_metadata.
Check that input is actually a scalar before treating it as such.
PiperOrigin-RevId: 445197183 |
local void send_bits(s, value, length)
deflate_state *s;
int value; /* value to send */
int length; /* number of bits */
{
Tracevv((stderr," l %2d v %4x ", length, value));
Assert(length > 0 && length <= 15, "invalid length");
s->bits_sent += (ulg)length;
/* If not enough room in bi_buf, use (valid) bits from bi_buf and
* (16 - bi_valid) bits from value, leaving (width - (16-bi_valid))
* unused bits in value.
*/
if (s->bi_valid > (int)Buf_size - length) {
s->bi_buf |= (ush)value << s->bi_valid;
put_short(s, s->bi_buf);
s->bi_buf = (ush)value >> (Buf_size - s->bi_valid);
s->bi_valid += length - Buf_size;
} else {
s->bi_buf |= (ush)value << s->bi_valid;
s->bi_valid += length;
}
} | 0 | [
"CWE-284",
"CWE-787"
]
| zlib | 5c44459c3b28a9bd3283aaceab7c615f8020c531 | 103,210,919,414,325,340,000,000,000,000,000,000 | 23 | Fix a bug that can crash deflate on some input when using Z_FIXED.
This bug was reported by Danilo Ramos of Eideticom, Inc. It has
lain in wait 13 years before being found! The bug was introduced
in zlib 1.2.2.2, with the addition of the Z_FIXED option. That
option forces the use of fixed Huffman codes. For rare inputs with
a large number of distant matches, the pending buffer into which
the compressed data is written can overwrite the distance symbol
table which it overlays. That results in corrupted output due to
invalid distances, and can result in out-of-bound accesses,
crashing the application.
The fix here combines the distance buffer and literal/length
buffers into a single symbol buffer. Now three bytes of pending
buffer space are opened up for each literal or length/distance
pair consumed, instead of the previous two bytes. This assures
that the pending buffer cannot overwrite the symbol table, since
the maximum fixed code compressed length/distance is 31 bits, and
since there are four bytes of pending space for every three bytes
of symbol space. |
get_nks_version (int slot)
{
unsigned char *result = NULL;
size_t resultlen;
int type;
if (iso7816_apdu_direct (slot, "\x80\xaa\x06\x00\x00", 5, 0,
&result, &resultlen))
return 2; /* NKS 2 does not support this command. */
/* Example value: 04 11 19 22 21 6A 20 80 03 03 01 01 01 00 00 00
vv tt ccccccccccccccccc aa bb cc vvvvvvvvvvv xx
vendor (Philips) -+ | | | | | | |
chip type -----------+ | | | | | |
chip id ----------------+ | | | | |
card type (3 - tcos 3) -------------------+ | | | |
OS version of card type ---------------------+ | | |
OS release of card type ------------------------+ | |
OS vendor internal version ------------------------+ |
RFU -----------------------------------------------------------+
*/
if (resultlen < 16)
type = 0; /* Invalid data returned. */
else
type = result[8];
xfree (result);
return type;
} | 0 | [
"CWE-20"
]
| gnupg | 2183683bd633818dd031b090b5530951de76f392 | 125,166,825,992,598,320,000,000,000,000,000,000,000 | 29 | Use inline functions to convert buffer data to scalars.
* common/host2net.h (buf16_to_ulong, buf16_to_uint): New.
(buf16_to_ushort, buf16_to_u16): New.
(buf32_to_size_t, buf32_to_ulong, buf32_to_uint, buf32_to_u32): New.
--
Commit 91b826a38880fd8a989318585eb502582636ddd8 was not enough to
avoid all sign extension on shift problems. Hanno Böck found a case
with an invalid read due to this problem. To fix that once and for
all almost all uses of "<< 24" and "<< 8" are changed by this patch to
use an inline function from host2net.h.
Signed-off-by: Werner Koch <[email protected]> |
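As a sketch of what such an inline conversion helper looks like (shape assumed; the real definitions live in common/host2net.h):

/* Hedged sketch: assemble a big-endian 32-bit value using only unsigned
 * shifts, so no sign extension can occur on a (possibly signed) char. */
static inline unsigned int
buf32_to_uint (const void *buffer)
{
  const unsigned char *p = buffer;
  return (((unsigned int)p[0] << 24) | ((unsigned int)p[1] << 16)
          | ((unsigned int)p[2] << 8) | (unsigned int)p[3]);
}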
**/
CImg<T>& sqrt() {
if (is_empty()) return *this;
cimg_pragma_openmp(parallel for cimg_openmp_if(size()>=8192))
cimg_rof(*this,ptrd,T) *ptrd = (T)std::sqrt((double)*ptrd);
return *this; | 0 | [
"CWE-125"
]
| CImg | 10af1e8c1ad2a58a0a3342a856bae63e8f257abb | 268,849,891,071,461,000,000,000,000,000,000,000,000 | 6 | Fix other issues in 'CImg<T>::load_bmp()'. |
static inline bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
{
return false;
} | 0 | [
"CWE-416"
]
| linux | 0774a964ef561b7170d8d1b1bfe6f88002b6d219 | 113,536,976,195,694,660,000,000,000,000,000,000,000 | 4 | KVM: Fix out of range accesses to memslots
Reset the LRU slot if it becomes invalid when deleting a memslot to fix
an out-of-bounds/use-after-free access when searching through memslots.
Explicitly check for there being no used slots in search_memslots(), and
in the caller of s390's approximation variant.
Fixes: 36947254e5f9 ("KVM: Dynamically size memslot array based on number of used slots")
Reported-by: Qian Cai <[email protected]>
Cc: Peter Xu <[email protected]>
Signed-off-by: Sean Christopherson <[email protected]>
Message-Id: <[email protected]>
Acked-by: Christian Borntraeger <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]> |
static ssize_t yurex_read(struct file *file, char __user *buffer, size_t count,
loff_t *ppos)
{
struct usb_yurex *dev;
int retval = 0;
int bytes_read = 0;
char in_buffer[20];
unsigned long flags;
dev = file->private_data;
mutex_lock(&dev->io_mutex);
if (!dev->interface) { /* already disconnected */
retval = -ENODEV;
goto exit;
}
spin_lock_irqsave(&dev->lock, flags);
bytes_read = snprintf(in_buffer, 20, "%lld\n", dev->bbu);
spin_unlock_irqrestore(&dev->lock, flags);
if (*ppos < bytes_read) {
if (copy_to_user(buffer, in_buffer + *ppos, bytes_read - *ppos))
retval = -EFAULT;
else {
retval = bytes_read - *ppos;
*ppos += bytes_read;
}
}
exit:
mutex_unlock(&dev->io_mutex);
return retval;
} | 1 | [
"CWE-20",
"CWE-200"
]
| linux | f1e255d60ae66a9f672ff9a207ee6cd8e33d2679 | 235,660,846,301,235,150,000,000,000,000,000,000,000 | 34 | USB: yurex: fix out-of-bounds uaccess in read handler
In general, accessing userspace memory beyond the length of the supplied
buffer in VFS read/write handlers can lead to both kernel memory corruption
(via kernel_read()/kernel_write(), which can e.g. be triggered via
sys_splice()) and privilege escalation inside userspace.
Fix it by using simple_read_from_buffer() instead of custom logic.
Fixes: 6bc235a2e24a ("USB: add driver for Meywa-Denki & Kayac YUREX")
Signed-off-by: Jann Horn <[email protected]>
Cc: stable <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]> |
static int vrend_transfer_send_readpixels(struct vrend_resource *res,
struct iovec *iov, int num_iovs,
const struct vrend_transfer_info *info)
{
char *myptr = (char*)iov[0].iov_base + info->offset;
int need_temp = 0;
GLuint fb_id;
char *data;
bool actually_invert, separate_invert = false;
GLenum format, type;
GLint y1;
uint32_t send_size = 0;
uint32_t h = u_minify(res->base.height0, info->level);
int elsize = util_format_get_blocksize(res->base.format);
float depth_scale;
int row_stride = info->stride / elsize;
GLint old_fbo;
glUseProgram(0);
enum virgl_formats fmt = vrend_format_replace_emulated(res->base.bind, res->base.format);
format = tex_conv_table[fmt].glformat;
type = tex_conv_table[fmt].gltype;
/* if we are asked to invert and reading from a front then don't */
actually_invert = res->y_0_top;
if (actually_invert && !has_feature(feat_mesa_invert))
separate_invert = true;
if (num_iovs > 1 || separate_invert)
need_temp = 1;
if (need_temp) {
send_size = util_format_get_nblocks(res->base.format, info->box->width, info->box->height) * info->box->depth * util_format_get_blocksize(res->base.format);
data = malloc(send_size);
if (!data) {
vrend_printf("malloc failed %d\n", send_size);
return ENOMEM;
}
} else {
send_size = iov[0].iov_len - info->offset;
data = myptr;
if (!row_stride)
row_stride = util_format_get_nblocksx(res->base.format, u_minify(res->base.width0, info->level));
}
glGetIntegerv(GL_DRAW_FRAMEBUFFER_BINDING, &old_fbo);
if (res->readback_fb_id == 0 || (int)res->readback_fb_level != info->level ||
(int)res->readback_fb_z != info->box->z) {
if (res->readback_fb_id)
glDeleteFramebuffers(1, &res->readback_fb_id);
glGenFramebuffers(1, &fb_id);
glBindFramebuffer(GL_FRAMEBUFFER, fb_id);
vrend_fb_bind_texture(res, 0, info->level, info->box->z);
res->readback_fb_id = fb_id;
res->readback_fb_level = info->level;
res->readback_fb_z = info->box->z;
} else
glBindFramebuffer(GL_FRAMEBUFFER, res->readback_fb_id);
if (actually_invert)
y1 = h - info->box->y - info->box->height;
else
y1 = info->box->y;
if (has_feature(feat_mesa_invert) && actually_invert)
glPixelStorei(GL_PACK_INVERT_MESA, 1);
if (!vrend_format_is_ds(res->base.format))
glReadBuffer(GL_COLOR_ATTACHMENT0);
if (!need_temp && row_stride)
glPixelStorei(GL_PACK_ROW_LENGTH, row_stride);
switch (elsize) {
case 1:
glPixelStorei(GL_PACK_ALIGNMENT, 1);
break;
case 2:
glPixelStorei(GL_PACK_ALIGNMENT, 2);
break;
case 4:
default:
glPixelStorei(GL_PACK_ALIGNMENT, 4);
break;
case 8:
glPixelStorei(GL_PACK_ALIGNMENT, 8);
break;
}
if (res->base.format == (enum pipe_format)VIRGL_FORMAT_Z24X8_UNORM) {
/* we get values from the guest as 24-bit scaled integers
but we give them to the host GL and it interprets them
as 32-bit scaled integers, so we need to scale them here */
depth_scale = 1.0 / 256.0;
if (!vrend_state.use_core_profile) {
glPixelTransferf(GL_DEPTH_SCALE, depth_scale);
}
}
/* Warn if the driver doesn't agree about the read format and type.
On desktop GL we can use basically any format and type to glReadPixels,
so we picked the format and type that matches the native format.
      But on GLES we are limited to a very small set, luckily most GLES
implementations should return type and format that match the native
      formats, and can be used for glReadPixels according to the GLES spec.
But we have found that at least Mesa returned the wrong formats, again
luckily we are able to change Mesa. But just in case there are more bad
drivers out there, or we mess up the format somewhere, we warn here. */
if (vrend_state.use_gles) {
GLint imp;
if (type != GL_UNSIGNED_BYTE && type != GL_UNSIGNED_INT &&
type != GL_INT && type != GL_FLOAT) {
glGetIntegerv(GL_IMPLEMENTATION_COLOR_READ_TYPE, &imp);
if (imp != (GLint)type) {
vrend_printf( "GL_IMPLEMENTATION_COLOR_READ_TYPE is not expected native type 0x%x != imp 0x%x\n", type, imp);
}
}
if (format != GL_RGBA && format != GL_RGBA_INTEGER) {
glGetIntegerv(GL_IMPLEMENTATION_COLOR_READ_FORMAT, &imp);
if (imp != (GLint)format) {
vrend_printf( "GL_IMPLEMENTATION_COLOR_READ_FORMAT is not expected native format 0x%x != imp 0x%x\n", format, imp);
}
}
}
do_readpixels(info->box->x, y1, info->box->width, info->box->height, format, type, send_size, data);
if (res->base.format == (enum pipe_format)VIRGL_FORMAT_Z24X8_UNORM) {
if (!vrend_state.use_core_profile)
glPixelTransferf(GL_DEPTH_SCALE, 1.0);
else
vrend_scale_depth(data, send_size, depth_scale);
}
if (has_feature(feat_mesa_invert) && actually_invert)
glPixelStorei(GL_PACK_INVERT_MESA, 0);
if (!need_temp && row_stride)
glPixelStorei(GL_PACK_ROW_LENGTH, 0);
glPixelStorei(GL_PACK_ALIGNMENT, 4);
if (need_temp) {
write_transfer_data(&res->base, iov, num_iovs, data,
info->stride, info->box, info->level, info->offset,
separate_invert);
free(data);
}
glBindFramebuffer(GL_FRAMEBUFFER, old_fbo);
return 0;
} | 0 | [
"CWE-787"
]
| virglrenderer | cbc8d8b75be360236cada63784046688aeb6d921 | 285,069,320,061,533,640,000,000,000,000,000,000,000 | 155 | vrend: check transfer bounds for negative values too and report error
Closes #138
Signed-off-by: Gert Wollny <[email protected]>
Reviewed-by: Emil Velikov <[email protected]> |
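A minimal sketch of the guard the message describes, assuming the field names from the function above; the exact error value and message are assumptions:

/* Hedged sketch: reject negative transfer bounds before doing any math
 * on them (fragment, placed before the box values are used). */
if (info->box->x < 0 || info->box->y < 0 ||
    info->box->width < 0 || info->box->height < 0) {
   vrend_printf("%s: negative transfer bounds\n", __func__);
   return EINVAL; /* assumed error code */
}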
set_table_entry(struct ctl_table *entry,
const char *procname, void *data, int maxlen,
mode_t mode, proc_handler *proc_handler)
{
entry->procname = procname;
entry->data = data;
entry->maxlen = maxlen;
entry->mode = mode;
entry->proc_handler = proc_handler;
} | 0 | []
| linux-2.6 | 8f1bc385cfbab474db6c27b5af1e439614f3025c | 97,223,423,330,511,130,000,000,000,000,000,000,000 | 10 | sched: fair: weight calculations
In order to level the hierarchy, we need to calculate load based on the
root view. That is, each task's load is in the same unit.
A
/ \
B 1
/ \
2 3
To compute 1's load we do:
weight(1)
--------------
rq_weight(A)
To compute 2's load we do:
weight(2) weight(B)
------------ * -----------
rq_weight(B) rw_weight(A)
This yields load fractions in comparable units.
The consequence is that it changes virtual time. We used to have:
time_{i}
vtime_{i} = ------------
weight_{i}
vtime = \Sum vtime_{i} = time / rq_weight.
But with the new way of load calculation we get that vtime equals time.
Signed-off-by: Peter Zijlstra <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]> |
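A quick numeric check of the claim (our arithmetic, not from the commit): take weight(1) = weight(2) = weight(3) = weight(B) = 1024, so rq_weight(B) = 2048 and rq_weight(A) = weight(B) + weight(1) = 2048. Then

    load(1) = 1024/2048                   = 1/2
    load(2) = (1024/2048) * (1024/2048)   = 1/4
    load(3) = 1/4

and the fractions sum to 1, i.e. every task's load really is expressed in the same root-level unit.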
static int sctp_setsockopt_connectx_old(struct sock *sk,
struct sockaddr *kaddrs,
int addrs_size)
{
return __sctp_setsockopt_connectx(sk, kaddrs, addrs_size, NULL);
} | 0 | [
"CWE-362"
]
| linux | b166a20b07382b8bc1dcee2a448715c9c2c81b5b | 94,484,487,606,027,500,000,000,000,000,000,000,000 | 6 | net/sctp: fix race condition in sctp_destroy_sock
If sctp_destroy_sock is called without sock_net(sk)->sctp.addr_wq_lock
held and sp->do_auto_asconf is true, then an element is removed
from the auto_asconf_splist without any proper locking.
This can happen in the following functions:
1. In sctp_accept, if sctp_sock_migrate fails.
2. In inet_create or inet6_create, if there is a bpf program
attached to BPF_CGROUP_INET_SOCK_CREATE which denies
creation of the sctp socket.
The bug is fixed by acquiring addr_wq_lock in sctp_destroy_sock
instead of sctp_close.
This addresses CVE-2021-23133.
Reported-by: Or Cohen <[email protected]>
Reviewed-by: Xin Long <[email protected]>
Fixes: 610236587600 ("bpf: Add new cgroup attach type to enable sock modifications")
Signed-off-by: Or Cohen <[email protected]>
Acked-by: Marcelo Ricardo Leitner <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
GC_INNER void GC_unmap_old(void)
{
struct hblk * h;
hdr * hhdr;
int i;
if (GC_unmap_threshold == 0)
return; /* unmapping disabled */
for (i = 0; i <= N_HBLK_FLS; ++i) {
for (h = GC_hblkfreelist[i]; 0 != h; h = hhdr -> hb_next) {
hhdr = HDR(h);
if (!IS_MAPPED(hhdr)) continue;
if ((unsigned short)GC_gc_no - hhdr -> hb_last_reclaimed >
(unsigned short)GC_unmap_threshold) {
GC_unmap((ptr_t)h, hhdr -> hb_sz);
hhdr -> hb_flags |= WAS_UNMAPPED;
}
}
}
} | 0 | [
"CWE-119"
]
| bdwgc | 7292c02fac2066d39dd1bcc37d1a7054fd1e32ee | 323,010,137,665,265,800,000,000,000,000,000,000,000 | 22 | Fix malloc routines to prevent size value wrap-around
See issue #135 on Github.
* allchblk.c (GC_allochblk, GC_allochblk_nth): Use
OBJ_SZ_TO_BLOCKS_CHECKED instead of OBJ_SZ_TO_BLOCKS.
* malloc.c (GC_alloc_large): Likewise.
* alloc.c (GC_expand_hp_inner): Type of "bytes" local variable changed
from word to size_t; cast ROUNDUP_PAGESIZE argument to size_t; prevent
overflow when computing GC_heapsize+bytes > GC_max_heapsize.
* dbg_mlc.c (GC_debug_malloc, GC_debug_malloc_ignore_off_page,
GC_debug_malloc_atomic_ignore_off_page, GC_debug_generic_malloc,
GC_debug_generic_malloc_inner,
GC_debug_generic_malloc_inner_ignore_off_page,
GC_debug_malloc_stubborn, GC_debug_malloc_atomic,
GC_debug_malloc_uncollectable, GC_debug_malloc_atomic_uncollectable):
Use SIZET_SAT_ADD (instead of "+" operator) to add extra bytes to lb
value.
* fnlz_mlc.c (GC_finalized_malloc): Likewise.
* gcj_mlc.c (GC_debug_gcj_malloc): Likewise.
* include/private/gc_priv.h (ROUNDUP_GRANULE_SIZE, ROUNDED_UP_GRANULES,
ADD_SLOP, ROUNDUP_PAGESIZE): Likewise.
* include/private/gcconfig.h (GET_MEM): Likewise.
* mallocx.c (GC_malloc_many, GC_memalign): Likewise.
* os_dep.c (GC_wince_get_mem, GC_win32_get_mem): Likewise.
* typd_mlc.c (GC_malloc_explicitly_typed,
GC_malloc_explicitly_typed_ignore_off_page,
GC_calloc_explicitly_typed): Likewise.
* headers.c (GC_scratch_alloc): Change type of bytes_to_get from word
to size_t (because ROUNDUP_PAGESIZE_IF_MMAP result type changed).
* include/private/gc_priv.h: Include limits.h (unless SIZE_MAX already
defined).
* include/private/gc_priv.h (GC_SIZE_MAX, GC_SQRT_SIZE_MAX): Move from
malloc.c file.
* include/private/gc_priv.h (SIZET_SAT_ADD): New macro (defined before
include gcconfig.h).
* include/private/gc_priv.h (EXTRA_BYTES, GC_page_size): Change type
to size_t.
* os_dep.c (GC_page_size): Likewise.
* include/private/gc_priv.h (ROUNDUP_GRANULE_SIZE, ROUNDED_UP_GRANULES,
ADD_SLOP, ROUNDUP_PAGESIZE): Add comment about the argument.
* include/private/gcconfig.h (GET_MEM): Likewise.
* include/private/gc_priv.h (ROUNDUP_GRANULE_SIZE, ROUNDED_UP_GRANULES,
ADD_SLOP, OBJ_SZ_TO_BLOCKS, ROUNDUP_PAGESIZE,
ROUNDUP_PAGESIZE_IF_MMAP): Rename argument to "lb".
* include/private/gc_priv.h (OBJ_SZ_TO_BLOCKS_CHECKED): New macro.
* include/private/gcconfig.h (GC_win32_get_mem, GC_wince_get_mem,
GC_unix_get_mem): Change argument type from word to int.
* os_dep.c (GC_unix_mmap_get_mem, GC_unix_get_mem,
GC_unix_sbrk_get_mem, GC_wince_get_mem, GC_win32_get_mem): Likewise.
* malloc.c (GC_alloc_large_and_clear): Call OBJ_SZ_TO_BLOCKS only
if no value wrap around is guaranteed.
* malloc.c (GC_generic_malloc): Do not check for lb_rounded < lb case
(because ROUNDED_UP_GRANULES and GRANULES_TO_BYTES guarantees no value
wrap around).
* mallocx.c (GC_generic_malloc_ignore_off_page): Likewise.
* misc.c (GC_init_size_map): Change "i" local variable type from int
to size_t.
* os_dep.c (GC_write_fault_handler, catch_exception_raise): Likewise.
* misc.c (GC_envfile_init): Cast len to size_t when passed to
ROUNDUP_PAGESIZE_IF_MMAP.
* os_dep.c (GC_setpagesize): Cast GC_sysinfo.dwPageSize and
GETPAGESIZE() to size_t (when setting GC_page_size).
* os_dep.c (GC_unix_mmap_get_mem, GC_unmap_start, GC_remove_protection):
Expand ROUNDUP_PAGESIZE macro but without value wrap-around checking
(the argument is of word type).
* os_dep.c (GC_unix_mmap_get_mem): Replace -GC_page_size with
~GC_page_size+1 (because GC_page_size is unsigned); remove redundant
cast to size_t.
* os_dep.c (GC_unix_sbrk_get_mem): Add explicit cast of GC_page_size
to SBRK_ARG_T.
* os_dep.c (GC_wince_get_mem): Change type of res_bytes local variable
to size_t.
* typd_mlc.c: Do not include limits.h.
* typd_mlc.c (GC_SIZE_MAX, GC_SQRT_SIZE_MAX): Remove (as defined in
gc_priv.h now). |
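The saturating-add macro at the heart of the change plausibly looks like this — a sketch under assumptions; the real definition is in include/private/gc_priv.h:

/* Hedged sketch: add two size_t values, clamping to SIZE_MAX instead of
 * wrapping, so later "lb + extra bytes" computations cannot silently
 * shrink. Note the macro evaluates its arguments more than once. */
#include <stdint.h>   /* SIZE_MAX */
#define SIZET_SAT_ADD(a, b) \
        ((a) < SIZE_MAX - (b) ? (a) + (b) : SIZE_MAX)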
static int ntop_is_enterprise(lua_State *vm) {
ntop->getTrace()->traceEvent(TRACE_DEBUG, "%s() called", __FUNCTION__);
lua_pushboolean(vm, ntop->getPrefs()->is_enterprise_edition());
return(CONST_LUA_OK);
} | 0 | [
"CWE-476"
]
| ntopng | 01f47e04fd7c8d54399c9e465f823f0017069f8f | 90,587,510,533,984,900,000,000,000,000,000,000,000 | 5 | Security fix: prevents empty host from being used |
static BOOL _update_read_pointer_large(wStream* s, POINTER_LARGE_UPDATE* pointer)
{
BYTE* newMask;
UINT32 scanlineSize;
if (!pointer)
goto fail;
if (Stream_GetRemainingLength(s) < 14)
goto fail;
Stream_Read_UINT16(s, pointer->xorBpp);
Stream_Read_UINT16(s, pointer->cacheIndex); /* cacheIndex (2 bytes) */
Stream_Read_UINT16(s, pointer->hotSpotX); /* xPos (2 bytes) */
Stream_Read_UINT16(s, pointer->hotSpotY); /* yPos (2 bytes) */
Stream_Read_UINT16(s, pointer->width); /* width (2 bytes) */
Stream_Read_UINT16(s, pointer->height); /* height (2 bytes) */
if ((pointer->width > 384) || (pointer->height > 384))
goto fail;
Stream_Read_UINT16(s, pointer->lengthAndMask); /* lengthAndMask (2 bytes) */
Stream_Read_UINT16(s, pointer->lengthXorMask); /* lengthXorMask (2 bytes) */
if (pointer->hotSpotX >= pointer->width)
pointer->hotSpotX = 0;
if (pointer->hotSpotY >= pointer->height)
pointer->hotSpotY = 0;
if (pointer->lengthXorMask > 0)
{
/**
* Spec states that:
*
* xorMaskData (variable): A variable-length array of bytes. Contains the 24-bpp, bottom-up
* XOR mask scan-line data. The XOR mask is padded to a 2-byte boundary for each encoded
* scan-line. For example, if a 3x3 pixel cursor is being sent, then each scan-line will
* consume 10 bytes (3 pixels per scan-line multiplied by 3 bytes per pixel, rounded up to
* the next even number of bytes).
*
* In fact instead of 24-bpp, the bpp parameter is given by the containing packet.
*/
if (Stream_GetRemainingLength(s) < pointer->lengthXorMask)
goto fail;
scanlineSize = (7 + pointer->xorBpp * pointer->width) / 8;
scanlineSize = ((scanlineSize + 1) / 2) * 2;
if (scanlineSize * pointer->height != pointer->lengthXorMask)
{
WLog_ERR(TAG,
"invalid lengthXorMask: width=%" PRIu32 " height=%" PRIu32 ", %" PRIu32
" instead of %" PRIu32 "",
pointer->width, pointer->height, pointer->lengthXorMask,
scanlineSize * pointer->height);
goto fail;
}
newMask = realloc(pointer->xorMaskData, pointer->lengthXorMask);
if (!newMask)
goto fail;
pointer->xorMaskData = newMask;
Stream_Read(s, pointer->xorMaskData, pointer->lengthXorMask);
}
if (pointer->lengthAndMask > 0)
{
/**
* andMaskData (variable): A variable-length array of bytes. Contains the 1-bpp, bottom-up
* AND mask scan-line data. The AND mask is padded to a 2-byte boundary for each encoded
* scan-line. For example, if a 7x7 pixel cursor is being sent, then each scan-line will
* consume 2 bytes (7 pixels per scan-line multiplied by 1 bpp, rounded up to the next even
* number of bytes).
*/
if (Stream_GetRemainingLength(s) < pointer->lengthAndMask)
goto fail;
scanlineSize = ((7 + pointer->width) / 8);
scanlineSize = ((1 + scanlineSize) / 2) * 2;
if (scanlineSize * pointer->height != pointer->lengthAndMask)
{
WLog_ERR(TAG, "invalid lengthAndMask: %" PRIu32 " instead of %" PRIu32 "",
pointer->lengthAndMask, scanlineSize * pointer->height);
goto fail;
}
newMask = realloc(pointer->andMaskData, pointer->lengthAndMask);
if (!newMask)
goto fail;
pointer->andMaskData = newMask;
Stream_Read(s, pointer->andMaskData, pointer->lengthAndMask);
}
if (Stream_GetRemainingLength(s) > 0)
Stream_Seek_UINT8(s); /* pad (1 byte) */
return TRUE;
fail:
return FALSE;
} | 0 | [
"CWE-125"
]
| FreeRDP | f8890a645c221823ac133dbf991f8a65ae50d637 | 221,124,485,378,056,380,000,000,000,000,000,000,000 | 107 | Fixed #6005: Bounds checks in update_read_bitmap_data |
static int mbedtls_ssl_dtls_record_replay_check( mbedtls_ssl_context *ssl, uint8_t *record_in_ctr )
{
int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED;
unsigned char *original_in_ctr;
// save original in_ctr
original_in_ctr = ssl->in_ctr;
// use counter from record
ssl->in_ctr = record_in_ctr;
ret = mbedtls_ssl_dtls_replay_check( (mbedtls_ssl_context const *) ssl );
// restore the counter
ssl->in_ctr = original_in_ctr;
return ret;
} | 0 | [
"CWE-787"
]
| mbedtls | f333dfab4a6c2d8a604a61558a8f783145161de4 | 42,681,367,550,264,140,000,000,000,000,000,000,000 | 18 | More SSL debug messages for ClientHello parsing
In particular, be verbose when checking the ClientHello cookie in a possible
DTLS reconnection.
Signed-off-by: Gilles Peskine <[email protected]> |
void ContentLine_Analyzer::SetPlainDelivery(int64_t length)
{
if ( length < 0 )
{
reporter->AnalyzerError(this,
"negative length for plain delivery");
return;
}
plain_delivery_length = length;
} | 0 | [
"CWE-787"
]
| bro | 6c0f101a62489b1c5927b4ed63b0e1d37db40282 | 101,554,332,188,998,800,000,000,000,000,000,000,000 | 11 | Patch OOB write in content-line analyzer.
A combination of packets can trigger an out-of-bounds write of a '0' byte
in the content-line analyzer.
This bug was found by Frank Meier.
Addresses BIT-1856. |
static int fdt_add_region(struct fdt_region_state *info, int offset, int size)
{
struct fdt_region *reg;
reg = info->region ? &info->region[info->count - 1] : NULL;
if (info->can_merge && info->count &&
info->count <= info->max_regions &&
reg && offset <= reg->offset + reg->size) {
reg->size = offset + size - reg->offset;
} else if (info->count++ < info->max_regions) {
if (reg) {
reg++;
reg->offset = offset;
reg->size = size;
}
} else {
return -1;
}
return 0;
} | 0 | []
| u-boot | 8a7d4cf9820ea16fabd25a6379351b4dc291204b | 198,182,867,624,222,770,000,000,000,000,000,000,000 | 21 | fdt_region: Check for a single root node of the correct name
At present fdt_find_regions() assumes that the FIT is a valid devicetree.
If the FIT has two root nodes this is currently not detected in this
function, nor does libfdt's fdt_check_full() notice. Also it is possible
for the root node to have a name even though it should not.
Add checks for these and return -FDT_ERR_BADSTRUCTURE if a problem is
detected.
CVE-2021-27097
Signed-off-by: Simon Glass <[email protected]>
Reported-by: Bruce Monroe <[email protected]>
Reported-by: Arie Haenel <[email protected]>
Reported-by: Julien Lenoir <[email protected]> |
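A hedged sketch of the shape of the added validation (function name and structure assumed; the real patch lives alongside fdt_find_regions()):

/* Hedged sketch: the root node of a flat devicetree must be unnamed,
 * and exactly one top-level node may exist. */
static int check_single_unnamed_root(const void *fdt)
{
        const char *name = fdt_get_name(fdt, 0, NULL); /* offset 0 = root */

        if (!name || *name != '\0')
                return -FDT_ERR_BADSTRUCTURE; /* root node has a name */
        /* ...the real check must also fail if a second top-level
         * BEGIN_NODE tag follows the first root's END_NODE... */
        return 0;
}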
void fiin_del(GF_Box *s)
{
FDItemInformationBox *ptr = (FDItemInformationBox *)s;
if (ptr == NULL) return;
if (ptr->partition_entries) gf_isom_box_array_del(ptr->partition_entries);
if (ptr->session_info) gf_isom_box_del((GF_Box*)ptr->session_info);
if (ptr->group_id_to_name) gf_isom_box_del((GF_Box*)ptr->group_id_to_name);
gf_free(ptr); | 0 | [
"CWE-400",
"CWE-401"
]
| gpac | d2371b4b204f0a3c0af51ad4e9b491144dd1225c | 30,999,671,371,713,440,000,000,000,000,000,000,000 | 9 | prevent dref memleak on invalid input (#1183) |
int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size)
{
gnutls_datum_t raw;
gnutls_datum_t ca;
gnutls_x509_crt_t crt, ca_crt;
int ret;
unsigned status;
raw.data = (unsigned char *)data;
raw.size = size;
ca.data = _ca_der;
ca.size = _ca_der_len;
ret = gnutls_x509_crt_init(&ca_crt);
assert(ret >= 0);
ret = gnutls_x509_crt_init(&crt);
assert(ret >= 0);
ret = gnutls_x509_crt_import(ca_crt, &ca, GNUTLS_X509_FMT_DER);
if (ret < 0)
goto fail;
ret = gnutls_x509_crt_import(crt, &raw, GNUTLS_X509_FMT_DER);
if (ret < 0)
goto fail;
ret = gnutls_x509_crt_list_verify(&crt, 1, &ca_crt, 1, NULL, 0,
GNUTLS_VERIFY_ALLOW_UNSORTED_CHAIN|GNUTLS_VERIFY_ALLOW_BROKEN|
GNUTLS_VERIFY_DISABLE_TRUSTED_TIME_CHECKS,
&status);
assert(ret >= 0 || status != 0);
fail:
gnutls_x509_crt_deinit(crt);
gnutls_x509_crt_deinit(ca_crt);
return 0;
} | 0 | [
"CWE-415"
]
| gnutls | ad27713bef613e6c4600a0fb83ae48c6d390ff5b | 293,781,912,234,020,440,000,000,000,000,000,000,000 | 39 | fuzz: added fuzzer for certificate verification
This also adds a reproducer for CVE-2019-3829.
Resolves: #694
Signed-off-by: Nikos Mavrogiannopoulos <[email protected]> |
read_subs(uschar **sub, int n, int m, uschar **sptr, BOOL skipping,
BOOL check_end, uschar *name, BOOL *resetok)
{
int i;
uschar *s = *sptr;
while (isspace(*s)) s++;
for (i = 0; i < n; i++)
{
if (*s != '{')
{
if (i < m) return 1;
sub[i] = NULL;
break;
}
sub[i] = expand_string_internal(s+1, TRUE, &s, skipping, TRUE, resetok);
if (sub[i] == NULL) return 3;
if (*s++ != '}') return 1;
while (isspace(*s)) s++;
}
if (check_end && *s++ != '}')
{
if (s[-1] == '{')
{
expand_string_message = string_sprintf("Too many arguments for \"%s\" "
"(max is %d)", name, n);
return 2;
}
return 1;
}
*sptr = s;
return 0;
} | 0 | [
"CWE-189"
]
| exim | 7685ce68148a083d7759e78d01aa5198fc099c44 | 180,204,629,064,485,160,000,000,000,000,000,000,000 | 34 | Only expand integers for integer math once |
static int ath6kl_set_bitrate_mask32(struct wmi *wmi, u8 if_idx,
const struct cfg80211_bitrate_mask *mask)
{
struct sk_buff *skb;
int ret, mode, band;
u32 mcsrate, ratemask[ATH6KL_NUM_BANDS];
struct wmi_set_tx_select_rates32_cmd *cmd;
memset(&ratemask, 0, sizeof(ratemask));
/* only check 2.4 and 5 GHz bands, skip the rest */
for (band = 0; band <= NL80211_BAND_5GHZ; band++) {
/* copy legacy rate mask */
ratemask[band] = mask->control[band].legacy;
if (band == NL80211_BAND_5GHZ)
ratemask[band] =
mask->control[band].legacy << 4;
/* copy mcs rate mask */
mcsrate = mask->control[band].ht_mcs[0];
ratemask[band] |= mcsrate << 12;
ratemask[band] |= mcsrate << 20;
}
ath6kl_dbg(ATH6KL_DBG_WMI,
"Ratemask 32 bit: 2.4:%x 5:%x\n",
ratemask[0], ratemask[1]);
skb = ath6kl_wmi_get_new_buf(sizeof(*cmd) * WMI_RATES_MODE_MAX);
if (!skb)
return -ENOMEM;
cmd = (struct wmi_set_tx_select_rates32_cmd *) skb->data;
for (mode = 0; mode < WMI_RATES_MODE_MAX; mode++) {
/* A mode operate in 5GHZ band */
if (mode == WMI_RATES_MODE_11A ||
mode == WMI_RATES_MODE_11A_HT20 ||
mode == WMI_RATES_MODE_11A_HT40)
band = NL80211_BAND_5GHZ;
else
band = NL80211_BAND_2GHZ;
cmd->ratemask[mode] = cpu_to_le32(ratemask[band]);
}
ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb,
WMI_SET_TX_SELECT_RATES_CMDID,
NO_SYNC_WMIFLAG);
return ret;
} | 0 | [
"CWE-125"
]
| linux | 5d6751eaff672ea77642e74e92e6c0ac7f9709ab | 159,423,398,413,895,250,000,000,000,000,000,000,000 | 49 | ath6kl: add some bounds checking
The "ev->traffic_class" and "reply->ac" variables come from the network
and they're used as an offset into the wmi->stream_exist_for_ac[] array.
Those variables are u8 so they can be 0-255 but the stream_exist_for_ac[]
array only has WMM_NUM_AC (4) elements. We need to add a couple bounds
checks to prevent array overflows.
I also modified one existing check from "if (traffic_class > 3) {" to
"if (traffic_class >= WMM_NUM_AC) {" just to make them all consistent.
Fixes: bdcd81707973 (" Add ath6kl cleaned up driver")
Signed-off-by: Dan Carpenter <[email protected]>
Signed-off-by: Kalle Valo <[email protected]> |
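Per the message above, the added checks take this shape (fragments; the exact error handling is assumed):

/* Hedged sketch: validate network-supplied AC indices before they are
 * used as offsets into stream_exist_for_ac[WMM_NUM_AC]. */
if (reply->ac >= WMM_NUM_AC)
        return -EINVAL;         /* assumed error code */
/* and likewise in the event handler: */
if (ev->traffic_class >= WMM_NUM_AC)
        return -EINVAL;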
get_text_rgb_row (j_compress_ptr cinfo, cjpeg_source_ptr sinfo)
/* This version is for reading text-format PPM files with any maxval */
{
ppm_source_ptr source = (ppm_source_ptr) sinfo;
FILE * infile = source->pub.input_file;
register JSAMPROW ptr;
register JSAMPLE *rescale = source->rescale;
JDIMENSION col;
ptr = source->pub.buffer[0];
for (col = cinfo->image_width; col > 0; col--) {
*ptr++ = rescale[read_pbm_integer(cinfo, infile)];
*ptr++ = rescale[read_pbm_integer(cinfo, infile)];
*ptr++ = rescale[read_pbm_integer(cinfo, infile)];
}
return 1;
} | 1 | []
| libjpeg-turbo | 6709e4a0cfa44d4f54ee8ad05753d4aa9260cb91 | 18,132,256,636,125,684,000,000,000,000,000,000,000 | 17 | Check range of integer values in PPM text file
Add checks to ensure values are within the specified range.
Fixes mozilla/mozjpeg#141, closes #8 |
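A sketch of the described range check, with the error-handling details assumed rather than taken from the patch:

/* Hedged sketch: reject text-PPM sample values above maxval instead of
 * letting them index past the rescale[] table used in the row readers. */
static unsigned int
read_pbm_integer_checked (j_compress_ptr cinfo, FILE * infile,
                          unsigned int maxval)
{
  unsigned int val = read_pbm_integer(cinfo, infile);
  if (val > maxval)
    ERREXIT(cinfo, JERR_PPM_OUTOFRANGE);  /* assumed error code */
  return val;
}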
static void ext4_mb_normalize_group_request(struct ext4_allocation_context *ac)
{
struct super_block *sb = ac->ac_sb;
struct ext4_locality_group *lg = ac->ac_lg;
BUG_ON(lg == NULL);
ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_mb_group_prealloc;
mb_debug(sb, "goal %u blocks for locality group\n", ac->ac_g_ex.fe_len);
} | 0 | [
"CWE-703"
]
| linux | ce9f24cccdc019229b70a5c15e2b09ad9c0ab5d1 | 162,687,809,222,419,060,000,000,000,000,000,000,000 | 9 | ext4: check journal inode extents more carefully
Currently, system zones just track ranges of blocks that are "important"
fs metadata (bitmaps, group descriptors, journal blocks, etc.). This
however complicates how extent tree (or indirect blocks) can be checked
for inodes that actually track such metadata - currently the journal
inode but arguably we should be treating quota files or resize inode
similarly. We cannot run __ext4_ext_check() on such metadata inodes when
loading their extents as that would immediately trigger the validity
checks and so we just hack around that and special-case the journal
inode. This however leads to a situation where a journal inode with an
extent tree of depth at least one can have an invalid extent tree that goes
unnoticed until ext4_cache_extents() crashes.
To overcome this limitation, track inode number each system zone belongs
to (0 is used for zones not belonging to any inode). We can then verify
inode number matches the expected one when verifying extent tree and
thus avoid the false errors. With this there's no need to
special-case journal inode during extent tree checking anymore so remove
it.
Fixes: 0a944e8a6c66 ("ext4: don't perform block validity checks on the journal inode")
Reported-by: Wolfgang Frisch <[email protected]>
Reviewed-by: Lukas Czerner <[email protected]>
Signed-off-by: Jan Kara <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Theodore Ts'o <[email protected]> |
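A sketch of the data-structure change the message implies (field layout assumed):

/* Hedged sketch: each system zone records which inode owns it
 * (0 = generic fs metadata), so the journal inode's own extents can be
 * verified against the expected owner instead of being special-cased. */
struct ext4_system_zone {
        struct rb_node  node;
        ext4_fsblk_t    start_blk;
        unsigned int    count;
        u32             ino;    /* owning inode, 0 if none */
};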
pci_emul_msix_twrite(struct pci_vdev *dev, uint64_t offset, int size,
uint64_t value)
{
int msix_entry_offset;
int tab_index;
char *dest;
/* support only 4 or 8 byte writes */
if (size != 4 && size != 8)
return -1;
/*
* Return if table index is beyond what device supports
*/
tab_index = offset / MSIX_TABLE_ENTRY_SIZE;
if (tab_index >= dev->msix.table_count)
return -1;
msix_entry_offset = offset % MSIX_TABLE_ENTRY_SIZE;
/* support only aligned writes */
if ((msix_entry_offset % size) != 0)
return -1;
dest = (char *)(dev->msix.table + tab_index);
dest += msix_entry_offset;
if (size == 4)
*((uint32_t *)dest) = value;
else
*((uint64_t *)dest) = value;
return 0;
} | 0 | [
"CWE-617",
"CWE-703"
]
| acrn-hypervisor | 6199e653418eda58cd698d8769820904453e2535 | 58,919,495,664,977,680,000,000,000,000,000,000,000 | 34 | dm: validate the input in 'pci_emul_mem_handler()'
checking the inputs explicitly instead of using Assert.
Tracked-On: #4003
Signed-off-by: Yonghua Huang <[email protected]>
Reviewed-by: Shuo Liu <[email protected]>
Acked-by: Yu Wang <[email protected]> |
static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
struct idpair *idmap)
{
if (!(rold->live & REG_LIVE_READ))
/* explored state didn't use this */
return true;
if (memcmp(rold, rcur, offsetof(struct bpf_reg_state, live)) == 0)
return true;
if (rold->type == NOT_INIT)
/* explored state can't have used this */
return true;
if (rcur->type == NOT_INIT)
return false;
switch (rold->type) {
case SCALAR_VALUE:
if (rcur->type == SCALAR_VALUE) {
/* new val must satisfy old val knowledge */
return range_within(rold, rcur) &&
tnum_in(rold->var_off, rcur->var_off);
} else {
/* We're trying to use a pointer in place of a scalar.
* Even if the scalar was unbounded, this could lead to
* pointer leaks because scalars are allowed to leak
* while pointers are not. We could make this safe in
* special cases if root is calling us, but it's
* probably not worth the hassle.
*/
return false;
}
case PTR_TO_MAP_VALUE:
/* If the new min/max/var_off satisfy the old ones and
* everything else matches, we are OK.
* We don't care about the 'id' value, because nothing
* uses it for PTR_TO_MAP_VALUE (only for ..._OR_NULL)
*/
return memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)) == 0 &&
range_within(rold, rcur) &&
tnum_in(rold->var_off, rcur->var_off);
case PTR_TO_MAP_VALUE_OR_NULL:
/* a PTR_TO_MAP_VALUE could be safe to use as a
* PTR_TO_MAP_VALUE_OR_NULL into the same map.
* However, if the old PTR_TO_MAP_VALUE_OR_NULL then got NULL-
* checked, doing so could have affected others with the same
* id, and we can't check for that because we lost the id when
* we converted to a PTR_TO_MAP_VALUE.
*/
if (rcur->type != PTR_TO_MAP_VALUE_OR_NULL)
return false;
if (memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)))
return false;
/* Check our ids match any regs they're supposed to */
return check_ids(rold->id, rcur->id, idmap);
case PTR_TO_PACKET_META:
case PTR_TO_PACKET:
if (rcur->type != rold->type)
return false;
/* We must have at least as much range as the old ptr
* did, so that any accesses which were safe before are
* still safe. This is true even if old range < old off,
* since someone could have accessed through (ptr - k), or
* even done ptr -= k in a register, to get a safe access.
*/
if (rold->range > rcur->range)
return false;
/* If the offsets don't match, we can't trust our alignment;
* nor can we be sure that we won't fall out of range.
*/
if (rold->off != rcur->off)
return false;
/* id relations must be preserved */
if (rold->id && !check_ids(rold->id, rcur->id, idmap))
return false;
/* new val must satisfy old val knowledge */
return range_within(rold, rcur) &&
tnum_in(rold->var_off, rcur->var_off);
case PTR_TO_CTX:
case CONST_PTR_TO_MAP:
case PTR_TO_STACK:
case PTR_TO_PACKET_END:
/* Only valid matches are exact, which memcmp() above
* would have accepted
*/
default:
/* Don't know what's going on, just say it's not safe */
return false;
}
/* Shouldn't get here; if we do, say it's not safe */
WARN_ON_ONCE(1);
return false;
} | 0 | [
"CWE-190"
]
| linux | bb7f0f989ca7de1153bd128a40a71709e339fa03 | 109,244,476,547,347,380,000,000,000,000,000,000,000 | 93 | bpf: fix integer overflows
There were various issues related to the limited size of integers used in
the verifier:
- `off + size` overflow in __check_map_access()
- `off + reg->off` overflow in check_mem_access()
- `off + reg->var_off.value` overflow or 32-bit truncation of
`reg->var_off.value` in check_mem_access()
- 32-bit truncation in check_stack_boundary()
Make sure that any integer math cannot overflow by not allowing
pointer math with large values.
Also reduce the scope of "scalar op scalar" tracking.
Fixes: f1174f77b50c ("bpf/verifier: rework value tracking")
Reported-by: Jann Horn <[email protected]>
Signed-off-by: Alexei Starovoitov <[email protected]>
Signed-off-by: Daniel Borkmann <[email protected]> |
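The overflow-prevention strategy, sketched — the constant value and name approximate the verifier limits the real patch introduces:

/* Hedged sketch: keep pointer offsets far below INT_MAX so a handful of
 * "off + reg->off + var_off" additions can never overflow a 32-bit int. */
#define MAX_VAR_OFF (1 << 29)   /* assumed bound */

if (off <= -MAX_VAR_OFF || off >= MAX_VAR_OFF)
        return -EACCES;         /* reject pointer math with large values */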
static int php_stream_http_stream_stat(php_stream_wrapper *wrapper, php_stream *stream, php_stream_statbuf *ssb TSRMLS_DC) /* {{{ */
{
/* one day, we could fill in the details based on Date: and Content-Length:
* headers. For now, we return with a failure code to prevent the underlying
* file's details from being used instead. */
return -1;
} | 0 | []
| php-src | d82d68742c69fc20a5180a2dbcca4cac52435931 | 38,749,575,362,857,357,000,000,000,000,000,000,000 | 7 | Fix bug #69337 (php_stream_url_wrap_http_ex() type-confusion vulnerability) |
static unsigned getHash(const unsigned char* data, size_t size, size_t pos)
{
unsigned result = 0;
if (pos + 2 < size)
{
/*A simple shift and xor hash is used. Since the data of PNGs is dominated
by zeroes due to the filters, a better hash does not have a significant
effect on speed in traversing the chain, and causes more time spend on
calculating the hash.*/
result ^= (unsigned)(data[pos + 0] << 0u);
result ^= (unsigned)(data[pos + 1] << 4u);
result ^= (unsigned)(data[pos + 2] << 8u);
} else {
size_t amount, i;
if(pos >= size) return 0;
amount = size - pos;
for(i = 0; i < amount; i++) result ^= (unsigned)(data[pos + i] << (i * 8u));
}
return result & HASH_BIT_MASK;
} | 0 | [
"CWE-401"
]
| FreeRDP | 9fee4ae076b1ec97b97efb79ece08d1dab4df29a | 109,094,973,687,658,070,000,000,000,000,000,000,000 | 20 | Fixed #5645: realloc return handling |
compile_lhs(
char_u *var_start,
lhs_T *lhs,
cmdidx_T cmdidx,
int heredoc,
int has_cmd, // "var" before "var_start"
int oplen,
cctx_T *cctx)
{
char_u *var_end;
int is_decl = is_decl_command(cmdidx);
CLEAR_POINTER(lhs);
lhs->lhs_dest = dest_local;
lhs->lhs_vimvaridx = -1;
lhs->lhs_scriptvar_idx = -1;
// "dest_end" is the end of the destination, including "[expr]" or
// ".name".
// "var_end" is the end of the variable/option/etc. name.
lhs->lhs_dest_end = skip_var_one(var_start, FALSE);
if (*var_start == '@')
var_end = var_start + 2;
else
{
// skip over the leading "&", "&l:", "&g:" and "$"
var_end = skip_option_env_lead(var_start);
var_end = to_name_end(var_end, TRUE);
}
// "a: type" is declaring variable "a" with a type, not dict "a:".
if (is_decl && lhs->lhs_dest_end == var_start + 2
&& lhs->lhs_dest_end[-1] == ':')
--lhs->lhs_dest_end;
if (is_decl && var_end == var_start + 2 && var_end[-1] == ':')
--var_end;
lhs->lhs_end = lhs->lhs_dest_end;
// compute the length of the destination without "[expr]" or ".name"
lhs->lhs_varlen = var_end - var_start;
lhs->lhs_varlen_total = lhs->lhs_varlen;
lhs->lhs_name = vim_strnsave(var_start, lhs->lhs_varlen);
if (lhs->lhs_name == NULL)
return FAIL;
if (lhs->lhs_dest_end > var_start + lhs->lhs_varlen)
// Something follows after the variable: "var[idx]" or "var.key".
lhs->lhs_has_index = TRUE;
if (heredoc)
lhs->lhs_type = &t_list_string;
else
lhs->lhs_type = &t_any;
if (cctx->ctx_skip != SKIP_YES)
{
int declare_error = FALSE;
if (get_var_dest(lhs->lhs_name, &lhs->lhs_dest, cmdidx,
&lhs->lhs_opt_flags, &lhs->lhs_vimvaridx,
&lhs->lhs_type, cctx) == FAIL)
return FAIL;
if (lhs->lhs_dest != dest_local
&& cmdidx != CMD_const && cmdidx != CMD_final)
{
// Specific kind of variable recognized.
declare_error = is_decl;
}
else
{
// No specific kind of variable recognized, just a name.
if (check_reserved_name(lhs->lhs_name) == FAIL)
return FAIL;
if (lookup_local(var_start, lhs->lhs_varlen,
&lhs->lhs_local_lvar, cctx) == OK)
lhs->lhs_lvar = &lhs->lhs_local_lvar;
else
{
CLEAR_FIELD(lhs->lhs_arg_lvar);
if (arg_exists(var_start, lhs->lhs_varlen,
&lhs->lhs_arg_lvar.lv_idx, &lhs->lhs_arg_lvar.lv_type,
&lhs->lhs_arg_lvar.lv_from_outer, cctx) == OK)
{
if (is_decl)
{
semsg(_(e_str_is_used_as_argument), lhs->lhs_name);
return FAIL;
}
lhs->lhs_lvar = &lhs->lhs_arg_lvar;
}
}
if (lhs->lhs_lvar != NULL)
{
if (is_decl)
{
semsg(_(e_variable_already_declared), lhs->lhs_name);
return FAIL;
}
}
else
{
int script_namespace = lhs->lhs_varlen > 1
&& STRNCMP(var_start, "s:", 2) == 0;
int script_var = (script_namespace
? script_var_exists(var_start + 2, lhs->lhs_varlen - 2,
cctx, NULL)
: script_var_exists(var_start, lhs->lhs_varlen,
cctx, NULL)) == OK;
imported_T *import =
find_imported(var_start, lhs->lhs_varlen, FALSE);
if (script_namespace || script_var || import != NULL)
{
char_u *rawname = lhs->lhs_name
+ (lhs->lhs_name[1] == ':' ? 2 : 0);
if (script_namespace && current_script_is_vim9())
{
semsg(_(e_cannot_use_s_colon_in_vim9_script_str),
var_start);
return FAIL;
}
if (is_decl)
{
if (script_namespace)
semsg(_(e_cannot_declare_script_variable_in_function_str),
lhs->lhs_name);
else
semsg(_(e_variable_already_declared_in_script_str),
lhs->lhs_name);
return FAIL;
}
else if (cctx->ctx_ufunc->uf_script_ctx_version
== SCRIPT_VERSION_VIM9
&& script_namespace
&& !script_var && import == NULL)
{
semsg(_(e_unknown_variable_str), lhs->lhs_name);
return FAIL;
}
lhs->lhs_dest = dest_script;
// existing script-local variables should have a type
lhs->lhs_scriptvar_sid = current_sctx.sc_sid;
if (import != NULL)
{
char_u *dot = vim_strchr(var_start, '.');
char_u *p;
// for an import the name is what comes after the dot
if (dot == NULL)
{
semsg(_(e_no_dot_after_imported_name_str),
var_start);
return FAIL;
}
p = skipwhite(dot + 1);
var_end = to_name_end(p, TRUE);
if (var_end == p)
{
semsg(_(e_missing_name_after_imported_name_str),
var_start);
return FAIL;
}
vim_free(lhs->lhs_name);
lhs->lhs_varlen = var_end - p;
lhs->lhs_name = vim_strnsave(p, lhs->lhs_varlen);
if (lhs->lhs_name == NULL)
return FAIL;
rawname = lhs->lhs_name;
lhs->lhs_scriptvar_sid = import->imp_sid;
// TODO: where do we check this name is exported?
// Check if something follows: "exp.var[idx]" or
// "exp.var.key".
lhs->lhs_has_index = lhs->lhs_dest_end
> skipwhite(var_end);
}
if (SCRIPT_ID_VALID(lhs->lhs_scriptvar_sid))
{
// Check writable only when no index follows.
lhs->lhs_scriptvar_idx = get_script_item_idx(
lhs->lhs_scriptvar_sid, rawname,
lhs->lhs_has_index ? ASSIGN_FINAL : ASSIGN_CONST,
cctx, NULL);
if (lhs->lhs_scriptvar_idx >= 0)
{
scriptitem_T *si = SCRIPT_ITEM(
lhs->lhs_scriptvar_sid);
svar_T *sv =
((svar_T *)si->sn_var_vals.ga_data)
+ lhs->lhs_scriptvar_idx;
lhs->lhs_type = sv->sv_type;
}
}
}
else if (check_defined(var_start, lhs->lhs_varlen, cctx,
NULL, FALSE) == FAIL)
return FAIL;
}
}
if (declare_error)
{
vim9_declare_error(lhs->lhs_name);
return FAIL;
}
}
// handle "a:name" as a name, not index "name" in "a"
if (lhs->lhs_varlen > 1 || var_start[lhs->lhs_varlen] != ':')
var_end = lhs->lhs_dest_end;
if (lhs->lhs_dest != dest_option && lhs->lhs_dest != dest_func_option)
{
if (is_decl && *var_end == ':')
{
char_u *p;
// parse optional type: "let var: type = expr"
if (!VIM_ISWHITE(var_end[1]))
{
semsg(_(e_white_space_required_after_str_str), ":", var_end);
return FAIL;
}
p = skipwhite(var_end + 1);
lhs->lhs_type = parse_type(&p, cctx->ctx_type_list, TRUE);
if (lhs->lhs_type == NULL)
return FAIL;
lhs->lhs_has_type = TRUE;
lhs->lhs_end = p;
}
else if (lhs->lhs_lvar != NULL)
lhs->lhs_type = lhs->lhs_lvar->lv_type;
}
if (oplen == 3 && !heredoc
&& lhs->lhs_dest != dest_global
&& !lhs->lhs_has_index
&& lhs->lhs_type->tt_type != VAR_STRING
&& lhs->lhs_type->tt_type != VAR_ANY)
{
emsg(_(e_can_only_concatenate_to_string));
return FAIL;
}
if (lhs->lhs_lvar == NULL && lhs->lhs_dest == dest_local
&& cctx->ctx_skip != SKIP_YES)
{
if (oplen > 1 && !heredoc)
{
// +=, /=, etc. require an existing variable
semsg(_(e_cannot_use_operator_on_new_variable), lhs->lhs_name);
return FAIL;
}
if (!is_decl || (lhs->lhs_has_index && !has_cmd
&& cctx->ctx_skip != SKIP_YES))
{
semsg(_(e_unknown_variable_str), lhs->lhs_name);
return FAIL;
}
// Check the name is valid for a funcref.
if ((lhs->lhs_type->tt_type == VAR_FUNC
|| lhs->lhs_type->tt_type == VAR_PARTIAL)
&& var_wrong_func_name(lhs->lhs_name, TRUE))
return FAIL;
// New local variable.
lhs->lhs_lvar = reserve_local(cctx, var_start, lhs->lhs_varlen,
cmdidx == CMD_final || cmdidx == CMD_const, lhs->lhs_type);
if (lhs->lhs_lvar == NULL)
return FAIL;
lhs->lhs_new_local = TRUE;
}
lhs->lhs_member_type = lhs->lhs_type;
if (lhs->lhs_has_index)
{
char_u *after = var_start + lhs->lhs_varlen;
char_u *p;
// Something follows after the variable: "var[idx]" or "var.key".
if (is_decl && cctx->ctx_skip != SKIP_YES)
{
if (has_cmd)
emsg(_(e_cannot_use_index_when_declaring_variable));
else
semsg(_(e_unknown_variable_str), lhs->lhs_name);
return FAIL;
}
// Now: var_start[lhs->lhs_varlen] is '[' or '.'
// Only the last index is used below, if there are others
// before it generate code for the expression. Thus for
// "ll[1][2]" the expression is "ll[1]" and "[2]" is the index.
for (;;)
{
p = skip_index(after);
if (*p != '[' && *p != '.')
{
lhs->lhs_varlen_total = p - var_start;
break;
}
after = p;
}
if (after > var_start + lhs->lhs_varlen)
{
lhs->lhs_varlen = after - var_start;
lhs->lhs_dest = dest_expr;
// We don't know the type before evaluating the expression,
// use "any" until then.
lhs->lhs_type = &t_any;
}
if (lhs->lhs_type->tt_member == NULL)
lhs->lhs_member_type = &t_any;
else
lhs->lhs_member_type = lhs->lhs_type->tt_member;
}
return OK;
} | 0 | [
"CWE-416"
]
| vim | 1889f499a4f248cd84e0e0bf6d0d820016774494 | 11,911,007,193,155,048,000,000,000,000,000,000,000 | 324 | patch 9.0.0221: accessing freed memory if compiling nested function fails
Problem: Accessing freed memory if compiling nested function fails.
Solution: Mess up the variable name so that it won't be found. |
llsec_tfm_by_len(struct mac802154_llsec_key *key, int authlen)
{
int i;
for (i = 0; i < ARRAY_SIZE(key->tfm); i++)
if (crypto_aead_authsize(key->tfm[i]) == authlen)
return key->tfm[i];
BUG();
} | 0 | [
"CWE-416"
]
| linux | 1165affd484889d4986cf3b724318935a0b120d8 | 70,449,586,034,956,990,000,000,000,000,000,000,000 | 10 | net: mac802154: Fix general protection fault
syzbot found a general protection fault in crypto_destroy_tfm() [1].
It was caused by a wrong cleanup loop in llsec_key_alloc().
If one of the tfm array members is in the IS_ERR() range, it will
cause a general protection fault in the cleanup function [1].
Call Trace:
crypto_free_aead include/crypto/aead.h:191 [inline] [1]
llsec_key_alloc net/mac802154/llsec.c:156 [inline]
mac802154_llsec_key_add+0x9e0/0xcc0 net/mac802154/llsec.c:249
ieee802154_add_llsec_key+0x56/0x80 net/mac802154/cfg.c:338
rdev_add_llsec_key net/ieee802154/rdev-ops.h:260 [inline]
nl802154_add_llsec_key+0x3d3/0x560 net/ieee802154/nl802154.c:1584
genl_family_rcv_msg_doit+0x228/0x320 net/netlink/genetlink.c:739
genl_family_rcv_msg net/netlink/genetlink.c:783 [inline]
genl_rcv_msg+0x328/0x580 net/netlink/genetlink.c:800
netlink_rcv_skb+0x153/0x420 net/netlink/af_netlink.c:2502
genl_rcv+0x24/0x40 net/netlink/genetlink.c:811
netlink_unicast_kernel net/netlink/af_netlink.c:1312 [inline]
netlink_unicast+0x533/0x7d0 net/netlink/af_netlink.c:1338
netlink_sendmsg+0x856/0xd90 net/netlink/af_netlink.c:1927
sock_sendmsg_nosec net/socket.c:654 [inline]
sock_sendmsg+0xcf/0x120 net/socket.c:674
____sys_sendmsg+0x6e8/0x810 net/socket.c:2350
___sys_sendmsg+0xf3/0x170 net/socket.c:2404
__sys_sendmsg+0xe5/0x1b0 net/socket.c:2433
do_syscall_64+0x2d/0x70 arch/x86/entry/common.c:46
entry_SYSCALL_64_after_hwframe+0x44/0xae
Signed-off-by: Pavel Skripkin <[email protected]>
Reported-by: [email protected]
Acked-by: Alexander Aring <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Stefan Schmidt <[email protected]> |
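A corrected cleanup loop plausibly looks like this (sketch; the real patch may differ in detail):

/* Hedged sketch: free only tfm slots that hold a real allocation,
 * skipping NULL and IS_ERR() values that never came from a successful
 * crypto_alloc_aead(). */
for (i = 0; i < ARRAY_SIZE(key->tfm); i++) {
        if (!IS_ERR_OR_NULL(key->tfm[i]))
                crypto_free_aead(key->tfm[i]);
}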
xmlDictAddQString(xmlDictPtr dict, const xmlChar *prefix, int plen,
const xmlChar *name, int namelen)
{
xmlDictStringsPtr pool;
const xmlChar *ret;
int size = 0; /* + sizeof(_xmlDictStrings) == 1024 */
if (prefix == NULL) return(xmlDictAddString(dict, name, namelen));
#ifdef DICT_DEBUG_PATTERNS
fprintf(stderr, "=");
#endif
pool = dict->strings;
while (pool != NULL) {
if (pool->end - pool->free > namelen + plen + 1)
goto found_pool;
if (pool->size > size) size = pool->size;
pool = pool->next;
}
/*
* Not found, need to allocate
*/
if (pool == NULL) {
if (size == 0) size = 1000;
else size *= 4; /* exponential growth */
if (size < 4 * (namelen + plen + 1))
size = 4 * (namelen + plen + 1); /* just in case ! */
pool = (xmlDictStringsPtr) xmlMalloc(sizeof(xmlDictStrings) + size);
if (pool == NULL)
return(NULL);
pool->size = size;
pool->nbStrings = 0;
pool->free = &pool->array[0];
pool->end = &pool->array[size];
pool->next = dict->strings;
dict->strings = pool;
#ifdef DICT_DEBUG_PATTERNS
fprintf(stderr, "+");
#endif
}
found_pool:
ret = pool->free;
memcpy(pool->free, prefix, plen);
pool->free += plen;
*(pool->free++) = ':';
memcpy(pool->free, name, namelen);
pool->free += namelen;
*(pool->free++) = 0;
pool->nbStrings++;
return(ret);
} | 0 | [
"CWE-399"
]
| libxml2 | 8973d58b7498fa5100a876815476b81fd1a2412a | 12,020,751,812,175,130,000,000,000,000,000,000,000 | 51 | Add hash randomization to hash and dict structures
Following http://www.ocert.org/advisories/ocert-2011-003.html
it seems that having hash randomization might be a good idea
when using XML with untrusted data
* configure.in: lookup for rand, srand and time
* dict.c: add randomization to dictionaries hash tables
* hash.c: add randomization to normal hash tables |
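A minimal sketch of the randomization, assuming the rand/srand/time functions the configure check looks for (libxml2's actual implementation differs in detail):

/* Hedged sketch: give each dictionary a random seed and mix it into
 * the hash so attackers cannot precompute colliding keys. */
#include <stdlib.h>
#include <time.h>

static unsigned
dict_random_seed(void)
{
    static int initialized = 0;
    if (!initialized) {
        srand((unsigned) time(NULL));
        initialized = 1;
    }
    return (unsigned) rand();
}
/* hashing then starts from the per-dict seed instead of 0:
 *     unsigned value = dict->seed;  ...  */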
f_win_screenpos(typval_T *argvars, typval_T *rettv)
{
win_T *wp;
if (rettv_list_alloc(rettv) == FAIL)
return;
wp = find_win_by_nr_or_id(&argvars[0]);
list_append_number(rettv->vval.v_list, wp == NULL ? 0 : wp->w_winrow + 1);
list_append_number(rettv->vval.v_list, wp == NULL ? 0 : wp->w_wincol + 1);
} | 0 | [
"CWE-78"
]
| vim | 8c62a08faf89663e5633dc5036cd8695c80f1075 | 159,469,801,479,447,430,000,000,000,000,000,000,000 | 11 | patch 8.1.0881: can execute shell commands in rvim through interfaces
Problem: Can execute shell commands in rvim through interfaces.
Solution: Disable using interfaces in restricted mode. Allow for writing
file with writefile(), histadd() and a few others. |
static void
MYSQLND_METHOD(mysqlnd_protocol, init_row_packet)(struct st_mysqlnd_packet_row *packet)
{
DBG_ENTER("mysqlnd_protocol::init_row_packet");
memset(packet, 0, sizeof(*packet));
packet->header.m = &packet_methods[PROT_ROW_PACKET];
DBG_VOID_RETURN; | 0 | [
"CWE-120"
]
| php-src | 58006537fc5f133ae8549efe5118cde418b3ace9 | 193,063,622,448,810,080,000,000,000,000,000,000,000 | 7 | Fix bug #81719: mysqlnd/pdo password buffer overflow |
ResourceManagerImpl(Runtime::Loader& runtime, const std::string& runtime_key,
uint64_t max_connections, uint64_t max_pending_requests,
uint64_t max_requests, uint64_t max_retries, uint64_t max_connection_pools,
ClusterCircuitBreakersStats cb_stats, absl::optional<double> budget_percent,
absl::optional<uint32_t> min_retry_concurrency)
: connections_(max_connections, runtime, runtime_key + "max_connections", cb_stats.cx_open_,
cb_stats.remaining_cx_),
pending_requests_(max_pending_requests, runtime, runtime_key + "max_pending_requests",
cb_stats.rq_pending_open_, cb_stats.remaining_pending_),
requests_(max_requests, runtime, runtime_key + "max_requests", cb_stats.rq_open_,
cb_stats.remaining_rq_),
connection_pools_(max_connection_pools, runtime, runtime_key + "max_connection_pools",
cb_stats.cx_pool_open_, cb_stats.remaining_cx_pools_),
retries_(budget_percent, min_retry_concurrency, max_retries, runtime,
runtime_key + "retry_budget.", runtime_key + "max_retries",
cb_stats.rq_retry_open_, cb_stats.remaining_retries_, requests_,
pending_requests_) {} | 0 | [
"CWE-400"
]
| envoy | dfddb529e914d794ac552e906b13d71233609bf7 | 59,785,670,961,960,340,000,000,000,000,000,000,000 | 17 | listener: Add configurable accepted connection limits (#153)
Add support for per-listener limits on accepted connections.
Signed-off-by: Tony Allen <[email protected]> |
test_bson_install (TestSuite *suite)
{
TestSuite_Add (suite, "/bson/new", test_bson_new);
TestSuite_Add (suite, "/bson/new_from_buffer", test_bson_new_from_buffer);
TestSuite_Add (suite, "/bson/init", test_bson_init);
TestSuite_Add (suite, "/bson/init_static", test_bson_init_static);
TestSuite_Add (suite, "/bson/basic", test_bson_alloc);
TestSuite_Add (suite, "/bson/append_overflow", test_bson_append_overflow);
TestSuite_Add (suite, "/bson/append_array", test_bson_append_array);
TestSuite_Add (suite, "/bson/append_binary", test_bson_append_binary);
TestSuite_Add (suite,
"/bson/append_binary_deprecated",
test_bson_append_binary_deprecated);
TestSuite_Add (suite, "/bson/append_bool", test_bson_append_bool);
TestSuite_Add (suite, "/bson/append_code", test_bson_append_code);
TestSuite_Add (
suite, "/bson/append_code_with_scope", test_bson_append_code_with_scope);
TestSuite_Add (suite, "/bson/append_dbpointer", test_bson_append_dbpointer);
TestSuite_Add (suite, "/bson/append_document", test_bson_append_document);
TestSuite_Add (suite, "/bson/append_double", test_bson_append_double);
TestSuite_Add (suite, "/bson/append_int32", test_bson_append_int32);
TestSuite_Add (suite, "/bson/append_int64", test_bson_append_int64);
TestSuite_Add (
suite, "/bson/append_decimal128", test_bson_append_decimal128);
TestSuite_Add (suite, "/bson/append_iter", test_bson_append_iter);
TestSuite_Add (suite, "/bson/append_maxkey", test_bson_append_maxkey);
TestSuite_Add (suite, "/bson/append_minkey", test_bson_append_minkey);
TestSuite_Add (suite, "/bson/append_null", test_bson_append_null);
TestSuite_Add (suite, "/bson/append_oid", test_bson_append_oid);
TestSuite_Add (suite, "/bson/append_regex", test_bson_append_regex);
TestSuite_Add (suite, "/bson/append_utf8", test_bson_append_utf8);
TestSuite_Add (suite, "/bson/append_symbol", test_bson_append_symbol);
TestSuite_Add (suite, "/bson/append_time_t", test_bson_append_time_t);
TestSuite_Add (suite, "/bson/append_timestamp", test_bson_append_timestamp);
TestSuite_Add (suite, "/bson/append_timeval", test_bson_append_timeval);
TestSuite_Add (suite, "/bson/append_undefined", test_bson_append_undefined);
TestSuite_Add (suite, "/bson/append_general", test_bson_append_general);
TestSuite_Add (suite, "/bson/append_deep", test_bson_append_deep);
TestSuite_Add (suite, "/bson/utf8_key", test_bson_utf8_key);
TestSuite_Add (suite, "/bson/validate", test_bson_validate);
TestSuite_Add (suite, "/bson/validate/dbref", test_bson_validate_dbref);
TestSuite_Add (suite, "/bson/validate/bool", test_bson_validate_bool);
TestSuite_Add (
suite, "/bson/validate/dbpointer", test_bson_validate_dbpointer);
TestSuite_Add (suite, "/bson/new_1mm", test_bson_new_1mm);
TestSuite_Add (suite, "/bson/init_1mm", test_bson_init_1mm);
TestSuite_Add (suite, "/bson/build_child", test_bson_build_child);
TestSuite_Add (suite, "/bson/build_child_deep", test_bson_build_child_deep);
TestSuite_Add (suite,
"/bson/build_child_deep_no_begin_end",
test_bson_build_child_deep_no_begin_end);
TestSuite_Add (
suite, "/bson/build_child_array", test_bson_build_child_array);
TestSuite_Add (suite, "/bson/count", test_bson_count_keys);
TestSuite_Add (suite, "/bson/copy", test_bson_copy);
TestSuite_Add (suite, "/bson/copy_to", test_bson_copy_to);
TestSuite_Add (suite,
"/bson/copy_to_excluding_noinit",
test_bson_copy_to_excluding_noinit);
TestSuite_Add (suite, "/bson/initializer", test_bson_initializer);
TestSuite_Add (suite, "/bson/concat", test_bson_concat);
TestSuite_Add (suite, "/bson/reinit", test_bson_reinit);
TestSuite_Add (suite, "/bson/macros", test_bson_macros);
TestSuite_Add (suite, "/bson/clear", test_bson_clear);
TestSuite_Add (suite, "/bson/steal", test_bson_steal);
TestSuite_Add (suite, "/bson/reserve_buffer", test_bson_reserve_buffer);
TestSuite_Add (
suite, "/bson/reserve_buffer/errors", test_bson_reserve_buffer_errors);
TestSuite_Add (
suite, "/bson/destroy_with_steal", test_bson_destroy_with_steal);
TestSuite_Add (suite, "/bson/has_field", test_bson_has_field);
TestSuite_Add (
suite, "/bson/visit_invalid_field", test_bson_visit_invalid_field);
TestSuite_Add (
suite, "/bson/unsupported_type", test_bson_visit_unsupported_type);
TestSuite_Add (suite,
"/bson/unsupported_type/bad_key",
test_bson_visit_unsupported_type_bad_key);
TestSuite_Add (suite,
"/bson/unsupported_type/empty_key",
test_bson_visit_unsupported_type_empty_key);
TestSuite_Add (suite, "/bson/binary_subtype_2", test_bson_subtype_2);
TestSuite_Add (suite, "/util/next_power_of_two", test_next_power_of_two);
} | 0 | [
"CWE-125"
]
| libbson | 42900956dc461dfe7fb91d93361d10737c1602b3 | 176,569,707,742,221,800,000,000,000,000,000,000,000 | 84 | CDRIVER-2269 Check for zero string length in codewscope |
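The fix the ticket describes plausibly reduces to a check like this when decoding a code-with-scope element (a fragment; the field names are assumed):

/* Hedged sketch: a BSON code_w_scope embeds a length-prefixed string
 * whose length includes the trailing NUL, so zero is never valid. */
if (code_len == 0 || code_len > remaining) {
   iter->err_off = iter->off;   /* assumed error reporting */
   return false;                /* reject instead of reading past it */
}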
MagickExport MagickBooleanType GetOpenCLEnabled(void)
{
MagickCLEnv
clEnv;
clEnv=GetCurrentOpenCLEnv();
if (clEnv == (MagickCLEnv) NULL)
return(MagickFalse);
return(clEnv->enabled);
} | 0 | [
"CWE-476"
]
| ImageMagick | cca91aa1861818342e3d072bb0fad7dc4ffac24a | 35,640,572,317,606,345,000,000,000,000,000,000,000 | 10 | https://github.com/ImageMagick/ImageMagick/issues/790 |
pixReadStreamTiff(FILE *fp,
l_int32 n)
{
PIX *pix;
TIFF *tif;
PROCNAME("pixReadStreamTiff");
if (!fp)
return (PIX *)ERROR_PTR("stream not defined", procName, NULL);
if ((tif = fopenTiff(fp, "r")) == NULL)
return (PIX *)ERROR_PTR("tif not opened", procName, NULL);
if (TIFFSetDirectory(tif, n) == 0) {
TIFFCleanup(tif);
return NULL;
}
if ((pix = pixReadFromTiffStream(tif)) == NULL) {
TIFFCleanup(tif);
return NULL;
}
TIFFCleanup(tif);
return pix;
} | 0 | [
"CWE-125"
]
| leptonica | 5ba34b1fe741d69d43a6c8cf767756997eadd87c | 96,172,108,748,792,550,000,000,000,000,000,000,000 | 25 | Issue 23654 in oss-fuzz: Heap-buffer-overflow in pixReadFromTiffStream
* Increase scanline buffer for reading gray+alpha and converting to RGBA |
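The bullet in the message states the actual change: the scanline buffer was sized for the 2-byte gray+alpha input even though it also had to hold the 4-byte RGBA expansion. A hedged sketch of the sizing rule (illustrative code with a made-up function name, not leptonica's):

#include <stdlib.h>

/* Illustrative only: expanding a gray+alpha scanline (2 bytes/pixel)
 * to RGBA (4 bytes/pixel) needs a buffer sized for the larger layout;
 * sizing it for the input overruns the allocation during expansion. */
static unsigned char *
grayAlphaToRgba(const unsigned char *ga, size_t width)
{
    size_t          i;
    unsigned char  *rgba;

    if ((rgba = (unsigned char *)malloc(4 * width)) == NULL)  /* 4, not 2 */
        return NULL;
    for (i = 0; i < width; i++) {
        rgba[4 * i + 0] = ga[2 * i];      /* R = gray */
        rgba[4 * i + 1] = ga[2 * i];      /* G = gray */
        rgba[4 * i + 2] = ga[2 * i];      /* B = gray */
        rgba[4 * i + 3] = ga[2 * i + 1];  /* A = alpha */
    }
    return rgba;
}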
int kvm_handle_invalid_op(struct kvm_vcpu *vcpu)
{
kvm_queue_exception(vcpu, UD_VECTOR);
return 1;
} | 0 | [
"CWE-476"
]
| linux | 55749769fe608fa3f4a075e42e89d237c8e37637 | 104,129,505,989,116,600,000,000,000,000,000,000,000 | 5 | KVM: x86: Fix wall clock writes in Xen shared_info not to mark page dirty
When dirty ring logging is enabled, any dirty logging without an active
vCPU context will cause a kernel oops. But we've already declared that
the shared_info page doesn't get dirty tracking anyway, since it would
be kind of insane to mark it dirty every time we deliver an event channel
interrupt. Userspace is supposed to just assume it's always dirty any
time a vCPU can run or event channels are routed.
So stop using the generic kvm_write_wall_clock() and just write directly
through the gfn_to_pfn_cache that we already have set up.
We can make kvm_write_wall_clock() static in x86.c again now, but let's
not remove the 'sec_hi_ofs' argument even though it's not used yet. At
some point we *will* want to use that for KVM guests too.
Fixes: 629b5348841a ("KVM: x86/xen: update wallclock region")
Reported-by: butt3rflyh4ck <[email protected]>
Signed-off-by: David Woodhouse <[email protected]>
Message-Id: <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]> |
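The message describes replacing the generic wall-clock writer with a direct write through the cached mapping of the guest page. A hedged sketch of the seqcount-style publication protocol such a write must preserve; the struct and function names here are illustrative (the real code uses the pvclock ABI and the gfn_to_pfn_cache), but the version-field discipline is the known guest-visible contract:

#include <linux/types.h>	/* u32; kernel context assumed */

/* Illustrative only: the guest-visible wall clock carries a version
 * field so the guest can detect a torn update -- odd while the write
 * is in flight, even once it is complete. */
struct wall_clock_sketch {
	u32 version;
	u32 sec;
	u32 nsec;
};

static void write_wall_clock_sketch(struct wall_clock_sketch *wc,
				    u32 sec, u32 nsec)
{
	wc->version = (wc->version + 1) | 1;	/* odd: update in progress */
	smp_wmb();				/* order version vs. payload */
	wc->sec = sec;
	wc->nsec = nsec;
	smp_wmb();				/* payload visible first */
	wc->version++;				/* even again: update done */
}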
static inline u8 bnx2x_fp_fw_sb_id(struct bnx2x_fastpath *fp)
{
return fp->bp->base_fw_ndsb + fp->index + CNIC_SUPPORT(fp->bp);
} | 0 | [
"CWE-20"
]
| linux | 8914a595110a6eca69a5e275b323f5d09e18f4f9 | 42,946,210,626,488,770,000,000,000,000,000,000,000 | 4 | bnx2x: disable GSO where gso_size is too big for hardware
If a bnx2x card is passed a GSO packet with a gso_size larger than
~9700 bytes, it will cause a firmware error that will bring the card
down:
bnx2x: [bnx2x_attn_int_deasserted3:4323(enP24p1s0f0)]MC assert!
bnx2x: [bnx2x_mc_assert:720(enP24p1s0f0)]XSTORM_ASSERT_LIST_INDEX 0x2
bnx2x: [bnx2x_mc_assert:736(enP24p1s0f0)]XSTORM_ASSERT_INDEX 0x0 = 0x00000000 0x25e43e47 0x00463e01 0x00010052
bnx2x: [bnx2x_mc_assert:750(enP24p1s0f0)]Chip Revision: everest3, FW Version: 7_13_1
... (dump of values continues) ...
Detect when the mac length of a GSO packet is greater than the maximum
packet size (9700 bytes) and disable GSO.
Signed-off-by: Daniel Axtens <[email protected]>
Reviewed-by: Eric Dumazet <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
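A hedged sketch of the check the message describes, wired into the standard ndo_features_check hook. It assumes TCP GSO, so the per-segment frame size is approximated as the L2..L4 header length plus gso_size; the 9700-byte limit is taken from the message, and the function name is illustrative rather than the exact commit:

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/tcp.h>

/* Illustrative only: strip all GSO features from a packet whose
 * segments would exceed what the firmware tolerates, forcing a
 * software-segmentation fallback instead of a firmware assert. */
static netdev_features_t
gso_limit_features_check(struct sk_buff *skb, struct net_device *dev,
			 netdev_features_t features)
{
	unsigned int hdr_len;

	if (!skb_is_gso(skb))
		return features;

	/* L2..L4 header bytes prepended to every segment (TCP assumed) */
	hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	if (hdr_len + skb_shinfo(skb)->gso_size > 9700)
		features &= ~NETIF_F_GSO_MASK;
	return features;
}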
static MagickBooleanType ReadPSDChannelZip(Image *image,const size_t channels,
const ssize_t type,const PSDCompressionType compression,
const size_t compact_size,ExceptionInfo *exception)
{
MagickBooleanType
status;
register unsigned char
*p;
size_t
count,
length,
packet_size,
row_size;
ssize_t
y;
unsigned char
*compact_pixels,
*pixels;
z_stream
stream;
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" layer data is ZIP compressed");
compact_pixels=(unsigned char *) AcquireQuantumMemory(compact_size,
sizeof(*compact_pixels));
if (compact_pixels == (unsigned char *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
packet_size=GetPSDPacketSize(image);
row_size=image->columns*packet_size;
count=image->rows*row_size;
pixels=(unsigned char *) AcquireQuantumMemory(count,sizeof(*pixels));
if (pixels == (unsigned char *) NULL)
{
compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
if (ReadBlob(image,compact_size,compact_pixels) != (ssize_t) compact_size)
{
compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile",
image->filename);
}
ResetMagickMemory(&stream,0,sizeof(stream));
stream.data_type=Z_BINARY;
stream.next_in=(Bytef *)compact_pixels;
stream.avail_in=(uInt) compact_size;
stream.next_out=(Bytef *)pixels;
stream.avail_out=(uInt) count;
if (inflateInit(&stream) == Z_OK)
{
int
ret;
while (stream.avail_out > 0)
{
ret=inflate(&stream,Z_SYNC_FLUSH);
if ((ret != Z_OK) && (ret != Z_STREAM_END))
{
(void) inflateEnd(&stream);
compact_pixels=(unsigned char *) RelinquishMagickMemory(
compact_pixels);
pixels=(unsigned char *) RelinquishMagickMemory(pixels);
return(MagickFalse);
}
}
(void) inflateEnd(&stream);
}
if (compression == ZipWithPrediction)
{
p=pixels;
while (count > 0)
{
length=image->columns;
while (--length)
{
if (packet_size == 2)
{
p[2]+=p[0]+((p[1]+p[3]) >> 8);
p[3]+=p[1];
}
else
*(p+1)+=*p;
p+=packet_size;
}
p+=packet_size;
count-=row_size;
}
}
status=MagickTrue;
p=pixels;
for (y=0; y < (ssize_t) image->rows; y++)
{
status=ReadPSDChannelPixels(image,channels,y,type,p,exception);
if (status == MagickFalse)
break;
p+=row_size;
}
compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
pixels=(unsigned char *) RelinquishMagickMemory(pixels);
return(status);
} | 0 | [
"CWE-703",
"CWE-834"
]
| ImageMagick | 04a567494786d5bb50894fc8bb8fea0cf496bea8 | 325,705,394,434,656,900,000,000,000,000,000,000,000 | 118 | Slightly different fix for #714 |
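The ZipWithPrediction branch in the function above undoes per-row horizontal deltas after inflation. A minimal standalone restatement of that step for the 8-bit case (illustrative, not ImageMagick's code); the 16-bit case in the function additionally propagates a carry across each byte pair, as the p[2]/p[3] arithmetic shows:

#include <stddef.h>

/* Illustrative only: each inflated row stores horizontal differences,
 * so every sample is recovered by adding its left-hand neighbour. */
static void
UndoDeltaRow8(unsigned char *row, size_t columns)
{
  size_t
    x;

  for (x=1; x < columns; x++)
    row[x]+=row[x-1];
}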