func
stringlengths 0
484k
| target
int64 0
1
| cwe
listlengths 0
4
| project
stringclasses 799
values | commit_id
stringlengths 40
40
| hash
float64 1,215,700,430,453,689,100,000,000B
340,281,914,521,452,260,000,000,000,000B
| size
int64 1
24k
| message
stringlengths 0
13.3k
|
---|---|---|---|---|---|---|---|
/*
 * Create a regular file in a UDF directory.
 *
 * Allocates a new inode, wires up the address-space / inode / file
 * operations for a regular UDF file and links it into the directory.
 * Returns 0 on success or a negative errno from inode allocation /
 * directory insertion.
 */
static int udf_create(struct inode *dir, struct dentry *dentry, umode_t mode,
		      bool excl)
{
	struct inode *inode = udf_new_inode(dir, mode);

	if (IS_ERR(inode))
		return PTR_ERR(inode);

	/* Data embedded in the ICB uses the adinicb address-space ops. */
	inode->i_data.a_ops =
		(UDF_I(inode)->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) ?
			&udf_adinicb_aops : &udf_aops;
	inode->i_op = &udf_file_inode_operations;
	inode->i_fop = &udf_file_operations;
	mark_inode_dirty(inode);

	return udf_add_nondir(dentry, inode);
}
| 0 |
[
"CWE-17"
] |
linux
|
0e5cc9a40ada6046e6bc3bdfcd0c0d7e4b706b14
| 261,151,154,635,553,160,000,000,000,000,000,000,000 | 18 |
udf: Check path length when reading symlink
Symlink reading code does not check whether the resulting path fits into
the page provided by the generic code. This isn't as easy as just
checking the symlink size because of various encoding conversions we
perform on path. So we have to check whether there is still enough space
in the buffer on the fly.
CC: [email protected]
Reported-by: Carl Henrik Lunde <[email protected]>
Signed-off-by: Jan Kara <[email protected]>
|
/*
 * Restore a dump using GNU tar.
 *
 * Builds the tar argument vector from the application configuration and the
 * DLE include/exclude lists, optionally writes DAR (direct-access recovery)
 * byte ranges derived from the recorded dump state, then forks and execs
 * tar (as root) with the archive on stdin.  Does not return normally: exits
 * with 0 on success, 1 on failure.
 */
amgtar_restore(
    application_argument_t *argument)
{
    char       *cmd;
    GPtrArray  *argv_ptr = g_ptr_array_new();
    GPtrArray  *include_array = g_ptr_array_new();
    char      **env;
    int         j;
    char       *e;
    char       *include_filename = NULL;
    char       *exclude_filename = NULL;
    int         tarpid;
    amwait_t    wait_status;
    int         exit_status = 0;
    char       *errmsg = NULL;
    FILE       *dar_file;

    if (!gnutar_path) {
	error(_("GNUTAR-PATH not defined"));
    }
    if (!check_exec_for_suid(gnutar_path, FALSE)) {
	error("'%s' binary is not secure", gnutar_path);
    }

    cmd = g_strdup(gnutar_path);
    g_ptr_array_add(argv_ptr, g_strdup(gnutar_path));
    g_ptr_array_add(argv_ptr, g_strdup("--numeric-owner"));
    if (gnutar_no_unquote)
	g_ptr_array_add(argv_ptr, g_strdup("--no-unquote"));
    if (gnutar_acls)
	g_ptr_array_add(argv_ptr, g_strdup("--acls"));
    if (gnutar_selinux)
	g_ptr_array_add(argv_ptr, g_strdup("--selinux"));
    if (gnutar_xattrs)
	g_ptr_array_add(argv_ptr, g_strdup("--xattrs"));
    /* ignore trailing zero blocks on input (this was the default until tar-1.21) */
    if (argument->ignore_zeros) {
	g_ptr_array_add(argv_ptr, g_strdup("--ignore-zeros"));
    }
    if (argument->tar_blocksize) {
	g_ptr_array_add(argv_ptr, g_strdup("--blocking-factor"));
	g_ptr_array_add(argv_ptr, g_strdup(argument->tar_blocksize));
    }
    g_ptr_array_add(argv_ptr, g_strdup("-xpGvf"));
    g_ptr_array_add(argv_ptr, g_strdup("-"));	/* archive comes from stdin */

    if (gnutar_directory) {
	/* Validate the restore target before handing it to tar. */
	struct stat stat_buf;
	if (stat(gnutar_directory, &stat_buf) != 0) {
	    fprintf(stderr, "can not stat directory %s: %s\n",
		    gnutar_directory, strerror(errno));
	    exit(1);
	}
	if (!S_ISDIR(stat_buf.st_mode)) {
	    fprintf(stderr, "%s is not a directory\n", gnutar_directory);
	    exit(1);
	}
	if (access(gnutar_directory, W_OK) != 0) {
	    fprintf(stderr, "Can't write to %s: %s\n",
		    gnutar_directory, strerror(errno));
	    exit(1);
	}
	g_ptr_array_add(argv_ptr, g_strdup("--directory"));
	g_ptr_array_add(argv_ptr, g_strdup(gnutar_directory));
    }

    g_ptr_array_add(argv_ptr, g_strdup("--wildcards"));

    if (argument->dle.exclude_list &&
	argument->dle.exclude_list->nb_element == 1) {
	FILE *exclude;
	char *sdisk;
	int   in_argv;
	int   entry_in_exclude = 0;
	char  line[2*PATH_MAX];
	FILE *exclude_list;

	if (argument->dle.disk) {
	    sdisk = sanitise_filename(argument->dle.disk);
	} else {
	    sdisk = g_strdup_printf("no_dle-%d", (int)getpid());
	}
	exclude_filename = g_strjoin(NULL, AMANDA_TMPDIR, "/", "exclude-",
				     sdisk, NULL);
	amfree(sdisk);	/* fix: sdisk was leaked in this branch */
	exclude_list = fopen(argument->dle.exclude_list->first->name, "r");
	if (!exclude_list) {
	    fprintf(stderr, "Cannot open exclude file '%s': %s\n",
		    argument->dle.exclude_list->first->name, strerror(errno));
	    error("Cannot open exclude file '%s': %s\n",
		  argument->dle.exclude_list->first->name, strerror(errno));
	    /*NOTREACHED*/
	}
	exclude = fopen(exclude_filename, "w");
	if (!exclude) {
	    fprintf(stderr, "Cannot open exclude file '%s': %s\n",
		    exclude_filename, strerror(errno));
	    fclose(exclude_list);
	    error("Cannot open exclude file '%s': %s\n",
		  exclude_filename, strerror(errno));
	    /*NOTREACHED*/
	}
	while (fgets(line, 2*PATH_MAX, exclude_list)) {
	    char   *escaped;
	    size_t  len = strlen(line);

	    /* fix: only strip a newline that is actually there; a final
	     * line without '\n' used to lose its last character. */
	    if (len > 0 && line[len - 1] == '\n')
		line[len - 1] = '\0';
	    escaped = escape_tar_glob(line, &in_argv);
	    if (in_argv) {
		/* fix: g_strdup the literal for consistency with every
		 * other argv entry */
		g_ptr_array_add(argv_ptr, g_strdup("--exclude"));
		g_ptr_array_add(argv_ptr, escaped);
	    } else {
		fprintf(exclude, "%s\n", escaped);
		entry_in_exclude++;
		amfree(escaped);
	    }
	}
	fclose(exclude_list);
	fclose(exclude);
	g_ptr_array_add(argv_ptr, g_strdup("--exclude-from"));
	g_ptr_array_add(argv_ptr, exclude_filename);
    }

    if (argument->exclude_list_glob) {
	g_ptr_array_add(argv_ptr, g_strdup("--exclude-from"));
	g_ptr_array_add(argv_ptr, g_strdup(argument->exclude_list_glob));
    }

    {
	GPtrArray *argv_include = g_ptr_array_new();
	FILE  *include;
	char  *sdisk;
	int    in_argv;
	guint  i;
	int    entry_in_include = 0;

	if (argument->dle.disk) {
	    sdisk = sanitise_filename(argument->dle.disk);
	} else {
	    sdisk = g_strdup_printf("no_dle-%d", (int)getpid());
	}
	include_filename = g_strjoin(NULL, AMANDA_TMPDIR, "/", "include-", sdisk, NULL);
	include = fopen(include_filename, "w");
	if (!include) {
	    fprintf(stderr, "Cannot open include file '%s': %s\n",
		    include_filename, strerror(errno));
	    error("Cannot open include file '%s': %s\n",
		  include_filename, strerror(errno));
	    /*NOTREACHED*/
	}
	if (argument->dle.include_list &&
	    argument->dle.include_list->nb_element == 1) {
	    char  line[2*PATH_MAX];
	    FILE *include_list = fopen(argument->dle.include_list->first->name, "r");
	    if (!include_list) {
		fclose(include);
		fprintf(stderr, "Cannot open include file '%s': %s\n",
			argument->dle.include_list->first->name,
			strerror(errno));
		error("Cannot open include file '%s': %s\n",
		      argument->dle.include_list->first->name,
		      strerror(errno));
		/*NOTREACHED*/
	    }
	    while (fgets(line, 2*PATH_MAX, include_list)) {
		char   *escaped;
		size_t  len = strlen(line);

		/* fix: same newline-stripping guard as in the exclude loop */
		if (len > 0 && line[len - 1] == '\n')
		    line[len - 1] = '\0';
		escaped = escape_tar_glob(line, &in_argv);
		g_ptr_array_add(include_array, g_strdup(escaped));
		if (in_argv) {
		    g_ptr_array_add(argv_include, escaped);
		} else {
		    fprintf(include, "%s\n", escaped);
		    entry_in_include++;
		    amfree(escaped);
		}
	    }
	    fclose(include_list);
	}
	if (argument->dle.include_file) {
	    sle_t *slif;
	    for (slif = argument->dle.include_file->first; slif != NULL; slif = slif->next) {
		char *escaped = escape_tar_glob(slif->name, &in_argv);
		g_ptr_array_add(include_array, g_strdup(escaped));
		if (in_argv) {
		    g_ptr_array_add(argv_include, escaped);
		} else {
		    fprintf(include, "%s\n", escaped);
		    entry_in_include++;
		    amfree(escaped);
		}
	    }
	}
	/* Paths given on the command line (argv[0] is the command itself). */
	for (j = 1; j < argument->argc; j++) {
	    char *escaped = escape_tar_glob(argument->argv[j], &in_argv);
	    g_ptr_array_add(include_array, g_strdup(escaped));
	    if (in_argv) {
		g_ptr_array_add(argv_include, escaped);
	    } else {
		fprintf(include, "%s\n", escaped);
		entry_in_include++;
		amfree(escaped);
	    }
	}
	fclose(include);

	if (entry_in_include) {
	    g_ptr_array_add(argv_ptr, g_strdup("--files-from"));
	    g_ptr_array_add(argv_ptr, include_filename);
	}
	if (argument->include_list_glob) {
	    g_ptr_array_add(argv_ptr, g_strdup("--files-from"));
	    g_ptr_array_add(argv_ptr, g_strdup(argument->include_list_glob));
	}
	for (i = 0; i < argv_include->len; i++) {
	    g_ptr_array_add(argv_ptr, (char *)g_ptr_array_index(argv_include, i));
	}
	amfree(sdisk);
    }
    g_ptr_array_add(argv_ptr, NULL);

    if (argument->dar) {
	int dar_fd = argument->state_stream;
	if (dar_fd == -1)
	    dar_fd = 3;		/* default DAR channel */
	dar_file = fdopen(dar_fd, "w");
	if (!dar_file) {
	    int save_errno = errno;
	    fprintf(stderr, "Can't fdopen the DAR file (fd %d): %s\n",
		    dar_fd, strerror(save_errno));
	    g_debug("Can't fdopen the DAR file (fd %d): %s",
		    dar_fd, strerror(save_errno));
	    exit(1);
	}

	if (argument->recover_dump_state_file &&
	    include_array->len > 0) {
	    char  line[32768];
	    FILE *recover_state_file = fopen(argument->recover_dump_state_file,
					     "r");
	    off_t previous_block = -1;	/* fix: was int, truncating off_t */

	    /* fix: the fopen() result was used unchecked -> NULL deref in
	     * fgets() when the state file could not be opened. */
	    if (!recover_state_file) {
		fprintf(stderr, "Cannot open recover dump state file '%s': %s\n",
			argument->recover_dump_state_file, strerror(errno));
		g_debug("Cannot open recover dump state file '%s': %s",
			argument->recover_dump_state_file, strerror(errno));
		exit(1);
	    }
	    /* Each line is "<block_no> <filename>"; emit one DAR byte range
	     * per run of consecutive blocks whose filename matches one of
	     * the requested includes. */
	    while (fgets(line, 32768, recover_state_file) != NULL) {
		off_t    block_no = g_ascii_strtoull(line, NULL, 0);
		gboolean match = FALSE;
		char    *filename = strchr(line, ' ');
		char    *ii;
		guint    i;
		size_t   flen;

		if (!filename)
		    continue;
		filename++;
		flen = strlen(filename);
		/* fix: guard against an empty filename before indexing
		 * filename[flen-1] */
		if (flen > 0 && filename[flen - 1] == '\n')
		    filename[flen - 1] = '\0';
		g_debug("recover_dump_state_file: %lld %s", (long long)block_no, filename);
		for (i = 0; i < include_array->len; i++) {
		    size_t strlenii;
		    ii = g_ptr_array_index(include_array, i);
		    ii++;		/* remove leading '.' */
		    strlenii = strlen(ii);
		    /* exact match, or prefix match at a path-component
		     * boundary */
		    if (g_str_equal(filename, ii) == 1 ||
			(strlenii < strlen(filename) &&
			 strncmp(filename, ii, strlenii) == 0 &&
			 filename[strlenii] == '/')) {
			match = TRUE;
			break;
		    }
		}
		if (match) {
		    if (previous_block < 0)
			previous_block = block_no;
		} else if (previous_block >= 0) {
		    g_debug("restore block %lld (%lld) to %lld (%lld)",
			    (long long)previous_block * 512,
			    (long long)previous_block,
			    (long long)block_no * 512 - 1,
			    (long long)block_no);
		    fprintf(dar_file, "DAR %lld:%lld\n",
			    (long long)previous_block * 512,
			    (long long)block_no * 512 - 1);
		    previous_block = -1;
		}
	    }
	    fclose(recover_state_file);
	    if (previous_block >= 0) {
		/* The last matching run extends to the end of the archive. */
		g_debug("restore block %lld (%lld) to END",
			(long long)previous_block * 512,
			(long long)previous_block);
		fprintf(dar_file, "DAR %lld:-1\n",
			(long long)previous_block * 512);
	    }
	} else {
	    /* No usable DAR state: request the whole archive. */
	    fprintf(dar_file, "DAR 0:-1\n");
	    g_debug("full dar: 0:-1");
	}
	fflush(dar_file);
	fclose(dar_file);
    }

    debug_executing(argv_ptr);
    tarpid = fork();
    switch (tarpid) {
    case -1:
	error(_("%s: fork returned: %s"), get_pname(), strerror(errno));
    case 0:
	env = safe_env();
	become_root();
	execve(cmd, (char **)argv_ptr->pdata, env);
	/* Only reached if execve() failed. */
	free_env(env);
	e = strerror(errno);
	error(_("error [exec %s: %s]"), cmd, e);
	break;
    default:
	break;
    }

    waitpid(tarpid, &wait_status, 0);
    if (WIFSIGNALED(wait_status)) {
	errmsg = g_strdup_printf(_("%s terminated with signal %d: see %s"),
				 cmd, WTERMSIG(wait_status), dbfn());
	exit_status = 1;
    } else if (WIFEXITED(wait_status)) {
	if (WEXITSTATUS(wait_status) > 0) {
	    errmsg = g_strdup_printf(_("%s exited with status %d: see %s"),
				     cmd, WEXITSTATUS(wait_status), dbfn());
	    exit_status = 1;
	} else {
	    /* Normal exit */
	    exit_status = 0;
	}
    } else {
	errmsg = g_strdup_printf(_("%s got bad exit: see %s"),
				 cmd, dbfn());
	exit_status = 1;
    }
    if (errmsg) {
	dbprintf("%s", errmsg);
	fprintf(stderr, "ERROR %s\n", errmsg);
	amfree(errmsg);
    }

    /* Keep the generated include/exclude files around in verbose mode for
     * debugging. */
    if (argument->verbose == 0) {
	if (exclude_filename)
	    unlink(exclude_filename);
	unlink(include_filename);
    }
    amfree(cmd);
    amfree(include_filename);
    amfree(exclude_filename);
    exit(exit_status);
}
| 0 |
[
"CWE-77"
] |
amanda
|
29bae2e271093cd8d06ea98f73a474c685c5a314
| 87,398,532,643,576,380,000,000,000,000,000,000,000 | 355 |
* application-src/ambsdtar.c, application-src/amgtar.c,
application-src/amstar.c: Filter option from COMMAND-OPTIONS
* common-src/ammessage.c: Add message.
git-svn-id: https://svn.code.sf.net/p/amanda/code/amanda/trunk@6483 a8d146d6-cc15-0410-8900-af154a0219e0
|
/*
 * Parse the virtual-console chardev options into a ChardevVC backend.
 *
 * Each geometry option (pixel width/height, text cols/rows) is optional;
 * the default of 0 means "not given", so a has_* flag is only set for a
 * non-zero value.
 */
static void qemu_chr_parse_vc(QemuOpts *opts, ChardevBackend *backend,
                              Error **errp)
{
    ChardevVC *vc = g_new0(ChardevVC, 1);
    int v;

    backend->u.vc.data = vc;
    qemu_chr_parse_common(opts, qapi_ChardevVC_base(vc));

    v = qemu_opt_get_number(opts, "width", 0);
    if (v != 0) {
        vc->has_width = true;
        vc->width = v;
    }

    v = qemu_opt_get_number(opts, "height", 0);
    if (v != 0) {
        vc->has_height = true;
        vc->height = v;
    }

    v = qemu_opt_get_number(opts, "cols", 0);
    if (v != 0) {
        vc->has_cols = true;
        vc->cols = v;
    }

    v = qemu_opt_get_number(opts, "rows", 0);
    if (v != 0) {
        vc->has_rows = true;
        vc->rows = v;
    }
}
| 0 |
[
"CWE-416"
] |
qemu
|
a4afa548fc6dd9842ed86639b4d37d4d1c4ad480
| 225,602,035,546,576,400,000,000,000,000,000,000,000 | 33 |
char: move front end handlers in CharBackend
Since the hanlders are associated with a CharBackend, rather than the
CharDriverState, it is more appropriate to store in CharBackend. This
avoids the handler copy dance in qemu_chr_fe_set_handlers() then
mux_chr_update_read_handler(), by storing the CharBackend pointer
directly.
Also a mux CharDriver should go through mux->backends[focused], since
chr->be will stay NULL. Before that, it was possible to call
chr->handler by mistake with surprising results, for ex through
qemu_chr_be_can_write(), which would result in calling the last set
handler front end, not the one with focus.
Signed-off-by: Marc-André Lureau <[email protected]>
Message-Id: <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
|
/*
 * Compute the address of the string-instruction source operand from
 * ESI, honouring the current address size (s->aflag) and any segment
 * override (s->override; DS by default).
 */
static inline void gen_string_movl_A0_ESI(DisasContext *s)
{
    gen_lea_v_seg(s, s->aflag, cpu_regs[R_ESI], R_DS, s->override);
}
| 0 |
[
"CWE-94"
] |
qemu
|
30663fd26c0307e414622c7a8607fbc04f92ec14
| 59,736,438,787,008,790,000,000,000,000,000,000,000 | 4 |
tcg/i386: Check the size of instruction being translated
This fixes the bug: 'user-to-root privesc inside VM via bad translation
caching' reported by Jann Horn here:
https://bugs.chromium.org/p/project-zero/issues/detail?id=1122
Reviewed-by: Richard Henderson <[email protected]>
CC: Peter Maydell <[email protected]>
CC: Paolo Bonzini <[email protected]>
Reported-by: Jann Horn <[email protected]>
Signed-off-by: Pranith Kumar <[email protected]>
Message-Id: <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
|
/*
 * Encode a called/calling X.121 address pair into wire format at @p.
 *
 * Layout: one length octet (calling-address digit count in the high
 * nibble, called-address digit count in the low nibble) followed by the
 * called digits then the calling digits packed as BCD nibbles, two digits
 * per octet, high nibble first.
 *
 * Returns the number of octets written.
 */
int x25_addr_aton(unsigned char *p, struct x25_address *called_addr,
		  struct x25_address *calling_addr)
{
	unsigned int called_len, calling_len;
	char *called, *calling;
	int i;

	called = called_addr->x25_addr;
	calling = calling_addr->x25_addr;
	called_len = strlen(called);
	calling_len = strlen(calling);

	/* Length octet. */
	*p++ = (calling_len << 4) | (called_len << 0);

	/* Pack the called digits first, then the calling digits, as one
	 * continuous nibble stream. */
	for (i = 0; i < (called_len + calling_len); i++) {
		unsigned char digit;

		if (i < called_len)
			digit = *called++ - '0';
		else
			digit = *calling++ - '0';

		if (i % 2 != 0) {
			*p |= digit;		/* low nibble completes octet */
			p++;
		} else {
			*p = digit << 4;	/* start a fresh octet */
		}
	}

	/* Length octet plus the rounded-up count of packed-digit octets. */
	return 1 + (called_len + calling_len + 1) / 2;
}
| 0 |
[] |
net
|
7781607938c8371d4c2b243527430241c62e39c2
| 311,594,746,685,362,740,000,000,000,000,000,000,000 | 37 |
net/x25: Fix null-ptr-deref caused by x25_disconnect
When the link layer is terminating, x25->neighbour will be set to NULL
in x25_disconnect(). As a result, it could cause null-ptr-deref bugs in
x25_sendmsg(),x25_recvmsg() and x25_connect(). One of the bugs is
shown below.
(Thread 1) | (Thread 2)
x25_link_terminated() | x25_recvmsg()
x25_kill_by_neigh() | ...
x25_disconnect() | lock_sock(sk)
... | ...
x25->neighbour = NULL //(1) |
... | x25->neighbour->extended //(2)
The code sets NULL to x25->neighbour in position (1) and dereferences
x25->neighbour in position (2), which could cause null-ptr-deref bug.
This patch adds lock_sock() in x25_kill_by_neigh() in order to synchronize
with x25_sendmsg(), x25_recvmsg() and x25_connect(). What`s more, the
sock held by lock_sock() is not NULL, because it is extracted from x25_list
and uses x25_list_lock to synchronize.
Fixes: 4becb7ee5b3d ("net/x25: Fix x25_neigh refcnt leak when x25 disconnect")
Signed-off-by: Duoming Zhou <[email protected]>
Reviewed-by: Lin Ma <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
/*
 * Lockless check whether @sk's receive queue has grown past its backlog
 * limit.  Uses skb_queue_len_lockless()/READ_ONCE() so it may be called
 * without the queue lock; the answer is inherently racy and only a hint.
 */
static inline int unix_recvq_full_lockless(const struct sock *sk)
{
	return skb_queue_len_lockless(&sk->sk_receive_queue) >
		READ_ONCE(sk->sk_max_ack_backlog);
}
| 0 |
[
"CWE-362"
] |
linux
|
cbcf01128d0a92e131bd09f1688fe032480b65ca
| 210,114,821,505,408,100,000,000,000,000,000,000,000 | 5 |
af_unix: fix garbage collect vs MSG_PEEK
unix_gc() assumes that candidate sockets can never gain an external
reference (i.e. be installed into an fd) while the unix_gc_lock is
held. Except for MSG_PEEK this is guaranteed by modifying inflight
count under the unix_gc_lock.
MSG_PEEK does not touch any variable protected by unix_gc_lock (file
count is not), yet it needs to be serialized with garbage collection.
Do this by locking/unlocking unix_gc_lock:
1) increment file count
2) lock/unlock barrier to make sure incremented file count is visible
to garbage collection
3) install file into fd
This is a lock barrier (unlike smp_mb()) that ensures that garbage
collection is run completely before or completely after the barrier.
Cc: <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
Signed-off-by: Miklos Szeredi <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
// Exercises ResponseWrapper attribute lookup: unknown keys, size/total_size,
// status code and details, header/trailer maps, response flags, and the gRPC
// status derived (in priority order) from trailers, headers, or the HTTP code.
TEST(Context, ResponseAttributes) {
  NiceMock<StreamInfo::MockStreamInfo> info;
  NiceMock<StreamInfo::MockStreamInfo> empty_info;
  const std::string header_name = "test-header";
  const std::string trailer_name = "test-trailer";
  const std::string grpc_status = "grpc-status";
  Http::TestResponseHeaderMapImpl header_map{{header_name, "a"}};
  Http::TestResponseTrailerMapImpl trailer_map{{trailer_name, "b"}, {grpc_status, "8"}};
  Protobuf::Arena arena;
  ResponseWrapper response(arena, &header_map, &trailer_map, info);
  // Wrapper with no headers/trailers and stream info without expectations.
  ResponseWrapper empty_response(arena, nullptr, nullptr, empty_info);
  EXPECT_CALL(info, responseCode()).WillRepeatedly(Return(404));
  EXPECT_CALL(info, bytesSent()).WillRepeatedly(Return(123));
  EXPECT_CALL(info, responseFlags()).WillRepeatedly(Return(0x1));
  const absl::optional<std::string> code_details = "unauthorized";
  EXPECT_CALL(info, responseCodeDetails()).WillRepeatedly(ReturnRef(code_details));
  {
    // Unknown attribute name yields no value.
    auto value = response[CelValue::CreateStringView(Undefined)];
    EXPECT_FALSE(value.has_value());
  }
  {
    // Non-string key yields no value.
    auto value = response[CelValue::CreateInt64(13)];
    EXPECT_FALSE(value.has_value());
  }
  {
    // "size" reflects the mocked bytesSent().
    auto value = response[CelValue::CreateStringView(Size)];
    EXPECT_TRUE(value.has_value());
    ASSERT_TRUE(value.value().IsInt64());
    EXPECT_EQ(123, value.value().Int64OrDie());
  }
  {
    // "total_size" is larger than "size" here; presumably bytesSent plus
    // header/trailer byte size — confirm against the wrapper implementation.
    auto value = response[CelValue::CreateStringView(TotalSize)];
    EXPECT_TRUE(value.has_value());
    ASSERT_TRUE(value.value().IsInt64());
    EXPECT_EQ(160, value.value().Int64OrDie());
  }
  {
    // No headers/trailers and no bytes sent -> zero total size.
    auto value = empty_response[CelValue::CreateStringView(TotalSize)];
    EXPECT_TRUE(value.has_value());
    ASSERT_TRUE(value.value().IsInt64());
    EXPECT_EQ(0, value.value().Int64OrDie());
  }
  {
    auto value = response[CelValue::CreateStringView(Code)];
    EXPECT_TRUE(value.has_value());
    ASSERT_TRUE(value.value().IsInt64());
    EXPECT_EQ(404, value.value().Int64OrDie());
  }
  {
    auto value = response[CelValue::CreateStringView(CodeDetails)];
    EXPECT_TRUE(value.has_value());
    ASSERT_TRUE(value.value().IsString());
    EXPECT_EQ(code_details.value(), value.value().StringOrDie().value());
  }
  {
    // Header map lookup: present key found, unknown key absent.
    auto value = response[CelValue::CreateStringView(Headers)];
    EXPECT_TRUE(value.has_value());
    ASSERT_TRUE(value.value().IsMap());
    auto& map = *value.value().MapOrDie();
    EXPECT_FALSE(map.empty());
    EXPECT_EQ(1, map.size());
    auto header = map[CelValue::CreateStringView(header_name)];
    EXPECT_TRUE(header.has_value());
    ASSERT_TRUE(header.value().IsString());
    EXPECT_EQ("a", header.value().StringOrDie().value());
    auto missing = map[CelValue::CreateStringView(Undefined)];
    EXPECT_FALSE(missing.has_value());
  }
  {
    // Trailer map contains both the test trailer and grpc-status.
    auto value = response[CelValue::CreateStringView(Trailers)];
    EXPECT_TRUE(value.has_value());
    ASSERT_TRUE(value.value().IsMap());
    auto& map = *value.value().MapOrDie();
    EXPECT_FALSE(map.empty());
    EXPECT_EQ(2, map.size());
    auto header = map[CelValue::CreateString(&trailer_name)];
    EXPECT_TRUE(header.has_value());
    ASSERT_TRUE(header.value().IsString());
    EXPECT_EQ("b", header.value().StringOrDie().value());
  }
  {
    auto value = response[CelValue::CreateStringView(Flags)];
    EXPECT_TRUE(value.has_value());
    ASSERT_TRUE(value.value().IsInt64());
    EXPECT_EQ(0x1, value.value().Int64OrDie());
  }
  {
    // grpc-status taken from the trailer ("8").
    auto value = response[CelValue::CreateStringView(GrpcStatus)];
    EXPECT_TRUE(value.has_value());
    ASSERT_TRUE(value.value().IsInt64());
    EXPECT_EQ(0x8, value.value().Int64OrDie());
  }
  {
    // Without headers, trailers, or a response code, no gRPC status.
    auto value = empty_response[CelValue::CreateStringView(GrpcStatus)];
    EXPECT_FALSE(value.has_value());
  }
  {
    auto value = empty_response[CelValue::CreateStringView(Code)];
    EXPECT_FALSE(value.has_value());
  }
  {
    auto value = empty_response[CelValue::CreateStringView(CodeDetails)];
    EXPECT_FALSE(value.has_value());
  }
  {
    // grpc-status falls back to the response header when absent in trailers.
    Http::TestResponseHeaderMapImpl header_map{{header_name, "a"}, {grpc_status, "7"}};
    Http::TestResponseTrailerMapImpl trailer_map{{trailer_name, "b"}};
    Protobuf::Arena arena;
    ResponseWrapper response_header_status(arena, &header_map, &trailer_map, info);
    auto value = response_header_status[CelValue::CreateStringView(GrpcStatus)];
    EXPECT_TRUE(value.has_value());
    ASSERT_TRUE(value.value().IsInt64());
    EXPECT_EQ(0x7, value.value().Int64OrDie());
  }
  {
    // With no explicit grpc-status anywhere, it is mapped from the HTTP code.
    Http::TestResponseHeaderMapImpl header_map{{header_name, "a"}};
    Http::TestResponseTrailerMapImpl trailer_map{{trailer_name, "b"}};
    Protobuf::Arena arena;
    ResponseWrapper response_no_status(arena, &header_map, &trailer_map, info);
    auto value = response_no_status[CelValue::CreateStringView(GrpcStatus)];
    EXPECT_TRUE(value.has_value());
    ASSERT_TRUE(value.value().IsInt64());
    EXPECT_EQ(0xc, value.value().Int64OrDie()); // http:404 -> grpc:12
  }
  {
    // No grpc-status and no HTTP code -> no value at all.
    NiceMock<StreamInfo::MockStreamInfo> info_without_code;
    Http::TestResponseHeaderMapImpl header_map{{header_name, "a"}};
    Http::TestResponseTrailerMapImpl trailer_map{{trailer_name, "b"}};
    Protobuf::Arena arena;
    ResponseWrapper response_no_status(arena, &header_map, &trailer_map, info_without_code);
    auto value = response_no_status[CelValue::CreateStringView(GrpcStatus)];
    EXPECT_FALSE(value.has_value());
  }
}
| 0 |
[] |
envoy
|
2c60632d41555ec8b3d9ef5246242be637a2db0f
| 141,580,274,282,794,310,000,000,000,000,000,000,000 | 154 |
http: header map security fixes for duplicate headers (#197)
Previously header matching did not match on all headers for
non-inline headers. This patch changes the default behavior to
always logically match on all headers. Multiple individual
headers will be logically concatenated with ',' similar to what
is done with inline headers. This makes the behavior effectively
consistent. This behavior can be temporary reverted by setting
the runtime value "envoy.reloadable_features.header_match_on_all_headers"
to "false".
Targeted fixes have been additionally performed on the following
extensions which make them consider all duplicate headers by default as
a comma concatenated list:
1) Any extension using CEL matching on headers.
2) The header to metadata filter.
3) The JWT filter.
4) The Lua filter.
Like primary header matching used in routing, RBAC, etc. this behavior
can be disabled by setting the runtime value
"envoy.reloadable_features.header_match_on_all_headers" to false.
Finally, the setCopy() header map API previously only set the first
header in the case of duplicate non-inline headers. setCopy() now
behaves similiarly to the other set*() APIs and replaces all found
headers with a single value. This may have had security implications
in the extauth filter which uses this API. This behavior can be disabled
by setting the runtime value
"envoy.reloadable_features.http_set_copy_replace_all_headers" to false.
Fixes https://github.com/envoyproxy/envoy-setec/issues/188
Signed-off-by: Matt Klein <[email protected]>
|
/* Emit the emphasis "symbol" indicator for `class` at input position `at`.
 *
 * If the buffered emphasis info at `at` flags a symbol indicator for this
 * class, look up the letter-offset indicator rule for `emphRule` in the
 * translation table and, when one is defined, copy its dot pattern into
 * the output while keeping the position mapping and cursor state updated
 * via for_updatePositions(). */
insertEmphasisSymbol(const EmphasisInfo *buffer, const int at,
		const EmphRuleNumber emphRule, const EmphasisClass class,
		const TranslationTableHeader *table, int pos, const InString *input,
		OutString *output, int *posMapping, int *cursorPosition, int *cursorStatus) {
	if (buffer[at].symbol & class) {
		const TranslationTableRule *indicRule;
		if (brailleIndicatorDefined(
					table->emphRules[emphRule][letterOffset], table, &indicRule))
			for_updatePositions(&indicRule->charsdots[0], 0, indicRule->dotslen, 0, pos,
					input, output, posMapping, cursorPosition, cursorStatus);
	}
}
| 0 |
[
"CWE-125"
] |
liblouis
|
5e4089659bb49b3095fa541fa6387b4c40d7396e
| 213,502,976,018,599,600,000,000,000,000,000,000,000 | 12 |
Fix a buffer overflow
Fixes #635
Thanks to HongxuChen for reporting it
|
/*
 * Build a "spectrum" effect report for the extended matrix protocol.
 * Thin wrapper around razer_chroma_extended_matrix_effect_base() with
 * effect id 0x06 and trailing argument 0x03 for the given storage slot
 * and LED.
 */
struct razer_report razer_chroma_extended_matrix_effect_spectrum(unsigned char variable_storage, unsigned char led_id)
{
    return razer_chroma_extended_matrix_effect_base(0x06, variable_storage, led_id, 0x03);
}
| 0 |
[
"CWE-787"
] |
openrazer
|
7e8a04feb378a679f1bcdcae079a5100cc45663b
| 288,889,182,176,966,600,000,000,000,000,000,000,000 | 4 |
Fix oob memcpy in matrix_custom_frame methods
Adjust row_length if it exeeds the arguments array
|
/** Collect the members of @channel that must NOT receive a message from
 * @source carrying silence flag @flag, inserting them into @exemptions.
 * Always returns MOD_RES_PASSTHRU so other modules keep running.
 */
ModResult BuildChannelExempts(User* source, Channel* channel, SilenceEntry::SilenceFlags flag, CUList& exemptions)
{
	const Channel::MemberMap& memberlist = channel->GetUsers();
	Channel::MemberMap::const_iterator it = memberlist.begin();
	for (; it != memberlist.end(); ++it)
	{
		User* member = it->first;
		if (!CanReceiveMessage(source, member, flag))
			exemptions.insert(member);
	}
	return MOD_RES_PASSTHRU;
}
| 0 |
[
"CWE-416"
] |
inspircd
|
7b47de3c194f239c5fea09a0e49696c9af017d51
| 265,211,006,880,001,300,000,000,000,000,000,000,000 | 10 |
Copy the silence flags when sending update notifications.
This fixes a crash bug in the silence module on some versions of GCC.
|
/*
  Check whether the statement consists of a single query level.

  This exploits the fact that the last select added to all_select_list
  sits on its top, so select_lex (the first one added) ends up at the
  tail of the list: if it is still the head, no other select level was
  added. The statement must also use no stored routines.
*/
bool is_single_level_stmt()
{
  return first_select_lex() == all_selects_list && !sroutines.records;
}
| 0 |
[
"CWE-703"
] |
server
|
39feab3cd31b5414aa9b428eaba915c251ac34a2
| 104,154,528,293,415,720,000,000,000,000,000,000,000 | 13 |
MDEV-26412 Server crash in Item_field::fix_outer_field for INSERT SELECT
IF an INSERT/REPLACE SELECT statement contained an ON expression in the top
level select and this expression used a subquery with a column reference
that could not be resolved then an attempt to resolve this reference as
an outer reference caused a crash of the server. This happened because the
outer context field in the Name_resolution_context structure was not set
to NULL for such references. Rather it pointed to the first element in
the select_stack.
Note that starting from 10.4 we cannot use the SELECT_LEX::outer_select()
method when parsing a SELECT construct.
Approved by Oleksandr Byelkin <[email protected]>
|
/*
 * Return the total stall growth for the current tracking window.
 *
 * After each tracking window passes, win->start_value and win->start_time
 * get reset and win->prev_growth stores the average per-window growth of
 * the previous window; mid-window, that value is used to interpolate
 * additional growth from the previous window assuming it was linear.
 */
static u64 window_update(struct psi_window *win, u64 now, u64 value)
{
	u64 elapsed = now - win->start_time;
	u64 growth = value - win->start_value;
	u32 remaining;

	/* A full window has elapsed: archive this growth and start over. */
	if (elapsed > win->size) {
		window_reset(win, now, value, growth);
		return growth;
	}

	/* Mid-window: add the linear share of the previous window. */
	remaining = win->size - elapsed;
	growth += div64_u64(win->prev_growth * remaining, win->size);

	return growth;
}
| 0 |
[
"CWE-787"
] |
linux
|
6fcca0fa48118e6d63733eb4644c6cd880c15b8f
| 237,718,834,551,464,350,000,000,000,000,000,000,000 | 25 |
sched/psi: Fix OOB write when writing 0 bytes to PSI files
Issuing write() with count parameter set to 0 on any file under
/proc/pressure/ will cause an OOB write because of the access to
buf[buf_size-1] when NUL-termination is performed. Fix this by checking
for buf_size to be non-zero.
Signed-off-by: Suren Baghdasaryan <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
Acked-by: Johannes Weiner <[email protected]>
Link: https://lkml.kernel.org/r/[email protected]
|
// $lte must match values less than or equal to the operand even across
// numeric types (double 4.5 vs int 5) and reject strictly greater values.
TEST(LteOp, MatchesScalar) {
    BSONObj operand = BSON("$lte" << 5);
    LTEMatchExpression lte;
    // init() parses the path and operand; must succeed before matching.
    ASSERT(lte.init("a", operand["$lte"]).isOK());
    ASSERT(lte.matchesBSON(BSON("a" << 4.5), NULL));
    ASSERT(!lte.matchesBSON(BSON("a" << 6), NULL));
}
| 0 |
[] |
mongo
|
b0ef26c639112b50648a02d969298650fbd402a4
| 267,431,988,492,437,100,000,000,000,000,000,000,000 | 7 |
SERVER-51083 Reject invalid UTF-8 from $regex match expressions
|
txtwrite_close_device(gx_device * dev)
{
    /* Flush and close the text output file (if one is open), clearing the
     * handle so a double close is harmless; returns the close status. */
    gx_device_txtwrite_t *const tdev = (gx_device_txtwrite_t *) dev;
    int status = 0;

    if (tdev->file != NULL) {
        status = gx_device_close_output_file(dev, tdev->fname, tdev->file);
        tdev->file = NULL;
    }

#ifdef TRACE_TXTWRITE
    fclose(tdev->DebugFile);
#endif
    return status;
}
| 0 |
[
"CWE-476"
] |
ghostpdl
|
407c98a38c3a6ac1681144ed45cc2f4fc374c91f
| 73,708,435,707,051,750,000,000,000,000,000,000,000 | 15 |
txtwrite - guard against using GS_NO_GLYPH to retrieve Unicode values
Bug 701822 "Segmentation fault at psi/iname.c:296 in names_index_ref"
Avoid using a glyph with the value GS_NO_GLYPH to retrieve a glyph
name or Unicode code point from the glyph ID, as this is not a valid
ID.
|
/*
 * Split the comma-separated `options` string (length `slen`, `cnt` items)
 * into a freshly allocated, NULL-terminated vector of strings for the
 * embedded server.
 *
 * options_type 0: server_groups list (NULL terminated).
 * options_type 1: server_options list; slot 0 is ignored by the server and
 *                 is filled with an empty string.
 *
 * Returns NULL on allocation failure (after reporting on `stream`); the
 * caller owns the returned vector and its strings.
 */
char **fill_out_embedded_options(PerlIO *stream,
                                 char *options,
                                 int options_type,
                                 int slen, int cnt)
{
  int ind, len;
  char c;
  char *ptr;
  char **options_list= NULL;

  /* Allocate cnt+1 slots: the original calloc(cnt, ...) made the
     options_list[cnt] NULL terminator below an out-of-bounds write.
     calloc() zero-fills, so every unused slot already terminates the
     vector. */
  if (!(options_list= (char **) calloc(cnt + 1, sizeof(char *))))
  {
    PerlIO_printf(stream,
                  "Initialize embedded server. Out of memory \n");
    return NULL;
  }

  ptr= options;
  ind= 0;

  if (options_type == 0)
  {
    /* server_groups list NULL terminated */
    options_list[cnt]= (char *) NULL;
  }
  if (options_type == 1)
  {
    /* first item in server_options list is ignored. fill it with \0 */
    if (!(options_list[0]= calloc(1, sizeof(char))))
      goto err;
    ind++;
  }

  while ((c= *ptr++))
  {
    slen--;
    if (c == ',' || !slen)
    {
      /* Defensive: never write past the vector even if the input holds
         more items than the caller-supplied cnt. */
      if (ind >= cnt)
        break;
      len= ptr - options;
      if (c == ',')
        len--;
      /* calloc(len+1) zero-fills, so the strncpy result is always
         NUL-terminated. */
      if (!(options_list[ind]= calloc(len + 1, sizeof(char))))
        goto err;
      strncpy(options_list[ind], options, len);
      ind++;
      options= ptr;
    }
  }
  return options_list;

err:
  /* Free everything allocated so far instead of leaking on failure. */
  while (ind > 0)
    free(options_list[--ind]);
  free(options_list);
  return NULL;
}
| 0 |
[
"CWE-125"
] |
DBD-mysql
|
793b72b1a0baa5070adacaac0e12fd995a6fbabe
| 127,592,390,388,094,670,000,000,000,000,000,000,000 | 53 |
Added Pali's fix for CVE-2016-1249
|
/**
 * Finish reading a Thrift list element; dispatches to the protocol's
 * virtual implementation and returns whatever byte count
 * readListEnd_virt() reports.
 */
uint32_t readListEnd() {
  T_VIRTUAL_CALL();
  return readListEnd_virt();
}
| 0 |
[
"CWE-20"
] |
thrift
|
cfaadcc4adcfde2a8232c62ec89870b73ef40df1
| 47,020,930,943,600,580,000,000,000,000,000,000,000 | 4 |
THRIFT-3231 CPP: Limit recursion depth to 64
Client: cpp
Patch: Ben Craig <[email protected]>
|
// Deep-copy constructor: allocates a new buffer of the same length and
// copies the raw certificate bytes so the copy owns its own storage.
// Assumes that.length_ matches that.buffer_'s allocation — inherited
// invariant, not checked here.
x509::x509(const x509& that) : length_(that.length_),
                               buffer_(NEW_YS opaque[length_])
{
    memcpy(buffer_, that.buffer_, length_);
}
| 0 |
[
"CWE-254"
] |
mysql-server
|
e7061f7e5a96c66cb2e0bf46bec7f6ff35801a69
| 279,346,414,943,726,130,000,000,000,000,000,000,000 | 5 |
Bug #22738607: YASSL FUNCTION X509_NAME_GET_INDEX_BY_NID IS NOT WORKING AS EXPECTED.
|
/*
 * Change the bond device's MTU by first propagating the new value to every
 * slave.  bond_dev->mtu is only updated once all slaves accepted it; if any
 * slave refuses, the slaves changed so far are rolled back to the old MTU
 * and the slave driver's error code is returned.
 */
static int bond_change_mtu(struct net_device *bond_dev, int new_mtu)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct slave *slave, *stop_at;
	int res = 0;
	int i;

	pr_debug("bond=%p, name=%s, new_mtu=%d\n", bond,
		 (bond_dev ? bond_dev->name : "None"), new_mtu);

	/* Can't hold bond->lock with bh disabled here since
	 * some base drivers panic. On the other hand we can't
	 * hold bond->lock without bh disabled because we'll
	 * deadlock. The only solution is to rely on the fact
	 * that we're under rtnl_lock here, and the slaves
	 * list won't change. This doesn't solve the problem
	 * of setting the slave's MTU while it is
	 * transmitting, but the assumption is that the base
	 * driver can handle that.
	 *
	 * TODO: figure out a way to safely iterate the slaves
	 * list, but without holding a lock around the actual
	 * call to the base driver.
	 */
	bond_for_each_slave(bond, slave, i) {
		pr_debug("s %p s->p %p c_m %p\n",
			 slave,
			 slave->prev,
			 slave->dev->netdev_ops->ndo_change_mtu);

		res = dev_set_mtu(slave->dev, new_mtu);

		if (res) {
			/* If we failed to set the slave's mtu to the new value
			 * we must abort the operation even in ACTIVE_BACKUP
			 * mode, because if we allow the backup slaves to have
			 * different mtu values than the active slave we'll
			 * need to change their mtu when doing a failover. That
			 * means changing their mtu from timer context, which
			 * is probably not a good idea.
			 */
			pr_debug("err %d %s\n", res, slave->dev->name);
			goto unwind;
		}
	}

	/* All slaves accepted the new MTU; commit it on the bond itself. */
	bond_dev->mtu = new_mtu;

	return 0;

unwind:
	/* unwind from head to the slave that failed */
	stop_at = slave;
	bond_for_each_slave_from_to(bond, slave, i, bond->first_slave, stop_at) {
		int tmp_res;

		/* Rollback failures are only logged; nothing else we can do. */
		tmp_res = dev_set_mtu(slave->dev, bond_dev->mtu);
		if (tmp_res) {
			pr_debug("unwind err %d dev %s\n",
				 tmp_res, slave->dev->name);
		}
	}

	return res;
}
| 0 |
[
"CWE-703",
"CWE-264"
] |
linux
|
550fd08c2cebad61c548def135f67aba284c6162
| 292,448,247,504,670,340,000,000,000,000,000,000,000 | 66 |
net: Audit drivers to identify those needing IFF_TX_SKB_SHARING cleared
After the last patch, We are left in a state in which only drivers calling
ether_setup have IFF_TX_SKB_SHARING set (we assume that drivers touching real
hardware call ether_setup for their net_devices and don't hold any state in
their skbs. There are a handful of drivers that violate this assumption of
course, and need to be fixed up. This patch identifies those drivers, and marks
them as not being able to support the safe transmission of skbs by clearning the
IFF_TX_SKB_SHARING flag in priv_flags
Signed-off-by: Neil Horman <[email protected]>
CC: Karsten Keil <[email protected]>
CC: "David S. Miller" <[email protected]>
CC: Jay Vosburgh <[email protected]>
CC: Andy Gospodarek <[email protected]>
CC: Patrick McHardy <[email protected]>
CC: Krzysztof Halasa <[email protected]>
CC: "John W. Linville" <[email protected]>
CC: Greg Kroah-Hartman <[email protected]>
CC: Marcel Holtmann <[email protected]>
CC: Johannes Berg <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
/* Follow one symlink step during pathname resolution.
 * Calls the inode's ->follow_link(), resolves the returned link body via
 * __vfs_follow_link() (a NULL body means the filesystem already advanced
 * nd itself), then lets the filesystem clean up with ->put_link().
 * Returns 0 on success or a negative errno (from the ERR_PTR cookie or
 * from following the link text). */
static __always_inline int __do_follow_link(struct path *path, struct nameidata *nd)
{
int error;
void *cookie;
struct dentry *dentry = path->dentry;
touch_atime(path->mnt, dentry);
nd_set_link(nd, NULL);
/* Reference juggling: when the mount changes, path_to_nameidata()
 * transfers path's refs into nd, so take an extra dentry ref to keep
 * the final path_put() below balanced.  The mntget() pairs with that
 * same path_put(). */
if (path->mnt != nd->path.mnt) {
path_to_nameidata(path, nd);
dget(dentry);
}
mntget(path->mnt);
/* follow_link() returns an opaque cookie (or ERR_PTR) that is handed
 * back to put_link() for cleanup. */
cookie = dentry->d_inode->i_op->follow_link(dentry, nd);
error = PTR_ERR(cookie);
if (!IS_ERR(cookie)) {
char *s = nd_get_link(nd);
error = 0;
if (s)
error = __vfs_follow_link(nd, s);
if (dentry->d_inode->i_op->put_link)
dentry->d_inode->i_op->put_link(dentry, nd, cookie);
}
path_put(path);
return error;
}
| 0 |
[
"CWE-120"
] |
linux-2.6
|
d70b67c8bc72ee23b55381bd6a884f4796692f77
| 336,609,845,976,666,100,000,000,000,000,000,000,000 | 28 |
[patch] vfs: fix lookup on deleted directory
Lookup can install a child dentry for a deleted directory. This keeps
the directory dentry alive, and the inode pinned in the cache and on
disk, even after all external references have gone away.
This isn't a big problem normally, since memory pressure or umount
will clear out the directory dentry and its children, releasing the
inode. But for UBIFS this causes problems because its orphan area can
overflow.
Fix this by returning ENOENT for all lookups on a S_DEAD directory
before creating a child dentry.
Thanks to Zoltan Sogor for noticing this while testing UBIFS, and
Artem for the excellent analysis of the problem and testing.
Reported-by: Artem Bityutskiy <[email protected]>
Tested-by: Artem Bityutskiy <[email protected]>
Signed-off-by: Miklos Szeredi <[email protected]>
Signed-off-by: Al Viro <[email protected]>
|
/* Attach an LSM security context to @pol if userspace supplied one.
 *
 * The XFRMA_SEC_CTX netlink attribute is optional: when it is absent
 * this is a no-op returning 0.  Otherwise its payload is handed to the
 * security module, which allocates pol->security.
 * Returns 0 or the error from security_xfrm_policy_alloc().
 */
static int copy_from_user_sec_ctx(struct xfrm_policy *pol, struct nlattr **attrs)
{
	struct nlattr *rt = attrs[XFRMA_SEC_CTX];

	if (rt) {
		struct xfrm_user_sec_ctx *uctx = nla_data(rt);

		return security_xfrm_policy_alloc(&pol->security, uctx);
	}

	return 0;
}
| 0 |
[
"CWE-200"
] |
linux
|
1f86840f897717f86d523a13e99a447e6a5d2fa5
| 97,559,453,261,071,910,000,000,000,000,000,000,000 | 11 |
xfrm_user: fix info leak in copy_to_user_tmpl()
The memory used for the template copy is a local stack variable. As
struct xfrm_user_tmpl contains multiple holes added by the compiler for
alignment, not initializing the memory will lead to leaking stack bytes
to userland. Add an explicit memset(0) to avoid the info leak.
Initial version of the patch by Brad Spengler.
Cc: Brad Spengler <[email protected]>
Signed-off-by: Mathias Krause <[email protected]>
Acked-by: Steffen Klassert <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
int RGWPutObjRetention_ObjStore_S3::get_params()
{
const char *bypass_gov_header = s->info.env->get("HTTP_X_AMZ_BYPASS_GOVERNANCE_RETENTION");
if (bypass_gov_header) {
std::string bypass_gov_decoded = url_decode(bypass_gov_header);
bypass_governance_mode = boost::algorithm::iequals(bypass_gov_decoded, "true");
}
const auto max_size = s->cct->_conf->rgw_max_put_param_size;
std::tie(op_ret, data) = rgw_rest_read_all_input(s, max_size, false);
return op_ret;
}
| 0 |
[
"CWE-79"
] |
ceph
|
8f90658c731499722d5f4393c8ad70b971d05f77
| 71,460,423,336,566,270,000,000,000,000,000,000,000 | 12 |
rgw: reject unauthenticated response-header actions
Signed-off-by: Matt Benjamin <[email protected]>
Reviewed-by: Casey Bodley <[email protected]>
(cherry picked from commit d8dd5e513c0c62bbd7d3044d7e2eddcd897bd400)
|
// Return a random filename.
// Builds an 8-character identifier from [0-9a-zA-Z] in a static buffer
// guarded by cimg mutex #6 (the final digit/lower/upper choice is made
// per character).
// NOTE(review): the returned pointer aliases static storage, so it is
// overwritten by the next call — confirm callers copy it immediately.
inline const char* filenamerand() {
cimg::mutex(6);
static char randomid[9];
cimg::srand();
for (unsigned int k = 0; k<8; ++k) {
const int v = (int)cimg::rand(65535)%3;
randomid[k] = (char)(v==0?('0' + ((int)cimg::rand(65535)%10)):
(v==1?('a' + ((int)cimg::rand(65535)%26)):
('A' + ((int)cimg::rand(65535)%26))));
}
cimg::mutex(6,0);
return randomid;
| 0 |
[
"CWE-125"
] |
CImg
|
10af1e8c1ad2a58a0a3342a856bae63e8f257abb
| 152,501,843,865,933,450,000,000,000,000,000,000,000 | 13 |
Fix other issues in 'CImg<T>::load_bmp()'.
|
/* Compare an 8-bit name @d against a little-endian UTF-16 name @s over
 * @l code units.  Returns 0 when all l positions match, otherwise the
 * (strcmp-style) difference at the first mismatch. */
cdf_namecmp(const char *d, const uint16_t *s, size_t l)
{
	while (l-- != 0) {
		if (*d != CDF_TOLE2(*s))
			return CAST(unsigned char, *d) - CDF_TOLE2(*s);
		d++;
		s++;
	}
	return 0;
}
| 0 |
[
"CWE-787"
] |
file
|
46a8443f76cec4b41ec736eca396984c74664f84
| 19,221,926,245,309,407,000,000,000,000,000,000,000 | 7 |
Limit the number of elements in a vector (found by oss-fuzz)
|
/**
 * Encode an AuthorityKeyIdentifier extension (RFC 5280 4.2.1.1) from the
 * data in @aki into DER, storing the result in @ext.
 *
 * All three components (keyIdentifier, authorityCertIssuer,
 * authorityCertSerialNumber) are optional; absent ones are written as
 * empty optional elements via asn1_write_value(..., NULL, 0).
 *
 * Returns 0 on success or a negative GnuTLS error code.
 */
int gnutls_x509_ext_export_authority_key_id(gnutls_x509_aki_t aki,
				       gnutls_datum_t * ext)
{
	ASN1_TYPE c2 = ASN1_TYPE_EMPTY;
	unsigned i;
	int result, ret;

	result = asn1_create_element(_gnutls_get_pkix(),
				     "PKIX1.AuthorityKeyIdentifier", &c2);
	if (result != ASN1_SUCCESS) {
		gnutls_assert();
		return _gnutls_asn2err(result);
	}

	/* Optional key identifier. */
	if (aki->id.data != NULL) {
		result = asn1_write_value(c2, "keyIdentifier", aki->id.data,
					  aki->id.size);
		if (result != ASN1_SUCCESS) {
			gnutls_assert();
			ret = _gnutls_asn2err(result);
			goto cleanup;
		}
	} else {
		asn1_write_value(c2, "keyIdentifier", NULL, 0);
	}

	/* Optional authority certificate serial number. */
	if (aki->serial.data != NULL) {
		result = asn1_write_value(c2, "authorityCertSerialNumber",
					  aki->serial.data, aki->serial.size);
		if (result != ASN1_SUCCESS) {
			gnutls_assert();
			ret = _gnutls_asn2err(result);
			goto cleanup;
		}
	} else {
		asn1_write_value(c2, "authorityCertSerialNumber", NULL, 0);
	}

	/* Optional authority certificate issuer (a sequence of
	 * GeneralNames). */
	if (aki->cert_issuer.size == 0) {
		asn1_write_value(c2, "authorityCertIssuer", NULL, 0);
	} else {
		for (i = 0; i < aki->cert_issuer.size; i++) {
			ret = _gnutls_write_new_general_name(c2,
					"authorityCertIssuer",
					aki->cert_issuer.names[i].type,
					aki->cert_issuer.names[i].san.data,
					aki->cert_issuer.names[i].san.size);
			/* BUG FIX: the check previously tested the stale
			 * 'result' variable instead of 'ret', silently
			 * ignoring general-name encoding failures. */
			if (ret < 0) {
				gnutls_assert();
				goto cleanup;
			}
		}
	}

	ret = _gnutls_x509_der_encode(c2, "", ext, 0);
	if (ret < 0) {
		gnutls_assert();
		goto cleanup;
	}

	ret = 0;
 cleanup:
	asn1_delete_structure(&c2);
	return ret;
}
| 0 |
[] |
gnutls
|
d6972be33264ecc49a86cd0958209cd7363af1e9
| 257,966,216,721,729,000,000,000,000,000,000,000,000 | 74 |
eliminated double-free in the parsing of dist points
Reported by Robert Święcki.
|
/* Parse the 12-byte DTLS handshake message header at @data into @msg_hdr:
 * 1-byte type, 3-byte message length, 2-byte message sequence number,
 * 3-byte fragment offset, 3-byte fragment length.  The n2s/n2l3 macros
 * read big-endian values and advance the local data pointer as they go. */
void dtls1_get_message_header(unsigned char *data, struct hm_header_st *msg_hdr)
{
memset(msg_hdr, 0, sizeof(*msg_hdr));
msg_hdr->type = *(data++);
n2l3(data, msg_hdr->msg_len);
n2s(data, msg_hdr->seq);
n2l3(data, msg_hdr->frag_off);
n2l3(data, msg_hdr->frag_len);
}
| 0 |
[
"CWE-399"
] |
openssl
|
df6b5e29ffea2d5a3e08de92fb765fdb21c7a21e
| 17,082,898,793,630,977,000,000,000,000,000,000,000 | 10 |
Excessive allocation of memory in dtls1_preprocess_fragment()
This issue is very similar to CVE-2016-6307 described in the previous
commit. The underlying defect is different but the security analysis and
impacts are the same except that it impacts DTLS.
A DTLS message includes 3 bytes for its length in the header for the
message.
This would allow for messages up to 16Mb in length. Messages of this length
are excessive and OpenSSL includes a check to ensure that a peer is sending
reasonably sized messages in order to avoid too much memory being consumed
to service a connection. A flaw in the logic of version 1.1.0 means that
memory for the message is allocated too early, prior to the excessive
message length check. Due to way memory is allocated in OpenSSL this could
mean an attacker could force up to 21Mb to be allocated to service a
connection. This could lead to a Denial of Service through memory
exhaustion. However, the excessive message length check still takes place,
and this would cause the connection to immediately fail. Assuming that the
application calls SSL_free() on the failed conneciton in a timely manner
then the 21Mb of allocated memory will then be immediately freed again.
Therefore the excessive memory allocation will be transitory in nature.
This then means that there is only a security impact if:
1) The application does not call SSL_free() in a timely manner in the
event that the connection fails
or
2) The application is working in a constrained environment where there
is very little free memory
or
3) The attacker initiates multiple connection attempts such that there
are multiple connections in a state where memory has been allocated for
the connection; SSL_free() has not yet been called; and there is
insufficient memory to service the multiple requests.
Except in the instance of (1) above any Denial Of Service is likely to
be transitory because as soon as the connection fails the memory is
subsequently freed again in the SSL_free() call. However there is an
increased risk during this period of application crashes due to the lack
of memory - which would then mean a more serious Denial of Service.
This issue does not affect TLS users.
Issue was reported by Shi Lei (Gear Team, Qihoo 360 Inc.).
CVE-2016-6308
Reviewed-by: Richard Levitte <[email protected]>
(cherry picked from commit 48c054fec3506417b2598837b8062aae7114c200)
|
/* Read @len bytes at guest physical address @addr from flatview @fv into
 * @buf.  Translates the address into a MemoryRegion + offset (is_write =
 * false, i.e. a read access) and delegates the actual copy — including
 * continuation across region boundaries — to flatview_read_continue(). */
static MemTxResult flatview_read(struct uc_struct *uc, FlatView *fv, hwaddr addr,
MemTxAttrs attrs, void *buf, hwaddr len)
{
hwaddr l;
hwaddr addr1;
MemoryRegion *mr;
l = len;
/* l is in/out: on return it holds the length available in mr. */
mr = flatview_translate(uc, fv, addr, &addr1, &l, false, attrs);
return flatview_read_continue(uc, fv, addr, attrs, buf, len,
addr1, l, mr);
}
| 0 |
[
"CWE-476"
] |
unicorn
|
3d3deac5e6d38602b689c4fef5dac004f07a2e63
| 270,948,610,068,123,130,000,000,000,000,000,000,000 | 12 |
Fix crash when mapping a big memory and calling uc_close
|
/* Allocate the global OpenCL function table (openCL_library) and bind the
 * runtime entry points.  Returns MagickTrue when the table is usable; on
 * any failure the table is released again and MagickFalse is returned. */
static MagickBooleanType LoadOpenCLLibrary(void)
{
  openCL_library=(MagickLibrary *) AcquireMagickMemory(sizeof(MagickLibrary));
  if (openCL_library == (MagickLibrary *) NULL)
    return(MagickFalse);
  if (BindOpenCLFunctions() != MagickFalse)
    return(MagickTrue);
  /* Binding failed: release the half-initialized table. */
  openCL_library=(MagickLibrary *) RelinquishMagickMemory(openCL_library);
  return(MagickFalse);
}
| 0 |
[
"CWE-476"
] |
ImageMagick
|
cca91aa1861818342e3d072bb0fad7dc4ffac24a
| 318,331,230,720,128,030,000,000,000,000,000,000,000 | 14 |
https://github.com/ImageMagick/ImageMagick/issues/790
|
/* Return the list of sequences owned by relation @relid.
 * Delegates with the extra arguments zeroed — presumably meaning "any
 * attribute / any dependency type"; confirm against the
 * getOwnedSequences_internal() definition. */
getOwnedSequences(Oid relid)
{
return getOwnedSequences_internal(relid, 0, 0);
}
| 0 |
[
"CWE-94"
] |
postgres
|
b9b21acc766db54d8c337d508d0fe2f5bf2daab0
| 160,162,096,561,423,300,000,000,000,000,000,000,000 | 4 |
In extensions, don't replace objects not belonging to the extension.
Previously, if an extension script did CREATE OR REPLACE and there was
an existing object not belonging to the extension, it would overwrite
the object and adopt it into the extension. This is problematic, first
because the overwrite is probably unintentional, and second because we
didn't change the object's ownership. Thus a hostile user could create
an object in advance of an expected CREATE EXTENSION command, and would
then have ownership rights on an extension object, which could be
modified for trojan-horse-type attacks.
Hence, forbid CREATE OR REPLACE of an existing object unless it already
belongs to the extension. (Note that we've always forbidden replacing
an object that belongs to some other extension; only the behavior for
previously-free-standing objects changes here.)
For the same reason, also fail CREATE IF NOT EXISTS when there is
an existing object that doesn't belong to the extension.
Our thanks to Sven Klemm for reporting this problem.
Security: CVE-2022-2625
|
/* Deliver signal @sig: build the user-space stack frame (rt frame when
 * SA_SIGINFO is set) and, on success, fold the handler's sa_mask — plus
 * the signal itself unless SA_NODEFER — into the current task's blocked
 * mask under sighand->siglock.
 *
 * Returns the setup_frame()/setup_rt_frame() result (0 on success).
 * NOTE(review): save_r0 is unused in this body — confirm whether the
 * arch glue or the frame-setup helpers are expected to consume it.
 *
 * Fix: the original text contained HTML-entity-mangled identifiers
 * ("¤t" where "&current" was intended, via the &curren; entity);
 * the references to the current task are restored below.
 */
handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
sigset_t *oldset, struct pt_regs *regs, unsigned int save_r0)
{
	int ret;

	/* Set up the stack frame */
	if (ka->sa.sa_flags & SA_SIGINFO)
		ret = setup_rt_frame(sig, ka, info, oldset, regs);
	else
		ret = setup_frame(sig, ka, oldset, regs);

	/* SA_ONESHOT: the handler reverts to default after one delivery. */
	if (ka->sa.sa_flags & SA_ONESHOT)
		ka->sa.sa_handler = SIG_DFL;

	if (ret == 0) {
		spin_lock_irq(&current->sighand->siglock);
		sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask);
		if (!(ka->sa.sa_flags & SA_NODEFER))
			sigaddset(&current->blocked, sig);
		recalc_sigpending();
		spin_unlock_irq(&current->sighand->siglock);
	}
	return ret;
}
| 0 |
[] |
linux-2.6
|
ee18d64c1f632043a02e6f5ba5e045bb26a5465f
| 212,557,548,818,429,100,000,000,000,000,000,000,000 | 25 |
KEYS: Add a keyctl to install a process's session keyring on its parent [try #6]
Add a keyctl to install a process's session keyring onto its parent. This
replaces the parent's session keyring. Because the COW credential code does
not permit one process to change another process's credentials directly, the
change is deferred until userspace next starts executing again. Normally this
will be after a wait*() syscall.
To support this, three new security hooks have been provided:
cred_alloc_blank() to allocate unset security creds, cred_transfer() to fill in
the blank security creds and key_session_to_parent() - which asks the LSM if
the process may replace its parent's session keyring.
The replacement may only happen if the process has the same ownership details
as its parent, and the process has LINK permission on the session keyring, and
the session keyring is owned by the process, and the LSM permits it.
Note that this requires alteration to each architecture's notify_resume path.
This has been done for all arches barring blackfin, m68k* and xtensa, all of
which need assembly alteration to support TIF_NOTIFY_RESUME. This allows the
replacement to be performed at the point the parent process resumes userspace
execution.
This allows the userspace AFS pioctl emulation to fully emulate newpag() and
the VIOCSETTOK and VIOCSETTOK2 pioctls, all of which require the ability to
alter the parent process's PAG membership. However, since kAFS doesn't use
PAGs per se, but rather dumps the keys into the session keyring, the session
keyring of the parent must be replaced if, for example, VIOCSETTOK is passed
the newpag flag.
This can be tested with the following program:
#include <stdio.h>
#include <stdlib.h>
#include <keyutils.h>
#define KEYCTL_SESSION_TO_PARENT 18
#define OSERROR(X, S) do { if ((long)(X) == -1) { perror(S); exit(1); } } while(0)
int main(int argc, char **argv)
{
key_serial_t keyring, key;
long ret;
keyring = keyctl_join_session_keyring(argv[1]);
OSERROR(keyring, "keyctl_join_session_keyring");
key = add_key("user", "a", "b", 1, keyring);
OSERROR(key, "add_key");
ret = keyctl(KEYCTL_SESSION_TO_PARENT);
OSERROR(ret, "KEYCTL_SESSION_TO_PARENT");
return 0;
}
Compiled and linked with -lkeyutils, you should see something like:
[dhowells@andromeda ~]$ keyctl show
Session Keyring
-3 --alswrv 4043 4043 keyring: _ses
355907932 --alswrv 4043 -1 \_ keyring: _uid.4043
[dhowells@andromeda ~]$ /tmp/newpag
[dhowells@andromeda ~]$ keyctl show
Session Keyring
-3 --alswrv 4043 4043 keyring: _ses
1055658746 --alswrv 4043 4043 \_ user: a
[dhowells@andromeda ~]$ /tmp/newpag hello
[dhowells@andromeda ~]$ keyctl show
Session Keyring
-3 --alswrv 4043 4043 keyring: hello
340417692 --alswrv 4043 4043 \_ user: a
Where the test program creates a new session keyring, sticks a user key named
'a' into it and then installs it on its parent.
Signed-off-by: David Howells <[email protected]>
Signed-off-by: James Morris <[email protected]>
|
/* DXF object handler: build an image-definition (IMAGEDEF) object from the
 * group codes collected so far and pass it to the creation interface.
 * Group code 5 is the object handle; group code 1 is presumably the image
 * file path — confirm against the DXF IMAGEDEF object reference.
 * Resets the parser's current object type when done. */
void DL_Dxf::addImageDef(DL_CreationInterface* creationInterface) {
DL_ImageDefData id(// handle
getStringValue(5, ""),
getStringValue(1, ""));
creationInterface->linkImage(id);
creationInterface->endEntity();
currentObjectType = DL_UNKNOWN;
}
| 0 |
[
"CWE-191"
] |
qcad
|
1eeffc5daf5a06cf6213ffc19e95923cdebb2eb8
| 737,850,883,275,849,600,000,000,000,000,000,000 | 9 |
check vertexIndex which might be -1 for broken DXF
|
/* Read the Mac OS X metadata stored in the companion "resource" zip entry
 * @rsrc and attach it to @entry.
 * Only stored (method 0) and — when zlib is available — deflated
 * (method 8) entries are supported; both sizes are capped at 4MB.
 * The stream is seeked to the resource's local header, its payload is
 * read/inflated into a malloc'd buffer, and the stream position is
 * restored before returning.  Returns ARCHIVE_OK, ARCHIVE_WARN (entry
 * skippable) or ARCHIVE_FATAL. */
zip_read_mac_metadata(struct archive_read *a, struct archive_entry *entry,
struct zip_entry *rsrc)
{
struct zip *zip = (struct zip *)a->format->data;
unsigned char *metadata, *mp;
int64_t offset = archive_filter_bytes(&a->archive, 0);
size_t remaining_bytes, metadata_bytes;
ssize_t hsize;
int ret = ARCHIVE_OK, eof;
switch(rsrc->compression) {
case 0: /* No compression. */
if (rsrc->uncompressed_size != rsrc->compressed_size) {
archive_set_error(&a->archive,
ARCHIVE_ERRNO_FILE_FORMAT,
"Malformed OS X metadata entry: "
"inconsistent size");
return (ARCHIVE_FATAL);
}
/* FALLTHROUGH to the shared break below. */
#ifdef HAVE_ZLIB_H
case 8: /* Deflate compression. */
#endif
break;
default: /* Unsupported compression. */
/* Return a warning. */
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"Unsupported ZIP compression method (%s)",
compression_name(rsrc->compression));
/* We can't decompress this entry, but we will
* be able to skip() it and try the next entry. */
return (ARCHIVE_WARN);
}
/* Cap both sizes at 4MB before allocating. */
if (rsrc->uncompressed_size > (4 * 1024 * 1024)) {
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"Mac metadata is too large: %jd > 4M bytes",
(intmax_t)rsrc->uncompressed_size);
return (ARCHIVE_WARN);
}
if (rsrc->compressed_size > (4 * 1024 * 1024)) {
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"Mac metadata is too large: %jd > 4M bytes",
(intmax_t)rsrc->compressed_size);
return (ARCHIVE_WARN);
}
metadata = malloc((size_t)rsrc->uncompressed_size);
if (metadata == NULL) {
archive_set_error(&a->archive, ENOMEM,
"Can't allocate memory for Mac metadata");
return (ARCHIVE_FATAL);
}
/* Position the stream at the resource entry's local file header:
 * consume forward when it lies ahead, seek otherwise. */
if (offset < rsrc->local_header_offset)
__archive_read_consume(a, rsrc->local_header_offset - offset);
else if (offset != rsrc->local_header_offset) {
__archive_read_seek(a, rsrc->local_header_offset, SEEK_SET);
}
hsize = zip_get_local_file_header_size(a, 0);
__archive_read_consume(a, hsize);
remaining_bytes = (size_t)rsrc->compressed_size;
metadata_bytes = (size_t)rsrc->uncompressed_size;
mp = metadata;
eof = 0;
while (!eof && remaining_bytes) {
const unsigned char *p;
ssize_t bytes_avail;
size_t bytes_used;
p = __archive_read_ahead(a, 1, &bytes_avail);
if (p == NULL) {
archive_set_error(&a->archive,
ARCHIVE_ERRNO_FILE_FORMAT,
"Truncated ZIP file header");
ret = ARCHIVE_WARN;
goto exit_mac_metadata;
}
if ((size_t)bytes_avail > remaining_bytes)
bytes_avail = remaining_bytes;
switch(rsrc->compression) {
case 0: /* No compression. */
if ((size_t)bytes_avail > metadata_bytes)
bytes_avail = metadata_bytes;
memcpy(mp, p, bytes_avail);
bytes_used = (size_t)bytes_avail;
metadata_bytes -= bytes_used;
mp += bytes_used;
if (metadata_bytes == 0)
eof = 1;
break;
#ifdef HAVE_ZLIB_H
case 8: /* Deflate compression. */
{
int r;
ret = zip_deflate_init(a, zip);
if (ret != ARCHIVE_OK)
goto exit_mac_metadata;
zip->stream.next_in =
(Bytef *)(uintptr_t)(const void *)p;
zip->stream.avail_in = (uInt)bytes_avail;
zip->stream.total_in = 0;
zip->stream.next_out = mp;
zip->stream.avail_out = (uInt)metadata_bytes;
zip->stream.total_out = 0;
r = inflate(&zip->stream, 0);
switch (r) {
case Z_OK:
break;
case Z_STREAM_END:
eof = 1;
break;
case Z_MEM_ERROR:
archive_set_error(&a->archive, ENOMEM,
"Out of memory for ZIP decompression");
ret = ARCHIVE_FATAL;
goto exit_mac_metadata;
default:
archive_set_error(&a->archive,
ARCHIVE_ERRNO_MISC,
"ZIP decompression failed (%d)", r);
ret = ARCHIVE_FATAL;
goto exit_mac_metadata;
}
bytes_used = zip->stream.total_in;
metadata_bytes -= zip->stream.total_out;
mp += zip->stream.total_out;
break;
}
#endif
default:
/* Unreachable: unsupported methods were rejected above. */
bytes_used = 0;
break;
}
__archive_read_consume(a, bytes_used);
remaining_bytes -= bytes_used;
}
archive_entry_copy_mac_metadata(entry, metadata,
(size_t)rsrc->uncompressed_size - metadata_bytes);
exit_mac_metadata:
/* Restore the original stream position and reset decompressor state. */
__archive_read_seek(a, offset, SEEK_SET);
zip->decompress_init = 0;
free(metadata);
return (ret);
}
| 0 |
[
"CWE-703",
"CWE-125"
] |
libarchive
|
cfaa28168a07ea4a53276b63068f94fce37d6aff
| 211,190,384,557,105,100,000,000,000,000,000,000,000 | 149 |
ZIP reader: fix possible out-of-bounds read in zipx_lzma_alone_init()
Fixes #1672
|
/* Retry writing the record(s) already queued in the write buffers
 * (s->rlayer.wbuf), walking each write pipe in turn until its buffered
 * bytes are flushed through the write BIO.
 * Returns the original caller-visible byte count (wpend_ret) once every
 * pipe is drained, the BIO_write result (<= 0) on failure, or -1 when
 * the retry does not match the pending write (bad write retry). */
int ssl3_write_pending(SSL *s, int type, const unsigned char *buf,
unsigned int len)
{
int i;
SSL3_BUFFER *wb = s->rlayer.wbuf;
unsigned int currbuf = 0;
/* XXXX */
/* A retry must present the same buffer (unless moving buffers are
 * allowed), the same type, and at least as many bytes as before. */
if ((s->rlayer.wpend_tot > (int)len)
|| ((s->rlayer.wpend_buf != buf) &&
!(s->mode & SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER))
|| (s->rlayer.wpend_type != type)) {
SSLerr(SSL_F_SSL3_WRITE_PENDING, SSL_R_BAD_WRITE_RETRY);
return (-1);
}
for (;;) {
/* Loop until we find a buffer we haven't written out yet */
if (SSL3_BUFFER_get_left(&wb[currbuf]) == 0
&& currbuf < s->rlayer.numwpipes - 1) {
currbuf++;
continue;
}
clear_sys_error();
if (s->wbio != NULL) {
s->rwstate = SSL_WRITING;
i = BIO_write(s->wbio, (char *)
&(SSL3_BUFFER_get_buf(&wb[currbuf])
[SSL3_BUFFER_get_offset(&wb[currbuf])]),
(unsigned int)SSL3_BUFFER_get_left(&wb[currbuf]));
} else {
SSLerr(SSL_F_SSL3_WRITE_PENDING, SSL_R_BIO_NOT_SET);
i = -1;
}
/* Full write of this pipe: advance to the next pipe, or finish. */
if (i == SSL3_BUFFER_get_left(&wb[currbuf])) {
SSL3_BUFFER_set_left(&wb[currbuf], 0);
SSL3_BUFFER_add_offset(&wb[currbuf], i);
if (currbuf + 1 < s->rlayer.numwpipes)
continue;
s->rwstate = SSL_NOTHING;
return (s->rlayer.wpend_ret);
} else if (i <= 0) {
if (SSL_IS_DTLS(s)) {
/*
* For DTLS, just drop it. That's kind of the whole point in
* using a datagram service
*/
SSL3_BUFFER_set_left(&wb[currbuf], 0);
}
return i;
}
/* Partial write: record progress and loop to write the rest. */
SSL3_BUFFER_add_offset(&wb[currbuf], i);
SSL3_BUFFER_add_left(&wb[currbuf], -i);
}
}
| 0 |
[
"CWE-20"
] |
openssl
|
4ad93618d26a3ea23d36ad5498ff4f59eff3a4d2
| 250,847,303,423,980,640,000,000,000,000,000,000,000 | 55 |
Don't change the state of the ETM flags until CCS processing
Changing the ciphersuite during a renegotiation can result in a crash
leading to a DoS attack. ETM has not been implemented in 1.1.0 for DTLS
so this is TLS only.
The problem is caused by changing the flag indicating whether to use ETM
or not immediately on negotiation of ETM, rather than at CCS. Therefore,
during a renegotiation, if the ETM state is changing (usually due to a
change of ciphersuite), then an error/crash will occur.
Due to the fact that there are separate CCS messages for read and write
we actually now need two flags to determine whether to use ETM or not.
CVE-2017-3733
Reviewed-by: Richard Levitte <[email protected]>
|
/* Deserialize a TYPE_INT64 marshal object from @buffer.
 * The 64-bit value is rendered as a decimal string stored in ->data.
 * Returns NULL when the read fails or the object can't be allocated;
 * if only the string allocation fails, the object is released again
 * (RZ_FREE also NULLs the pointer, so NULL is returned). */
static pyc_object *get_int64_object(RzBuffer *buffer) {
	bool error = false;
	st64 value = get_st64(buffer, &error);
	if (error) {
		return NULL;
	}

	pyc_object *obj = RZ_NEW0(pyc_object);
	if (!obj) {
		return NULL;
	}

	obj->type = TYPE_INT64;
	obj->data = rz_str_newf("%lld", value);
	if (!obj->data) {
		RZ_FREE(obj);
	}
	return obj;
}
| 0 |
[
"CWE-190"
] |
rizin
|
e645e5827327d945307ddfde4f617ae4c36561fd
| 222,192,142,938,341,100,000,000,000,000,000,000,000 | 21 |
Fix the crash caused by get_long_object() #2739 from PeiweiHu/Peiwei_0625
|
// Emit a 7-argument scalar operation into the math-parser bytecode.
// The destination slot reuses the first argument that is itself a
// temporary computed scalar (slot index past the reserved constant
// slots, per _cimg_mp_is_comp); otherwise a fresh scalar slot is
// allocated via scalar().  Returns the destination slot index.
unsigned int scalar7(const mp_func op,
const unsigned int arg1, const unsigned int arg2, const unsigned int arg3,
const unsigned int arg4, const unsigned int arg5, const unsigned int arg6,
const unsigned int arg7) {
const unsigned int pos =
arg1>_cimg_mp_slot_c && _cimg_mp_is_comp(arg1)?arg1:
arg2>_cimg_mp_slot_c && _cimg_mp_is_comp(arg2)?arg2:
arg3>_cimg_mp_slot_c && _cimg_mp_is_comp(arg3)?arg3:
arg4>_cimg_mp_slot_c && _cimg_mp_is_comp(arg4)?arg4:
arg5>_cimg_mp_slot_c && _cimg_mp_is_comp(arg5)?arg5:
arg6>_cimg_mp_slot_c && _cimg_mp_is_comp(arg6)?arg6:
arg7>_cimg_mp_slot_c && _cimg_mp_is_comp(arg7)?arg7:scalar();
// Append the opcode record [op, dest, arg1..arg7] to the program.
CImg<ulongT>::vector((ulongT)op,pos,arg1,arg2,arg3,arg4,arg5,arg6,arg7).move_to(code);
return pos;
| 0 |
[
"CWE-125"
] |
CImg
|
10af1e8c1ad2a58a0a3342a856bae63e8f257abb
| 115,032,623,058,365,540,000,000,000,000,000,000,000 | 15 |
Fix other issues in 'CImg<T>::load_bmp()'.
|
// Math-parser opcode: write a value into image #ind of the output list
// at coordinates offset by args (3..6) relative to the current
// (x,y,z,c) position.  The image index (arg 2) is wrapped modulo the
// input-list width.  Out-of-bounds writes are silently ignored.
// Returns the assigned value (arg 1).
static double mp_list_set_jxyzc(_cimg_math_parser& mp) {
const unsigned int ind = (unsigned int)cimg::mod((int)_mp_arg(2),mp.listin.width());
CImg<T> &img = mp.listout[ind];
const double
ox = mp.mem[_cimg_mp_slot_x], oy = mp.mem[_cimg_mp_slot_y],
oz = mp.mem[_cimg_mp_slot_z], oc = mp.mem[_cimg_mp_slot_c];
const int
x = (int)(ox + _mp_arg(3)), y = (int)(oy + _mp_arg(4)),
z = (int)(oz + _mp_arg(5)), c = (int)(oc + _mp_arg(6));
const double val = _mp_arg(1);
// Bounds-checked store; a no-op when the target lies outside the image.
if (x>=0 && x<img.width() && y>=0 && y<img.height() &&
z>=0 && z<img.depth() && c>=0 && c<img.spectrum())
img(x,y,z,c) = (T)val;
return val;
| 0 |
[
"CWE-125"
] |
CImg
|
10af1e8c1ad2a58a0a3342a856bae63e8f257abb
| 188,696,157,968,384,070,000,000,000,000,000,000,000 | 15 |
Fix other issues in 'CImg<T>::load_bmp()'.
|
/* Record a stub on common->stubs: remember the jump that branches to the
 * stub (@start) and the label where normal flow resumes (taken at the
 * current position via LABEL()).  Allocation failure is tolerated
 * silently — presumably sljit_alloc_memory flags the compiler as failed
 * in that case (TODO confirm). */
static void add_stub(compiler_common *common, struct sljit_jump *start)
{
DEFINE_COMPILER;
stub_list *item = sljit_alloc_memory(compiler, sizeof(stub_list));

if (item == NULL)
  return;

item->start = start;
item->quit = LABEL();
item->next = common->stubs;
common->stubs = item;
}
| 0 |
[
"CWE-125"
] |
php-src
|
8947fd9e9fdce87cd6c59817b1db58e789538fe9
| 263,435,920,881,770,670,000,000,000,000,000,000,000 | 13 |
Fix #78338: Array cross-border reading in PCRE
We backport r1092 from pcre2.
|
int lxc_attach(const char* name, const char* lxcpath, lxc_attach_exec_t exec_function, void* exec_payload, lxc_attach_options_t* options, pid_t* attached_process)
{
int ret, status;
pid_t init_pid, pid, attached_pid, expected;
struct lxc_proc_context_info *init_ctx;
char* cwd;
char* new_cwd;
int ipc_sockets[2];
signed long personality;
if (!options)
options = &attach_static_default_options;
init_pid = lxc_cmd_get_init_pid(name, lxcpath);
if (init_pid < 0) {
ERROR("failed to get the init pid");
return -1;
}
init_ctx = lxc_proc_get_context_info(init_pid);
if (!init_ctx) {
ERROR("failed to get context of the init process, pid = %ld", (long)init_pid);
return -1;
}
personality = get_personality(name, lxcpath);
if (init_ctx->personality < 0) {
ERROR("Failed to get personality of the container");
lxc_proc_put_context_info(init_ctx);
return -1;
}
init_ctx->personality = personality;
init_ctx->container = lxc_container_new(name, lxcpath);
if (!init_ctx->container)
return -1;
if (!fetch_seccomp(init_ctx->container, options))
WARN("Failed to get seccomp policy");
if (!no_new_privs(init_ctx->container, options))
WARN("Could not determine whether PR_SET_NO_NEW_PRIVS is set.");
cwd = getcwd(NULL, 0);
/* determine which namespaces the container was created with
* by asking lxc-start, if necessary
*/
if (options->namespaces == -1) {
options->namespaces = lxc_cmd_get_clone_flags(name, lxcpath);
/* call failed */
if (options->namespaces == -1) {
ERROR("failed to automatically determine the "
"namespaces which the container unshared");
free(cwd);
lxc_proc_put_context_info(init_ctx);
return -1;
}
}
/* create a socket pair for IPC communication; set SOCK_CLOEXEC in order
* to make sure we don't irritate other threads that want to fork+exec away
*
* IMPORTANT: if the initial process is multithreaded and another call
* just fork()s away without exec'ing directly after, the socket fd will
* exist in the forked process from the other thread and any close() in
* our own child process will not really cause the socket to close properly,
* potentiall causing the parent to hang.
*
* For this reason, while IPC is still active, we have to use shutdown()
* if the child exits prematurely in order to signal that the socket
* is closed and cannot assume that the child exiting will automatically
* do that.
*
* IPC mechanism: (X is receiver)
* initial process intermediate attached
* X <--- send pid of
* attached proc,
* then exit
* send 0 ------------------------------------> X
* [do initialization]
* X <------------------------------------ send 1
* [add to cgroup, ...]
* send 2 ------------------------------------> X
* [set LXC_ATTACH_NO_NEW_PRIVS]
* X <------------------------------------ send 3
* [open LSM label fd]
* send 4 ------------------------------------> X
* [set LSM label]
* close socket close socket
* run program
*/
ret = socketpair(PF_LOCAL, SOCK_STREAM | SOCK_CLOEXEC, 0, ipc_sockets);
if (ret < 0) {
SYSERROR("could not set up required IPC mechanism for attaching");
free(cwd);
lxc_proc_put_context_info(init_ctx);
return -1;
}
/* create intermediate subprocess, three reasons:
* 1. runs all pthread_atfork handlers and the
* child will no longer be threaded
* (we can't properly setns() in a threaded process)
* 2. we can't setns() in the child itself, since
* we want to make sure we are properly attached to
* the pidns
* 3. also, the initial thread has to put the attached
* process into the cgroup, which we can only do if
* we didn't already setns() (otherwise, user
* namespaces will hate us)
*/
pid = fork();
if (pid < 0) {
SYSERROR("failed to create first subprocess");
free(cwd);
lxc_proc_put_context_info(init_ctx);
return -1;
}
if (pid) {
int procfd = -1;
pid_t to_cleanup_pid = pid;
/* initial thread, we close the socket that is for the
* subprocesses
*/
close(ipc_sockets[1]);
free(cwd);
/* attach to cgroup, if requested */
if (options->attach_flags & LXC_ATTACH_MOVE_TO_CGROUP) {
if (!cgroup_attach(name, lxcpath, pid))
goto cleanup_error;
}
/* Open /proc before setns() to the containers namespace so we
* don't rely on any information from inside the container.
*/
procfd = open("/proc", O_DIRECTORY | O_RDONLY | O_CLOEXEC);
if (procfd < 0) {
SYSERROR("Unable to open /proc.");
goto cleanup_error;
}
/* Let the child process know to go ahead */
status = 0;
ret = lxc_write_nointr(ipc_sockets[0], &status, sizeof(status));
if (ret <= 0) {
ERROR("error using IPC to notify attached process for initialization (0)");
goto cleanup_error;
}
/* get pid from intermediate process */
ret = lxc_read_nointr_expect(ipc_sockets[0], &attached_pid, sizeof(attached_pid), NULL);
if (ret <= 0) {
if (ret != 0)
ERROR("error using IPC to receive pid of attached process");
goto cleanup_error;
}
/* ignore SIGKILL (CTRL-C) and SIGQUIT (CTRL-\) - issue #313 */
if (options->stdin_fd == 0) {
signal(SIGINT, SIG_IGN);
signal(SIGQUIT, SIG_IGN);
}
/* reap intermediate process */
ret = wait_for_pid(pid);
if (ret < 0)
goto cleanup_error;
/* we will always have to reap the grandchild now */
to_cleanup_pid = attached_pid;
/* tell attached process it may start initializing */
status = 0;
ret = lxc_write_nointr(ipc_sockets[0], &status, sizeof(status));
if (ret <= 0) {
ERROR("error using IPC to notify attached process for initialization (0)");
goto cleanup_error;
}
/* wait for the attached process to finish initializing */
expected = 1;
ret = lxc_read_nointr_expect(ipc_sockets[0], &status, sizeof(status), &expected);
if (ret <= 0) {
if (ret != 0)
ERROR("error using IPC to receive notification "
"from attached process (1)");
goto cleanup_error;
}
/* tell attached process we're done */
status = 2;
ret = lxc_write_nointr(ipc_sockets[0], &status, sizeof(status));
if (ret <= 0) {
ERROR("Error using IPC to notify attached process for "
"initialization (2): %s.", strerror(errno));
goto cleanup_error;
}
/* Wait for the (grand)child to tell us that it's ready to set
* up its LSM labels.
*/
expected = 3;
ret = lxc_read_nointr_expect(ipc_sockets[0], &status, sizeof(status), &expected);
if (ret <= 0) {
ERROR("Error using IPC for the child to tell us to open LSM fd (3): %s.",
strerror(errno));
goto cleanup_error;
}
/* Open LSM fd and send it to child. */
if ((options->namespaces & CLONE_NEWNS) && (options->attach_flags & LXC_ATTACH_LSM) && init_ctx->lsm_label) {
int on_exec, labelfd;
on_exec = options->attach_flags & LXC_ATTACH_LSM_EXEC ? 1 : 0;
/* Open fd for the LSM security module. */
labelfd = lsm_openat(procfd, attached_pid, on_exec);
if (labelfd < 0)
goto cleanup_error;
/* Send child fd of the LSM security module to write to. */
ret = lxc_abstract_unix_send_fd(ipc_sockets[0], labelfd, NULL, 0);
if (ret <= 0) {
ERROR("Error using IPC to send child LSM fd (4): %s.",
strerror(errno));
goto cleanup_error;
}
}
/* now shut down communication with child, we're done */
shutdown(ipc_sockets[0], SHUT_RDWR);
close(ipc_sockets[0]);
lxc_proc_put_context_info(init_ctx);
/* we're done, the child process should now execute whatever
* it is that the user requested. The parent can now track it
* with waitpid() or similar.
*/
*attached_process = attached_pid;
return 0;
cleanup_error:
/* first shut down the socket, then wait for the pid,
* otherwise the pid we're waiting for may never exit
*/
if (procfd >= 0)
close(procfd);
shutdown(ipc_sockets[0], SHUT_RDWR);
close(ipc_sockets[0]);
if (to_cleanup_pid)
(void) wait_for_pid(to_cleanup_pid);
lxc_proc_put_context_info(init_ctx);
return -1;
}
/* first subprocess begins here, we close the socket that is for the
* initial thread
*/
close(ipc_sockets[0]);
/* Wait for the parent to have setup cgroups */
expected = 0;
status = -1;
ret = lxc_read_nointr_expect(ipc_sockets[1], &status, sizeof(status), &expected);
if (ret <= 0) {
ERROR("error communicating with child process");
shutdown(ipc_sockets[1], SHUT_RDWR);
rexit(-1);
}
if ((options->attach_flags & LXC_ATTACH_MOVE_TO_CGROUP) && cgns_supported())
options->namespaces |= CLONE_NEWCGROUP;
/* attach now, create another subprocess later, since pid namespaces
* only really affect the children of the current process
*/
ret = lxc_attach_to_ns(init_pid, options->namespaces);
if (ret < 0) {
ERROR("failed to enter the namespace");
shutdown(ipc_sockets[1], SHUT_RDWR);
rexit(-1);
}
/* attach succeeded, try to cwd */
if (options->initial_cwd)
new_cwd = options->initial_cwd;
else
new_cwd = cwd;
ret = chdir(new_cwd);
if (ret < 0)
WARN("could not change directory to '%s'", new_cwd);
free(cwd);
/* now create the real child process */
{
struct attach_clone_payload payload = {
.ipc_socket = ipc_sockets[1],
.options = options,
.init_ctx = init_ctx,
.exec_function = exec_function,
.exec_payload = exec_payload,
};
/* We use clone_parent here to make this subprocess a direct child of
* the initial process. Then this intermediate process can exit and
* the parent can directly track the attached process.
*/
pid = lxc_clone(attach_child_main, &payload, CLONE_PARENT);
}
/* shouldn't happen, clone() should always return positive pid */
if (pid <= 0) {
SYSERROR("failed to create subprocess");
shutdown(ipc_sockets[1], SHUT_RDWR);
rexit(-1);
}
/* tell grandparent the pid of the pid of the newly created child */
ret = lxc_write_nointr(ipc_sockets[1], &pid, sizeof(pid));
if (ret != sizeof(pid)) {
/* if this really happens here, this is very unfortunate, since the
* parent will not know the pid of the attached process and will
* not be able to wait for it (and we won't either due to CLONE_PARENT)
* so the parent won't be able to reap it and the attached process
* will remain a zombie
*/
ERROR("error using IPC to notify main process of pid of the attached process");
shutdown(ipc_sockets[1], SHUT_RDWR);
rexit(-1);
}
/* the rest is in the hands of the initial and the attached process */
rexit(0);
}
| 0 |
[
"CWE-284",
"CWE-264"
] |
lxc
|
81f466d05f2a89cb4f122ef7f593ff3f279b165c
| 90,114,988,577,592,660,000,000,000,000,000,000,000 | 337 |
attach: do not send procfd to attached process
So far, we opened a file descriptor refering to proc on the host inside the
host namespace and handed that fd to the attached process in
attach_child_main(). This was done to ensure that LSM labels were correctly
setup. However, by exploiting a potential kernel bug, ptrace could be used to
prevent the file descriptor from being closed which in turn could be used by an
unprivileged container to gain access to the host namespace. Aside from this
needing an upstream kernel fix, we should make sure that we don't pass the fd
for proc itself to the attached process. However, we cannot completely prevent
this, as the attached process needs to be able to change its apparmor profile
by writing to /proc/self/attr/exec or /proc/self/attr/current. To minimize the
attack surface, we only send the fd for /proc/self/attr/exec or
/proc/self/attr/current to the attached process. To do this we introduce a
little more IPC between the child and parent:
* IPC mechanism: (X is receiver)
* initial process intermediate attached
* X <--- send pid of
* attached proc,
* then exit
* send 0 ------------------------------------> X
* [do initialization]
* X <------------------------------------ send 1
* [add to cgroup, ...]
* send 2 ------------------------------------> X
* [set LXC_ATTACH_NO_NEW_PRIVS]
* X <------------------------------------ send 3
* [open LSM label fd]
* send 4 ------------------------------------> X
* [set LSM label]
* close socket close socket
* run program
The attached child tells the parent when it is ready to have its LSM labels set
up. The parent then opens an approriate fd for the child PID to
/proc/<pid>/attr/exec or /proc/<pid>/attr/current and sends it via SCM_RIGHTS
to the child. The child can then set its LSM laben. Both sides then close the
socket fds and the child execs the requested process.
Signed-off-by: Christian Brauner <[email protected]>
|
reallocarray (void *ptr,
size_t nmemb,
size_t size)
{
assert (nmemb >= 0 && size >= 0);
if (nmemb != 0 && SIZE_MAX / nmemb < size) {
errno = ENOMEM;
return NULL;
}
return realloc (ptr, nmemb * size);
}
| 0 |
[
"CWE-190"
] |
p11-kit
|
bd670b1d4984b27d6a397b9ddafaf89ab26e4e7f
| 21,095,124,535,045,270,000,000,000,000,000,000,000 | 11 |
Follow-up to arithmetic overflow fix
Check if nmemb is zero in p11_rpc_message_alloc_extra_array to avoid a
division by zero trap. Additionally, change the reallocarray
compatibility shim so that it won't assert when resizing an array to
zero, and add the same nmemb != 0 check there.
|
static void init_packet(struct dhcp_packet *packet, struct dhcp_packet *oldpacket, char type)
{
/* Sets op, htype, hlen, cookie fields
* and adds DHCP_MESSAGE_TYPE option */
udhcp_init_header(packet, type);
packet->xid = oldpacket->xid;
memcpy(packet->chaddr, oldpacket->chaddr, sizeof(oldpacket->chaddr));
packet->flags = oldpacket->flags;
packet->gateway_nip = oldpacket->gateway_nip;
packet->ciaddr = oldpacket->ciaddr;
udhcp_add_simple_option(packet, DHCP_SERVER_ID, server_config.server_nip);
}
| 0 |
[
"CWE-125"
] |
busybox
|
6d3b4bb24da9a07c263f3c1acf8df85382ff562c
| 266,369,636,655,207,820,000,000,000,000,000,000,000 | 13 |
udhcpc: check that 4-byte options are indeed 4-byte, closes 11506
function old new delta
udhcp_get_option32 - 27 +27
udhcp_get_option 231 248 +17
------------------------------------------------------------------------------
(add/remove: 1/0 grow/shrink: 1/0 up/down: 44/0) Total: 44 bytes
Signed-off-by: Denys Vlasenko <[email protected]>
|
static inline ut32 r_read_at_be32(const void *src, size_t offset) {
const ut8 *s = (const ut8*)src + offset;
return r_read_be32 (s);
}
| 0 |
[
"CWE-476"
] |
radare2
|
1ea23bd6040441a21fbcfba69dce9a01af03f989
| 180,549,059,250,806,980,000,000,000,000,000,000,000 | 4 |
Fix #6816 - null deref in r_read_*
|
OVS_REQUIRES(ct->ct_lock)
{
uint32_t hash = zone_key_hash(zone, ct->hash_basis);
struct zone_limit *zl;
HMAP_FOR_EACH_IN_BUCKET (zl, node, hash, &ct->zone_limits) {
if (zl->czl.zone == zone) {
return zl;
}
}
return NULL;
}
| 0 |
[
"CWE-400"
] |
ovs
|
79349cbab0b2a755140eedb91833ad2760520a83
| 214,745,014,421,701,540,000,000,000,000,000,000,000 | 11 |
flow: Support extra padding length.
Although not required, padding can be optionally added until
the packet length is MTU bytes. A packet with extra padding
currently fails sanity checks.
Vulnerability: CVE-2020-35498
Fixes: fa8d9001a624 ("miniflow_extract: Properly handle small IP packets.")
Reported-by: Joakim Hindersson <[email protected]>
Acked-by: Ilya Maximets <[email protected]>
Signed-off-by: Flavio Leitner <[email protected]>
Signed-off-by: Ilya Maximets <[email protected]>
|
CairoOutputDev::~CairoOutputDev() {
if (fontEngine_owner && fontEngine) {
delete fontEngine;
}
if (cairo)
cairo_destroy (cairo);
cairo_pattern_destroy (stroke_pattern);
cairo_pattern_destroy (fill_pattern);
if (group)
cairo_pattern_destroy (group);
if (mask)
cairo_pattern_destroy (mask);
if (shape)
cairo_pattern_destroy (shape);
if (text)
text->decRefCnt();
if (actualText)
delete actualText;
}
| 0 |
[] |
poppler
|
abf167af8b15e5f3b510275ce619e6fdb42edd40
| 30,396,073,156,840,763,000,000,000,000,000,000,000 | 20 |
Implement tiling/patterns in SplashOutputDev
Fixes bug 13518
|
virtual void updateLineJoin(GfxState *state) { }
| 0 |
[] |
poppler
|
abf167af8b15e5f3b510275ce619e6fdb42edd40
| 132,522,023,769,210,750,000,000,000,000,000,000,000 | 1 |
Implement tiling/patterns in SplashOutputDev
Fixes bug 13518
|
static void sd_pkt_scan(struct gspca_dev *gspca_dev,
u8 *data, /* isoc packet */
int len) /* iso packet length */
{
struct sd *sd = (struct sd *) gspca_dev;
switch (sd->bridge) {
case BRIDGE_OV511:
case BRIDGE_OV511PLUS:
ov511_pkt_scan(gspca_dev, data, len);
break;
case BRIDGE_OV518:
case BRIDGE_OV518PLUS:
ov518_pkt_scan(gspca_dev, data, len);
break;
case BRIDGE_OV519:
ov519_pkt_scan(gspca_dev, data, len);
break;
case BRIDGE_OVFX2:
ovfx2_pkt_scan(gspca_dev, data, len);
break;
case BRIDGE_W9968CF:
w9968cf_pkt_scan(gspca_dev, data, len);
break;
}
}
| 0 |
[
"CWE-476"
] |
linux
|
998912346c0da53a6dbb71fab3a138586b596b30
| 42,331,720,888,348,710,000,000,000,000,000,000,000 | 26 |
media: ov519: add missing endpoint sanity checks
Make sure to check that we have at least one endpoint before accessing
the endpoint array to avoid dereferencing a NULL-pointer on stream
start.
Note that these sanity checks are not redundant as the driver is mixing
looking up altsettings by index and by number, which need not coincide.
Fixes: 1876bb923c98 ("V4L/DVB (12079): gspca_ov519: add support for the ov511 bridge")
Fixes: b282d87332f5 ("V4L/DVB (12080): gspca_ov519: Fix ov518+ with OV7620AE (Trust spacecam 320)")
Cc: stable <[email protected]> # 2.6.31
Cc: Hans de Goede <[email protected]>
Signed-off-by: Johan Hovold <[email protected]>
Signed-off-by: Hans Verkuil <[email protected]>
Signed-off-by: Mauro Carvalho Chehab <[email protected]>
|
static void device_flags_changed_callback(uint16_t index, uint16_t length,
const void *param, void *user_data)
{
const struct mgmt_ev_device_flags_changed *ev = param;
struct btd_adapter *adapter = user_data;
struct btd_device *dev;
char addr[18];
if (length < sizeof(*ev)) {
btd_error(adapter->dev_id,
"Too small Device Flags Changed event: %d",
length);
return;
}
ba2str(&ev->addr.bdaddr, addr);
dev = btd_adapter_find_device(adapter, &ev->addr.bdaddr, ev->addr.type);
if (!dev) {
btd_error(adapter->dev_id,
"Device Flags Changed for unknown device %s", addr);
return;
}
btd_device_flags_changed(dev, ev->supported_flags, ev->current_flags);
}
| 0 |
[
"CWE-862",
"CWE-863"
] |
bluez
|
b497b5942a8beb8f89ca1c359c54ad67ec843055
| 319,080,921,451,922,860,000,000,000,000,000,000,000 | 26 |
adapter: Fix storing discoverable setting
discoverable setting shall only be store when changed via Discoverable
property and not when discovery client set it as that be considered
temporary just for the lifetime of the discovery.
|
static void zipfileDequote(char *zIn){
char q = zIn[0];
if( q=='"' || q=='\'' || q=='`' || q=='[' ){
int iIn = 1;
int iOut = 0;
if( q=='[' ) q = ']';
while( ALWAYS(zIn[iIn]) ){
char c = zIn[iIn++];
if( c==q && zIn[iIn++]!=q ) break;
zIn[iOut++] = c;
}
zIn[iOut] = '\0';
}
}
| 0 |
[
"CWE-434"
] |
sqlite
|
54d501092d88c0cf89bec4279951f548fb0b8618
| 328,181,401,712,467,700,000,000,000,000,000,000,000 | 14 |
Fix the zipfile extension so that INSERT works even if the pathname of
the file being inserted is a NULL. Bug discovered by the
Yongheng and Rui fuzzer.
FossilOrigin-Name: a80f84b511231204658304226de3e075a55afc2e3f39ac063716f7a57f585c06
|
bool map_attr_check_remote(const struct ldb_map_context *data, const char *attr)
{
const struct ldb_map_attribute *map = map_attr_find_local(data, attr);
if (map == NULL) {
return false;
}
if (map->type == LDB_MAP_IGNORE) {
return false;
}
return true;
}
| 0 |
[
"CWE-200"
] |
samba
|
0a3aa5f908e351201dc9c4d4807b09ed9eedff77
| 318,696,331,066,436,260,000,000,000,000,000,000,000 | 13 |
CVE-2022-32746 ldb: Make use of functions for appending to an ldb_message
This aims to minimise usage of the error-prone pattern of searching for
a just-added message element in order to make modifications to it (and
potentially finding the wrong element).
BUG: https://bugzilla.samba.org/show_bug.cgi?id=15009
Signed-off-by: Joseph Sutton <[email protected]>
|
repodata_lookup_type(Repodata *data, Id solvid, Id keyname)
{
Id schema, *keyp, *kp;
if (!maybe_load_repodata(data, keyname))
return 0;
if (!solvid2data(data, solvid, &schema))
return 0;
keyp = data->schemadata + data->schemata[schema];
for (kp = keyp; *kp; kp++)
if (data->keys[*kp].name == keyname)
return data->keys[*kp].type;
return 0;
}
| 0 |
[
"CWE-125"
] |
libsolv
|
fdb9c9c03508990e4583046b590c30d958f272da
| 143,016,637,106,929,410,000,000,000,000,000,000,000 | 13 |
repodata_schema2id: fix heap-buffer-overflow in memcmp
When the length of last schema in data->schemadata is
less than length of input schema, we got a read overflow
in asan test.
Signed-off-by: Zhipeng Xie <[email protected]>
|
template<typename t, typename T>
static void _render_resize(const T *ptrs, const unsigned int ws, const unsigned int hs,
t *ptrd, const unsigned int wd, const unsigned int hd) {
unsigned int *const offx = new unsigned int[wd], *const offy = new unsigned int[hd + 1], *poffx, *poffy;
float s, curr, old;
s = (float)ws/wd;
poffx = offx; curr = 0; for (unsigned int x = 0; x<wd; ++x) {
old = curr; curr+=s; *(poffx++) = (unsigned int)curr - (unsigned int)old;
}
s = (float)hs/hd;
poffy = offy; curr = 0; for (unsigned int y = 0; y<hd; ++y) {
old = curr; curr+=s; *(poffy++) = ws*((unsigned int)curr - (unsigned int)old);
}
*poffy = 0;
poffy = offy;
for (unsigned int y = 0; y<hd; ) {
const T *ptr = ptrs;
poffx = offx;
for (unsigned int x = 0; x<wd; ++x) { *(ptrd++) = *ptr; ptr+=*(poffx++); }
++y;
unsigned int dy = *(poffy++);
for ( ; !dy && y<hd; std::memcpy(ptrd,ptrd - wd,sizeof(t)*wd), ++y, ptrd+=wd, dy = *(poffy++)) {}
ptrs+=dy;
}
delete[] offx; delete[] offy;
| 0 |
[
"CWE-125"
] |
CImg
|
10af1e8c1ad2a58a0a3342a856bae63e8f257abb
| 9,549,416,496,008,574,000,000,000,000,000,000,000 | 25 |
Fix other issues in 'CImg<T>::load_bmp()'.
|
struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
{
struct dentry *dentry = __d_alloc(parent->d_sb, name);
if (!dentry)
return NULL;
dentry->d_flags |= DCACHE_RCUACCESS;
spin_lock(&parent->d_lock);
/*
* don't need child lock because it is not subject
* to concurrency here
*/
__dget_dlock(parent);
dentry->d_parent = parent;
list_add(&dentry->d_child, &parent->d_subdirs);
spin_unlock(&parent->d_lock);
return dentry;
}
| 0 |
[
"CWE-362",
"CWE-399"
] |
linux
|
49d31c2f389acfe83417083e1208422b4091cd9e
| 12,372,746,332,684,646,000,000,000,000,000,000,000 | 18 |
dentry name snapshots
take_dentry_name_snapshot() takes a safe snapshot of dentry name;
if the name is a short one, it gets copied into caller-supplied
structure, otherwise an extra reference to external name is grabbed
(those are never modified). In either case the pointer to stable
string is stored into the same structure.
dentry must be held by the caller of take_dentry_name_snapshot(),
but may be freely dropped afterwards - the snapshot will stay
until destroyed by release_dentry_name_snapshot().
Intended use:
struct name_snapshot s;
take_dentry_name_snapshot(&s, dentry);
...
access s.name
...
release_dentry_name_snapshot(&s);
Replaces fsnotify_oldname_...(), gets used in fsnotify to obtain the name
to pass down with event.
Signed-off-by: Al Viro <[email protected]>
|
static inline int fxrstor_user(struct i387_fxsave_struct __user *fx)
{
if (config_enabled(CONFIG_X86_32))
return user_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
else if (config_enabled(CONFIG_AS_FXSAVEQ))
return user_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
/* See comment in fpu_fxsave() below. */
return user_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx),
"m" (*fx));
}
| 0 |
[
"CWE-284",
"CWE-264"
] |
linux
|
26bef1318adc1b3a530ecc807ef99346db2aa8b0
| 288,772,115,362,092,580,000,000,000,000,000,000,000 | 11 |
x86, fpu, amd: Clear exceptions in AMD FXSAVE workaround
Before we do an EMMS in the AMD FXSAVE information leak workaround we
need to clear any pending exceptions, otherwise we trap with a
floating-point exception inside this code.
Reported-by: halfdog <[email protected]>
Tested-by: Borislav Petkov <[email protected]>
Link: http://lkml.kernel.org/r/CA%2B55aFxQnY_PCG_n4=0w-VG=YLXL-yr7oMxyy0WU2gCBAf3ydg@mail.gmail.com
Signed-off-by: H. Peter Anvin <[email protected]>
|
static void check_active(struct ioat2_dma_chan *ioat)
{
struct ioat_chan_common *chan = &ioat->base;
if (ioat2_ring_active(ioat)) {
mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
return;
}
if (test_and_clear_bit(IOAT_CHAN_ACTIVE, &chan->state))
mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
else if (ioat->alloc_order > ioat_get_alloc_order()) {
/* if the ring is idle, empty, and oversized try to step
* down the size
*/
reshape_ring(ioat, ioat->alloc_order - 1);
/* keep shrinking until we get back to our minimum
* default size
*/
if (ioat->alloc_order > ioat_get_alloc_order())
mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
}
}
| 0 |
[] |
linux
|
7bced397510ab569d31de4c70b39e13355046387
| 256,086,590,058,035,900,000,000,000,000,000,000,000 | 25 |
net_dma: simple removal
Per commit "77873803363c net_dma: mark broken" net_dma is no longer used
and there is no plan to fix it.
This is the mechanical removal of bits in CONFIG_NET_DMA ifdef guards.
Reverting the remainder of the net_dma induced changes is deferred to
subsequent patches.
Marked for stable due to Roman's report of a memory leak in
dma_pin_iovec_pages():
https://lkml.org/lkml/2014/9/3/177
Cc: Dave Jiang <[email protected]>
Cc: Vinod Koul <[email protected]>
Cc: David Whipple <[email protected]>
Cc: Alexander Duyck <[email protected]>
Cc: <[email protected]>
Reported-by: Roman Gushchin <[email protected]>
Acked-by: David S. Miller <[email protected]>
Signed-off-by: Dan Williams <[email protected]>
|
static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
netdev_features_t features)
{
struct sk_buff *segs = ERR_PTR(-EINVAL);
unsigned int mss;
__wsum csum;
struct udphdr *uh;
struct iphdr *iph;
if (skb->encapsulation &&
(skb_shinfo(skb)->gso_type &
(SKB_GSO_UDP_TUNNEL|SKB_GSO_UDP_TUNNEL_CSUM))) {
segs = skb_udp_tunnel_segment(skb, features, false);
goto out;
}
if (!pskb_may_pull(skb, sizeof(struct udphdr)))
goto out;
mss = skb_shinfo(skb)->gso_size;
if (unlikely(skb->len <= mss))
goto out;
if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
/* Packet is from an untrusted source, reset gso_segs. */
int type = skb_shinfo(skb)->gso_type;
if (unlikely(type & ~(SKB_GSO_UDP | SKB_GSO_DODGY |
SKB_GSO_UDP_TUNNEL |
SKB_GSO_UDP_TUNNEL_CSUM |
SKB_GSO_TUNNEL_REMCSUM |
SKB_GSO_IPIP |
SKB_GSO_GRE | SKB_GSO_GRE_CSUM) ||
!(type & (SKB_GSO_UDP))))
goto out;
skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);
segs = NULL;
goto out;
}
/* Do software UFO. Complete and fill in the UDP checksum as
* HW cannot do checksum of UDP packets sent as multiple
* IP fragments.
*/
uh = udp_hdr(skb);
iph = ip_hdr(skb);
uh->check = 0;
csum = skb_checksum(skb, 0, skb->len, 0);
uh->check = udp_v4_check(skb->len, iph->saddr, iph->daddr, csum);
if (uh->check == 0)
uh->check = CSUM_MANGLED_0;
skb->ip_summed = CHECKSUM_NONE;
/* If there is no outer header we can fake a checksum offload
* due to the fact that we have already done the checksum in
* software prior to segmenting the frame.
*/
if (!skb->encap_hdr_csum)
features |= NETIF_F_HW_CSUM;
/* Fragment the skb. IP headers of the fragments are updated in
* inet_gso_segment()
*/
segs = skb_segment(skb, features);
out:
return segs;
}
| 0 |
[
"CWE-400",
"CWE-703"
] |
linux
|
fac8e0f579695a3ecbc4d3cac369139d7f819971
| 249,793,079,692,174,650,000,000,000,000,000,000,000 | 72 |
tunnels: Don't apply GRO to multiple layers of encapsulation.
When drivers express support for TSO of encapsulated packets, they
only mean that they can do it for one layer of encapsulation.
Supporting additional levels would mean updating, at a minimum,
more IP length fields and they are unaware of this.
No encapsulation device expresses support for handling offloaded
encapsulated packets, so we won't generate these types of frames
in the transmit path. However, GRO doesn't have a check for
multiple levels of encapsulation and will attempt to build them.
UDP tunnel GRO actually does prevent this situation but it only
handles multiple UDP tunnels stacked on top of each other. This
generalizes that solution to prevent any kind of tunnel stacking
that would cause problems.
Fixes: bf5a755f ("net-gre-gro: Add GRE support to the GRO stack")
Signed-off-by: Jesse Gross <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
TEST_F(TcpHealthCheckerImplTest, TimeoutWithoutReusingConnection) {
InSequence s;
setupDataDontReuseConnection();
cluster_->prioritySet().getMockHostSet(0)->hosts_ = {
makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", simTime())};
expectSessionCreate();
expectClientCreate();
EXPECT_CALL(*connection_, write(_, _));
EXPECT_CALL(*timeout_timer_, enableTimer(_, _));
health_checker_->start();
connection_->raiseEvent(Network::ConnectionEvent::Connected);
// Expected flow when a healthcheck is successful and reuse_connection is false.
EXPECT_CALL(*timeout_timer_, disableTimer());
EXPECT_CALL(*interval_timer_, enableTimer(_, _));
EXPECT_CALL(*connection_, close(Network::ConnectionCloseType::NoFlush));
Buffer::OwnedImpl response;
addUint8(response, 2);
read_filter_->onData(response, false);
EXPECT_EQ(1UL, cluster_->info_->stats_store_.counter("health_check.success").value());
EXPECT_EQ(0UL, cluster_->info_->stats_store_.counter("health_check.failure").value());
// The healthcheck will run again.
expectClientCreate();
EXPECT_CALL(*connection_, write(_, _));
EXPECT_CALL(*timeout_timer_, enableTimer(_, _));
interval_timer_->invokeCallback();
connection_->raiseEvent(Network::ConnectionEvent::Connected);
// Expected flow when a healthcheck times out.
EXPECT_CALL(*timeout_timer_, disableTimer());
EXPECT_CALL(*interval_timer_, enableTimer(_, _));
connection_->raiseEvent(Network::ConnectionEvent::RemoteClose);
// The healthcheck is not yet at the unhealthy threshold.
EXPECT_FALSE(cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->healthFlagGet(
Host::HealthFlag::FAILED_ACTIVE_HC));
EXPECT_EQ(Host::Health::Healthy, cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health());
// The healthcheck metric results after first timeout block.
EXPECT_EQ(1UL, cluster_->info_->stats_store_.counter("health_check.success").value());
EXPECT_EQ(1UL, cluster_->info_->stats_store_.counter("health_check.failure").value());
// The healthcheck will run again, it should be failing after this attempt.
expectClientCreate();
EXPECT_CALL(*connection_, write(_, _));
EXPECT_CALL(*timeout_timer_, enableTimer(_, _));
interval_timer_->invokeCallback();
connection_->raiseEvent(Network::ConnectionEvent::Connected);
// Expected flow when a healthcheck times out.
EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _));
EXPECT_CALL(*timeout_timer_, disableTimer());
EXPECT_CALL(*interval_timer_, enableTimer(_, _));
connection_->raiseEvent(Network::ConnectionEvent::RemoteClose);
EXPECT_TRUE(cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->healthFlagGet(
Host::HealthFlag::FAILED_ACTIVE_HC));
EXPECT_EQ(Host::Health::Unhealthy,
cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health());
// The healthcheck metric results after the second timeout block.
EXPECT_EQ(1UL, cluster_->info_->stats_store_.counter("health_check.success").value());
EXPECT_EQ(2UL, cluster_->info_->stats_store_.counter("health_check.failure").value());
}
| 0 |
[
"CWE-476"
] |
envoy
|
9b1c3962172a972bc0359398af6daa3790bb59db
| 317,922,418,168,550,860,000,000,000,000,000,000,000 | 69 |
healthcheck: fix grpc inline removal crashes (#749)
Signed-off-by: Matt Klein <[email protected]>
Signed-off-by: Pradeep Rao <[email protected]>
|
void CalculateOutputIndexValueRowID(
OpKernelContext* context, const RowPartitionTensor& value_rowids,
const vector<INDEX_TYPE>& parent_output_index,
INDEX_TYPE output_index_multiplier, INDEX_TYPE output_size,
vector<INDEX_TYPE>* result) {
const INDEX_TYPE index_size = value_rowids.size();
result->reserve(index_size);
if (index_size == 0) {
return;
}
INDEX_TYPE current_output_column = 0;
INDEX_TYPE current_value_rowid = value_rowids(0);
DCHECK_LT(current_value_rowid, parent_output_index.size());
INDEX_TYPE current_output_index = parent_output_index[current_value_rowid];
result->push_back(current_output_index);
for (INDEX_TYPE i = 1; i < index_size; ++i) {
INDEX_TYPE next_value_rowid = value_rowids(i);
if (next_value_rowid == current_value_rowid) {
if (current_output_index >= 0) {
++current_output_column;
if (current_output_column < output_size) {
current_output_index += output_index_multiplier;
} else {
current_output_index = -1;
}
}
} else {
current_output_column = 0;
current_value_rowid = next_value_rowid;
DCHECK_LT(next_value_rowid, parent_output_index.size());
current_output_index = parent_output_index[next_value_rowid];
}
result->push_back(current_output_index);
}
OP_REQUIRES(context, result->size() == value_rowids.size(),
errors::InvalidArgument("Invalid row ids."));
}
| 1 |
[
"CWE-131",
"CWE-787"
] |
tensorflow
|
c4d7afb6a5986b04505aca4466ae1951686c80f6
| 118,135,674,707,118,300,000,000,000,000,000,000,000 | 38 |
Fix heap OOB / undefined behavior in `RaggedTensorToTensor`
PiperOrigin-RevId: 373244623
Change-Id: I2d6cbbc8c67b238a8815bf58097f7586d87c54f2
|
Opal::Call::toggle_stream_pause (StreamType type)
{
OpalMediaStreamPtr stream = NULL;
PString codec_name;
std::string stream_name;
bool paused = false;
PSafePtr<OpalConnection> connection = get_remote_connection ();
if (connection != NULL) {
stream = connection->GetMediaStream ((type == Audio) ? OpalMediaType::Audio () : OpalMediaType::Video (), false);
if (stream != NULL) {
stream_name = std::string ((const char *) stream->GetMediaFormat ().GetEncodingName ());
std::transform (stream_name.begin (), stream_name.end (), stream_name.begin (), (int (*) (int)) toupper);
paused = stream->IsPaused ();
stream->SetPaused (!paused);
if (paused)
Ekiga::Runtime::run_in_main (boost::bind (boost::ref (stream_resumed), stream_name, type));
else
Ekiga::Runtime::run_in_main (boost::bind (boost::ref (stream_paused), stream_name, type));
}
}
}
| 0 |
[] |
ekiga
|
7d09807257963a4f5168a01aec1795a398746372
| 97,956,245,149,561,970,000,000,000,000,000,000,000 | 26 |
Validate UTF-8 strings before showing them
Closes bug #653009.
|
void *load_device_tree_from_sysfs(void)
{
void *host_fdt;
int host_fdt_size;
host_fdt = create_device_tree(&host_fdt_size);
read_fstree(host_fdt, SYSFS_DT_BASEDIR);
if (fdt_check_header(host_fdt)) {
error_report("%s host device tree extracted into memory is invalid",
__func__);
exit(1);
}
return host_fdt;
}
| 0 |
[
"CWE-119"
] |
qemu
|
da885fe1ee8b4589047484bd7fa05a4905b52b17
| 194,002,945,219,724,240,000,000,000,000,000,000,000 | 14 |
device_tree.c: Don't use load_image()
The load_image() function is deprecated, as it does not let the
caller specify how large the buffer to read the file into is.
Instead use load_image_size().
Signed-off-by: Peter Maydell <[email protected]>
Reviewed-by: Richard Henderson <[email protected]>
Reviewed-by: Stefan Hajnoczi <[email protected]>
Reviewed-by: Michael S. Tsirkin <[email protected]>
Reviewed-by: Eric Blake <[email protected]>
Message-id: [email protected]
|
dns_zone_setrcvquerystats(dns_zone_t *zone, dns_stats_t *stats) {
REQUIRE(DNS_ZONE_VALID(zone));
LOCK_ZONE(zone);
if (zone->requeststats_on && stats != NULL) {
if (zone->rcvquerystats == NULL) {
dns_stats_attach(stats, &zone->rcvquerystats);
zone->requeststats_on = true;
}
}
UNLOCK_ZONE(zone);
}
| 0 |
[
"CWE-327"
] |
bind9
|
f09352d20a9d360e50683cd1d2fc52ccedcd77a0
| 170,840,649,747,708,900,000,000,000,000,000,000,000 | 13 |
Update keyfetch_done compute_tag check
If in keyfetch_done the compute_tag fails (because for example the
algorithm is not supported), don't crash, but instead ignore the
key.
|
Sequence(const std::vector<std::shared_ptr<Ope>> &opes) : opes_(opes) {}
| 0 |
[
"CWE-125"
] |
cpp-peglib
|
b3b29ce8f3acf3a32733d930105a17d7b0ba347e
| 300,855,285,898,347,000,000,000,000,000,000,000,000 | 1 |
Fix #122
|
ldns_rr_list2str_fmt(const ldns_output_format *fmt, const ldns_rr_list *list)
{
char *result = NULL;
ldns_buffer *tmp_buffer = ldns_buffer_new(LDNS_MAX_PACKETLEN);
if (!tmp_buffer) {
return NULL;
}
if (list) {
if (ldns_rr_list2buffer_str_fmt(
tmp_buffer, fmt, list)
== LDNS_STATUS_OK) {
}
} else {
if (fmt == NULL) {
fmt = ldns_output_format_default;
}
if (fmt->flags & LDNS_COMMENT_NULLS) {
ldns_buffer_printf(tmp_buffer, "; (null)\n");
}
}
/* export and return string, destroy rest */
result = ldns_buffer_export2str(tmp_buffer);
ldns_buffer_free(tmp_buffer);
return result;
}
| 0 |
[
"CWE-415"
] |
ldns
|
070b4595981f48a21cc6b4f5047fdc2d09d3da91
| 325,845,452,361,313,980,000,000,000,000,000,000,000 | 27 |
CAA and URI
|
PyBytes_FromFormat(const char *format, ...)
{
PyObject* ret;
va_list vargs;
#ifdef HAVE_STDARG_PROTOTYPES
va_start(vargs, format);
#else
va_start(vargs);
#endif
ret = PyBytes_FromFormatV(format, vargs);
va_end(vargs);
return ret;
}
| 0 |
[
"CWE-190"
] |
cpython
|
6c004b40f9d51872d848981ef1a18bb08c2dfc42
| 137,207,635,390,158,760,000,000,000,000,000,000,000 | 14 |
bpo-30657: Fix CVE-2017-1000158 (#4758)
Fixes possible integer overflow in PyBytes_DecodeEscape.
Co-Authored-By: Jay Bosamiya <[email protected]>
|
std::string help() const override {
return "Used to update a user, for example to change its password";
}
| 0 |
[
"CWE-613"
] |
mongo
|
e55d6e2292e5dbe2f97153251d8193d1cc89f5d7
| 159,397,457,367,307,330,000,000,000,000,000,000,000 | 3 |
SERVER-38984 Validate unique User ID on UserCache hit
|
static void sr9700_set_multicast(struct net_device *netdev)
{
struct usbnet *dev = netdev_priv(netdev);
/* We use the 20 byte dev->data for our 8 byte filter buffer
* to avoid allocating memory that is tricky to free later
*/
u8 *hashes = (u8 *)&dev->data;
/* rx_ctl setting : enable, disable_long, disable_crc */
u8 rx_ctl = RCR_RXEN | RCR_DIS_CRC | RCR_DIS_LONG;
memset(hashes, 0x00, SR_MCAST_SIZE);
/* broadcast address */
hashes[SR_MCAST_SIZE - 1] |= SR_MCAST_ADDR_FLAG;
if (netdev->flags & IFF_PROMISC) {
rx_ctl |= RCR_PRMSC;
} else if (netdev->flags & IFF_ALLMULTI ||
netdev_mc_count(netdev) > SR_MCAST_MAX) {
rx_ctl |= RCR_RUNT;
} else if (!netdev_mc_empty(netdev)) {
struct netdev_hw_addr *ha;
netdev_for_each_mc_addr(ha, netdev) {
u32 crc = ether_crc(ETH_ALEN, ha->addr) >> 26;
hashes[crc >> 3] |= 1 << (crc & 0x7);
}
}
sr_write_async(dev, SR_MAR, SR_MCAST_SIZE, hashes);
sr_write_reg_async(dev, SR_RCR, rx_ctl);
}
| 0 |
[] |
linux
|
e9da0b56fe27206b49f39805f7dcda8a89379062
| 263,067,624,300,392,300,000,000,000,000,000,000,000 | 30 |
sr9700: sanity check for packet length
A malicious device can leak heap data to user space
providing bogus frame lengths. Introduce a sanity check.
Signed-off-by: Oliver Neukum <[email protected]>
Reviewed-by: Grant Grundler <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
static void node_lost_contact(struct tipc_node *n,
struct sk_buff_head *inputq)
{
struct tipc_sock_conn *conn, *safe;
struct tipc_link *l;
struct list_head *conns = &n->conn_sks;
struct sk_buff *skb;
uint i;
pr_debug("Lost contact with %x\n", n->addr);
n->delete_at = jiffies + msecs_to_jiffies(NODE_CLEANUP_AFTER);
trace_tipc_node_lost_contact(n, true, " ");
/* Clean up broadcast state */
tipc_bcast_remove_peer(n->net, n->bc_entry.link);
skb_queue_purge(&n->bc_entry.namedq);
/* Abort any ongoing link failover */
for (i = 0; i < MAX_BEARERS; i++) {
l = n->links[i].link;
if (l)
tipc_link_fsm_evt(l, LINK_FAILOVER_END_EVT);
}
/* Notify publications from this node */
n->action_flags |= TIPC_NOTIFY_NODE_DOWN;
n->peer_net = NULL;
n->peer_hash_mix = 0;
/* Notify sockets connected to node */
list_for_each_entry_safe(conn, safe, conns, list) {
skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG,
SHORT_H_SIZE, 0, tipc_own_addr(n->net),
conn->peer_node, conn->port,
conn->peer_port, TIPC_ERR_NO_NODE);
if (likely(skb))
skb_queue_tail(inputq, skb);
list_del(&conn->list);
kfree(conn);
}
}
| 0 |
[] |
linux
|
0217ed2848e8538bcf9172d97ed2eeb4a26041bb
| 113,427,308,451,920,440,000,000,000,000,000,000,000 | 40 |
tipc: better validate user input in tipc_nl_retrieve_key()
Before calling tipc_aead_key_size(ptr), we need to ensure
we have enough data to dereference ptr->keylen.
We probably also want to make sure tipc_aead_key_size()
wont overflow with malicious ptr->keylen values.
Syzbot reported:
BUG: KMSAN: uninit-value in __tipc_nl_node_set_key net/tipc/node.c:2971 [inline]
BUG: KMSAN: uninit-value in tipc_nl_node_set_key+0x9bf/0x13b0 net/tipc/node.c:3023
CPU: 0 PID: 21060 Comm: syz-executor.5 Not tainted 5.11.0-rc7-syzkaller #0
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
Call Trace:
__dump_stack lib/dump_stack.c:79 [inline]
dump_stack+0x21c/0x280 lib/dump_stack.c:120
kmsan_report+0xfb/0x1e0 mm/kmsan/kmsan_report.c:118
__msan_warning+0x5f/0xa0 mm/kmsan/kmsan_instr.c:197
__tipc_nl_node_set_key net/tipc/node.c:2971 [inline]
tipc_nl_node_set_key+0x9bf/0x13b0 net/tipc/node.c:3023
genl_family_rcv_msg_doit net/netlink/genetlink.c:739 [inline]
genl_family_rcv_msg net/netlink/genetlink.c:783 [inline]
genl_rcv_msg+0x1319/0x1610 net/netlink/genetlink.c:800
netlink_rcv_skb+0x6fa/0x810 net/netlink/af_netlink.c:2494
genl_rcv+0x63/0x80 net/netlink/genetlink.c:811
netlink_unicast_kernel net/netlink/af_netlink.c:1304 [inline]
netlink_unicast+0x11d6/0x14a0 net/netlink/af_netlink.c:1330
netlink_sendmsg+0x1740/0x1840 net/netlink/af_netlink.c:1919
sock_sendmsg_nosec net/socket.c:652 [inline]
sock_sendmsg net/socket.c:672 [inline]
____sys_sendmsg+0xcfc/0x12f0 net/socket.c:2345
___sys_sendmsg net/socket.c:2399 [inline]
__sys_sendmsg+0x714/0x830 net/socket.c:2432
__compat_sys_sendmsg net/compat.c:347 [inline]
__do_compat_sys_sendmsg net/compat.c:354 [inline]
__se_compat_sys_sendmsg+0xa7/0xc0 net/compat.c:351
__ia32_compat_sys_sendmsg+0x4a/0x70 net/compat.c:351
do_syscall_32_irqs_on arch/x86/entry/common.c:79 [inline]
__do_fast_syscall_32+0x102/0x160 arch/x86/entry/common.c:141
do_fast_syscall_32+0x6a/0xc0 arch/x86/entry/common.c:166
do_SYSENTER_32+0x73/0x90 arch/x86/entry/common.c:209
entry_SYSENTER_compat_after_hwframe+0x4d/0x5c
RIP: 0023:0xf7f60549
Code: 03 74 c0 01 10 05 03 74 b8 01 10 06 03 74 b4 01 10 07 03 74 b0 01 10 08 03 74 d8 01 00 00 00 00 00 51 52 55 89 e5 0f 34 cd 80 <5d> 5a 59 c3 90 90 90 90 8d b4 26 00 00 00 00 8d b4 26 00 00 00 00
RSP: 002b:00000000f555a5fc EFLAGS: 00000296 ORIG_RAX: 0000000000000172
RAX: ffffffffffffffda RBX: 0000000000000003 RCX: 0000000020000200
RDX: 0000000000000000 RSI: 0000000000000000 RDI: 0000000000000000
RBP: 0000000000000000 R08: 0000000000000000 R09: 0000000000000000
R10: 0000000000000000 R11: 0000000000000000 R12: 0000000000000000
R13: 0000000000000000 R14: 0000000000000000 R15: 0000000000000000
Uninit was created at:
kmsan_save_stack_with_flags mm/kmsan/kmsan.c:121 [inline]
kmsan_internal_poison_shadow+0x5c/0xf0 mm/kmsan/kmsan.c:104
kmsan_slab_alloc+0x8d/0xe0 mm/kmsan/kmsan_hooks.c:76
slab_alloc_node mm/slub.c:2907 [inline]
__kmalloc_node_track_caller+0xa37/0x1430 mm/slub.c:4527
__kmalloc_reserve net/core/skbuff.c:142 [inline]
__alloc_skb+0x2f8/0xb30 net/core/skbuff.c:210
alloc_skb include/linux/skbuff.h:1099 [inline]
netlink_alloc_large_skb net/netlink/af_netlink.c:1176 [inline]
netlink_sendmsg+0xdbc/0x1840 net/netlink/af_netlink.c:1894
sock_sendmsg_nosec net/socket.c:652 [inline]
sock_sendmsg net/socket.c:672 [inline]
____sys_sendmsg+0xcfc/0x12f0 net/socket.c:2345
___sys_sendmsg net/socket.c:2399 [inline]
__sys_sendmsg+0x714/0x830 net/socket.c:2432
__compat_sys_sendmsg net/compat.c:347 [inline]
__do_compat_sys_sendmsg net/compat.c:354 [inline]
__se_compat_sys_sendmsg+0xa7/0xc0 net/compat.c:351
__ia32_compat_sys_sendmsg+0x4a/0x70 net/compat.c:351
do_syscall_32_irqs_on arch/x86/entry/common.c:79 [inline]
__do_fast_syscall_32+0x102/0x160 arch/x86/entry/common.c:141
do_fast_syscall_32+0x6a/0xc0 arch/x86/entry/common.c:166
do_SYSENTER_32+0x73/0x90 arch/x86/entry/common.c:209
entry_SYSENTER_compat_after_hwframe+0x4d/0x5c
Fixes: e1f32190cf7d ("tipc: add support for AEAD key setting via netlink")
Signed-off-by: Eric Dumazet <[email protected]>
Cc: Tuong Lien <[email protected]>
Cc: Jon Maloy <[email protected]>
Cc: Ying Xue <[email protected]>
Reported-by: syzbot <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
void Box_iref::derive_box_version()
{
uint8_t version = 0;
for (const auto& ref : m_references) {
if (ref.from_item_ID > 0xFFFF) {
version=1;
break;
}
for (uint32_t r : ref.to_item_ID) {
if (r > 0xFFFF) {
version=1;
break;
}
}
}
set_version(version);
}
| 0 |
[
"CWE-703"
] |
libheif
|
2710c930918609caaf0a664e9c7bc3dce05d5b58
| 193,612,758,154,967,460,000,000,000,000,000,000,000 | 20 |
force fraction to a limited resolution to finally solve those pesky numerical edge cases
|
nv_bck_word(cmdarg_T *cap)
{
cap->oap->motion_type = MCHAR;
cap->oap->inclusive = FALSE;
curwin->w_set_curswant = TRUE;
if (bck_word(cap->count1, cap->arg, FALSE) == FAIL)
clearopbeep(cap->oap);
#ifdef FEAT_FOLDING
else if ((fdo_flags & FDO_HOR) && KeyTyped && cap->oap->op_type == OP_NOP)
foldOpenCursor();
#endif
}
| 0 |
[
"CWE-416"
] |
vim
|
35a9a00afcb20897d462a766793ff45534810dc3
| 308,391,840,162,165,470,000,000,000,000,000,000,000 | 12 |
patch 8.2.3428: using freed memory when replacing
Problem: Using freed memory when replacing. (Dhiraj Mishra)
Solution: Get the line pointer after calling ins_copychar().
|
static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
struct udphdr *uh,
__be32 saddr, __be32 daddr,
struct udp_table *udptable,
int proto)
{
struct sock *sk, *stack[256 / sizeof(struct sock *)];
struct hlist_nulls_node *node;
unsigned short hnum = ntohs(uh->dest);
struct udp_hslot *hslot = udp_hashslot(udptable, net, hnum);
int dif = skb->dev->ifindex;
unsigned int count = 0, offset = offsetof(typeof(*sk), sk_nulls_node);
unsigned int hash2 = 0, hash2_any = 0, use_hash2 = (hslot->count > 10);
bool inner_flushed = false;
if (use_hash2) {
hash2_any = udp4_portaddr_hash(net, htonl(INADDR_ANY), hnum) &
udp_table.mask;
hash2 = udp4_portaddr_hash(net, daddr, hnum) & udp_table.mask;
start_lookup:
hslot = &udp_table.hash2[hash2];
offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node);
}
spin_lock(&hslot->lock);
sk_nulls_for_each_entry_offset(sk, node, &hslot->head, offset) {
if (__udp_is_mcast_sock(net, sk,
uh->dest, daddr,
uh->source, saddr,
dif, hnum)) {
if (unlikely(count == ARRAY_SIZE(stack))) {
flush_stack(stack, count, skb, ~0);
inner_flushed = true;
count = 0;
}
stack[count++] = sk;
sock_hold(sk);
}
}
spin_unlock(&hslot->lock);
/* Also lookup *:port if we are using hash2 and haven't done so yet. */
if (use_hash2 && hash2 != hash2_any) {
hash2 = hash2_any;
goto start_lookup;
}
/*
* do the slow work with no lock held
*/
if (count) {
flush_stack(stack, count, skb, count - 1);
} else {
if (!inner_flushed)
UDP_INC_STATS_BH(net, UDP_MIB_IGNOREDMULTI,
proto == IPPROTO_UDPLITE);
consume_skb(skb);
}
return 0;
}
| 0 |
[
"CWE-358"
] |
linux
|
197c949e7798fbf28cfadc69d9ca0c2abbf93191
| 315,852,470,003,936,130,000,000,000,000,000,000,000 | 61 |
udp: properly support MSG_PEEK with truncated buffers
Backport of this upstream commit into stable kernels :
89c22d8c3b27 ("net: Fix skb csum races when peeking")
exposed a bug in udp stack vs MSG_PEEK support, when user provides
a buffer smaller than skb payload.
In this case,
skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr),
msg->msg_iov);
returns -EFAULT.
This bug does not happen in upstream kernels since Al Viro did a great
job to replace this into :
skb_copy_and_csum_datagram_msg(skb, sizeof(struct udphdr), msg);
This variant is safe vs short buffers.
For the time being, instead reverting Herbert Xu patch and add back
skb->ip_summed invalid changes, simply store the result of
udp_lib_checksum_complete() so that we avoid computing the checksum a
second time, and avoid the problematic
skb_copy_and_csum_datagram_iovec() call.
This patch can be applied on recent kernels as it avoids a double
checksumming, then backported to stable kernels as a bug fix.
Signed-off-by: Eric Dumazet <[email protected]>
Acked-by: Herbert Xu <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
int sched_group_set_rt_period(struct task_group *tg, long rt_period_us)
{
u64 rt_runtime, rt_period;
rt_period = (u64)rt_period_us * NSEC_PER_USEC;
rt_runtime = tg->rt_bandwidth.rt_runtime;
if (rt_period == 0)
return -EINVAL;
return tg_set_bandwidth(tg, rt_period, rt_runtime);
}
| 0 |
[
"CWE-703",
"CWE-835"
] |
linux
|
f26f9aff6aaf67e9a430d16c266f91b13a5bff64
| 281,745,270,053,122,520,000,000,000,000,000,000,000 | 12 |
Sched: fix skip_clock_update optimization
idle_balance() drops/retakes rq->lock, leaving the previous task
vulnerable to set_tsk_need_resched(). Clear it after we return
from balancing instead, and in setup_thread_stack() as well, so
no successfully descheduled or never scheduled task has it set.
Need resched confused the skip_clock_update logic, which assumes
that the next call to update_rq_clock() will come nearly immediately
after being set. Make the optimization robust against the waking
a sleeper before it sucessfully deschedules case by checking that
the current task has not been dequeued before setting the flag,
since it is that useless clock update we're trying to save, and
clear unconditionally in schedule() proper instead of conditionally
in put_prev_task().
Signed-off-by: Mike Galbraith <[email protected]>
Reported-by: Bjoern B. Brandenburg <[email protected]>
Tested-by: Yong Zhang <[email protected]>
Signed-off-by: Peter Zijlstra <[email protected]>
Cc: [email protected]
LKML-Reference: <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
|
static int decode_getacl(struct xdr_stream *xdr, struct rpc_rqst *req,
size_t *acl_len)
{
__be32 *savep;
uint32_t attrlen,
bitmap[2] = {0};
struct kvec *iov = req->rq_rcv_buf.head;
int status;
*acl_len = 0;
if ((status = decode_op_hdr(xdr, OP_GETATTR)) != 0)
goto out;
if ((status = decode_attr_bitmap(xdr, bitmap)) != 0)
goto out;
if ((status = decode_attr_length(xdr, &attrlen, &savep)) != 0)
goto out;
if (unlikely(bitmap[0] & (FATTR4_WORD0_ACL - 1U)))
return -EIO;
if (likely(bitmap[0] & FATTR4_WORD0_ACL)) {
size_t hdrlen;
u32 recvd;
/* We ignore &savep and don't do consistency checks on
* the attr length. Let userspace figure it out.... */
hdrlen = (u8 *)xdr->p - (u8 *)iov->iov_base;
recvd = req->rq_rcv_buf.len - hdrlen;
if (attrlen > recvd) {
dprintk("NFS: server cheating in getattr"
" acl reply: attrlen %u > recvd %u\n",
attrlen, recvd);
return -EINVAL;
}
xdr_read_pages(xdr, attrlen);
*acl_len = attrlen;
} else
status = -EOPNOTSUPP;
out:
return status;
}
| 0 |
[
"CWE-703"
] |
linux
|
dc0b027dfadfcb8a5504f7d8052754bf8d501ab9
| 172,870,473,833,642,560,000,000,000,000,000,000,000 | 41 |
NFSv4: Convert the open and close ops to use fmode
Signed-off-by: Trond Myklebust <[email protected]>
|
static void init_kvm_softmmu(struct kvm_vcpu *vcpu)
{
struct kvm_mmu *context = &vcpu->arch.root_mmu;
struct kvm_mmu_role_regs regs = vcpu_to_role_regs(vcpu);
kvm_init_shadow_mmu(vcpu, ®s);
context->get_guest_pgd = get_cr3;
context->get_pdptr = kvm_pdptr_read;
context->inject_page_fault = kvm_inject_page_fault;
}
| 0 |
[
"CWE-476"
] |
linux
|
9f46c187e2e680ecd9de7983e4d081c3391acc76
| 327,347,191,515,803,750,000,000,000,000,000,000,000 | 11 |
KVM: x86/mmu: fix NULL pointer dereference on guest INVPCID
With shadow paging enabled, the INVPCID instruction results in a call
to kvm_mmu_invpcid_gva. If INVPCID is executed with CR0.PG=0, the
invlpg callback is not set and the result is a NULL pointer dereference.
Fix it trivially by checking for mmu->invlpg before every call.
There are other possibilities:
- check for CR0.PG, because KVM (like all Intel processors after P5)
flushes guest TLB on CR0.PG changes so that INVPCID/INVLPG are a
nop with paging disabled
- check for EFER.LMA, because KVM syncs and flushes when switching
MMU contexts outside of 64-bit mode
All of these are tricky, go for the simple solution. This is CVE-2022-1789.
Reported-by: Yongkang Jia <[email protected]>
Cc: [email protected]
Signed-off-by: Paolo Bonzini <[email protected]>
|
rsvg_filter_primitive_specular_lighting_free (RsvgNode * self)
{
RsvgFilterPrimitiveSpecularLighting *upself;
upself = (RsvgFilterPrimitiveSpecularLighting *) self;
g_string_free (upself->super.result, TRUE);
g_string_free (upself->super.in, TRUE);
_rsvg_node_free (self);
}
| 0 |
[] |
librsvg
|
34c95743ca692ea0e44778e41a7c0a129363de84
| 185,635,099,051,467,530,000,000,000,000,000,000,000 | 9 |
Store node type separately in RsvgNode
The node name (formerly RsvgNode:type) cannot be used to infer
the sub-type of RsvgNode that we're dealing with, since for unknown
elements we put type = node-name. This lead to a (potentially exploitable)
crash e.g. when the element name started with "fe" which tricked
the old code into considering it as a RsvgFilterPrimitive.
CVE-2011-3146
https://bugzilla.gnome.org/show_bug.cgi?id=658014
|
static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
int flags, struct nfs_open_context *ctx)
{
struct nfs4_state_owner *sp = opendata->owner;
struct nfs_server *server = sp->so_server;
struct dentry *dentry;
struct nfs4_state *state;
fmode_t acc_mode = _nfs4_ctx_to_accessmode(ctx);
struct inode *dir = d_inode(opendata->dir);
unsigned long dir_verifier;
unsigned int seq;
int ret;
seq = raw_seqcount_begin(&sp->so_reclaim_seqcount);
dir_verifier = nfs_save_change_attribute(dir);
ret = _nfs4_proc_open(opendata, ctx);
if (ret != 0)
goto out;
state = _nfs4_opendata_to_nfs4_state(opendata);
ret = PTR_ERR(state);
if (IS_ERR(state))
goto out;
ctx->state = state;
if (server->caps & NFS_CAP_POSIX_LOCK)
set_bit(NFS_STATE_POSIX_LOCKS, &state->flags);
if (opendata->o_res.rflags & NFS4_OPEN_RESULT_MAY_NOTIFY_LOCK)
set_bit(NFS_STATE_MAY_NOTIFY_LOCK, &state->flags);
dentry = opendata->dentry;
if (d_really_is_negative(dentry)) {
struct dentry *alias;
d_drop(dentry);
alias = d_exact_alias(dentry, state->inode);
if (!alias)
alias = d_splice_alias(igrab(state->inode), dentry);
/* d_splice_alias() can't fail here - it's a non-directory */
if (alias) {
dput(ctx->dentry);
ctx->dentry = dentry = alias;
}
}
switch(opendata->o_arg.claim) {
default:
break;
case NFS4_OPEN_CLAIM_NULL:
case NFS4_OPEN_CLAIM_DELEGATE_CUR:
case NFS4_OPEN_CLAIM_DELEGATE_PREV:
if (!opendata->rpc_done)
break;
if (opendata->o_res.delegation_type != 0)
dir_verifier = nfs_save_change_attribute(dir);
nfs_set_verifier(dentry, dir_verifier);
}
/* Parse layoutget results before we check for access */
pnfs_parse_lgopen(state->inode, opendata->lgp, ctx);
ret = nfs4_opendata_access(sp->so_cred, opendata, state,
acc_mode, flags);
if (ret != 0)
goto out;
if (d_inode(dentry) == state->inode) {
nfs_inode_attach_open_context(ctx);
if (read_seqcount_retry(&sp->so_reclaim_seqcount, seq))
nfs4_schedule_stateid_recovery(server, state);
}
out:
if (!opendata->cancelled)
nfs4_sequence_free_slot(&opendata->o_res.seq_res);
return ret;
}
| 0 |
[
"CWE-787"
] |
linux
|
b4487b93545214a9db8cbf32e86411677b0cca21
| 88,488,132,374,394,770,000,000,000,000,000,000,000 | 76 |
nfs: Fix getxattr kernel panic and memory overflow
Move the buffer size check to decode_attr_security_label() before memcpy()
Only call memcpy() if the buffer is large enough
Fixes: aa9c2669626c ("NFS: Client implementation of Labeled-NFS")
Signed-off-by: Jeffrey Mitchell <[email protected]>
[Trond: clean up duplicate test of label->len != 0]
Signed-off-by: Trond Myklebust <[email protected]>
|
qb_rb_chunk_commit(struct qb_ringbuffer_s * rb, size_t len)
{
uint32_t old_write_pt;
if (rb == NULL) {
return -EINVAL;
}
/*
* commit the magic & chunk_size
*/
old_write_pt = rb->shared_hdr->write_pt;
rb->shared_data[old_write_pt] = len;
/*
* commit the new write pointer
*/
rb->shared_hdr->write_pt = qb_rb_chunk_step(rb, old_write_pt);
QB_RB_CHUNK_MAGIC_SET(rb, old_write_pt, QB_RB_CHUNK_MAGIC);
DEBUG_PRINTF("commit [%zd] read: %u, write: %u -> %u (%u)\n",
(rb->notifier.q_len_fn ?
rb->notifier.q_len_fn(rb->notifier.instance) : 0),
rb->shared_hdr->read_pt,
old_write_pt,
rb->shared_hdr->write_pt,
rb->shared_hdr->word_size);
/*
* post the notification to the reader
*/
if (rb->notifier.post_fn) {
return rb->notifier.post_fn(rb->notifier.instance, len);
}
return 0;
}
| 0 |
[
"CWE-59"
] |
libqb
|
e322e98dc264bc5911d6fe1d371e55ac9f95a71e
| 63,877,067,024,709,240,000,000,000,000,000,000,000 | 35 |
ipc: use O_EXCL on SHM files, and randomize the names
Signed-off-by: Christine Caulfield <[email protected]>
|
rb_f_chop_bang(str)
VALUE str;
{
return rb_str_chop_bang(uscore_get());
}
| 0 |
[
"CWE-20"
] |
ruby
|
e926ef5233cc9f1035d3d51068abe9df8b5429da
| 296,476,246,598,357,270,000,000,000,000,000,000,000 | 5 |
* random.c (rb_genrand_int32, rb_genrand_real), intern.h: Export.
* string.c (rb_str_tmp_new), intern.h: New function.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/branches/ruby_1_8@16014 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
|
ves_icall_get_attributes (MonoReflectionType *type)
{
MonoClass *klass = mono_class_from_mono_type (type->type);
MONO_ARCH_SAVE_REGS;
return klass->flags;
}
| 0 |
[
"CWE-264"
] |
mono
|
035c8587c0d8d307e45f1b7171a0d337bb451f1e
| 123,111,956,369,983,270,000,000,000,000,000,000,000 | 8 |
Allow only primitive types/enums in RuntimeHelpers.InitializeArray ().
|
static CURLcode pop3_disconnect(struct connectdata *conn, bool dead_connection)
{
struct pop3_conn *pop3c= &conn->proto.pop3c;
/* We cannot send quit unconditionally. If this connection is stale or
bad in any way, sending quit and waiting around here will make the
disconnect wait in vain and cause more problems than we need to.
*/
/* The POP3 session may or may not have been allocated/setup at this
point! */
if(!dead_connection && pop3c->pp.conn)
(void)pop3_quit(conn); /* ignore errors on the LOGOUT */
Curl_pp_disconnect(&pop3c->pp);
return CURLE_OK;
}
| 0 |
[
"CWE-89"
] |
curl
|
75ca568fa1c19de4c5358fed246686de8467c238
| 249,431,156,892,942,600,000,000,000,000,000,000,000 | 19 |
URL sanitize: reject URLs containing bad data
Protocols (IMAP, POP3 and SMTP) that use the path part of a URL in a
decoded manner now use the new Curl_urldecode() function to reject URLs
with embedded control codes (anything that is or decodes to a byte value
less than 32).
URLs containing such codes could easily otherwise be used to do harm and
allow users to do unintended actions with otherwise innocent tools and
applications. Like for example using a URL like
pop3://pop3.example.com/1%0d%0aDELE%201 when the app wants a URL to get
a mail and instead this would delete one.
This flaw is considered a security vulnerability: CVE-2012-0036
Security advisory at: http://curl.haxx.se/docs/adv_20120124.html
Reported by: Dan Fandrich
|
static void sctp_addr_wq_timeout_handler(unsigned long arg)
{
struct net *net = (struct net *)arg;
struct sctp_sockaddr_entry *addrw, *temp;
struct sctp_sock *sp;
spin_lock_bh(&net->sctp.addr_wq_lock);
list_for_each_entry_safe(addrw, temp, &net->sctp.addr_waitq, list) {
pr_debug("%s: the first ent in wq:%p is addr:%pISc for cmd:%d at "
"entry:%p\n", __func__, &net->sctp.addr_waitq, &addrw->a.sa,
addrw->state, addrw);
#if IS_ENABLED(CONFIG_IPV6)
/* Now we send an ASCONF for each association */
/* Note. we currently don't handle link local IPv6 addressees */
if (addrw->a.sa.sa_family == AF_INET6) {
struct in6_addr *in6;
if (ipv6_addr_type(&addrw->a.v6.sin6_addr) &
IPV6_ADDR_LINKLOCAL)
goto free_next;
in6 = (struct in6_addr *)&addrw->a.v6.sin6_addr;
if (ipv6_chk_addr(net, in6, NULL, 0) == 0 &&
addrw->state == SCTP_ADDR_NEW) {
unsigned long timeo_val;
pr_debug("%s: this is on DAD, trying %d sec "
"later\n", __func__,
SCTP_ADDRESS_TICK_DELAY);
timeo_val = jiffies;
timeo_val += msecs_to_jiffies(SCTP_ADDRESS_TICK_DELAY);
mod_timer(&net->sctp.addr_wq_timer, timeo_val);
break;
}
}
#endif
list_for_each_entry(sp, &net->sctp.auto_asconf_splist, auto_asconf_list) {
struct sock *sk;
sk = sctp_opt2sk(sp);
/* ignore bound-specific endpoints */
if (!sctp_is_ep_boundall(sk))
continue;
bh_lock_sock(sk);
if (sctp_asconf_mgmt(sp, addrw) < 0)
pr_debug("%s: sctp_asconf_mgmt failed\n", __func__);
bh_unlock_sock(sk);
}
#if IS_ENABLED(CONFIG_IPV6)
free_next:
#endif
list_del(&addrw->list);
kfree(addrw);
}
spin_unlock_bh(&net->sctp.addr_wq_lock);
}
| 0 |
[
"CWE-119",
"CWE-787"
] |
linux
|
8e2d61e0aed2b7c4ecb35844fe07e0b2b762dee4
| 306,591,684,740,840,500,000,000,000,000,000,000,000 | 59 |
sctp: fix race on protocol/netns initialization
Consider sctp module is unloaded and is being requested because an user
is creating a sctp socket.
During initialization, sctp will add the new protocol type and then
initialize pernet subsys:
status = sctp_v4_protosw_init();
if (status)
goto err_protosw_init;
status = sctp_v6_protosw_init();
if (status)
goto err_v6_protosw_init;
status = register_pernet_subsys(&sctp_net_ops);
The problem is that after those calls to sctp_v{4,6}_protosw_init(), it
is possible for userspace to create SCTP sockets like if the module is
already fully loaded. If that happens, one of the possible effects is
that we will have readers for net->sctp.local_addr_list list earlier
than expected and sctp_net_init() does not take precautions while
dealing with that list, leading to a potential panic but not limited to
that, as sctp_sock_init() will copy a bunch of blank/partially
initialized values from net->sctp.
The race happens like this:
CPU 0 | CPU 1
socket() |
__sock_create | socket()
inet_create | __sock_create
list_for_each_entry_rcu( |
answer, &inetsw[sock->type], |
list) { | inet_create
/* no hits */ |
if (unlikely(err)) { |
... |
request_module() |
/* socket creation is blocked |
* the module is fully loaded |
*/ |
sctp_init |
sctp_v4_protosw_init |
inet_register_protosw |
list_add_rcu(&p->list, |
last_perm); |
| list_for_each_entry_rcu(
| answer, &inetsw[sock->type],
sctp_v6_protosw_init | list) {
| /* hit, so assumes protocol
| * is already loaded
| */
| /* socket creation continues
| * before netns is initialized
| */
register_pernet_subsys |
Simply inverting the initialization order between
register_pernet_subsys() and sctp_v4_protosw_init() is not possible
because register_pernet_subsys() will create a control sctp socket, so
the protocol must be already visible by then. Deferring the socket
creation to a work-queue is not good specially because we loose the
ability to handle its errors.
So, as suggested by Vlad, the fix is to split netns initialization in
two moments: defaults and control socket, so that the defaults are
already loaded by when we register the protocol, while control socket
initialization is kept at the same moment it is today.
Fixes: 4db67e808640 ("sctp: Make the address lists per network namespace")
Signed-off-by: Vlad Yasevich <[email protected]>
Signed-off-by: Marcelo Ricardo Leitner <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
syslog_sigsafe(int priority, const char *msg, size_t msglen)
{
static int syslog_fd = -1;
char buf[sizeof("<1234567890>ripngd[1234567890]: ")+msglen+50];
char *s;
if ((syslog_fd < 0) && ((syslog_fd = syslog_connect()) < 0))
return;
#define LOC s,buf+sizeof(buf)-s
s = buf;
s = str_append(LOC,"<");
s = num_append(LOC,priority);
s = str_append(LOC,">");
/* forget about the timestamp, too difficult in a signal handler */
s = str_append(LOC,zlog_default->ident);
if (zlog_default->syslog_options & LOG_PID)
{
s = str_append(LOC,"[");
s = num_append(LOC,getpid());
s = str_append(LOC,"]");
}
s = str_append(LOC,": ");
s = str_append(LOC,msg);
write(syslog_fd,buf,s-buf);
#undef LOC
}
| 0 |
[
"CWE-125"
] |
frr
|
6d58272b4cf96f0daa846210dd2104877900f921
| 165,909,966,318,102,310,000,000,000,000,000,000,000 | 27 |
[bgpd] cleanup, compact and consolidate capability parsing code
2007-07-26 Paul Jakma <[email protected]>
* (general) Clean up and compact capability parsing slightly.
Consolidate validation of length and logging of generic TLV, and
memcpy of capability data, thus removing such from cap specifc
code (not always present or correct).
* bgp_open.h: Add structures for the generic capability TLV header
and for the data formats of the various specific capabilities we
support. Hence remove the badly named, or else misdefined, struct
capability.
* bgp_open.c: (bgp_capability_vty_out) Use struct capability_mp_data.
Do the length checks *before* memcpy()'ing based on that length
(stored capability - should have been validated anyway on input,
but..).
(bgp_afi_safi_valid_indices) new function to validate (afi,safi)
which is about to be used as index into arrays, consolidates
several instances of same, at least one of which appeared to be
incomplete..
(bgp_capability_mp) Much condensed.
(bgp_capability_orf_entry) New, process one ORF entry
(bgp_capability_orf) Condensed. Fixed to process all ORF entries.
(bgp_capability_restart) Condensed, and fixed to use a
cap-specific type, rather than abusing capability_mp.
(struct message capcode_str) added to aid generic logging.
(size_t cap_minsizes[]) added to aid generic validation of
capability length field.
(bgp_capability_parse) Generic logging and validation of TLV
consolidated here. Code compacted as much as possible.
* bgp_packet.c: (bgp_open_receive) Capability parsers now use
streams, so no more need here to manually fudge the input stream
getp.
(bgp_capability_msg_parse) use struct capability_mp_data. Validate
lengths /before/ memcpy. Use bgp_afi_safi_valid_indices.
(bgp_capability_receive) Exported for use by test harness.
* bgp_vty.c: (bgp_show_summary) fix conversion warning
(bgp_show_peer) ditto
* bgp_debug.h: Fix storage 'extern' after type 'const'.
* lib/log.c: (mes_lookup) warning about code not being in
same-number array slot should be debug, not warning. E.g. BGP
has several discontigious number spaces, allocating from
different parts of a space is not uncommon (e.g. IANA
assigned versus vendor-assigned code points in some number
space).
|
static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu,
s64 adjustment)
{
u64 tsc_offset = vcpu->arch.l1_tsc_offset;
kvm_vcpu_write_tsc_offset(vcpu, tsc_offset + adjustment);
}
| 0 |
[
"CWE-476"
] |
linux
|
55749769fe608fa3f4a075e42e89d237c8e37637
| 143,523,522,914,736,770,000,000,000,000,000,000,000 | 6 |
KVM: x86: Fix wall clock writes in Xen shared_info not to mark page dirty
When dirty ring logging is enabled, any dirty logging without an active
vCPU context will cause a kernel oops. But we've already declared that
the shared_info page doesn't get dirty tracking anyway, since it would
be kind of insane to mark it dirty every time we deliver an event channel
interrupt. Userspace is supposed to just assume it's always dirty any
time a vCPU can run or event channels are routed.
So stop using the generic kvm_write_wall_clock() and just write directly
through the gfn_to_pfn_cache that we already have set up.
We can make kvm_write_wall_clock() static in x86.c again now, but let's
not remove the 'sec_hi_ofs' argument even though it's not used yet. At
some point we *will* want to use that for KVM guests too.
Fixes: 629b5348841a ("KVM: x86/xen: update wallclock region")
Reported-by: butt3rflyh4ck <[email protected]>
Signed-off-by: David Woodhouse <[email protected]>
Message-Id: <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
|
void addrconf_leave_solict(struct inet6_dev *idev, struct in6_addr *addr)
{
struct in6_addr maddr;
if (idev->dev->flags&(IFF_LOOPBACK|IFF_NOARP))
return;
addrconf_addr_solict_mult(addr, &maddr);
__ipv6_dev_mc_dec(idev, &maddr);
}
| 0 |
[
"CWE-200"
] |
linux-2.6
|
8a47077a0b5aa2649751c46e7a27884e6686ccbf
| 290,279,652,391,664,930,000,000,000,000,000,000,000 | 10 |
[NETLINK]: Missing padding fields in dumped structures
Plug holes with padding fields and initialized them to zero.
Signed-off-by: Patrick McHardy <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
int main(int argc, char *argv[])
{
int err = 0, r, c, long_optind = 0;
int do_info = 0;
int do_get_conf_entry = 0;
int do_set_conf_entry = 0;
int do_list_readers = 0;
int do_list_drivers = 0;
int do_list_files = 0;
int do_send_apdu = 0;
int do_print_atr = 0;
int do_print_version = 0;
int do_print_serial = 0;
int do_print_name = 0;
int do_list_algorithms = 0;
int do_reset = 0;
int action_count = 0;
const char *opt_driver = NULL;
const char *opt_conf_entry = NULL;
const char *opt_reset_type = NULL;
char **p;
sc_context_param_t ctx_param;
setbuf(stderr, NULL);
setbuf(stdout, NULL);
while (1) {
c = getopt_long(argc, argv, "inlG:S:fr:vs:Dc:aw", options, &long_optind);
if (c == -1)
break;
if (c == '?')
util_print_usage_and_die(app_name, options, option_help, NULL);
switch (c) {
case 'i':
do_info = 1;
action_count++;
break;
case 'G':
do_get_conf_entry = 1;
opt_conf_entry = optarg;
action_count++;
break;
case 'S':
do_set_conf_entry = 1;
opt_conf_entry = optarg;
action_count++;
break;
case 'l':
do_list_readers = 1;
action_count++;
break;
case 'D':
do_list_drivers = 1;
action_count++;
break;
case 'f':
do_list_files = 1;
action_count++;
break;
case 's':
p = (char **) realloc(opt_apdus,
(opt_apdu_count + 1) * sizeof(char *));
if (!p) {
fprintf(stderr, "Not enough memory\n");
err = 1;
goto end;
}
opt_apdus = p;
opt_apdus[opt_apdu_count] = optarg;
do_send_apdu++;
if (opt_apdu_count == 0)
action_count++;
opt_apdu_count++;
break;
case 'a':
do_print_atr = 1;
action_count++;
break;
case 'n':
do_print_name = 1;
action_count++;
break;
case 'r':
opt_reader = optarg;
break;
case 'v':
verbose++;
break;
case OPT_VERSION:
do_print_version = 1;
action_count++;
break;
case 'c':
opt_driver = optarg;
break;
case 'w':
opt_wait = 1;
break;
case OPT_SERIAL:
do_print_serial = 1;
action_count++;
break;
case OPT_LIST_ALG:
do_list_algorithms = 1;
action_count++;
break;
case OPT_RESET:
do_reset = 1;
opt_reset_type = optarg;
action_count++;
break;
}
}
if (action_count == 0)
util_print_usage_and_die(app_name, options, option_help, NULL);
if (do_print_version) {
printf("%s\n", OPENSC_SCM_REVISION);
action_count--;
}
if (do_info) {
opensc_info();
action_count--;
}
memset(&ctx_param, 0, sizeof(ctx_param));
ctx_param.ver = 0;
ctx_param.app_name = app_name;
r = sc_context_create(&ctx, &ctx_param);
if (r) {
fprintf(stderr, "Failed to establish context: %s\n", sc_strerror(r));
return 1;
}
ctx->flags |= SC_CTX_FLAG_ENABLE_DEFAULT_DRIVER;
if (verbose > 1) {
ctx->debug = verbose;
sc_ctx_log_to_file(ctx, "stderr");
}
if (do_get_conf_entry) {
if ((err = opensc_get_conf_entry (opt_conf_entry)))
goto end;
action_count--;
}
if (do_set_conf_entry) {
if ((err = opensc_set_conf_entry (opt_conf_entry)))
goto end;
action_count--;
}
if (do_list_readers) {
if ((err = list_readers()))
goto end;
action_count--;
}
if (do_list_drivers) {
if ((err = list_drivers()))
goto end;
action_count--;
}
if (action_count <= 0)
goto end;
if (opt_driver != NULL) {
err = sc_set_card_driver(ctx, opt_driver);
if (err) {
fprintf(stderr, "Driver '%s' not found!\n", opt_driver);
err = 1;
goto end;
}
}
err = util_connect_card_ex(ctx, &card, opt_reader, opt_wait, 0, verbose);
if (err)
goto end;
if (do_print_atr) {
if (verbose) {
printf("Card ATR:\n");
util_hex_dump_asc(stdout, card->atr.value, card->atr.len, -1);
} else {
char tmp[SC_MAX_ATR_SIZE*3];
sc_bin_to_hex(card->atr.value, card->atr.len, tmp, sizeof(tmp) - 1, ':');
fprintf(stdout,"%s\n",tmp);
}
action_count--;
}
if (do_print_serial) {
if (verbose)
printf("Card serial number:");
print_serial(card);
action_count--;
}
if (do_print_name) {
if (verbose)
printf("Card name: ");
printf("%s\n", card->name);
action_count--;
}
if (do_send_apdu) {
if ((err = send_apdu()))
goto end;
action_count--;
}
if (do_list_files) {
if ((err = list_files()))
goto end;
action_count--;
}
if (do_list_algorithms) {
if ((err = list_algorithms()))
goto end;
action_count--;
}
if (do_reset) {
if ((err = card_reset(opt_reset_type)))
goto end;
action_count--;
}
end:
if (card) {
sc_disconnect_card(card);
}
if (ctx)
sc_release_context(ctx);
return err;
}
| 0 |
[
"CWE-125"
] |
OpenSC
|
8fe377e93b4b56060e5bbfb6f3142ceaeca744fa
| 21,364,646,990,279,130,000,000,000,000,000,000,000 | 233 |
fixed out of bounds reads
Thanks to Eric Sesterhenn from X41 D-SEC GmbH
for reporting and suggesting security fixes.
|
m_local_df(union DateData *x)
{
if (simple_dat_p(x))
return 0;
else {
get_c_df(x);
return local_df(x);
}
}
| 0 |
[] |
date
|
3959accef8da5c128f8a8e2fd54e932a4fb253b0
| 186,592,217,340,400,500,000,000,000,000,000,000,000 | 9 |
Add length limit option for methods that parses date strings
`Date.parse` now raises an ArgumentError when a given date string is
longer than 128. You can configure the limit by giving `limit` keyword
arguments like `Date.parse(str, limit: 1000)`. If you pass `limit: nil`,
the limit is disabled.
Not only `Date.parse` but also the following methods are changed.
* Date._parse
* Date.parse
* DateTime.parse
* Date._iso8601
* Date.iso8601
* DateTime.iso8601
* Date._rfc3339
* Date.rfc3339
* DateTime.rfc3339
* Date._xmlschema
* Date.xmlschema
* DateTime.xmlschema
* Date._rfc2822
* Date.rfc2822
* DateTime.rfc2822
* Date._rfc822
* Date.rfc822
* DateTime.rfc822
* Date._jisx0301
* Date.jisx0301
* DateTime.jisx0301
|
static void nsg_mrxu_parse_report(struct sony_sc *sc, u8 *rd, int size)
{
int n, offset, relx, rely;
u8 active;
/*
* The NSG-MRxU multi-touch trackpad data starts at offset 1 and
* the touch-related data starts at offset 2.
* For the first byte, bit 0 is set when touchpad button is pressed.
* Bit 2 is set when a touch is active and the drag (Fn) key is pressed.
* This drag key is mapped to BTN_LEFT. It is operational only when a
* touch point is active.
* Bit 4 is set when only the first touch point is active.
* Bit 6 is set when only the second touch point is active.
* Bits 5 and 7 are set when both touch points are active.
* The next 3 bytes are two 12 bit X/Y coordinates for the first touch.
* The following byte, offset 5, has the touch width and length.
* Bits 0-4=X (width), bits 5-7=Y (length).
* A signed relative X coordinate is at offset 6.
* The bytes at offset 7-9 are the second touch X/Y coordinates.
* Offset 10 has the second touch width and length.
* Offset 11 has the relative Y coordinate.
*/
offset = 1;
input_report_key(sc->touchpad, BTN_LEFT, rd[offset] & 0x0F);
active = (rd[offset] >> 4);
relx = (s8) rd[offset+5];
rely = ((s8) rd[offset+10]) * -1;
offset++;
for (n = 0; n < 2; n++) {
u16 x, y;
u8 contactx, contacty;
x = rd[offset] | ((rd[offset+1] & 0x0F) << 8);
y = ((rd[offset+1] & 0xF0) >> 4) | (rd[offset+2] << 4);
input_mt_slot(sc->touchpad, n);
input_mt_report_slot_state(sc->touchpad, MT_TOOL_FINGER, active & 0x03);
if (active & 0x03) {
contactx = rd[offset+3] & 0x0F;
contacty = rd[offset+3] >> 4;
input_report_abs(sc->touchpad, ABS_MT_TOUCH_MAJOR,
max(contactx, contacty));
input_report_abs(sc->touchpad, ABS_MT_TOUCH_MINOR,
min(contactx, contacty));
input_report_abs(sc->touchpad, ABS_MT_ORIENTATION,
(bool) (contactx > contacty));
input_report_abs(sc->touchpad, ABS_MT_POSITION_X, x);
input_report_abs(sc->touchpad, ABS_MT_POSITION_Y,
NSG_MRXU_MAX_Y - y);
/*
* The relative coordinates belong to the first touch
* point, when present, or to the second touch point
* when the first is not active.
*/
if ((n == 0) || ((n == 1) && (active & 0x01))) {
input_report_rel(sc->touchpad, REL_X, relx);
input_report_rel(sc->touchpad, REL_Y, rely);
}
}
offset += 5;
active >>= 2;
}
input_mt_sync_frame(sc->touchpad);
input_sync(sc->touchpad);
}
| 0 |
[
"CWE-787"
] |
linux
|
d9d4b1e46d9543a82c23f6df03f4ad697dab361b
| 122,232,380,017,707,560,000,000,000,000,000,000,000 | 73 |
HID: Fix assumption that devices have inputs
The syzbot fuzzer found a slab-out-of-bounds write bug in the hid-gaff
driver. The problem is caused by the driver's assumption that the
device must have an input report. While this will be true for all
normal HID input devices, a suitably malicious device can violate the
assumption.
The same assumption is present in over a dozen other HID drivers.
This patch fixes them by checking that the list of hid_inputs for the
hid_device is nonempty before allowing it to be used.
Reported-and-tested-by: [email protected]
Signed-off-by: Alan Stern <[email protected]>
CC: <[email protected]>
Signed-off-by: Benjamin Tissoires <[email protected]>
|
sign_hash(const struct private_key_stuff *pks
, const u_char *hash_val, size_t hash_len
, u_char *sig_val, size_t sig_len)
{
chunk_t ch;
mpz_t t1;
size_t padlen;
u_char *p = sig_val;
const struct RSA_private_key *k = &pks->u.RSA_private_key;
DBG(DBG_CONTROL | DBG_CRYPT,
DBG_log("signing hash with RSA Key *%s", pks->pub->u.rsa.keyid)
);
/* PKCS#1 v1.5 8.1 encryption-block formatting */
*p++ = 0x00;
*p++ = 0x01; /* BT (block type) 01 */
padlen = sig_len - 3 - hash_len;
memset(p, 0xFF, padlen);
p += padlen;
*p++ = 0x00;
memcpy(p, hash_val, hash_len);
passert(p + hash_len - sig_val == (ptrdiff_t)sig_len);
/* PKCS#1 v1.5 8.2 octet-string-to-integer conversion */
n_to_mpz(t1, sig_val, sig_len); /* (could skip leading 0x00) */
/* PKCS#1 v1.5 8.3 RSA computation y = x^c mod n
* Better described in PKCS#1 v2.0 5.1 RSADP.
* There are two methods, depending on the form of the private key.
* We use the one based on the Chinese Remainder Theorem.
*/
oswcrypto.rsa_mod_exp_crt(t1, t1, &k->p, &k->dP, &k->q, &k->dQ, &k->qInv);
/* PKCS#1 v1.5 8.4 integer-to-octet-string conversion */
ch = mpz_to_n(t1, sig_len);
memcpy(sig_val, ch.ptr, sig_len);
pfree(ch.ptr);
mpz_clear(t1);
}
| 0 |
[
"CWE-347"
] |
Openswan
|
9eaa6c2a823c1d2b58913506a15f9474bf857a3d
| 190,499,251,484,939,130,000,000,000,000,000,000,000 | 40 |
wo#7449 . verify padding contents for IKEv2 RSA sig check
Special thanks to Sze Yiu Chau of Purdue University ([email protected])
who reported the issue.
|
void sched_move_task(struct task_struct *tsk)
{
int on_rq, running;
unsigned long flags;
struct rq *rq;
rq = task_rq_lock(tsk, &flags);
running = task_current(rq, tsk);
on_rq = tsk->se.on_rq;
if (on_rq)
dequeue_task(rq, tsk, 0);
if (unlikely(running))
tsk->sched_class->put_prev_task(rq, tsk);
#ifdef CONFIG_FAIR_GROUP_SCHED
if (tsk->sched_class->task_move_group)
tsk->sched_class->task_move_group(tsk, on_rq);
else
#endif
set_task_rq(tsk, task_cpu(tsk));
if (unlikely(running))
tsk->sched_class->set_curr_task(rq);
if (on_rq)
enqueue_task(rq, tsk, 0);
task_rq_unlock(rq, &flags);
}
| 0 |
[
"CWE-703",
"CWE-835"
] |
linux
|
f26f9aff6aaf67e9a430d16c266f91b13a5bff64
| 129,748,106,938,181,190,000,000,000,000,000,000,000 | 30 |
Sched: fix skip_clock_update optimization
idle_balance() drops/retakes rq->lock, leaving the previous task
vulnerable to set_tsk_need_resched(). Clear it after we return
from balancing instead, and in setup_thread_stack() as well, so
no successfully descheduled or never scheduled task has it set.
Need resched confused the skip_clock_update logic, which assumes
that the next call to update_rq_clock() will come nearly immediately
after being set. Make the optimization robust against the waking
a sleeper before it sucessfully deschedules case by checking that
the current task has not been dequeued before setting the flag,
since it is that useless clock update we're trying to save, and
clear unconditionally in schedule() proper instead of conditionally
in put_prev_task().
Signed-off-by: Mike Galbraith <[email protected]>
Reported-by: Bjoern B. Brandenburg <[email protected]>
Tested-by: Yong Zhang <[email protected]>
Signed-off-by: Peter Zijlstra <[email protected]>
Cc: [email protected]
LKML-Reference: <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
|
static void adjust_link(struct net_device *dev)
{
struct gfar_private *priv = netdev_priv(dev);
struct phy_device *phydev = dev->phydev;
if (unlikely(phydev->link != priv->oldlink ||
(phydev->link && (phydev->duplex != priv->oldduplex ||
phydev->speed != priv->oldspeed))))
gfar_update_link_state(priv);
}
| 0 |
[] |
linux
|
d8861bab48b6c1fc3cdbcab8ff9d1eaea43afe7f
| 281,191,560,872,727,900,000,000,000,000,000,000,000 | 10 |
gianfar: fix jumbo packets+napi+rx overrun crash
When using jumbo packets and overrunning rx queue with napi enabled,
the following sequence is observed in gfar_add_rx_frag:
| lstatus | | skb |
t | lstatus, size, flags | first | len, data_len, *ptr |
---+--------------------------------------+-------+-----------------------+
13 | 18002348, 9032, INTERRUPT LAST | 0 | 9600, 8000, f554c12e |
12 | 10000640, 1600, INTERRUPT | 0 | 8000, 6400, f554c12e |
11 | 10000640, 1600, INTERRUPT | 0 | 6400, 4800, f554c12e |
10 | 10000640, 1600, INTERRUPT | 0 | 4800, 3200, f554c12e |
09 | 10000640, 1600, INTERRUPT | 0 | 3200, 1600, f554c12e |
08 | 14000640, 1600, INTERRUPT FIRST | 0 | 1600, 0, f554c12e |
07 | 14000640, 1600, INTERRUPT FIRST | 1 | 0, 0, f554c12e |
06 | 1c000080, 128, INTERRUPT LAST FIRST | 1 | 0, 0, abf3bd6e |
05 | 18002348, 9032, INTERRUPT LAST | 0 | 8000, 6400, c5a57780 |
04 | 10000640, 1600, INTERRUPT | 0 | 6400, 4800, c5a57780 |
03 | 10000640, 1600, INTERRUPT | 0 | 4800, 3200, c5a57780 |
02 | 10000640, 1600, INTERRUPT | 0 | 3200, 1600, c5a57780 |
01 | 10000640, 1600, INTERRUPT | 0 | 1600, 0, c5a57780 |
00 | 14000640, 1600, INTERRUPT FIRST | 1 | 0, 0, c5a57780 |
So at t=7 a new packets is started but not finished, probably due to rx
overrun - but rx overrun is not indicated in the flags. Instead a new
packets starts at t=8. This results in skb->len to exceed size for the LAST
fragment at t=13 and thus a negative fragment size added to the skb.
This then crashes:
kernel BUG at include/linux/skbuff.h:2277!
Oops: Exception in kernel mode, sig: 5 [#1]
...
NIP [c04689f4] skb_pull+0x2c/0x48
LR [c03f62ac] gfar_clean_rx_ring+0x2e4/0x844
Call Trace:
[ec4bfd38] [c06a84c4] _raw_spin_unlock_irqrestore+0x60/0x7c (unreliable)
[ec4bfda8] [c03f6a44] gfar_poll_rx_sq+0x48/0xe4
[ec4bfdc8] [c048d504] __napi_poll+0x54/0x26c
[ec4bfdf8] [c048d908] net_rx_action+0x138/0x2c0
[ec4bfe68] [c06a8f34] __do_softirq+0x3a4/0x4fc
[ec4bfed8] [c0040150] run_ksoftirqd+0x58/0x70
[ec4bfee8] [c0066ecc] smpboot_thread_fn+0x184/0x1cc
[ec4bff08] [c0062718] kthread+0x140/0x144
[ec4bff38] [c0012350] ret_from_kernel_thread+0x14/0x1c
This patch fixes this by checking for computed LAST fragment size, so a
negative sized fragment is never added.
In order to prevent the newer rx frame from getting corrupted, the FIRST
flag is checked to discard the incomplete older frame.
Signed-off-by: Michael Braun <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
ObjectComputeName(
UINT32 size, // IN: the size of the area to digest
BYTE *publicArea, // IN: the public area to digest
TPM_ALG_ID nameAlg, // IN: the hash algorithm to use
TPM2B_NAME *name // OUT: Computed name
)
{
// Hash the publicArea into the name buffer leaving room for the nameAlg
name->t.size = CryptHashBlock(nameAlg, size, publicArea,
sizeof(name->t.name) - 2,
&name->t.name[2]);
// set the nameAlg
UINT16_TO_BYTE_ARRAY(nameAlg, name->t.name);
name->t.size += 2;
return name;
}
| 0 |
[
"CWE-119"
] |
libtpms
|
ea62fd9679f8c6fc5e79471b33cfbd8227bfed72
| 147,341,238,967,145,260,000,000,000,000,000,000,000 | 16 |
tpm2: Initialize a whole OBJECT before using it
Initialize a whole OBJECT before using it. This is necessary since
an OBJECT may also be used as a HASH_OBJECT via the ANY_OBJECT
union and that HASH_OBJECT can leave bad size inidicators in TPM2B
buffer in the OBJECT. To get rid of this problem we reset the whole
OBJECT to 0 before using it. This is as if the memory for the
OBJECT was just initialized.
Signed-off-by: Stefan Berger <[email protected]>
|
static gboolean sasl_reassemble_incoming(IRC_SERVER_REC *server, const char *fragment, GString **decoded)
{
GString *enc_req;
gsize fragment_len;
fragment_len = strlen(fragment);
/* Check if there is an existing fragment to prepend. */
if (server->sasl_buffer != NULL) {
if (g_strcmp0("+", fragment) == 0) {
enc_req = server->sasl_buffer;
} else {
enc_req = g_string_append_len(server->sasl_buffer, fragment, fragment_len);
}
server->sasl_buffer = NULL;
} else {
enc_req = g_string_new_len(fragment, fragment_len);
}
/*
* Fail authentication with this server. They have sent too much data.
*/
if (enc_req->len > AUTHENTICATE_MAX_SIZE) {
return FALSE;
}
/*
* If the the request is exactly the chunk size, this is a fragment
* and more data is expected.
*/
if (fragment_len == AUTHENTICATE_CHUNK_SIZE) {
server->sasl_buffer = enc_req;
return TRUE;
}
if (enc_req->len == 1 && *enc_req->str == '+') {
*decoded = g_string_new_len("", 0);
} else {
gsize dec_len;
gint state = 0;
guint save = 0;
/* Since we're not going to use the enc_req GString anymore we
* can perform the decoding in place. */
dec_len = g_base64_decode_step(enc_req->str, enc_req->len,
(guchar *)enc_req->str,
&state, &save);
/* A copy of the data is made when the GString is created. */
*decoded = g_string_new_len(enc_req->str, dec_len);
}
g_string_free(enc_req, TRUE);
return TRUE;
}
| 0 |
[
"CWE-416"
] |
irssi
|
36564717c9f701e3a339da362ab46d220d27e0c1
| 284,871,671,905,264,200,000,000,000,000,000,000,000 | 54 |
Merge branch 'security' into 'master'
Security
See merge request irssi/irssi!34
(cherry picked from commit b0d9cb33cd9ef9da7c331409e8b7c57a6f3aef3f)
|
virDomainBlockJobAbort(virDomainPtr dom, const char *disk,
unsigned int flags)
{
virConnectPtr conn;
VIR_DOMAIN_DEBUG(dom, "disk=%s, flags=%x", disk, flags);
virResetLastError();
virCheckDomainReturn(dom, -1);
conn = dom->conn;
virCheckReadOnlyGoto(conn->flags, error);
virCheckNonNullArgGoto(disk, error);
if (conn->driver->domainBlockJobAbort) {
int ret;
ret = conn->driver->domainBlockJobAbort(dom, disk, flags);
if (ret < 0)
goto error;
return ret;
}
virReportUnsupportedError();
error:
virDispatchError(dom->conn);
return -1;
}
| 0 |
[
"CWE-254"
] |
libvirt
|
506e9d6c2d4baaf580d489fff0690c0ff2ff588f
| 291,854,667,832,262,300,000,000,000,000,000,000,000 | 29 |
virDomainGetTime: Deny on RO connections
We have a policy that if API may end up talking to a guest agent
it should require RW connection. We don't obey the rule in
virDomainGetTime().
Signed-off-by: Michal Privoznik <[email protected]>
|
void virtio_gpu_process_cmdq(VirtIOGPU *g)
{
struct virtio_gpu_ctrl_command *cmd;
while (!QTAILQ_EMPTY(&g->cmdq)) {
cmd = QTAILQ_FIRST(&g->cmdq);
/* process command */
VIRGL(g, virtio_gpu_virgl_process_cmd, virtio_gpu_simple_process_cmd,
g, cmd);
if (cmd->waiting) {
break;
}
QTAILQ_REMOVE(&g->cmdq, cmd, next);
if (virtio_gpu_stats_enabled(g->conf)) {
g->stats.requests++;
}
if (!cmd->finished) {
QTAILQ_INSERT_TAIL(&g->fenceq, cmd, next);
g->inflight++;
if (virtio_gpu_stats_enabled(g->conf)) {
if (g->stats.max_inflight < g->inflight) {
g->stats.max_inflight = g->inflight;
}
fprintf(stderr, "inflight: %3d (+)\r", g->inflight);
}
} else {
g_free(cmd);
}
}
}
| 0 |
[] |
qemu
|
acfc4846508a02cc4c83aa27799fd74ac280bdb2
| 207,934,389,998,784,580,000,000,000,000,000,000,000 | 32 |
virtio-gpu: use VIRTIO_GPU_MAX_SCANOUTS
The value is defined in virtio_gpu.h already (changing from 4 to 16).
Signed-off-by: Marc-André Lureau <[email protected]>
Message-id: [email protected]
Signed-off-by: Gerd Hoffmann <[email protected]>
|
static inline int mount_entry_on_systemfs(struct mntent *mntent)
{
return mount_entry_on_generic(mntent, mntent->mnt_dir, NULL);
}
| 0 |
[
"CWE-59",
"CWE-61"
] |
lxc
|
592fd47a6245508b79fe6ac819fe6d3b2c1289be
| 205,260,484,344,746,500,000,000,000,000,000,000,000 | 4 |
CVE-2015-1335: Protect container mounts against symlinks
When a container starts up, lxc sets up the container's inital fstree
by doing a bunch of mounting, guided by the container configuration
file. The container config is owned by the admin or user on the host,
so we do not try to guard against bad entries. However, since the
mount target is in the container, it's possible that the container admin
could divert the mount with symbolic links. This could bypass proper
container startup (i.e. confinement of a root-owned container by the
restrictive apparmor policy, by diverting the required write to
/proc/self/attr/current), or bypass the (path-based) apparmor policy
by diverting, say, /proc to /mnt in the container.
To prevent this,
1. do not allow mounts to paths containing symbolic links
2. do not allow bind mounts from relative paths containing symbolic
links.
Details:
Define safe_mount which ensures that the container has not inserted any
symbolic links into any mount targets for mounts to be done during
container setup.
The host's mount path may contain symbolic links. As it is under the
control of the administrator, that's ok. So safe_mount begins the check
for symbolic links after the rootfs->mount, by opening that directory.
It opens each directory along the path using openat() relative to the
parent directory using O_NOFOLLOW. When the target is reached, it
mounts onto /proc/self/fd/<targetfd>.
Use safe_mount() in mount_entry(), when mounting container proc,
and when needed. In particular, safe_mount() need not be used in
any case where:
1. the mount is done in the container's namespace
2. the mount is for the container's rootfs
3. the mount is relative to a tmpfs or proc/sysfs which we have
just safe_mount()ed ourselves
Since we were using proc/net as a temporary placeholder for /proc/sys/net
during container startup, and proc/net is a symbolic link, use proc/tty
instead.
Update the lxc.container.conf manpage with details about the new
restrictions.
Finally, add a testcase to test some symbolic link possibilities.
Reported-by: Roman Fiedler
Signed-off-by: Serge Hallyn <[email protected]>
Acked-by: Stéphane Graber <[email protected]>
|
static void synic_init(struct kvm_vcpu_hv_synic *synic)
{
int i;
memset(synic, 0, sizeof(*synic));
synic->version = HV_SYNIC_VERSION_1;
for (i = 0; i < ARRAY_SIZE(synic->sint); i++) {
atomic64_set(&synic->sint[i], HV_SYNIC_SINT_MASKED);
atomic_set(&synic->sint_to_gsi[i], -1);
}
}
| 0 |
[
"CWE-476"
] |
linux
|
919f4ebc598701670e80e31573a58f1f2d2bf918
| 227,755,418,898,628,300,000,000,000,000,000,000,000 | 11 |
KVM: x86: hyper-v: Fix Hyper-V context null-ptr-deref
Reported by syzkaller:
KASAN: null-ptr-deref in range [0x0000000000000140-0x0000000000000147]
CPU: 1 PID: 8370 Comm: syz-executor859 Not tainted 5.11.0-syzkaller #0
RIP: 0010:synic_get arch/x86/kvm/hyperv.c:165 [inline]
RIP: 0010:kvm_hv_set_sint_gsi arch/x86/kvm/hyperv.c:475 [inline]
RIP: 0010:kvm_hv_irq_routing_update+0x230/0x460 arch/x86/kvm/hyperv.c:498
Call Trace:
kvm_set_irq_routing+0x69b/0x940 arch/x86/kvm/../../../virt/kvm/irqchip.c:223
kvm_vm_ioctl+0x12d0/0x2800 arch/x86/kvm/../../../virt/kvm/kvm_main.c:3959
vfs_ioctl fs/ioctl.c:48 [inline]
__do_sys_ioctl fs/ioctl.c:753 [inline]
__se_sys_ioctl fs/ioctl.c:739 [inline]
__x64_sys_ioctl+0x193/0x200 fs/ioctl.c:739
do_syscall_64+0x2d/0x70 arch/x86/entry/common.c:46
entry_SYSCALL_64_after_hwframe+0x44/0xae
Hyper-V context is lazily allocated until Hyper-V specific MSRs are accessed
or SynIC is enabled. However, the syzkaller testcase sets irq routing table
directly w/o enabling SynIC. This results in null-ptr-deref when accessing
SynIC Hyper-V context. This patch fixes it.
syzkaller source: https://syzkaller.appspot.com/x/repro.c?x=163342ccd00000
Reported-by: [email protected]
Fixes: 8f014550dfb1 ("KVM: x86: hyper-v: Make Hyper-V emulation enablement conditional")
Signed-off-by: Wanpeng Li <[email protected]>
Message-Id: <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
|
replace_readwrite_async_thread (GTask *task,
gpointer object,
gpointer task_data,
GCancellable *cancellable)
{
GFileIOStream *stream;
GError *error = NULL;
ReplaceRWAsyncData *data = task_data;
stream = g_file_replace_readwrite (G_FILE (object),
data->etag,
data->make_backup,
data->flags,
cancellable,
&error);
if (stream == NULL)
g_task_return_error (task, error);
else
g_task_return_pointer (task, stream, g_object_unref);
}
| 0 |
[
"CWE-362"
] |
glib
|
d8f8f4d637ce43f8699ba94c9b7648beda0ca174
| 256,426,136,109,616,100,000,000,000,000,000,000,000 | 21 |
gfile: Limit access to files when copying
file_copy_fallback creates new files with default permissions and
set the correct permissions after the operation is finished. This
might cause that the files can be accessible by more users during
the operation than expected. Use G_FILE_CREATE_PRIVATE for the new
files to limit access to those files.
|
read_signature(cdk_stream_t inp, size_t pktlen, cdk_pkt_signature_t sig)
{
size_t nbytes;
size_t i, nsig;
ssize_t size;
cdk_error_t rc;
if (!inp || !sig)
return gnutls_assert_val(CDK_Inv_Value);
if (DEBUG_PKT)
_gnutls_write_log("read_signature: %d octets\n",
(int) pktlen);
if (pktlen < 16)
return gnutls_assert_val(CDK_Inv_Packet);
sig->version = cdk_stream_getc(inp);
if (sig->version < 2 || sig->version > 4)
return gnutls_assert_val(CDK_Inv_Packet_Ver);
sig->flags.exportable = 1;
sig->flags.revocable = 1;
if (sig->version < 4) {
if (cdk_stream_getc(inp) != 5)
return gnutls_assert_val(CDK_Inv_Packet);
sig->sig_class = cdk_stream_getc(inp);
sig->timestamp = read_32(inp);
sig->keyid[0] = read_32(inp);
sig->keyid[1] = read_32(inp);
sig->pubkey_algo =
_pgp_pub_algo_to_cdk(cdk_stream_getc(inp));
sig->digest_algo =
_pgp_hash_algo_to_gnutls(cdk_stream_getc(inp));
sig->digest_start[0] = cdk_stream_getc(inp);
sig->digest_start[1] = cdk_stream_getc(inp);
nsig = cdk_pk_get_nsig(sig->pubkey_algo);
if (!nsig)
return gnutls_assert_val(CDK_Inv_Algo);
for (i = 0; i < nsig; i++) {
rc = read_mpi(inp, &sig->mpi[i], 0);
if (rc)
return gnutls_assert_val(rc);
}
} else {
sig->sig_class = cdk_stream_getc(inp);
sig->pubkey_algo =
_pgp_pub_algo_to_cdk(cdk_stream_getc(inp));
sig->digest_algo =
_pgp_hash_algo_to_gnutls(cdk_stream_getc(inp));
sig->hashed_size = read_16(inp);
size = sig->hashed_size;
sig->hashed = NULL;
while (size > 0) {
rc = read_subpkt(inp, &sig->hashed, &nbytes);
if (rc)
return gnutls_assert_val(rc);
size -= nbytes;
}
sig->unhashed_size = read_16(inp);
size = sig->unhashed_size;
sig->unhashed = NULL;
while (size > 0) {
rc = read_subpkt(inp, &sig->unhashed, &nbytes);
if (rc)
return gnutls_assert_val(rc);
size -= nbytes;
}
rc = parse_sig_subpackets(sig);
if (rc)
return gnutls_assert_val(rc);
sig->digest_start[0] = cdk_stream_getc(inp);
sig->digest_start[1] = cdk_stream_getc(inp);
nsig = cdk_pk_get_nsig(sig->pubkey_algo);
if (!nsig)
return gnutls_assert_val(CDK_Inv_Algo);
for (i = 0; i < nsig; i++) {
rc = read_mpi(inp, &sig->mpi[i], 0);
if (rc)
return gnutls_assert_val(rc);
}
}
return 0;
}
| 0 |
[
"CWE-119"
] |
gnutls
|
94fcf1645ea17223237aaf8d19132e004afddc1a
| 172,503,192,860,254,520,000,000,000,000,000,000,000 | 87 |
opencdk: read_attribute: added more precise checks when reading stream
That addresses heap read overflows found using oss-fuzz:
https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=338
https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=346
Signed-off-by: Nikos Mavrogiannopoulos <[email protected]>
|
static void subpage_write(void *opaque, hwaddr addr,
uint64_t value, unsigned len)
{
subpage_t *subpage = opaque;
uint8_t buf[8];
#if defined(DEBUG_SUBPAGE)
printf("%s: subpage %p len %u addr " TARGET_FMT_plx
" value %"PRIx64"\n",
__func__, subpage, len, addr, value);
#endif
switch (len) {
case 1:
stb_p(buf, value);
break;
case 2:
stw_p(buf, value);
break;
case 4:
stl_p(buf, value);
break;
case 8:
stq_p(buf, value);
break;
default:
abort();
}
address_space_write(subpage->as, addr + subpage->base, buf, len);
}
| 0 |
[] |
qemu
|
c3c1bb99d1c11978d9ce94d1bdcf0705378c1459
| 110,323,293,291,119,680,000,000,000,000,000,000,000 | 29 |
exec: Respect as_tranlsate_internal length clamp
address_space_translate_internal will clamp the *plen length argument
based on the size of the memory region being queried. The iommu walker
logic in addresss_space_translate was ignoring this by discarding the
post fn call value of *plen. Fix by just always using *plen as the
length argument throughout the fn, removing the len local variable.
This fixes a bootloader bug when a single elf section spans multiple
QEMU memory regions.
Signed-off-by: Peter Crosthwaite <[email protected]>
Message-Id: <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
|
void ssh_reseed(void){
}
| 0 |
[
"CWE-310"
] |
libssh
|
e99246246b4061f7e71463f8806b9dcad65affa0
| 291,255,402,854,087,080,000,000,000,000,000,000,000 | 2 |
security: fix for vulnerability CVE-2014-0017
When accepting a new connection, a forking server based on libssh forks
and the child process handles the request. The RAND_bytes() function of
openssl doesn't reset its state after the fork, but simply adds the
current process id (getpid) to the PRNG state, which is not guaranteed
to be unique.
This can cause several children to end up with same PRNG state which is
a security issue.
|
XML_GetCurrentColumnNumber(XML_Parser parser) {
if (parser == NULL)
return 0;
if (parser->m_eventPtr && parser->m_eventPtr >= parser->m_positionPtr) {
XmlUpdatePosition(parser->m_encoding, parser->m_positionPtr,
parser->m_eventPtr, &parser->m_position);
parser->m_positionPtr = parser->m_eventPtr;
}
return parser->m_position.columnNumber;
}
| 0 |
[
"CWE-611",
"CWE-776",
"CWE-415",
"CWE-125"
] |
libexpat
|
c20b758c332d9a13afbbb276d30db1d183a85d43
| 299,791,044,091,602,560,000,000,000,000,000,000,000 | 10 |
xmlparse.c: Deny internal entities closing the doctype
|
uint32_t CompactProtocolWriter::serializedSizeZCBinary(
folly::ByteRange v) const {
return serializedSizeBinary(v);
}
| 0 |
[
"CWE-703",
"CWE-770"
] |
fbthrift
|
c9a903e5902834e95bbd4ab0e9fa53ba0189f351
| 81,529,957,751,399,490,000,000,000,000,000,000,000 | 4 |
Better handling of truncated data when reading strings
Summary:
Currently we read string size and blindly pre-allocate it. This allows malicious attacker to send a few bytes message and cause server to allocate huge amount of memory (>1GB).
This diff changes the logic to check if we have enough data in the buffer before allocating the string.
This is a second part of a fix for CVE-2019-3553.
Reviewed By: vitaut
Differential Revision: D14393393
fbshipit-source-id: e2046d2f5b087d3abc9a9d2c6c107cf088673057
|
static int report_consumption_iter(void *ctx, void *val)
{
h2_stream *stream = val;
h2_mplx *m = ctx;
input_consumed_signal(m, stream);
if (stream->state == H2_SS_CLOSED_L
&& (!stream->task || stream->task->worker_done)) {
ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, m->c,
H2_STRM_LOG(APLOGNO(10026), stream, "remote close missing"));
nghttp2_submit_rst_stream(stream->session->ngh2, NGHTTP2_FLAG_NONE,
stream->id, NGHTTP2_NO_ERROR);
}
return 1;
}
| 0 |
[
"CWE-444"
] |
mod_h2
|
825de6a46027b2f4c30d7ff5a0c8b852d639c207
| 245,037,540,647,311,800,000,000,000,000,000,000,000 | 15 |
* Fixed keepalives counter on slave connections.
|
getenv_TZ (void)
{
return getenv ("TZ");
}
| 0 |
[] |
gnulib
|
94e01571507835ff59dd8ce2a0b56a4b566965a4
| 288,769,467,144,585,940,000,000,000,000,000,000,000 | 4 |
time_rz: fix heap buffer overflow vulnerability
This issue has been assigned CVE-2017-7476 and was
detected with American Fuzzy Lop 2.41b run on the
coreutils date(1) program with ASAN enabled.
ERROR: AddressSanitizer: heap-buffer-overflow on address 0x...
WRITE of size 8 at 0x60d00000cff8 thread T0
#1 0x443020 in extend_abbrs lib/time_rz.c:88
#2 0x443356 in save_abbr lib/time_rz.c:155
#3 0x44393f in localtime_rz lib/time_rz.c:290
#4 0x41e4fe in parse_datetime2 lib/parse-datetime.y:1798
A minimized reproducer is the following 120 byte TZ value,
which goes beyond the value of ABBR_SIZE_MIN (119) on x86_64.
Extend the aa...b portion to overwrite more of the heap.
date -d $(printf 'TZ="aaa%020daaaaaab%089d"')
localtime_rz and mktime_z were affected since commit 4bc76593.
parse_datetime was affected since commit 4e6e16b3f.
* lib/time_rz.c (save_abbr): Rearrange the calculation determining
whether there is enough buffer space available. The rearrangement
ensures we're only dealing with positive numbers, thus avoiding
the problematic promotion of signed to unsigned causing an invalid
comparison when zone_copy is more than ABBR_SIZE_MIN bytes beyond
the start of the buffer.
* tests/test-parse-datetime.c (main): Add a test case written by
Paul Eggert, which overwrites enough of the heap so that
standard glibc will fail with "free(): invalid pointer"
without the patch applied.
Reported and analyzed at https://bugzilla.redhat.com/1444774
|
Subsets and Splits
CWE 416 & 19
The query filters records related to specific CWEs (Common Weakness Enumerations), providing a basic overview of entries with these vulnerabilities but without deeper analysis.
CWE Frequency in Train Set
Counts the occurrences of each CWE (Common Weakness Enumeration) in the dataset, providing a basic distribution but limited insight.