| func (string, 0-484k chars) | target (int64, 0-1) | cwe (list, 0-4 tags) | project (string, 799 classes) | commit_id (string, 40 chars) | hash (float64) | size (int64, 1-24k) | message (string, 0-13.3k chars) |
|---|---|---|---|---|---|---|---|
static Node *GetCDATA( TidyDocImpl* doc, Node *container )
{
Lexer* lexer = doc->lexer;
uint start = 0;
int nested = 0;
CDATAState state = CDATA_INTERMEDIATE;
uint i;
Bool isEmpty = yes;
Bool matches = no;
uint c;
Bool hasSrc = TY_(AttrGetById)(container, TidyAttr_SRC) != NULL;
SetLexerLocus( doc, lexer );
lexer->waswhite = no;
lexer->txtstart = lexer->txtend = lexer->lexsize;
/* seen start tag, look for matching end tag */
while ((c = TY_(ReadChar)(doc->docIn)) != EndOfStream)
{
TY_(AddCharToLexer)(lexer, c);
lexer->txtend = lexer->lexsize;
if (state == CDATA_INTERMEDIATE)
{
if (c != '<')
{
if (isEmpty && !TY_(IsWhite)(c))
isEmpty = no;
continue;
}
c = TY_(ReadChar)(doc->docIn);
if (TY_(IsLetter)(c))
{
/* <head><script src=foo><meta name=foo content=bar>*/
if (hasSrc && isEmpty && nodeIsSCRIPT(container))
{
/* ReportError(doc, container, NULL, MISSING_ENDTAG_FOR); */
lexer->lexsize = lexer->txtstart;
TY_(UngetChar)(c, doc->docIn);
TY_(UngetChar)('<', doc->docIn);
return NULL;
}
TY_(AddCharToLexer)(lexer, c);
start = lexer->lexsize - 1;
state = CDATA_STARTTAG;
}
else if (c == '/')
{
TY_(AddCharToLexer)(lexer, c);
c = TY_(ReadChar)(doc->docIn);
if (!TY_(IsLetter)(c))
{
TY_(UngetChar)(c, doc->docIn);
continue;
}
TY_(UngetChar)(c, doc->docIn);
start = lexer->lexsize;
state = CDATA_ENDTAG;
}
else if (c == '\\')
{
/* recognize document.write("<script><\/script>") */
TY_(AddCharToLexer)(lexer, c);
c = TY_(ReadChar)(doc->docIn);
if (c != '/')
{
TY_(UngetChar)(c, doc->docIn);
continue;
}
TY_(AddCharToLexer)(lexer, c);
c = TY_(ReadChar)(doc->docIn);
if (!TY_(IsLetter)(c))
{
TY_(UngetChar)(c, doc->docIn);
continue;
}
TY_(UngetChar)(c, doc->docIn);
start = lexer->lexsize;
state = CDATA_ENDTAG;
}
else
{
TY_(UngetChar)(c, doc->docIn);
}
}
/* '<' + Letter found */
else if (state == CDATA_STARTTAG)
{
if (TY_(IsLetter)(c))
continue;
matches = TY_(tmbstrncasecmp)(container->element, lexer->lexbuf + start,
TY_(tmbstrlen)(container->element)) == 0;
if (matches)
nested++;
state = CDATA_INTERMEDIATE;
}
/* '<' + '/' + Letter found */
else if (state == CDATA_ENDTAG)
{
if (TY_(IsLetter)(c))
continue;
matches = TY_(tmbstrncasecmp)(container->element, lexer->lexbuf + start,
TY_(tmbstrlen)(container->element)) == 0;
if (isEmpty && !matches)
{
/* ReportError(doc, container, NULL, MISSING_ENDTAG_FOR); */
for (i = lexer->lexsize - 1; i >= start; --i)
TY_(UngetChar)((uint)lexer->lexbuf[i], doc->docIn);
TY_(UngetChar)('/', doc->docIn);
TY_(UngetChar)('<', doc->docIn);
break;
}
if (matches && nested-- <= 0)
{
for (i = lexer->lexsize - 1; i >= start; --i)
TY_(UngetChar)((uint)lexer->lexbuf[i], doc->docIn);
TY_(UngetChar)('/', doc->docIn);
TY_(UngetChar)('<', doc->docIn);
lexer->lexsize -= (lexer->lexsize - start) + 2;
break;
}
else if (lexer->lexbuf[start - 2] != '\\')
{
/* if the end tag is not already escaped using backslash */
SetLexerLocus( doc, lexer );
lexer->columns -= 3;
TY_(ReportError)(doc, NULL, NULL, BAD_CDATA_CONTENT);
/* if javascript insert backslash before / */
if (TY_(IsJavaScript)(container))
{
for (i = lexer->lexsize; i > start-1; --i)
lexer->lexbuf[i] = lexer->lexbuf[i-1];
lexer->lexbuf[start-1] = '\\';
lexer->lexsize++;
}
}
state = CDATA_INTERMEDIATE;
}
}
if (isEmpty)
lexer->lexsize = lexer->txtstart = lexer->txtend;
else
lexer->txtend = lexer->lexsize;
if (c == EndOfStream)
TY_(ReportError)(doc, container, NULL, MISSING_ENDTAG_FOR );
/* this was disabled for some reason... */
#if 0
if (lexer->txtend > lexer->txtstart)
return TextToken(lexer);
else
return NULL;
#else
return TY_(TextToken)(lexer);
#endif
}
| 0 |
[
"CWE-119"
] |
tidy-html5
|
c18f27a58792f7fbd0b30a0ff50d6b40a82f940d
| 226,920,318,769,718,550,000,000,000,000,000,000,000 | 175 |
Issue #217 - avoid len going negative, ever...
|
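The commit subject above ("avoid len going negative, ever") points at the unsigned loop counters in GetCDATA: `for (i = lexer->lexsize - 1; i >= start; --i)` can never terminate once `start` is 0, because a `uint` decremented past zero wraps to `UINT_MAX` and is then used as an index (the CWE-119 tag on this row). A minimal runnable sketch of the pitfall and a safe rewrite, with hypothetical names rather than the actual tidy-html5 patch:

```c
#include <stdio.h>

/* With an unsigned counter, "i >= start" is always true when start == 0:
 * --i wraps from 0 to UINT_MAX instead of going negative, and buf[i]
 * then reads far out of bounds. */
static void unget_range(const char *buf, unsigned start, unsigned end)
{
    /* Safe pattern: compare before decrementing, so i never wraps. */
    for (unsigned i = end; i > start; ) {
        --i;
        printf("unget '%c'\n", buf[i]);
    }
}

int main(void)
{
    unget_range("abc", 0, 3);   /* prints c, b, a and terminates */
    return 0;
}
```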
struct sk_buff *skb_segment(struct sk_buff *head_skb,
netdev_features_t features)
{
struct sk_buff *segs = NULL;
struct sk_buff *tail = NULL;
struct sk_buff *list_skb = skb_shinfo(head_skb)->frag_list;
skb_frag_t *frag = skb_shinfo(head_skb)->frags;
unsigned int mss = skb_shinfo(head_skb)->gso_size;
unsigned int doffset = head_skb->data - skb_mac_header(head_skb);
struct sk_buff *frag_skb = head_skb;
unsigned int offset = doffset;
unsigned int tnl_hlen = skb_tnl_header_len(head_skb);
unsigned int headroom;
unsigned int len;
__be16 proto;
bool csum;
int sg = !!(features & NETIF_F_SG);
int nfrags = skb_shinfo(head_skb)->nr_frags;
int err = -ENOMEM;
int i = 0;
int pos;
proto = skb_network_protocol(head_skb);
if (unlikely(!proto))
return ERR_PTR(-EINVAL);
csum = !!can_checksum_protocol(features, proto);
__skb_push(head_skb, doffset);
headroom = skb_headroom(head_skb);
pos = skb_headlen(head_skb);
do {
struct sk_buff *nskb;
skb_frag_t *nskb_frag;
int hsize;
int size;
len = head_skb->len - offset;
if (len > mss)
len = mss;
hsize = skb_headlen(head_skb) - offset;
if (hsize < 0)
hsize = 0;
if (hsize > len || !sg)
hsize = len;
if (!hsize && i >= nfrags && skb_headlen(list_skb) &&
(skb_headlen(list_skb) == len || sg)) {
BUG_ON(skb_headlen(list_skb) > len);
i = 0;
nfrags = skb_shinfo(list_skb)->nr_frags;
frag = skb_shinfo(list_skb)->frags;
frag_skb = list_skb;
pos += skb_headlen(list_skb);
while (pos < offset + len) {
BUG_ON(i >= nfrags);
size = skb_frag_size(frag);
if (pos + size > offset + len)
break;
i++;
pos += size;
frag++;
}
nskb = skb_clone(list_skb, GFP_ATOMIC);
list_skb = list_skb->next;
if (unlikely(!nskb))
goto err;
if (unlikely(pskb_trim(nskb, len))) {
kfree_skb(nskb);
goto err;
}
hsize = skb_end_offset(nskb);
if (skb_cow_head(nskb, doffset + headroom)) {
kfree_skb(nskb);
goto err;
}
nskb->truesize += skb_end_offset(nskb) - hsize;
skb_release_head_state(nskb);
__skb_push(nskb, doffset);
} else {
nskb = __alloc_skb(hsize + doffset + headroom,
GFP_ATOMIC, skb_alloc_rx_flag(head_skb),
NUMA_NO_NODE);
if (unlikely(!nskb))
goto err;
skb_reserve(nskb, headroom);
__skb_put(nskb, doffset);
}
if (segs)
tail->next = nskb;
else
segs = nskb;
tail = nskb;
__copy_skb_header(nskb, head_skb);
nskb->mac_len = head_skb->mac_len;
skb_headers_offset_update(nskb, skb_headroom(nskb) - headroom);
skb_copy_from_linear_data_offset(head_skb, -tnl_hlen,
nskb->data - tnl_hlen,
doffset + tnl_hlen);
if (nskb->len == len + doffset)
goto perform_csum_check;
if (!sg) {
nskb->ip_summed = CHECKSUM_NONE;
nskb->csum = skb_copy_and_csum_bits(head_skb, offset,
skb_put(nskb, len),
len, 0);
continue;
}
nskb_frag = skb_shinfo(nskb)->frags;
skb_copy_from_linear_data_offset(head_skb, offset,
skb_put(nskb, hsize), hsize);
skb_shinfo(nskb)->tx_flags = skb_shinfo(head_skb)->tx_flags &
SKBTX_SHARED_FRAG;
while (pos < offset + len) {
if (i >= nfrags) {
BUG_ON(skb_headlen(list_skb));
i = 0;
nfrags = skb_shinfo(list_skb)->nr_frags;
frag = skb_shinfo(list_skb)->frags;
frag_skb = list_skb;
BUG_ON(!nfrags);
list_skb = list_skb->next;
}
if (unlikely(skb_shinfo(nskb)->nr_frags >=
MAX_SKB_FRAGS)) {
net_warn_ratelimited(
"skb_segment: too many frags: %u %u\n",
pos, mss);
goto err;
}
if (unlikely(skb_orphan_frags(frag_skb, GFP_ATOMIC)))
goto err;
*nskb_frag = *frag;
__skb_frag_ref(nskb_frag);
size = skb_frag_size(nskb_frag);
if (pos < offset) {
nskb_frag->page_offset += offset - pos;
skb_frag_size_sub(nskb_frag, offset - pos);
}
skb_shinfo(nskb)->nr_frags++;
if (pos + size <= offset + len) {
i++;
frag++;
pos += size;
} else {
skb_frag_size_sub(nskb_frag, pos + size - (offset + len));
goto skip_fraglist;
}
nskb_frag++;
}
skip_fraglist:
nskb->data_len = len - hsize;
nskb->len += nskb->data_len;
nskb->truesize += nskb->data_len;
perform_csum_check:
if (!csum) {
nskb->csum = skb_checksum(nskb, doffset,
nskb->len - doffset, 0);
nskb->ip_summed = CHECKSUM_NONE;
}
} while ((offset += len) < head_skb->len);
return segs;
err:
kfree_skb_list(segs);
return ERR_PTR(err);
}
| 0 |
[
"CWE-416"
] |
net
|
36d5fe6a000790f56039afe26834265db0a3ad4c
| 20,811,249,040,694,270,000,000,000,000,000,000,000 | 202 |
core, nfqueue, openvswitch: Orphan frags in skb_zerocopy and handle errors
skb_zerocopy can copy elements of the frags array between skbs, but it doesn't
orphan them. Also, it doesn't handle errors, so this patch takes care of that
as well, and modify the callers accordingly. skb_tx_error() is also added to
the callers so they will signal the failed delivery towards the creator of the
skb.
Signed-off-by: Zoltan Kiss <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
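The calling convention this message describes, skb_zerocopy() gaining a return value and callers signalling failed delivery with skb_tx_error(), has roughly this shape. A hedged kernel-context sketch, not the verbatim patch:

```c
/* The helper now reports failure; the caller frees the partially built
 * skb and signals tx error toward the creator of the source skb. */
err = skb_zerocopy(to, from, len, hlen);
if (err) {
    kfree_skb(to);
    skb_tx_error(from);
    return err;
}
```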
static inline bool skip_blocked_update(struct sched_entity *se)
{
struct cfs_rq *gcfs_rq = group_cfs_rq(se);
/*
* If sched_entity still have not zero load or utilization, we have to
* decay it:
*/
if (se->avg.load_avg || se->avg.util_avg)
return false;
/*
* If there is a pending propagation, we have to update the load and
* the utilization of the sched_entity:
*/
if (gcfs_rq->propagate)
return false;
/*
* Otherwise, the load and the utilization of the sched_entity is
* already zero and there is no pending propagation, so it will be a
* waste of time to try to decay it:
*/
return true;
}
| 0 |
[
"CWE-400",
"CWE-703",
"CWE-835"
] |
linux
|
c40f7d74c741a907cfaeb73a7697081881c497d0
| 266,330,265,376,606,350,000,000,000,000,000,000,000 | 25 |
sched/fair: Fix infinite loop in update_blocked_averages() by reverting a9e7f6544b9c
Zhipeng Xie, Xie XiuQi and Sargun Dhillon reported lockups in the
scheduler under high loads, starting at around the v4.18 time frame,
and Zhipeng Xie tracked it down to bugs in the rq->leaf_cfs_rq_list
manipulation.
Do a (manual) revert of:
a9e7f6544b9c ("sched/fair: Fix O(nr_cgroups) in load balance path")
It turns out that the list_del_leaf_cfs_rq() introduced by this commit
has a surprising property that was not considered in followup commits
such as:
9c2791f936ef ("sched/fair: Fix hierarchical order in rq->leaf_cfs_rq_list")
As Vincent Guittot explains:
"I think that there is a bigger problem with commit a9e7f6544b9c and
cfs_rq throttling:
Let take the example of the following topology TG2 --> TG1 --> root:
1) The 1st time a task is enqueued, we will add TG2 cfs_rq then TG1
cfs_rq to leaf_cfs_rq_list and we are sure to do the whole branch in
one path because it has never been used and can't be throttled so
tmp_alone_branch will point to leaf_cfs_rq_list at the end.
2) Then TG1 is throttled
3) and we add TG3 as a new child of TG1.
4) The 1st enqueue of a task on TG3 will add TG3 cfs_rq just before TG1
cfs_rq and tmp_alone_branch will stay on rq->leaf_cfs_rq_list.
With commit a9e7f6544b9c, we can del a cfs_rq from rq->leaf_cfs_rq_list.
So if the load of TG1 cfs_rq becomes NULL before step 2) above, TG1
cfs_rq is removed from the list.
Then at step 4), TG3 cfs_rq is added at the beginning of rq->leaf_cfs_rq_list
but tmp_alone_branch still points to TG3 cfs_rq because its throttled
parent can't be enqueued when the lock is released.
tmp_alone_branch doesn't point to rq->leaf_cfs_rq_list whereas it should.
So if TG3 cfs_rq is removed or destroyed before tmp_alone_branch
points on another TG cfs_rq, the next TG cfs_rq that will be added,
will be linked outside rq->leaf_cfs_rq_list - which is bad.
In addition, we can break the ordering of the cfs_rq in
rq->leaf_cfs_rq_list but this ordering is used to update and
propagate the update from leaf down to root."
Instead of trying to work through all these cases and trying to reproduce
the very high loads that produced the lockup to begin with, simplify
the code temporarily by reverting a9e7f6544b9c - which change was clearly
not thought through completely.
This (hopefully) gives us a kernel that doesn't lock up so people
can continue to enjoy their holidays without worrying about regressions. ;-)
[ mingo: Wrote changelog, fixed weird spelling in code comment while at it. ]
Analyzed-by: Xie XiuQi <[email protected]>
Analyzed-by: Vincent Guittot <[email protected]>
Reported-by: Zhipeng Xie <[email protected]>
Reported-by: Sargun Dhillon <[email protected]>
Reported-by: Xie XiuQi <[email protected]>
Tested-by: Zhipeng Xie <[email protected]>
Tested-by: Sargun Dhillon <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
Acked-by: Vincent Guittot <[email protected]>
Cc: <[email protected]> # v4.13+
Cc: Bin Li <[email protected]>
Cc: Mike Galbraith <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Tejun Heo <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Fixes: a9e7f6544b9c ("sched/fair: Fix O(nr_cgroups) in load balance path")
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
|
static NORETURN void die_initial_contact(int unexpected)
{
/*
* A hang-up after seeing some response from the other end
* means that it is unexpected, as we know the other end is
* willing to talk to us. A hang-up before seeing any
* response does not necessarily mean an ACL problem, though.
*/
if (unexpected)
die(_("the remote end hung up upon initial contact"));
else
die(_("Could not read from remote repository.\n\n"
"Please make sure you have the correct access rights\n"
"and the repository exists."));
}
| 0 |
[] |
git
|
a02ea577174ab8ed18f847cf1693f213e0b9c473
| 339,987,034,426,028,160,000,000,000,000,000,000,000 | 15 |
git_connect_git(): forbid newlines in host and path
When we connect to a git:// server, we send an initial request that
looks something like:
002dgit-upload-pack repo.git\0host=example.com
If the repo path contains a newline, then it's included literally, and
we get:
002egit-upload-pack repo
.git\0host=example.com
This works fine if you really do have a newline in your repository name;
the server side uses the pktline framing to parse the string, not
newlines. However, there are many _other_ protocols in the wild that do
parse on newlines, such as HTTP. So a carefully constructed git:// URL
can actually turn into a valid HTTP request. For example:
git://localhost:1234/%0d%0a%0d%0aGET%20/%20HTTP/1.1%0d%0aHost:localhost%0d%0a%0d%0a
becomes:
0050git-upload-pack /
GET / HTTP/1.1
Host:localhost
host=localhost:1234
on the wire. Again, this isn't a problem for a real Git server, but it
does mean that feeding a malicious URL to Git (e.g., through a
submodule) can cause it to make unexpected cross-protocol requests.
Since repository names with newlines are presumably quite rare (and
indeed, we already disallow them in git-over-http), let's just disallow
them over this protocol.
Hostnames could likewise inject a newline, but this is unlikely a
problem in practice; we'd try resolving the hostname with a newline in
it, which wouldn't work. Still, it doesn't hurt to err on the side of
caution there, since we would not expect them to work in the first
place.
The ssh and local code paths are unaffected by this patch. In both cases
we're trying to run upload-pack via a shell, and will quote the newline
so that it makes it intact. An attacker can point an ssh url at an
arbitrary port, of course, but unless there's an actual ssh server
there, we'd never get as far as sending our shell command anyway. We
_could_ similarly restrict newlines in those protocols out of caution,
but there seems little benefit to doing so.
The new test here is run alongside the git-daemon tests, which cover the
same protocol, but it shouldn't actually contact the daemon at all. In
theory we could make the test more robust by setting up an actual
repository with a newline in it (so that our clone would succeed if our
new check didn't kick in). But a repo directory with newline in it is
likely not portable across all filesystems. Likewise, we could check
git-daemon's log that it was not contacted at all, but we do not
currently record the log (and anyway, it would make the test racy with
the daemon's log write). We'll just check the client-side stderr to make
sure we hit the expected code path.
Reported-by: Harold Kim <[email protected]>
Signed-off-by: Jeff King <[email protected]>
Signed-off-by: Junio C Hamano <[email protected]>
|
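The guard this message describes amounts to rejecting newlines before the host and path are spliced into the pktline request. A minimal runnable sketch; the helper name and error text are illustrative, not the exact git patch:

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Refuse any git:// host or repo path containing a newline, so the
 * request can never be reframed as a different line-based protocol. */
static void check_no_newline(const char *s, const char *what)
{
    if (strchr(s, '\n')) {
        fprintf(stderr, "fatal: newline is forbidden in git:// %s\n", what);
        exit(128);
    }
}

int main(void)
{
    check_no_newline("example.com", "hosts");       /* passes */
    check_no_newline("/repo\n.git", "repo paths");  /* dies here */
    return 0;
}
```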
NOEXPORT char **arg_alloc(char *str) {
size_t max_arg, i;
char **tmp, **retval;
max_arg=strlen(str)/2+1;
tmp=str_alloc((max_arg+1)*sizeof(char *));
i=0;
while(*str && i<max_arg) {
tmp[i++]=str;
while(*str && !isspace((unsigned char)*str))
++str;
while(*str && isspace((unsigned char)*str))
*str++='\0';
}
tmp[i]=NULL; /* null-terminate the table */
retval=arg_dup(tmp);
str_free(tmp);
return retval;
}
| 0 |
[
"CWE-295"
] |
stunnel
|
ebad9ddc4efb2635f37174c9d800d06206f1edf9
| 279,037,601,067,551,200,000,000,000,000,000,000,000 | 21 |
stunnel-5.57
|
static int selinux_syslog(int type)
{
int rc;
switch (type) {
case SYSLOG_ACTION_READ_ALL: /* Read last kernel messages */
case SYSLOG_ACTION_SIZE_BUFFER: /* Return size of the log buffer */
rc = task_has_system(current, SYSTEM__SYSLOG_READ);
break;
case SYSLOG_ACTION_CONSOLE_OFF: /* Disable logging to console */
case SYSLOG_ACTION_CONSOLE_ON: /* Enable logging to console */
/* Set level of messages printed to console */
case SYSLOG_ACTION_CONSOLE_LEVEL:
rc = task_has_system(current, SYSTEM__SYSLOG_CONSOLE);
break;
case SYSLOG_ACTION_CLOSE: /* Close log */
case SYSLOG_ACTION_OPEN: /* Open log */
case SYSLOG_ACTION_READ: /* Read from log */
case SYSLOG_ACTION_READ_CLEAR: /* Read/clear last kernel messages */
case SYSLOG_ACTION_CLEAR: /* Clear ring buffer */
default:
rc = task_has_system(current, SYSTEM__SYSLOG_MOD);
break;
}
return rc;
}
| 0 |
[
"CWE-264"
] |
linux
|
259e5e6c75a910f3b5e656151dc602f53f9d7548
| 325,801,003,644,199,170,000,000,000,000,000,000,000 | 26 |
Add PR_{GET,SET}_NO_NEW_PRIVS to prevent execve from granting privs
With this change, calling
prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)
disables privilege granting operations at execve-time. For example, a
process will not be able to execute a setuid binary to change their uid
or gid if this bit is set. The same is true for file capabilities.
Additionally, LSM_UNSAFE_NO_NEW_PRIVS is defined to ensure that
LSMs respect the requested behavior.
To determine if the NO_NEW_PRIVS bit is set, a task may call
prctl(PR_GET_NO_NEW_PRIVS, 0, 0, 0, 0);
It returns 1 if set and 0 if it is not set. If any of the arguments are
non-zero, it will return -1 and set errno to -EINVAL.
(PR_SET_NO_NEW_PRIVS behaves similarly.)
This functionality is desired for the proposed seccomp filter patch
series. By using PR_SET_NO_NEW_PRIVS, it allows a task to modify the
system call behavior for itself and its child tasks without being
able to impact the behavior of a more privileged task.
Another potential use is making certain privileged operations
unprivileged. For example, chroot may be considered "safe" if it cannot
affect privileged tasks.
Note, this patch causes execve to fail when PR_SET_NO_NEW_PRIVS is
set and AppArmor is in use. It is fixed in a subsequent patch.
Signed-off-by: Andy Lutomirski <[email protected]>
Signed-off-by: Will Drewry <[email protected]>
Acked-by: Eric Paris <[email protected]>
Acked-by: Kees Cook <[email protected]>
v18: updated change desc
v17: using new define values as per 3.4
Signed-off-by: James Morris <[email protected]>
|
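The prctl() calls quoted in the message are the real API this patch series introduced (available since Linux 3.5). A runnable userspace sketch of the described usage:

```c
#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_SET_NO_NEW_PRIVS   /* defined by kernel headers >= 3.5 */
#define PR_SET_NO_NEW_PRIVS 38
#define PR_GET_NO_NEW_PRIVS 39
#endif

int main(void)
{
    /* After this call, execve() may no longer grant privileges: setuid
     * bits and file capabilities are ignored for this task and its
     * children, and the bit cannot be cleared again. */
    if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0) != 0) {
        perror("PR_SET_NO_NEW_PRIVS");
        return 1;
    }
    printf("no_new_privs = %d\n", prctl(PR_GET_NO_NEW_PRIVS, 0, 0, 0, 0));
    return 0;
}
```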
vfs_kern_mount(struct file_system_type *type, int flags, const char *name, void *data)
{
struct mount *mnt;
struct dentry *root;
if (!type)
return ERR_PTR(-ENODEV);
mnt = alloc_vfsmnt(name);
if (!mnt)
return ERR_PTR(-ENOMEM);
if (flags & MS_KERNMOUNT)
mnt->mnt.mnt_flags = MNT_INTERNAL;
root = mount_fs(type, flags, name, data);
if (IS_ERR(root)) {
mnt_free_id(mnt);
free_vfsmnt(mnt);
return ERR_CAST(root);
}
mnt->mnt.mnt_root = root;
mnt->mnt.mnt_sb = root->d_sb;
mnt->mnt_mountpoint = mnt->mnt.mnt_root;
mnt->mnt_parent = mnt;
lock_mount_hash();
list_add_tail(&mnt->mnt_instance, &root->d_sb->s_mounts);
unlock_mount_hash();
return &mnt->mnt;
}
| 0 |
[
"CWE-269"
] |
user-namespace
|
a6138db815df5ee542d848318e5dae681590fccd
| 190,380,735,822,315,260,000,000,000,000,000,000,000 | 31 |
mnt: Only change user settable mount flags in remount
Kenton Varda <[email protected]> discovered that by remounting a
read-only bind mount read-only in a user namespace the
MNT_LOCK_READONLY bit would be cleared, allowing an unprivileged user
to the remount a read-only mount read-write.
Correct this by replacing the mask of mount flags to preserve
with a mask of mount flags that may be changed, and preserve
all others. This ensures that any future bugs with this mask and
remount will fail in an easy to detect way where new mount flags
simply won't change.
Cc: [email protected]
Acked-by: Serge E. Hallyn <[email protected]>
Signed-off-by: "Eric W. Biederman" <[email protected]>
|
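The mask inversion this message describes, keeping every flag except the explicitly user-settable ones so that new MNT_LOCK_* bits fail closed, looks roughly like this. A hedged kernel-context sketch; the macro name is illustrative:

```c
/* List the flags userspace may *change* instead of the flags to
 * *preserve*: anything outside the mask, including future lock bits,
 * is carried over unchanged and cannot be silently cleared by remount. */
#define MNT_USER_CHANGEABLE  (MNT_NOSUID | MNT_NODEV | MNT_NOEXEC | \
                              MNT_NOATIME | MNT_NODIRATIME | \
                              MNT_RELATIME | MNT_READONLY)

mnt_flags = (requested_flags &  MNT_USER_CHANGEABLE) |
            (old_flags       & ~MNT_USER_CHANGEABLE);
```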
fr_window_clipboard_remove_file_list (FrWindow *window,
GList *file_list)
{
GList *scan1;
if (window->priv->copy_data == NULL)
return;
if (file_list == NULL) {
fr_clipboard_data_unref (window->priv->copy_data);
window->priv->copy_data = NULL;
return;
}
for (scan1 = file_list; scan1; scan1 = scan1->next) {
const char *name1 = scan1->data;
GList *scan2;
for (scan2 = window->priv->copy_data->files; scan2;) {
const char *name2 = scan2->data;
if (strcmp (name1, name2) == 0) {
GList *tmp = scan2->next;
window->priv->copy_data->files = g_list_remove_link (window->priv->copy_data->files, scan2);
g_free (scan2->data);
g_list_free (scan2);
scan2 = tmp;
}
else
scan2 = scan2->next;
}
}
if (window->priv->copy_data->files == NULL) {
fr_clipboard_data_unref (window->priv->copy_data);
window->priv->copy_data = NULL;
gtk_clipboard_clear (gtk_widget_get_clipboard (GTK_WIDGET (window), FR_CLIPBOARD));
}
}
| 0 |
[
"CWE-22"
] |
file-roller
|
b147281293a8307808475e102a14857055f81631
| 177,717,602,798,435,340,000,000,000,000,000,000,000 | 39 |
libarchive: sanitize filenames before extracting
|
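For the CWE-22 tag on this row, "sanitize filenames before extracting" conventionally means rejecting absolute paths and ".." components before an archive entry name touches the filesystem. A minimal runnable sketch with a hypothetical helper, not the actual file-roller patch:

```c
#include <stdio.h>
#include <string.h>

/* Reject entry names that are absolute or contain a ".." path component,
 * so extraction can never escape the destination directory. */
static int is_safe_entry_name(const char *name)
{
    if (name[0] == '/')
        return 0;
    for (const char *p = name; (p = strstr(p, "..")) != NULL; p += 2) {
        int at_start = (p == name) || (p[-1] == '/');
        int at_end   = (p[2] == '\0') || (p[2] == '/');
        if (at_start && at_end)
            return 0;
    }
    return 1;
}

int main(void)
{
    printf("%d %d %d\n",
           is_safe_entry_name("docs/readme.txt"),  /* 1 */
           is_safe_entry_name("../etc/passwd"),    /* 0 */
           is_safe_entry_name("/etc/passwd"));     /* 0 */
    return 0;
}
```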
_XimReconnectModeSetAttr(
Xim im)
{
XimDefIMValues im_values;
if(!_XimSetIMResourceList(&im->core.im_resources,
&im->core.im_num_resources)) {
return False;
}
if(!_XimSetICResourceList(&im->core.ic_resources,
&im->core.ic_num_resources)) {
return False;
}
_XimSetIMMode(im->core.im_resources, im->core.im_num_resources);
if (im->private.proto.default_styles) {
if (im->core.styles)
Xfree(im->core.styles);
im->core.styles = im->private.proto.default_styles;
}
return True;
}
| 0 |
[
"CWE-190"
] |
libx11
|
1a566c9e00e5f35c1f9e7f3d741a02e5170852b2
| 133,990,162,655,866,770,000,000,000,000,000,000,000 | 24 |
Zero out buffers in functions
It looks like uninitialized stack or heap memory can leak
out via padding bytes.
Signed-off-by: Matthieu Herrb <[email protected]>
Reviewed-by: Matthieu Herrb <[email protected]>
|
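The fix class named here ("zero out buffers") targets padding bytes that reach the wire; the idiom is simply to clear a reply buffer before populating it. A generic sketch, not the libX11 diff:

```c
#include <string.h>

/* Zero the whole buffer first, so struct padding and any fields left
 * unset can never leak stale stack or heap contents. */
void build_reply(char *buf, size_t buflen, const char *payload, size_t n)
{
    memset(buf, 0, buflen);
    if (n > buflen)
        n = buflen;
    memcpy(buf, payload, n);
}
```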
static __init int svm_hardware_setup(void)
{
int cpu;
struct page *iopm_pages;
void *iopm_va;
int r;
unsigned int order = get_order(IOPM_SIZE);
/*
* NX is required for shadow paging and for NPT if the NX huge pages
* mitigation is enabled.
*/
if (!boot_cpu_has(X86_FEATURE_NX)) {
pr_err_ratelimited("NX (Execute Disable) not supported\n");
return -EOPNOTSUPP;
}
kvm_enable_efer_bits(EFER_NX);
iopm_pages = alloc_pages(GFP_KERNEL, order);
if (!iopm_pages)
return -ENOMEM;
iopm_va = page_address(iopm_pages);
memset(iopm_va, 0xff, PAGE_SIZE * (1 << order));
iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;
init_msrpm_offsets();
supported_xcr0 &= ~(XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR);
if (boot_cpu_has(X86_FEATURE_FXSR_OPT))
kvm_enable_efer_bits(EFER_FFXSR);
if (boot_cpu_has(X86_FEATURE_TSCRATEMSR)) {
kvm_has_tsc_control = true;
kvm_max_tsc_scaling_ratio = TSC_RATIO_MAX;
kvm_tsc_scaling_ratio_frac_bits = 32;
}
tsc_aux_uret_slot = kvm_add_user_return_msr(MSR_TSC_AUX);
/* Check for pause filtering support */
if (!boot_cpu_has(X86_FEATURE_PAUSEFILTER)) {
pause_filter_count = 0;
pause_filter_thresh = 0;
} else if (!boot_cpu_has(X86_FEATURE_PFTHRESHOLD)) {
pause_filter_thresh = 0;
}
if (nested) {
printk(KERN_INFO "kvm: Nested Virtualization enabled\n");
kvm_enable_efer_bits(EFER_SVME | EFER_LMSLE);
}
/*
* KVM's MMU doesn't support using 2-level paging for itself, and thus
* NPT isn't supported if the host is using 2-level paging since host
* CR4 is unchanged on VMRUN.
*/
if (!IS_ENABLED(CONFIG_X86_64) && !IS_ENABLED(CONFIG_X86_PAE))
npt_enabled = false;
if (!boot_cpu_has(X86_FEATURE_NPT))
npt_enabled = false;
kvm_configure_mmu(npt_enabled, get_max_npt_level(), PG_LEVEL_1G);
pr_info("kvm: Nested Paging %sabled\n", npt_enabled ? "en" : "dis");
/* Note, SEV setup consumes npt_enabled. */
sev_hardware_setup();
svm_hv_hardware_setup();
svm_adjust_mmio_mask();
for_each_possible_cpu(cpu) {
r = svm_cpu_init(cpu);
if (r)
goto err;
}
if (nrips) {
if (!boot_cpu_has(X86_FEATURE_NRIPS))
nrips = false;
}
enable_apicv = avic = avic && npt_enabled && boot_cpu_has(X86_FEATURE_AVIC);
if (enable_apicv) {
pr_info("AVIC enabled\n");
amd_iommu_register_ga_log_notifier(&avic_ga_log_notifier);
}
if (vls) {
if (!npt_enabled ||
!boot_cpu_has(X86_FEATURE_V_VMSAVE_VMLOAD) ||
!IS_ENABLED(CONFIG_X86_64)) {
vls = false;
} else {
pr_info("Virtual VMLOAD VMSAVE supported\n");
}
}
if (boot_cpu_has(X86_FEATURE_SVME_ADDR_CHK))
svm_gp_erratum_intercept = false;
if (vgif) {
if (!boot_cpu_has(X86_FEATURE_VGIF))
vgif = false;
else
pr_info("Virtual GIF supported\n");
}
svm_set_cpu_caps();
/*
* It seems that on AMD processors PTE's accessed bit is
* being set by the CPU hardware before the NPF vmexit.
* This is not expected behaviour and our tests fail because
* of it.
* A workaround here is to disable support for
* GUEST_MAXPHYADDR < HOST_MAXPHYADDR if NPT is enabled.
* In this case userspace can know if there is support using
* KVM_CAP_SMALLER_MAXPHYADDR extension and decide how to handle
* it
* If future AMD CPU models change the behaviour described above,
* this variable can be changed accordingly
*/
allow_smaller_maxphyaddr = !npt_enabled;
return 0;
err:
svm_hardware_teardown();
return r;
}
| 0 |
[
"CWE-862"
] |
kvm
|
0f923e07124df069ba68d8bb12324398f4b6b709
| 277,146,174,824,683,340,000,000,000,000,000,000,000 | 138 |
KVM: nSVM: avoid picking up unsupported bits from L2 in int_ctl (CVE-2021-3653)
* Invert the mask of bits that we pick from L2 in
nested_vmcb02_prepare_control
* Invert and explicitly use VIRQ related bits bitmask in svm_clear_vintr
This fixes a security issue that allowed a malicious L1 to run L2 with
AVIC enabled, which allowed the L2 to exploit the uninitialized and enabled
AVIC to read/write the host physical memory at some offsets.
Fixes: 3d6368ef580a ("KVM: SVM: Add VMRUN handler")
Signed-off-by: Maxim Levitsky <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
|
void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
sector_t sector, int size, struct page *page)
{
/* write first size bytes of page to sector of rdev
* Increment mddev->pending_writes before returning
* and decrement it on completion, waking up sb_wait
* if zero is reached.
* If an error occurred, call md_error
*/
struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, mddev);
bio->bi_bdev = rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev;
bio->bi_iter.bi_sector = sector;
bio_add_page(bio, page, size, 0);
bio->bi_private = rdev;
bio->bi_end_io = super_written;
atomic_inc(&mddev->pending_writes);
submit_bio(WRITE_FLUSH_FUA, bio);
}
| 0 |
[
"CWE-200"
] |
linux
|
b6878d9e03043695dbf3fa1caa6dfc09db225b16
| 111,735,645,301,287,130,000,000,000,000,000,000,000 | 20 |
md: use kzalloc() when bitmap is disabled
In drivers/md/md.c get_bitmap_file() uses kmalloc() for creating a
mdu_bitmap_file_t called "file".
5769 file = kmalloc(sizeof(*file), GFP_NOIO);
5770 if (!file)
5771 return -ENOMEM;
This structure is copied to user space at the end of the function.
5786 if (err == 0 &&
5787 copy_to_user(arg, file, sizeof(*file)))
5788 err = -EFAULT
But if bitmap is disabled only the first byte of "file" is initialized
with zero, so it's possible to read some bytes (up to 4095) of kernel
space memory from user space. This is an information leak.
5775 /* bitmap disabled, zero the first byte and copy out */
5776 if (!mddev->bitmap_info.file)
5777 file->pathname[0] = '\0';
Signed-off-by: Benjamin Randazzo <[email protected]>
Signed-off-by: NeilBrown <[email protected]>
|
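The shape of this fix, per the message's own excerpts: allocate zeroed memory so the copy_to_user() at line 5787 can only expose bytes the function actually wrote. Kernel-context fragment:

```c
/* Before: partially initialized buffer copied out to userspace, leaking
 * up to 4095 bytes of kernel memory (CWE-200). */
file = kmalloc(sizeof(*file), GFP_NOIO);

/* After: zeroed at allocation, so untouched bytes cannot leak. */
file = kzalloc(sizeof(*file), GFP_NOIO);
```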
static void wsgi_release_interpreter(InterpreterObject *handle)
{
PyThreadState *tstate = NULL;
PyGILState_STATE state;
/*
* Need to release and destroy the thread state that
* was created against the interpreter. This will
* release the GIL. Note that it should be safe to
* always assume that the simplified GIL state API
* lock was originally unlocked as always calling in
* from an Apache thread when we acquire the
* interpreter in the first place.
*/
if (*handle->name) {
tstate = PyThreadState_Get();
PyEval_ReleaseThread(tstate);
}
else
PyGILState_Release(PyGILState_UNLOCKED);
/*
* Need to reacquire the Python GIL just so we can
* decrement our reference count to the interpreter
* itself. If the interpreter has since been removed
* from the table of interpreters this will result
* in its destruction if its the last reference.
*/
state = PyGILState_Ensure();
Py_DECREF(handle);
PyGILState_Release(state);
}
| 0 |
[
"CWE-264"
] |
mod_wsgi
|
d9d5fea585b23991f76532a9b07de7fcd3b649f4
| 295,787,232,959,052,700,000,000,000,000,000,000,000 | 37 |
Local privilege escalation when using daemon mode. (CVE-2014-0240)
|
static int cert_crl(X509_STORE_CTX *ctx, X509_CRL *crl, X509 *x)
{
int ok;
X509_REVOKED *rev;
/*
* The rules changed for this... previously if a CRL contained unhandled
* critical extensions it could still be used to indicate a certificate
* was revoked. This has since been changed since critical extension can
* change the meaning of CRL entries.
*/
if (!(ctx->param->flags & X509_V_FLAG_IGNORE_CRITICAL)
&& (crl->flags & EXFLAG_CRITICAL)) {
ctx->error = X509_V_ERR_UNHANDLED_CRITICAL_CRL_EXTENSION;
ok = ctx->verify_cb(0, ctx);
if (!ok)
return 0;
}
/*
* Look for serial number of certificate in CRL If found make sure reason
* is not removeFromCRL.
*/
if (X509_CRL_get0_by_cert(crl, &rev, x)) {
if (rev->reason == CRL_REASON_REMOVE_FROM_CRL)
return 2;
ctx->error = X509_V_ERR_CERT_REVOKED;
ok = ctx->verify_cb(0, ctx);
if (!ok)
return 0;
}
return 1;
}
| 0 |
[
"CWE-119"
] |
openssl
|
370ac320301e28bb615cee80124c042649c95d14
| 264,138,968,692,458,900,000,000,000,000,000,000,000 | 32 |
Fix length checks in X509_cmp_time to avoid out-of-bounds reads.
Also tighten X509_cmp_time to reject more than three fractional
seconds in the time; and to reject trailing garbage after the offset.
CVE-2015-1789
Reviewed-by: Viktor Dukhovni <[email protected]>
Reviewed-by: Richard Levitte <[email protected]>
|
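The length-check discipline this message describes reduces to verifying remaining input before every fixed-width read. A minimal sketch of the pattern with a hypothetical helper, not the OpenSSL diff:

```c
#include <ctype.h>
#include <stddef.h>

/* Parse a two-digit field only after confirming the input still holds
 * two bytes, so a short ASN.1 time string cannot cause an out-of-bounds
 * read (CVE-2015-1789 was exactly such a read past the buffer). */
static int parse_two_digits(const unsigned char **p, size_t *remaining,
                            int *out)
{
    if (*remaining < 2 || !isdigit((*p)[0]) || !isdigit((*p)[1]))
        return 0;
    *out = ((*p)[0] - '0') * 10 + ((*p)[1] - '0');
    *p += 2;
    *remaining -= 2;
    return 1;
}
```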
Item *Item_string::safe_charset_converter(CHARSET_INFO *tocs)
{
Item_string *conv;
uint conv_errors;
char *ptr;
String tmp, cstr, *ostr= val_str(&tmp);
cstr.copy(ostr->ptr(), ostr->length(), ostr->charset(), tocs, &conv_errors);
if (conv_errors || !(conv= new Item_string(cstr.ptr(), cstr.length(),
cstr.charset(),
collation.derivation)))
{
/*
Safe conversion is not possible (or EOM).
We could not convert a string into the requested character set
without data loss. The target charset does not cover all the
characters from the string. Operation cannot be done correctly.
*/
return NULL;
}
if (!(ptr= current_thd->strmake(cstr.ptr(), cstr.length())))
return NULL;
conv->str_value.set(ptr, cstr.length(), cstr.charset());
/* Ensure that no one is going to change the result string */
conv->str_value.mark_as_const();
return conv;
}
| 0 |
[] |
server
|
b000e169562697aa072600695d4f0c0412f94f4f
| 129,156,241,620,373,600,000,000,000,000,000,000,000 | 26 |
Bug#26361149 MYSQL SERVER CRASHES AT: COL IN(IFNULL(CONST, COL), NAME_CONST('NAME', NULL))
based on:
commit f7316aa0c9a
Author: Ajo Robert <[email protected]>
Date: Thu Aug 24 17:03:21 2017 +0530
Bug#26361149 MYSQL SERVER CRASHES AT: COL IN(IFNULL(CONST,
COL), NAME_CONST('NAME', NULL))
Backport of Bug#19143243 fix.
NAME_CONST item can return NULL_ITEM type in case of incorrect arguments.
NULL_ITEM has special processing in Item_func_in function.
In Item_func_in::fix_length_and_dec an array of possible comparators is
created. Since NAME_CONST function has NULL_ITEM type, corresponding
array element is empty. Then NAME_CONST is wrapped to ITEM_CACHE.
ITEM_CACHE can not return proper type(NULL_ITEM) in Item_func_in::val_int(),
so the NULL_ITEM is attempted compared with an empty comparator.
The fix is to disable the caching of Item_name_const item.
|
NOEXPORT void psk_free(PSK_KEYS *head) {
while(head) {
PSK_KEYS *next=head->next;
str_free(head->identity);
str_free(head->key_val);
str_free(head);
head=next;
}
}
| 0 |
[
"CWE-295"
] |
stunnel
|
ebad9ddc4efb2635f37174c9d800d06206f1edf9
| 272,751,726,323,188,070,000,000,000,000,000,000,000 | 9 |
stunnel-5.57
|
pci_emul_diow(struct vmctx *ctx, int vcpu, struct pci_vdev *dev, int baridx,
uint64_t offset, int size, uint64_t value)
{
int i;
void *offset_ptr;
struct pci_emul_dummy *dummy = dev->arg;
if (baridx == 0) {
if (offset + size > DIOSZ) {
printf("diow: iow too large, offset %ld size %d\n",
offset, size);
return;
}
offset_ptr = (void *) &dummy->ioregs[offset];
if (size == 1)
*(uint8_t *)offset_ptr = value & 0xff;
else if (size == 2)
*(uint16_t *)offset_ptr = value & 0xffff;
else if (size == 4)
*(uint32_t *)offset = value;
else
printf("diow: iow unknown size %d\n", size);
/*
* Special magic value to generate an interrupt
*/
if (offset == 4 && size == 4 && pci_msi_enabled(dev))
pci_generate_msi(dev, value % pci_msi_maxmsgnum(dev));
if (value == 0xabcdef) {
for (i = 0; i < pci_msi_maxmsgnum(dev); i++)
pci_generate_msi(dev, i);
}
}
if (baridx == 1 || baridx == 2) {
if (offset + size > DMEMSZ) {
printf("diow: memw too large, offset %ld size %d\n",
offset, size);
return;
}
i = baridx - 1; /* 'memregs' index */
offset_ptr = (void *) &dummy->memregs[i][offset];
if (size == 1)
*(uint8_t *)offset_ptr = value;
else if (size == 2)
*(uint16_t *)offset_ptr = value;
else if (size == 4)
*(uint32_t *)offset_ptr = value;
else if (size == 8)
*(uint64_t *)offset_ptr = value;
else
printf("diow: memw unknown size %d\n", size);
/*
* magic interrupt ??
*/
}
if (baridx > 2 || baridx < 0)
printf("diow: unknown bar idx %d\n", baridx);
}
| 0 |
[
"CWE-617",
"CWE-703"
] |
acrn-hypervisor
|
6199e653418eda58cd698d8769820904453e2535
| 323,277,101,084,750,050,000,000,000,000,000,000,000 | 65 |
dm: validate the input in 'pci_emul_mem_handler()'
checking the inputs explicitly instead of using Assert.
Tracked-On: #4003
Signed-off-by: Yonghua Huang <[email protected]>
Reviewed-by: Shuo Liu <[email protected]>
Acked-by: Yu Wang <[email protected]>
|
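The distinction the message draws ("checking the inputs explicitly instead of using Assert") matters because assert() compiles away under NDEBUG, so in release builds a guest-controlled offset would go unchecked (the CWE-617 tag). A fragment contrasting the two, using the bounds already visible in the function above:

```c
#include <assert.h>

/* Debug-only: disappears when built with -DNDEBUG, leaving no guard. */
assert(offset + size <= DIOSZ);

/* Always enforced: reject out-of-range guest accesses at runtime. */
if (offset + size > DIOSZ) {
    printf("diow: iow too large, offset %ld size %d\n", offset, size);
    return;
}
```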
static int _crypt_format_integrity(struct crypt_device *cd,
const char *uuid,
struct crypt_params_integrity *params)
{
int r;
uint32_t integrity_tag_size;
char *integrity = NULL, *journal_integrity = NULL, *journal_crypt = NULL;
struct volume_key *journal_crypt_key = NULL, *journal_mac_key = NULL;
if (!params)
return -EINVAL;
if (uuid) {
log_err(cd, _("UUID is not supported for this crypt type."));
return -EINVAL;
}
r = device_check_access(cd, crypt_metadata_device(cd), DEV_EXCL);
if (r < 0)
return r;
/* Wipe first 8 sectors - fs magic numbers etc. */
r = crypt_wipe_device(cd, crypt_metadata_device(cd), CRYPT_WIPE_ZERO, 0,
8 * SECTOR_SIZE, 8 * SECTOR_SIZE, NULL, NULL);
if (r < 0) {
log_err(cd, _("Cannot wipe header on device %s."),
mdata_device_path(cd));
return r;
}
if (!(cd->type = strdup(CRYPT_INTEGRITY)))
return -ENOMEM;
if (params->journal_crypt_key) {
journal_crypt_key = crypt_alloc_volume_key(params->journal_crypt_key_size,
params->journal_crypt_key);
if (!journal_crypt_key)
return -ENOMEM;
}
if (params->journal_integrity_key) {
journal_mac_key = crypt_alloc_volume_key(params->journal_integrity_key_size,
params->journal_integrity_key);
if (!journal_mac_key) {
r = -ENOMEM;
goto out;
}
}
if (params->integrity && !(integrity = strdup(params->integrity))) {
r = -ENOMEM;
goto out;
}
if (params->journal_integrity && !(journal_integrity = strdup(params->journal_integrity))) {
r = -ENOMEM;
goto out;
}
if (params->journal_crypt && !(journal_crypt = strdup(params->journal_crypt))) {
r = -ENOMEM;
goto out;
}
integrity_tag_size = INTEGRITY_hash_tag_size(integrity);
if (integrity_tag_size > 0 && params->tag_size && integrity_tag_size != params->tag_size)
log_std(cd, _("WARNING: Requested tag size %d bytes differs from %s size output (%d bytes).\n"),
params->tag_size, integrity, integrity_tag_size);
if (params->tag_size)
integrity_tag_size = params->tag_size;
cd->u.integrity.journal_crypt_key = journal_crypt_key;
cd->u.integrity.journal_mac_key = journal_mac_key;
cd->u.integrity.params.journal_size = params->journal_size;
cd->u.integrity.params.journal_watermark = params->journal_watermark;
cd->u.integrity.params.journal_commit_time = params->journal_commit_time;
cd->u.integrity.params.interleave_sectors = params->interleave_sectors;
cd->u.integrity.params.buffer_sectors = params->buffer_sectors;
cd->u.integrity.params.sector_size = params->sector_size;
cd->u.integrity.params.tag_size = integrity_tag_size;
cd->u.integrity.params.integrity = integrity;
cd->u.integrity.params.journal_integrity = journal_integrity;
cd->u.integrity.params.journal_crypt = journal_crypt;
r = INTEGRITY_format(cd, params, cd->u.integrity.journal_crypt_key, cd->u.integrity.journal_mac_key);
if (r)
log_err(cd, _("Cannot format integrity for device %s."),
mdata_device_path(cd));
out:
if (r) {
crypt_free_volume_key(journal_crypt_key);
crypt_free_volume_key(journal_mac_key);
free(integrity);
free(journal_integrity);
free(journal_crypt);
}
return r;
}
| 0 |
[
"CWE-345"
] |
cryptsetup
|
0113ac2d889c5322659ad0596d4cfc6da53e356c
| 198,260,043,276,526,130,000,000,000,000,000,000,000 | 98 |
Fix CVE-2021-4122 - LUKS2 reencryption crash recovery attack
Fix possible attacks against data confidentiality through LUKS2 online
reencryption extension crash recovery.
An attacker can modify on-disk metadata to simulate decryption in
progress with crashed (unfinished) reencryption step and persistently
decrypt part of the LUKS device.
This attack requires repeated physical access to the LUKS device but
no knowledge of user passphrases.
The decryption step is performed after a valid user activates
the device with a correct passphrase and modified metadata.
There are no visible warnings for the user that such recovery happened
(except using the luksDump command). The attack can also be reversed
afterward (simulating crashed encryption from a plaintext) with
possible modification of revealed plaintext.
The problem was caused by reusing a mechanism designed for actual
reencryption operation without reassessing the security impact for new
encryption and decryption operations. While the reencryption requires
calculating and verifying both key digests, no digest was needed to
initiate decryption recovery if the destination is plaintext (no
encryption key). Also, some metadata (like encryption cipher) is not
protected, and an attacker could change it. Note that LUKS2 protects
visible metadata only when a random change occurs. It does not protect
against intentional modification but such modification must not cause
a violation of data confidentiality.
The fix introduces additional digest protection of reencryption
metadata. The digest is calculated from known keys and critical
reencryption metadata. Now an attacker cannot create correct metadata
digest without knowledge of a passphrase for used keyslots.
For more details, see LUKS2 On-Disk Format Specification version 1.1.0.
|
int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
{
struct net *net = xp_net(policy);
struct xfrm_policy *pol;
struct xfrm_policy *delpol;
struct hlist_head *chain;
struct hlist_node *newpos;
spin_lock_bh(&net->xfrm.xfrm_policy_lock);
chain = policy_hash_bysel(net, &policy->selector, policy->family, dir);
delpol = NULL;
newpos = NULL;
hlist_for_each_entry(pol, chain, bydst) {
if (pol->type == policy->type &&
!selector_cmp(&pol->selector, &policy->selector) &&
xfrm_policy_mark_match(policy, pol) &&
xfrm_sec_ctx_match(pol->security, policy->security) &&
!WARN_ON(delpol)) {
if (excl) {
spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
return -EEXIST;
}
delpol = pol;
if (policy->priority > pol->priority)
continue;
} else if (policy->priority >= pol->priority) {
newpos = &pol->bydst;
continue;
}
if (delpol)
break;
}
if (newpos)
hlist_add_behind(&policy->bydst, newpos);
else
hlist_add_head(&policy->bydst, chain);
__xfrm_policy_link(policy, dir);
atomic_inc(&net->xfrm.flow_cache_genid);
/* After previous checking, family can either be AF_INET or AF_INET6 */
if (policy->family == AF_INET)
rt_genid_bump_ipv4(net);
else
rt_genid_bump_ipv6(net);
if (delpol) {
xfrm_policy_requeue(delpol, policy);
__xfrm_policy_unlink(delpol, dir);
}
policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir, policy->index);
hlist_add_head(&policy->byidx, net->xfrm.policy_byidx+idx_hash(net, policy->index));
policy->curlft.add_time = get_seconds();
policy->curlft.use_time = 0;
if (!mod_timer(&policy->timer, jiffies + HZ))
xfrm_pol_hold(policy);
spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
if (delpol)
xfrm_policy_kill(delpol);
else if (xfrm_bydst_should_resize(net, dir, NULL))
schedule_work(&net->xfrm.policy_hash_work);
return 0;
}
| 0 |
[
"CWE-125"
] |
ipsec
|
7bab09631c2a303f87a7eb7e3d69e888673b9b7e
| 195,545,179,742,199,070,000,000,000,000,000,000,000 | 64 |
xfrm: policy: check policy direction value
The 'dir' parameter in xfrm_migrate() is a user-controlled byte which is used
as an array index. This can lead to an out-of-bound access, kernel lockup and
DoS. Add a check for the 'dir' value.
This fixes CVE-2017-11600.
References: https://bugzilla.redhat.com/show_bug.cgi?id=1474928
Fixes: 80c9abaabf42 ("[XFRM]: Extension for dynamic update of endpoint address(es)")
Cc: <[email protected]> # v2.6.21-rc1
Reported-by: "bo Zhang" <[email protected]>
Signed-off-by: Vladis Dronov <[email protected]>
Signed-off-by: Steffen Klassert <[email protected]>
|
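The fix named in this message is a classic index guard; per the description it has roughly this shape (a hedged sketch of the described check, which lives in xfrm_migrate() rather than the function shown above):

```c
/* Reject the user-controlled direction byte before it is ever used as
 * an array index (CWE-125): only values below XFRM_POLICY_MAX are valid. */
if (dir >= XFRM_POLICY_MAX)
    return -EINVAL;
```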
expectation_remove(struct hmap *alg_expectations,
const struct conn_key *key, uint32_t basis)
{
struct alg_exp_node *alg_exp_node;
HMAP_FOR_EACH_WITH_HASH (alg_exp_node, node, conn_key_hash(key, basis),
alg_expectations) {
if (!conn_key_cmp(&alg_exp_node->key, key)) {
hmap_remove(alg_expectations, &alg_exp_node->node);
break;
}
}
}
| 0 |
[
"CWE-400"
] |
ovs
|
79349cbab0b2a755140eedb91833ad2760520a83
| 252,352,134,158,193,870,000,000,000,000,000,000,000 | 13 |
flow: Support extra padding length.
Although not required, padding can be optionally added until
the packet length is MTU bytes. A packet with extra padding
currently fails sanity checks.
Vulnerability: CVE-2020-35498
Fixes: fa8d9001a624 ("miniflow_extract: Properly handle small IP packets.")
Reported-by: Joakim Hindersson <[email protected]>
Acked-by: Ilya Maximets <[email protected]>
Signed-off-by: Flavio Leitner <[email protected]>
Signed-off-by: Ilya Maximets <[email protected]>
|
calc_ntlm2_session_response (const char *nonce,
guchar nt_hash[21],
guchar lm_hash[21],
guchar *lm_resp,
gsize lm_resp_sz,
guchar *nt_resp)
{
guint32 client_nonce[2];
guchar ntlmv2_hash[16];
GChecksum *ntlmv2_cksum;
gsize ntlmv2_hash_sz = sizeof (ntlmv2_hash);
/* FIXME: if GLib ever gets a more secure random number
* generator, use it here
*/
client_nonce[0] = g_random_int();
client_nonce[1] = g_random_int();
ntlmv2_cksum = g_checksum_new (G_CHECKSUM_MD5);
g_checksum_update (ntlmv2_cksum, (const guchar *) nonce, 8);
g_checksum_update (ntlmv2_cksum, (const guchar *) client_nonce, sizeof (client_nonce));
g_checksum_get_digest (ntlmv2_cksum, ntlmv2_hash, &ntlmv2_hash_sz);
g_checksum_free (ntlmv2_cksum);
/* Send the padded client nonce as a fake lm_resp */
memset (lm_resp, 0, lm_resp_sz);
memcpy (lm_resp, client_nonce, sizeof (client_nonce));
/* Compute nt_hash as usual but with a new nonce */
calc_response (nt_hash, ntlmv2_hash, nt_resp);
}
| 0 |
[
"CWE-125"
] |
libsoup
|
f8a54ac85eec2008c85393f331cdd251af8266ad
| 235,873,131,657,708,200,000,000,000,000,000,000,000 | 31 |
NTLM: Avoid a potential heap buffer overflow in v2 authentication
Check the length of the decoded v2 challenge before attempting to
parse it, to avoid reading past it.
Fixes #173
|
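"Check the length of the decoded v2 challenge before attempting to parse it" is the usual decode-then-bound pattern. A hedged sketch with hypothetical names and an illustrative minimum length; the real parser lives elsewhere in libsoup:

```c
#include <glib.h>

#define NTLM_CHALLENGE_MIN_LEN 48   /* illustrative, not libsoup's value */

/* After base64-decoding the challenge, refuse to parse unless it is at
 * least as long as the fixed header fields that will be read from it. */
static gboolean challenge_is_parseable(const char *b64_challenge)
{
    gsize len = 0;
    guchar *buf = g_base64_decode(b64_challenge, &len);
    gboolean ok = (buf != NULL) && (len >= NTLM_CHALLENGE_MIN_LEN);
    g_free(buf);
    return ok;
}
```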
void Item_singlerow_subselect::bring_value()
{
if (!exec() && assigned())
{
null_value= true;
for (uint i= 0; i < max_columns ; i++)
{
if (!row[i]->null_value)
{
null_value= false;
return;
}
}
}
else
reset();
}
| 0 |
[
"CWE-89"
] |
server
|
3c209bfc040ddfc41ece8357d772547432353fd2
| 95,954,075,631,129,400,000,000,000,000,000,000,000 | 17 |
MDEV-25994: Crash with union of my_decimal type in ORDER BY clause
When single-row subquery fails with "Subquery returns more than 1 row"
error, it will raise an error and return NULL.
On the other hand, Item_singlerow_subselect sets item->maybe_null=0
for table-less subqueries like "(SELECT not_null_value)" (*)
This discrepancy (item with maybe_null=0 returning NULL) causes the
code in Type_handler_decimal_result::make_sort_key_part() to crash.
Fixed this by allowing inference (*) only when the subquery is NOT a
UNION.
|
static int oidc_authenticate_user(request_rec *r, oidc_cfg *c,
oidc_provider_t *provider, const char *original_url,
const char *login_hint, const char *id_token_hint, const char *prompt,
const char *auth_request_params, const char *path_scope) {
oidc_debug(r, "enter");
if (provider == NULL) {
// TODO: should we use an explicit redirect to the discovery endpoint (maybe a "discovery" param to the redirect_uri)?
if (c->metadata_dir != NULL) {
/*
* Will be handled in the content handler; avoid:
* No authentication done but request not allowed without authentication
* by setting r->user
*/
oidc_debug(r, "defer discovery to the content handler");
oidc_request_state_set(r, OIDC_REQUEST_STATE_KEY_DISCOVERY, "");
r->user = "";
return OK;
}
/* we're not using multiple OP's configured in a metadata directory, pick the statically configured OP */
if (oidc_provider_static_config(r, c, &provider) == FALSE)
return HTTP_INTERNAL_SERVER_ERROR;
}
/* generate the random nonce value that correlates requests and responses */
char *nonce = NULL;
if (oidc_proto_generate_nonce(r, &nonce, OIDC_PROTO_NONCE_LENGTH) == FALSE)
return HTTP_INTERNAL_SERVER_ERROR;
char *pkce_state = NULL;
char *code_challenge = NULL;
if ((oidc_util_spaced_string_contains(r->pool, provider->response_type,
OIDC_PROTO_CODE) == TRUE) && (provider->pkce != NULL)) {
/* generate the code verifier value that correlates authorization requests and code exchange requests */
if (provider->pkce->state(r, &pkce_state) == FALSE)
return HTTP_INTERNAL_SERVER_ERROR;
/* generate the PKCE code challenge */
if (provider->pkce->challenge(r, pkce_state, &code_challenge) == FALSE)
return HTTP_INTERNAL_SERVER_ERROR;
}
/* create the state between request/response */
oidc_proto_state_t *proto_state = oidc_proto_state_new();
oidc_proto_state_set_original_url(proto_state, original_url);
oidc_proto_state_set_original_method(proto_state,
oidc_original_request_method(r, c, TRUE));
oidc_proto_state_set_issuer(proto_state, provider->issuer);
oidc_proto_state_set_response_type(proto_state, provider->response_type);
oidc_proto_state_set_nonce(proto_state, nonce);
oidc_proto_state_set_timestamp_now(proto_state);
if (provider->response_mode)
oidc_proto_state_set_response_mode(proto_state,
provider->response_mode);
if (prompt)
oidc_proto_state_set_prompt(proto_state, prompt);
if (pkce_state)
oidc_proto_state_set_pkce_state(proto_state, pkce_state);
/* get a hash value that fingerprints the browser concatenated with the random input */
char *state = oidc_get_browser_state_hash(r, c, nonce);
/*
* create state that restores the context when the authorization response comes in
* and cryptographically bind it to the browser
*/
int rc = oidc_authorization_request_set_cookie(r, c, state, proto_state);
if (rc != OK) {
oidc_proto_state_destroy(proto_state);
return rc;
}
/*
* printout errors if Cookie settings are not going to work
* TODO: separate this code out into its own function
*/
apr_uri_t o_uri;
memset(&o_uri, 0, sizeof(apr_uri_t));
apr_uri_t r_uri;
memset(&r_uri, 0, sizeof(apr_uri_t));
apr_uri_parse(r->pool, original_url, &o_uri);
apr_uri_parse(r->pool, oidc_get_redirect_uri(r, c), &r_uri);
if ((apr_strnatcmp(o_uri.scheme, r_uri.scheme) != 0)
&& (apr_strnatcmp(r_uri.scheme, "https") == 0)) {
oidc_error(r,
"the URL scheme (%s) of the configured " OIDCRedirectURI " does not match the URL scheme of the URL being accessed (%s): the \"state\" and \"session\" cookies will not be shared between the two!",
r_uri.scheme, o_uri.scheme);
oidc_proto_state_destroy(proto_state);
return HTTP_INTERNAL_SERVER_ERROR;
}
if (c->cookie_domain == NULL) {
if (apr_strnatcmp(o_uri.hostname, r_uri.hostname) != 0) {
char *p = strstr(o_uri.hostname, r_uri.hostname);
if ((p == NULL) || (apr_strnatcmp(r_uri.hostname, p) != 0)) {
oidc_error(r,
"the URL hostname (%s) of the configured " OIDCRedirectURI " does not match the URL hostname of the URL being accessed (%s): the \"state\" and \"session\" cookies will not be shared between the two!",
r_uri.hostname, o_uri.hostname);
oidc_proto_state_destroy(proto_state);
return HTTP_INTERNAL_SERVER_ERROR;
}
}
} else {
if (!oidc_util_cookie_domain_valid(r_uri.hostname, c->cookie_domain)) {
oidc_error(r,
"the domain (%s) configured in " OIDCCookieDomain " does not match the URL hostname (%s) of the URL being accessed (%s): setting \"state\" and \"session\" cookies will not work!!",
c->cookie_domain, o_uri.hostname, original_url);
oidc_proto_state_destroy(proto_state);
return HTTP_INTERNAL_SERVER_ERROR;
}
}
/* send off to the OpenID Connect Provider */
// TODO: maybe show intermediate/progress screen "redirecting to"
return oidc_proto_authorization_request(r, provider, login_hint,
oidc_get_redirect_uri_iss(r, c, provider), state, proto_state,
id_token_hint, code_challenge, auth_request_params, path_scope);
}
| 0 |
[
"CWE-601"
] |
mod_auth_openidc
|
03e6bfb446f4e3f27c003d30d6a433e5dd8e2b3d
| 292,619,035,342,954,120,000,000,000,000,000,000,000 | 123 |
apply OIDCRedirectURLsAllowed setting to target_link_uri
closes #672; thanks @Meheni
release 2.4.9.4
Signed-off-by: Hans Zandbelt <[email protected]>
|
size_t size() const override { return HeaderMapImpl::size(); }
| 0 |
[] |
envoy
|
2c60632d41555ec8b3d9ef5246242be637a2db0f
| 172,286,107,370,094,170,000,000,000,000,000,000,000 | 1 |
http: header map security fixes for duplicate headers (#197)
Previously header matching did not match on all headers for
non-inline headers. This patch changes the default behavior to
always logically match on all headers. Multiple individual
headers will be logically concatenated with ',' similar to what
is done with inline headers. This makes the behavior effectively
consistent. This behavior can be temporarily reverted by setting
the runtime value "envoy.reloadable_features.header_match_on_all_headers"
to "false".
Targeted fixes have been additionally performed on the following
extensions which make them consider all duplicate headers by default as
a comma concatenated list:
1) Any extension using CEL matching on headers.
2) The header to metadata filter.
3) The JWT filter.
4) The Lua filter.
Like primary header matching used in routing, RBAC, etc. this behavior
can be disabled by setting the runtime value
"envoy.reloadable_features.header_match_on_all_headers" to false.
Finally, the setCopy() header map API previously only set the first
header in the case of duplicate non-inline headers. setCopy() now
behaves similarly to the other set*() APIs and replaces all found
headers with a single value. This may have had security implications
in the extauth filter which uses this API. This behavior can be disabled
by setting the runtime value
"envoy.reloadable_features.http_set_copy_replace_all_headers" to false.
Fixes https://github.com/envoyproxy/envoy-setec/issues/188
Signed-off-by: Matt Klein <[email protected]>
|
receive_packet (struct net_device *dev)
{
struct netdev_private *np = netdev_priv(dev);
int entry = np->cur_rx % RX_RING_SIZE;
int cnt = 30;
/* If RFDDone, FrameStart and FrameEnd set, there is a new packet in. */
while (1) {
struct netdev_desc *desc = &np->rx_ring[entry];
int pkt_len;
u64 frame_status;
if (!(desc->status & cpu_to_le64(RFDDone)) ||
!(desc->status & cpu_to_le64(FrameStart)) ||
!(desc->status & cpu_to_le64(FrameEnd)))
break;
/* Chip omits the CRC. */
frame_status = le64_to_cpu(desc->status);
pkt_len = frame_status & 0xffff;
if (--cnt < 0)
break;
/* Update rx error statistics, drop packet. */
if (frame_status & RFS_Errors) {
np->stats.rx_errors++;
if (frame_status & (RxRuntFrame | RxLengthError))
np->stats.rx_length_errors++;
if (frame_status & RxFCSError)
np->stats.rx_crc_errors++;
if (frame_status & RxAlignmentError && np->speed != 1000)
np->stats.rx_frame_errors++;
if (frame_status & RxFIFOOverrun)
np->stats.rx_fifo_errors++;
} else {
struct sk_buff *skb;
/* Small skbuffs for short packets */
if (pkt_len > copy_thresh) {
pci_unmap_single (np->pdev,
desc_to_dma(desc),
np->rx_buf_sz,
PCI_DMA_FROMDEVICE);
skb_put (skb = np->rx_skbuff[entry], pkt_len);
np->rx_skbuff[entry] = NULL;
} else if ((skb = netdev_alloc_skb_ip_align(dev, pkt_len))) {
pci_dma_sync_single_for_cpu(np->pdev,
desc_to_dma(desc),
np->rx_buf_sz,
PCI_DMA_FROMDEVICE);
skb_copy_to_linear_data (skb,
np->rx_skbuff[entry]->data,
pkt_len);
skb_put (skb, pkt_len);
pci_dma_sync_single_for_device(np->pdev,
desc_to_dma(desc),
np->rx_buf_sz,
PCI_DMA_FROMDEVICE);
}
skb->protocol = eth_type_trans (skb, dev);
#if 0
/* Checksum done by hw, but csum value unavailable. */
if (np->pdev->pci_rev_id >= 0x0c &&
!(frame_status & (TCPError | UDPError | IPError))) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
#endif
netif_rx (skb);
}
entry = (entry + 1) % RX_RING_SIZE;
}
spin_lock(&np->rx_lock);
np->cur_rx = entry;
/* Re-allocate skbuffs to fill the descriptor ring */
entry = np->old_rx;
while (entry != np->cur_rx) {
struct sk_buff *skb;
/* Dropped packets don't need to re-allocate */
if (np->rx_skbuff[entry] == NULL) {
skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz);
if (skb == NULL) {
np->rx_ring[entry].fraginfo = 0;
printk (KERN_INFO
"%s: receive_packet: "
"Unable to re-allocate Rx skbuff.#%d\n",
dev->name, entry);
break;
}
np->rx_skbuff[entry] = skb;
np->rx_ring[entry].fraginfo =
cpu_to_le64 (pci_map_single
(np->pdev, skb->data, np->rx_buf_sz,
PCI_DMA_FROMDEVICE));
}
np->rx_ring[entry].fraginfo |=
cpu_to_le64((u64)np->rx_buf_sz << 48);
np->rx_ring[entry].status = 0;
entry = (entry + 1) % RX_RING_SIZE;
}
np->old_rx = entry;
spin_unlock(&np->rx_lock);
return 0;
}
| 0 |
[
"CWE-284",
"CWE-264"
] |
linux
|
1bb57e940e1958e40d51f2078f50c3a96a9b2d75
| 134,598,806,674,548,900,000,000,000,000,000,000,000 | 102 |
dl2k: Clean up rio_ioctl
The dl2k driver's rio_ioctl call has a few issues:
- No permissions checking
- Implements SIOCGMIIREG and SIOCGMIIREG using the SIOCDEVPRIVATE numbers
- Has a few ioctls that may have been used for debugging at one point
but have no place in the kernel proper.
This patch removes all but the MII ioctls, renumbers them to use the
standard ones, and adds the proper permission check for SIOCSMIIREG.
We can also get rid of the dl2k-specific struct mii_data in favor of
the generic struct mii_ioctl_data.
Since we have the phyid on hand, we can add the SIOCGMIIPHY ioctl too.
Most of the MII code for the driver could probably be converted to use
the generic MII library but I don't have a device to test the results.
Reported-by: Stephan Mueller <[email protected]>
Signed-off-by: Jeff Mahoney <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
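Of the issues this message lists, the missing permission check is the security-relevant one; the conventional net-driver guard looks like this. A kernel-context fragment of the pattern named in the message, where mii_write stands in for the driver's own register writer:

```c
/* Writes through the MII interface must be privileged; reads need not be. */
case SIOCSMIIREG:
    if (!capable(CAP_NET_ADMIN))
        return -EPERM;
    mii_write(dev, data->phy_id, data->reg_num, data->val_in);
    break;
```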
void HeaderMapImpl::iterateReverse(HeaderMap::ConstIterateCb cb) const {
for (auto it = headers_.rbegin(); it != headers_.rend(); it++) {
if (cb(*it) == HeaderMap::Iterate::Break) {
break;
}
}
}
| 0 |
[] |
envoy
|
2c60632d41555ec8b3d9ef5246242be637a2db0f
| 304,057,274,806,890,670,000,000,000,000,000,000,000 | 7 |
http: header map security fixes for duplicate headers (#197)
Previously header matching did not match on all headers for
non-inline headers. This patch changes the default behavior to
always logically match on all headers. Multiple individual
headers will be logically concatenated with ',' similar to what
is done with inline headers. This makes the behavior effectively
consistent. This behavior can be temporarily reverted by setting
the runtime value "envoy.reloadable_features.header_match_on_all_headers"
to "false".
Targeted fixes have been additionally performed on the following
extensions which make them consider all duplicate headers by default as
a comma concatenated list:
1) Any extension using CEL matching on headers.
2) The header to metadata filter.
3) The JWT filter.
4) The Lua filter.
Like primary header matching used in routing, RBAC, etc. this behavior
can be disabled by setting the runtime value
"envoy.reloadable_features.header_match_on_all_headers" to false.
Finally, the setCopy() header map API previously only set the first
header in the case of duplicate non-inline headers. setCopy() now
behaves similarly to the other set*() APIs and replaces all found
headers with a single value. This may have had security implications
in the extauth filter which uses this API. This behavior can be disabled
by setting the runtime value
"envoy.reloadable_features.http_set_copy_replace_all_headers" to false.
Fixes https://github.com/envoyproxy/envoy-setec/issues/188
Signed-off-by: Matt Klein <[email protected]>
|
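A hedged, plain-C illustration of the "logically concatenated with ','" rule from the message; values and n are hypothetical inputs, since the real implementation operates on Envoy's C++ HeaderMap.

#include <stdlib.h>
#include <string.h>

static char *join_header_values(const char *const *values, size_t n)
{
	size_t i, len = 1; /* room for the final NUL */
	char *out;

	for (i = 0; i < n; i++)
		len += strlen(values[i]) + 1; /* value plus a ',' separator */
	out = malloc(len);
	if (!out)
		return NULL;
	out[0] = '\0';
	for (i = 0; i < n; i++) {
		if (i)
			strcat(out, ",");
		strcat(out, values[i]);
	}
	return out;
}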
static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params,
struct sctp_transport *trans,
struct sctp_association *asoc,
struct sctp_sock *sp,
int hb_change,
int pmtud_change,
int sackdelay_change)
{
int error;
if (params->spp_flags & SPP_HB_DEMAND && trans) {
error = sctp_primitive_REQUESTHEARTBEAT(trans->asoc->base.net,
trans->asoc, trans);
if (error)
return error;
}
/* Note that unless the spp_flag is set to SPP_HB_ENABLE the value of
* this field is ignored. Note also that a value of zero indicates
* the current setting should be left unchanged.
*/
if (params->spp_flags & SPP_HB_ENABLE) {
/* Re-zero the interval if the SPP_HB_TIME_IS_ZERO is
* set. This lets us use 0 value when this flag
* is set.
*/
if (params->spp_flags & SPP_HB_TIME_IS_ZERO)
params->spp_hbinterval = 0;
if (params->spp_hbinterval ||
(params->spp_flags & SPP_HB_TIME_IS_ZERO)) {
if (trans) {
trans->hbinterval =
msecs_to_jiffies(params->spp_hbinterval);
} else if (asoc) {
asoc->hbinterval =
msecs_to_jiffies(params->spp_hbinterval);
} else {
sp->hbinterval = params->spp_hbinterval;
}
}
}
if (hb_change) {
if (trans) {
trans->param_flags =
(trans->param_flags & ~SPP_HB) | hb_change;
} else if (asoc) {
asoc->param_flags =
(asoc->param_flags & ~SPP_HB) | hb_change;
} else {
sp->param_flags =
(sp->param_flags & ~SPP_HB) | hb_change;
}
}
/* When Path MTU discovery is disabled the value specified here will
* be the "fixed" path mtu (i.e. the value of the spp_flags field must
* include the flag SPP_PMTUD_DISABLE for this field to have any
* effect).
*/
if ((params->spp_flags & SPP_PMTUD_DISABLE) && params->spp_pathmtu) {
if (trans) {
trans->pathmtu = params->spp_pathmtu;
sctp_assoc_sync_pmtu(asoc);
} else if (asoc) {
sctp_assoc_set_pmtu(asoc, params->spp_pathmtu);
} else {
sp->pathmtu = params->spp_pathmtu;
}
}
if (pmtud_change) {
if (trans) {
int update = (trans->param_flags & SPP_PMTUD_DISABLE) &&
(params->spp_flags & SPP_PMTUD_ENABLE);
trans->param_flags =
(trans->param_flags & ~SPP_PMTUD) | pmtud_change;
if (update) {
sctp_transport_pmtu(trans, sctp_opt2sk(sp));
sctp_assoc_sync_pmtu(asoc);
}
} else if (asoc) {
asoc->param_flags =
(asoc->param_flags & ~SPP_PMTUD) | pmtud_change;
} else {
sp->param_flags =
(sp->param_flags & ~SPP_PMTUD) | pmtud_change;
}
}
/* Note that unless the spp_flag is set to SPP_SACKDELAY_ENABLE the
* value of this field is ignored. Note also that a value of zero
* indicates the current setting should be left unchanged.
*/
if ((params->spp_flags & SPP_SACKDELAY_ENABLE) && params->spp_sackdelay) {
if (trans) {
trans->sackdelay =
msecs_to_jiffies(params->spp_sackdelay);
} else if (asoc) {
asoc->sackdelay =
msecs_to_jiffies(params->spp_sackdelay);
} else {
sp->sackdelay = params->spp_sackdelay;
}
}
if (sackdelay_change) {
if (trans) {
trans->param_flags =
(trans->param_flags & ~SPP_SACKDELAY) |
sackdelay_change;
} else if (asoc) {
asoc->param_flags =
(asoc->param_flags & ~SPP_SACKDELAY) |
sackdelay_change;
} else {
sp->param_flags =
(sp->param_flags & ~SPP_SACKDELAY) |
sackdelay_change;
}
}
/* Note that a value of zero indicates the current setting should be
left unchanged.
*/
if (params->spp_pathmaxrxt) {
if (trans) {
trans->pathmaxrxt = params->spp_pathmaxrxt;
} else if (asoc) {
asoc->pathmaxrxt = params->spp_pathmaxrxt;
} else {
sp->pathmaxrxt = params->spp_pathmaxrxt;
}
}
if (params->spp_flags & SPP_IPV6_FLOWLABEL) {
if (trans) {
if (trans->ipaddr.sa.sa_family == AF_INET6) {
trans->flowlabel = params->spp_ipv6_flowlabel &
SCTP_FLOWLABEL_VAL_MASK;
trans->flowlabel |= SCTP_FLOWLABEL_SET_MASK;
}
} else if (asoc) {
struct sctp_transport *t;
list_for_each_entry(t, &asoc->peer.transport_addr_list,
transports) {
if (t->ipaddr.sa.sa_family != AF_INET6)
continue;
t->flowlabel = params->spp_ipv6_flowlabel &
SCTP_FLOWLABEL_VAL_MASK;
t->flowlabel |= SCTP_FLOWLABEL_SET_MASK;
}
asoc->flowlabel = params->spp_ipv6_flowlabel &
SCTP_FLOWLABEL_VAL_MASK;
asoc->flowlabel |= SCTP_FLOWLABEL_SET_MASK;
} else if (sctp_opt2sk(sp)->sk_family == AF_INET6) {
sp->flowlabel = params->spp_ipv6_flowlabel &
SCTP_FLOWLABEL_VAL_MASK;
sp->flowlabel |= SCTP_FLOWLABEL_SET_MASK;
}
}
if (params->spp_flags & SPP_DSCP) {
if (trans) {
trans->dscp = params->spp_dscp & SCTP_DSCP_VAL_MASK;
trans->dscp |= SCTP_DSCP_SET_MASK;
} else if (asoc) {
struct sctp_transport *t;
list_for_each_entry(t, &asoc->peer.transport_addr_list,
transports) {
t->dscp = params->spp_dscp &
SCTP_DSCP_VAL_MASK;
t->dscp |= SCTP_DSCP_SET_MASK;
}
asoc->dscp = params->spp_dscp & SCTP_DSCP_VAL_MASK;
asoc->dscp |= SCTP_DSCP_SET_MASK;
} else {
sp->dscp = params->spp_dscp & SCTP_DSCP_VAL_MASK;
sp->dscp |= SCTP_DSCP_SET_MASK;
}
}
return 0;
}
| 0 |
[
"CWE-362"
] |
linux
|
b166a20b07382b8bc1dcee2a448715c9c2c81b5b
| 8,581,181,332,241,378,000,000,000,000,000,000,000 | 188 |
net/sctp: fix race condition in sctp_destroy_sock
If sctp_destroy_sock is called without sock_net(sk)->sctp.addr_wq_lock
held and sp->do_auto_asconf is true, then an element is removed
from the auto_asconf_splist without any proper locking.
This can happen in the following functions:
1. In sctp_accept, if sctp_sock_migrate fails.
2. In inet_create or inet6_create, if there is a bpf program
attached to BPF_CGROUP_INET_SOCK_CREATE which denies
creation of the sctp socket.
The bug is fixed by acquiring addr_wq_lock in sctp_destroy_sock
instead of sctp_close.
This addresses CVE-2021-23133.
Reported-by: Or Cohen <[email protected]>
Reviewed-by: Xin Long <[email protected]>
Fixes: 610236587600 ("bpf: Add new cgroup attach type to enable sock modifications")
Signed-off-by: Or Cohen <[email protected]>
Acked-by: Marcelo Ricardo Leitner <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
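A minimal sketch of the fix's shape, using the names from the message: take addr_wq_lock around the auto_asconf unlink in sctp_destroy_sock instead of relying on sctp_close having taken it.

static void sctp_destroy_sock(struct sock *sk)
{
	struct sctp_sock *sp = sctp_sk(sk);

	if (sp->do_auto_asconf) {
		spin_lock_bh(&sock_net(sk)->sctp.addr_wq_lock);
		sp->do_auto_asconf = 0;
		list_del(&sp->auto_asconf_list);
		spin_unlock_bh(&sock_net(sk)->sctp.addr_wq_lock);
	}
	/* ... remainder of the teardown unchanged ... */
}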
void gnutls_global_set_time_function(gnutls_time_func time_func)
{
gnutls_time = time_func;
}
| 0 |
[
"CWE-20"
] |
gnutls
|
b0a3048e56611a2deee4976aeba3b8c0740655a6
| 98,480,831,396,525,880,000,000,000,000,000,000,000 | 4 |
env: use secure_getenv when reading environment variables
|
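A short sketch of the pattern the message refers to: secure_getenv() (glibc, needs _GNU_SOURCE) returns NULL in setuid/setgid processes, so attacker-supplied variables are ignored there. HAVE_SECURE_GETENV is an assumed configure-time macro.

#define _GNU_SOURCE
#include <stdlib.h>

static const char *safe_getenv(const char *name)
{
#ifdef HAVE_SECURE_GETENV
	return secure_getenv(name);
#else
	return getenv(name); /* fallback on platforms without the extension */
#endif
}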
void AES::ProcessAndXorBlock(const byte* in, const byte* xOr, byte* out) const
{
if (dir_ == ENCRYPTION)
encrypt(in, xOr, out);
else
decrypt(in, xOr, out);
}
| 0 |
[] |
mysql-server
|
5c6169fb309981b564a17bee31b367a18866d674
| 181,417,052,488,495,560,000,000,000,000,000,000,000 | 7 |
Bug #24740291: YASSL UPDATE TO 2.4.2
|
get_sigframe(struct k_sigaction *ka, unsigned long sp, size_t frame_size)
{
/* This is the X/Open sanctioned signal stack switching. */
if (ka->sa.sa_flags & SA_ONSTACK) {
if (sas_ss_flags(sp) == 0)
sp = current->sas_ss_sp + current->sas_ss_size;
}
return (void __user *)((sp - frame_size) & -8ul);
}
| 0 |
[] |
linux-2.6
|
ee18d64c1f632043a02e6f5ba5e045bb26a5465f
| 146,322,814,853,405,460,000,000,000,000,000,000,000 | 10 |
KEYS: Add a keyctl to install a process's session keyring on its parent [try #6]
Add a keyctl to install a process's session keyring onto its parent. This
replaces the parent's session keyring. Because the COW credential code does
not permit one process to change another process's credentials directly, the
change is deferred until userspace next starts executing again. Normally this
will be after a wait*() syscall.
To support this, three new security hooks have been provided:
cred_alloc_blank() to allocate unset security creds, cred_transfer() to fill in
the blank security creds and key_session_to_parent() - which asks the LSM if
the process may replace its parent's session keyring.
The replacement may only happen if the process has the same ownership details
as its parent, and the process has LINK permission on the session keyring, and
the session keyring is owned by the process, and the LSM permits it.
Note that this requires alteration to each architecture's notify_resume path.
This has been done for all arches barring blackfin, m68k* and xtensa, all of
which need assembly alteration to support TIF_NOTIFY_RESUME. This allows the
replacement to be performed at the point the parent process resumes userspace
execution.
This allows the userspace AFS pioctl emulation to fully emulate newpag() and
the VIOCSETTOK and VIOCSETTOK2 pioctls, all of which require the ability to
alter the parent process's PAG membership. However, since kAFS doesn't use
PAGs per se, but rather dumps the keys into the session keyring, the session
keyring of the parent must be replaced if, for example, VIOCSETTOK is passed
the newpag flag.
This can be tested with the following program:
#include <stdio.h>
#include <stdlib.h>
#include <keyutils.h>
#define KEYCTL_SESSION_TO_PARENT 18
#define OSERROR(X, S) do { if ((long)(X) == -1) { perror(S); exit(1); } } while(0)
int main(int argc, char **argv)
{
key_serial_t keyring, key;
long ret;
keyring = keyctl_join_session_keyring(argv[1]);
OSERROR(keyring, "keyctl_join_session_keyring");
key = add_key("user", "a", "b", 1, keyring);
OSERROR(key, "add_key");
ret = keyctl(KEYCTL_SESSION_TO_PARENT);
OSERROR(ret, "KEYCTL_SESSION_TO_PARENT");
return 0;
}
Compiled and linked with -lkeyutils, you should see something like:
[dhowells@andromeda ~]$ keyctl show
Session Keyring
-3 --alswrv 4043 4043 keyring: _ses
355907932 --alswrv 4043 -1 \_ keyring: _uid.4043
[dhowells@andromeda ~]$ /tmp/newpag
[dhowells@andromeda ~]$ keyctl show
Session Keyring
-3 --alswrv 4043 4043 keyring: _ses
1055658746 --alswrv 4043 4043 \_ user: a
[dhowells@andromeda ~]$ /tmp/newpag hello
[dhowells@andromeda ~]$ keyctl show
Session Keyring
-3 --alswrv 4043 4043 keyring: hello
340417692 --alswrv 4043 4043 \_ user: a
Where the test program creates a new session keyring, sticks a user key named
'a' into it and then installs it on its parent.
Signed-off-by: David Howells <[email protected]>
Signed-off-by: James Morris <[email protected]>
|
static php_ldap_bictx *_php_sasl_setdefs(LDAP *ld, char *sasl_mech, char *sasl_realm, char *sasl_authc_id, char *passwd, char *sasl_authz_id)
{
php_ldap_bictx *ctx;
ctx = ber_memalloc(sizeof(php_ldap_bictx));
ctx->mech = (sasl_mech) ? ber_strdup(sasl_mech) : NULL;
ctx->realm = (sasl_realm) ? ber_strdup(sasl_realm) : NULL;
ctx->authcid = (sasl_authc_id) ? ber_strdup(sasl_authc_id) : NULL;
ctx->passwd = (passwd) ? ber_strdup(passwd) : NULL;
ctx->authzid = (sasl_authz_id) ? ber_strdup(sasl_authz_id) : NULL;
if (ctx->mech == NULL) {
ldap_get_option(ld, LDAP_OPT_X_SASL_MECH, &ctx->mech);
}
if (ctx->realm == NULL) {
ldap_get_option(ld, LDAP_OPT_X_SASL_REALM, &ctx->realm);
}
if (ctx->authcid == NULL) {
ldap_get_option(ld, LDAP_OPT_X_SASL_AUTHCID, &ctx->authcid);
}
if (ctx->authzid == NULL) {
ldap_get_option(ld, LDAP_OPT_X_SASL_AUTHZID, &ctx->authzid);
}
return ctx;
}
| 0 |
[
"CWE-476"
] |
php-src
|
49782c54994ecca2ef2a061063bd5a7079c43527
| 43,172,545,239,012,590,000,000,000,000,000,000,000 | 26 |
Fix bug #76248 - Malicious LDAP-Server Response causes Crash
|
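The CWE-476 tag points at the unchecked ber_memalloc() result above; a minimal sketch of the missing guard:

ctx = ber_memalloc(sizeof(php_ldap_bictx));
if (ctx == NULL) {
	return NULL; /* callers must treat a NULL context as failure */
}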
static Bool PrintHelpArg(char *arg_name, u32 search_type, GF_FilterSession *fs)
{
char szDesc[100];
Bool first=GF_TRUE;
GF_GPACArg an_arg;
u32 i, count;
u32 res = 0;
u32 alen = (u32) strlen(arg_name);
res += PrintHelpForArgs(arg_name, m4b_gen_args, NULL, search_type, "general");
res += PrintHelpForArgs(arg_name, m4b_split_args, NULL, search_type, "split");
res += PrintHelpForArgs(arg_name, m4b_dash_args, NULL, search_type, "dash");
res += PrintHelpForArgs(arg_name, m4b_imp_args, NULL, search_type, "import");
res += PrintHelpForArgs(arg_name, m4b_imp_fileopt_args, NULL, search_type, "import (per-file option)");
res += PrintHelpForArgs(arg_name, m4b_senc_args, NULL, search_type, "encode");
res += PrintHelpForArgs(arg_name, m4b_crypt_args, NULL, search_type, "crypt");
res += PrintHelpForArgs(arg_name, m4b_hint_args, NULL, search_type, "hint");
res += PrintHelpForArgs(arg_name, m4b_extr_args, NULL, search_type, "extract");
res += PrintHelpForArgs(arg_name, m4b_dump_args, NULL, search_type, "dump");
res += PrintHelpForArgs(arg_name, m4b_meta_args, NULL, search_type, "meta");
res += PrintHelpForArgs(arg_name, m4b_swf_args, NULL, search_type, "swf");
res += PrintHelpForArgs(arg_name, m4b_liveenc_args, NULL, search_type, "live");
res += PrintHelpForArgs(arg_name, m4b_usage_args, NULL, search_type, "");
res += PrintHelpForArgs(arg_name, NULL, (GF_GPACArg *) gf_sys_get_options(), search_type, "core");
if (!fs) return res;
memset(&an_arg, 0, sizeof(GF_GPACArg));
count = gf_fs_filters_registers_count(fs);
for (i=0; i<count; i++) {
u32 j=0;
const GF_FilterRegister *reg = gf_fs_get_filter_register(fs, i);
while (reg->args) {
u32 len;
const GF_FilterArgs *arg = &reg->args[j];
if (!arg || !arg->arg_name) break;
j++;
if ((search_type==SEARCH_ARG_EXACT) && strcmp(arg->arg_name, arg_name)) continue;
if ((search_type==SEARCH_ARG_CLOSE) && !gf_sys_word_match(arg->arg_name, arg_name)) continue;
if (search_type==SEARCH_DESC) {
if (stricmp(arg->arg_name, arg_name) && !gf_strnistr(arg->arg_desc, arg_name, alen)) continue;
}
an_arg.name = arg->arg_name;
if (search_type==SEARCH_ARG_EXACT) {
an_arg.description = arg->arg_desc;
switch (arg->arg_type) {
case GF_PROP_BOOL:
an_arg.type = GF_ARG_BOOL;
break;
case GF_PROP_UINT:
case GF_PROP_SINT:
an_arg.type = GF_ARG_INT;
break;
case GF_PROP_DOUBLE:
an_arg.type = GF_ARG_DOUBLE;
break;
case GF_PROP_STRING_LIST:
case GF_PROP_UINT_LIST:
case GF_PROP_SINT_LIST:
case GF_PROP_VEC2I_LIST:
an_arg.type = GF_ARG_STRINGS;
break;
case GF_PROP_4CC:
an_arg.type = GF_ARG_4CC;
break;
case GF_PROP_4CC_LIST:
an_arg.type = GF_ARG_4CCS;
break;
default:
an_arg.type = GF_ARG_STRING;
break;
}
if (first) {
first = GF_FALSE;
gf_sys_format_help(helpout, 0, "\nGlobal filter session arguments matching %s:\n", arg_name);
//. Syntax is `--arg` or `--arg=VAL`. `[F]` indicates filter name. See `gpac -h` and `gpac -h F` for more info.\n");
}
fprintf(helpout, "[%s]", reg->name);
len = (u32)strlen(reg->name);
while (len<10) {
len++;
fprintf(helpout, " ");
}
fprintf(helpout, " ");
} else if (search_type==SEARCH_DESC) {
sprintf(szDesc, "see filter %s", reg->name);
an_arg.description = szDesc;
}
gf_sys_print_arg(helpout, GF_PRINTARG_ADD_DASH, &an_arg, "TEST");
res++;
}
}
if (res) return GF_TRUE;
return GF_FALSE;
}
| 0 |
[
"CWE-787"
] |
gpac
|
4e56ad72ac1afb4e049a10f2d99e7512d7141f9d
| 115,647,702,609,720,000,000,000,000,000,000,000,000 | 99 |
fixed #2216
|
void streamNextID(streamID *last_id, streamID *new_id) {
uint64_t ms = mstime();
if (ms > last_id->ms) {
new_id->ms = ms;
new_id->seq = 0;
} else {
*new_id = *last_id;
streamIncrID(new_id);
}
}
| 0 |
[
"CWE-190"
] |
redis
|
f6a40570fa63d5afdd596c78083d754081d80ae3
| 22,368,770,607,957,636,000,000,000,000,000,000,000 | 10 |
Fix ziplist and listpack overflows and truncations (CVE-2021-32627, CVE-2021-32628)
- fix possible heap corruption in ziplist and listpack resulting by trying to
allocate more than the maximum size of 4GB.
- prevent ziplist (hash and zset) from reaching size of above 1GB, will be
converted to HT encoding, that's not a useful size.
- prevent listpack (stream) from reaching size of above 1GB.
- XADD will start a new listpack if the new record may cause the previous
listpack to grow over 1GB.
- XADD will respond with an error if a single stream record is over 1GB
- List type (ziplist in quicklist) was truncating strings that were over 4GB,
now it'll respond with an error.
|
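A hedged sketch of the 1GB guard described for streams; lp is the current listpack node and entry_size the encoded size of the record being appended, both assumed from the surrounding XADD path.

#define STREAM_LISTPACK_MAX_SIZE (1u << 30) /* 1GB cap per listpack node */

if (lp != NULL && lpBytes(lp) + entry_size > STREAM_LISTPACK_MAX_SIZE)
	lp = NULL; /* force XADD to start a new listpack node */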
generate_auth(Key *key, const unsigned char *data, int data_len,
unsigned char *auth, int auth_len)
{
switch (key->class) {
case NTP_MAC:
return HSH_Hash(key->data.ntp_mac.hash_id, key->data.ntp_mac.value,
key->data.ntp_mac.length, data, data_len, auth, auth_len);
case CMAC:
return CMC_Hash(key->data.cmac, data, data_len, auth, auth_len);
default:
return 0;
}
}
| 0 |
[
"CWE-59"
] |
chrony
|
e18903a6b56341481a2e08469c0602010bf7bfe3
| 287,657,318,921,900,700,000,000,000,000,000,000,000 | 13 |
switch to new util file functions
Replace all fopen(), rename(), and unlink() calls with the new util
functions.
|
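The CWE-59 tag on these chrony entries is about symlink following; a generic sketch (not chrony's actual UTI_* API) of a central open helper that refuses symlinks and pre-existing files:

#include <fcntl.h>
#include <stdio.h>

static FILE *util_open_new_file(const char *path)
{
	int fd = open(path, O_WRONLY | O_CREAT | O_EXCL | O_NOFOLLOW, 0644);

	if (fd < 0)
		return NULL; /* exists, is a symlink, or cannot be created */
	return fdopen(fd, "w");
}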
Perl_croak_sv(pTHX_ SV *baseex)
{
SV *ex = with_queued_errors(mess_sv(baseex, 0));
PERL_ARGS_ASSERT_CROAK_SV;
invoke_exception_hook(ex, FALSE);
die_unwind(ex);
}
| 0 |
[
"CWE-119",
"CWE-703",
"CWE-787"
] |
perl5
|
34716e2a6ee2af96078d62b065b7785c001194be
| 200,552,455,162,701,400,000,000,000,000,000,000,000 | 7 |
Perl_my_setenv(); handle integer wrap
RT #133204
Wean this function off int/I32 and onto UV/Size_t.
Also, replace all malloc-ish calls with a wrapper that does
overflow checks,
In particular, it was doing (nlen + vlen + 2) which could wrap when
the combined length of the environment variable name and value
exceeded around 0x7fffffff.
The wrapper check function is probably overkill, but belt and braces...
NB this function has several variant parts, #ifdef'ed by platform
type; I have blindly changed the parts that aren't compiled under linux.
|
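A minimal sketch of the overflow-checked wrapper idea: refuse any size computation that would wrap before it reaches malloc(). The function name is illustrative, not Perl's.

#include <stdint.h>
#include <stdlib.h>

static void *alloc_env_string(size_t nlen, size_t vlen)
{
	/* guard nlen + vlen + 2 ("=" plus NUL) against wrap-around */
	if (nlen > SIZE_MAX - 2 || vlen > SIZE_MAX - 2 - nlen)
		return NULL;
	return malloc(nlen + vlen + 2);
}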
SecureElementStatus_t SecureElementSetDevEui( uint8_t* devEui )
{
if( devEui == NULL )
{
return SECURE_ELEMENT_ERROR_NPE;
}
memcpy1( SeNvmCtx.DevEui, devEui, SE_EUI_SIZE );
SeNvmCtxChanged( );
return SECURE_ELEMENT_SUCCESS;
}
| 0 |
[
"CWE-120",
"CWE-787"
] |
LoRaMac-node
|
e3063a91daa7ad8a687223efa63079f0c24568e4
| 116,802,333,378,743,010,000,000,000,000,000,000,000 | 10 |
Added received buffer size checks.
|
MYSQL *mysql_connect_ssl_check(MYSQL *mysql_arg, const char *host,
const char *user, const char *passwd,
const char *db, uint port,
const char *unix_socket, ulong client_flag,
my_bool ssl_required MY_ATTRIBUTE((unused)))
{
MYSQL *mysql= mysql_real_connect(mysql_arg, host, user, passwd, db, port,
unix_socket, client_flag);
#if defined(HAVE_OPENSSL) && !defined(EMBEDDED_LIBRARY)
if (mysql && /* connection established. */
ssl_required && /* --ssl-mode=REQUIRED. */
!mysql_get_ssl_cipher(mysql)) /* non-SSL connection. */
{
NET *net= &mysql->net;
net->last_errno= CR_SSL_CONNECTION_ERROR;
strmov(net->last_error, "--ssl-mode=REQUIRED option forbids non SSL connections");
strmov(net->sqlstate, "HY000");
return NULL;
}
#endif
return mysql;
}
| 1 |
[
"CWE-319"
] |
mysql-server
|
0002e1380d5f8c113b6bce91f2cf3f75136fd7c7
| 196,364,607,586,004,220,000,000,000,000,000,000,000 | 22 |
BUG#25575605: SETTING --SSL-MODE=REQUIRED SENDS CREDENTIALS BEFORE VERIFYING SSL CONNECTION
MYSQL_OPT_SSL_MODE option introduced.
It is set in case of --ssl-mode=REQUIRED and permits only SSL connection.
(cherry picked from commit f91b941842d240b8a62645e507f5554e8be76aec)
|
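A hedged usage sketch of the option the message introduces (MYSQL_OPT_SSL_MODE and SSL_MODE_REQUIRED come from the fixed client library): TLS is demanded before any credentials leave the client.

#include <mysql.h>

static int connect_require_tls(MYSQL *mysql, const char *host,
                               const char *user, const char *passwd)
{
	unsigned int ssl_mode = SSL_MODE_REQUIRED;

	mysql_options(mysql, MYSQL_OPT_SSL_MODE, &ssl_mode);
	return mysql_real_connect(mysql, host, user, passwd,
	                          NULL, 0, NULL, 0) ? 0 : -1;
}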
void bnx2x__link_status_update(struct bnx2x *bp)
{
if (bp->state != BNX2X_STATE_OPEN)
return;
/* read updated dcb configuration */
if (IS_PF(bp)) {
bnx2x_dcbx_pmf_update(bp);
bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
if (bp->link_vars.link_up)
bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
else
bnx2x_stats_handle(bp, STATS_EVENT_STOP);
/* indicate link status */
bnx2x_link_report(bp);
} else { /* VF */
bp->port.supported[0] |= (SUPPORTED_10baseT_Half |
SUPPORTED_10baseT_Full |
SUPPORTED_100baseT_Half |
SUPPORTED_100baseT_Full |
SUPPORTED_1000baseT_Full |
SUPPORTED_2500baseX_Full |
SUPPORTED_10000baseT_Full |
SUPPORTED_TP |
SUPPORTED_FIBRE |
SUPPORTED_Autoneg |
SUPPORTED_Pause |
SUPPORTED_Asym_Pause);
bp->port.advertising[0] = bp->port.supported[0];
bp->link_params.bp = bp;
bp->link_params.port = BP_PORT(bp);
bp->link_params.req_duplex[0] = DUPLEX_FULL;
bp->link_params.req_flow_ctrl[0] = BNX2X_FLOW_CTRL_NONE;
bp->link_params.req_line_speed[0] = SPEED_10000;
bp->link_params.speed_cap_mask[0] = 0x7f0000;
bp->link_params.switch_cfg = SWITCH_CFG_10G;
bp->link_vars.mac_type = MAC_TYPE_BMAC;
bp->link_vars.line_speed = SPEED_10000;
bp->link_vars.link_status =
(LINK_STATUS_LINK_UP |
LINK_STATUS_SPEED_AND_DUPLEX_10GTFD);
bp->link_vars.link_up = 1;
bp->link_vars.duplex = DUPLEX_FULL;
bp->link_vars.flow_ctrl = BNX2X_FLOW_CTRL_NONE;
__bnx2x_link_report(bp);
bnx2x_sample_bulletin(bp);
/* if bulletin board did not have an update for link status
* __bnx2x_link_report will report current status
* but it will NOT duplicate report in case of already reported
* during sampling bulletin board.
*/
bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
}
}
| 0 |
[
"CWE-20"
] |
linux
|
8914a595110a6eca69a5e275b323f5d09e18f4f9
| 118,825,510,288,973,460,000,000,000,000,000,000,000 | 58 |
bnx2x: disable GSO where gso_size is too big for hardware
If a bnx2x card is passed a GSO packet with a gso_size larger than
~9700 bytes, it will cause a firmware error that will bring the card
down:
bnx2x: [bnx2x_attn_int_deasserted3:4323(enP24p1s0f0)]MC assert!
bnx2x: [bnx2x_mc_assert:720(enP24p1s0f0)]XSTORM_ASSERT_LIST_INDEX 0x2
bnx2x: [bnx2x_mc_assert:736(enP24p1s0f0)]XSTORM_ASSERT_INDEX 0x0 = 0x00000000 0x25e43e47 0x00463e01 0x00010052
bnx2x: [bnx2x_mc_assert:750(enP24p1s0f0)]Chip Revision: everest3, FW Version: 7_13_1
... (dump of values continues) ...
Detect when the mac length of a GSO packet is greater than the maximum
packet size (9700 bytes) and disable GSO.
Signed-off-by: Daniel Axtens <[email protected]>
Reviewed-by: Eric Dumazet <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
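A hedged sketch of the described guard as an ndo_features_check hook. The 9700-byte limit comes from the message; the header arithmetic here is a TCP-only approximation rather than the driver's exact computation.

static netdev_features_t bnx2x_gso_check(struct sk_buff *skb,
					 struct net_device *dev,
					 netdev_features_t features)
{
	unsigned int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);

	/* fall back to software GSO when one segment would exceed 9700 bytes */
	if (skb_is_gso(skb) && hdr_len + skb_shinfo(skb)->gso_size > 9700)
		features &= ~NETIF_F_GSO_MASK;
	return features;
}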
longlong Item_ref::val_time_packed()
{
DBUG_ASSERT(fixed);
longlong tmp= (*ref)->val_time_packed_result();
null_value= (*ref)->null_value;
return tmp;
}
| 0 |
[
"CWE-416"
] |
server
|
c02ebf3510850ba78a106be9974c94c3b97d8585
| 295,217,020,738,564,880,000,000,000,000,000,000,000 | 7 |
MDEV-24176 Preparations
1. moved fix_vcol_exprs() call to open_table()
mysql_alter_table() doesn't do lock_tables() so it cannot win from
fix_vcol_exprs() from there. Tests affected: main.default_session
2. Vanilla cleanups and comments.
|
save_pipeline (clear)
int clear;
{
sigset_t set, oset;
struct pipeline_saver *saver;
BLOCK_CHILD (set, oset);
saver = alloc_pipeline_saver ();
saver->pipeline = the_pipeline;
saver->next = saved_pipeline;
saved_pipeline = saver;
if (clear)
the_pipeline = (PROCESS *)NULL;
saved_already_making_children = already_making_children;
UNBLOCK_CHILD (oset);
}
| 0 |
[] |
bash
|
955543877583837c85470f7fb8a97b7aa8d45e6c
| 261,729,909,885,822,370,000,000,000,000,000,000,000 | 16 |
bash-4.4-rc2 release
|
check_pidfile(void)
{
const char *pidfile = CNF_GetPidFile();
FILE *in;
int pid, count;
in = fopen(pidfile, "r");
if (!in)
return;
count = fscanf(in, "%d", &pid);
fclose(in);
if (count != 1)
return;
if (getsid(pid) < 0)
return;
LOG_FATAL("Another chronyd may already be running (pid=%d), check %s",
pid, pidfile);
}
| 1 |
[
"CWE-59"
] |
chrony
|
e18903a6b56341481a2e08469c0602010bf7bfe3
| 209,572,017,262,673,180,000,000,000,000,000,000,000 | 22 |
switch to new util file functions
Replace all fopen(), rename(), and unlink() calls with the new util
functions.
|
int isolate_movable_page(struct page *page, isolate_mode_t mode)
{
struct address_space *mapping;
/*
* Avoid burning cycles with pages that are yet under __free_pages(),
* or just got freed under us.
*
* In case we 'win' a race for a movable page being freed under us and
* raise its refcount preventing __free_pages() from doing its job
* the put_page() at the end of this block will take care of
* release this page, thus avoiding a nasty leakage.
*/
if (unlikely(!get_page_unless_zero(page)))
goto out;
/*
* Check PageMovable before holding a PG_lock because page's owner
* assumes anybody doesn't touch PG_lock of newly allocated page
* so unconditionally grabbing the lock ruins page's owner side.
*/
if (unlikely(!__PageMovable(page)))
goto out_putpage;
/*
* As movable pages are not isolated from LRU lists, concurrent
* compaction threads can race against page migration functions
* as well as race against the releasing a page.
*
* In order to avoid having an already isolated movable page
* being (wrongly) re-isolated while it is under migration,
* or to avoid attempting to isolate pages being released,
* lets be sure we have the page lock
* before proceeding with the movable page isolation steps.
*/
if (unlikely(!trylock_page(page)))
goto out_putpage;
if (!PageMovable(page) || PageIsolated(page))
goto out_no_isolated;
mapping = page_mapping(page);
VM_BUG_ON_PAGE(!mapping, page);
if (!mapping->a_ops->isolate_page(page, mode))
goto out_no_isolated;
/* Driver shouldn't use PG_isolated bit of page->flags */
WARN_ON_ONCE(PageIsolated(page));
__SetPageIsolated(page);
unlock_page(page);
return 0;
out_no_isolated:
unlock_page(page);
out_putpage:
put_page(page);
out:
return -EBUSY;
}
| 0 |
[
"CWE-200"
] |
linux
|
197e7e521384a23b9e585178f3f11c9fa08274b9
| 47,332,977,516,846,260,000,000,000,000,000,000,000 | 60 |
Sanitize 'move_pages()' permission checks
The 'move_paghes()' system call was introduced long long ago with the
same permission checks as for sending a signal (except using
CAP_SYS_NICE instead of CAP_SYS_KILL for the overriding capability).
That turns out to not be a great choice - while the system call really
only moves physical page allocations around (and you need other
capabilities to do a lot of it), you can check the return value to map
out some the virtual address choices and defeat ASLR of a binary that
still shares your uid.
So change the access checks to the more common 'ptrace_may_access()'
model instead.
This tightens the access checks for the uid, and also effectively
changes the CAP_SYS_NICE check to CAP_SYS_PTRACE, but it's unlikely that
anybody really _uses_ this legacy system call any more (we have better
NUMA placement models these days), so I expect nobody to notice.
Famous last words.
Reported-by: Otto Ebeling <[email protected]>
Acked-by: Eric W. Biederman <[email protected]>
Cc: Willy Tarreau <[email protected]>
Cc: [email protected]
Signed-off-by: Linus Torvalds <[email protected]>
|
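The essence of the fix, sketched: gate move_pages() on ptrace-style access to the target task instead of a bare uid comparison. task and the out label are assumed from the surrounding syscall body.

if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
	err = -EPERM;
	goto out;
}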
static inline int is_loop_device(struct file *file)
{
struct inode *i = file->f_mapping->host;
return i && S_ISBLK(i->i_mode) && MAJOR(i->i_rdev) == LOOP_MAJOR;
}
| 0 |
[
"CWE-416",
"CWE-362"
] |
linux
|
ae6650163c66a7eff1acd6eb8b0f752dcfa8eba5
| 118,048,171,349,955,020,000,000,000,000,000,000,000 | 6 |
loop: fix concurrent lo_open/lo_release
范龙飞 reports that KASAN can report a use-after-free in __lock_acquire.
The reason is due to insufficient serialization in lo_release(), which
will continue to use the loop device even after it has decremented the
lo_refcnt to zero.
In the meantime, another process can come in, open the loop device
again as it is being shut down. Confusion ensues.
Reported-by: 范龙飞 <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
Signed-off-by: Jens Axboe <[email protected]>
|
int rdma_addr_size_in6(struct sockaddr_in6 *addr)
{
int ret = rdma_addr_size((struct sockaddr *) addr);
return ret <= sizeof(*addr) ? ret : 0;
}
| 0 |
[] |
net
|
6c8991f41546c3c472503dff1ea9daaddf9331c2
| 176,825,042,939,121,300,000,000,000,000,000,000,000 | 6 |
net: ipv6_stub: use ip6_dst_lookup_flow instead of ip6_dst_lookup
ipv6_stub uses the ip6_dst_lookup function to allow other modules to
perform IPv6 lookups. However, this function skips the XFRM layer
entirely.
All users of ipv6_stub->ip6_dst_lookup use ip_route_output_flow (via the
ip_route_output_key and ip_route_output helpers) for their IPv4 lookups,
which calls xfrm_lookup_route(). This patch fixes this inconsistent
behavior by switching the stub to ip6_dst_lookup_flow, which also calls
xfrm_lookup_route().
This requires some changes in all the callers, as these two functions
take different arguments and have different return types.
Fixes: 5f81bd2e5d80 ("ipv6: export a stub for IPv6 symbols used by vxlan")
Reported-by: Xiumei Mu <[email protected]>
Signed-off-by: Sabrina Dubroca <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
RSAEP(
TPM2B *dInOut, // IN: size of the encrypted block and the size of
// the encrypted value. It must be the size of
// the modulus.
// OUT: the encrypted data. Will receive the
// decrypted value
OBJECT *key // IN: the key to use
)
{
TPM2B_TYPE(4BYTES, 4);
TPM2B_4BYTES(e) = {{4, {(BYTE)((RSA_DEFAULT_PUBLIC_EXPONENT >> 24) & 0xff),
(BYTE)((RSA_DEFAULT_PUBLIC_EXPONENT >> 16) & 0xff),
(BYTE)((RSA_DEFAULT_PUBLIC_EXPONENT >> 8) & 0xff),
(BYTE)((RSA_DEFAULT_PUBLIC_EXPONENT)& 0xff)}}};
//
if(key->publicArea.parameters.rsaDetail.exponent != 0)
UINT32_TO_BYTE_ARRAY(key->publicArea.parameters.rsaDetail.exponent,
e.t.buffer);
return ModExpB(dInOut->size, dInOut->buffer, dInOut->size, dInOut->buffer,
e.t.size, e.t.buffer, key->publicArea.unique.rsa.t.size,
key->publicArea.unique.rsa.t.buffer);
}
| 0 |
[
"CWE-787"
] |
libtpms
|
505ef841c00b4c096b1977c667cb957bec3a1d8b
| 337,737,057,415,839,680,000,000,000,000,000,000,000 | 22 |
tpm2: Fix output buffer parameter and size for RSA decyrption
For the RSA decryption we have to use an output buffer of the size of the
(largest possible) RSA key for the decryption to always work.
This fixes a stack corruption bug that caused a SIGBUS and termination of
'swtpm'.
Signed-off-by: Stefan Berger <[email protected]>
|
static int jpc_pi_nextlrcp(register jpc_pi_t *pi)
{
jpc_pchg_t *pchg;
int *prclyrno;
pchg = pi->pchg;
if (!pi->prgvolfirst) {
prclyrno = &pi->pirlvl->prclyrnos[pi->prcno];
goto skip;
} else {
pi->prgvolfirst = false;
}
for (pi->lyrno = 0; pi->lyrno < pi->numlyrs && pi->lyrno <
JAS_CAST(int, pchg->lyrnoend); ++pi->lyrno) {
for (pi->rlvlno = pchg->rlvlnostart; pi->rlvlno < pi->maxrlvls &&
pi->rlvlno < pchg->rlvlnoend; ++pi->rlvlno) {
for (pi->compno = pchg->compnostart, pi->picomp =
&pi->picomps[pi->compno]; pi->compno < pi->numcomps
&& pi->compno < JAS_CAST(int, pchg->compnoend); ++pi->compno,
++pi->picomp) {
if (pi->rlvlno >= pi->picomp->numrlvls) {
continue;
}
pi->pirlvl = &pi->picomp->pirlvls[pi->rlvlno];
for (pi->prcno = 0, prclyrno =
pi->pirlvl->prclyrnos; pi->prcno <
pi->pirlvl->numprcs; ++pi->prcno,
++prclyrno) {
if (pi->lyrno >= *prclyrno) {
*prclyrno = pi->lyrno;
++(*prclyrno);
return 0;
}
skip:
;
}
}
}
}
return 1;
}
| 0 |
[
"CWE-189"
] |
jasper
|
3c55b399c36ef46befcb21e4ebc4799367f89684
| 107,577,678,005,805,340,000,000,000,000,000,000,000 | 42 |
At many places in the code, jas_malloc or jas_recalloc was being
invoked with the size argument being computed in a manner that would not
allow integer overflow to be detected. Now, these places in the code
have been modified to use special-purpose memory allocation functions
(e.g., jas_alloc2, jas_alloc3, jas_realloc2) that check for overflow.
This should fix many security problems.
|
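A minimal sketch of the checked-allocation idea behind jas_alloc2(): fail closed when num * size would overflow instead of letting the multiplication wrap.

#include <stdint.h>
#include <stdlib.h>

static void *checked_alloc2(size_t num, size_t size)
{
	if (size != 0 && num > SIZE_MAX / size)
		return NULL; /* multiplication would overflow */
	return malloc(num * size);
}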
static inline int get_byte(LZOContext *c)
{
if (c->in < c->in_end)
return *c->in++;
c->error |= AV_LZO_INPUT_DEPLETED;
return 1;
}
| 0 |
[
"CWE-190"
] |
FFmpeg
|
d6af26c55c1ea30f85a7d9edbc373f53be1743ee
| 46,420,697,258,533,440,000,000,000,000,000,000,000 | 7 |
avutil/lzo: Fix integer overflow
Embargoed-till: 2014-06-27 requested by researcher, but embargo broken by libav today (git and mailing list)
Fixes: LMS-2014-06-16-4
Found-by: "Don A. Bailey" <[email protected]>
See: ccda51b14c0fcae2fad73a24872dce75a7964996
Signed-off-by: Michael Niedermayer <[email protected]>
|
Account_options() { }
| 0 |
[
"CWE-703"
] |
server
|
39feab3cd31b5414aa9b428eaba915c251ac34a2
| 335,056,557,604,353,770,000,000,000,000,000,000,000 | 1 |
MDEV-26412 Server crash in Item_field::fix_outer_field for INSERT SELECT
If an INSERT/REPLACE SELECT statement contained an ON expression in the top
level select and this expression used a subquery with a column reference
that could not be resolved then an attempt to resolve this reference as
an outer reference caused a crash of the server. This happened because the
outer context field in the Name_resolution_context structure was not set
to NULL for such references. Rather it pointed to the first element in
the select_stack.
Note that starting from 10.4 we cannot use the SELECT_LEX::outer_select()
method when parsing a SELECT construct.
Approved by Oleksandr Byelkin <[email protected]>
|
static void Ins_IP( INS_ARG )
{
TT_F26Dot6 org_a, org_b, org_x,
cur_a, cur_b, cur_x,
distance;
Int point;
(void)args;
if ( CUR.top < CUR.GS.loop ||
BOUNDS(CUR.GS.rp1, CUR.zp0.n_points) ||
BOUNDS(CUR.GS.rp2, CUR.zp1.n_points))
{
CUR.error = TT_Err_Invalid_Reference;
return;
}
org_a = CUR_Func_dualproj( CUR.zp0.org_x[CUR.GS.rp1],
CUR.zp0.org_y[CUR.GS.rp1] );
org_b = CUR_Func_dualproj( CUR.zp1.org_x[CUR.GS.rp2],
CUR.zp1.org_y[CUR.GS.rp2] );
cur_a = CUR_Func_project( CUR.zp0.cur_x[CUR.GS.rp1],
CUR.zp0.cur_y[CUR.GS.rp1] );
cur_b = CUR_Func_project( CUR.zp1.cur_x[CUR.GS.rp2],
CUR.zp1.cur_y[CUR.GS.rp2] );
while ( CUR.GS.loop > 0 )
{
CUR.args--;
point = (Int)CUR.stack[CUR.args];
if ( BOUNDS( point, CUR.zp2.n_points ) )
{
CUR.error = TT_Err_Invalid_Reference;
return;
}
org_x = CUR_Func_dualproj( CUR.zp2.org_x[point],
CUR.zp2.org_y[point] );
cur_x = CUR_Func_project( CUR.zp2.cur_x[point],
CUR.zp2.cur_y[point] );
if ( ( org_a <= org_b && org_x <= org_a ) ||
( org_a > org_b && org_x >= org_a ) )
distance = ( cur_a - org_a ) + ( org_x - cur_x );
else if ( ( org_a <= org_b && org_x >= org_b ) ||
( org_a > org_b && org_x < org_b ) )
distance = ( cur_b - org_b ) + ( org_x - cur_x );
else
/* note: it seems that rounding this value isn't a good */
/* idea (cf. width of capital 'S' in Times) */
distance = MulDiv_Round( cur_b - cur_a,
org_x - org_a,
org_b - org_a ) + ( cur_a - cur_x );
CUR_Func_move( &CUR.zp2, point, distance );
CUR.GS.loop--;
}
CUR.GS.loop = 1;
CUR.new_top = CUR.args;
}
| 0 |
[
"CWE-416"
] |
ghostpdl
|
98f6da60b9d463c617e631fc254cf6d66f2e8e3c
| 184,923,160,759,769,100,000,000,000,000,000,000,000 | 71 |
Bug 698026: bounds check zone pointers in Ins_IP()
|
videobuf_vm_open(struct vm_area_struct *vma)
{
struct videobuf_mapping *map = vma->vm_private_data;
dprintk(2,"vm_open %p [count=%d,vma=%08lx-%08lx]\n",map,
map->count,vma->vm_start,vma->vm_end);
map->count++;
}
| 1 |
[
"CWE-119",
"CWE-787"
] |
linux
|
0b29669c065f60501e7289e1950fa2a618962358
| 198,627,285,198,280,800,000,000,000,000,000,000,000 | 9 |
V4L/DVB (6751): V4L: Memory leak! Fix count in videobuf-vmalloc mmap
This is pretty serious bug. map->count is never initialized after the
call to kmalloc making the count start at some random trash value. The
end result is leaking videobufs.
Also, fix up the debug statements to print unsigned values.
Pushed to http://ifup.org/hg/v4l-dvb too
Signed-off-by: Brandon Philips <[email protected]>
Signed-off-by: Mauro Carvalho Chehab <[email protected]>
|
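A sketch of the fix's shape at allocation time: zero the mapping and give it an initial reference, so the vm_open/vm_close counting above starts from a sane value. map is the videobuf_mapping being created in the mmap path.

map = kzalloc(sizeof(*map), GFP_KERNEL);
if (!map)
	return -ENOMEM;
map->count = 1; /* the freshly created mapping itself holds one reference */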
_gnutls_recv_handshake(gnutls_session_t session,
gnutls_handshake_description_t type,
unsigned int optional, gnutls_buffer_st * buf)
{
int ret, ret2;
handshake_buffer_st hsk;
ret = _gnutls_handshake_io_recv_int(session, type, &hsk, optional);
if (ret < 0) {
if (optional != 0
&& ret == GNUTLS_E_UNEXPECTED_HANDSHAKE_PACKET) {
if (buf)
_gnutls_buffer_init(buf);
return 0;
}
return gnutls_assert_val_fatal(ret);
}
session->internals.last_handshake_in = hsk.htype;
ret = call_hook_func(session, hsk.htype, GNUTLS_HOOK_PRE, 1, hsk.data.data, hsk.data.length);
if (ret < 0) {
gnutls_assert();
goto cleanup;
}
ret = handshake_hash_add_recvd(session, hsk.htype,
hsk.header, hsk.header_size,
hsk.data.data,
hsk.data.length);
if (ret < 0) {
gnutls_assert();
goto cleanup;
}
switch (hsk.htype) {
case GNUTLS_HANDSHAKE_CLIENT_HELLO_V2:
case GNUTLS_HANDSHAKE_CLIENT_HELLO:
case GNUTLS_HANDSHAKE_SERVER_HELLO:
if (hsk.htype == GNUTLS_HANDSHAKE_CLIENT_HELLO_V2)
ret =
_gnutls_read_client_hello_v2(session,
hsk.data.data,
hsk.data.length);
else
ret =
recv_hello(session, hsk.data.data,
hsk.data.length);
if (ret < 0) {
gnutls_assert();
goto cleanup;
}
break;
case GNUTLS_HANDSHAKE_HELLO_VERIFY_REQUEST:
ret =
recv_hello_verify_request(session,
hsk.data.data,
hsk.data.length);
if (ret < 0) {
gnutls_assert();
goto cleanup;
} else {
/* Signal our caller we have received a verification cookie
and ClientHello needs to be sent again. */
ret = 1;
}
break;
case GNUTLS_HANDSHAKE_SERVER_HELLO_DONE:
if (hsk.data.length == 0)
ret = 0;
else {
gnutls_assert();
ret = GNUTLS_E_UNEXPECTED_PACKET_LENGTH;
goto cleanup;
}
break;
case GNUTLS_HANDSHAKE_CERTIFICATE_PKT:
case GNUTLS_HANDSHAKE_CERTIFICATE_STATUS:
case GNUTLS_HANDSHAKE_FINISHED:
case GNUTLS_HANDSHAKE_SERVER_KEY_EXCHANGE:
case GNUTLS_HANDSHAKE_CLIENT_KEY_EXCHANGE:
case GNUTLS_HANDSHAKE_CERTIFICATE_REQUEST:
case GNUTLS_HANDSHAKE_CERTIFICATE_VERIFY:
case GNUTLS_HANDSHAKE_SUPPLEMENTAL:
case GNUTLS_HANDSHAKE_NEW_SESSION_TICKET:
ret = hsk.data.length;
break;
default:
gnutls_assert();
/* we shouldn't actually arrive here in any case .
* unexpected messages should be caught after _gnutls_handshake_io_recv_int()
*/
ret = GNUTLS_E_UNEXPECTED_HANDSHAKE_PACKET;
goto cleanup;
}
ret2 = call_hook_func(session, hsk.htype, GNUTLS_HOOK_POST, 1, hsk.data.data, hsk.data.length);
if (ret2 < 0) {
ret = ret2;
gnutls_assert();
goto cleanup;
}
if (buf) {
*buf = hsk.data;
return ret;
}
cleanup:
_gnutls_handshake_buffer_clear(&hsk);
return ret;
}
| 0 |
[
"CWE-310"
] |
gnutls
|
db9a7d810f9ee4c9cc49731f5fd9bdeae68d7eaa
| 257,082,396,676,602,570,000,000,000,000,000,000,000 | 116 |
handshake: check for TLS_FALLBACK_SCSV
If TLS_FALLBACK_SCSV was sent by the client during the handshake, and
the advertised protocol version is lower than GNUTLS_TLS_VERSION_MAX,
send the "Inappropriate fallback" fatal alert and abort the handshake.
This mechanism was defined in RFC7507.
|
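A hedged sketch of the RFC 7507 server-side rule; suite iterates the client's offered cipher suites and client_ver is the advertised version, both assumed from the surrounding hello parser.

#define TLS_FALLBACK_SCSV 0x5600

if (suite == TLS_FALLBACK_SCSV && client_ver < GNUTLS_TLS_VERSION_MAX) {
	gnutls_alert_send(session, GNUTLS_AL_FATAL,
			  GNUTLS_A_INAPPROPRIATE_FALLBACK);
	return gnutls_assert_val(GNUTLS_E_INAPPROPRIATE_FALLBACK);
}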
rl_restore_prompt ()
{
FREE (local_prompt);
FREE (local_prompt_prefix);
local_prompt = saved_local_prompt;
local_prompt_prefix = saved_local_prefix;
local_prompt_len = saved_local_length;
prompt_prefix_length = saved_prefix_length;
prompt_last_invisible = saved_last_invisible;
prompt_visible_length = saved_visible_length;
prompt_invis_chars_first_line = saved_invis_chars_first_line;
prompt_physical_chars = saved_physical_chars;
/* can test saved_local_prompt to see if prompt info has been saved. */
saved_local_prompt = saved_local_prefix = (char *)0;
saved_local_length = 0;
saved_last_invisible = saved_visible_length = saved_prefix_length = 0;
saved_invis_chars_first_line = saved_physical_chars = 0;
}
| 0 |
[] |
bash
|
955543877583837c85470f7fb8a97b7aa8d45e6c
| 180,557,867,207,032,800,000,000,000,000,000,000,000 | 20 |
bash-4.4-rc2 release
|
ofputil_frag_handling_to_string(enum ofputil_frag_handling frag)
{
switch (frag) {
case OFPUTIL_FRAG_NORMAL: return "normal";
case OFPUTIL_FRAG_DROP: return "drop";
case OFPUTIL_FRAG_REASM: return "reassemble";
case OFPUTIL_FRAG_NX_MATCH: return "nx-match";
}
OVS_NOT_REACHED();
}
| 0 |
[
"CWE-772"
] |
ovs
|
77ad4225d125030420d897c873e4734ac708c66b
| 332,994,737,582,714,860,000,000,000,000,000,000,000 | 11 |
ofp-util: Fix memory leaks on error cases in ofputil_decode_group_mod().
Found by libFuzzer.
Reported-by: Bhargava Shastry <[email protected]>
Signed-off-by: Ben Pfaff <[email protected]>
Acked-by: Justin Pettit <[email protected]>
|
int smb_vfs_call_sys_acl_delete_def_file(struct vfs_handle_struct *handle,
const char *path)
{
VFS_FIND(sys_acl_delete_def_file);
return handle->fns->sys_acl_delete_def_file(handle, path);
}
| 0 |
[
"CWE-22"
] |
samba
|
bd269443e311d96ef495a9db47d1b95eb83bb8f4
| 328,003,696,991,338,240,000,000,000,000,000,000,000 | 6 |
Fix bug 7104 - "wide links" and "unix extensions" are incompatible.
Change parameter "wide links" to default to "no".
Ensure "wide links = no" if "unix extensions = yes" on a share.
Fix man pages to reflect this.
Remove "within share" checks for a UNIX symlink set - even if
widelinks = no. The server will not follow that link anyway.
Correct DEBUG message in check_reduced_name() to add missing "\n"
so it's really clear when a path is being denied as it's outside
the enclosing share path.
Jeremy.
|
uint32_t GPMF_PayloadSampleCount(GPMF_stream *ms)
{
uint32_t count = 0;
if (ms)
{
uint32_t fourcc = GPMF_Key(ms);
GPMF_stream find_stream;
GPMF_CopyState(ms, &find_stream);
if (GPMF_OK == GPMF_FindNext(&find_stream, fourcc, GPMF_CURRENT_LEVEL)) // Count the instances, not the repeats
{
count=2;
while (GPMF_OK == GPMF_FindNext(&find_stream, fourcc, GPMF_CURRENT_LEVEL))
{
count++;
}
}
else
{
count = GPMF_Repeat(ms);
}
}
return count;
}
| 0 |
[
"CWE-125",
"CWE-369",
"CWE-787"
] |
gpmf-parser
|
341f12cd5b97ab419e53853ca00176457c9f1681
| 5,643,964,417,229,794,000,000,000,000,000,000,000 | 25 |
fixed many security issues with the too crude mp4 reader
|
tiff_document_load (EvDocument *document,
const char *uri,
GError **error)
{
TiffDocument *tiff_document = TIFF_DOCUMENT (document);
gchar *filename;
TIFF *tiff;
filename = g_filename_from_uri (uri, NULL, error);
if (!filename)
return FALSE;
push_handlers ();
#ifdef G_OS_WIN32
{
wchar_t *wfilename = g_utf8_to_utf16 (filename, -1, NULL, NULL, error);
if (wfilename == NULL) {
return FALSE;
}
tiff = TIFFOpenW (wfilename, "r");
g_free (wfilename);
}
#else
tiff = TIFFOpen (filename, "r");
#endif
if (tiff) {
guint32 w, h;
/* FIXME: unused data? why bother here */
TIFFGetField(tiff, TIFFTAG_IMAGEWIDTH, &w);
TIFFGetField(tiff, TIFFTAG_IMAGELENGTH, &h);
}
if (!tiff) {
pop_handlers ();
g_set_error_literal (error,
EV_DOCUMENT_ERROR,
EV_DOCUMENT_ERROR_INVALID,
_("Invalid document"));
g_free (filename);
return FALSE;
}
tiff_document->tiff = tiff;
g_free (tiff_document->uri);
g_free (filename);
tiff_document->uri = g_strdup (uri);
pop_handlers ();
return TRUE;
}
| 0 |
[
"CWE-754"
] |
evince
|
234f034a4d15cd46dd556f4945f99fbd57ef5f15
| 192,803,378,438,024,800,000,000,000,000,000,000,000 | 56 |
tiff: Handle failure from TIFFReadRGBAImageOriented
The TIFFReadRGBAImageOriented function returns zero if it was unable to
read the image. Return NULL in this case instead of displaying
uninitialized memory.
Fixes #1129
|
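A sketch of the failure handling the message asks for: bail out when libtiff cannot decode the image instead of handing uninitialized pixels to the renderer. The raster allocation and cleanup are simplified, and a production version must also guard w * h against overflow.

uint32 *raster = _TIFFmalloc(w * h * sizeof(uint32));

if (raster && !TIFFReadRGBAImageOriented(tiff, w, h, raster,
					 ORIENTATION_TOPLEFT, 0)) {
	_TIFFfree(raster);
	return NULL; /* undecodable image: report failure, expose nothing */
}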
dns_message_logpacket2(dns_message_t *message,
const char *description, isc_sockaddr_t *address,
isc_logcategory_t *category, isc_logmodule_t *module,
int level, isc_mem_t *mctx)
{
REQUIRE(address != NULL);
logfmtpacket(message, description, address, category, module,
&dns_master_style_debug, level, mctx);
}
| 0 |
[
"CWE-617"
] |
bind9
|
6ed167ad0a647dff20c8cb08c944a7967df2d415
| 137,949,887,581,276,380,000,000,000,000,000,000,000 | 10 |
Always keep a copy of the message
this allows it to be available even when dns_message_parse()
returns an error.
|
void timing_operation_complete( char operation ) {
#ifndef _WIN32
if (g_use_seccomp) {
return;
}
dev_assert(current_operation == operation);
#ifdef _WIN32
current_operation_end = clock();
if (timing_log) {
double begin_to_end = (current_operation_end - current_operation_begin) / (double)CLOCKS_PER_SEC;
double begin_to_first_byte = begin_to_end;
if (current_operation_first_byte != 0) { // if we were successful
begin_to_first_byte = (current_operation_first_byte - current_operation_begin) / (double)CLOCKS_PER_SEC;
}
fprintf(timing_log, "%c %f %f\n", current_operation, begin_to_first_byte, begin_to_end);
fflush(timing_log);
}
current_operation_end = 0;
current_operation_begin = 0;
current_operation_first_byte = 0;
#else
gettimeofday(&current_operation_end, NULL);
if (timing_log) {
double begin = current_operation_begin.tv_sec + (double)current_operation_begin.tv_usec / 1000000.;
double end = current_operation_end.tv_sec + (double)current_operation_end.tv_usec / 1000000.;
double first_byte = current_operation_first_byte.tv_sec + (double)current_operation_first_byte.tv_usec / 1000000.;
double begin_to_end = end - begin;
double begin_to_first_byte = begin_to_end;
if (current_operation_first_byte.tv_sec != 0) { // if we were successful
begin_to_first_byte = first_byte - begin;
}
fprintf(timing_log, "%c %f %f\n", current_operation, begin_to_first_byte, begin_to_end);
fflush(timing_log);
}
memset(&current_operation_end, 0, sizeof(current_operation_end));
memset(&current_operation_begin, 0, sizeof(current_operation_begin));
memset(&current_operation_first_byte, 0, sizeof(current_operation_first_byte));
#endif
#endif
}
| 0 |
[
"CWE-399",
"CWE-190"
] |
lepton
|
6a5ceefac1162783fffd9506a3de39c85c725761
| 203,189,438,256,241,500,000,000,000,000,000,000,000 | 40 |
fix #111
|
static void io_req_task_work_add_fallback(struct io_kiocb *req,
task_work_func_t cb)
{
init_task_work(&req->task_work, cb);
io_task_work_add_head(&req->ctx->exit_task_work, &req->task_work);
}
| 0 |
[
"CWE-787"
] |
linux
|
d1f82808877bb10d3deee7cf3374a4eb3fb582db
| 237,930,129,557,357,050,000,000,000,000,000,000,000 | 6 |
io_uring: truncate lengths larger than MAX_RW_COUNT on provide buffers
Read and write operations are capped to MAX_RW_COUNT. Some read ops rely on
that limit, and that is not guaranteed by the IORING_OP_PROVIDE_BUFFERS.
Truncate those lengths when doing io_add_buffers, so buffer addresses still
use the uncapped length.
Also, take the chance and change struct io_buffer len member to __u32, so
it matches struct io_provide_buffer len member.
This fixes CVE-2021-3491, also reported as ZDI-CAN-13546.
Fixes: ddf0322db79c ("io_uring: add IORING_OP_PROVIDE_BUFFERS")
Reported-by: Billy Jheng Bing-Jhong (@st424204)
Signed-off-by: Thadeu Lima de Souza Cascardo <[email protected]>
Signed-off-by: Jens Axboe <[email protected]>
|
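The truncation itself is one line at buffer-provide time; pbuf is the user-supplied description, buf the kernel-side record, and addr the running address cursor, per the message.

buf->len = min_t(__u32, pbuf->len, MAX_RW_COUNT); /* cap what I/O may use */
addr += pbuf->len;            /* but advance addresses by the full length */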
static void update_numa_stats(struct numa_stats *ns, int nid)
{
int cpu;
memset(ns, 0, sizeof(*ns));
for_each_cpu(cpu, cpumask_of_node(nid)) {
struct rq *rq = cpu_rq(cpu);
ns->load += weighted_cpuload(rq);
ns->compute_capacity += capacity_of(cpu);
}
}
| 0 |
[
"CWE-400",
"CWE-703",
"CWE-835"
] |
linux
|
c40f7d74c741a907cfaeb73a7697081881c497d0
| 240,738,321,769,221,000,000,000,000,000,000,000,000 | 13 |
sched/fair: Fix infinite loop in update_blocked_averages() by reverting a9e7f6544b9c
Zhipeng Xie, Xie XiuQi and Sargun Dhillon reported lockups in the
scheduler under high loads, starting at around the v4.18 time frame,
and Zhipeng Xie tracked it down to bugs in the rq->leaf_cfs_rq_list
manipulation.
Do a (manual) revert of:
a9e7f6544b9c ("sched/fair: Fix O(nr_cgroups) in load balance path")
It turns out that the list_del_leaf_cfs_rq() introduced by this commit
is a surprising property that was not considered in followup commits
such as:
9c2791f936ef ("sched/fair: Fix hierarchical order in rq->leaf_cfs_rq_list")
As Vincent Guittot explains:
"I think that there is a bigger problem with commit a9e7f6544b9c and
cfs_rq throttling:
Let take the example of the following topology TG2 --> TG1 --> root:
1) The 1st time a task is enqueued, we will add TG2 cfs_rq then TG1
cfs_rq to leaf_cfs_rq_list and we are sure to do the whole branch in
one path because it has never been used and can't be throttled so
tmp_alone_branch will point to leaf_cfs_rq_list at the end.
2) Then TG1 is throttled
3) and we add TG3 as a new child of TG1.
4) The 1st enqueue of a task on TG3 will add TG3 cfs_rq just before TG1
cfs_rq and tmp_alone_branch will stay on rq->leaf_cfs_rq_list.
With commit a9e7f6544b9c, we can del a cfs_rq from rq->leaf_cfs_rq_list.
So if the load of TG1 cfs_rq becomes NULL before step 2) above, TG1
cfs_rq is removed from the list.
Then at step 4), TG3 cfs_rq is added at the beginning of rq->leaf_cfs_rq_list
but tmp_alone_branch still points to TG3 cfs_rq because its throttled
parent can't be enqueued when the lock is released.
tmp_alone_branch doesn't point to rq->leaf_cfs_rq_list whereas it should.
So if TG3 cfs_rq is removed or destroyed before tmp_alone_branch
points on another TG cfs_rq, the next TG cfs_rq that will be added,
will be linked outside rq->leaf_cfs_rq_list - which is bad.
In addition, we can break the ordering of the cfs_rq in
rq->leaf_cfs_rq_list but this ordering is used to update and
propagate the update from leaf down to root."
Instead of trying to work through all these cases and trying to reproduce
the very high loads that produced the lockup to begin with, simplify
the code temporarily by reverting a9e7f6544b9c - which change was clearly
not thought through completely.
This (hopefully) gives us a kernel that doesn't lock up so people
can continue to enjoy their holidays without worrying about regressions. ;-)
[ mingo: Wrote changelog, fixed weird spelling in code comment while at it. ]
Analyzed-by: Xie XiuQi <[email protected]>
Analyzed-by: Vincent Guittot <[email protected]>
Reported-by: Zhipeng Xie <[email protected]>
Reported-by: Sargun Dhillon <[email protected]>
Reported-by: Xie XiuQi <[email protected]>
Tested-by: Zhipeng Xie <[email protected]>
Tested-by: Sargun Dhillon <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
Acked-by: Vincent Guittot <[email protected]>
Cc: <[email protected]> # v4.13+
Cc: Bin Li <[email protected]>
Cc: Mike Galbraith <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Tejun Heo <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Fixes: a9e7f6544b9c ("sched/fair: Fix O(nr_cgroups) in load balance path")
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
|
static const char *pp_getenv(const Token *t, bool warn)
{
const char *txt;
const char *v;
char *buf = NULL;
bool is_string = false;
if (!t)
return NULL;
txt = tok_text(t);
switch (t->type) {
case TOK_ENVIRON:
txt += 2; /* Skip leading %! */
is_string = nasm_isquote(*txt);
break;
case TOK_STRING:
is_string = true;
break;
case TOK_INTERNAL_STRING:
case TOK_NAKED_STRING:
case TOK_ID:
is_string = false;
break;
default:
return NULL;
}
if (is_string) {
buf = nasm_strdup(txt);
nasm_unquote_cstr(buf, NULL);
txt = buf;
}
v = getenv(txt);
if (warn && !v) {
/*!
*!environment [on] nonexistent environment variable
*! warns if a nonexistent environment variable
*! is accessed using the \c{%!} preprocessor
*! construct (see \k{getenv}.) Such environment
*! variables are treated as empty (with this
*! warning issued) starting in NASM 2.15;
*! earlier versions of NASM would treat this as
*! an error.
*/
nasm_warn(WARN_ENVIRONMENT, "nonexistent environment variable `%s'", txt);
v = "";
}
if (buf)
nasm_free(buf);
return v;
}
| 0 |
[] |
nasm
|
6299a3114ce0f3acd55d07de201a8ca2f0a83059
| 209,393,811,786,470,850,000,000,000,000,000,000,000 | 57 |
BR 3392708: fix NULL pointer reference for invalid %stacksize
After issuing an error message for a missing %stacksize argument, need
to quit rather than continuing to try to access the pointer.
Fold uses of tok_text() while we are at it.
Reported-by: Suhwan <[email protected]>
Signed-off-by: H. Peter Anvin (Intel) <[email protected]>
|
int RGWPutObjProcessor_Multipart::prepare(RGWRados *store, string *oid_rand)
{
string oid = obj_str;
upload_id = s->info.args.get("uploadId");
if (!oid_rand) {
mp.init(oid, upload_id);
} else {
mp.init(oid, upload_id, *oid_rand);
}
part_num = s->info.args.get("partNumber");
if (part_num.empty()) {
ldout(s->cct, 10) << "part number is empty" << dendl;
return -EINVAL;
}
string err;
uint64_t num = (uint64_t)strict_strtol(part_num.c_str(), 10, &err);
if (!err.empty()) {
ldout(s->cct, 10) << "bad part number: " << part_num << ": " << err << dendl;
return -EINVAL;
}
string upload_prefix = oid + ".";
if (!oid_rand) {
upload_prefix.append(upload_id);
} else {
upload_prefix.append(*oid_rand);
}
rgw_obj target_obj;
target_obj.init(bucket, oid);
manifest.set_prefix(upload_prefix);
manifest.set_multipart_part_rule(store->ctx()->_conf->rgw_obj_stripe_size, num);
int r = manifest_gen.create_begin(store->ctx(), &manifest, s->bucket_info.placement_rule, bucket, target_obj);
if (r < 0) {
return r;
}
cur_obj = manifest_gen.get_cur_obj(store);
rgw_raw_obj_to_obj(bucket, cur_obj, &head_obj);
head_obj.index_hash_source = obj_str;
r = prepare_init(store, NULL);
if (r < 0) {
return r;
}
return 0;
}
| 0 |
[
"CWE-770"
] |
ceph
|
ab29bed2fc9f961fe895de1086a8208e21ddaddc
| 323,832,356,279,543,960,000,000,000,000,000,000,000 | 55 |
rgw: fix issues with 'enforce bounds' patch
The patch to enforce bounds on max-keys/max-uploads/max-parts had a few
issues that would prevent us from compiling it. Instead of changing the
code provided by the submitter, we're addressing them in a separate
commit to maintain the DCO.
Signed-off-by: Joao Eduardo Luis <[email protected]>
Signed-off-by: Abhishek Lekshmanan <[email protected]>
(cherry picked from commit 29bc434a6a81a2e5c5b8cfc4c8d5c82ca5bf538a)
mimic specific fixes:
As the largeish change from master g_conf() isn't in mimic yet, use the g_conf
global structure, also make rgw_op use the value from req_info ceph context as
we do for all the requests
|
static void _parse_resource_directory(struct PE_(r_bin_pe_obj_t) *bin, Pe_image_resource_directory *dir, ut64 offDir, int type, int id, SdbHash *dirs) {
int index = 0;
ut32 totalRes = dir->NumberOfNamedEntries + dir->NumberOfIdEntries;
ut64 rsrc_base = bin->resource_directory_offset;
ut64 off;
if (totalRes > R_PE_MAX_RESOURCES) {
return;
}
for (index = 0; index < totalRes; index++) {
Pe_image_resource_directory_entry entry;
off = rsrc_base + offDir + sizeof(*dir) + index * sizeof(entry);
char *key = sdb_fmt ("0x%08"PFMT64x, off);
if (sdb_ht_find (dirs, key, NULL)) {
break;
}
sdb_ht_insert (dirs, key, "1");
if (off > bin->size || off + sizeof (entry) > bin->size) {
break;
}
if (r_buf_read_at (bin->b, off, (ut8*)&entry, sizeof(entry)) < 1) {
eprintf ("Warning: read resource entry\n");
break;
}
if (entry.u2.s.DataIsDirectory) {
// detect a malicious file trying to make us loop forever
Pe_image_resource_directory identEntry;
off = rsrc_base + entry.u2.s.OffsetToDirectory;
int len = r_buf_read_at (bin->b, off, (ut8*) &identEntry, sizeof (identEntry));
if (len < 1 || len != sizeof (Pe_image_resource_directory)) {
eprintf ("Warning: parsing resource directory\n");
}
_parse_resource_directory (bin, &identEntry,
entry.u2.s.OffsetToDirectory, type, entry.u1.Id, dirs);
continue;
}
Pe_image_resource_data_entry *data = R_NEW0 (Pe_image_resource_data_entry);
if (!data) {
break;
}
off = rsrc_base + entry.u2.OffsetToData;
if (off > bin->size || off + sizeof (*data) > bin->size) {
free (data);
break;
}
if (r_buf_read_at (bin->b, off, (ut8*)data, sizeof (*data)) != sizeof (*data)) {
eprintf ("Warning: read (resource data entry)\n");
free (data);
break;
}
if (type == PE_RESOURCE_ENTRY_VERSION) {
char key[64];
int counter = 0;
Sdb *sdb = sdb_new0 ();
if (!sdb) {
free (data);
sdb_free (sdb);
continue;
}
PE_DWord data_paddr = bin_pe_rva_to_paddr (bin, data->OffsetToData);
if (!data_paddr) {
bprintf ("Warning: bad RVA in resource data entry\n");
free (data);
sdb_free (sdb);
continue;
}
PE_DWord cur_paddr = data_paddr;
if ((cur_paddr & 0x3) != 0) {
bprintf ("Warning: not aligned version info address\n");
free (data);
sdb_free (sdb);
continue;
}
while (cur_paddr < (data_paddr + data->Size) && cur_paddr < bin->size) {
PE_VS_VERSIONINFO* vs_VersionInfo = Pe_r_bin_pe_parse_version_info (bin, cur_paddr);
if (vs_VersionInfo) {
snprintf (key, 30, "VS_VERSIONINFO%d", counter++);
sdb_ns_set (sdb, key, Pe_r_bin_store_resource_version_info (vs_VersionInfo));
} else {
break;
}
if (vs_VersionInfo->wLength < 1) {
// Invalid version length
break;
}
cur_paddr += vs_VersionInfo->wLength;
free_VS_VERSIONINFO (vs_VersionInfo);
align32 (cur_paddr);
}
sdb_ns_set (bin->kv, "vs_version_info", sdb);
}
r_pe_resource *rs = R_NEW0 (r_pe_resource);
if (!rs) {
free (data);
break;
}
rs->timestr = _time_stamp_to_str (dir->TimeDateStamp);
rs->type = strdup (_resource_type_str (type));
rs->language = strdup (_resource_lang_str (entry.u1.Name & 0x3ff));
rs->data = data;
rs->name = id;
r_list_append (bin->resources, rs);
}
}
| 0 |
[
"CWE-125"
] |
radare2
|
4e1cf0d3e6f6fe2552a269def0af1cd2403e266c
| 301,570,503,979,180,900,000,000,000,000,000,000,000 | 104 |
Fix crash in pe
|
static int is_fullwidth(int c)
{
int i;
if (c < mbfl_eaw_table[0].begin) {
return 0;
}
for (i = 0; i < sizeof(mbfl_eaw_table) / sizeof(mbfl_eaw_table[0]); i++) {
if (mbfl_eaw_table[i].begin <= c && c <= mbfl_eaw_table[i].end) {
return 1;
}
}
return 0;
}
| 0 |
[
"CWE-119"
] |
php-src
|
64f42c73efc58e88671ad76b6b6bc8e2b62713e1
| 199,508,071,510,156,300,000,000,000,000,000,000,000 | 16 |
Fixed bug #71906: AddressSanitizer: negative-size-param (-1) in mbfl_strcut
|
int coroutine_fn bdrv_co_discard(BlockDriverState *bs, int64_t sector_num,
int nb_sectors)
{
int max_discard;
if (!bs->drv) {
return -ENOMEDIUM;
} else if (bdrv_check_request(bs, sector_num, nb_sectors)) {
return -EIO;
} else if (bs->read_only) {
return -EROFS;
}
bdrv_reset_dirty(bs, sector_num, nb_sectors);
/* Do nothing if disabled. */
if (!(bs->open_flags & BDRV_O_UNMAP)) {
return 0;
}
if (!bs->drv->bdrv_co_discard && !bs->drv->bdrv_aio_discard) {
return 0;
}
max_discard = bs->bl.max_discard ? bs->bl.max_discard : MAX_DISCARD_DEFAULT;
while (nb_sectors > 0) {
int ret;
int num = nb_sectors;
/* align request */
if (bs->bl.discard_alignment &&
num >= bs->bl.discard_alignment &&
sector_num % bs->bl.discard_alignment) {
if (num > bs->bl.discard_alignment) {
num = bs->bl.discard_alignment;
}
num -= sector_num % bs->bl.discard_alignment;
}
/* limit request size */
if (num > max_discard) {
num = max_discard;
}
if (bs->drv->bdrv_co_discard) {
ret = bs->drv->bdrv_co_discard(bs, sector_num, num);
} else {
BlockDriverAIOCB *acb;
CoroutineIOCompletion co = {
.coroutine = qemu_coroutine_self(),
};
acb = bs->drv->bdrv_aio_discard(bs, sector_num, num,
bdrv_co_io_em_complete, &co);
if (acb == NULL) {
return -EIO;
} else {
qemu_coroutine_yield();
ret = co.ret;
}
}
if (ret && ret != -ENOTSUP) {
return ret;
}
sector_num += num;
nb_sectors -= num;
}
return 0;
}
| 0 |
[
"CWE-190"
] |
qemu
|
8f4754ede56e3f9ea3fd7207f4a7c4453e59285b
| 151,528,939,313,634,920,000,000,000,000,000,000,000 | 70 |
block: Limit request size (CVE-2014-0143)
Limiting the size of a single request to INT_MAX not only fixes a
direct integer overflow in bdrv_check_request() (which would only
trigger bad behaviour with ridiculously huge images, as in close to
2^64 bytes), but can also prevent overflows in all block drivers.
Signed-off-by: Kevin Wolf <[email protected]>
Reviewed-by: Max Reitz <[email protected]>
Signed-off-by: Stefan Hajnoczi <[email protected]>
|
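The clamping pattern this commit describes generalizes beyond QEMU. A minimal sketch, with illustrative names (this is not QEMU code):

#include <stdint.h>

/* Hand the driver at most max_per_call sectors per invocation so the
 * per-chunk value always fits in an int, mirroring the max_discard
 * clamp in the loop above. */
static int clamp_chunk(int64_t remaining, int max_per_call)
{
    return remaining > max_per_call ? max_per_call : (int)remaining;
}

Each iteration then advances by the clamped amount, exactly as the while loop above advances sector_num and decrements nb_sectors.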
static bool update_insert_id(THD *thd, set_var *var)
{
if (!var->value)
{
my_error(ER_NO_DEFAULT, MYF(0), var->var->name.str);
return true;
}
thd->force_one_auto_inc_interval(var->save_result.ulonglong_value);
return false;
}
| 0 |
[
"CWE-264"
] |
mysql-server
|
48bd8b16fe382be302c6f0b45931be5aa6f29a0e
| 226,359,931,864,897,300,000,000,000,000,000,000,000 | 10 |
Bug#24388753: PRIVILEGE ESCALATION USING MYSQLD_SAFE
[This is the 5.5/5.6 version of the bugfix].
The problem was that it was possible to write log files ending
in .ini/.cnf that later could be parsed as an options file.
This made it possible for users to specify startup options
without the permissions to do so.
This patch fixes the problem by disallowing general query log
and slow query log to be written to files ending in .ini and .cnf.
|
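The fix described above amounts to a suffix check on the configured log file name. A hedged sketch of that check, with a hypothetical helper name (not the actual MySQL patch):

#include <stdbool.h>
#include <string.h>
#include <strings.h>

/* Refuse general/slow query log names that the server could later
 * parse as an options file. */
static bool is_forbidden_log_name(const char *name)
{
    const char *dot = strrchr(name, '.');
    return dot != NULL &&
           (strcasecmp(dot, ".ini") == 0 || strcasecmp(dot, ".cnf") == 0);
}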
initialize (void)
{
char *env_sysrc;
file_stats_t flstats;
bool ok = true;
memset(&flstats, 0, sizeof(flstats));
/* Run a non-standard system rc file when the according environment
variable has been set. For internal testing purposes only! */
env_sysrc = getenv ("SYSTEM_WGETRC");
if (env_sysrc && file_exists_p (env_sysrc, &flstats))
{
ok &= run_wgetrc (env_sysrc, &flstats);
/* If there are any problems parsing the system wgetrc file, tell
the user and exit */
if (! ok)
{
fprintf (stderr, _("\
Parsing system wgetrc file (env SYSTEM_WGETRC) failed. Please check\n\
'%s',\n\
or specify a different file using --config.\n"), env_sysrc);
return WGET_EXIT_PARSE_ERROR;
}
}
/* Otherwise, if SYSTEM_WGETRC is defined, use it. */
#ifdef SYSTEM_WGETRC
else if (file_exists_p (SYSTEM_WGETRC, &flstats))
ok &= run_wgetrc (SYSTEM_WGETRC, &flstats);
/* If there are any problems parsing the system wgetrc file, tell
the user and exit */
if (! ok)
{
fprintf (stderr, _("\
Parsing system wgetrc file failed. Please check\n\
'%s',\n\
or specify a different file using --config.\n"), SYSTEM_WGETRC);
return WGET_EXIT_PARSE_ERROR;
}
#endif
/* Override it with your own, if one exists. */
opt.wgetrcfile = wgetrc_file_name ();
if (!opt.wgetrcfile)
return 0;
/* #### We should canonicalize `file' and SYSTEM_WGETRC with
something like realpath() before comparing them with `strcmp' */
#ifdef SYSTEM_WGETRC
if (!strcmp (opt.wgetrcfile, SYSTEM_WGETRC))
{
fprintf (stderr, _("\
%s: Warning: Both system and user wgetrc point to %s.\n"),
exec_name, quote (opt.wgetrcfile));
}
else
#endif
#ifndef FUZZING
if (file_exists_p (opt.wgetrcfile, &flstats))
#endif
ok &= run_wgetrc (opt.wgetrcfile, &flstats);
xfree (opt.wgetrcfile);
/* If there were errors processing either `.wgetrc', abort. */
if (!ok)
return WGET_EXIT_PARSE_ERROR;
return 0;
}
| 0 |
[
"CWE-200"
] |
wget
|
c125d24762962d91050d925fbbd9e6f30b2302f8
| 296,814,880,424,930,140,000,000,000,000,000,000,000 | 67 |
Don't use extended attributes (--xattr) by default
* src/init.c (defaults): Set enable_xattr to false by default
* src/main.c (print_help): Reverse option logic of --xattr
* doc/wget.texi: Add description for --xattr
Users may not be aware that the origin URL and Referer are saved
including credentials, and possibly access tokens within
the urls.
|
SYSCALL_DEFINE3(getresuid, uid_t __user *, ruidp, uid_t __user *, euidp, uid_t __user *, suidp)
{
const struct cred *cred = current_cred();
int retval;
uid_t ruid, euid, suid;
ruid = from_kuid_munged(cred->user_ns, cred->uid);
euid = from_kuid_munged(cred->user_ns, cred->euid);
suid = from_kuid_munged(cred->user_ns, cred->suid);
if (!(retval = put_user(ruid, ruidp)) &&
!(retval = put_user(euid, euidp)))
retval = put_user(suid, suidp);
return retval;
}
| 0 |
[
"CWE-16",
"CWE-79"
] |
linux
|
2702b1526c7278c4d65d78de209a465d4de2885e
| 26,555,650,981,018,680,000,000,000,000,000,000,000 | 16 |
kernel/sys.c: fix stack memory content leak via UNAME26
Calling uname() with the UNAME26 personality set allows a leak of kernel
stack contents. This fixes it by defensively calculating the length of
copy_to_user() call, making the len argument unsigned, and initializing
the stack buffer to zero (now technically unneeded, but hey, overkill).
CVE-2012-0957
Reported-by: PaX Team <[email protected]>
Signed-off-by: Kees Cook <[email protected]>
Cc: Andi Kleen <[email protected]>
Cc: PaX Team <[email protected]>
Cc: Brad Spengler <[email protected]>
Cc: <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
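The defensive pattern from the commit message, ported to a userspace sketch (copy_to_user() is replaced by memcpy() here, and the 65-byte buffer size is an assumption for illustration):

#include <string.h>

/* Zero the staging buffer first, then derive the copy length from what
 * was actually written, so stale stack bytes can never be copied out. */
static size_t copy_field_safely(char *dst, size_t dst_len, const char *src)
{
    char buf[65];
    size_t len;

    memset(buf, 0, sizeof(buf));
    strncpy(buf, src, sizeof(buf) - 1);       /* buf stays NUL-terminated */
    len = strnlen(buf, sizeof(buf) - 1) + 1;  /* unsigned, bounded length */
    if (len > dst_len)
        len = dst_len;
    memcpy(dst, buf, len);
    return len;
}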
SMB311_posix_qfs_info(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_fid, u64 volatile_fid, struct kstatfs *fsdata)
{
struct smb_rqst rqst;
struct smb2_query_info_rsp *rsp = NULL;
struct kvec iov;
struct kvec rsp_iov;
int rc = 0;
int resp_buftype;
struct cifs_ses *ses = tcon->ses;
FILE_SYSTEM_POSIX_INFO *info = NULL;
int flags = 0;
rc = build_qfs_info_req(&iov, tcon, FS_POSIX_INFORMATION,
sizeof(FILE_SYSTEM_POSIX_INFO),
persistent_fid, volatile_fid);
if (rc)
return rc;
if (smb3_encryption_required(tcon))
flags |= CIFS_TRANSFORM_REQ;
memset(&rqst, 0, sizeof(struct smb_rqst));
rqst.rq_iov = &iov;
rqst.rq_nvec = 1;
rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
cifs_small_buf_release(iov.iov_base);
if (rc) {
cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
goto posix_qfsinf_exit;
}
rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;
info = (FILE_SYSTEM_POSIX_INFO *)(
le16_to_cpu(rsp->OutputBufferOffset) + (char *)rsp);
rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset),
le32_to_cpu(rsp->OutputBufferLength), &rsp_iov,
sizeof(FILE_SYSTEM_POSIX_INFO));
if (!rc)
copy_posix_fs_info_to_kstatfs(info, fsdata);
posix_qfsinf_exit:
free_rsp_buf(resp_buftype, rsp_iov.iov_base);
return rc;
}
| 0 |
[
"CWE-416",
"CWE-200"
] |
linux
|
6a3eb3360667170988f8a6477f6686242061488a
| 100,367,434,335,744,920,000,000,000,000,000,000,000 | 46 |
cifs: Fix use-after-free in SMB2_write
There is a KASAN use-after-free:
BUG: KASAN: use-after-free in SMB2_write+0x1342/0x1580
Read of size 8 at addr ffff8880b6a8e450 by task ln/4196
Should not release the 'req' because it will use in the trace.
Fixes: eccb4422cf97 ("smb3: Add ftrace tracepoints for improved SMB3 debugging")
Signed-off-by: ZhangXiaoxu <[email protected]>
Signed-off-by: Steve French <[email protected]>
CC: Stable <[email protected]> 4.18+
Reviewed-by: Pavel Shilovsky <[email protected]>
|
struct ion_client *ion_client_create(struct ion_device *dev,
const char *name)
{
struct ion_client *client;
struct task_struct *task;
struct rb_node **p;
struct rb_node *parent = NULL;
struct ion_client *entry;
pid_t pid;
if (!name) {
pr_err("%s: Name cannot be null\n", __func__);
return ERR_PTR(-EINVAL);
}
get_task_struct(current->group_leader);
task_lock(current->group_leader);
pid = task_pid_nr(current->group_leader);
/*
* don't bother to store task struct for kernel threads,
* they can't be killed anyway
*/
if (current->group_leader->flags & PF_KTHREAD) {
put_task_struct(current->group_leader);
task = NULL;
} else {
task = current->group_leader;
}
task_unlock(current->group_leader);
client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
if (!client)
goto err_put_task_struct;
client->dev = dev;
client->handles = RB_ROOT;
idr_init(&client->idr);
mutex_init(&client->lock);
client->task = task;
client->pid = pid;
client->name = kstrdup(name, GFP_KERNEL);
if (!client->name)
goto err_free_client;
down_write(&dev->lock);
client->display_serial = ion_get_client_serial(&dev->clients, name);
client->display_name = kasprintf(
GFP_KERNEL, "%s-%d", name, client->display_serial);
if (!client->display_name) {
up_write(&dev->lock);
goto err_free_client_name;
}
p = &dev->clients.rb_node;
while (*p) {
parent = *p;
entry = rb_entry(parent, struct ion_client, node);
if (client < entry)
p = &(*p)->rb_left;
else if (client > entry)
p = &(*p)->rb_right;
}
rb_link_node(&client->node, parent, p);
rb_insert_color(&client->node, &dev->clients);
client->debug_root = debugfs_create_file(client->display_name, 0664,
dev->clients_debug_root,
client, &debug_client_fops);
if (!client->debug_root) {
char buf[256], *path;
path = dentry_path(dev->clients_debug_root, buf, 256);
pr_err("Failed to create client debugfs at %s/%s\n",
path, client->display_name);
}
up_write(&dev->lock);
return client;
err_free_client_name:
kfree(client->name);
err_free_client:
kfree(client);
err_put_task_struct:
if (task)
put_task_struct(current->group_leader);
return ERR_PTR(-ENOMEM);
}
| 0 |
[
"CWE-416",
"CWE-284"
] |
linux
|
9590232bb4f4cc824f3425a6e1349afbe6d6d2b7
| 16,482,281,862,326,706,000,000,000,000,000,000,000 | 89 |
staging/android/ion : fix a race condition in the ion driver
There is a use-after-free problem in the ion driver.
This is caused by a race condition in the ion_ioctl()
function.
A handle has a ref count of 1 and two tasks on different
cpus call ION_IOC_FREE simultaneously.
cpu 0                                   cpu 1
-------------------------------------------------------
ion_handle_get_by_id()
(ref == 2)
                                        ion_handle_get_by_id()
                                        (ref == 3)
ion_free()
(ref == 2)
ion_handle_put()
(ref == 1)
                                        ion_free()
                                        (ref == 0 so ion_handle_destroy() is
                                        called and the handle is freed.)
                                        ion_handle_put() is called and it
                                        decreases the slub's next free pointer
The problem is detected as an unaligned access in the
spin lock functions since it uses load exclusive
instruction. In some cases it corrupts the slub's
free pointer which causes a mis-aligned access to the
next free pointer.(kmalloc returns a pointer like
ffffc0745b4580aa). And it causes lots of other
hard-to-debug problems.
This symptom is caused since the first member in the
ion_handle structure is the reference count and the
ion driver decrements the reference after it has been
freed.
To fix this problem client->lock mutex is extended
to protect all the codes that uses the handle.
Signed-off-by: Eun Taik Lee <[email protected]>
Reviewed-by: Laura Abbott <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
|
static void nodeBlobReset(Rtree *pRtree){
if( pRtree->pNodeBlob && pRtree->inWrTrans==0 && pRtree->nCursor==0 ){
sqlite3_blob *pBlob = pRtree->pNodeBlob;
pRtree->pNodeBlob = 0;
sqlite3_blob_close(pBlob);
}
}
| 0 |
[
"CWE-125"
] |
sqlite
|
e41fd72acc7a06ce5a6a7d28154db1ffe8ba37a8
| 93,658,922,658,945,900,000,000,000,000,000,000,000 | 7 |
Enhance the rtreenode() function of rtree (used for testing) so that it
uses the newer sqlite3_str object for better performance and improved
error reporting.
FossilOrigin-Name: 90acdbfce9c088582d5165589f7eac462b00062bbfffacdcc786eb9cf3ea5377
|
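For context, a minimal sketch of the sqlite3_str API (SQLite 3.24+) that the commit message refers to; the function below is illustrative, not the rtreenode() change itself:

#include <sqlite3.h>

/* Accumulate formatted text; sqlite3_str tracks allocation failures
 * internally and reports them via sqlite3_str_errcode(). */
static char *format_node(sqlite3 *db, int id, double score)
{
    sqlite3_str *s = sqlite3_str_new(db);
    sqlite3_str_appendf(s, "{%d %g}", id, score);
    if (sqlite3_str_errcode(s) != SQLITE_OK) {
        sqlite3_free(sqlite3_str_finish(s));
        return 0;
    }
    return sqlite3_str_finish(s);  /* caller frees with sqlite3_free() */
}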
GF_Err iSLT_box_size(GF_Box *s)
{
s->size += 8;
return GF_OK;
}
| 0 |
[
"CWE-703"
] |
gpac
|
f19668964bf422cf5a63e4dbe1d3c6c75edadcbb
| 215,625,875,335,721,000,000,000,000,000,000,000,000 | 5 |
fixed #1879
|
ssize_t lxc_sendfile_nointr(int out_fd, int in_fd, off_t *offset, size_t count)
{
ssize_t ret;
again:
ret = sendfile(out_fd, in_fd, offset, count);
if (ret < 0) {
if (errno == EINTR)
goto again;
return -1;
}
return ret;
}
| 0 |
[
"CWE-78"
] |
lxc
|
6400238d08cdf1ca20d49bafb85f4e224348bf9d
| 307,622,979,696,809,870,000,000,000,000,000,000,000 | 15 |
CVE-2019-5736 (runC): rexec callers as memfd
Adam Iwaniuk and Borys Popławski discovered that an attacker can compromise the
runC host binary from inside a privileged runC container. As a result, this
could be exploited to gain root access on the host. runC is used as the default
runtime for containers with Docker, containerd, Podman, and CRI-O.
The attack can be made when attaching to a running container or when starting a
container running a specially crafted image. For example, when runC attaches
to a container the attacker can trick it into executing itself. This could be
done by replacing the target binary inside the container with a custom binary
pointing back at the runC binary itself. As an example, if the target binary
was /bin/bash, this could be replaced with an executable script specifying the
interpreter path #!/proc/self/exe (/proc/self/exec is a symbolic link created
by the kernel for every process which points to the binary that was executed
for that process). As such when /bin/bash is executed inside the container,
instead the target of /proc/self/exe will be executed - which will point to the
runc binary on the host. The attacker can then proceed to write to the target
of /proc/self/exe to try and overwrite the runC binary on the host. However in
general, this will not succeed as the kernel will not permit it to be
overwritten whilst runC is executing. To overcome this, the attacker can
instead open a file descriptor to /proc/self/exe using the O_PATH flag and then
proceed to reopen the binary as O_WRONLY through /proc/self/fd/<nr> and try to
write to it in a busy loop from a separate process. Ultimately it will succeed
when the runC binary exits. After this the runC binary is compromised and can
be used to attack other containers or the host itself.
This attack is only possible with privileged containers since it requires root
privilege on the host to overwrite the runC binary. Unprivileged containers
with a non-identity ID mapping do not have the permission to write to the host
binary and therefore are unaffected by this attack.
LXC is also impacted in a similar manner by this vulnerability, however as the
LXC project considers privileged containers to be unsafe no CVE has been
assigned for this issue for LXC. Quoting from the
https://linuxcontainers.org/lxc/security/ project's Security information page:
"As privileged containers are considered unsafe, we typically will not consider
new container escape exploits to be security issues worthy of a CVE and quick
fix. We will however try to mitigate those issues so that accidental damage to
the host is prevented."
To prevent this attack, LXC has been patched to create a temporary copy of the
calling binary itself when it starts or attaches to containers. To do this LXC
creates an anonymous, in-memory file using the memfd_create() system call and
copies itself into the temporary in-memory file, which is then sealed to
prevent further modifications. LXC then executes this sealed, in-memory file
instead of the original on-disk binary. Any compromising write operations from
a privileged container to the host LXC binary will then write to the temporary
in-memory binary and not to the host binary on-disk, preserving the integrity
of the host LXC binary. Also as the temporary, in-memory LXC binary is sealed,
writes to this will also fail.
Note: memfd_create() was added to the Linux kernel in the 3.17 release.
Signed-off-by: Christian Brauner <[email protected]>
Co-Developed-by: Aleksa Sarai <[email protected]>
Acked-by: Serge Hallyn <[email protected]>
Signed-off-by: Christian Brauner <[email protected]>
|
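A hedged sketch of the core of the mitigation (memfd_create() plus seals; error paths are abbreviated and this is not LXC's actual implementation):

#define _GNU_SOURCE
#include <fcntl.h>
#include <sys/mman.h>
#include <sys/sendfile.h>
#include <unistd.h>

/* Copy the running binary into a sealed anonymous in-memory file and
 * return an fd suitable for fexecve(), so writes aimed at
 * /proc/self/exe can no longer reach the on-disk binary. */
static int rexec_fd(void)
{
    int memfd = memfd_create("rexec", MFD_ALLOW_SEALING);
    int self = open("/proc/self/exe", O_RDONLY | O_CLOEXEC);
    ssize_t n;

    if (memfd < 0 || self < 0)
        return -1;
    while ((n = sendfile(memfd, self, NULL, 1 << 20)) > 0)
        ;                                   /* copy ourselves into the memfd */
    close(self);
    if (n < 0 || fcntl(memfd, F_ADD_SEALS, F_SEAL_SEAL | F_SEAL_SHRINK |
                       F_SEAL_GROW | F_SEAL_WRITE) < 0)
        return -1;
    return memfd;
}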
void X509_STORE_CTX_set0_dane(X509_STORE_CTX *ctx, struct dane_st *dane)
{
ctx->dane = dane;
}
| 0 |
[] |
openssl
|
33cc5dde478ba5ad79f8fd4acd8737f0e60e236e
| 265,309,907,942,038,450,000,000,000,000,000,000,000 | 4 |
Compat self-signed trust with reject-only aux data
When auxiliary data contains only reject entries, continue to trust
self-signed objects just as when no auxiliary data is present.
This makes it possible to reject specific uses without changing
what's accepted (and thus overriding the underlying EKU).
Added new supported certs and doubled test count from 38 to 76.
Reviewed-by: Dr. Stephen Henson <[email protected]>
|
apr_status_t oidc_cache_mutex_child_init(apr_pool_t *p, server_rec *s,
oidc_cache_mutex_t *m) {
// oidc_sdebug(s, "enter: %d (m=%pp,s=%pp, p=%d)", (m && m->sema) ? *m->sema : -1, m->mutex ? m->mutex : 0, s, m->is_parent);
if (m->is_parent == FALSE)
return APR_SUCCESS;
/* initialize the lock for the child process */
apr_status_t rv = apr_global_mutex_child_init(&m->mutex,
(const char *) m->mutex_filename, p);
if (rv != APR_SUCCESS) {
oidc_serror(s,
"apr_global_mutex_child_init failed to reopen mutex on file %s: %s (%d)",
m->mutex_filename, oidc_cache_status2str(rv), rv);
} else {
apr_global_mutex_lock(m->mutex);
m->sema = apr_shm_baseaddr_get(m->shm);
(*m->sema)++;
apr_global_mutex_unlock(m->mutex);
}
m->is_parent = FALSE;
//oidc_sdebug(s, "semaphore: %d (m=%pp,s=%pp)", *m->sema, m, s);
return rv;
}
| 0 |
[
"CWE-330"
] |
mod_auth_openidc
|
375407c16c61a70b56fdbe13b0d2c8f11398e92c
| 117,624,362,158,841,700,000,000,000,000,000,000,000 | 28 |
use encrypted JWTs for storing encrypted cache contents
- avoid using static AAD/IV; thanks @niebardzo
- bump to 2.4.9-dev
Signed-off-by: Hans Zandbelt <[email protected]>
|
format_versions_list(config_line_t *ln)
{
smartlist_t *versions;
char *result;
versions = smartlist_new();
for ( ; ln; ln = ln->next) {
smartlist_split_string(versions, ln->value, ",",
SPLIT_SKIP_SPACE|SPLIT_IGNORE_BLANK, 0);
}
sort_version_list(versions, 1);
result = smartlist_join_strings(versions,",",0,NULL);
SMARTLIST_FOREACH(versions,char *,s,tor_free(s));
smartlist_free(versions);
return result;
}
| 0 |
[] |
tor
|
02e05bd74dbec614397b696cfcda6525562a4675
| 295,364,138,334,780,240,000,000,000,000,000,000,000 | 15 |
When examining descriptors as a dirserver, reject ones with bad versions
This is an extra fix for bug 21278: it ensures that these
descriptors and platforms will never be listed in a legit consensus.
|
static int mem_resize(jas_stream_memobj_t *m, int bufsize)
{
unsigned char *buf;
assert(m->buf_);
if (!(buf = jas_realloc(m->buf_, bufsize * sizeof(unsigned char)))) {
return -1;
}
m->buf_ = buf;
m->bufsize_ = bufsize;
return 0;
}
| 1 |
[
"CWE-189"
] |
jasper
|
3c55b399c36ef46befcb21e4ebc4799367f89684
| 203,493,757,926,343,200,000,000,000,000,000,000,000 | 12 |
At many places in the code, jas_malloc or jas_recalloc was being
invoked with the size argument being computed in a manner that would not
allow integer overflow to be detected. Now, these places in the code
have been modified to use special-purpose memory allocation functions
(e.g., jas_alloc2, jas_alloc3, jas_realloc2) that check for overflow.
This should fix many security problems.
|
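A sketch of the overflow-checked allocation style the log entry describes, in the spirit of jas_realloc2() (this is not JasPer's actual implementation):

#include <stdlib.h>

/* Reject nmemb * size products that would wrap before calling realloc(). */
static void *realloc2_checked(void *ptr, size_t nmemb, size_t size)
{
    if (size != 0 && nmemb > (size_t)-1 / size)
        return NULL;               /* multiplication would overflow */
    return realloc(ptr, nmemb * size);
}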
extract_data_get_extraction_requested (ExtractData *extract_data,
const char *pathname)
{
if (extract_data->file_list != NULL)
return g_hash_table_lookup (extract_data->files_to_extract, pathname) != NULL;
else
return TRUE;
}
| 0 |
[
"CWE-22"
] |
file-roller
|
b147281293a8307808475e102a14857055f81631
| 61,119,311,083,636,490,000,000,000,000,000,000,000 | 8 |
libarchive: sanitize filenames before extracting
|
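The sanitization the commit message refers to boils down to rejecting unsafe archive entry names before extraction. A hedged sketch, not file-roller's actual code:

#include <stdbool.h>
#include <string.h>

/* Reject absolute paths and any ".." path component. */
static bool entry_name_is_safe(const char *name)
{
    const char *p;

    if (name[0] == '/')
        return false;
    for (p = name; (p = strstr(p, "..")) != NULL; p += 2) {
        bool starts = (p == name) || (p[-1] == '/');
        bool ends = (p[2] == '\0') || (p[2] == '/');
        if (starts && ends)
            return false;          /* e.g. "../x" or "a/../b" */
    }
    return true;
}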
int64 GetElapsedTime (LARGE_INTEGER *lastPerfCounter)
{
LARGE_INTEGER freq;
LARGE_INTEGER counter = KeQueryPerformanceCounter (&freq);
int64 elapsed = (counter.QuadPart - lastPerfCounter->QuadPart) * 1000000LL / freq.QuadPart;
*lastPerfCounter = counter;
return elapsed;
}
| 0 |
[
"CWE-119",
"CWE-787"
] |
VeraCrypt
|
f30f9339c9a0b9bbcc6f5ad38804af39db1f479e
| 41,825,297,699,562,863,000,000,000,000,000,000,000 | 10 |
Windows: fix low severity vulnerability in driver that allowed reading 3 bytes of kernel stack memory (with a rare possibility of 25 additional bytes). Reported by Tim Harrison.
|
static BROTLI_INLINE size_t BrotliGetRemainingBytes(BrotliBitReader* br) {
static const size_t kCap = (size_t)1 << BROTLI_LARGE_MAX_WBITS;
if (br->avail_in > kCap) return kCap;
return br->avail_in + (BrotliGetAvailableBits(br) >> 3);
}
| 0 |
[
"CWE-120"
] |
brotli
|
223d80cfbec8fd346e32906c732c8ede21f0cea6
| 322,240,401,335,864,270,000,000,000,000,000,000,000 | 5 |
Update (#826)
* IMPORTANT: decoder: fix potential overflow when input chunk is >2GiB
* simplify max Huffman table size calculation
* eliminate symbol duplicates (static arrays in .h files)
* minor combing in research/ code
|
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 3);
OpContext op_context(context, node);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), op_context.params->num_splits);
auto input_type = op_context.input->type;
TF_LITE_ENSURE(context,
input_type == kTfLiteFloat32 || input_type == kTfLiteUInt8 ||
input_type == kTfLiteInt16 || input_type == kTfLiteInt32 ||
input_type == kTfLiteInt64 || input_type == kTfLiteInt8);
for (int i = 0; i < NumOutputs(node); ++i) {
GetOutput(context, node, i)->type = input_type;
}
auto size_splits = op_context.size_splits;
TF_LITE_ENSURE_EQ(context, NumDimensions(size_splits), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), NumElements(size_splits));
// If we know the contents of the 'size_splits' tensor and the 'axis' tensor,
// resize all outputs. Otherwise, wait until Eval().
if (IsConstantTensor(op_context.size_splits) &&
IsConstantTensor(op_context.axis)) {
return ResizeOutputTensors(context, node, op_context.input,
op_context.size_splits, op_context.axis);
} else {
return UseDynamicOutputTensors(context, node);
}
}
| 1 |
[
"CWE-125",
"CWE-787"
] |
tensorflow
|
1970c2158b1ffa416d159d03c3370b9a462aee35
| 81,386,516,982,577,535,000,000,000,000,000,000,000 | 30 |
[tflite]: Insert `nullptr` checks when obtaining tensors.
As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages.
We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`).
PiperOrigin-RevId: 332521299
Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
|
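The refactor the commit describes replaces unchecked tensor accessors with null-checked ones. Applied to the output loop in Prepare() above, the pattern would look roughly like this (a fragment-level sketch; treat the exact macro usage as an assumption):

for (int i = 0; i < NumOutputs(node); ++i) {
  TfLiteTensor* output = GetOutput(context, node, i);
  TF_LITE_ENSURE(context, output != NULL);   /* fail fast instead of deref */
  output->type = input_type;
}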
static bool is_blocked_key(uint8_t key_type, uint8_t *key_value)
{
uint32_t i = 0;
for (i = 0; i < ARRAY_SIZE(blocked_keys); ++i) {
if (key_type == blocked_keys[i].type &&
!memcmp(blocked_keys[i].val, key_value,
sizeof(blocked_keys[i].val)))
return true;
}
return false;
}
| 0 |
[
"CWE-862",
"CWE-863"
] |
bluez
|
b497b5942a8beb8f89ca1c359c54ad67ec843055
| 60,682,162,065,347,070,000,000,000,000,000,000,000 | 13 |
adapter: Fix storing discoverable setting
discoverable setting shall only be store when changed via Discoverable
property and not when discovery client set it as that be considered
temporary just for the lifetime of the discovery.
|
RGWOpType get_type() override { return RGW_OP_DELETE_CORS; }
| 0 |
[
"CWE-770"
] |
ceph
|
ab29bed2fc9f961fe895de1086a8208e21ddaddc
| 300,110,483,421,190,200,000,000,000,000,000,000,000 | 1 |
rgw: fix issues with 'enforce bounds' patch
The patch to enforce bounds on max-keys/max-uploads/max-parts had a few
issues that would prevent us from compiling it. Instead of changing the
code provided by the submitter, we're addressing them in a separate
commit to maintain the DCO.
Signed-off-by: Joao Eduardo Luis <[email protected]>
Signed-off-by: Abhishek Lekshmanan <[email protected]>
(cherry picked from commit 29bc434a6a81a2e5c5b8cfc4c8d5c82ca5bf538a)
mimic specific fixes:
As the largeish change from master g_conf() isn't in mimic yet, use the g_conf
global structure, also make rgw_op use the value from req_info ceph context as
we do for all the requests
|
static RBinXtrData *get_the_meta(RBin *bin, RBuffer *buf) {
RBinXtrMetadata *meta = R_NEW0 (RBinXtrMetadata);
meta->machine = "mono";
meta->type = "assembly";
meta->libname = NULL;
meta->xtr_type = "xalz";
ut32 osz = r_buf_read_le32_at (buf, 8);
int datalen = r_buf_size (buf) - 0xc;
ut8 *data = malloc (datalen);
if (!data) {
free (meta);
return NULL;
}
r_buf_read_at (buf, 0xc, data, datalen);
int consumed = 0;
int outsize = 0;
ut8 *obuf = r_inflate_lz4 (data, datalen, &consumed, &outsize);
if (obuf && outsize == osz) {
buf = r_buf_new_with_pointers (obuf, outsize, true);
RBinXtrData *res = r_bin_xtrdata_new (buf, 0, r_buf_size (buf), 0, meta);
free (data);
return res;
}
R_LOG_ERROR ("LZ4 decompression failed");
free (data);
free (meta);
free (obuf);
return NULL;
}
| 0 |
[
"CWE-125"
] |
radare2
|
fc285cecb8469f0262db0170bf6dd7c01d9b8ed5
| 135,849,517,023,762,090,000,000,000,000,000,000,000 | 30 |
Fix #20354
|
next_can_zero (void *nxdata)
{
struct b_conn *b_conn = nxdata;
return b_conn->b->can_zero (b_conn->b, b_conn->conn);
}
| 0 |
[
"CWE-406"
] |
nbdkit
|
bf0d61883a2f02f4388ec10dc92d4c61c093679e
| 260,551,617,845,259,800,000,000,000,000,000,000,000 | 5 |
server: Fix regression for NBD_OPT_INFO before NBD_OPT_GO
Most known NBD clients do not bother with NBD_OPT_INFO (except for
clients like 'qemu-nbd --list' that don't ever intend to connect), but
go straight to NBD_OPT_GO. However, it's not too hard to hack up qemu
to add in an extra client step (whether info on the same name, or more
interestingly, info on a different name), as a patch against qemu
commit 6f214b30445:
| diff --git i/nbd/client.c w/nbd/client.c
| index f6733962b49b..425292ac5ea9 100644
| --- i/nbd/client.c
| +++ w/nbd/client.c
| @@ -1038,6 +1038,14 @@ int nbd_receive_negotiate(AioContext *aio_context, QIOChannel *ioc,
| * TLS). If it is not available, fall back to
| * NBD_OPT_LIST for nicer error messages about a missing
| * export, then use NBD_OPT_EXPORT_NAME. */
| + if (getenv ("HACK"))
| + info->name[0]++;
| + result = nbd_opt_info_or_go(ioc, NBD_OPT_INFO, info, errp);
| + if (getenv ("HACK"))
| + info->name[0]--;
| + if (result < 0) {
| + return -EINVAL;
| + }
| result = nbd_opt_info_or_go(ioc, NBD_OPT_GO, info, errp);
| if (result < 0) {
| return -EINVAL;
This works just fine in 1.14.0, where we call .open only once (so the
INFO and GO repeat calls into the same plugin handle), but in 1.14.1
it regressed into causing an assertion failure: we are now calling
.open a second time on a connection that is already opened:
$ nbdkit -rfv null &
$ hacked-qemu-io -f raw -r nbd://localhost -c quit
...
nbdkit: null[1]: debug: null: open readonly=1
nbdkit: backend.c:179: backend_open: Assertion `h->handle == NULL' failed.
Worse, on the mainline development, we have recently made it possible
for plugins to actively report different information for different
export names; for example, a plugin may choose to report different
answers for .can_write on export A than for export B; but if we share
cached handles, then an NBD_OPT_INFO on one export prevents correct
answers for NBD_OPT_GO on the second export name. (The HACK envvar in
my qemu modifications can be used to demonstrate cross-name requests,
which are even less likely in a real client).
The solution is to call .close after NBD_OPT_INFO, coupled with enough
glue logic to reset cached connection handles back to the state
expected by .open. This in turn means factoring out another backend_*
function, but also gives us an opportunity to change
backend_set_handle to no longer accept NULL.
The assertion failure is, to some extent, a possible denial of service
attack (one client can force nbdkit to exit by merely sending OPT_INFO
before OPT_GO, preventing the next client from connecting), although
this is mitigated by using TLS to weed out untrusted clients. Still,
the fact that we introduced a potential DoS attack while trying to fix
a traffic amplification security bug is not very nice.
Sadly, as there are no known clients that easily trigger this mode of
operation (OPT_INFO before OPT_GO), there is no easy way to cover this
via a testsuite addition. I may end up hacking something into libnbd.
Fixes: c05686f957
Signed-off-by: Eric Blake <[email protected]>
(cherry picked from commit a6b88b195a959b17524d1c8353fd425d4891dc5f)
Conflicts:
server/backend.c
server/connections.c
server/filters.c
server/internal.h
server/plugins.c
No backend.c in the stable branch, and fewer things to reset, so instead
code that logic into filter_close.
Signed-off-by: Eric Blake <[email protected]>
|
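A hedged sketch of the shape of the fix; every name below is hypothetical, not nbdkit's real internals. After replying to NBD_OPT_INFO, the backend handle is closed and the cached state reset so a later NBD_OPT_GO re-runs .open on a fresh handle:

#include <stddef.h>

struct connection {               /* hypothetical, for illustration only */
    void *handle;
    long long exportsize;
};

static void backend_close_handle(struct connection *conn);  /* hypothetical */

/* After replying to NBD_OPT_INFO, drop the handle and cached metadata so
 * a subsequent NBD_OPT_GO (possibly for a different export) starts clean. */
static void reset_after_opt_info(struct connection *conn)
{
    backend_close_handle(conn);
    conn->handle = NULL;          /* .open asserts a fresh handle */
    conn->exportsize = -1;        /* invalidate size/flags cached for INFO */
}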
static void update_depend_map_for_order(JOIN *join, ORDER *order)
{
for (; order ; order=order->next)
{
table_map depend_map;
order->item[0]->update_used_tables();
order->depend_map=depend_map=order->item[0]->used_tables();
order->used= 0;
// Not item_sum(), RAND() and no reference to table outside of sub select
if (!(order->depend_map & (OUTER_REF_TABLE_BIT | RAND_TABLE_BIT))
&& !order->item[0]->with_sum_func() &&
join->join_tab)
{
for (JOIN_TAB **tab=join->map2table;
depend_map ;
tab++, depend_map>>=1)
{
if (depend_map & 1)
order->depend_map|=(*tab)->ref.depend_map;
}
}
}
}
| 0 |
[] |
server
|
8c34eab9688b4face54f15f89f5d62bdfd93b8a7
| 282,416,310,242,790,000,000,000,000,000,000,000,000 | 23 |
MDEV-28094 Window function in expression in ORDER BY
call item->split_sum_func() in setup_order() just as
it's done in setup_fields()
|
static void ch345_broken_sysex_input(struct snd_usb_midi_in_endpoint *ep,
uint8_t *buffer, int buffer_length)
{
unsigned int i, cin, length;
for (i = 0; i + 3 < buffer_length; i += 4) {
if (buffer[i] == 0 && i > 0)
break;
cin = buffer[i] & 0x0f;
if (ep->in_sysex &&
cin == ep->last_cin &&
(buffer[i + 1 + (cin == 0x6)] & 0x80) == 0)
cin = 0x4;
#if 0
if (buffer[i + 1] == 0x90) {
/*
* Either a corrupted running status or a real note-on
* message; impossible to detect reliably.
*/
}
#endif
length = snd_usbmidi_cin_length[cin];
snd_usbmidi_input_data(ep, 0, &buffer[i + 1], length);
ep->in_sysex = cin == 0x4;
if (!ep->in_sysex)
ep->last_cin = cin;
}
}
| 0 |
[
"CWE-703"
] |
linux
|
07d86ca93db7e5cdf4743564d98292042ec21af7
| 152,413,561,521,370,190,000,000,000,000,000,000,000 | 28 |
ALSA: usb-audio: avoid freeing umidi object twice
The 'umidi' object will be free'd on the error path by snd_usbmidi_free()
when tearing down the rawmidi interface. So we shouldn't try to free it
in snd_usbmidi_create() after having registered the rawmidi interface.
Found by KASAN.
Signed-off-by: Andrey Konovalov <[email protected]>
Acked-by: Clemens Ladisch <[email protected]>
Cc: <[email protected]>
Signed-off-by: Takashi Iwai <[email protected]>
|
istr_set_get_as_list (GHashTable *table)
{
GList *list;
list = NULL;
g_hash_table_foreach (table, add_istr_to_list, &list);
return list;
}
| 0 |
[
"CWE-20"
] |
nautilus
|
1630f53481f445ada0a455e9979236d31a8d3bb0
| 168,515,070,150,967,510,000,000,000,000,000,000,000 | 8 |
mime-actions: use file metadata for trusting desktop files
Currently we only trust desktop files that have the executable bit
set, and don't replace the displayed icon or the displayed name until
it's trusted, which prevents for running random programs by a malicious
desktop file.
However, the executable permission is preserved if the desktop file
comes from a compressed file.
To prevent this, add a metadata::trusted metadata to the file once the
user acknowledges the file as trusted. This adds metadata to the file,
which cannot be added unless it has access to the computer.
Also remove the SHEBANG "trusted" content we were putting inside the
desktop file, since that doesn't add more security since it can come
with the file itself.
https://bugzilla.gnome.org/show_bug.cgi?id=777991
|
DEFUN(dispVer, VERSION, "Display the version of w3m")
{
disp_message(Sprintf("w3m version %s", w3m_version)->ptr, TRUE);
}
| 0 |
[
"CWE-59",
"CWE-241"
] |
w3m
|
18dcbadf2771cdb0c18509b14e4e73505b242753
| 214,886,934,576,456,260,000,000,000,000,000,000,000 | 4 |
Make temporary directory safely when ~/.w3m is unwritable
|
read_cupsd_conf(cups_file_t *fp) /* I - File to read from */
{
int linenum; /* Current line number */
char line[HTTP_MAX_BUFFER],
/* Line from file */
temp[HTTP_MAX_BUFFER],
/* Temporary buffer for value */
*value, /* Pointer to value */
*valueptr; /* Pointer into value */
int valuelen; /* Length of value */
http_addrlist_t *addrlist, /* Address list */
*addr; /* Current address */
cups_file_t *incfile; /* Include file */
char incname[1024]; /* Include filename */
/*
* Loop through each line in the file...
*/
linenum = 0;
while (cupsFileGetConf(fp, line, sizeof(line), &value, &linenum))
{
/*
* Decode the directive...
*/
if (!_cups_strcasecmp(line, "Include") && value)
{
/*
* Include filename
*/
if (value[0] == '/')
strlcpy(incname, value, sizeof(incname));
else
snprintf(incname, sizeof(incname), "%s/%s", ServerRoot, value);
if ((incfile = cupsFileOpen(incname, "rb")) == NULL)
cupsdLogMessage(CUPSD_LOG_ERROR,
"Unable to include config file \"%s\" - %s",
incname, strerror(errno));
else
{
read_cupsd_conf(incfile);
cupsFileClose(incfile);
}
}
else if (!_cups_strcasecmp(line, "<Location") && value)
{
/*
* <Location path>
*/
linenum = read_location(fp, value, linenum);
if (linenum == 0)
return (0);
}
else if (!_cups_strcasecmp(line, "<Policy") && value)
{
/*
* <Policy name>
*/
linenum = read_policy(fp, value, linenum);
if (linenum == 0)
return (0);
}
else if (!_cups_strcasecmp(line, "FaxRetryInterval") && value)
{
JobRetryInterval = atoi(value);
cupsdLogMessage(CUPSD_LOG_WARN,
"FaxRetryInterval is deprecated; use "
"JobRetryInterval on line %d of %s.", linenum, ConfigurationFile);
}
else if (!_cups_strcasecmp(line, "FaxRetryLimit") && value)
{
JobRetryLimit = atoi(value);
cupsdLogMessage(CUPSD_LOG_WARN,
"FaxRetryLimit is deprecated; use "
"JobRetryLimit on line %d of %s.", linenum, ConfigurationFile);
}
#ifdef HAVE_SSL
else if (!_cups_strcasecmp(line, "SSLOptions"))
{
/*
* SSLOptions [AllowRC4] [AllowSSL3] [AllowDH] [DenyCBC] [DenyTLS1.0] [None]
*/
int options = _HTTP_TLS_NONE,/* SSL/TLS options */
min_version = _HTTP_TLS_1_0,
max_version = _HTTP_TLS_MAX;
if (value)
{
char *start, /* Start of option */
*end; /* End of option */
for (start = value; *start; start = end)
{
/*
* Find end of keyword...
*/
end = start;
while (*end && !_cups_isspace(*end))
end ++;
if (*end)
*end++ = '\0';
/*
* Compare...
*/
if (!_cups_strcasecmp(start, "AllowRC4"))
options |= _HTTP_TLS_ALLOW_RC4;
else if (!_cups_strcasecmp(start, "AllowSSL3"))
min_version = _HTTP_TLS_SSL3;
else if (!_cups_strcasecmp(start, "AllowDH"))
options |= _HTTP_TLS_ALLOW_DH;
else if (!_cups_strcasecmp(start, "DenyCBC"))
options |= _HTTP_TLS_DENY_CBC;
else if (!_cups_strcasecmp(start, "DenyTLS1.0"))
min_version = _HTTP_TLS_1_1;
else if (!_cups_strcasecmp(start, "MaxTLS1.0"))
max_version = _HTTP_TLS_1_0;
else if (!_cups_strcasecmp(start, "MaxTLS1.1"))
max_version = _HTTP_TLS_1_1;
else if (!_cups_strcasecmp(start, "MaxTLS1.2"))
max_version = _HTTP_TLS_1_2;
else if (!_cups_strcasecmp(start, "MaxTLS1.3"))
max_version = _HTTP_TLS_1_3;
else if (!_cups_strcasecmp(start, "MinTLS1.0"))
min_version = _HTTP_TLS_1_0;
else if (!_cups_strcasecmp(start, "MinTLS1.1"))
min_version = _HTTP_TLS_1_1;
else if (!_cups_strcasecmp(start, "MinTLS1.2"))
min_version = _HTTP_TLS_1_2;
else if (!_cups_strcasecmp(start, "MinTLS1.3"))
min_version = _HTTP_TLS_1_3;
else if (!_cups_strcasecmp(start, "None"))
options = _HTTP_TLS_NONE;
else if (_cups_strcasecmp(start, "NoEmptyFragments"))
cupsdLogMessage(CUPSD_LOG_WARN, "Unknown SSL option %s at line %d.", start, linenum);
}
}
_httpTLSSetOptions(options, min_version, max_version);
}
#endif /* HAVE_SSL */
else if ((!_cups_strcasecmp(line, "Port") || !_cups_strcasecmp(line, "Listen")
#ifdef HAVE_SSL
|| !_cups_strcasecmp(line, "SSLPort") || !_cups_strcasecmp(line, "SSLListen")
#endif /* HAVE_SSL */
) && value)
{
/*
* Add listening address(es) to the list...
*/
cupsd_listener_t *lis; /* New listeners array */
/*
* Get the address list...
*/
addrlist = get_address(value, IPP_PORT);
if (!addrlist)
{
cupsdLogMessage(CUPSD_LOG_ERROR, "Bad %s address %s at line %d.", line,
value, linenum);
continue;
}
/*
* Add each address...
*/
for (addr = addrlist; addr; addr = addr->next)
{
/*
* See if this address is already present...
*/
for (lis = (cupsd_listener_t *)cupsArrayFirst(Listeners);
lis;
lis = (cupsd_listener_t *)cupsArrayNext(Listeners))
if (httpAddrEqual(&(addr->addr), &(lis->address)) &&
httpAddrPort(&(addr->addr)) == httpAddrPort(&(lis->address)))
break;
if (lis)
{
#ifdef HAVE_ONDEMAND
if (!lis->on_demand)
#endif /* HAVE_ONDEMAND */
{
httpAddrString(&lis->address, temp, sizeof(temp));
cupsdLogMessage(CUPSD_LOG_WARN,
"Duplicate listen address \"%s\" ignored.", temp);
}
continue;
}
/*
* Allocate another listener...
*/
if (!Listeners)
Listeners = cupsArrayNew(NULL, NULL);
if (!Listeners)
{
cupsdLogMessage(CUPSD_LOG_ERROR,
"Unable to allocate %s at line %d - %s.",
line, linenum, strerror(errno));
break;
}
if ((lis = calloc(1, sizeof(cupsd_listener_t))) == NULL)
{
cupsdLogMessage(CUPSD_LOG_ERROR,
"Unable to allocate %s at line %d - %s.",
line, linenum, strerror(errno));
break;
}
cupsArrayAdd(Listeners, lis);
/*
* Copy the current address and log it...
*/
memcpy(&(lis->address), &(addr->addr), sizeof(lis->address));
lis->fd = -1;
#ifdef HAVE_SSL
if (!_cups_strcasecmp(line, "SSLPort") || !_cups_strcasecmp(line, "SSLListen"))
lis->encryption = HTTP_ENCRYPT_ALWAYS;
#endif /* HAVE_SSL */
httpAddrString(&lis->address, temp, sizeof(temp));
#ifdef AF_LOCAL
if (lis->address.addr.sa_family == AF_LOCAL)
cupsdLogMessage(CUPSD_LOG_INFO, "Listening to %s (Domain)", temp);
else
#endif /* AF_LOCAL */
cupsdLogMessage(CUPSD_LOG_INFO, "Listening to %s:%d (IPv%d)", temp,
httpAddrPort(&(lis->address)),
httpAddrFamily(&(lis->address)) == AF_INET ? 4 : 6);
if (!httpAddrLocalhost(&(lis->address)))
RemotePort = httpAddrPort(&(lis->address));
}
/*
* Free the list...
*/
httpAddrFreeList(addrlist);
}
else if (!_cups_strcasecmp(line, "BrowseProtocols") ||
!_cups_strcasecmp(line, "BrowseLocalProtocols"))
{
/*
* "BrowseProtocols name [... name]"
* "BrowseLocalProtocols name [... name]"
*/
int protocols = parse_protocols(value);
if (protocols < 0)
{
cupsdLogMessage(CUPSD_LOG_ERROR,
"Unknown browse protocol \"%s\" on line %d of %s.",
value, linenum, ConfigurationFile);
break;
}
BrowseLocalProtocols = protocols;
}
else if (!_cups_strcasecmp(line, "DefaultAuthType") && value)
{
/*
* DefaultAuthType {basic,digest,basicdigest,negotiate}
*/
if (!_cups_strcasecmp(value, "none"))
default_auth_type = CUPSD_AUTH_NONE;
else if (!_cups_strcasecmp(value, "basic"))
default_auth_type = CUPSD_AUTH_BASIC;
else if (!_cups_strcasecmp(value, "negotiate"))
default_auth_type = CUPSD_AUTH_NEGOTIATE;
else if (!_cups_strcasecmp(value, "auto"))
default_auth_type = CUPSD_AUTH_AUTO;
else
{
cupsdLogMessage(CUPSD_LOG_WARN,
"Unknown default authorization type %s on line %d of %s.",
value, linenum, ConfigurationFile);
if (FatalErrors & CUPSD_FATAL_CONFIG)
return (0);
}
}
#ifdef HAVE_SSL
else if (!_cups_strcasecmp(line, "DefaultEncryption"))
{
/*
* DefaultEncryption {Never,IfRequested,Required}
*/
if (!value || !_cups_strcasecmp(value, "never"))
DefaultEncryption = HTTP_ENCRYPT_NEVER;
else if (!_cups_strcasecmp(value, "required"))
DefaultEncryption = HTTP_ENCRYPT_REQUIRED;
else if (!_cups_strcasecmp(value, "ifrequested"))
DefaultEncryption = HTTP_ENCRYPT_IF_REQUESTED;
else
{
cupsdLogMessage(CUPSD_LOG_WARN,
"Unknown default encryption %s on line %d of %s.",
value, linenum, ConfigurationFile);
if (FatalErrors & CUPSD_FATAL_CONFIG)
return (0);
}
}
#endif /* HAVE_SSL */
else if (!_cups_strcasecmp(line, "HostNameLookups") && value)
{
/*
* Do hostname lookups?
*/
if (!_cups_strcasecmp(value, "off") || !_cups_strcasecmp(value, "no") ||
!_cups_strcasecmp(value, "false"))
HostNameLookups = 0;
else if (!_cups_strcasecmp(value, "on") || !_cups_strcasecmp(value, "yes") ||
!_cups_strcasecmp(value, "true"))
HostNameLookups = 1;
else if (!_cups_strcasecmp(value, "double"))
HostNameLookups = 2;
else
cupsdLogMessage(CUPSD_LOG_WARN, "Unknown HostNameLookups %s on line %d of %s.",
value, linenum, ConfigurationFile);
}
else if (!_cups_strcasecmp(line, "AccessLogLevel") && value)
{
/*
* Amount of logging to do to access log...
*/
if (!_cups_strcasecmp(value, "all"))
AccessLogLevel = CUPSD_ACCESSLOG_ALL;
else if (!_cups_strcasecmp(value, "actions"))
AccessLogLevel = CUPSD_ACCESSLOG_ACTIONS;
else if (!_cups_strcasecmp(value, "config"))
AccessLogLevel = CUPSD_ACCESSLOG_CONFIG;
else if (!_cups_strcasecmp(value, "none"))
AccessLogLevel = CUPSD_ACCESSLOG_NONE;
else
cupsdLogMessage(CUPSD_LOG_WARN, "Unknown AccessLogLevel %s on line %d of %s.",
value, linenum, ConfigurationFile);
}
else if (!_cups_strcasecmp(line, "LogLevel") && value)
{
/*
* Amount of logging to do to error log...
*/
if (!_cups_strcasecmp(value, "debug2"))
LogLevel = CUPSD_LOG_DEBUG2;
else if (!_cups_strcasecmp(value, "debug"))
LogLevel = CUPSD_LOG_DEBUG;
else if (!_cups_strcasecmp(value, "info"))
LogLevel = CUPSD_LOG_INFO;
else if (!_cups_strcasecmp(value, "notice"))
LogLevel = CUPSD_LOG_NOTICE;
else if (!_cups_strcasecmp(value, "warn"))
LogLevel = CUPSD_LOG_WARN;
else if (!_cups_strcasecmp(value, "error"))
LogLevel = CUPSD_LOG_ERROR;
else if (!_cups_strcasecmp(value, "crit"))
LogLevel = CUPSD_LOG_CRIT;
else if (!_cups_strcasecmp(value, "alert"))
LogLevel = CUPSD_LOG_ALERT;
else if (!_cups_strcasecmp(value, "emerg"))
LogLevel = CUPSD_LOG_EMERG;
else if (!_cups_strcasecmp(value, "none"))
LogLevel = CUPSD_LOG_NONE;
else
cupsdLogMessage(CUPSD_LOG_WARN, "Unknown LogLevel %s on line %d of %s.",
value, linenum, ConfigurationFile);
}
else if (!_cups_strcasecmp(line, "LogTimeFormat") && value)
{
/*
* Amount of logging to do to error log...
*/
if (!_cups_strcasecmp(value, "standard"))
LogTimeFormat = CUPSD_TIME_STANDARD;
else if (!_cups_strcasecmp(value, "usecs"))
LogTimeFormat = CUPSD_TIME_USECS;
else
cupsdLogMessage(CUPSD_LOG_WARN, "Unknown LogTimeFormat %s on line %d of %s.",
value, linenum, ConfigurationFile);
}
else if (!_cups_strcasecmp(line, "ServerTokens") && value)
{
/*
* Set the string used for the Server header...
*/
struct utsname plat; /* Platform info */
uname(&plat);
if (!_cups_strcasecmp(value, "ProductOnly"))
cupsdSetString(&ServerHeader, "CUPS IPP");
else if (!_cups_strcasecmp(value, "Major"))
cupsdSetStringf(&ServerHeader, "CUPS/%d IPP/2", CUPS_VERSION_MAJOR);
else if (!_cups_strcasecmp(value, "Minor"))
cupsdSetStringf(&ServerHeader, "CUPS/%d.%d IPP/2.1", CUPS_VERSION_MAJOR,
CUPS_VERSION_MINOR);
else if (!_cups_strcasecmp(value, "Minimal"))
cupsdSetString(&ServerHeader, CUPS_MINIMAL " IPP/2.1");
else if (!_cups_strcasecmp(value, "OS"))
cupsdSetStringf(&ServerHeader, CUPS_MINIMAL " (%s %s) IPP/2.1",
plat.sysname, plat.release);
else if (!_cups_strcasecmp(value, "Full"))
cupsdSetStringf(&ServerHeader, CUPS_MINIMAL " (%s %s; %s) IPP/2.1",
plat.sysname, plat.release, plat.machine);
else if (!_cups_strcasecmp(value, "None"))
cupsdSetString(&ServerHeader, "");
else
cupsdLogMessage(CUPSD_LOG_WARN, "Unknown ServerTokens %s on line %d of %s.",
value, linenum, ConfigurationFile);
}
else if (!_cups_strcasecmp(line, "PassEnv") && value)
{
/*
* PassEnv variable [... variable]
*/
for (; *value;)
{
for (valuelen = 0; value[valuelen]; valuelen ++)
if (_cups_isspace(value[valuelen]) || value[valuelen] == ',')
break;
if (value[valuelen])
{
value[valuelen] = '\0';
valuelen ++;
}
cupsdSetEnv(value, NULL);
for (value += valuelen; *value; value ++)
if (!_cups_isspace(*value) && *value != ',')
break;
}
}
else if (!_cups_strcasecmp(line, "ServerAlias") && value)
{
/*
* ServerAlias name [... name]
*/
if (!ServerAlias)
ServerAlias = cupsArrayNew(NULL, NULL);
for (; *value;)
{
for (valuelen = 0; value[valuelen]; valuelen ++)
if (_cups_isspace(value[valuelen]) || value[valuelen] == ',')
break;
if (value[valuelen])
{
value[valuelen] = '\0';
valuelen ++;
}
cupsdAddAlias(ServerAlias, value);
for (value += valuelen; *value; value ++)
if (!_cups_isspace(*value) && *value != ',')
break;
}
}
else if (!_cups_strcasecmp(line, "SetEnv") && value)
{
/*
* SetEnv variable value
*/
for (valueptr = value; *valueptr && !isspace(*valueptr & 255); valueptr ++);
if (*valueptr)
{
/*
* Found a value...
*/
while (isspace(*valueptr & 255))
*valueptr++ = '\0';
cupsdSetEnv(value, valueptr);
}
else
cupsdLogMessage(CUPSD_LOG_ERROR,
"Missing value for SetEnv directive on line %d of %s.",
linenum, ConfigurationFile);
}
else if (!_cups_strcasecmp(line, "AccessLog") ||
!_cups_strcasecmp(line, "CacheDir") ||
!_cups_strcasecmp(line, "ConfigFilePerm") ||
!_cups_strcasecmp(line, "DataDir") ||
!_cups_strcasecmp(line, "DocumentRoot") ||
!_cups_strcasecmp(line, "ErrorLog") ||
!_cups_strcasecmp(line, "FatalErrors") ||
!_cups_strcasecmp(line, "FileDevice") ||
!_cups_strcasecmp(line, "FontPath") ||
!_cups_strcasecmp(line, "Group") ||
!_cups_strcasecmp(line, "LogFilePerm") ||
!_cups_strcasecmp(line, "LPDConfigFile") ||
!_cups_strcasecmp(line, "PageLog") ||
!_cups_strcasecmp(line, "Printcap") ||
!_cups_strcasecmp(line, "PrintcapFormat") ||
!_cups_strcasecmp(line, "RemoteRoot") ||
!_cups_strcasecmp(line, "RequestRoot") ||
!_cups_strcasecmp(line, "ServerBin") ||
!_cups_strcasecmp(line, "ServerCertificate") ||
!_cups_strcasecmp(line, "ServerKey") ||
!_cups_strcasecmp(line, "ServerKeychain") ||
!_cups_strcasecmp(line, "ServerRoot") ||
!_cups_strcasecmp(line, "SMBConfigFile") ||
!_cups_strcasecmp(line, "StateDir") ||
!_cups_strcasecmp(line, "SystemGroup") ||
!_cups_strcasecmp(line, "SystemGroupAuthKey") ||
!_cups_strcasecmp(line, "TempDir") ||
!_cups_strcasecmp(line, "User"))
{
cupsdLogMessage(CUPSD_LOG_INFO,
"Please move \"%s%s%s\" on line %d of %s to the %s file; "
"this will become an error in a future release.",
line, value ? " " : "", value ? value : "", linenum,
ConfigurationFile, CupsFilesFile);
}
else
parse_variable(ConfigurationFile, linenum, line, value,
sizeof(cupsd_vars) / sizeof(cupsd_vars[0]), cupsd_vars);
}
return (1);
}
| 1 |
[] |
cups
|
d47f6aec436e0e9df6554436e391471097686ecc
| 258,831,427,634,352,750,000,000,000,000,000,000,000 | 564 |
Fix local privilege escalation to root and sandbox bypasses in scheduler
(rdar://37836779, rdar://37836995, rdar://37837252, rdar://37837581)
|
do_implicit_handshake (GTlsConnectionBase *tls,
gint64 timeout,
GCancellable *cancellable,
GError **error)
{
GTlsConnectionBasePrivate *priv = g_tls_connection_base_get_instance_private (tls);
GTlsConnectionBaseClass *tls_class = G_TLS_CONNECTION_BASE_GET_CLASS (tls);
gint64 *thread_timeout = NULL;
/* We have op_mutex */
g_assert (!priv->handshake_context);
if (timeout != 0)
{
priv->handshake_context = g_main_context_new ();
g_main_context_push_thread_default (priv->handshake_context);
}
else
{
priv->handshake_context = g_main_context_ref_thread_default ();
}
g_assert (!priv->implicit_handshake);
priv->implicit_handshake = g_task_new (tls, cancellable,
timeout ? sync_handshake_thread_completed : NULL,
NULL);
g_task_set_source_tag (priv->implicit_handshake, do_implicit_handshake);
g_task_set_name (priv->implicit_handshake, "[glib-networking] do_implicit_handshake");
thread_timeout = g_new0 (gint64, 1);
g_task_set_task_data (priv->implicit_handshake,
thread_timeout, g_free);
if (tls_class->prepare_handshake)
tls_class->prepare_handshake (tls, priv->advertised_protocols);
if (timeout != 0)
{
GError *my_error = NULL;
gboolean success;
/* In the blocking case, run the handshake operation synchronously in
* another thread, and delegate handling the timeout to that thread; it
* should return G_IO_ERROR_TIMED_OUT iff (timeout > 0) and the operation
* times out. If (timeout < 0) it should block indefinitely until the
* operation is complete or errors. */
*thread_timeout = timeout;
g_mutex_unlock (&priv->op_mutex);
g_task_set_return_on_cancel (priv->implicit_handshake, TRUE);
g_task_run_in_thread (priv->implicit_handshake, handshake_thread);
crank_sync_handshake_context (tls, cancellable);
success = finish_handshake (tls,
priv->implicit_handshake,
&my_error);
g_main_context_pop_thread_default (priv->handshake_context);
g_clear_pointer (&priv->handshake_context, g_main_context_unref);
g_clear_object (&priv->implicit_handshake);
yield_op (tls, G_TLS_CONNECTION_BASE_OP_HANDSHAKE,
G_TLS_CONNECTION_BASE_OK);
g_mutex_lock (&priv->op_mutex);
if (my_error)
g_propagate_error (error, my_error);
return success;
}
else
{
/* In the non-blocking case, start the asynchronous handshake operation
* and return EWOULDBLOCK to the caller, who will handle polling for
* completion of the handshake and whatever operation they actually cared
* about. Run the actual operation as blocking in its thread. */
*thread_timeout = -1; /* blocking */
g_task_run_in_thread (priv->implicit_handshake,
async_handshake_thread);
/* Intentionally not translated because this is not a fatal error to be
* presented to the user, and to avoid this showing up in profiling. */
g_set_error_literal (error, G_IO_ERROR, G_IO_ERROR_WOULD_BLOCK, "Operation would block");
return FALSE;
}
}
| 0 |
[
"CWE-295"
] |
glib-networking
|
29513946809590c4912550f6f8620468f9836d94
| 20,184,556,789,751,474,000,000,000,000,000,000,000 | 89 |
Return bad identity error if identity is unset
When the server-identity property of GTlsClientConnection is unset, the
documentation sasy we need to fail the certificate verification with
G_TLS_CERTIFICATE_BAD_IDENTITY. This is important because otherwise,
it's easy for applications to fail to specify server identity.
Unfortunately, we did not correctly implement the intended, documented
behavior. When server identity is missing, we check the validity of the
TLS certificate, but do not check if it corresponds to the expected
server (since we have no expected server). Then we assume the identity
is good, instead of returning bad identity, as documented. This means,
for example, that evil.com can present a valid certificate issued to
evil.com, and we would happily accept it for paypal.com.
Fixes #135
|
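The rule the fix enforces can be sketched in a few lines (illustrative, not glib-networking's actual verification code):

#include <gio/gio.h>

/* With no expected server identity, mark the certificate BAD_IDENTITY
 * rather than silently accepting it for any host. */
static GTlsCertificateFlags
verify_identity(GTlsCertificateFlags flags, GSocketConnectable *identity)
{
    if (identity == NULL)
        return flags | G_TLS_CERTIFICATE_BAD_IDENTITY;
    /* ... otherwise match the certificate against identity as usual ... */
    return flags;
}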
horizontalDifferenceF(float *ip, int n, int stride, uint16 *wp, uint16 *FromLT2)
{
int32 r1, g1, b1, a1, r2, g2, b2, a2, mask;
float fltsize = Fltsize;
#define CLAMP(v) ( (v<(float)0.) ? 0 \
: (v<(float)2.) ? FromLT2[(int)(v*fltsize)] \
: (v>(float)24.2) ? 2047 \
: LogK1*log(v*LogK2) + 0.5 )
mask = CODE_MASK;
if (n >= stride) {
if (stride == 3) {
r2 = wp[0] = (uint16) CLAMP(ip[0]);
g2 = wp[1] = (uint16) CLAMP(ip[1]);
b2 = wp[2] = (uint16) CLAMP(ip[2]);
n -= 3;
while (n > 0) {
n -= 3;
wp += 3;
ip += 3;
r1 = (int32) CLAMP(ip[0]); wp[0] = (uint16)((r1-r2) & mask); r2 = r1;
g1 = (int32) CLAMP(ip[1]); wp[1] = (uint16)((g1-g2) & mask); g2 = g1;
b1 = (int32) CLAMP(ip[2]); wp[2] = (uint16)((b1-b2) & mask); b2 = b1;
}
} else if (stride == 4) {
r2 = wp[0] = (uint16) CLAMP(ip[0]);
g2 = wp[1] = (uint16) CLAMP(ip[1]);
b2 = wp[2] = (uint16) CLAMP(ip[2]);
a2 = wp[3] = (uint16) CLAMP(ip[3]);
n -= 4;
while (n > 0) {
n -= 4;
wp += 4;
ip += 4;
r1 = (int32) CLAMP(ip[0]); wp[0] = (uint16)((r1-r2) & mask); r2 = r1;
g1 = (int32) CLAMP(ip[1]); wp[1] = (uint16)((g1-g2) & mask); g2 = g1;
b1 = (int32) CLAMP(ip[2]); wp[2] = (uint16)((b1-b2) & mask); b2 = b1;
a1 = (int32) CLAMP(ip[3]); wp[3] = (uint16)((a1-a2) & mask); a2 = a1;
}
} else {
REPEAT(stride, wp[0] = (uint16) CLAMP(ip[0]); wp++; ip++)
n -= stride;
while (n > 0) {
REPEAT(stride,
wp[0] = (uint16)(((int32)CLAMP(ip[0])-(int32)CLAMP(ip[-stride])) & mask);
wp++; ip++)
n -= stride;
}
}
}
}
| 0 |
[
"CWE-119",
"CWE-787"
] |
libtiff
|
83a4b92815ea04969d494416eaae3d4c6b338e4a
| 155,579,648,849,169,340,000,000,000,000,000,000,000 | 52 |
* tools/tiffcrop.c: fix various out-of-bounds write vulnerabilities
in heap or stack allocated buffers. Reported as MSVR 35093,
MSVR 35096 and MSVR 35097. Discovered by Axel Souchet and Vishal
Chauhan from the MSRC Vulnerabilities & Mitigations team.
* tools/tiff2pdf.c: fix out-of-bounds write vulnerabilities in
heap allocate buffer in t2p_process_jpeg_strip(). Reported as MSVR
35098. Discovered by Axel Souchet and Vishal Chauhan from the MSRC
Vulnerabilities & Mitigations team.
* libtiff/tif_pixarlog.c: fix out-of-bounds write vulnerabilities
in heap allocated buffers. Reported as MSVR 35094. Discovered by
Axel Souchet and Vishal Chauhan from the MSRC Vulnerabilities &
Mitigations team.
* libtiff/tif_write.c: fix issue in error code path of TIFFFlushData1()
that didn't reset the tif_rawcc and tif_rawcp members. I'm not
completely sure if that could happen in practice outside of the odd
behaviour of t2p_seekproc() of tiff2pdf). The report points that a
better fix could be to check the return value of TIFFFlushData1() in
places where it isn't done currently, but it seems this patch is enough.
Reported as MSVR 35095. Discovered by Axel Souchet & Vishal Chauhan &
Suha Can from the MSRC Vulnerabilities & Mitigations team.
|
static bool mysql_change_partitions(ALTER_PARTITION_PARAM_TYPE *lpt)
{
char path[FN_REFLEN+1];
int error;
handler *file= lpt->table->file;
THD *thd= lpt->thd;
DBUG_ENTER("mysql_change_partitions");
build_table_filename(path, sizeof(path) - 1, lpt->db.str, lpt->table_name.str, "", 0);
if (mysql_trans_prepare_alter_copy_data(thd))
DBUG_RETURN(TRUE);
/* TODO: test if bulk_insert would increase the performance */
if (unlikely((error= file->ha_change_partitions(lpt->create_info, path,
&lpt->copied,
&lpt->deleted,
lpt->pack_frm_data,
lpt->pack_frm_len))))
{
file->print_error(error, MYF(error != ER_OUTOFMEMORY ? 0 : ME_FATALERROR));
}
if (mysql_trans_commit_alter_copy_data(thd))
error= 1; /* The error has been reported */
DBUG_RETURN(MY_TEST(error));
}
| 0 |
[
"CWE-416"
] |
server
|
c02ebf3510850ba78a106be9974c94c3b97d8585
| 249,442,146,433,373,600,000,000,000,000,000,000,000 | 29 |
MDEV-24176 Preparations
1. moved fix_vcol_exprs() call to open_table()
mysql_alter_table() doesn't do lock_tables() so it cannot win from
fix_vcol_exprs() from there. Tests affected: main.default_session
2. Vanilla cleanups and comments.
|
int proc_dointvec(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
return -ENOSYS;
}
| 0 |
[
"CWE-284",
"CWE-264"
] |
linux
|
bfdc0b497faa82a0ba2f9dddcf109231dd519fcc
| 305,856,483,007,385,660,000,000,000,000,000,000,000 | 5 |
sysctl: restrict write access to dmesg_restrict
When dmesg_restrict is set to 1 CAP_SYS_ADMIN is needed to read the kernel
ring buffer. But a root user without CAP_SYS_ADMIN is able to reset
dmesg_restrict to 0.
This is an issue when e.g. LXC (Linux Containers) are used and complete
user space is running without CAP_SYS_ADMIN. A unprivileged and jailed
root user can bypass the dmesg_restrict protection.
With this patch writing to dmesg_restrict is only allowed when root has
CAP_SYS_ADMIN.
Signed-off-by: Richard Weinberger <[email protected]>
Acked-by: Dan Rosenberg <[email protected]>
Acked-by: Serge E. Hallyn <[email protected]>
Cc: Eric Paris <[email protected]>
Cc: Kees Cook <[email protected]>
Cc: James Morris <[email protected]>
Cc: Eugene Teo <[email protected]>
Cc: <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
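A kernel-style sketch of the gate described above, modelled on the later proc_dointvec_minmax_sysadmin() helper; treat it as illustrative, not the exact patch:

static int dmesg_restrict_handler(struct ctl_table *table, int write,
                                  void __user *buffer, size_t *lenp,
                                  loff_t *ppos)
{
    if (write && !capable(CAP_SYS_ADMIN))
        return -EPERM;            /* only CAP_SYS_ADMIN may change the knob */
    return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
}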
void btrfs_destroy_dev_replace_tgtdev(struct btrfs_device *tgtdev)
{
struct btrfs_fs_devices *fs_devices = tgtdev->fs_info->fs_devices;
mutex_lock(&fs_devices->device_list_mutex);
btrfs_sysfs_remove_device(tgtdev);
if (tgtdev->bdev)
fs_devices->open_devices--;
fs_devices->num_devices--;
btrfs_assign_next_active_device(tgtdev, NULL);
list_del_rcu(&tgtdev->dev_list);
mutex_unlock(&fs_devices->device_list_mutex);
/*
* The update_dev_time() with in btrfs_scratch_superblocks()
* may lead to a call to btrfs_show_devname() which will try
* to hold device_list_mutex. And here this device
* is already out of device list, so we don't have to hold
* the device_list_mutex lock.
*/
btrfs_scratch_superblocks(tgtdev->fs_info, tgtdev->bdev,
tgtdev->name->str);
btrfs_close_bdev(tgtdev);
synchronize_rcu();
btrfs_free_device(tgtdev);
}
| 0 |
[
"CWE-476",
"CWE-703"
] |
linux
|
e4571b8c5e9ffa1e85c0c671995bd4dcc5c75091
| 101,174,345,887,776,480,000,000,000,000,000,000,000 | 33 |
btrfs: fix NULL pointer dereference when deleting device by invalid id
[BUG]
It's easy to trigger NULL pointer dereference, just by removing a
non-existing device id:
# mkfs.btrfs -f -m single -d single /dev/test/scratch1 \
/dev/test/scratch2
# mount /dev/test/scratch1 /mnt/btrfs
# btrfs device remove 3 /mnt/btrfs
Then we have the following kernel NULL pointer dereference:
BUG: kernel NULL pointer dereference, address: 0000000000000000
#PF: supervisor read access in kernel mode
#PF: error_code(0x0000) - not-present page
PGD 0 P4D 0
Oops: 0000 [#1] PREEMPT SMP NOPTI
CPU: 9 PID: 649 Comm: btrfs Not tainted 5.14.0-rc3-custom+ #35
Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 0.0.0 02/06/2015
RIP: 0010:btrfs_rm_device+0x4de/0x6b0 [btrfs]
btrfs_ioctl+0x18bb/0x3190 [btrfs]
? lock_is_held_type+0xa5/0x120
? find_held_lock.constprop.0+0x2b/0x80
? do_user_addr_fault+0x201/0x6a0
? lock_release+0xd2/0x2d0
? __x64_sys_ioctl+0x83/0xb0
__x64_sys_ioctl+0x83/0xb0
do_syscall_64+0x3b/0x90
entry_SYSCALL_64_after_hwframe+0x44/0xae
[CAUSE]
Commit a27a94c2b0c7 ("btrfs: Make btrfs_find_device_by_devspec return
btrfs_device directly") moves the "missing" device path check into
btrfs_rm_device().
But btrfs_rm_device() itself can have a case where it only receives
@devid, with NULL as @device_path.
In that case, calling strcmp() on NULL will trigger the NULL pointer
dereference.
Before that commit, we handled the "missing" case inside
btrfs_find_device_by_devspec(), which would not check @device_path at all
if @devid was provided, so there was no way to trigger the bug.
[FIX]
Before calling strcmp(), also make sure @device_path is not NULL.
Fixes: a27a94c2b0c7 ("btrfs: Make btrfs_find_device_by_devspec return btrfs_device directly")
CC: [email protected] # 5.4+
Reported-by: butt3rflyh4ck <[email protected]>
Reviewed-by: Anand Jain <[email protected]>
Signed-off-by: Qu Wenruo <[email protected]>
Reviewed-by: David Sterba <[email protected]>
Signed-off-by: David Sterba <[email protected]>
|
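The [FIX] is a one-line guard. A self-contained sketch of the pattern (the helper name is hypothetical; in the real code the check sits inline in btrfs_rm_device()):

#include <string.h>

/* The devid-only ioctl path passes NULL as @device_path, so it must be
   checked before strcmp() ever sees it. */
int device_spec_is_missing(const char *device_path)
{
    return device_path != NULL && strcmp(device_path, "missing") == 0;
}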
__nvmet_fc_fcp_nvme_cmd_done(struct nvmet_fc_tgtport *tgtport,
struct nvmet_fc_fcp_iod *fod, int status)
{
struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common;
struct nvme_completion *cqe = &fod->rspiubuf.cqe;
unsigned long flags;
bool abort;
spin_lock_irqsave(&fod->flock, flags);
abort = fod->abort;
spin_unlock_irqrestore(&fod->flock, flags);
/* if we have a CQE, snoop the last sq_head value */
if (!status)
fod->queue->sqhd = cqe->sq_head;
if (abort) {
nvmet_fc_abort_op(tgtport, fod);
return;
}
/* if an error handling the cmd post initial parsing */
if (status) {
/* fudge up a failed CQE status for our transport error */
memset(cqe, 0, sizeof(*cqe));
cqe->sq_head = fod->queue->sqhd; /* echo last cqe sqhd */
cqe->sq_id = cpu_to_le16(fod->queue->qid);
cqe->command_id = sqe->command_id;
cqe->status = cpu_to_le16(status);
} else {
/*
* try to push the data even if the SQE status is non-zero.
* There may be a status where data still was intended to
* be moved
*/
if ((fod->io_dir == NVMET_FCP_READ) && (fod->data_sg_cnt)) {
/* push the data over before sending rsp */
nvmet_fc_transfer_fcp_data(tgtport, fod,
NVMET_FCOP_READDATA);
return;
}
/* writes & no data - fall thru */
}
/* data no longer needed */
nvmet_fc_free_tgt_pgs(fod);
nvmet_fc_xmt_fcp_rsp(tgtport, fod);
}
| 0 |
[
"CWE-119",
"CWE-787"
] |
linux
|
0c319d3a144d4b8f1ea2047fd614d2149b68f889
| 149,647,547,399,789,920,000,000,000,000,000,000,000 | 51 |
nvmet-fc: ensure target queue id within range.
When searching for queue IDs, ensure they are within the expected range.
Signed-off-by: James Smart <[email protected]>
Signed-off-by: Christoph Hellwig <[email protected]>
Signed-off-by: Jens Axboe <[email protected]>
|
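A self-contained sketch of the range check the message describes. The constant's value and the qid encoding are assumptions for illustration; the driver decodes the queue id from the connection id and must bound it before indexing:

#include <stdint.h>

#define NVMET_NR_QUEUES 128              /* illustrative; the driver defines it */

struct tgt_queue;                        /* stand-in for struct nvmet_fc_tgt_queue */

static struct tgt_queue *queues[NVMET_NR_QUEUES + 1];

/* Validate the queue id decoded from the connection id before it indexes
   the queue table; an out-of-range id simply fails the lookup. */
struct tgt_queue *find_target_queue(uint64_t connection_id)
{
    uint16_t qid = (uint16_t) (connection_id & 0xffff);  /* assumed encoding */

    if (qid > NVMET_NR_QUEUES)           /* the missing range check */
        return NULL;
    return queues[qid];
}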
static bool vring_notify(VirtIODevice *vdev, VirtQueue *vq)
{
uint16_t old, new;
bool v;
/* We need to expose used array entries before checking used event. */
smp_mb();
/* Always notify when queue is empty (when feature acknowledge) */
if (((vdev->guest_features & (1 << VIRTIO_F_NOTIFY_ON_EMPTY)) &&
!vq->inuse && vring_avail_idx(vq) == vq->last_avail_idx)) {
return true;
}
if (!(vdev->guest_features & (1 << VIRTIO_RING_F_EVENT_IDX))) {
return !(vring_avail_flags(vq) & VRING_AVAIL_F_NO_INTERRUPT);
}
v = vq->signalled_used_valid;
vq->signalled_used_valid = true;
old = vq->signalled_used;
new = vq->signalled_used = vring_used_idx(vq);
return !v || vring_need_event(vring_used_event(vq), new, old);
}
| 0 |
[
"CWE-94"
] |
qemu
|
cc45995294b92d95319b4782750a3580cabdbc0c
| 242,882,446,242,202,400,000,000,000,000,000,000,000 | 22 |
virtio: out-of-bounds buffer write on invalid state load
CVE-2013-4151 QEMU 1.0 out-of-bounds buffer write in
virtio_load@hw/virtio/virtio.c
So we have this code since way back when:
num = qemu_get_be32(f);
for (i = 0; i < num; i++) {
vdev->vq[i].vring.num = qemu_get_be32(f);
The array of vqs has size VIRTIO_PCI_QUEUE_MAX, so
on invalid input this will write beyond the end of the buffer.
Signed-off-by: Michael S. Tsirkin <[email protected]>
Reviewed-by: Michael Roth <[email protected]>
Signed-off-by: Juan Quintela <[email protected]>
|
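A self-contained sketch of the missing validation, with stand-in types: the count read from the migration stream is untrusted and has to be bounded before it drives writes into the fixed-size queue array:

#include <stdint.h>

#define VIRTIO_PCI_QUEUE_MAX 64          /* the fixed size of the vq array */

struct vq_state
{
    uint32_t vring_num;
};

/* num comes from the migration stream and is attacker-controlled; bound it
   before the loop, otherwise the writes run past vq[VIRTIO_PCI_QUEUE_MAX]. */
int load_vq_state(struct vq_state *vq, uint32_t num, const uint32_t *stream)
{
    uint32_t i;

    if (num > VIRTIO_PCI_QUEUE_MAX)      /* invalid state: reject the load */
        return -1;
    for (i = 0; i < num; i++)
        vq[i].vring_num = stream[i];
    return 0;
}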
irc_server_hdata_server_cb (const void *pointer, void *data,
const char *hdata_name)
{
struct t_hdata *hdata;
/* make C compiler happy */
(void) pointer;
(void) data;
hdata = weechat_hdata_new (hdata_name, "prev_server", "next_server",
0, 0, NULL, NULL);
if (hdata)
{
WEECHAT_HDATA_VAR(struct t_irc_server, name, STRING, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, options, POINTER, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, temp_server, INTEGER, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, reloading_from_config, INTEGER, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, reloaded_from_config, INTEGER, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, addresses_eval, STRING, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, addresses_count, INTEGER, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, addresses_array, STRING, 0, "addresses_count", NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, ports_array, INTEGER, 0, "addresses_count", NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, retry_array, INTEGER, 0, "addresses_count", NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, index_current_address, INTEGER, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, current_address, STRING, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, current_ip, STRING, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, current_port, INTEGER, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, current_retry, INTEGER, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, sock, INTEGER, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, hook_connect, POINTER, 0, NULL, "hook");
WEECHAT_HDATA_VAR(struct t_irc_server, hook_fd, POINTER, 0, NULL, "hook");
WEECHAT_HDATA_VAR(struct t_irc_server, hook_timer_connection, POINTER, 0, NULL, "hook");
WEECHAT_HDATA_VAR(struct t_irc_server, hook_timer_sasl, POINTER, 0, NULL, "hook");
WEECHAT_HDATA_VAR(struct t_irc_server, is_connected, INTEGER, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, ssl_connected, INTEGER, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, disconnected, INTEGER, 0, NULL, NULL);
#ifdef HAVE_GNUTLS
WEECHAT_HDATA_VAR(struct t_irc_server, gnutls_sess, OTHER, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, tls_cert, OTHER, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, tls_cert_key, OTHER, 0, NULL, NULL);
#endif /* HAVE_GNUTLS */
WEECHAT_HDATA_VAR(struct t_irc_server, unterminated_message, STRING, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, nicks_count, INTEGER, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, nicks_array, STRING, 0, "nicks_count", NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, nick_first_tried, INTEGER, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, nick_alternate_number, INTEGER, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, nick, STRING, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, nick_modes, STRING, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, host, STRING, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, checking_cap_ls, INTEGER, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, cap_ls, HASHTABLE, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, checking_cap_list, INTEGER, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, cap_list, HASHTABLE, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, isupport, STRING, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, prefix_modes, STRING, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, prefix_chars, STRING, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, nick_max_length, INTEGER, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, user_max_length, INTEGER, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, host_max_length, INTEGER, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, casemapping, INTEGER, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, chantypes, STRING, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, chanmodes, STRING, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, monitor, INTEGER, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, monitor_time, TIME, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, reconnect_delay, INTEGER, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, reconnect_start, TIME, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, command_time, TIME, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, reconnect_join, INTEGER, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, disable_autojoin, INTEGER, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, is_away, INTEGER, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, away_message, STRING, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, away_time, TIME, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, lag, INTEGER, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, lag_displayed, INTEGER, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, lag_check_time, OTHER, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, lag_next_check, TIME, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, lag_last_refresh, TIME, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, cmd_list_regexp, POINTER, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, last_user_message, TIME, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, last_away_check, TIME, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, last_data_purge, TIME, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, outqueue, POINTER, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, last_outqueue, POINTER, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, redirects, POINTER, 0, NULL, "irc_redirect");
WEECHAT_HDATA_VAR(struct t_irc_server, last_redirect, POINTER, 0, NULL, "irc_redirect");
WEECHAT_HDATA_VAR(struct t_irc_server, notify_list, POINTER, 0, NULL, "irc_notify");
WEECHAT_HDATA_VAR(struct t_irc_server, last_notify, POINTER, 0, NULL, "irc_notify");
WEECHAT_HDATA_VAR(struct t_irc_server, notify_count, INTEGER, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, join_manual, HASHTABLE, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, join_channel_key, HASHTABLE, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, join_noswitch, HASHTABLE, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, buffer, POINTER, 0, NULL, "buffer");
WEECHAT_HDATA_VAR(struct t_irc_server, buffer_as_string, STRING, 0, NULL, NULL);
WEECHAT_HDATA_VAR(struct t_irc_server, channels, POINTER, 0, NULL, "irc_channel");
WEECHAT_HDATA_VAR(struct t_irc_server, last_channel, POINTER, 0, NULL, "irc_channel");
WEECHAT_HDATA_VAR(struct t_irc_server, prev_server, POINTER, 0, NULL, hdata_name);
WEECHAT_HDATA_VAR(struct t_irc_server, next_server, POINTER, 0, NULL, hdata_name);
WEECHAT_HDATA_LIST(irc_servers, WEECHAT_HDATA_LIST_CHECK_POINTERS);
WEECHAT_HDATA_LIST(last_irc_server, 0);
}
return hdata;
}
| 0 |
[
"CWE-120",
"CWE-787"
] |
weechat
|
40ccacb4330a64802b1f1e28ed9a6b6d3ca9197f
| 160,977,661,095,206,830,000,000,000,000,000,000,000 | 102 |
irc: fix crash when a new message 005 is received with longer nick prefixes
Thanks to Stuart Nevans Locke for reporting the issue.
|
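The overflow class here (CWE-120) comes from copying PREFIX data from a second 005 message into storage sized for the first one. A stand-in sketch of the defensive pattern (the field names mirror struct t_irc_server above; the struct and function themselves are hypothetical):

#include <stdlib.h>
#include <string.h>

struct server_prefix { char *prefix_modes; char *prefix_chars; };

/* Replace the stored PREFIX strings instead of copying into buffers sized
   for the previous 005 message; longer prefix lists then just reallocate. */
int server_set_prefix(struct server_prefix *s,
                      const char *modes, const char *chars)
{
    char *m = strdup(modes);
    char *c = strdup(chars);
    if (m == NULL || c == NULL)
    {
        free(m);
        free(c);
        return 0;                        /* allocation failed: keep old values */
    }
    free(s->prefix_modes);               /* drop the old, possibly shorter, strings */
    free(s->prefix_chars);
    s->prefix_modes = m;
    s->prefix_chars = c;
    return 1;
}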
Channel* GetChanTarget() const { return chantarget; }
| 0 |
[
"CWE-200",
"CWE-732"
] |
inspircd
|
4350a11c663b0d75f8119743bffb7736d87abd4d
| 63,600,400,821,805,370,000,000,000,000,000,000,000 | 1 |
Fix sending malformed pong messages in some cases.
|
int vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry,
struct inode **delegated_inode, unsigned int flags)
{
int error;
bool is_dir = d_is_dir(old_dentry);
const unsigned char *old_name;
struct inode *source = old_dentry->d_inode;
struct inode *target = new_dentry->d_inode;
bool new_is_dir = false;
unsigned max_links = new_dir->i_sb->s_max_links;
if (source == target)
return 0;
error = may_delete(old_dir, old_dentry, is_dir);
if (error)
return error;
if (!target) {
error = may_create(new_dir, new_dentry);
} else {
new_is_dir = d_is_dir(new_dentry);
if (!(flags & RENAME_EXCHANGE))
error = may_delete(new_dir, new_dentry, is_dir);
else
error = may_delete(new_dir, new_dentry, new_is_dir);
}
if (error)
return error;
if (!old_dir->i_op->rename && !old_dir->i_op->rename2)
return -EPERM;
if (flags && !old_dir->i_op->rename2)
return -EINVAL;
/*
* If we are going to change the parent - check write permissions,
* we'll need to flip '..'.
*/
if (new_dir != old_dir) {
if (is_dir) {
error = inode_permission(source, MAY_WRITE);
if (error)
return error;
}
if ((flags & RENAME_EXCHANGE) && new_is_dir) {
error = inode_permission(target, MAY_WRITE);
if (error)
return error;
}
}
error = security_inode_rename(old_dir, old_dentry, new_dir, new_dentry,
flags);
if (error)
return error;
old_name = fsnotify_oldname_init(old_dentry->d_name.name);
dget(new_dentry);
if (!is_dir || (flags & RENAME_EXCHANGE))
lock_two_nondirectories(source, target);
else if (target)
mutex_lock(&target->i_mutex);
error = -EBUSY;
if (is_local_mountpoint(old_dentry) || is_local_mountpoint(new_dentry))
goto out;
if (max_links && new_dir != old_dir) {
error = -EMLINK;
if (is_dir && !new_is_dir && new_dir->i_nlink >= max_links)
goto out;
if ((flags & RENAME_EXCHANGE) && !is_dir && new_is_dir &&
old_dir->i_nlink >= max_links)
goto out;
}
if (is_dir && !(flags & RENAME_EXCHANGE) && target)
shrink_dcache_parent(new_dentry);
if (!is_dir) {
error = try_break_deleg(source, delegated_inode);
if (error)
goto out;
}
if (target && !new_is_dir) {
error = try_break_deleg(target, delegated_inode);
if (error)
goto out;
}
if (!old_dir->i_op->rename2) {
error = old_dir->i_op->rename(old_dir, old_dentry,
new_dir, new_dentry);
} else {
WARN_ON(old_dir->i_op->rename != NULL);
error = old_dir->i_op->rename2(old_dir, old_dentry,
new_dir, new_dentry, flags);
}
if (error)
goto out;
if (!(flags & RENAME_EXCHANGE) && target) {
if (is_dir)
target->i_flags |= S_DEAD;
dont_mount(new_dentry);
detach_mounts(new_dentry);
}
if (!(old_dir->i_sb->s_type->fs_flags & FS_RENAME_DOES_D_MOVE)) {
if (!(flags & RENAME_EXCHANGE))
d_move(old_dentry, new_dentry);
else
d_exchange(old_dentry, new_dentry);
}
out:
if (!is_dir || (flags & RENAME_EXCHANGE))
unlock_two_nondirectories(source, target);
else if (target)
mutex_unlock(&target->i_mutex);
dput(new_dentry);
if (!error) {
fsnotify_move(old_dir, new_dir, old_name, is_dir,
!(flags & RENAME_EXCHANGE) ? target : NULL, old_dentry);
if (flags & RENAME_EXCHANGE) {
fsnotify_move(new_dir, old_dir, old_dentry->d_name.name,
new_is_dir, NULL, new_dentry);
}
}
fsnotify_oldname_free(old_name);
return error;
}
| 0 |
[
"CWE-416"
] |
linux
|
f15133df088ecadd141ea1907f2c96df67c729f0
| 286,521,308,268,740,700,000,000,000,000,000,000,000 | 132 |
path_openat(): fix double fput()
path_openat() jumps to the wrong place after do_tmpfile() - it has
already done path_cleanup() (as part of path_lookupat() called by
do_tmpfile()), so doing that again can lead to double fput().
Cc: [email protected] # v3.11+
Signed-off-by: Al Viro <[email protected]>
|
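The double fput() is a control-flow bug: a helper that performs its own cleanup must not funnel into the shared cleanup label afterwards. A self-contained sketch of the shape of the fix (all names are stand-ins for the fs/namei.c ones):

/* Stand-ins for the helpers named in the message. */
static int cleaned_up;
static void path_cleanup(void) { cleaned_up++; }
static int do_tmpfile(void) { path_cleanup(); return -1; } /* cleans up itself */

/* do_tmpfile() has already run path_cleanup(), so its error path must skip
   the shared cleanup label; landing on it again drops the reference twice. */
int path_openat_sketch(int tmpfile)
{
    int err;

    if (tmpfile) {
        err = do_tmpfile();
        if (err)
            goto out2;                   /* the fix: was "goto out" */
    }
    err = 0;                             /* normal lookup elided */
    goto out;                            /* success path still owns the reference */
out:
    path_cleanup();                      /* only paths that still hold the reference */
out2:
    return err;
}

int main(void)
{
    path_openat_sketch(1);
    return cleaned_up == 1 ? 0 : 1;      /* cleanup must run exactly once */
}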
static Image *ReadSGIImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
Image
*image;
MagickBooleanType
status;
MagickSizeType
n,
number_pixels;
MemoryInfo
*pixel_info;
register Quantum
*q;
register ssize_t
i,
x;
register unsigned char
*p;
SGIInfo
iris_info;
size_t
bytes_per_pixel,
quantum;
ssize_t
count,
y,
z;
unsigned char
*pixels;
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
image=AcquireImage(image_info,exception);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
/*
Read SGI raster header.
*/
(void) memset(&iris_info,0,sizeof(iris_info));
iris_info.magic=ReadBlobMSBShort(image);
do
{
/*
Verify SGI identifier.
*/
if (iris_info.magic != 0x01DA)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
iris_info.storage=(unsigned char) ReadBlobByte(image);
switch (iris_info.storage)
{
case 0x00: image->compression=NoCompression; break;
case 0x01: image->compression=RLECompression; break;
default:
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
}
iris_info.bytes_per_pixel=(unsigned char) ReadBlobByte(image);
if ((iris_info.bytes_per_pixel == 0) || (iris_info.bytes_per_pixel > 2))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
iris_info.dimension=ReadBlobMSBShort(image);
if ((iris_info.dimension == 0) || (iris_info.dimension > 3))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
iris_info.columns=ReadBlobMSBShort(image);
iris_info.rows=ReadBlobMSBShort(image);
iris_info.depth=ReadBlobMSBShort(image);
if ((iris_info.depth == 0) || (iris_info.depth > 4))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
iris_info.minimum_value=ReadBlobMSBLong(image);
iris_info.maximum_value=ReadBlobMSBLong(image);
iris_info.sans=ReadBlobMSBLong(image);
count=ReadBlob(image,sizeof(iris_info.name),(unsigned char *)
iris_info.name);
if ((size_t) count != sizeof(iris_info.name))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
iris_info.name[sizeof(iris_info.name)-1]='\0';
if (*iris_info.name != '\0')
(void) SetImageProperty(image,"label",iris_info.name,exception);
iris_info.pixel_format=ReadBlobMSBLong(image);
if (iris_info.pixel_format != 0)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
count=ReadBlob(image,sizeof(iris_info.filler),iris_info.filler);
if ((size_t) count != sizeof(iris_info.filler))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
image->columns=iris_info.columns;
image->rows=iris_info.rows;
image->alpha_trait=iris_info.depth == 4 ? BlendPixelTrait :
UndefinedPixelTrait;
image->depth=(size_t) MagickMin(iris_info.depth,MAGICKCORE_QUANTUM_DEPTH);
if (iris_info.pixel_format == 0)
image->depth=(size_t) MagickMin((size_t) 8*iris_info.bytes_per_pixel,
MAGICKCORE_QUANTUM_DEPTH);
if (iris_info.depth < 3)
{
image->storage_class=PseudoClass;
image->colors=(size_t) (iris_info.bytes_per_pixel > 1 ? 65535 : 256);
}
if ((image_info->ping != MagickFalse) && (image_info->number_scenes != 0))
if (image->scene >= (image_info->scene+image_info->number_scenes-1))
break;
if ((MagickSizeType) (image->columns*image->rows/255) > GetBlobSize(image))
ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile");
status=SetImageExtent(image,image->columns,image->rows,exception);
if (status != MagickFalse)
status=ResetImagePixels(image,exception);
if (status == MagickFalse)
return(DestroyImageList(image));
/*
Allocate SGI pixels.
*/
bytes_per_pixel=(size_t) iris_info.bytes_per_pixel;
number_pixels=(MagickSizeType) iris_info.columns*iris_info.rows;
if ((4*bytes_per_pixel*number_pixels) != ((MagickSizeType) (size_t)
(4*bytes_per_pixel*number_pixels)))
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
pixel_info=AcquireVirtualMemory(iris_info.columns,iris_info.rows*4*
bytes_per_pixel*sizeof(*pixels));
if (pixel_info == (MemoryInfo *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
pixels=(unsigned char *) GetVirtualMemoryBlob(pixel_info);
for (n=0; n < (4*bytes_per_pixel*number_pixels); n++)
pixels[n]=0;
if ((int) iris_info.storage != 0x01)
{
unsigned char
*scanline;
/*
Read standard image format.
*/
scanline=(unsigned char *) AcquireQuantumMemory(iris_info.columns,
bytes_per_pixel*sizeof(*scanline));
if (scanline == (unsigned char *) NULL)
{
pixel_info=RelinquishVirtualMemory(pixel_info);
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
}
for (z=0; z < (ssize_t) iris_info.depth; z++)
{
p=pixels+bytes_per_pixel*z;
for (y=0; y < (ssize_t) iris_info.rows; y++)
{
count=ReadBlob(image,bytes_per_pixel*iris_info.columns,scanline);
if (count != (ssize_t) (bytes_per_pixel*iris_info.columns))
break;
if (bytes_per_pixel == 2)
for (x=0; x < (ssize_t) iris_info.columns; x++)
{
*p=scanline[2*x];
*(p+1)=scanline[2*x+1];
p+=8;
}
else
for (x=0; x < (ssize_t) iris_info.columns; x++)
{
*p=scanline[x];
p+=4;
}
}
if (y < (ssize_t) iris_info.rows)
break;
}
scanline=(unsigned char *) RelinquishMagickMemory(scanline);
}
else
{
MemoryInfo
*packet_info;
size_t
*runlength;
ssize_t
offset,
*offsets;
unsigned char
*packets;
unsigned int
data_order;
/*
Read runlength-encoded image format.
*/
offsets=(ssize_t *) AcquireQuantumMemory((size_t) iris_info.rows,
iris_info.depth*sizeof(*offsets));
runlength=(size_t *) AcquireQuantumMemory(iris_info.rows,
iris_info.depth*sizeof(*runlength));
packet_info=AcquireVirtualMemory((size_t) iris_info.columns+10UL,4UL*
sizeof(*packets));
if ((offsets == (ssize_t *) NULL) || (runlength == (size_t *) NULL) ||
(packet_info == (MemoryInfo *) NULL))
{
offsets=(ssize_t *) RelinquishMagickMemory(offsets);
runlength=(size_t *) RelinquishMagickMemory(runlength);
if (packet_info != (MemoryInfo *) NULL)
packet_info=RelinquishVirtualMemory(packet_info);
pixel_info=RelinquishVirtualMemory(pixel_info);
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
}
packets=(unsigned char *) GetVirtualMemoryBlob(packet_info);
for (i=0; i < (ssize_t) (iris_info.rows*iris_info.depth); i++)
offsets[i]=(ssize_t) ReadBlobMSBSignedLong(image);
for (i=0; i < (ssize_t) (iris_info.rows*iris_info.depth); i++)
{
runlength[i]=ReadBlobMSBLong(image);
if (runlength[i] > (4*(size_t) iris_info.columns+10))
{
packet_info=RelinquishVirtualMemory(packet_info);
runlength=(size_t *) RelinquishMagickMemory(runlength);
offsets=(ssize_t *) RelinquishMagickMemory(offsets);
pixel_info=RelinquishVirtualMemory(pixel_info);
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
}
}
/*
Check data order.
*/
offset=0;
data_order=0;
for (y=0; ((y < (ssize_t) iris_info.rows) && (data_order == 0)); y++)
for (z=0; ((z < (ssize_t) iris_info.depth) && (data_order == 0)); z++)
{
if (offsets[y+z*iris_info.rows] < offset)
data_order=1;
offset=offsets[y+z*iris_info.rows];
}
offset=(ssize_t) TellBlob(image);
if (data_order == 1)
{
for (z=0; z < (ssize_t) iris_info.depth; z++)
{
p=pixels;
for (y=0; y < (ssize_t) iris_info.rows; y++)
{
if (offset != offsets[y+z*iris_info.rows])
{
offset=offsets[y+z*iris_info.rows];
offset=(ssize_t) SeekBlob(image,(MagickOffsetType) offset,
SEEK_SET);
}
count=ReadBlob(image,(size_t) runlength[y+z*iris_info.rows],
packets);
if (count != (ssize_t) runlength[y+z*iris_info.rows])
break;
offset+=(ssize_t) runlength[y+z*iris_info.rows];
status=SGIDecode(bytes_per_pixel,(ssize_t)
(runlength[y+z*iris_info.rows]/bytes_per_pixel),packets,
(ssize_t) iris_info.columns,p+bytes_per_pixel*z);
if (status == MagickFalse)
{
packet_info=RelinquishVirtualMemory(packet_info);
runlength=(size_t *) RelinquishMagickMemory(runlength);
offsets=(ssize_t *) RelinquishMagickMemory(offsets);
pixel_info=RelinquishVirtualMemory(pixel_info);
ThrowReaderException(CorruptImageError,
"ImproperImageHeader");
}
p+=(iris_info.columns*4*bytes_per_pixel);
}
if (y < (ssize_t) iris_info.rows)
break;
}
}
else
{
MagickOffsetType
position;
position=TellBlob(image);
p=pixels;
for (y=0; y < (ssize_t) iris_info.rows; y++)
{
for (z=0; z < (ssize_t) iris_info.depth; z++)
{
if (offset != offsets[y+z*iris_info.rows])
{
offset=offsets[y+z*iris_info.rows];
offset=(ssize_t) SeekBlob(image,(MagickOffsetType) offset,
SEEK_SET);
}
count=ReadBlob(image,(size_t) runlength[y+z*iris_info.rows],
packets);
if (count != (ssize_t) runlength[y+z*iris_info.rows])
break;
offset+=(ssize_t) runlength[y+z*iris_info.rows];
status=SGIDecode(bytes_per_pixel,(ssize_t)
(runlength[y+z*iris_info.rows]/bytes_per_pixel),packets,
(ssize_t) iris_info.columns,p+bytes_per_pixel*z);
if (status == MagickFalse)
{
packet_info=RelinquishVirtualMemory(packet_info);
runlength=(size_t *) RelinquishMagickMemory(runlength);
offsets=(ssize_t *) RelinquishMagickMemory(offsets);
pixel_info=RelinquishVirtualMemory(pixel_info);
ThrowReaderException(CorruptImageError,
"ImproperImageHeader");
}
}
if (z < (ssize_t) iris_info.depth)
break;
p+=(iris_info.columns*4*bytes_per_pixel);
}
offset=(ssize_t) SeekBlob(image,position,SEEK_SET);
}
packet_info=RelinquishVirtualMemory(packet_info);
runlength=(size_t *) RelinquishMagickMemory(runlength);
offsets=(ssize_t *) RelinquishMagickMemory(offsets);
}
/*
Convert SGI raster image to pixel packets.
*/
if (image->storage_class == DirectClass)
{
/*
Convert SGI image to DirectClass pixel packets.
*/
if (bytes_per_pixel == 2)
{
for (y=0; y < (ssize_t) image->rows; y++)
{
p=pixels+(image->rows-y-1)*8*image->columns;
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelRed(image,ScaleShortToQuantum((unsigned short)
((*(p+0) << 8) | (*(p+1)))),q);
SetPixelGreen(image,ScaleShortToQuantum((unsigned short)
((*(p+2) << 8) | (*(p+3)))),q);
SetPixelBlue(image,ScaleShortToQuantum((unsigned short)
((*(p+4) << 8) | (*(p+5)))),q);
SetPixelAlpha(image,OpaqueAlpha,q);
if (image->alpha_trait != UndefinedPixelTrait)
SetPixelAlpha(image,ScaleShortToQuantum((unsigned short)
((*(p+6) << 8) | (*(p+7)))),q);
p+=8;
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType)
y,image->rows);
if (status == MagickFalse)
break;
}
}
}
else
for (y=0; y < (ssize_t) image->rows; y++)
{
p=pixels+(image->rows-y-1)*4*image->columns;
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelRed(image,ScaleCharToQuantum(*p),q);
SetPixelGreen(image,ScaleCharToQuantum(*(p+1)),q);
SetPixelBlue(image,ScaleCharToQuantum(*(p+2)),q);
SetPixelAlpha(image,OpaqueAlpha,q);
if (image->alpha_trait != UndefinedPixelTrait)
SetPixelAlpha(image,ScaleCharToQuantum(*(p+3)),q);
p+=4;
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
}
}
else
{
/*
Create grayscale map.
*/
if (AcquireImageColormap(image,image->colors,exception) == MagickFalse)
{
pixel_info=RelinquishVirtualMemory(pixel_info);
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
}
/*
Convert SGI image to PseudoClass pixel packets.
*/
if (bytes_per_pixel == 2)
{
for (y=0; y < (ssize_t) image->rows; y++)
{
p=pixels+(image->rows-y-1)*8*image->columns;
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
quantum=(*p << 8);
quantum|=(*(p+1));
SetPixelIndex(image,(Quantum) quantum,q);
p+=8;
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType)
y,image->rows);
if (status == MagickFalse)
break;
}
}
}
else
for (y=0; y < (ssize_t) image->rows; y++)
{
p=pixels+(image->rows-y-1)*4*image->columns;
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelIndex(image,*p,q);
p+=4;
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
}
(void) SyncImage(image,exception);
}
pixel_info=RelinquishVirtualMemory(pixel_info);
if (EOFBlob(image) != MagickFalse)
{
ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile",
image->filename);
break;
}
/*
Proceed to next image.
*/
if (image_info->number_scenes != 0)
if (image->scene >= (image_info->scene+image_info->number_scenes-1))
break;
iris_info.magic=ReadBlobMSBShort(image);
if (iris_info.magic == 0x01DA)
{
/*
Allocate next image structure.
*/
AcquireNextImage(image_info,image,exception);
if (GetNextImageInList(image) == (Image *) NULL)
{
status=MagickFalse;
break;
}
image=SyncNextImageInList(image);
status=SetImageProgress(image,LoadImagesTag,TellBlob(image),
GetBlobSize(image));
if (status == MagickFalse)
break;
}
} while (iris_info.magic == 0x01DA);
(void) CloseBlob(image);
if (status == MagickFalse)
return(DestroyImageList(image));
return(GetFirstImageInList(image));
}
| 0 |
[
"CWE-787"
] |
ImageMagick
|
6ae32a9038e360b3491969d5d03d490884f02b4c
| 178,939,131,686,535,380,000,000,000,000,000,000,000 | 503 |
https://github.com/ImageMagick/ImageMagick/issues/1562
|
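The commit message is only an issue link, but the reader above already shows the mitigation pattern for CWE-787 in this decoder: validate every header field before it sizes an allocation or bounds a loop. A distilled, self-contained sketch (field widths follow the ReadBlobByte/ReadBlobMSBShort calls above; the blob-size heuristic is the same one used at the top of the loop):

#include <stdint.h>

/* A stand-in header struct; the real reader fills SGIInfo field by field. */
struct sgi_header
{
    uint16_t magic;
    uint8_t  storage;
    uint8_t  bytes_per_pixel;
    uint16_t dimension;
    uint16_t columns;
    uint16_t rows;
    uint16_t depth;
};

/* Returns 1 when the header is plausible; every field is checked before it
   can size a buffer or drive a loop, as ReadSGIImage() does. */
int sgi_header_valid(const struct sgi_header *h, uint64_t blob_size)
{
    if (h->magic != 0x01DA)
        return 0;                               /* not an SGI raster */
    if (h->storage > 0x01)
        return 0;                               /* only none/RLE storage */
    if (h->bytes_per_pixel == 0 || h->bytes_per_pixel > 2)
        return 0;
    if (h->dimension == 0 || h->dimension > 3)
        return 0;
    if (h->depth == 0 || h->depth > 4)
        return 0;
    /* cheap plausibility check against the file size, as in the loop above */
    if ((uint64_t) h->columns * h->rows / 255 > blob_size)
        return 0;
    return 1;
}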